Scraping movie records with Python and exporting them to an Excel spreadsheet

2023-04-27

#coding=utf-8
import urllib2
import re
import xlwt
import smtplib
import random
import time
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication

# 1. Fetch the whole page first

# The urllib2 module (Python 2) provides an interface for reading web page data, so HTTP and
# FTP resources can be read much like local files.
# We first define a getHtml() function:
# urllib2.urlopen() opens a URL.
# read() reads the data behind that URL; pass an address to getHtml() and the whole page is
# downloaded. Printing the result dumps the raw HTML. (A bare one-line version of this is
# shown right after this comment block.)
#
# # Add a User-Agent header
# Agent_list = [ ]
# user_agent = random.choice(Agent_list)
# page.add_header('User-Agent',user_agent)
#
#
# # Define an opener that routes requests through a proxy IP
# ip_list = []
# httpproxy_handler = urllib2.ProxyHandler({'http':random.choice(ip_list)})
# opener = urllib2.build_opener(httpproxy_handler)
# urllib2.install_opener(opener)
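#
# The bare fetch described above, with no extra header or proxy handling (an illustration
# only; the real getHtml() below adds both):
# print urllib2.urlopen("https://www.dytt8.net/index0.html").read()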

def getHtml(url):
    # Rotate through a few desktop browser User-Agent strings so requests look less uniform.
    Agent_list = ['Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.87 Safari/537.36',
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11"
    ]
    # A pool of HTTP proxies; one is picked at random for each call.
    ip_list = ['223.198.16.58:9999','110.244.12.96:9999','61.145.8.103:9999','223.198.19.214:9999','112.85.125.111:9999']
    user_agent = random.choice(Agent_list)
    httpproxy_handler = urllib2.ProxyHandler({'http':random.choice(ip_list)})
    opener = urllib2.build_opener(httpproxy_handler)
    urllib2.install_opener(opener)
    page = urllib2.Request(url)
    page.add_header('User-Agent',user_agent)
    response = urllib2.urlopen(page)
    html = response.read()
    return html
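
# The free proxies in ip_list fail regularly, so urllib2.urlopen() can raise
# urllib2.URLError (or its HTTPError subclass). A small retry wrapper (a hypothetical
# helper, not part of the original script) makes the crawl more tolerant:
def getHtmlRetry(url, attempts=3):
    for attempt in range(attempts):
        try:
            return getHtml(url)    # getHtml() picks a new proxy and User-Agent each time
        except urllib2.URLError as e:
            print 'attempt %d failed: %s' % (attempt + 1, e)
            time.sleep(2)
    return ''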

# 2. Filter the data we want out of the page

# The geturl() function filters the downloaded page for the movie links we need.
# The re module provides regular expressions:
# r"<a href='(/html/gndy/+.+html)'" finds hyperlinks shaped like /html/gndy/jddy/20190607/58704.html.
# Only the text captured by the parentheses () is returned.
#
# Regular expression example: r'(.*) are (.*?) .*'
#               This is a string; the leading r marks it as a raw (non-escaped) string, telling the
#               compiler to leave backslashes alone, i.e. to ignore escape sequences.
#               This particular string contains no backslashes, so the r is optional here.
#               . : matches any single character except a newline
#               ^ : the match only succeeds if what follows appears at the start of the string
#               * : the preceding character may occur 0 or more times
#               + : the preceding character must occur 1 or more times
#               ? : the preceding character may occur 0 or 1 times; extras are not matched
#              (.*)  is the first capture group; .* matches any run of characters except newlines.
#              (.*?) is the second capture group; the trailing ? makes it non-greedy, so it matches
#                    the fewest characters that still let the whole pattern succeed.
#               The final .* has no parentheses, so it is not a group: it matches like the first one
#               but is not included in the captured results.
#
# re.search("com","COM",re.I).group()
#              re.I  makes matching case-insensitive
#              re.L  locale-aware matching
#              re.M  multi-line matching, affects ^ and $
#              re.S  makes . match any character, including newlines
#
# <a href="">..</a> is an HTML hyperlink.
# re.compile() compiles a regular expression into a pattern object.
# re.findall() returns every piece of the html that matches urlre (the compiled pattern).
# Running the script therefore yields all the movie-link URLs contained in the page.
#
# reg = r"<a href='(/html/gndy/.*)</a><br/>"
# matches lines like <a href='/html/gndy/jddy/20160320/50523.html'>IMDB评分8分左右影片400余部</a><br/>
# and outputs the content of the parentheses, giving
# /html/gndy/jddy/20160320/50523.html'>IMDB评分8分左右影片400余部
# These variants work as well:  reg = r"<a href='(/html/gndy/.*?)</a><br/>"
#                               reg = r"<a href='(/html/gndy/.+)</a><br/>"
#                               reg = r"<a href='(/html/gndy/.+?)</a><br/>"
#
# re.split(r"'>+",resource_url) splits the captured string
# /html/gndy/jddy/20160320/50523.html'>IMDB评分8分左右影片400余部 into two parts at the '> separator
# (see the short demonstration right after this comment block).
# down_addr = '' + down_url[0] : down_url alone did not display the Chinese characters properly,
# so this concatenation is used as a small conversion step.
#
# for i in range(1,20) walks the records we need; the loop starts at 1 because match 0 is not a
# record we want.
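#
# A quick self-contained check of the two regex steps above, run against a sample anchor
# copied from the explanation (no network access; sample/hit/parts are demo names only):
sample = "<a href='/html/gndy/jddy/20160320/50523.html'>IMDB评分8分左右影片400余部</a><br/>"
hit = re.findall(r"<a href='(/html/gndy/.*?)</a><br/>", sample)[0]
# hit is "/html/gndy/jddy/20160320/50523.html'>IMDB评分8分左右影片400余部"
parts = re.split(r"'>+", hit)
print parts[0]    # /html/gndy/jddy/20160320/50523.html   (relative detail-page link)
print parts[1]    # IMDB评分8分左右影片400余部            (anchor text, i.e. the list title)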

def geturl(html):
    # Capture every movie detail link (plus its anchor text) on the index page.
    reg = r"<a href='(/html/gndy/.*)</a><br/>"
    urlre = re.compile(reg)
    urllist = re.findall(urlre,html)
    # gbk encoding so the Chinese movie titles are written to the sheet correctly.
    wbk = xlwt.Workbook(encoding='gbk')
    worksheet = wbk.add_sheet('My worksheet')
    list1 = ('Name','Page','Url')
    # Write the header row once, then one data row per record below it.
    for col in range(0,len(list1)):
        worksheet.write(0,col,list1[col])
    # User-Agent and proxy pools for the detail-page requests; defined once, outside the loop.
    Agent_list = ['Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.87 Safari/537.36',
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
    ]
    ip_list = ['223.198.16.58:9999','110.244.12.96:9999','61.145.8.103:9999','223.198.19.214:9999','112.85.125.111:9999']
    for i in range(1,20):    # match 0 is not a record we need
        resource_url = "https://dytt8.net" + urllist[i]
        # result[0] is the detail-page URL, result[1] is the movie title.
        result = re.split(r"'>+",resource_url)
        # Pick a fresh proxy and User-Agent for every detail-page request.
        user_agent = random.choice(Agent_list)
        httpproxy_handler = urllib2.ProxyHandler({'http':random.choice(ip_list)})
        opener = urllib2.build_opener(httpproxy_handler)
        urllib2.install_opener(opener)
        down_page = urllib2.Request(result[0])
        down_page.add_header('User-Agent',user_agent)
        print down_page.get_header('User-agent')    # log the User-Agent actually sent
        response_page = urllib2.urlopen(down_page)
        down_html = response_page.read()
        # The ftp:// link on the detail page is the actual download address.
        addr_code = r'<a href="(ftp://.*)">'
        addr_re = re.compile(addr_code)
        down_url = re.findall(addr_re,down_html)
        down_addr = '' + down_url[0]
        worksheet.write(i,0,result[1])
        worksheet.write(i,1,result[0])
        worksheet.write(i,2,down_addr)
        time.sleep(5)    # pause between requests to go easy on the site
    wbk.save('renew.xls')
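
# Optional helper (not called anywhere in this script): read renew.xls back to confirm what
# geturl() wrote. This assumes the xlrd package is available; it is not used elsewhere here.
def check_xls(path='renew.xls'):
    import xlrd    # local import so the crawler itself does not depend on xlrd
    sheet = xlrd.open_workbook(path).sheet_by_index(0)
    print 'rows written:', sheet.nrows
    for r in range(sheet.nrows):
        print sheet.row_values(r)    # row 0 is the ('Name','Page','Url') header, the rest are records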

# 3. Send the email

def send_mail():
    user = 'xxxx.com'   # sender account
    pwd = 'xxxxs'       # sender password (or authorization code)
    to = 'xxxx'         # recipient address
    msg = MIMEMultipart()
    msg["Subject"] = 'Movie records'
    msg["From"] = user
    msg["To"] = to
    # Plain-text body of the mail.
    part1 = MIMEText("Hello,\n\n       The movie records are attached.")
    msg.attach(part1)
    # Attach the spreadsheet that geturl() saved.
    part2 = MIMEApplication(open(r'E:\2xx3\python脚本\html\renew.xls','rb').read())
    part2.add_header('Content-Disposition','attachment',filename='renew.xls')
    msg.attach(part2)
    s = smtplib.SMTP("smtp.139.com",timeout=30)
    s.login(user,pwd)
    s.sendmail(user,to,msg.as_string())
    s.close()
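
# If the provider rejects the plain SMTP session above, smtplib also offers an SSL variant.
# A rough sketch (465 is the conventional SMTPS port, but check 139.com's own documentation);
# it would replace the SMTP lines inside send_mail():
# s = smtplib.SMTP_SSL("smtp.139.com", 465, timeout=30)
# s.login(user, pwd)
# s.sendmail(user, to, msg.as_string())
# s.quit()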

# Run the whole pipeline: fetch the index page, write the records to renew.xls, then mail it.
html = getHtml("https://www.dytt8.net/index0.html")
geturl(html)
send_mail()