from bs4 import BeautifulSoup #网页解析，获取数据
import re #正则表达式，文字匹配
import xlwt #进行Excel操作
from urllib import request,error #制定URL，获取网页数据
# import sqlite3 #进行SQLite数据库操作

# Pre-compiled regexes that pull individual fields out of one movie's HTML snippet.
findLink = re.compile(r'<a href="(.*?)">')  # movie detail-page link
# Bug fix: the old pattern r'<img(.*?)>' captured the tag's whole attribute
# string instead of the poster URL; capture the src attribute value instead.
# re.S lets the match cross newlines inside the <img ...> tag.
findImgSrc = re.compile(r'<img.*?src="(.*?)"', re.S)  # poster image URL
findTitle = re.compile(r'<span class="title">(.*?)</span>')  # movie title
findRating = re.compile(r'<span class="rating_num" property="v:average">(.*?)</span>')  # rating score
findJudge = re.compile(r'<span>(\d+?)人评价</span>')  # number of raters
findInq = re.compile(r'<span class="inq">(.*?)</span>')  # one-line blurb
findBd = re.compile(r'<p class="">(.*?)</p>', re.S)  # other details (re.S: spans multiple lines)

def getdata(baseurl):
    """Crawl all ten pages of the Douban Top-250 list.

    :param baseurl: list URL ending in 'start=' — the page offset is appended
    :return: list of dicts, one per movie, each mapping a field name to the
             list of regex matches found for that field
    """
    # Field name -> compiled pattern, in the column order savedata expects.
    field_patterns = [
        ('link', findLink),
        ('ImgSrc', findImgSrc),
        ('Title', findTitle),
        ('Rating', findRating),
        ('Judge', findJudge),
        ('Inq', findInq),
        ('Bd', findBd),
    ]
    movies = []
    # 25 movies per page, so offsets 0, 25, ..., 225 cover all 250 entries.
    for offset in range(0, 250, 25):
        page_html = askURL(baseurl + str(offset))
        page = BeautifulSoup(page_html, 'html.parser')
        for item in page.find_all('div', class_="item"):
            snippet = str(item)
            movies.append(
                {key: re.findall(pattern, snippet) for key, pattern in field_patterns}
            )
    return movies

def askURL(url, timeout=3):
    """Fetch *url* and return its body decoded as UTF-8 text.

    Sends a desktop-Chrome User-Agent header so the site does not reject the
    request as a bot. Best-effort by design: on any failure (network error,
    timeout, undecodable body) the error is printed and '' is returned so the
    caller's crawl loop can continue.

    :param url: full URL to request
    :param timeout: socket timeout in seconds (default 3, the old hard-coded value)
    :return: decoded HTML text, or '' on failure
    """
    headers = {    # browser-like request headers
        'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36'
    }
    req = request.Request(url=url, headers=headers)
    html = ''
    try:
        response = request.urlopen(req, timeout=timeout)
        html = response.read().decode('utf-8')
    except Exception as err:
        # Deliberate swallow-and-report: a single failed page must not abort the run.
        print('发生错误：', err)
    return html

def savedata(datalist, savepath):
    """Write the scraped movie records to an .xls workbook at *savepath*.

    Column 0 holds a 1-based row number; the remaining columns follow the
    insertion order of each movie dict (link, image, title, rating, raters,
    blurb, other).

    :param datalist: list of per-movie dicts as produced by getdata()
    :param savepath: output file path for the workbook
    """
    book = xlwt.Workbook(encoding='utf-8')
    sheet = book.add_sheet('top250')
    print('数据保存中...')
    names = ['序号','电影链接','图片链接','影片名字','影片评分','评价人数','影片概况','其他']
    for col, name in enumerate(names):    # header row
        sheet.write(0, col, name)
    for i, data in enumerate(datalist):
        sheet.write(i + 1, 0, str(i + 1))    # row number column
        for j, value in enumerate(data.values(), start=1):
            # Bug fix: every field is a list (re.findall result) and xlwt's
            # Row.write raises "Unexpected data type" for lists — flatten each
            # list to a single "; "-joined string before writing.
            if isinstance(value, list):
                value = '; '.join(str(v) for v in value)
            sheet.write(i + 1, j, value)
    book.save(savepath)
    print('数据爬取完毕！')
 
if __name__ == '__main__':
    # Entry point: crawl the Top-250 list, then dump the results to Excel.
    entry_url = 'https://movie.douban.com/top250?start='
    print('开始爬取数据...')
    movies = getdata(entry_url)    # scrape all ten pages
    savedata(movies, r'top250.xls')    # persist to the spreadsheet


