
from requests.exceptions import RequestException
from lxml import etree
import requests
import json
import time
import random
import re


def getPage(url):
    '''Fetch one Douban Top250 page and return its decoded HTML.

    Returns the UTF-8 decoded response body on HTTP 200, otherwise None
    (non-200 status or any requests-level network error).
    '''
    # Browser-like headers + session cookie so Douban serves the page
    # instead of an anti-bot response.
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,fr;q=0.7',
        'Connection': 'keep-alive',
        'Cookie': 'bid=Bay5aR1FC54; ap_v=0,6.0; ll="118286"; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1551317637%2C%22https%3A%2F%2Fbook.douban.com%2Ftop250%3Fstart%3D25%22%5D; _pk_ses.100001.4cf6=*; __yadk_uid=O36zDuJpXmUXBiEVe5H4JwyF9yljRpVY; _vwo_uuid_v2=D6C05CC8BCF662B6A4B44A5A8BF2571DF|64dbcc914287acd7f9721d0604f99e75; _pk_id.100001.4cf6=4a78046ee6afbb38.1551317637.1.1551317728.1551317637.',
        'Cache-Control': 'max-age=0',
        'Host': 'movie.douban.com',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
    }
    try:
        # Fix: the original sent an empty dict as a GET body (data={}) and
        # had no timeout, so a stalled connection hung the crawler forever.
        res = requests.get(url, headers=headers, timeout=10)
        if res.status_code == 200:
            return res.content.decode('utf-8')
        return None
    except RequestException:
        # Network-level failure (DNS, timeout, connection reset, ...)
        return None


def parsePage(content):
    '''Parse one Top250 list page and yield a dict of fields per movie.

    content: decoded HTML text of a https://movie.douban.com/top250 page.
    Yields one dict per <li> movie entry (up to 25 per page).
    '''
    # Parse the document; xpath handles the well-structured fields.
    html = etree.HTML(content)
    # The movie entries on this page.
    items = html.xpath("//div[@class='article']/ol[@class='grid_view']/li")
    # Director / actor / year / area / genre sit in loosely formatted text,
    # so they are pulled from the raw HTML with regexes (raw strings to
    # avoid invalid-escape warnings; patterns otherwise unchanged).
    # NOTE(review): these patterns anchor on the '剧情' genre and will miss
    # entries of other genres — the fallback below keeps parsing alive.
    directorpat = r' <p class="">[\s\S]*?导演: (.*?)&nbsp'
    actorpat = r'主演:(.*?)<br>'
    releasedatapat = r'<br>[\s\S]*?([0-9]+)&nbsp;/&nbsp;.*?&nbsp;/&nbsp;剧情'
    areapat = r'<br>[\s\S]*?&nbsp;/&nbsp;(.*?)&nbsp;/&nbsp;剧情'
    stylepat = r'<br>\n\s+[0-9]{4}&nbsp;/&nbsp;.*?&nbsp;/&nbsp;([\s\S]*?)\n\s+</p>'
    directorlist = re.findall(directorpat, content)
    actorlist = re.findall(actorpat, content)
    releasedatalist = re.findall(releasedatapat, content)
    arealist = re.findall(areapat, content)
    stylelist = re.findall(stylepat, content)

    def _pick(lst, idx):
        # A regex list can be shorter than `items` when a pattern misses an
        # entry; return '' instead of raising IndexError mid-page.
        return lst[idx] if idx < len(lst) else ''

    # BUG FIX: the original set i = 0 INSIDE the loop, so the i += 1 after
    # yield never took effect and every movie received index 0's
    # director/actor/year/area/genre. enumerate keeps the per-movie index
    # aligned with the regex result lists.
    for i, item in enumerate(items):
        # quote may be absent for some movies, so it needs a fallback
        quote = item.xpath(".//div[@class='info']/div[@class='bd']/p[@class='quote']/span/text()")
        quote = quote[0] if quote else '无简介'
        yield {
            'Id': item.xpath(".//div[@class='pic']/em/text()")[0],
            'title': item.xpath(".//div[@class='pic']/a/img/@alt")[0],
            'link': item.xpath(".//div[@class='pic']/a/@href")[0],
            # [0] added for consistency: the original accidentally stored a
            # one-element list here while every other field is a string.
            'pic': item.xpath(".//img[@width='100']/@src")[0],
            'score': item.xpath(".//div[@class='info']/div[@class='bd']/div[@class='star']/span[@class='rating_num']/text()")[0],
            'director': _pick(directorlist, i),
            'actor': _pick(actorlist, i),
            'quote': quote,
            'releasedata': _pick(releasedatalist, i),
            'area': _pick(arealist, i),
            'style': _pick(stylelist, i),
        }


def writeFile(content):
    '''Append one parsed movie record to the output file as a JSON line.'''
    # ensure_ascii=False keeps the Chinese text readable in the file
    # instead of \uXXXX escape sequences.
    line = json.dumps(content, ensure_ascii=False)
    with open("./豆瓣电影xpath.txt", 'a', encoding="utf-8") as out:
        out.write(line + "\n")


def main(page):
    '''Crawl and persist one page of the Top250 list.

    page: zero-based page index; each Douban list page holds 25 movies,
    selected via the `start` query parameter.
    '''
    url = 'https://movie.douban.com/top250?start=' + str(25 * page)
    html = getPage(url)  # None on fetch failure -> page is skipped
    if html:
        # Dead `pass` statement removed from the loop body.
        for item in parsePage(html):
            print(item)      # progress feedback on the console
            writeFile(item)  # persist as one JSON line


if __name__ == '__main__':
    # The Top250 list spans exactly 10 pages of 25 movies (start=0..225);
    # the original range(0, 11) requested an 11th, empty page (start=250).
    for page in range(10):
        main(page)
        # Random pause between pages to reduce the chance of being
        # rate-limited or blocked.
        time.sleep(random.randint(2, 5))
