# coding:utf-8
from hashlib import md5
import requests
from lxml import etree
import os
from tool import Excel

"""

https://movie.douban.com/review/best/?start=0
https://movie.douban.com/review/best/?start=20
发现每一页的url有这样的规则 -> start+20递增

"""

# Request headers sent with every HTTP call.
# The Cookie carries a logged-in Douban session; without it Douban may
# throttle or block the crawler. NOTE(review): this session cookie is
# hard-coded and will expire — refresh it when requests start failing.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0',

    'Cookie': 'bid=pGaaMWgfwvU; __yadk_uid=g5W33OtlFgAbOwakC34XJV2FvgNWnXCi; ll="118237"; _vwo_uuid_v2=D54545080C7AB6402AEBA576F1EE96FC2|cc667b2a6207825649ccaaefd50307b7; gr_user_id=4724545d-8790-4ab5-8fc9-5f043572457b; __gads=ID=843c61564170e164:T=1572395426:S=ALNI_Mb78ZI-68qqrr5WN73bqJdu0uW9ZQ; __utmz=30149280.1577442260.15.14.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utmz=223695111.1577442260.12.11.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utmc=30149280; __utmc=223695111; trc_cookie_storage=taboola%2520global%253Auser-id%3Da0551504-18fd-456d-9ca1-934317743f8d-tuct3ecb408; ap_v=0,6.0; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1578286808%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3D7dG_izi9kOnpKevbbnk6nMM1Mj0b6Dw_9fx5fpcTj5lM-M21jeq-2PKzlQL8JnnA%26wd%3D%26eqid%3Da17a1a3e0001a090000000035e05dbd0%22%5D; _pk_ses.100001.4cf6=*; __utma=30149280.184804189.1567244256.1578282362.1578286808.18; __utmb=30149280.0.10.1578286808; __utma=223695111.2129009241.1565240742.1578282362.1578286808.15; __utmb=223695111.0.10.1578286808; _pk_id.100001.4cf6=025d9d3ee4988027.1565240741.15.1578287290.1578284720.'

}


def get_per_url(page_count=5, page_size=20):
    """
    Yield the URL of each listing page of Douban "best" reviews.

    The listing paginates through the ``start`` query parameter, which
    grows by ``page_size`` (20 entries) per page.

    :param page_count: number of listing pages to yield; the default of 5
        reproduces the original hard-coded ``range(0, 100, 20)``
    :param page_size: step of the ``start`` parameter between pages
    :return: generator of listing-page URLs
    """
    for offset in range(0, page_count * page_size, page_size):
        yield 'https://movie.douban.com/review/best/?start={}'.format(offset)


def parse_per_page(per_url):
    """
    Parse one listing page and yield, for every review entry on it, the
    pair (rating-page URL, review-page URL) of the reviewed movie.

    :param per_url: URL of one listing page of best reviews
    :return: generator of (movie_grade_url, movie_comment_url) tuples
    """
    page = requests.get(per_url, headers=headers)
    tree = etree.HTML(page.text)
    # Each review entry sits inside a <div data-cid="..."> container.
    for entry in tree.xpath('//div[@data-cid]/div'):
        grade_url = entry.xpath('a/@href')[0]
        comment_url = entry.xpath('div[@class="main-bd"]/h2/a/@href')[0]
        yield grade_url, comment_url


def download_img(img_folder, img_src, img_name):
    """
    Fetch one image over HTTP and save it as ``<img_folder>/<img_name>.jpg``.

    :param img_folder: directory the image file is written into
    :param img_src: URL of the image to download
    :param img_name: file name (without extension) to save under
    :return: None
    """
    resp = requests.get(img_src, headers=headers)
    print('正在下载：{} {}'.format(img_name, img_src))
    target = '{}/{}.jpg'.format(img_folder, img_name)
    with open(target, 'wb') as fp:
        fp.write(resp.content)


def parse_movie_infos(grade_url, comment_url):
    """
    Fetch one movie's details from its rating page and its review page,
    downloading the poster and any inline review images along the way.

    :param grade_url: URL of the movie's rating (subject) page
    :param comment_url: URL of the movie's review page
    :return: generator yielding a single list
        ``[movie_name, movie_grade, comment_url, movie_comment]``
    """
    # --- Rating page: movie name, score, poster image ---
    response = requests.get(grade_url, headers=headers)
    root = etree.HTML(response.text)

    # Each xpath may come back empty; fall back to a placeholder string.
    movie_name = root.xpath('//div[@id="content"]/h1/span/text()')
    movie_name = '暂无电影名称' if not movie_name else movie_name[0]

    movie_grade = root.xpath('//div[@class="rating_self clearfix"]/strong/text()')
    movie_grade = '暂无评分' if not movie_grade else movie_grade[0]

    movie_img_src = root.xpath('//a[@class="nbgnbg"]/img/@src')
    movie_img_src = '暂无图片src' if not movie_img_src else movie_img_src[0]

    movie_img_name = root.xpath('//a[@class="nbgnbg"]/img/@alt')
    movie_img_name = '暂无图片名称' if not movie_img_name else movie_img_name[0]

    # Download the poster only when a real src was found.
    if not os.path.exists('电影图片'):
        os.mkdir('电影图片')
    if movie_img_src != '暂无图片src':
        download_img('电影图片', movie_img_src, movie_img_name)

    # --- Review page: review text and inline images ---
    response = requests.get(comment_url, headers=headers)
    root = etree.HTML(response.text)
    movie_comment = ''.join(
        root.xpath('//div[@class="review-content clearfix"]/p/text()'))
    comment_imgs = root.xpath('//div[@class="review-content clearfix"]//img/@src')

    if comment_imgs:
        # Create the per-movie folder once (hoisted out of the per-image
        # loop). If the movie name is not a legal directory name, fall back
        # to its md5 digest.
        t_movie_name = movie_name
        try:
            os.makedirs('影评图片/{}'.format(t_movie_name), exist_ok=True)
        except OSError:
            # BUGFIX: the original caught only NotADirectoryError, but an
            # illegal folder name can raise other OSError subclasses
            # (e.g. invalid characters on Windows), which crashed the crawl.
            t_movie_name = md5(movie_name.encode('utf-8')).hexdigest()
            os.makedirs('影评图片/{}'.format(t_movie_name), exist_ok=True)

        for img_src in comment_imgs:
            # Name each image by the md5 of its URL: always a legal file
            # name, and distinct URLs never overwrite each other.
            img_name = md5(img_src.encode('utf-8')).hexdigest()
            download_img('影评图片/{}'.format(t_movie_name), img_src, img_name)

    # Yield name / grade / review URL / review text as one record.
    yield [movie_name, movie_grade, comment_url, movie_comment]


if __name__ == '__main__':
    # Create the Excel helper object (project-local tool.Excel).
    xcl = Excel()

    # Spreadsheet row cursor: starts at 1 and advances by one per movie.
    row = 1
    for url in get_per_url():
        # For each listing page, get (rating URL, review URL) per movie.
        for movie_grade_url, movie_comment_url in parse_per_page(url):
            # Fetch the remaining details for this movie.
            for movie_info in parse_movie_infos(movie_grade_url, movie_comment_url):
                # movie_info is the list returned by parse_movie_infos:
                # [name, grade, review URL, review text] — write one row.
                xcl.write_data(row, movie_info)
            # parse_movie_infos yields exactly one record per movie, so
            # advancing the cursor here moves one row per movie.
            row += 1

        # Persist the workbook after each listing page (~20 rows).
        xcl.save_excel_data()
