import time

import requests
import json
from lxml import etree

import re

import pandas as pd

# Douban "hot movies" listing endpoint: returns JSON with up to 50 subjects
# (tag=%E7%83%AD%E9%97%A8 is the URL-encoded Chinese for "hot"), starting at offset 0.
top_url = "https://movie.douban.com/j/search_subjects?type=movie&tag=%E7%83%AD%E9%97%A8&page_limit=50&page_start=0"
# Request headers sent with every request: a desktop-browser User-Agent plus a
# logged-in session Cookie.
# NOTE(review): the Cookie is tied to one session and will expire — refresh it
# before running, or requests may be served a login/anti-bot page.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36",
    "Cookie": 'll="118183"; bid=2V06GThHXgs; __yadk_uid=2nvOcoOiX0nBbj2LJbWnvcT9m0AbDuzi; __gads=ID=a19bcb5e2749a155-22db2c0138d90055:T=1673251486:RT=1673251486:S=ALNI_MaAgb4_W5x0P4kdYGRV9ljaB1vt3Q; _vwo_uuid_v2=D9007959ADBE6FC1C9CEC907323BB6A4B|85fe58d4e2f6a67cf4581a0311cb7e27; __utmc=30149280; __utmc=223695111; Hm_lvt_16a14f3002af32bf3a75dfe352478639=1676019933; Hm_lpvt_16a14f3002af32bf3a75dfe352478639=1676019945; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1676101495%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3Dlyt1DJpDt73KUhP18wVJVMOPx90QyabD4VV1Tiq5eCeSE--Nzyi9mTfJVnaR64U3%26wd%3D%26eqid%3Dbf40d33d000cc92b0000000663e74759%22%5D; _pk_ses.100001.4cf6=*; __utma=30149280.1306283625.1673251468.1676017995.1676101495.5; __utmb=30149280.0.10.1676101495; __utmz=30149280.1676101495.5.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utma=223695111.1562687523.1673251468.1676017999.1676101495.5; __utmb=223695111.0.10.1676101495; __utmz=223695111.1676101495.5.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; ap_v=0,6.0; __gpi=UID=00000ba0548eb3c8:T=1673251486:RT=1676101496:S=ALNI_MZhFMhkHD8LJVTSKe_91Kfu33HcBA; dbcl2="267574031:IhwWHvS/nPI"; ck=80r7; frodotk_db="0e26815653fc86182a738402fbf59248"; push_noty_num=0; push_doumail_num=0; _pk_id.100001.4cf6=c053146a965238ca.1673251468.5.1676103416.1676019957.'
}
def _first_or_default(values, default=""):
    """Return the first item of an xpath result list, or *default* when empty.

    Guards the many ``xpath(...)[0]`` accesses: a page whose markup differs
    (login wall, layout change) previously raised IndexError mid-crawl.
    """
    return values[0] if values else default


def _parse_movie_detail(movie_id, title, rate, detail_html):
    """Parse one Douban movie detail page into a flat row dict.

    :param movie_id: Douban subject id (string).
    :param title: movie title from the listing JSON.
    :param rate: aggregate rating from the listing JSON.
    :param detail_html: raw HTML text of the detail page.
    :return: dict with the columns of the movie table.
    """
    html = etree.HTML(detail_html)
    # Director / writer / cast each live in numbered <span> pairs inside div#info.
    director = html.xpath("//div[@id='info']/span[1]/span[2]/a/text()")
    author = html.xpath("//div[@id='info']/span[2]/span[2]/a/text()")
    actor = html.xpath("//div[@id='info']/span[3]/span[2]/a/text()")
    genres = html.xpath("//div[@id='info']/span[@property='v:genre']/text()")

    # Country and language appear as bare text nodes of div#info, separated by
    # "/" delimiters and whitespace — strip blanks and delimiters first.
    info_texts = [line.strip()
                  for line in html.xpath("//div[@id='info']/text()")
                  if line.strip() not in ("", "/")]

    release_date = html.xpath("//div[@id='info']/span[@property='v:initialReleaseDate']/text()")
    runtime = _first_or_default(html.xpath("//div[@id='info']/span[@property='v:runtime']/text()"))

    return {
        "id": movie_id,
        "title": title,
        "rate": rate,
        # Multi-valued fields are flattened with "|" so the row stays one line.
        "director": "|".join(director),
        "author": "|".join(author),
        "actor": "|".join(actor),
        "types": "|".join(genres),
        # First cleaned text node is the country, second the language
        # (presumably — depends on Douban's fixed field order; empty if missing).
        "country": info_texts[0] if len(info_texts) > 0 else "",
        "lang": info_texts[1] if len(info_texts) > 1 else "",
        "release_date": "|".join(release_date),
        "runtime": runtime,
    }


def _parse_comments(movie_id, comments_html):
    """Parse the short-comments page of one movie into a list of row dicts.

    :param movie_id: Douban subject id the comments belong to.
    :param comments_html: raw HTML text of the comments page.
    :return: list of dicts with the columns of the comment table.
    """
    html = etree.HTML(comments_html)
    rows = []
    for comment in html.xpath("//div[@class='comment-item ']"):
        user = _first_or_default(
            comment.xpath("./div[@class='comment']/h3/span[@class='comment-info']/a/text()"))
        posted_at = _first_or_default(
            comment.xpath(
                "./div[@class='comment']/h3/span[@class='comment-info']/span[@class='comment-time ']/text()")).strip()
        location = comment.xpath(
            "./div[@class='comment']/h3/span[@class='comment-info']/span[@class='comment-location']/text()")
        content = _first_or_default(
            comment.xpath("./div[@class='comment']/p[@class=' comment-content']/span/text()"))
        # Newlines would break the tab-separated output line; replace them.
        content = content.replace("\n", "。")

        # The star rating is encoded in the second info span's class attribute
        # (e.g. "allstar40 rating") — grab the first digit; "0" when unrated.
        # Raw string r"\d": "\d" is an invalid escape in a plain literal.
        score_class = _first_or_default(
            comment.xpath("./div[@class='comment']/h3/span[@class='comment-info']/span[2]/@class"))
        score_match = re.search(r"\d", score_class)
        score = score_match.group() if score_match is not None else "0"

        rows.append({
            "id": movie_id,
            "comment_user": user,
            "comment_score": score,
            "comment_time": posted_at,
            "comment_location": "|".join(location),
            "comment_content": content,
        })
    return rows


def main():
    """Crawl Douban's hot-movie list, fetch each movie's details and short
    comments, and write both tables as tab-separated files (no header/index)."""
    movie_rows = []
    comment_rows = []

    # Fetch the listing JSON; fail fast on HTTP errors instead of trying to
    # parse an error page.
    response = requests.get(top_url, headers=headers)
    response.raise_for_status()

    for subject in response.json()["subjects"]:
        time.sleep(2)  # be polite: throttle requests to avoid being blocked
        print('正在爬取')

        # `movie_id` (was `id`) — don't shadow the builtin.
        movie_id = subject["id"]

        # Detail page for this movie.
        detail_rep = requests.get(subject["url"], headers=headers)
        movie_rows.append(
            _parse_movie_detail(movie_id, subject["title"], subject["rate"], detail_rep.text))

        # First page of short comments, sorted by score.
        comment_url = "https://movie.douban.com/subject/{}/comments?sort=new_score&status=P"
        comment_rep = requests.get(comment_url.format(movie_id), headers=headers)
        comment_rows.extend(_parse_comments(movie_id, comment_rep.text))

    # Build each DataFrame once at the end: appending via df.loc[len(df)] in
    # the loop is O(n^2). Explicit column lists pin the output column order.
    movie_df = pd.DataFrame(movie_rows, columns=["id",
                                                 "title",
                                                 "rate",
                                                 "director",
                                                 "author",
                                                 "actor",
                                                 "types",
                                                 "country",
                                                 "lang",
                                                 "release_date",
                                                 "runtime"])
    comment_df = pd.DataFrame(comment_rows, columns=[
        "id",
        "comment_user",
        "comment_score",
        "comment_time",
        "comment_location",
        "comment_content",
    ])

    movie_df.to_csv("../data/movie.txt", sep="\t", header=False, index=False)
    comment_df.to_csv("../data/comment.txt", sep="\t", header=False, index=False)


if __name__ == '__main__':
    main()
