import requests
from lxml import etree
import pandas as pd
import time
import re

# Empty frame that will accumulate one row per movie.
meive = pd.DataFrame(columns=[
    "share_id",   # douban subject id
    "daoyan",     # director(s)
    "bianju",     # screenwriter(s)
    "actor",      # cast
    "leixing",    # genre(s)
    "diqu",       # region
    "yuyan",      # language
    "youmin",     # alternate title(s)
    "time",       # release date(s)
    "runtime",    # running time
    "score",      # average rating
])

# Empty frame that will accumulate one row per user comment.
comment_df = pd.DataFrame(columns=[
    "share_id",          # douban subject id the comment belongs to
    "pjr",               # commenter name
    "rating",            # star rating digit extracted from the CSS class
    "comment_time",      # when the comment was posted
    "comment_location",  # commenter location
    "vote_count",        # "useful" vote count
    "comment_content",   # comment text
])

# Browser-like User-Agent so the requests are not rejected as a bot.
header = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36"
}

# Fetch the "now playing in Hefei" listing page; the browser-style header
# keeps the request from being intercepted.
listing_url = "https://movie.douban.com/cinema/nowplaying/hefei/"
response = requests.get(listing_url, headers=header)

# Raw page source (echoed for debugging, as in the original script).
page_text = response.text
print(page_text)

# Parse the listing page into an element tree.
listing_tree = etree.HTML(page_text)

# One anchor per movie card: <li class="stitle"><a href=...>title</a></li>
a_list = listing_tree.xpath("//li[@class='stitle']/a")

def _first(nodes, default=""):
    """Return the first element of an xpath result list, or *default* when empty."""
    return nodes[0] if nodes else default


for a in a_list:
    # Movie title and detail-page URL from the listing anchor.
    name = a.xpath("text()")[0].strip()
    url = a.xpath('@href')[0]
    print(f"正在爬取电影:{name},{url}")

    # Fetch and parse the movie's detail page.
    r = requests.get(url, headers=header)
    html = etree.HTML(r.text)

    # Douban exposes the subject id through a share-id attribute.
    share_id = html.xpath("//a[@share-id]/@share-id")[0]

    # Director(s)
    daoyan = html.xpath("//div[@id='info']/span[1]/span[@class='attrs']/a/text()")
    # Screenwriter(s)
    bianju = html.xpath("//div[@id='info']/span[2]/span[@class='attrs']/a/text()")
    # Cast
    actor = html.xpath("//span[@class='actor']/span[@class='attrs']/a/text()")
    # Genre(s)
    leixing = html.xpath("//div[@id='info']/span[@property='v:genre']/text()")

    # The remaining info fields are bare text nodes; strip whitespace and drop
    # the empty strings / "/" separators between them.
    com = html.xpath("//div[@id='info']/text()")
    com = [i.strip() for i in com]
    com = [i for i in com if i not in ('', '/')]

    # Region / language / alternate title appear in this order among the text
    # fields; guard with length checks instead of crashing with IndexError on
    # pages that omit some of them.
    diqu = com[0] if len(com) > 0 else ""
    yuyan = com[1] if len(com) > 1 else ""
    youmin = com[2] if len(com) > 2 else ""

    # Release date(s) and running time.
    sy_time = html.xpath("//span[@property='v:initialReleaseDate']/text()")
    runtime = html.xpath("//span[@property='v:runtime']/text()")

    # Average rating; unreleased movies have none, so fall back to 0.
    score = _first(html.xpath("//strong[@property='v:average']/text()"), 0)

    # Append one row for this movie.
    meive.loc[len(meive)] = [
        share_id,
        "|".join(daoyan),
        "|".join(bianju),
        "|".join(actor),
        "|".join(leixing),
        diqu,
        yuyan,
        youmin,
        "|".join(sy_time),
        _first(runtime),  # was runtime[0]: crashed when the page lacks a runtime
        score,
    ]

    # Crawl up to 10 pages of user comments (100 per page).
    for page in range(10):
        print(f"正在爬取第{page + 1}页评价")
        start = page * 100
        comments_url = f"https://movie.douban.com/subject/{share_id}/comments?start={start}&limit=100&status=P&sort=new_score"

        comments_response = requests.get(comments_url, headers=header)
        comments_html = etree.HTML(comments_response.text)

        # One node per comment (note the trailing space in douban's class name).
        comments = comments_html.xpath("//div[@class='comment-item ']")
        if not comments:
            # No more comments: stop paging early instead of issuing further
            # requests that would only return empty pages.
            break

        for comment in comments:
            # Comment text; fold newlines into Chinese full stops.
            comment_content = _first(comment.xpath("./div/p/span/text()")).replace("\n", "。").replace("\r", "。").strip()
            # "Useful" vote count; default "0" when the node is missing.
            vote_count = _first(comment.xpath("./div/h3/span[@class='comment-vote']/span/text()"), "0")
            # Commenter name.
            pjr = comment.xpath("./div/h3/span[@class='comment-info']/a/text()")[0]

            # The star rating is encoded in a CSS class such as "allstar40";
            # pull out its first digit. Raw string fixes the invalid "\d"
            # escape sequence of the original.
            rating_str = _first(comment.xpath("./div/h3/span[@class='comment-info']/span/@class"))
            rating_search = re.search(r"\d", rating_str)
            rating = rating_search.group(0) if rating_search is not None else 0

            # Post time (guarded: some comments lack the time span).
            comment_time = _first(
                comment.xpath("./div/h3/span[@class='comment-info']/span[@class='comment-time ']/text()")
            ).strip()
            # Commenter location (may legitimately be empty).
            comment_location = comment.xpath("./div/h3/span[@class='comment-info']/span[@class='comment-location']/text()")

            comment_df.loc[len(comment_df)] = [
                share_id,
                pjr,
                rating,
                comment_time,
                "".join(comment_location),
                vote_count,
                comment_content,
            ]

    # Pause between movies to avoid hammering the server.
    time.sleep(2)

# Persist both tables as comma-separated text, without header row or index.
meive.to_csv("../data/豆瓣电影.txt", index=False, header=False, sep=",")
comment_df.to_csv("../data/豆瓣电影评价.txt", index=False, header=False, sep=",")
