import csv
import time

from selectolax.parser import HTMLParser
import requests
from bs4 import BeautifulSoup

# Path of the input CSV that holds the media_id index rows
path = 'index_info.csv'
# Path of the output CSV the detailed movie info is appended to
sava_path = 'mv_info.csv'
# Queue of media_ids whose page request returned a non-200 status
failList = []
# Path of the text file recording media_ids that raised an error
fail_mdid_path = 'fail_mdid.txt'


def save_false_mdid(record: list) -> None:
    """Append one failed record to the error file at ``fail_mdid_path``.

    :param record: typically ``[media_id, title, subTitle]``; written as its
        ``str()`` representation on a single line.
    """
    # Note: parameter was renamed from `list`, which shadowed the builtin.
    with open(fail_mdid_path, mode='a', encoding='utf-8') as f:
        f.write(str(record) + '\n')


def deal_fail(fail_ids):
    """Retry scraping every failed media_id until all of them succeed.

    Fixes two bugs in the original: the fetched row (a list) was compared
    to an int (``list > 0`` raises TypeError on Python 3), and the list was
    mutated (``remove``/``append``) while being iterated, which skips
    elements. This version iterates each round's snapshot and carries the
    still-failing ids into the next round.

    :param fail_ids: list of media_ids whose first fetch failed
    """
    pending = fail_ids
    while pending:
        still_failed = []
        for media_id in pending:
            # An empty result list signals another failed request.
            info = get_pageinfo(media_id, "")
            if info:
                sava_list(info)
            else:
                still_failed.append(media_id)
        pending = still_failed
        if pending:
            # Throttle between retry rounds, matching the main loop's pacing.
            time.sleep(0.5)


def sava_list(row: list) -> None:
    """Append one movie-info row to the output CSV at ``sava_path``.

    :param row: non-empty list of field values; ``row[0]`` (the title) is
        echoed in the success message.
    """
    # Note: parameter was renamed from `list`, which shadowed the builtin.
    with open(sava_path, mode='a', encoding='utf-8', newline="") as file:
        csv.writer(file).writerow(row)
        print(f"保存{row[0]}成功！")


def get_pageinfo(media_id, subTitle) -> list:
    """
    Scrape a Bilibili bangumi media page and return its details as a row.

    :param media_id: media id used in the URL ``.../media/md{media_id}``
    :param subTitle: sub-title carried through unchanged into the result
    :return: [title, subTitle, tags, play count, follower count,
              danmaku count, long-review count, short-review count,
              release date, duration, score, rater count];
              empty list when the page request is not HTTP 200
    :raises IndexError: when the page layout lacks an expected node —
        the caller treats any exception as a record to retry manually
    """
    url = f"https://www.bilibili.com/bangumi/media/md{media_id}"
    headers = {
        "User-Agent":
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36 Edg/118.0.2088.57",
        "Referer": "https://www.bilibili.com",
    }
    resp = requests.get(url, headers=headers)
    # Bail out before touching the body — the original decoded the body first.
    if resp.status_code != 200:
        print(f"{media_id} 页面访问失败")
        return []
    resp.encoding = resp.apparent_encoding
    html = resp.text

    tree = HTMLParser(html)

    # Title, e.g. "让子弹飞"
    title = tree.css("span[class*='media-info-title-t']")[0].text(strip=True)

    # Genre tags, e.g. ['剧情', '喜剧', '动作'] -> "剧情,喜剧,动作"
    labels = [node.text(strip=True) for node in tree.css('span[class="media-tag"]')]
    label = ",".join(labels)

    # Play count, follower count, danmaku count, e.g. "1.5亿 193万 106.8万"
    info_list = tree.css("span[class*='media-info-count']")
    order = info_list[0].css("em")[0].text(strip=True)
    order_num = info_list[1].css("em")[0].text(strip=True)
    comment_num = info_list[2].css("em")[0].text(strip=True)

    # Long/short review counts come from tab labels shaped like "长评(106)".
    page = BeautifulSoup(html, "html.parser")
    nav = HTMLParser(str(page.find("div", class_="media-tab-nav")))  # parse once, not twice
    long_cmt = short_cmt = ""
    cmt1 = nav.css("li")[1].text(strip=True).replace(" ", "")
    if "(" in cmt1:
        long_cmt = cmt1.split("(")[1].split(")")[0]
    cmt2 = nav.css("li")[2].text(strip=True).replace(" ", "")
    if "(" in cmt2:
        short_cmt = cmt2.split("(")[1].split(")")[0]

    # Release date and duration are the first two <span>s in the time div.
    time_div = HTMLParser(str(page.find("div", class_="media-info-time")))
    index_show = time_div.css("span")[0].text(strip=True)
    show_time = time_div.css("span")[1].text(strip=True)

    # Score (e.g. "9.9") and number of raters (e.g. "139521人评").
    score = score_num = ""
    for node in tree.css("div[class*='media-info-score']"):
        text = node.text(strip=True)
        if text:
            score = text
    for node in tree.css("div[class*='media-info-review-times']"):
        score_num = node.text(strip=True)

    return [title, subTitle, label, order, order_num, comment_num, long_cmt, short_cmt, index_show, show_time, score,
            score_num]


# --- Driver: read (media_id, title, subTitle) per CSV row and scrape each page ---
with open(path, mode='r', encoding='utf-8') as f:
    for row in csv.reader(f):
        # Input columns used: row[1]=media_id, row[3]=title, row[4]=subTitle.
        media_id = row[1]
        title = row[3]
        subTitle = row[4]
        print(media_id, title, subTitle)
        try:
            # `info` was named `list` in the original, shadowing the builtin.
            info = get_pageinfo(media_id, subTitle)
            if len(info) > 0:
                sava_list(info)
            else:
                # Non-200 response: queue the id for the retry pass below.
                failList.append(media_id)
        except Exception as e:
            # Any parse/network error: record the row for manual follow-up.
            save_false_mdid([media_id, title, subTitle])
            print(e)

        # Throttle requests to avoid hammering the server.
        time.sleep(0.5)

# Retry every id that came back non-200.
print("失败的页面id：", failList)
deal_fail(failList)
