import csv
import time

import requests
from bs4 import BeautifulSoup
from Fan import Fan
from selectolax.parser import HTMLParser

# Input CSV: one bangumi per row (columns consumed by start_read_page).
source_file = "page.csv"
# Output CSV: rows whose fetch failed are appended here (key info only).
fail_file = "fail1.csv"
# Output CSV: successfully fetched bangumi records are appended here.
result_file = "result1.csv"


def get_info(fan_ju: Fan) -> bool:
    """
    Populate *fan_ju* with interaction and playback statistics fetched
    from the Bilibili PGC web APIs, plus year/label from the detail page.

    :param fan_ju: bangumi record; must already carry ep_id, season_id, link
    :return: True when all stats were fetched, False when ep_id/season_id
             is missing
    :raises: propagates requests / JSON / KeyError failures — the caller
             (start_read_page) catches them and records the row as failed
    """
    if fan_ju.ep_id is None or fan_ju.season_id is None:
        return False

    # Scrape year and genre label from the detail page itself.
    fan_ju.label, fan_ju.year = get_year_label(fan_ju.link)

    info_url = f"https://api.bilibili.com/pgc/season/episode/web/info?ep_id={fan_ju.ep_id}"
    stat_url = f"https://api.bilibili.com/pgc/web/season/stat?season_id={fan_ju.season_id}"
    headers = {
        "Origin":
            "https://www.bilibili.com",
        "User-Agent":
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36"
    }

    # Episode-level interaction stats. timeout= so one hung request cannot
    # stall the whole crawl; raise_for_status() turns HTTP errors into
    # exceptions that the caller's except-branch already handles.
    resp = requests.get(info_url, headers=headers, timeout=10)
    resp.raise_for_status()
    stat = resp.json()['data']['stat']
    fan_ju.like = stat['like']          # likes
    fan_ju.coin = stat['coin']          # coins
    fan_ju.favorite = stat['favorite']  # favourites
    fan_ju.share = stat['share']        # shares
    fan_ju.reply = stat['reply']        # comments

    # Season-level playback stats.
    resp = requests.get(stat_url, headers=headers, timeout=10)
    resp.raise_for_status()
    result = resp.json()['result']
    fan_ju.views = result['views']                  # total plays
    fan_ju.danmakus = result['danmakus']            # danmaku count
    fan_ju.series_follow = result['series_follow']  # series followers

    return True


def get_year_label(url: str) -> tuple[str, str]:
    """
    Fetch a bangumi detail page and extract its genre label and year.

    :param url: detail page URL
    :return: (label, year), e.g. ("漫画改/奇幻/热血/战斗", "2023") — the
             " · " separators and spaces embedded in the spans are stripped
    :raises: propagates requests failures / IndexError on unexpected markup
    """
    headers = {
        "Origin":
            "https://www.bilibili.com",
        "User-Agent":
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36"
    }
    # timeout= so a hung request doesn't stall the whole crawl.
    resp = requests.get(url, headers=headers, timeout=10)
    resp.encoding = resp.apparent_encoding
    page = BeautifulSoup(resp.text, "html.parser")
    # The page carries several mediaDesc divs; index 1 is the one holding
    # the label/year/status spans (sample markup in the module-level string
    # following this function). The original re-parsed this div's string
    # with selectolax — bs4 alone extracts the same spans.
    div = page.find_all("div", class_="mediainfo_mediaDesc__jjRiB")[1]
    spans = div.find_all("span")
    label = spans[0].get_text().replace("·", "").replace(" ", "")
    year = spans[1].get_text().replace("·", "").replace(" ", "")

    return label, year


"""
<div class="mediainfo_mediaDesc__jjRiB"><span>漫画改 / 奇幻 / 热血 / 战斗<!-- --> ·
 </span><span>2023<!-- --> · </span><span>已完结, 全13话<!-- --> · </span><span><span>
 <a class="mediainfo_avLink__iyzyV" href="//www.bilibili.com/video/BV1qh4y1d74E/" 
 rel="noreferrer" target="_blank">BV1qh4y1d74E</a></span></span></div>
"""


def start_read_page(file: str):
    """
    Read the listing CSV row by row, fetch each bangumi's stats, then
    append successes to result_file and failures to fail_file.

    :param file: path of the source CSV (one bangumi per row)
    """
    # `src` instead of rebinding `file`: the original shadowed its own
    # parameter with the file handle.
    with open(file, mode='r', encoding='utf-8', newline="") as src:
        csv_read = csv.reader(src)
        count = 1
        success = 0
        fail = 0
        # Successfully populated Fan objects, flushed once at the end.
        my_list = []
        for row in csv_read:
            # BUG FIX: construct a fresh Fan per row. The original reused a
            # single instance created before the loop, so my_list ended up
            # holding N references to one object and the result file
            # contained N copies of the last row's data.
            fan = Fan()
            fan.media_id = row[0]
            fan.ep_id = row[1]
            # badge type: exclusive = 1, member-only = 0
            fan.badge_type = row[3]
            # episode count text, e.g. "全13话"
            fan.index_show = row[4]
            # finished flag (1 = finished)
            fan.is_finish = row[5]
            # detail page link
            fan.link = row[6]
            # rating score
            fan.score = row[9]
            # season_id
            fan.season_id = row[10]
            # subtitle
            fan.subTitle = row[13]
            # title
            fan.title = row[14]
            try:
                if not get_info(fan):
                    # On failure record the key fields for a later retry.
                    save_fail_info([fan.title, fan.ep_id, fan.link, fan.season_id])
                    fail += 1
                    print(count, fan.title, "失败!!", f"成功：{success} / 失败：{fail}")
                else:
                    my_list.append(fan)
                    success += 1
                    print(count, "成功", f"成功：{success} / 失败：{fail}")
            except Exception as e:
                # Network/parse error: record key fields, and surface the
                # exception (the original caught `e` but never showed it).
                save_fail_info([fan.title, fan.ep_id, fan.link, fan.season_id])
                fail += 1
                print(count, fan.title, "失败!!", f"成功：{success} / 失败：{fail}", e)
            count += 1
        # Persist everything that succeeded.
        save_info(my_list)


def save_info(fans: list):
    """
    Append each fetched bangumi record to the result CSV.

    :param fans: Fan objects that were successfully populated
    :return:
    """
    # NOTE: parameter renamed from `list` (it shadowed the builtin); the
    # only call site in this file passes it positionally.
    with open(result_file, mode='a', encoding="utf-8", newline="") as f:
        csv_write = csv.writer(f)
        # writerows consumes the generator of per-fan rows in one call.
        csv_write.writerows(fan.to_list() for fan in fans)


def save_fail_info(fail_list: list):
    """
    Append one row of key info for a bangumi whose fetch failed.

    :param fail_list: key fields of the failed bangumi (title, ep_id, ...)
    :return:
    """
    with open(fail_file, mode='a', encoding="utf-8", newline="") as out:
        csv.writer(out).writerow(fail_list)


# Script entry point: crawl every bangumi listed in the source CSV.
if __name__ == '__main__':
    start_read_page(source_file)
