import scrapy
import json
from urllib.parse import urlencode
import os
from pathlib import Path
from ..items import BilibiliMovieItem


class BiliBiliMovieSpider(scrapy.Spider):
    """Crawl Bilibili's 3-day movie ranking and export per-movie stats as JSON.

    Flow:
        1. Fetch the ranking list (``rank_url``).
        2. For each season, fetch its episode list and pick the "original"
           (原版) episode, falling back to the first episode.
        3. Fetch that episode's detail/stat endpoint and yield one item per
           movie (title, bvid, views, danmu, like, coin, favorite, share, url).

    The feed is written to ``output_path`` (constructor arg, default:
    ``<project_root>/data_analysis_flask/static/movie.json``).
    """

    name = "movie"
    allowed_domains = ["api.bilibili.com", "bilibili.com"]

    custom_settings = {
        "CONCURRENT_REQUESTS": 8,
        "DOWNLOAD_DELAY": 0.5,
        "CONCURRENT_REQUESTS_PER_DOMAIN": 8,
        "AUTOTHROTTLE_ENABLED": True,
        "AUTOTHROTTLE_START_DELAY": 0.5,
        "AUTOTHROTTLE_MAX_DELAY": 0.6,
        "FEED_FORMAT": "json",
        # Plain utf-8 (no "-sig") so the exported feed carries no BOM.
        "FEED_EXPORT_ENCODING": "utf-8",
        "FEED_EXPORT_FIELDS": [
            "title",
            "bvid",
            "views",
            "danmu",
            "like",
            "coin",
            "favorite",
            "share",
            "video_url",
        ],
        "ITEM_PIPELINES": {
            "bilibili.pipelines.BilibiliPipeline": 300,
        },
    }

    rank_url = "https://api.bilibili.com/pgc/season/rank/web/list?day=3&season_type=2&web_location=333.934"
    ep_list_api = "https://api.bilibili.com/pgc/view/web/ep/list?season_id={}"
    episode_info_api = "https://api.bilibili.com/pgc/season/episode/web/info?ep_id={}"

    def __init__(self, output_path=None, *args, **kwargs):
        """Resolve the output file path and prepare request headers.

        Args:
            output_path: Optional feed destination; defaults to
                ``<project_root>/data_analysis_flask/static/movie.json``.
        """
        super().__init__(*args, **kwargs)
        # Spider file lives 4 levels below the project root
        # (project/bilibili/bilibili/spiders/<this file>) — TODO confirm layout.
        project_root = Path(__file__).resolve().parent.parent.parent.parent
        default_path = (
            project_root / "data_analysis_flask" / "static" / "movie.json"
        )
        self.output_path = Path(output_path) if output_path else default_path
        # pathlib equivalent of os.makedirs(..., exist_ok=True).
        self.output_path.parent.mkdir(parents=True, exist_ok=True)

        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                          "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
            "Referer": "https://www.bilibili.com/",
            # Disable Brotli so response bodies decode without the brotli lib.
            "Accept-Encoding": "gzip, deflate",
        }

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Point the feed exporter at this spider's resolved output path."""
        spider = super().from_crawler(crawler, *args, **kwargs)
        # "cmdline" priority overrides any project-level FEED_URI setting.
        crawler.settings.set("FEED_URI", str(spider.output_path), priority="cmdline")
        return spider

    def _load_api_json(self, response, error_label):
        """Decode a Bilibili API response shared by all three callbacks.

        Strips a possible BOM and stray ``ZMNBSF`` markers, parses JSON and
        checks the API-level ``code`` field.

        Args:
            response: The scrapy Response to decode.
            error_label: Prefix used in the log message when ``code != 0``.

        Returns:
            The parsed payload dict, or ``None`` on any failure (logged).
        """
        cleaned_text = response.text.lstrip('\ufeff').replace('ZMNBSF', '')
        try:
            data = json.loads(cleaned_text)
        except json.JSONDecodeError as e:
            self.logger.error(f"JSON 解析错误: {e}")
            return None
        if data.get("code") != 0:
            self.logger.error(f"{error_label}: {data.get('message')}")
            return None
        return data

    def start_requests(self):
        """Kick off the crawl with the ranking-list request."""
        yield scrapy.Request(
            url=self.rank_url,
            headers=self.headers,
            callback=self.parse_rank_list,
        )

    def parse_rank_list(self, response):
        """Parse the ranking list and schedule one episode-list request per season."""
        data = self._load_api_json(response, "排行榜API错误")
        if data is None:
            return

        rank_list = data.get("data", {}).get("list", [])
        self.logger.info(f"排行榜共 {len(rank_list)} 条")

        for idx, video in enumerate(rank_list, 1):
            season_id = video.get("season_id")
            season_title = video.get("title", "")
            if not season_id:
                self.logger.warning(f"第{idx}条无 season_id，跳过。标题: {season_title}")
                continue

            yield scrapy.Request(
                url=self.ep_list_api.format(season_id),
                headers=self.headers,
                callback=self.parse_episode_list,
                meta={"season_id": season_id, "season_title": season_title},
            )

    def parse_episode_list(self, response):
        """Pick the "original" (原版) episode of a season and request its detail.

        Falls back to the first episode when no title contains 原版; skips the
        season when it has no episodes or the chosen episode has no usable id.
        """
        data = self._load_api_json(response, "EP列表API错误")
        if data is None:
            return

        season_id = response.meta["season_id"]
        season_title = response.meta["season_title"]
        episodes = data.get("result", {}).get("episodes", [])
        self.logger.info(f"season_id={season_id} 有 {len(episodes)} 集")

        # Prefer an episode whose (show_)title contains 原版, else the first one.
        chosen = next(
            (ep for ep in episodes
             if "原版" in ep.get("title", "") or "原版" in ep.get("show_title", "")),
            None,
        )
        if not chosen and episodes:
            chosen = episodes[0]

        if not chosen:
            self.logger.warning(f"season_id={season_id} 集数为0，跳过")
            return

        ep_id = chosen.get("ep_id") or chosen.get("id")
        # Guard: without an id the detail URL would be "...ep_id=None".
        if not ep_id:
            self.logger.warning(f"season_id={season_id} 选中剧集缺少 ep_id，跳过")
            return
        bvid = chosen.get("bvid", "")
        self.logger.info(f"season_id={season_id} 选中 ep_id={ep_id}，bvid={bvid}")

        yield scrapy.Request(
            url=self.episode_info_api.format(ep_id),
            headers=self.headers,
            callback=self.parse_episode_detail,
            meta={
                "season_title": season_title,
                "bvid": bvid,
                "ep_id": ep_id,
            },
        )

    def parse_episode_detail(self, response):
        """Extract the episode's stat block and yield the finished item."""
        data = self._load_api_json(response, "详情API错误")
        if data is None:
            return

        d = data.get("data", {})
        stat = d.get("stat", {})

        item = BilibiliMovieItem()
        item["title"] = response.meta["season_title"]
        item["bvid"] = response.meta.get("bvid", "")
        item["views"] = stat.get("view", 0)
        item["danmu"] = stat.get("dm", 0)
        item["like"] = stat.get("like", 0)
        item["coin"] = stat.get("coin", 0)
        item["favorite"] = stat.get("favorite", 0)
        item["share"] = stat.get("share", 0)
        item["video_url"] = (
            f"https://www.bilibili.com/bangumi/play/ep{response.meta['ep_id']}"
        )

        self.logger.info(f"完成抓取: {item['title']}")
        yield item

    def closed(self, reason):
        """On shutdown, scrub any stray ``ZMNBSF`` markers from the feed file."""
        if self.output_path.exists():
            content = self.output_path.read_text(encoding='utf-8')
            cleaned_content = content.replace('ZMNBSF', '')
            self.output_path.write_text(cleaned_content, encoding='utf-8')
            self.logger.info("已清理 JSON 文件中的 `ZMNBSF`")
