import scrapy
from ..items import BilibiliShortplayItem
import json
import time
import random
from pathlib import Path

class BiliShortPlaySpider(scrapy.Spider):
    """Scrape ~150 short-drama videos from Bilibili's region feed API.

    Flow: ``start_requests`` hits the region recommendation endpoint with
    randomized timestamps, ``parse_list`` extracts video ``bvid``s and
    schedules one detail request per unseen video, and ``parse_detail``
    turns each detail response into a :class:`BilibiliShortplayItem` with
    engagement stats.  Results are exported as a JSON feed into the Flask
    app's static directory (see ``output_path``).
    """

    name = "shortplay"
    allowed_domains = ["api.bilibili.com"]
    total_wanted = 150   # target number of items to collect
    total_got = 0        # items successfully yielded so far
    item_count = 0       # count of successfully scraped items (kept in sync with total_got)
    visited_bvids = set()  # bvids already scheduled — de-duplicates across list pages

    # Feed output: <repo root>/data_analysis_flask/static/shortplay.json
    output_path = Path(__file__).resolve().parent.parent.parent.parent / 'data_analysis_flask' / 'static' / 'shortplay.json'
    custom_settings = {
        'CONCURRENT_REQUESTS': 8,
        'DOWNLOAD_DELAY': 0.5,
        'CONCURRENT_REQUESTS_PER_DOMAIN': 8,
        'AUTOTHROTTLE_ENABLED': True,
        'AUTOTHROTTLE_START_DELAY': 1.0,
        'AUTOTHROTTLE_MAX_DELAY': 3.0,
        'RETRY_ENABLED': True,
        'RETRY_TIMES': 5,
        'LOG_LEVEL': 'INFO',
        'RETRY_HTTP_CODES': [500, 502, 503, 504, 408, 429],  # status codes worth retrying
        # NOTE(review): FEED_FORMAT/FEED_URI are deprecated since Scrapy 2.1
        # (removed in 2.12) in favour of the FEEDS setting — migrate once the
        # project's Scrapy version is confirmed.
        'FEED_FORMAT': 'json',
        'FEED_URI': str(output_path),  # feed output path
    }

    # NOTE(review): the previous from_crawler override re-set FEED_URI on
    # crawler.settings; that was redundant (custom_settings already provides
    # it) and raises on the frozen Settings object in recent Scrapy, so the
    # override was removed.  The no-op __init__ was removed as well.

    def start_requests(self):
        """Fire a batch of list-page requests against the region feed API.

        A randomized ``wts`` timestamp is appended so repeated hits return
        different recommendation batches; 30 requests of 15 videos each give
        ample headroom to collect ``total_wanted`` unique videos.
        """
        base_url = "https://api.bilibili.com/x/web-interface/region/feed/rcmd"
        headers = {
            "User-Agent": "Mozilla/5.0",
            "Referer": "https://www.bilibili.com/",
            "Origin": "https://www.bilibili.com",
        }

        for _ in range(30):
            t = int(time.time()) + random.randint(1, 1000)
            url = f"{base_url}?display_id=1&request_cnt=15&from_region=1021&device=web&plat=30&web_location=333.40138&wts={t}"
            yield scrapy.Request(url=url, headers=headers, callback=self.parse_list)

    def parse_list(self, response):
        """Parse one recommendation page and schedule per-video detail requests.

        Handles both payload shapes seen from this endpoint: ``data`` as a
        bare list of videos, or a dict carrying an ``archives`` list.
        """
        try:
            json_data = json.loads(response.text)

            # API-level error (HTTP 200 but non-zero business code).
            if json_data.get("code") != 0:
                self.logger.warning(f"API返回错误: code={json_data.get('code')}, message={json_data.get('message')}")
                return

            data_content = json_data.get("data")
            if isinstance(data_content, list):
                videos = data_content
            elif isinstance(data_content, dict):
                videos = data_content.get("archives", [])
            else:
                self.logger.warning(f"未知的data格式: {type(data_content)}")
                return

            if not videos:
                self.logger.warning("视频列表为空")
                return

            for video in videos:
                # BUGFIX: gate on the number of *scheduled* videos rather than
                # total_got — total_got only grows once detail responses come
                # back, so the old check let far more than total_wanted detail
                # requests through while responses were still in flight.
                if len(self.visited_bvids) >= self.total_wanted:
                    break
                bvid = video.get("bvid")
                if bvid and bvid not in self.visited_bvids:
                    self.visited_bvids.add(bvid)
                    detail_url = f"https://api.bilibili.com/x/web-interface/view?bvid={bvid}"
                    # Reuse the browser-like headers from the list request.
                    # (Dropped the meta={"retry_times": 0}: RetryMiddleware
                    # manages that key itself; pre-seeding it was a no-op.)
                    yield scrapy.Request(
                        url=detail_url,
                        headers=response.request.headers,
                        callback=self.parse_detail,
                    )

        except Exception as e:
            # Boundary catch-all: log and drop this page instead of killing the crawl.
            self.logger.error(f"解析异常: {str(e)}, URL={response.url}")

    def parse_detail(self, response):
        """Convert one video-detail response into a BilibiliShortplayItem."""
        if self.total_got >= self.total_wanted:
            return  # target reached — ignore remaining in-flight responses

        try:
            data = response.json().get("data", {})
            if not data:
                self.logger.warning(f"详情数据为空: {response.url}")
                return  # nothing usable in this response
        except json.JSONDecodeError as e:
            self.logger.error(f"解析详情失败: {response.url}, 错误: {str(e)}")
            return  # malformed body — skip it

        stat = data.get("stat", {})
        item = BilibiliShortplayItem()
        item["title"] = data.get("title", "未知")
        item["views"] = stat.get("view", 0)
        item["bullet_comments"] = stat.get("danmaku", 0)
        item["likes"] = stat.get("like", 0)
        item["coins"] = stat.get("coin", 0)
        item["stars"] = stat.get("favorite", 0)
        item["shares"] = stat.get("share", 0)

        self.total_got += 1
        self.item_count += 1  # BUGFIX: this counter was declared but never maintained
        yield item
