import scrapy
import json
from urllib.parse import urlencode
from pathlib import Path
from ..items import BilibiliGameItem

class BiliBiliGameSpider(scrapy.Spider):
    """Crawl Bilibili's game-region recommendation feed into per-video stat items.

    Walks the paginated ``region/feed/rcmd`` list API, then fetches each
    video's ``view`` detail endpoint and yields one :class:`BilibiliGameItem`
    per video. Items are written by Scrapy's feed exporter to OUTPUT_PATH.
    """

    name = 'game'
    allowed_domains = ['api.bilibili.com']

    # Crawl limits: stop requesting new pages once ~TOTAL_ITEMS videos have
    # been listed; the API returns PER_PAGE videos per call.
    TOTAL_ITEMS = 100
    PER_PAGE = 15
    # NOTE(review): parents[3] assumes the fixed on-disk layout
    # <root>/<project>/<project>/spiders/this_file.py — confirm before moving.
    OUTPUT_PATH = Path(__file__).resolve().parents[3] / 'data_analysis_flask' / 'static' / 'game.json'

    # Scrapy settings for this spider only. FEEDS replaces the
    # FEED_FORMAT/FEED_URI pair (deprecated since Scrapy 2.1); 'overwrite'
    # prevents the JSON exporter from appending a second JSON document to a
    # stale file from a previous run, which would corrupt it.
    custom_settings = {
        'CONCURRENT_REQUESTS': 8,
        'DOWNLOAD_DELAY': 0.5,
        'CONCURRENT_REQUESTS_PER_DOMAIN': 8,
        'AUTOTHROTTLE_ENABLED': True,
        'FEEDS': {
            str(OUTPUT_PATH): {'format': 'json', 'overwrite': True},
        },
    }

    # Query-string parameters shared by every list-page request; only the
    # "offset" varies per page.
    BASE_PARAMS = {
        "display_id": "2",
        "request_cnt": str(PER_PAGE),  # page size, kept in sync with PER_PAGE
        "from_region": "1008",
        "device": "web",
        "plat": "30",
        "web_location": "333.40138"
    }

    # Browser-like headers; the API rejects requests without a Referer.
    HEADERS = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        'Referer': 'https://www.bilibili.com/'
    }

    def start_requests(self):
        """Entry point: start the crawl at the first page (offset 0)."""
        yield self._build_api_request(offset=0)

    def _build_api_request(self, offset):
        """Build a list-page API request for the given item offset.

        The offset is echoed back through ``request.meta`` so the callback
        can compute the next page's offset.
        """
        params = {**self.BASE_PARAMS, "offset": str(offset)}
        url = "https://api.bilibili.com/x/web-interface/region/feed/rcmd?" + urlencode(params)

        return scrapy.Request(
            url=url,
            headers=self.HEADERS,
            callback=self._parse_video_list,
            meta={'offset': offset},
        )

    def _parse_video_list(self, response):
        """Parse one page of the recommendation feed.

        Yields a detail request per listed video, plus the next list-page
        request while the TOTAL_ITEMS quota is unmet and the current page
        was full (a short page means the feed is exhausted).
        """
        # Keep the try body minimal: only the decode can legitimately fail
        # here; real programming errors should surface to Scrapy's own
        # error handling instead of being mislabeled.
        try:
            data = json.loads(response.text)
        except ValueError as e:  # includes json.JSONDecodeError
            self.logger.error(f'列表解析失败: {str(e)}')
            return

        if data.get('code') != 0:
            self.logger.error(f'API错误: {data.get("message")}')
            return

        current_offset = response.meta['offset']
        # "data" or "archives" may be null in the payload; `or` guards both.
        videos = (data.get('data') or {}).get('archives') or []

        # One detail request per video that actually carries a bvid.
        for video in videos:
            if bvid := video.get('bvid'):
                yield self._request_video_detail(bvid)

        # Pagination: advance by however many videos this page returned.
        next_offset = current_offset + len(videos)
        if (next_offset < self.TOTAL_ITEMS) and (len(videos) == self.PER_PAGE):
            yield self._build_api_request(offset=next_offset)

    def _request_video_detail(self, bvid):
        """Build a request for one video's detail/stat endpoint."""
        url = f"https://api.bilibili.com/x/web-interface/view?bvid={bvid}"
        return scrapy.Request(
            url=url,
            headers=self.HEADERS,
            callback=self._parse_video_detail,
        )

    def _parse_video_detail(self, response):
        """Parse a video detail response and yield a BilibiliGameItem."""
        try:
            data = json.loads(response.text)
        except ValueError as e:  # includes json.JSONDecodeError
            self.logger.error(f'详情解析失败: {str(e)}')
            return

        if data.get('code') != 0:
            self.logger.error(f'详情API错误: {data.get("message")}')
            return

        video = data.get('data') or {}
        stat = video.get('stat') or {}

        yield BilibiliGameItem(
            # `or ''` also covers an explicit null title, which .strip()
            # alone would crash on.
            title=(video.get('title') or '').strip(),
            up_name=video.get('owner', {}).get('name', '未知UP主'),
            video_url=f"https://www.bilibili.com/video/{video.get('bvid', '')}",
            views=str(stat.get('view', 0)),
            danmu=str(stat.get('danmaku', 0)),
            like=str(stat.get('like', 0)),
            favorite=str(stat.get('favorite', 0)),
            coin=str(stat.get('coin', 0)),
            share=str(stat.get('share', 0)),
        )
