import scrapy
import json
from urllib.parse import urlencode
from pathlib import Path
from ..items import KnowledgeItem


class KnowledgeInfoSpider(scrapy.Spider):
    """Crawl video metadata from Bilibili's "knowledge" zone (rid=36).

    Pages through the ``dynamic/region`` web API, yielding one
    ``KnowledgeItem`` per video until ``target_count`` items have been
    collected.  Results are written by Scrapy's feed exporter; the output
    path is injected as ``FEED_URI`` in :meth:`from_crawler`.
    """

    name = 'knowledge'
    allowed_domains = ['api.bilibili.com']
    custom_settings = {
        'FEED_FORMAT': 'json',
        'DOWNLOAD_DELAY': 3,       # fixed 3-second gap between requests
        'CONCURRENT_REQUESTS': 1,  # one request in flight at a time
        'AUTOTHROTTLE_ENABLED': True,
        'DEFAULT_REQUEST_HEADERS': {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Referer': 'https://www.bilibili.com/'
        }
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.target_count = 150     # stop after this many items
        self.collected_count = 0    # number of items yielded so far
        self.collected_items = []   # in-memory copy of yielded items

        # Output path: <project root>/data_analysis_flask/static/knowledge.json
        # (__file__ is assumed to live four levels below the project root —
        # i.e. <root>/<project>/<project>/spiders/this_file.py; verify if moved)
        project_root = Path(__file__).resolve().parent.parent.parent.parent
        self.output_path = project_root / 'data_analysis_flask' / 'static' / 'knowledge.json'

    def start_requests(self):
        """Issue the initial request for page 1 of the knowledge zone."""
        api_params = {
            'rid': 36,  # region id of the "knowledge" zone
            'ps': 15,   # page size: 15 videos per request
            'pn': 1     # starting page number
        }
        api_url = 'https://api.bilibili.com/x/web-interface/dynamic/region?' + urlencode(api_params)
        yield scrapy.Request(api_url, callback=self.parse_api)

    def parse_api(self, response):
        """Parse one API page, yield its items, and schedule the next page.

        Stops when the API reports an error code, when ``target_count``
        items have been collected, or when a page comes back empty.
        """
        data = json.loads(response.text)
        if data.get('code') != 0 or self.collected_count >= self.target_count:
            return

        # Guard against `data` being null / missing `archives` so that a
        # malformed response does not abort the crawl with a KeyError.
        archives = (data.get('data') or {}).get('archives') or []
        for video in archives:
            if self.collected_count >= self.target_count:
                break  # reached the target count

            item = KnowledgeItem()

            # Basic info.
            item['bvid'] = video.get('bvid', '')
            item['title'] = video.get('title', '')
            item['up_name'] = video.get('owner', {}).get('name', '')  # API field is owner.name

            # Statistics: the API uses singular field names (view, like, ...).
            stat = video.get('stat', {})
            item['views'] = stat.get('view', 0)
            item['likes'] = stat.get('like', 0)
            item['coins'] = stat.get('coin', 0)
            item['stars'] = stat.get('favorite', 0)
            item['shares'] = stat.get('share', 0)
            item['bullet_comments'] = stat.get('danmaku', 0)

            self.collected_items.append(item)
            self.collected_count += 1
            yield item

        # Keep paging while short of the target; the 3-second spacing is
        # enforced by DOWNLOAD_DELAY.  An empty page means the region is
        # exhausted — stop instead of requesting blank pages forever.
        if archives and self.collected_count < self.target_count:
            current_page = int(response.url.split('pn=')[-1].split('&')[0])
            next_url = response.url.replace(f'pn={current_page}', f'pn={current_page + 1}')
            yield scrapy.Request(next_url, callback=self.parse_api)

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Build the spider, then point FEED_URI at the computed output path."""
        spider = super().from_crawler(crawler, *args, **kwargs)
        # NOTE(review): mutating crawler.settings here assumes they are not
        # yet frozen for this key — confirm against the Scrapy version in use
        # (newer Scrapy prefers the FEEDS dict setting).
        crawler.settings.set('FEED_URI', str(spider.output_path))
        return spider
