import scrapy
import json
import time
import random
from urllib.parse import urlencode
from ..items import BilibiliDance
from pathlib import Path

class BiliBiliDanceSpider(scrapy.Spider):
    """Crawl Bilibili's dance category (rid=129) through the web API.

    Workflow:
      1. Page through the region listing API, building a ``BilibiliDance``
         item per video (title, views, danmaku, likes, uploader, URL).
      2. For each video with a ``bvid``, fetch the detail API to enrich the
         item with favorites, shares, coins, replies, description, duration,
         tags and a human-readable publish date.
      3. Items are exported automatically to ``dance.json`` via the feed
         settings below.
    """

    name = 'dance'
    allowed_domains = ['api.bilibili.com']

    # Export target: <repo>/data_analysis_flask/static/dance.json
    OUTPUT_PATH = Path(__file__).resolve().parents[3] / 'data_analysis_flask' / 'static' / 'dance.json'

    custom_settings = {
        'CONCURRENT_REQUESTS': 8,
        'DOWNLOAD_DELAY': 0.5,
        'CONCURRENT_REQUESTS_PER_DOMAIN': 8,
        'AUTOTHROTTLE_ENABLED': True,
        'AUTOTHROTTLE_START_DELAY': 1.0,
        'AUTOTHROTTLE_MAX_DELAY': 3.0,
        'RETRY_TIMES': 3,
        'RETRY_HTTP_CODES': [500, 502, 503, 504, 408, 429],
        # NOTE(review): FEED_URI/FEED_FORMAT are deprecated in Scrapy >= 2.1
        # in favour of the FEEDS dict; kept as-is for compatibility with the
        # Scrapy version this project uses — confirm before upgrading.
        'FEED_URI': str(OUTPUT_PATH),  # auto-export to dance.json
        'FEED_FORMAT': 'json',
        'FEED_EXPORT_ENCODING': 'utf-8',
    }

    # Built-in default cookie (needs periodic refresh when the session expires).
    # SECURITY: this embeds live session credentials (SESSDATA / bili_jct) in
    # source control — prefer loading them from an environment variable or
    # project settings instead of committing them.
    DEFAULT_COOKIE = (
        "buvid3=D5BAF880-F954-DF4D-E65F-05DB9DDD476E40078infoc; "
        "b_nut=1729831440; "
        "_uuid=656E294A-A310B-BE7E-AF5B-9F767311224140806infoc; "
        "SESSDATA=d9280b46%2C1766802365%2C47876%2A62CjAGROBdeowSpuEJYUU79rOSNe5WlCP3DG35XPw_DYIWHeQczYc2h3Fzpzc1x_D8UtMSVkxkZ0dONVBSQnB0WWctTFRmeVBnYm9ScjdaUklPeXhoQ2NVc25TQldXNVpGcF9GVmdmNkJsbXNmZFNWYUw4ZDJVanFhLTZsRmczSVdaUl9wcW1vV3RRIIEC; "
        "bili_jct=8f1778a2cccf3ad9339a892b0ed3227a; "
        "sid=8p7pmz5u; "
        "DedeUserID=650507091; "
        "DedeUserID__ckMd5=ea18f7a57d3c38d0"
    )

    USER_AGENTS = [
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
        # add further UA strings here as needed...
    ]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.total_items = 150   # overall crawl target
        self.per_page = 15       # listing API page size
        self.base_params = {
            "rid": "129",                 # region id: dance category
            "pn": "1",                    # page number (overwritten per request)
            "ps": str(self.per_page),
            "type": "0",
            "web_location": "1550101",
            "wts": str(int(time.time())),
        }

        # Request headers shared by all requests; the built-in cookie is
        # attached directly (see DEFAULT_COOKIE security note above).
        self.headers = {
            'Referer': 'https://www.bilibili.com/',
            'Accept': 'application/json, text/plain, */*',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Origin': 'https://www.bilibili.com',
            'Connection': 'keep-alive',
            'Cookie': self.DEFAULT_COOKIE
        }

        # BUGFIX: `-a proxy=...` arrives from the CLI as a *string*, and any
        # non-empty string is truthy — so `-a proxy=false` used to enable
        # proxying. Normalize the common false-y spellings explicitly.
        proxy_flag = kwargs.get('proxy', False)
        if isinstance(proxy_flag, str):
            proxy_flag = proxy_flag.strip().lower() not in ('', '0', 'false', 'no', 'off')
        self.proxy_enabled = bool(proxy_flag)
        self.proxy_list = [
            "http://proxy1.example.com:8080",
            "http://proxy2.example.com:8080"
        ] if self.proxy_enabled else []

    def start_requests(self):
        """Kick off the crawl from the first listing page."""
        self.logger.info("使用内置Cookie启动爬虫，输出文件: dance.json")
        self.logger.info(f"目标爬取数量: {self.total_items}条")
        yield self.build_request(page=1)

    def build_request(self, page):
        """Build a listing-API request for the given page number."""
        # Pick a fresh random UA per request to reduce fingerprinting.
        headers = self.headers.copy()
        headers['User-Agent'] = random.choice(self.USER_AGENTS)

        params = self.base_params.copy()
        params['pn'] = str(page)
        params['wts'] = str(int(time.time()))
        params['version'] = "1"
        params['ts'] = str(int(time.time()))

        url = "https://api.bilibili.com/x/web-interface/dynamic/region?" + urlencode(params)

        request = scrapy.Request(
            url=url,
            headers=headers,
            callback=self.parse_api,
            meta={'page': page},
            errback=self.handle_error
        )

        # Route through a random proxy when proxying is enabled.
        if self.proxy_enabled and self.proxy_list:
            request.meta['proxy'] = random.choice(self.proxy_list)

        return request

    def build_detail_request(self, bvid, item):
        """Build a video-detail API request carrying the partial item in meta."""
        headers = self.headers.copy()
        headers['User-Agent'] = random.choice(self.USER_AGENTS)

        detail_url = f"https://api.bilibili.com/x/web-interface/view?bvid={bvid}"

        request = scrapy.Request(
            url=detail_url,
            headers=headers,
            callback=self.parse_video_detail,
            meta={'item': item},
            errback=self.handle_detail_error
        )

        if self.proxy_enabled and self.proxy_list:
            request.meta['proxy'] = random.choice(self.proxy_list)

        return request

    def parse_api(self, response):
        """Parse one listing page, yield detail requests, and schedule the next page."""
        try:
            data = json.loads(response.text)
            if data.get('code') != 0:
                msg = data.get('message', '未知错误')
                self.logger.error(f'API错误 {data.get("code")}: {msg}')
                if data.get('code') == -412:
                    # -412 is Bilibili's anti-crawler rejection code.
                    self.logger.warning("触发反爬机制，建议添加Cookie或使用代理")
                return

            current_page = response.meta['page']
            items = data.get('data', {}).get('archives', [])

            if not items:
                self.logger.warning(f"第 {current_page} 页未获取到数据")
                return

            for video in items:
                stat = video.get('stat', {})
                item = BilibiliDance()
                item['title'] = video.get('title', '').strip()
                item['views'] = str(stat.get('view', 0))
                item['bullet_comments'] = str(stat.get('danmaku', 0))
                item['likes'] = str(stat.get('like', 0))
                # Fall back to the legacy 'author' field when 'owner' is absent.
                item['up_name'] = video.get('owner', {}).get('name', video.get('author', {}).get('name', '未知UP主'))
                item['video_url'] = f"https://www.bilibili.com/video/{video.get('bvid', '')}"
                item['pubdate'] = video.get('pubdate', 0)

                bvid = video.get('bvid')
                if bvid:
                    # Enrich via the detail API; the item is yielded there.
                    yield self.build_detail_request(bvid, item)
                else:
                    yield item

            # Progress estimate assumes full pages; the last page may be short.
            crawled_count = current_page * self.per_page
            self.logger.info(f'已爬取 {min(crawled_count, self.total_items)}/{self.total_items} 条数据')

            # Continue only while under target and the page came back full.
            if crawled_count < self.total_items and len(items) == self.per_page:
                yield self.build_request(page=current_page + 1)

        except json.JSONDecodeError:
            self.logger.error(f'JSON解析失败: {response.text[:200]}')
        except Exception as e:
            self.logger.error(f'解析失败: {str(e)}', exc_info=True)

    def parse_video_detail(self, response):
        """Enrich the carried item with detail-API stats, then yield it.

        On any failure the partially-filled item is yielded anyway so the
        listing data is never lost.
        """
        try:
            item = response.meta['item']
            data = json.loads(response.text)

            if data.get('code') != 0:
                self.logger.error(f"详情API错误: {data.get('message')}")
                yield item
                return

            detail_data = data.get('data', {})
            stat_data = detail_data.get('stat', {})

            item['stars'] = str(stat_data.get('favorite', 0))   # favorites
            item['shares'] = str(stat_data.get('share', 0))     # shares
            item['coins'] = str(stat_data.get('coin', 0))       # coins
            item['replies'] = str(stat_data.get('reply', 0))    # comments

            item['description'] = detail_data.get('desc', '')
            item['duration'] = self.format_duration(detail_data.get('duration', 0))

            # NOTE(review): the view API may not always include 'tags';
            # the empty-list default keeps this safe either way.
            tags = [tag['tag_name'] for tag in detail_data.get('tags', [])]
            item['tags'] = ', '.join(tags) if tags else ''

            # Human-readable publish time (local timezone).
            item['pubdate_str'] = time.strftime(
                "%Y-%m-%d %H:%M:%S",
                time.localtime(detail_data.get('pubdate', 0))
            ) if detail_data.get('pubdate') else ''

            yield item

        except json.JSONDecodeError:
            self.logger.error(f'详情JSON解析失败: {response.text[:200]}')
            yield response.meta['item']
        except Exception as e:
            self.logger.error(f'详情解析失败: {str(e)}', exc_info=True)
            yield response.meta['item']

    def format_duration(self, seconds):
        """Convert a duration in seconds to ``HH:MM:SS`` (or ``MM:SS`` under an hour)."""
        seconds = int(seconds)
        hours, remainder = divmod(seconds, 3600)
        minutes, seconds = divmod(remainder, 60)
        return f"{hours:02d}:{minutes:02d}:{seconds:02d}" if hours else f"{minutes:02d}:{seconds:02d}"

    def handle_error(self, failure):
        """Errback for listing requests: log the failure."""
        self.logger.error(f"列表页请求失败: {failure.value}")
        # BUGFIX: the previous code also checked scrapy.exceptions.TwistedWebError,
        # which does not exist in Scrapy and raised AttributeError inside the
        # errback, masking the real failure. Only the valid check remains.
        if failure.check(scrapy.exceptions.IgnoreRequest):
            self.logger.warning("请求被忽略，可能是重复URL")

    def handle_detail_error(self, failure):
        """Errback for detail requests: salvage the partially-filled item."""
        self.logger.error(f"详情页请求失败: {failure.value}")
        if 'item' in failure.request.meta:
            yield failure.request.meta['item']


