import hashlib
import json
from urllib.parse import urlencode
import scrapy
from lxml import etree
from lxml.etree import XMLSyntaxError
from fake_useragent import UserAgent


class BilibiliDanmukeSpider(scrapy.Spider):
    """Scrapy spider that collects the danmaku (bullet comments) of one
    Bilibili video identified by its BV id.

    Flow:
      1. ``start_requests`` — query the web-interface API for the video's cid.
      2. ``parse_video``    — read the cid from the JSON reply and request the
                              comment XML at ``comment.bilibili.com/<cid>.xml``.
      3. ``parse_danmaku``  — parse the XML and yield one dict item per
                              danmaku, tagged with an MD5 hash for dedup.
    """

    name = "bilibili_danmuke"
    allowed_domains = ["bilibili.com"]
    # Unused in practice: start_requests() is overridden below.
    start_urls = ["https://bilibili.com"]

    def __init__(self, bvid='BV12udNYqEvL', *args,
                 **kwargs):
        """Initialize the spider.

        Args:
            bvid: the BV id of the target video (default is a sample video).
        """
        super().__init__(*args, **kwargs)
        self.bvid = bvid
        self.ua = UserAgent()
        # SECURITY NOTE(review): a live session cookie (SESSDATA / bili_jct)
        # is hard-coded below. It grants account access and will expire;
        # move it to Scrapy settings or an environment variable instead of
        # committing it to source control.
        self.headers = {
            'User-Agent': self.ua.random,
            'Referer': f'https://www.bilibili.com/video/{self.bvid}',
            'Cookie': '''buvid3=8A4FF108-AF60-AF88-21BB-38D6E9E6BBD139100infoc; b_nut=1727228439; _uuid=F36637D1-3E64-5446-8352-B9FAE937998239795infoc; buvid4=C231C1D5-0E5E-5DAE-679E-6E6E39C5F62139780-024092501-iwnLJHw%2FMzLsZ5FoW51oLA%3D%3D; enable_web_push=DISABLE; DedeUserID=431059000; DedeUserID__ckMd5=5de52f6d0bddf2e6; rpdid=|(m)mJ~J|J~0J'u~k~kk)l|R; header_theme_version=CLOSE; buvid_fp_plain=undefined; fingerprint=f80fb1b0859eacd0833cd6c441479cbf; buvid_fp=f80fb1b0859eacd0833cd6c441479cbf; CURRENT_QUALITY=80; SESSDATA=6e08edbc%2C1757143968%2Caf8b7%2A31CjAYQkl3WvZ2qrCrvVbq4v7j7_LSTwX056m2INwiyHuknNg24-N_pcXrfwafm4hrYyESVnV5MzZzOWpjOUpIclFaakpvcTltSVNPMFhzb3RRcWpiZnBpMFdReDdwQV9JcUw3N21iazVYZXBwV3NZaWM2ZU1wRFF4ZlRXZUliQXlKVWl5YlFCUUJBIIEC; bili_jct=9b9400b76d3992e2577f4bd58b622f05; sid=8urzzxpc; b_lsid=8D71CBA5_1958395C482; bili_ticket=eyJhbGciOiJIUzI1NiIsImtpZCI6InMwMyIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3NDE5Mjg1ODksImlhdCI6MTc0MTY2OTMyOSwicGx0IjotMX0.J1y9NLsmTfxoVq0esmNTUNAKvy_scYWHEy1atEqExaY; bili_ticket_expires=1741928529; enable_feed_channel=ENABLE; bmg_af_switch=1; bmg_src_def_domain=i1.hdslb.com; CURRENT_FNVAL=4048; home_feed_column=4; browser_resolution=966-695; bp_t_offset_431059000=1042955060335607808'''
        }

    def start_requests(self):
        """Issue the initial GET against the video-info API for self.bvid."""
        url = 'https://api.bilibili.com/x/web-interface/view'
        params = {'bvid': self.bvid}
        # Append the query parameters to the URL (GET request).
        url_with_params = f"{url}?{urlencode(params)}"
        self.logger.info(f"User-Agent: {self.headers['User-Agent']}")
        yield scrapy.Request(url_with_params, headers=self.headers, callback=self.parse_video,
                             meta={'bvid': self.bvid})

    def parse_video(self, response):
        """Parse the video-info JSON, extract the cid, and request the
        danmaku XML for that cid.

        Yields:
            scrapy.Request for the comment XML (callback: parse_danmaku).
        """
        try:
            data = json.loads(response.text)
            # Bilibili API convention: code == 0 means success.
            if data.get('code') == 0:
                cid = int(data['data']['cid'])
                video_name = data['data']['title']
                self.logger.info("成功获取视频{name}的cid: {cid}".format(name=video_name, cid=cid))
                danmaku_url = 'https://comment.bilibili.com/{cid}.xml'.format(cid=cid)
                yield scrapy.Request(danmaku_url, headers=self.headers, callback=self.parse_danmaku,
                                     meta={'bvid': response.meta['bvid']})
            else:
                self.logger.error(f"API请求失败: {data.get('message')}")
        except Exception as e:
            # Boundary handler: malformed JSON / missing keys are logged,
            # not propagated, so the crawl keeps running.
            self.logger.error(f"解析响应失败: {str(e)}")

    def parse_danmaku(self, response, *args, **kwargs):
        """Parse the danmaku XML and yield one item per <d> element.

        Each <d> node carries a comma-separated ``p`` attribute; the fields
        used here are (by index): 0 = appearance time in seconds,
        4 = send timestamp, 7 = sender hash, 8 = danmaku id.

        Yields:
            dict with keys bvid, danmaku_id, time, user_id, timestamp,
            text, hash_val (MD5 of the item JSON, for deduplication).
        """
        try:
            bvid = response.meta['bvid']
            try:
                # Parse the raw bytes: feeding decoded text back through
                # .encode() can corrupt content when the XML declaration's
                # encoding differs from the response decode.
                root = etree.fromstring(response.body)
            except XMLSyntaxError as e:
                self.logger.error(f"XML解析失败: {str(e)}")
                return
            # Explicit None check: lxml element truth-testing means
            # "has children" and raises a FutureWarning.
            if root is None:
                self.logger.error("XML文档为空")
                return
            danmaku_nodes = root.xpath('//d')
            if not danmaku_nodes:
                self.logger.warning("未找到弹幕数据")
                return

            for node in danmaku_nodes:
                p_value = node.xpath('./@p')[0]
                danmaku_info = p_value.split(',')
                if len(danmaku_info) < 9:
                    self.logger.error(f"弹幕格式错误: {p_value}")
                    continue

                danmaku_time = danmaku_info[0]  # appearance time (seconds)
                danmaku_timestamp = danmaku_info[4]  # send timestamp
                danmaku_user_id = danmaku_info[7]  # sender's unique hash
                danmaku_id = danmaku_info[8]  # danmaku id
                danmaku_text = node.text.strip() if node.text else ""

                item = {
                    'bvid': bvid,
                    'danmaku_id': danmaku_id,
                    'time': danmaku_time,
                    'user_id': danmaku_user_id,
                    'timestamp': danmaku_timestamp,
                    'text': danmaku_text
                }
                # Stable fingerprint of the item for downstream dedup.
                item_str = json.dumps(item, ensure_ascii=False)
                hash_val = hashlib.md5(item_str.encode("utf-8")).hexdigest()
                item['hash_val'] = hash_val

                yield item

        except KeyError as e:
            self.logger.error(f"Meta数据缺失: {str(e)}")
        except Exception as e:
            # Boundary handler: log and swallow so one bad page does not
            # abort the whole crawl.
            self.logger.error(f"弹幕解析失败: {str(e)}")
