import requests
import json
import time
import re
import pandas as pd
import os
from fake_useragent import UserAgent

class BilibiliDanmuCrawler:
    """Crawl Bilibili video danmu (bullet comments) for search keywords.

    Pipeline: keyword search (HTML page scrape) -> resolve each video's
    cid -> download the XML danmu list -> accumulate rows -> return a
    pandas DataFrame, with helpers to save CSV plus a JSON stats sidecar.
    """

    # Patterns compiled once at class level (hoisted out of the methods).
    # A bvid is "BV" followed by 10 alphanumeric characters.
    _BVID_RE = re.compile(r'"bvid":"(BV[0-9A-Za-z]{10})"')
    _TITLE_RE = re.compile(r'title="([^"]*?)"')
    # Danmu XML: one <d p="...">text</d> element per comment.
    _DANMU_RE = re.compile(r'<d p=".*?">(.*?)</d>')

    def __init__(self):
        self.ua = UserAgent()
        self.session = requests.Session()
        # Browser-like headers so requests are less likely to be blocked.
        self.session.headers.update({
            'User-Agent': self.ua.random,
            'Referer': 'https://www.bilibili.com/',
            'Accept': 'application/json, text/plain, */*',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        })

    @staticmethod
    def _pair_search_results(bvids, titles, limit):
        """Pair bvids with titles by position, capped at ``limit`` entries.

        The two lists are scraped independently, so titles may be fewer
        than bvids; missing titles fall back to the original placeholder.
        """
        return [
            {'bvid': bvid, 'title': titles[i] if i < len(titles) else '未知标题'}
            for i, bvid in enumerate(bvids[:limit])
        ]

    @classmethod
    def _extract_danmu(cls, xml_text):
        """Return all danmu text strings from a danmu XML document."""
        return cls._DANMU_RE.findall(xml_text)

    def search_videos_simple(self, keyword, page=1, page_size=20):
        """Search videos by scraping the HTML search page (no API signing).

        Args:
            keyword: search term.
            page: 1-based result page number.
            page_size: maximum number of results to return.

        Returns:
            A list of {'bvid', 'title'} dicts (best-effort positional
            pairing); an empty list on any request failure.
        """
        print(f"搜索关键词: {keyword}, 第{page}页")

        url = "https://search.bilibili.com/video"
        params = {
            'keyword': keyword,
            'page': page,
            'order': 'totalrank'  # composite ranking
        }

        try:
            response = self.session.get(url, params=params, timeout=10)
            response.encoding = 'utf-8'

            # Regex-scrape bvids and titles out of the rendered page text.
            bvids = self._BVID_RE.findall(response.text)
            titles = self._TITLE_RE.findall(response.text)
            videos = self._pair_search_results(bvids, titles, page_size)

            print(f"找到 {len(videos)} 个视频")
            return videos

        except Exception as e:
            # Best-effort: report and return an empty page of results.
            print(f"搜索失败: {e}")
            return []

    def get_video_cid(self, bvid):
        """Resolve a video's cid (danmu stream id) from its bvid.

        Returns:
            The cid as a string, or None on any failure.
        """
        print(f"获取视频 {bvid} 的cid...")

        url = "https://api.bilibili.com/x/web-interface/view"
        params = {'bvid': bvid}

        try:
            response = self.session.get(url, params=params, timeout=10)
            data = response.json()

            if data['code'] == 0:
                cid = data['data']['cid']
                print(f"成功获取cid: {cid}")
                return str(cid)
            print(f"获取cid失败: {data['message']}")
            return None

        except Exception as e:
            print(f"获取cid异常: {e}")
            return None

    def get_danmu_data(self, cid):
        """Download and parse the danmu list for a cid.

        Returns:
            A list of danmu text strings; an empty list on failure.
        """
        print(f"获取弹幕，cid: {cid}")

        url = "https://api.bilibili.com/x/v1/dm/list.so"
        params = {'oid': cid}

        try:
            response = self.session.get(url, params=params, timeout=10)
            response.encoding = 'utf-8'

            # The endpoint returns XML; pull out every comment's text.
            danmu_list = self._extract_danmu(response.text)

            print(f"获取到 {len(danmu_list)} 条弹幕")
            return danmu_list

        except Exception as e:
            print(f"获取弹幕失败: {e}")
            return []

    def crawl_danmu_by_keywords(self, keywords, total_videos=30):
        """Crawl danmu for the given keywords.

        ``total_videos`` is split evenly across the keywords. Returns a
        DataFrame with columns bvid/title/keyword/danmu — the columns are
        present even when nothing was collected, so downstream stats code
        does not KeyError on an empty result.
        """
        all_danmu_data = []

        # Guard: an empty keyword list would divide by zero below.
        if not keywords:
            return pd.DataFrame(columns=['bvid', 'title', 'keyword', 'danmu'])

        # Videos to crawl per keyword (integer split of the total budget).
        videos_per_keyword = total_videos // len(keywords)

        for keyword in keywords:
            print(f"\n{'='*50}")
            print(f"开始处理关键词: {keyword}")
            print(f"{'='*50}")

            videos_collected = 0
            page = 1

            while videos_collected < videos_per_keyword:
                search_results = self.search_videos_simple(keyword, page=page)

                if not search_results:
                    print("没有更多视频了")
                    break

                for video in search_results:
                    if videos_collected >= videos_per_keyword:
                        break

                    print(f"\n处理视频 {videos_collected + 1}/{videos_per_keyword}")
                    print(f"标题: {video['title'][:30]}...")
                    print(f"BVID: {video['bvid']}")

                    # Resolve cid; skip the video if that fails.
                    cid = self.get_video_cid(video['bvid'])
                    if not cid:
                        continue

                    danmus = self.get_danmu_data(cid)

                    # One row per danmu, tagged with its source video.
                    for danmu in danmus:
                        all_danmu_data.append({
                            'bvid': video['bvid'],
                            'title': video['title'],
                            'keyword': keyword,
                            'danmu': danmu
                        })

                    videos_collected += 1

                    # Politeness delay between per-video requests.
                    time.sleep(1)

                page += 1
                time.sleep(2)  # delay between result pages

        # Fixed column order keeps the schema stable even for empty data.
        return pd.DataFrame(all_danmu_data,
                            columns=['bvid', 'title', 'keyword', 'danmu'])

    def save_data(self, df, filename):
        """Save the crawled danmu to CSV plus a JSON stats sidecar.

        Args:
            df: DataFrame as returned by crawl_danmu_by_keywords.
            filename: base name (without extension) for the output files.
        """
        # Ensure the output directory exists.
        os.makedirs('data/raw_danmu', exist_ok=True)

        # BUG FIX: the original hard-coded the output name and silently
        # ignored the ``filename`` argument, so every run clobbered the
        # same files.
        filepath = f'data/raw_danmu/{filename}.csv'
        # utf-8-sig BOM so Excel opens the Chinese text correctly.
        df.to_csv(filepath, index=False, encoding='utf-8-sig')

        # Robust against an empty/column-less frame.
        n_videos = int(df['bvid'].nunique()) if 'bvid' in df.columns else 0
        kw_dist = (df['keyword'].value_counts().to_dict()
                   if 'keyword' in df.columns else {})

        print(f"\n数据已保存到: {filepath}")
        print(f"总弹幕数量: {len(df)}")
        print(f"涉及视频数量: {n_videos}")

        stats = {
            'total_danmu': len(df),
            'total_videos': n_videos,
            'keyword_distribution': kw_dist,
            'crawl_time': time.strftime('%Y-%m-%d %H:%M:%S')
        }

        with open(f'data/raw_danmu/{filename}_stats.json', 'w',
                  encoding='utf-8') as f:
            json.dump(stats, f, ensure_ascii=False, indent=2)