from datetime import datetime, timedelta
import requests
import json
import os
import time
import pandas as pd
import asyncio
import aiohttp
from typing import List, Dict, Any, Optional, Union
import logging

# Bilibili video ID configuration class.
class BilibiliVideoConfig:
    """Registry of Bilibili video IDs used as culture-wave analysis samples."""

    # Sample videos for the culture-wave analyzer.
    # Entry format: {"bvid": <video BV id>, "description": <short description>}
    CULTURE_WAVE_SAMPLES = [
        {"bvid": "BV1x54y1e7zf", "description": "《黑神话:悟空》实机演示"},
        {"bvid": "BV1nwauzREuU", "description": "阅兵观礼台十年对比看世界格局的重塑"},
        {"bvid": "BV1fq4y1g7hq", "description": "5G技术"},
        {"bvid": "BV1y34y1s7x7", "description": "电影台词画面"},
        {"bvid": "BV1Yr4AedEaQ", "description": "生活体验与反思"},
        {"bvid": "BV176M3zPEZu", "description": "热门单曲"},
        {"bvid": "BV1MqY9zYEc7", "description": "银河系位于巨型空洞中心"},
        {"bvid": "BV1fv411n73Q", "description": "学习励志视频"},
        {"bvid": "BV1oC4y1k7iT", "description": "从狭义相对论到广义相对论理解时间"},
        {"bvid": "BV1mE411r7XG", "description": "《哪吒之魔童降世》娱乐视频剪辑"},
        {"bvid": "BV1Yae8zkEHS", "description": "“2025年暑假全网爆火的歌曲”"}
    ]

    # Sample videos grouped by site category.
    CATEGORY_SAMPLES = {
        "tech": ["BV1x54y1e7zf", "BV1fq4y1g7hq"],  # technology
        "science": ["BV1MqY9zYEc7", "BV1oC4y1k7iT"],  # science
        "lifestyle": ["BV1nwauzREuU", "BV1Yr4AedEaQ", "BV1fv411n73Q"],  # lifestyle
        "entertainment": ["BV1y34y1s7x7", "BV176M3zPEZu", "BV1mE411r7XG", "BV1Yae8zkEHS"]  # entertainment
    }

    # Culture-circle (site section) sample configuration.
    CULTURE_CIRCLES = {
        "知识区": [
            {"bvid": "BV1rk4y1R7id", "description": "罗翔说刑法经典案例"},
            {"bvid": "BV1V3AmeQEEV", "description": "知识区科普视频"},
            {"bvid": "BV1je4y1n7CE", "description": "诺贝尔物理学奖"},
        ],
        "鬼畜区": [
            {"bvid": "BV1Mx411Q7u5", "description": "鬼畜区热门作品"},
            {"bvid": "BV1q7411x7H6", "description": "鬼畜创意视频"},
            {"bvid": "BV1s5411A7Ui", "description": "经典鬼畜remix"},
        ],
        "生活区": [
            {"bvid": "BV1244y1p7kt", "description": "何同学科技生活视频"},
            {"bvid": "BV11u4y1A7QT", "description": "生活区VLOG"},
            {"bvid": "BV1yt4y1h7N2", "description": "Z世代生活方式"},
        ],
        "科技区": [
            {"bvid": "BV1pFczegEnm", "description": "AI工具使用"},
            {"bvid": "BV1yF41117MX", "description": "量子计算解释"},
            {"bvid": "BV1Qa4116759", "description": "元宇宙应用实践"},
        ],
        "游戏区": [
            {"bvid": "BV1mB4y1e7Lh", "description": "《原神》热门剧情"},
            {"bvid": "BV1vp4y1a7Ea", "description": "游戏区热门视频"},
            {"bvid": "BV1AE4m1d7XT", "description": "《黑神话：悟空》攻略"},
        ],
        "音乐区": [
            {"bvid": "BV1ua411p7iA", "description": "音乐区热门MV"},
            {"bvid": "BV1iktdzuE73", "description": "2025年热门音乐"},
            {"bvid": "BV1KM4m1z7Y5", "description": "AIGC音乐创作"},
        ]
    }

    @classmethod
    def get_videos_by_circle(cls, circle_name: str) -> List[Dict]:
        """Return the configured videos of a culture circle ([] for unknown names)."""
        if circle_name in cls.CULTURE_CIRCLES:
            return cls.CULTURE_CIRCLES[circle_name]
        return []

    @classmethod
    def get_all_circles(cls) -> List[str]:
        """Return the names of every configured culture circle."""
        return [*cls.CULTURE_CIRCLES]

    @classmethod
    def get_all_bvds(cls) -> List[str]:
        """Return every BV id in the culture-wave sample list."""
        return [entry["bvid"] for entry in cls.CULTURE_WAVE_SAMPLES]

    @classmethod
    def get_video_by_category(cls, category: str) -> List[str]:
        """Return the BV ids configured under a category ([] for unknown ones)."""
        if category in cls.CATEGORY_SAMPLES:
            return cls.CATEGORY_SAMPLES[category]
        return []

    @classmethod
    def get_video_with_description(cls, bvid: str) -> Optional[Dict]:
        """Return the sample entry (bvid + description) matching *bvid*, or None."""
        matches = (entry for entry in cls.CULTURE_WAVE_SAMPLES if entry["bvid"] == bvid)
        return next(matches, None)


# Configure logging: INFO level, mirrored to a UTF-8 log file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("bilibili_api.log", encoding='utf-8'),
        logging.StreamHandler()
    ]
)
# Module-wide logger used throughout BilibiliDataCollector.
logger = logging.getLogger("BilibiliDataCollector")


class BilibiliDataCollector:
    """Collects Bilibili video metadata and comment data via the public web API.

    Video info is cached as JSON files under ``cache_dir``.  Comment
    collection tries several strategies in order (async multi-page fetch,
    sync first-page fetch, hot-comment endpoint) and falls back to
    generated simulated comments when all of them fail.
    """

    def __init__(self, cache_dir: str = "data", request_timeout: int = 15):
        """
        Initialize the Bilibili data collector.

        Args:
            cache_dir: Cache directory (created if it does not exist)
            request_timeout: Per-request timeout in seconds
        """
        # Browser-like request headers so requests look like a normal web client.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Referer': 'https://www.bilibili.com/',
            'Accept': 'application/json, text/plain, */*',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Origin': 'https://www.bilibili.com',
            'Connection': 'keep-alive',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-site',
        }
        
        self.cache_dir = cache_dir
        self.request_timeout = request_timeout
        self.session = requests.Session()
        self.session.headers.update(self.headers)

        # Set more realistic-looking cookies.
        self.session.cookies.update({
            'browser_resolution': '1920-1080',
            'CURRENT_FNVAL': '4048',
        })
        
        # Whether the async (aiohttp) code paths may be used.
        self.HAS_AIOHTTP = self._check_async_availability()
        
        # Make sure the cache directory exists.
        os.makedirs(cache_dir, exist_ok=True)
        
        # Video configuration instance.
        self.video_config = BilibiliVideoConfig()

        logger.info("BilibiliDataCollector初始化完成")
    
    def _check_async_availability(self) -> bool:
        """Return True if the async libraries (aiohttp/asyncio) can be imported."""
        try:
            import aiohttp
            import asyncio
            return True
        except ImportError:
            logger.warning("aiohttp 未安装，异步功能将不可用")
            return False
    
    def load_cached_data(self, video_id: str) -> Optional[Dict]:
        """
        Load cached data for a video.

        Args:
            video_id: Video ID (BVID)

        Returns:
            The cached data, or None when missing or unreadable
        """
        cache_file = os.path.join(self.cache_dir, f"{video_id}.json")
        try:
            if os.path.exists(cache_file):
                with open(cache_file, 'r', encoding='utf-8') as f:
                    data = json.load(f)
                    logger.info(f"从缓存加载数据: {video_id}")
                    return data
            return None
        except (json.JSONDecodeError, IOError) as e:
            logger.error(f"读取缓存文件失败: {e}")
            return None
    
    def save_to_cache(self, data: Dict, video_id: str) -> bool:
        """
        Save data to the cache.

        Args:
            data: Data to save
            video_id: Video ID (BVID)

        Returns:
            True on success, False otherwise
        """
        try:
            cache_file = os.path.join(self.cache_dir, f"{video_id}.json")
            with open(cache_file, 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False, indent=2)
            logger.info(f"数据已缓存到: {cache_file}")
            return True
        except IOError as e:
            logger.error(f"保存缓存文件失败: {e}")
            return False
    
    def get_video_info(self, bvid: str, use_cache: bool = True) -> Optional[Dict]:
        """
        Fetch video info (with caching).

        Args:
            bvid: Video BV id
            use_cache: Whether to consult the cache first

        Returns:
            Video info dict, or None on failure
        """
        # Try the cache first.
        if use_cache:
            cached_data = self.load_cached_data(bvid)
            if cached_data:
                return cached_data
        
        # Cache miss (or caching disabled): fetch from the API.
        url = f"https://api.bilibili.com/x/web-interface/view?bvid={bvid}"
        try:
            response = self.session.get(url, timeout=self.request_timeout)
            response.raise_for_status()
            
            data = response.json()
            if data.get('code') == 0:
                video_data = data['data']
                # Persist to the cache for future calls.
                self.save_to_cache(video_data, bvid)
                logger.info(f"成功获取视频信息: {bvid}")
                return video_data
            else:
                logger.warning(f"API返回错误: {data.get('message')}")
                return None
            
        except requests.exceptions.RequestException as e:
            logger.error(f"获取视频信息失败: {e}")
            return None
        except json.JSONDecodeError as e:
            logger.error(f"解析JSON响应失败: {e}")
            return None
    
    def get_video_stat(self, bvid: str) -> Optional[Dict]:
        """
        Fetch a video's engagement statistics.

        Args:
            bvid: Video BV id

        Returns:
            Dict of engagement counters, or None when video info is unavailable
        """
        video_info = self.get_video_info(bvid)
        if not video_info:
            return None
        
        stat = video_info.get('stat', {})
        return {
            'view': stat.get('view', 0),        # view count
            'danmaku': stat.get('danmaku', 0),  # danmaku (bullet-comment) count
            'reply': stat.get('reply', 0),      # reply count
            'favorite': stat.get('favorite', 0), # favorite count
            'coin': stat.get('coin', 0),        # coin count
            'share': stat.get('share', 0),      # share count
            'like': stat.get('like', 0),        # like count
        }
    
    async def get_comments_page(self, session: aiohttp.ClientSession, 
                               oid: int, page: int, semaphore: asyncio.Semaphore) -> Optional[Dict]:
        """
        Fetch a single page of comments asynchronously.

        Args:
            session: aiohttp session
            oid: Numeric video id (aid)
            page: Page number
            semaphore: Semaphore limiting concurrent requests

        Returns:
            Raw comment payload, or None on failure
        """
        async with semaphore:
            url = f"https://api.bilibili.com/x/v2/reply?jsonp=jsonp&pn={page}&type=1&oid={oid}&sort=2"
            try:
                async with session.get(url, timeout=aiohttp.ClientTimeout(total=self.request_timeout)) as response:
                    if response.status == 200:
                        data = await response.json()
                        if data.get('code') == 0:
                            return data
                        else:
                            logger.warning(f"评论API返回错误: {data.get('message')}")
                    return None
            except Exception as e:
                logger.error(f"请求评论页失败: {e}")
                return None
            finally:
                # Throttle: brief delay after every request (runs even on
                # early return) to avoid hammering the endpoint.
                await asyncio.sleep(0.5)
    
    async def get_real_comments(self, bvid: str, max_count: int = 100, 
                               max_concurrent: int = 5) -> List[Dict]:
        """
        Fetch real Bilibili comments for a video.

        Args:
            bvid: Video BV id
            max_count: Maximum number of comments to collect
            max_concurrent: Maximum number of concurrent requests

        Returns:
            List of comment dicts (possibly empty)
        """
        if not self.HAS_AIOHTTP:
            logger.warning("aiohttp不可用，无法使用异步功能")
            return []
        
        # Resolve the video's numeric id (aid) first; the comment API keys on it.
        video_info = self.get_video_info(bvid)
        if not video_info or 'aid' not in video_info:
            logger.error(f"无法获取视频信息或aid: {bvid}")
            return []
        
        aid = video_info['aid']
        comments = []
        page = 1
        semaphore = asyncio.Semaphore(max_concurrent)
        
        try:
            async with aiohttp.ClientSession(headers=self.headers) as session:
                while len(comments) < max_count:
                    # Fetch page 1 first to learn the total page count.
                    if page == 1:
                        first_page_data = await self.get_comments_page(session, aid, page, semaphore)
                        if not first_page_data or not first_page_data.get('data', {}).get('replies'):
                            break
                        
                        total = first_page_data['data']['page']['count']
                        page_size = first_page_data['data']['page']['size']
                        total_pages = min((total + page_size - 1) // page_size, 10)  # cap at 10 pages
                        
                        # Collect the first page's comments.
                        for reply in first_page_data['data']['replies']:
                            if len(comments) >= max_count:
                                break
                            comments.append({
                                'text': reply['content']['message'],
                                'like': reply['like'],
                                'time': datetime.fromtimestamp(reply['ctime']).strftime('%Y-%m-%d %H:%M:%S'),
                                'user': reply['member']['uname'],
                                'user_id': reply['member']['mid'],
                                'rpid': reply['rpid']  # comment id
                            })
                        page += 1
                    
                    # Fetch the remaining pages in concurrent batches.
                    if page <= total_pages and len(comments) < max_count:
                        tasks = []
                        for p in range(page, min(page + max_concurrent, total_pages + 1)):
                            if len(comments) >= max_count:
                                break
                            tasks.append(self.get_comments_page(session, aid, p, semaphore))
                        
                        results = await asyncio.gather(*tasks)
                        
                        for data in results:
                            if not data or not data.get('data', {}).get('replies'):
                                continue
                                
                            for reply in data['data']['replies']:
                                if len(comments) >= max_count:
                                    break
                                comments.append({
                                    'text': reply['content']['message'],
                                    'like': reply['like'],
                                    'time': datetime.fromtimestamp(reply['ctime']).strftime('%Y-%m-%d %H:%M:%S'),
                                    'user': reply['member']['uname'],
                                    'user_id': reply['member']['mid'],
                                    'rpid': reply['rpid']  # comment id
                                })
                        
                        page += max_concurrent
                    else:
                        break
                        
        except Exception as e:
            logger.error(f"获取评论过程中发生错误: {e}")
        
        logger.info(f"成功获取 {len(comments)} 条评论")
        return comments[:max_count]
    def get_real_data(self, bvid: str, max_comments: int = 200) -> List[Dict]:
        """
        Fetch real Bilibili data using a multi-strategy fallback.

        Strategies, in order: async multi-page fetch, sync first-page fetch,
        the hot-comment endpoint, and finally enhanced simulated data.

        Args:
            bvid: Video BV id
            max_comments: Maximum number of comments

        Returns:
            List of comment dicts, each tagged with a 'data_source' key
            ('real' or 'simulated')
        """
        logger.info(f"开始获取视频 {bvid} 的真实数据")
        
        # Strategy 1: async fetch.
        try:
            if self.HAS_AIOHTTP:
                logger.info("尝试方法1: 异步获取评论...")
                comments = asyncio.run(self.get_real_comments(bvid, max_comments))
                if comments and len(comments) > 0:
                    logger.info(f"异步获取成功: {len(comments)} 条评论")
                    # Tag entries as real data.
                    for comment in comments:
                        comment['data_source'] = 'real'
                    return comments
        except Exception as e:
            logger.error(f"异步获取失败: {e}")
        
        # Strategy 2: synchronous fetch of the first page only.
        try:
            logger.info("尝试方法2: 同步获取评论...")
            video_info = self.get_video_info(bvid)
            if video_info and 'aid' in video_info:
                aid = video_info['aid']
                url = f"https://api.bilibili.com/x/v2/reply?jsonp=jsonp&pn=1&type=1&oid={aid}&sort=2"
                
                response = self.session.get(url, timeout=self.request_timeout)
                if response.status_code == 200:
                    data = response.json()
                    if data.get('code') == 0 and data.get('data', {}).get('replies'):
                        comments = []
                        for reply in data['data']['replies'][:max_comments]:
                            comments.append({
                                'text': reply['content']['message'],
                                'like': reply['like'],
                                'time': datetime.fromtimestamp(reply['ctime']).strftime('%Y-%m-%d %H:%M:%S'),
                                'user': reply['member']['uname'],
                                'user_id': reply['member']['mid'],
                                'rpid': reply['rpid'],
                                'data_source': 'real'  # tag as real data
                            })
                        if comments:
                            logger.info(f"同步获取成功: {len(comments)} 条评论")
                            return comments
        except Exception as e:
            logger.error(f"同步获取失败: {e}")
        
        # Strategy 3: hot-comment endpoint.
        try:
            logger.info("尝试方法3: 热门评论接口...")
            video_info = self.get_video_info(bvid)
            if video_info and 'aid' in video_info:
                aid = video_info['aid']
                url = f"https://api.bilibili.com/x/v2/reply/main?jsonp=jsonp&next=0&type=1&oid={aid}&mode=3"
                
                response = self.session.get(url, timeout=self.request_timeout)
                if response.status_code == 200:
                    data = response.json()
                    if data.get('code') == 0 and data.get('data', {}).get('replies'):
                        comments = []
                        for reply in data['data']['replies'][:max_comments]:
                            comments.append({
                                'text': reply['content']['message'],
                                'like': reply['like'],
                                'time': datetime.fromtimestamp(reply['ctime']).strftime('%Y-%m-%d %H:%M:%S'),
                                'user': reply['member']['uname'],
                                'user_id': reply['member']['mid'],
                                'rpid': reply['rpid'],
                                'data_source': 'real'
                            })
                        if comments:
                            logger.info(f"热门评论获取成功: {len(comments)} 条评论")
                            return comments
        except Exception as e:
            logger.error(f"热门评论获取失败: {e}")
        
        # Every real-data strategy failed: fall back to enhanced simulated data.
        logger.info("所有真实数据获取方式都失败，使用增强版模拟数据")
        return self._get_enhanced_simulated_comments(bvid, max_comments)

    def _get_enhanced_simulated_comments(self, bvid: str, count: int = 50) -> List[Dict]:
        """
        Generate enhanced simulated comments tailored to the given video.

        Args:
            bvid: Video BV id
            count: Number of comments to generate

        Returns:
            List of simulated comment dicts (data_source == 'simulated')
        """
        # Use the video title (when available) to make comments more relevant.
        video_info = self.get_video_info(bvid)
        title = video_info.get('title', '这个视频') if video_info else '这个视频'
        
        # Pull the configured description into some of the templates too.
        description = self.get_video_description(bvid)
        
        # Base comment templates; cycled over when count exceeds their number.
        base_comments = [
            f"{title}太棒了，学到了很多！",
            f"UP主讲得很清楚，点赞！",
            f"有点没看懂{description}，能再解释一下吗？",
            f"期待下一期视频！",
            f"这个观点不太同意，我觉得...",
            f"哈哈哈哈，太有趣了！",
            f"已三连，支持优质内容！",
            f"背景音乐有点大，听不清说话",
            f"{description}技术太厉害了，想学！",
            f"什么时候出下一期啊？等不及了！",
            f"这个{description}分析得很透彻",
            f"文化波动仪的角度很有意思",
            f"从网络热力学看{title}很有启发",
            f"情绪熵增理论在这里得到了很好的体现",
            f"这个视频反映了当代圈子文化的特点",
            f"文化演化的过程在这里很清晰",
            f"B站社区的文化波动很有意思",
            f"这个视频是文化波动的一个典型案例",
            f"从熵增角度看这个视频很有深度",
            f"网络热力学在这里得到了很好的应用"
        ]
        
        comments = []
        base_time = datetime.now()
        
        for i in range(count):
            # Spread comments backwards in time, 5 minutes apart.
            comment_time = base_time - timedelta(minutes=i*5)
            comment_text = base_comments[i % len(base_comments)]
            
            comments.append({
                'text': comment_text,
                'like': i * 2 + 1,
                'time': comment_time.strftime('%Y-%m-%d %H:%M:%S'),
                'user': f'用户{i+10000}',
                'user_id': 10000 + i,
                'rpid': 20000 + i,
                'sentiment': 0.3 + (i % 10) * 0.07,  # synthetic sentiment score
                'data_source': 'simulated'  # tag as simulated data
            })
        
        logger.info(f"生成 {len(comments)} 条模拟评论")
        return comments
    
    def save_to_csv(self, data: List[Dict], filename: str) -> Optional[pd.DataFrame]:
        """
        Save a list of records to a CSV file under the cache directory.

        Note: *filename* is always joined onto ``self.cache_dir``, even when
        it already contains a directory component.

        Args:
            data: Records to save
            filename: File name ('.csv' is appended when missing)

        Returns:
            The DataFrame that was written, or None on failure
        """
        try:
            if not filename.endswith('.csv'):
                filename += '.csv'
                
            filepath = os.path.join(self.cache_dir, filename)
            df = pd.DataFrame(data)
            df.to_csv(filepath, index=False, encoding='utf-8-sig')
            logger.info(f"数据已保存到: {filepath}")
            return df
        except Exception as e:
            logger.error(f"保存CSV文件失败: {e}")
            return None
    
    def get_popular_videos(self, category: str = "all", page: int = 1, page_size: int = 20) -> Optional[List[Dict]]:
        """
        Fetch the popular-videos list.

        Args:
            category: Category name — currently unused by the request below
            page: Page number
            page_size: Items per page

        Returns:
            List of videos, or None on failure
        """
        # NOTE: illustrative only; adjust to the actual Bilibili API as needed.
        url = "https://api.bilibili.com/x/web-interface/popular"
        try:
            response = self.session.get(url, params={'ps': page_size, 'pn': page}, timeout=self.request_timeout)
            response.raise_for_status()
            
            data = response.json()
            if data.get('code') == 0:
                return data['data']['list']
            return None
        except Exception as e:
            logger.error(f"获取热门视频失败: {e}")
            return None
    
    def get_configured_videos(self, category: Optional[str] = None) -> List[Dict]:
        """
        Return the configured sample videos.

        Args:
            category: Category name; None (or "") returns the full sample list

        Returns:
            List of video config dicts ({'bvid', 'description'})
        """
        try:
            if category:
                bvids = self.video_config.get_video_by_category(category)
                videos = []
                for bvid in bvids:
                    video_info = self.video_config.get_video_with_description(bvid)
                    if video_info:
                        videos.append(video_info)
                return videos
            else:
                return self.video_config.CULTURE_WAVE_SAMPLES
        except Exception as e:
            logger.error(f"获取配置视频失败: {e}")
            return []
    
    def get_video_description(self, bvid: str) -> str:
        """
        Return the configured description for a BV id.

        Args:
            bvid: Video BV id

        Returns:
            The description, or the literal '未知描述' when not configured
        """
        video_info = self.video_config.get_video_with_description(bvid)
        if video_info:
            return video_info.get('description', '未知描述')
        return '未知描述'
    
    def get_videos_by_culture_circle(self, circle_name: str) -> List[Dict]:
        """
        Return the configured videos for a culture circle.

        Args:
            circle_name: Culture-circle name

        Returns:
            Videos configured for that circle ([] when unknown)
        """
        return self.video_config.get_videos_by_circle(circle_name)

    def get_available_culture_circles(self) -> List[str]:
        """
        Return the available culture-circle names.

        Returns:
            List of culture-circle names
        """
        return self.video_config.get_all_circles()
        
    def batch_collect_data(self, bvids: Optional[List[str]] = None, max_comments: int = 100) -> Dict[str, Any]:
        """
        Collect data for several videos in one pass.

        Args:
            bvids: BV ids to collect; None means all configured sample videos
            max_comments: Maximum comments per video

        Returns:
            Mapping of bvid -> collected data, or {'error': ...} per failed video
        """
        if bvids is None:
            bvids = self.video_config.get_all_bvds()
        
        results = {}
        for bvid in bvids:
            logger.info(f"开始收集视频 {bvid} 的数据...")
            try:
                # Video metadata (cached).
                video_info = self.get_video_info(bvid)
                
                # Comment data (real, with simulated fallback).
                comments = self.get_real_data(bvid, max_comments)
                
                # Configured description entry; note this is the whole
                # {'bvid', 'description'} dict (or None), not a plain string.
                description = self.video_config.get_video_with_description(bvid)
                
                results[bvid] = {
                    'video_info': video_info,
                    'comments': comments,
                    'stat': self.get_video_stat(bvid) if video_info else None,
                    'description': description
                }
                
                logger.info(f"视频 {bvid} 数据收集完成")
                
            except Exception as e:
                logger.error(f"收集视频 {bvid} 数据时出错: {e}")
                results[bvid] = {'error': str(e)}
        
        return results

    def export_batch_results(self, batch_results: Dict[str, Any], output_dir: str = "batch_results") -> None:
        """
        Export batch-collection results to disk.

        Args:
            batch_results: Result mapping produced by batch_collect_data
            output_dir: Output directory (created if missing)
        """
        os.makedirs(output_dir, exist_ok=True)

        for bvid, data in batch_results.items():
            if 'error' not in data:
                # Save the comment data as CSV.
                # NOTE(review): save_to_csv prefixes self.cache_dir onto its
                # filename, so this actually targets cache_dir/output_dir/...,
                # a directory that is never created — the write likely fails
                # (logged and swallowed inside save_to_csv). Verify intent.
                if data.get('comments'):
                    comment_filename = f"comments_{bvid}.csv"
                    self.save_to_csv(data['comments'], os.path.join(output_dir, comment_filename))
                # Save the video info as JSON (written directly to output_dir).
                if data.get('video_info'):
                    info_filename = f"video_info_{bvid}.json"
                    with open(os.path.join(output_dir, info_filename), 'w', encoding='utf-8') as f:
                        json.dump(data['video_info'], f, ensure_ascii=False, indent=2)


# Shared module-level collector instance (constructed at import time).
bilibili_collector = BilibiliDataCollector()

# Demo / smoke test when executed as a script.
if __name__ == "__main__":
    # Dump every configured sample video.
    samples = bilibili_collector.get_configured_videos()
    print(f"配置的视频数量：{len(samples)}")
    for entry in samples:
        print(f"BV号:{entry['bvid']},描述:{entry['description']}")

    # Fetch the videos configured under one category.
    tech_list = bilibili_collector.get_configured_videos("tech")
    print(f"\n科技区视频:{[v['bvid'] for v in tech_list]}")

    # Single-video collection example.
    print("\n=== 单个视频数据收集 ===")
    info = bilibili_collector.get_video_info("BV1x54y1e7zf")
    if info:
        print(f"视频标题: {info.get('title')}")
        print(f"播放量: {info.get('stat', {}).get('view')}")

    # Pull comments (real data, with simulated fallback).
    demo_comments = bilibili_collector.get_real_data("BV1x54y1e7zf", max_comments=50)
    print(f"获取到 {len(demo_comments)} 条评论")

    # Persist the comments to CSV.
    if demo_comments:
        bilibili_collector.save_to_csv(demo_comments, "comments_example.csv")
        print("评论数据已保存到 comments_example.csv")