import asyncio
import time
import random
from browser_driver import BrowserDriver
from login_handler import LoginHandler
from video_link_crawler import VideoLinkCrawler
from danmu_fetcher import DanmuFetcher
from data_processor import DataProcessor
from excel_generator import ExcelGenerator  # 新增导入
import re

class BilibiliTop300DanmuCrawler:
    """Crawler that collects danmu (bullet comments) for the top-300 Bilibili
    videos under the "comprehensive" sort order.

    Collaborators (project modules, injected at runtime):
      - BrowserDriver: creates/owns the Selenium driver.
      - VideoLinkCrawler: scrapes the top-300 video URLs.
      - DanmuFetcher: resolves BVID -> CID and downloads danmu.
      - DataProcessor: accumulates, checkpoints and analyzes danmu data.
      - ExcelGenerator: renders the collected data to an Excel report.
    """

    def __init__(self):
        self.browser_driver = BrowserDriver()
        self.data_processor = DataProcessor()
        self.driver = None       # Selenium driver; assigned by the caller (see main)
        self.video_links = []    # URLs gathered by the most recent crawl

    async def process_video_danmu(self, danmu_fetcher, video_url, index):
        """Fetch and store the danmu of a single video.

        Args:
            danmu_fetcher: object exposing get_video_cid(bvid) and
                fetch_danmu_data(cid, bvid) (normally a DanmuFetcher).
            video_url: video page URL; must contain a "BV..." token.
            index: 1-based position of the video, used for logging/storage.

        Returns:
            True if danmu were collected and stored, False if the BVID or
            CID could not be resolved.
        """
        bvid_match = re.search(r'BV[0-9A-Za-z]+', video_url)
        if not bvid_match:
            print(f"无法从URL提取BVID: {video_url}")
            return False

        bvid = bvid_match.group()
        print(f"处理第 {index} 个视频: {bvid}")

        cid = danmu_fetcher.get_video_cid(bvid)
        if not cid:
            # FIX: was an f-string with no placeholders (lint F541);
            # a plain literal prints the exact same text.
            print("无法获取CID，跳过")
            return False

        print(f"获取到CID: {cid}")

        danmus = danmu_fetcher.fetch_danmu_data(cid, bvid)

        self.data_processor.add_danmu_data(video_url, bvid, cid, danmus, index)
        print(f"收集到 {len(danmus)} 条弹幕")
        return True

    async def crawl_top300_danmu(self):
        """Crawl danmu for the top-300 videos, rate-limited and checkpointed.

        Sleeps a random 3-6 s between consecutive videos (anti-throttling)
        and saves progress every 10 videos. Requires self.driver to be an
        initialized browser driver.
        """
        print("开始爬取综合排序前300的视频弹幕...")

        video_link_crawler = VideoLinkCrawler(self.driver)
        video_links = video_link_crawler.get_top300_video_links()
        if not video_links:
            print("无法获取视频链接，程序终止")
            return

        self.video_links = video_links
        danmu_fetcher = DanmuFetcher(self.driver)

        print(f"准备处理 {len(video_links)} 个视频")

        total = len(video_links)
        success_count = 0
        for i, video_url in enumerate(video_links):
            try:
                success = await self.process_video_danmu(danmu_fetcher, video_url, i + 1)
                if success:
                    success_count += 1
            except Exception as e:
                # Best-effort: one broken video must not abort the whole crawl.
                print(f"处理第 {i+1} 个视频时出错: {e}")

            print(f"进度: {i+1}/{total} (成功: {success_count})")

            # Checkpoint every 10 videos so a crash loses little work.
            if (i + 1) % 10 == 0:
                self.data_processor.save_progress()

            # FIX: rate-limit only *between* videos — the original also slept
            # 3-6 s after the final video for no benefit.
            if i + 1 < total:
                await asyncio.sleep(random.uniform(3, 6))

        print(f"成功处理 {success_count}/{total} 个视频")

    def generate_excel_report(self):
        """Render the collected danmu data to an Excel report.

        Returns:
            False when no danmu data has been collected; otherwise whatever
            ExcelGenerator.generate_excel_report() returns.
        """
        if not self.data_processor.danmu_data:
            print("没有弹幕数据，无法生成Excel报告")
            return False

        print("开始生成Excel报告...")
        excel_generator = ExcelGenerator(self.data_processor.danmu_data)
        return excel_generator.generate_excel_report()

async def main():
    """Entry point: drive the crawl -> save -> analyze -> report pipeline."""
    print("=== B站综合排序前300弹幕爬虫启动 ===")

    crawler = BilibiliTop300DanmuCrawler()
    crawler.driver = crawler.browser_driver.init_driver()

    # Without a browser there is nothing to do.
    if not crawler.driver:
        return

    try:
        # Manual login must succeed before any crawling starts.
        if not LoginHandler(crawler.driver).manual_login():
            return

        await crawler.crawl_top300_danmu()

        processor = crawler.data_processor
        processor.save_results()
        processor.analyze_results()

        print("\n开始生成词云图...")
        processor.generate_wordcloud()

        # Excel report of the collected danmu.
        print("\n开始生成Excel报告...")
        crawler.generate_excel_report()

        print("=== 爬虫任务完成 ===")

    except Exception as e:
        # Top-level boundary: report the failure with a full traceback.
        print(f"程序运行出错: {e}")
        import traceback
        traceback.print_exc()

    finally:
        # Always release the browser, even on failure.
        if crawler.driver:
            crawler.driver.quit()
            print("浏览器已关闭")

# Run the crawler only when executed as a script, not when imported.
if __name__ == "__main__":
    asyncio.run(main())