import asyncio
import logging
import re
from datetime import datetime
from typing import Optional, Dict, Any

from api.models.video_models import VideoInfo
from crawlers.bilibili.web.web_crawler import BilibiliWebCrawler
from crawlers.douyin.web.web_crawler import DouyinWebCrawler
from crawlers.hybrid.hybrid_crawler import HybridCrawler
from crawlers.tiktok.web.web_crawler import TikTokWebCrawler

# Module-level logger shared by the VideoService methods below.
logger = logging.getLogger(__name__)


class VideoService:
    """Core video-parsing service.

    Wraps the per-platform crawlers (douyin / tiktok / bilibili, plus a
    hybrid fallback) behind a single async ``parse_video`` entry point and
    normalizes each crawler's raw payload into a ``VideoInfo`` object.
    """

    # URL patterns compiled once at class-definition time instead of being
    # recompiled on every extraction call.
    _DOUYIN_ID_PATTERNS = (
        re.compile(r'/video/(\d+)'),
        re.compile(r'aweme_id=(\d+)'),
        re.compile(r'item_id=(\d+)'),
        # Broad fallback: any all-digit path segment.
        re.compile(r'/(\d+)/?'),
    )
    _BILIBILI_BVID_PATTERN = re.compile(r'(BV[a-zA-Z0-9]+)')

    # Upper bound on stored timing samples so `stats` cannot grow without
    # limit in a long-running process.
    _MAX_TIME_SAMPLES = 1000

    def __init__(self):
        """Initialize one crawler per platform.

        A crawler whose constructor raises is stored as None; parse_video
        then fails gracefully for that platform instead of the whole
        service crashing at startup.
        """
        self.douyin_crawler = self._create_crawler(DouyinWebCrawler, "抖音")
        self.tiktok_crawler = self._create_crawler(TikTokWebCrawler, "TikTok")
        self.bilibili_crawler = self._create_crawler(BilibiliWebCrawler, "哔哩哔哩")
        self.hybrid_crawler = self._create_crawler(HybridCrawler, "混合")

        # Aggregate service statistics; `parse_times` keeps only the most
        # recent _MAX_TIME_SAMPLES entries (see _record_parse_time).
        self.stats: Dict[str, Any] = {
            "total_parsed": 0,
            "success_count": 0,
            "error_count": 0,
            "parse_times": []
        }

    @staticmethod
    def _create_crawler(factory, name: str):
        """Instantiate ``factory()``, logging the outcome; None on failure."""
        try:
            crawler = factory()
            logger.info(f"{name}爬虫初始化成功")
            return crawler
        except Exception as e:
            logger.error(f"{name}爬虫初始化失败: {e}")
            return None

    async def parse_video(self, url: str, platform: str) -> Optional[VideoInfo]:
        """Parse a single video URL.

        Args:
            url: The video link.
            platform: Platform key ("douyin", "tiktok", "bilibili"); any
                other value is routed to the hybrid crawler.

        Returns:
            A VideoInfo on success, or None on any failure. Errors are
            logged and counted but never propagated to the caller.
        """
        start_time = datetime.now()

        try:
            self.stats["total_parsed"] += 1
            logger.info(f"开始解析 {platform} 视频: {url}")

            # Pick the platform crawler; unknown platforms fall back to
            # the hybrid crawler inside _get_crawler.
            crawler = self._get_crawler(platform)
            if not crawler:
                raise RuntimeError(f"平台 {platform} 的爬虫未初始化或不支持")

            raw_data = await self._execute_crawler(crawler, url, platform)
            if not raw_data:
                raise RuntimeError("爬虫返回空数据")

            # Normalize the platform-specific payload.
            video_info = self._convert_to_video_info(raw_data, platform, url)

            self.stats["success_count"] += 1
            parse_time = (datetime.now() - start_time).total_seconds()
            self._record_parse_time(parse_time)

            logger.info(f"视频解析成功: {video_info.title} (耗时: {parse_time:.2f}s)")
            return video_info

        except Exception as e:
            self.stats["error_count"] += 1
            logger.error(f"视频解析失败: {url} - {str(e)}")
            return None

    def _record_parse_time(self, parse_time: float) -> None:
        """Append a timing sample, discarding the oldest beyond the cap."""
        samples = self.stats["parse_times"]
        samples.append(parse_time)
        if len(samples) > self._MAX_TIME_SAMPLES:
            # Drop the oldest entries in one slice-delete.
            del samples[: len(samples) - self._MAX_TIME_SAMPLES]

    def _get_crawler(self, platform: str):
        """Return the crawler instance for ``platform``.

        Unknown platforms fall back to the hybrid crawler so the hybrid
        branch in _execute_crawler is actually reachable from parse_video.
        A known platform whose crawler failed to initialize still yields
        None (dict.get returns the stored None for present keys).
        """
        crawler_map = {
            "douyin": self.douyin_crawler,
            "tiktok": self.tiktok_crawler,
            "bilibili": self.bilibili_crawler
        }
        return crawler_map.get(platform, self.hybrid_crawler)

    async def _execute_crawler(self, crawler, url: str, platform: str) -> Optional[Dict]:
        """Run the platform-specific fetch; returns None (logged) on failure."""
        try:
            if platform == "douyin":
                # Douyin needs the numeric aweme_id extracted first.
                aweme_id = self._extract_douyin_id(url)
                if not aweme_id:
                    raise RuntimeError("无法提取抖音视频ID")
                return await crawler.fetch_one_video(aweme_id)

            elif platform == "tiktok":
                # TikTok accepts the raw URL directly.
                return await crawler.fetch_one_video(url)

            elif platform == "bilibili":
                # Bilibili needs the BV id extracted first.
                bvid = self._extract_bilibili_id(url)
                if not bvid:
                    raise RuntimeError("无法提取哔哩哔哩视频ID")
                return await crawler.fetch_one_video(bvid)

            else:
                # Any other platform goes through the hybrid crawler.
                if self.hybrid_crawler:
                    return await self.hybrid_crawler.fetch_one_video(url)
                raise RuntimeError("混合爬虫未初始化")

        except Exception as e:
            logger.error(f"爬虫执行失败: {str(e)}")
            return None

    @staticmethod
    def _extract_douyin_id(url: str) -> Optional[str]:
        """Extract the aweme_id from a Douyin URL; None when nothing matches."""
        for pattern in VideoService._DOUYIN_ID_PATTERNS:
            match = pattern.search(url)
            if match:
                return match.group(1)
        return None

    @staticmethod
    def _extract_bilibili_id(url: str) -> Optional[str]:
        """Extract the BV id from a Bilibili URL; None when nothing matches."""
        match = VideoService._BILIBILI_BVID_PATTERN.search(url)
        return match.group(1) if match else None

    @staticmethod
    def _first_url(container: Dict) -> str:
        """First entry of a {'url_list': [...]} container; '' if absent or empty.

        The previous inline form ``get("url_list", [""])[0]`` raised
        IndexError when url_list was present but empty.
        """
        urls = container.get("url_list") or []
        return urls[0] if urls else ""

    def _convert_to_video_info(self, raw_data: Dict, platform: str, original_url: str) -> VideoInfo:
        """Dispatch raw crawler data to the matching converter.

        Raises:
            Exception: re-raises any converter failure after logging it.
        """
        try:
            if platform == "douyin":
                return self._convert_douyin_data(raw_data, original_url)
            elif platform == "tiktok":
                return self._convert_tiktok_data(raw_data, original_url)
            elif platform == "bilibili":
                return self._convert_bilibili_data(raw_data, original_url)
            else:
                return self._convert_hybrid_data(raw_data, original_url)

        except Exception as e:
            logger.error(f"数据转换失败: {str(e)}")
            raise

    def _convert_douyin_data(self, data: Dict, original_url: str) -> VideoInfo:
        """Map a Douyin ``aweme_detail`` payload onto VideoInfo."""
        aweme = data.get("aweme_detail", {})
        author = aweme.get("author", {})
        video = aweme.get("video", {})
        statistics = aweme.get("statistics", {})

        return VideoInfo(
            id=aweme.get("aweme_id", ""),
            title=aweme.get("desc", ""),
            author=author.get("nickname", ""),
            author_id=author.get("unique_id", ""),
            platform="douyin",
            download_url=self._first_url(video.get("play_addr", {})),
            original_url=original_url,
            cover_url=self._first_url(video.get("cover", {})),
            duration=video.get("duration", 0) // 1000,  # milliseconds -> seconds
            view_count=statistics.get("play_count", 0),
            like_count=statistics.get("digg_count", 0),
            comment_count=statistics.get("comment_count", 0),
            share_count=statistics.get("share_count", 0),
            create_time=str(aweme.get("create_time", "")),
            description=aweme.get("desc", "")
        )

    def _convert_tiktok_data(self, data: Dict, original_url: str) -> VideoInfo:
        """Map a TikTok ``itemInfo.itemStruct`` payload onto VideoInfo."""
        item_info = data.get("itemInfo", {}).get("itemStruct", {})
        author = item_info.get("author", {})
        video = item_info.get("video", {})
        item_stats = item_info.get("stats", {})

        return VideoInfo(
            id=item_info.get("id", ""),
            title=item_info.get("desc", ""),
            author=author.get("nickname", ""),
            author_id=author.get("uniqueId", ""),
            platform="tiktok",
            download_url=video.get("downloadAddr", ""),
            original_url=original_url,
            cover_url=video.get("cover", ""),
            duration=video.get("duration", 0),  # already in seconds here
            view_count=item_stats.get("playCount", 0),
            like_count=item_stats.get("diggCount", 0),
            comment_count=item_stats.get("commentCount", 0),
            share_count=item_stats.get("shareCount", 0),
            create_time=str(item_info.get("createTime", "")),
            description=item_info.get("desc", "")
        )

    def _convert_bilibili_data(self, data: Dict, original_url: str) -> VideoInfo:
        """Map a Bilibili ``data`` payload onto VideoInfo."""
        video_data = data.get("data", {})
        owner = video_data.get("owner", {})
        stat = video_data.get("stat", {})

        return VideoInfo(
            id=video_data.get("bvid", ""),
            title=video_data.get("title", ""),
            author=owner.get("name", ""),
            author_id=str(owner.get("mid", "")),
            platform="bilibili",
            # Bilibili download links need a separate signed request and
            # are not derivable from this payload.
            download_url="",
            original_url=original_url,
            cover_url=video_data.get("pic", ""),
            duration=video_data.get("duration", 0),
            view_count=stat.get("view", 0),
            like_count=stat.get("like", 0),
            comment_count=stat.get("reply", 0),
            share_count=stat.get("share", 0),
            create_time=str(video_data.get("pubdate", "")),
            description=video_data.get("desc", "")
        )

    def _convert_hybrid_data(self, data: Dict, original_url: str) -> VideoInfo:
        """Hybrid-crawler payloads already match the VideoInfo field layout;
        just stamp in the original URL before constructing."""
        data["original_url"] = original_url
        return VideoInfo(**data)

    async def get_stats(self) -> Dict[str, Any]:
        """Return aggregate service statistics.

        ``avg_parse_time`` is computed over the retained (most recent)
        timing samples only.
        """
        total = self.stats["total_parsed"]
        success = self.stats["success_count"]

        success_rate = (success / total * 100) if total > 0 else 0
        parse_times = self.stats["parse_times"]
        avg_parse_time = sum(parse_times) / len(parse_times) if parse_times else 0

        return {
            "total_parsed": total,
            "success_count": success,
            "error_count": self.stats["error_count"],
            "success_rate": round(success_rate, 2),
            "avg_parse_time": round(avg_parse_time, 2)
        }