import asyncio
import os
import tempfile
from typing import Dict, Any, Optional, List
from urllib.parse import urlparse, parse_qs
import yt_dlp
from datetime import datetime

from config.logging_config import ProjectLoggers
from utils.file_helper import FileHelper

logger = ProjectLoggers.get_service_logger()

class TwitterDownloaderService:
    """Twitter/X video download service.

    Resolves tweet URLs into metadata and direct media download links via
    yt-dlp; nothing is ever downloaded to disk by this service itself.
    """

    # Exact hostnames accepted as Twitter/X.  Matching the full hostname
    # (instead of a substring of the netloc, as before) prevents lookalike
    # domains such as "eviltwitter.com" from being treated as valid.
    _TWITTER_HOSTS = frozenset({
        'twitter.com', 'www.twitter.com', 'mobile.twitter.com',
        'x.com', 'www.x.com',
    })

    # Browser-like User-Agent so Twitter serves the same responses it would
    # give a regular browser.
    _USER_AGENT = (
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
        '(KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    )

    # Public quality keyword -> yt-dlp format selector.
    _QUALITY_MAP = {
        "1080p": "best[height<=1080]",
        "720p": "best[height<=720]",
        "480p": "best[height<=480]",
        "360p": "best[height<=360]",
        "240p": "best[height<=240]",
        "best": "best",
        "worst": "worst",
    }

    def __init__(self):
        # Scratch directory; presumably for callers that need somewhere to
        # write temporary files — unused inside this service itself.
        self.temp_dir = tempfile.gettempdir()

    def _get_tweet_id_from_url(self, url: str) -> Optional[str]:
        """Extract the tweet ID from a Twitter/X URL.

        Supports the standard form ``.../<user>/status/<id>`` and the short
        form ``.../i/web/status/<id>``; both place the ID directly after a
        ``status`` path segment, so a single lookup covers both (the old
        separate elif branch for the short form was unreachable).
        Returns ``None`` when the URL is not a Twitter/X URL or has no ID.
        """
        try:
            parsed = urlparse(url)
            # hostname is lowercased and port-stripped, unlike netloc.
            if parsed.hostname not in self._TWITTER_HOSTS:
                return None

            parts = parsed.path.split('/')
            if 'status' in parts:
                status_index = parts.index('status')
                if status_index + 1 < len(parts) and parts[status_index + 1]:
                    # urlparse already strips the query string; the extra
                    # split guards against a literal '?' left in the path.
                    return parts[status_index + 1].split('?')[0]

            return None
        except Exception as e:
            logger.error(f"解析Twitter URL失败: {str(e)}")
            return None

    def _is_twitter_url(self, url: str) -> bool:
        """Return True when *url* points at a tweet on a known Twitter/X host."""
        try:
            parsed = urlparse(url)
            if parsed.hostname not in self._TWITTER_HOSTS:
                return False
            # Require an actual tweet path, not just the bare domain or a
            # profile page ('/i/web/status/' also contains '/status/').
            return '/status/' in parsed.path
        except Exception:
            # Malformed input (e.g. an invalid port) is simply "not a tweet";
            # narrowed from the original bare except.
            return False

    def _build_ydl_opts(self, format_selector: str = 'best') -> Dict[str, Any]:
        """Build the yt-dlp option dict shared by all extraction calls."""
        return {
            'quiet': True,
            'no_warnings': True,
            'format': format_selector,
            'user_agent': self._USER_AGENT,
            # Twitter's syndication API serves public tweets without login.
            'extractor_args': {
                'twitter': {
                    'api': ['syndication'],
                }
            },
        }

    async def _extract_info(self, url: str,
                            format_selector: str = 'best') -> Optional[Dict[str, Any]]:
        """Run yt-dlp metadata extraction (no download) in a worker thread."""
        ydl_opts = self._build_ydl_opts(format_selector)

        def _run():
            with yt_dlp.YoutubeDL(ydl_opts) as ydl:
                return ydl.extract_info(url, download=False)

        # get_running_loop() is the non-deprecated spelling inside a coroutine.
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, _run)

    @staticmethod
    def _title_of(info: Dict[str, Any]) -> str:
        """Best-effort title: yt-dlp title, else first line of the description.

        ``description`` may be present but ``None``, so normalise it before
        splitting (the old ``info.get('description', '')`` crashed on that).
        """
        return info.get('title') or (info.get('description') or '').split('\n')[0][:200]

    @staticmethod
    def _format_summary(fmt: Dict[str, Any]) -> Dict[str, Any]:
        """Project a yt-dlp format dict onto the fields exposed by this API."""
        return {
            "format_id": fmt.get('format_id'),
            "ext": fmt.get('ext'),
            "resolution": fmt.get('resolution') or f"{fmt.get('width', 'unknown')}x{fmt.get('height', 'unknown')}",
            "width": fmt.get('width'),
            "height": fmt.get('height'),
            "filesize": fmt.get('filesize'),
            "filesize_approx": fmt.get('filesize_approx'),
            "url": fmt.get('url'),
            "quality": fmt.get('quality'),
            "format_note": fmt.get('format_note'),
            "fps": fmt.get('fps'),
            "vcodec": fmt.get('vcodec'),
            "acodec": fmt.get('acodec'),
            "tbr": fmt.get('tbr'),  # total bitrate
            "vbr": fmt.get('vbr'),  # video bitrate
            "abr": fmt.get('abr'),  # audio bitrate
        }

    async def get_tweet_info(self, twitter_url: str) -> Dict[str, Any]:
        """Fetch tweet metadata plus every available video-bearing format.

        Returns ``{"success": True, "data": {...}}`` on success, otherwise
        ``{"success": False, "error": <message>}``; never raises.
        """
        try:
            if not self._is_twitter_url(twitter_url):
                return {
                    "success": False,
                    "error": "不是有效的Twitter链接"
                }

            info = await self._extract_info(twitter_url)
            if not info:
                return {
                    "success": False,
                    "error": "无法获取推文信息，可能是私有推文或需要登录"
                }

            # Only formats that actually carry video are exposed.
            formats = [
                self._format_summary(fmt)
                for fmt in info.get('formats', [])
                if fmt.get('vcodec') != 'none'
            ]
            # Best quality first; ``quality`` can be None, so default to 0.
            formats.sort(key=lambda f: f.get('quality') or 0, reverse=True)

            tweet_info = {
                "tweet_id": info.get('id'),
                "title": self._title_of(info),
                "description": info.get('description', ''),
                "uploader": info.get('uploader'),
                "uploader_id": info.get('uploader_id'),
                "uploader_url": info.get('uploader_url'),
                "duration": info.get('duration'),
                "view_count": info.get('view_count'),
                "like_count": info.get('like_count'),
                "repost_count": info.get('repost_count'),
                "comment_count": info.get('comment_count'),
                "upload_date": info.get('upload_date'),
                "timestamp": info.get('timestamp'),
                "thumbnail": info.get('thumbnail'),
                "webpage_url": info.get('webpage_url'),
                "is_video": info.get('vcodec') != 'none',
                "width": info.get('width'),
                "height": info.get('height'),
                "formats": formats
            }

            logger.info(f"成功获取Twitter推文信息: {tweet_info['title']}")

            return {
                "success": True,
                "data": tweet_info
            }

        except Exception as e:
            logger.error(f"获取Twitter推文信息失败: {str(e)}")
            return {
                "success": False,
                "error": f"获取推文信息失败: {str(e)}"
            }

    async def get_download_urls(self, twitter_url: str, quality: str = "best") -> Dict[str, Any]:
        """Resolve a single direct download URL for the requested quality.

        *quality* is one of the keys in ``_QUALITY_MAP``; unknown values fall
        back to "best".  Returns the same success/error envelope as
        ``get_tweet_info``.
        """
        try:
            if not self._is_twitter_url(twitter_url):
                return {
                    "success": False,
                    "error": "不是有效的Twitter链接"
                }

            format_selector = self._QUALITY_MAP.get(quality, "best")
            info = await self._extract_info(twitter_url, format_selector)
            if not info:
                return {
                    "success": False,
                    "error": "无法获取下载链接，可能是私有推文或需要登录"
                }

            # yt-dlp puts the selected format's direct URL at the top level.
            download_url = info.get('url')
            if not download_url:
                return {
                    "success": False,
                    "error": "无法获取有效的下载链接"
                }

            result = {
                "tweet_id": info.get('id'),
                "title": self._title_of(info),
                "description": info.get('description', ''),
                "uploader": info.get('uploader'),
                "uploader_id": info.get('uploader_id'),
                "uploader_url": info.get('uploader_url'),
                "download_url": download_url,
                "format": info.get('ext'),
                "resolution": info.get('resolution') or f"{info.get('width', 'unknown')}x{info.get('height', 'unknown')}",
                "width": info.get('width'),
                "height": info.get('height'),
                "filesize": info.get('filesize'),
                "filesize_approx": info.get('filesize_approx'),
                "duration": info.get('duration'),
                "thumbnail": info.get('thumbnail'),
                "fps": info.get('fps'),
                "vcodec": info.get('vcodec'),
                "acodec": info.get('acodec'),
                "tbr": info.get('tbr'),
                "vbr": info.get('vbr'),
                "abr": info.get('abr'),
                "webpage_url": info.get('webpage_url'),
                "like_count": info.get('like_count'),
                "view_count": info.get('view_count'),
                "repost_count": info.get('repost_count'),
                "comment_count": info.get('comment_count'),
                "timestamp": info.get('timestamp'),
                "is_video": info.get('vcodec') != 'none'
            }

            logger.info(f"成功获取Twitter下载链接: {result['title']}")

            return {
                "success": True,
                "data": result
            }

        except Exception as e:
            logger.error(f"获取Twitter下载链接失败: {str(e)}")
            return {
                "success": False,
                "error": f"获取下载链接失败: {str(e)}"
            }

    async def get_multiple_quality_urls(self, twitter_url: str) -> Dict[str, Any]:
        """Collect the best direct download URL for every available resolution."""
        try:
            if not self._is_twitter_url(twitter_url):
                return {
                    "success": False,
                    "error": "不是有效的Twitter链接"
                }

            # Reuse the full metadata fetch so format discovery lives in one place.
            info_result = await self.get_tweet_info(twitter_url)
            if not info_result["success"]:
                return info_result

            tweet_info = info_result["data"]
            formats = tweet_info.get("formats", [])
            if not formats:
                return {
                    "success": False,
                    "error": "没有找到可用的视频格式"
                }

            # Group usable formats (known resolution + URL) by resolution.
            resolution_groups: Dict[str, List[Dict[str, Any]]] = {}
            for fmt in formats:
                resolution = fmt.get('resolution', 'unknown')
                if resolution != 'unknown' and fmt.get('url'):
                    resolution_groups.setdefault(resolution, []).append(fmt)

            # Keep the single best format per resolution.  ``quality`` can be
            # an explicit None, which is not orderable against ints, so fall
            # back with ``or 0`` (the old ``x.get('quality', 0)`` crashed here).
            quality_urls = {}
            for resolution, res_formats in resolution_groups.items():
                best = max(res_formats, key=lambda f: f.get('quality') or 0)
                quality_urls[resolution] = {
                    "url": best.get('url'),
                    "format": best.get('ext'),
                    "width": best.get('width'),
                    "height": best.get('height'),
                    "filesize": best.get('filesize'),
                    "filesize_approx": best.get('filesize_approx'),
                    "fps": best.get('fps'),
                    "vcodec": best.get('vcodec'),
                    "acodec": best.get('acodec'),
                    "tbr": best.get('tbr'),
                    "vbr": best.get('vbr'),
                    "abr": best.get('abr')
                }

            result = {
                "tweet_id": tweet_info.get('tweet_id'),
                "title": tweet_info.get('title'),
                "description": tweet_info.get('description'),
                "uploader": tweet_info.get('uploader'),
                "uploader_id": tweet_info.get('uploader_id'),
                "uploader_url": tweet_info.get('uploader_url'),
                "duration": tweet_info.get('duration'),
                "thumbnail": tweet_info.get('thumbnail'),
                "like_count": tweet_info.get('like_count'),
                "view_count": tweet_info.get('view_count'),
                "repost_count": tweet_info.get('repost_count'),
                "comment_count": tweet_info.get('comment_count'),
                "timestamp": tweet_info.get('timestamp'),
                "quality_urls": quality_urls,
                "available_qualities": list(quality_urls.keys()),
                "is_video": tweet_info.get('is_video')
            }

            logger.info(f"成功获取Twitter多质量下载链接: {result['title']}")

            return {
                "success": True,
                "data": result
            }

        except Exception as e:
            logger.error(f"获取Twitter多质量下载链接失败: {str(e)}")
            return {
                "success": False,
                "error": f"获取多质量下载链接失败: {str(e)}"
            }