"""
性能优化模块 - 提升视频生成速度
主要优化点：
1. 智能任务轮询（自适应间隔）
2. 并行语音生成
3. 批量LLM调用优化
4. 进度缓存机制
"""

import asyncio
import time
import os
from pathlib import Path
from typing import List, Dict, Optional
from dataclasses import dataclass
import aiohttp


@dataclass
class TaskStatus:
    """Snapshot of one async task's polling state.

    NOTE(review): not referenced anywhere else in this module — presumably
    consumed by external callers; confirm before removing.
    """
    task_id: str  # remote task identifier
    status: str  # PENDING, RUNNING, SUCCEEDED, FAILED
    start_time: float  # presumably a time.time() timestamp — confirm at call site
    check_count: int = 0  # number of status checks performed so far
    last_check: float = 0  # timestamp of the most recent status check (0 = never checked)


class AdaptiveTaskPoller:
    """Adaptive task poller — adjusts the polling interval to the task's age.

    Polls the DashScope async-task endpoint: fast checks while the task is
    young (low latency for quick tasks), exponential backoff once it has been
    running a while (fewer wasted requests for slow tasks).
    """

    def __init__(self,
                 initial_interval: float = 1.0,
                 min_interval: float = 0.5,
                 max_interval: float = 5.0,
                 backoff_factor: float = 1.5):
        """
        Args:
            initial_interval: Baseline polling interval in seconds.
            min_interval: Interval used during the task's first seconds.
            max_interval: Upper cap for the backed-off interval.
            backoff_factor: Multiplier applied as a long-running task ages.
        """
        self.initial_interval = initial_interval
        self.min_interval = min_interval
        self.max_interval = max_interval
        self.backoff_factor = backoff_factor

    def get_interval(self, elapsed_time: float, check_count: int) -> float:
        """Compute the next polling interval from elapsed time and check count.

        Strategy:
        - first 10 s: fast polling (``min_interval``)
        - 10-30 s: normal polling (``initial_interval``)
        - beyond 30 s: exponential backoff, one step per 10 checks, capped at
          ``max_interval`` to avoid wasting requests on slow tasks.
        """
        if elapsed_time < 10:
            # Early phase: check quickly so short tasks finish with low latency.
            return self.min_interval
        if elapsed_time < 30:
            # Middle phase: baseline cadence.
            return self.initial_interval
        # Late phase: back off one factor step every 10 checks, capped.
        return min(
            self.initial_interval * (self.backoff_factor ** (check_count // 10)),
            self.max_interval
        )

    async def poll_task(self,
                       session: aiohttp.ClientSession,
                       task_id: str,
                       api_key: str,
                       max_wait: int = 180) -> Optional[str]:
        """Poll a DashScope task until it reaches a terminal state.

        Args:
            session: Shared aiohttp session used for the status requests.
            task_id: DashScope task id to poll.
            api_key: Bearer token for the Authorization header.
            max_wait: Overall deadline in seconds.

        Returns:
            The first result URL on SUCCEEDED.

        Raises:
            Exception: If the task reports FAILED, or SUCCEEDED with no results.
            TimeoutError: If the task is not terminal within ``max_wait`` seconds.
        """
        url = f"https://dashscope.aliyuncs.com/api/v1/tasks/{task_id}"
        headers = {"Authorization": f"Bearer {api_key}"}

        start_time = time.time()
        check_count = 0
        last_status = None

        while time.time() - start_time < max_wait:
            check_count += 1
            elapsed = time.time() - start_time

            # Fetch the current status. Any transport/parse error just waits
            # briefly and retries. Terminal statuses are handled OUTSIDE the
            # try block — the old code raised inside it and re-raised from the
            # broad handler by matching exception-message substrings, which was
            # fragile; this restructure removes that hack without changing
            # observable behavior.
            output = None
            try:
                async with session.get(url, headers=headers, timeout=10) as resp:
                    if resp.status == 200:
                        result = await resp.json()
                        output = result.get('output', {})
            except asyncio.TimeoutError:
                pass
            except Exception:
                pass

            if output is None:
                # Non-200 response or request error: short fixed backoff.
                await asyncio.sleep(2)
                continue

            status = output.get('task_status', 'UNKNOWN')

            # Log only on transitions to keep console output readable.
            if status != last_status:
                print(f"   [{int(elapsed)}s] 状态: {status}")
                last_status = status

            if status == 'SUCCEEDED':
                results = output.get('results', [])
                if results:
                    return results[0].get('url')
                raise Exception("任务成功但无结果")
            if status == 'FAILED':
                error_msg = output.get('message', 'Unknown error')
                raise Exception(f"任务失败: {error_msg}")

            # Still pending/running: wait an adaptively-chosen interval.
            await asyncio.sleep(self.get_interval(elapsed, check_count))

        raise TimeoutError(f"任务超时: 等待 {int(time.time() - start_time)} 秒")


class ParallelSpeechGenerator:
    """并行语音生成器"""
    
    def __init__(self, 
                 api_key: str,
                 max_concurrent: int = 3,
                 request_interval: float = 0.3):
        """
        Args:
            api_key: DashScope API Key
            max_concurrent: 最大并发数（语音生成API限制较宽松）
            request_interval: 请求间隔（秒）
        """
        self.api_key = api_key
        self.max_concurrent = max_concurrent
        self.request_interval = request_interval
        
    async def generate_speech_chunk(self,
                                   session: aiohttp.ClientSession,
                                   semaphore: asyncio.Semaphore,
                                   synthesizer,
                                   chunk_text: str,
                                   save_file: Path,
                                   voice: str = "longxiaochun_v2") -> bool:
        """生成单个语音片段"""
        async with semaphore:
            try:
                # 使用同步SDK（DashScope SDK不支持异步）
                # 在异步环境中运行同步代码
                loop = asyncio.get_event_loop()
                await loop.run_in_executor(
                    None,
                    lambda: synthesizer.call(
                        save_file=save_file,
                        transcript=chunk_text,
                        voice=voice
                    )
                )
                
                # 验证文件
                if save_file.exists() and os.path.getsize(save_file) >= 1000:
                    await asyncio.sleep(self.request_interval)
                    return True
                else:
                    if save_file.exists():
                        save_file.unlink()
                    return False
            except Exception as e:
                if save_file.exists() and os.path.getsize(save_file) < 1000:
                    save_file.unlink()
                print(f"   ⚠️  语音生成失败: {str(e)[:60]}")
                return False
    
    async def generate_page_speech(self,
                                   synthesizer,
                                   page_text: str,
                                   page_idx: int,
                                   save_path: Path,
                                   max_chars: int = 400,
                                   voice: str = "longxiaochun_v2",
                                   max_retries: int = 3) -> bool:
        """为单个页面生成语音（支持分块）"""
        from mm_story_agent.modality_agents.speech_agent import split_text_for_tts
        
        chunks = split_text_for_tts(page_text, max_chars=max_chars)
        total_chunks = len(chunks)
        
        if total_chunks == 0:
            return False
        
        # 准备所有文件路径
        files_to_generate = []
        for chunk_idx, chunk_text in enumerate(chunks, 1):
            if total_chunks == 1:
                target_file = save_path / f"p{page_idx}.wav"
            else:
                target_file = save_path / f"p{page_idx}_{chunk_idx:03d}.wav"
            files_to_generate.append((target_file, chunk_text))
        
        # 并行生成所有片段（使用独立的semaphore控制页面内并发）
        page_semaphore = asyncio.Semaphore(min(self.max_concurrent, len(files_to_generate)))
        async with aiohttp.ClientSession() as session:
            tasks = [
                self.generate_speech_chunk(
                    session, page_semaphore, synthesizer, chunk_text, target_file, voice
                )
                for target_file, chunk_text in files_to_generate
            ]
            results = await asyncio.gather(*tasks, return_exceptions=True)
        
        # 检查结果
        success_count = sum(1 for r in results if r is True)
        return success_count == len(files_to_generate)
    
    async def generate_batch(self,
                            pages: List[str],
                            save_path: Path,
                            voice: str = "longxiaochun_v2",
                            max_chars: int = 400,
                            max_retries: int = 3) -> Dict[int, bool]:
        """批量并行生成语音"""
        from mm_story_agent.modality_agents.speech_agent import CosyVoiceSynthesizer, files_are_valid
        
        synthesizer = CosyVoiceSynthesizer(api_key=self.api_key)
        
        print(f"\n{'='*60}")
        print(f"🎙️ 并行语音生成（{self.max_concurrent}个并发）")
        print(f"{'='*60}\n")
        
        # 创建任务列表
        task_list = []
        for idx, page_text in enumerate(pages, 1):
            if not page_text.strip():
                continue
            
            # 检查是否已存在
            single_file = save_path / f"p{idx}.wav"
            chunk_files = sorted(save_path.glob(f"p{idx}_*.wav"))
            
            if files_are_valid([single_file]) or files_are_valid(chunk_files):
                print(f"  ℹ️  页面 {idx}: 已存在，跳过")
                continue
            
            task = self.generate_page_speech(
                synthesizer, page_text, idx, save_path, max_chars, voice, max_retries
            )
            task_list.append((idx, task))
        
        if not task_list:
            print("  所有页面语音已存在，跳过生成")
            return {}
        
        # 并行执行（限制总并发数）
        page_semaphore = asyncio.Semaphore(self.max_concurrent)
        
        async def run_with_semaphore(task, idx):
            async with page_semaphore:
                return idx, await task
        
        task_results = await asyncio.gather(
            *[run_with_semaphore(task, idx) for idx, task in task_list],
            return_exceptions=True
        )
        
        # 处理结果
        success_dict = {}
        for result in task_results:
            if isinstance(result, Exception):
                print(f"  ❌ 任务异常: {result}")
            else:
                idx, success = result
                success_dict[idx] = success
                status = "✅" if success else "❌"
                print(f"  {status} 页面 {idx}: {'成功' if success else '失败'}")
        
        return success_dict


class ProgressCache:
    """Stage-level progress cache used to skip already-completed steps.

    Each stage's payload is stored as a pretty-printed UTF-8 JSON file named
    ``<stage>.cache`` under ``cache_dir``.
    """

    def __init__(self, cache_dir: Path):
        # Create the cache directory eagerly so save() never has to.
        self.cache_dir = cache_dir
        self.cache_dir.mkdir(parents=True, exist_ok=True)

    def get_cache_path(self, stage: str) -> Path:
        """Return the cache file path for *stage*."""
        return self.cache_dir / f"{stage}.cache"

    def save(self, stage: str, data: dict):
        """Persist *data* for *stage* as UTF-8 JSON (overwrites any existing cache)."""
        import json
        cache_file = self.get_cache_path(stage)
        with open(cache_file, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

    def load(self, stage: str) -> Optional[dict]:
        """Load cached data for *stage*; return None if missing or unreadable."""
        import json
        cache_file = self.get_cache_path(stage)
        if not cache_file.exists():
            return None
        try:
            with open(cache_file, 'r', encoding='utf-8') as f:
                return json.load(f)
        # Fix: was a bare `except:` which swallowed everything, including
        # KeyboardInterrupt/SystemExit. Only decode/IO failures mean "treat
        # the cache as absent". (ValueError covers JSONDecodeError and
        # UnicodeDecodeError.)
        except (OSError, ValueError):
            return None

    def clear(self, stage: Optional[str] = None):
        """Remove the cache for *stage*, or every ``*.cache`` file when None."""
        if stage:
            cache_file = self.get_cache_path(stage)
            if cache_file.exists():
                cache_file.unlink()
        else:
            # Clear all cached stages.
            for cache_file in self.cache_dir.glob("*.cache"):
                cache_file.unlink()

