"""大模型客户端统一接口"""

import asyncio
import hashlib
import json
import re
import time
from abc import ABC, abstractmethod
from datetime import datetime, timedelta
from pathlib import Path
from typing import Any, Dict, List, Optional, Union

import aiohttp

from ..models.config import LLMConfig
from ..models.directory_rule import AnalysisContext, DirectoryRule
from ..models.file_info import FileInfo
from ..utils.logger import get_logger


class LLMClientBase(ABC):
    """Abstract base class that every LLM backend client implements."""

    def __init__(self, config: Dict[str, Any]):
        # Raw provider configuration (endpoint, model, credentials, ...).
        self.config = config
        self.logger = get_logger()

    @abstractmethod
    def is_available(self) -> bool:
        """Return True if this backend appears usable."""
        ...

    @abstractmethod
    async def complete(self, prompt: str) -> str:
        """Run a generic text completion for *prompt* and return the text."""
        ...

    @abstractmethod
    async def analyze_files(self, files: List[FileInfo],
                            context: AnalysisContext) -> List[DirectoryRule]:
        """Analyze *files* and produce directory-placement rules."""
        ...


class OllamaClient(LLMClientBase):
    """Client for a locally hosted Ollama model server.

    Talks to the Ollama HTTP API (``POST /api/generate``) and converts the
    model's JSON output into :class:`DirectoryRule` objects.
    """

    def __init__(self, config: Dict[str, Any]):
        super().__init__(config)
        self.endpoint = config.get('endpoint', 'http://localhost:11434')
        self.model = config.get('model', 'qwen2.5:7b')
        self.timeout = config.get('timeout', 30)  # request timeout in seconds

    def is_available(self) -> bool:
        """Report whether the client is configured.

        Only the configuration can be checked synchronously; actual
        reachability of the Ollama server would require an async probe.
        """
        # Fixed: the old bare ``except`` around a redundant ``import aiohttp``
        # could mask real errors; aiohttp is already imported at module level.
        return self.config.get('endpoint') is not None

    async def complete(self, prompt: str) -> str:
        """Send *prompt* to Ollama and return the generated text.

        Raises:
            Exception: any HTTP/timeout error is logged and re-raised.
        """
        payload = {
            "model": self.model,
            "prompt": prompt,
            "stream": False,
            "options": {
                "temperature": 0.1,  # low temperature for stable classification
                "top_p": 0.9
            }
        }

        try:
            async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=self.timeout)) as session:
                async with session.post(
                    f"{self.endpoint}/api/generate",
                    json=payload
                ) as response:
                    response.raise_for_status()
                    result = await response.json()
                    return result.get('response', '')

        except Exception as e:
            self.logger.error(f"Ollama请求失败: {e}")
            raise

    async def analyze_files(self, files: List[FileInfo],
                           context: AnalysisContext) -> List[DirectoryRule]:
        """Analyze *files* and return one DirectoryRule per file.

        Builds the prompt via PromptEngine, runs a completion, parses the
        response, and logs the request outcome either way.
        """
        from .prompt_engine import PromptEngine

        prompt_engine = PromptEngine()
        prompt = prompt_engine.build_analysis_prompt(files, context)

        start_time = time.time()
        try:
            response = await self.complete(prompt)
            response_time = time.time() - start_time

            # Parse the (expected JSON) model response into rules.
            rules = self._parse_analysis_response(response, files)

            self.logger.log_llm_request(
                provider="ollama",
                model=self.model,
                prompt_length=len(prompt),
                response_time=response_time,
                success=True,
                file_count=len(files)
            )

            return rules

        except Exception as e:
            response_time = time.time() - start_time
            self.logger.log_llm_request(
                provider="ollama",
                model=self.model,
                prompt_length=len(prompt),
                response_time=response_time,
                success=False,
                error=str(e)
            )
            raise

    def _parse_analysis_response(self, response: str, files: List[FileInfo]) -> List[DirectoryRule]:
        """Parse the model response into DirectoryRule objects.

        Accepts either a JSON array (one entry per file) or a single JSON
        object embedded in free text. Falls back to one low-confidence
        default rule per file when parsing fails.
        """
        try:
            if response.strip().startswith('['):
                # Batch result: one JSON object per file.
                results = json.loads(response)
            else:
                # Single result: extract the first JSON object from the text.
                json_match = re.search(r'\{.*\}', response, re.DOTALL)
                if json_match:
                    result = json.loads(json_match.group())
                    # Reuse the same (read-only) dict for every file.
                    results = [result] * len(files)
                else:
                    raise ValueError("无法解析响应中的JSON")

            rules = []
            for i, file_info in enumerate(files):
                # If the model returned fewer entries than files, reuse the first.
                result = results[i] if i < len(results) else results[0]

                rule = DirectoryRule(
                    category=result.get('category', '其他'),
                    subcategory=result.get('subcategory'),
                    suggested_path=Path(result.get('suggested_path', f'其他/{file_info.name}')),
                    confidence=result.get('confidence', 0.5),
                    keywords=result.get('keywords', []),
                    date_based=result.get('date_based', False),
                    reasoning=result.get('reasoning', ''),
                    alternative_paths=[Path(p) for p in result.get('alternative_paths', [])]
                )
                rules.append(rule)

            return rules

        except Exception as e:
            self.logger.error(f"解析Ollama响应失败: {e}, 响应内容: {response[:200]}...")
            # Degrade gracefully: default low-confidence classification.
            return [
                DirectoryRule(
                    category='其他',
                    suggested_path=Path(f'其他/{file.name}'),
                    confidence=0.3,
                    reasoning='解析失败，使用默认分类'
                ) for file in files
            ]


class OpenAIClient(LLMClientBase):
    """Client for the OpenAI chat-completions API."""

    def __init__(self, config: Dict[str, Any]):
        super().__init__(config)
        self.api_key = config.get('api_key', '')
        self.model = config.get('model', 'gpt-4')
        self.base_url = config.get('base_url', 'https://api.openai.com/v1')
        self.timeout = config.get('timeout', 30)  # request timeout in seconds

    def is_available(self) -> bool:
        """Report whether an API key is configured."""
        return bool(self.api_key)

    async def complete(self, prompt: str) -> str:
        """Send *prompt* as a single user message and return the reply text.

        Raises:
            ValueError: if no API key is configured.
            Exception: any HTTP/timeout error is logged and re-raised.
        """
        if not self.is_available():
            raise ValueError("OpenAI API密钥未配置")

        headers = {
            'Authorization': f'Bearer {self.api_key}',
            'Content-Type': 'application/json'
        }

        payload = {
            "model": self.model,
            "messages": [
                {"role": "user", "content": prompt}
            ],
            "temperature": 0.1,  # low temperature for stable classification
            "max_tokens": 2000
        }

        try:
            # Fixed: previously used ``httpx.AsyncClient`` although httpx is
            # never imported anywhere in this module (guaranteed NameError at
            # runtime). Use aiohttp, consistent with OllamaClient.
            async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=self.timeout)) as session:
                async with session.post(
                    f"{self.base_url}/chat/completions",
                    headers=headers,
                    json=payload
                ) as response:
                    response.raise_for_status()
                    result = await response.json()
                    return result['choices'][0]['message']['content']

        except Exception as e:
            self.logger.error(f"OpenAI请求失败: {e}")
            raise

    async def analyze_files(self, files: List[FileInfo],
                           context: AnalysisContext) -> List[DirectoryRule]:
        """Analyze *files* and return one DirectoryRule per file."""
        from .prompt_engine import PromptEngine

        prompt_engine = PromptEngine()
        prompt = prompt_engine.build_analysis_prompt(files, context)

        start_time = time.time()
        try:
            response = await self.complete(prompt)
            response_time = time.time() - start_time

            rules = self._parse_analysis_response(response, files)

            self.logger.log_llm_request(
                provider="openai",
                model=self.model,
                prompt_length=len(prompt),
                response_time=response_time,
                success=True,
                file_count=len(files)
            )

            return rules

        except Exception as e:
            response_time = time.time() - start_time
            self.logger.log_llm_request(
                provider="openai",
                model=self.model,
                prompt_length=len(prompt),
                response_time=response_time,
                success=False,
                error=str(e)
            )
            raise

    def _parse_analysis_response(self, response: str, files: List[FileInfo]) -> List[DirectoryRule]:
        """Parse the model response, reusing OllamaClient's parser.

        Called as an unbound method with ``self`` being this OpenAIClient;
        the parser only needs ``self.logger``, which both classes share.
        """
        return OllamaClient._parse_analysis_response(self, response, files)


class LLMClient:
    """Manager that routes analysis requests to the configured LLM backends.

    Responsibilities: backend selection by task complexity, retry with
    exponential backoff, in-memory result caching with a TTL, and a
    rule-based fallback when every LLM attempt fails.
    """

    def __init__(self, config: LLMConfig):
        self.config = config
        self.logger = get_logger()
        # Registered backend clients keyed by provider name ('local', 'openai', ...).
        self._clients: Dict[str, LLMClientBase] = {}
        # cache_key -> {'rules': List[DirectoryRule], 'expires_at': datetime}
        self.request_cache: Dict[str, Dict[str, Any]] = {}
        self._cache_ttl = config.cache_ttl  # cache lifetime in seconds

        self._init_clients()

    def _init_clients(self) -> None:
        """Instantiate the backend clients enabled in the configuration."""
        # Local Ollama client.
        if self.config.local_enabled:
            self._clients['local'] = OllamaClient({
                'endpoint': self.config.local_endpoint,
                'model': self.config.local_model,
                'timeout': self.config.request_timeout
            })

        # OpenAI client (only when an API key is present).
        if self.config.openai_enabled and self.config.openai_api_key:
            self._clients['openai'] = OpenAIClient({
                'api_key': self.config.openai_api_key,
                'model': self.config.openai_model,
                'base_url': self.config.openai_base_url,
                'timeout': self.config.request_timeout
            })

        # Additional providers (Claude, Zhipu AI, ...) can be registered here.

    async def analyze_files(self, files: List[FileInfo],
                           context: AnalysisContext) -> List[DirectoryRule]:
        """Analyze *files*, trying cache, then best client, then fallback.

        Raises:
            RuntimeError: if no backend client is available at all.
        """
        # Derive task complexity from file count and user description.
        context.set_complexity(len(files), bool(context.user_description))

        # Serve from cache when a fresh entry exists.
        cache_key = self._generate_cache_key(files, context)
        cached_result = self._get_cached_result(cache_key)
        if cached_result:
            self.logger.info(f"使用缓存结果", cache_key=cache_key)
            return cached_result

        # Pick the most suitable backend for this request.
        client = self._select_best_client(context)
        if not client:
            raise RuntimeError("没有可用的LLM客户端")

        try:
            rules = await self._analyze_with_retry(client, files, context)
            self._cache_result(cache_key, rules)
            return rules

        except Exception as e:
            self.logger.error(f"文件分析失败: {e}")
            # Degrade gracefully to a rule-based classification.
            return self._fallback_analysis(files, context)

    def _select_best_client(self, context: AnalysisContext) -> Optional[LLMClientBase]:
        """Pick the best available client for the request.

        Priority: local model for simple tasks, then cloud providers in a
        fixed order, then the local client as a last resort (without an
        availability check at that final step).
        """
        if context.complexity == 'simple' and 'local' in self._clients:
            client = self._clients['local']
            if client.is_available():
                return client

        # Cloud providers in priority order.
        for provider in ['openai', 'claude', 'zhipu']:
            if provider in self._clients:
                client = self._clients[provider]
                if client.is_available():
                    return client

        # Last resort: local client, availability unchecked.
        if 'local' in self._clients:
            return self._clients['local']

        return None

    async def _analyze_with_retry(self, client: LLMClientBase,
                                 files: List[FileInfo],
                                 context: AnalysisContext) -> List[DirectoryRule]:
        """Run the analysis with up to ``max_retries`` attempts.

        Uses exponential backoff (1s, 2s, 4s, ...) between attempts and
        re-raises the last error after the final attempt.
        """
        max_retries = self.config.max_retries

        for attempt in range(max_retries):
            try:
                return await client.analyze_files(files, context)
            except Exception as e:
                if attempt == max_retries - 1:
                    raise

                wait_time = 2 ** attempt  # exponential backoff
                self.logger.warning(f"分析失败，{wait_time}秒后重试: {e}")
                await asyncio.sleep(wait_time)

        # Only reachable when max_retries <= 0.
        raise RuntimeError("分析重试次数超限")

    def _generate_cache_key(self, files: List[FileInfo],
                           context: AnalysisContext) -> str:
        """Build a deterministic cache key from file names and context."""
        # hashlib is imported at module level; no local import needed.
        content = {
            'files': [f.name for f in files],
            'user_description': context.user_description,
            'existing_structure': context.existing_structure[:10],  # cap key size
            'complexity': context.complexity
        }

        content_str = json.dumps(content, sort_keys=True)
        return hashlib.md5(content_str.encode()).hexdigest()

    def _get_cached_result(self, cache_key: str) -> Optional[List[DirectoryRule]]:
        """Return the cached rules for *cache_key*, purging expired entries."""
        if cache_key in self.request_cache:
            cached = self.request_cache[cache_key]
            if datetime.now() < cached['expires_at']:
                return cached['rules']
            else:
                # Entry expired: drop it so the cache does not grow unbounded.
                del self.request_cache[cache_key]
        return None

    def _cache_result(self, cache_key: str, rules: List[DirectoryRule]) -> None:
        """Store *rules* under *cache_key* with a TTL-based expiry time."""
        expires_at = datetime.now() + timedelta(seconds=self._cache_ttl)
        self.request_cache[cache_key] = {
            'rules': rules,
            'expires_at': expires_at
        }

    def _fallback_analysis(self, files: List[FileInfo],
                          context: AnalysisContext) -> List[DirectoryRule]:
        """Rule-based fallback classification using file extensions only."""
        rules = []

        for file_info in files:
            category = file_info.get_category_by_extension()

            # Simple category-to-path mapping.
            if category == 'documents':
                suggested_path = Path(f"文档/{file_info.name}")
            elif category == 'images':
                suggested_path = Path(f"图片/{file_info.name}")
            elif category == 'videos':
                suggested_path = Path(f"视频/{file_info.name}")
            else:
                suggested_path = Path(f"其他/{category}/{file_info.name}")

            rule = DirectoryRule(
                category=category,
                suggested_path=suggested_path,
                confidence=0.6,
                keywords=[category],
                reasoning="基于文件扩展名的降级分类"
            )
            rules.append(rule)

        return rules

    def _select_provider(self, complexity: str = "simple") -> str:
        """Select a provider name by complexity (used by tests)."""
        if complexity == "simple" and self.config.local_enabled:
            return "local"
        elif self.config.openai_enabled:
            return "openai"
        else:
            return self.config.default_provider

    def _build_prompt(self, files: List[FileInfo], context: AnalysisContext) -> str:
        """Build a plain-text analysis prompt (used by tests)."""
        prompt_parts = []

        prompt_parts.append("请分析以下文件并建议归档路径：")
        prompt_parts.append("")

        for i, file_info in enumerate(files, 1):
            prompt_parts.append(f"{i}. {file_info.name}")

        if context.user_description:
            prompt_parts.append(f"用户描述: {context.user_description}")

        if context.existing_structure:
            prompt_parts.append("现有结构：")
            for structure in context.existing_structure:
                prompt_parts.append(f"- {structure}")

        return "\n".join(prompt_parts)

    def _parse_llm_response(self, response: str, num_files: int) -> List[DirectoryRule]:
        """Parse a numbered, labelled LLM response into rules (used by tests).

        Expects sections like ``1. name`` followed by labelled lines
        (建议路径 / 分类 / 置信度 / 原因). Pads with default rules so exactly
        ``num_files`` rules are returned.
        """
        rules = []

        # Patterns for the structured response format.
        file_pattern = r'(\d+)\.\s*([^\n]+)'
        path_pattern = r'建议路径：([^\n]+)'
        category_pattern = r'分类：([^\n]+)'
        confidence_pattern = r'置信度：([0-9.]+)'
        reason_pattern = r'原因：([^\n]+)'

        file_matches = re.findall(file_pattern, response)

        for i, (file_num, file_name) in enumerate(file_matches):
            # Slice out the section belonging to this file entry.
            start_pos = response.find(f"{file_num}. {file_name}")
            if i + 1 < len(file_matches):
                end_pos = response.find(f"{file_matches[i+1][0]}. {file_matches[i+1][1]}")
            else:
                end_pos = len(response)

            section = response[start_pos:end_pos]

            # Extract the labelled fields, each optional.
            path_match = re.search(path_pattern, section)
            category_match = re.search(category_pattern, section)
            confidence_match = re.search(confidence_pattern, section)
            reason_match = re.search(reason_pattern, section)

            suggested_path = Path(path_match.group(1).strip()) if path_match else Path("其他")
            category = category_match.group(1).strip() if category_match else "其他"
            confidence = float(confidence_match.group(1)) if confidence_match else 0.5
            reasoning = reason_match.group(1).strip() if reason_match else "LLM分析结果"

            rule = DirectoryRule(
                category=category,
                suggested_path=suggested_path,
                confidence=confidence,
                keywords=[],
                reasoning=reasoning
            )
            rules.append(rule)

        # Pad with default rules when parsing produced too few entries.
        while len(rules) < num_files:
            rule = DirectoryRule(
                category="其他",
                suggested_path=Path("其他"),
                confidence=0.1,
                keywords=[],
                reasoning="解析失败，使用默认规则"
            )
            rules.append(rule)

        return rules[:num_files]

    def _get_ollama_client(self) -> Optional[OllamaClient]:
        """Return the registered local Ollama client, if any (used by tests)."""
        return self._clients.get('local')

    def _get_openai_client(self) -> Optional[OpenAIClient]:
        """Return the registered OpenAI client, if any (used by tests)."""
        return self._clients.get('openai')