"""
AI模型接口
提供统一的AI模型调用接口，支持本地模型和云端API
"""

import logging
from abc import ABC, abstractmethod
from typing import Dict, List, Any, Optional, Union
import asyncio
import threading
from concurrent.futures import ThreadPoolExecutor
import time

from config.ai_config import AIModelConfig, AIModelType, ai_config_manager


class AIModelInterface(ABC):
    """Abstract base class defining the contract for all AI model backends."""
    
    def __init__(self, config: AIModelConfig):
        # Backend-specific configuration (credentials, model name, limits, ...).
        self.config = config
        self.logger = logging.getLogger(__name__)
        # Concrete subclasses populate these handles when loading a model.
        self._model = None
        self._tokenizer = None
        # Tracks whether load_model() has completed successfully.
        self._is_loaded = False
    
    @abstractmethod
    def load_model(self) -> bool:
        """Load the underlying model; return True on success."""
        pass
    
    @abstractmethod
    def unload_model(self) -> bool:
        """Release the underlying model; return True on success."""
        pass
    
    @abstractmethod
    def generate_text(self, prompt: str, **kwargs) -> Optional[str]:
        """Generate text for a single prompt; return None on failure."""
        pass
    
    @abstractmethod
    def batch_generate(self, prompts: List[str], **kwargs) -> List[Optional[str]]:
        """Generate text for each prompt; failed items are None."""
        pass
    
    @property
    def is_loaded(self) -> bool:
        """Whether the backend currently has a usable model loaded."""
        return self._is_loaded


class OpenAIModel(AIModelInterface):
    """Backend that calls the OpenAI chat-completions API."""

    def __init__(self, config: AIModelConfig):
        super().__init__(config)
        # Holds the configured openai module once load_model() succeeds.
        self.client = None

    def load_model(self) -> bool:
        """Point the openai SDK at the configured credentials/endpoint."""
        try:
            import openai

            openai.api_key = self.config.api_key
            # Optional overrides: only set when present in the config.
            for attr in ("api_base", "api_version"):
                value = getattr(self.config, attr)
                if value:
                    setattr(openai, attr, value)

            self.client = openai
            self._is_loaded = True
            self.logger.info("OpenAI客户端加载成功")
            return True

        except Exception as exc:
            self.logger.error(f"OpenAI客户端加载失败: {str(exc)}")
            return False

    def unload_model(self) -> bool:
        """Drop the client reference and mark the backend unloaded."""
        self.client = None
        self._is_loaded = False
        return True

    def generate_text(self, prompt: str, **kwargs) -> Optional[str]:
        """Run one chat completion; return the reply text, or None on error."""
        if not self._is_loaded and not self.load_model():
            return None

        try:
            request = {
                "model": self.config.model_name,
                "messages": [{"role": "user", "content": prompt}],
                # Per-call overrides fall back to the configured defaults.
                "max_tokens": kwargs.get("max_tokens", self.config.max_tokens),
                "temperature": kwargs.get("temperature", self.config.temperature),
                "timeout": self.config.timeout,
            }
            response = self.client.ChatCompletion.create(**request)
            return response.choices[0].message.content.strip()

        except Exception as exc:
            self.logger.error(f"OpenAI文本生成失败: {str(exc)}")
            return None

    def batch_generate(self, prompts: List[str], **kwargs) -> List[Optional[str]]:
        """Generate answers one prompt at a time, throttling between calls."""
        results: List[Optional[str]] = []
        for single_prompt in prompts:
            results.append(self.generate_text(single_prompt, **kwargs))
            # Small delay to avoid hitting API rate limits.
            time.sleep(0.1)
        return results


class ClaudeModel(AIModelInterface):
    """Backend that calls Anthropic's Claude messages API."""

    def __init__(self, config: AIModelConfig):
        super().__init__(config)
        # Anthropic SDK client, created lazily by load_model().
        self.client = None

    def load_model(self) -> bool:
        """Build the Anthropic client from the configuration."""
        try:
            import anthropic

            self.client = anthropic.Anthropic(
                api_key=self.config.api_key,
                base_url=self.config.api_base,
            )
            self._is_loaded = True
            self.logger.info("Claude客户端加载成功")
            return True

        except Exception as exc:
            self.logger.error(f"Claude客户端加载失败: {str(exc)}")
            return False

    def unload_model(self) -> bool:
        """Drop the client reference and mark the backend unloaded."""
        self.client = None
        self._is_loaded = False
        return True

    def generate_text(self, prompt: str, **kwargs) -> Optional[str]:
        """Send one user message and return Claude's reply text, or None."""
        if not self._is_loaded and not self.load_model():
            return None

        try:
            reply = self.client.messages.create(
                model=self.config.model_name,
                messages=[{"role": "user", "content": prompt}],
                max_tokens=kwargs.get("max_tokens", self.config.max_tokens),
                temperature=kwargs.get("temperature", self.config.temperature),
            )
            # The messages API returns a list of content blocks; the first
            # block carries the generated text.
            return reply.content[0].text.strip()

        except Exception as exc:
            self.logger.error(f"Claude文本生成失败: {str(exc)}")
            return None

    def batch_generate(self, prompts: List[str], **kwargs) -> List[Optional[str]]:
        """Generate sequentially, pausing between calls."""
        outputs: List[Optional[str]] = []
        for single_prompt in prompts:
            outputs.append(self.generate_text(single_prompt, **kwargs))
            # Pause to avoid API rate limiting.
            time.sleep(0.2)
        return outputs


class LocalLLMModel(AIModelInterface):
    """Backend that runs a Hugging Face causal LM locally via transformers."""

    def __init__(self, config: AIModelConfig):
        super().__init__(config)
        self.model = None
        self.tokenizer = None
        # Resolved torch device string ("cuda"/"cpu"/...); set by load_model().
        self.device = None

    def load_model(self) -> bool:
        """Load the tokenizer and model onto the configured device.

        Returns True on success, False on any failure (logged).
        """
        try:
            from transformers import AutoTokenizer, AutoModelForCausalLM
            import torch

            # Resolve "auto" to a concrete device.
            if self.config.device == "auto":
                self.device = "cuda" if torch.cuda.is_available() else "cpu"
            else:
                self.device = self.config.device

            self.logger.info(f"正在加载本地模型 {self.config.model_name} 到设备 {self.device}")

            # A local path takes precedence over the hub model name.
            source = self.config.local_model_path or self.config.model_name

            self.tokenizer = AutoTokenizer.from_pretrained(
                source,
                trust_remote_code=self.config.trust_remote_code
            )

            model_kwargs = {
                "trust_remote_code": self.config.trust_remote_code,
                # Let accelerate place the weights on CUDA; CPU needs no map.
                "device_map": "auto" if self.device == "cuda" else None,
                "torch_dtype": getattr(torch, self.config.torch_dtype) if self.config.torch_dtype != "auto" else "auto"
            }

            if self.config.load_in_8bit:
                model_kwargs["load_in_8bit"] = True
            elif self.config.load_in_4bit:
                model_kwargs["load_in_4bit"] = True

            self.model = AutoModelForCausalLM.from_pretrained(source, **model_kwargs)

            # Without a device map the weights land on CPU; move them when a
            # non-CPU device (e.g. "mps", an explicit "cuda:1") was requested.
            # Quantized loads manage their own placement and must not be moved.
            if (model_kwargs.get("device_map") is None
                    and self.device != "cpu"
                    and not (self.config.load_in_8bit or self.config.load_in_4bit)):
                self.model = self.model.to(self.device)

            self.model.eval()
            self._is_loaded = True
            self.logger.info("本地模型加载成功")
            return True

        except Exception as e:
            self.logger.error(f"本地模型加载失败: {str(e)}")
            return False

    def unload_model(self) -> bool:
        """Drop model/tokenizer references and free cached GPU memory."""
        try:
            # Dropping the references lets the GC reclaim the weights.
            self.model = None
            self.tokenizer = None

            import torch
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

            self._is_loaded = False
            self.logger.info("本地模型卸载成功")
            return True

        except Exception as e:
            self.logger.error(f"本地模型卸载失败: {str(e)}")
            return False

    def generate_text(self, prompt: str, **kwargs) -> Optional[str]:
        """Generate a continuation of *prompt*; return only the new text.

        Returns None on failure (logged).
        """
        if not self._is_loaded and not self.load_model():
            return None

        try:
            import torch

            input_ids = self.tokenizer.encode(prompt, return_tensors="pt")
            if self.device != "cpu":
                input_ids = input_ids.to(self.device)

            generate_kwargs = {
                # Equivalent to the old max_length = prompt_len + max_tokens,
                # but independent of the prompt length bookkeeping.
                "max_new_tokens": kwargs.get("max_tokens", self.config.max_tokens),
                "temperature": kwargs.get("temperature", self.config.temperature),
                "do_sample": True,
                "pad_token_id": self.tokenizer.eos_token_id,
                "eos_token_id": self.tokenizer.eos_token_id
            }

            with torch.no_grad():
                output_ids = self.model.generate(input_ids, **generate_kwargs)

            # Decode only the newly generated token ids. Slicing the decoded
            # string by len(prompt) is unreliable: tokenizers do not always
            # round-trip the prompt byte-for-byte (whitespace/special tokens).
            new_tokens = output_ids[0][input_ids.shape[-1]:]
            return self.tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

        except Exception as e:
            self.logger.error(f"本地模型文本生成失败: {str(e)}")
            return None

    def batch_generate(self, prompts: List[str], **kwargs) -> List[Optional[str]]:
        """Generate text for each prompt sequentially; failures yield None."""
        return [self.generate_text(prompt, **kwargs) for prompt in prompts]


class OllamaModel(AIModelInterface):
    """Backend that talks to a local Ollama HTTP service."""

    def __init__(self, config: AIModelConfig):
        super().__init__(config)
        # Fall back to Ollama's default local endpoint.
        self.api_base = config.api_base or "http://localhost:11434"

    def load_model(self) -> bool:
        """Verify the Ollama service is reachable and hosts the model.

        Returns True when the configured model name appears in the
        service's tag list; False (logged) otherwise.
        """
        try:
            import requests

            # /api/tags lists the models currently available to the service.
            response = requests.get(f"{self.api_base}/api/tags", timeout=5)
            if response.status_code != 200:
                self.logger.error(f"无法连接到Ollama服务: {response.status_code}")
                return False

            models_data = response.json()
            available_models = [model['name'] for model in models_data.get('models', [])]

            if self.config.model_name not in available_models:
                self.logger.error(f"Ollama模型 {self.config.model_name} 不可用。可用模型: {available_models}")
                return False

            self._is_loaded = True
            self.logger.info(f"Ollama模型 {self.config.model_name} 连接成功")
            return True

        except Exception as e:
            self.logger.error(f"Ollama服务连接失败: {str(e)}")
            return False

    def unload_model(self) -> bool:
        """Mark the backend unloaded (the Ollama service keeps the model)."""
        self._is_loaded = False
        return True

    def generate_text(self, prompt: str, **kwargs) -> Optional[str]:
        """Run one non-streaming /api/generate request; None on failure."""
        if not self._is_loaded and not self.load_model():
            return None

        try:
            import requests

            data = {
                "model": self.config.model_name,
                "prompt": prompt,
                "stream": False,
                "options": {
                    "temperature": kwargs.get("temperature", self.config.temperature),
                    # num_predict is Ollama's max-new-tokens knob.
                    "num_predict": kwargs.get("max_tokens", self.config.max_tokens)
                }
            }

            response = requests.post(
                f"{self.api_base}/api/generate",
                json=data,
                timeout=self.config.timeout
            )

            if response.status_code != 200:
                self.logger.error(f"Ollama API请求失败: {response.status_code} - {response.text}")
                return None

            result = response.json()
            return result.get("response", "").strip()

        except Exception as e:
            self.logger.error(f"Ollama文本生成失败: {str(e)}")
            return None

    def batch_generate(self, prompts: List[str], **kwargs) -> List[Optional[str]]:
        """Generate sequentially, pausing briefly between requests."""
        results: List[Optional[str]] = []
        for prompt in prompts:
            results.append(self.generate_text(prompt, **kwargs))
            # Brief pause so we do not overload the local service.
            # (time is imported at module level; no per-iteration import.)
            time.sleep(0.1)
        return results


class ModelFactory:
    """Factory that maps an AI configuration onto a concrete backend."""

    @staticmethod
    def create_model(config: AIModelConfig) -> Optional[AIModelInterface]:
        """Instantiate the backend matching config.model_type, or None."""
        try:
            # Dispatch table from model type to backend class.
            registry = {
                AIModelType.OPENAI: OpenAIModel,
                AIModelType.CLAUDE: ClaudeModel,
                AIModelType.LOCAL_LLM: LocalLLMModel,
                AIModelType.OLLAMA: OllamaModel,
                # Register additional model types here.
            }
            backend = registry.get(config.model_type)
            if backend is None:
                logging.error(f"不支持的模型类型: {config.model_type}")
                return None
            return backend(config)

        except Exception as e:
            logging.error(f"创建模型失败: {str(e)}")
            return None


class AIModelManager:
    """Thread-safe registry of loaded AI model backends.

    Keeps every successfully loaded model keyed by its configuration name
    and designates one as the "active" default used when callers do not
    name a model explicitly. All access to the registry goes through a
    re-entrant lock so loading/unloading is safe across threads.
    """

    def __init__(self):
        # Loaded backends keyed by configuration name.
        self.models: Dict[str, AIModelInterface] = {}
        # Name of the default model, or None when nothing is loaded.
        self.active_model: Optional[str] = None
        # Re-entrant lock guarding models / active_model.
        self.lock = threading.RLock()
        self.logger = logging.getLogger(__name__)

    def load_model(self, config_name: str) -> bool:
        """Create and load the model described by *config_name*.

        Returns True when the model is (already) loaded. The first model
        loaded becomes the active default.
        """
        config = ai_config_manager.get_config(config_name)
        if not config:
            self.logger.error(f"AI配置不存在: {config_name}")
            return False

        with self.lock:
            if config_name in self.models:
                return True  # already loaded

            model = ModelFactory.create_model(config)
            if not model:
                return False

            if not model.load_model():
                # Previously a silent False; log so failures are traceable.
                self.logger.error(f"模型加载失败: {config_name}")
                return False

            self.models[config_name] = model
            if not self.active_model:
                self.active_model = config_name
            self.logger.info(f"模型加载成功: {config_name}")
            return True

    def unload_model(self, config_name: str) -> bool:
        """Unload and deregister *config_name*; True on success.

        If the unloaded model was the active one, another loaded model
        (if any) is promoted; otherwise the active slot is cleared.
        """
        with self.lock:
            model = self.models.get(config_name)
            if model is None or not model.unload_model():
                return False

            del self.models[config_name]
            if self.active_model == config_name:
                self.active_model = next(iter(self.models), None)
            self.logger.info(f"模型卸载成功: {config_name}")
            return True

    def set_active_model(self, config_name: str) -> bool:
        """Make *config_name* the default model; False if not loaded."""
        with self.lock:
            if config_name in self.models:
                self.active_model = config_name
                return True
            return False

    def get_active_model(self) -> Optional[AIModelInterface]:
        """Return the active model instance, or None if none is loaded."""
        with self.lock:
            if self.active_model and self.active_model in self.models:
                return self.models[self.active_model]
            return None

    def _resolve_model(self, model_name: Optional[str]) -> Optional[AIModelInterface]:
        """Look up the requested (or active) model while holding the lock.

        Fixes the previous unlocked reads of self.models in the generate
        paths, which raced with concurrent unload_model() calls.
        """
        with self.lock:
            if model_name:
                return self.models.get(model_name)
            return self.get_active_model()

    def generate_text(self, prompt: str, model_name: Optional[str] = None, **kwargs) -> Optional[str]:
        """Generate text with the named (or active) model; None on failure."""
        model = self._resolve_model(model_name)
        if not model:
            self.logger.error("没有可用的模型")
            return None
        return model.generate_text(prompt, **kwargs)

    def batch_generate(self, prompts: List[str], model_name: Optional[str] = None, **kwargs) -> List[Optional[str]]:
        """Batch-generate with the named (or active) model.

        Returns a list of None placeholders when no model is available.
        """
        model = self._resolve_model(model_name)
        if not model:
            self.logger.error("没有可用的模型")
            return [None] * len(prompts)
        return model.batch_generate(prompts, **kwargs)

    def cleanup(self):
        """Unload every registered model."""
        with self.lock:
            for name in list(self.models.keys()):
                self.unload_model(name)


# Global, process-wide model manager instance shared by importers of this module.
ai_model_manager = AIModelManager()