from typing import Dict, Any, Optional, List, Tuple
import json
import logging
import concurrent.futures
from langchain_openai import ChatOpenAI
from .config_loader import get_config_loader

# Configure logging for this module.
# NOTE(review): calling basicConfig at import time is a module side effect;
# the application entry point usually owns logging configuration — confirm
# this is intentional before reusing the module as a library.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class LLMClient:
    """Shared LLM client (singleton) that avoids re-initializing ChatOpenAI.

    The first construction loads configuration and builds the underlying
    client; later constructions return the cached instance.

    NOTE(review): because the instance is cached, a ``config_path`` passed on
    any construction after the first is silently ignored — confirm callers do
    not rely on reconfiguring. Instance creation is not thread-safe either
    (matches the original behavior).
    """

    _instance: Optional['LLMClient'] = None
    # Kept for backward compatibility; the live handle is the instance
    # attribute ``self.client`` set in _initialize().
    _client: Optional[ChatOpenAI] = None

    def __new__(cls, config_path: Optional[str] = None) -> 'LLMClient':
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance._initialize(config_path)
        return cls._instance

    def _initialize(self, config_path: Optional[str]) -> None:
        """Load configuration and build the ChatOpenAI client.

        On any failure ``self.client`` is set to None so the wrapper degrades
        to fallback mode instead of raising.
        """
        try:
            self.config_loader = get_config_loader(config_path)
            self.config = self.config_loader.config
            self.client = self._create_client()

            # Validate configuration; on failure force fallback mode.
            if not self.config_loader.validate_config():
                logger.warning("配置验证失败，请检查环境变量设置")
                self.client = None
        except Exception as e:
            logger.error("初始化LLM客户端失败: %s", e)
            self.client = None

    def _create_client(self) -> Optional[ChatOpenAI]:
        """Build a ChatOpenAI instance from the ``deepseek_api`` config section.

        Returns:
            A configured ChatOpenAI client, or None (fallback mode) when the
            API key is missing/placeholder or construction fails.
        """
        try:
            deepseek_config = self.config.get("deepseek_api", {})
            api_key = deepseek_config.get("api_key", "")
            base_url = deepseek_config.get("base_url", "")
            model = deepseek_config.get("model", "deepseek-chat")
            temperature = deepseek_config.get("temperature", 0.7)
            max_tokens = deepseek_config.get("max_tokens", 2000)

            # Reject empty or placeholder keys so we fail fast into fallback mode.
            if not api_key or api_key in ["Your Key", "your_key", "YOUR_KEY", "api_key"]:
                logger.warning("未配置有效的API密钥，将使用回退模式")
                return None

            try:
                # Log only a short key prefix to avoid leaking the credential.
                logger.info("正在初始化共享大模型客户端，使用密钥: %s...", api_key[:4])
                return ChatOpenAI(
                    model=model,
                    openai_api_key=api_key,
                    openai_api_base=base_url,
                    temperature=temperature,
                    max_tokens=max_tokens,
                )
            except Exception as e:
                logger.error("无法连接到API: %s", e)
                logger.info("切换到回退模式")
                return None
        except Exception as e:
            logger.error("创建LLM客户端失败: %s", e)
            return None

    @staticmethod
    def _call_kwargs(max_tokens: Optional[int],
                     temperature: Optional[float]) -> Dict[str, Any]:
        """Build per-call override kwargs for ``invoke``, omitting unset values."""
        kwargs: Dict[str, Any] = {}
        if max_tokens is not None:
            kwargs["max_tokens"] = max_tokens
        if temperature is not None:
            kwargs["temperature"] = temperature
        return kwargs

    def get_client(self) -> Optional[ChatOpenAI]:
        """Return the underlying ChatOpenAI instance (None in fallback mode)."""
        return self.client

    def get_config(self) -> Dict[str, Any]:
        """Return the loaded configuration mapping."""
        return self.config

    def generate_text(self, prompt: str,
                      max_tokens: Optional[int] = None,
                      temperature: Optional[float] = None) -> str:
        """Generate text for *prompt*, degrading gracefully when the LLM is down.

        Args:
            prompt: Prompt text sent to the model.
            max_tokens: Per-call token-limit override; None keeps the client's
                configured value. (Previously this argument was accepted but
                silently ignored — it is now forwarded to the model call.)
            temperature: Per-call sampling-temperature override; None keeps
                the client's configured value.

        Returns:
            The generated text, or a fallback message when the client is
            unavailable or the call fails. This method never raises.
        """
        try:
            if not self.client:
                logger.warning("LLM客户端不可用，使用基础文本处理")
                # Minimal fallback so callers always get a printable string.
                return f"[LLM服务暂不可用]\n提示词: {prompt[:100]}...\n\n请检查API密钥配置并重新启动服务。"

            response = self.client.invoke(
                prompt, **self._call_kwargs(max_tokens, temperature)
            )
            return response.content
        except Exception as e:
            logger.error("LLM生成失败: %s", e)
            return f"[LLM服务调用失败]\n错误信息: {str(e)}\n\n请稍后重试或检查API配置。"

    def generate_text_parallel(self, prompts: List[Tuple[str, int, float]],
                               max_workers: int = 3) -> List[str]:
        """Generate text for several prompts concurrently.

        Args:
            prompts: List of (prompt, max_tokens, temperature) tuples.
            max_workers: Upper bound on worker threads.

        Returns:
            Generated texts in the same order as *prompts*; a failed item
            yields a fallback error string instead of raising.
        """
        # Guard: an empty list previously raised inside ThreadPoolExecutor
        # (max_workers=0) and logged a spurious error before falling back.
        if not prompts:
            return []

        if not self.client:
            logger.warning("LLM客户端不可用，无法并行生成内容")
            return [f"[LLM服务暂不可用]\n提示词: {prompt[:50]}..." for prompt, _, _ in prompts]

        def _generate_single(prompt_data: Tuple[str, int, float]) -> str:
            """Run one prompt; mirrors generate_text's error handling."""
            prompt, max_tokens, temperature = prompt_data
            try:
                # Honor the per-prompt overrides (previously ignored in the
                # parallel path although applied by the serial fallback).
                response = self.client.invoke(
                    prompt, **self._call_kwargs(max_tokens, temperature)
                )
                return response.content
            except Exception as e:
                logger.error("并行LLM生成失败: %s", e)
                return f"[LLM服务调用失败]\n错误信息: {str(e)}"

        try:
            # Fan the prompts out over a thread pool; order is restored via
            # the future -> index mapping.
            with concurrent.futures.ThreadPoolExecutor(
                max_workers=min(max_workers, len(prompts))
            ) as executor:
                future_to_index = {
                    executor.submit(_generate_single, prompt_data): i
                    for i, prompt_data in enumerate(prompts)
                }
                results: List[Optional[str]] = [None] * len(prompts)
                for future in concurrent.futures.as_completed(future_to_index):
                    index = future_to_index[future]
                    try:
                        results[index] = future.result()
                    except Exception as e:
                        logger.error("并行任务执行异常: %s", e)
                        results[index] = f"[并行生成失败]\n错误信息: {str(e)}"
            return results
        except Exception as e:
            logger.error("并行生成过程失败: %s", e)
            # If parallel processing fails, fall back to serial generation.
            return [self.generate_text(prompt, max_tokens, temperature)
                    for prompt, max_tokens, temperature in prompts]

    def is_available(self) -> bool:
        """Return True when a real LLM client was successfully created."""
        return self.client is not None


# Module-level accessor
def get_llm_client(config_path: str = "config.json") -> LLMClient:
    """Return the process-wide shared :class:`LLMClient` instance.

    Args:
        config_path: Path to the configuration file; only honored on the very
            first call, since LLMClient is a singleton.

    Returns:
        The shared LLMClient instance.
    """
    shared_client = LLMClient(config_path)
    return shared_client
