"""
LLM客户端 - 支持多个LLM服务商
"""
import os
import json
from typing import List, Dict, Optional


class LLMClient:
    """Unified interface for chatting with multiple LLM providers.

    Supported providers: "OpenAI", "Claude", "Ollama"; any other
    provider string falls back to a local transformers model.
    """

    def __init__(self):
        # Placeholder slots for cached provider clients. Not populated by
        # the current code; kept for backward compatibility with callers
        # that may inspect them.
        self.openai_client = None
        self.anthropic_client = None

    def chat(
        self,
        messages: List[Dict],
        system_prompt: str,
        model: str,
        temperature: float = 0.8,
        api_key: Optional[str] = None,
        provider: str = "OpenAI"
    ) -> str:
        """
        Call an LLM API and return the response text.

        Args:
            messages: Conversation history; each item is a dict with
                "role" and "content" keys.
            system_prompt: System prompt text.
            model: Model name understood by the selected provider.
            temperature: Sampling temperature.
            api_key: API key; falls back to the provider's environment
                variable when omitted.
            provider: "OpenAI", "Claude", or "Ollama"; any other value
                uses the local transformers fallback.

        Returns:
            str: The LLM's response text.

        Raises:
            Exception: If the underlying provider call fails.
        """
        if provider == "OpenAI":
            return self._call_openai(messages, system_prompt, model, temperature, api_key)
        if provider == "Claude":
            return self._call_claude(messages, system_prompt, model, temperature, api_key)
        if provider == "Ollama":
            return self._call_ollama(messages, system_prompt, model, temperature)
        return self._call_local(messages, system_prompt, model, temperature)

    def _format_messages(
        self,
        messages: List[Dict],
        system_prompt: Optional[str] = None
    ) -> List[Dict]:
        """Normalize history to role/content dicts, optionally prepending a system message.

        Shared by the provider methods so the formatting logic lives in
        one place (Claude passes the system prompt separately, so it
        calls this with system_prompt=None).
        """
        formatted: List[Dict] = []
        if system_prompt is not None:
            formatted.append({"role": "system", "content": system_prompt})
        formatted.extend(
            {"role": msg["role"], "content": msg["content"]} for msg in messages
        )
        return formatted

    def _call_openai(
        self,
        messages: List[Dict],
        system_prompt: str,
        model: str,
        temperature: float,
        api_key: str
    ) -> str:
        """Call the OpenAI chat completions API."""
        try:
            import openai

            # openai>=1.0 removed the module-level `openai.api_key` /
            # `openai.ChatCompletion` API; use a client object instead.
            client = openai.OpenAI(api_key=api_key or os.getenv("OPENAI_API_KEY"))
            response = client.chat.completions.create(
                model=model,
                messages=self._format_messages(messages, system_prompt),
                temperature=temperature,
                max_tokens=1000
            )
            return response.choices[0].message.content.strip()

        except Exception as e:
            # Chain the original error so the root cause is preserved.
            raise Exception(f"OpenAI API调用失败: {str(e)}") from e

    def _call_claude(
        self,
        messages: List[Dict],
        system_prompt: str,
        model: str,
        temperature: float,
        api_key: str
    ) -> str:
        """Call the Anthropic (Claude) messages API."""
        try:
            import anthropic

            client = anthropic.Anthropic(api_key=api_key or os.getenv("ANTHROPIC_API_KEY"))

            # Claude takes the system prompt as a dedicated parameter,
            # not as a message in the list.
            response = client.messages.create(
                model=model,
                max_tokens=1000,
                system=system_prompt,
                messages=self._format_messages(messages),
                temperature=temperature
            )
            return response.content[0].text.strip()

        except Exception as e:
            raise Exception(f"Claude API调用失败: {str(e)}") from e

    def _call_ollama(
        self,
        messages: List[Dict],
        system_prompt: str,
        model: str,
        temperature: float
    ) -> str:
        """Call a local Ollama model via its REST API."""
        try:
            import requests

            response = requests.post(
                "http://localhost:11434/api/chat",
                json={
                    "model": model,
                    "messages": self._format_messages(messages, system_prompt),
                    # Ollama reads sampling parameters from "options";
                    # a top-level "temperature" key is silently ignored.
                    "options": {"temperature": temperature},
                    "stream": False
                },
                # Avoid hanging forever if the local server is stuck.
                timeout=120
            )

            if response.status_code == 200:
                result = response.json()
                return result.get("message", {}).get("content", "").strip()
            # Re-wrapped by the outer handler, matching the original flow.
            raise Exception(f"Ollama API错误: {response.status_code}")

        except Exception as e:
            raise Exception(f"Ollama调用失败: {str(e)}") from e

    def _call_local(
        self,
        messages: List[Dict],
        system_prompt: str,
        model: str,
        temperature: float
    ) -> str:
        """Call a local transformers model (example implementation).

        This is a simple demonstration; a real deployment would cache the
        model/tokenizer instead of reloading them on every call.
        """
        try:
            from transformers import AutoModelForCausalLM, AutoTokenizer

            # Load model and tokenizer (reloaded per call — see docstring).
            tokenizer = AutoTokenizer.from_pretrained(model)
            model_obj = AutoModelForCausalLM.from_pretrained(model)

            # Build a plain-text prompt from the system prompt plus the
            # last 5 messages to bound the context length.
            prompt = system_prompt + "\n\n"
            for msg in messages[-5:]:
                prompt += f"{msg['role']}: {msg['content']}\n"
            prompt += "assistant: "

            # Generate a sampled continuation of up to 200 new tokens.
            inputs = tokenizer.encode(prompt, return_tensors="pt")
            outputs = model_obj.generate(
                inputs,
                max_length=inputs.shape[1] + 200,
                temperature=temperature,
                do_sample=True,
                top_p=0.95
            )

            response = tokenizer.decode(outputs[0], skip_special_tokens=True)
            # Strip the echoed prompt, keeping only the assistant's reply.
            return response[len(prompt):].strip()

        except Exception as e:
            raise Exception(f"本地模型调用失败: {str(e)}") from e
