"""
LLM客户端封装
支持多种LLM提供商: OpenAI, Ollama, 自定义兼容接口
"""

import os
import logging
from typing import List, Dict, Optional, Union, Any
from enum import Enum
import requests
import json

logger = logging.getLogger(__name__)


class LLMProvider(Enum):
    """Identifiers for the supported LLM backends."""

    OPENAI = "openai"   # OpenAI API and compatible hosted services
    OLLAMA = "ollama"   # Local Ollama server spoken to over plain HTTP
    VLLM = "vllm"       # vLLM server exposing an OpenAI-compatible endpoint
    CUSTOM = "custom"   # Any other OpenAI-style chat-completions endpoint


class LLMClient:
    """
    Unified LLM client interface.

    Supports:
    - OpenAI API (including compatible services such as Groq, Together AI)
    - Ollama local deployments (plain HTTP, no SDK)
    - vLLM servers exposing an OpenAI-compatible endpoint
    - Custom OpenAI-style HTTP endpoints

    Configuration precedence for every setting: explicit constructor
    argument > environment variable > provider-specific default.
    """

    # Hard cap (seconds) on raw HTTP requests (Ollama / custom providers)
    # so an unresponsive server cannot block the caller indefinitely.
    REQUEST_TIMEOUT_SECONDS = 300

    def __init__(
        self,
        provider: Optional[str] = None,
        api_key: Optional[str] = None,
        base_url: Optional[str] = None,
        model: Optional[str] = None,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None
    ):
        """
        Initialize the LLM client.

        Args:
            provider: Provider name (openai/ollama/vllm/custom).
                      Falls back to LLM_PROVIDER, then "openai".
            api_key: API key. Falls back to OPENAI_API_KEY.
            base_url: API base URL. Falls back to OPENAI_BASE_URL, then a
                      provider-specific default (see _init_client).
            model: Model name. Falls back to OPENAI_MODEL, then a
                   provider-specific default.
            temperature: Sampling temperature (0-1). Falls back to
                         OPENAI_TEMPERATURE, then 0.7.
            max_tokens: Maximum tokens to generate. Falls back to
                        OPENAI_MAX_TOKENS, then 2000.

        Raises:
            ValueError: if the provider name is not a known LLMProvider.
        """
        self.provider = LLMProvider(provider or os.getenv("LLM_PROVIDER", "openai"))
        self.api_key = api_key or os.getenv("OPENAI_API_KEY", "")

        # Leave base_url/model unresolved (None) here; _init_client applies a
        # provider-appropriate default.  Previously Ollama/vLLM inherited the
        # OpenAI defaults, which made their own fallbacks dead code.
        self.base_url = base_url or os.getenv("OPENAI_BASE_URL") or None
        self.model = model or os.getenv("OPENAI_MODEL") or None

        # Explicit arguments must win over environment variables; the env
        # vars are consulted only when the argument was omitted.
        self.temperature = (
            float(temperature) if temperature is not None
            else float(os.getenv("OPENAI_TEMPERATURE", "0.7"))
        )
        self.max_tokens = (
            int(max_tokens) if max_tokens is not None
            else int(os.getenv("OPENAI_MAX_TOKENS", "2000"))
        )

        # Create the provider-specific client and resolve remaining defaults.
        self._init_client()

        logger.info(f"LLM客户端初始化: provider={self.provider.value}, model={self.model}, base_url={self.base_url}")

    def _init_client(self):
        """Create the provider-specific client and fill in provider defaults."""
        if self.provider == LLMProvider.OPENAI:
            self.base_url = self.base_url or "https://api.openai.com/v1"
            self.model = self.model or "gpt-4"
            try:
                from openai import OpenAI
                self.client = OpenAI(
                    api_key=self.api_key,
                    base_url=self.base_url
                )
                logger.info("OpenAI客户端初始化成功")
            except ImportError:
                logger.error("请安装openai库: pip install openai")
                raise

        elif self.provider == LLMProvider.OLLAMA:
            # Ollama is driven over plain HTTP; no SDK client is needed.
            self.ollama_url = self.base_url or "http://localhost:11434"
            self.base_url = self.ollama_url  # keep base_url consistent for logging
            self.model = self.model or "llama3.1"
            logger.info(f"Ollama客户端初始化: {self.ollama_url}")

        elif self.provider == LLMProvider.VLLM:
            # vLLM exposes an OpenAI-compatible endpoint, so reuse the SDK.
            try:
                from openai import OpenAI
                self.vllm_url = self.base_url or "http://localhost:8000/v1"
                self.base_url = self.vllm_url
                self.model = self.model or "meta-llama/Llama-3.1-8B-Instruct"
                self.client = OpenAI(
                    api_key=self.api_key or "EMPTY",  # vLLM does not validate the key
                    base_url=self.vllm_url
                )
                logger.info(f"vLLM客户端初始化: {self.vllm_url}, model: {self.model}")
            except ImportError:
                logger.error("请安装openai库: pip install openai")
                raise

        else:
            # Custom OpenAI-style endpoint; keep the historical defaults.
            self.base_url = self.base_url or "https://api.openai.com/v1"
            self.model = self.model or "gpt-4"
            logger.info("使用自定义LLM接口")

    def chat(
        self,
        messages: List[Dict[str, str]],
        tools: Optional[List[Dict]] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None
    ) -> Union[Any, str]:
        """
        Unified chat entry point.

        Args:
            messages: Message list, e.g. [{"role": "user", "content": "..."}]
            tools: Tool definitions in OpenAI function-calling format.
            stream: Whether to stream the response.
            temperature: Per-call override of the default temperature.
            max_tokens: Per-call override of the default token limit.

        Returns:
            A provider-specific response object, or a generator of text
            chunks when stream=True.
        """
        temp = self.temperature if temperature is None else temperature
        limit = self.max_tokens if max_tokens is None else max_tokens

        if self.provider == LLMProvider.OPENAI:
            return self._chat_openai(messages, tools, stream, temp, limit)
        if self.provider == LLMProvider.OLLAMA:
            # Ollama's native API takes no OpenAI-style tools here.
            return self._chat_ollama(messages, stream, temp, limit)
        if self.provider == LLMProvider.VLLM:
            # vLLM is OpenAI-compatible, but tool calling needs server-side
            # configuration; _chat_vllm deliberately does not forward tools.
            return self._chat_vllm(messages, tools, stream, temp, limit)
        return self._chat_custom(messages, tools, stream, temp, limit)

    def _chat_openai(
        self,
        messages: List[Dict],
        tools: Optional[List[Dict]],
        stream: bool,
        temperature: float,
        max_tokens: int
    ):
        """Call the OpenAI API; returns the SDK response or a text generator."""
        try:
            kwargs = {
                "model": self.model,
                "messages": messages,
                "temperature": temperature,
                "max_tokens": max_tokens,
                "stream": stream
            }

            # Forward tool definitions only when supplied.
            if tools:
                kwargs["tools"] = tools
                kwargs["tool_choice"] = "auto"

            response = self.client.chat.completions.create(**kwargs)

            if stream:
                return self._handle_openai_stream(response)
            return response

        except Exception as e:
            logger.error(f"OpenAI API调用失败: {str(e)}")
            raise

    def _handle_openai_stream(self, response):
        """Yield text deltas from an OpenAI streaming response."""
        for chunk in response:
            # Some compatible servers emit chunks with an empty choices list
            # (e.g. usage-only frames); guard before indexing.
            if chunk.choices and chunk.choices[0].delta.content:
                yield chunk.choices[0].delta.content

    def _chat_ollama(
        self,
        messages: List[Dict],
        stream: bool,
        temperature: float,
        max_tokens: int
    ):
        """Call the Ollama /api/chat endpoint over plain HTTP."""
        try:
            url = f"{self.ollama_url}/api/chat"
            payload = {
                "model": self.model,
                "messages": messages,
                "stream": stream,
                "options": {
                    "temperature": temperature,
                    # Ollama's name for the generation-length limit.
                    "num_predict": max_tokens
                }
            }

            # Timeout so a dead server cannot hang the caller forever.
            response = requests.post(
                url, json=payload, stream=stream,
                timeout=self.REQUEST_TIMEOUT_SECONDS
            )
            response.raise_for_status()

            if stream:
                return self._handle_ollama_stream(response)
            return response.json()

        except Exception as e:
            logger.error(f"Ollama API调用失败: {str(e)}")
            raise

    def _handle_ollama_stream(self, response):
        """Yield text chunks from an Ollama streaming (JSON-lines) response."""
        for line in response.iter_lines():
            if line:
                data = json.loads(line)
                if data.get("message", {}).get("content"):
                    yield data["message"]["content"]

    def _chat_vllm(
        self,
        messages: List[Dict],
        tools: Optional[List[Dict]],
        stream: bool,
        temperature: float,
        max_tokens: int
    ):
        """
        Call vLLM through the OpenAI-compatible client.

        Note: `tools` is accepted for interface symmetry but intentionally
        NOT forwarded — vLLM rejects "tools" unless the server was started
        with tool calling enabled; callers fall back to prompt engineering.
        """
        try:
            kwargs = {
                "model": self.model,
                "messages": messages,
                "temperature": temperature,
                "max_tokens": max_tokens,
                "stream": stream
            }
            # Streaming and non-streaming use the same call; the SDK returns
            # a stream object when stream=True.
            return self.client.chat.completions.create(**kwargs)

        except Exception as e:
            logger.error(f"vLLM API调用失败: {str(e)}")
            raise

    def _chat_custom(
        self,
        messages: List[Dict],
        tools: Optional[List[Dict]],
        stream: bool,
        temperature: float,
        max_tokens: int
    ):
        """Call a custom OpenAI-style /chat/completions endpoint over HTTP."""
        try:
            headers = {
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json"
            }

            payload = {
                "model": self.model,
                "messages": messages,
                "temperature": temperature,
                "max_tokens": max_tokens,
                "stream": stream
            }

            if tools:
                payload["tools"] = tools

            # Timeout so a dead server cannot hang the caller forever.
            response = requests.post(
                f"{self.base_url}/chat/completions",
                headers=headers,
                json=payload,
                stream=stream,
                timeout=self.REQUEST_TIMEOUT_SECONDS
            )
            response.raise_for_status()

            if stream:
                return self._handle_custom_stream(response)
            return response.json()

        except Exception as e:
            logger.error(f"自定义API调用失败: {str(e)}")
            raise

    def _handle_custom_stream(self, response):
        """Yield text deltas from an SSE ("data: ...") streaming response."""
        for line in response.iter_lines():
            if line:
                line = line.decode('utf-8')
                if line.startswith('data: '):
                    data = line[6:]
                    if data != '[DONE]':
                        chunk = json.loads(data)
                        if chunk.get('choices', [{}])[0].get('delta', {}).get('content'):
                            yield chunk['choices'][0]['delta']['content']

    def extract_response_content(self, response) -> str:
        """
        Extract the text content from a (non-streaming) provider response.

        Args:
            response: LLM response object.

        Returns:
            The message text; "" when the message carries no content (e.g. a
            pure tool-call response); str(response) as a last resort.
        """
        if self.provider in (LLMProvider.OPENAI, LLMProvider.VLLM):
            if hasattr(response, 'choices'):
                content = response.choices[0].message.content
                # content is None on pure tool-call responses; honour -> str.
                return content if content is not None else ""

        elif self.provider == LLMProvider.OLLAMA:
            if isinstance(response, dict):
                return response.get('message', {}).get('content', '')

        else:
            if isinstance(response, dict):
                return response.get('choices', [{}])[0].get('message', {}).get('content', '')

        return str(response)

    def extract_tool_calls(self, response) -> Optional[List[Dict]]:
        """
        Extract tool calls from a provider response.

        Args:
            response: LLM response object.

        Returns:
            A list of {"id", "name", "arguments"} dicts (arguments parsed
            from JSON), or None when the response contains no tool calls.
        """
        if self.provider in (LLMProvider.OPENAI, LLMProvider.VLLM):
            if hasattr(response, 'choices'):
                message = response.choices[0].message
                if hasattr(message, 'tool_calls') and message.tool_calls:
                    return [
                        {
                            "id": tc.id,
                            "name": tc.function.name,
                            # May raise json.JSONDecodeError if the model
                            # emitted malformed argument JSON.
                            "arguments": json.loads(tc.function.arguments)
                        }
                        for tc in message.tool_calls
                    ]

        return None


# ============================================
# Usage examples
# ============================================
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    rule = "=" * 60

    def _show_header(title, first=False):
        # Print the same banner the inline version produced: the first
        # banner has no leading blank line, later ones do.
        print(rule if first else "\n" + rule)
        print(title)
        print(rule)

    # --- Example 1: plain OpenAI chat ---------------------------------
    _show_header("示例1: OpenAI API", first=True)
    try:
        demo = LLMClient(provider="openai")
        chat_history = [
            {"role": "system", "content": "你是一个生物信息学助手。"},
            {"role": "user", "content": "请简单介绍CRISPR-Cas9系统。"}
        ]
        reply = demo.chat(chat_history, temperature=0.7, max_tokens=500)
        print(f"响应: {demo.extract_response_content(reply)}")
    except Exception as e:
        print(f"错误: {e}")

    # --- Example 2: local Ollama model (if one is running) ------------
    _show_header("示例2: Ollama本地模型")
    try:
        demo = LLMClient(
            provider="ollama",
            base_url="http://localhost:11434",
            model="llama3.1"
        )
        reply = demo.chat([{"role": "user", "content": "What is CRISPR?"}])
        print(f"响应: {demo.extract_response_content(reply)}")
    except Exception as e:
        print(f"Ollama未运行或出错: {e}")

    # --- Example 3: function calling ----------------------------------
    _show_header("示例3: 工具调用(Function Calling)")
    try:
        demo = LLMClient(provider="openai")
        protein_tool = {
            "type": "function",
            "function": {
                "name": "search_protein",
                "description": "搜索蛋白质数据库",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "species": {"type": "string", "description": "物种名称"},
                        "protein_type": {"type": "string", "description": "蛋白质类型"}
                    }
                }
            }
        }
        reply = demo.chat(
            [
                {"role": "system", "content": "你是助手,可以调用工具。"},
                {"role": "user", "content": "帮我找大肠杆菌的Acr蛋白"}
            ],
            tools=[protein_tool]
        )
        calls = demo.extract_tool_calls(reply)
        if calls:
            print(f"检测到工具调用: {calls}")
        else:
            print(f"普通响应: {demo.extract_response_content(reply)}")
    except Exception as e:
        print(f"错误: {e}")

    print("\n✅ LLM客户端测试完成")

