import os
import httpx
from dotenv import load_dotenv

# Silence HuggingFace tokenizers' fork-parallelism warnings/deadlocks.
# NOTE(review): the original comment said "globally disable CoreML", which
# does not match the variable actually being set here — confirm intent.
os.environ["TOKENIZERS_PARALLELISM"] = "false"

# 创建一个自定义的HTTP客户端，用于处理OpenAI v1 SDK的HTTP请求
# Custom HTTP client factory for the OpenAI v1 SDK's ``http_client`` hook.
def create_custom_http_client(base_url=None, timeout=30.0):
    """Create an httpx.Client suitable for the OpenAI v1 SDK.

    Parameters:
        base_url: optional base URL of the target API. Omitted from the
            client when ``None`` — httpx's own default is the empty string,
            and passing ``base_url=None`` raises a TypeError during URL
            coercion, which made the no-base-url path unusable.
        timeout: request timeout in seconds (default 30.0).

    Returns:
        A configured ``httpx.Client`` instance.
    """
    kwargs = {"timeout": timeout}
    # Only forward base_url when the caller actually provided one.
    if base_url is not None:
        kwargs["base_url"] = base_url
    return httpx.Client(**kwargs)

# 确保在初始化客户端前删除环境变量中的代理设置
# Make sure proxy settings are removed from the environment before
# constructing HTTP clients.
def clear_proxy_env():
    """Temporarily remove proxy-related environment variables.

    httpx (used under the OpenAI v1 SDK) reads HTTP_PROXY / HTTPS_PROXY /
    ALL_PROXY from the environment, which can break client construction.

    Returns:
        A list of ``(name, value)`` pairs for every proxy variable that was
        set, suitable for passing to ``restore_proxy_env`` afterwards.
    """
    # ALL_PROXY/all_proxy added: httpx honors them alongside HTTP(S)_PROXY.
    proxy_vars = ['http_proxy', 'https_proxy', 'HTTP_PROXY', 'HTTPS_PROXY',
                  'all_proxy', 'ALL_PROXY']
    cleared_vars = []

    for var in proxy_vars:
        if var in os.environ:
            cleared_vars.append((var, os.environ.pop(var)))

    return cleared_vars

# 恢复代理环境变量
# Restore proxy environment variables.
def restore_proxy_env(cleared_vars):
    """Put previously cleared proxy variables back into the environment.

    ``cleared_vars`` is the list of ``(name, value)`` pairs returned by
    ``clear_proxy_env``.
    """
    os.environ.update(dict(cleared_vars))

class LLMWrapper:
    """Thin wrapper around an OpenAI-compatible chat-completions client.

    The provider is selected via the ``LLM_PROVIDER`` environment variable:
    ``"openai"`` or ``"deepseek"`` (DeepSeek is reached through the OpenAI
    v1 SDK with a custom base URL).  Initialization never raises: on any
    failure ``self.client`` stays ``None`` and ``generate_answer`` degrades
    to a friendly error payload instead of crashing the service.
    """

    def __init__(self):
        """Load configuration from the environment and build the client."""
        # Pull variables from a local .env file, if present.
        load_dotenv()

        # Provider selection; DeepSeek is the default.
        self.provider = os.getenv("LLM_PROVIDER", "deepseek")

        # Pick a per-provider default model.
        if self.provider == "openai":
            self.model = os.getenv("LLM_MODEL", "gpt-3.5-turbo")
        elif self.provider == "deepseek":
            self.model = os.getenv("DEEPSEEK_MODEL", "deepseek-chat")
        else:
            self.model = "deepseek-chat"

        self.temperature = float(os.getenv("LLM_TEMPERATURE", "0.7"))
        self.client = None

        print(f"初始化LLM包装器: 提供商={self.provider}, 模型={self.model}, 温度={self.temperature}")

        try:
            if self.provider == "openai":
                # OpenAI configuration
                openai_api_key = os.getenv("OPENAI_API_KEY")

                print("初始化OpenAI客户端")

                try:
                    from openai import OpenAI

                    # Temporarily drop proxy env vars that can break httpx.
                    cleared_proxies = clear_proxy_env()

                    try:
                        # BUG FIX: the original referenced an undefined name
                        # ``CustomHTTPClient`` here, so this branch always
                        # failed with NameError and left client=None; call
                        # the module-level factory function instead.
                        custom_http_client = create_custom_http_client(
                            timeout=30.0
                        )

                        self.client = OpenAI(
                            api_key=openai_api_key,
                            http_client=custom_http_client
                        )
                        print("成功初始化OpenAI客户端")
                    finally:
                        # Always restore whatever proxies were cleared.
                        restore_proxy_env(cleared_proxies)
                except Exception as e:
                    print(f"初始化OpenAI客户端失败: {type(e).__name__}: {str(e)}")
                    # Last resort: run without an LLM client.
                    print("使用备选方案：跳过LLM客户端初始化")
                    self.client = None
            elif self.provider == "deepseek":
                # DeepSeek configuration
                deepseek_api_key = os.getenv("DEEPSEEK_API_KEY")
                deepseek_base_url = os.getenv("DEEPSEEK_BASE_URL", "https://api.deepseek.com/v1")
                self.model = os.getenv("DEEPSEEK_MODEL", "deepseek-chat")

                print(f"初始化DeepSeek客户端，模型: {self.model}")
                print(f"DeepSeek API URL: {deepseek_base_url}")

                try:
                    # DeepSeek exposes an OpenAI-compatible API, so the
                    # same SDK is reused with a different base URL.
                    from openai import OpenAI

                    # Temporarily drop proxy env vars that can break httpx.
                    cleared_proxies = clear_proxy_env()

                    try:
                        custom_http_client = create_custom_http_client(
                            base_url=deepseek_base_url,
                            timeout=30.0
                        )

                        self.client = OpenAI(
                            api_key=deepseek_api_key,
                            base_url=deepseek_base_url,
                            http_client=custom_http_client
                        )
                        print("成功初始化DeepSeek客户端")
                    finally:
                        # Always restore whatever proxies were cleared.
                        restore_proxy_env(cleared_proxies)
                except Exception as e:
                    # NOTE: the original had a second, identical and
                    # unreachable ``except Exception`` clause after this
                    # one; it has been removed as dead code.
                    print(f"导入OpenAI库或初始化DeepSeek客户端失败: {type(e).__name__}: {str(e)}")
                    print("使用备选方案：跳过LLM客户端初始化")
                    self.client = None
            else:
                print(f"不支持的LLM提供商: {self.provider}")
                self.client = None

        except Exception as e:
            # Never propagate: the service keeps running with LLM disabled.
            print(f"初始化LLM客户端失败: {type(e).__name__}: {str(e)}")
            print(f"异常详细信息: {repr(e)}")
            self.client = None

        print(f"LLM客户端初始化完成，客户端状态: {'已初始化' if self.client else '未初始化'}")

    def generate_answer(self, query, context_docs):
        """Answer *query* using only the supplied context documents.

        Parameters:
            query: the user's question (string).
            context_docs: list of dicts, each carrying a "content" key with
                the retrieved passage text.

        Returns:
            A dict with keys "answer", "model_used", "token_usage" and,
            on failure, "error".  Never raises.
        """
        # Degrade gracefully when initialization failed.
        if not self.client:
            print("警告: LLM客户端未初始化，无法生成回答")
            return {
                "answer": "抱歉，LLM服务暂不可用，请稍后再试。",
                "model_used": self.model,
                "token_usage": None,
                "error": "LLM客户端未初始化"
            }

        # System prompt: restrict the model to the provided context.
        system_prompt = "你是一个智能助手，根据提供的上下文信息回答用户的问题。\n请确保你的回答完全基于提供的上下文信息，不要添加外部知识。\n如果上下文信息不足以回答问题，请明确表示无法回答。"

        # Concatenate the retrieved passages into one context blob.
        context_text = "\n".join([doc["content"] for doc in context_docs])

        user_prompt = f"问题: {query}\n\n上下文信息:\n{context_text}\n\n请基于上述上下文信息回答问题。"

        try:
            print(f"调用LLM API生成回答: 模型={self.model}")

            response = self.client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt}
                ],
                temperature=self.temperature,
                max_tokens=1000
            )

            # Extract the answer text.
            answer = response.choices[0].message.content.strip()

            print("成功生成回答")

            return {
                "answer": answer,
                "model_used": self.model,
                "token_usage": response.usage.model_dump() if hasattr(response, 'usage') else None
            }
        except Exception as e:
            print(f"生成回答时出错: {type(e).__name__}: {str(e)}")

            # Surface extra HTTP details when the SDK exception carries them.
            if hasattr(e, 'status_code'):
                print(f"HTTP状态码: {e.status_code}")
            if hasattr(e, 'response'):
                try:
                    print(f"响应内容: {e.response.json()}")
                except Exception:
                    # Best effort only — the body may not be JSON.  (Was a
                    # bare ``except:``, which also swallowed SystemExit and
                    # KeyboardInterrupt.)
                    pass

            # Return a friendly error payload instead of raising.
            return {
                "answer": "抱歉，生成回答时遇到错误，请稍后再试。",
                "model_used": self.model,
                "token_usage": None,
                "error": str(e)
            }

# 简单测试函数（仅在直接运行此文件时执行）
# Smoke test, executed only when this file is run directly.
if __name__ == "__main__":
    print("开始测试LLM包装器...")
    wrapper = LLMWrapper()
    status = '已初始化' if wrapper.client else '未初始化'
    print(f"测试完成，客户端状态: {status}")