#!/usr/bin/env python3
import os
import sys
import httpx
from openai import OpenAI, NotFoundError
from dotenv import load_dotenv
import time

# Load environment variables from a local .env file.
load_dotenv()

# Remove proxy settings from the environment before initializing the client.
def clear_proxy_env():
    """Temporarily strip proxy-related variables from the environment.

    Returns:
        list[tuple[str, str]]: (name, value) pairs for every variable that
        was actually removed, suitable for restore_proxy_env().
    """
    removed = []
    for name in ('http_proxy', 'https_proxy', 'HTTP_PROXY', 'HTTPS_PROXY'):
        value = os.environ.pop(name, None)
        if value is not None:
            removed.append((name, value))
    return removed

# Restore previously cleared proxy environment variables.
def restore_proxy_env(cleared_vars):
    """Re-insert (name, value) pairs removed by clear_proxy_env() into os.environ."""
    os.environ.update(dict(cleared_vars))

def create_custom_http_client(base_url=None, timeout=30.0):
    """Build an httpx.Client configured for use with the OpenAI v1 SDK.

    The OpenAI v1 SDK accepts a plain httpx.Client via its ``http_client``
    parameter; only the timeout and base URL are customized here.
    """
    client_options = {
        "timeout": timeout,
        "base_url": base_url,
    }
    return httpx.Client(**client_options)

class DeepSeekAPITester:
    """Diagnose connectivity and model availability for DeepSeek's OpenAI-compatible API.

    Reads its configuration from environment variables in __init__ and offers
    one method per diagnostic step (connection check, embeddings probe, LLM
    probe) plus a summary of suggested fixes.
    """

    def __init__(self):
        # Collect configuration from environment variables, with fallbacks.
        self.deepseek_api_key = os.getenv("DEEPSEEK_API_KEY")
        self.deepseek_base_url = os.getenv("DEEPSEEK_BASE_URL", "https://api.deepseek.com/v1")
        self.embed_model = os.getenv("DEEPSEEK_EMBED_MODEL", os.getenv("EMBEDDING_MODEL", "text-embedding-ada-002"))
        self.llm_model = os.getenv("DEEPSEEK_MODEL", "deepseek-chat")

    def print_config(self):
        """Print the current configuration (key presence, base URL, model names)."""
        print("=" * 60)
        print("DeepSeek API 配置信息")
        print("=" * 60)
        print(f"API Key: {'已设置' if self.deepseek_api_key else '未设置'}")
        print(f"Base URL: {self.deepseek_base_url}")
        print(f"嵌入模型: {self.embed_model}")
        print(f"LLM模型: {self.llm_model}")
        print("=" * 60)

    def check_connection(self):
        """Check network reachability of the API server via GET <base_url>/models."""
        print("\n[测试1] 检查网络连接...")
        try:
            # Use httpx directly so SDK-level problems cannot mask pure
            # network issues; the client is closed by the with-block.
            with httpx.Client(timeout=10) as client:
                url = self.deepseek_base_url.rstrip('/') + '/models'
                print(f"尝试访问URL: {url}")
                headers = {"Authorization": f"Bearer {self.deepseek_api_key}"}
                response = client.get(url, headers=headers)
                print(f"HTTP状态码: {response.status_code}")
                if response.status_code == 200:
                    print("连接成功！")
                    try:
                        models = response.json()
                        # Hoist the repeated models.get('data', []) lookup.
                        model_entries = models.get('data', [])
                        print(f"可用模型数量: {len(model_entries)}")
                        if model_entries:
                            print("前几个可用模型:")
                            for i, model in enumerate(model_entries[:3]):
                                print(f"  {i+1}. {model.get('id', '未知模型')}")
                    except Exception as e:
                        print(f"解析模型列表失败: {e}")
                        print(f"响应内容: {response.text[:200]}...")
                else:
                    print(f"连接失败: {response.status_code}")
                    print(f"响应内容: {response.text[:500]}...")
        except Exception as e:
            print(f"网络连接测试失败: {type(e).__name__}: {str(e)}")

    def test_embedding_api(self):
        """Probe the embeddings endpoint with several candidate model names.

        Returns:
            str | None: the first model name that produces an embedding,
            or None when every candidate fails.
        """
        print("\n[测试2] 测试嵌入模型API...")

        # Candidate model names, most likely first.
        test_models = [
            self.embed_model,          # currently configured model
            "text-embedding-ada-002",  # OpenAI-compatible name
            "deepseek-embed-text-v1",  # possible DeepSeek embedding model versions
            "deepseek-embed-text"
        ]

        test_text = "这是一个测试文本，用于生成嵌入向量。"

        # Try both the SDK's default transport and an explicit httpx client.
        for client_type in ["default", "custom"]:
            print(f"\n--- 使用{client_type} HTTP客户端 ---")

            try:
                # Proxy variables can break the SDK transport; drop them for the call.
                cleared_proxies = clear_proxy_env()
                http_client = None  # tracked so it can be closed in finally

                try:
                    if client_type == "custom":
                        http_client = create_custom_http_client(
                            base_url=self.deepseek_base_url,
                            timeout=30.0
                        )
                        client = OpenAI(
                            api_key=self.deepseek_api_key,
                            base_url=self.deepseek_base_url,
                            http_client=http_client
                        )
                    else:
                        client = OpenAI(
                            api_key=self.deepseek_api_key,
                            base_url=self.deepseek_base_url
                        )

                    # Try each candidate model until one succeeds.
                    for model in test_models:
                        if not model:  # skip empty/unset names
                            continue

                        print(f"\n尝试模型: {model}")
                        try:
                            start_time = time.time()
                            response = client.embeddings.create(
                                input=[test_text],
                                model=model
                            )
                            end_time = time.time()

                            print(f"  调用成功！耗时: {end_time - start_time:.2f}秒")
                            print(f"  返回的向量长度: {len(response.data[0].embedding)}")
                            print(f"  向量前5个值: {response.data[0].embedding[:5]}")

                            # First working model wins.
                            return model
                        except NotFoundError as e:
                            print(f"  模型未找到错误: {str(e)}")
                        except Exception as e:
                            print(f"  API调用错误: {type(e).__name__}: {str(e)}")
                            if hasattr(e, 'status_code'):
                                print(f"  HTTP状态码: {e.status_code}")
                            if hasattr(e, 'response'):
                                try:
                                    print(f"  响应内容: {e.response.json()}")
                                except Exception:  # best-effort: body may not be JSON
                                    pass
                finally:
                    # BUGFIX: close the explicitly created httpx client (was leaked).
                    if http_client is not None:
                        http_client.close()
                    # Restore proxy environment variables.
                    restore_proxy_env(cleared_proxies)
            except Exception as e:
                print(f"  客户端初始化错误: {type(e).__name__}: {str(e)}")

        print("\n所有模型测试失败，请检查DeepSeek API配置！")
        return None

    def test_llm_api(self):
        """Send one chat-completion request to the configured LLM.

        Returns:
            bool: True if the call succeeded, False otherwise.
        """
        print("\n[测试3] 测试LLM API...")

        http_client = None  # tracked so it can be closed in finally
        try:
            # Proxy variables can break the SDK transport; drop them for the call.
            cleared_proxies = clear_proxy_env()

            try:
                # Explicit httpx client avoids the legacy `proxies` kwarg problem.
                http_client = create_custom_http_client(
                    base_url=self.deepseek_base_url,
                    timeout=30.0
                )

                client = OpenAI(
                    api_key=self.deepseek_api_key,
                    base_url=self.deepseek_base_url,
                    http_client=http_client
                )

                print(f"尝试调用模型: {self.llm_model}")
                response = client.chat.completions.create(
                    model=self.llm_model,
                    messages=[{"role": "user", "content": "你好，这是一个测试。"}]
                )

                print("调用成功！")
                print(f"响应内容: {response.choices[0].message.content}")
                return True
            finally:
                # BUGFIX: close the explicitly created httpx client (was leaked).
                if http_client is not None:
                    http_client.close()
                # Restore proxy environment variables.
                restore_proxy_env(cleared_proxies)
        except Exception as e:
            print(f"LLM API调用错误: {type(e).__name__}: {str(e)}")
            if hasattr(e, 'status_code'):
                print(f"HTTP状态码: {e.status_code}")
            if hasattr(e, 'response'):
                try:
                    print(f"响应内容: {e.response.json()}")
                except Exception:  # best-effort: body may not be JSON
                    pass
            return False

    def suggest_fixes(self):
        """Print actionable suggestions based on the current configuration."""
        print("\n[解决方案建议]")
        print("=" * 60)

        # API key present and not the placeholder value?
        if not self.deepseek_api_key or self.deepseek_api_key == "your-api-key-here":
            print("1. 请确保设置了有效的DeepSeek API Key")
        else:
            print("1. API Key已设置")

        # Base URL matches the documented endpoint?
        if self.deepseek_base_url != "https://api.deepseek.com/v1":
            print("2. DeepSeek API Base URL可能不正确，建议使用: https://api.deepseek.com/v1")
        else:
            print("2. Base URL设置为标准值")

        # Candidate embedding model names.
        print("3. 尝试使用以下嵌入模型名称之一:")
        print("   - deepseek-embed-text")
        print("   - deepseek-embed-text-v1")
        print("   - text-embedding-ada-002 (OpenAI兼容模式)")

        # Connectivity / quota reminders.
        print("4. 确保您的网络可以访问DeepSeek API服务器")
        print("5. 检查DeepSeek API的使用状态，确保您的API Key有足够的配额")
        print("\n如果以上方法都不奏效，建议临时切换到ChromaDB内置的嵌入模型，修改.env文件中的设置：")
        print("USE_EXTERNAL_EMBEDDING=false")
        print("=" * 60)

if __name__ == "__main__":
    print("\nDeepSeek API 诊断工具")
    print("=" * 60)
    print("该工具将帮助诊断DeepSeek API调用问题")
    print("=" * 60)
    
    tester = DeepSeekAPITester()
    tester.print_config()
    
    # 检查网络连接
    tester.check_connection()
    
    # 测试嵌入API
    best_model = tester.test_embedding_api()
    if best_model:
        print(f"\n找到可用的嵌入模型: {best_model}")
        print(f"建议更新.env文件中的DEEPSEEK_EMBED_MODEL={best_model}")
    
    # 测试LLM API
    tester.test_llm_api()
    
    # 提供解决方案建议
    tester.suggest_fixes()
    
    print("\n诊断完成！")