import os
from dotenv import load_dotenv
from openai import OpenAI

class SiliconFlowLLM():
    """Thin wrapper around an OpenAI-compatible SiliconFlow chat endpoint.

    Credentials, base URL, and the three model names (coder/NL2SQL,
    reasoning, helper) are read from environment variables via ``.env``.
    Each ``call_*`` method sends one system+user exchange to its model
    and returns the assistant's reply text.
    """

    def __init__(self):
        load_dotenv()
        self.SILICON_FLOW_API_KEY = os.getenv("SILICON_FLOW_API_KEY")
        self.SILICON_FLOW_BASE_URL = os.getenv("SILICON_FLOW_BASE_URL")

        # Model names, one per role (may be None if the env var is unset).
        self.SILICON_FLOW_REASONING_MODEL = os.getenv("SILICON_FLOW_REASONING_MODEL")
        self.SILICON_FLOW_NL2SQL_MODEL = os.getenv("SILICON_FLOW_NL2SQL_MODEL")
        self.SILICON_FLOW_HELPER_MODEL = os.getenv("SILICON_FLOW_HELPER_MODEL")

        # Startup banner showing which model fills each role.
        print("-" * 50)
        print(f"""\ncoder llm: {self.SILICON_FLOW_NL2SQL_MODEL}\nreasoning llm: {self.SILICON_FLOW_REASONING_MODEL}\nhelper llm: {self.SILICON_FLOW_HELPER_MODEL}\n""")
        print("-" * 50)

        # OpenAI SDK client pointed at the SiliconFlow-compatible base URL.
        self.client = OpenAI(api_key=self.SILICON_FLOW_API_KEY,
                             base_url=self.SILICON_FLOW_BASE_URL)

    def _chat(self, model, query, prompt, temperature=0.7, max_tokens=4096):
        """Send one system(*prompt*)+user(*query*) exchange to *model*.

        Shared request logic for all three public call methods; returns
        the assistant message content of the first choice.
        """
        response = self.client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": prompt},
                {"role": "user", "content": query}
            ],
            temperature=temperature,
            max_tokens=max_tokens
        )
        return response.choices[0].message.content

    def call_coder(self, query, prompt):
        """Ask the NL2SQL (coder) model; returns the reply text."""
        return self._chat(self.SILICON_FLOW_NL2SQL_MODEL, query, prompt)

    def call_llm(self, query, prompt):
        """Ask the reasoning model; returns the reply text."""
        return self._chat(self.SILICON_FLOW_REASONING_MODEL, query, prompt)

    def call_helper(self, query, prompt):
        """Ask the helper model; returns the reply text."""
        return self._chat(self.SILICON_FLOW_HELPER_MODEL, query, prompt)


# import os
# import sys
# import requests
# from dotenv import load_dotenv
# from openai import OpenAI
#
#
# def debug_siliconflow_connection():
#     """调试 SiliconFlow LLM 连接问题"""
#     # 加载环境变量
#     load_dotenv()
#
#     # 获取关键配置
#     api_key = os.getenv("SILICON_FLOW_API_KEY")
#     base_url = os.getenv("SILICON_FLOW_BASE_URL")
#     helper_model = os.getenv("SILICON_FLOW_HELPER_MODEL")
#     reasoning_model = os.getenv("SILICON_FLOW_REASONING_MODEL")
#     nl2sql_model = os.getenv("SILICON_FLOW_NL2SQL_MODEL")
#
#     print("=== SiliconFlow 连接调试 ===")
#     print(f"API Key: {'已设置' if api_key else '未设置或为空'}")
#     print(f"Base URL: {base_url}")
#     print(f"Helper Model: {helper_model}")
#     print(f"Reasoning Model: {reasoning_model}")
#     print(f"NL2SQL Model: {nl2sql_model}")
#
#     # 检查 URL 格式
#     if base_url:
#         # 检查 URL 协议
#         if not (base_url.startswith("http://") or base_url.startswith("https://")):
#             print("\n⚠️ 警告: URL 应以 http:// 或 https:// 开头")
#             suggested_url = f"https://{base_url}"
#             print(f"建议尝试: {suggested_url}")
#
#             # 检查 URL 结尾是否有 /v1
#         if not base_url.endswith("/v1") and not base_url.endswith("/v1/"):
#             print("\n⚠️ 警告: OpenAI 兼容 API 通常以 /v1 结尾")
#             suggested_url = f"{base_url}/v1" if not base_url.endswith("/") else f"{base_url}v1"
#             print(f"建议尝试: {suggested_url}")
#
#             # 尝试连接到 API 根路径
#     print("\n=== 测试 API 连接 ===")
#     try:
#         # 尝试访问基础 URL
#         base_response = requests.get(base_url, timeout=10)
#         print(f"基础 URL 响应状态码: {base_response.status_code}")
#         if base_response.status_code == 200:
#             print("✅ 基础 URL 连接成功")
#         else:
#             print(f"❌ 基础 URL 连接失败: {base_response.status_code}")
#             if base_response.text:
#                 print(f"响应内容: {base_response.text[:500]}")
#     except Exception as e:
#         print(f"❌ 基础 URL 连接错误: {str(e)}")
#
#         # 尝试访问模型列表
#     models_url = f"{base_url}/models" if base_url.endswith("/v1") or base_url.endswith(
#         "/v1/") else f"{base_url}/v1/models"
#     try:
#         # 设置请求头
#         headers = {"Authorization": f"Bearer {api_key}"} if api_key else {}
#         models_response = requests.get(models_url, headers=headers, timeout=10)
#         print(f"\n模型列表 URL ({models_url}) 响应状态码: {models_response.status_code}")
#         if models_response.status_code == 200:
#             print("✅ 模型列表 API 访问成功")
#             try:
#                 models_data = models_response.json()
#                 print(f"可用模型: {[model.get('id') for model in models_data.get('data', [])]}")
#             except:
#                 print(f"响应内容不是有效的 JSON: {models_response.text[:500]}")
#         else:
#             print(f"❌ 模型列表 API 访问失败: {models_response.status_code}")
#             print(f"响应内容: {models_response.text[:500]}")
#     except Exception as e:
#         print(f"❌ 模型列表 API 访问错误: {str(e)}")
#
#         # 尝试使用 OpenAI 客户端连接
#     print("\n=== 测试 OpenAI 客户端连接 ===")
#     try:
#         client = OpenAI(api_key=api_key, base_url=base_url)
#         # 尝试列出模型
#         try:
#             models = client.models.list()
#             print("✅ OpenAI 客户端模型列表调用成功")
#             print(f"可用模型: {[model.id for model in models.data]}")
#         except Exception as e:
#             print(f"❌ OpenAI 客户端模型列表调用失败: {str(e)}")
#
#             # 尝试简单的聊天完成请求 (使用尽可能简单的参数)
#         try:
#             # 尝试使用简单的模型名称
#             simple_model_names = [
#                 helper_model,  # 原始名称
#                 helper_model.split('/')[-1],  # 去掉组织前缀
#                 helper_model.lower()  # 全小写
#             ]
#
#             for model_name in simple_model_names:
#                 try:
#                     print(f"\n尝试使用模型名: {model_name}")
#                     response = client.chat.completions.create(
#                         model=model_name,
#                         messages=[{"role": "user", "content": "Hello"}],
#                         max_tokens=10
#                     )
#                     print(f"✅ 聊天完成请求成功 (模型: {model_name})")
#                     print(f"响应: {response.choices[0].message.content}")
#                     # 找到工作的模型后停止尝试
#                     break
#                 except Exception as model_e:
#                     print(f"❌ 模型 '{model_name}' 调用失败: {str(model_e)}")
#         except Exception as e:
#             print(f"❌ 聊天完成请求失败: {str(e)}")
#
#     except Exception as e:
#         print(f"❌ OpenAI 客户端初始化失败: {str(e)}")
#
#     print("\n=== 调试总结 ===")
#     print("1. 检查你的 .env 文件中的 SILICON_FLOW_BASE_URL 是否正确")
#     print("2. 确保 URL 格式正确，包含协议前缀和适当的路径")
#     print("3. 验证 API 服务器是否正在运行并且可以访问")
#     print("4. 检查模型名称格式，可能需要简化或更改格式")
#     print("5. 确认 API 服务器是否兼容 OpenAI API 规范")
#
#
# if __name__ == "__main__":
#     debug_siliconflow_connection()