# -*- coding: utf-8 -*-
# @Author: hunan
# @Date: 2024/12/3 14:45
# @Description:
import json
import time

import requests

from product_upload.util.basic.common_util import extract_and_merge_json

# NOTE(review): all of the API keys below are plaintext secrets committed to
# source control; they should be rotated and loaded from environment
# variables or a secrets manager instead — TODO confirm with owner.

# Azure OpenAI keys; one is selected per request via int(time.time()) % 2.
openai_key = ["1oiNn7T7SjwhqFDA2TpRqoBezw8yzI8N88Zbh8emel61hVJv7P8oJQQJ99BEACYeBjFXJ3w3AAABACOGzL4o", "5o0jchoQZYCvM69j6H6CaavC29V6YiPz9Nj5wR8o5ncXpZTfcJy3JQQJ99BEACYeBjFXJ3w3AAABACOGohWQ"]

# Nebius Studio JWT keys for the Llama backend; rotated the same way.
llama_key = [
    "eyJhbGciOiJIUzI1NiIsImtpZCI6IlV6SXJWd1h0dnprLVRvdzlLZWstc0M1akptWXBvX1VaVkxUZlpnMDRlOFUiLCJ0eXAiOiJKV1QifQ.eyJzdWIiOiJnb29nbGUtb2F1dGgyfDEwMTMyNjE3NTk2NjYwODY0MzY5MCIsInNjb3BlIjoib3BlbmlkIG9mZmxpbmVfYWNjZXNzIiwiaXNzIjoiYXBpX2tleV9pc3N1ZXIiLCJhdWQiOlsiaHR0cHM6Ly9uZWJpdXMtaW5mZXJlbmNlLmV1LmF1dGgwLmNvbS9hcGkvdjIvIl0sImV4cCI6MTg5NDY3NTczNiwidXVpZCI6IjM5NTkwOTRhLWY3ZDMtNDFjNC05ZGEzLThmODIwODdlM2IwNSIsIm5hbWUiOiJMbGFtYTMuMy0yIiwiZXhwaXJlc19hdCI6IjIwMzAtMDEtMTVUMDI6NDg6NTYrMDAwMCJ9.N0ONc4K0QQRbHMWX3dgBHG4DrBI0eu1mCBcPoUqttO4",
    "eyJhbGciOiJIUzI1NiIsImtpZCI6IlV6SXJWd1h0dnprLVRvdzlLZWstc0M1akptWXBvX1VaVkxUZlpnMDRlOFUiLCJ0eXAiOiJKV1QifQ.eyJzdWIiOiJnb29nbGUtb2F1dGgyfDEwMTMyNjE3NTk2NjYwODY0MzY5MCIsInNjb3BlIjoib3BlbmlkIG9mZmxpbmVfYWNjZXNzIiwiaXNzIjoiYXBpX2tleV9pc3N1ZXIiLCJhdWQiOlsiaHR0cHM6Ly9uZWJpdXMtaW5mZXJlbmNlLmV1LmF1dGgwLmNvbS9hcGkvdjIvIl0sImV4cCI6MTg5MjI2MjczMCwidXVpZCI6ImQ0ODJlMWFkLTQxMDItNGI5NC1iMjU5LWY2ZDg0MmFhMzRlYyIsIm5hbWUiOiJMbGFtYSAzLjMiLCJleHBpcmVzX2F0IjoiMjAyOS0xMi0xOFQwNDozMjoxMCswMDAwIn0.0wEHueESC6ngXAedyB6jNaXaSadAljEDjpXO7pFeVzM"]

# SiliconFlow key; only the first element is ever used.
siliconflow_key = ["sk-dnpoqkszrqlmahhsebfhhixomhfwftqpwaoubhnexkzftsdn"]

openrouter_key = ["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"]  # OpenRouter API key

# Unified request entry point
def model_request(is_json, content_text: str, system_text: str = "", model: str = "gpt-4o-mini", try_count=2, site_url="", site_name=""):
    """Dispatch a chat request to the right backend based on the model name.

    Args:
        is_json: if True, the reply is parsed and returned as a dict.
        content_text: the user message.
        system_text: the system prompt (ignored by the SiliconFlow path).
        model: model name; routing rules:
               - names starting with "gpt-"             -> Azure OpenAI
               - names starting with "llama"            -> Nebius Llama 3.3
               - names starting with "deepseek"         -> SiliconFlow
               - names starting with "gemini-2.0-flash" -> OpenRouter
        try_count: number of attempts on failure.
        site_url: optional site URL forwarded to OpenRouter (HTTP-Referer).
        site_name: optional site name forwarded to OpenRouter (X-Title).

    Returns:
        The model reply — a dict when is_json is True, otherwise a string.
        For an unsupported model name, {} or "" respectively.
    """
    if model.startswith("gpt-"):
        return _openai_request(is_json, content_text, system_text, model, try_count)
    elif model.startswith("llama"):
        # startswith("llama") already matches "llama-3.3"; the previous extra
        # equality check was redundant.
        return _llama_request(is_json, content_text, system_text, try_count)
    elif model.startswith("deepseek"):
        # NOTE(review): this branch ignores is_json and system_text and always
        # returns a dict — confirm callers of deepseek models expect that.
        model_name = "deepseek-ai/DeepSeek-V3" if model == "deepseek" else model
        return _siliconflow_request(content_text, model_name)
    elif model.startswith("gemini-2.0-flash"):
        # Map the short alias onto OpenRouter's fully-qualified model id.
        if "lite" in model:
            actual_model = "google/gemini-2.0-flash-lite-001"
        else:
            actual_model = "google/gemini-2.0-flash-001"
        return _openrouter_request(is_json, content_text, system_text, actual_model, try_count, site_url, site_name)
    else:
        print(f"不支持的模型: {model}")
        return {} if is_json else ""

# Azure OpenAI API request
def _openai_request(is_json, content_text: str, system_text: str, model: str = "gpt-4o-mini", try_count=2):
    """Call the Azure OpenAI chat-completions deployment named by *model*.

    Retries up to try_count times; returns the reply content string, or the
    parsed dict when is_json is True. On exhaustion returns {} / "".
    """
    url = f"https://treatlife.openai.azure.com/openai/deployments/{model}/chat/completions?api-version=2024-10-21"
    # Rotate across the configured keys by wall-clock second; using len()
    # (instead of a hard-coded 2) stays correct if keys are added or removed.
    headers = {"Content-Type": "application/json", "api-key": openai_key[int(time.time()) % len(openai_key)]}
    data = {"messages": [{"role": "system", "content": system_text}, {"role": "user", "content": content_text}]}

    for attempt in range(try_count):
        try:
            response = requests.post(url, headers=headers, json=data, timeout=22)
            if response.status_code == 200:
                res_text = response.json()["choices"][0]["message"]["content"]
                if is_json:
                    return extract_and_merge_json(res_text)
                return res_text
            else:
                print(f"请求失败,状态码:{response.status_code}")
        except Exception as e:
            # Timeouts are expected occasionally; only log unexpected errors.
            if "connect timeout" not in str(e) and "timed out" not in str(e):
                print(f"请求出错了, 错误原因:\n{e}")
            time.sleep(1)

    # All attempts failed.
    return {} if is_json else ""

# Llama API request (Nebius Studio)
def _llama_request(is_json, content_text: str, system_text: str, try_count=2):
    """Call the Nebius-hosted Llama-3.3-70B chat endpoint.

    Retries up to try_count times; returns the reply content string, or the
    parsed dict when is_json is True. On exhaustion returns {} / "".
    """
    url = 'https://api.studio.nebius.ai/v1/chat/completions'
    # Rotate across the configured keys by wall-clock second; using len()
    # (instead of a hard-coded 2) stays correct if keys are added or removed.
    headers = {'Content-Type': 'application/json', 'Accept': '*/*', 'Authorization': f'Bearer {llama_key[int(time.time()) % len(llama_key)]}'}
    data = {"temperature": 0, "model": "meta-llama/Llama-3.3-70B-Instruct", "messages": [{"role": "system", "content": f"""{system_text}"""}, {"role": "user", "content": [{"type": "text", "text": f"""{content_text}"""}]}]}

    for attempt in range(try_count):
        try:
            response = requests.post(url, headers=headers, json=data, timeout=22)
            if response.status_code == 200:
                res_text = response.json()["choices"][0]["message"]["content"]
                if is_json:
                    return extract_and_merge_json(res_text)
                return res_text
            else:
                print(f"请求失败,状态码:{response.status_code}")
        except Exception as e:
            # Timeouts are expected occasionally; only log unexpected errors.
            if "connect timeout" not in str(e) and "timed out" not in str(e):
                print(f"请求出错了, 错误原因:\n{e}")
            time.sleep(1)

    # All attempts failed.
    return {} if is_json else ""

# SiliconFlow API request
def _siliconflow_request(content_text: str, model: str = "deepseek-ai/DeepSeek-V3"):
    """Single-shot chat-completion request to SiliconFlow.

    Unlike the other backends this helper takes no system prompt, does not
    retry, and always returns a dict (parsed via extract_and_merge_json);
    it returns {} on any failure.
    """
    url = "https://api.siliconflow.cn/v1/chat/completions"
    headers = {
        'Authorization': 'Bearer ' + siliconflow_key[0],
        'Content-Type': 'application/json'
    }
    data = {
        "model": model,
        "messages": [
            {
                "role": "user",
                "content": content_text
            }
        ],
        "stream": False,
        "temperature": 0.6,
        "top_p": 0.95,
        "top_k": 50,
        "frequency_penalty": 0.5,
        "n": 1,
        "response_format": {
            "type": "text"
        },
    }

    try:
        # timeout added: the original call had none and could block forever;
        # 22s matches the sibling request helpers.
        response = requests.post(url, headers=headers, json=data, timeout=22)
        if response.status_code == 200:
            # Extract the message content string first — previously the whole
            # response.json() dict was passed to extract_and_merge_json, which
            # is inconsistent with every other backend helper in this module.
            res_text = response.json()["choices"][0]["message"]["content"]
            return extract_and_merge_json(res_text)
        else:
            print(f"请求失败,状态码:{response.status_code}")
    except Exception as e:
        print(f"请求出错了, 错误原因:\n{e}")

    return {}

# OpenRouter API request
def _openrouter_request(is_json, content_text: str, system_text: str, model: str, try_count=2, site_url="", site_name=""):
    """Send a chat-completion request through OpenRouter.

    Retries up to try_count times. Returns the reply content string, or the
    parsed dict when is_json is True; {} / "" once all attempts fail.
    """
    url = "https://openrouter.ai/api/v1/chat/completions"
    headers = {
        "Authorization": f"Bearer {openrouter_key[0]}",
        "Content-Type": "application/json",
    }
    # Optional attribution headers recognized by OpenRouter.
    if site_url:
        headers["HTTP-Referer"] = site_url
    if site_name:
        headers["X-Title"] = site_name

    # Assemble the message list: system prompt (if any) then the user turn.
    messages = [{"role": "system", "content": system_text}] if system_text else []
    if isinstance(content_text, str):
        # Plain strings get wrapped in the structured content format.
        user_content = [{"type": "text", "text": content_text}]
    else:
        # Otherwise assume the caller already supplied structured content.
        user_content = content_text
    messages.append({"role": "user", "content": user_content})

    payload = {
        "model": model,
        "messages": messages
    }

    for _ in range(try_count):
        try:
            response = requests.post(url, headers=headers, json=payload, timeout=25)
            if response.status_code == 200:
                reply = response.json()["choices"][0]["message"]["content"]
                return extract_and_merge_json(reply) if is_json else reply
            print(f"请求失败,状态码:{response.status_code}, 响应:{response.text}")
        except Exception as e:
            # Timeouts are common and not worth logging; report the rest.
            if "connect timeout" not in str(e) and "timed out" not in str(e):
                print(f"请求出错了, 错误原因:\n{e}")
            time.sleep(1)

    return {} if is_json else ""

# Backward-compatibility shims
def openai_rsp(is_json, content_text: str, system_text: str, model: str = "gpt-4o-mini", try_count=2):
    """Legacy entry point; delegates to model_request unchanged."""
    return model_request(is_json, content_text, system_text, model=model, try_count=try_count)

def llama_rsp(is_json, content_text: str, system_text: str, try_count=2):
    """Legacy entry point; routes to the Llama backend via model_request."""
    return model_request(is_json, content_text, system_text, model="llama-3.3", try_count=try_count)

def siliconflow_rsp(content_text: str, model: str = "deepseek-ai/DeepSeek-V3"):
    """Legacy entry point; always requests a JSON-parsed reply."""
    return model_request(True, content_text, "", model=model)

if __name__ == '__main__':
    # Manual smoke test: exercises the OpenRouter gemini routing branch and
    # expects a JSON-shaped reply.
    result = model_request(True, "hi", 'Please reply using JSON, {"rsp":""}', model="gemini-2.0-flash-001")
    print(result)