import os
from volcenginesdkarkruntime import Ark
import requests
from openai import OpenAI
def text2text(prompt):
    """Default text-to-text entry point.

    Currently delegates to the Tencent Hunyuan backend (``hunyuan``);
    swap the delegate here to change the default model for all callers.
    """
    return hunyuan(prompt)

def text2text_doubao(prompt):
    """Answer *prompt* with the Doubao model via the Volcengine Ark SDK.

    Returns the assistant message content of the first completion choice.
    """
    # SECURITY NOTE(review): API key is hard-coded; move to an env var.
    ark_client = Ark(api_key="3afe9e28-2034-4d05-917c-ee1293e35270")
    chat_messages = [
        {"role": "system", "content": "你是豆包，是由字节跳动开发的 AI 人工智能助手"},
        {"role": "user", "content": f"{prompt}"},
    ]
    result = ark_client.chat.completions.create(
        model="doubao-1-5-pro-32k-250115",
        messages=chat_messages,
    )
    return result.choices[0].message.content

def text2text_qwen_7b_coder(prompt):
    """Send *prompt* to SiliconFlow's Qwen2.5-Coder-7B-Instruct chat endpoint.

    Returns the assistant message content of the first choice.
    Raises requests.HTTPError on a non-2xx response and requests.Timeout
    if the server does not answer within 120 seconds.
    """
    print("text2text_qwen_7b_coder has be used")

    url = "https://api.siliconflow.cn/v1/chat/completions"

    payload = {
        "model": "Qwen/Qwen2.5-Coder-7B-Instruct",
        "messages": [
            {
                "role": "user",
                "content": f"{prompt}"
            }
        ],
        "stream": False,
        "max_tokens": 8192,
        "stop": None,
        "temperature": 0.7,
        "top_p": 0.7,
        "top_k": 50,
        "frequency_penalty": 0.5,
        "n": 1,
        "response_format": {"type": "text"},
    }
    headers = {
        # SECURITY NOTE(review): API key is hard-coded; move to an env var.
        "Authorization": "Bearer sk-wdotugwguprsmeimsowcbehipjlkyaabkgbxnudkkhvjhumm",
        "Content-Type": "application/json"
    }

    # timeout= prevents the call from hanging forever on a stalled connection
    response = requests.post(url, json=payload, headers=headers, timeout=120)
    # Fail fast with a clear HTTPError instead of a confusing KeyError below.
    response.raise_for_status()

    return response.json()['choices'][0]['message']['content']

def text2text_deepseek_7b_math(prompt):
    """Send *prompt* to SiliconFlow's DeepSeek-R1-Distill-Qwen-7B chat endpoint.

    Returns the assistant message content of the first choice.
    Raises requests.HTTPError on a non-2xx response and requests.Timeout
    if the server does not answer within 120 seconds.
    """
    print("text2text_deepseek_7b_math is be used")

    url = "https://api.siliconflow.cn/v1/chat/completions"

    payload = {
        "model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
        "messages": [
            {
                "role": "user",
                "content": f"{prompt}"
            }
        ],
        "stream": False,
        "max_tokens": 8192,
        "stop": None,
        "temperature": 0.7,
        "top_p": 0.7,
        "top_k": 50,
        "frequency_penalty": 0.5,
        "n": 1,
        "response_format": {"type": "text"},
    }
    headers = {
        # SECURITY NOTE(review): API key is hard-coded; move to an env var.
        "Authorization": "Bearer sk-wdotugwguprsmeimsowcbehipjlkyaabkgbxnudkkhvjhumm",
        "Content-Type": "application/json"
    }

    # timeout= prevents the call from hanging forever on a stalled connection
    response = requests.post(url, json=payload, headers=headers, timeout=120)
    # Fail fast with a clear HTTPError instead of a confusing KeyError below.
    response.raise_for_status()

    return response.json()['choices'][0]['message']['content']

def text2text_internlm2_5_7b_chat(prompt):
    """Send *prompt* to SiliconFlow's internlm2_5-7b-chat endpoint.

    Returns the assistant message content of the first choice.
    Raises requests.HTTPError on a non-2xx response and requests.Timeout
    if the server does not answer within 120 seconds.
    """
    print("text2text_internlm2_5_7b_chat is used")

    url = "https://api.siliconflow.cn/v1/chat/completions"

    payload = {
        "model": "internlm/internlm2_5-7b-chat",
        "messages": [
            {
                "role": "user",
                "content": f"{prompt}"
            }
        ],
        "stream": False,
        "max_tokens": 4096,
        "stop": None,
        "temperature": 0.7,
        "top_p": 0.7,
        "top_k": 50,
        "frequency_penalty": 0.5,
        "n": 1,
        "response_format": {"type": "text"},
    }
    headers = {
        # SECURITY NOTE(review): API key is hard-coded; move to an env var.
        "Authorization": "Bearer sk-wdotugwguprsmeimsowcbehipjlkyaabkgbxnudkkhvjhumm",
        "Content-Type": "application/json"
    }

    # timeout= prevents the call from hanging forever on a stalled connection
    response = requests.post(url, json=payload, headers=headers, timeout=120)

    # Debug echo of the raw response body (kept from the original behavior).
    print(response.text)

    # Fail fast with a clear HTTPError instead of a confusing KeyError below.
    response.raise_for_status()

    return response.json()['choices'][0]['message']['content']

def Qwen_Qwen3_235B_A22B(prompt):
    """Send *prompt* to SiliconFlow's Qwen3-235B-A22B endpoint (thinking off).

    Returns the assistant message content of the first choice.
    Raises requests.HTTPError on a non-2xx response and requests.Timeout
    if the server does not answer within 120 seconds.
    """
    url = "https://api.siliconflow.cn/v1/chat/completions"

    payload = {
        "model": "Qwen/Qwen3-235B-A22B",
        "messages": [
            {
                "role": "user",
                "content": f"{prompt}"
            }
        ],
        "stream": False,
        "max_tokens": 8192,
        # Thinking mode disabled; thinking_budget is only a cap if enabled.
        "enable_thinking": False,
        "thinking_budget": 4096,
        "min_p": 0.05,
        "stop": None,
        "temperature": 0.7,
        "top_p": 0.7,
        "top_k": 50,
        "frequency_penalty": 0.5,
        "n": 1,
        "response_format": {"type": "text"},
    }
    headers = {
        # SECURITY NOTE(review): API key is hard-coded; move to an env var.
        "Authorization": "Bearer sk-wdotugwguprsmeimsowcbehipjlkyaabkgbxnudkkhvjhumm",
        "Content-Type": "application/json"
    }

    # timeout= prevents the call from hanging forever on a stalled connection
    response = requests.post(url, json=payload, headers=headers, timeout=120)
    # Fail fast with a clear HTTPError instead of a confusing KeyError below.
    response.raise_for_status()

    return response.json()['choices'][0]['message']['content']


def DeepSeek_V3(prompt):
    """Send *prompt* to a SiliconFlow DeepSeek chat endpoint.

    NOTE(review): the function is named DeepSeek_V3 but the payload requests
    model "deepseek-ai/DeepSeek-R1" — confirm which model is intended.

    Returns the assistant message content of the first choice.
    Raises requests.HTTPError on a non-2xx response and requests.Timeout
    if the server does not answer within 120 seconds.
    """
    url = "https://api.siliconflow.cn/v1/chat/completions"

    payload = {
        "model": "deepseek-ai/DeepSeek-R1",
        "messages": [
            {
                "role": "user",
                "content": f"{prompt}"
            }
        ],
        "stream": False,
        "max_tokens": 8192,
        "enable_thinking": False,
        "thinking_budget": 4096,
        "min_p": 0.05,
        "stop": None,
        "temperature": 0.7,
        "top_p": 0.7,
        "top_k": 50,
        "frequency_penalty": 0.5,
        "n": 1,
        "response_format": {"type": "text"},
    }
    headers = {
        # SECURITY NOTE(review): API key is hard-coded; move to an env var.
        "Authorization": "Bearer sk-wdotugwguprsmeimsowcbehipjlkyaabkgbxnudkkhvjhumm",
        "Content-Type": "application/json"
    }

    # timeout= prevents the call from hanging forever on a stalled connection
    response = requests.post(url, json=payload, headers=headers, timeout=120)
    # Fail fast with a clear HTTPError instead of a confusing KeyError below.
    response.raise_for_status()

    return response.json()['choices'][0]['message']['content']

def deepseek_chat(prompt):
    """Answer *prompt* with DeepSeek's reasoner model via the OpenAI SDK.

    Returns the assistant message content of the first choice.
    """
    # For backward compatibility, `https://api.deepseek.com/v1` also works
    # as base_url.
    # SECURITY NOTE(review): API key is hard-coded; move to an env var.
    client = OpenAI(api_key="sk-90a4749e889f44ef8d0bc6d9b36b21d5", base_url="https://api.deepseek.com")

    response = client.chat.completions.create(
        model="deepseek-reasoner",
        messages=[
            {"role": "system", "content": "You are a helpful assistant"},
            {"role": "user", "content": f"{prompt}"},
        ],
        max_tokens=8192,
        temperature=0.7,
        stream=False
    )

    return response.choices[0].message.content

def hunyuan(prompt):
    """Answer *prompt* with Tencent Hunyuan via its OpenAI-compatible API.

    Returns the assistant message content of the first choice.
    """
    # Build the client against the Hunyuan OpenAI-compatible endpoint.
    # SECURITY NOTE(review): API key is hard-coded; move to an env var.
    client = OpenAI(
        api_key="sk-AvC1TVsx3UqCaIRVu09quOJJ0FycekHaiyIZQs5724xQu9wl",  # Hunyuan API key
        base_url="https://api.hunyuan.cloud.tencent.com/v1",  # Hunyuan endpoint
    )

    completion = client.chat.completions.create(
        model="hunyuan-turbos-latest",
        messages=[
            {
                "role": "user",
                "content": f"{prompt}"
            }
        ],
        # Hunyuan-specific parameters go through extra_body.
        extra_body={
            "enable_enhancement": False,
            "max_tokens": 8192,
        },
    )

    return completion.choices[0].message.content
if __name__ == "__main__":
    # Manual smoke test: send a short Chinese prompt through the Hunyuan backend.
    print(hunyuan("洗面奶没洗"))