import functools

import openai
from modelscope import AutoModelForCausalLM, AutoTokenizer
from openai import OpenAI

def chatgpt(prompt):
    """Ask gpt-4o-mini to extract the final answer from a math solution.

    Args:
        prompt: Text of the worked solution to extract the answer from.

    Returns:
        The model's reply with leading/trailing whitespace stripped.
    """
    # Use a locally scoped client instead of mutating the openai module's
    # global state (openai.api_key / openai.base_url / openai.default_headers):
    # those assignments persisted for the whole process and silently
    # reconfigured every other caller of the openai package.
    # SECURITY NOTE: the API key is hard-coded in source — move it to an
    # environment variable before sharing/deploying this file.
    client = OpenAI(
        api_key="sk-GbQ7V9AKYPUSLsu88e76A5C7B152449cAbFa0f5eCbD18aE3",
        base_url="https://free.v36.cm/v1/",
        default_headers={"x-foo": "true"},
    )
    completion = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {
                "role": "system",
                "content": "你是一个数学题答案提取助手，只需根据用户输入提取最终答案，答案必须出现在原文中。"
            },
            {"role": "user", "content": prompt},
        ],
    )
    return completion.choices[0].message.content.strip()

def deepseek(prompt):
    """Send *prompt* to DeepSeek's reasoner model and return the reply text."""
    # NOTE(review): API key is hard-coded — consider reading it from an
    # environment variable instead.
    ds_client = OpenAI(
        base_url="https://api.deepseek.com/v1",
        api_key="sk-5efe95c95a7f4c809e044c9d5a459708",
    )

    chat = ds_client.chat.completions.create(
        model="deepseek-reasoner",
        # model="deepseek-chat",
        messages=[{"role": "user", "content": prompt}],
        stream=False,
    )

    return chat.choices[0].message.content

def qwen_72B(prompt):
    """Query qwen2.5-72b-instruct via DashScope's OpenAI-compatible endpoint."""
    # How to get an API key: https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key
    dashscope = OpenAI(
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        api_key="sk-106a65e0d0514713aa29bf1721c834ef",
    )

    conversation = [
        {'role': 'system', 'content': 'You are a helpful assistant.'},
        {'role': 'user', 'content': prompt},
    ]
    # Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
    reply = dashscope.chat.completions.create(
        model="qwen2.5-72b-instruct",
        messages=conversation,
    )
    return reply.choices[0].message.content

def qwen_3B(prompt):
    """Query qwen2.5-3b-instruct via DashScope's OpenAI-compatible endpoint."""
    # How to get an API key: https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key
    dashscope = OpenAI(
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        api_key="sk-106a65e0d0514713aa29bf1721c834ef",
    )

    conversation = [
        {'role': 'system', 'content': 'You are a helpful assistant.'},
        {'role': 'user', 'content': prompt},
    ]
    # Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
    reply = dashscope.chat.completions.create(
        model="qwen2.5-3b-instruct",
        messages=conversation,
    )
    return reply.choices[0].message.content

def qwen_7B(prompt):
    """Query qwen2-7b-instruct via DashScope's OpenAI-compatible endpoint."""
    # How to get an API key: https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key
    dashscope = OpenAI(
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        api_key="sk-106a65e0d0514713aa29bf1721c834ef",
    )

    conversation = [
        {'role': 'system', 'content': 'You are a helpful assistant.'},
        {'role': 'user', 'content': prompt},
    ]
    # Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
    reply = dashscope.chat.completions.create(
        model="qwen2-7b-instruct",
        messages=conversation,
    )
    return reply.choices[0].message.content

@functools.lru_cache(maxsize=1)
def _load_qwen_3b():
    """Load the local Qwen2.5-3B-Instruct model/tokenizer once and cache them."""
    model_name = '/home/work_nfs9/sywang/ckpt/Qwen2.5-3B-Instruct'
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype="auto",   # take dtype from the checkpoint config
        device_map="auto"     # place weights on available devices automatically
    )
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    return model, tokenizer


def qwen_3B_local(prompt):
    """Generate a reply to *prompt* with a locally stored Qwen2.5-3B-Instruct.

    The model and tokenizer are loaded lazily on first use and cached, so
    repeated calls no longer reload multi-GB weights from disk on every
    invocation (the original implementation did exactly that).

    Args:
        prompt: User message to answer.

    Returns:
        The decoded model response, with special tokens removed.
    """
    model, tokenizer = _load_qwen_3b()

    messages = [
        {"role": "system", "content": "你是Qwen，一个由阿里云开发的智能助手。"},
        {"role": "user", "content": prompt}
    ]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
    generated_ids = model.generate(
        **model_inputs,
        max_new_tokens=256,
        do_sample=True,       # sampling => output is non-deterministic
        temperature=0.95,
        top_p=0.9
    )
    # Drop the prompt tokens so only the newly generated tail is decoded.
    generated_ids = [
        output_ids[len(input_ids):]
        for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]

    return response


@functools.lru_cache(maxsize=1)
def _load_qwen_7b():
    """Load the local Qwen2.5-7B-Instruct model/tokenizer once and cache them."""
    model_name = '/home/work_nfs9/sywang/ckpt/Qwen2.5-7B-Instruct'
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype="auto",   # take dtype from the checkpoint config
        device_map="auto"     # place weights on available devices automatically
    )
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    return model, tokenizer


def qwen_7B_local(prompt):
    """Generate a reply to *prompt* with a locally stored Qwen2.5-7B-Instruct.

    The model and tokenizer are loaded lazily on first use and cached, so
    repeated calls no longer reload multi-GB weights from disk on every
    invocation (the original implementation did exactly that).

    Args:
        prompt: User message to answer.

    Returns:
        The decoded model response, with special tokens removed.
    """
    model, tokenizer = _load_qwen_7b()

    messages = [
        {"role": "system", "content": "你是Qwen，一个由阿里云开发的智能助手。"},
        {"role": "user", "content": prompt}
    ]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
    generated_ids = model.generate(
        **model_inputs,
        max_new_tokens=256,
        do_sample=True,       # sampling => output is non-deterministic
        temperature=0.95,
        top_p=0.9
    )
    # Drop the prompt tokens so only the newly generated tail is decoded.
    generated_ids = [
        output_ids[len(input_ids):]
        for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]

    return response

# if __name__ == '__main__':
#     print(qwen_3B_local('明传奇《义侠记》是____的作品。\n\nA. 王济\nB. 沈璟\nC. 梁辰鱼\nD. 汤显祖'))