import os
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import TextStreamer
from modelscope import snapshot_download
import torch
from vllm import LLM, SamplingParams

# Path to the LoRA fine-tuned model
model_name_or_path = '/changzheng/PracticalTraining/lyj/lingdongai/models/qwen/qwen2_5-14b-instruct-lingdonglora'

# Module-level globals, populated by qwen_14b_init_vllm()
llm = None              # vLLM engine instance
tokenizer = None        # HF tokenizer matching the model
sampling_params = None  # SamplingParams used by the inference helpers

# Initialize the model and tokenizer
def qwen_14b_init_vllm():
    """Load the LoRA fine-tuned Qwen2.5-14B model with vLLM.

    Populates the module-level globals ``llm`` and ``tokenizer``.

    Returns:
        str: A short status message printed once loading completes.
    """
    global llm, tokenizer

    # Restrict this process to physical GPU 1.
    # NOTE(review): setting CUDA_VISIBLE_DEVICES only takes effect if CUDA
    # has not been initialized yet in this process — confirm call order.
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"

    # With CUDA_VISIBLE_DEVICES="1", the visible GPU is re-indexed as 0.
    torch.cuda.set_device(0)

    # Single-GPU inference: no tensor parallelism.
    # (Renamed from `sum`, which shadowed the builtin.)
    tensor_parallel_size = 1

    # Load the model into vLLM and the matching tokenizer.
    llm = LLM(
        model=model_name_or_path,
        tensor_parallel_size=tensor_parallel_size,
        dtype=torch.float16,
    )
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)

    text = "通义千问14b_vllm加载完毕"
    print(text)
    return text

# Run inference with vLLM
def qwen_14B_inference(messages):
    """Generate one chat completion with the loaded vLLM model.

    Args:
        messages: Chat history as a list of ``{"role": ..., "content": ...}``
            dicts, the format expected by ``tokenizer.apply_chat_template``.

    Returns:
        str: The generated assistant reply.
    """
    # Render the chat history into a single prompt string.
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )

    # Free cached (unused) GPU memory before generating.
    torch.cuda.empty_cache()

    global sampling_params
    sampling_params = SamplingParams(temperature=0.7, top_p=0.8, repetition_penalty=1.05, max_tokens=512)

    # NOTE(review): vLLM manages precision internally, so this autocast
    # context is likely a no-op; torch.cuda.amp.autocast is also deprecated
    # in favor of torch.amp.autocast("cuda") — confirm before removing.
    with torch.cuda.amp.autocast():
        outputs = llm.generate([text], sampling_params)

    # llm.generate returns one RequestOutput per prompt; index it directly
    # instead of the old loop-and-return-on-first-iteration pattern, which
    # silently returned None when the list was empty.
    return outputs[0].outputs[0].text


# Adjudication inference
def qwen_14B_judge(messages):
    """Generate an adjudication response for *messages*.

    The original body was a byte-for-byte duplicate of
    ``qwen_14B_inference``; delegate to it so the generation pipeline is
    maintained in one place. Behavior is unchanged.

    Args:
        messages: Chat history in the role/content dict-list format.

    Returns:
        str: The generated assistant reply.
    """
    return qwen_14B_inference(messages)

if __name__ == '__main__':
    # One-time model/tokenizer load, then a simple REPL loop.
    qwen_14b_init_vllm()
    while True:
        user_input = input("请输入一些内容: ")

        # BUG FIX: qwen_14B_inference feeds its argument to
        # tokenizer.apply_chat_template, which expects a list of
        # role/content dicts — not a raw string. Wrap the input.
        messages = [{"role": "user", "content": user_input}]
        generated_text = qwen_14B_inference(messages)
        print("灵动AI:", generated_text)

        # Release cached GPU memory between turns.
        torch.cuda.empty_cache()