import os
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import TextStreamer
from modelscope import snapshot_download
import torch
from vllm import LLM, SamplingParams


# def Qwen_Coder_7B_inference(info):

#     """
#     # 指定要下载的模型名称
#     model_name = 'Qwen/Qwen2.5-7B-Instruct'  # 这是您想要下载的模型名称

#     # 指定缓存目录（可选）
#     cache_dir = './models'  # 模型将下载到这个目录

#     # 下载模型
#     snapshot_download(model_name, cache_dir=cache_dir)

#     print(f"Model {model_name} downloaded to {cache_dir}")
#     """
 
#     # 检查是否有可用的GPU
#     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#     print(f"Using device: {device}")
    
#     # 定义模型路径
#     mode_name_or_path = '/changzheng/PracticalTraining/lyj/lingdongai/models/model/Qwen/Qwen2___5-7B-Instruct'
    
#     def get_model():
#         # 从预训练的模型中获取 tokenizer
#         tokenizer = AutoTokenizer.from_pretrained(mode_name_or_path, trust_remote_code=True)
#         tokenizer.pad_token = tokenizer.eos_token
#         # 从预训练的模型中获取模型，并设置模型参数
#         model = AutoModelForCausalLM.from_pretrained(mode_name_or_path, torch_dtype=torch.bfloat16).to(device)
    
#         return tokenizer, model
    
#     # 加载 Qwen2.5 的 model 和 tokenizer
#     tokenizer, model = get_model()
    
#     prompt = "根据用户的输入, 合适的回答问题"
#     messages = [
#         {"role": "system", "content": info},
#         {"role": "user", "content": prompt}
#     ]
#     text = tokenizer.apply_chat_template(
#         messages,
#         tokenize=False,
#         add_generation_prompt=True
#     )
#     model_inputs = tokenizer([text], return_tensors="pt").to(device)
    
#     generated_ids = model.generate(
#         **model_inputs,
#         max_new_tokens=512
#     )
#     generated_ids = [
#         output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
#     ]
    
#     response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
#     print(response)
#     return response


# Path to the LoRA-finetuned Qwen2.5-7B-Instruct model.
model_name_or_path = '/changzheng/PracticalTraining/lyj/lingdongai/models/qwen/qwen2_5-7b-instruct-lingdonglora'

# Module-level globals, populated by qwen_7b_init_vllm().
llm = None  # vLLM engine instance
tokenizer = None  # HuggingFace tokenizer matching model_name_or_path
sampling_params = None  # SamplingParams; reassigned on every inference call

# 初始化模型和 tokenizer
# Initialize the vLLM engine and tokenizer into the module-level globals.
def qwen_7b_init_vllm():
    """Load the LoRA-finetuned Qwen2.5-7B model with vLLM on GPU 2.

    Populates the module-level ``llm`` and ``tokenizer`` globals and
    returns a short status message once loading completes.

    Returns:
        str: the status message that is also printed.
    """
    # Restrict this process to physical GPU 2.
    # NOTE(review): CUDA_VISIBLE_DEVICES only takes effect if set before
    # CUDA is first initialized in this process — confirm no earlier CUDA
    # call (e.g. from another module) happens before this function runs.
    os.environ["CUDA_VISIBLE_DEVICES"] = "2"

    global llm, tokenizer

    # Single-GPU inference. (Renamed from `sum`, which shadowed the builtin.)
    tensor_parallel_size = 1

    # With CUDA_VISIBLE_DEVICES="2", the only visible device has index 0.
    torch.cuda.set_device(0)

    # Load the vLLM engine and the matching tokenizer.
    llm = LLM(
        model=model_name_or_path,
        tensor_parallel_size=tensor_parallel_size,
        dtype=torch.float16,
    )
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    text = "通义千问7b_vllm加载完毕"
    print(text)
    return text

# 使用 VLLM 进行推理
# Run one chat-completion round through the vLLM engine.
def qwen_7B_inference(info, prompt="根据用户的输入, 合适的回答问题"):
    """Generate a reply for ``info`` using the globally loaded vLLM engine.

    Requires :func:`qwen_7b_init_vllm` to have been called first so the
    module-level ``llm`` and ``tokenizer`` are populated.

    Args:
        info: the user's message (placed in the "user" role).
        prompt: the system prompt (placed in the "system" role). Now has a
            default so the single-argument call in ``__main__`` works — it
            previously raised TypeError for a missing positional argument.

    Returns:
        str | None: the first completion's text, or None if vLLM returned
        no outputs.
    """
    messages = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": info}
    ]

    # Render the chat template into a plain prompt string.
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )

    # Free cached GPU memory before generating.
    torch.cuda.empty_cache()

    global sampling_params
    sampling_params = SamplingParams(temperature=0.7, top_p=0.8, repetition_penalty=1.05, max_tokens=512)

    # NOTE(review): vLLM manages its own precision internally; autocast here
    # is likely a no-op for llm.generate — confirm before removing. Kept to
    # preserve existing behavior.
    with torch.cuda.amp.autocast():
        outputs = llm.generate([text], sampling_params)

    # Return the first completion's text (one prompt in, so at most one output).
    for output in outputs:
        return output.outputs[0].text


if __name__ == '__main__':
    # Interactive REPL: load the model once, then answer until interrupted.
    qwen_7b_init_vllm()
    while True:
        user_input = input("请输入一些内容: ")
        # Pass the system prompt explicitly: the original single-argument
        # call raised TypeError because qwen_7B_inference requires `prompt`.
        generated_text = qwen_7B_inference(user_input, "根据用户的输入, 合适的回答问题")
        print("灵动AI:", generated_text)

        # Free cached GPU memory between turns.
        torch.cuda.empty_cache()