# Load the Qwen-2.5 tokenizer and 4-bit quantized model.
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "Qwen-2.5"  # replace with your local model path or hub id
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
# Fix: the original passed `load_in_4bit=Ture` — a typo that raises
# NameError before the model is ever loaded.
# NOTE(review): with 4-bit quantization the model is usually placed on GPU
# automatically; the explicit .cuda() is kept from the original — confirm
# it is still needed for your transformers/bitsandbytes versions.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    load_in_4bit=True,
    trust_remote_code=True,
).cuda()

def query_model(prompt):
    """Generate a completion for *prompt* using the globally loaded model.

    Args:
        prompt: Plain-text prompt string.

    Returns:
        The decoded output sequence as a string. Note this decodes the
        full sequence, so the returned text includes the prompt itself.
    """
    inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
    # Fix: also pass the attention mask — calling generate() with only
    # input_ids makes transformers guess the mask from pad tokens, which
    # emits a warning and can mis-handle padding.
    outputs = model.generate(
        inputs.input_ids,
        attention_mask=inputs.attention_mask,
        max_length=512,  # counts prompt tokens too; long prompts shorten the reply
        do_sample=True,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Quick smoke test: ask the model to introduce itself.
prompt = "你好！请介绍一下你自己。"
print("模型回答:", (response := query_model(prompt)))


class QwenAgent:
    """Minimal chat agent wrapping a causal LM with a rolling dialogue history."""

    def __init__(self, model, tokenizer):
        self.model = model
        self.tokenizer = tokenizer
        # Dialogue history as alternating "用户：…" / "AI：…" lines.
        # NOTE(review): unbounded — long sessions will eventually exceed the
        # 512-token generation budget; consider truncating old turns.
        self.memory = []

    def respond(self, user_input):
        """Generate a reply to *user_input*, conditioning on stored history.

        The new exchange is appended to ``self.memory`` so later turns see it.

        Returns:
            The model's reply text (new tokens only, prompt excluded).
        """
        context = "\n".join(self.memory)
        prompt = f"以下是对话历史：\n{context}\n用户：{user_input}\nAI："
        inputs = self.tokenizer(prompt, return_tensors="pt").to("cuda")
        outputs = self.model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,  # avoid pad-token guessing
            max_length=512,
            do_sample=True,
        )
        # Fix: decode only the newly generated tokens. The original decoded
        # outputs[0] in full, so each stored "AI" turn contained the entire
        # prompt (history included) — the history duplicated itself every
        # turn and the user saw the prompt echoed back.
        new_tokens = outputs[0][inputs.input_ids.shape[1]:]
        response = self.tokenizer.decode(new_tokens, skip_special_tokens=True)
        self.memory.append(f"用户：{user_input}")
        self.memory.append(f"AI：{response}")
        return response

# Build the agent and drive it from an interactive loop.
qwen_agent = QwenAgent(model, tokenizer)

# Exit on any of the quit keywords (case-insensitive for the English ones).
while (user_input := input("你：")).lower() not in ("退出", "exit", "quit"):
    response = qwen_agent.respond(user_input)
    print("AI：", response)
