import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer.
# The checkpoint path can be overridden via the MODEL_PATH environment
# variable; defaults to the local snapshot below.
# Hub equivalent: 'Qwen/Qwen1.5-1.8B'
model_path = os.environ.get('MODEL_PATH', '/home/madengyun/Qwen1.5-1.8B')
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.float16,  # half precision to reduce GPU memory usage
    device_map="auto"           # automatically place layers on GPU/CPU
)


# Prompt to complete.
input_text = "Who are you"

# Tokenize the prompt and move the tensors to the model's device.
inputs = tokenizer(input_text, return_tensors="pt").to(model.device)

# Generate a completion.
outputs = model.generate(
    **inputs,
    max_new_tokens=50,          # cap on the number of newly generated tokens
    do_sample=True,             # enable stochastic sampling
    temperature=0.7,            # sampling temperature (lower = more deterministic)
    # Explicit pad id: Qwen tokenizers define no pad token, so without this
    # generate() emits a "Setting pad_token_id to eos_token_id" warning.
    pad_token_id=tokenizer.eos_token_id,
)

# Decode the generated ids back to text, dropping special tokens.
print(tokenizer.decode(outputs[0], skip_special_tokens=True))