from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Path to the locally stored Qwen3-0.6B model directory.
# (A previous dead assignment to "./Qwen3-0.6B-gptqmodel-8bit" was removed:
# it was immediately overwritten and never used.)
model_path = "/workspace/local_model-dir/Qwen3-0.6B"

# 1. Load the tokenizer (fast Rust implementation when available).
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)

# 2. Load the model in fp16; device_map="auto" lets accelerate place it on
#    the best available device(s) (GPU if present, otherwise CPU).
model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16, device_map="auto")

# 3. Tokenize the prompt and move the tensors to wherever the model was
#    placed. Using model.device instead of a hard-coded "cuda" keeps the
#    script working on CPU-only machines and stays consistent with
#    device_map="auto".
prompt = "如何做西红柿炒鸡蛋？"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

# 4. Generate text.
outputs = model.generate(
    **inputs,
    max_new_tokens=200,           # maximum number of tokens to generate
    temperature=0.7,              # randomness control (0~1, higher = more random)
    do_sample=True,               # enable sampling
    top_p=0.9,                    # nucleus sampling (keep top 90% probability mass)
)

# 5. Decode and print the output (prompt + completion, special tokens stripped).
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(response)