
from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
from transformers import AutoTokenizer
import torch

# Report whether a CUDA-capable GPU is visible to torch.
cuda_available = torch.cuda.is_available()
print("CUDA是否可用: ", cuda_available)

# Paths: the base model directory and the directory holding its
# GPTQ-quantized weights (base path with a "-GPTQ" suffix).
model_id = "./gpt2"
out_dir = f"{model_id}-GPTQ"

# Prefer the first CUDA device when one is available; otherwise run on CPU.
device = "cuda:0" if torch.cuda.is_available() else "cpu"

# Reload model and tokenizer
# Reload the quantized model (safetensors format) onto the chosen device,
# together with the tokenizer that was saved alongside it.
load_kwargs = {
    "device": device,
    # "use_triton": True,
    "use_safetensors": True,
}
model = AutoGPTQForCausalLM.from_quantized(out_dir, **load_kwargs)
tokenizer = AutoTokenizer.from_pretrained(out_dir)


# NOTE(review): removed the unused `pipeline` import and the commented-out
# text-generation example; the script uses the direct model.generate() path
# below instead.

# Generate text directly with model.generate().
# Fix: move inputs to the same `device` chosen above instead of a
# hardcoded "cuda:0" — the original crashed on CPU-only machines even
# though a CPU fallback device was computed earlier.
inputs = tokenizer("I have a dream", return_tensors="pt").to(device)
outputs = model.generate(
    input_ids=inputs.input_ids,  # pass as keyword arguments
    attention_mask=inputs.attention_mask,  # avoids pad/attention warnings
    max_length=50,
    do_sample=True,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))