# GPU-accelerated GPT-2 text-generation demo.
#
# Loads the pretrained GPT-2 model and tokenizer, encodes a prompt,
# generates a continuation on GPU when available, and prints the result.

from transformers import GPT2Tokenizer, GPT2LMHeadModel
import torch

# Fall back to CPU when no CUDA device is present instead of crashing
# with a hard-coded "cuda:0".
device = "cuda:0" if torch.cuda.is_available() else "cpu"

tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2')
model.to(device)
model.eval()  # inference only: disable dropout

# Prepare the input text.
# NOTE(review): GPT-2 is an English byte-level-BPE model; a Chinese prompt
# will tokenize but is unlikely to yield a meaningful reply — confirm the
# intended model/prompt pairing.
input_text = "你叫什么呢？"

# Use the tokenizer call API so we get BOTH input_ids and attention_mask;
# passing the mask to generate() avoids the "attention mask not set" warning.
inputs = tokenizer(input_text, return_tensors='pt').to(device)
print(inputs["input_ids"])

# Generate a continuation without tracking gradients (saves memory/compute).
with torch.no_grad():
    output = model.generate(
        **inputs,
        pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token; silence the warning
    )

# Decode the generated token ids back into text.
generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
print("Generated Text:", generated_text)