# Run GPT-2 locally (offline inference, no network access)
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Local model location: the directory that contains config.json (plus the
# model weights and tokenizer files).
# NOTE(review): from_pretrained also accepts relative paths; an absolute path
# merely avoids ambiguity with Hub model IDs — confirm the original comment's
# "absolute paths only" claim before relying on it.
model_dir = "..."

# Load the model and its tokenizer from the local directory.
model = AutoModelForCausalLM.from_pretrained(model_dir)
tokenizer = AutoTokenizer.from_pretrained(model_dir)

# Build a text-generation pipeline on CPU from the already-loaded objects.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device="cpu")

# Generate text from the prompt.
# Fix: the keyword was misspelled "mu_return_sequences"; transformers rejects
# unknown generation kwargs, so the call failed. The correct parameter is
# num_return_sequences.
output = generator("你好，我是一款语言模型.", max_length=500, num_return_sequences=1)
print(output)
