# Run GPT-2 locally / offline
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Path to the local model directory (the one that contains config.json).
# NOTE(review): the original comment claimed only absolute paths are supported;
# from_pretrained generally accepts relative paths too — verify for your setup.
model_dir = "..."

# Load the model and the matching tokenizer from the local directory.
model = AutoModelForCausalLM.from_pretrained(model_dir)
tokenizer = AutoTokenizer.from_pretrained(model_dir)

# Build a text-generation pipeline. Detect CUDA instead of hard-coding
# device="cuda", which raises on machines without a GPU.
device = 0 if torch.cuda.is_available() else -1
generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)

# Generate text from the prompt.
output = generator(
    "你好，我是一款语言模型.",
    max_length=500,
    num_return_sequences=1,
    truncation=True,
    # do_sample defaults to False for GPT-2, which silently ignores
    # temperature/top_k/top_p — enable sampling so they take effect.
    do_sample=True,
    temperature=0.7,
    top_k=50,
    top_p=0.9,
    # GPT-2 defines no pad token; set it explicitly to eos to avoid the
    # per-call "Setting pad_token_id to eos_token_id" warning.
    pad_token_id=tokenizer.eos_token_id,
    clean_up_tokenization_spaces=True,
)
print(output)
