from transformers import AutoModelForCausalLM, AutoTokenizer, TextGenerationPipeline
import torch

# Generate Chinese poetry with a fine-tuned GPT2-Chinese model:
# load the base uer/gpt2-chinese-cluecorpussmall checkpoint from a local
# Hugging Face cache, overlay our fine-tuned weights (net.pt), and print
# one sampled continuation of the prompt "天高".

# Local Hugging Face cache directory holding the base model snapshot.
cache_dir = "../my_model_cache/gpt2-chinese"
# Full path to the cached snapshot (shared by tokenizer and model).
snapshot_path = (
    cache_dir
    + "/models--uer--gpt2-chinese-cluecorpussmall"
    + "/snapshots/c2c0249d8a2731f269414cc3b22dff021f8e07a3"
)

tokenizer = AutoTokenizer.from_pretrained(snapshot_path)
model = AutoModelForCausalLM.from_pretrained(snapshot_path)

# Overlay the weights we trained ourselves (Chinese poetry).
# map_location="cpu" lets a GPU-trained checkpoint load on a CPU-only box;
# weights_only=True stops torch.load from executing arbitrary pickled code.
model.load_state_dict(torch.load("net.pt", map_location="cpu", weights_only=True))

# Build the stock text-generation pipeline. device=0 (first CUDA GPU) was
# hard-coded before, which crashed on CPU-only machines even though the
# weights above are deliberately loaded onto CPU; fall back to CPU (-1).
device = 0 if torch.cuda.is_available() else -1
pipeline = TextGenerationPipeline(model, tokenizer, device=device)
pipline = pipeline  # backward-compat alias for the original misspelled name

print(pipeline("天高", max_length=24))
