import os

from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Hugging Face access token, read from the environment instead of being
# hard-coded. NOTE(review): the original file embedded a live token in
# source control — that credential should be revoked/rotated immediately.
# token may be None, which is fine for public models like this one.
token = os.environ.get("HF_TOKEN")
model_name = "uer/gpt2-chinese-cluecorpussmall"
# Local cache directory for downloaded model files; override with
# HF_CACHE_DIR, defaulting to the original project path for compatibility.
cache_dir = os.environ.get(
    "HF_CACHE_DIR",
    "/Users/wupeng/pythonProjects/huggingface/models/uer/gpt2-chinese-cluecorpussmall",
)
# Load the causal-LM weights (downloads on first run, then reuses cache_dir).
model = AutoModelForCausalLM.from_pretrained(model_name, cache_dir=cache_dir, token=token)
# Load the matching tokenizer with the same cache/auth settings.
tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir, token=token)

