"""Prefetch script: download and cache the tokenizer and chat model.

Intended to be run once (e.g. in a container build step) so that the
Hugging Face weights are present in the local cache before serving.
"""
from transformers import GPT2Tokenizer, GPT2LMHeadModel, AutoTokenizer, AutoModelForCausalLM

import os

import torch

# .get() instead of [] so the debug print does not raise KeyError when
# TRANSFORMERS_CACHE is unset (it prints None and the default cache is used).
print("TRANSFORMERS_CACHE", os.environ.get("TRANSFORMERS_CACHE"))
print("Fetching model...")

# NOTE(review): tokenizer and model come from different repos. personaGPT is
# presumably a DialoGPT-medium fine-tune sharing the same vocabulary — confirm
# against the af1tang/personaGPT model card before relying on this pairing.
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
model = AutoModelForCausalLM.from_pretrained("af1tang/personaGPT")