"""Gradio demo that completes a prompt in the style of Erich Maria Remarque.

Loads a fine-tuned distilgpt2 checkpoint from the Hugging Face Hub and serves
a simple text-in / text-out interface.
"""
from functools import lru_cache

import gradio as gr
import transformers
from transformers import AutoTokenizer, AutoModelForCausalLM

SAVED_CHECKPOINT = 'mikegarts/distilgpt2-erichmariaremarque'
MIN_WORDS = 100  # NOTE(review): currently unused; kept for backward compatibility


@lru_cache(maxsize=1)
def get_model():
    """Load and return ``(model, tokenizer)`` for the fine-tuned checkpoint.

    Cached so the weights are downloaded/loaded from disk only once per
    process instead of on every request (the original reloaded per call).
    """
    model = AutoModelForCausalLM.from_pretrained(SAVED_CHECKPOINT)
    tokenizer = AutoTokenizer.from_pretrained(SAVED_CHECKPOINT)
    return model, tokenizer


def generate(prompt):
    """Sample a continuation of ``prompt`` and trim it to the last full sentence.

    Args:
        prompt: Text the model should continue.

    Returns:
        The decoded generation, cut after the last '.' so the reply does not
        end mid-sentence. If the generation contains no '.', the full text
        plus a trailing '.' is returned (same behavior as the original
        ``rsplit``-based trim).
    """
    model, tokenizer = get_model()
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        input_ids=input_ids,
        max_length=100,
        temperature=0.7,
        # Only the first sequence was ever consumed, so generating one
        # sample (not three) avoids wasting two thirds of the compute.
        num_return_sequences=1,
        do_sample=True,
    )
    text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return text.rsplit('.', 1)[0] + '.'


def predict(prompt):
    """Gradio entry point: forward the textbox contents to ``generate``."""
    return generate(prompt=prompt)


title = "What would Remarques say?"
description = """
The bot was trained to complete your prompt as if it was a beginning of a paragraph of Remarque's book.
"""

if __name__ == "__main__":
    gr.Interface(
        fn=predict,
        inputs="textbox",
        outputs="text",
        title=title,
        description=description,
        examples=[
            ["I was drinking because"],
            ["Who is Karl for me?"],
            ["My most brutal mistake was"],
        ],
    ).launch(debug=True)