import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("ethzanalytics/ai-msgbot-gpt2-XL-dialogue")
model = GPT2LMHeadModel.from_pretrained(
    "ethzanalytics/ai-msgbot-gpt2-XL-dialogue", pad_token_id=tokenizer.eos_token_id
)

script_speaker_name = "person alpha"
script_responder_name = "person beta"
convo = ''


def output(prompt):
    # keep the running conversation across calls
    global convo

    sentence = convo + '\n' + script_speaker_name + ': ' + prompt + '\n' + script_responder_name + ': '
    input_ids = tokenizer.encode(sentence, return_tensors='pt')

    # generate text until the output length (which includes the context length) reaches 50
    generated = model.generate(
        input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2, early_stopping=True
    )

    # decode the full conversation so far and return it for display
    convo = tokenizer.decode(generated[0], skip_special_tokens=True)
    return convo


iface = gr.Interface(fn=output, inputs="text", outputs="text")
iface.launch()