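# Gradio demo for the ethzanalytics/ai-msgbot-gpt2-XL-dialogue GPT-2 model:
# each submitted prompt is appended to a running conversation and the model's
# beam-search continuation is returned as the reply.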
import gradio as gr

from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Load the ai-msgbot GPT-2 dialogue checkpoint; using the EOS token as the
# pad token avoids the missing-pad-token warning during generation.
tokenizer = GPT2Tokenizer.from_pretrained("ethzanalytics/ai-msgbot-gpt2-XL-dialogue")
model = GPT2LMHeadModel.from_pretrained(
    "ethzanalytics/ai-msgbot-gpt2-XL-dialogue", pad_token_id=tokenizer.eos_token_id
)

# Speaker labels used to format each turn of the dialogue.
script_speaker_name = "person alpha"
script_responder_name = "person beta"


def output(prompt, output_length):
    global convo
    # Append the user's prompt as the speaker's turn, then cue the responder.
    sentence = convo + '\n' + script_speaker_name + ': ' + prompt + '\n' + script_responder_name + ': '
    input_ids = tokenizer.encode(sentence, return_tensors='pt')
    # Generate up to `output_length` new tokens beyond the prompt, using beam
    # search and blocking repeated bigrams.
    output_ids = model.generate(
        input_ids,
        max_new_tokens=int(output_length),
        num_beams=5,
        no_repeat_ngram_size=2,
        early_stopping=True,
    )
    # Keep the full decoded conversation so later turns retain context.
    convo = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    return convo
# Running conversation history, updated on every call to output().
convo = ''

# The slider value is passed to output() as `output_length`, i.e. the maximum
# number of new tokens to generate per reply.
iface = gr.Interface(
    fn=output,
    inputs=[
        "text",
        gr.Slider(minimum=8, maximum=128, step=1, value=32, label="Output Length"),
    ],
    outputs="text",
)
iface.launch()
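# Usage (assumed filename): `python app.py` serves the demo on a local URL;
# pass share=True to launch() for a temporary public link.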