jdoexbox360 committed
Commit ec0e6cc (1 parent: 9a94120)

Update app.py

Files changed (1): app.py (+4 -3)
app.py CHANGED
@@ -10,14 +10,15 @@ model = GPT2LMHeadModel.from_pretrained("ethzanalytics/ai-msgbot-gpt2-XL-dialogu
 script_speaker_name = "person alpha"
 
 script_responder_name = "person beta"
-def output(prompt):
+def output(prompt, output_length):
     global convo
     sentence = convo + '\n' + script_speaker_name + ': ' + prompt + '\n' + script_responder_name + ': '
     input_ids = tokenizer.encode(sentence, return_tensors='pt')
     # generate text until the output length (which includes the context length) reaches 50
-    output = model.generate(input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2, early_stopping=True)
+    output = model.generate(input_ids, max_new_tokens=output_length, num_beams=5, no_repeat_ngram_size=2, early_stopping=True)
     convo = tokenizer.decode(output[0], skip_special_tokens=True)
     return convo
 convo = ''
-iface = gr.Interface(fn=output, inputs="text", outputs="text")
+
+iface = gr.Interface(fn=output, inputs=["text", Slider(minimum=0.0, maximum=1.0, step=0.05, default=0.4, label="Output Length")], outputs="text")
 iface.launch()
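
For reference, below is a minimal self-contained sketch of how app.py might look with this change wired up cleanly. It is not the committed code: it assumes a Gradio 3-style gr.Slider (the diff passes a bare Slider with a 0.0-1.0 range and default=, which would need the gr. prefix and an integer token count), assumes the model/tokenizer setup not shown in the hunk header, and casts the slider value to int before passing it to max_new_tokens.

import gradio as gr
from transformers import GPT2Tokenizer, GPT2LMHeadModel

# Assumed setup; the tokenizer line and full model id are not shown in the diff.
MODEL_ID = "ethzanalytics/ai-msgbot-gpt2-XL-dialogue"
tokenizer = GPT2Tokenizer.from_pretrained(MODEL_ID)
model = GPT2LMHeadModel.from_pretrained(MODEL_ID)

script_speaker_name = "person alpha"
script_responder_name = "person beta"

convo = ''

def output(prompt, output_length):
    global convo
    # Append the new user turn and prompt the responder persona.
    sentence = convo + '\n' + script_speaker_name + ': ' + prompt + '\n' + script_responder_name + ': '
    input_ids = tokenizer.encode(sentence, return_tensors='pt')
    # Generate up to output_length new tokens beyond the context.
    generated = model.generate(
        input_ids,
        max_new_tokens=int(output_length),
        num_beams=5,
        no_repeat_ngram_size=2,
        early_stopping=True,
    )
    convo = tokenizer.decode(generated[0], skip_special_tokens=True)
    return convo

# Hypothetical slider range: integer token counts instead of the 0.0-1.0 range in the diff.
iface = gr.Interface(
    fn=output,
    inputs=["text", gr.Slider(minimum=8, maximum=128, step=8, value=32, label="Output Length")],
    outputs="text",
)
iface.launch()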