Spaces:
Runtime error
Luis Oala committed · Commit 64e11c7 · 1 Parent(s): b936af7
Update app.py
app.py CHANGED
@@ -3,46 +3,18 @@ import gradio as gr
 from gradio import mix
 from transformers import pipeline, set_seed

-#title = "trustworthy artificial intelligence workshop - content generator"
-description = "based on the gpt2 demo interface by <a href='https://huggingface.co/spaces/docs-demos/gpt2/tree/main'>ahsen khaliq</a>"
-
-#io1 = gr.Interface.load("huggingface/distilgpt2")
-
 generator = pipeline('text-generation', model='gpt2')

-
-#io2 = gr.Interface.load("huggingface/gpt2-large")
-
-#io3 = gr.Interface.load("huggingface/gpt2-medium")
-
-#io4 = gr.Interface.load("huggingface/gpt2-xl")
-
 def inference(text, seed):
-    """
-    if model == "gpt2-large":
-        outtext = io2(text)
-    elif model == "gpt2-medium":
-        outtext = io3(text)
-    elif model == "gpt2-xl":
-        outtext = io4(text)
-    else:
-        outtext = io1(text)
-    """
-    #outtext = io2(text)
     set_seed(int(seed))
     outtext = generator(text, max_length=100, num_return_sequences=1)['generated_text'] #get the string from the return dict with key 'generated text'
     return outtext

-
-
 gr.Interface(
     inference,
-    [gr.inputs.Radio(choices=["trustworthy artificial intelligence"], label="input"), gr.inputs.Slider(minimum=0., maximum=1000.,label="seed")],
-    #,gr.inputs.Dropdown(choices=["distilgpt2","gpt2-medium","gpt2-large","gpt2-xl"], type="value", default="gpt2-medium", label="model")],
+    [gr.inputs.Radio(choices=["trustworthy artificial intelligence"], label="input"), gr.inputs.Slider(minimum=0., maximum=1000.,label="seed")],
     gr.outputs.Textbox(label="gpt-2 proposal"),
-
-
-    cache_examples=True).launch(enable_queue=True,
-    allow_flagging="manual")
+    cache_examples=True,
+    allow_flagging="manual").launch(enable_queue=True)

 #TODO: add credits at bottom
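One plausible source of the Space's "Runtime error" status that this commit does not touch: a transformers 'text-generation' pipeline returns a list of dictionaries (one per generated sequence), so indexing the result directly with ['generated_text'] raises a TypeError when inference runs. Below is a minimal sketch of the inference function with the missing list index added, assuming the rest of app.py stays as in the new version of the diff above:

from transformers import pipeline, set_seed

generator = pipeline('text-generation', model='gpt2')

def inference(text, seed):
    set_seed(int(seed))
    # the pipeline returns a list of dicts, one per returned sequence;
    # take the first sequence and read its 'generated_text' field
    outputs = generator(text, max_length=100, num_return_sequences=1)
    return outputs[0]['generated_text']

Separately, gr.inputs.*, gr.outputs.* and launch(enable_queue=True) belong to older Gradio releases; newer versions expose the same components at the top level (gr.Radio, gr.Slider, gr.Textbox), so the app may also fail on these calls if the Space resolves to a recent gradio version.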