s3nh committed on
Commit
0555380
1 Parent(s): bef125f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -25
app.py CHANGED
@@ -68,30 +68,35 @@ def inference(text, input):
68
  return output
69
 
70
  def choose_model(name):
71
-
72
  return load_model(name), load_tokenizer(name)
73
 
74
- name = gr.inputs.Dropdown(["s3nh/pythia-1.4b-deduped-16k-steps-self-instruct-polish", "s3nh/pythia-410m-91k-steps-self-instruct-polish"], label= "output")
75
- model = load_model(name)
76
- tokenizer = load_tokenizer(name)
77
-
78
- io = gr.Interface(
79
- inference,
80
- inputs = [gr.Textbox(
81
- lines = 3,
82
- max_lines = 10,
83
- placeholder = "Add question here",
84
- interactive = True,
85
- show_label = False
86
- ),
87
- gr.Textbox(
88
- lines = 3,
89
- max_lines = 10,
90
- placeholder = "Add context here",
91
- interactive = True,
92
- show_label = False
93
- )],
94
- outputs = [gr.Textbox(lines = 1, label = 'Pythia410m', interactive = False)],
95
- cache_examples = False,
96
- )
97
- io.launch()
 
 
 
 
 
 
 
68
  return output
69
 
70
  def choose_model(name):
 
71
  return load_model(name), load_tokenizer(name)
72
 
73
+ demo = gr.Blocks()
74
+
75
+ with demo:
76
+
77
+
78
+ name = gr.inputs.Dropdown(["s3nh/pythia-1.4b-deduped-16k-steps-self-instruct-polish", "s3nh/pythia-410m-91k-steps-self-instruct-polish"], "value", label= "output")
79
+ model = load_model(name)
80
+ tokenizer = load_tokenizer(name)
81
+
82
+ io = gr.Interface(
83
+ inference,
84
+ inputs = [gr.Textbox(
85
+ lines = 3,
86
+ max_lines = 10,
87
+ placeholder = "Add question here",
88
+ interactive = True,
89
+ show_label = False
90
+ ),
91
+ gr.Textbox(
92
+ lines = 3,
93
+ max_lines = 10,
94
+ placeholder = "Add context here",
95
+ interactive = True,
96
+ show_label = False
97
+ )],
98
+ outputs = [gr.Textbox(lines = 1, label = 'Pythia410m', interactive = False)],
99
+ cache_examples = False,
100
+ )
101
+
102
+ demo.launch()