s3nh committed
Commit 1578b0f
1 Parent(s): 1584982

Update app.py

Files changed (1)
  1. app.py +14 -15
app.py CHANGED
@@ -57,10 +57,11 @@ def evaluate(instruction, input=None):
         output_scores=True,
         max_new_tokens=256
     )
+    results = []
     for s in generation_output.sequences:
         output = tokenizer.decode(s)
-        print("Response:", output.split("### Response:")[1].strip())
-
+        results.append(output.split("### Response:")[1].strip())
+    return ' '.join(results)
 
 def inference(text):
     output = evaluate(instruction = text, input = input)
@@ -69,22 +70,20 @@ def inference(text):
 io = gr.Interface(
     inference,
     gr.Textbox(
-        lines = 3, max_lines = 10,
+        lines = 3,
+        max_lines = 10,
         placeholder = "Add question here",
         interactive = True,
         show_label = False
     ),
-    # gr.Textbox(
-    #     lines = 3,
-    #     max_lines = 25,
-    #     placeholder = "add context here",
-    #     interactive = True,
-    #     show_label = False
-    # ),
-    outputs = gr.Textbox(lines = 2, label = 'Pythia410m output', interactive = False),
+    gr.Textbox(
+        lines = 3,
+        max_lines = 10,
+        placeholder = "Add context here",
+        interactive = True,
+        show_label = False
+    ),
+    outputs = [gr.Textbox(lines = 1, label = 'Pythia410m', interactive = False)],
     cache_examples = False,
 )
-io.launch()
-
-
-#gr.Interface.load("models/s3nh/pythia-410m-70k-steps-self-instruct-polish").launch()
+io.launch()
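
Note that `inference` is still declared with a single `text` parameter, while the interface now defines two input textboxes, so Gradio will call it with two values (question and context). Below is a minimal sketch of how the two inputs could be threaded through to `evaluate`; it is an illustrative assumption, not the committed code, and the stub `evaluate` body only stands in for the actual model call in app.py.

# Sketch only (assumed wiring, not the committed app.py): a two-parameter
# inference function matching the two input textboxes of the interface.
import gradio as gr

def evaluate(instruction, input=None):
    # Placeholder standing in for the real generation call in app.py,
    # which decodes the model output and returns the "### Response:" part.
    return f"(response for instruction={instruction!r}, input={input!r})"

def inference(text, context):
    # Gradio passes one value per input component, in declaration order.
    return evaluate(instruction=text, input=context)

io = gr.Interface(
    inference,
    inputs=[
        gr.Textbox(lines=3, max_lines=10, placeholder="Add question here",
                   interactive=True, show_label=False),
        gr.Textbox(lines=3, max_lines=10, placeholder="Add context here",
                   interactive=True, show_label=False),
    ],
    outputs=gr.Textbox(lines=1, label="Pythia410m", interactive=False),
    cache_examples=False,
)

if __name__ == "__main__":
    io.launch()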