Rachel1809 committed on
Commit
017304c
β€’
1 Parent(s): 0bd78e9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -19,14 +19,14 @@ vectorizer.adapt(X.values)
19
def evaluate_comment(Comment):
    """Classify the toxicity of a comment, one verdict per label.

    Relies on module-level globals: a fitted ``vectorizer``, a trained
    ``model``, and ``df`` whose columns from index 2 onward are the
    toxicity label names (assumed from usage — confirm against caller).

    Args:
        Comment: Raw comment string to evaluate.

    Returns:
        dict: label name -> 'πŸ†˜' when the predicted score for that label
        exceeds 0.5, otherwise 'πŸ˜„'.
    """
    processed_Comment = vectorizer([Comment])
    res = model.predict(processed_Comment)
    # Fix: removed the unused local `text = ''` that was never read.
    # 0.5 is the decision threshold for flagging a label as toxic.
    display = {
        col: 'πŸ†˜' if res[0][i] > 0.5 else 'πŸ˜„'
        for i, col in enumerate(df.columns[2:])
    }
    return display
28
 
29
# Wire the classifier into a Gradio UI; live=True re-evaluates as the
# user types instead of waiting for a submit click.
interface = gr.Interface(
    fn=evaluate_comment,
    live=True,
    title='ToxClass',
    inputs=gr.inputs.Textbox(lines=4, placeholder='Comment to evaluate'),
    outputs='label',
    description="An NLP model that classifies level of toxicity of the sentence.",
)

interface.launch()
 
19
def evaluate_comment(Comment):
    """Evaluate a comment and return a per-label toxicity report.

    Relies on module-level globals: a fitted ``vectorizer``, a trained
    ``model``, and ``df`` whose columns from index 2 onward are the
    toxicity label names (assumed from usage — confirm against caller).

    Args:
        Comment: Raw comment string to evaluate.

    Returns:
        str: one line per label, formatted '<label>: <emoji>\n', with the
        toxic flag when the predicted score for that label exceeds 0.5.
    """
    processed_Comment = vectorizer([Comment])
    res = model.predict(processed_Comment)
    # BUG FIX: the previous revision deleted `display = {}` but kept the
    # `display[col] = ...` assignment, so the function raised NameError.
    # The dict is no longer returned, so the assignment is dropped and
    # only the text report is built.
    text = ''
    for i, col in enumerate(df.columns[2:]):
        text += '{}: {}\n'.format(col, 'πŸ†˜πŸ†˜πŸ†˜' if res[0][i] > 0.5 else 'πŸ˜„πŸ˜„πŸ˜„')
    return text
28
 
29
# Build the Gradio UI: a labelled free-text box in, the textual
# per-label report out; live=True re-runs the model on each edit.
interface = gr.Interface(
    fn=evaluate_comment,
    live=True,
    title='ToxClass',
    inputs=gr.inputs.Textbox(lines=4, label='Comment', placeholder='Comment to evaluate'),
    outputs=gr.Textbox(lines=4, label='Evaluation'),
    description="An NLP model that classifies level of toxicity of the sentence.",
)

interface.launch()