rdose committed on
Commit
1a9f873
1 Parent(s): bcd193d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -4
app.py CHANGED
@@ -154,14 +154,18 @@ def inference(input_batch,isurl,use_archive,limit_companies=10):
154
  input_batch_content = input_batch_r
155
 
156
  print("[i] Batch size:",len(input_batch_content))
157
-
158
  prob_outs = _inference_classifier(input_batch_content)
 
159
  #sentiment = _inference_sentiment_model_via_api_query({"inputs": extracted['content']})
160
- #sentiment = _inference_sentiment_model_pipeline(input_batch_content )[0]
161
  #summary = _inference_summary_model_pipeline(input_batch_content )[0]['generated_text']
162
  #ner_labels = _inference_ner_spancat(input_batch_content ,summary, penalty = 0.8, limit_outputs=limit_companies)
 
 
 
163
 
164
- return prob_outs #ner_labels, {'E':float(prob_outs[0]),"S":float(prob_outs[1]),"G":float(prob_outs[2])},{sentiment['label']:float(sentiment['score'])},"**Summary:**\n\n" + summary
165
 
166
  title = "ESG API Demo"
167
  description = """This is a demonstration of the full ESG pipeline backend where given a list of URL (english, news) the news contents are extracted, using extractnet, and fed to three models:
@@ -191,7 +195,7 @@ demo = gr.Interface(fn=inference,
191
  gr.Dropdown(label='data type', choices=['text','url'], type='index', value='url'),
192
  gr.Checkbox(label='if url parse cached in archive.org'),
193
  gr.Slider(minimum=1, maximum=10, step=1, label='Limit NER output', value=5)],
194
- outputs=[gr.Dataframe(label='output raw', col_count=1, datatype='number', type='array', wrap=True)],#, header=OUT_HEADERS)],
195
  #gr.Label(label='Company'),
196
  #gr.Label(label='ESG'),
197
  #gr.Label(label='Sentiment'),
 
154
  input_batch_content = input_batch_r
155
 
156
  print("[i] Batch size:",len(input_batch_content))
157
+ print("[i] Running ESG classifier inference...")
158
  prob_outs = _inference_classifier(input_batch_content)
159
+ print("[i] Running sentiment using",MODEL_SENTIMENT_ANALYSIS ,"inference...")
160
  #sentiment = _inference_sentiment_model_via_api_query({"inputs": extracted['content']})
161
+ sentiment = _inference_sentiment_model_pipeline(input_batch_content )[0]
162
  #summary = _inference_summary_model_pipeline(input_batch_content )[0]['generated_text']
163
  #ner_labels = _inference_ner_spancat(input_batch_content ,summary, penalty = 0.8, limit_outputs=limit_companies)
164
+ df = pd.DataFrame(prob_outs,columns =['E','S','G'])
165
+ df['sent_lbl'] = sentiment['label']
166
+ df['sent_score'] = sentiment['score']
167
 
168
+ return df #ner_labels, {'E':float(prob_outs[0]),"S":float(prob_outs[1]),"G":float(prob_outs[2])},{sentiment['label']:float(sentiment['score'])},"**Summary:**\n\n" + summary
169
 
170
  title = "ESG API Demo"
171
  description = """This is a demonstration of the full ESG pipeline backend where given a list of URL (english, news) the news contents are extracted, using extractnet, and fed to three models:
 
195
  gr.Dropdown(label='data type', choices=['text','url'], type='index', value='url'),
196
  gr.Checkbox(label='if url parse cached in archive.org'),
197
  gr.Slider(minimum=1, maximum=10, step=1, label='Limit NER output', value=5)],
198
+ outputs=[gr.Dataframe(label='output raw', col_count=1, type='pandas', wrap=True, header=OUT_HEADERS)],
199
  #gr.Label(label='Company'),
200
  #gr.Label(label='ESG'),
201
  #gr.Label(label='Sentiment'),