rdose committed
Commit 7d1b388
1 Parent(s): ab24997

Create app.py

Files changed (1):
  1. app.py +154 -0
app.py ADDED
@@ -0,0 +1,154 @@
+ import numpy as np
+ import onnxruntime
+ import onnx
+ import gradio as gr
+ import requests
+ import json
+ from extractnet import Extractor
+ import math
+ from transformers import AutoTokenizer, pipeline
+ import spacy
+ import os
+ import itertools
+
+ MODEL_TRANSFORMER_BASED = "distilbert-base-uncased"
+ MODEL_ONNX_FNAME = "ESG_classifier.onnx"
+ MODEL_SENTIMENT_ANALYSIS = "ProsusAI/finbert"
+ MODEL_SUMMARY_PEGASUS = "oMateos2020/pegasus-newsroom-cnn_full-adafactor-bs6"
+
+ #API_HF_SENTIMENT_URL = "https://api-inference.huggingface.co/models/cardiffnlp/twitter-roberta-base-sentiment"
+
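+ # Model roles: the DistilBERT tokenizer feeds a local ONNX ESG classifier,
+ # FinBERT handles financial sentiment, and a fine-tuned Pegasus produces the
+ # summary.
+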
+ def _inference_ner_spancat(text, summary, penalty=0.5, normalise=True, limit_outputs=10):
+     nlp = spacy.load("en_pipeline")
+     doc = nlp(text)
+     spans = doc.spans["sc"]
+     # Score each candidate company span found in the full text, discounted by `penalty`.
+     comp_raw_text = dict(sorted(dict(zip([str(x) for x in spans],
+                                          [float(x) * penalty for x in spans.attrs['scores']])).items(),
+                                 key=lambda x: x[1], reverse=True))
+     doc = nlp(summary)
+     spans = doc.spans["sc"]
+     exceeds_one = 0.0
+     for comp_s in spans:
+         if str(comp_s) in comp_raw_text.keys():
+             # Undo the penalty for companies that also appear in the summary.
+             comp_raw_text[str(comp_s)] = comp_raw_text[str(comp_s)] / penalty
+             temp_max = comp_raw_text[str(comp_s)] if comp_raw_text[str(comp_s)] > 1.0 else 0.0
+             exceeds_one = comp_raw_text[str(comp_s)] if temp_max > exceeds_one else exceeds_one
+     # The penalty is reverted once per summary occurrence, so a company that
+     # appears more than once in the summary can end up with a score above 1.0.
+     # When that happens (and `normalise` is set), all scores are rescaled by
+     # the maximum so they stay within [0, 1].
+     if normalise and (exceeds_one > 1):
+         comp_raw_text = {k: v / exceeds_one for k, v in comp_raw_text.items()}
+
+     return dict(itertools.islice(sorted(comp_raw_text.items(), key=lambda x: x[1], reverse=True), limit_outputs))
+
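+ # Worked example with penalty=0.8: a company scored 0.9 in the article body
+ # enters the table as 0.72; if it also shows up in the summary, the penalty is
+ # undone and it returns to 0.9, so summary-mentioned companies outrank
+ # body-only ones.
+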
+ def _inference_summary_model_pipeline(text):
+     pipe = pipeline("text2text-generation", model=MODEL_SUMMARY_PEGASUS)
+     return pipe(text, truncation='longest_first')
+
+ def _inference_sentiment_model_pipeline(text):
+     tokenizer_kwargs = {'padding': True, 'truncation': True, 'max_length': 512}  #,'return_tensors':'pt'
+     pipe = pipeline("sentiment-analysis", model=MODEL_SENTIMENT_ANALYSIS)
+     return pipe(text, **tokenizer_kwargs)
+
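+ # FinBERT returns the top label ("positive", "negative" or "neutral") together
+ # with its score; inference() below surfaces only that top label.
+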
+ #def _inference_sentiment_model_via_api_query(payload):
+ #    response = requests.post(API_HF_SENTIMENT_URL, headers={"Authorization": os.environ['hf_api_token']}, json=payload)
+ #    return response.json()
+
+ def convert_listwords_text(list_words):
+     # Join the token list back into a single whitespace-separated string.
+     return " ".join(list_words)
+
+ def clean_text(text):
+     nlp = spacy.load("en_core_web_sm")
+     nlp.max_length = 2000000
+     if text != "":
+         list_word = []
+         for token in nlp(text):
+             # Keep lemmas only, dropping punctuation, stopwords, URLs,
+             # whitespace, e-mail addresses and conjunctions.
+             if (not token.is_punct
+                     and not token.is_stop
+                     and not token.like_url
+                     and not token.is_space
+                     and not token.like_email
+                     #and not token.like_num
+                     and not token.pos_ == "CONJ"):
+                 list_word.append(token.lemma_)
+         return convert_listwords_text(list_words=list_word)
+     else:
+         # Return an empty string (rather than -1) so the downstream tokenizer
+         # does not break on empty input.
+         return ""
+
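+ # Roughly, clean_text("The EU approved new rules.") would come back as
+ # something like "EU approve new rule" (exact output depends on the spaCy
+ # model version).
+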
+ def sigmoid(x):
+     return 1 / (1 + np.exp(-x))
+
+ def to_numpy(tensor):
+     # Helper for PyTorch tensors (unused in the ONNX path below).
+     return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
+
+ def is_in_archive(url):
+     try:
+         r = requests.get('http://archive.org/wayback/available?url=' + url)
+         archive = json.loads(r.text)
+         if archive['archived_snapshots']:
+             closest = archive['archived_snapshots']['closest']
+             return {'archived': closest['available'], 'url': closest['url'], 'error': 0}
+         else:
+             return {'archived': False, 'url': "", 'error': 0}
+     except Exception:
+         print(f"[E] Querying URL ({url}) from archive.org failed")
+         return {'archived': False, 'url': "", 'error': -1}
+
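+ # For reference, archive.org's availability endpoint returns JSON shaped
+ # roughly like (fields beyond those used here may vary):
+ #   {"archived_snapshots": {"closest": {"available": true,
+ #       "url": "http://web.archive.org/web/...", "timestamp": "...", "status": "200"}}}
+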
+ #def _inference_ner(text):
+ #    return labels
+
+ def _inference_classifier(text):
+     tokenizer = AutoTokenizer.from_pretrained(MODEL_TRANSFORMER_BASED)
+     inputs = tokenizer(clean_text(text), return_tensors="np", padding="max_length", truncation=True)  # this assumes head-only truncation!
+     ort_session = onnxruntime.InferenceSession(MODEL_ONNX_FNAME)
+     onnx_model = onnx.load(MODEL_ONNX_FNAME)
+     onnx.checker.check_model(onnx_model)
+
+     # Compute the ONNX Runtime output prediction.
+     ort_outs = ort_session.run(None, input_feed=dict(inputs))
+
+     return sigmoid(ort_outs[0])[0]
+
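+ # Note: sigmoid (rather than softmax) over the three logits treats E/S/G as
+ # independent multi-label probabilities. Loading the tokenizer and the ONNX
+ # session once at module level would avoid re-initialising them per request.
+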
+ def inference(url, use_archive, limit_companies=10):
+     if use_archive:
+         archive = is_in_archive(url)
+         if archive['archived']:
+             url = archive['url']
+     # Extract the article body from the URL.
+     extracted = Extractor().extract(requests.get(url).text)
+     prob_outs = _inference_classifier(extracted['content'])
+     #sentiment = _inference_sentiment_model_via_api_query({"inputs": extracted['content']})
+     sentiment = _inference_sentiment_model_pipeline(extracted['content'])[0]
+     summary = _inference_summary_model_pipeline(extracted['content'])[0]['generated_text']
+     ner_labels = _inference_ner_spancat(extracted['content'], summary, penalty=0.8, limit_outputs=limit_companies)
+
+     return (ner_labels,
+             {'E': float(prob_outs[0]), "S": float(prob_outs[1]), "G": float(prob_outs[2])},
+             {sentiment['label']: float(sentiment['score'])},
+             "**Summary:**\n\n" + summary)
+
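+ # The four return values map one-to-one onto the Interface outputs declared
+ # below: company labels, ESG probabilities, sentiment, and a Markdown summary.
+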
+ title = "ESG API Demo"
+ description = """This is a demonstration of the full ESG pipeline backend: given a URL (English-language news), the article contents are extracted with extractnet and fed to three models:
+
+ - an off-the-shelf sentiment classification model (ProsusAI/finbert)
+ - a custom NER model for company extraction
+ - a custom ESG classifier for labelling the news (the extracted text is also lemmatised before being fed to this classifier)
+
+ API input parameters:
+ - URL: text. URL of the news article (English).
+ - `use_archive`: boolean. If set, the archive.org snapshot of the given URL is extracted instead. Useful for old news and for bypassing paywalled articles.
+ - `limit_companies`: integer. Maximum number of relevant companies to report.
+
+ """
+ examples = [['https://www.bbc.com/news/uk-62732447', False, 5],
+             ['https://www.bbc.com/news/business-62747401', False, 5],
+             ['https://www.bbc.com/news/technology-62744858', False, 5],
+             ['https://www.bbc.com/news/science-environment-62758811', False, 5],
+             ['https://www.theguardian.com/business/2022/sep/02/nord-stream-1-gazprom-announces-indefinite-shutdown-of-pipeline', False, 5],
+             ['https://www.bbc.com/news/world-europe-62766867', False, 5],
+             ['https://www.bbc.com/news/business-62524031', False, 5],
+             ['https://www.bbc.com/news/business-62728621', False, 5],
+             ['https://www.bbc.com/news/science-environment-62680423', False, 5]]
+ demo = gr.Interface(fn=inference,
+                     inputs=[gr.Textbox(label='URL'),
+                             gr.Checkbox(label='grab cached from archive.org'),
+                             gr.Slider(minimum=1, maximum=10, step=1, label='Limit NER output')],
+                     outputs=[gr.Label(label='Company'),
+                              gr.Label(label='ESG'),
+                              gr.Label(label='Sentiment'),
+                              gr.Markdown()],
+                     title=title, description=description, examples=examples)
+ demo.launch()
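+ # A minimal sketch of exercising the running demo over HTTP (hypothetical
+ # local URL; the exact request shape depends on the Gradio version):
+ #
+ #   import requests
+ #   r = requests.post("http://127.0.0.1:7860/api/predict",
+ #                     json={"data": ["https://www.bbc.com/news/uk-62732447", False, 5]})
+ #   print(r.json())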