import numpy as np
import onnxruntime
import onnx
import gradio as gr
import requests
import json
from extractnet import Extractor
from transformers import AutoTokenizer, pipeline
import spacy
import os
import itertools

MODEL_TRANSFORMER_BASED = "distilbert-base-uncased"
MODEL_ONNX_FNAME = "ESG_classifier.onnx"
MODEL_SENTIMENT_ANALYSIS = "ProsusAI/finbert"
MODEL_SUMMARY_PEGASUS = "oMateos2020/pegasus-newsroom-cnn_full-adafactor-bs6"

#API_HF_SENTIMENT_URL = "https://api-inference.huggingface.co/models/cardiffnlp/twitter-roberta-base-sentiment" 

def _inference_ner_spancat(text, summary, penalty=0.5, normalise=True, limit_outputs=10):
    nlp = spacy.load("en_pipeline")
    # Score every company span found in the full text, down-weighted by the penalty
    doc = nlp(text)
    spans = doc.spans["sc"]
    scores = dict(zip((str(s) for s in spans),
                      (float(score) * penalty for score in spans.attrs['scores'])))
    comp_raw_text = dict(sorted(scores.items(), key=lambda x: x[1], reverse=True))
    # Revert the penalty once for each occurrence of a company in the summary
    doc = nlp(summary)
    spans = doc.spans["sc"]
    exceeds_one = 0.0
    for comp_s in spans:
        if str(comp_s) in comp_raw_text:
            comp_raw_text[str(comp_s)] = comp_raw_text[str(comp_s)] / penalty
            temp_max = comp_raw_text[str(comp_s)] if comp_raw_text[str(comp_s)] > 1.0 else 0.0
            exceeds_one = comp_raw_text[str(comp_s)] if temp_max > exceeds_one else exceeds_one
    #This "exceeds_one" is a bit confusing. So the thing is that the penalty is reverted for each time the company appears in the summary and hence the value can exceed one when the company appears more than once. The normalisation means that all the other scores are divided by the maximum when any value exceeds one
    if normalise and (exceeds_one > 1):
        comp_raw_text = {k: v/exceeds_one for k, v in comp_raw_text.items()}
   
    return dict(itertools.islice(sorted(comp_raw_text.items(), key=lambda x: x[1], reverse=True), limit_outputs))

def _inference_summary_model_pipeline(text):
    pipe = pipeline("text2text-generation", model=MODEL_SUMMARY_PEGASUS)
    return pipe(text, truncation='longest_first')

def _inference_sentiment_model_pipeline(text):
    tokenizer_kwargs = {'padding': True, 'truncation': True, 'max_length': 512}
    pipe = pipeline("sentiment-analysis", model=MODEL_SENTIMENT_ANALYSIS)
    return pipe(text, **tokenizer_kwargs)

#def _inference_sentiment_model_via_api_query(payload):
#    response = requests.post(API_HF_SENTIMENT_URL , headers={"Authorization": os.environ['hf_api_token']}, json=payload)
#    return response.json()

def convert_listwords_text(list_words):
    # Join the lemmas back into a single whitespace-separated string
    return " ".join(list_words)

def clean_text(text):
    if text == "":
        return ""  # downstream tokenizer expects a string, so avoid returning -1
    nlp = spacy.load("en_core_web_sm")
    nlp.max_length = 2000000
    list_word = []
    for token in nlp(text):
        # Drop punctuation, stopwords, URLs, whitespace, e-mails and conjunctions,
        # keeping the lemma of everything else
        if (not token.is_punct
            and not token.is_stop
            and not token.like_url
            and not token.is_space
            and not token.like_email
            #and not token.like_num
            and token.pos_ not in ("CCONJ", "SCONJ")):  # spaCy v3 tags conjunctions CCONJ/SCONJ, not CONJ
            list_word.append(token.lemma_)
    return convert_listwords_text(list_words=list_word)

def sigmoid(x):
  return 1 / (1 + np.exp(-x))
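# e.g. sigmoid(np.array([0.0, 2.0, -2.0])) ~ [0.5, 0.881, 0.119]: each logit maps to a
# probability independently, as expected for the multi-label E/S/G classifier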

def to_numpy(tensor):
    # Convert a torch tensor to numpy (not used in the current ONNX-only inference path)
    return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()

def is_in_archive(url):
    try:
        r = requests.get('http://archive.org/wayback/available', params={'url': url})
        archive = json.loads(r.text)

        if archive['archived_snapshots']:
            closest = archive['archived_snapshots']['closest']
            return {'archived': closest['available'], 'url': closest['url'], 'error': 0}
        else:
            return {'archived': False, 'url': "", 'error': 0}
    except Exception:
        print(f"[E] Querying URL ({url}) from archive.org failed")
        return {'archived': False, 'url': "", 'error': -1}
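# For reference, the Wayback availability endpoint replies with JSON shaped roughly
# like this (values illustrative):
#   {"archived_snapshots": {"closest": {"available": true, "status": "200",
#                                       "url": "http://web.archive.org/web/...", "timestamp": "..."}}}
# or {"archived_snapshots": {}} when no snapshot exists.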

#def _inference_ner(text):
#    return labels

def _inference_classifier(text):
    tokenizer = AutoTokenizer.from_pretrained(MODEL_TRANSFORMER_BASED)
    # Truncation keeps only the head of the article (head-only classification)
    inputs = tokenizer(clean_text(text), return_tensors="np", padding="max_length", truncation=True)
    # Validate the exported model, then run it with ONNX Runtime
    onnx_model = onnx.load(MODEL_ONNX_FNAME)
    onnx.checker.check_model(onnx_model)
    ort_session = onnxruntime.InferenceSession(MODEL_ONNX_FNAME)

    ort_outs = ort_session.run(None, input_feed=dict(inputs))

    # Multi-label output: squash each logit independently
    return sigmoid(ort_outs[0])[0]
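# Note: the tokenizer, checker and session above are rebuilt on every call; loading
# them once at module level would cut per-request latency. Illustrative output
# (hypothetical values): array([0.91, 0.33, 0.12]) for E, S and G.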

def inference(url, use_archive, limit_companies=10):
    if use_archive:
        archive = is_in_archive(url)
        if archive['archived']:
            url = archive['url']
    # Extract the article body from the URL
    extracted = Extractor().extract(requests.get(url).text)
    prob_outs = _inference_classifier(extracted['content'])
    #sentiment = _inference_sentiment_model_via_api_query({"inputs": extracted['content']})
    sentiment = _inference_sentiment_model_pipeline(extracted['content'])[0]
    summary = _inference_summary_model_pipeline(extracted['content'])[0]['generated_text']
    ner_labels = _inference_ner_spancat(extracted['content'], summary, penalty=0.8, limit_outputs=limit_companies)

    return (ner_labels,
            {'E': float(prob_outs[0]), 'S': float(prob_outs[1]), 'G': float(prob_outs[2])},
            {sentiment['label']: float(sentiment['score'])},
            "**Summary:**\n\n" + summary)
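# The four returned values map, in order, onto the four Gradio outputs declared
# below: company scores (Label), ESG probabilities (Label), sentiment (Label) and
# the Markdown summary.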

title = "ESG API Demo"
description = """This is a demonstration of the full ESG pipeline backend where given a URL (english, news) the news contents are extracted, using extractnet, and fed to three models:

- An off-the-shelf sentiment classification model (ProsusAI/finbert)
- A custom NER for the company extraction
- A custom ESG classifier for the ESG labeling of the news (the extracted text is also lemmatised prior to be fed to this classifier) 

API input parameters:
- URL: text. Url of the news (english)
- `use_archive`: boolean. The model will extract the archived version in archive.org of the url indicated. This is useful with old news and to bypass news behind paywall
- `limit_companies`: integer. Number of found relevant companies to report.

"""
examples = [['https://www.bbc.com/news/uk-62732447',False,5],
            ['https://www.bbc.com/news/business-62747401',False,5],
            ['https://www.bbc.com/news/technology-62744858',False,5],
            ['https://www.bbc.com/news/science-environment-62758811',False,5],
            ['https://www.theguardian.com/business/2022/sep/02/nord-stream-1-gazprom-announces-indefinite-shutdown-of-pipeline',False,5],
            ['https://www.bbc.com/news/world-europe-62766867',False,5],
            ['https://www.bbc.com/news/business-62524031',False,5],
            ['https://www.bbc.com/news/business-62728621',False,5],
            ['https://www.bbc.com/news/science-environment-62680423',False,5]]
demo = gr.Interface(
    fn=inference,
    inputs=[gr.Textbox(label='URL'),
            gr.Checkbox(label='grab cached from archive.org'),
            gr.Slider(minimum=1, maximum=10, step=1, label='Limit NER output')],
    outputs=[gr.Label(label='Company'),
             gr.Label(label='ESG'),
             gr.Label(label='Sentiment'),
             gr.Markdown()],
    title=title, description=description, examples=examples)
demo.launch()