import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import gradio as gr
from torch import nn

# Model artifacts: SEC-domain BERT tokenizer plus a locally fine-tuned
# sequence-classification checkpoint loaded from the "model/" directory.
tokenizer = AutoTokenizer.from_pretrained("nlpaueb/sec-bert-base")
model_fine_tuned_pt = AutoModelForSequenceClassification.from_pretrained("model/")

# Class index -> ESG issue label (33 classes, matching the fine-tuned head).
di = {0: 'Access to Communications', 1: 'Access to Finance', 2: 'Access to Health Care', 3: 'Accounting', 4: 'Biodiversity & Land Use', 5: 'Board', 6: 'Business Ethics', 7: 'Carbon Emissions', 8: 'Chemical Safety', 9: 'Climate Change Vulnerability', 10: 'Community Relations', 11: 'Consumer Financial Protection', 12: 'Controversial Sourcing', 13: 'Electronic Waste', 14: 'Financing Environmental Impact', 15: 'Health & Demographic Risk', 16: 'Human Capital Development', 17: 'Labor Management', 18: 'Opportunities in Clean Tech', 19: 'Opportunities in Green Building', 20: 'Opportunities in Nutrition & Health', 21: 'Opportunities in Renewable Energy', 22: 'Ownership & Control', 23: 'Packaging Material & Waste', 24: 'Pay', 25: 'Privacy & Data Security', 26: 'Product Carbon Footprint', 27: 'Product Safety & Quality', 28: 'Raw Material Sourcing', 29: 'Responsible Investment', 30: 'Supply Chain Labor Standards', 31: 'Toxic Emissions & Waste', 32: 'Water Stress'}

# Minimum top-class softmax probability required to report a label instead
# of the "not confident" fallback message.
CONFIDENCE_THRESHOLD = 0.7


def fns(input_text):
    """Classify *input_text* into one of the 33 ESG issue categories.

    Parameters
    ----------
    input_text : str
        Free-form text (e.g. a news headline) to classify.

    Returns
    -------
    str
        The predicted ESG issue label when the model's top softmax
        probability is at least CONFIDENCE_THRESHOLD, otherwise a
        "not confident" message.
    """
    predict_input_pt = tokenizer(
        input_text, truncation=True, padding=True, return_tensors='pt'
    )
    # Inference only: no_grad avoids building the autograd graph and
    # cuts per-request memory.
    with torch.no_grad():
        output_pt = model_fine_tuned_pt(**predict_input_pt)
    # Softmax once over the logits (output_pt[0]); derive both the
    # predicted class and its confidence from the same tensor. Softmax is
    # monotonic, so argmax over probs equals argmax over logits.
    probs = nn.functional.softmax(output_pt[0], dim=-1)
    probab = probs.max().item()
    prediction_value_pt = torch.argmax(probs, dim=1).item()
    if probab >= CONFIDENCE_THRESHOLD:
        return di[prediction_value_pt]
    return "Sorry! We are not confident of the ESG issue in this case."


demo = gr.Interface(
    title="ESG Issue Detector (EID)",
    fn=fns,
    inputs="text",
    outputs="text",
    examples=[
        "Partners Capital Appoints Kristen Eshak Weldon Global Head of ESG and Impact Investing",
        "United Natural Foods Unveils Sustainability Goals Covering Climate, Waste Reduction and Food Access",
    ],
)

# Launch only when run as a script, so importing this module (e.g. to reuse
# `fns` or `demo`) does not start the server as a side effect.
if __name__ == "__main__":
    demo.launch()