nbroad committed
Commit f23f112
1 Parent(s): 4400790

commit files to HF hub
README.md ADDED
@@ -0,0 +1,58 @@
+ ---
+ language:
+ - en
+ tags:
+ - text-classification
+ - bert
+ - pytorch
+ license: apache-2.0
+ widget:
+ - text: "In fiscal year 2019, we reduced our comprehensive carbon footprint for the fourth consecutive year—down 35 percent compared to 2015, when Apple’s carbon emissions peaked, even as net revenue increased by 11 percent over that same period. In the past year, we avoided over 10 million metric tons from our emissions reduction initiatives—like our Supplier Clean Energy Program, which lowered our footprint by 4.4 million metric tons."
+   example_title: "Carbon Footprint"
+ ---
+
+ # ESG BERT
+
+ (Uploaded from https://github.com/mukut03/ESG-BERT)
+
+ **Domain-Specific BERT Model for Text Mining in Sustainable Investing**
+
+ Read more about this pre-trained model [here](https://towardsdatascience.com/nlp-meets-sustainable-investing-d0542b3c264b?source=friends_link&sk=1f7e6641c3378aaff319a81decf387bf).
+
+ **In collaboration with [Charan Pothireddi](https://www.linkedin.com/in/sree-charan-pothireddi-6a0a3587/) and [Parabole.ai](https://www.linkedin.com/in/sree-charan-pothireddi-6a0a3587/)**
+
+ ### Labels
+
+ - 0: Business_Ethics
+ - 1: Data_Security
+ - 2: Access_And_Affordability
+ - 3: Business_Model_Resilience
+ - 4: Competitive_Behavior
+ - 5: Critical_Incident_Risk_Management
+ - 6: Customer_Welfare
+ - 7: Director_Removal
+ - 8: Employee_Engagement_Inclusion_And_Diversity
+ - 9: Employee_Health_And_Safety
+ - 10: Human_Rights_And_Community_Relations
+ - 11: Labor_Practices
+ - 12: Management_Of_Legal_And_Regulatory_Framework
+ - 13: Physical_Impacts_Of_Climate_Change
+ - 14: Product_Quality_And_Safety
+ - 15: Product_Design_And_Lifecycle_Management
+ - 16: Selling_Practices_And_Product_Labeling
+ - 17: Supply_Chain_Management
+ - 18: Systemic_Risk_Management
+ - 19: Waste_And_Hazardous_Materials_Management
+ - 20: Water_And_Wastewater_Management
+ - 21: Air_Quality
+ - 22: Customer_Privacy
+ - 23: Ecological_Impacts
+ - 24: Energy_Management
+ - 25: GHG_Emissions
+
+ ### References
+
+ [1] https://medium.com/analytics-vidhya/deploy-huggingface-s-bert-to-production-with-pytorch-serve-27b068026d18
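The README above describes a 26-way sequence classifier, so a minimal inference sketch may be useful. The sketch below is an assumption about typical usage, not part of the commit: it loads from `"."` (a local clone of this repository, matching the config's `_name_or_path`); substitute the Hub repository id if loading remotely.

```python
# Minimal sketch: run the committed checkpoint with the standard
# transformers text-classification pipeline. Loading from "." assumes
# the committed files sit in the current working directory.
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline

tokenizer = AutoTokenizer.from_pretrained(".")
model = AutoModelForSequenceClassification.from_pretrained(".")

classifier = pipeline("text-classification", model=model, tokenizer=tokenizer)
result = classifier(
    "In the past year, we avoided over 10 million metric tons of emissions "
    "through our Supplier Clean Energy Program."
)
print(result)  # e.g. [{'label': '<one of the 26 labels above>', 'score': ...}]
```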
config.json ADDED
@@ -0,0 +1,82 @@
+ {
+   "_name_or_path": ".",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "Business_Ethics",
+     "1": "Data_Security",
+     "2": "Access_And_Affordability",
+     "3": "Business_Model_Resilience",
+     "4": "Competitive_Behavior",
+     "5": "Critical_Incident_Risk_Management",
+     "6": "Customer_Welfare",
+     "7": "Director_Removal",
+     "8": "Employee_Engagement_Inclusion_And_Diversity",
+     "9": "Employee_Health_And_Safety",
+     "10": "Human_Rights_And_Community_Relations",
+     "11": "Labor_Practices",
+     "12": "Management_Of_Legal_And_Regulatory_Framework",
+     "13": "Physical_Impacts_Of_Climate_Change",
+     "14": "Product_Quality_And_Safety",
+     "15": "Product_Design_And_Lifecycle_Management",
+     "16": "Selling_Practices_And_Product_Labeling",
+     "17": "Supply_Chain_Management",
+     "18": "Systemic_Risk_Management",
+     "19": "Waste_And_Hazardous_Materials_Management",
+     "20": "Water_And_Wastewater_Management",
+     "21": "Air_Quality",
+     "22": "Customer_Privacy",
+     "23": "Ecological_Impacts",
+     "24": "Energy_Management",
+     "25": "GHG_Emissions"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "Access_And_Affordability": 2,
+     "Air_Quality": 21,
+     "Business_Ethics": 0,
+     "Business_Model_Resilience": 3,
+     "Competitive_Behavior": 4,
+     "Critical_Incident_Risk_Management": 5,
+     "Customer_Privacy": 22,
+     "Customer_Welfare": 6,
+     "Data_Security": 1,
+     "Director_Removal": 7,
+     "Ecological_Impacts": 23,
+     "Employee_Engagement_Inclusion_And_Diversity": 8,
+     "Employee_Health_And_Safety": 9,
+     "Energy_Management": 24,
+     "GHG_Emissions": 25,
+     "Human_Rights_And_Community_Relations": 10,
+     "Labor_Practices": 11,
+     "Management_Of_Legal_And_Regulatory_Framework": 12,
+     "Physical_Impacts_Of_Climate_Change": 13,
+     "Product_Design_And_Lifecycle_Management": 15,
+     "Product_Quality_And_Safety": 14,
+     "Selling_Practices_And_Product_Labeling": 16,
+     "Supply_Chain_Management": 17,
+     "Systemic_Risk_Management": 18,
+     "Waste_And_Hazardous_Materials_Management": 19,
+     "Water_And_Wastewater_Management": 20
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.14.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
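The `id2label` / `label2id` maps in this config tie the 26 outputs of the classification head back to the category names listed in the README. A small sketch of doing that mapping by hand, without the pipeline wrapper (again assuming the committed files are loaded from the local directory `"."`; the input sentence is only illustrative):

```python
# Sketch: map raw logits to a label name via the config's id2label table.
import torch
from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer

config = AutoConfig.from_pretrained(".")
tokenizer = AutoTokenizer.from_pretrained(".")
model = AutoModelForSequenceClassification.from_pretrained(".")
model.eval()

inputs = tokenizer(
    "Scope 1 and 2 emissions fell 12 percent compared with the prior year.",
    return_tensors="pt",
    truncation=True,
)
with torch.no_grad():
    logits = model(**inputs).logits      # shape: [1, 26]

pred_id = logits.argmax(dim=-1).item()
print(pred_id, config.id2label[pred_id])  # index and its label name
```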
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:caa781011a42ba2f73e0293c53fda580e5fcd03169380634ae07752c78eb9259
+ size 438090221
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": "./special_tokens_map.json", "name_or_path": ".", "do_basic_tokenize": true, "never_split": null, "tokenizer_class": "BertTokenizer"}
vocab.txt ADDED
The diff for this file is too large to render. See raw diff