haryoaw committed on
Commit
935607a
1 Parent(s): 4944214

Initial Commit

Browse files
Files changed (5) hide show
  1. README.md +117 -0
  2. config.json +159 -0
  3. eval_results_ml.json +1 -0
  4. model.safetensors +3 -0
  5. training_args.bin +3 -0
README.md ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: transformers
3
+ license: mit
4
+ base_model: microsoft/mdeberta-v3-base
5
+ tags:
6
+ - generated_from_trainer
7
+ datasets:
8
+ - massive
9
+ metrics:
10
+ - accuracy
11
+ - f1
12
+ model-index:
13
+ - name: scenario-NON-KD-SCR-D2_data-AmazonScience_massive_all_1_144
14
+ results:
15
+ - task:
16
+ name: Text Classification
17
+ type: text-classification
18
+ dataset:
19
+ name: massive
20
+ type: massive
21
+ config: all_1.1
22
+ split: validation
23
+ args: all_1.1
24
+ metrics:
25
+ - name: Accuracy
26
+ type: accuracy
27
+ value: 0.8158178516024065
28
+ - name: F1
29
+ type: f1
30
+ value: 0.790068262479823
31
+ ---
32
+
33
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
34
+ should probably proofread and complete it, then remove this comment. -->
35
+
36
+ # scenario-NON-KD-SCR-D2_data-AmazonScience_massive_all_1_144
37
+
38
+ This model is a fine-tuned version of [microsoft/mdeberta-v3-base](https://huggingface.co/microsoft/mdeberta-v3-base) on the massive dataset.
39
+ It achieves the following results on the evaluation set:
40
+ - Loss: 1.9525
41
+ - Accuracy: 0.8158
42
+ - F1: 0.7901
43
+
44
+ ## Model description
45
+
46
+ More information needed
47
+
48
+ ## Intended uses & limitations
49
+
50
+ More information needed
51
+
52
+ ## Training and evaluation data
53
+
54
+ More information needed
55
+
56
+ ## Training procedure
57
+
58
+ ### Training hyperparameters
59
+
60
+ The following hyperparameters were used during training:
61
+ - learning_rate: 5e-05
62
+ - train_batch_size: 32
63
+ - eval_batch_size: 32
64
+ - seed: 44
65
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
66
+ - lr_scheduler_type: linear
67
+ - num_epochs: 10
68
+
69
+ ### Training results
70
+
71
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
72
+ |:-------------:|:------:|:------:|:---------------:|:--------:|:------:|
73
+ | 1.2857 | 0.2672 | 5000 | 1.2982 | 0.6464 | 0.5527 |
74
+ | 0.9859 | 0.5344 | 10000 | 1.0042 | 0.7331 | 0.6625 |
75
+ | 0.8126 | 0.8017 | 15000 | 0.9054 | 0.7613 | 0.6991 |
76
+ | 0.5568 | 1.0689 | 20000 | 0.8391 | 0.7828 | 0.7319 |
77
+ | 0.5531 | 1.3361 | 25000 | 0.8316 | 0.7886 | 0.7394 |
78
+ | 0.5497 | 1.6033 | 30000 | 0.7894 | 0.8011 | 0.7618 |
79
+ | 0.5099 | 1.8706 | 35000 | 0.7805 | 0.8050 | 0.7616 |
80
+ | 0.3327 | 2.1378 | 40000 | 0.8676 | 0.8040 | 0.7633 |
81
+ | 0.3482 | 2.4050 | 45000 | 0.8556 | 0.8060 | 0.7700 |
82
+ | 0.3506 | 2.6722 | 50000 | 0.8309 | 0.8087 | 0.7816 |
83
+ | 0.3508 | 2.9394 | 55000 | 0.8149 | 0.8105 | 0.7683 |
84
+ | 0.221 | 3.2067 | 60000 | 0.9645 | 0.8070 | 0.7760 |
85
+ | 0.222 | 3.4739 | 65000 | 0.9305 | 0.8113 | 0.7836 |
86
+ | 0.2414 | 3.7411 | 70000 | 0.9195 | 0.8122 | 0.7846 |
87
+ | 0.2032 | 4.0083 | 75000 | 0.9858 | 0.8141 | 0.7855 |
88
+ | 0.1457 | 4.2756 | 80000 | 1.0865 | 0.8130 | 0.7885 |
89
+ | 0.155 | 4.5428 | 85000 | 1.0413 | 0.8133 | 0.7830 |
90
+ | 0.1535 | 4.8100 | 90000 | 1.0934 | 0.8157 | 0.7887 |
91
+ | 0.0888 | 5.0772 | 95000 | 1.2135 | 0.8152 | 0.7896 |
92
+ | 0.0931 | 5.3444 | 100000 | 1.3402 | 0.8121 | 0.7857 |
93
+ | 0.1024 | 5.6117 | 105000 | 1.2838 | 0.8107 | 0.7848 |
94
+ | 0.1044 | 5.8789 | 110000 | 1.3039 | 0.8133 | 0.7885 |
95
+ | 0.0595 | 6.1461 | 115000 | 1.4268 | 0.8129 | 0.7877 |
96
+ | 0.0678 | 6.4133 | 120000 | 1.4729 | 0.8132 | 0.7866 |
97
+ | 0.0676 | 6.6806 | 125000 | 1.5201 | 0.8127 | 0.7859 |
98
+ | 0.0779 | 6.9478 | 130000 | 1.4956 | 0.8151 | 0.7905 |
99
+ | 0.0429 | 7.2150 | 135000 | 1.6860 | 0.8142 | 0.7897 |
100
+ | 0.0507 | 7.4822 | 140000 | 1.6751 | 0.8124 | 0.7842 |
101
+ | 0.0463 | 7.7495 | 145000 | 1.7002 | 0.8133 | 0.7866 |
102
+ | 0.034 | 8.0167 | 150000 | 1.7596 | 0.8135 | 0.7885 |
103
+ | 0.0254 | 8.2839 | 155000 | 1.8539 | 0.8133 | 0.7876 |
104
+ | 0.0294 | 8.5511 | 160000 | 1.8675 | 0.8146 | 0.7862 |
105
+ | 0.0296 | 8.8183 | 165000 | 1.8644 | 0.8142 | 0.7862 |
106
+ | 0.0174 | 9.0856 | 170000 | 1.9111 | 0.8151 | 0.7899 |
107
+ | 0.0159 | 9.3528 | 175000 | 1.9342 | 0.8156 | 0.7896 |
108
+ | 0.0171 | 9.6200 | 180000 | 1.9399 | 0.8161 | 0.7901 |
109
+ | 0.0209 | 9.8872 | 185000 | 1.9525 | 0.8158 | 0.7901 |
110
+
111
+
112
+ ### Framework versions
113
+
114
+ - Transformers 4.44.2
115
+ - Pytorch 2.1.1+cu121
116
+ - Datasets 2.14.5
117
+ - Tokenizers 0.19.1
config.json ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "microsoft/mdeberta-v3-base",
3
+ "architectures": [
4
+ "DebertaV2ForSequenceClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "hidden_act": "gelu",
8
+ "hidden_dropout_prob": 0.1,
9
+ "hidden_size": 768,
10
+ "id2label": {
11
+ "0": "LABEL_0",
12
+ "1": "LABEL_1",
13
+ "2": "LABEL_2",
14
+ "3": "LABEL_3",
15
+ "4": "LABEL_4",
16
+ "5": "LABEL_5",
17
+ "6": "LABEL_6",
18
+ "7": "LABEL_7",
19
+ "8": "LABEL_8",
20
+ "9": "LABEL_9",
21
+ "10": "LABEL_10",
22
+ "11": "LABEL_11",
23
+ "12": "LABEL_12",
24
+ "13": "LABEL_13",
25
+ "14": "LABEL_14",
26
+ "15": "LABEL_15",
27
+ "16": "LABEL_16",
28
+ "17": "LABEL_17",
29
+ "18": "LABEL_18",
30
+ "19": "LABEL_19",
31
+ "20": "LABEL_20",
32
+ "21": "LABEL_21",
33
+ "22": "LABEL_22",
34
+ "23": "LABEL_23",
35
+ "24": "LABEL_24",
36
+ "25": "LABEL_25",
37
+ "26": "LABEL_26",
38
+ "27": "LABEL_27",
39
+ "28": "LABEL_28",
40
+ "29": "LABEL_29",
41
+ "30": "LABEL_30",
42
+ "31": "LABEL_31",
43
+ "32": "LABEL_32",
44
+ "33": "LABEL_33",
45
+ "34": "LABEL_34",
46
+ "35": "LABEL_35",
47
+ "36": "LABEL_36",
48
+ "37": "LABEL_37",
49
+ "38": "LABEL_38",
50
+ "39": "LABEL_39",
51
+ "40": "LABEL_40",
52
+ "41": "LABEL_41",
53
+ "42": "LABEL_42",
54
+ "43": "LABEL_43",
55
+ "44": "LABEL_44",
56
+ "45": "LABEL_45",
57
+ "46": "LABEL_46",
58
+ "47": "LABEL_47",
59
+ "48": "LABEL_48",
60
+ "49": "LABEL_49",
61
+ "50": "LABEL_50",
62
+ "51": "LABEL_51",
63
+ "52": "LABEL_52",
64
+ "53": "LABEL_53",
65
+ "54": "LABEL_54",
66
+ "55": "LABEL_55",
67
+ "56": "LABEL_56",
68
+ "57": "LABEL_57",
69
+ "58": "LABEL_58",
70
+ "59": "LABEL_59"
71
+ },
72
+ "initializer_range": 0.02,
73
+ "intermediate_size": 3072,
74
+ "label2id": {
75
+ "LABEL_0": 0,
76
+ "LABEL_1": 1,
77
+ "LABEL_10": 10,
78
+ "LABEL_11": 11,
79
+ "LABEL_12": 12,
80
+ "LABEL_13": 13,
81
+ "LABEL_14": 14,
82
+ "LABEL_15": 15,
83
+ "LABEL_16": 16,
84
+ "LABEL_17": 17,
85
+ "LABEL_18": 18,
86
+ "LABEL_19": 19,
87
+ "LABEL_2": 2,
88
+ "LABEL_20": 20,
89
+ "LABEL_21": 21,
90
+ "LABEL_22": 22,
91
+ "LABEL_23": 23,
92
+ "LABEL_24": 24,
93
+ "LABEL_25": 25,
94
+ "LABEL_26": 26,
95
+ "LABEL_27": 27,
96
+ "LABEL_28": 28,
97
+ "LABEL_29": 29,
98
+ "LABEL_3": 3,
99
+ "LABEL_30": 30,
100
+ "LABEL_31": 31,
101
+ "LABEL_32": 32,
102
+ "LABEL_33": 33,
103
+ "LABEL_34": 34,
104
+ "LABEL_35": 35,
105
+ "LABEL_36": 36,
106
+ "LABEL_37": 37,
107
+ "LABEL_38": 38,
108
+ "LABEL_39": 39,
109
+ "LABEL_4": 4,
110
+ "LABEL_40": 40,
111
+ "LABEL_41": 41,
112
+ "LABEL_42": 42,
113
+ "LABEL_43": 43,
114
+ "LABEL_44": 44,
115
+ "LABEL_45": 45,
116
+ "LABEL_46": 46,
117
+ "LABEL_47": 47,
118
+ "LABEL_48": 48,
119
+ "LABEL_49": 49,
120
+ "LABEL_5": 5,
121
+ "LABEL_50": 50,
122
+ "LABEL_51": 51,
123
+ "LABEL_52": 52,
124
+ "LABEL_53": 53,
125
+ "LABEL_54": 54,
126
+ "LABEL_55": 55,
127
+ "LABEL_56": 56,
128
+ "LABEL_57": 57,
129
+ "LABEL_58": 58,
130
+ "LABEL_59": 59,
131
+ "LABEL_6": 6,
132
+ "LABEL_7": 7,
133
+ "LABEL_8": 8,
134
+ "LABEL_9": 9
135
+ },
136
+ "layer_norm_eps": 1e-07,
137
+ "max_position_embeddings": 512,
138
+ "max_relative_positions": -1,
139
+ "model_type": "deberta-v2",
140
+ "norm_rel_ebd": "layer_norm",
141
+ "num_attention_heads": 12,
142
+ "num_hidden_layers": 6,
143
+ "pad_token_id": 0,
144
+ "pooler_dropout": 0,
145
+ "pooler_hidden_act": "gelu",
146
+ "pooler_hidden_size": 768,
147
+ "pos_att_type": [
148
+ "p2c",
149
+ "c2p"
150
+ ],
151
+ "position_biased_input": false,
152
+ "position_buckets": 256,
153
+ "relative_attention": true,
154
+ "share_att_key": true,
155
+ "torch_dtype": "float32",
156
+ "transformers_version": "4.44.2",
157
+ "type_vocab_size": 0,
158
+ "vocab_size": 251000
159
+ }
eval_results_ml.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"zh-CN": {"f1": 0.7662833162101182, "accuracy": 0.8096839273705447}, "id-ID": {"f1": 0.791936085937182, "accuracy": 0.8298587760591796}, "he-IL": {"f1": 0.7546995868698819, "accuracy": 0.8090114324142569}, "ca-ES": {"f1": 0.7716627285190718, "accuracy": 0.8137188971082717}, "lv-LV": {"f1": 0.7756578263764201, "accuracy": 0.8026227303295226}, "el-GR": {"f1": 0.7728765565056955, "accuracy": 0.8177538668459986}, "sv-SE": {"f1": 0.7632187394667299, "accuracy": 0.8157363819771352}, "ms-MY": {"f1": 0.7878501426458103, "accuracy": 0.82817753866846}, "ko-KR": {"f1": 0.7609420439653891, "accuracy": 0.8096839273705447}, "hi-IN": {"f1": 0.7690039456608021, "accuracy": 0.8096839273705447}, "es-ES": {"f1": 0.7598383609511135, "accuracy": 0.8080026899798252}, "jv-ID": {"f1": 0.7554621226813978, "accuracy": 0.8039677202420982}, "am-ET": {"f1": 0.7085069676445124, "accuracy": 0.7484868863483524}, "tl-PH": {"f1": 0.7509640040573857, "accuracy": 0.7972427706792199}, "hy-AM": {"f1": 0.7374190519240533, "accuracy": 0.7999327505043712}, "fi-FI": {"f1": 0.7579482093300729, "accuracy": 0.7948890383322125}, "sl-SL": {"f1": 0.7377782065280359, "accuracy": 0.7948890383322125}, "af-ZA": {"f1": 0.7556122198128261, "accuracy": 0.8059852051109617}, "km-KH": {"f1": 0.6890731324720444, "accuracy": 0.7420981842636181}, "ml-IN": {"f1": 0.7572675222479256, "accuracy": 0.8069939475453934}, "bn-BD": {"f1": 0.7437141399115078, "accuracy": 0.796570275722932}, "zh-TW": {"f1": 0.7776751382297559, "accuracy": 0.7958977807666443}, "mn-MN": {"f1": 0.7767218654514655, "accuracy": 0.8174176193678547}, "th-TH": {"f1": 0.7941892608022791, "accuracy": 0.8103564223268326}, "ur-PK": {"f1": 0.7444451193761809, "accuracy": 0.7908540685944856}, "my-MM": {"f1": 0.7460507100912702, "accuracy": 0.7972427706792199}, "cy-GB": {"f1": 0.738387716586206, "accuracy": 0.7979152656355077}, "hu-HU": {"f1": 0.7430028758685533, "accuracy": 0.7881640887693342}, "ja-JP": {"f1": 0.7678909759702931, "accuracy": 0.808338937457969}, 
"sq-AL": {"f1": 0.7814638611193184, "accuracy": 0.7989240080699395}, "it-IT": {"f1": 0.7787302675572307, "accuracy": 0.8295225285810356}, "ru-RU": {"f1": 0.7814629919692825, "accuracy": 0.8211163416274377}, "de-DE": {"f1": 0.7533074303816854, "accuracy": 0.8053127101546739}, "fr-FR": {"f1": 0.7715758795456081, "accuracy": 0.8133826496301277}, "vi-VN": {"f1": 0.7599538441367071, "accuracy": 0.8029589778076665}, "is-IS": {"f1": 0.7698857108361212, "accuracy": 0.8113651647612643}, "kn-IN": {"f1": 0.7406854018951025, "accuracy": 0.7908540685944856}, "nl-NL": {"f1": 0.7822114085697252, "accuracy": 0.8190988567585743}, "ta-IN": {"f1": 0.7482914737771544, "accuracy": 0.7854741089441829}, "tr-TR": {"f1": 0.7450953099750486, "accuracy": 0.7955615332885003}, "az-AZ": {"f1": 0.7953001260325713, "accuracy": 0.8127101546738399}, "te-IN": {"f1": 0.7350467977273795, "accuracy": 0.7881640887693342}, "pt-PT": {"f1": 0.7601369696806045, "accuracy": 0.8090114324142569}, "ka-GE": {"f1": 0.7317818920681861, "accuracy": 0.7639542703429725}, "ro-RO": {"f1": 0.7659765947189487, "accuracy": 0.8093476798924009}, "da-DK": {"f1": 0.7666088714747586, "accuracy": 0.8227975790181573}, "ar-SA": {"f1": 0.7077296786746656, "accuracy": 0.7468056489576328}, "nb-NO": {"f1": 0.7602826217015248, "accuracy": 0.8190988567585743}, "sw-KE": {"f1": 0.7407670028895473, "accuracy": 0.7844653665097512}, "fa-IR": {"f1": 0.7672339912551724, "accuracy": 0.8184263618022865}, "pl-PL": {"f1": 0.7699483673864845, "accuracy": 0.8080026899798252}, "en-US": {"f1": 0.7782275509992851, "accuracy": 0.8217888365837256}, "all": {"f1": 0.7579118440386778, "accuracy": 0.8021830220888728}}
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:382350467badbca264403e2aca38c82736d849a37d610f021048a69ce7167173
3
+ size 945325776
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:350494c250a7a02b29e526f46ad3e0c2d10caa4ca30ba758344a4189a1488a08
3
+ size 5304