versae committed
Commit 843ce7b
1 parent: b633633

Gaussian 512 fine-tuned POS CoNLL 2002 es with f1/acc 0.9646/0.9697
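The resulting checkpoint is a RoBERTa token-classification model for Spanish POS tagging. A minimal usage sketch with the transformers pipeline API; the repository id below is a placeholder, since this commit does not name the destination repo:

from transformers import pipeline

# Placeholder repo id: substitute the repository this commit was pushed to.
pos_tagger = pipeline("token-classification", model="your-org/bertin-gaussian-512-pos-es")
print(pos_tagger("El gato duerme en el sofá."))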

all_results.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "epoch": 5.0,
+ "eval_accuracy": 0.9696918164125239,
+ "eval_f1": 0.9646365422396856,
+ "eval_loss": 0.1131255105137825,
+ "eval_precision": 0.964786677301998,
+ "eval_recall": 0.9644864538965712,
+ "eval_runtime": 21.8306,
+ "eval_samples": 1916,
+ "eval_samples_per_second": 87.767,
+ "eval_steps_per_second": 5.497,
+ "train_loss": 0.10113097262245223,
+ "train_runtime": 1391.9068,
+ "train_samples": 8324,
+ "train_samples_per_second": 29.901,
+ "train_steps_per_second": 1.872
+ }
config.json ADDED
@@ -0,0 +1,152 @@
+ {
+ "_name_or_path": "bertin-project/bertin-base-gaussian-exp-512seqlen",
+ "architectures": [
+ "RobertaForTokenClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "bos_token_id": 0,
+ "eos_token_id": 2,
+ "finetuning_task": "pos",
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "id2label": {
+ "0": 0,
+ "1": 1,
+ "2": 2,
+ "3": 3,
+ "4": 4,
+ "5": 5,
+ "6": 6,
+ "7": 7,
+ "8": 8,
+ "9": 9,
+ "10": 10,
+ "11": 11,
+ "12": 12,
+ "13": 13,
+ "14": 14,
+ "15": 15,
+ "16": 16,
+ "17": 17,
+ "18": 18,
+ "19": 19,
+ "20": 20,
+ "21": 21,
+ "22": 22,
+ "23": 23,
+ "24": 24,
+ "25": 25,
+ "26": 26,
+ "27": 27,
+ "28": 28,
+ "29": 29,
+ "30": 30,
+ "31": 31,
+ "32": 32,
+ "33": 33,
+ "34": 34,
+ "35": 35,
+ "36": 36,
+ "37": 37,
+ "38": 38,
+ "39": 39,
+ "40": 40,
+ "41": 41,
+ "42": 42,
+ "43": 43,
+ "44": 44,
+ "45": 45,
+ "46": 46,
+ "47": 47,
+ "48": 48,
+ "49": 49,
+ "50": 50,
+ "51": 51,
+ "52": 52,
+ "53": 53,
+ "54": 54,
+ "55": 55,
+ "56": 56,
+ "57": 57,
+ "58": 58,
+ "59": 59
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "0": 0,
+ "1": 1,
+ "2": 2,
+ "3": 3,
+ "4": 4,
+ "5": 5,
+ "6": 6,
+ "7": 7,
+ "8": 8,
+ "9": 9,
+ "10": 10,
+ "11": 11,
+ "12": 12,
+ "13": 13,
+ "14": 14,
+ "15": 15,
+ "16": 16,
+ "17": 17,
+ "18": 18,
+ "19": 19,
+ "20": 20,
+ "21": 21,
+ "22": 22,
+ "23": 23,
+ "24": 24,
+ "25": 25,
+ "26": 26,
+ "27": 27,
+ "28": 28,
+ "29": 29,
+ "30": 30,
+ "31": 31,
+ "32": 32,
+ "33": 33,
+ "34": 34,
+ "35": 35,
+ "36": 36,
+ "37": 37,
+ "38": 38,
+ "39": 39,
+ "40": 40,
+ "41": 41,
+ "42": 42,
+ "43": 43,
+ "44": 44,
+ "45": 45,
+ "46": 46,
+ "47": 47,
+ "48": 48,
+ "49": 49,
+ "50": 50,
+ "51": 51,
+ "52": 52,
+ "53": 53,
+ "54": 54,
+ "55": 55,
+ "56": 56,
+ "57": 57,
+ "58": 58,
+ "59": 59
+ },
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 514,
+ "model_type": "roberta",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 1,
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.9.0.dev0",
+ "type_vocab_size": 1,
+ "use_cache": true,
+ "vocab_size": 50265
+ }
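The configuration above describes a standard 12-layer RoBERTa-base encoder topped with a 60-label token-classification head (label ids 0-59). A minimal loading sketch, assuming the files from this commit have been downloaded into a local directory; the path below is a placeholder:

from transformers import RobertaConfig, RobertaForTokenClassification

checkpoint_dir = "./pos-checkpoint"  # placeholder: directory holding config.json and pytorch_model.bin from this commit
config = RobertaConfig.from_pretrained(checkpoint_dir)
assert config.num_labels == 60  # derived from the 60-entry id2label mapping above
model = RobertaForTokenClassification.from_pretrained(checkpoint_dir, config=config)
model.eval()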
eval_results.json ADDED
@@ -0,0 +1,12 @@
+ {
+ "epoch": 5.0,
+ "eval_accuracy": 0.9696918164125239,
+ "eval_f1": 0.9646365422396856,
+ "eval_loss": 0.1131255105137825,
+ "eval_precision": 0.964786677301998,
+ "eval_recall": 0.9644864538965712,
+ "eval_runtime": 21.8306,
+ "eval_samples": 1916,
+ "eval_samples_per_second": 87.767,
+ "eval_steps_per_second": 5.497
+ }
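eval_f1 and eval_accuracy differ slightly, which is consistent with the seqeval metric used by the standard Hugging Face token-classification recipe: precision/recall/f1 are chunk-level while accuracy is token-level. That seqeval was used is an assumption here (the training script is not part of this commit), and the tag sequences below are purely illustrative:

from datasets import load_metric

metric = load_metric("seqeval")
predictions = [["DA", "NC", "VMI", "SP", "DA", "NC"]]  # illustrative predicted tags
references = [["DA", "NC", "VMI", "SP", "DA", "NC"]]   # illustrative gold tags
results = metric.compute(predictions=predictions, references=references)
print(results["overall_precision"], results["overall_recall"], results["overall_f1"], results["overall_accuracy"])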
merges.txt ADDED
The diff for this file is too large to render. See raw diff
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:804059e230f69819051c05748336cdcc1bf40e9ea3e52b36a907243fcd80b617
+ size 496489393
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "add_prefix_space": true, "errors": "replace", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": "<mask>", "special_tokens_map_file": null, "name_or_path": "bertin-project/bertin-base-gaussian-exp-512seqlen", "tokenizer_class": "RobertaTokenizer"}
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 5.0,
+ "train_loss": 0.10113097262245223,
+ "train_runtime": 1391.9068,
+ "train_samples": 8324,
+ "train_samples_per_second": 29.901,
+ "train_steps_per_second": 1.872
+ }
trainer_state.json ADDED
@@ -0,0 +1,55 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 5.0,
+ "global_step": 2605,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.96,
+ "learning_rate": 4.040307101727447e-05,
+ "loss": 0.2963,
+ "step": 500
+ },
+ {
+ "epoch": 1.92,
+ "learning_rate": 3.080614203454895e-05,
+ "loss": 0.0874,
+ "step": 1000
+ },
+ {
+ "epoch": 2.88,
+ "learning_rate": 2.1209213051823416e-05,
+ "loss": 0.0608,
+ "step": 1500
+ },
+ {
+ "epoch": 3.84,
+ "learning_rate": 1.161228406909789e-05,
+ "loss": 0.0433,
+ "step": 2000
+ },
+ {
+ "epoch": 4.8,
+ "learning_rate": 2.015355086372361e-06,
+ "loss": 0.0327,
+ "step": 2500
+ },
+ {
+ "epoch": 5.0,
+ "step": 2605,
+ "total_flos": 1.08808736772096e+16,
+ "train_loss": 0.10113097262245223,
+ "train_runtime": 1391.9068,
+ "train_samples_per_second": 29.901,
+ "train_steps_per_second": 1.872
+ }
+ ],
+ "max_steps": 2605,
+ "num_train_epochs": 5,
+ "total_flos": 1.08808736772096e+16,
+ "trial_name": null,
+ "trial_params": null
+ }
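The learning_rate values in log_history are consistent with the Trainer default of a linear decay to zero with no warmup, starting from a 5e-05 base rate over the 2605 total steps. The base rate is inferred from these numbers, not stated anywhere in this commit; a quick check:

# Assumed: base LR 5e-05, linear decay to 0 over 2605 steps, no warmup.
base_lr, total_steps = 5e-05, 2605
for step in (500, 1000, 1500, 2000, 2500):
    print(step, base_lr * (total_steps - step) / total_steps)
# prints ~4.0403e-05, 3.0806e-05, 2.1209e-05, 1.1612e-05, 2.0154e-06,
# matching the logged learning rates above.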
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed05a0e84a577b641fe3f312077173620696d209dad6ea42d96e8a23de030ce5
+ size 2735
vocab.json ADDED
The diff for this file is too large to render. See raw diff