edumunozsala committed
Commit 24ddaab
Parent(s): a5f7e6d
Training in progress, epoch 1
Browse files
- .gitignore +1 -0
- config.json +175 -0
- last-checkpoint/config.json +175 -0
- last-checkpoint/merges.txt +0 -0
- last-checkpoint/optimizer.pt +3 -0
- last-checkpoint/pytorch_model.bin +3 -0
- last-checkpoint/rng_state.pth +3 -0
- last-checkpoint/scaler.pt +3 -0
- last-checkpoint/scheduler.pt +3 -0
- last-checkpoint/special_tokens_map.json +1 -0
- last-checkpoint/tokenizer.json +0 -0
- last-checkpoint/tokenizer_config.json +1 -0
- last-checkpoint/trainer_state.json +81 -0
- last-checkpoint/training_args.bin +3 -0
- last-checkpoint/vocab.json +0 -0
- merges.txt +0 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +1 -0
- tokenizer.json +0 -0
- tokenizer_config.json +1 -0
- training_args.bin +3 -0
- vocab.json +0 -0
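
For reference, the files of this exact revision can be fetched programmatically. A minimal sketch with huggingface_hub, assuming a hypothetical repo id (the real model id is not shown on this page; substitute it):

# Sketch: download this commit's snapshot. REPO_ID is a hypothetical
# placeholder, not the actual repo id of this model.
from huggingface_hub import snapshot_download

REPO_ID = "edumunozsala/<model-name>"  # hypothetical; replace with the real id
local_dir = snapshot_download(repo_id=REPO_ID, revision="24ddaab")
print(local_dir)  # local path with config.json, pytorch_model.bin, tokenizer files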
.gitignore
ADDED
@@ -0,0 +1 @@
+checkpoint-*/
config.json
ADDED
@@ -0,0 +1,175 @@
+{
+  "architectures": [
+    "EncoderDecoderModel"
+  ],
+  "decoder": {
+    "_name_or_path": "bertin-project/bertin-roberta-base-spanish",
+    "add_cross_attention": true,
+    "architectures": [
+      "RobertaForMaskedLM"
+    ],
+    "attention_probs_dropout_prob": 0.1,
+    "bad_words_ids": null,
+    "bos_token_id": 0,
+    "chunk_size_feed_forward": 0,
+    "classifier_dropout": null,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": 2,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "gradient_checkpointing": false,
+    "hidden_act": "gelu",
+    "hidden_dropout_prob": 0.1,
+    "hidden_size": 768,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "initializer_range": 0.02,
+    "intermediate_size": 3072,
+    "is_decoder": true,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "layer_norm_eps": 1e-05,
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "max_position_embeddings": 514,
+    "min_length": 0,
+    "model_type": "roberta",
+    "no_repeat_ngram_size": 0,
+    "num_attention_heads": 12,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_hidden_layers": 12,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": 1,
+    "position_embedding_type": "absolute",
+    "prefix": null,
+    "problem_type": null,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sep_token_id": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": null,
+    "torchscript": false,
+    "transformers_version": "4.12.3",
+    "type_vocab_size": 1,
+    "use_bfloat16": false,
+    "use_cache": true,
+    "vocab_size": 50265
+  },
+  "decoder_start_token_id": 0,
+  "early_stopping": true,
+  "encoder": {
+    "_name_or_path": "bertin-project/bertin-roberta-base-spanish",
+    "add_cross_attention": false,
+    "architectures": [
+      "RobertaForMaskedLM"
+    ],
+    "attention_probs_dropout_prob": 0.1,
+    "bad_words_ids": null,
+    "bos_token_id": 0,
+    "chunk_size_feed_forward": 0,
+    "classifier_dropout": null,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": 2,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "gradient_checkpointing": false,
+    "hidden_act": "gelu",
+    "hidden_dropout_prob": 0.1,
+    "hidden_size": 768,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "initializer_range": 0.02,
+    "intermediate_size": 3072,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "layer_norm_eps": 1e-05,
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "max_position_embeddings": 514,
+    "min_length": 0,
+    "model_type": "roberta",
+    "no_repeat_ngram_size": 0,
+    "num_attention_heads": 12,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_hidden_layers": 12,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": 1,
+    "position_embedding_type": "absolute",
+    "prefix": null,
+    "problem_type": null,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sep_token_id": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": null,
+    "torchscript": false,
+    "transformers_version": "4.12.3",
+    "type_vocab_size": 1,
+    "use_bfloat16": false,
+    "use_cache": true,
+    "vocab_size": 50265
+  },
+  "eos_token_id": 2,
+  "is_encoder_decoder": true,
+  "length_penalty": 2.0,
+  "max_length": 16,
+  "min_length": 4,
+  "model_type": "encoder-decoder",
+  "no_repeat_ngram_size": 1,
+  "num_beams": 4,
+  "pad_token_id": 1,
+  "repetition_penalty": 2.0,
+  "tie_encoder_decoder": true,
+  "torch_dtype": "float32",
+  "transformers_version": null,
+  "vocab_size": 50265
+}
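
This config describes a warm-started RoBERTa-to-RoBERTa EncoderDecoderModel whose encoder and decoder share weights ("tie_encoder_decoder": true). A minimal sketch of how such a model is typically assembled with the transformers API before fine-tuning; it mirrors the settings above but is not necessarily the author's exact training script:

# Sketch: build a shared-weight RoBERTa encoder-decoder like the one this
# config describes (warm-starting pattern from the transformers docs).
from transformers import AutoTokenizer, EncoderDecoderModel

ckpt = "bertin-project/bertin-roberta-base-spanish"
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = EncoderDecoderModel.from_encoder_decoder_pretrained(
    ckpt, ckpt, tie_encoder_decoder=True  # encoder and decoder share weights
)

# Token ids and generation settings matching the config above.
model.config.decoder_start_token_id = tokenizer.bos_token_id  # 0
model.config.eos_token_id = tokenizer.eos_token_id            # 2
model.config.pad_token_id = tokenizer.pad_token_id            # 1
model.config.max_length = 16
model.config.min_length = 4
model.config.no_repeat_ngram_size = 1
model.config.early_stopping = True
model.config.length_penalty = 2.0
model.config.repetition_penalty = 2.0
model.config.num_beams = 4

The short max_length (16) and aggressive repetition penalties suggest a short-sequence generation task such as title or headline generation.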
last-checkpoint/config.json
ADDED
@@ -0,0 +1,175 @@
+{
+  "architectures": [
+    "EncoderDecoderModel"
+  ],
+  "decoder": {
+    "_name_or_path": "bertin-project/bertin-roberta-base-spanish",
+    "add_cross_attention": true,
+    "architectures": [
+      "RobertaForMaskedLM"
+    ],
+    "attention_probs_dropout_prob": 0.1,
+    "bad_words_ids": null,
+    "bos_token_id": 0,
+    "chunk_size_feed_forward": 0,
+    "classifier_dropout": null,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": 2,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "gradient_checkpointing": false,
+    "hidden_act": "gelu",
+    "hidden_dropout_prob": 0.1,
+    "hidden_size": 768,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "initializer_range": 0.02,
+    "intermediate_size": 3072,
+    "is_decoder": true,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "layer_norm_eps": 1e-05,
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "max_position_embeddings": 514,
+    "min_length": 0,
+    "model_type": "roberta",
+    "no_repeat_ngram_size": 0,
+    "num_attention_heads": 12,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_hidden_layers": 12,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": 1,
+    "position_embedding_type": "absolute",
+    "prefix": null,
+    "problem_type": null,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sep_token_id": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": null,
+    "torchscript": false,
+    "transformers_version": "4.12.3",
+    "type_vocab_size": 1,
+    "use_bfloat16": false,
+    "use_cache": true,
+    "vocab_size": 50265
+  },
+  "decoder_start_token_id": 0,
+  "early_stopping": true,
+  "encoder": {
+    "_name_or_path": "bertin-project/bertin-roberta-base-spanish",
+    "add_cross_attention": false,
+    "architectures": [
+      "RobertaForMaskedLM"
+    ],
+    "attention_probs_dropout_prob": 0.1,
+    "bad_words_ids": null,
+    "bos_token_id": 0,
+    "chunk_size_feed_forward": 0,
+    "classifier_dropout": null,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": 2,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "gradient_checkpointing": false,
+    "hidden_act": "gelu",
+    "hidden_dropout_prob": 0.1,
+    "hidden_size": 768,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "initializer_range": 0.02,
+    "intermediate_size": 3072,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "layer_norm_eps": 1e-05,
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "max_position_embeddings": 514,
+    "min_length": 0,
+    "model_type": "roberta",
+    "no_repeat_ngram_size": 0,
+    "num_attention_heads": 12,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_hidden_layers": 12,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": 1,
+    "position_embedding_type": "absolute",
+    "prefix": null,
+    "problem_type": null,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sep_token_id": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": null,
+    "torchscript": false,
+    "transformers_version": "4.12.3",
+    "type_vocab_size": 1,
+    "use_bfloat16": false,
+    "use_cache": true,
+    "vocab_size": 50265
+  },
+  "eos_token_id": 2,
+  "is_encoder_decoder": true,
+  "length_penalty": 2.0,
+  "max_length": 16,
+  "min_length": 4,
+  "model_type": "encoder-decoder",
+  "no_repeat_ngram_size": 1,
+  "num_beams": 4,
+  "pad_token_id": 1,
+  "repetition_penalty": 2.0,
+  "tie_encoder_decoder": true,
+  "torch_dtype": "float32",
+  "transformers_version": null,
+  "vocab_size": 50265
+}
last-checkpoint/merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
last-checkpoint/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37a83631a6eaa5b644deaa6a74a747be618c044273259915edd1192749a27d61
+size 1224701433
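
The three lines above are a Git LFS pointer: the actual optimizer.pt blob is stored out of band and identified by its sha256 digest and byte size. A small standard-library sketch (the helper name is my own) for checking a downloaded blob against its pointer:

# Sketch: verify a downloaded file against the sha256/size in its LFS pointer.
import hashlib
from pathlib import Path

def verify_lfs(file_path: str, expected_oid: str, expected_size: int) -> bool:
    p = Path(file_path)
    if p.stat().st_size != expected_size:   # cheap check first
        return False
    h = hashlib.sha256()
    with p.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            h.update(chunk)
    return h.hexdigest() == expected_oid

# Against the optimizer.pt pointer above:
ok = verify_lfs(
    "last-checkpoint/optimizer.pt",
    "37a83631a6eaa5b644deaa6a74a747be618c044273259915edd1192749a27d61",
    1224701433,
)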
last-checkpoint/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32d379000491104e12d8f8f5c0c1846cb78fd70566cee9ae37abc8c17c1e8b7c
+size 614807605
last-checkpoint/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff810b09080fb0cb75ce66b92963bff3282ee58b84499e6e35f8db39805af09e
+size 14503
last-checkpoint/scaler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f695270a3a57ed6287a329cf2221f3dee69b2e52349672fb4e441788696ce16
+size 559
last-checkpoint/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:44a93e9dcf19073791e7fbe2b56d05d5698693fc00638ccb46493089a7f97298
+size 623
last-checkpoint/special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
last-checkpoint/tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
last-checkpoint/tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "add_prefix_space": false, "errors": "replace", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": "<mask>", "special_tokens_map_file": null, "name_or_path": "bertin-project/bertin-roberta-base-spanish", "tokenizer_class": "RobertaTokenizer"}
last-checkpoint/trainer_state.json
ADDED
@@ -0,0 +1,81 @@
+{
+  "best_metric": 0.0001521383092040196,
+  "best_model_checkpoint": "/opt/ml/model/checkpoint-4627",
+  "epoch": 1.0,
+  "global_step": 4627,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.11,
+      "learning_rate": 2.976e-05,
+      "loss": 3.0635,
+      "step": 500
+    },
+    {
+      "epoch": 0.22,
+      "learning_rate": 2.9674896220231594e-05,
+      "loss": 0.1023,
+      "step": 1000
+    },
+    {
+      "epoch": 0.32,
+      "learning_rate": 2.9347170635787637e-05,
+      "loss": 0.0253,
+      "step": 1500
+    },
+    {
+      "epoch": 0.43,
+      "learning_rate": 2.9019445051343676e-05,
+      "loss": 0.012,
+      "step": 2000
+    },
+    {
+      "epoch": 0.54,
+      "learning_rate": 2.8691719466899716e-05,
+      "loss": 0.007,
+      "step": 2500
+    },
+    {
+      "epoch": 0.65,
+      "learning_rate": 2.8363993882455756e-05,
+      "loss": 0.0049,
+      "step": 3000
+    },
+    {
+      "epoch": 0.76,
+      "learning_rate": 2.80362682980118e-05,
+      "loss": 0.0042,
+      "step": 3500
+    },
+    {
+      "epoch": 0.86,
+      "learning_rate": 2.770854271356784e-05,
+      "loss": 0.0034,
+      "step": 4000
+    },
+    {
+      "epoch": 0.97,
+      "learning_rate": 2.738081712912388e-05,
+      "loss": 0.0027,
+      "step": 4500
+    },
+    {
+      "epoch": 1.0,
+      "eval_loss": 0.0001521383092040196,
+      "eval_rouge2_fmeasure": 0.0001,
+      "eval_rouge2_precision": 0.0001,
+      "eval_rouge2_recall": 0.0002,
+      "eval_runtime": 1140.1653,
+      "eval_samples_per_second": 5.646,
+      "eval_steps_per_second": 0.706,
+      "step": 4627
+    }
+  ],
+  "max_steps": 46270,
+  "num_train_epochs": 10,
+  "total_flos": 1.46662238093616e+16,
+  "trial_name": null,
+  "trial_params": null
+}
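
trainer_state.json records the log history and the best checkpoint so far, which is what lets a run be inspected or resumed (Trainer.train accepts a resume_from_checkpoint argument pointing at a directory like last-checkpoint/). A minimal sketch for reading the saved state, assuming the file sits at the relative path below:

# Sketch: inspect the saved trainer state, e.g. to trace the loss curve.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["epoch"], state["global_step"])  # 1.0 4627
for entry in state["log_history"]:
    if "loss" in entry:  # training logs; eval entries carry eval_* keys instead
        print(entry["step"], entry["loss"], entry["learning_rate"])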
last-checkpoint/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4f49d9f13eb265fbabb362db55f0940178eabce78a3c984abeafa7497b5d3cc
+size 3119
last-checkpoint/vocab.json
ADDED
The diff for this file is too large to render.
See raw diff
merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32d379000491104e12d8f8f5c0c1846cb78fd70566cee9ae37abc8c17c1e8b7c
+size 614807605
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "add_prefix_space": false, "errors": "replace", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": "<mask>", "special_tokens_map_file": null, "name_or_path": "bertin-project/bertin-roberta-base-spanish", "tokenizer_class": "RobertaTokenizer"}
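
With vocab.json, merges.txt, and the two tokenizer config files above in place, the tokenizer loads directly from this repo. A quick sketch, assuming the files sit in the current directory (pass the Hub repo id instead when loading remotely):

# Sketch: load the saved RobertaTokenizer and sanity-check its special tokens
# against special_tokens_map.json above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")  # or the Hub repo id
assert tokenizer.bos_token == "<s>" and tokenizer.eos_token == "</s>"
print(tokenizer("Hola mundo").input_ids)  # e.g. [0, ..., 2]: bos ... eos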
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4f49d9f13eb265fbabb362db55f0940178eabce78a3c984abeafa7497b5d3cc
+size 3119
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff