sanchit-gandhi HF staff committed on
Commit 758a5a8
1 Parent(s): c787ad4

Training in progress, step 500

Files changed (40)
  1. .gitattributes +1 -0
  2. .gitignore +1 -0
  3. config.json +285 -0
  4. merges.txt +0 -0
  5. preprocessor_config.json +9 -0
  6. pytorch_model.bin +3 -0
  7. run.sh +1 -1
  8. run_xtreme_s.py +27 -2
  9. runs/May03_12-39-14_sanchit--v100/1651584502.8814862/events.out.tfevents.1651584502.sanchit--v100.38894.1 +3 -0
  10. runs/May03_12-39-14_sanchit--v100/events.out.tfevents.1651584502.sanchit--v100.38894.0 +3 -0
  11. runs/May03_15-33-55_sanchit--v100/1651592261.409201/events.out.tfevents.1651592261.sanchit--v100.40140.1 +3 -0
  12. runs/May03_15-33-55_sanchit--v100/events.out.tfevents.1651592261.sanchit--v100.40140.0 +3 -0
  13. runs/May03_15-58-13_sanchit--v100/1651596054.9030492/events.out.tfevents.1651596054.sanchit--v100.40903.1 +3 -0
  14. runs/May03_15-58-13_sanchit--v100/events.out.tfevents.1651596054.sanchit--v100.40903.0 +3 -0
  15. runs/May03_17-15-22_sanchit--v100/1651598399.7088904/events.out.tfevents.1651598399.sanchit--v100.42111.1 +3 -0
  16. runs/May03_17-15-22_sanchit--v100/events.out.tfevents.1651598399.sanchit--v100.42111.0 +3 -0
  17. special_tokens_map.json +1 -0
  18. sweep.yaml +2 -1
  19. tokenizer.json +0 -0
  20. tokenizer_config.json +1 -0
  21. training_args.bin +3 -0
  22. vocab.json +0 -0
  23. wandb/debug-cli.log +155 -0
  24. wandb/debug-internal.log +1 -0
  25. wandb/debug.log +1 -0
  26. wandb/latest-run +1 -0
  27. wandb/run-20220503_171959-a6039xud/files/config.yaml +0 -0
  28. wandb/run-20220503_171959-a6039xud/files/output.log +0 -0
  29. wandb/run-20220503_171959-a6039xud/files/requirements.txt +287 -0
  30. wandb/run-20220503_171959-a6039xud/files/wandb-metadata.json +57 -0
  31. wandb/run-20220503_171959-a6039xud/files/wandb-summary.json +0 -0
  32. wandb/run-20220503_171959-a6039xud/logs/debug-internal.log +0 -0
  33. wandb/run-20220503_171959-a6039xud/logs/debug.log +28 -0
  34. wandb/run-20220503_171959-a6039xud/run-a6039xud.wandb +3 -0
  35. wandb/sweep-39ci3gkf/config-a6039xud.yaml +44 -0
  36. wandb/sweep-y3ak427l/config-irggvkgd.yaml +44 -0
  37. wandb/sweep-y3ak427l/config-ldsojzle.yaml +44 -0
  38. wandb/sweep-y3ak427l/config-qv3vjr6j.yaml +44 -0
  39. wandb/sweep-y3ak427l/config-vz5ppd75.yaml +44 -0
  40. wandb/sweep-y3ak427l/config-xur584bd.yaml +44 -0
.gitattributes CHANGED
@@ -26,3 +26,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ wandb/run-20220503_171959-a6039xud/run-a6039xud.wandb filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
config.json ADDED
@@ -0,0 +1,285 @@
+ {
+ "_name_or_path": "./",
+ "architectures": [
+ "SpeechEncoderDecoderModel"
+ ],
+ "decoder": {
+ "_name_or_path": "facebook/bart-large",
+ "activation_dropout": 0.1,
+ "activation_function": "gelu",
+ "add_bias_logits": false,
+ "add_cross_attention": true,
+ "add_final_layer_norm": false,
+ "architectures": [
+ "BartModel"
+ ],
+ "attention_dropout": 0.1,
+ "bad_words_ids": null,
+ "bos_token_id": 0,
+ "chunk_size_feed_forward": 0,
+ "classif_dropout": 0.1,
+ "classifier_dropout": 0.0,
+ "cross_attention_hidden_size": null,
+ "d_model": 1024,
+ "decoder_attention_heads": 16,
+ "decoder_ffn_dim": 4096,
+ "decoder_layerdrop": 0.0,
+ "decoder_layers": 12,
+ "decoder_start_token_id": 2,
+ "diversity_penalty": 0.0,
+ "do_sample": false,
+ "dropout": 0.1,
+ "early_stopping": true,
+ "encoder_attention_heads": 16,
+ "encoder_ffn_dim": 4096,
+ "encoder_layerdrop": 0.0,
+ "encoder_layers": 12,
+ "encoder_no_repeat_ngram_size": 0,
+ "eos_token_id": 2,
+ "exponential_decay_length_penalty": null,
+ "finetuning_task": null,
+ "forced_bos_token_id": 0,
+ "forced_eos_token_id": 2,
+ "gradient_checkpointing": false,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1",
+ "2": "LABEL_2"
+ },
+ "init_std": 0.02,
+ "is_decoder": true,
+ "is_encoder_decoder": false,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1,
+ "LABEL_2": 2
+ },
+ "length_penalty": 1.0,
+ "max_length": 20,
+ "max_position_embeddings": 1024,
+ "min_length": 0,
+ "model_type": "bart",
+ "no_repeat_ngram_size": 3,
+ "normalize_before": false,
+ "num_beam_groups": 1,
+ "num_beams": 4,
+ "num_hidden_layers": 12,
+ "num_return_sequences": 1,
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "output_scores": false,
+ "pad_token_id": 1,
+ "prefix": null,
+ "problem_type": null,
+ "pruned_heads": {},
+ "remove_invalid_values": false,
+ "repetition_penalty": 1.0,
+ "return_dict": true,
+ "return_dict_in_generate": false,
+ "scale_embedding": false,
+ "sep_token_id": null,
+ "task_specific_params": {
+ "summarization": {
+ "length_penalty": 1.0,
+ "max_length": 128,
+ "min_length": 12,
+ "num_beams": 4
+ },
+ "summarization_cnn": {
+ "length_penalty": 2.0,
+ "max_length": 142,
+ "min_length": 56,
+ "num_beams": 4
+ },
+ "summarization_xsum": {
+ "length_penalty": 1.0,
+ "max_length": 62,
+ "min_length": 11,
+ "num_beams": 6
+ }
+ },
+ "temperature": 1.0,
+ "tie_encoder_decoder": false,
+ "tie_word_embeddings": true,
+ "tokenizer_class": null,
+ "top_k": 50,
+ "top_p": 1.0,
+ "torch_dtype": null,
+ "torchscript": false,
+ "transformers_version": "4.19.0.dev0",
+ "typical_p": 1.0,
+ "use_bfloat16": false,
+ "use_cache": true,
+ "vocab_size": 50265
+ },
+ "decoder_start_token_id": 0,
+ "encoder": {
+ "_name_or_path": "facebook/wav2vec2-xls-r-300m",
+ "activation_dropout": 0.0,
+ "adapter_kernel_size": 3,
+ "adapter_stride": 2,
+ "add_adapter": true,
+ "add_cross_attention": false,
+ "apply_spec_augment": true,
+ "architectures": [
+ "Wav2Vec2ForPreTraining"
+ ],
+ "attention_dropout": 0.1,
+ "bad_words_ids": null,
+ "bos_token_id": 1,
+ "chunk_size_feed_forward": 0,
+ "classifier_proj_size": 256,
+ "codevector_dim": 768,
+ "contrastive_logits_temperature": 0.1,
+ "conv_bias": true,
+ "conv_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512
+ ],
+ "conv_kernel": [
+ 10,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2
+ ],
+ "conv_stride": [
+ 5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2
+ ],
+ "cross_attention_hidden_size": null,
+ "ctc_loss_reduction": "sum",
+ "ctc_zero_infinity": false,
+ "decoder_start_token_id": null,
+ "diversity_loss_weight": 0.1,
+ "diversity_penalty": 0.0,
+ "do_sample": false,
+ "do_stable_layer_norm": true,
+ "early_stopping": false,
+ "encoder_no_repeat_ngram_size": 0,
+ "eos_token_id": 2,
+ "exponential_decay_length_penalty": null,
+ "feat_extract_activation": "gelu",
+ "feat_extract_dropout": 0.0,
+ "feat_extract_norm": "layer",
+ "feat_proj_dropout": 0.0,
+ "feat_quantizer_dropout": 0.0,
+ "final_dropout": 0.0,
+ "finetuning_task": null,
+ "forced_bos_token_id": null,
+ "forced_eos_token_id": null,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout": 0.06862889720223829,
+ "hidden_size": 1024,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "is_decoder": false,
+ "is_encoder_decoder": false,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1
+ },
+ "layer_norm_eps": 1e-05,
+ "layerdrop": 0.0,
+ "length_penalty": 1.0,
+ "mask_feature_length": 10,
+ "mask_feature_min_masks": 0,
+ "mask_feature_prob": 0.0,
+ "mask_time_length": 10,
+ "mask_time_min_masks": 2,
+ "mask_time_prob": 0.1,
+ "max_length": 20,
+ "min_length": 0,
+ "model_type": "wav2vec2",
+ "no_repeat_ngram_size": 0,
+ "num_adapter_layers": 3,
+ "num_attention_heads": 16,
+ "num_beam_groups": 1,
+ "num_beams": 1,
+ "num_codevector_groups": 2,
+ "num_codevectors_per_group": 320,
+ "num_conv_pos_embedding_groups": 16,
+ "num_conv_pos_embeddings": 128,
+ "num_feat_extract_layers": 7,
+ "num_hidden_layers": 24,
+ "num_negatives": 100,
+ "num_return_sequences": 1,
+ "output_attentions": false,
+ "output_hidden_size": 1024,
+ "output_hidden_states": false,
+ "output_scores": false,
+ "pad_token_id": 0,
+ "prefix": null,
+ "problem_type": null,
+ "proj_codevector_dim": 768,
+ "pruned_heads": {},
+ "remove_invalid_values": false,
+ "repetition_penalty": 1.0,
+ "return_dict": true,
+ "return_dict_in_generate": false,
+ "sep_token_id": null,
+ "task_specific_params": null,
+ "tdnn_dilation": [
+ 1,
+ 2,
+ 3,
+ 1,
+ 1
+ ],
+ "tdnn_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 1500
+ ],
+ "tdnn_kernel": [
+ 5,
+ 3,
+ 3,
+ 1,
+ 1
+ ],
+ "temperature": 1.0,
+ "tie_encoder_decoder": false,
+ "tie_word_embeddings": true,
+ "tokenizer_class": null,
+ "top_k": 50,
+ "top_p": 1.0,
+ "torch_dtype": "float32",
+ "torchscript": false,
+ "transformers_version": "4.19.0.dev0",
+ "typical_p": 1.0,
+ "use_bfloat16": false,
+ "use_weighted_layer_sum": false,
+ "vocab_size": 32,
+ "xvector_output_dim": 512
+ },
+ "eos_token_id": 2,
+ "is_encoder_decoder": true,
+ "max_length": 40,
+ "model_type": "speech-encoder-decoder",
+ "pad_token_id": 1,
+ "processor_class": "Wav2Vec2Processor",
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": null,
+ "use_cache": false
+ }
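The nested config above fully describes the warm-started model: an XLS-R (wav2vec2) encoder with a convolutional adapter feeding a BART decoder. A minimal inspection sketch, assuming a transformers install new enough for speech-encoder-decoder configs (the repo records 4.19.0.dev0) and the repo root as the load path:

from transformers import AutoConfig

# Load the composite config; AutoConfig resolves "speech-encoder-decoder"
# to a SpeechEncoderDecoderConfig with nested encoder/decoder sub-configs.
config = AutoConfig.from_pretrained("./")
print(config.model_type)          # speech-encoder-decoder
print(config.encoder.model_type)  # wav2vec2 (facebook/wav2vec2-xls-r-300m)
print(config.decoder.model_type)  # bart (facebook/bart-large)
print(config.max_length, config.pad_token_id)  # 40, 1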
merges.txt ADDED
The diff for this file is too large to render. See raw diff
preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "do_normalize": true,
+ "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+ "feature_size": 1,
+ "padding_side": "right",
+ "padding_value": 0,
+ "return_attention_mask": true,
+ "sampling_rate": 16000
+ }
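These are the stock Wav2Vec2 feature-extractor settings: raw 16 kHz mono waveforms, zero-mean/unit-variance normalized, right-padded with an attention mask. A sketch of building the same extractor by hand, assuming the JSON above is the complete set of non-default fields:

from transformers import Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor(
    feature_size=1,             # raw waveform: one float per sample
    sampling_rate=16000,
    padding_value=0.0,
    do_normalize=True,
    return_attention_mask=True,
)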
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:008e06044c5250309092dc773bd06898ae5bf91e5bddf24e216307b3e34cbee0
+ size 2353867057
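The ~2.35 GB checkpoint lives in Git LFS, so the repo only tracks this three-line pointer. A small sketch to verify a downloaded copy against the recorded oid (hypothetical helper, standard library only):

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    """Stream the file so the 2.35 GB checkpoint never sits in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

assert sha256_of("pytorch_model.bin") == (
    "008e06044c5250309092dc773bd06898ae5bf91e5bddf24e216307b3e34cbee0"
)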
run.sh CHANGED
@@ -1,5 +1,5 @@
  #!/usr/bin/env bash
- CUDA_VISIBLE_DEVICES=1 python run_xtreme_s.py \
+ CUDA_VISIBLE_DEVICES=0 python run_xtreme_s.py \
  --model_name_or_path="./" \
  --task="covost2" \
  --language="fr.en" \
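The only change here is GPU pinning: the launcher now targets device 0 of the two V100s on this host (see wandb-metadata.json below). A quick sanity-check sketch, assuming PyTorch is installed and the variable is exported as in run.sh:

import os
os.environ.setdefault("CUDA_VISIBLE_DEVICES", "0")  # must be set before CUDA initialises

import torch

print(torch.cuda.device_count())      # expected: 1 (only device 0 is visible)
print(torch.cuda.get_device_name(0))  # expected: Tesla V100-SXM2-16GB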
run_xtreme_s.py CHANGED
@@ -41,6 +41,7 @@ from transformers import (
  HfArgumentParser,
  Seq2SeqTrainer,
  Seq2SeqTrainingArguments,
+ SpeechEncoderDecoderModel,
  Trainer,
  set_seed,
  )
@@ -547,9 +548,9 @@ def main():

  def remove_special_characters(batch):
  if chars_to_ignore_regex is not None:
- batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[target_column_name]).lower() + " "
+ batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[target_column_name]).lower()
  else:
- batch["target_text"] = batch[target_column_name].lower() + " "
+ batch["target_text"] = batch[target_column_name].lower()
  return batch

  if is_text_target:
@@ -565,6 +566,30 @@ def main():
  unk_token = data_args.unk_token
  pad_token = data_args.pad_token

+
+ encoder_id = "facebook/wav2vec2-xls-r-300m"
+ decoder_id = "facebook/bart-large"
+
+ model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(encoder_id, decoder_id, encoder_add_adapter=True)
+ model.config.encoder.feat_proj_dropout = 0.0
+ model.config.encoder.final_dropout = 0.0
+ model.config.encoder.mask_time_prob = 0.1
+ model.config.decoder_start_token_id = model.decoder.config.bos_token_id
+ model.config.pad_token_id = model.decoder.config.pad_token_id
+ model.config.eos_token_id = model.decoder.config.eos_token_id
+ model.config.max_length = 40
+ model.config.num_beams = 1
+ model.config.encoder.layerdrop = 0.0
+ model.config.use_cache = False
+ model.config.processor_class = "Wav2Vec2Processor"
+
+ model.save_pretrained(model_args.model_name_or_path)
+
+ feature_extractor = AutoFeatureExtractor.from_pretrained(encoder_id)
+ feature_extractor.save_pretrained(model_args.model_name_or_path)
+ tokenizer = AutoTokenizer.from_pretrained(decoder_id)
+ tokenizer.save_pretrained(model_args.model_name_or_path)
+
  # 3. Next, let's load the config as we might need it to create
  # the tokenizer
  config = AutoConfig.from_pretrained(
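The new block warm-starts the seq2seq model from the two pretrained checkpoints and saves it, together with the feature extractor and tokenizer, into the repo root before training. A minimal inference sketch against the saved checkpoint, assuming a 1-D float array `audio` holding a 16 kHz mono waveform:

import torch
from transformers import AutoTokenizer, SpeechEncoderDecoderModel, Wav2Vec2FeatureExtractor

model = SpeechEncoderDecoderModel.from_pretrained("./")
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("./")
tokenizer = AutoTokenizer.from_pretrained("./")

# `audio` is assumed to exist: a 16 kHz mono waveform as a float array.
inputs = feature_extractor(audio, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    generated_ids = model.generate(inputs.input_values, max_length=40)
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))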
runs/May03_12-39-14_sanchit--v100/1651584502.8814862/events.out.tfevents.1651584502.sanchit--v100.38894.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a5e50bb326f116678bb0d2af94ee25e92d82ce3268121a68b288dd930e13470
+ size 5184
runs/May03_12-39-14_sanchit--v100/events.out.tfevents.1651584502.sanchit--v100.38894.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:65a28e37ffdeeb562c734e7a4815296918b93b818763a73a70ab20d29627105c
+ size 10592
runs/May03_15-33-55_sanchit--v100/1651592261.409201/events.out.tfevents.1651592261.sanchit--v100.40140.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7c3651206b236f0c31a3ff08bbeb2416a7fde03f54ccb583a7df7f8256f3fd29
+ size 5184
runs/May03_15-33-55_sanchit--v100/events.out.tfevents.1651592261.sanchit--v100.40140.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c965accf9ea2be03062bfecc4c45cdb54eb6b4b444ea37a2c9533c8db4fed044
+ size 87941
runs/May03_15-58-13_sanchit--v100/1651596054.9030492/events.out.tfevents.1651596054.sanchit--v100.40903.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76e39c8c39b9be3c4531fd72d066671f7365b550e8079fee72b6060a36be59f6
+ size 5184
runs/May03_15-58-13_sanchit--v100/events.out.tfevents.1651596054.sanchit--v100.40903.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:410298c8949435d86a040fb00c0e0a7bde2f0445544abeadadae587ba569bfba
+ size 73654
runs/May03_17-15-22_sanchit--v100/1651598399.7088904/events.out.tfevents.1651598399.sanchit--v100.42111.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61b94cd769015be4bbc81b78b8074c963d947a90c91e7cf517abaad8c3ff3078
+ size 5184
runs/May03_17-15-22_sanchit--v100/events.out.tfevents.1651598399.sanchit--v100.42111.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:031a66a2aa473ef2b64f357fd6f9159fb836bed0b5c5036f7eb9c55c533a7ad7
+ size 88290
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
sweep.yaml CHANGED
@@ -4,6 +4,7 @@ command:
  - --overwrite_output_dir
  - --freeze_feature_encoder
  - --gradient_checkpointing
+ - --predict_with_generate
  - --fp16
  - --group_by_length
  - --do_train
@@ -26,7 +27,7 @@ parameters:
  eval_split_name:
  value: test
  output_dir:
- value: ./output_dir
+ value: ./
  num_train_epochs:
  value: 3
  per_device_train_batch_size:
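Two sweep fixes: --predict_with_generate is now passed so BLEU can be computed from generated translations, and the output directory moves to the repo root so checkpoints are pushed to the Hub. A sketch of registering this sweep from Python rather than the wandb CLI, assuming the project name recorded in the logs below:

import yaml
import wandb

with open("sweep.yaml") as f:
    sweep_config = yaml.safe_load(f)

# Returns a sweep id; runs are then picked up with `wandb agent <entity>/<project>/<id>`.
sweep_id = wandb.sweep(sweep_config, project="xtreme_s_xlsr_2_bart_covost2_fr_en")
print(sweep_id)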
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"errors": "replace", "bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "<unk>", "pad_token": "<pad>", "mask_token": "<mask>", "add_prefix_space": false, "trim_offsets": true, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "./", "tokenizer_class": "BartTokenizer"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94e05d5641aa58db52aee32293e970801889668d13a72d205ffa6fe324861d4b
+ size 3247
vocab.json ADDED
The diff for this file is too large to render. See raw diff
wandb/debug-cli.log ADDED
@@ -0,0 +1,155 @@
+ 2022-05-03 17:09:46 INFO Running runs: []
+ 2022-05-03 17:09:46 INFO Agent received command: run
+ 2022-05-03 17:09:46 INFO Agent starting run with config:
+ eval_split_name: test
+ eval_steps: 500
+ evaluation_strategy: steps
+ generation_max_length: 40
+ generation_num_beams: 1
+ gradient_accumulation_steps: 8
+ greater_is_better: True
+ hidden_dropout: 0.036619638921206475
+ language: fr.en
+ learning_rate: 0.00024391819705381628
+ logging_steps: 1
+ max_duration_in_seconds: 20
+ metric_for_best_model: bleu
+ model_name_or_path: ./
+ num_train_epochs: 3
+ output_dir: ./output_dir
+ per_device_eval_batch_size: 4
+ per_device_train_batch_size: 4
+ save_steps: 500
+ task: covost2
+ warmup_steps: 500
+ 2022-05-03 17:09:46 INFO About to run command: python3 run_xtreme_s.py --overwrite_output_dir --freeze_feature_encoder --gradient_checkpointing --predict_with_generate --fp16 --group_by_length --do_train --do_eval --load_best_model_at_end --push_to_hub --use_auth_token --eval_split_name=test --eval_steps=500 --evaluation_strategy=steps --generation_max_length=40 --generation_num_beams=1 --gradient_accumulation_steps=8 --greater_is_better=True --hidden_dropout=0.036619638921206475 --language=fr.en --learning_rate=0.00024391819705381628 --logging_steps=1 --max_duration_in_seconds=20 --metric_for_best_model=bleu --model_name_or_path=./ --num_train_epochs=3 --output_dir=./output_dir --per_device_eval_batch_size=4 --per_device_train_batch_size=4 --save_steps=500 --task=covost2 --warmup_steps=500
+ 2022-05-03 17:09:51 INFO Running runs: ['vz5ppd75']
+ 2022-05-03 17:10:26 INFO Cleaning up finished run: vz5ppd75
+ 2022-05-03 17:10:28 INFO Agent received command: run
+ 2022-05-03 17:10:28 INFO Agent starting run with config:
+ eval_split_name: test
+ eval_steps: 500
+ evaluation_strategy: steps
+ generation_max_length: 40
+ generation_num_beams: 1
+ gradient_accumulation_steps: 8
+ greater_is_better: True
+ hidden_dropout: 0.1875094322808032
+ language: fr.en
+ learning_rate: 0.00024438201183496223
+ logging_steps: 1
+ max_duration_in_seconds: 20
+ metric_for_best_model: bleu
+ model_name_or_path: ./
+ num_train_epochs: 3
+ output_dir: ./output_dir
+ per_device_eval_batch_size: 4
+ per_device_train_batch_size: 4
+ save_steps: 500
+ task: covost2
+ warmup_steps: 500
+ 2022-05-03 17:10:36 INFO Running runs: []
+ 2022-05-03 17:10:36 INFO Agent received command: run
+ 2022-05-03 17:10:36 INFO Agent starting run with config:
+ eval_split_name: test
+ eval_steps: 500
+ evaluation_strategy: steps
+ generation_max_length: 40
+ generation_num_beams: 1
+ gradient_accumulation_steps: 8
+ greater_is_better: True
+ hidden_dropout: 0.055722391000930585
+ language: fr.en
+ learning_rate: 0.0006457481677728278
+ logging_steps: 1
+ max_duration_in_seconds: 20
+ metric_for_best_model: bleu
+ model_name_or_path: ./
+ num_train_epochs: 3
+ output_dir: ./output_dir
+ per_device_eval_batch_size: 4
+ per_device_train_batch_size: 4
+ save_steps: 500
+ task: covost2
+ warmup_steps: 500
+ 2022-05-03 17:10:36 INFO About to run command: python3 run_xtreme_s.py --overwrite_output_dir --freeze_feature_encoder --gradient_checkpointing --predict_with_generate --fp16 --group_by_length --do_train --do_eval --load_best_model_at_end --push_to_hub --use_auth_token --eval_split_name=test --eval_steps=500 --evaluation_strategy=steps --generation_max_length=40 --generation_num_beams=1 --gradient_accumulation_steps=8 --greater_is_better=True --hidden_dropout=0.055722391000930585 --language=fr.en --learning_rate=0.0006457481677728278 --logging_steps=1 --max_duration_in_seconds=20 --metric_for_best_model=bleu --model_name_or_path=./ --num_train_epochs=3 --output_dir=./output_dir --per_device_eval_batch_size=4 --per_device_train_batch_size=4 --save_steps=500 --task=covost2 --warmup_steps=500
+ 2022-05-03 17:10:41 INFO Running runs: ['ldsojzle']
+ 2022-05-03 17:11:07 INFO Cleaning up finished run: ldsojzle
+ 2022-05-03 17:11:07 INFO Agent received command: run
+ 2022-05-03 17:11:07 INFO Agent starting run with config:
+ eval_split_name: test
+ eval_steps: 500
+ evaluation_strategy: steps
+ generation_max_length: 40
+ generation_num_beams: 1
+ gradient_accumulation_steps: 8
+ greater_is_better: True
+ hidden_dropout: 0.056807662149569525
+ language: fr.en
+ learning_rate: 0.0005558468401613797
+ logging_steps: 1
+ max_duration_in_seconds: 20
+ metric_for_best_model: bleu
+ model_name_or_path: ./
+ num_train_epochs: 3
+ output_dir: ./output_dir
+ per_device_eval_batch_size: 4
+ per_device_train_batch_size: 4
+ save_steps: 500
+ task: covost2
+ warmup_steps: 500
+ 2022-05-03 17:11:07 INFO About to run command: python3 run_xtreme_s.py --overwrite_output_dir --freeze_feature_encoder --gradient_checkpointing --predict_with_generate --fp16 --group_by_length --do_train --do_eval --load_best_model_at_end --push_to_hub --use_auth_token --eval_split_name=test --eval_steps=500 --evaluation_strategy=steps --generation_max_length=40 --generation_num_beams=1 --gradient_accumulation_steps=8 --greater_is_better=True --hidden_dropout=0.056807662149569525 --language=fr.en --learning_rate=0.0005558468401613797 --logging_steps=1 --max_duration_in_seconds=20 --metric_for_best_model=bleu --model_name_or_path=./ --num_train_epochs=3 --output_dir=./output_dir --per_device_eval_batch_size=4 --per_device_train_batch_size=4 --save_steps=500 --task=covost2 --warmup_steps=500
+ 2022-05-03 17:11:12 INFO Running runs: ['qv3vjr6j']
+ 2022-05-03 17:10:28 INFO About to run command: python3 run_xtreme_s.py --overwrite_output_dir --freeze_feature_encoder --gradient_checkpointing --predict_with_generate --fp16 --group_by_length --do_train --do_eval --load_best_model_at_end --push_to_hub --use_auth_token --eval_split_name=test --eval_steps=500 --evaluation_strategy=steps --generation_max_length=40 --generation_num_beams=1 --gradient_accumulation_steps=8 --greater_is_better=True --hidden_dropout=0.1875094322808032 --language=fr.en --learning_rate=0.00024438201183496223 --logging_steps=1 --max_duration_in_seconds=20 --metric_for_best_model=bleu --model_name_or_path=./ --num_train_epochs=3 --output_dir=./output_dir --per_device_eval_batch_size=4 --per_device_train_batch_size=4 --save_steps=500 --task=covost2 --warmup_steps=500
+ 2022-05-03 17:11:29 INFO Running runs: ['irggvkgd']
+ 2022-05-03 17:11:37 INFO Cleaning up finished run: qv3vjr6j
+ 2022-05-03 17:11:37 INFO Agent received command: run
+ 2022-05-03 17:11:37 INFO Agent starting run with config:
+ eval_split_name: test
+ eval_steps: 500
+ evaluation_strategy: steps
+ generation_max_length: 40
+ generation_num_beams: 1
+ gradient_accumulation_steps: 8
+ greater_is_better: True
+ hidden_dropout: 0.03413483050532159
+ language: fr.en
+ learning_rate: 0.00022086866790135088
+ logging_steps: 1
+ max_duration_in_seconds: 20
+ metric_for_best_model: bleu
+ model_name_or_path: ./
+ num_train_epochs: 3
+ output_dir: ./output_dir
+ per_device_eval_batch_size: 4
+ per_device_train_batch_size: 4
+ save_steps: 500
+ task: covost2
+ warmup_steps: 500
+ 2022-05-03 17:11:37 INFO About to run command: python3 run_xtreme_s.py --overwrite_output_dir --freeze_feature_encoder --gradient_checkpointing --predict_with_generate --fp16 --group_by_length --do_train --do_eval --load_best_model_at_end --push_to_hub --use_auth_token --eval_split_name=test --eval_steps=500 --evaluation_strategy=steps --generation_max_length=40 --generation_num_beams=1 --gradient_accumulation_steps=8 --greater_is_better=True --hidden_dropout=0.03413483050532159 --language=fr.en --learning_rate=0.00022086866790135088 --logging_steps=1 --max_duration_in_seconds=20 --metric_for_best_model=bleu --model_name_or_path=./ --num_train_epochs=3 --output_dir=./output_dir --per_device_eval_batch_size=4 --per_device_train_batch_size=4 --save_steps=500 --task=covost2 --warmup_steps=500
+ 2022-05-03 17:15:19 INFO Running runs: []
+ 2022-05-03 17:15:19 INFO Agent received command: run
+ 2022-05-03 17:15:19 INFO Agent starting run with config:
+ eval_split_name: test
+ eval_steps: 500
+ evaluation_strategy: steps
+ generation_max_length: 40
+ generation_num_beams: 1
+ gradient_accumulation_steps: 8
+ greater_is_better: True
+ hidden_dropout: 0.06862889720223829
+ language: fr.en
+ learning_rate: 0.0004848089062550082
+ logging_steps: 1
+ max_duration_in_seconds: 20
+ metric_for_best_model: bleu
+ model_name_or_path: ./
+ num_train_epochs: 3
+ output_dir: ./
+ per_device_eval_batch_size: 4
+ per_device_train_batch_size: 4
+ save_steps: 500
+ task: covost2
+ warmup_steps: 500
+ 2022-05-03 17:15:19 INFO About to run command: python3 run_xtreme_s.py --overwrite_output_dir --freeze_feature_encoder --gradient_checkpointing --predict_with_generate --fp16 --group_by_length --do_train --do_eval --load_best_model_at_end --push_to_hub --use_auth_token --eval_split_name=test --eval_steps=500 --evaluation_strategy=steps --generation_max_length=40 --generation_num_beams=1 --gradient_accumulation_steps=8 --greater_is_better=True --hidden_dropout=0.06862889720223829 --language=fr.en --learning_rate=0.0004848089062550082 --logging_steps=1 --max_duration_in_seconds=20 --metric_for_best_model=bleu --model_name_or_path=./ --num_train_epochs=3 --output_dir=./ --per_device_eval_batch_size=4 --per_device_train_batch_size=4 --save_steps=500 --task=covost2 --warmup_steps=500
+ 2022-05-03 17:15:24 INFO Running runs: ['a6039xud']
wandb/debug-internal.log ADDED
@@ -0,0 +1 @@
+ run-20220503_171959-a6039xud/logs/debug-internal.log
wandb/debug.log ADDED
@@ -0,0 +1 @@
+ run-20220503_171959-a6039xud/logs/debug.log
wandb/latest-run ADDED
@@ -0,0 +1 @@
+ run-20220503_171959-a6039xud
wandb/run-20220503_171959-a6039xud/files/config.yaml ADDED
The diff for this file is too large to render. See raw diff
wandb/run-20220503_171959-a6039xud/files/output.log ADDED
The diff for this file is too large to render. See raw diff
wandb/run-20220503_171959-a6039xud/files/requirements.txt ADDED
@@ -0,0 +1,287 @@
+ absl-py==1.0.0
+ aiohttp==3.8.1
+ aiosignal==1.2.0
+ alembic==1.7.7
+ anyio==3.5.0
+ appdirs==1.4.4
+ apscheduler==3.9.1
+ argon2-cffi-bindings==21.2.0
+ argon2-cffi==21.3.0
+ arrow==1.2.2
+ asttokens==2.0.5
+ astunparse==1.6.3
+ async-timeout==4.0.2
+ attrs==21.4.0
+ audioread==2.1.9
+ autopage==0.5.0
+ babel==2.9.1
+ backcall==0.2.0
+ backoff==1.11.1
+ binaryornot==0.4.4
+ bitsandbytes-cuda113==0.26.0
+ black==22.1.0
+ bleach==4.1.0
+ boto3==1.16.34
+ botocore==1.19.63
+ brotli==1.0.9
+ cachetools==5.0.0
+ certifi==2021.10.8
+ cffi==1.15.0
+ chardet==4.0.0
+ charset-normalizer==2.0.11
+ chex==0.1.0
+ click==8.0.3
+ cliff==3.10.1
+ clldutils==3.10.1
+ cmaes==0.8.2
+ cmd2==2.4.0
+ codecarbon==1.2.0
+ colorlog==6.6.0
+ cookiecutter==1.7.3
+ cryptography==36.0.2
+ csvw==1.11.0
+ cycler==0.11.0
+ dash-bootstrap-components==1.1.0
+ dash-core-components==2.0.0
+ dash-html-components==2.0.0
+ dash-table==5.0.0
+ dash==2.3.1
+ datasets==2.1.1.dev0
+ debugpy==1.5.1
+ decorator==5.1.1
+ defusedxml==0.7.1
+ deprecated==1.2.13
+ dill==0.3.4
+ dlinfo==1.2.1
+ dm-tree==0.1.6
+ docker-pycreds==0.4.0
+ docker==4.4.4
+ entrypoints==0.4
+ execnet==1.9.0
+ executing==0.8.2
+ faiss-cpu==1.7.2
+ filelock==3.4.2
+ fire==0.4.0
+ flake8==4.0.1
+ flask-compress==1.11
+ flask==2.1.1
+ flatbuffers==1.12
+ flax==0.4.0
+ fonttools==4.29.1
+ frozenlist==1.3.0
+ fsspec==2022.1.0
+ fugashi==1.1.2
+ gast==0.5.3
+ gitdb==4.0.9
+ gitpython==3.1.18
+ google-auth-oauthlib==0.4.6
+ google-auth==2.6.0
+ google-pasta==0.2.0
+ greenlet==1.1.2
+ grpcio==1.43.0
+ h5py==3.6.0
+ hf-doc-builder==0.2.0
+ huggingface-hub==0.4.0
+ hypothesis==6.36.1
+ idna==3.3
+ importlib-metadata==4.10.1
+ iniconfig==1.1.1
+ ipadic==1.0.0
+ ipdb==0.13.9
+ ipykernel==6.8.0
+ ipython-genutils==0.2.0
+ ipython==8.0.1
+ ipywidgets==7.6.5
+ isodate==0.6.1
+ isort==5.10.1
+ itsdangerous==2.1.2
+ jax==0.2.28
+ jaxlib==0.1.76+cuda11.cudnn82
+ jedi==0.18.1
+ jinja2-time==0.2.0
+ jinja2==3.0.3
+ jiwer==2.3.0
+ jmespath==0.10.0
+ joblib==1.1.0
+ json5==0.9.6
+ jsonschema==4.4.0
+ jupyter-client==7.1.2
+ jupyter-console==6.4.0
+ jupyter-core==4.9.1
+ jupyter-server==1.13.5
+ jupyter==1.0.0
+ jupyterlab-pygments==0.1.2
+ jupyterlab-server==2.10.3
+ jupyterlab-widgets==1.0.2
+ jupyterlab==3.2.9
+ keras-preprocessing==1.1.2
+ keras==2.8.0
+ kiwisolver==1.3.2
+ kubernetes==12.0.1
+ libclang==13.0.0
+ librosa==0.8.1
+ llvmlite==0.38.0
+ mako==1.2.0
+ markdown==3.3.6
+ markupsafe==2.0.1
+ matplotlib-inline==0.1.3
+ matplotlib==3.5.1
+ mccabe==0.6.1
+ mistune==0.8.4
+ msgpack==1.0.3
+ multidict==6.0.2
+ multiprocess==0.70.12.2
+ mypy-extensions==0.4.3
+ nbclassic==0.3.5
+ nbclient==0.5.10
+ nbconvert==6.4.1
+ nbformat==5.1.3
+ nest-asyncio==1.5.4
+ nltk==3.7
+ notebook==6.4.8
+ numba==0.55.1
+ numpy==1.21.5
+ oauthlib==3.2.0
+ onnx==1.11.0
+ onnxconverter-common==1.9.0
+ opt-einsum==3.3.0
+ optax==0.1.0
+ optuna==2.10.0
+ packaging==21.3
+ pandas==1.4.0
+ pandocfilters==1.5.0
+ parameterized==0.8.1
+ parso==0.8.3
+ pathspec==0.9.0
+ pathtools==0.1.2
+ pbr==5.8.1
+ pexpect==4.8.0
+ phonemizer==3.0.1
+ pickleshare==0.7.5
+ pillow==9.0.0
+ pint==0.16.1
+ pip==22.0.2
+ pkg-resources==0.0.0
+ plac==1.3.5
+ platformdirs==2.4.1
+ plotly==5.6.0
+ pluggy==1.0.0
+ pooch==1.6.0
+ portalocker==2.0.0
+ poyo==0.5.0
+ prettytable==3.2.0
+ prometheus-client==0.13.1
+ promise==2.3
+ prompt-toolkit==3.0.26
+ protobuf==3.19.4
+ psutil==5.9.0
+ ptyprocess==0.7.0
+ pure-eval==0.2.2
+ py-cpuinfo==8.0.0
+ py==1.11.0
+ pyarrow==6.0.1
+ pyasn1-modules==0.2.8
+ pyasn1==0.4.8
+ pycodestyle==2.8.0
+ pycparser==2.21
+ pyctcdecode==0.3.0
+ pyflakes==2.4.0
+ pygments==2.11.2
+ pygtrie==2.4.2
+ pynvml==11.4.1
+ pyopenssl==22.0.0
+ pyparsing==3.0.7
+ pyperclip==1.8.2
+ pypng==0.0.21
+ pyrsistent==0.18.1
+ pytest-forked==1.4.0
+ pytest-timeout==2.1.0
+ pytest-xdist==2.5.0
+ pytest==7.1.1
+ python-dateutil==2.8.2
+ python-levenshtein==0.12.2
+ python-slugify==6.1.1
+ pytz-deprecation-shim==0.1.0.post0
+ pytz==2021.3
+ pyyaml==5.4.1
+ pyzmq==22.3.0
+ qtconsole==5.2.2
+ qtpy==2.0.1
+ ray==1.11.0
+ redis==4.2.2
+ regex==2022.1.18
+ requests-oauthlib==1.3.1
+ requests==2.27.1
+ resampy==0.2.2
+ responses==0.18.0
+ rfc3986==2.0.0
+ rouge-score==0.0.4
+ rsa==4.8
+ s3transfer==0.3.7
+ sacrebleu==1.5.1
+ sacremoses==0.0.47
+ scikit-learn==1.0.2
+ scipy==1.7.3
+ segments==2.2.0
+ send2trash==1.8.0
+ sentencepiece==0.1.96
+ sentry-sdk==1.5.6
+ setuptools==44.1.1
+ shortuuid==1.0.8
+ sigopt==8.3.0
+ six==1.16.0
+ smmap==5.0.0
+ sniffio==1.2.0
+ sortedcontainers==2.4.0
+ soundfile==0.10.3.post1
+ sqlalchemy==1.4.34
+ stack-data==0.1.4
+ stevedore==3.5.0
+ tabulate==0.8.9
+ tenacity==8.0.1
+ tensorboard-data-server==0.6.1
+ tensorboard-plugin-wit==1.8.1
+ tensorboard==2.8.0
+ tensorboardx==2.5
+ tensorflow-io-gcs-filesystem==0.24.0
+ tensorflow==2.8.0
+ termcolor==1.1.0
+ terminado==0.13.1
+ testpath==0.5.0
+ text-unidecode==1.3
+ tf-estimator-nightly==2.8.0.dev2021122109
+ tf2onnx==1.9.3
+ threadpoolctl==3.1.0
+ timeout-decorator==0.5.0
+ timm==0.5.4
+ tokenizers==0.11.4
+ toml==0.10.2
+ tomli==2.0.0
+ toolz==0.11.2
+ torch==1.10.2+cu113
+ torchaudio==0.10.2+cu113
+ torchvision==0.11.3
+ tornado==6.1
+ tqdm==4.62.3
+ traitlets==5.1.1
+ transformers==4.18.0.dev0
+ typing-extensions==3.10.0.2
+ tzdata==2022.1
+ tzlocal==4.2
+ unidic-lite==1.0.8
+ unidic==1.1.0
+ uritemplate==4.1.1
+ urllib3==1.26.8
+ wandb==0.12.10
+ wasabi==0.9.1
+ wcwidth==0.2.5
+ webencodings==0.5.1
+ websocket-client==1.2.3
+ werkzeug==2.0.2
+ wheel==0.37.1
+ widgetsnbextension==3.5.2
+ wrapt==1.14.0
+ xxhash==2.0.2
+ yarl==1.7.2
+ yaspin==2.1.0
+ zipp==3.7.0
wandb/run-20220503_171959-a6039xud/files/wandb-metadata.json ADDED
@@ -0,0 +1,57 @@
+ {
+ "os": "Linux-5.11.0-1028-gcp-x86_64-with-glibc2.33",
+ "python": "3.9.5",
+ "heartbeatAt": "2022-05-03T17:20:04.283739",
+ "startedAt": "2022-05-03T17:19:59.978222",
+ "docker": null,
+ "gpu": "Tesla V100-SXM2-16GB",
+ "gpu_count": 2,
+ "cpu_count": 16,
+ "cuda": null,
+ "args": [
+ "--overwrite_output_dir",
+ "--freeze_feature_encoder",
+ "--gradient_checkpointing",
+ "--predict_with_generate",
+ "--fp16",
+ "--group_by_length",
+ "--do_train",
+ "--do_eval",
+ "--load_best_model_at_end",
+ "--push_to_hub",
+ "--use_auth_token",
+ "--eval_split_name=test",
+ "--eval_steps=500",
+ "--evaluation_strategy=steps",
+ "--generation_max_length=40",
+ "--generation_num_beams=1",
+ "--gradient_accumulation_steps=8",
+ "--greater_is_better=True",
+ "--hidden_dropout=0.06862889720223829",
+ "--language=fr.en",
+ "--learning_rate=0.0004848089062550082",
+ "--logging_steps=1",
+ "--max_duration_in_seconds=20",
+ "--metric_for_best_model=bleu",
+ "--model_name_or_path=./",
+ "--num_train_epochs=3",
+ "--output_dir=./",
+ "--per_device_eval_batch_size=4",
+ "--per_device_train_batch_size=4",
+ "--save_steps=500",
+ "--task=covost2",
+ "--warmup_steps=500"
+ ],
+ "state": "running",
+ "program": "/home/sanchit_huggingface_co/xtreme_s_xlsr_2_bart_covost2_fr_en/run_xtreme_s.py",
+ "codePath": "run_xtreme_s.py",
+ "git": {
+ "remote": "https://huggingface.co/sanchit-gandhi/xtreme_s_xlsr_2_bart_covost2_fr_en",
+ "commit": "c787ad4df48cedb3c1492e0593442fbabf32819f"
+ },
+ "email": "sanchit@huggingface.co",
+ "root": "/home/sanchit_huggingface_co/xtreme_s_xlsr_2_bart_covost2_fr_en",
+ "host": "sanchit--v100",
+ "username": "sanchit_huggingface_co",
+ "executable": "/home/sanchit_huggingface_co/gcp/bin/python3"
+ }
wandb/run-20220503_171959-a6039xud/files/wandb-summary.json ADDED
The diff for this file is too large to render. See raw diff
wandb/run-20220503_171959-a6039xud/logs/debug-internal.log ADDED
The diff for this file is too large to render. See raw diff
wandb/run-20220503_171959-a6039xud/logs/debug.log ADDED
@@ -0,0 +1,28 @@
+ 2022-05-03 17:19:59,980 INFO MainThread:42111 [wandb_setup.py:_flush():75] Loading settings from /home/sanchit_huggingface_co/.config/wandb/settings
+ 2022-05-03 17:19:59,980 INFO MainThread:42111 [wandb_setup.py:_flush():75] Loading settings from wandb/settings
+ 2022-05-03 17:19:59,980 INFO MainThread:42111 [wandb_setup.py:_flush():75] Loading settings from environment variables: {'entity': 'sanchit-gandhi', 'project': 'xtreme_s_xlsr_2_bart_covost2_fr_en', 'sweep_id': '39ci3gkf', 'root_dir': '/home/sanchit_huggingface_co/xtreme_s_xlsr_2_bart_covost2_fr_en', 'run_id': 'a6039xud', 'sweep_param_path': '/home/sanchit_huggingface_co/xtreme_s_xlsr_2_bart_covost2_fr_en/wandb/sweep-39ci3gkf/config-a6039xud.yaml'}
+ 2022-05-03 17:19:59,980 INFO MainThread:42111 [wandb_setup.py:_flush():75] Inferring run settings from compute environment: {'program_relpath': 'run_xtreme_s.py', 'program': '/home/sanchit_huggingface_co/xtreme_s_xlsr_2_bart_covost2_fr_en/run_xtreme_s.py'}
+ 2022-05-03 17:19:59,980 INFO MainThread:42111 [wandb_init.py:_log_setup():386] Logging user logs to /home/sanchit_huggingface_co/xtreme_s_xlsr_2_bart_covost2_fr_en/wandb/run-20220503_171959-a6039xud/logs/debug.log
+ 2022-05-03 17:19:59,980 INFO MainThread:42111 [wandb_init.py:_log_setup():387] Logging internal logs to /home/sanchit_huggingface_co/xtreme_s_xlsr_2_bart_covost2_fr_en/wandb/run-20220503_171959-a6039xud/logs/debug-internal.log
+ 2022-05-03 17:19:59,980 INFO MainThread:42111 [wandb_init.py:init():420] calling init triggers
+ 2022-05-03 17:19:59,980 INFO MainThread:42111 [wandb_init.py:init():425] wandb.init called with sweep_config: {'eval_split_name': 'test', 'eval_steps': 500, 'evaluation_strategy': 'steps', 'generation_max_length': 40, 'generation_num_beams': 1, 'gradient_accumulation_steps': 8, 'greater_is_better': True, 'hidden_dropout': 0.06862889720223829, 'language': 'fr.en', 'learning_rate': 0.0004848089062550082, 'logging_steps': 1, 'max_duration_in_seconds': 20, 'metric_for_best_model': 'bleu', 'model_name_or_path': './', 'num_train_epochs': 3, 'output_dir': './', 'per_device_eval_batch_size': 4, 'per_device_train_batch_size': 4, 'save_steps': 500, 'task': 'covost2', 'warmup_steps': 500}
+ config: {}
+ 2022-05-03 17:19:59,981 INFO MainThread:42111 [wandb_init.py:init():471] starting backend
+ 2022-05-03 17:19:59,981 INFO MainThread:42111 [backend.py:_multiprocessing_setup():99] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+ 2022-05-03 17:20:00,059 INFO MainThread:42111 [backend.py:ensure_launched():219] starting backend process...
+ 2022-05-03 17:20:00,134 INFO MainThread:42111 [backend.py:ensure_launched():224] started backend process with pid: 42356
+ 2022-05-03 17:20:00,137 INFO MainThread:42111 [wandb_init.py:init():480] backend started and connected
+ 2022-05-03 17:20:00,140 INFO MainThread:42111 [wandb_run.py:_config_callback():966] config_cb None None {'eval_split_name': 'test', 'eval_steps': 500, 'evaluation_strategy': 'steps', 'generation_max_length': 40, 'generation_num_beams': 1, 'gradient_accumulation_steps': 8, 'greater_is_better': True, 'hidden_dropout': 0.06862889720223829, 'language': 'fr.en', 'learning_rate': 0.0004848089062550082, 'logging_steps': 1, 'max_duration_in_seconds': 20, 'metric_for_best_model': 'bleu', 'model_name_or_path': './', 'num_train_epochs': 3, 'output_dir': './', 'per_device_eval_batch_size': 4, 'per_device_train_batch_size': 4, 'save_steps': 500, 'task': 'covost2', 'warmup_steps': 500}
+ 2022-05-03 17:20:00,155 INFO MainThread:42111 [wandb_init.py:init():550] updated telemetry
+ 2022-05-03 17:20:00,359 INFO MainThread:42111 [wandb_init.py:init():581] communicating current version
+ 2022-05-03 17:20:01,247 INFO MainThread:42111 [wandb_init.py:init():586] got version response upgrade_message: "wandb version 0.12.16 is available! To upgrade, please run:\n $ pip install wandb --upgrade"
+
+ 2022-05-03 17:20:01,248 INFO MainThread:42111 [wandb_init.py:init():596] communicating run to backend with 30 second timeout
+ 2022-05-03 17:20:01,757 INFO MainThread:42111 [wandb_init.py:init():624] starting run threads in backend
+ 2022-05-03 17:20:04,342 INFO MainThread:42111 [wandb_run.py:_console_start():1827] atexit reg
+ 2022-05-03 17:20:04,342 INFO MainThread:42111 [wandb_run.py:_redirect():1701] redirect: SettingsConsole.REDIRECT
+ 2022-05-03 17:20:04,343 INFO MainThread:42111 [wandb_run.py:_redirect():1706] Redirecting console.
+ 2022-05-03 17:20:04,345 INFO MainThread:42111 [wandb_run.py:_redirect():1762] Redirects installed.
+ 2022-05-03 17:20:04,345 INFO MainThread:42111 [wandb_init.py:init():651] run started, returning control to user process
+ 2022-05-03 17:20:04,348 INFO MainThread:42111 [wandb_run.py:_config_callback():966] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'torch.float32', 'use_bfloat16': False, 'pruned_heads': {}, 'tie_word_embeddings': False, 'is_encoder_decoder': True, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 40, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'chunk_size_feed_forward': 0, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'architectures': ['SpeechEncoderDecoderModel'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': 1, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': 0, 'task_specific_params': None, 'problem_type': None, '_name_or_path': './', 'transformers_version': None, 'decoder': {'vocab_size': 50265, 'max_position_embeddings': 1024, 'd_model': 1024, 'encoder_ffn_dim': 4096, 'encoder_layers': 12, 'encoder_attention_heads': 16, 'decoder_ffn_dim': 4096, 'decoder_layers': 12, 'decoder_attention_heads': 16, 'dropout': 0.1, 'attention_dropout': 0.1, 'activation_dropout': 0.1, 'activation_function': 'gelu', 'init_std': 0.02, 'encoder_layerdrop': 0.0, 'decoder_layerdrop': 0.0, 'classifier_dropout': 0.0, 'use_cache': True, 'num_hidden_layers': 12, 'scale_embedding': False, 'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'is_encoder_decoder': False, 'is_decoder': True, 'cross_attention_hidden_size': None, 'add_cross_attention': True, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': True, 'num_beams': 4, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 3, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'chunk_size_feed_forward': 0, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': 0, 'forced_eos_token_id': 2, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'architectures': ['BartModel'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1', 2: 'LABEL_2'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 0, 'pad_token_id': 1, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': 2, 'task_specific_params': {'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4}, 'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4}, 'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6}}, 'problem_type': None, '_name_or_path': 'facebook/bart-large', 'transformers_version': '4.19.0.dev0', 'add_bias_logits': False, 'add_final_layer_norm': False, 'classif_dropout': 0.1, 'gradient_checkpointing': False, 'normalize_before': False, 'model_type': 'bart'}, 'encoder': {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'float32', 'use_bfloat16': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'chunk_size_feed_forward': 0, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'architectures': ['Wav2Vec2ForPreTraining'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': 0, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'facebook/wav2vec2-xls-r-300m', 'transformers_version': '4.19.0.dev0', 'feat_extract_dropout': 0.0, 'gradient_checkpointing': False, 'num_feat_extract_layers': 7, 'hidden_size': 1024, 'feat_extract_norm': 'layer', 'feat_extract_activation': 'gelu', 'conv_dim': [512, 512, 512, 512, 512, 512, 512], 'conv_stride': [5, 2, 2, 2, 2, 2, 2], 'conv_kernel': [10, 3, 3, 3, 3, 2, 2], 'conv_bias': True, 'num_conv_pos_embeddings': 128, 'num_conv_pos_embedding_groups': 16, 'num_hidden_layers': 24, 'intermediate_size': 4096, 'hidden_act': 'gelu', 'num_attention_heads': 16, 'hidden_dropout': 0.06862889720223829, 'attention_dropout': 0.1, 'activation_dropout': 0.0, 'feat_proj_dropout': 0.0, 'final_dropout': 0.0, 'layerdrop': 0.0, 'layer_norm_eps': 1e-05, 'initializer_range': 0.02, 'vocab_size': 32, 'do_stable_layer_norm': True, 'use_weighted_layer_sum': False, 'apply_spec_augment': True, 'mask_time_prob': 0.1, 'mask_time_length': 10, 'mask_time_min_masks': 2, 'mask_feature_prob': 0.0, 'mask_feature_length': 10, 'mask_feature_min_masks': 0, 'num_codevectors_per_group': 320, 'num_codevector_groups': 2, 'contrastive_logits_temperature': 0.1, 'feat_quantizer_dropout': 0.0, 'num_negatives': 100, 'codevector_dim': 768, 'proj_codevector_dim': 768, 'diversity_loss_weight': 0.1, 'ctc_loss_reduction': 'sum', 'ctc_zero_infinity': False, 'add_adapter': True, 'adapter_kernel_size': 3, 'adapter_stride': 2, 'num_adapter_layers': 3, 'output_hidden_size': 1024, 'classifier_proj_size': 256, 'tdnn_dim': [512, 512, 512, 512, 1500], 'tdnn_kernel': [5, 3, 3, 1, 1], 'tdnn_dilation': [1, 2, 3, 1, 1], 'xvector_output_dim': 512, 'model_type': 'wav2vec2'}, 'model_type': 'speech-encoder-decoder', 'processor_class': 'Wav2Vec2Processor', 'use_cache': False, 'overwrite_output_dir': True, 'do_train': True, 'do_eval': True, 'do_predict': False, 'prediction_loss_only': False, 'per_gpu_train_batch_size': 'None', 'per_gpu_eval_batch_size': 'None', 'eval_accumulation_steps': 'None', 'eval_delay': 0, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'warmup_ratio': 0.0, 'log_level': -1, 'log_level_replica': -1, 'log_on_each_node': True, 'logging_dir': './runs/May03_17-15-22_sanchit--v100', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_total_limit': 'None', 'save_on_each_node': False, 'no_cuda': False, 'seed': 42, 'data_seed': 'None', 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'amp', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': 'None', 'local_rank': -1, 'xpu_backend': 'None', 'tpu_num_cores': 'None', 'tpu_metrics_debug': False, 'debug': '[]', 'dataloader_drop_last': False, 'dataloader_num_workers': 0, 'past_index': -1, 'run_name': './', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': 'None', 'load_best_model_at_end': True, 'ignore_data_skip': False, 'sharded_ddp': '[]', 'deepspeed': 'None', 'label_smoothing_factor': 0.0, 'optim': 'adamw_hf', 'adafactor': False, 'group_by_length': True, 'length_column_name': 'length', 'report_to': "['tensorboard', 'wandb', 'codecarbon']", 'ddp_find_unused_parameters': 'None', 'ddp_bucket_cap_mb': 'None', 'dataloader_pin_memory': True, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': 'None', 'hub_model_id': 'None', 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'gradient_checkpointing': True, 'include_inputs_for_metrics': False, 'fp16_backend': 'auto', 'push_to_hub_model_id': 'None', 'push_to_hub_organization': 'None', 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', '_n_gpu': 1, 'mp_parameters': '', 'sortish_sampler': False, 'predict_with_generate': True, 'train_batch_size': 4, 'eval_batch_size': 4}
+ 2022-05-03 17:20:04,351 INFO MainThread:42111 [wandb_watch.py:watch():43] Watching
wandb/run-20220503_171959-a6039xud/run-a6039xud.wandb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2d3034118c3e235104590124be355a3a0b9fcb07d29b2485f05cfac95b555bf
+ size 54318872
wandb/sweep-39ci3gkf/config-a6039xud.yaml ADDED
@@ -0,0 +1,44 @@
+ wandb_version: 1
+
+ eval_split_name:
+ value: test
+ eval_steps:
+ value: 500
+ evaluation_strategy:
+ value: steps
+ generation_max_length:
+ value: 40
+ generation_num_beams:
+ value: 1
+ gradient_accumulation_steps:
+ value: 8
+ greater_is_better:
+ value: true
+ hidden_dropout:
+ value: 0.06862889720223829
+ language:
+ value: fr.en
+ learning_rate:
+ value: 0.0004848089062550082
+ logging_steps:
+ value: 1
+ max_duration_in_seconds:
+ value: 20
+ metric_for_best_model:
+ value: bleu
+ model_name_or_path:
+ value: ./
+ num_train_epochs:
+ value: 3
+ output_dir:
+ value: ./
+ per_device_eval_batch_size:
+ value: 4
+ per_device_train_batch_size:
+ value: 4
+ save_steps:
+ value: 500
+ task:
+ value: covost2
+ warmup_steps:
+ value: 500
wandb/sweep-y3ak427l/config-irggvkgd.yaml ADDED
@@ -0,0 +1,44 @@
+ wandb_version: 1
+
+ eval_split_name:
+ value: test
+ eval_steps:
+ value: 500
+ evaluation_strategy:
+ value: steps
+ generation_max_length:
+ value: 40
+ generation_num_beams:
+ value: 1
+ gradient_accumulation_steps:
+ value: 8
+ greater_is_better:
+ value: true
+ hidden_dropout:
+ value: 0.1875094322808032
+ language:
+ value: fr.en
+ learning_rate:
+ value: 0.00024438201183496223
+ logging_steps:
+ value: 1
+ max_duration_in_seconds:
+ value: 20
+ metric_for_best_model:
+ value: bleu
+ model_name_or_path:
+ value: ./
+ num_train_epochs:
+ value: 3
+ output_dir:
+ value: ./output_dir
+ per_device_eval_batch_size:
+ value: 4
+ per_device_train_batch_size:
+ value: 4
+ save_steps:
+ value: 500
+ task:
+ value: covost2
+ warmup_steps:
+ value: 500
wandb/sweep-y3ak427l/config-ldsojzle.yaml ADDED
@@ -0,0 +1,44 @@
+ wandb_version: 1
+
+ eval_split_name:
+ value: test
+ eval_steps:
+ value: 500
+ evaluation_strategy:
+ value: steps
+ generation_max_length:
+ value: 40
+ generation_num_beams:
+ value: 1
+ gradient_accumulation_steps:
+ value: 8
+ greater_is_better:
+ value: true
+ hidden_dropout:
+ value: 0.055722391000930585
+ language:
+ value: fr.en
+ learning_rate:
+ value: 0.0006457481677728278
+ logging_steps:
+ value: 1
+ max_duration_in_seconds:
+ value: 20
+ metric_for_best_model:
+ value: bleu
+ model_name_or_path:
+ value: ./
+ num_train_epochs:
+ value: 3
+ output_dir:
+ value: ./output_dir
+ per_device_eval_batch_size:
+ value: 4
+ per_device_train_batch_size:
+ value: 4
+ save_steps:
+ value: 500
+ task:
+ value: covost2
+ warmup_steps:
+ value: 500
wandb/sweep-y3ak427l/config-qv3vjr6j.yaml ADDED
@@ -0,0 +1,44 @@
+ wandb_version: 1
+
+ eval_split_name:
+ value: test
+ eval_steps:
+ value: 500
+ evaluation_strategy:
+ value: steps
+ generation_max_length:
+ value: 40
+ generation_num_beams:
+ value: 1
+ gradient_accumulation_steps:
+ value: 8
+ greater_is_better:
+ value: true
+ hidden_dropout:
+ value: 0.056807662149569525
+ language:
+ value: fr.en
+ learning_rate:
+ value: 0.0005558468401613797
+ logging_steps:
+ value: 1
+ max_duration_in_seconds:
+ value: 20
+ metric_for_best_model:
+ value: bleu
+ model_name_or_path:
+ value: ./
+ num_train_epochs:
+ value: 3
+ output_dir:
+ value: ./output_dir
+ per_device_eval_batch_size:
+ value: 4
+ per_device_train_batch_size:
+ value: 4
+ save_steps:
+ value: 500
+ task:
+ value: covost2
+ warmup_steps:
+ value: 500
wandb/sweep-y3ak427l/config-vz5ppd75.yaml ADDED
@@ -0,0 +1,44 @@
+ wandb_version: 1
+
+ eval_split_name:
+ value: test
+ eval_steps:
+ value: 500
+ evaluation_strategy:
+ value: steps
+ generation_max_length:
+ value: 40
+ generation_num_beams:
+ value: 1
+ gradient_accumulation_steps:
+ value: 8
+ greater_is_better:
+ value: true
+ hidden_dropout:
+ value: 0.036619638921206475
+ language:
+ value: fr.en
+ learning_rate:
+ value: 0.00024391819705381628
+ logging_steps:
+ value: 1
+ max_duration_in_seconds:
+ value: 20
+ metric_for_best_model:
+ value: bleu
+ model_name_or_path:
+ value: ./
+ num_train_epochs:
+ value: 3
+ output_dir:
+ value: ./output_dir
+ per_device_eval_batch_size:
+ value: 4
+ per_device_train_batch_size:
+ value: 4
+ save_steps:
+ value: 500
+ task:
+ value: covost2
+ warmup_steps:
+ value: 500
wandb/sweep-y3ak427l/config-xur584bd.yaml ADDED
@@ -0,0 +1,44 @@
+ wandb_version: 1
+
+ eval_split_name:
+ value: test
+ eval_steps:
+ value: 500
+ evaluation_strategy:
+ value: steps
+ generation_max_length:
+ value: 40
+ generation_num_beams:
+ value: 1
+ gradient_accumulation_steps:
+ value: 8
+ greater_is_better:
+ value: true
+ hidden_dropout:
+ value: 0.03413483050532159
+ language:
+ value: fr.en
+ learning_rate:
+ value: 0.00022086866790135088
+ logging_steps:
+ value: 1
+ max_duration_in_seconds:
+ value: 20
+ metric_for_best_model:
+ value: bleu
+ model_name_or_path:
+ value: ./
+ num_train_epochs:
+ value: 3
+ output_dir:
+ value: ./output_dir
+ per_device_eval_batch_size:
+ value: 4
+ per_device_train_batch_size:
+ value: 4
+ save_steps:
+ value: 500
+ task:
+ value: covost2
+ warmup_steps:
+ value: 500