sanchit-gandhi committed
Commit 8c0ea7f
1 parent: b1adb5d
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7285b3dc9e5948807a019ca6ef6f0e6906b7ee5f5df7b54f5b4d3edb5fc9295e
-size 3087090488
+oid sha256:e4037918156ace2010dc63b440c1cb40eb20519d3e00db942e461310b69141da
+size 1962672704
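Note that the committed weight file shrinks from 3,087,090,488 bytes to 1,962,672,704 bytes. The output.log added later in this commit shows the save was interrupted mid-write, so the smaller LFS pointer is worth double-checking rather than taking on trust. A minimal sketch (the path and the comparison value are illustrative, not part of this commit) for reading the three-line pointer format shown above:

```python
# Minimal sketch: parse the 3-line Git LFS pointer format shown in this diff
# (version / oid sha256:<hash> / size <bytes>) and compare old vs. new sizes.
# The path is illustrative; before `git lfs pull`, pytorch_model.bin *is* the pointer.
def read_lfs_pointer(path):
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

pointer = read_lfs_pointer("pytorch_model.bin")
old_size = 3_087_090_488                # size recorded on the left side of the diff
new_size = int(pointer["size"])         # 1_962_672_704 in this commit
print(f"oid: {pointer['oid']}")
print(f"checkpoint shrank by {old_size - new_size:,} bytes")  # roughly 1.1 GB smaller
```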
run_librispeech.sh CHANGED
@@ -8,15 +8,14 @@ CUDA_VISIBLE_DEVICES=1 python run_speech_recognition_seq2seq.py \
 	--output_dir="./" \
 	--preprocessing_num_workers="1" \
 	--length_column_name="input_length" \
-	--overwrite_output_dir \
-	--num_train_epochs="20" \
+	--num_train_epochs="10" \
 	--per_device_train_batch_size="8" \
 	--per_device_eval_batch_size="8" \
 	--gradient_accumulation_steps="4" \
 	--generation_max_length="40" \
 	--generation_num_beams="1" \
-	--learning_rate="1e-5" \
-	--warmup_steps="1500" \
+	--learning_rate="5e-6" \
+	--warmup_steps="1" \
 	--evaluation_strategy="steps" \
 	--text_column_name="text" \
 	--save_steps="1500" \
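The flag changes above lower the peak learning rate from 1e-5 to 5e-6, shrink warmup from 1500 steps to a single step, halve training to 10 epochs, and drop --overwrite_output_dir, which lets the Trainer pick up the existing checkpoint in ./ (the "Skipping the first batches" line in output.log shows it did). With the linear scheduler recorded in the run config, the learning rate now reaches its peak after one update and decays linearly to zero. A small sketch of that schedule, assuming the 8,920-step total shown in output.log and the standard `transformers` scheduler helper:

```python
# Sketch of the LR schedule implied by the new flags: --learning_rate=5e-6,
# --warmup_steps=1, linear decay. The 8920 total steps come from the progress
# bar in output.log (10 epochs); treat that count as an assumption here.
import torch
from transformers import get_linear_schedule_with_warmup

param = torch.nn.Parameter(torch.zeros(1))        # stand-in for the real model parameters
optimizer = torch.optim.AdamW([param], lr=5e-6)
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=1, num_training_steps=8920
)

lrs = []
for _ in range(8920):
    lrs.append(scheduler.get_last_lr()[0])
    optimizer.step()
    scheduler.step()

print(lrs[1], lrs[4460], lrs[-1])  # 5e-06 at step 1, ~2.5e-06 midway, ~0 at the end
```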
runs/Mar22_09-43-31_sanchit--v100/1647942245.8723397/events.out.tfevents.1647942245.sanchit--v100.28799.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:023b1f94c7c83692f32b524cdc0564feb6575075993a352d49245633f5cb4168
+size 4973
runs/Mar22_09-43-31_sanchit--v100/events.out.tfevents.1647942245.sanchit--v100.28799.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79f4906479e41c9fb04dbdb15fc16f65c273445968e0703b4fab67b92a13ea8e
+size 9217
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7f4f5cc9e51e1021b8fa699625bf6dc9ded0122f755ddc86c59d92bc08aa6cb2
+oid sha256:06fa7821d09961cd241516145a69a71e9038d56760a9b05e9a4d5fdc9fb6abab
 size 3119
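training_args.bin keeps its 3,119-byte size but gets a new hash, consistent with only a handful of hyperparameters changing. Since the Trainer writes this file with torch.save, the quickest way to see the exact delta is to load both versions and diff their dictionaries; a hedged sketch (the two checkout paths are illustrative):

```python
# Minimal sketch: diff the TrainingArguments serialized by two commits.
# Paths are illustrative local checkouts of commits b1adb5d and 8c0ea7f.
import torch

old_args = torch.load("checkout_b1adb5d/training_args.bin")
new_args = torch.load("checkout_8c0ea7f/training_args.bin")

old_d, new_d = old_args.to_dict(), new_args.to_dict()
for key in sorted(old_d):
    if old_d[key] != new_d.get(key):
        print(f"{key}: {old_d[key]} -> {new_d.get(key)}")
# Expected to surface learning_rate, warmup_steps, num_train_epochs,
# overwrite_output_dir and the new logging_dir timestamp.
```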
wandb/debug-internal.log CHANGED
@@ -1 +1 @@
-run-20220320_205317-13fe1w7o/logs/debug-internal.log
+run-20220322_094406-dk50d3c6/logs/debug-internal.log
wandb/debug.log CHANGED
@@ -1 +1 @@
-run-20220320_205317-13fe1w7o/logs/debug.log
+run-20220322_094406-dk50d3c6/logs/debug.log
wandb/latest-run CHANGED
@@ -1 +1 @@
-run-20220320_205317-13fe1w7o
+run-20220322_094406-dk50d3c6
wandb/run-20220320_205317-13fe1w7o/files/config.yaml CHANGED
@@ -12413,7 +12413,14 @@ _wandb:
     - 1
     - 5
     - 11
+    2:
+    - 1
+    - 5
+    - 11
+    - 12
     3:
+    - 1
+    - 7
     - 13
     4: 3.9.5
     5: 0.12.10
wandb/run-20220320_205317-13fe1w7o/files/output.log CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:93a9ddb32faa240a1e6f127d25e88d0496a2acdb3094c299d4fb2348af9c9841
-size 20837744
+oid sha256:90f71bb80f11890b0cac5a2c47081e4f0b3447789beb433a9324208591c8e4ca
+size 20866534
wandb/run-20220320_205317-13fe1w7o/files/wandb-summary.json CHANGED
The diff for this file is too large to render. See raw diff
 
wandb/run-20220320_205317-13fe1w7o/logs/debug-internal.log CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4cca7294a398a9f7f78639dd2a70d5d67944266d2d5d636c94f57b97c8d194ab
-size 28887039
+oid sha256:ddfabea5070e4f931f8429c56cb5099ea467d239d7be5806ab49646bf1bd14d3
+size 28923693
wandb/run-20220320_205317-13fe1w7o/logs/debug.log CHANGED
@@ -25,3 +25,109 @@ config: {}
  2022-03-20 20:53:18,789 INFO MainThread:15700 [wandb_init.py:init():651] run started, returning control to user process
  2022-03-20 20:53:18,792 INFO MainThread:15700 [wandb_run.py:_config_callback():966] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'torch.float32', 'use_bfloat16': False, 'pruned_heads': {}, 'tie_word_embeddings': False, 'is_encoder_decoder': True, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 50, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'chunk_size_feed_forward': 0, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'architectures': ['SpeechEncoderDecoderModel'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': 1, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': 0, 'task_specific_params': None, 'problem_type': None, '_name_or_path': './', 'transformers_version': None, 'decoder': {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'is_encoder_decoder': False, 'is_decoder': True, 'cross_attention_hidden_size': None, 'add_cross_attention': True, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'chunk_size_feed_forward': 0, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'architectures': ['RobertaForMaskedLM'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 0, 'pad_token_id': 1, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'roberta-large', 'transformers_version': '4.17.0.dev0', 'vocab_size': 50265, 'hidden_size': 1024, 'num_hidden_layers': 24, 'num_attention_heads': 16, 'hidden_act': 'gelu', 'intermediate_size': 4096, 'hidden_dropout_prob': 0.1, 'attention_probs_dropout_prob': 0.1, 'max_position_embeddings': 514, 'type_vocab_size': 1, 'initializer_range': 0.02, 'layer_norm_eps': 1e-05, 'position_embedding_type': 'absolute', 'use_cache': False, 'classifier_dropout': None, 'model_type': 'roberta'}, 'encoder': {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 
'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'chunk_size_feed_forward': 0, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'architectures': ['Wav2Vec2ForPreTraining'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': 0, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'facebook/wav2vec2-large-lv60', 'transformers_version': '4.17.0.dev0', 'feat_extract_dropout': 0.0, 'gradient_checkpointing': False, 'hidden_dropout_prob': 0.1, 'num_feat_extract_layers': 7, 'hidden_size': 1024, 'feat_extract_norm': 'layer', 'feat_extract_activation': 'gelu', 'conv_dim': [512, 512, 512, 512, 512, 512, 512], 'conv_stride': [5, 2, 2, 2, 2, 2, 2], 'conv_kernel': [10, 3, 3, 3, 3, 2, 2], 'conv_bias': True, 'num_conv_pos_embeddings': 128, 'num_conv_pos_embedding_groups': 16, 'num_hidden_layers': 24, 'intermediate_size': 4096, 'hidden_act': 'gelu', 'num_attention_heads': 16, 'hidden_dropout': 0.1, 'attention_dropout': 0.1, 'activation_dropout': 0.1, 'feat_proj_dropout': 0.0, 'final_dropout': 0.0, 'layerdrop': 0.0, 'layer_norm_eps': 1e-05, 'initializer_range': 0.02, 'vocab_size': 32, 'do_stable_layer_norm': True, 'use_weighted_layer_sum': False, 'apply_spec_augment': True, 'mask_time_prob': 0.1, 'mask_time_length': 10, 'mask_time_min_masks': 2, 'mask_feature_prob': 0.0, 'mask_feature_length': 10, 'mask_feature_min_masks': 0, 'num_codevectors_per_group': 320, 'num_codevector_groups': 2, 'contrastive_logits_temperature': 0.1, 'feat_quantizer_dropout': 0.0, 'num_negatives': 100, 'codevector_dim': 768, 'proj_codevector_dim': 768, 'diversity_loss_weight': 0.1, 'ctc_loss_reduction': 'sum', 'ctc_zero_infinity': False, 'add_adapter': False, 'adapter_kernel_size': 3, 'adapter_stride': 2, 'num_adapter_layers': 3, 'output_hidden_size': 1024, 'classifier_proj_size': 256, 'tdnn_dim': [512, 512, 512, 512, 1500], 'tdnn_kernel': [5, 3, 3, 1, 1], 'tdnn_dilation': [1, 2, 3, 1, 1], 'xvector_output_dim': 512, 'model_type': 'wav2vec2'}, 'model_type': 'speech-encoder-decoder', 'processor_class': 'Wav2Vec2Processor', 'use_cache': False, 'output_dir': './', 'overwrite_output_dir': True, 'do_train': True, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 8, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': 'None', 'per_gpu_eval_batch_size': 'None', 'gradient_accumulation_steps': 4, 'eval_accumulation_steps': 'None', 'learning_rate': 1e-05, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 20.0, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'warmup_ratio': 0.0, 'warmup_steps': 1500, 'log_level': -1, 'log_level_replica': -1, 'log_on_each_node': True, 'logging_dir': './runs/Mar20_20-52-47_sanchit--v100', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 1, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 1500, 'save_total_limit': 1, 'save_on_each_node': 
False, 'no_cuda': False, 'seed': 42, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'amp', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': 'None', 'local_rank': -1, 'xpu_backend': 'None', 'tpu_num_cores': 'None', 'tpu_metrics_debug': False, 'debug': '[]', 'dataloader_drop_last': False, 'eval_steps': 1500, 'dataloader_num_workers': 0, 'past_index': -1, 'run_name': './', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': 'None', 'load_best_model_at_end': False, 'metric_for_best_model': 'None', 'greater_is_better': 'None', 'ignore_data_skip': False, 'sharded_ddp': '[]', 'deepspeed': 'None', 'label_smoothing_factor': 0.0, 'optim': 'adamw_hf', 'adafactor': False, 'group_by_length': True, 'length_column_name': 'input_length', 'report_to': "['tensorboard', 'wandb']", 'ddp_find_unused_parameters': 'None', 'ddp_bucket_cap_mb': 'None', 'dataloader_pin_memory': True, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': 'None', 'hub_model_id': 'None', 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'gradient_checkpointing': True, 'fp16_backend': 'auto', 'push_to_hub_model_id': 'None', 'push_to_hub_organization': 'None', 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', '_n_gpu': 1, 'mp_parameters': '', 'sortish_sampler': False, 'predict_with_generate': True, 'generation_max_length': 40, 'generation_num_beams': 1, 'train_batch_size': 8, 'eval_batch_size': 8}
  2022-03-20 20:53:18,795 INFO MainThread:15700 [wandb_watch.py:watch():43] Watching
+2022-03-22 05:40:34,532 INFO MainThread:15700 [wandb_run.py:_atexit_cleanup():1797] got exitcode: 1
+2022-03-22 05:40:34,536 INFO MainThread:15700 [wandb_run.py:_restore():1769] restore
+2022-03-22 05:40:37,094 INFO MainThread:15700 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+wandb_count: 1
+}
+pusher_stats {
+uploaded_bytes: 2076
+total_bytes: 2076
+}
+
+2022-03-22 05:40:37,296 INFO MainThread:15700 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+wandb_count: 1
+}
+pusher_stats {
+uploaded_bytes: 2076
+total_bytes: 2076
+}
+
+2022-03-22 05:40:38,187 INFO MainThread:15700 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+wandb_count: 1
+}
+pusher_stats {
+uploaded_bytes: 2076
+total_bytes: 2076
+}
+
+2022-03-22 05:40:38,289 INFO MainThread:15700 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+wandb_count: 5
+}
+pusher_stats {
+uploaded_bytes: 2076
+total_bytes: 22860023
+}
+
+2022-03-22 05:40:38,391 INFO MainThread:15700 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+wandb_count: 5
+}
+pusher_stats {
+uploaded_bytes: 11528977
+total_bytes: 22860023
+}
+
+2022-03-22 05:40:38,493 INFO MainThread:15700 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+wandb_count: 5
+}
+pusher_stats {
+uploaded_bytes: 22833937
+total_bytes: 22860023
+}
+
+2022-03-22 05:40:38,594 INFO MainThread:15700 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+wandb_count: 5
+}
+pusher_stats {
+uploaded_bytes: 22860023
+total_bytes: 22860023
+}
+
+2022-03-22 05:40:38,696 INFO MainThread:15700 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+wandb_count: 5
+}
+pusher_stats {
+uploaded_bytes: 22860023
+total_bytes: 22860023
+}
+
+2022-03-22 05:40:38,798 INFO MainThread:15700 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+wandb_count: 5
+}
+pusher_stats {
+uploaded_bytes: 22860023
+total_bytes: 22860023
+}
+
+2022-03-22 05:40:38,899 INFO MainThread:15700 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+wandb_count: 5
+}
+pusher_stats {
+uploaded_bytes: 22860023
+total_bytes: 22860023
+}
+
+2022-03-22 05:40:39,563 INFO MainThread:15700 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+wandb_count: 5
+}
+pusher_stats {
+uploaded_bytes: 22860023
+total_bytes: 22860023
+}
+
+2022-03-22 05:40:39,715 INFO MainThread:15700 [wandb_run.py:_wait_for_finish():1929] got exit ret: done: true
+exit_result {
+}
+file_counts {
+wandb_count: 5
+}
+pusher_stats {
+uploaded_bytes: 22860023
+total_bytes: 22860023
+}
+local_info {
+}
+
+2022-03-22 05:40:40,864 INFO MainThread:15700 [wandb_run.py:_append_history():2144] rendering history
+2022-03-22 05:40:40,865 INFO MainThread:15700 [wandb_run.py:_append_summary():2102] rendering summary
+2022-03-22 05:40:40,866 INFO MainThread:15700 [wandb_run.py:_append_files():2194] logging synced files
wandb/run-20220320_205317-13fe1w7o/run-13fe1w7o.wandb CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fc584fa32866581aa312e0bb5e6f8da16bbf6729330af258ee64c4e32f6ebc37
-size 1258830711
+oid sha256:70198de64e8f4ae8e6e98d3ef1b9b43d7cfd935da1c4fe60e44dc500fd014f1f
+size 1260506629
wandb/run-20220322_094406-dk50d3c6/files/config.yaml ADDED
@@ -0,0 +1,719 @@
1
+ wandb_version: 1
2
+
3
+ _n_gpu:
4
+ desc: null
5
+ value: 1
6
+ _name_or_path:
7
+ desc: null
8
+ value: ./
9
+ _wandb:
10
+ desc: null
11
+ value:
12
+ cli_version: 0.12.10
13
+ framework: huggingface
14
+ huggingface_version: 4.17.0.dev0
15
+ is_jupyter_run: false
16
+ is_kaggle_kernel: false
17
+ m:
18
+ - 1: train/global_step
19
+ 6:
20
+ - 3
21
+ - 1: train/train_runtime
22
+ 5: 1
23
+ 6:
24
+ - 1
25
+ - 1: train/train_samples_per_second
26
+ 5: 1
27
+ 6:
28
+ - 1
29
+ - 1: train/train_steps_per_second
30
+ 5: 1
31
+ 6:
32
+ - 1
33
+ - 1: train/total_flos
34
+ 5: 1
35
+ 6:
36
+ - 1
37
+ - 1: train/train_loss
38
+ 5: 1
39
+ 6:
40
+ - 1
41
+ - 1: train/epoch
42
+ 5: 1
43
+ 6:
44
+ - 1
45
+ python_version: 3.9.5
46
+ start_time: 1647942246
47
+ t:
48
+ 1:
49
+ - 1
50
+ - 5
51
+ - 11
52
+ 3:
53
+ - 13
54
+ 4: 3.9.5
55
+ 5: 0.12.10
56
+ 6: 4.17.0.dev0
57
+ 8:
58
+ - 5
59
+ adafactor:
60
+ desc: null
61
+ value: false
62
+ adam_beta1:
63
+ desc: null
64
+ value: 0.9
65
+ adam_beta2:
66
+ desc: null
67
+ value: 0.999
68
+ adam_epsilon:
69
+ desc: null
70
+ value: 1.0e-08
71
+ add_cross_attention:
72
+ desc: null
73
+ value: false
74
+ architectures:
75
+ desc: null
76
+ value:
77
+ - SpeechEncoderDecoderModel
78
+ bad_words_ids:
79
+ desc: null
80
+ value: null
81
+ bf16:
82
+ desc: null
83
+ value: false
84
+ bf16_full_eval:
85
+ desc: null
86
+ value: false
87
+ bos_token_id:
88
+ desc: null
89
+ value: null
90
+ chunk_size_feed_forward:
91
+ desc: null
92
+ value: 0
93
+ cross_attention_hidden_size:
94
+ desc: null
95
+ value: null
96
+ dataloader_drop_last:
97
+ desc: null
98
+ value: false
99
+ dataloader_num_workers:
100
+ desc: null
101
+ value: 0
102
+ dataloader_pin_memory:
103
+ desc: null
104
+ value: true
105
+ ddp_bucket_cap_mb:
106
+ desc: null
107
+ value: None
108
+ ddp_find_unused_parameters:
109
+ desc: null
110
+ value: None
111
+ debug:
112
+ desc: null
113
+ value: '[]'
114
+ decoder:
115
+ desc: null
116
+ value:
117
+ _name_or_path: roberta-large
118
+ add_cross_attention: true
119
+ architectures:
120
+ - RobertaForMaskedLM
121
+ attention_probs_dropout_prob: 0.1
122
+ bad_words_ids: null
123
+ bos_token_id: 0
124
+ chunk_size_feed_forward: 0
125
+ classifier_dropout: null
126
+ cross_attention_hidden_size: null
127
+ decoder_start_token_id: null
128
+ diversity_penalty: 0.0
129
+ do_sample: false
130
+ early_stopping: false
131
+ encoder_no_repeat_ngram_size: 0
132
+ eos_token_id: 2
133
+ finetuning_task: null
134
+ forced_bos_token_id: null
135
+ forced_eos_token_id: null
136
+ hidden_act: gelu
137
+ hidden_dropout_prob: 0.1
138
+ hidden_size: 1024
139
+ id2label:
140
+ '0': LABEL_0
141
+ '1': LABEL_1
142
+ initializer_range: 0.02
143
+ intermediate_size: 4096
144
+ is_decoder: true
145
+ is_encoder_decoder: false
146
+ label2id:
147
+ LABEL_0: 0
148
+ LABEL_1: 1
149
+ layer_norm_eps: 1.0e-05
150
+ length_penalty: 1.0
151
+ max_length: 20
152
+ max_position_embeddings: 514
153
+ min_length: 0
154
+ model_type: roberta
155
+ no_repeat_ngram_size: 0
156
+ num_attention_heads: 16
157
+ num_beam_groups: 1
158
+ num_beams: 1
159
+ num_hidden_layers: 24
160
+ num_return_sequences: 1
161
+ output_attentions: false
162
+ output_hidden_states: false
163
+ output_scores: false
164
+ pad_token_id: 1
165
+ position_embedding_type: absolute
166
+ prefix: null
167
+ problem_type: null
168
+ pruned_heads: {}
169
+ remove_invalid_values: false
170
+ repetition_penalty: 1.0
171
+ return_dict: true
172
+ return_dict_in_generate: false
173
+ sep_token_id: null
174
+ task_specific_params: null
175
+ temperature: 1.0
176
+ tie_encoder_decoder: false
177
+ tie_word_embeddings: true
178
+ tokenizer_class: null
179
+ top_k: 50
180
+ top_p: 1.0
181
+ torch_dtype: null
182
+ torchscript: false
183
+ transformers_version: 4.17.0.dev0
184
+ type_vocab_size: 1
185
+ use_bfloat16: false
186
+ use_cache: false
187
+ vocab_size: 50265
188
+ decoder_start_token_id:
189
+ desc: null
190
+ value: 0
191
+ deepspeed:
192
+ desc: null
193
+ value: None
194
+ disable_tqdm:
195
+ desc: null
196
+ value: false
197
+ diversity_penalty:
198
+ desc: null
199
+ value: 0.0
200
+ do_eval:
201
+ desc: null
202
+ value: true
203
+ do_predict:
204
+ desc: null
205
+ value: false
206
+ do_sample:
207
+ desc: null
208
+ value: false
209
+ do_train:
210
+ desc: null
211
+ value: true
212
+ early_stopping:
213
+ desc: null
214
+ value: false
215
+ encoder:
216
+ desc: null
217
+ value:
218
+ _name_or_path: facebook/wav2vec2-large-lv60
219
+ activation_dropout: 0.1
220
+ adapter_kernel_size: 3
221
+ adapter_stride: 2
222
+ add_adapter: false
223
+ add_cross_attention: false
224
+ apply_spec_augment: true
225
+ architectures:
226
+ - Wav2Vec2ForPreTraining
227
+ attention_dropout: 0.1
228
+ bad_words_ids: null
229
+ bos_token_id: 1
230
+ chunk_size_feed_forward: 0
231
+ classifier_proj_size: 256
232
+ codevector_dim: 768
233
+ contrastive_logits_temperature: 0.1
234
+ conv_bias: true
235
+ conv_dim:
236
+ - 512
237
+ - 512
238
+ - 512
239
+ - 512
240
+ - 512
241
+ - 512
242
+ - 512
243
+ conv_kernel:
244
+ - 10
245
+ - 3
246
+ - 3
247
+ - 3
248
+ - 3
249
+ - 2
250
+ - 2
251
+ conv_stride:
252
+ - 5
253
+ - 2
254
+ - 2
255
+ - 2
256
+ - 2
257
+ - 2
258
+ - 2
259
+ cross_attention_hidden_size: null
260
+ ctc_loss_reduction: sum
261
+ ctc_zero_infinity: false
262
+ decoder_start_token_id: null
263
+ diversity_loss_weight: 0.1
264
+ diversity_penalty: 0.0
265
+ do_sample: false
266
+ do_stable_layer_norm: true
267
+ early_stopping: false
268
+ encoder_no_repeat_ngram_size: 0
269
+ eos_token_id: 2
270
+ feat_extract_activation: gelu
271
+ feat_extract_dropout: 0.0
272
+ feat_extract_norm: layer
273
+ feat_proj_dropout: 0.0
274
+ feat_quantizer_dropout: 0.0
275
+ final_dropout: 0.0
276
+ finetuning_task: null
277
+ forced_bos_token_id: null
278
+ forced_eos_token_id: null
279
+ gradient_checkpointing: false
280
+ hidden_act: gelu
281
+ hidden_dropout: 0.1
282
+ hidden_dropout_prob: 0.1
283
+ hidden_size: 1024
284
+ id2label:
285
+ '0': LABEL_0
286
+ '1': LABEL_1
287
+ initializer_range: 0.02
288
+ intermediate_size: 4096
289
+ is_decoder: false
290
+ is_encoder_decoder: false
291
+ label2id:
292
+ LABEL_0: 0
293
+ LABEL_1: 1
294
+ layer_norm_eps: 1.0e-05
295
+ layerdrop: 0.0
296
+ length_penalty: 1.0
297
+ mask_feature_length: 10
298
+ mask_feature_min_masks: 0
299
+ mask_feature_prob: 0.0
300
+ mask_time_length: 10
301
+ mask_time_min_masks: 2
302
+ mask_time_prob: 0.1
303
+ max_length: 20
304
+ min_length: 0
305
+ model_type: wav2vec2
306
+ no_repeat_ngram_size: 0
307
+ num_adapter_layers: 3
308
+ num_attention_heads: 16
309
+ num_beam_groups: 1
310
+ num_beams: 1
311
+ num_codevector_groups: 2
312
+ num_codevectors_per_group: 320
313
+ num_conv_pos_embedding_groups: 16
314
+ num_conv_pos_embeddings: 128
315
+ num_feat_extract_layers: 7
316
+ num_hidden_layers: 24
317
+ num_negatives: 100
318
+ num_return_sequences: 1
319
+ output_attentions: false
320
+ output_hidden_size: 1024
321
+ output_hidden_states: false
322
+ output_scores: false
323
+ pad_token_id: 0
324
+ prefix: null
325
+ problem_type: null
326
+ proj_codevector_dim: 768
327
+ pruned_heads: {}
328
+ remove_invalid_values: false
329
+ repetition_penalty: 1.0
330
+ return_dict: true
331
+ return_dict_in_generate: false
332
+ sep_token_id: null
333
+ task_specific_params: null
334
+ tdnn_dilation:
335
+ - 1
336
+ - 2
337
+ - 3
338
+ - 1
339
+ - 1
340
+ tdnn_dim:
341
+ - 512
342
+ - 512
343
+ - 512
344
+ - 512
345
+ - 1500
346
+ tdnn_kernel:
347
+ - 5
348
+ - 3
349
+ - 3
350
+ - 1
351
+ - 1
352
+ temperature: 1.0
353
+ tie_encoder_decoder: false
354
+ tie_word_embeddings: true
355
+ tokenizer_class: null
356
+ top_k: 50
357
+ top_p: 1.0
358
+ torch_dtype: null
359
+ torchscript: false
360
+ transformers_version: 4.17.0.dev0
361
+ use_bfloat16: false
362
+ use_weighted_layer_sum: false
363
+ vocab_size: 32
364
+ xvector_output_dim: 512
365
+ encoder_no_repeat_ngram_size:
366
+ desc: null
367
+ value: 0
368
+ eos_token_id:
369
+ desc: null
370
+ value: 2
371
+ eval_accumulation_steps:
372
+ desc: null
373
+ value: None
374
+ eval_batch_size:
375
+ desc: null
376
+ value: 8
377
+ eval_steps:
378
+ desc: null
379
+ value: 1500
380
+ evaluation_strategy:
381
+ desc: null
382
+ value: steps
383
+ finetuning_task:
384
+ desc: null
385
+ value: null
386
+ forced_bos_token_id:
387
+ desc: null
388
+ value: null
389
+ forced_eos_token_id:
390
+ desc: null
391
+ value: null
392
+ fp16:
393
+ desc: null
394
+ value: true
395
+ fp16_backend:
396
+ desc: null
397
+ value: auto
398
+ fp16_full_eval:
399
+ desc: null
400
+ value: false
401
+ fp16_opt_level:
402
+ desc: null
403
+ value: O1
404
+ generation_max_length:
405
+ desc: null
406
+ value: 40
407
+ generation_num_beams:
408
+ desc: null
409
+ value: 1
410
+ gradient_accumulation_steps:
411
+ desc: null
412
+ value: 4
413
+ gradient_checkpointing:
414
+ desc: null
415
+ value: true
416
+ greater_is_better:
417
+ desc: null
418
+ value: None
419
+ group_by_length:
420
+ desc: null
421
+ value: true
422
+ half_precision_backend:
423
+ desc: null
424
+ value: amp
425
+ hub_model_id:
426
+ desc: null
427
+ value: None
428
+ hub_strategy:
429
+ desc: null
430
+ value: every_save
431
+ hub_token:
432
+ desc: null
433
+ value: <HUB_TOKEN>
434
+ id2label:
435
+ desc: null
436
+ value:
437
+ '0': LABEL_0
438
+ '1': LABEL_1
439
+ ignore_data_skip:
440
+ desc: null
441
+ value: false
442
+ is_decoder:
443
+ desc: null
444
+ value: false
445
+ is_encoder_decoder:
446
+ desc: null
447
+ value: true
448
+ label2id:
449
+ desc: null
450
+ value:
451
+ LABEL_0: 0
452
+ LABEL_1: 1
453
+ label_names:
454
+ desc: null
455
+ value: None
456
+ label_smoothing_factor:
457
+ desc: null
458
+ value: 0.0
459
+ learning_rate:
460
+ desc: null
461
+ value: 5.0e-06
462
+ length_column_name:
463
+ desc: null
464
+ value: input_length
465
+ length_penalty:
466
+ desc: null
467
+ value: 1.0
468
+ load_best_model_at_end:
469
+ desc: null
470
+ value: false
471
+ local_rank:
472
+ desc: null
473
+ value: -1
474
+ log_level:
475
+ desc: null
476
+ value: -1
477
+ log_level_replica:
478
+ desc: null
479
+ value: -1
480
+ log_on_each_node:
481
+ desc: null
482
+ value: true
483
+ logging_dir:
484
+ desc: null
485
+ value: ./runs/Mar22_09-43-31_sanchit--v100
486
+ logging_first_step:
487
+ desc: null
488
+ value: false
489
+ logging_nan_inf_filter:
490
+ desc: null
491
+ value: true
492
+ logging_steps:
493
+ desc: null
494
+ value: 1
495
+ logging_strategy:
496
+ desc: null
497
+ value: steps
498
+ lr_scheduler_type:
499
+ desc: null
500
+ value: linear
501
+ max_grad_norm:
502
+ desc: null
503
+ value: 1.0
504
+ max_length:
505
+ desc: null
506
+ value: 50
507
+ max_steps:
508
+ desc: null
509
+ value: -1
510
+ metric_for_best_model:
511
+ desc: null
512
+ value: None
513
+ min_length:
514
+ desc: null
515
+ value: 0
516
+ model_type:
517
+ desc: null
518
+ value: speech-encoder-decoder
519
+ mp_parameters:
520
+ desc: null
521
+ value: ''
522
+ no_cuda:
523
+ desc: null
524
+ value: false
525
+ no_repeat_ngram_size:
526
+ desc: null
527
+ value: 0
528
+ num_beam_groups:
529
+ desc: null
530
+ value: 1
531
+ num_beams:
532
+ desc: null
533
+ value: 1
534
+ num_return_sequences:
535
+ desc: null
536
+ value: 1
537
+ num_train_epochs:
538
+ desc: null
539
+ value: 10.0
540
+ optim:
541
+ desc: null
542
+ value: adamw_hf
543
+ output_attentions:
544
+ desc: null
545
+ value: false
546
+ output_dir:
547
+ desc: null
548
+ value: ./
549
+ output_hidden_states:
550
+ desc: null
551
+ value: false
552
+ output_scores:
553
+ desc: null
554
+ value: false
555
+ overwrite_output_dir:
556
+ desc: null
557
+ value: false
558
+ pad_token_id:
559
+ desc: null
560
+ value: 1
561
+ past_index:
562
+ desc: null
563
+ value: -1
564
+ per_device_eval_batch_size:
565
+ desc: null
566
+ value: 8
567
+ per_device_train_batch_size:
568
+ desc: null
569
+ value: 8
570
+ per_gpu_eval_batch_size:
571
+ desc: null
572
+ value: None
573
+ per_gpu_train_batch_size:
574
+ desc: null
575
+ value: None
576
+ predict_with_generate:
577
+ desc: null
578
+ value: true
579
+ prediction_loss_only:
580
+ desc: null
581
+ value: false
582
+ prefix:
583
+ desc: null
584
+ value: null
585
+ problem_type:
586
+ desc: null
587
+ value: null
588
+ processor_class:
589
+ desc: null
590
+ value: Wav2Vec2Processor
591
+ pruned_heads:
592
+ desc: null
593
+ value: {}
594
+ push_to_hub:
595
+ desc: null
596
+ value: true
597
+ push_to_hub_model_id:
598
+ desc: null
599
+ value: None
600
+ push_to_hub_organization:
601
+ desc: null
602
+ value: None
603
+ push_to_hub_token:
604
+ desc: null
605
+ value: <PUSH_TO_HUB_TOKEN>
606
+ remove_invalid_values:
607
+ desc: null
608
+ value: false
609
+ remove_unused_columns:
610
+ desc: null
611
+ value: true
612
+ repetition_penalty:
613
+ desc: null
614
+ value: 1.0
615
+ report_to:
616
+ desc: null
617
+ value: '[''tensorboard'', ''wandb'']'
618
+ resume_from_checkpoint:
619
+ desc: null
620
+ value: None
621
+ return_dict:
622
+ desc: null
623
+ value: true
624
+ return_dict_in_generate:
625
+ desc: null
626
+ value: false
627
+ run_name:
628
+ desc: null
629
+ value: ./
630
+ save_on_each_node:
631
+ desc: null
632
+ value: false
633
+ save_steps:
634
+ desc: null
635
+ value: 1500
636
+ save_strategy:
637
+ desc: null
638
+ value: steps
639
+ save_total_limit:
640
+ desc: null
641
+ value: 1
642
+ seed:
643
+ desc: null
644
+ value: 42
645
+ sep_token_id:
646
+ desc: null
647
+ value: null
648
+ sharded_ddp:
649
+ desc: null
650
+ value: '[]'
651
+ skip_memory_metrics:
652
+ desc: null
653
+ value: true
654
+ sortish_sampler:
655
+ desc: null
656
+ value: false
657
+ task_specific_params:
658
+ desc: null
659
+ value: null
660
+ temperature:
661
+ desc: null
662
+ value: 1.0
663
+ tf32:
664
+ desc: null
665
+ value: None
666
+ tie_encoder_decoder:
667
+ desc: null
668
+ value: false
669
+ tie_word_embeddings:
670
+ desc: null
671
+ value: false
672
+ tokenizer_class:
673
+ desc: null
674
+ value: null
675
+ top_k:
676
+ desc: null
677
+ value: 50
678
+ top_p:
679
+ desc: null
680
+ value: 1.0
681
+ torch_dtype:
682
+ desc: null
683
+ value: torch.float32
684
+ torchscript:
685
+ desc: null
686
+ value: false
687
+ tpu_metrics_debug:
688
+ desc: null
689
+ value: false
690
+ tpu_num_cores:
691
+ desc: null
692
+ value: None
693
+ train_batch_size:
694
+ desc: null
695
+ value: 8
696
+ transformers_version:
697
+ desc: null
698
+ value: null
699
+ use_bfloat16:
700
+ desc: null
701
+ value: false
702
+ use_cache:
703
+ desc: null
704
+ value: false
705
+ use_legacy_prediction_loop:
706
+ desc: null
707
+ value: false
708
+ warmup_ratio:
709
+ desc: null
710
+ value: 0.0
711
+ warmup_steps:
712
+ desc: null
713
+ value: 1
714
+ weight_decay:
715
+ desc: null
716
+ value: 0.0
717
+ xpu_backend:
718
+ desc: null
719
+ value: None
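The config.yaml above records every model and training hyperparameter of the new run as desc/value pairs. To compare it programmatically against the previous run's config, it can be flattened back into a plain dictionary; a small sketch, assuming PyYAML (pinned as pyyaml==6.0 in requirements.txt below) and the run path from this commit:

```python
# Minimal sketch: flatten a wandb run config.yaml (desc/value pairs) back into a
# plain dict. The path matches this commit's run directory; adjust as needed.
import yaml

def load_wandb_config(path):
    with open(path) as f:
        raw = yaml.safe_load(f)
    # Every hyperparameter entry is a {"desc": ..., "value": ...} mapping;
    # plain bookkeeping keys such as wandb_version are skipped.
    return {
        key: entry["value"]
        for key, entry in raw.items()
        if isinstance(entry, dict) and "value" in entry
    }

config = load_wandb_config("wandb/run-20220322_094406-dk50d3c6/files/config.yaml")
print(config["learning_rate"], config["warmup_steps"], config["num_train_epochs"])
# -> 5e-06 1 10.0 for the run recorded in this commit
```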
wandb/run-20220322_094406-dk50d3c6/files/output.log ADDED
@@ -0,0 +1,63 @@
+
+0%| | 0/8920 [00:00<?, ?it/s]
+{'train_runtime': 18.8534, 'train_samples_per_second': 15136.821, 'train_steps_per_second': 473.125, 'train_loss': 0.0, 'epoch': 18.5}
+Training completed. Do not forget to share your model on huggingface.co/models =)
+0%| | 0/8920 [00:17<?, ?it/s]
+Skipping the first batches: 0%| | 0/1776 [00:18<?, ?it/s]
+[INFO|trainer.py:2114] 2022-03-22 09:44:24,681 >> Saving model checkpoint to ./
+[INFO|configuration_utils.py:438] 2022-03-22 09:44:24,683 >> Configuration saved in ./config.json
+[INFO|modeling_utils.py:1081] 2022-03-22 09:44:40,163 >> Model weights saved in ./pytorch_model.bin
+[INFO|feature_extraction_utils.py:324] 2022-03-22 09:44:40,165 >> Configuration saved in ./preprocessor_config.json
+[INFO|trainer.py:2114] 2022-03-22 09:44:40,166 >> Saving model checkpoint to ./
+[INFO|configuration_utils.py:438] 2022-03-22 09:44:40,168 >> Configuration saved in ./config.json
+Exception in thread NetStatThr:
+Traceback (most recent call last):
+File "/usr/lib/python3.9/threading.py", line 954, in _bootstrap_inner
+self.run()
+File "/usr/lib/python3.9/threading.py", line 892, in run
+self._target(*self._args, **self._kwargs)
+File "/home/sanchit_huggingface_co/gcp/lib/python3.9/site-packages/wandb/sdk/wandb_run.py", line 148, in check_network_status
+status_response = self._interface.communicate_network_status()
+File "/home/sanchit_huggingface_co/gcp/lib/python3.9/site-packages/wandb/sdk/interface/interface.py", line 125, in communicate_network_status
+resp = self._communicate_network_status(status)
+File "/home/sanchit_huggingface_co/gcp/lib/python3.9/site-packages/wandb/sdk/interface/interface_shared.py", line 388, in _communicate_network_status
+resp = self._communicate(req, local=True)
+File "/home/sanchit_huggingface_co/gcp/lib/python3.9/site-packages/wandb/sdk/interface/interface_shared.py", line 213, in _communicate
+return self._communicate_async(rec, local=local).get(timeout=timeout)
+File "/home/sanchit_huggingface_co/gcp/lib/python3.9/site-packages/wandb/sdk/interface/interface_shared.py", line 218, in _communicate_async
+raise Exception("The wandb backend process has shutdown")
+Exception: The wandb backend process has shutdown
+Traceback (most recent call last):
+File "/home/sanchit_huggingface_co/gcp/lib/python3.9/site-packages/torch/serialization.py", line 379, in save
+_save(obj, opened_zipfile, pickle_module, pickle_protocol)
+File "/home/sanchit_huggingface_co/gcp/lib/python3.9/site-packages/torch/serialization.py", line 499, in _save
+zip_file.write_record(name, storage.data_ptr(), num_bytes)
+KeyboardInterrupt
+During handling of the above exception, another exception occurred:
+Traceback (most recent call last):
+File "/home/sanchit_huggingface_co/gcp/lib/python3.9/site-packages/torch/serialization.py", line 380, in save
+return
+File "/home/sanchit_huggingface_co/gcp/lib/python3.9/site-packages/torch/serialization.py", line 259, in __exit__
+self.file_like.write_end_of_file()
+RuntimeError: [enforce fail at inline_container.cc:300] . unexpected pos 1962672704 vs 1962672592
+During handling of the above exception, another exception occurred:
+Traceback (most recent call last):
+File "/home/sanchit_huggingface_co/wav2vec2-2-roberta-no-adapter-regularisation/run_speech_recognition_seq2seq.py", line 539, in <module>
+main()
+File "/home/sanchit_huggingface_co/wav2vec2-2-roberta-no-adapter-regularisation/run_speech_recognition_seq2seq.py", line 492, in main
+trainer.save_model() # Saves the feature extractor too for easy upload
+File "/home/sanchit_huggingface_co/transformers/src/transformers/trainer.py", line 2080, in save_model
+self.push_to_hub(commit_message="Model save")
+File "/home/sanchit_huggingface_co/transformers/src/transformers/trainer.py", line 2812, in push_to_hub
+self.save_model(_internal_call=True)
+File "/home/sanchit_huggingface_co/transformers/src/transformers/trainer.py", line 2076, in save_model
+self._save(output_dir)
+File "/home/sanchit_huggingface_co/transformers/src/transformers/trainer.py", line 2128, in _save
+self.model.save_pretrained(output_dir, state_dict=state_dict)
+File "/home/sanchit_huggingface_co/transformers/src/transformers/modeling_utils.py", line 1079, in save_pretrained
+save_function(state_dict, output_model_file)
+File "/home/sanchit_huggingface_co/gcp/lib/python3.9/site-packages/torch/serialization.py", line 381, in save
+_legacy_save(obj, opened_file, pickle_module, pickle_protocol)
+File "/home/sanchit_huggingface_co/gcp/lib/python3.9/site-packages/torch/serialization.py", line 214, in __exit__
+self.file_like.close()
+KeyboardInterrupt
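The traceback above shows the run being interrupted (KeyboardInterrupt) while torch.serialization was still writing pytorch_model.bin, which is what produced the "unexpected pos 1962672704 vs 1962672592" error and the roughly 1.1 GB smaller weight file committed at the top of this diff. Before reusing a checkpoint written under those conditions it is worth confirming it still deserializes; a minimal sketch, assuming the file is a regular torch-saved state dict (path illustrative):

```python
# Minimal sketch: confirm an interrupted save left a loadable checkpoint before
# pushing it to the Hub. A truncated zip archive raises here just as it did in
# torch.serialization during the aborted save.
import torch

def checkpoint_is_loadable(path="pytorch_model.bin"):
    try:
        state_dict = torch.load(path, map_location="cpu")
    except (RuntimeError, EOFError, OSError) as err:
        print(f"{path} failed to load: {err}")
        return False
    print(f"{path} loaded with {len(state_dict)} tensors")
    return True

if __name__ == "__main__":
    checkpoint_is_loadable()
```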
wandb/run-20220322_094406-dk50d3c6/files/requirements.txt ADDED
@@ -0,0 +1,186 @@
1
+ absl-py==1.0.0
2
+ aiohttp==3.8.1
3
+ aiosignal==1.2.0
4
+ anyio==3.5.0
5
+ appdirs==1.4.4
6
+ argon2-cffi-bindings==21.2.0
7
+ argon2-cffi==21.3.0
8
+ asttokens==2.0.5
9
+ async-timeout==4.0.2
10
+ attrs==21.4.0
11
+ audioread==2.1.9
12
+ babel==2.9.1
13
+ backcall==0.2.0
14
+ bitsandbytes-cuda113==0.26.0
15
+ black==22.1.0
16
+ bleach==4.1.0
17
+ cachetools==5.0.0
18
+ certifi==2021.10.8
19
+ cffi==1.15.0
20
+ charset-normalizer==2.0.11
21
+ chex==0.1.0
22
+ click==8.0.3
23
+ clldutils==3.10.1
24
+ colorlog==6.6.0
25
+ csvw==1.11.0
26
+ cycler==0.11.0
27
+ datasets==1.18.3
28
+ debugpy==1.5.1
29
+ decorator==5.1.1
30
+ defusedxml==0.7.1
31
+ dill==0.3.4
32
+ dlinfo==1.2.1
33
+ dm-tree==0.1.6
34
+ docker-pycreds==0.4.0
35
+ entrypoints==0.4
36
+ executing==0.8.2
37
+ filelock==3.4.2
38
+ flatbuffers==2.0
39
+ flax==0.4.0
40
+ fonttools==4.29.1
41
+ frozenlist==1.3.0
42
+ fsspec==2022.1.0
43
+ gitdb==4.0.9
44
+ gitpython==3.1.27
45
+ google-auth-oauthlib==0.4.6
46
+ google-auth==2.6.0
47
+ grpcio==1.43.0
48
+ huggingface-hub==0.4.0
49
+ hypothesis==6.36.1
50
+ idna==3.3
51
+ importlib-metadata==4.10.1
52
+ ipdb==0.13.9
53
+ ipykernel==6.8.0
54
+ ipython-genutils==0.2.0
55
+ ipython==8.0.1
56
+ ipywidgets==7.6.5
57
+ isodate==0.6.1
58
+ jax==0.2.28
59
+ jaxlib==0.1.76+cuda11.cudnn82
60
+ jedi==0.18.1
61
+ jinja2==3.0.3
62
+ jiwer==2.3.0
63
+ joblib==1.1.0
64
+ json5==0.9.6
65
+ jsonschema==4.4.0
66
+ jupyter-client==7.1.2
67
+ jupyter-console==6.4.0
68
+ jupyter-core==4.9.1
69
+ jupyter-server==1.13.5
70
+ jupyter==1.0.0
71
+ jupyterlab-pygments==0.1.2
72
+ jupyterlab-server==2.10.3
73
+ jupyterlab-widgets==1.0.2
74
+ jupyterlab==3.2.9
75
+ kiwisolver==1.3.2
76
+ librosa==0.8.1
77
+ llvmlite==0.38.0
78
+ markdown==3.3.6
79
+ markupsafe==2.0.1
80
+ matplotlib-inline==0.1.3
81
+ matplotlib==3.5.1
82
+ mistune==0.8.4
83
+ msgpack==1.0.3
84
+ multidict==6.0.2
85
+ multiprocess==0.70.12.2
86
+ mypy-extensions==0.4.3
87
+ nbclassic==0.3.5
88
+ nbclient==0.5.10
89
+ nbconvert==6.4.1
90
+ nbformat==5.1.3
91
+ nest-asyncio==1.5.4
92
+ notebook==6.4.8
93
+ numba==0.55.1
94
+ numpy==1.21.5
95
+ oauthlib==3.2.0
96
+ opt-einsum==3.3.0
97
+ optax==0.1.0
98
+ packaging==21.3
99
+ pandas==1.4.0
100
+ pandocfilters==1.5.0
101
+ parso==0.8.3
102
+ pathspec==0.9.0
103
+ pathtools==0.1.2
104
+ pexpect==4.8.0
105
+ phonemizer==3.0.1
106
+ pickleshare==0.7.5
107
+ pillow==9.0.0
108
+ pip==22.0.2
109
+ pkg-resources==0.0.0
110
+ platformdirs==2.4.1
111
+ pooch==1.6.0
112
+ prometheus-client==0.13.1
113
+ promise==2.3
114
+ prompt-toolkit==3.0.26
115
+ protobuf==3.19.4
116
+ psutil==5.9.0
117
+ ptyprocess==0.7.0
118
+ pure-eval==0.2.2
119
+ pyarrow==6.0.1
120
+ pyasn1-modules==0.2.8
121
+ pyasn1==0.4.8
122
+ pycparser==2.21
123
+ pyctcdecode==0.3.0
124
+ pygments==2.11.2
125
+ pygtrie==2.4.2
126
+ pyparsing==3.0.7
127
+ pyrsistent==0.18.1
128
+ python-dateutil==2.8.2
129
+ python-levenshtein==0.12.2
130
+ pytz==2021.3
131
+ pyyaml==6.0
132
+ pyzmq==22.3.0
133
+ qtconsole==5.2.2
134
+ qtpy==2.0.1
135
+ regex==2022.1.18
136
+ requests-oauthlib==1.3.1
137
+ requests==2.27.1
138
+ resampy==0.2.2
139
+ rfc3986==2.0.0
140
+ rsa==4.8
141
+ sacremoses==0.0.47
142
+ scikit-learn==1.0.2
143
+ scipy==1.7.3
144
+ segments==2.2.0
145
+ send2trash==1.8.0
146
+ sentry-sdk==1.5.6
147
+ setuptools==44.1.1
148
+ shortuuid==1.0.8
149
+ six==1.16.0
150
+ smmap==5.0.0
151
+ sniffio==1.2.0
152
+ sortedcontainers==2.4.0
153
+ soundfile==0.10.3.post1
154
+ stack-data==0.1.4
155
+ tabulate==0.8.9
156
+ tensorboard-data-server==0.6.1
157
+ tensorboard-plugin-wit==1.8.1
158
+ tensorboard==2.8.0
159
+ termcolor==1.1.0
160
+ terminado==0.13.1
161
+ testpath==0.5.0
162
+ threadpoolctl==3.1.0
163
+ tokenizers==0.11.4
164
+ toml==0.10.2
165
+ tomli==2.0.0
166
+ toolz==0.11.2
167
+ torch==1.10.2+cu113
168
+ torchaudio==0.10.2+cu113
169
+ tornado==6.1
170
+ tqdm==4.62.3
171
+ traitlets==5.1.1
172
+ transformers==4.17.0.dev0
173
+ typing-extensions==3.10.0.2
174
+ uritemplate==4.1.1
175
+ urllib3==1.26.8
176
+ wandb==0.12.10
177
+ wcwidth==0.2.5
178
+ webencodings==0.5.1
179
+ websocket-client==1.2.3
180
+ werkzeug==2.0.2
181
+ wheel==0.37.1
182
+ widgetsnbextension==3.5.2
183
+ xxhash==2.0.2
184
+ yarl==1.7.2
185
+ yaspin==2.1.0
186
+ zipp==3.7.0
wandb/run-20220322_094406-dk50d3c6/files/wandb-metadata.json ADDED
@@ -0,0 +1,57 @@
+{
+"os": "Linux-5.11.0-1028-gcp-x86_64-with-glibc2.33",
+"python": "3.9.5",
+"heartbeatAt": "2022-03-22T09:44:07.352031",
+"startedAt": "2022-03-22T09:44:06.223354",
+"docker": null,
+"gpu": "Tesla V100-SXM2-16GB",
+"gpu_count": 2,
+"cpu_count": 16,
+"cuda": null,
+"args": [
+"--dataset_name=librispeech_asr",
+"--model_name_or_path=./",
+"--dataset_config_name=clean",
+"--train_split_name=train.100",
+"--eval_split_name=validation",
+"--output_dir=./",
+"--preprocessing_num_workers=1",
+"--length_column_name=input_length",
+"--num_train_epochs=10",
+"--per_device_train_batch_size=8",
+"--per_device_eval_batch_size=8",
+"--gradient_accumulation_steps=4",
+"--generation_max_length=40",
+"--generation_num_beams=1",
+"--learning_rate=5e-6",
+"--warmup_steps=1",
+"--evaluation_strategy=steps",
+"--text_column_name=text",
+"--save_steps=1500",
+"--eval_steps=1500",
+"--logging_steps=1",
+"--save_total_limit=1",
+"--freeze_feature_encoder",
+"--gradient_checkpointing",
+"--fp16",
+"--group_by_length",
+"--predict_with_generate",
+"--do_lower_case",
+"--do_eval",
+"--do_train",
+"--push_to_hub",
+"--use_auth_token"
+],
+"state": "running",
+"program": "/home/sanchit_huggingface_co/wav2vec2-2-roberta-no-adapter-regularisation/run_speech_recognition_seq2seq.py",
+"codePath": "run_speech_recognition_seq2seq.py",
+"git": {
+"remote": "https://huggingface.co/sanchit-gandhi/wav2vec2-2-roberta-no-adapter-regularisation",
+"commit": "b1adb5d25491507607d3af93c76df3608caf1fb0"
+},
+"email": "sanchit@huggingface.co",
+"root": "/home/sanchit_huggingface_co/wav2vec2-2-roberta-no-adapter-regularisation",
+"host": "sanchit--v100",
+"username": "sanchit_huggingface_co",
+"executable": "/home/sanchit_huggingface_co/gcp/bin/python"
+}
wandb/run-20220322_094406-dk50d3c6/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
+{"train/train_runtime": 18.8534, "train/train_samples_per_second": 15136.821, "train/train_steps_per_second": 473.125, "train/total_flos": 0.0, "train/train_loss": 0.0, "train/epoch": 18.5, "train/global_step": 16500, "_runtime": 18, "_timestamp": 1647942264, "_step": 0}
wandb/run-20220322_094406-dk50d3c6/logs/debug-internal.log ADDED
@@ -0,0 +1,55 @@
1
+ 2022-03-22 09:44:07,143 INFO MainThread:28952 [internal.py:wandb_internal():89] W&B internal server running at pid: 28952, started at: 2022-03-22 09:44:07.142893
2
+ 2022-03-22 09:44:07,145 DEBUG HandlerThread:28952 [handler.py:handle_request():131] handle_request: check_version
3
+ 2022-03-22 09:44:07,145 INFO WriterThread:28952 [datastore.py:open_for_write():77] open: /home/sanchit_huggingface_co/wav2vec2-2-roberta-no-adapter-regularisation/wandb/run-20220322_094406-dk50d3c6/run-dk50d3c6.wandb
4
+ 2022-03-22 09:44:07,146 DEBUG SenderThread:28952 [sender.py:send():235] send: header
5
+ 2022-03-22 09:44:07,147 DEBUG SenderThread:28952 [sender.py:send_request():249] send_request: check_version
6
+ 2022-03-22 09:44:07,214 DEBUG SenderThread:28952 [sender.py:send():235] send: run
7
+ 2022-03-22 09:44:07,345 INFO SenderThread:28952 [dir_watcher.py:__init__():169] watching files in: /home/sanchit_huggingface_co/wav2vec2-2-roberta-no-adapter-regularisation/wandb/run-20220322_094406-dk50d3c6/files
8
+ 2022-03-22 09:44:07,345 INFO SenderThread:28952 [sender.py:_start_run_threads():809] run started: dk50d3c6 with start time 1647942246
9
+ 2022-03-22 09:44:07,345 DEBUG SenderThread:28952 [sender.py:send():235] send: summary
10
+ 2022-03-22 09:44:07,345 INFO SenderThread:28952 [sender.py:_save_file():944] saving file wandb-summary.json with policy end
11
+ 2022-03-22 09:44:07,345 DEBUG HandlerThread:28952 [handler.py:handle_request():131] handle_request: run_start
12
+ 2022-03-22 09:44:07,351 DEBUG HandlerThread:28952 [meta.py:__init__():36] meta init
13
+ 2022-03-22 09:44:07,351 DEBUG HandlerThread:28952 [meta.py:__init__():50] meta init done
14
+ 2022-03-22 09:44:07,352 DEBUG HandlerThread:28952 [meta.py:probe():210] probe
15
+ 2022-03-22 09:44:07,358 DEBUG HandlerThread:28952 [meta.py:_setup_git():200] setup git
16
+ 2022-03-22 09:44:07,371 DEBUG HandlerThread:28952 [meta.py:_setup_git():207] setup git done
17
+ 2022-03-22 09:44:07,371 DEBUG HandlerThread:28952 [meta.py:_save_pip():54] save pip
18
+ 2022-03-22 09:44:07,372 DEBUG HandlerThread:28952 [meta.py:_save_pip():68] save pip done
19
+ 2022-03-22 09:44:07,372 DEBUG HandlerThread:28952 [meta.py:probe():248] probe done
20
+ 2022-03-22 09:44:07,446 DEBUG SenderThread:28952 [sender.py:send():235] send: files
21
+ 2022-03-22 09:44:07,447 INFO SenderThread:28952 [sender.py:_save_file():944] saving file wandb-metadata.json with policy now
22
+ 2022-03-22 09:44:07,451 DEBUG HandlerThread:28952 [handler.py:handle_request():131] handle_request: stop_status
23
+ 2022-03-22 09:44:07,452 DEBUG SenderThread:28952 [sender.py:send_request():249] send_request: stop_status
24
+ 2022-03-22 09:44:07,495 DEBUG SenderThread:28952 [sender.py:send():235] send: config
25
+ 2022-03-22 09:44:07,496 DEBUG SenderThread:28952 [sender.py:send():235] send: metric
26
+ 2022-03-22 09:44:07,496 DEBUG SenderThread:28952 [sender.py:send():235] send: metric
27
+ 2022-03-22 09:44:07,496 WARNING SenderThread:28952 [sender.py:send_metric():902] Seen metric with glob (shouldnt happen)
28
+ 2022-03-22 09:44:07,757 INFO Thread-11 :28952 [upload_job.py:push():137] Uploaded file /tmp/tmphh6di6ecwandb/3layvpmg-wandb-metadata.json
29
+ 2022-03-22 09:44:08,347 INFO Thread-8 :28952 [dir_watcher.py:_on_file_created():217] file/dir created: /home/sanchit_huggingface_co/wav2vec2-2-roberta-no-adapter-regularisation/wandb/run-20220322_094406-dk50d3c6/files/requirements.txt
30
+ 2022-03-22 09:44:08,347 INFO Thread-8 :28952 [dir_watcher.py:_on_file_created():217] file/dir created: /home/sanchit_huggingface_co/wav2vec2-2-roberta-no-adapter-regularisation/wandb/run-20220322_094406-dk50d3c6/files/wandb-metadata.json
31
+ 2022-03-22 09:44:08,347 INFO Thread-8 :28952 [dir_watcher.py:_on_file_created():217] file/dir created: /home/sanchit_huggingface_co/wav2vec2-2-roberta-no-adapter-regularisation/wandb/run-20220322_094406-dk50d3c6/files/output.log
32
+ 2022-03-22 09:44:08,347 INFO Thread-8 :28952 [dir_watcher.py:_on_file_created():217] file/dir created: /home/sanchit_huggingface_co/wav2vec2-2-roberta-no-adapter-regularisation/wandb/run-20220322_094406-dk50d3c6/files/wandb-summary.json
33
+ 2022-03-22 09:44:10,346 INFO Thread-8 :28952 [dir_watcher.py:_on_file_modified():230] file/dir modified: /home/sanchit_huggingface_co/wav2vec2-2-roberta-no-adapter-regularisation/wandb/run-20220322_094406-dk50d3c6/files/output.log
34
+ 2022-03-22 09:44:22,703 DEBUG HandlerThread:28952 [handler.py:handle_request():131] handle_request: stop_status
35
+ 2022-03-22 09:44:22,703 DEBUG SenderThread:28952 [sender.py:send_request():249] send_request: stop_status
36
+ 2022-03-22 09:44:24,681 DEBUG SenderThread:28952 [sender.py:send():235] send: metric
37
+ 2022-03-22 09:44:24,681 DEBUG SenderThread:28952 [sender.py:send():235] send: metric
38
+ 2022-03-22 09:44:24,681 DEBUG SenderThread:28952 [sender.py:send():235] send: metric
39
+ 2022-03-22 09:44:24,681 DEBUG SenderThread:28952 [sender.py:send():235] send: metric
40
+ 2022-03-22 09:44:24,682 DEBUG SenderThread:28952 [sender.py:send():235] send: metric
41
+ 2022-03-22 09:44:24,682 DEBUG SenderThread:28952 [sender.py:send():235] send: metric
42
+ 2022-03-22 09:44:24,682 DEBUG SenderThread:28952 [sender.py:send():235] send: history
43
+ 2022-03-22 09:44:24,682 DEBUG SenderThread:28952 [sender.py:send():235] send: summary
44
+ 2022-03-22 09:44:24,684 INFO SenderThread:28952 [sender.py:_save_file():944] saving file wandb-summary.json with policy end
45
+ 2022-03-22 09:44:25,351 INFO Thread-8 :28952 [dir_watcher.py:_on_file_modified():230] file/dir modified: /home/sanchit_huggingface_co/wav2vec2-2-roberta-no-adapter-regularisation/wandb/run-20220322_094406-dk50d3c6/files/wandb-summary.json
46
+ 2022-03-22 09:44:26,351 INFO Thread-8 :28952 [dir_watcher.py:_on_file_modified():230] file/dir modified: /home/sanchit_huggingface_co/wav2vec2-2-roberta-no-adapter-regularisation/wandb/run-20220322_094406-dk50d3c6/files/output.log
47
+ 2022-03-22 09:44:35,816 DEBUG SenderThread:28952 [sender.py:send():235] send: stats
48
+ 2022-03-22 09:44:37,748 DEBUG HandlerThread:28952 [handler.py:handle_request():131] handle_request: stop_status
49
+ 2022-03-22 09:44:37,748 DEBUG SenderThread:28952 [sender.py:send_request():249] send_request: stop_status
50
+ 2022-03-22 09:44:38,355 INFO Thread-8 :28952 [dir_watcher.py:_on_file_modified():230] file/dir modified: /home/sanchit_huggingface_co/wav2vec2-2-roberta-no-adapter-regularisation/wandb/run-20220322_094406-dk50d3c6/files/config.yaml
51
+ 2022-03-22 09:44:42,356 INFO Thread-8 :28952 [dir_watcher.py:_on_file_modified():230] file/dir modified: /home/sanchit_huggingface_co/wav2vec2-2-roberta-no-adapter-regularisation/wandb/run-20220322_094406-dk50d3c6/files/output.log
52
+ 2022-03-22 09:44:43,532 WARNING MainThread:28952 [internal.py:wandb_internal():152] Internal process interrupt: 1
53
+ 2022-03-22 09:44:43,701 WARNING MainThread:28952 [internal.py:wandb_internal():152] Internal process interrupt: 2
54
+ 2022-03-22 09:44:43,702 ERROR MainThread:28952 [internal.py:wandb_internal():155] Internal process interrupted.
55
+ 2022-03-22 09:44:44,051 INFO MainThread:28952 [internal.py:handle_exit():79] Internal process exited
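debug-internal.log above traces the wandb background process (WriterThread, SenderThread, HandlerThread and the directory watcher) and ends with two "Internal process interrupt" warnings followed by an error, i.e. the run was cut short by an interrupt signal rather than finishing cleanly. A minimal sketch for surfacing just those non-routine records from such a log (path assumed from the run directory above):

    from pathlib import Path

    # Internal wandb log for this run; adjust the path for your checkout.
    log_path = Path("wandb/run-20220322_094406-dk50d3c6/logs/debug-internal.log")

    # Keep only WARNING/ERROR records, e.g. the interrupt that ended this run.
    for line in log_path.read_text().splitlines():
        if " WARNING " in line or " ERROR " in line:
            print(line)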
wandb/run-20220322_094406-dk50d3c6/logs/debug.log ADDED
@@ -0,0 +1,30 @@
1
+ 2022-03-22 09:44:06,225 INFO MainThread:28799 [wandb_setup.py:_flush():75] Loading settings from /home/sanchit_huggingface_co/.config/wandb/settings
2
+ 2022-03-22 09:44:06,225 INFO MainThread:28799 [wandb_setup.py:_flush():75] Loading settings from /home/sanchit_huggingface_co/wav2vec2-2-roberta-no-adapter-regularisation/wandb/settings
3
+ 2022-03-22 09:44:06,225 INFO MainThread:28799 [wandb_setup.py:_flush():75] Loading settings from environment variables: {}
4
+ 2022-03-22 09:44:06,225 INFO MainThread:28799 [wandb_setup.py:_flush():75] Inferring run settings from compute environment: {'program_relpath': 'run_speech_recognition_seq2seq.py', 'program': '/home/sanchit_huggingface_co/wav2vec2-2-roberta-no-adapter-regularisation/run_speech_recognition_seq2seq.py'}
5
+ 2022-03-22 09:44:06,225 INFO MainThread:28799 [wandb_init.py:_log_setup():386] Logging user logs to /home/sanchit_huggingface_co/wav2vec2-2-roberta-no-adapter-regularisation/wandb/run-20220322_094406-dk50d3c6/logs/debug.log
6
+ 2022-03-22 09:44:06,225 INFO MainThread:28799 [wandb_init.py:_log_setup():387] Logging internal logs to /home/sanchit_huggingface_co/wav2vec2-2-roberta-no-adapter-regularisation/wandb/run-20220322_094406-dk50d3c6/logs/debug-internal.log
7
+ 2022-03-22 09:44:06,225 INFO MainThread:28799 [wandb_init.py:init():420] calling init triggers
8
+ 2022-03-22 09:44:06,225 INFO MainThread:28799 [wandb_init.py:init():425] wandb.init called with sweep_config: {}
9
+ config: {}
10
+ 2022-03-22 09:44:06,225 INFO MainThread:28799 [wandb_init.py:init():471] starting backend
11
+ 2022-03-22 09:44:06,225 INFO MainThread:28799 [backend.py:_multiprocessing_setup():99] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
12
+ 2022-03-22 09:44:06,280 INFO MainThread:28799 [backend.py:ensure_launched():219] starting backend process...
13
+ 2022-03-22 09:44:06,331 INFO MainThread:28799 [backend.py:ensure_launched():224] started backend process with pid: 28952
14
+ 2022-03-22 09:44:06,333 INFO MainThread:28799 [wandb_init.py:init():480] backend started and connected
15
+ 2022-03-22 09:44:06,344 INFO MainThread:28799 [wandb_init.py:init():550] updated telemetry
16
+ 2022-03-22 09:44:06,466 INFO MainThread:28799 [wandb_init.py:init():581] communicating current version
17
+ 2022-03-22 09:44:07,212 INFO MainThread:28799 [wandb_init.py:init():586] got version response upgrade_message: "wandb version 0.12.11 is available! To upgrade, please run:\n $ pip install wandb --upgrade"
18
+
19
+ 2022-03-22 09:44:07,213 INFO MainThread:28799 [wandb_init.py:init():596] communicating run to backend with 30 second timeout
20
+ 2022-03-22 09:44:07,345 INFO MainThread:28799 [wandb_init.py:init():624] starting run threads in backend
21
+ 2022-03-22 09:44:07,451 INFO MainThread:28799 [wandb_run.py:_console_start():1827] atexit reg
22
+ 2022-03-22 09:44:07,451 INFO MainThread:28799 [wandb_run.py:_redirect():1701] redirect: SettingsConsole.REDIRECT
23
+ 2022-03-22 09:44:07,452 INFO MainThread:28799 [wandb_run.py:_redirect():1706] Redirecting console.
24
+ 2022-03-22 09:44:07,453 INFO MainThread:28799 [wandb_run.py:_redirect():1762] Redirects installed.
25
+ 2022-03-22 09:44:07,453 INFO MainThread:28799 [wandb_init.py:init():651] run started, returning control to user process
26
+ 2022-03-22 09:44:07,456 INFO MainThread:28799 [wandb_run.py:_config_callback():966] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'torch.float32', 'use_bfloat16': False, 'pruned_heads': {}, 'tie_word_embeddings': False, 'is_encoder_decoder': True, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 50, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'chunk_size_feed_forward': 0, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'architectures': ['SpeechEncoderDecoderModel'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': 1, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': 0, 'task_specific_params': None, 'problem_type': None, '_name_or_path': './', 'transformers_version': None, 'decoder': {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'is_encoder_decoder': False, 'is_decoder': True, 'cross_attention_hidden_size': None, 'add_cross_attention': True, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'chunk_size_feed_forward': 0, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'architectures': ['RobertaForMaskedLM'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 0, 'pad_token_id': 1, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'roberta-large', 'transformers_version': '4.17.0.dev0', 'vocab_size': 50265, 'hidden_size': 1024, 'num_hidden_layers': 24, 'num_attention_heads': 16, 'hidden_act': 'gelu', 'intermediate_size': 4096, 'hidden_dropout_prob': 0.1, 'attention_probs_dropout_prob': 0.1, 'max_position_embeddings': 514, 'type_vocab_size': 1, 'initializer_range': 0.02, 'layer_norm_eps': 1e-05, 'position_embedding_type': 'absolute', 'use_cache': False, 'classifier_dropout': None, 'model_type': 'roberta'}, 'encoder': {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 
'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'chunk_size_feed_forward': 0, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'architectures': ['Wav2Vec2ForPreTraining'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': 0, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'facebook/wav2vec2-large-lv60', 'transformers_version': '4.17.0.dev0', 'feat_extract_dropout': 0.0, 'gradient_checkpointing': False, 'hidden_dropout_prob': 0.1, 'num_feat_extract_layers': 7, 'hidden_size': 1024, 'feat_extract_norm': 'layer', 'feat_extract_activation': 'gelu', 'conv_dim': [512, 512, 512, 512, 512, 512, 512], 'conv_stride': [5, 2, 2, 2, 2, 2, 2], 'conv_kernel': [10, 3, 3, 3, 3, 2, 2], 'conv_bias': True, 'num_conv_pos_embeddings': 128, 'num_conv_pos_embedding_groups': 16, 'num_hidden_layers': 24, 'intermediate_size': 4096, 'hidden_act': 'gelu', 'num_attention_heads': 16, 'hidden_dropout': 0.1, 'attention_dropout': 0.1, 'activation_dropout': 0.1, 'feat_proj_dropout': 0.0, 'final_dropout': 0.0, 'layerdrop': 0.0, 'layer_norm_eps': 1e-05, 'initializer_range': 0.02, 'vocab_size': 32, 'do_stable_layer_norm': True, 'use_weighted_layer_sum': False, 'apply_spec_augment': True, 'mask_time_prob': 0.1, 'mask_time_length': 10, 'mask_time_min_masks': 2, 'mask_feature_prob': 0.0, 'mask_feature_length': 10, 'mask_feature_min_masks': 0, 'num_codevectors_per_group': 320, 'num_codevector_groups': 2, 'contrastive_logits_temperature': 0.1, 'feat_quantizer_dropout': 0.0, 'num_negatives': 100, 'codevector_dim': 768, 'proj_codevector_dim': 768, 'diversity_loss_weight': 0.1, 'ctc_loss_reduction': 'sum', 'ctc_zero_infinity': False, 'add_adapter': False, 'adapter_kernel_size': 3, 'adapter_stride': 2, 'num_adapter_layers': 3, 'output_hidden_size': 1024, 'classifier_proj_size': 256, 'tdnn_dim': [512, 512, 512, 512, 1500], 'tdnn_kernel': [5, 3, 3, 1, 1], 'tdnn_dilation': [1, 2, 3, 1, 1], 'xvector_output_dim': 512, 'model_type': 'wav2vec2'}, 'model_type': 'speech-encoder-decoder', 'processor_class': 'Wav2Vec2Processor', 'use_cache': False, 'output_dir': './', 'overwrite_output_dir': False, 'do_train': True, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 8, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': 'None', 'per_gpu_eval_batch_size': 'None', 'gradient_accumulation_steps': 4, 'eval_accumulation_steps': 'None', 'learning_rate': 5e-06, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 10.0, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'warmup_ratio': 0.0, 'warmup_steps': 1, 'log_level': -1, 'log_level_replica': -1, 'log_on_each_node': True, 'logging_dir': './runs/Mar22_09-43-31_sanchit--v100', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 1, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 1500, 'save_total_limit': 1, 'save_on_each_node': False, 
'no_cuda': False, 'seed': 42, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'amp', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': 'None', 'local_rank': -1, 'xpu_backend': 'None', 'tpu_num_cores': 'None', 'tpu_metrics_debug': False, 'debug': '[]', 'dataloader_drop_last': False, 'eval_steps': 1500, 'dataloader_num_workers': 0, 'past_index': -1, 'run_name': './', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': 'None', 'load_best_model_at_end': False, 'metric_for_best_model': 'None', 'greater_is_better': 'None', 'ignore_data_skip': False, 'sharded_ddp': '[]', 'deepspeed': 'None', 'label_smoothing_factor': 0.0, 'optim': 'adamw_hf', 'adafactor': False, 'group_by_length': True, 'length_column_name': 'input_length', 'report_to': "['tensorboard', 'wandb']", 'ddp_find_unused_parameters': 'None', 'ddp_bucket_cap_mb': 'None', 'dataloader_pin_memory': True, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': 'None', 'hub_model_id': 'None', 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'gradient_checkpointing': True, 'fp16_backend': 'auto', 'push_to_hub_model_id': 'None', 'push_to_hub_organization': 'None', 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', '_n_gpu': 1, 'mp_parameters': '', 'sortish_sampler': False, 'predict_with_generate': True, 'generation_max_length': 40, 'generation_num_beams': 1, 'train_batch_size': 8, 'eval_batch_size': 8}
27
+ 2022-03-22 09:44:07,459 INFO MainThread:28799 [wandb_watch.py:watch():43] Watching
28
+ 2022-03-22 09:44:50,125 INFO MainThread:28799 [wandb_run.py:_atexit_cleanup():1797] got exitcode: 255
29
+ 2022-03-22 09:44:50,127 INFO MainThread:28799 [wandb_run.py:_restore():1769] restore
30
+ 2022-03-22 09:44:52,599 INFO MainThread:28799 [wandb_run.py:_restore():1769] restore
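The config_cb entry in debug.log above records the full model and trainer configuration: a SpeechEncoderDecoderModel pairing a facebook/wav2vec2-large-lv60 encoder with a roberta-large decoder, trained at a learning rate of 5e-6 with a single warmup step, 10 epochs, per-device batch size 8 and 4 gradient-accumulation steps. The sketch below shows how such a pairing and the matching Seq2SeqTrainingArguments could be set up with transformers; it mirrors the logged values only, is not the author's run_speech_recognition_seq2seq.py script, and the exact argument set may vary between transformers versions:

    from transformers import SpeechEncoderDecoderModel, Seq2SeqTrainingArguments

    # Encoder/decoder pairing as recorded in the config dump above.
    model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
        "facebook/wav2vec2-large-lv60", "roberta-large"
    )

    # Token ids and generation settings taken from the logged config.
    model.config.decoder_start_token_id = 0
    model.config.pad_token_id = 1
    model.config.eos_token_id = 2
    model.config.use_cache = False
    model.config.max_length = 50

    # Training arguments mirroring the values logged above (a subset, not the
    # full argument list of the original script).
    training_args = Seq2SeqTrainingArguments(
        output_dir="./",
        per_device_train_batch_size=8,
        per_device_eval_batch_size=8,
        gradient_accumulation_steps=4,
        learning_rate=5e-6,
        warmup_steps=1,
        num_train_epochs=10,
        evaluation_strategy="steps",
        eval_steps=1500,
        save_steps=1500,
        save_total_limit=1,
        logging_steps=1,
        fp16=True,
        gradient_checkpointing=True,
        group_by_length=True,
        length_column_name="input_length",
        predict_with_generate=True,
        generation_max_length=40,
        generation_num_beams=1,
    )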
wandb/run-20220322_094406-dk50d3c6/run-dk50d3c6.wandb ADDED
Binary file (9 kB).