sanchit-gandhi's picture
Saving weights and logs of epoch 1
8319971
raw
history blame
No virus
43.8 kB
05/07/2022 17:43:00 - INFO - __main__ - Training/evaluation parameters FlaxSeq2SeqTrainingArguments(
_n_gpu=-1,
adafactor=False,
adam_beta1=0.9,
adam_beta2=0.999,
adam_epsilon=1e-08,
bf16=False,
bf16_full_eval=False,
data_seed=None,
dataloader_drop_last=False,
dataloader_num_workers=0,
dataloader_pin_memory=True,
ddp_bucket_cap_mb=None,
ddp_find_unused_parameters=None,
debug=,
deepspeed=None,
disable_tqdm=None,
do_eval=True,
do_predict=True,
do_train=True,
eval_accumulation_steps=None,
eval_delay=0,
eval_steps=5,
evaluation_strategy=no,
final_generation_max_length=50,
final_generation_num_beams=2,
fp16=False,
fp16_backend=auto,
fp16_full_eval=False,
fp16_opt_level=O1,
generation_length_penalty=1,
generation_max_length=40,
generation_num_beams=1,
gradient_accumulation_steps=1,
gradient_checkpointing=False,
greater_is_better=None,
group_by_length=False,
half_precision_backend=auto,
hub_model_id=None,
hub_strategy=every_save,
hub_token=<HUB_TOKEN>,
ignore_data_skip=False,
label_names=None,
label_smoothing_factor=0.0,
learning_rate=0.0003,
length_column_name=input_length,
load_best_model_at_end=False,
local_rank=-1,
log_level=passive,
log_level_replica=passive,
log_on_each_node=True,
logging_dir=None,
logging_first_step=False,
logging_nan_inf_filter=True,
logging_steps=1,
logging_strategy=steps,
lr_scheduler_type=linear,
matmul_precision=default,
max_grad_norm=1.0,
max_steps=15,
metric_for_best_model=None,
mp_parameters=,
no_cuda=False,
num_train_epochs=5.0,
optim=adamw_hf,
output_dir=./,
overwrite_output_dir=True,
past_index=-1,
per_device_eval_batch_size=2,
per_device_train_batch_size=4,
precision=full,
predict_with_generate=True,
prediction_loss_only=False,
push_to_hub=False,
push_to_hub_model_id=None,
push_to_hub_organization=None,
push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
remove_unused_columns=True,
report_to=None,
resume_from_checkpoint=None,
run_name=None,
save_on_each_node=False,
save_steps=10,
save_strategy=steps,
save_total_limit=1,
seed=42,
sharded_ddp=,
skip_memory_metrics=True,
sortish_sampler=False,
tf32=None,
tpu_metrics_debug=False,
tpu_num_cores=None,
use_legacy_prediction_loop=False,
warmup_ratio=0.0,
warmup_steps=500,
weight_decay=0.0,
xpu_backend=None,
)
05/07/2022 17:43:00 - INFO - __main__ - JAX devices: 8, matmul precision: default
05/07/2022 17:43:01 - WARNING - datasets.builder - Reusing dataset librispeech_asr (/home/sanchitgandhi/cache/huggingface/datasets/hf-internal-testing___librispeech_asr/clean/2.1.0/d3bc4c2bc2078fcde3ad0f0f635862e4c0fef78ba94c4a34c4c250a097af240b)
05/07/2022 17:43:01 - WARNING - datasets.builder - Reusing dataset librispeech_asr (/home/sanchitgandhi/cache/huggingface/datasets/hf-internal-testing___librispeech_asr/clean/2.1.0/d3bc4c2bc2078fcde3ad0f0f635862e4c0fef78ba94c4a34c4c250a097af240b)
loading configuration file ./config.json
You passed along `num_labels=3` with an incompatible id to label map: {'0': 'LABEL_0', '1': 'LABEL_1'}. The number of labels will be overwritten to 2.
Model config SpeechEncoderDecoderConfig {
"_name_or_path": "./",
"architectures": [
"SpeechEncoderDecoderModel"
],
"decoder": {
"_name_or_path": "",
"activation_dropout": 0.0,
"activation_function": "gelu",
"add_cross_attention": true,
"architectures": null,
"attention_dropout": 0.1,
"bad_words_ids": null,
"bos_token_id": 0,
"chunk_size_feed_forward": 0,
"classifier_dropout": 0.0,
"cross_attention_hidden_size": null,
"d_model": 16,
"decoder_attention_heads": 4,
"decoder_ffn_dim": 4,
"decoder_layerdrop": 0.0,
"decoder_layers": 2,
"decoder_start_token_id": 2,
"diversity_penalty": 0.0,
"do_sample": false,
"dropout": 0.1,
"early_stopping": false,
"encoder_attention_heads": 4,
"encoder_ffn_dim": 4,
"encoder_layerdrop": 0.0,
"encoder_layers": 2,
"encoder_no_repeat_ngram_size": 0,
"eos_token_id": 2,
"exponential_decay_length_penalty": null,
"finetuning_task": null,
"forced_bos_token_id": null,
"forced_eos_token_id": 2,
"fuse_matmuls": false,
"gradient_checkpointing": false,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1"
},
"init_std": 0.02,
"is_decoder": true,
"is_encoder_decoder": false,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1
},
"length_penalty": 1.0,
"max_length": 20,
"max_position_embeddings": 100,
"min_length": 0,
"model_type": "bart",
"no_repeat_ngram_size": 0,
"num_beam_groups": 1,
"num_beams": 1,
"num_hidden_layers": 2,
"num_return_sequences": 1,
"output_attentions": false,
"output_hidden_states": false,
"output_scores": false,
"pad_token_id": 1,
"prefix": null,
"problem_type": null,
"pruned_heads": {},
"remove_invalid_values": false,
"repetition_penalty": 1.0,
"return_dict": true,
"return_dict_in_generate": false,
"scale_embedding": false,
"sep_token_id": null,
"task_specific_params": null,
"temperature": 1.0,
"tie_encoder_decoder": false,
"tie_word_embeddings": true,
"tokenizer_class": null,
"top_k": 50,
"top_p": 1.0,
"torch_dtype": null,
"torchscript": false,
"transformers_version": "4.18.0.dev0",
"typical_p": 1.0,
"use_bfloat16": false,
"use_cache": true,
"use_scan": true,
"vocab_size": 1000
},
"decoder_start_token_id": 0,
"encoder": {
"_name_or_path": "",
"activation_dropout": 0.1,
"adapter_kernel_size": 3,
"adapter_stride": 2,
"add_adapter": true,
"add_cross_attention": false,
"apply_spec_augment": true,
"architectures": null,
"attention_dropout": 0.1,
"bad_words_ids": null,
"bos_token_id": 1,
"chunk_size_feed_forward": 0,
"classifier_proj_size": 256,
"codevector_dim": 256,
"contrastive_logits_temperature": 0.1,
"conv_bias": false,
"conv_dim": [
32,
32,
32
],
"conv_kernel": [
8,
8,
8
],
"conv_stride": [
4,
4,
4
],
"cross_attention_hidden_size": null,
"ctc_loss_reduction": "sum",
"ctc_zero_infinity": false,
"decoder_start_token_id": null,
"diversity_loss_weight": 0.1,
"diversity_penalty": 0.0,
"do_sample": false,
"do_stable_layer_norm": true,
"early_stopping": false,
"encoder_no_repeat_ngram_size": 0,
"eos_token_id": 2,
"exponential_decay_length_penalty": null,
"feat_extract_activation": "gelu",
"feat_extract_dropout": 0.0,
"feat_extract_norm": "layer",
"feat_proj_dropout": 0.0,
"feat_quantizer_dropout": 0.0,
"final_dropout": 0.0,
"finetuning_task": null,
"forced_bos_token_id": null,
"forced_eos_token_id": null,
"fuse_matmuls": false,
"gradient_checkpointing": false,
"hidden_act": "gelu",
"hidden_dropout": 0.1,
"hidden_dropout_prob": 0.1,
"hidden_size": 16,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1"
},
"initializer_range": 0.02,
"intermediate_size": 20,
"is_decoder": false,
"is_encoder_decoder": false,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1
},
"layer_norm_eps": 1e-05,
"layerdrop": 0.0,
"length_penalty": 1.0,
"mask_feature_length": 10,
"mask_feature_min_masks": 0,
"mask_feature_prob": 0.0,
"mask_time_length": 10,
"mask_time_min_masks": 2,
"mask_time_prob": 0.1,
"max_length": 20,
"min_length": 0,
"model_type": "wav2vec2",
"no_repeat_ngram_size": 0,
"num_adapter_layers": 3,
"num_attention_heads": 2,
"num_beam_groups": 1,
"num_beams": 1,
"num_codevector_groups": 2,
"num_codevectors_per_group": 320,
"num_conv_pos_embedding_groups": 2,
"num_conv_pos_embeddings": 16,
"num_feat_extract_layers": 3,
"num_hidden_layers": 4,
"num_negatives": 10,
"num_return_sequences": 1,
"output_attentions": false,
"output_hidden_size": 16,
"output_hidden_states": false,
"output_scores": false,
"pad_token_id": 0,
"prefix": null,
"problem_type": null,
"proj_codevector_dim": 256,
"pruned_heads": {},
"remove_invalid_values": false,
"repetition_penalty": 1.0,
"return_dict": true,
"return_dict_in_generate": false,
"sep_token_id": null,
"task_specific_params": null,
"tdnn_dilation": [
1,
2,
3,
1,
1
],
"tdnn_dim": [
512,
512,
512,
512,
1500
],
"tdnn_kernel": [
5,
3,
3,
1,
1
],
"temperature": 1.0,
"tie_encoder_decoder": false,
"tie_word_embeddings": true,
"tokenizer_class": null,
"top_k": 50,
"top_p": 1.0,
"torch_dtype": null,
"torchscript": false,
"transformers_version": "4.18.0.dev0",
"typical_p": 1.0,
"use_bfloat16": false,
"use_scan": true,
"use_weighted_layer_sum": false,
"vocab_size": 32,
"xvector_output_dim": 512
},
"eos_token_id": 2,
"is_encoder_decoder": true,
"max_length": 40,
"model_type": "speech-encoder-decoder",
"pad_token_id": 1,
"processor_class": "Wav2Vec2Processor",
"tie_word_embeddings": false,
"transformers_version": null,
"use_cache": false
}
loading feature extractor configuration file ./preprocessor_config.json
Feature extractor Wav2Vec2FeatureExtractor {
"do_normalize": true,
"feature_extractor_type": "Wav2Vec2FeatureExtractor",
"feature_size": 1,
"padding_side": "right",
"padding_value": 0.0,
"return_attention_mask": false,
"sampling_rate": 16000
}
Didn't find file ./added_tokens.json. We won't load it.
loading file ./vocab.json
loading file ./merges.txt
loading file ./tokenizer.json
loading file None
loading file ./special_tokens_map.json
loading file ./tokenizer_config.json
loading weights file ./flax_model.msgpack
05/07/2022 17:43:02 - WARNING - datasets.builder - Reusing dataset librispeech_asr (/home/sanchitgandhi/cache/huggingface/datasets/hf-internal-testing___librispeech_asr/clean/2.1.0/d3bc4c2bc2078fcde3ad0f0f635862e4c0fef78ba94c4a34c4c250a097af240b)
05/07/2022 17:43:02 - WARNING - datasets.builder - Reusing dataset librispeech_asr (/home/sanchitgandhi/cache/huggingface/datasets/hf-internal-testing___librispeech_asr/clean/2.1.0/d3bc4c2bc2078fcde3ad0f0f635862e4c0fef78ba94c4a34c4c250a097af240b)
All model checkpoint weights were used when initializing FlaxSpeechEncoderDecoderModel.
All the weights of FlaxSpeechEncoderDecoderModel were initialized from the model checkpoint at ./.
If your task is similar to the task the model of the checkpoint was trained on, you can already use FlaxSpeechEncoderDecoderModel for predictions without further training.
preprocess train dataset: 0%| | 0/73 [00:00<?, ?ex/s]
05/07/2022 17:43:19 - WARNING - datasets.arrow_dataset - Loading cached processed dataset at /home/sanchitgandhi/cache/huggingface/datasets/hf-internal-testing___librispeech_asr/clean/2.1.0/d3bc4c2bc2078fcde3ad0f0f635862e4c0fef78ba94c4a34c4c250a097af240b/cache-172908eb2439798c.arrow
05/07/2022 17:43:19 - WARNING - datasets.arrow_dataset - Loading cached processed dataset at /home/sanchitgandhi/cache/huggingface/datasets/hf-internal-testing___librispeech_asr/clean/2.1.0/d3bc4c2bc2078fcde3ad0f0f635862e4c0fef78ba94c4a34c4c250a097af240b/cache-760f29b7172d4ca5.arrow
preprocess train dataset: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 73/73 [00:05<00:00, 12.74ex/s]
preprocess train dataset: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 66/66 [00:00<00:00, 344.26ex/s]
preprocess train dataset: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 69/69 [00:00<00:00, 348.17ex/s]
100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 1/1 [00:00<00:00, 25.07ba/s]
100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 1/1 [00:00<00:00, 1008.49ba/s]
100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 1/1 [00:00<00:00, 1090.00ba/s]
100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 1/1 [00:00<00:00, 682.44ba/s]
100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 1/1 [00:00<00:00, 777.44ba/s]
100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 1/1 [00:00<00:00, 695.92ba/s]
Feature extractor saved in ./preprocessor_config.json
tokenizer config file saved in ./tokenizer_config.json
Special tokens file saved in ./special_tokens_map.json
Configuration saved in ./config.json
loading feature extractor configuration file ./preprocessor_config.json
loading configuration file ./config.json
You passed along `num_labels=3` with an incompatible id to label map: {'0': 'LABEL_0', '1': 'LABEL_1'}. The number of labels will be overwritten to 2.
Model config SpeechEncoderDecoderConfig {
"_name_or_path": "./",
"architectures": [
"SpeechEncoderDecoderModel"
],
"decoder": {
"_name_or_path": "",
"activation_dropout": 0.0,
"activation_function": "gelu",
"add_cross_attention": true,
"architectures": null,
"attention_dropout": 0.1,
"bad_words_ids": null,
"bos_token_id": 0,
"chunk_size_feed_forward": 0,
"classifier_dropout": 0.0,
"cross_attention_hidden_size": null,
"d_model": 16,
"decoder_attention_heads": 4,
"decoder_ffn_dim": 4,
"decoder_layerdrop": 0.0,
"decoder_layers": 2,
"decoder_start_token_id": 2,
"diversity_penalty": 0.0,
"do_sample": false,
"dropout": 0.1,
"early_stopping": false,
"encoder_attention_heads": 4,
"encoder_ffn_dim": 4,
"encoder_layerdrop": 0.0,
"encoder_layers": 2,
"encoder_no_repeat_ngram_size": 0,
"eos_token_id": 2,
"exponential_decay_length_penalty": null,
"finetuning_task": null,
"forced_bos_token_id": null,
"forced_eos_token_id": 2,
"fuse_matmuls": false,
"gradient_checkpointing": false,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1"
},
"init_std": 0.02,
"is_decoder": true,
"is_encoder_decoder": false,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1
},
"length_penalty": 1.0,
"max_length": 20,
"max_position_embeddings": 100,
"min_length": 0,
"model_type": "bart",
"no_repeat_ngram_size": 0,
"num_beam_groups": 1,
"num_beams": 1,
"num_hidden_layers": 2,
"num_return_sequences": 1,
"output_attentions": false,
"output_hidden_states": false,
"output_scores": false,
"pad_token_id": 1,
"prefix": null,
"problem_type": null,
"pruned_heads": {},
"remove_invalid_values": false,
"repetition_penalty": 1.0,
"return_dict": true,
"return_dict_in_generate": false,
"scale_embedding": false,
"sep_token_id": null,
"task_specific_params": null,
"temperature": 1.0,
"tie_encoder_decoder": false,
"tie_word_embeddings": true,
"tokenizer_class": null,
"top_k": 50,
"top_p": 1.0,
"torch_dtype": null,
"torchscript": false,
"transformers_version": "4.18.0.dev0",
"typical_p": 1.0,
"use_bfloat16": false,
"use_cache": true,
"use_scan": true,
"vocab_size": 1000
},
"decoder_start_token_id": 0,
"encoder": {
"_name_or_path": "",
"activation_dropout": 0.1,
"adapter_kernel_size": 3,
"adapter_stride": 2,
"add_adapter": true,
"add_cross_attention": false,
"apply_spec_augment": true,
"architectures": null,
"attention_dropout": 0.1,
"bad_words_ids": null,
"bos_token_id": 1,
"chunk_size_feed_forward": 0,
"classifier_proj_size": 256,
"codevector_dim": 256,
"contrastive_logits_temperature": 0.1,
"conv_bias": false,
"conv_dim": [
32,
32,
32
],
"conv_kernel": [
8,
8,
8
],
"conv_stride": [
4,
4,
4
],
"cross_attention_hidden_size": null,
"ctc_loss_reduction": "sum",
"ctc_zero_infinity": false,
"decoder_start_token_id": null,
"diversity_loss_weight": 0.1,
"diversity_penalty": 0.0,
"do_sample": false,
"do_stable_layer_norm": true,
"early_stopping": false,
"encoder_no_repeat_ngram_size": 0,
"eos_token_id": 2,
"exponential_decay_length_penalty": null,
"feat_extract_activation": "gelu",
"feat_extract_dropout": 0.0,
"feat_extract_norm": "layer",
"feat_proj_dropout": 0.0,
"feat_quantizer_dropout": 0.0,
"final_dropout": 0.0,
"finetuning_task": null,
"forced_bos_token_id": null,
"forced_eos_token_id": null,
"fuse_matmuls": false,
"gradient_checkpointing": false,
"hidden_act": "gelu",
"hidden_dropout": 0.1,
"hidden_dropout_prob": 0.1,
"hidden_size": 16,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1"
},
"initializer_range": 0.02,
"intermediate_size": 20,
"is_decoder": false,
"is_encoder_decoder": false,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1
},
"layer_norm_eps": 1e-05,
"layerdrop": 0.0,
"length_penalty": 1.0,
"mask_feature_length": 10,
"mask_feature_min_masks": 0,
"mask_feature_prob": 0.0,
"mask_time_length": 10,
"mask_time_min_masks": 2,
"mask_time_prob": 0.1,
"max_length": 20,
"min_length": 0,
"model_type": "wav2vec2",
"no_repeat_ngram_size": 0,
"num_adapter_layers": 3,
"num_attention_heads": 2,
"num_beam_groups": 1,
"num_beams": 1,
"num_codevector_groups": 2,
"num_codevectors_per_group": 320,
"num_conv_pos_embedding_groups": 2,
"num_conv_pos_embeddings": 16,
"num_feat_extract_layers": 3,
"num_hidden_layers": 4,
"num_negatives": 10,
"num_return_sequences": 1,
"output_attentions": false,
"output_hidden_size": 16,
"output_hidden_states": false,
"output_scores": false,
"pad_token_id": 0,
"prefix": null,
"problem_type": null,
"proj_codevector_dim": 256,
"pruned_heads": {},
"remove_invalid_values": false,
"repetition_penalty": 1.0,
"return_dict": true,
"return_dict_in_generate": false,
"sep_token_id": null,
"task_specific_params": null,
"tdnn_dilation": [
1,
2,
3,
1,
1
],
"tdnn_dim": [
512,
512,
512,
512,
1500
],
"tdnn_kernel": [
5,
3,
3,
1,
1
],
"temperature": 1.0,
"tie_encoder_decoder": false,
"tie_word_embeddings": true,
"tokenizer_class": null,
"top_k": 50,
"top_p": 1.0,
"torch_dtype": null,
"torchscript": false,
"transformers_version": "4.18.0.dev0",
"typical_p": 1.0,
"use_bfloat16": false,
"use_scan": true,
"use_weighted_layer_sum": false,
"vocab_size": 32,
"xvector_output_dim": 512
},
"eos_token_id": 2,
"is_encoder_decoder": true,
"max_length": 40,
"model_type": "speech-encoder-decoder",
"pad_token_id": 1,
"processor_class": "Wav2Vec2Processor",
"tie_word_embeddings": false,
"transformers_version": null,
"use_cache": false
}
loading feature extractor configuration file ./preprocessor_config.json
Feature extractor Wav2Vec2FeatureExtractor {
"do_normalize": true,
"feature_extractor_type": "Wav2Vec2FeatureExtractor",
"feature_size": 1,
"padding_side": "right",
"padding_value": 0.0,
"return_attention_mask": false,
"sampling_rate": 16000
}
Didn't find file ./added_tokens.json. We won't load it.
loading file ./vocab.json
loading file ./merges.txt
loading file ./tokenizer.json
loading file None
loading file ./special_tokens_map.json
loading file ./tokenizer_config.json
2022-05-07 17:43:20.362548: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory
2022-05-07 17:43:20.362582: W tensorflow/stream_executor/cuda/cuda_driver.cc:269] failed call to cuInit: UNKNOWN ERROR (303)
Epoch ... (1/34): 0%| | 0/34 [00:00<?, ?it/s]
Training...: 0%| | 0/2 [00:00<?, ?it/s]
05/07/2022 17:43:20 - INFO - __main__ - ***** Running training *****
05/07/2022 17:43:20 - INFO - __main__ - Num examples = 68
05/07/2022 17:43:20 - INFO - __main__ - Num Epochs = 34
05/07/2022 17:43:20 - INFO - __main__ - Instantaneous batch size per device = 4
05/07/2022 17:43:20 - INFO - __main__ - Num gradient accumulation steps = 1
05/07/2022 17:43:20 - INFO - __main__ - Total train batch size (w. parallel & distributed) = 32
05/07/2022 17:43:20 - INFO - __main__ - Total optimization steps = 68
05/07/2022 17:43:20 - INFO - __main__ - Gradient checkpointing: False
05/07/2022 17:43:20 - INFO - __main__ - Use scan: True
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:24<00:24, 24.75s/it]
Step... (1 | Loss: 6.909794330596924, Learning Rate: 6.00004568696022e-07, Gradient Norm: 0.20182259380817413)
Epoch ... (1/34): 3%|β–ˆβ–‰ | 1/34 [00:51<27:25, 49.86s/it]
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:01<00:01, 1.34s/it]
Step... (3 | Loss: 6.905749797821045, Learning Rate: 1.7999846022576094e-06, Gradient Norm: 0.21621592342853546)
Epoch ... (1/34): 6%|β–ˆβ–ˆβ–ˆβ–‰ | 2/34 [00:53<11:43, 21.99s/it]
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:01<00:01, 1.22s/it]
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:02<00:01, 1.22s/it]
Evaluating ...: 75%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Š | 3/4 [01:09<00:22, 22.82s/it]
Epoch... (3/34 | Eval Loss: 6.906824588775635 | Eval wer: 1.603448275862069 |)
Epoch... (3/34 | Eval Loss: 6.906824588775635 | Eval wer: 1.603448275862069 |): 9%|β–Ž | 3/34 [02:06<23:19, 45.13s/it]
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:01<00:01, 1.16s/it]
Epoch... (3/34 | Eval Loss: 6.906824588775635 | Eval wer: 1.603448275862069 |): 12%|▍ | 4/34 [02:07<14:07, 28.25s/it]
Training...: 0%| | 0/2 [00:00<?, ?it/s]
Step... (8 | Loss: 6.909614086151123, Learning Rate: 4.799978341907263e-06, Gradient Norm: 0.2112041562795639)
Epoch... (3/34 | Eval Loss: 6.906824588775635 | Eval wer: 1.603448275862069 |): 15%|β–Œ | 5/34 [02:11<09:08, 18.93s/it]
Training...: 0%| | 0/2 [00:01<?, ?it/s]
Step... (10 | Loss: 6.907970905303955, Learning Rate: 5.999987479299307e-06, Gradient Norm: 0.20816193521022797)
Evaluating ...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Œ | 2/4 [00:01<00:01, 1.75it/s]
Epoch... (6/34 | Eval Loss: 6.906639575958252 | Eval wer: 1.603448275862069 |)
Model weights saved in /home/sanchitgandhi/flax-wav2vec2-2-bart-dummy/flax_model.msgpack | 0/2 [00:03<?, ?it/s]
tokenizer config file saved in ./tokenizer_config.json
Special tokens file saved in ./special_tokens_map.json
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:03<00:03, 3.85s/it]
Step... (11 | Loss: 6.910458087921143, Learning Rate: 6.599992047995329e-06, Gradient Norm: 0.22143037617206573)
Epoch... (6/34 | Eval Loss: 6.906639575958252 | Eval wer: 1.603448275862069 |): 18%|β–‹ | 6/34 [02:15<06:37, 14.19s/it]
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:01<00:01, 1.18s/it]
Step... (13 | Loss: 6.905548572540283, Learning Rate: 7.800001185387373e-06, Gradient Norm: 0.2015129178762436)
Epoch... (6/34 | Eval Loss: 6.906639575958252 | Eval wer: 1.603448275862069 |): 21%|β–Š | 7/34 [02:18<04:39, 10.34s/it]
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:01<00:01, 1.17s/it]
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:02<00:01, 1.17s/it]
Evaluating ...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Œ | 2/4 [00:01<00:01, 1.77it/s]
Epoch... (8/34 | Eval Loss: 6.906340599060059 | Eval wer: 1.603448275862069 |): 24%|β–‰ | 8/34 [02:22<03:43, 8.61s/it]
Training...: 0%| | 0/2 [00:00<?, ?it/s]
Step... (16 | Loss: 6.909521579742432, Learning Rate: 9.600014891475439e-06, Gradient Norm: 0.19841279089450836)
Epoch... (8/34 | Eval Loss: 6.906340599060059 | Eval wer: 1.603448275862069 |): 26%|β–ˆ | 9/34 [02:24<02:46, 6.67s/it]
Training...: 0%| | 0/2 [00:00<?, ?it/s]
Step... (18 | Loss: 6.905771255493164, Learning Rate: 1.0799994925037026e-05, Gradient Norm: 0.20174430310726166)
Epoch... (8/34 | Eval Loss: 6.906340599060059 | Eval wer: 1.603448275862069 |): 29%|β–‰ | 10/34 [02:26<02:08, 5.35s/it]
Training...: 0%| | 0/2 [00:00<?, ?it/s]
Training...: 0%| | 0/2 [00:01<?, ?it/s]
Evaluating ...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Œ | 2/4 [00:01<00:01, 1.82it/s]
Epoch... (11/34 | Eval Loss: 6.905916213989258 | Eval wer: 1.603448275862069 |)
Model weights saved in /home/sanchitgandhi/flax-wav2vec2-2-bart-dummy/flax_model.msgpack | 0/2 [00:03<?, ?it/s]
tokenizer config file saved in ./tokenizer_config.json
Special tokens file saved in ./special_tokens_map.json
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:03<00:03, 3.58s/it]
Step... (21 | Loss: 6.905186176300049, Learning Rate: 1.2600008631125093e-05, Gradient Norm: 0.21243052184581757)
Epoch... (11/34 | Eval Loss: 6.905916213989258 | Eval wer: 1.603448275862069 |): 32%|β–‹ | 11/34 [02:33<01:59, 5.19s/it]
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:01<00:01, 1.33s/it]
Step... (23 | Loss: 6.9039998054504395, Learning Rate: 1.379998866468668e-05, Gradient Norm: 0.2145574390888214)
Epoch... (11/34 | Eval Loss: 6.905916213989258 | Eval wer: 1.603448275862069 |): 35%|β–‹ | 12/34 [02:35<01:36, 4.37s/it]
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:01<00:01, 1.24s/it]
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:02<00:01, 1.24s/it]
Evaluating ...: 25%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Ž | 1/4 [00:00<00:02, 1.37it/s]
Epoch... (13/34 | Eval Loss: 6.905399322509766 | Eval wer: 1.603448275862069 |): 38%|β–Š | 13/34 [02:39<01:35, 4.56s/it]
Training...: 0%| | 0/2 [00:00<?, ?it/s]
Step... (26 | Loss: 6.909581184387207, Learning Rate: 1.5600002370774746e-05, Gradient Norm: 0.22509345412254333)
Epoch... (13/34 | Eval Loss: 6.905399322509766 | Eval wer: 1.603448275862069 |): 41%|β–Š | 14/34 [02:41<01:18, 3.90s/it]
Training...: 0%| | 0/2 [00:00<?, ?it/s]
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:01<00:01, 1.21s/it]
Step... (29 | Loss: 6.907866477966309, Learning Rate: 1.7400016076862812e-05, Gradient Norm: 0.20910179615020752)
Epoch... (13/34 | Eval Loss: 6.905399322509766 | Eval wer: 1.603448275862069 |): 44%|β–‰ | 15/34 [02:45<01:05, 3.45s/it]
Training...: 0%| | 0/2 [00:01<?, ?it/s]
Model weights saved in /home/sanchitgandhi/flax-wav2vec2-2-bart-dummy/flax_model.msgpack | 0/2 [00:03<?, ?it/s]
tokenizer config file saved in ./tokenizer_config.json
Special tokens file saved in ./special_tokens_map.json
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:03<00:03, 3.71s/it]
Epoch... (16/34 | Eval Loss: 6.904772758483887 | Eval wer: 1.603448275862069 |)
05/07/2022 17:46:08 - INFO - __main__ - Saving checkpoint...
Step... (31 | Loss: 6.901575565338135, Learning Rate: 1.85999961104244e-05, Gradient Norm: 0.21939419209957123)
Epoch... (16/34 | Eval Loss: 6.904772758483887 | Eval wer: 1.603448275862069 |): 47%|β–‰ | 16/34 [03:11<03:05, 10.31s/it]
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:01<00:01, 1.33s/it]
Epoch... (16/34 | Eval Loss: 6.904772758483887 | Eval wer: 1.603448275862069 |): 50%|β–ˆ | 17/34 [03:13<02:15, 7.95s/it]
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:01<00:01, 1.22s/it]
Step... (34 | Loss: 6.906285762786865, Learning Rate: 2.0400009816512465e-05, Gradient Norm: 0.2121836245059967)
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:02<00:01, 1.22s/it]
Evaluating ...: 25%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Ž | 1/4 [00:00<00:02, 1.32it/s]
Epoch... (18/34 | Eval Loss: 6.904001712799072 | Eval wer: 1.603448275862069 |): 53%|β–ˆ | 18/34 [03:17<01:53, 7.08s/it]
Training...: 0%| | 0/2 [00:00<?, ?it/s]
Epoch... (18/34 | Eval Loss: 6.904001712799072 | Eval wer: 1.603448275862069 |): 56%|β–ˆ | 19/34 [03:20<01:25, 5.67s/it]
Training...: 0%| | 0/2 [00:00<?, ?it/s]
Step... (37 | Loss: 6.903077125549316, Learning Rate: 2.2199994418770075e-05, Gradient Norm: 0.20692755281925201)
Epoch... (18/34 | Eval Loss: 6.904001712799072 | Eval wer: 1.603448275862069 |): 59%|β–ˆβ–| 20/34 [03:22<01:05, 4.69s/it]
Step... (39 | Loss: 6.90248966217041, Learning Rate: 2.340000355616212e-05, Gradient Norm: 0.21785803139209747)
Epoch... (18/34 | Eval Loss: 6.904001712799072 | Eval wer: 1.603448275862069 |): 59%|β–ˆβ–| 20/34 [03:23<01:05, 4.69s/it]
Training...: 0%| | 0/2 [00:01<?, ?it/s]
Evaluating ...: 25%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Ž | 1/4 [00:00<00:02, 1.37it/s]
Training...: 0%| | 0/2 [00:03<?, ?it/s]
05/07/2022 17:46:47 - INFO - __main__ - Saving checkpoint...
Model weights saved in /home/sanchitgandhi/flax-wav2vec2-2-bart-dummy/flax_model.msgpack | 0/2 [00:03<?, ?it/s]
tokenizer config file saved in ./tokenizer_config.json
Special tokens file saved in ./special_tokens_map.json
Training...: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 2/2 [00:05<00:00, 2.59s/it]
Epoch... (21/34 | Eval Loss: 6.903112411499023 | Eval wer: 1.603448275862069 |): 62%|β–ˆβ–| 21/34 [03:27<01:02, 4.83s/it]
Training...: 0%| | 0/2 [00:00<?, ?it/s]
Step... (42 | Loss: 6.902524471282959, Learning Rate: 2.5199988158419728e-05, Gradient Norm: 0.2166411280632019)
Epoch... (21/34 | Eval Loss: 6.903112411499023 | Eval wer: 1.603448275862069 |): 65%|β–ˆβ–Ž| 22/34 [03:30<00:49, 4.11s/it]
Training...: 0%| | 0/2 [00:00<?, ?it/s]
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:01<00:01, 1.35s/it]
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:02<00:01, 1.35s/it]
Evaluating ...: 75%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Š | 3/4 [00:01<00:00, 1.78it/s]
Epoch... (23/34 | Eval Loss: 6.902121543884277 | Eval wer: 1.603448275862069 |): 68%|β–ˆβ–Ž| 23/34 [03:36<00:48, 4.37s/it]
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:01<00:01, 1.22s/it]
Step... (46 | Loss: 6.909830570220947, Learning Rate: 2.7600006433203816e-05, Gradient Norm: 0.20912756025791168)
Epoch... (23/34 | Eval Loss: 6.902121543884277 | Eval wer: 1.603448275862069 |): 71%|β–ˆβ–| 24/34 [03:38<00:37, 3.77s/it]
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:01<00:01, 1.16s/it]
Step... (48 | Loss: 6.901593208312988, Learning Rate: 2.880001557059586e-05, Gradient Norm: 0.22144821286201477)
Epoch... (23/34 | Eval Loss: 6.902121543884277 | Eval wer: 1.603448275862069 |): 74%|β–ˆβ–| 25/34 [03:39<00:30, 3.34s/it]
Training...: 0%| | 0/2 [00:00<?, ?it/s]
Training...: 0%| | 0/2 [00:01<?, ?it/s]
Evaluating ...: 75%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Š | 3/4 [00:01<00:00, 1.75it/s]
Epoch... (26/34 | Eval Loss: 6.900992393493652 | Eval wer: 1.2724137931034483 |)
Model weights saved in /home/sanchitgandhi/flax-wav2vec2-2-bart-dummy/flax_model.msgpack | 0/2 [00:03<?, ?it/s]
tokenizer config file saved in ./tokenizer_config.json
Special tokens file saved in ./special_tokens_map.json
Training...: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 2/2 [00:04<00:00, 2.42s/it]
Epoch... (26/34 | Eval Loss: 6.900992393493652 | Eval wer: 1.2724137931034483 |): 76%|β–Š| 26/34 [03:44<00:30, 3.79s/it]
Training...: 0%| | 0/2 [00:00<?, ?it/s]
Step... (51 | Loss: 6.9044108390808105, Learning Rate: 3.060000017285347e-05, Gradient Norm: 0.22064745426177979)
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:01<00:01, 1.23s/it]
Step... (53 | Loss: 6.9063873291015625, Learning Rate: 3.1800009310245514e-05, Gradient Norm: 0.2018101066350937)
Epoch... (26/34 | Eval Loss: 6.900992393493652 | Eval wer: 1.2724137931034483 |): 79%|β–Š| 27/34 [03:48<00:23, 3.39s/it]
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:01<00:01, 1.23s/it]
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:02<00:01, 1.23s/it]
Evaluating ...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Œ | 2/4 [00:01<00:01, 1.82it/s]
Epoch... (28/34 | Eval Loss: 6.899757385253906 | Eval wer: 1.2724137931034483 |): 82%|β–Š| 28/34 [03:53<00:22, 3.82s/it]
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:01<00:01, 1.21s/it]
Step... (56 | Loss: 6.906337261199951, Learning Rate: 3.359999391250312e-05, Gradient Norm: 0.21085792779922485)
Epoch... (28/34 | Eval Loss: 6.899757385253906 | Eval wer: 1.2724137931034483 |): 85%|β–Š| 29/34 [03:54<00:16, 3.39s/it]
Training...: 0%| | 0/2 [00:00<?, ?it/s]
Step... (58 | Loss: 6.89849853515625, Learning Rate: 3.480000304989517e-05, Gradient Norm: 0.2046382576227188)
Epoch... (28/34 | Eval Loss: 6.899757385253906 | Eval wer: 1.2724137931034483 |): 88%|β–‰| 30/34 [03:56<00:12, 3.09s/it]
Training...: 0%| | 0/2 [00:00<?, ?it/s]
Training...: 0%| | 0/2 [00:01<?, ?it/s]
Evaluating ...: 25%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Ž | 1/4 [00:00<00:02, 1.40it/s]
Epoch... (31/34 | Eval Loss: 6.898369312286377 | Eval wer: 1.2724137931034483 |)
Model weights saved in /home/sanchitgandhi/flax-wav2vec2-2-bart-dummy/flax_model.msgpack | 0/2 [00:03<?, ?it/s]
tokenizer config file saved in ./tokenizer_config.json
Special tokens file saved in ./special_tokens_map.json
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:03<00:03, 3.75s/it]
Step... (61 | Loss: 6.899321556091309, Learning Rate: 3.659998765215278e-05, Gradient Norm: 0.21138229966163635)
Epoch... (31/34 | Eval Loss: 6.898369312286377 | Eval wer: 1.2724137931034483 |): 91%|β–‰| 31/34 [04:02<00:10, 3.64s/it]
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:01<00:01, 1.21s/it]
Epoch... (31/34 | Eval Loss: 6.898369312286377 | Eval wer: 1.2724137931034483 |): 94%|β–‰| 32/34 [04:05<00:06, 3.27s/it]
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:01<00:01, 1.24s/it]
Step... (64 | Loss: 6.900187015533447, Learning Rate: 3.840000135824084e-05, Gradient Norm: 0.21020692586898804)
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:02<00:01, 1.24s/it]
Evaluating ...: 25%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Ž | 1/4 [00:00<00:02, 1.34it/s]
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:04<00:01, 1.24s/it]
Epoch... (33/34 | Eval Loss: 6.896849155426025 | Eval wer: 1.3195402298850574 |): 97%|β–‰| 33/34 [04:10<00:03, 3.97s/it]
Training...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 1/2 [00:01<00:01, 1.17s/it]
Epoch... (33/34 | Eval Loss: 6.896849155426025 | Eval wer: 1.3195402298850574 |): 100%|β–ˆ| 34/34 [04:12<00:00, 7.41s/it]
Predicting validation[:90%]...: 67%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Ž | 2/3 [00:26<00:13, 13.16s/it]
Predicting validation[:90%]...: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 3/3 [00:40<00:00, 13.30s/it]
Predicting validation[:95%]...: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Œ | 2/4 [00:01<00:01, 1.81it/s]
Predicting validation[:95%]...: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 4/4 [00:02<00:00, 1.81it/s]