Davlan committed on
Commit a292754
1 Parent(s): fb1ce61

update model files

Files changed (4)
  1. model_args.json +1 -1
  2. optimizer.pt +0 -3
  3. pytorch_model.bin +1 -1
  4. scheduler.pt +0 -3
model_args.json CHANGED
@@ -1 +1 @@
- {"adam_epsilon": 1e-08, "best_model_dir": "outputs/best_model", "cache_dir": "cache_dir/", "config": {}, "cosine_schedule_num_cycles": 0.5, "custom_layer_parameters": [], "custom_parameter_groups": [], "dataloader_num_workers": 0, "do_lower_case": false, "dynamic_quantize": false, "early_stopping_consider_epochs": false, "early_stopping_delta": 0, "early_stopping_metric": "eval_loss", "early_stopping_metric_minimize": true, "early_stopping_patience": 3, "encoding": null, "adafactor_eps": [1e-30, 0.001], "adafactor_clip_threshold": 1.0, "adafactor_decay_rate": -0.8, "adafactor_beta1": null, "adafactor_scale_parameter": false, "adafactor_relative_step": false, "adafactor_warmup_init": false, "eval_batch_size": 20, "evaluate_during_training": true, "evaluate_during_training_silent": true, "evaluate_during_training_steps": 30000, "evaluate_during_training_verbose": false, "evaluate_each_epoch": true, "fp16": false, "gradient_accumulation_steps": 1, "learning_rate": 0.001, "local_rank": -1, "logging_steps": 50, "manual_seed": 42, "max_grad_norm": 1.0, "max_seq_length": 190, "model_name": "google/mt5-base", "model_type": "mt5", "multiprocessing_chunksize": -1, "n_gpu": 1, "no_cache": true, "no_save": false, "not_saved_args": [], "num_train_epochs": 5, "optimizer": "Adafactor", "output_dir": "outputs/", "overwrite_output_dir": true, "process_count": 254, "polynomial_decay_schedule_lr_end": 1e-07, "polynomial_decay_schedule_power": 1.0, "quantized_model": false, "reprocess_input_data": true, "save_best_model": true, "save_eval_checkpoints": false, "save_model_every_epoch": true, "save_optimizer_and_scheduler": true, "save_steps": -1, "scheduler": "constant_schedule_with_warmup", "silent": false, "skip_special_tokens": true, "tensorboard_dir": null, "thread_count": null, "tokenizer_type": null, "tokenizer_name": null, "train_batch_size": 20, "train_custom_parameters_only": false, "use_cached_eval_features": false, "use_early_stopping": true, "use_multiprocessing": false, "use_multiprocessing_for_evaluation": false, "wandb_kwargs": {}, "wandb_project": "MT5 Yoruba noDiac to Underdot Translation", "warmup_ratio": 0.06, "warmup_steps": 7239, "weight_decay": 0.0, "model_class": "T5Model", "dataset_class": null, "do_sample": false, "early_stopping": true, "evaluate_generated_text": false, "length_penalty": 2.0, "max_length": 190, "max_steps": -1, "num_beams": 1, "num_return_sequences": 1, "preprocess_inputs": false, "repetition_penalty": 1.0, "special_tokens_list": [], "top_k": null, "top_p": null, "use_multiprocessed_decoding": true}
+ {"adam_epsilon": 1e-08, "best_model_dir": "outputs/best_model", "cache_dir": "cache_dir/", "config": {}, "cosine_schedule_num_cycles": 0.5, "custom_layer_parameters": [], "custom_parameter_groups": [], "dataloader_num_workers": 0, "do_lower_case": false, "dynamic_quantize": false, "early_stopping_consider_epochs": false, "early_stopping_delta": 0, "early_stopping_metric": "eval_loss", "early_stopping_metric_minimize": true, "early_stopping_patience": 3, "encoding": null, "adafactor_eps": [1e-30, 0.001], "adafactor_clip_threshold": 1.0, "adafactor_decay_rate": -0.8, "adafactor_beta1": null, "adafactor_scale_parameter": false, "adafactor_relative_step": false, "adafactor_warmup_init": false, "eval_batch_size": 20, "evaluate_during_training": true, "evaluate_during_training_silent": true, "evaluate_during_training_steps": 30000, "evaluate_during_training_verbose": false, "evaluate_each_epoch": true, "fp16": false, "gradient_accumulation_steps": 1, "learning_rate": 0.001, "local_rank": -1, "logging_steps": 50, "manual_seed": 42, "max_grad_norm": 1.0, "max_seq_length": 190, "model_name": "google/mt5-base", "model_type": "mt5", "multiprocessing_chunksize": -1, "n_gpu": 1, "no_cache": true, "no_save": false, "not_saved_args": [], "num_train_epochs": 5, "optimizer": "Adafactor", "output_dir": "outputs/", "overwrite_output_dir": true, "process_count": 254, "polynomial_decay_schedule_lr_end": 1e-07, "polynomial_decay_schedule_power": 1.0, "quantized_model": false, "reprocess_input_data": true, "save_best_model": true, "save_eval_checkpoints": false, "save_model_every_epoch": true, "save_optimizer_and_scheduler": true, "save_steps": -1, "scheduler": "constant_schedule_with_warmup", "silent": false, "skip_special_tokens": true, "tensorboard_dir": null, "thread_count": null, "tokenizer_type": null, "tokenizer_name": null, "train_batch_size": 20, "train_custom_parameters_only": false, "use_cached_eval_features": false, "use_early_stopping": true, "use_multiprocessing": false, "use_multiprocessing_for_evaluation": false, "wandb_kwargs": {}, "wandb_project": "MT5 Yoruba noDiac to Underdot Translation", "warmup_ratio": 0.06, "warmup_steps": 7239, "weight_decay": 0.0, "model_class": "T5Model", "dataset_class": null, "do_sample": false, "early_stopping": true, "evaluate_generated_text": false, "length_penalty": 2.0, "max_length": 256, "max_steps": -1, "num_beams": 1, "num_return_sequences": 1, "preprocess_inputs": false, "repetition_penalty": 1.0, "special_tokens_list": [], "top_k": null, "top_p": null, "use_multiprocessed_decoding": true}
optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:0b3a4dc5f06b4932c14aa42c8960e3af652fbc7cbc4066207c886a25f0383614
- size 4130943
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6035ffa0ae47175a1a6de254a17e098bc86ebbc9fdda4e607d2fc735df18e41f
+ oid sha256:3435eab37f48e89998c10d1423693f4756c334181ddeea8e43807be9cad1c1d9
  size 2329707353
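Only the LFS pointer changes here: the file size stays at 2,329,707,353 bytes while the sha256 oid changes, i.e. the weights were replaced. A small sketch for checking a downloaded pytorch_model.bin against a pointer like the one above; the local file paths are assumptions, not files tracked in this commit.

```python
# A minimal sketch for verifying a downloaded binary against its Git LFS
# pointer (the "oid sha256:..." and "size ..." lines shown in the diff).
# Both paths below are hypothetical.
import hashlib
import os

def read_lfs_pointer(pointer_path):
    """Parse the three-line LFS pointer file into a {key: value} dict."""
    fields = {}
    with open(pointer_path, "r", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

def sha256_of(path, chunk_size=1 << 20):
    """Hash the file in chunks so a ~2.3 GB checkpoint never sits in RAM."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

pointer = read_lfs_pointer("pytorch_model.bin.pointer")  # hypothetical path
expected_oid = pointer["oid"].removeprefix("sha256:")
expected_size = int(pointer["size"])

binary_path = "pytorch_model.bin"                        # hypothetical path
assert os.path.getsize(binary_path) == expected_size, "size mismatch"
assert sha256_of(binary_path) == expected_oid, "sha256 mismatch"
print("pytorch_model.bin matches its LFS pointer")
```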
scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:54a781da6e71dd23c71b7d4fc3035e0f891569f5e8792b3bb6210c5e5cbf5731
- size 623