manocraft committed
Commit 4673208
1 parent: d7e5c0b

Upload 11 files

config.json CHANGED
@@ -1,5 +1,5 @@
  {
- "_name_or_path": "/kaggle/input/train-layer1-lan2/output/best",
+ "_name_or_path": "google/mt5-base",
  "architectures": [
  "MT5ForConditionalGeneration"
  ],
@@ -27,7 +27,7 @@
  "tie_word_embeddings": false,
  "tokenizer_class": "T5Tokenizer",
  "torch_dtype": "float32",
- "transformers_version": "4.41.0",
+ "transformers_version": "4.38.2",
  "use_cache": true,
  "vocab_size": 250112
  }
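
For reference, a minimal loading sketch based on the fields in config.json above (MT5ForConditionalGeneration, T5Tokenizer, float32 weights). The repo id and the task prefix are hypothetical placeholders, since this commit view does not name the hosting repo or the training task:

from transformers import MT5ForConditionalGeneration, T5Tokenizer

repo_id = "manocraft/mt5-model"  # hypothetical placeholder; substitute the actual repo id
tokenizer = T5Tokenizer.from_pretrained(repo_id)
model = MT5ForConditionalGeneration.from_pretrained(repo_id)  # loads model.safetensors (float32)

# Generation settings roughly mirroring model_args.json (max_length=20, num_beams=1).
inputs = tokenizer("task prefix: example input", return_tensors="pt")  # hypothetical input
output_ids = model.generate(**inputs, max_length=20, num_beams=1)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
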
eval_results.txt CHANGED
@@ -1 +1 @@
- eval_loss = 0.26792989788871063
+ eval_loss = 0.24161592831737116
generation_config.json CHANGED
@@ -3,5 +3,5 @@
  "decoder_start_token_id": 0,
  "eos_token_id": 1,
  "pad_token_id": 0,
- "transformers_version": "4.41.0"
+ "transformers_version": "4.38.2"
  }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:89240f3b73a7b8c244d999274f228071eaf184f9d17e9c555a299bd3b1cd3d16
+ oid sha256:d3301dd4550ce95b70939559e2c6e2eb211edbdc06dc5b61d9b98d71ba63d055
  size 2329638768
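
The model.safetensors entry is a Git LFS pointer: it stores only the blob's sha256 ("oid") and byte size, so a downloaded copy of the weights can be checked against it. A small verification sketch, assuming the real file has already been fetched locally (e.g. via git lfs pull); the local path is a placeholder:

import hashlib
import os

path = "model.safetensors"  # placeholder local path to the fetched LFS object
expected_oid = "d3301dd4550ce95b70939559e2c6e2eb211edbdc06dc5b61d9b98d71ba63d055"  # new pointer above
expected_size = 2329638768

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert sha.hexdigest() == expected_oid, "sha256 mismatch"
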
model_args.json CHANGED
@@ -1 +1 @@
- {"adafactor_beta1": null, "adafactor_clip_threshold": 1.0, "adafactor_decay_rate": -0.8, "adafactor_eps": [1e-30, 0.001], "adafactor_relative_step": false, "adafactor_scale_parameter": false, "adafactor_warmup_init": false, "adam_betas": [0.9, 0.999], "adam_epsilon": 1e-08, "best_model_dir": "/kaggle/working/output/best", "cache_dir": "/kaggle/working/output", "config": {}, "cosine_schedule_num_cycles": 0.5, "custom_layer_parameters": [], "custom_parameter_groups": [], "dataloader_num_workers": 0, "do_lower_case": false, "dynamic_quantize": false, "early_stopping_consider_epochs": false, "early_stopping_delta": 0, "early_stopping_metric": "eval_loss", "early_stopping_metric_minimize": true, "early_stopping_patience": 3, "encoding": null, "eval_batch_size": 16, "evaluate_during_training": true, "evaluate_during_training_silent": true, "evaluate_during_training_steps": 168.6875, "evaluate_during_training_verbose": true, "evaluate_each_epoch": true, "fp16": false, "gradient_accumulation_steps": 1, "learning_rate": 0.0004, "local_rank": -1, "logging_steps": 50, "loss_type": null, "loss_args": {}, "manual_seed": 1234, "max_grad_norm": 1.0, "max_seq_length": 128, "model_name": "/kaggle/input/train-layer1-lan2/output/best", "model_type": "mt5", "multiprocessing_chunksize": -1, "n_gpu": 1, "no_cache": false, "no_save": false, "not_saved_args": [], "num_train_epochs": 50, "optimizer": "Adafactor", "output_dir": "/kaggle/working/output", "overwrite_output_dir": true, "polynomial_decay_schedule_lr_end": 1e-07, "polynomial_decay_schedule_power": 1.0, "process_count": 2, "quantized_model": false, "reprocess_input_data": true, "save_best_model": true, "save_eval_checkpoints": false, "save_model_every_epoch": false, "save_optimizer_and_scheduler": true, "save_steps": -1, "scheduler": "constant_schedule_with_warmup", "silent": false, "skip_special_tokens": true, "tensorboard_dir": "/kaggle/working/tensorboard_logs", "thread_count": null, "tokenizer_name": null, "tokenizer_type": null, "train_batch_size": 4, "train_custom_parameters_only": false, "trust_remote_code": false, "use_cached_eval_features": false, "use_early_stopping": false, "use_hf_datasets": false, "use_multiprocessing": false, "use_multiprocessing_for_evaluation": false, "wandb_kwargs": {}, "wandb_project": null, "warmup_ratio": 0.5, "warmup_steps": 9600, "weight_decay": 0.0, "model_class": "T5Model", "add_prefix": true, "dataset_class": null, "do_sample": false, "early_stopping": true, "evaluate_generated_text": false, "length_penalty": 2.0, "max_length": 20, "max_steps": -1, "num_beams": 1, "num_return_sequences": 1, "preprocess_inputs": true, "repetition_penalty": 1.0, "special_tokens_list": [], "top_k": null, "top_p": null, "use_multiprocessed_decoding": true}
+ {"adafactor_beta1": null, "adafactor_clip_threshold": 1.0, "adafactor_decay_rate": -0.8, "adafactor_eps": [1e-30, 0.001], "adafactor_relative_step": false, "adafactor_scale_parameter": false, "adafactor_warmup_init": false, "adam_betas": [0.9, 0.999], "adam_epsilon": 1e-08, "best_model_dir": "/kaggle/working/output/best", "cache_dir": "/kaggle/working/output", "config": {}, "cosine_schedule_num_cycles": 0.5, "custom_layer_parameters": [], "custom_parameter_groups": [], "dataloader_num_workers": 0, "do_lower_case": false, "dynamic_quantize": false, "early_stopping_consider_epochs": false, "early_stopping_delta": 0, "early_stopping_metric": "eval_loss", "early_stopping_metric_minimize": true, "early_stopping_patience": 3, "encoding": null, "eval_batch_size": 16, "evaluate_during_training": true, "evaluate_during_training_silent": true, "evaluate_during_training_steps": 168.6875, "evaluate_during_training_verbose": true, "evaluate_each_epoch": true, "fp16": false, "gradient_accumulation_steps": 1, "learning_rate": 0.0004, "local_rank": -1, "logging_steps": 50, "loss_type": null, "loss_args": {}, "manual_seed": 1234, "max_grad_norm": 1.0, "max_seq_length": 128, "model_name": "google/mt5-base", "model_type": "mt5", "multiprocessing_chunksize": -1, "n_gpu": 1, "no_cache": false, "no_save": false, "not_saved_args": [], "num_train_epochs": 50, "optimizer": "Adafactor", "output_dir": "/kaggle/working/output", "overwrite_output_dir": true, "polynomial_decay_schedule_lr_end": 1e-07, "polynomial_decay_schedule_power": 1.0, "process_count": 2, "quantized_model": false, "reprocess_input_data": true, "save_best_model": true, "save_eval_checkpoints": false, "save_model_every_epoch": false, "save_optimizer_and_scheduler": true, "save_steps": -1, "scheduler": "constant_schedule_with_warmup", "silent": false, "skip_special_tokens": true, "tensorboard_dir": "/kaggle/working/tensorboard_logs", "thread_count": null, "tokenizer_name": null, "tokenizer_type": null, "train_batch_size": 4, "train_custom_parameters_only": false, "trust_remote_code": false, "use_cached_eval_features": false, "use_early_stopping": false, "use_hf_datasets": false, "use_multiprocessing": false, "use_multiprocessing_for_evaluation": false, "wandb_kwargs": {}, "wandb_project": null, "warmup_ratio": 0.5, "warmup_steps": 16875, "weight_decay": 0.0, "model_class": "T5Model", "add_prefix": true, "dataset_class": null, "do_sample": false, "early_stopping": true, "evaluate_generated_text": false, "length_penalty": 2.0, "max_length": 20, "max_steps": -1, "num_beams": 1, "num_return_sequences": 1, "preprocess_inputs": true, "repetition_penalty": 1.0, "special_tokens_list": [], "top_k": null, "top_p": null, "use_multiprocessed_decoding": true}
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fff1d6dc14c677c0035bf7bd541c05fadfea387847cff4f8c2ebc516b9f37058
+ oid sha256:390c5d425162c508c51b67353466baf6d9dcca046ec8289a14622b57cb638c00
  size 4115450
scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3d44d8f5c7bbdfaa8b3c5f1c990f9695a018c9a4c9b1ac95ec7a5c6596348edb
+ oid sha256:3e86b75926a9beb30c32895a03024f85e1341131004feaf2e57c6a3ff20eb37a
  size 1064
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f353584c1b58b711ce3b1882a352e6c69a9e6b13e5eab03899ab4f63bcef82f1
- size 3768
+ oid sha256:d5ad1801204891fb51120b775e2fee9b947fb70cc6a0797a145ce132578e5d14
+ size 3704