Portuguese
tharindu committed on
Commit e3219e5
Parent: d9a7cfa

Upload folder using huggingface_hub

Files changed (39)
  1. .gitattributes +4 -0
  2. best_model/decoder/config.json +30 -0
  3. best_model/decoder/generation_config.json +7 -0
  4. best_model/decoder/pytorch_model.bin +3 -0
  5. best_model/decoder/sentencepiece.bpe.model +3 -0
  6. best_model/decoder/special_tokens_map.json +15 -0
  7. best_model/decoder/tokenizer.json +3 -0
  8. best_model/decoder/tokenizer_config.json +19 -0
  9. best_model/encoder/config.json +28 -0
  10. best_model/encoder/pytorch_model.bin +3 -0
  11. best_model/encoder/sentencepiece.bpe.model +3 -0
  12. best_model/encoder/special_tokens_map.json +15 -0
  13. best_model/encoder/tokenizer.json +3 -0
  14. best_model/encoder/tokenizer_config.json +19 -0
  15. best_model/eval_results.txt +1 -0
  16. best_model/model_args.json +1 -0
  17. best_model/optimizer.pt +3 -0
  18. best_model/scheduler.pt +3 -0
  19. best_model/training_args.bin +3 -0
  20. checkpoint-44800/decoder/config.json +30 -0
  21. checkpoint-44800/decoder/generation_config.json +7 -0
  22. checkpoint-44800/decoder/pytorch_model.bin +3 -0
  23. checkpoint-44800/decoder/sentencepiece.bpe.model +3 -0
  24. checkpoint-44800/decoder/special_tokens_map.json +15 -0
  25. checkpoint-44800/decoder/tokenizer.json +3 -0
  26. checkpoint-44800/decoder/tokenizer_config.json +19 -0
  27. checkpoint-44800/encoder/config.json +28 -0
  28. checkpoint-44800/encoder/pytorch_model.bin +3 -0
  29. checkpoint-44800/encoder/sentencepiece.bpe.model +3 -0
  30. checkpoint-44800/encoder/special_tokens_map.json +15 -0
  31. checkpoint-44800/encoder/tokenizer.json +3 -0
  32. checkpoint-44800/encoder/tokenizer_config.json +19 -0
  33. checkpoint-44800/eval_results.txt +1 -0
  34. checkpoint-44800/model_args.json +1 -0
  35. checkpoint-44800/optimizer.pt +3 -0
  36. checkpoint-44800/scheduler.pt +3 -0
  37. checkpoint-44800/training_args.bin +3 -0
  38. eval_results.txt +1 -0
  39. training_progress_scores.csv +20 -0
.gitattributes CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ best_model/decoder/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ best_model/encoder/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-44800/decoder/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-44800/encoder/tokenizer.json filter=lfs diff=lfs merge=lfs -text
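These four patterns mark the new tokenizer.json files as Git LFS objects, so a plain clone contains small pointer stubs until the objects are fetched (e.g. with `git lfs pull`). A minimal sketch, assuming it is run from the repository root, that reports whether each newly tracked file is still a pointer or has been materialized:

```python
from pathlib import Path

# First line of a Git LFS pointer stub (see the pointer files later in this commit).
LFS_POINTER_PREFIX = "version https://git-lfs.github.com/spec/v1"

# The four files newly tracked by the .gitattributes hunk above.
tracked = [
    "best_model/decoder/tokenizer.json",
    "best_model/encoder/tokenizer.json",
    "checkpoint-44800/decoder/tokenizer.json",
    "checkpoint-44800/encoder/tokenizer.json",
]

for rel_path in tracked:
    path = Path(rel_path)
    if not path.exists():
        print(f"{rel_path}: missing")
        continue
    first_line = path.read_text(errors="ignore").splitlines()[0]
    status = "LFS pointer (run `git lfs pull`)" if first_line.startswith(LFS_POINTER_PREFIX) else "materialized file"
    print(f"{rel_path}: {status}")
```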
best_model/decoder/config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_name_or_path": "xlm-roberta-large",
+   "add_cross_attention": true,
+   "architectures": [
+     "XLMRobertaForCausalLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "is_decoder": true,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "xlm-roberta",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.29.2",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 250002
+ }
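Relative to stock xlm-roberta-large, this decoder config flips is_decoder and add_cross_attention on, which is why the saved decoder weights (~2.6 GB) are larger than the encoder's (~2.2 GB): cross-attention blocks are added in every layer. A minimal sketch of loading the decoder on its own, assuming the LFS-backed weight file has been fetched:

```python
from transformers import AutoModelForCausalLM

# is_decoder=true and add_cross_attention=true mean the saved weights carry
# cross-attention blocks in every layer on top of xlm-roberta-large.
decoder = AutoModelForCausalLM.from_pretrained("best_model/decoder")

print(decoder.config.architectures)                  # ['XLMRobertaForCausalLM']
print(sum(p.numel() for p in decoder.parameters()))  # exceeds the plain encoder's count
```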
best_model/decoder/generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "eos_token_id": 2,
+   "pad_token_id": 1,
+   "transformers_version": "4.29.2"
+ }
best_model/decoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ae8f3354c67caf9b60509b777e1fcce8530a606228f82550fe84e1528a4f4d0
+ size 2644038265
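This is a Git LFS pointer rather than the weights themselves: `oid` is the SHA-256 of the real file and `size` its length in bytes. A small verification sketch, using only the values shown in this pointer and assuming the ~2.6 GB file has already been fetched to the same path:

```python
import hashlib
import os

# oid / size copied from the LFS pointer above.
EXPECTED_OID = "9ae8f3354c67caf9b60509b777e1fcce8530a606228f82550fe84e1528a4f4d0"
EXPECTED_SIZE = 2_644_038_265

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash the file in 1 MiB chunks so the ~2.6 GB checkpoint never sits in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

path = "best_model/decoder/pytorch_model.bin"
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"
assert sha256_of(path) == EXPECTED_OID, "checksum mismatch"
print("downloaded file matches the LFS pointer")
```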
best_model/decoder/sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+ size 5069051
best_model/decoder/special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
best_model/decoder/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b93bf61272f75c0a0b96b85fa262d2242e8a46008d76095386e98675f0bdd119
+ size 17082925
best_model/decoder/tokenizer_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "__type": "AddedToken",
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "model_max_length": 512,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "tokenizer_class": "XLMRobertaTokenizer",
+   "unk_token": "<unk>"
+ }
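With these files in place, the decoder tokenizer loads like any XLM-R tokenizer. A minimal sketch, assuming the LFS-backed tokenizer.json and sentencepiece.bpe.model have been fetched; the sample sentence is only illustrative:

```python
from transformers import AutoTokenizer

# tokenizer_config.json names XLMRobertaTokenizer; with tokenizer.json and
# sentencepiece.bpe.model present, AutoTokenizer loads the (fast) XLM-R tokenizer.
tokenizer = AutoTokenizer.from_pretrained("best_model/decoder")

encoded = tokenizer("um exemplo em português", max_length=512, truncation=True)
print(encoded["input_ids"])
print(tokenizer.decode(encoded["input_ids"]))
```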
best_model/encoder/config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "_name_or_path": "xlm-roberta-large",
+   "architectures": [
+     "XLMRobertaModel"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "xlm-roberta",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.29.2",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 250002
+ }
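The encoder is saved as a plain XLMRobertaModel, while the decoder (above) is an XLMRobertaForCausalLM with cross-attention enabled, so the two directories can be recombined with transformers' generic EncoderDecoderModel. This is only a sketch of how the saved halves compose, not the original training wrapper (model_args.json records a Seq2SeqModel run); the generation settings are taken from that file, and the input sentence is illustrative:

```python
import torch
from transformers import AutoTokenizer, EncoderDecoderModel

# Recombine the saved halves: a plain XLMRobertaModel encoder and an
# XLMRobertaForCausalLM decoder with cross-attention already enabled.
model = EncoderDecoderModel.from_encoder_decoder_pretrained(
    "best_model/encoder", "best_model/decoder"
)
tokenizer = AutoTokenizer.from_pretrained("best_model/encoder")

# Special-token ids as recorded in both configs.
model.config.decoder_start_token_id = tokenizer.bos_token_id  # 0
model.config.pad_token_id = tokenizer.pad_token_id            # 1
model.config.eos_token_id = tokenizer.eos_token_id            # 2

inputs = tokenizer("texto de entrada", return_tensors="pt")
with torch.no_grad():
    # max_length / num_beams taken from model_args.json.
    output_ids = model.generate(**inputs, max_length=20, num_beams=1)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```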
best_model/encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a5ac4e336c348744c5b26d48e2f8bd83020942516c966ae8f35734a4ef6965b
+ size 2239698029
best_model/encoder/sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+ size 5069051
best_model/encoder/special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
best_model/encoder/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b93bf61272f75c0a0b96b85fa262d2242e8a46008d76095386e98675f0bdd119
+ size 17082925
best_model/encoder/tokenizer_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "__type": "AddedToken",
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "model_max_length": 512,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "tokenizer_class": "XLMRobertaTokenizer",
+   "unk_token": "<unk>"
+ }
best_model/eval_results.txt ADDED
@@ -0,0 +1 @@
+ eval_loss = 8.552877298666978
best_model/model_args.json ADDED
@@ -0,0 +1 @@
+ {"adafactor_beta1": null, "adafactor_clip_threshold": 1.0, "adafactor_decay_rate": -0.8, "adafactor_eps": [1e-30, 0.001], "adafactor_relative_step": true, "adafactor_scale_parameter": true, "adafactor_warmup_init": true, "adam_betas": [0.9, 0.999], "adam_epsilon": 1e-08, "best_model_dir": "outputs/xlmrlarge/best_model", "cache_dir": "cache_dir/xlmrlarge", "config": {}, "cosine_schedule_num_cycles": 0.5, "custom_layer_parameters": [], "custom_parameter_groups": [], "dataloader_num_workers": 0, "do_lower_case": false, "dynamic_quantize": false, "early_stopping_consider_epochs": false, "early_stopping_delta": 0, "early_stopping_metric": "eval_loss", "early_stopping_metric_minimize": true, "early_stopping_patience": 25, "encoding": null, "eval_batch_size": 8, "evaluate_during_training": true, "evaluate_during_training_silent": true, "evaluate_during_training_steps": 3200, "evaluate_during_training_verbose": true, "evaluate_each_epoch": true, "fp16": false, "gradient_accumulation_steps": 1, "learning_rate": 0.0001, "local_rank": -1, "logging_steps": 3200, "loss_type": null, "loss_args": {}, "manual_seed": 777, "max_grad_norm": 1.0, "max_seq_length": 256, "model_name": "xlm-roberta-large-xlm-roberta-large", "model_type": "auto-bert", "multiprocessing_chunksize": -1, "n_gpu": 1, "no_cache": false, "no_save": false, "not_saved_args": [], "num_train_epochs": 10, "optimizer": "AdamW", "output_dir": "outputs/xlmrlarge", "overwrite_output_dir": true, "polynomial_decay_schedule_lr_end": 1e-07, "polynomial_decay_schedule_power": 1.0, "process_count": 78, "quantized_model": false, "reprocess_input_data": true, "save_best_model": true, "save_eval_checkpoints": true, "save_model_every_epoch": true, "save_optimizer_and_scheduler": true, "save_steps": 3200, "scheduler": "linear_schedule_with_warmup", "silent": false, "skip_special_tokens": true, "tensorboard_dir": null, "thread_count": null, "tokenizer_name": null, "tokenizer_type": null, "train_batch_size": 8, "train_custom_parameters_only": false, "use_cached_eval_features": false, "use_early_stopping": false, "use_hf_datasets": false, "use_multiprocessing": false, "use_multiprocessing_for_evaluation": false, "wandb_kwargs": {"name": "xlm-roberta-large"}, "wandb_project": "DORE", "warmup_ratio": 0.06, "warmup_steps": 4946, "weight_decay": 0.0, "model_class": "Seq2SeqModel", "base_marian_model_name": null, "dataset_class": null, "dataset_cache_dir": null, "do_sample": false, "early_stopping": true, "evaluate_generated_text": true, "faiss_d": 768, "faiss_m": 128, "include_title_in_knowledge_dataset": true, "length_penalty": 2.0, "max_length": 20, "max_steps": -1, "num_beams": 1, "num_return_sequences": 1, "rag_embed_batch_size": 16, "repetition_penalty": 1.0, "save_knowledge_dataset": true, "save_knowledge_dataset_with_checkpoints": false, "save_recent_only": true, "split_text_character": " ", "split_text_n": 100, "src_lang": "en_XX", "tgt_lang": "ro_RO", "top_k": null, "top_p": null, "use_multiprocessed_decoding": false}
best_model/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3de18331f4940d191a42e188a5dde82d10699dae079fd276ef30ab924ed16191
+ size 9759220262
best_model/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e33b9cb1f273bcba57881c7822e69891f4db796e8a9123c3a6162ee35cd12ee0
+ size 627
best_model/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4967b14f137a97123a53dcc2f1f48d0534feb73a3a2ce0607c99bd15a3ebe576
+ size 3579
checkpoint-44800/decoder/config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_name_or_path": "xlm-roberta-large",
+   "add_cross_attention": true,
+   "architectures": [
+     "XLMRobertaForCausalLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "is_decoder": true,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "xlm-roberta",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.29.2",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 250002
+ }
checkpoint-44800/decoder/generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "eos_token_id": 2,
+   "pad_token_id": 1,
+   "transformers_version": "4.29.2"
+ }
checkpoint-44800/decoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:92c083036ab3a49d266094424cdc5772eec560d915613e339213bd36d03b8e72
+ size 2644038265
checkpoint-44800/decoder/sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+ size 5069051
checkpoint-44800/decoder/special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
checkpoint-44800/decoder/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b93bf61272f75c0a0b96b85fa262d2242e8a46008d76095386e98675f0bdd119
+ size 17082925
checkpoint-44800/decoder/tokenizer_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "__type": "AddedToken",
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "model_max_length": 512,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "tokenizer_class": "XLMRobertaTokenizer",
+   "unk_token": "<unk>"
+ }
checkpoint-44800/encoder/config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "_name_or_path": "xlm-roberta-large",
+   "architectures": [
+     "XLMRobertaModel"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "xlm-roberta",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.29.2",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 250002
+ }
checkpoint-44800/encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dfe4762b4611c769ec18fc0fc8df6eb021c12b292828c935fed3f3d80c8ba6c4
+ size 2239698029
checkpoint-44800/encoder/sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+ size 5069051
checkpoint-44800/encoder/special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
checkpoint-44800/encoder/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b93bf61272f75c0a0b96b85fa262d2242e8a46008d76095386e98675f0bdd119
+ size 17082925
checkpoint-44800/encoder/tokenizer_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "__type": "AddedToken",
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "model_max_length": 512,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "tokenizer_class": "XLMRobertaTokenizer",
+   "unk_token": "<unk>"
+ }
checkpoint-44800/eval_results.txt ADDED
@@ -0,0 +1 @@
+ eval_loss = 12.112716560049117
checkpoint-44800/model_args.json ADDED
@@ -0,0 +1 @@
+ {"adafactor_beta1": null, "adafactor_clip_threshold": 1.0, "adafactor_decay_rate": -0.8, "adafactor_eps": [1e-30, 0.001], "adafactor_relative_step": true, "adafactor_scale_parameter": true, "adafactor_warmup_init": true, "adam_betas": [0.9, 0.999], "adam_epsilon": 1e-08, "best_model_dir": "outputs/xlmrlarge/best_model", "cache_dir": "cache_dir/xlmrlarge", "config": {}, "cosine_schedule_num_cycles": 0.5, "custom_layer_parameters": [], "custom_parameter_groups": [], "dataloader_num_workers": 0, "do_lower_case": false, "dynamic_quantize": false, "early_stopping_consider_epochs": false, "early_stopping_delta": 0, "early_stopping_metric": "eval_loss", "early_stopping_metric_minimize": true, "early_stopping_patience": 25, "encoding": null, "eval_batch_size": 8, "evaluate_during_training": true, "evaluate_during_training_silent": true, "evaluate_during_training_steps": 3200, "evaluate_during_training_verbose": true, "evaluate_each_epoch": true, "fp16": false, "gradient_accumulation_steps": 1, "learning_rate": 0.0001, "local_rank": -1, "logging_steps": 3200, "loss_type": null, "loss_args": {}, "manual_seed": 777, "max_grad_norm": 1.0, "max_seq_length": 256, "model_name": "xlm-roberta-large-xlm-roberta-large", "model_type": "auto-bert", "multiprocessing_chunksize": -1, "n_gpu": 1, "no_cache": false, "no_save": false, "not_saved_args": [], "num_train_epochs": 10, "optimizer": "AdamW", "output_dir": "outputs/xlmrlarge", "overwrite_output_dir": true, "polynomial_decay_schedule_lr_end": 1e-07, "polynomial_decay_schedule_power": 1.0, "process_count": 78, "quantized_model": false, "reprocess_input_data": true, "save_best_model": true, "save_eval_checkpoints": true, "save_model_every_epoch": true, "save_optimizer_and_scheduler": true, "save_steps": 3200, "scheduler": "linear_schedule_with_warmup", "silent": false, "skip_special_tokens": true, "tensorboard_dir": null, "thread_count": null, "tokenizer_name": null, "tokenizer_type": null, "train_batch_size": 8, "train_custom_parameters_only": false, "use_cached_eval_features": false, "use_early_stopping": false, "use_hf_datasets": false, "use_multiprocessing": false, "use_multiprocessing_for_evaluation": false, "wandb_kwargs": {"name": "xlm-roberta-large"}, "wandb_project": "DORE", "warmup_ratio": 0.06, "warmup_steps": 4946, "weight_decay": 0.0, "model_class": "Seq2SeqModel", "base_marian_model_name": null, "dataset_class": null, "dataset_cache_dir": null, "do_sample": false, "early_stopping": true, "evaluate_generated_text": true, "faiss_d": 768, "faiss_m": 128, "include_title_in_knowledge_dataset": true, "length_penalty": 2.0, "max_length": 20, "max_steps": -1, "num_beams": 1, "num_return_sequences": 1, "rag_embed_batch_size": 16, "repetition_penalty": 1.0, "save_knowledge_dataset": true, "save_knowledge_dataset_with_checkpoints": false, "save_recent_only": true, "split_text_character": " ", "split_text_n": 100, "src_lang": "en_XX", "tgt_lang": "ro_RO", "top_k": null, "top_p": null, "use_multiprocessed_decoding": false}
checkpoint-44800/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:95845ca15d522696cd656149196f68739e790cc52182195316c18b05b9487223
+ size 9759220262
checkpoint-44800/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:afeb3235dc8d536ec134b5673323cf52a24881e6d203f82b5a020723db986db8
+ size 627
checkpoint-44800/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4967b14f137a97123a53dcc2f1f48d0534feb73a3a2ce0607c99bd15a3ebe576
+ size 3579
eval_results.txt ADDED
@@ -0,0 +1 @@
+ eval_loss = 12.112716560049117
training_progress_scores.csv ADDED
@@ -0,0 +1,20 @@
+ global_step,eval_loss,train_loss
+ 3200,8.98454891900771,6.239049434661865
+ 6400,12.710789349633243,6.5854692459106445
+ 8242,11.180710541745055,6.323003768920898
+ 9600,10.88143311352015,6.214521408081055
+ 12800,10.264017345256333,6.225090980529785
+ 16000,8.552877298666978,6.213644027709961
+ 16484,9.861861456603116,6.396312236785889
+ 19200,9.872641939147476,5.7665114402771
+ 22400,11.80832740417214,6.291630744934082
+ 24726,9.768011126455782,6.106754302978516
+ 25600,10.007712807023717,6.074300765991211
+ 28800,9.315486268474315,6.214197158813477
+ 32000,11.036558783326202,6.228422164916992
+ 32968,11.823396353279717,6.375258922576904
+ 35200,9.315472150300094,6.675766468048096
+ 38400,11.318372732918567,6.045510768890381
+ 41210,11.067470789765455,6.386971473693848
+ 41600,11.831813449480185,6.32453727722168
+ 44800,12.112716560049117,6.689652919769287
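eval_loss is logged every 3200 optimizer steps plus at epoch boundaries (8242, 16484, ...); the minimum (~8.5529 at step 16000) is the checkpoint mirrored under best_model/, while the final row at step 44800 matches the top-level eval_results.txt. A short sketch, assuming pandas is available, that recovers the best step from this CSV:

```python
import pandas as pd

# One row per evaluation: every 3200 steps, plus epoch-end steps such as 8242.
scores = pd.read_csv("training_progress_scores.csv")

best = scores.loc[scores["eval_loss"].idxmin()]
print(f"best eval_loss {best['eval_loss']:.4f} at step {int(best['global_step'])}")
# Expected: step 16000 with eval_loss ~8.5529, the run saved under best_model/
# (see best_model/eval_results.txt).
```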