pszemraj committed on
Commit
0dded0f
1 Parent(s): 7a727bd

load model from drive and convert

README.md ADDED
@@ -0,0 +1,51 @@
+ ---
+ license: bsd-3-clause
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP16-ft3-booksum
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP16-ft3-booksum
+
+ This model is a fine-tuned version of [pszemraj/long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP16](https://huggingface.co/pszemraj/long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP16) on the kmfoda/booksum dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0001
+ - train_batch_size: 1
+ - eval_batch_size: 1
+ - seed: 42
+ - distributed_type: multi-GPU
+ - gradient_accumulation_steps: 64
+ - total_train_batch_size: 64
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.01
+ - num_epochs: 3
+
+ ### Framework versions
+
+ - Transformers 4.23.0.dev0
+ - Pytorch 1.10.0+cu113
+ - Datasets 2.5.1
+ - Tokenizers 0.12.1
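
For reference, a minimal inference sketch in Python, assuming this checkpoint is published under the committer's namespace as `pszemraj/long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP16-ft3-booksum` and that a `transformers` release with LongT5 support (>= 4.23, per the framework versions above) is installed:

```python
# Minimal usage sketch (repo id assumed; adjust to wherever this checkpoint is hosted).
from transformers import AutoTokenizer, LongT5ForConditionalGeneration

repo_id = "pszemraj/long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP16-ft3-booksum"
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = LongT5ForConditionalGeneration.from_pretrained(repo_id)

long_document = "..."  # a chapter or report, up to ~16384 tokens of input
inputs = tokenizer(long_document, return_tensors="pt", truncation=True, max_length=16384)

# config.json (below) already sets num_beams=2, repetition_penalty=3.5, max_length=512, etc.;
# pass explicit arguments to generate() only to override those defaults.
summary_ids = model.generate(**inputs)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```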
config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "_name_or_path": "pszemraj/long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP16",
+   "architectures": [
+     "LongT5ForConditionalGeneration"
+   ],
+   "d_ff": 2816,
+   "d_kv": 64,
+   "d_model": 1024,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "gelu_new",
+   "dropout_rate": 0.1,
+   "early_stopping": true,
+   "encoder_attention_type": "transient-global",
+   "encoder_no_repeat_ngram_size": 4,
+   "eos_token_id": 1,
+   "feed_forward_proj": "gated-gelu",
+   "global_block_size": 16,
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": true,
+   "layer_norm_epsilon": 1e-06,
+   "length_penalty": 0.8,
+   "local_radius": 127,
+   "max_length": 512,
+   "min_length": 8,
+   "model_type": "longt5",
+   "n_positions": 4096,
+   "no_repeat_ngram_size": 3,
+   "num_beams": 2,
+   "num_decoder_layers": 24,
+   "num_heads": 16,
+   "num_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "repetition_penalty": 3.5,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.23.0.dev0",
+   "use_cache": false,
+   "vocab_size": 32100
+ }
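
A short sketch of how the generation defaults stored in this config surface at inference time; it assumes it is run from a local clone containing this `config.json`:

```python
# Sketch: inspect the generation-related defaults baked into config.json.
from transformers import AutoConfig

config = AutoConfig.from_pretrained(".")  # "." = a local clone of this repo
print(config.model_type, config.encoder_attention_type)  # longt5, transient-global
print(config.local_radius, config.global_block_size)     # 127, 16
# Defaults picked up by model.generate() unless explicitly overridden:
print(config.num_beams, config.no_repeat_ngram_size, config.encoder_no_repeat_ngram_size)      # 2, 3, 4
print(config.repetition_penalty, config.length_penalty, config.min_length, config.max_length)  # 3.5, 0.8, 8, 512
```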
long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP16-ft3-booksum_training_metadata.json ADDED
@@ -0,0 +1 @@
+ {"output_dir": "/content/drive/MyDrive/Programming/hf-trainer/long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP16-ft3-booksum", "overwrite_output_dir": true, "do_train": false, "do_eval": false, "do_predict": false, "evaluation_strategy": "no", "prediction_loss_only": false, "per_device_train_batch_size": 1, "per_device_eval_batch_size": 1, "per_gpu_train_batch_size": "None", "per_gpu_eval_batch_size": "None", "gradient_accumulation_steps": 64, "eval_accumulation_steps": "None", "eval_delay": 0, "learning_rate": 0.0001, "weight_decay": 0.02, "adam_beta1": 0.9, "adam_beta2": 0.999, "adam_epsilon": 1e-08, "max_grad_norm": 1, "num_train_epochs": 3, "max_steps": -1, "lr_scheduler_type": "cosine", "warmup_ratio": 0.01, "warmup_steps": 0, "log_level": -1, "log_level_replica": -1, "log_on_each_node": true, "logging_dir": "/content/drive/MyDrive/Programming/hf-trainer/long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP16-ft3-booksum/logs", "logging_strategy": "steps", "logging_first_step": false, "logging_steps": 2, "logging_nan_inf_filter": true, "save_strategy": "steps", "save_steps": 25, "save_total_limit": 1, "save_on_each_node": false, "no_cuda": false, "use_mps_device": false, "seed": 42, "data_seed": "None", "jit_mode_eval": false, "use_ipex": false, "bf16": true, "fp16": false, "fp16_opt_level": "O1", "half_precision_backend": "cuda_amp", "bf16_full_eval": false, "fp16_full_eval": false, "tf32": "None", "local_rank": 0, "xpu_backend": "None", "tpu_num_cores": "None", "tpu_metrics_debug": false, "debug": "[]", "dataloader_drop_last": false, "eval_steps": "None", "dataloader_num_workers": 0, "past_index": -1, "run_name": "/content/drive/MyDrive/Programming/hf-trainer/long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP16-ft3-booksum", "disable_tqdm": false, "remove_unused_columns": true, "label_names": "None", "load_best_model_at_end": false, "metric_for_best_model": "None", "greater_is_better": "None", "ignore_data_skip": false, "sharded_ddp": "[]", "fsdp": "[]", "fsdp_min_num_params": 0, "fsdp_transformer_layer_cls_to_wrap": "None", "deepspeed": "None", "label_smoothing_factor": 0.0, "optim": "adamw_hf", "adafactor": false, "group_by_length": false, "length_column_name": "length", "report_to": "['tensorboard']", "ddp_find_unused_parameters": "None", "ddp_bucket_cap_mb": "None", "dataloader_pin_memory": true, "skip_memory_metrics": false, "use_legacy_prediction_loop": false, "push_to_hub": true, "resume_from_checkpoint": "None", "hub_model_id": "long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP16-ft3-booksum", "hub_strategy": "end", "hub_token": "<HUB_TOKEN>", "hub_private_repo": true, "gradient_checkpointing": true, "include_inputs_for_metrics": false, "fp16_backend": "auto", "push_to_hub_model_id": "None", "push_to_hub_organization": "None", "push_to_hub_token": "<PUSH_TO_HUB_TOKEN>", "_n_gpu": 1, "mp_parameters": "", "auto_find_batch_size": false, "full_determinism": false, "torchdynamo": "None", "ray_scope": "last", "ddp_timeout": 1800, "sortish_sampler": false, "predict_with_generate": false, "generation_max_length": "None", "generation_num_beams": "None", "train_batch_size": 1, "eval_batch_size": 1, "configs_src": "long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP16-ft3-booksum", "use_adam8bit": false, "use_adan_optim": true, "optim_params": "(None, None)", "decay_rate_ada": -0.5}
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3dbd6ba19dec290f0c61c1b657b6739a6070f943342b9658a23be0d8a6666fb7
+ size 3132671411
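
The three lines above are a Git LFS pointer, not the weights themselves; a sketch of fetching the real ~3.1 GB file and checking it against the recorded digest (repo id assumed as in the earlier examples):

```python
# Sketch: download the actual weights behind the LFS pointer and verify the sha256 oid.
import hashlib
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="pszemraj/long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP16-ft3-booksum",
    filename="pytorch_model.bin",
)

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

# Should print 3dbd6ba1... as recorded in the pointer file above.
print(sha.hexdigest())
```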
special_tokens_map.json ADDED
@@ -0,0 +1,107 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<extra_id_0>",
4
+ "<extra_id_1>",
5
+ "<extra_id_2>",
6
+ "<extra_id_3>",
7
+ "<extra_id_4>",
8
+ "<extra_id_5>",
9
+ "<extra_id_6>",
10
+ "<extra_id_7>",
11
+ "<extra_id_8>",
12
+ "<extra_id_9>",
13
+ "<extra_id_10>",
14
+ "<extra_id_11>",
15
+ "<extra_id_12>",
16
+ "<extra_id_13>",
17
+ "<extra_id_14>",
18
+ "<extra_id_15>",
19
+ "<extra_id_16>",
20
+ "<extra_id_17>",
21
+ "<extra_id_18>",
22
+ "<extra_id_19>",
23
+ "<extra_id_20>",
24
+ "<extra_id_21>",
25
+ "<extra_id_22>",
26
+ "<extra_id_23>",
27
+ "<extra_id_24>",
28
+ "<extra_id_25>",
29
+ "<extra_id_26>",
30
+ "<extra_id_27>",
31
+ "<extra_id_28>",
32
+ "<extra_id_29>",
33
+ "<extra_id_30>",
34
+ "<extra_id_31>",
35
+ "<extra_id_32>",
36
+ "<extra_id_33>",
37
+ "<extra_id_34>",
38
+ "<extra_id_35>",
39
+ "<extra_id_36>",
40
+ "<extra_id_37>",
41
+ "<extra_id_38>",
42
+ "<extra_id_39>",
43
+ "<extra_id_40>",
44
+ "<extra_id_41>",
45
+ "<extra_id_42>",
46
+ "<extra_id_43>",
47
+ "<extra_id_44>",
48
+ "<extra_id_45>",
49
+ "<extra_id_46>",
50
+ "<extra_id_47>",
51
+ "<extra_id_48>",
52
+ "<extra_id_49>",
53
+ "<extra_id_50>",
54
+ "<extra_id_51>",
55
+ "<extra_id_52>",
56
+ "<extra_id_53>",
57
+ "<extra_id_54>",
58
+ "<extra_id_55>",
59
+ "<extra_id_56>",
60
+ "<extra_id_57>",
61
+ "<extra_id_58>",
62
+ "<extra_id_59>",
63
+ "<extra_id_60>",
64
+ "<extra_id_61>",
65
+ "<extra_id_62>",
66
+ "<extra_id_63>",
67
+ "<extra_id_64>",
68
+ "<extra_id_65>",
69
+ "<extra_id_66>",
70
+ "<extra_id_67>",
71
+ "<extra_id_68>",
72
+ "<extra_id_69>",
73
+ "<extra_id_70>",
74
+ "<extra_id_71>",
75
+ "<extra_id_72>",
76
+ "<extra_id_73>",
77
+ "<extra_id_74>",
78
+ "<extra_id_75>",
79
+ "<extra_id_76>",
80
+ "<extra_id_77>",
81
+ "<extra_id_78>",
82
+ "<extra_id_79>",
83
+ "<extra_id_80>",
84
+ "<extra_id_81>",
85
+ "<extra_id_82>",
86
+ "<extra_id_83>",
87
+ "<extra_id_84>",
88
+ "<extra_id_85>",
89
+ "<extra_id_86>",
90
+ "<extra_id_87>",
91
+ "<extra_id_88>",
92
+ "<extra_id_89>",
93
+ "<extra_id_90>",
94
+ "<extra_id_91>",
95
+ "<extra_id_92>",
96
+ "<extra_id_93>",
97
+ "<extra_id_94>",
98
+ "<extra_id_95>",
99
+ "<extra_id_96>",
100
+ "<extra_id_97>",
101
+ "<extra_id_98>",
102
+ "<extra_id_99>"
103
+ ],
104
+ "eos_token": "</s>",
105
+ "pad_token": "<pad>",
106
+ "unk_token": "<unk>"
107
+ }
spiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d60acb128cf7b7f2536e8f38a5b18a05535c9e14c7a355904270e15b0945ea86
+ size 791656
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,111 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<extra_id_0>",
4
+ "<extra_id_1>",
5
+ "<extra_id_2>",
6
+ "<extra_id_3>",
7
+ "<extra_id_4>",
8
+ "<extra_id_5>",
9
+ "<extra_id_6>",
10
+ "<extra_id_7>",
11
+ "<extra_id_8>",
12
+ "<extra_id_9>",
13
+ "<extra_id_10>",
14
+ "<extra_id_11>",
15
+ "<extra_id_12>",
16
+ "<extra_id_13>",
17
+ "<extra_id_14>",
18
+ "<extra_id_15>",
19
+ "<extra_id_16>",
20
+ "<extra_id_17>",
21
+ "<extra_id_18>",
22
+ "<extra_id_19>",
23
+ "<extra_id_20>",
24
+ "<extra_id_21>",
25
+ "<extra_id_22>",
26
+ "<extra_id_23>",
27
+ "<extra_id_24>",
28
+ "<extra_id_25>",
29
+ "<extra_id_26>",
30
+ "<extra_id_27>",
31
+ "<extra_id_28>",
32
+ "<extra_id_29>",
33
+ "<extra_id_30>",
34
+ "<extra_id_31>",
35
+ "<extra_id_32>",
36
+ "<extra_id_33>",
37
+ "<extra_id_34>",
38
+ "<extra_id_35>",
39
+ "<extra_id_36>",
40
+ "<extra_id_37>",
41
+ "<extra_id_38>",
42
+ "<extra_id_39>",
43
+ "<extra_id_40>",
44
+ "<extra_id_41>",
45
+ "<extra_id_42>",
46
+ "<extra_id_43>",
47
+ "<extra_id_44>",
48
+ "<extra_id_45>",
49
+ "<extra_id_46>",
50
+ "<extra_id_47>",
51
+ "<extra_id_48>",
52
+ "<extra_id_49>",
53
+ "<extra_id_50>",
54
+ "<extra_id_51>",
55
+ "<extra_id_52>",
56
+ "<extra_id_53>",
57
+ "<extra_id_54>",
58
+ "<extra_id_55>",
59
+ "<extra_id_56>",
60
+ "<extra_id_57>",
61
+ "<extra_id_58>",
62
+ "<extra_id_59>",
63
+ "<extra_id_60>",
64
+ "<extra_id_61>",
65
+ "<extra_id_62>",
66
+ "<extra_id_63>",
67
+ "<extra_id_64>",
68
+ "<extra_id_65>",
69
+ "<extra_id_66>",
70
+ "<extra_id_67>",
71
+ "<extra_id_68>",
72
+ "<extra_id_69>",
73
+ "<extra_id_70>",
74
+ "<extra_id_71>",
75
+ "<extra_id_72>",
76
+ "<extra_id_73>",
77
+ "<extra_id_74>",
78
+ "<extra_id_75>",
79
+ "<extra_id_76>",
80
+ "<extra_id_77>",
81
+ "<extra_id_78>",
82
+ "<extra_id_79>",
83
+ "<extra_id_80>",
84
+ "<extra_id_81>",
85
+ "<extra_id_82>",
86
+ "<extra_id_83>",
87
+ "<extra_id_84>",
88
+ "<extra_id_85>",
89
+ "<extra_id_86>",
90
+ "<extra_id_87>",
91
+ "<extra_id_88>",
92
+ "<extra_id_89>",
93
+ "<extra_id_90>",
94
+ "<extra_id_91>",
95
+ "<extra_id_92>",
96
+ "<extra_id_93>",
97
+ "<extra_id_94>",
98
+ "<extra_id_95>",
99
+ "<extra_id_96>",
100
+ "<extra_id_97>",
101
+ "<extra_id_98>",
102
+ "<extra_id_99>"
103
+ ],
104
+ "eos_token": "</s>",
105
+ "extra_ids": 100,
106
+ "name_or_path": "pszemraj/long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP16",
107
+ "pad_token": "<pad>",
108
+ "special_tokens_map_file": null,
109
+ "tokenizer_class": "T5Tokenizer",
110
+ "unk_token": "<unk>"
111
+ }
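
Together, `spiece.model`, `tokenizer.json`, and the two JSON files above describe a standard T5 SentencePiece tokenizer with 100 `<extra_id_*>` sentinel tokens; a quick loading sketch (repo id assumed as in the earlier examples):

```python
# Sketch: load the tokenizer and confirm the setup described above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("pszemraj/long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP16-ft3-booksum")
print(len(tok))  # should be 32100, matching vocab_size in config.json (32000 pieces + 100 sentinels)
print(tok.eos_token, tok.pad_token, tok.unk_token)  # </s> <pad> <unk>
print(tok("Summarize the following chapter: ...").input_ids[:10])
```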
trainer_state.json ADDED
@@ -0,0 +1,916 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 1.8158640226628895,
5
+ "global_step": 300,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 0.01,
12
+ "learning_rate": 4e-05,
13
+ "loss": 1.9088,
14
+ "step": 2
15
+ },
16
+ {
17
+ "epoch": 0.02,
18
+ "learning_rate": 8e-05,
19
+ "loss": 1.9287,
20
+ "step": 4
21
+ },
22
+ {
23
+ "epoch": 0.04,
24
+ "learning_rate": 9.99989723479183e-05,
25
+ "loss": 1.7736,
26
+ "step": 6
27
+ },
28
+ {
29
+ "epoch": 0.05,
30
+ "learning_rate": 9.999075138471951e-05,
31
+ "loss": 1.9095,
32
+ "step": 8
33
+ },
34
+ {
35
+ "epoch": 0.06,
36
+ "learning_rate": 9.99743108100344e-05,
37
+ "loss": 1.918,
38
+ "step": 10
39
+ },
40
+ {
41
+ "epoch": 0.07,
42
+ "learning_rate": 9.994965332706573e-05,
43
+ "loss": 1.8706,
44
+ "step": 12
45
+ },
46
+ {
47
+ "epoch": 0.08,
48
+ "learning_rate": 9.991678299006205e-05,
49
+ "loss": 1.9351,
50
+ "step": 14
51
+ },
52
+ {
53
+ "epoch": 0.1,
54
+ "learning_rate": 9.987570520365104e-05,
55
+ "loss": 1.8793,
56
+ "step": 16
57
+ },
58
+ {
59
+ "epoch": 0.11,
60
+ "learning_rate": 9.982642672195092e-05,
61
+ "loss": 1.8827,
62
+ "step": 18
63
+ },
64
+ {
65
+ "epoch": 0.12,
66
+ "learning_rate": 9.976895564745991e-05,
67
+ "loss": 1.8303,
68
+ "step": 20
69
+ },
70
+ {
71
+ "epoch": 0.13,
72
+ "learning_rate": 9.970330142972401e-05,
73
+ "loss": 1.8379,
74
+ "step": 22
75
+ },
76
+ {
77
+ "epoch": 0.15,
78
+ "learning_rate": 9.962947486378326e-05,
79
+ "loss": 1.8987,
80
+ "step": 24
81
+ },
82
+ {
83
+ "epoch": 0.16,
84
+ "learning_rate": 9.954748808839674e-05,
85
+ "loss": 1.9518,
86
+ "step": 26
87
+ },
88
+ {
89
+ "epoch": 0.17,
90
+ "learning_rate": 9.945735458404681e-05,
91
+ "loss": 1.8511,
92
+ "step": 28
93
+ },
94
+ {
95
+ "epoch": 0.18,
96
+ "learning_rate": 9.935908917072252e-05,
97
+ "loss": 1.9064,
98
+ "step": 30
99
+ },
100
+ {
101
+ "epoch": 0.19,
102
+ "learning_rate": 9.925270800548285e-05,
103
+ "loss": 1.8882,
104
+ "step": 32
105
+ },
106
+ {
107
+ "epoch": 0.21,
108
+ "learning_rate": 9.91382285798002e-05,
109
+ "loss": 1.9368,
110
+ "step": 34
111
+ },
112
+ {
113
+ "epoch": 0.22,
114
+ "learning_rate": 9.901566971668437e-05,
115
+ "loss": 1.8747,
116
+ "step": 36
117
+ },
118
+ {
119
+ "epoch": 0.23,
120
+ "learning_rate": 9.888505156758759e-05,
121
+ "loss": 1.9426,
122
+ "step": 38
123
+ },
124
+ {
125
+ "epoch": 0.24,
126
+ "learning_rate": 9.874639560909117e-05,
127
+ "loss": 1.9316,
128
+ "step": 40
129
+ },
130
+ {
131
+ "epoch": 0.25,
132
+ "learning_rate": 9.859972463937441e-05,
133
+ "loss": 1.867,
134
+ "step": 42
135
+ },
136
+ {
137
+ "epoch": 0.27,
138
+ "learning_rate": 9.844506277446577e-05,
139
+ "loss": 1.8927,
140
+ "step": 44
141
+ },
142
+ {
143
+ "epoch": 0.28,
144
+ "learning_rate": 9.828243544427796e-05,
145
+ "loss": 1.9449,
146
+ "step": 46
147
+ },
148
+ {
149
+ "epoch": 0.29,
150
+ "learning_rate": 9.811186938842645e-05,
151
+ "loss": 1.8571,
152
+ "step": 48
153
+ },
154
+ {
155
+ "epoch": 0.3,
156
+ "learning_rate": 9.793339265183303e-05,
157
+ "loss": 1.8346,
158
+ "step": 50
159
+ },
160
+ {
161
+ "epoch": 0.31,
162
+ "learning_rate": 9.774703458011453e-05,
163
+ "loss": 1.9036,
164
+ "step": 52
165
+ },
166
+ {
167
+ "epoch": 0.33,
168
+ "learning_rate": 9.755282581475769e-05,
169
+ "loss": 1.8838,
170
+ "step": 54
171
+ },
172
+ {
173
+ "epoch": 0.34,
174
+ "learning_rate": 9.735079828808107e-05,
175
+ "loss": 1.959,
176
+ "step": 56
177
+ },
178
+ {
179
+ "epoch": 0.35,
180
+ "learning_rate": 9.714098521798465e-05,
181
+ "loss": 1.9376,
182
+ "step": 58
183
+ },
184
+ {
185
+ "epoch": 0.36,
186
+ "learning_rate": 9.692342110248802e-05,
187
+ "loss": 1.9326,
188
+ "step": 60
189
+ },
190
+ {
191
+ "epoch": 0.37,
192
+ "learning_rate": 9.669814171405816e-05,
193
+ "loss": 1.9344,
194
+ "step": 62
195
+ },
196
+ {
197
+ "epoch": 0.39,
198
+ "learning_rate": 9.64651840937276e-05,
199
+ "loss": 1.9038,
200
+ "step": 64
201
+ },
202
+ {
203
+ "epoch": 0.4,
204
+ "learning_rate": 9.622458654500409e-05,
205
+ "loss": 1.9294,
206
+ "step": 66
207
+ },
208
+ {
209
+ "epoch": 0.41,
210
+ "learning_rate": 9.597638862757255e-05,
211
+ "loss": 1.847,
212
+ "step": 68
213
+ },
214
+ {
215
+ "epoch": 0.42,
216
+ "learning_rate": 9.572063115079063e-05,
217
+ "loss": 1.9167,
218
+ "step": 70
219
+ },
220
+ {
221
+ "epoch": 0.44,
222
+ "learning_rate": 9.545735616697875e-05,
223
+ "loss": 1.8309,
224
+ "step": 72
225
+ },
226
+ {
227
+ "epoch": 0.45,
228
+ "learning_rate": 9.518660696450568e-05,
229
+ "loss": 1.8982,
230
+ "step": 74
231
+ },
232
+ {
233
+ "epoch": 0.46,
234
+ "learning_rate": 9.490842806067095e-05,
235
+ "loss": 1.8734,
236
+ "step": 76
237
+ },
238
+ {
239
+ "epoch": 0.47,
240
+ "learning_rate": 9.46228651943853e-05,
241
+ "loss": 1.8738,
242
+ "step": 78
243
+ },
244
+ {
245
+ "epoch": 0.48,
246
+ "learning_rate": 9.432996531865002e-05,
247
+ "loss": 1.9731,
248
+ "step": 80
249
+ },
250
+ {
251
+ "epoch": 0.5,
252
+ "learning_rate": 9.40297765928369e-05,
253
+ "loss": 1.9105,
254
+ "step": 82
255
+ },
256
+ {
257
+ "epoch": 0.51,
258
+ "learning_rate": 9.372234837476978e-05,
259
+ "loss": 1.8629,
260
+ "step": 84
261
+ },
262
+ {
263
+ "epoch": 0.52,
264
+ "learning_rate": 9.340773121260893e-05,
265
+ "loss": 1.8565,
266
+ "step": 86
267
+ },
268
+ {
269
+ "epoch": 0.53,
270
+ "learning_rate": 9.308597683653975e-05,
271
+ "loss": 1.9962,
272
+ "step": 88
273
+ },
274
+ {
275
+ "epoch": 0.54,
276
+ "learning_rate": 9.275713815026731e-05,
277
+ "loss": 1.8981,
278
+ "step": 90
279
+ },
280
+ {
281
+ "epoch": 0.56,
282
+ "learning_rate": 9.242126922231763e-05,
283
+ "loss": 1.9132,
284
+ "step": 92
285
+ },
286
+ {
287
+ "epoch": 0.57,
288
+ "learning_rate": 9.207842527714767e-05,
289
+ "loss": 1.9159,
290
+ "step": 94
291
+ },
292
+ {
293
+ "epoch": 0.58,
294
+ "learning_rate": 9.172866268606513e-05,
295
+ "loss": 1.9617,
296
+ "step": 96
297
+ },
298
+ {
299
+ "epoch": 0.59,
300
+ "learning_rate": 9.137203895795983e-05,
301
+ "loss": 1.8794,
302
+ "step": 98
303
+ },
304
+ {
305
+ "epoch": 0.6,
306
+ "learning_rate": 9.10086127298478e-05,
307
+ "loss": 2.0122,
308
+ "step": 100
309
+ },
310
+ {
311
+ "epoch": 0.62,
312
+ "learning_rate": 9.063844375723014e-05,
313
+ "loss": 1.8813,
314
+ "step": 102
315
+ },
316
+ {
317
+ "epoch": 0.63,
318
+ "learning_rate": 9.02615929042678e-05,
319
+ "loss": 1.9717,
320
+ "step": 104
321
+ },
322
+ {
323
+ "epoch": 0.64,
324
+ "learning_rate": 8.987812213377424e-05,
325
+ "loss": 1.9022,
326
+ "step": 106
327
+ },
328
+ {
329
+ "epoch": 0.65,
330
+ "learning_rate": 8.948809449702711e-05,
331
+ "loss": 1.9205,
332
+ "step": 108
333
+ },
334
+ {
335
+ "epoch": 0.66,
336
+ "learning_rate": 8.90915741234015e-05,
337
+ "loss": 1.8173,
338
+ "step": 110
339
+ },
340
+ {
341
+ "epoch": 0.68,
342
+ "learning_rate": 8.868862620982534e-05,
343
+ "loss": 2.0031,
344
+ "step": 112
345
+ },
346
+ {
347
+ "epoch": 0.69,
348
+ "learning_rate": 8.827931701005974e-05,
349
+ "loss": 1.9448,
350
+ "step": 114
351
+ },
352
+ {
353
+ "epoch": 0.7,
354
+ "learning_rate": 8.786371382380528e-05,
355
+ "loss": 1.9093,
356
+ "step": 116
357
+ },
358
+ {
359
+ "epoch": 0.71,
360
+ "learning_rate": 8.744188498563641e-05,
361
+ "loss": 1.9093,
362
+ "step": 118
363
+ },
364
+ {
365
+ "epoch": 0.73,
366
+ "learning_rate": 8.701389985376578e-05,
367
+ "loss": 1.8793,
368
+ "step": 120
369
+ },
370
+ {
371
+ "epoch": 0.74,
372
+ "learning_rate": 8.657982879864007e-05,
373
+ "loss": 1.9477,
374
+ "step": 122
375
+ },
376
+ {
377
+ "epoch": 0.75,
378
+ "learning_rate": 8.613974319136958e-05,
379
+ "loss": 1.8808,
380
+ "step": 124
381
+ },
382
+ {
383
+ "epoch": 0.76,
384
+ "learning_rate": 8.569371539199316e-05,
385
+ "loss": 1.9425,
386
+ "step": 126
387
+ },
388
+ {
389
+ "epoch": 0.77,
390
+ "learning_rate": 8.524181873758059e-05,
391
+ "loss": 1.8945,
392
+ "step": 128
393
+ },
394
+ {
395
+ "epoch": 0.79,
396
+ "learning_rate": 8.478412753017433e-05,
397
+ "loss": 1.9579,
398
+ "step": 130
399
+ },
400
+ {
401
+ "epoch": 0.8,
402
+ "learning_rate": 8.432071702457252e-05,
403
+ "loss": 1.9224,
404
+ "step": 132
405
+ },
406
+ {
407
+ "epoch": 0.81,
408
+ "learning_rate": 8.385166341595548e-05,
409
+ "loss": 1.9117,
410
+ "step": 134
411
+ },
412
+ {
413
+ "epoch": 0.82,
414
+ "learning_rate": 8.33770438273574e-05,
415
+ "loss": 1.9455,
416
+ "step": 136
417
+ },
418
+ {
419
+ "epoch": 0.83,
420
+ "learning_rate": 8.289693629698564e-05,
421
+ "loss": 1.8856,
422
+ "step": 138
423
+ },
424
+ {
425
+ "epoch": 0.85,
426
+ "learning_rate": 8.241141976538943e-05,
427
+ "loss": 1.915,
428
+ "step": 140
429
+ },
430
+ {
431
+ "epoch": 0.86,
432
+ "learning_rate": 8.192057406248028e-05,
433
+ "loss": 1.9302,
434
+ "step": 142
435
+ },
436
+ {
437
+ "epoch": 0.87,
438
+ "learning_rate": 8.142447989440618e-05,
439
+ "loss": 1.8647,
440
+ "step": 144
441
+ },
442
+ {
443
+ "epoch": 0.88,
444
+ "learning_rate": 8.092321883028158e-05,
445
+ "loss": 1.8653,
446
+ "step": 146
447
+ },
448
+ {
449
+ "epoch": 0.89,
450
+ "learning_rate": 8.041687328877567e-05,
451
+ "loss": 1.9183,
452
+ "step": 148
453
+ },
454
+ {
455
+ "epoch": 0.91,
456
+ "learning_rate": 7.990552652456081e-05,
457
+ "loss": 1.852,
458
+ "step": 150
459
+ },
460
+ {
461
+ "epoch": 0.92,
462
+ "learning_rate": 7.938926261462366e-05,
463
+ "loss": 1.9445,
464
+ "step": 152
465
+ },
466
+ {
467
+ "epoch": 0.93,
468
+ "learning_rate": 7.886816644444098e-05,
469
+ "loss": 1.8727,
470
+ "step": 154
471
+ },
472
+ {
473
+ "epoch": 0.94,
474
+ "learning_rate": 7.83423236940225e-05,
475
+ "loss": 1.9182,
476
+ "step": 156
477
+ },
478
+ {
479
+ "epoch": 0.95,
480
+ "learning_rate": 7.781182082382325e-05,
481
+ "loss": 1.9708,
482
+ "step": 158
483
+ },
484
+ {
485
+ "epoch": 0.97,
486
+ "learning_rate": 7.727674506052743e-05,
487
+ "loss": 1.8826,
488
+ "step": 160
489
+ },
490
+ {
491
+ "epoch": 0.98,
492
+ "learning_rate": 7.673718438270648e-05,
493
+ "loss": 1.9755,
494
+ "step": 162
495
+ },
496
+ {
497
+ "epoch": 0.99,
498
+ "learning_rate": 7.619322750635327e-05,
499
+ "loss": 1.947,
500
+ "step": 164
501
+ },
502
+ {
503
+ "epoch": 1.01,
504
+ "learning_rate": 7.564496387029532e-05,
505
+ "loss": 2.3347,
506
+ "step": 166
507
+ },
508
+ {
509
+ "epoch": 1.02,
510
+ "learning_rate": 7.509248362148889e-05,
511
+ "loss": 1.9177,
512
+ "step": 168
513
+ },
514
+ {
515
+ "epoch": 1.03,
516
+ "learning_rate": 7.45358776001969e-05,
517
+ "loss": 1.8889,
518
+ "step": 170
519
+ },
520
+ {
521
+ "epoch": 1.04,
522
+ "learning_rate": 7.39752373250527e-05,
523
+ "loss": 1.8581,
524
+ "step": 172
525
+ },
526
+ {
527
+ "epoch": 1.05,
528
+ "learning_rate": 7.34106549780123e-05,
529
+ "loss": 1.8564,
530
+ "step": 174
531
+ },
532
+ {
533
+ "epoch": 1.07,
534
+ "learning_rate": 7.284222338919758e-05,
535
+ "loss": 1.8354,
536
+ "step": 176
537
+ },
538
+ {
539
+ "epoch": 1.08,
540
+ "learning_rate": 7.227003602163295e-05,
541
+ "loss": 1.89,
542
+ "step": 178
543
+ },
544
+ {
545
+ "epoch": 1.09,
546
+ "learning_rate": 7.169418695587791e-05,
547
+ "loss": 1.9877,
548
+ "step": 180
549
+ },
550
+ {
551
+ "epoch": 1.1,
552
+ "learning_rate": 7.1114770874558e-05,
553
+ "loss": 1.7948,
554
+ "step": 182
555
+ },
556
+ {
557
+ "epoch": 1.11,
558
+ "learning_rate": 7.05318830467969e-05,
559
+ "loss": 1.9217,
560
+ "step": 184
561
+ },
562
+ {
563
+ "epoch": 1.13,
564
+ "learning_rate": 6.99456193125521e-05,
565
+ "loss": 1.7795,
566
+ "step": 186
567
+ },
568
+ {
569
+ "epoch": 1.14,
570
+ "learning_rate": 6.935607606685642e-05,
571
+ "loss": 1.9189,
572
+ "step": 188
573
+ },
574
+ {
575
+ "epoch": 1.15,
576
+ "learning_rate": 6.876335024396872e-05,
577
+ "loss": 1.8445,
578
+ "step": 190
579
+ },
580
+ {
581
+ "epoch": 1.16,
582
+ "learning_rate": 6.816753930143558e-05,
583
+ "loss": 1.852,
584
+ "step": 192
585
+ },
586
+ {
587
+ "epoch": 1.18,
588
+ "learning_rate": 6.756874120406714e-05,
589
+ "loss": 1.8622,
590
+ "step": 194
591
+ },
592
+ {
593
+ "epoch": 1.19,
594
+ "learning_rate": 6.696705440782938e-05,
595
+ "loss": 1.9092,
596
+ "step": 196
597
+ },
598
+ {
599
+ "epoch": 1.2,
600
+ "learning_rate": 6.636257784365584e-05,
601
+ "loss": 1.8074,
602
+ "step": 198
603
+ },
604
+ {
605
+ "epoch": 1.21,
606
+ "learning_rate": 6.575541090118105e-05,
607
+ "loss": 1.8295,
608
+ "step": 200
609
+ },
610
+ {
611
+ "epoch": 1.22,
612
+ "learning_rate": 6.514565341239861e-05,
613
+ "loss": 1.9595,
614
+ "step": 202
615
+ },
616
+ {
617
+ "epoch": 1.24,
618
+ "learning_rate": 6.453340563524669e-05,
619
+ "loss": 1.8733,
620
+ "step": 204
621
+ },
622
+ {
623
+ "epoch": 1.25,
624
+ "learning_rate": 6.391876823712317e-05,
625
+ "loss": 1.8277,
626
+ "step": 206
627
+ },
628
+ {
629
+ "epoch": 1.26,
630
+ "learning_rate": 6.330184227833376e-05,
631
+ "loss": 1.914,
632
+ "step": 208
633
+ },
634
+ {
635
+ "epoch": 1.27,
636
+ "learning_rate": 6.268272919547537e-05,
637
+ "loss": 1.8684,
638
+ "step": 210
639
+ },
640
+ {
641
+ "epoch": 1.28,
642
+ "learning_rate": 6.206153078475763e-05,
643
+ "loss": 1.9092,
644
+ "step": 212
645
+ },
646
+ {
647
+ "epoch": 1.3,
648
+ "learning_rate": 6.143834918526527e-05,
649
+ "loss": 1.7996,
650
+ "step": 214
651
+ },
652
+ {
653
+ "epoch": 1.31,
654
+ "learning_rate": 6.081328686216418e-05,
655
+ "loss": 1.8958,
656
+ "step": 216
657
+ },
658
+ {
659
+ "epoch": 1.32,
660
+ "learning_rate": 6.0186446589853784e-05,
661
+ "loss": 1.9597,
662
+ "step": 218
663
+ },
664
+ {
665
+ "epoch": 1.33,
666
+ "learning_rate": 5.955793143506863e-05,
667
+ "loss": 1.9054,
668
+ "step": 220
669
+ },
670
+ {
671
+ "epoch": 1.34,
672
+ "learning_rate": 5.8927844739931834e-05,
673
+ "loss": 1.8064,
674
+ "step": 222
675
+ },
676
+ {
677
+ "epoch": 1.36,
678
+ "learning_rate": 5.82962901049634e-05,
679
+ "loss": 1.9105,
680
+ "step": 224
681
+ },
682
+ {
683
+ "epoch": 1.37,
684
+ "learning_rate": 5.766337137204579e-05,
685
+ "loss": 1.8535,
686
+ "step": 226
687
+ },
688
+ {
689
+ "epoch": 1.38,
690
+ "learning_rate": 5.7029192607350146e-05,
691
+ "loss": 1.8961,
692
+ "step": 228
693
+ },
694
+ {
695
+ "epoch": 1.39,
696
+ "learning_rate": 5.6393858084225305e-05,
697
+ "loss": 1.8679,
698
+ "step": 230
699
+ },
700
+ {
701
+ "epoch": 1.4,
702
+ "learning_rate": 5.575747226605298e-05,
703
+ "loss": 1.8808,
704
+ "step": 232
705
+ },
706
+ {
707
+ "epoch": 1.42,
708
+ "learning_rate": 5.512013978907157e-05,
709
+ "loss": 1.8165,
710
+ "step": 234
711
+ },
712
+ {
713
+ "epoch": 1.43,
714
+ "learning_rate": 5.448196544517168e-05,
715
+ "loss": 1.849,
716
+ "step": 236
717
+ },
718
+ {
719
+ "epoch": 1.44,
720
+ "learning_rate": 5.384305416466584e-05,
721
+ "loss": 1.8585,
722
+ "step": 238
723
+ },
724
+ {
725
+ "epoch": 1.45,
726
+ "learning_rate": 5.320351099903565e-05,
727
+ "loss": 1.8477,
728
+ "step": 240
729
+ },
730
+ {
731
+ "epoch": 1.47,
732
+ "learning_rate": 5.256344110365896e-05,
733
+ "loss": 1.84,
734
+ "step": 242
735
+ },
736
+ {
737
+ "epoch": 1.48,
738
+ "learning_rate": 5.192294972051992e-05,
739
+ "loss": 1.847,
740
+ "step": 244
741
+ },
742
+ {
743
+ "epoch": 1.49,
744
+ "learning_rate": 5.128214216090478e-05,
745
+ "loss": 1.9149,
746
+ "step": 246
747
+ },
748
+ {
749
+ "epoch": 1.5,
750
+ "learning_rate": 5.064112378808637e-05,
751
+ "loss": 1.8937,
752
+ "step": 248
753
+ },
754
+ {
755
+ "epoch": 1.51,
756
+ "learning_rate": 5e-05,
757
+ "loss": 1.8805,
758
+ "step": 250
759
+ },
760
+ {
761
+ "epoch": 1.53,
762
+ "learning_rate": 4.935887621191364e-05,
763
+ "loss": 1.862,
764
+ "step": 252
765
+ },
766
+ {
767
+ "epoch": 1.54,
768
+ "learning_rate": 4.871785783909523e-05,
769
+ "loss": 1.8958,
770
+ "step": 254
771
+ },
772
+ {
773
+ "epoch": 1.55,
774
+ "learning_rate": 4.807705027948008e-05,
775
+ "loss": 1.9492,
776
+ "step": 256
777
+ },
778
+ {
779
+ "epoch": 1.56,
780
+ "learning_rate": 4.743655889634105e-05,
781
+ "loss": 1.918,
782
+ "step": 258
783
+ },
784
+ {
785
+ "epoch": 1.57,
786
+ "learning_rate": 4.679648900096436e-05,
787
+ "loss": 1.9169,
788
+ "step": 260
789
+ },
790
+ {
791
+ "epoch": 1.59,
792
+ "learning_rate": 4.6156945835334184e-05,
793
+ "loss": 1.8927,
794
+ "step": 262
795
+ },
796
+ {
797
+ "epoch": 1.6,
798
+ "learning_rate": 4.551803455482833e-05,
799
+ "loss": 1.9368,
800
+ "step": 264
801
+ },
802
+ {
803
+ "epoch": 1.61,
804
+ "learning_rate": 4.487986021092844e-05,
805
+ "loss": 1.9573,
806
+ "step": 266
807
+ },
808
+ {
809
+ "epoch": 1.62,
810
+ "learning_rate": 4.424252773394704e-05,
811
+ "loss": 1.9401,
812
+ "step": 268
813
+ },
814
+ {
815
+ "epoch": 1.63,
816
+ "learning_rate": 4.3606141915774693e-05,
817
+ "loss": 1.8759,
818
+ "step": 270
819
+ },
820
+ {
821
+ "epoch": 1.65,
822
+ "learning_rate": 4.297080739264987e-05,
823
+ "loss": 1.9353,
824
+ "step": 272
825
+ },
826
+ {
827
+ "epoch": 1.66,
828
+ "learning_rate": 4.23366286279542e-05,
829
+ "loss": 1.8375,
830
+ "step": 274
831
+ },
832
+ {
833
+ "epoch": 1.67,
834
+ "learning_rate": 4.1703709895036625e-05,
835
+ "loss": 1.9202,
836
+ "step": 276
837
+ },
838
+ {
839
+ "epoch": 1.68,
840
+ "learning_rate": 4.107215526006817e-05,
841
+ "loss": 1.9248,
842
+ "step": 278
843
+ },
844
+ {
845
+ "epoch": 1.69,
846
+ "learning_rate": 4.04420685649314e-05,
847
+ "loss": 1.7924,
848
+ "step": 280
849
+ },
850
+ {
851
+ "epoch": 1.71,
852
+ "learning_rate": 3.981355341014623e-05,
853
+ "loss": 1.7682,
854
+ "step": 282
855
+ },
856
+ {
857
+ "epoch": 1.72,
858
+ "learning_rate": 3.9186713137835826e-05,
859
+ "loss": 1.8612,
860
+ "step": 284
861
+ },
862
+ {
863
+ "epoch": 1.73,
864
+ "learning_rate": 3.856165081473474e-05,
865
+ "loss": 1.8759,
866
+ "step": 286
867
+ },
868
+ {
869
+ "epoch": 1.74,
870
+ "learning_rate": 3.793846921524237e-05,
871
+ "loss": 1.875,
872
+ "step": 288
873
+ },
874
+ {
875
+ "epoch": 1.76,
876
+ "learning_rate": 3.731727080452464e-05,
877
+ "loss": 1.8429,
878
+ "step": 290
879
+ },
880
+ {
881
+ "epoch": 1.77,
882
+ "learning_rate": 3.6698157721666246e-05,
883
+ "loss": 1.907,
884
+ "step": 292
885
+ },
886
+ {
887
+ "epoch": 1.78,
888
+ "learning_rate": 3.608123176287685e-05,
889
+ "loss": 1.8561,
890
+ "step": 294
891
+ },
892
+ {
893
+ "epoch": 1.79,
894
+ "learning_rate": 3.5466594364753326e-05,
895
+ "loss": 1.9138,
896
+ "step": 296
897
+ },
898
+ {
899
+ "epoch": 1.8,
900
+ "learning_rate": 3.48543465876014e-05,
901
+ "loss": 1.893,
902
+ "step": 298
903
+ },
904
+ {
905
+ "epoch": 1.82,
906
+ "learning_rate": 3.424458909881897e-05,
907
+ "loss": 1.8505,
908
+ "step": 300
909
+ }
910
+ ],
911
+ "max_steps": 495,
912
+ "num_train_epochs": 3,
913
+ "total_flos": 1.4182542633473147e+18,
914
+ "trial_name": null,
915
+ "trial_params": null
916
+ }
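
`trainer_state.json` above records the training loss every 2 steps (per `logging_steps`) up to step 300 of 495; a small sketch for plotting that curve:

```python
# Sketch: plot the training-loss history stored in trainer_state.json.
import json
import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

entries = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in entries]
losses = [e["loss"] for e in entries]

plt.plot(steps, losses)
plt.xlabel("optimizer step")
plt.ylabel("training loss")
plt.title(f"step {state['global_step']} / {state['max_steps']} (epoch {state['epoch']:.2f})")
plt.savefig("loss_curve.png")
```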
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b3ab32065ec72149d7b702b1e4b4f95deadc83116204c2ae6fa43dfadf03d234
+ size 3695
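
`training_args.bin` is also LFS-tracked; it appears to be a pickled `Seq2SeqTrainingArguments` object (the metadata above includes seq2seq-only fields such as `predict_with_generate`), so with a compatible `transformers`/`torch` install (e.g. the 4.23.0.dev0 / 1.10.0 versions noted in the README) it can be inspected directly:

```python
# Sketch: unpickle the committed training arguments (requires a compatible transformers version).
import torch

args = torch.load("training_args.bin")
print(type(args).__name__)                        # expected: Seq2SeqTrainingArguments
print(args.learning_rate, args.num_train_epochs)  # 0.0001, 3
print(args.gradient_accumulation_steps, args.bf16, args.gradient_checkpointing)  # 64, True, True
```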