lyc123456 committed on
Commit 9e7b707
1 Parent(s): c04d1ff

Upload 21 files

README.md CHANGED
@@ -1,3 +1,56 @@
 ---
- license: unknown
+ license: other
+ base_model: Qwen/Qwen2-0.5B
+ tags:
+ - llama-factory
+ - full
+ - generated_from_trainer
+ model-index:
+ - name: train_2024-06-08-23-23-14
+   results: []
 ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # train_2024-06-08-23-23-14
+
+ This model is a fine-tuned version of [Qwen/Qwen2-0.5B](https://huggingface.co/Qwen/Qwen2-0.5B) on the longzu dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 1
+ - eval_batch_size: 8
+ - seed: 42
+ - gradient_accumulation_steps: 8
+ - total_train_batch_size: 8
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - num_epochs: 35.0
+
+ ### Training results
+
+ ### Framework versions
+
+ - Transformers 4.41.2
+ - Pytorch 2.3.0+cu121
+ - Datasets 2.19.2
+ - Tokenizers 0.19.1
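A quick sanity check on the batch-size entries in the card (this is just the arithmetic the Trainer applies, not part of the generated card):

```python
# total_train_batch_size = per-device batch size * gradient accumulation steps
train_batch_size = 1
gradient_accumulation_steps = 8
print(train_batch_size * gradient_accumulation_steps)  # 8, the listed total
```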
added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<|endoftext|>": 151643,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644
+ }
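A minimal check that the tokenizer resolves these added tokens to the ids above (the "." path is illustrative; point it at the repo root):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")  # illustrative: directory holding these files
for token in ("<|endoftext|>", "<|im_end|>", "<|im_start|>"):
    print(token, tok.convert_tokens_to_ids(token))
# expected: 151643, 151645, 151644
```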
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 34.89855072463768,
+   "num_input_tokens_seen": 37412688,
+   "total_flos": 8.033958240027034e+16,
+   "train_loss": 0.3889684765070578,
+   "train_runtime": 37510.9602,
+   "train_samples_per_second": 0.322,
+   "train_steps_per_second": 0.04
+ }
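These summary numbers are mutually consistent; a rough cross-check using only values from this file and the training log below:

```python
runtime_s = 37510.9602
print(runtime_s / 3600)      # ~10.4 hours of training
print(0.322 * runtime_s)     # ~12078 samples, i.e. 345 examples * 35 epochs = 12075
print(37412688 / runtime_s)  # ~997 tokens/s, matching the ~1000 'throughput' in running_log.txt
```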
config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "_name_or_path": "Qwen2-0.5B-finetuning-by-Dragonraja",
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "eos_token_id": 151643,
+   "hidden_act": "silu",
+   "hidden_size": 896,
+   "initializer_range": 0.02,
+   "intermediate_size": 4864,
+   "max_position_embeddings": 131072,
+   "max_window_layers": 24,
+   "model_type": "qwen2",
+   "num_attention_heads": 14,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 2,
+   "rms_norm_eps": 1e-06,
+   "rope_theta": 1000000.0,
+   "sliding_window": 131072,
+   "tie_word_embeddings": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.41.2",
+   "use_cache": false,
+   "use_sliding_window": false,
+   "vocab_size": 151936
+ }
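These shapes pin down the 494,032,768 trainable parameters reported in running_log.txt. A sketch of the arithmetic, assuming the standard Qwen2 layout (biases on the q/k/v projections only, tied input/output embeddings):

```python
h, inter, layers, vocab = 896, 4864, 24, 151936
heads, kv_heads = 14, 2
kv = kv_heads * (h // heads)                    # 2 KV heads * head_dim 64 = 128
attn = (h * h + h) + 2 * (kv * h + kv) + h * h  # q (+bias), k and v (+bias), o
mlp = 3 * h * inter                             # gate, up and down projections
norms = 2 * h                                   # two RMSNorms per layer
total = layers * (attn + mlp + norms) + vocab * h + h  # + tied embedding + final norm
print(total)                                    # 494032768
```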
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token_id": 151643,
+   "eos_token_id": 151643,
+   "max_new_tokens": 2048,
+   "transformers_version": "4.41.2"
+ }
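These values become the generation defaults: model.generate() reads them from this file unless a call overrides them (main.py below passes max_new_tokens=768 explicitly). A minimal check, with an illustrative path:

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained(".")  # illustrative: the repo root
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id, gen_cfg.max_new_tokens)
# 151643 151643 2048
```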
llamaboard_config.yaml ADDED
@@ -0,0 +1,63 @@
+ top.booster: none
+ top.checkpoint_path: null
+ top.finetuning_type: full
+ top.model_name: Qwen2-0.5B
+ top.quantization_bit: none
+ top.rope_scaling: none
+ top.template: default
+ top.visual_inputs: false
+ train.additional_target: ''
+ train.badam_mode: layer
+ train.badam_switch_interval: 50
+ train.badam_switch_mode: ascending
+ train.badam_update_ratio: 0.05
+ train.batch_size: 1
+ train.compute_type: fp32
+ train.create_new_adapter: false
+ train.cutoff_len: 6100
+ train.dataset:
+ - longzu
+ train.dataset_dir: C:\AI\LLaMA-Factory\data
+ train.ds_offload: false
+ train.ds_stage: none
+ train.freeze_extra_modules: ''
+ train.freeze_trainable_layers: 2
+ train.freeze_trainable_modules: all
+ train.galore_rank: 16
+ train.galore_scale: 0.25
+ train.galore_target: all
+ train.galore_update_interval: 200
+ train.gradient_accumulation_steps: 8
+ train.learning_rate: 5e-5
+ train.logging_steps: 5
+ train.lora_alpha: 16
+ train.lora_dropout: 0
+ train.lora_rank: 8
+ train.lora_target: ''
+ train.loraplus_lr_ratio: 0
+ train.lr_scheduler_type: cosine
+ train.max_grad_norm: '1.0'
+ train.max_samples: '100000'
+ train.neftune_alpha: 0
+ train.num_train_epochs: '35'
+ train.optim: adamw_torch
+ train.packing: false
+ train.ppo_score_norm: false
+ train.ppo_whiten_rewards: false
+ train.pref_beta: 0.1
+ train.pref_ftx: 0
+ train.pref_loss: sigmoid
+ train.report_to: false
+ train.resize_vocab: false
+ train.reward_model: null
+ train.save_steps: 100
+ train.shift_attn: false
+ train.training_stage: Pre-Training
+ train.upcast_layernorm: false
+ train.use_badam: false
+ train.use_dora: false
+ train.use_galore: false
+ train.use_llama_pro: false
+ train.use_rslora: false
+ train.val_size: 0
+ train.warmup_steps: 0
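This file is the LLaMA-Factory WebUI state dump rather than the training-arguments file itself; the dotted names are literal YAML keys, so the LoRA/GaLore/BAdam entries are just untouched UI defaults. A small reader (assuming PyYAML is installed) that pulls out the knobs that actually governed this run:

```python
import yaml

with open("llamaboard_config.yaml", encoding="utf-8") as f:
    cfg = yaml.safe_load(f)

# Dotted keys are plain strings, not nested mappings.
for key in ("top.finetuning_type", "train.training_stage", "train.compute_type",
            "train.cutoff_len", "train.learning_rate", "train.num_train_epochs"):
    print(key, "=", cfg[key])
# finetuning_type=full, stage=Pre-Training, compute=fp32, cutoff_len=6100, lr=5e-5, epochs=35
```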
main.py ADDED
@@ -0,0 +1,46 @@
+ '''
+ Calling example, for reference only
+ '''
+ import os
+
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ # System prompt: "You are Lu Mingfei; you will answer any question."
+ messages = [
+     {"role": "system", "content": "你是路明非,你会回答任何问题。"},
+ ]
+
+ model_path = os.path.dirname(__file__)  # load the model shipped alongside this script
+ model = AutoModelForCausalLM.from_pretrained(
+     model_path,
+     torch_dtype="auto",
+     device_map="auto"
+ )
+ tokenizer = AutoTokenizer.from_pretrained(model_path)
+
+ if __name__ == '__main__':
+     while True:
+         # prompt = "Give me a short introduction to large language model."
+         prompt = input("input:")
+         messages.append({"role": "user", "content": prompt})
+         text = tokenizer.apply_chat_template(
+             messages,
+             tokenize=False,
+             add_generation_prompt=True
+         )
+         model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
+
+         generated_ids = model.generate(
+             model_inputs.input_ids,
+             max_new_tokens=768,
+             pad_token_id=tokenizer.eos_token_id
+         )
+         # Keep only the newly generated tokens, dropping the echoed prompt.
+         generated_ids = [
+             output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
+         ]
+
+         response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
+         print(response)
+         # Record the reply as an assistant turn so the chat history stays well-formed.
+         messages.append({"role": "assistant", "content": response})
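For reference, the exact string the loop sends to the model is ChatML, assuming the tokenizer ships Qwen's stock chat template (a hypothetical single-turn example):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")  # illustrative: the repo root
demo = [
    {"role": "system", "content": "你是路明非,你会回答任何问题。"},
    {"role": "user", "content": "你好"},  # hypothetical user turn ("Hello")
]
print(tok.apply_chat_template(demo, tokenize=False, add_generation_prompt=True))
# <|im_start|>system ... <|im_end|> blocks, ending with an open assistant turn
```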
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:684a23379925da2510592007f1a4946dc89b0ee00ae970df406ef9f614a3eb6e
+ size 1976163472
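The size line squares with float32 weights (config.json above records "torch_dtype": "float32"):

```python
params = 494_032_768               # trainable parameters, from running_log.txt
print(params * 4)                  # 1,976,131,072 bytes of float32 tensor data
print(1_976_163_472 - params * 4)  # ~32 KB left over for the safetensors header
```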
requirements.txt ADDED
Binary file (7.41 kB).
 
running_log.txt ADDED
@@ -0,0 +1,904 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 06/08/2024 23:48:32 - INFO - transformers.tokenization_utils_base - loading file vocab.json
2
+
3
+ 06/08/2024 23:48:32 - INFO - transformers.tokenization_utils_base - loading file merges.txt
4
+
5
+ 06/08/2024 23:48:32 - INFO - transformers.tokenization_utils_base - loading file tokenizer.json
6
+
7
+ 06/08/2024 23:48:32 - INFO - transformers.tokenization_utils_base - loading file added_tokens.json
8
+
9
+ 06/08/2024 23:48:32 - INFO - transformers.tokenization_utils_base - loading file special_tokens_map.json
10
+
11
+ 06/08/2024 23:48:32 - INFO - transformers.tokenization_utils_base - loading file tokenizer_config.json
12
+
13
+ 06/08/2024 23:48:33 - WARNING - transformers.tokenization_utils_base - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
14
+
15
+ 06/08/2024 23:48:33 - INFO - llamafactory.data.loader - Loading dataset longzu.json...
16
+
17
+ 06/08/2024 23:48:34 - INFO - transformers.configuration_utils - loading configuration file C:\AI\Qwen2_0.5B\config.json
18
+
19
+ 06/08/2024 23:48:34 - INFO - transformers.configuration_utils - Model config Qwen2Config {
20
+ "_name_or_path": "C:\\AI\\Qwen2_0.5B",
21
+ "architectures": [
22
+ "Qwen2ForCausalLM"
23
+ ],
24
+ "attention_dropout": 0.0,
25
+ "bos_token_id": 151643,
26
+ "eos_token_id": 151643,
27
+ "hidden_act": "silu",
28
+ "hidden_size": 896,
29
+ "initializer_range": 0.02,
30
+ "intermediate_size": 4864,
31
+ "max_position_embeddings": 131072,
32
+ "max_window_layers": 24,
33
+ "model_type": "qwen2",
34
+ "num_attention_heads": 14,
35
+ "num_hidden_layers": 24,
36
+ "num_key_value_heads": 2,
37
+ "rms_norm_eps": 1e-06,
38
+ "rope_theta": 1000000.0,
39
+ "sliding_window": 131072,
40
+ "tie_word_embeddings": true,
41
+ "torch_dtype": "bfloat16",
42
+ "transformers_version": "4.41.2",
43
+ "use_cache": true,
44
+ "use_sliding_window": false,
45
+ "vocab_size": 151936
46
+ }
47
+
48
+
49
+ 06/08/2024 23:48:34 - INFO - transformers.modeling_utils - loading weights file C:\AI\Qwen2_0.5B\model.safetensors
50
+
51
+ 06/08/2024 23:48:34 - INFO - transformers.modeling_utils - Instantiating Qwen2ForCausalLM model under default dtype torch.bfloat16.
52
+
53
+ 06/08/2024 23:48:34 - INFO - transformers.generation.configuration_utils - Generate config GenerationConfig {
54
+ "bos_token_id": 151643,
55
+ "eos_token_id": 151643
56
+ }
57
+
58
+
59
+ 06/08/2024 23:48:36 - INFO - transformers.modeling_utils - All model checkpoint weights were used when initializing Qwen2ForCausalLM.
60
+
61
+
62
+ 06/08/2024 23:48:36 - INFO - transformers.modeling_utils - All the weights of Qwen2ForCausalLM were initialized from the model checkpoint at C:\AI\Qwen2_0.5B.
63
+ If your task is similar to the task the model of the checkpoint was trained on, you can already use Qwen2ForCausalLM for predictions without further training.
64
+
65
+ 06/08/2024 23:48:36 - INFO - transformers.generation.configuration_utils - loading configuration file C:\AI\Qwen2_0.5B\generation_config.json
66
+
67
+ 06/08/2024 23:48:36 - INFO - transformers.generation.configuration_utils - Generate config GenerationConfig {
68
+ "bos_token_id": 151643,
69
+ "eos_token_id": 151643,
70
+ "max_new_tokens": 2048
71
+ }
72
+
73
+
74
+ 06/08/2024 23:48:37 - INFO - llamafactory.model.model_utils.checkpointing - Gradient checkpointing enabled.
75
+
76
+ 06/08/2024 23:48:37 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
77
+
78
+ 06/08/2024 23:48:37 - INFO - llamafactory.model.adapter - Upcasting trainable params to float32.
79
+
80
+ 06/08/2024 23:48:37 - INFO - llamafactory.model.adapter - Fine-tuning method: Full
81
+
82
+ 06/08/2024 23:48:37 - INFO - llamafactory.model.loader - trainable params: 494032768 || all params: 494032768 || trainable%: 100.0000
83
+
84
+ 06/08/2024 23:48:37 - INFO - transformers.trainer - ***** Running training *****
85
+
86
+ 06/08/2024 23:48:37 - INFO - transformers.trainer - Num examples = 345
87
+
88
+ 06/08/2024 23:48:37 - INFO - transformers.trainer - Num Epochs = 35
89
+
90
+ 06/08/2024 23:48:37 - INFO - transformers.trainer - Instantaneous batch size per device = 1
91
+
92
+ 06/08/2024 23:48:37 - INFO - transformers.trainer - Total train batch size (w. parallel, distributed & accumulation) = 8
93
+
94
+ 06/08/2024 23:48:37 - INFO - transformers.trainer - Gradient Accumulation steps = 8
95
+
96
+ 06/08/2024 23:48:37 - INFO - transformers.trainer - Total optimization steps = 1,505
97
+
98
+ 06/08/2024 23:48:37 - INFO - transformers.trainer - Number of trainable parameters = 494,032,768
99
+
100
+ 06/08/2024 23:50:19 - INFO - llamafactory.extras.callbacks - {'loss': 3.8116, 'learning_rate': 4.9999e-05, 'epoch': 0.12, 'throughput': 1052.64}
101
+
102
+ 06/08/2024 23:52:10 - INFO - llamafactory.extras.callbacks - {'loss': 3.6928, 'learning_rate': 4.9995e-05, 'epoch': 0.23, 'throughput': 1060.20}
103
+
104
+ 06/08/2024 23:54:31 - INFO - llamafactory.extras.callbacks - {'loss': 3.6227, 'learning_rate': 4.9988e-05, 'epoch': 0.35, 'throughput': 1024.22}
105
+
106
+ 06/08/2024 23:56:20 - INFO - llamafactory.extras.callbacks - {'loss': 3.6010, 'learning_rate': 4.9978e-05, 'epoch': 0.46, 'throughput': 1032.88}
107
+
108
+ 06/08/2024 23:58:03 - INFO - llamafactory.extras.callbacks - {'loss': 3.5390, 'learning_rate': 4.9966e-05, 'epoch': 0.58, 'throughput': 1040.84}
109
+
110
+ 06/08/2024 23:59:56 - INFO - llamafactory.extras.callbacks - {'loss': 3.4956, 'learning_rate': 4.9951e-05, 'epoch': 0.70, 'throughput': 1040.12}
111
+
112
+ 06/09/2024 00:02:30 - INFO - llamafactory.extras.callbacks - {'loss': 3.5044, 'learning_rate': 4.9933e-05, 'epoch': 0.81, 'throughput': 1024.54}
113
+
114
+ 06/09/2024 00:04:46 - INFO - llamafactory.extras.callbacks - {'loss': 3.4324, 'learning_rate': 4.9913e-05, 'epoch': 0.93, 'throughput': 1022.18}
115
+
116
+ 06/09/2024 00:07:00 - INFO - llamafactory.extras.callbacks - {'loss': 3.2542, 'learning_rate': 4.9890e-05, 'epoch': 1.04, 'throughput': 1021.10}
117
+
118
+ 06/09/2024 00:08:58 - INFO - llamafactory.extras.callbacks - {'loss': 2.9024, 'learning_rate': 4.9864e-05, 'epoch': 1.16, 'throughput': 1023.49}
119
+
120
+ 06/09/2024 00:11:02 - INFO - llamafactory.extras.callbacks - {'loss': 2.8069, 'learning_rate': 4.9835e-05, 'epoch': 1.28, 'throughput': 1022.63}
121
+
122
+ 06/09/2024 00:12:57 - INFO - llamafactory.extras.callbacks - {'loss': 2.7500, 'learning_rate': 4.9804e-05, 'epoch': 1.39, 'throughput': 1020.77}
123
+
124
+ 06/09/2024 00:14:40 - INFO - llamafactory.extras.callbacks - {'loss': 2.7329, 'learning_rate': 4.9770e-05, 'epoch': 1.51, 'throughput': 1024.15}
125
+
126
+ 06/09/2024 00:17:00 - INFO - llamafactory.extras.callbacks - {'loss': 2.8212, 'learning_rate': 4.9734e-05, 'epoch': 1.62, 'throughput': 1016.55}
127
+
128
+ 06/09/2024 00:19:05 - INFO - llamafactory.extras.callbacks - {'loss': 2.7935, 'learning_rate': 4.9694e-05, 'epoch': 1.74, 'throughput': 1016.39}
129
+
130
+ 06/09/2024 00:21:08 - INFO - llamafactory.extras.callbacks - {'loss': 2.7305, 'learning_rate': 4.9652e-05, 'epoch': 1.86, 'throughput': 1017.05}
131
+
132
+ 06/09/2024 00:23:25 - INFO - llamafactory.extras.callbacks - {'loss': 2.6482, 'learning_rate': 4.9608e-05, 'epoch': 1.97, 'throughput': 1010.18}
133
+
134
+ 06/09/2024 00:25:46 - INFO - llamafactory.extras.callbacks - {'loss': 2.4292, 'learning_rate': 4.9560e-05, 'epoch': 2.09, 'throughput': 1007.97}
135
+
136
+ 06/09/2024 00:28:12 - INFO - llamafactory.extras.callbacks - {'loss': 2.1416, 'learning_rate': 4.9510e-05, 'epoch': 2.20, 'throughput': 1005.24}
137
+
138
+ 06/09/2024 00:30:30 - INFO - llamafactory.extras.callbacks - {'loss': 2.2847, 'learning_rate': 4.9457e-05, 'epoch': 2.32, 'throughput': 1003.68}
139
+
140
+ 06/09/2024 00:30:30 - INFO - transformers.trainer - Saving model checkpoint to saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-100
141
+
142
+ 06/09/2024 00:30:30 - INFO - transformers.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-100\config.json
143
+
144
+ 06/09/2024 00:30:30 - INFO - transformers.generation.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-100\generation_config.json
145
+
146
+ 06/09/2024 00:30:42 - INFO - transformers.modeling_utils - Model weights saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-100\model.safetensors
147
+
148
+ 06/09/2024 00:30:42 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-100\tokenizer_config.json
149
+
150
+ 06/09/2024 00:30:42 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-100\special_tokens_map.json
151
+
152
+ 06/09/2024 00:33:05 - INFO - llamafactory.extras.callbacks - {'loss': 2.0944, 'learning_rate': 4.9402e-05, 'epoch': 2.43, 'throughput': 990.48}
153
+
154
+ 06/09/2024 00:35:29 - INFO - llamafactory.extras.callbacks - {'loss': 2.2862, 'learning_rate': 4.9344e-05, 'epoch': 2.55, 'throughput': 988.89}
155
+
156
+ 06/09/2024 00:37:04 - INFO - llamafactory.extras.callbacks - {'loss': 2.1050, 'learning_rate': 4.9283e-05, 'epoch': 2.67, 'throughput': 992.54}
157
+
158
+ 06/09/2024 00:38:56 - INFO - llamafactory.extras.callbacks - {'loss': 2.0287, 'learning_rate': 4.9220e-05, 'epoch': 2.78, 'throughput': 992.56}
159
+
160
+ 06/09/2024 00:40:32 - INFO - llamafactory.extras.callbacks - {'loss': 2.0162, 'learning_rate': 4.9154e-05, 'epoch': 2.90, 'throughput': 995.97}
161
+
162
+ 06/09/2024 00:42:39 - INFO - llamafactory.extras.callbacks - {'loss': 2.2464, 'learning_rate': 4.9085e-05, 'epoch': 3.01, 'throughput': 997.54}
163
+
164
+ 06/09/2024 00:44:38 - INFO - llamafactory.extras.callbacks - {'loss': 1.6561, 'learning_rate': 4.9014e-05, 'epoch': 3.13, 'throughput': 999.52}
165
+
166
+ 06/09/2024 00:46:47 - INFO - llamafactory.extras.callbacks - {'loss': 1.6113, 'learning_rate': 4.8940e-05, 'epoch': 3.25, 'throughput': 1000.08}
167
+
168
+ 06/09/2024 00:48:53 - INFO - llamafactory.extras.callbacks - {'loss': 1.6235, 'learning_rate': 4.8864e-05, 'epoch': 3.36, 'throughput': 998.34}
169
+
170
+ 06/09/2024 00:51:17 - INFO - llamafactory.extras.callbacks - {'loss': 1.7873, 'learning_rate': 4.8784e-05, 'epoch': 3.48, 'throughput': 997.72}
171
+
172
+ 06/09/2024 00:53:20 - INFO - llamafactory.extras.callbacks - {'loss': 1.3723, 'learning_rate': 4.8703e-05, 'epoch': 3.59, 'throughput': 995.58}
173
+
174
+ 06/09/2024 00:55:20 - INFO - llamafactory.extras.callbacks - {'loss': 1.6512, 'learning_rate': 4.8619e-05, 'epoch': 3.71, 'throughput': 997.10}
175
+
176
+ 06/09/2024 00:57:20 - INFO - llamafactory.extras.callbacks - {'loss': 1.5524, 'learning_rate': 4.8532e-05, 'epoch': 3.83, 'throughput': 997.64}
177
+
178
+ 06/09/2024 00:59:17 - INFO - llamafactory.extras.callbacks - {'loss': 1.5954, 'learning_rate': 4.8442e-05, 'epoch': 3.94, 'throughput': 999.10}
179
+
180
+ 06/09/2024 01:00:53 - INFO - llamafactory.extras.callbacks - {'loss': 1.1652, 'learning_rate': 4.8350e-05, 'epoch': 4.06, 'throughput': 1001.11}
181
+
182
+ 06/09/2024 01:03:08 - INFO - llamafactory.extras.callbacks - {'loss': 1.1995, 'learning_rate': 4.8256e-05, 'epoch': 4.17, 'throughput': 1000.51}
183
+
184
+ 06/09/2024 01:05:06 - INFO - llamafactory.extras.callbacks - {'loss': 0.9431, 'learning_rate': 4.8159e-05, 'epoch': 4.29, 'throughput': 999.58}
185
+
186
+ 06/09/2024 01:07:07 - INFO - llamafactory.extras.callbacks - {'loss': 1.1136, 'learning_rate': 4.8059e-05, 'epoch': 4.41, 'throughput': 998.32}
187
+
188
+ 06/09/2024 01:09:12 - INFO - llamafactory.extras.callbacks - {'loss': 1.1509, 'learning_rate': 4.7957e-05, 'epoch': 4.52, 'throughput': 999.32}
189
+
190
+ 06/09/2024 01:10:56 - INFO - llamafactory.extras.callbacks - {'loss': 0.9962, 'learning_rate': 4.7853e-05, 'epoch': 4.64, 'throughput': 1000.59}
191
+
192
+ 06/09/2024 01:10:56 - INFO - transformers.trainer - Saving model checkpoint to saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-200
193
+
194
+ 06/09/2024 01:10:56 - INFO - transformers.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-200\config.json
195
+
196
+ 06/09/2024 01:10:56 - INFO - transformers.generation.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-200\generation_config.json
197
+
198
+ 06/09/2024 01:11:07 - INFO - transformers.modeling_utils - Model weights saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-200\model.safetensors
199
+
200
+ 06/09/2024 01:11:07 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-200\tokenizer_config.json
201
+
202
+ 06/09/2024 01:11:07 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-200\special_tokens_map.json
203
+
204
+ 06/09/2024 01:13:45 - INFO - llamafactory.extras.callbacks - {'loss': 1.2377, 'learning_rate': 4.7746e-05, 'epoch': 4.75, 'throughput': 996.13}
205
+
206
+ 06/09/2024 01:15:55 - INFO - llamafactory.extras.callbacks - {'loss': 1.2340, 'learning_rate': 4.7636e-05, 'epoch': 4.87, 'throughput': 996.52}
207
+
208
+ 06/09/2024 01:17:48 - INFO - llamafactory.extras.callbacks - {'loss': 1.1453, 'learning_rate': 4.7524e-05, 'epoch': 4.99, 'throughput': 998.06}
209
+
210
+ 06/09/2024 01:20:08 - INFO - llamafactory.extras.callbacks - {'loss': 0.9056, 'learning_rate': 4.7410e-05, 'epoch': 5.10, 'throughput': 997.31}
211
+
212
+ 06/09/2024 01:21:58 - INFO - llamafactory.extras.callbacks - {'loss': 0.7349, 'learning_rate': 4.7293e-05, 'epoch': 5.22, 'throughput': 998.01}
213
+
214
+ 06/09/2024 01:23:54 - INFO - llamafactory.extras.callbacks - {'loss': 0.6968, 'learning_rate': 4.7174e-05, 'epoch': 5.33, 'throughput': 999.00}
215
+
216
+ 06/09/2024 01:26:19 - INFO - llamafactory.extras.callbacks - {'loss': 0.8510, 'learning_rate': 4.7052e-05, 'epoch': 5.45, 'throughput': 998.45}
217
+
218
+ 06/09/2024 01:28:14 - INFO - llamafactory.extras.callbacks - {'loss': 0.7160, 'learning_rate': 4.6928e-05, 'epoch': 5.57, 'throughput': 999.21}
219
+
220
+ 06/09/2024 01:30:03 - INFO - llamafactory.extras.callbacks - {'loss': 0.6384, 'learning_rate': 4.6801e-05, 'epoch': 5.68, 'throughput': 1000.40}
221
+
222
+ 06/09/2024 01:32:24 - INFO - llamafactory.extras.callbacks - {'loss': 0.7799, 'learning_rate': 4.6672e-05, 'epoch': 5.80, 'throughput': 998.13}
223
+
224
+ 06/09/2024 01:34:26 - INFO - llamafactory.extras.callbacks - {'loss': 0.7923, 'learning_rate': 4.6541e-05, 'epoch': 5.91, 'throughput': 997.95}
225
+
226
+ 06/09/2024 01:36:29 - INFO - llamafactory.extras.callbacks - {'loss': 0.7452, 'learning_rate': 4.6408e-05, 'epoch': 6.03, 'throughput': 998.69}
227
+
228
+ 06/09/2024 01:38:14 - INFO - llamafactory.extras.callbacks - {'loss': 0.4085, 'learning_rate': 4.6272e-05, 'epoch': 6.14, 'throughput': 999.98}
229
+
230
+ 06/09/2024 01:40:29 - INFO - llamafactory.extras.callbacks - {'loss': 0.5777, 'learning_rate': 4.6133e-05, 'epoch': 6.26, 'throughput': 1000.40}
231
+
232
+ 06/09/2024 01:42:32 - INFO - llamafactory.extras.callbacks - {'loss': 0.4718, 'learning_rate': 4.5993e-05, 'epoch': 6.38, 'throughput': 1000.87}
233
+
234
+ 06/09/2024 01:44:17 - INFO - llamafactory.extras.callbacks - {'loss': 0.3723, 'learning_rate': 4.5850e-05, 'epoch': 6.49, 'throughput': 1002.09}
235
+
236
+ 06/09/2024 01:46:27 - INFO - llamafactory.extras.callbacks - {'loss': 0.4453, 'learning_rate': 4.5705e-05, 'epoch': 6.61, 'throughput': 1000.84}
237
+
238
+ 06/09/2024 01:48:29 - INFO - llamafactory.extras.callbacks - {'loss': 0.5742, 'learning_rate': 4.5557e-05, 'epoch': 6.72, 'throughput': 1001.38}
239
+
240
+ 06/09/2024 01:50:33 - INFO - llamafactory.extras.callbacks - {'loss': 0.5291, 'learning_rate': 4.5408e-05, 'epoch': 6.84, 'throughput': 1001.95}
241
+
242
+ 06/09/2024 01:52:27 - INFO - llamafactory.extras.callbacks - {'loss': 0.4968, 'learning_rate': 4.5256e-05, 'epoch': 6.96, 'throughput': 1002.60}
243
+
244
+ 06/09/2024 01:52:27 - INFO - transformers.trainer - Saving model checkpoint to saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-300
245
+
246
+ 06/09/2024 01:52:27 - INFO - transformers.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-300\config.json
247
+
248
+ 06/09/2024 01:52:27 - INFO - transformers.generation.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-300\generation_config.json
249
+
250
+ 06/09/2024 01:52:42 - INFO - transformers.modeling_utils - Model weights saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-300\model.safetensors
251
+
252
+ 06/09/2024 01:52:42 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-300\tokenizer_config.json
253
+
254
+ 06/09/2024 01:52:42 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-300\special_tokens_map.json
255
+
256
+ 06/09/2024 01:54:56 - INFO - llamafactory.extras.callbacks - {'loss': 0.4210, 'learning_rate': 4.5102e-05, 'epoch': 7.07, 'throughput': 999.94}
257
+
258
+ 06/09/2024 01:57:04 - INFO - llamafactory.extras.callbacks - {'loss': 0.2767, 'learning_rate': 4.4946e-05, 'epoch': 7.19, 'throughput': 998.07}
259
+
260
+ 06/09/2024 01:59:02 - INFO - llamafactory.extras.callbacks - {'loss': 0.2922, 'learning_rate': 4.4787e-05, 'epoch': 7.30, 'throughput': 998.79}
261
+
262
+ 06/09/2024 02:00:54 - INFO - llamafactory.extras.callbacks - {'loss': 0.2768, 'learning_rate': 4.4627e-05, 'epoch': 7.42, 'throughput': 998.57}
263
+
264
+ 06/09/2024 02:03:27 - INFO - llamafactory.extras.callbacks - {'loss': 0.3281, 'learning_rate': 4.4464e-05, 'epoch': 7.54, 'throughput': 995.98}
265
+
266
+ 06/09/2024 02:05:38 - INFO - llamafactory.extras.callbacks - {'loss': 0.3374, 'learning_rate': 4.4299e-05, 'epoch': 7.65, 'throughput': 995.98}
267
+
268
+ 06/09/2024 02:07:32 - INFO - llamafactory.extras.callbacks - {'loss': 0.2491, 'learning_rate': 4.4132e-05, 'epoch': 7.77, 'throughput': 996.31}
269
+
270
+ 06/09/2024 02:09:47 - INFO - llamafactory.extras.callbacks - {'loss': 0.3893, 'learning_rate': 4.3963e-05, 'epoch': 7.88, 'throughput': 996.21}
271
+
272
+ 06/09/2024 02:12:03 - INFO - llamafactory.extras.callbacks - {'loss': 0.3761, 'learning_rate': 4.3792e-05, 'epoch': 8.00, 'throughput': 996.14}
273
+
274
+ 06/09/2024 02:14:16 - INFO - llamafactory.extras.callbacks - {'loss': 0.2360, 'learning_rate': 4.3619e-05, 'epoch': 8.12, 'throughput': 996.45}
275
+
276
+ 06/09/2024 02:16:14 - INFO - llamafactory.extras.callbacks - {'loss': 0.1584, 'learning_rate': 4.3444e-05, 'epoch': 8.23, 'throughput': 996.43}
277
+
278
+ 06/09/2024 02:17:54 - INFO - llamafactory.extras.callbacks - {'loss': 0.1481, 'learning_rate': 4.3267e-05, 'epoch': 8.35, 'throughput': 997.27}
279
+
280
+ 06/09/2024 02:19:57 - INFO - llamafactory.extras.callbacks - {'loss': 0.1743, 'learning_rate': 4.3088e-05, 'epoch': 8.46, 'throughput': 996.84}
281
+
282
+ 06/09/2024 02:22:22 - INFO - llamafactory.extras.callbacks - {'loss': 0.3162, 'learning_rate': 4.2907e-05, 'epoch': 8.58, 'throughput': 996.95}
283
+
284
+ 06/09/2024 02:24:19 - INFO - llamafactory.extras.callbacks - {'loss': 0.2235, 'learning_rate': 4.2724e-05, 'epoch': 8.70, 'throughput': 997.40}
285
+
286
+ 06/09/2024 02:26:35 - INFO - llamafactory.extras.callbacks - {'loss': 0.2504, 'learning_rate': 4.2539e-05, 'epoch': 8.81, 'throughput': 997.16}
287
+
288
+ 06/09/2024 02:28:41 - INFO - llamafactory.extras.callbacks - {'loss': 0.2136, 'learning_rate': 4.2352e-05, 'epoch': 8.93, 'throughput': 997.85}
289
+
290
+ 06/09/2024 02:30:12 - INFO - llamafactory.extras.callbacks - {'loss': 0.1047, 'learning_rate': 4.2163e-05, 'epoch': 9.04, 'throughput': 998.27}
291
+
292
+ 06/09/2024 02:31:55 - INFO - llamafactory.extras.callbacks - {'loss': 0.1015, 'learning_rate': 4.1972e-05, 'epoch': 9.16, 'throughput': 999.00}
293
+
294
+ 06/09/2024 02:34:04 - INFO - llamafactory.extras.callbacks - {'loss': 0.1505, 'learning_rate': 4.1780e-05, 'epoch': 9.28, 'throughput': 998.92}
295
+
296
+ 06/09/2024 02:34:04 - INFO - transformers.trainer - Saving model checkpoint to saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-400
297
+
298
+ 06/09/2024 02:34:04 - INFO - transformers.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-400\config.json
299
+
300
+ 06/09/2024 02:34:04 - INFO - transformers.generation.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-400\generation_config.json
301
+
302
+ 06/09/2024 02:34:12 - INFO - transformers.modeling_utils - Model weights saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-400\model.safetensors
303
+
304
+ 06/09/2024 02:34:12 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-400\tokenizer_config.json
305
+
306
+ 06/09/2024 02:34:12 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-400\special_tokens_map.json
307
+
308
+ 06/09/2024 02:36:29 - INFO - llamafactory.extras.callbacks - {'loss': 0.1135, 'learning_rate': 4.1586e-05, 'epoch': 9.39, 'throughput': 997.11}
309
+
310
+ 06/09/2024 02:38:38 - INFO - llamafactory.extras.callbacks - {'loss': 0.1832, 'learning_rate': 4.1389e-05, 'epoch': 9.51, 'throughput': 997.17}
311
+
312
+ 06/09/2024 02:40:19 - INFO - llamafactory.extras.callbacks - {'loss': 0.1178, 'learning_rate': 4.1192e-05, 'epoch': 9.62, 'throughput': 998.04}
313
+
314
+ 06/09/2024 02:42:54 - INFO - llamafactory.extras.callbacks - {'loss': 0.1426, 'learning_rate': 4.0992e-05, 'epoch': 9.74, 'throughput': 996.34}
315
+
316
+ 06/09/2024 02:45:09 - INFO - llamafactory.extras.callbacks - {'loss': 0.1603, 'learning_rate': 4.0790e-05, 'epoch': 9.86, 'throughput': 996.80}
317
+
318
+ 06/09/2024 02:47:19 - INFO - llamafactory.extras.callbacks - {'loss': 0.1548, 'learning_rate': 4.0587e-05, 'epoch': 9.97, 'throughput': 996.01}
319
+
320
+ 06/09/2024 02:49:42 - INFO - llamafactory.extras.callbacks - {'loss': 0.1407, 'learning_rate': 4.0382e-05, 'epoch': 10.09, 'throughput': 995.97}
321
+
322
+ 06/09/2024 02:51:53 - INFO - llamafactory.extras.callbacks - {'loss': 0.0791, 'learning_rate': 4.0176e-05, 'epoch': 10.20, 'throughput': 995.82}
323
+
324
+ 06/09/2024 02:54:04 - INFO - llamafactory.extras.callbacks - {'loss': 0.0722, 'learning_rate': 3.9968e-05, 'epoch': 10.32, 'throughput': 995.11}
325
+
326
+ 06/09/2024 02:55:47 - INFO - llamafactory.extras.callbacks - {'loss': 0.0655, 'learning_rate': 3.9758e-05, 'epoch': 10.43, 'throughput': 995.82}
327
+
328
+ 06/09/2024 02:57:40 - INFO - llamafactory.extras.callbacks - {'loss': 0.0723, 'learning_rate': 3.9546e-05, 'epoch': 10.55, 'throughput': 996.49}
329
+
330
+ 06/09/2024 02:59:24 - INFO - llamafactory.extras.callbacks - {'loss': 0.0655, 'learning_rate': 3.9333e-05, 'epoch': 10.67, 'throughput': 997.33}
331
+
332
+ 06/09/2024 03:01:52 - INFO - llamafactory.extras.callbacks - {'loss': 0.1534, 'learning_rate': 3.9119e-05, 'epoch': 10.78, 'throughput': 996.60}
333
+
334
+ 06/09/2024 03:03:55 - INFO - llamafactory.extras.callbacks - {'loss': 0.0947, 'learning_rate': 3.8903e-05, 'epoch': 10.90, 'throughput': 996.89}
335
+
336
+ 06/09/2024 03:05:59 - INFO - llamafactory.extras.callbacks - {'loss': 0.0868, 'learning_rate': 3.8685e-05, 'epoch': 11.01, 'throughput': 997.29}
337
+
338
+ 06/09/2024 03:08:05 - INFO - llamafactory.extras.callbacks - {'loss': 0.0467, 'learning_rate': 3.8466e-05, 'epoch': 11.13, 'throughput': 997.52}
339
+
340
+ 06/09/2024 03:09:48 - INFO - llamafactory.extras.callbacks - {'loss': 0.0521, 'learning_rate': 3.8246e-05, 'epoch': 11.25, 'throughput': 998.15}
341
+
342
+ 06/09/2024 03:11:59 - INFO - llamafactory.extras.callbacks - {'loss': 0.0510, 'learning_rate': 3.8024e-05, 'epoch': 11.36, 'throughput': 998.66}
343
+
344
+ 06/09/2024 03:13:59 - INFO - llamafactory.extras.callbacks - {'loss': 0.0753, 'learning_rate': 3.7800e-05, 'epoch': 11.48, 'throughput': 999.10}
345
+
346
+ 06/09/2024 03:16:02 - INFO - llamafactory.extras.callbacks - {'loss': 0.0601, 'learning_rate': 3.7575e-05, 'epoch': 11.59, 'throughput': 999.57}
347
+
348
+ 06/09/2024 03:16:02 - INFO - transformers.trainer - Saving model checkpoint to saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-500
349
+
350
+ 06/09/2024 03:16:02 - INFO - transformers.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-500\config.json
351
+
352
+ 06/09/2024 03:16:02 - INFO - transformers.generation.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-500\generation_config.json
353
+
354
+ 06/09/2024 03:16:09 - INFO - transformers.modeling_utils - Model weights saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-500\model.safetensors
355
+
356
+ 06/09/2024 03:16:09 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-500\tokenizer_config.json
357
+
358
+ 06/09/2024 03:16:09 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-500\special_tokens_map.json
359
+
360
+ 06/09/2024 03:18:09 - INFO - llamafactory.extras.callbacks - {'loss': 0.0533, 'learning_rate': 3.7349e-05, 'epoch': 11.71, 'throughput': 998.61}
361
+
362
+ 06/09/2024 03:20:11 - INFO - llamafactory.extras.callbacks - {'loss': 0.0640, 'learning_rate': 3.7122e-05, 'epoch': 11.83, 'throughput': 999.10}
363
+
364
+ 06/09/2024 03:22:03 - INFO - llamafactory.extras.callbacks - {'loss': 0.0755, 'learning_rate': 3.6893e-05, 'epoch': 11.94, 'throughput': 999.59}
365
+
366
+ 06/09/2024 03:23:54 - INFO - llamafactory.extras.callbacks - {'loss': 0.0553, 'learning_rate': 3.6662e-05, 'epoch': 12.06, 'throughput': 1000.07}
367
+
368
+ 06/09/2024 03:26:08 - INFO - llamafactory.extras.callbacks - {'loss': 0.0355, 'learning_rate': 3.6431e-05, 'epoch': 12.17, 'throughput': 999.57}
369
+
370
+ 06/09/2024 03:28:14 - INFO - llamafactory.extras.callbacks - {'loss': 0.0413, 'learning_rate': 3.6198e-05, 'epoch': 12.29, 'throughput': 999.88}
371
+
372
+ 06/09/2024 03:30:05 - INFO - llamafactory.extras.callbacks - {'loss': 0.0596, 'learning_rate': 3.5964e-05, 'epoch': 12.41, 'throughput': 1000.36}
373
+
374
+ 06/09/2024 03:32:06 - INFO - llamafactory.extras.callbacks - {'loss': 0.0479, 'learning_rate': 3.5729e-05, 'epoch': 12.52, 'throughput': 1000.70}
375
+
376
+ 06/09/2024 03:34:19 - INFO - llamafactory.extras.callbacks - {'loss': 0.0470, 'learning_rate': 3.5493e-05, 'epoch': 12.64, 'throughput': 1000.73}
377
+
378
+ 06/09/2024 03:36:18 - INFO - llamafactory.extras.callbacks - {'loss': 0.0492, 'learning_rate': 3.5256e-05, 'epoch': 12.75, 'throughput': 1001.04}
379
+
380
+ 06/09/2024 03:38:16 - INFO - llamafactory.extras.callbacks - {'loss': 0.0471, 'learning_rate': 3.5017e-05, 'epoch': 12.87, 'throughput': 1001.36}
381
+
382
+ 06/09/2024 03:40:25 - INFO - llamafactory.extras.callbacks - {'loss': 0.1041, 'learning_rate': 3.4778e-05, 'epoch': 12.99, 'throughput': 1000.82}
383
+
384
+ 06/09/2024 03:42:14 - INFO - llamafactory.extras.callbacks - {'loss': 0.0282, 'learning_rate': 3.4537e-05, 'epoch': 13.10, 'throughput': 1001.50}
385
+
386
+ 06/09/2024 03:44:37 - INFO - llamafactory.extras.callbacks - {'loss': 0.0272, 'learning_rate': 3.4295e-05, 'epoch': 13.22, 'throughput': 1000.51}
387
+
388
+ 06/09/2024 03:47:00 - INFO - llamafactory.extras.callbacks - {'loss': 0.0342, 'learning_rate': 3.4053e-05, 'epoch': 13.33, 'throughput': 1000.60}
389
+
390
+ 06/09/2024 03:48:31 - INFO - llamafactory.extras.callbacks - {'loss': 0.0855, 'learning_rate': 3.3809e-05, 'epoch': 13.45, 'throughput': 1000.75}
391
+
392
+ 06/09/2024 03:50:33 - INFO - llamafactory.extras.callbacks - {'loss': 0.0378, 'learning_rate': 3.3564e-05, 'epoch': 13.57, 'throughput': 1001.03}
393
+
394
+ 06/09/2024 03:52:10 - INFO - llamafactory.extras.callbacks - {'loss': 0.0457, 'learning_rate': 3.3319e-05, 'epoch': 13.68, 'throughput': 1001.65}
395
+
396
+ 06/09/2024 03:54:42 - INFO - llamafactory.extras.callbacks - {'loss': 0.0453, 'learning_rate': 3.3072e-05, 'epoch': 13.80, 'throughput': 1001.53}
397
+
398
+ 06/09/2024 03:56:41 - INFO - llamafactory.extras.callbacks - {'loss': 0.0378, 'learning_rate': 3.2825e-05, 'epoch': 13.91, 'throughput': 1001.72}
399
+
400
+ 06/09/2024 03:56:41 - INFO - transformers.trainer - Saving model checkpoint to saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-600
401
+
402
+ 06/09/2024 03:56:41 - INFO - transformers.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-600\config.json
403
+
404
+ 06/09/2024 03:56:41 - INFO - transformers.generation.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-600\generation_config.json
405
+
406
+ 06/09/2024 03:57:01 - INFO - transformers.modeling_utils - Model weights saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-600\model.safetensors
407
+
408
+ 06/09/2024 03:57:01 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-600\tokenizer_config.json
409
+
410
+ 06/09/2024 03:57:01 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-600\special_tokens_map.json
411
+
412
+ 06/09/2024 03:59:09 - INFO - llamafactory.extras.callbacks - {'loss': 0.0296, 'learning_rate': 3.2576e-05, 'epoch': 14.03, 'throughput': 999.94}
413
+
414
+ 06/09/2024 04:01:17 - INFO - llamafactory.extras.callbacks - {'loss': 0.0216, 'learning_rate': 3.2327e-05, 'epoch': 14.14, 'throughput': 999.25}
415
+
416
+ 06/09/2024 04:03:35 - INFO - llamafactory.extras.callbacks - {'loss': 0.0299, 'learning_rate': 3.2077e-05, 'epoch': 14.26, 'throughput': 998.89}
417
+
418
+ 06/09/2024 04:05:32 - INFO - llamafactory.extras.callbacks - {'loss': 0.0247, 'learning_rate': 3.1827e-05, 'epoch': 14.38, 'throughput': 998.88}
419
+
420
+ 06/09/2024 04:08:01 - INFO - llamafactory.extras.callbacks - {'loss': 0.0455, 'learning_rate': 3.1575e-05, 'epoch': 14.49, 'throughput': 997.37}
421
+
422
+ 06/09/2024 04:09:49 - INFO - llamafactory.extras.callbacks - {'loss': 0.0262, 'learning_rate': 3.1323e-05, 'epoch': 14.61, 'throughput': 997.84}
423
+
424
+ 06/09/2024 04:11:59 - INFO - llamafactory.extras.callbacks - {'loss': 0.0281, 'learning_rate': 3.1071e-05, 'epoch': 14.72, 'throughput': 998.19}
425
+
426
+ 06/09/2024 04:13:52 - INFO - llamafactory.extras.callbacks - {'loss': 0.0294, 'learning_rate': 3.0817e-05, 'epoch': 14.84, 'throughput': 998.63}
427
+
428
+ 06/09/2024 04:15:58 - INFO - llamafactory.extras.callbacks - {'loss': 0.0291, 'learning_rate': 3.0563e-05, 'epoch': 14.96, 'throughput': 998.97}
429
+
430
+ 06/09/2024 04:18:05 - INFO - llamafactory.extras.callbacks - {'loss': 0.0216, 'learning_rate': 3.0308e-05, 'epoch': 15.07, 'throughput': 999.01}
431
+
432
+ 06/09/2024 04:20:32 - INFO - llamafactory.extras.callbacks - {'loss': 0.0218, 'learning_rate': 3.0053e-05, 'epoch': 15.19, 'throughput': 997.79}
433
+
434
+ 06/09/2024 04:22:51 - INFO - llamafactory.extras.callbacks - {'loss': 0.0283, 'learning_rate': 2.9797e-05, 'epoch': 15.30, 'throughput': 997.64}
435
+
436
+ 06/09/2024 04:24:40 - INFO - llamafactory.extras.callbacks - {'loss': 0.0259, 'learning_rate': 2.9541e-05, 'epoch': 15.42, 'throughput': 997.98}
437
+
438
+ 06/09/2024 04:26:42 - INFO - llamafactory.extras.callbacks - {'loss': 0.0716, 'learning_rate': 2.9284e-05, 'epoch': 15.54, 'throughput': 998.25}
439
+
440
+ 06/09/2024 04:29:05 - INFO - llamafactory.extras.callbacks - {'loss': 0.0250, 'learning_rate': 2.9027e-05, 'epoch': 15.65, 'throughput': 998.26}
441
+
442
+ 06/09/2024 04:31:00 - INFO - llamafactory.extras.callbacks - {'loss': 0.0218, 'learning_rate': 2.8769e-05, 'epoch': 15.77, 'throughput': 998.47}
443
+
444
+ 06/09/2024 04:32:58 - INFO - llamafactory.extras.callbacks - {'loss': 0.0231, 'learning_rate': 2.8511e-05, 'epoch': 15.88, 'throughput': 998.80}
445
+
446
+ 06/09/2024 04:34:35 - INFO - llamafactory.extras.callbacks - {'loss': 0.0228, 'learning_rate': 2.8252e-05, 'epoch': 16.00, 'throughput': 999.30}
447
+
448
+ 06/09/2024 04:36:56 - INFO - llamafactory.extras.callbacks - {'loss': 0.0290, 'learning_rate': 2.7993e-05, 'epoch': 16.12, 'throughput': 999.19}
449
+
450
+ 06/09/2024 04:39:18 - INFO - llamafactory.extras.callbacks - {'loss': 0.0199, 'learning_rate': 2.7734e-05, 'epoch': 16.23, 'throughput': 999.18}
451
+
452
+ 06/09/2024 04:39:18 - INFO - transformers.trainer - Saving model checkpoint to saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-700
453
+
454
+ 06/09/2024 04:39:18 - INFO - transformers.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-700\config.json
455
+
456
+ 06/09/2024 04:39:18 - INFO - transformers.generation.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-700\generation_config.json
457
+
458
+ 06/09/2024 04:39:31 - INFO - transformers.modeling_utils - Model weights saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-700\model.safetensors
459
+
460
+ 06/09/2024 04:39:31 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-700\tokenizer_config.json
461
+
462
+ 06/09/2024 04:39:31 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-700\special_tokens_map.json
463
+
464
+ 06/09/2024 04:41:37 - INFO - llamafactory.extras.callbacks - {'loss': 0.0259, 'learning_rate': 2.7475e-05, 'epoch': 16.35, 'throughput': 997.81}
465
+
466
+ 06/09/2024 04:43:43 - INFO - llamafactory.extras.callbacks - {'loss': 0.0210, 'learning_rate': 2.7215e-05, 'epoch': 16.46, 'throughput': 997.83}
467
+
468
+ 06/09/2024 04:45:37 - INFO - llamafactory.extras.callbacks - {'loss': 0.0199, 'learning_rate': 2.6955e-05, 'epoch': 16.58, 'throughput': 998.23}
469
+
470
+ 06/09/2024 04:47:35 - INFO - llamafactory.extras.callbacks - {'loss': 0.0189, 'learning_rate': 2.6695e-05, 'epoch': 16.70, 'throughput': 998.55}
471
+
472
+ 06/09/2024 04:49:15 - INFO - llamafactory.extras.callbacks - {'loss': 0.0169, 'learning_rate': 2.6434e-05, 'epoch': 16.81, 'throughput': 998.93}
473
+
474
+ 06/09/2024 04:51:09 - INFO - llamafactory.extras.callbacks - {'loss': 0.0209, 'learning_rate': 2.6174e-05, 'epoch': 16.93, 'throughput': 999.34}
475
+
476
+ 06/09/2024 04:53:23 - INFO - llamafactory.extras.callbacks - {'loss': 0.0201, 'learning_rate': 2.5913e-05, 'epoch': 17.04, 'throughput': 999.59}
477
+
478
+ 06/09/2024 04:55:31 - INFO - llamafactory.extras.callbacks - {'loss': 0.0170, 'learning_rate': 2.5652e-05, 'epoch': 17.16, 'throughput': 999.09}
479
+
480
+ 06/09/2024 04:57:15 - INFO - llamafactory.extras.callbacks - {'loss': 0.0187, 'learning_rate': 2.5391e-05, 'epoch': 17.28, 'throughput': 999.36}
481
+
482
+ 06/09/2024 04:59:04 - INFO - llamafactory.extras.callbacks - {'loss': 0.0242, 'learning_rate': 2.5130e-05, 'epoch': 17.39, 'throughput': 999.72}
483
+
484
+ 06/09/2024 05:01:05 - INFO - llamafactory.extras.callbacks - {'loss': 0.0153, 'learning_rate': 2.4870e-05, 'epoch': 17.51, 'throughput': 1000.04}
485
+
486
+ 06/09/2024 05:03:20 - INFO - llamafactory.extras.callbacks - {'loss': 0.0182, 'learning_rate': 2.4609e-05, 'epoch': 17.62, 'throughput': 1000.12}
487
+
488
+ 06/09/2024 05:05:14 - INFO - llamafactory.extras.callbacks - {'loss': 0.0143, 'learning_rate': 2.4348e-05, 'epoch': 17.74, 'throughput': 1000.34}
489
+
490
+ 06/09/2024 05:07:25 - INFO - llamafactory.extras.callbacks - {'loss': 0.0164, 'learning_rate': 2.4087e-05, 'epoch': 17.86, 'throughput': 1000.46}
491
+
492
+ 06/09/2024 05:09:29 - INFO - llamafactory.extras.callbacks - {'loss': 0.0157, 'learning_rate': 2.3826e-05, 'epoch': 17.97, 'throughput': 1000.43}
493
+
494
+ 06/09/2024 05:11:39 - INFO - llamafactory.extras.callbacks - {'loss': 0.0304, 'learning_rate': 2.3566e-05, 'epoch': 18.09, 'throughput': 1000.34}
495
+
496
+ 06/09/2024 05:14:00 - INFO - llamafactory.extras.callbacks - {'loss': 0.0090, 'learning_rate': 2.3305e-05, 'epoch': 18.20, 'throughput': 1000.45}
497
+
498
+ 06/09/2024 05:15:24 - INFO - llamafactory.extras.callbacks - {'loss': 0.0105, 'learning_rate': 2.3045e-05, 'epoch': 18.32, 'throughput': 1000.89}
499
+
500
+ 06/09/2024 05:17:37 - INFO - llamafactory.extras.callbacks - {'loss': 0.0104, 'learning_rate': 2.2785e-05, 'epoch': 18.43, 'throughput': 1000.55}
501
+
502
+ 06/09/2024 05:19:46 - INFO - llamafactory.extras.callbacks - {'loss': 0.0103, 'learning_rate': 2.2525e-05, 'epoch': 18.55, 'throughput': 1000.55}
503
+
504
+ 06/09/2024 05:19:46 - INFO - transformers.trainer - Saving model checkpoint to saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-800
505
+
506
+ 06/09/2024 05:19:46 - INFO - transformers.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-800\config.json
507
+
508
+ 06/09/2024 05:19:46 - INFO - transformers.generation.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-800\generation_config.json
509
+
510
+ 06/09/2024 05:19:58 - INFO - transformers.modeling_utils - Model weights saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-800\model.safetensors
511
+
512
+ 06/09/2024 05:19:58 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-800\tokenizer_config.json
513
+
514
+ 06/09/2024 05:19:58 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-800\special_tokens_map.json
515
+
516
+ 06/09/2024 05:22:27 - INFO - llamafactory.extras.callbacks - {'loss': 0.0107, 'learning_rate': 2.2266e-05, 'epoch': 18.67, 'throughput': 999.47}
517
+
518
+ 06/09/2024 05:24:25 - INFO - llamafactory.extras.callbacks - {'loss': 0.0269, 'learning_rate': 2.2007e-05, 'epoch': 18.78, 'throughput': 999.49}
519
+
520
+ 06/09/2024 05:26:24 - INFO - llamafactory.extras.callbacks - {'loss': 0.0086, 'learning_rate': 2.1748e-05, 'epoch': 18.90, 'throughput': 999.70}
521
+
522
+ 06/09/2024 05:28:13 - INFO - llamafactory.extras.callbacks - {'loss': 0.0133, 'learning_rate': 2.1489e-05, 'epoch': 19.01, 'throughput': 999.96}
523
+
524
+ 06/09/2024 05:29:46 - INFO - llamafactory.extras.callbacks - {'loss': 0.0114, 'learning_rate': 2.1231e-05, 'epoch': 19.13, 'throughput': 1000.41}
525
+
526
+ 06/09/2024 05:31:50 - INFO - llamafactory.extras.callbacks - {'loss': 0.0070, 'learning_rate': 2.0973e-05, 'epoch': 19.25, 'throughput': 1000.64}
527
+
528
+ 06/09/2024 05:33:47 - INFO - llamafactory.extras.callbacks - {'loss': 0.0088, 'learning_rate': 2.0716e-05, 'epoch': 19.36, 'throughput': 1000.77}
529
+
530
+ 06/09/2024 05:36:12 - INFO - llamafactory.extras.callbacks - {'loss': 0.0070, 'learning_rate': 2.0459e-05, 'epoch': 19.48, 'throughput': 1000.49}
531
+
532
+ 06/09/2024 05:37:59 - INFO - llamafactory.extras.callbacks - {'loss': 0.0059, 'learning_rate': 2.0203e-05, 'epoch': 19.59, 'throughput': 1000.78}
533
+
534
+ 06/09/2024 05:39:59 - INFO - llamafactory.extras.callbacks - {'loss': 0.0069, 'learning_rate': 1.9947e-05, 'epoch': 19.71, 'throughput': 1000.99}
535
+
536
+ 06/09/2024 05:42:18 - INFO - llamafactory.extras.callbacks - {'loss': 0.0080, 'learning_rate': 1.9692e-05, 'epoch': 19.83, 'throughput': 1001.10}
537
+
538
+ 06/09/2024 05:44:15 - INFO - llamafactory.extras.callbacks - {'loss': 0.0127, 'learning_rate': 1.9437e-05, 'epoch': 19.94, 'throughput': 1001.23}
539
+
540
+ 06/09/2024 05:46:30 - INFO - llamafactory.extras.callbacks - {'loss': 0.0059, 'learning_rate': 1.9183e-05, 'epoch': 20.06, 'throughput': 1001.07}
541
+
542
+ 06/09/2024 05:48:17 - INFO - llamafactory.extras.callbacks - {'loss': 0.0046, 'learning_rate': 1.8929e-05, 'epoch': 20.17, 'throughput': 1001.07}
543
+
544
+ 06/09/2024 05:50:34 - INFO - llamafactory.extras.callbacks - {'loss': 0.0058, 'learning_rate': 1.8677e-05, 'epoch': 20.29, 'throughput': 1001.15}
545
+
546
+ 06/09/2024 05:52:48 - INFO - llamafactory.extras.callbacks - {'loss': 0.0082, 'learning_rate': 1.8425e-05, 'epoch': 20.41, 'throughput': 1000.86}
547
+
548
+ 06/09/2024 05:54:46 - INFO - llamafactory.extras.callbacks - {'loss': 0.0148, 'learning_rate': 1.8173e-05, 'epoch': 20.52, 'throughput': 1001.09}
549
+
550
+ 06/09/2024 05:56:27 - INFO - llamafactory.extras.callbacks - {'loss': 0.0064, 'learning_rate': 1.7923e-05, 'epoch': 20.64, 'throughput': 1001.43}
551
+
552
+ 06/09/2024 05:58:37 - INFO - llamafactory.extras.callbacks - {'loss': 0.0080, 'learning_rate': 1.7673e-05, 'epoch': 20.75, 'throughput': 1001.37}
553
+
554
+ 06/09/2024 06:00:59 - INFO - llamafactory.extras.callbacks - {'loss': 0.0053, 'learning_rate': 1.7424e-05, 'epoch': 20.87, 'throughput': 1001.39}
555
+
556
+ 06/09/2024 06:00:59 - INFO - transformers.trainer - Saving model checkpoint to saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-900
557
+
558
+ 06/09/2024 06:00:59 - INFO - transformers.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-900\config.json
559
+
560
+ 06/09/2024 06:00:59 - INFO - transformers.generation.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-900\generation_config.json
561
+
562
+ 06/09/2024 06:01:08 - INFO - transformers.modeling_utils - Model weights saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-900\model.safetensors
563
+
564
+ 06/09/2024 06:01:08 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-900\tokenizer_config.json
565
+
566
+ 06/09/2024 06:01:08 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-900\special_tokens_map.json
567
+
568
+ 06/09/2024 06:03:11 - INFO - llamafactory.extras.callbacks - {'loss': 0.0054, 'learning_rate': 1.7175e-05, 'epoch': 20.99, 'throughput': 1000.86}
569
+
570
+ 06/09/2024 06:05:26 - INFO - llamafactory.extras.callbacks - {'loss': 0.0044, 'learning_rate': 1.6928e-05, 'epoch': 21.10, 'throughput': 1000.89}
571
+
572
+ 06/09/2024 06:07:35 - INFO - llamafactory.extras.callbacks - {'loss': 0.0043, 'learning_rate': 1.6681e-05, 'epoch': 21.22, 'throughput': 1000.65}
573
+
574
+ 06/09/2024 06:09:39 - INFO - llamafactory.extras.callbacks - {'loss': 0.0035, 'learning_rate': 1.6436e-05, 'epoch': 21.33, 'throughput': 1000.81}
575
+
576
+ 06/09/2024 06:11:17 - INFO - llamafactory.extras.callbacks - {'loss': 0.0044, 'learning_rate': 1.6191e-05, 'epoch': 21.45, 'throughput': 1001.21}
577
+
578
+ 06/09/2024 06:13:17 - INFO - llamafactory.extras.callbacks - {'loss': 0.0178, 'learning_rate': 1.5947e-05, 'epoch': 21.57, 'throughput': 1001.45}
579
+
580
+ 06/09/2024 06:14:58 - INFO - llamafactory.extras.callbacks - {'loss': 0.0089, 'learning_rate': 1.5705e-05, 'epoch': 21.68, 'throughput': 1001.76}
581
+
582
+ 06/09/2024 06:17:41 - INFO - llamafactory.extras.callbacks - {'loss': 0.0040, 'learning_rate': 1.5463e-05, 'epoch': 21.80, 'throughput': 1001.09}
583
+
584
+ 06/09/2024 06:19:50 - INFO - llamafactory.extras.callbacks - {'loss': 0.0040, 'learning_rate': 1.5222e-05, 'epoch': 21.91, 'throughput': 1000.93}
585
+
586
+ 06/09/2024 06:21:39 - INFO - llamafactory.extras.callbacks - {'loss': 0.0034, 'learning_rate': 1.4983e-05, 'epoch': 22.03, 'throughput': 1001.01}
587
+
588
+ 06/09/2024 06:23:20 - INFO - llamafactory.extras.callbacks - {'loss': 0.0066, 'learning_rate': 1.4744e-05, 'epoch': 22.14, 'throughput': 1001.40}
+ 06/09/2024 06:25:22 - INFO - llamafactory.extras.callbacks - {'loss': 0.0034, 'learning_rate': 1.4507e-05, 'epoch': 22.26, 'throughput': 1001.54}
+ 06/09/2024 06:27:14 - INFO - llamafactory.extras.callbacks - {'loss': 0.0032, 'learning_rate': 1.4271e-05, 'epoch': 22.38, 'throughput': 1001.84}
+ 06/09/2024 06:29:01 - INFO - llamafactory.extras.callbacks - {'loss': 0.0124, 'learning_rate': 1.4036e-05, 'epoch': 22.49, 'throughput': 1002.12}
+ 06/09/2024 06:31:31 - INFO - llamafactory.extras.callbacks - {'loss': 0.0027, 'learning_rate': 1.3802e-05, 'epoch': 22.61, 'throughput': 1001.67}
+ 06/09/2024 06:33:18 - INFO - llamafactory.extras.callbacks - {'loss': 0.0037, 'learning_rate': 1.3569e-05, 'epoch': 22.72, 'throughput': 1001.90}
+ 06/09/2024 06:35:28 - INFO - llamafactory.extras.callbacks - {'loss': 0.0029, 'learning_rate': 1.3338e-05, 'epoch': 22.84, 'throughput': 1001.88}
+ 06/09/2024 06:38:19 - INFO - llamafactory.extras.callbacks - {'loss': 0.0035, 'learning_rate': 1.3107e-05, 'epoch': 22.96, 'throughput': 1001.00}
+ 06/09/2024 06:39:55 - INFO - llamafactory.extras.callbacks - {'loss': 0.0020, 'learning_rate': 1.2878e-05, 'epoch': 23.07, 'throughput': 1001.32}
+ 06/09/2024 06:41:50 - INFO - llamafactory.extras.callbacks - {'loss': 0.0023, 'learning_rate': 1.2651e-05, 'epoch': 23.19, 'throughput': 1001.57}
+ 06/09/2024 06:41:50 - INFO - transformers.trainer - Saving model checkpoint to saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1000
+ 06/09/2024 06:41:50 - INFO - transformers.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1000\config.json
+ 06/09/2024 06:41:50 - INFO - transformers.generation.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1000\generation_config.json
+ 06/09/2024 06:42:07 - INFO - transformers.modeling_utils - Model weights saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1000\model.safetensors
+ 06/09/2024 06:42:07 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1000\tokenizer_config.json
+ 06/09/2024 06:42:07 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1000\special_tokens_map.json
+ 06/09/2024 06:44:12 - INFO - llamafactory.extras.callbacks - {'loss': 0.0031, 'learning_rate': 1.2425e-05, 'epoch': 23.30, 'throughput': 1000.67}
+ 06/09/2024 06:46:33 - INFO - llamafactory.extras.callbacks - {'loss': 0.0027, 'learning_rate': 1.2200e-05, 'epoch': 23.42, 'throughput': 1000.50}
+ 06/09/2024 06:48:36 - INFO - llamafactory.extras.callbacks - {'loss': 0.0124, 'learning_rate': 1.1976e-05, 'epoch': 23.54, 'throughput': 1000.70}
+ 06/09/2024 06:50:18 - INFO - llamafactory.extras.callbacks - {'loss': 0.0023, 'learning_rate': 1.1754e-05, 'epoch': 23.65, 'throughput': 1001.02}
+ 06/09/2024 06:52:50 - INFO - llamafactory.extras.callbacks - {'loss': 0.0027, 'learning_rate': 1.1534e-05, 'epoch': 23.77, 'throughput': 1000.96}
+ 06/09/2024 06:54:48 - INFO - llamafactory.extras.callbacks - {'loss': 0.0041, 'learning_rate': 1.1315e-05, 'epoch': 23.88, 'throughput': 1001.18}
+ 06/09/2024 06:56:41 - INFO - llamafactory.extras.callbacks - {'loss': 0.0021, 'learning_rate': 1.1097e-05, 'epoch': 24.00, 'throughput': 1001.39}
+ 06/09/2024 06:58:29 - INFO - llamafactory.extras.callbacks - {'loss': 0.0039, 'learning_rate': 1.0881e-05, 'epoch': 24.12, 'throughput': 1001.66}
+ 06/09/2024 07:00:50 - INFO - llamafactory.extras.callbacks - {'loss': 0.0025, 'learning_rate': 1.0667e-05, 'epoch': 24.23, 'throughput': 1001.60}
+ 06/09/2024 07:02:55 - INFO - llamafactory.extras.callbacks - {'loss': 0.0020, 'learning_rate': 1.0454e-05, 'epoch': 24.35, 'throughput': 1001.79}
+ 06/09/2024 07:05:24 - INFO - llamafactory.extras.callbacks - {'loss': 0.0024, 'learning_rate': 1.0242e-05, 'epoch': 24.46, 'throughput': 1001.71}
+ 06/09/2024 07:07:13 - INFO - llamafactory.extras.callbacks - {'loss': 0.0031, 'learning_rate': 1.0032e-05, 'epoch': 24.58, 'throughput': 1001.93}
+ 06/09/2024 07:09:07 - INFO - llamafactory.extras.callbacks - {'loss': 0.0022, 'learning_rate': 9.8241e-06, 'epoch': 24.70, 'throughput': 1002.12}
+ 06/09/2024 07:10:53 - INFO - llamafactory.extras.callbacks - {'loss': 0.0022, 'learning_rate': 9.6176e-06, 'epoch': 24.81, 'throughput': 1002.46}
+ 06/09/2024 07:13:23 - INFO - llamafactory.extras.callbacks - {'loss': 0.0109, 'learning_rate': 9.4128e-06, 'epoch': 24.93, 'throughput': 1001.58}
+ 06/09/2024 07:15:05 - INFO - llamafactory.extras.callbacks - {'loss': 0.0019, 'learning_rate': 9.2096e-06, 'epoch': 25.04, 'throughput': 1001.78}
+ 06/09/2024 07:16:58 - INFO - llamafactory.extras.callbacks - {'loss': 0.0016, 'learning_rate': 9.0082e-06, 'epoch': 25.16, 'throughput': 1002.01}
+ 06/09/2024 07:18:57 - INFO - llamafactory.extras.callbacks - {'loss': 0.0020, 'learning_rate': 8.8085e-06, 'epoch': 25.28, 'throughput': 1002.15}
+ 06/09/2024 07:20:46 - INFO - llamafactory.extras.callbacks - {'loss': 0.0018, 'learning_rate': 8.6106e-06, 'epoch': 25.39, 'throughput': 1002.35}
+ 06/09/2024 07:22:55 - INFO - llamafactory.extras.callbacks - {'loss': 0.0028, 'learning_rate': 8.4144e-06, 'epoch': 25.51, 'throughput': 1002.44}
+ 06/09/2024 07:22:55 - INFO - transformers.trainer - Saving model checkpoint to saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1100
+ 06/09/2024 07:22:55 - INFO - transformers.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1100\config.json
+ 06/09/2024 07:22:55 - INFO - transformers.generation.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1100\generation_config.json
+ 06/09/2024 07:23:26 - INFO - transformers.modeling_utils - Model weights saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1100\model.safetensors
+ 06/09/2024 07:23:26 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1100\tokenizer_config.json
+ 06/09/2024 07:23:26 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1100\special_tokens_map.json
+ 06/09/2024 07:27:26 - INFO - llamafactory.extras.callbacks - {'loss': 0.0021, 'learning_rate': 8.2201e-06, 'epoch': 25.62, 'throughput': 998.15}
+ 06/09/2024 07:29:14 - INFO - llamafactory.extras.callbacks - {'loss': 0.0057, 'learning_rate': 8.0276e-06, 'epoch': 25.74, 'throughput': 998.39}
+ 06/09/2024 07:31:09 - INFO - llamafactory.extras.callbacks - {'loss': 0.0067, 'learning_rate': 7.8369e-06, 'epoch': 25.86, 'throughput': 998.59}
+ 06/09/2024 07:33:09 - INFO - llamafactory.extras.callbacks - {'loss': 0.0020, 'learning_rate': 7.6481e-06, 'epoch': 25.97, 'throughput': 998.67}
+ 06/09/2024 07:35:20 - INFO - llamafactory.extras.callbacks - {'loss': 0.0021, 'learning_rate': 7.4612e-06, 'epoch': 26.09, 'throughput': 998.73}
+ 06/09/2024 07:37:21 - INFO - llamafactory.extras.callbacks - {'loss': 0.0020, 'learning_rate': 7.2763e-06, 'epoch': 26.20, 'throughput': 998.92}
+ 06/09/2024 07:39:24 - INFO - llamafactory.extras.callbacks - {'loss': 0.0017, 'learning_rate': 7.0932e-06, 'epoch': 26.32, 'throughput': 998.88}
+ 06/09/2024 07:41:36 - INFO - llamafactory.extras.callbacks - {'loss': 0.0014, 'learning_rate': 6.9121e-06, 'epoch': 26.43, 'throughput': 998.77}
+ 06/09/2024 07:43:49 - INFO - llamafactory.extras.callbacks - {'loss': 0.0022, 'learning_rate': 6.7330e-06, 'epoch': 26.55, 'throughput': 998.82}
+ 06/09/2024 07:45:54 - INFO - llamafactory.extras.callbacks - {'loss': 0.0023, 'learning_rate': 6.5558e-06, 'epoch': 26.67, 'throughput': 998.48}
+ 06/09/2024 07:47:43 - INFO - llamafactory.extras.callbacks - {'loss': 0.0028, 'learning_rate': 6.3807e-06, 'epoch': 26.78, 'throughput': 998.66}
+ 06/09/2024 07:49:50 - INFO - llamafactory.extras.callbacks - {'loss': 0.0060, 'learning_rate': 6.2076e-06, 'epoch': 26.90, 'throughput': 998.69}
+ 06/09/2024 07:51:36 - INFO - llamafactory.extras.callbacks - {'loss': 0.0045, 'learning_rate': 6.0365e-06, 'epoch': 27.01, 'throughput': 998.96}
+ 06/09/2024 07:53:40 - INFO - llamafactory.extras.callbacks - {'loss': 0.0019, 'learning_rate': 5.8675e-06, 'epoch': 27.13, 'throughput': 999.15}
+ 06/09/2024 07:55:56 - INFO - llamafactory.extras.callbacks - {'loss': 0.0020, 'learning_rate': 5.7006e-06, 'epoch': 27.25, 'throughput': 999.08}
+ 06/09/2024 07:58:18 - INFO - llamafactory.extras.callbacks - {'loss': 0.0019, 'learning_rate': 5.5358e-06, 'epoch': 27.36, 'throughput': 998.78}
+ 06/09/2024 08:00:40 - INFO - llamafactory.extras.callbacks - {'loss': 0.0060, 'learning_rate': 5.3731e-06, 'epoch': 27.48, 'throughput': 998.02}
+ 06/09/2024 08:02:32 - INFO - llamafactory.extras.callbacks - {'loss': 0.0043, 'learning_rate': 5.2126e-06, 'epoch': 27.59, 'throughput': 998.17}
+ 06/09/2024 08:04:21 - INFO - llamafactory.extras.callbacks - {'loss': 0.0019, 'learning_rate': 5.0542e-06, 'epoch': 27.71, 'throughput': 998.41}
+ 06/09/2024 08:06:12 - INFO - llamafactory.extras.callbacks - {'loss': 0.0020, 'learning_rate': 4.8980e-06, 'epoch': 27.83, 'throughput': 998.66}
+ 06/09/2024 08:06:12 - INFO - transformers.trainer - Saving model checkpoint to saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1200
+ 06/09/2024 08:06:12 - INFO - transformers.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1200\config.json
+ 06/09/2024 08:06:12 - INFO - transformers.generation.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1200\generation_config.json
+ 06/09/2024 08:06:15 - INFO - transformers.modeling_utils - Model weights saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1200\model.safetensors
+ 06/09/2024 08:06:15 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1200\tokenizer_config.json
+ 06/09/2024 08:06:15 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1200\special_tokens_map.json
+ 06/09/2024 08:08:33 - INFO - llamafactory.extras.callbacks - {'loss': 0.0022, 'learning_rate': 4.7439e-06, 'epoch': 27.94, 'throughput': 998.10}
+ 06/09/2024 08:11:12 - INFO - llamafactory.extras.callbacks - {'loss': 0.0016, 'learning_rate': 4.5921e-06, 'epoch': 28.06, 'throughput': 997.51}
+ 06/09/2024 08:13:00 - INFO - llamafactory.extras.callbacks - {'loss': 0.0033, 'learning_rate': 4.4425e-06, 'epoch': 28.17, 'throughput': 997.65}
+ 06/09/2024 08:15:26 - INFO - llamafactory.extras.callbacks - {'loss': 0.0018, 'learning_rate': 4.2952e-06, 'epoch': 28.29, 'throughput': 997.44}
+ 06/09/2024 08:17:30 - INFO - llamafactory.extras.callbacks - {'loss': 0.0018, 'learning_rate': 4.1501e-06, 'epoch': 28.41, 'throughput': 997.40}
+ 06/09/2024 08:19:21 - INFO - llamafactory.extras.callbacks - {'loss': 0.0078, 'learning_rate': 4.0072e-06, 'epoch': 28.52, 'throughput': 997.56}
+ 06/09/2024 08:21:37 - INFO - llamafactory.extras.callbacks - {'loss': 0.0019, 'learning_rate': 3.8667e-06, 'epoch': 28.64, 'throughput': 997.29}
+ 06/09/2024 08:24:01 - INFO - llamafactory.extras.callbacks - {'loss': 0.0019, 'learning_rate': 3.7284e-06, 'epoch': 28.75, 'throughput': 996.51}
+ 06/09/2024 08:25:32 - INFO - llamafactory.extras.callbacks - {'loss': 0.0027, 'learning_rate': 3.5925e-06, 'epoch': 28.87, 'throughput': 996.81}
+ 06/09/2024 08:27:46 - INFO - llamafactory.extras.callbacks - {'loss': 0.0023, 'learning_rate': 3.4589e-06, 'epoch': 28.99, 'throughput': 996.97}
+ 06/09/2024 08:30:00 - INFO - llamafactory.extras.callbacks - {'loss': 0.0029, 'learning_rate': 3.3277e-06, 'epoch': 29.10, 'throughput': 997.00}
+ 06/09/2024 08:31:58 - INFO - llamafactory.extras.callbacks - {'loss': 0.0018, 'learning_rate': 3.1988e-06, 'epoch': 29.22, 'throughput': 997.14}
+ 06/09/2024 08:34:08 - INFO - llamafactory.extras.callbacks - {'loss': 0.0018, 'learning_rate': 3.0723e-06, 'epoch': 29.33, 'throughput': 997.27}
+ 06/09/2024 08:36:06 - INFO - llamafactory.extras.callbacks - {'loss': 0.0018, 'learning_rate': 2.9481e-06, 'epoch': 29.45, 'throughput': 997.43}
+ 06/09/2024 08:38:28 - INFO - llamafactory.extras.callbacks - {'loss': 0.0019, 'learning_rate': 2.8264e-06, 'epoch': 29.57, 'throughput': 997.05}
+ 06/09/2024 08:40:32 - INFO - llamafactory.extras.callbacks - {'loss': 0.0018, 'learning_rate': 2.7071e-06, 'epoch': 29.68, 'throughput': 997.23}
+ 06/09/2024 08:42:30 - INFO - llamafactory.extras.callbacks - {'loss': 0.0023, 'learning_rate': 2.5902e-06, 'epoch': 29.80, 'throughput': 997.24}
+ 06/09/2024 08:44:29 - INFO - llamafactory.extras.callbacks - {'loss': 0.0078, 'learning_rate': 2.4758e-06, 'epoch': 29.91, 'throughput': 997.37}
+ 06/09/2024 08:46:27 - INFO - llamafactory.extras.callbacks - {'loss': 0.0018, 'learning_rate': 2.3638e-06, 'epoch': 30.03, 'throughput': 997.36}
+ 06/09/2024 08:48:26 - INFO - llamafactory.extras.callbacks - {'loss': 0.0017, 'learning_rate': 2.2543e-06, 'epoch': 30.14, 'throughput': 997.56}
+ 06/09/2024 08:48:26 - INFO - transformers.trainer - Saving model checkpoint to saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1300
+ 06/09/2024 08:48:26 - INFO - transformers.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1300\config.json
+ 06/09/2024 08:48:26 - INFO - transformers.generation.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1300\generation_config.json
+ 06/09/2024 08:48:35 - INFO - transformers.modeling_utils - Model weights saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1300\model.safetensors
+ 06/09/2024 08:48:35 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1300\tokenizer_config.json
+ 06/09/2024 08:48:35 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1300\special_tokens_map.json
+ 06/09/2024 08:50:43 - INFO - llamafactory.extras.callbacks - {'loss': 0.0071, 'learning_rate': 2.1472e-06, 'epoch': 30.26, 'throughput': 997.03}
+ 06/09/2024 08:52:25 - INFO - llamafactory.extras.callbacks - {'loss': 0.0016, 'learning_rate': 2.0427e-06, 'epoch': 30.38, 'throughput': 997.31}
+ 06/09/2024 08:54:28 - INFO - llamafactory.extras.callbacks - {'loss': 0.0018, 'learning_rate': 1.9406e-06, 'epoch': 30.49, 'throughput': 997.40}
+ 06/09/2024 08:56:57 - INFO - llamafactory.extras.callbacks - {'loss': 0.0017, 'learning_rate': 1.8411e-06, 'epoch': 30.61, 'throughput': 997.00}
+ 06/09/2024 08:59:07 - INFO - llamafactory.extras.callbacks - {'loss': 0.0017, 'learning_rate': 1.7441e-06, 'epoch': 30.72, 'throughput': 996.94}
+ 06/09/2024 09:01:08 - INFO - llamafactory.extras.callbacks - {'loss': 0.0019, 'learning_rate': 1.6496e-06, 'epoch': 30.84, 'throughput': 997.09}
+ 06/09/2024 09:03:19 - INFO - llamafactory.extras.callbacks - {'loss': 0.0018, 'learning_rate': 1.5577e-06, 'epoch': 30.96, 'throughput': 996.94}
+ 06/09/2024 09:05:11 - INFO - llamafactory.extras.callbacks - {'loss': 0.0038, 'learning_rate': 1.4683e-06, 'epoch': 31.07, 'throughput': 997.13}
+ 06/09/2024 09:06:56 - INFO - llamafactory.extras.callbacks - {'loss': 0.0052, 'learning_rate': 1.3815e-06, 'epoch': 31.19, 'throughput': 997.30}
+ 06/09/2024 09:09:13 - INFO - llamafactory.extras.callbacks - {'loss': 0.0018, 'learning_rate': 1.2972e-06, 'epoch': 31.30, 'throughput': 997.30}
+ 06/09/2024 09:10:53 - INFO - llamafactory.extras.callbacks - {'loss': 0.0019, 'learning_rate': 1.2155e-06, 'epoch': 31.42, 'throughput': 997.55}
+ 06/09/2024 09:12:40 - INFO - llamafactory.extras.callbacks - {'loss': 0.0019, 'learning_rate': 1.1365e-06, 'epoch': 31.54, 'throughput': 997.81}
+ 06/09/2024 09:14:55 - INFO - llamafactory.extras.callbacks - {'loss': 0.0017, 'learning_rate': 1.0600e-06, 'epoch': 31.65, 'throughput': 997.48}
+ 06/09/2024 09:16:55 - INFO - llamafactory.extras.callbacks - {'loss': 0.0022, 'learning_rate': 9.8612e-07, 'epoch': 31.77, 'throughput': 997.57}
+ 06/09/2024 09:19:26 - INFO - llamafactory.extras.callbacks - {'loss': 0.0037, 'learning_rate': 9.1486e-07, 'epoch': 31.88, 'throughput': 997.25}
+ 06/09/2024 09:21:45 - INFO - llamafactory.extras.callbacks - {'loss': 0.0019, 'learning_rate': 8.4624e-07, 'epoch': 32.00, 'throughput': 997.23}
+ 06/09/2024 09:23:43 - INFO - llamafactory.extras.callbacks - {'loss': 0.0017, 'learning_rate': 7.8024e-07, 'epoch': 32.12, 'throughput': 997.32}
+ 06/09/2024 09:25:59 - INFO - llamafactory.extras.callbacks - {'loss': 0.0018, 'learning_rate': 7.1688e-07, 'epoch': 32.23, 'throughput': 997.25}
+ 06/09/2024 09:28:28 - INFO - llamafactory.extras.callbacks - {'loss': 0.0035, 'learning_rate': 6.5617e-07, 'epoch': 32.35, 'throughput': 996.95}
+ 06/09/2024 09:30:35 - INFO - llamafactory.extras.callbacks - {'loss': 0.0063, 'learning_rate': 5.9810e-07, 'epoch': 32.46, 'throughput': 996.92}
+ 06/09/2024 09:30:35 - INFO - transformers.trainer - Saving model checkpoint to saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1400
+ 06/09/2024 09:30:35 - INFO - transformers.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1400\config.json
+ 06/09/2024 09:30:35 - INFO - transformers.generation.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1400\generation_config.json
+ 06/09/2024 09:30:41 - INFO - transformers.modeling_utils - Model weights saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1400\model.safetensors
+ 06/09/2024 09:30:41 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1400\tokenizer_config.json
+ 06/09/2024 09:30:41 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1400\special_tokens_map.json
+ 06/09/2024 09:32:30 - INFO - llamafactory.extras.callbacks - {'loss': 0.0018, 'learning_rate': 5.4270e-07, 'epoch': 32.58, 'throughput': 996.79}
+ 06/09/2024 09:34:17 - INFO - llamafactory.extras.callbacks - {'loss': 0.0020, 'learning_rate': 4.8996e-07, 'epoch': 32.70, 'throughput': 997.02}
+ 06/09/2024 09:36:26 - INFO - llamafactory.extras.callbacks - {'loss': 0.0017, 'learning_rate': 4.3989e-07, 'epoch': 32.81, 'throughput': 997.12}
+ 06/09/2024 09:38:35 - INFO - llamafactory.extras.callbacks - {'loss': 0.0016, 'learning_rate': 3.9250e-07, 'epoch': 32.93, 'throughput': 997.16}
+ 06/09/2024 09:40:51 - INFO - llamafactory.extras.callbacks - {'loss': 0.0015, 'learning_rate': 3.4778e-07, 'epoch': 33.04, 'throughput': 997.07}
+ 06/09/2024 09:42:47 - INFO - llamafactory.extras.callbacks - {'loss': 0.0057, 'learning_rate': 3.0575e-07, 'epoch': 33.16, 'throughput': 997.25}
+ 06/09/2024 09:45:07 - INFO - llamafactory.extras.callbacks - {'loss': 0.0016, 'learning_rate': 2.6642e-07, 'epoch': 33.28, 'throughput': 996.81}
+ 06/09/2024 09:46:46 - INFO - llamafactory.extras.callbacks - {'loss': 0.0016, 'learning_rate': 2.2977e-07, 'epoch': 33.39, 'throughput': 997.10}
+ 06/09/2024 09:48:52 - INFO - llamafactory.extras.callbacks - {'loss': 0.0019, 'learning_rate': 1.9583e-07, 'epoch': 33.51, 'throughput': 997.13}
+ 06/09/2024 09:50:50 - INFO - llamafactory.extras.callbacks - {'loss': 0.0017, 'learning_rate': 1.6458e-07, 'epoch': 33.62, 'throughput': 997.16}
+ 06/09/2024 09:52:42 - INFO - llamafactory.extras.callbacks - {'loss': 0.0042, 'learning_rate': 1.3604e-07, 'epoch': 33.74, 'throughput': 997.29}
+ 06/09/2024 09:54:48 - INFO - llamafactory.extras.callbacks - {'loss': 0.0017, 'learning_rate': 1.1022e-07, 'epoch': 33.86, 'throughput': 997.47}
+ 06/09/2024 09:57:05 - INFO - llamafactory.extras.callbacks - {'loss': 0.0017, 'learning_rate': 8.7097e-08, 'epoch': 33.97, 'throughput': 997.29}
+ 06/09/2024 09:59:36 - INFO - llamafactory.extras.callbacks - {'loss': 0.0015, 'learning_rate': 6.6693e-08, 'epoch': 34.09, 'throughput': 997.05}
+ 06/09/2024 10:01:03 - INFO - llamafactory.extras.callbacks - {'loss': 0.0018, 'learning_rate': 4.9005e-08, 'epoch': 34.20, 'throughput': 997.33}
+ 06/09/2024 10:03:24 - INFO - llamafactory.extras.callbacks - {'loss': 0.0024, 'learning_rate': 3.4034e-08, 'epoch': 34.32, 'throughput': 997.25}
+ 06/09/2024 10:05:29 - INFO - llamafactory.extras.callbacks - {'loss': 0.0021, 'learning_rate': 2.1784e-08, 'epoch': 34.43, 'throughput': 997.41}
+ 06/09/2024 10:07:24 - INFO - llamafactory.extras.callbacks - {'loss': 0.0035, 'learning_rate': 1.2254e-08, 'epoch': 34.55, 'throughput': 997.57}
+ 06/09/2024 10:09:26 - INFO - llamafactory.extras.callbacks - {'loss': 0.0016, 'learning_rate': 5.4465e-09, 'epoch': 34.67, 'throughput': 997.75}
+ 06/09/2024 10:11:31 - INFO - llamafactory.extras.callbacks - {'loss': 0.0015, 'learning_rate': 1.3617e-09, 'epoch': 34.78, 'throughput': 997.77}
+ 06/09/2024 10:11:31 - INFO - transformers.trainer - Saving model checkpoint to saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1500
+ 06/09/2024 10:11:31 - INFO - transformers.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1500\config.json
+ 06/09/2024 10:11:31 - INFO - transformers.generation.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1500\generation_config.json
+ 06/09/2024 10:11:39 - INFO - transformers.modeling_utils - Model weights saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1500\model.safetensors
+ 06/09/2024 10:11:39 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1500\tokenizer_config.json
+ 06/09/2024 10:11:39 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\checkpoint-1500\special_tokens_map.json
+ 06/09/2024 10:13:48 - INFO - llamafactory.extras.callbacks - {'loss': 0.0053, 'learning_rate': 0.0000e+00, 'epoch': 34.90, 'throughput': 997.38}
+ 06/09/2024 10:13:48 - INFO - transformers.trainer -
+
+ Training completed. Do not forget to share your model on huggingface.co/models =)
+
+ 06/09/2024 10:13:48 - INFO - transformers.trainer - Saving model checkpoint to saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14
+ 06/09/2024 10:13:48 - INFO - transformers.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\config.json
+ 06/09/2024 10:13:48 - INFO - transformers.generation.configuration_utils - Configuration saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\generation_config.json
+ 06/09/2024 10:13:53 - INFO - transformers.modeling_utils - Model weights saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\model.safetensors
+ 06/09/2024 10:13:53 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\tokenizer_config.json
+ 06/09/2024 10:13:53 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14\special_tokens_map.json
+ 06/09/2024 10:13:53 - WARNING - llamafactory.extras.ploting - No metric eval_loss to plot.
+ 06/09/2024 10:13:53 - INFO - transformers.modelcard - Dropping the following result as it does not have all the necessary fields:
+ {'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}}
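The running log ends with the final weights written to `saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14`, i.e. the files uploaded in this commit. Below is a minimal inference sketch, assuming you load from that local directory (substitute this repo's Hub id after upload); the `Human:`/`Assistant:` prompt format follows the chat template in `tokenizer_config.json` further down.

```python
# Minimal sketch: load the fine-tuned Qwen2-0.5B checkpoint and generate one reply.
# Assumption: the local save directory from the log above is available; otherwise
# pass the Hub repo id of this upload to from_pretrained.
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = r"saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14"
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt)

# The bundled chat template renders a user turn as "Human: ...\nAssistant: ".
prompt = "Human: Hello, please introduce yourself.\nAssistant: "
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=128)
# Decode only the newly generated tokens, dropping the prompt.
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
```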
special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
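Note that `eos_token` and `pad_token` are the same `<|endoftext|>` token (id 151643 per `added_tokens.json`), so padded batches end on the regular end-of-text id rather than a dedicated pad token. A quick sanity check, assuming the tokenizer files from this commit:

```python
# Sketch: confirm padding reuses the end-of-text token (id 151643).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(r"saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14")  # or this repo id
assert tok.pad_token == tok.eos_token == "<|endoftext|>"
assert tok.pad_token_id == tok.eos_token_id == 151643
```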
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": null,
+   "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message + '\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ 'Human: ' + content + '\nAssistant: ' }}{% elif message['role'] == 'assistant' %}{{ content + '<|endoftext|>' + '\n' }}{% endif %}{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "model_max_length": 32768,
+   "pad_token": "<|endoftext|>",
+   "padding_side": "right",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
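The `chat_template` above is what `tokenizer.apply_chat_template` renders: an optional leading system line, user turns prefixed with `Human: `, and assistant turns terminated with `<|endoftext|>`. A small sketch of the rendered prompt string (string output only, no tokenization), again assuming the files from this commit:

```python
# Sketch: render a conversation with the chat template shown above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(r"saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14")  # or this repo id
messages = [
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello!"},
    {"role": "user", "content": "Tell me a story."},
]
print(tok.apply_chat_template(messages, tokenize=False))
# Expected rendering per the template:
# Human: Hi
# Assistant: Hello!<|endoftext|>
# Human: Tell me a story.
# Assistant: 
```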
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 34.89855072463768,
+   "num_input_tokens_seen": 37412688,
+   "total_flos": 8.033958240027034e+16,
+   "train_loss": 0.3889684765070578,
+   "train_runtime": 37510.9602,
+   "train_samples_per_second": 0.322,
+   "train_steps_per_second": 0.04
+ }
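These aggregates are consistent with the running log: `num_input_tokens_seen / train_runtime` reproduces the ~997 tokens/s throughput logged at the end, and `train_steps_per_second * train_runtime` recovers the ~1500 optimizer steps of the 1505-step schedule. Note that `train_loss` (0.389) is the average over all steps of all 35 epochs, not the final per-step loss (~0.002). A quick cross-check using only the values above:

```python
# Cross-check of train_results.json (values copied from the file above).
runtime_s = 37510.9602    # train_runtime, seconds
tokens = 37412688         # num_input_tokens_seen
steps_per_s = 0.04        # train_steps_per_second

print(tokens / runtime_s)       # ~997 tokens/s, matches the log's final 'throughput'
print(steps_per_s * runtime_s)  # ~1500 steps, consistent with the 1505-step schedule
print(runtime_s / 3600)         # ~10.4 hours of wall-clock training
```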
trainer_log.jsonl ADDED
@@ -0,0 +1,302 @@
+ {"current_steps": 5, "total_steps": 1505, "loss": 3.8116, "learning_rate": 4.999863832700438e-05, "epoch": 0.11594202898550725, "percentage": 0.33, "elapsed_time": "0:01:41", "remaining_time": "8:27:54", "throughput": "1052.64", "total_tokens": 106929}
+ {"current_steps": 10, "total_steps": 1505, "loss": 3.6928, "learning_rate": 4.999455345634978e-05, "epoch": 0.2318840579710145, "percentage": 0.66, "elapsed_time": "0:03:33", "remaining_time": "8:51:03", "throughput": "1060.20", "total_tokens": 225964}
+ {"current_steps": 15, "total_steps": 1505, "loss": 3.6227, "learning_rate": 4.9987745833016855e-05, "epoch": 0.34782608695652173, "percentage": 1.0, "elapsed_time": "0:05:53", "remaining_time": "9:45:33", "throughput": "1024.22", "total_tokens": 362264}
+ {"current_steps": 20, "total_steps": 1505, "loss": 3.601, "learning_rate": 4.9978216198586135e-05, "epoch": 0.463768115942029, "percentage": 1.33, "elapsed_time": "0:07:42", "remaining_time": "9:32:27", "throughput": "1032.88", "total_tokens": 477807}
+ {"current_steps": 25, "total_steps": 1505, "loss": 3.539, "learning_rate": 4.996596559115731e-05, "epoch": 0.5797101449275363, "percentage": 1.66, "elapsed_time": "0:09:25", "remaining_time": "9:18:14", "throughput": "1040.84", "total_tokens": 588900}
+ {"current_steps": 30, "total_steps": 1505, "loss": 3.4956, "learning_rate": 4.995099534523607e-05, "epoch": 0.6956521739130435, "percentage": 1.99, "elapsed_time": "0:11:18", "remaining_time": "9:16:16", "throughput": "1040.12", "total_tokens": 706077}
+ {"current_steps": 35, "total_steps": 1505, "loss": 3.5044, "learning_rate": 4.9933307091588796e-05, "epoch": 0.8115942028985508, "percentage": 2.33, "elapsed_time": "0:13:53", "remaining_time": "9:43:08", "throughput": "1024.54", "total_tokens": 853504}
+ {"current_steps": 40, "total_steps": 1505, "loss": 3.4324, "learning_rate": 4.991290275706486e-05, "epoch": 0.927536231884058, "percentage": 2.66, "elapsed_time": "0:16:08", "remaining_time": "9:51:29", "throughput": "1022.18", "total_tokens": 990472}
+ {"current_steps": 45, "total_steps": 1505, "loss": 3.2542, "learning_rate": 4.988978456438678e-05, "epoch": 1.0434782608695652, "percentage": 2.99, "elapsed_time": "0:18:22", "remaining_time": "9:56:13", "throughput": "1021.10", "total_tokens": 1125870}
+ {"current_steps": 50, "total_steps": 1505, "loss": 2.9024, "learning_rate": 4.986395503190805e-05, "epoch": 1.1594202898550725, "percentage": 3.32, "elapsed_time": "0:20:21", "remaining_time": "9:52:16", "throughput": "1023.49", "total_tokens": 1249877}
+ {"current_steps": 55, "total_steps": 1505, "loss": 2.8069, "learning_rate": 4.983541697333881e-05, "epoch": 1.2753623188405796, "percentage": 3.65, "elapsed_time": "0:22:24", "remaining_time": "9:50:52", "throughput": "1022.63", "total_tokens": 1375193}
+ {"current_steps": 60, "total_steps": 1505, "loss": 2.75, "learning_rate": 4.980417349743936e-05, "epoch": 1.391304347826087, "percentage": 3.99, "elapsed_time": "0:24:19", "remaining_time": "9:45:47", "throughput": "1020.77", "total_tokens": 1489716}
+ {"current_steps": 65, "total_steps": 1505, "loss": 2.7329, "learning_rate": 4.9770228007681494e-05, "epoch": 1.5072463768115942, "percentage": 4.32, "elapsed_time": "0:26:02", "remaining_time": "9:37:00", "throughput": "1024.15", "total_tokens": 1600483}
+ {"current_steps": 70, "total_steps": 1505, "loss": 2.8212, "learning_rate": 4.973358420187776e-05, "epoch": 1.6231884057971016, "percentage": 4.65, "elapsed_time": "0:28:23", "remaining_time": "9:41:54", "throughput": "1016.55", "total_tokens": 1731315}
+ {"current_steps": 75, "total_steps": 1505, "loss": 2.7935, "learning_rate": 4.9694246071778604e-05, "epoch": 1.7391304347826086, "percentage": 4.98, "elapsed_time": "0:30:28", "remaining_time": "9:40:59", "throughput": "1016.39", "total_tokens": 1858269}
+ {"current_steps": 80, "total_steps": 1505, "loss": 2.7305, "learning_rate": 4.9652217902637596e-05, "epoch": 1.855072463768116, "percentage": 5.32, "elapsed_time": "0:32:31", "remaining_time": "9:39:17", "throughput": "1017.05", "total_tokens": 1984587}
+ {"current_steps": 85, "total_steps": 1505, "loss": 2.6482, "learning_rate": 4.9607504272744575e-05, "epoch": 1.971014492753623, "percentage": 5.65, "elapsed_time": "0:34:48", "remaining_time": "9:41:24", "throughput": "1010.18", "total_tokens": 2109391}
+ {"current_steps": 90, "total_steps": 1505, "loss": 2.4292, "learning_rate": 4.956011005292692e-05, "epoch": 2.0869565217391304, "percentage": 5.98, "elapsed_time": "0:37:08", "remaining_time": "9:43:59", "throughput": "1007.97", "total_tokens": 2246413}
+ {"current_steps": 95, "total_steps": 1505, "loss": 2.1416, "learning_rate": 4.951004040601898e-05, "epoch": 2.2028985507246377, "percentage": 6.31, "elapsed_time": "0:39:34", "remaining_time": "9:47:21", "throughput": "1005.24", "total_tokens": 2386890}
+ {"current_steps": 100, "total_steps": 1505, "loss": 2.2847, "learning_rate": 4.945730078629964e-05, "epoch": 2.318840579710145, "percentage": 6.64, "elapsed_time": "0:41:53", "remaining_time": "9:48:28", "throughput": "1003.68", "total_tokens": 2522302}
+ {"current_steps": 105, "total_steps": 1505, "loss": 2.0944, "learning_rate": 4.9401896938898185e-05, "epoch": 2.4347826086956523, "percentage": 6.98, "elapsed_time": "0:44:27", "remaining_time": "9:52:47", "throughput": "990.48", "total_tokens": 2642208}
+ {"current_steps": 110, "total_steps": 1505, "loss": 2.2862, "learning_rate": 4.934383489916843e-05, "epoch": 2.550724637681159, "percentage": 7.31, "elapsed_time": "0:46:51", "remaining_time": "9:54:19", "throughput": "988.89", "total_tokens": 2780587}
+ {"current_steps": 115, "total_steps": 1505, "loss": 2.105, "learning_rate": 4.928312099203131e-05, "epoch": 2.6666666666666665, "percentage": 7.64, "elapsed_time": "0:48:26", "remaining_time": "9:45:36", "throughput": "992.54", "total_tokens": 2885320}
+ {"current_steps": 120, "total_steps": 1505, "loss": 2.0287, "learning_rate": 4.921976183128585e-05, "epoch": 2.782608695652174, "percentage": 7.97, "elapsed_time": "0:50:19", "remaining_time": "9:40:48", "throughput": "992.56", "total_tokens": 2996923}
+ {"current_steps": 125, "total_steps": 1505, "loss": 2.0162, "learning_rate": 4.9153764318888706e-05, "epoch": 2.898550724637681, "percentage": 8.31, "elapsed_time": "0:51:54", "remaining_time": "9:33:09", "throughput": "995.97", "total_tokens": 3102391}
+ {"current_steps": 130, "total_steps": 1505, "loss": 2.2464, "learning_rate": 4.908513564420231e-05, "epoch": 3.0144927536231885, "percentage": 8.64, "elapsed_time": "0:54:01", "remaining_time": "9:31:24", "throughput": "997.54", "total_tokens": 3233443}
+ {"current_steps": 135, "total_steps": 1505, "loss": 1.6561, "learning_rate": 4.90138832832117e-05, "epoch": 3.130434782608696, "percentage": 8.97, "elapsed_time": "0:56:00", "remaining_time": "9:28:23", "throughput": "999.52", "total_tokens": 3358966}
+ {"current_steps": 140, "total_steps": 1505, "loss": 1.6113, "learning_rate": 4.894001499771015e-05, "epoch": 3.246376811594203, "percentage": 9.3, "elapsed_time": "0:58:09", "remaining_time": "9:27:05", "throughput": "1000.08", "total_tokens": 3490069}
+ {"current_steps": 145, "total_steps": 1505, "loss": 1.6235, "learning_rate": 4.886353883445363e-05, "epoch": 3.36231884057971, "percentage": 9.63, "elapsed_time": "1:00:15", "remaining_time": "9:25:14", "throughput": "998.34", "total_tokens": 3609842}
+ {"current_steps": 150, "total_steps": 1505, "loss": 1.7873, "learning_rate": 4.878446312428424e-05, "epoch": 3.4782608695652173, "percentage": 9.97, "elapsed_time": "1:02:40", "remaining_time": "9:26:06", "throughput": "997.72", "total_tokens": 3751570}
+ {"current_steps": 155, "total_steps": 1505, "loss": 1.3723, "learning_rate": 4.8702796481222714e-05, "epoch": 3.5942028985507246, "percentage": 10.3, "elapsed_time": "1:04:42", "remaining_time": "9:23:34", "throughput": "995.58", "total_tokens": 3865303}
+ {"current_steps": 160, "total_steps": 1505, "loss": 1.6512, "learning_rate": 4.861854780153004e-05, "epoch": 3.710144927536232, "percentage": 10.63, "elapsed_time": "1:06:42", "remaining_time": "9:20:49", "throughput": "997.10", "total_tokens": 3991347}
+ {"current_steps": 165, "total_steps": 1505, "loss": 1.5524, "learning_rate": 4.853172626273841e-05, "epoch": 3.8260869565217392, "percentage": 10.96, "elapsed_time": "1:08:43", "remaining_time": "9:18:06", "throughput": "997.64", "total_tokens": 4113654}
+ {"current_steps": 170, "total_steps": 1505, "loss": 1.5954, "learning_rate": 4.8442341322651385e-05, "epoch": 3.942028985507246, "percentage": 11.3, "elapsed_time": "1:10:40", "remaining_time": "9:14:57", "throughput": "999.10", "total_tokens": 4236348}
+ {"current_steps": 175, "total_steps": 1505, "loss": 1.1652, "learning_rate": 4.83504027183137e-05, "epoch": 4.057971014492754, "percentage": 11.63, "elapsed_time": "1:12:15", "remaining_time": "9:09:10", "throughput": "1001.11", "total_tokens": 4340378}
+ {"current_steps": 180, "total_steps": 1505, "loss": 1.1995, "learning_rate": 4.825592046495054e-05, "epoch": 4.173913043478261, "percentage": 11.96, "elapsed_time": "1:14:31", "remaining_time": "9:08:33", "throughput": "1000.51", "total_tokens": 4473601}
+ {"current_steps": 185, "total_steps": 1505, "loss": 0.9431, "learning_rate": 4.8158904854876555e-05, "epoch": 4.2898550724637685, "percentage": 12.29, "elapsed_time": "1:16:28", "remaining_time": "9:05:42", "throughput": "999.58", "total_tokens": 4586911}
+ {"current_steps": 190, "total_steps": 1505, "loss": 1.1136, "learning_rate": 4.805936645637463e-05, "epoch": 4.405797101449275, "percentage": 12.62, "elapsed_time": "1:18:30", "remaining_time": "9:03:20", "throughput": "998.32", "total_tokens": 4702445}
+ {"current_steps": 195, "total_steps": 1505, "loss": 1.1509, "learning_rate": 4.795731611254473e-05, "epoch": 4.521739130434782, "percentage": 12.96, "elapsed_time": "1:20:34", "remaining_time": "9:01:18", "throughput": "999.32", "total_tokens": 4831301}
+ {"current_steps": 200, "total_steps": 1505, "loss": 0.9962, "learning_rate": 4.785276494012263e-05, "epoch": 4.63768115942029, "percentage": 13.29, "elapsed_time": "1:22:18", "remaining_time": "8:57:05", "throughput": "1000.59", "total_tokens": 4941656}
+ {"current_steps": 205, "total_steps": 1505, "loss": 1.2377, "learning_rate": 4.7745724328269e-05, "epoch": 4.753623188405797, "percentage": 13.62, "elapsed_time": "1:25:08", "remaining_time": "8:59:53", "throughput": "996.13", "total_tokens": 5088437}
+ {"current_steps": 210, "total_steps": 1505, "loss": 1.234, "learning_rate": 4.763620593732867e-05, "epoch": 4.869565217391305, "percentage": 13.95, "elapsed_time": "1:27:18", "remaining_time": "8:58:21", "throughput": "996.52", "total_tokens": 5219806}
+ {"current_steps": 215, "total_steps": 1505, "loss": 1.1453, "learning_rate": 4.752422169756048e-05, "epoch": 4.9855072463768115, "percentage": 14.29, "elapsed_time": "1:29:10", "remaining_time": "8:55:03", "throughput": "998.06", "total_tokens": 5340222}
+ {"current_steps": 220, "total_steps": 1505, "loss": 0.9056, "learning_rate": 4.740978380783765e-05, "epoch": 5.101449275362318, "percentage": 14.62, "elapsed_time": "1:31:31", "remaining_time": "8:54:32", "throughput": "997.31", "total_tokens": 5476315}
+ {"current_steps": 225, "total_steps": 1505, "loss": 0.7349, "learning_rate": 4.7292904734318924e-05, "epoch": 5.217391304347826, "percentage": 14.95, "elapsed_time": "1:33:21", "remaining_time": "8:51:03", "throughput": "998.01", "total_tokens": 5589951}
+ {"current_steps": 230, "total_steps": 1505, "loss": 0.6968, "learning_rate": 4.7173597209090534e-05, "epoch": 5.333333333333333, "percentage": 15.28, "elapsed_time": "1:35:17", "remaining_time": "8:48:13", "throughput": "999.00", "total_tokens": 5711449}
+ {"current_steps": 235, "total_steps": 1505, "loss": 0.851, "learning_rate": 4.70518742287793e-05, "epoch": 5.449275362318841, "percentage": 15.61, "elapsed_time": "1:37:41", "remaining_time": "8:47:58", "throughput": "998.45", "total_tokens": 5852650}
+ {"current_steps": 240, "total_steps": 1505, "loss": 0.716, "learning_rate": 4.6927749053136866e-05, "epoch": 5.565217391304348, "percentage": 15.95, "elapsed_time": "1:39:37", "remaining_time": "8:45:03", "throughput": "999.21", "total_tokens": 5972289}
+ {"current_steps": 245, "total_steps": 1505, "loss": 0.6384, "learning_rate": 4.6801235203595195e-05, "epoch": 5.681159420289855, "percentage": 16.28, "elapsed_time": "1:41:26", "remaining_time": "8:41:40", "throughput": "1000.40", "total_tokens": 6088707}
+ {"current_steps": 250, "total_steps": 1505, "loss": 0.7799, "learning_rate": 4.667234646179368e-05, "epoch": 5.797101449275362, "percentage": 16.61, "elapsed_time": "1:43:47", "remaining_time": "8:41:00", "throughput": "998.13", "total_tokens": 6215471}
+ {"current_steps": 255, "total_steps": 1505, "loss": 0.7923, "learning_rate": 4.654109686807787e-05, "epoch": 5.913043478260869, "percentage": 16.94, "elapsed_time": "1:45:48", "remaining_time": "8:38:42", "throughput": "997.95", "total_tokens": 6335935}
+ {"current_steps": 260, "total_steps": 1505, "loss": 0.7452, "learning_rate": 4.640750071996995e-05, "epoch": 6.028985507246377, "percentage": 17.28, "elapsed_time": "1:47:52", "remaining_time": "8:36:31", "throughput": "998.69", "total_tokens": 6463689}
+ {"current_steps": 265, "total_steps": 1505, "loss": 0.4085, "learning_rate": 4.6271572570611296e-05, "epoch": 6.144927536231884, "percentage": 17.61, "elapsed_time": "1:49:37", "remaining_time": "8:32:55", "throughput": "999.98", "total_tokens": 6576954}
+ {"current_steps": 270, "total_steps": 1505, "loss": 0.5777, "learning_rate": 4.613332722717714e-05, "epoch": 6.260869565217392, "percentage": 17.94, "elapsed_time": "1:51:51", "remaining_time": "8:31:39", "throughput": "1000.40", "total_tokens": 6714404}
+ {"current_steps": 275, "total_steps": 1505, "loss": 0.4718, "learning_rate": 4.5992779749263546e-05, "epoch": 6.3768115942028984, "percentage": 18.27, "elapsed_time": "1:53:54", "remaining_time": "8:29:28", "throughput": "1000.87", "total_tokens": 6840385}
+ {"current_steps": 280, "total_steps": 1505, "loss": 0.3723, "learning_rate": 4.584994544724695e-05, "epoch": 6.492753623188406, "percentage": 18.6, "elapsed_time": "1:55:39", "remaining_time": "8:26:01", "throughput": "1002.09", "total_tokens": 6954269}
+ {"current_steps": 285, "total_steps": 1505, "loss": 0.4453, "learning_rate": 4.5704839880616296e-05, "epoch": 6.608695652173913, "percentage": 18.94, "elapsed_time": "1:57:50", "remaining_time": "8:24:25", "throughput": "1000.84", "total_tokens": 7076143}
+ {"current_steps": 290, "total_steps": 1505, "loss": 0.5742, "learning_rate": 4.5557478856278114e-05, "epoch": 6.72463768115942, "percentage": 19.27, "elapsed_time": "1:59:51", "remaining_time": "8:22:11", "throughput": "1001.38", "total_tokens": 7201833}
+ {"current_steps": 295, "total_steps": 1505, "loss": 0.5291, "learning_rate": 4.5407878426834596e-05, "epoch": 6.840579710144928, "percentage": 19.6, "elapsed_time": "2:01:56", "remaining_time": "8:20:09", "throughput": "1001.95", "total_tokens": 7330479}
+ {"current_steps": 300, "total_steps": 1505, "loss": 0.4968, "learning_rate": 4.5256054888834934e-05, "epoch": 6.956521739130435, "percentage": 19.93, "elapsed_time": "2:03:49", "remaining_time": "8:17:23", "throughput": "1002.60", "total_tokens": 7449244}
+ {"current_steps": 305, "total_steps": 1505, "loss": 0.421, "learning_rate": 4.5102024781000077e-05, "epoch": 7.072463768115942, "percentage": 20.27, "elapsed_time": "2:06:19", "remaining_time": "8:17:00", "throughput": "999.94", "total_tokens": 7578947}
+ {"current_steps": 310, "total_steps": 1505, "loss": 0.2767, "learning_rate": 4.4945804882421086e-05, "epoch": 7.188405797101449, "percentage": 20.6, "elapsed_time": "2:08:26", "remaining_time": "8:15:08", "throughput": "998.07", "total_tokens": 7691948}
+ {"current_steps": 315, "total_steps": 1505, "loss": 0.2922, "learning_rate": 4.478741221073136e-05, "epoch": 7.304347826086957, "percentage": 20.93, "elapsed_time": "2:10:25", "remaining_time": "8:12:42", "throughput": "998.79", "total_tokens": 7815786}
+ {"current_steps": 320, "total_steps": 1505, "loss": 0.2768, "learning_rate": 4.4626864020252774e-05, "epoch": 7.420289855072464, "percentage": 21.26, "elapsed_time": "2:12:16", "remaining_time": "8:09:49", "throughput": "998.57", "total_tokens": 7925106}
+ {"current_steps": 325, "total_steps": 1505, "loss": 0.3281, "learning_rate": 4.446417780011618e-05, "epoch": 7.536231884057971, "percentage": 21.59, "elapsed_time": "2:14:49", "remaining_time": "8:09:31", "throughput": "995.98", "total_tokens": 8057202}
+ {"current_steps": 330, "total_steps": 1505, "loss": 0.3374, "learning_rate": 4.42993712723562e-05, "epoch": 7.6521739130434785, "percentage": 21.93, "elapsed_time": "2:17:00", "remaining_time": "8:07:51", "throughput": "995.98", "total_tokens": 8187865}
+ {"current_steps": 335, "total_steps": 1505, "loss": 0.2491, "learning_rate": 4.413246238998069e-05, "epoch": 7.768115942028985, "percentage": 22.26, "elapsed_time": "2:18:55", "remaining_time": "8:05:11", "throughput": "996.31", "total_tokens": 8304605}
+ {"current_steps": 340, "total_steps": 1505, "loss": 0.3893, "learning_rate": 4.3963469335015085e-05, "epoch": 7.884057971014493, "percentage": 22.59, "elapsed_time": "2:21:09", "remaining_time": "8:03:40", "throughput": "996.21", "total_tokens": 8437319}
+ {"current_steps": 345, "total_steps": 1505, "loss": 0.3761, "learning_rate": 4.379241051652174e-05, "epoch": 8.0, "percentage": 22.92, "elapsed_time": "2:23:26", "remaining_time": "8:02:17", "throughput": "996.14", "total_tokens": 8573080}
+ {"current_steps": 350, "total_steps": 1505, "loss": 0.236, "learning_rate": 4.361930456859456e-05, "epoch": 8.115942028985508, "percentage": 23.26, "elapsed_time": "2:25:38", "remaining_time": "8:00:37", "throughput": "996.45", "total_tokens": 8707741}
+ {"current_steps": 355, "total_steps": 1505, "loss": 0.1584, "learning_rate": 4.34441703483291e-05, "epoch": 8.231884057971014, "percentage": 23.59, "elapsed_time": "2:27:37", "remaining_time": "7:58:12", "throughput": "996.43", "total_tokens": 8825774}
+ {"current_steps": 360, "total_steps": 1505, "loss": 0.1481, "learning_rate": 4.326702693376844e-05, "epoch": 8.347826086956522, "percentage": 23.92, "elapsed_time": "2:29:16", "remaining_time": "7:54:47", "throughput": "997.27", "total_tokens": 8932249}
+ {"current_steps": 365, "total_steps": 1505, "loss": 0.1743, "learning_rate": 4.308789362182492e-05, "epoch": 8.46376811594203, "percentage": 24.25, "elapsed_time": "2:31:20", "remaining_time": "7:52:40", "throughput": "996.84", "total_tokens": 9051548}
+ {"current_steps": 370, "total_steps": 1505, "loss": 0.3162, "learning_rate": 4.290678992617798e-05, "epoch": 8.579710144927537, "percentage": 24.58, "elapsed_time": "2:33:45", "remaining_time": "7:51:39", "throughput": "996.95", "total_tokens": 9197232}
+ {"current_steps": 375, "total_steps": 1505, "loss": 0.2235, "learning_rate": 4.272373557514858e-05, "epoch": 8.695652173913043, "percentage": 24.92, "elapsed_time": "2:35:41", "remaining_time": "7:49:10", "throughput": "997.40", "total_tokens": 9317650}
+ {"current_steps": 380, "total_steps": 1505, "loss": 0.2504, "learning_rate": 4.2538750509550054e-05, "epoch": 8.81159420289855, "percentage": 25.25, "elapsed_time": "2:37:57", "remaining_time": "7:47:39", "throughput": "997.16", "total_tokens": 9450765}
+ {"current_steps": 385, "total_steps": 1505, "loss": 0.2136, "learning_rate": 4.235185488051585e-05, "epoch": 8.927536231884059, "percentage": 25.58, "elapsed_time": "2:40:03", "remaining_time": "7:45:37", "throughput": "997.85", "total_tokens": 9582961}
+ {"current_steps": 390, "total_steps": 1505, "loss": 0.1047, "learning_rate": 4.216306904730447e-05, "epoch": 9.043478260869565, "percentage": 25.91, "elapsed_time": "2:41:35", "remaining_time": "7:41:58", "throughput": "998.27", "total_tokens": 9678616}
+ {"current_steps": 395, "total_steps": 1505, "loss": 0.1015, "learning_rate": 4.1972413575081595e-05, "epoch": 9.159420289855072, "percentage": 26.25, "elapsed_time": "2:43:18", "remaining_time": "7:38:54", "throughput": "999.00", "total_tokens": 9788512}
+ {"current_steps": 400, "total_steps": 1505, "loss": 0.1505, "learning_rate": 4.177990923267986e-05, "epoch": 9.27536231884058, "percentage": 26.58, "elapsed_time": "2:45:26", "remaining_time": "7:37:03", "throughput": "998.92", "total_tokens": 9916229}
+ {"current_steps": 405, "total_steps": 1505, "loss": 0.1135, "learning_rate": 4.158557699033644e-05, "epoch": 9.391304347826088, "percentage": 26.91, "elapsed_time": "2:47:51", "remaining_time": "7:35:55", "throughput": "997.11", "total_tokens": 10042697}
+ {"current_steps": 410, "total_steps": 1505, "loss": 0.1832, "learning_rate": 4.138943801740865e-05, "epoch": 9.507246376811594, "percentage": 27.24, "elapsed_time": "2:50:00", "remaining_time": "7:34:03", "throughput": "997.17", "total_tokens": 10171849}
+ {"current_steps": 415, "total_steps": 1505, "loss": 0.1178, "learning_rate": 4.119151368006793e-05, "epoch": 9.623188405797102, "percentage": 27.57, "elapsed_time": "2:51:42", "remaining_time": "7:30:58", "throughput": "998.04", "total_tokens": 10281924}
+ {"current_steps": 420, "total_steps": 1505, "loss": 0.1426, "learning_rate": 4.099182553897229e-05, "epoch": 9.73913043478261, "percentage": 27.91, "elapsed_time": "2:54:17", "remaining_time": "7:30:14", "throughput": "996.34", "total_tokens": 10418758}
+ {"current_steps": 425, "total_steps": 1505, "loss": 0.1603, "learning_rate": 4.079039534691767e-05, "epoch": 9.855072463768115, "percentage": 28.24, "elapsed_time": "2:56:32", "remaining_time": "7:28:36", "throughput": "996.80", "total_tokens": 10558322}
+ {"current_steps": 430, "total_steps": 1505, "loss": 0.1548, "learning_rate": 4.058724504646834e-05, "epoch": 9.971014492753623, "percentage": 28.57, "elapsed_time": "2:58:42", "remaining_time": "7:26:45", "throughput": "996.01", "total_tokens": 10679536}
+ {"current_steps": 435, "total_steps": 1505, "loss": 0.1407, "learning_rate": 4.0382396767566536e-05, "epoch": 10.08695652173913, "percentage": 28.9, "elapsed_time": "3:01:04", "remaining_time": "7:25:25", "throughput": "995.97", "total_tokens": 10821076}
+ {"current_steps": 440, "total_steps": 1505, "loss": 0.0791, "learning_rate": 4.017587282512181e-05, "epoch": 10.202898550724637, "percentage": 29.24, "elapsed_time": "3:03:15", "remaining_time": "7:23:34", "throughput": "995.82", "total_tokens": 10949771}
+ {"current_steps": 445, "total_steps": 1505, "loss": 0.0722, "learning_rate": 3.9967695716580224e-05, "epoch": 10.318840579710145, "percentage": 29.57, "elapsed_time": "3:05:26", "remaining_time": "7:21:43", "throughput": "995.11", "total_tokens": 11072044}
+ {"current_steps": 450, "total_steps": 1505, "loss": 0.0655, "learning_rate": 3.975788811947351e-05, "epoch": 10.434782608695652, "percentage": 29.9, "elapsed_time": "3:07:09", "remaining_time": "7:18:47", "throughput": "995.82", "total_tokens": 11182627}
+ {"current_steps": 455, "total_steps": 1505, "loss": 0.0723, "learning_rate": 3.954647288894883e-05, "epoch": 10.55072463768116, "percentage": 30.23, "elapsed_time": "3:09:02", "remaining_time": "7:16:15", "throughput": "996.49", "total_tokens": 11303028}
+ {"current_steps": 460, "total_steps": 1505, "loss": 0.0655, "learning_rate": 3.933347305527898e-05, "epoch": 10.666666666666666, "percentage": 30.56, "elapsed_time": "3:10:46", "remaining_time": "7:13:23", "throughput": "997.33", "total_tokens": 11415868}
+ {"current_steps": 465, "total_steps": 1505, "loss": 0.1534, "learning_rate": 3.911891182135371e-05, "epoch": 10.782608695652174, "percentage": 30.9, "elapsed_time": "3:13:15", "remaining_time": "7:12:13", "throughput": "996.60", "total_tokens": 11555653}
+ {"current_steps": 470, "total_steps": 1505, "loss": 0.0947, "learning_rate": 3.8902812560152066e-05, "epoch": 10.898550724637682, "percentage": 31.23, "elapsed_time": "3:15:17", "remaining_time": "7:10:03", "throughput": "996.89", "total_tokens": 11681065}
+ {"current_steps": 475, "total_steps": 1505, "loss": 0.0868, "learning_rate": 3.868519881219631e-05, "epoch": 11.014492753623188, "percentage": 31.56, "elapsed_time": "3:17:22", "remaining_time": "7:07:58", "throughput": "997.29", "total_tokens": 11809957}
+ {"current_steps": 480, "total_steps": 1505, "loss": 0.0467, "learning_rate": 3.846609428298757e-05, "epoch": 11.130434782608695, "percentage": 31.89, "elapsed_time": "3:19:27", "remaining_time": "7:05:55", "throughput": "997.52", "total_tokens": 11937881}
+ {"current_steps": 485, "total_steps": 1505, "loss": 0.0521, "learning_rate": 3.824552284042351e-05, "epoch": 11.246376811594203, "percentage": 32.23, "elapsed_time": "3:21:11", "remaining_time": "7:03:06", "throughput": "998.15", "total_tokens": 12048905}
+ {"current_steps": 490, "total_steps": 1505, "loss": 0.051, "learning_rate": 3.8023508512198256e-05, "epoch": 11.36231884057971, "percentage": 32.56, "elapsed_time": "3:23:21", "remaining_time": "7:01:15", "throughput": "998.66", "total_tokens": 12185453}
+ {"current_steps": 495, "total_steps": 1505, "loss": 0.0753, "learning_rate": 3.780007548318507e-05, "epoch": 11.478260869565217, "percentage": 32.89, "elapsed_time": "3:25:21", "remaining_time": "6:59:01", "throughput": "999.10", "total_tokens": 12310911}
+ {"current_steps": 500, "total_steps": 1505, "loss": 0.0601, "learning_rate": 3.7575248092801686e-05, "epoch": 11.594202898550725, "percentage": 33.22, "elapsed_time": "3:27:25", "remaining_time": "6:56:54", "throughput": "999.57", "total_tokens": 12439708}
+ {"current_steps": 505, "total_steps": 1505, "loss": 0.0533, "learning_rate": 3.734905083235901e-05, "epoch": 11.710144927536232, "percentage": 33.55, "elapsed_time": "3:29:31", "remaining_time": "6:54:54", "throughput": "998.61", "total_tokens": 12554467}
+ {"current_steps": 510, "total_steps": 1505, "loss": 0.064, "learning_rate": 3.712150834239313e-05, "epoch": 11.826086956521738, "percentage": 33.89, "elapsed_time": "3:31:33", "remaining_time": "6:52:45", "throughput": "999.10", "total_tokens": 12682329}
+ {"current_steps": 515, "total_steps": 1505, "loss": 0.0755, "learning_rate": 3.689264540998116e-05, "epoch": 11.942028985507246, "percentage": 34.22, "elapsed_time": "3:33:26", "remaining_time": "6:50:17", "throughput": "999.59", "total_tokens": 12800852}
+ {"current_steps": 520, "total_steps": 1505, "loss": 0.0553, "learning_rate": 3.66624869660411e-05, "epoch": 12.057971014492754, "percentage": 34.55, "elapsed_time": "3:35:16", "remaining_time": "6:47:47", "throughput": "1000.07", "total_tokens": 12917527}
+ {"current_steps": 525, "total_steps": 1505, "loss": 0.0355, "learning_rate": 3.6431058082615964e-05, "epoch": 12.173913043478262, "percentage": 34.88, "elapsed_time": "3:37:30", "remaining_time": "6:46:00", "throughput": "999.57", "total_tokens": 13044774}
+ {"current_steps": 530, "total_steps": 1505, "loss": 0.0413, "learning_rate": 3.619838397014263e-05, "epoch": 12.289855072463768, "percentage": 35.22, "elapsed_time": "3:39:37", "remaining_time": "6:44:01", "throughput": "999.88", "total_tokens": 13175692}
+ {"current_steps": 535, "total_steps": 1505, "loss": 0.0596, "learning_rate": 3.5964489974705553e-05, "epoch": 12.405797101449275, "percentage": 35.55, "elapsed_time": "3:41:28", "remaining_time": "6:41:32", "throughput": "1000.36", "total_tokens": 13293164}
+ {"current_steps": 540, "total_steps": 1505, "loss": 0.0479, "learning_rate": 3.572940157527572e-05, "epoch": 12.521739130434783, "percentage": 35.88, "elapsed_time": "3:43:28", "remaining_time": "6:39:21", "throughput": "1000.70", "total_tokens": 13417894}
+ {"current_steps": 545, "total_steps": 1505, "loss": 0.047, "learning_rate": 3.549314438093515e-05, "epoch": 12.63768115942029, "percentage": 36.21, "elapsed_time": "3:45:42", "remaining_time": "6:37:33", "throughput": "1000.73", "total_tokens": 13551913}
+ {"current_steps": 550, "total_steps": 1505, "loss": 0.0492, "learning_rate": 3.525574412808717e-05, "epoch": 12.753623188405797, "percentage": 36.54, "elapsed_time": "3:47:41", "remaining_time": "6:35:20", "throughput": "1001.04", "total_tokens": 13675309}
+ {"current_steps": 555, "total_steps": 1505, "loss": 0.0471, "learning_rate": 3.501722667765286e-05, "epoch": 12.869565217391305, "percentage": 36.88, "elapsed_time": "3:49:38", "remaining_time": "6:33:05", "throughput": "1001.36", "total_tokens": 13797691}
+ {"current_steps": 560, "total_steps": 1505, "loss": 0.1041, "learning_rate": 3.47776180122539e-05, "epoch": 12.985507246376812, "percentage": 37.21, "elapsed_time": "3:51:48", "remaining_time": "6:31:10", "throughput": "1000.82", "total_tokens": 13919770}
+ {"current_steps": 565, "total_steps": 1505, "loss": 0.0282, "learning_rate": 3.453694423338225e-05, "epoch": 13.101449275362318, "percentage": 37.54, "elapsed_time": "3:53:36", "remaining_time": "6:28:39", "throughput": "1001.50", "total_tokens": 14037673}
+ {"current_steps": 570, "total_steps": 1505, "loss": 0.0272, "learning_rate": 3.4295231558556715e-05, "epoch": 13.217391304347826, "percentage": 37.87, "elapsed_time": "3:55:59", "remaining_time": "6:27:07", "throughput": "1000.51", "total_tokens": 14167090}
+ {"current_steps": 575, "total_steps": 1505, "loss": 0.0342, "learning_rate": 3.4052506318467084e-05, "epoch": 13.333333333333334, "percentage": 38.21, "elapsed_time": "3:58:23", "remaining_time": "6:25:33", "throughput": "1000.60", "total_tokens": 14311710}
+ {"current_steps": 580, "total_steps": 1505, "loss": 0.0855, "learning_rate": 3.3808794954105716e-05, "epoch": 13.44927536231884, "percentage": 38.54, "elapsed_time": "3:59:53", "remaining_time": "6:22:35", "throughput": "1000.75", "total_tokens": 14404322}
+ {"current_steps": 585, "total_steps": 1505, "loss": 0.0378, "learning_rate": 3.356412401388732e-05, "epoch": 13.565217391304348, "percentage": 38.87, "elapsed_time": "4:01:55", "remaining_time": "6:20:28", "throughput": "1001.03", "total_tokens": 14530794}
+ {"current_steps": 590, "total_steps": 1505, "loss": 0.0457, "learning_rate": 3.3318520150756846e-05, "epoch": 13.681159420289855, "percentage": 39.2, "elapsed_time": "4:03:33", "remaining_time": "6:17:42", "throughput": "1001.65", "total_tokens": 14637342}
+ {"current_steps": 595, "total_steps": 1505, "loss": 0.0453, "learning_rate": 3.307201011928616e-05, "epoch": 13.797101449275363, "percentage": 39.53, "elapsed_time": "4:06:05", "remaining_time": "6:16:21", "throughput": "1001.53", "total_tokens": 14787534}
+ {"current_steps": 600, "total_steps": 1505, "loss": 0.0378, "learning_rate": 3.282462077275947e-05, "epoch": 13.91304347826087, "percentage": 39.87, "elapsed_time": "4:08:03", "remaining_time": "6:14:09", "throughput": "1001.72", "total_tokens": 14909175}
+ {"current_steps": 605, "total_steps": 1505, "loss": 0.0296, "learning_rate": 3.257637906024822e-05, "epoch": 14.028985507246377, "percentage": 40.2, "elapsed_time": "4:10:31", "remaining_time": "6:12:40", "throughput": "999.94", "total_tokens": 15030530}
+ {"current_steps": 610, "total_steps": 1505, "loss": 0.0216, "learning_rate": 3.2327312023675287e-05, "epoch": 14.144927536231885, "percentage": 40.53, "elapsed_time": "4:12:39", "remaining_time": "6:10:42", "throughput": "999.25", "total_tokens": 15148359}
+ {"current_steps": 615, "total_steps": 1505, "loss": 0.0299, "learning_rate": 3.2077446794869295e-05, "epoch": 14.26086956521739, "percentage": 40.86, "elapsed_time": "4:14:57", "remaining_time": "6:08:58", "throughput": "998.89", "total_tokens": 15280749}
124
+ {"current_steps": 620, "total_steps": 1505, "loss": 0.0247, "learning_rate": 3.1826810592609036e-05, "epoch": 14.376811594202898, "percentage": 41.2, "elapsed_time": "4:16:54", "remaining_time": "6:06:42", "throughput": "998.88", "total_tokens": 15397167}
125
+ {"current_steps": 625, "total_steps": 1505, "loss": 0.0455, "learning_rate": 3.157543071965835e-05, "epoch": 14.492753623188406, "percentage": 41.53, "elapsed_time": "4:19:23", "remaining_time": "6:05:13", "throughput": "997.37", "total_tokens": 15522794}
126
+ {"current_steps": 630, "total_steps": 1505, "loss": 0.0262, "learning_rate": 3.132333455979202e-05, "epoch": 14.608695652173914, "percentage": 41.86, "elapsed_time": "4:21:11", "remaining_time": "6:02:46", "throughput": "997.84", "total_tokens": 15637987}
127
+ {"current_steps": 635, "total_steps": 1505, "loss": 0.0281, "learning_rate": 3.107054957481271e-05, "epoch": 14.72463768115942, "percentage": 42.19, "elapsed_time": "4:23:21", "remaining_time": "6:00:49", "throughput": "998.19", "total_tokens": 15773163}
128
+ {"current_steps": 640, "total_steps": 1505, "loss": 0.0294, "learning_rate": 3.081710330155942e-05, "epoch": 14.840579710144928, "percentage": 42.52, "elapsed_time": "4:25:14", "remaining_time": "5:58:29", "throughput": "998.63", "total_tokens": 15892659}
129
+ {"current_steps": 645, "total_steps": 1505, "loss": 0.0291, "learning_rate": 3.056302334890786e-05, "epoch": 14.956521739130435, "percentage": 42.86, "elapsed_time": "4:27:21", "remaining_time": "5:56:28", "throughput": "998.97", "total_tokens": 16024576}
130
+ {"current_steps": 650, "total_steps": 1505, "loss": 0.0216, "learning_rate": 3.030833739476285e-05, "epoch": 15.072463768115941, "percentage": 43.19, "elapsed_time": "4:29:27", "remaining_time": "5:54:27", "throughput": "999.01", "total_tokens": 16151987}
131
+ {"current_steps": 655, "total_steps": 1505, "loss": 0.0218, "learning_rate": 3.0053073183043256e-05, "epoch": 15.18840579710145, "percentage": 43.52, "elapsed_time": "4:31:54", "remaining_time": "5:52:51", "throughput": "997.79", "total_tokens": 16278639}
132
+ {"current_steps": 660, "total_steps": 1505, "loss": 0.0283, "learning_rate": 2.979725852065981e-05, "epoch": 15.304347826086957, "percentage": 43.85, "elapsed_time": "4:34:13", "remaining_time": "5:51:05", "throughput": "997.64", "total_tokens": 16414743}
133
+ {"current_steps": 665, "total_steps": 1505, "loss": 0.0259, "learning_rate": 2.954092127448591e-05, "epoch": 15.420289855072463, "percentage": 44.19, "elapsed_time": "4:36:02", "remaining_time": "5:48:41", "throughput": "997.98", "total_tokens": 16529298}
134
+ {"current_steps": 670, "total_steps": 1505, "loss": 0.0716, "learning_rate": 2.9284089368322045e-05, "epoch": 15.53623188405797, "percentage": 44.52, "elapsed_time": "4:38:05", "remaining_time": "5:46:34", "throughput": "998.25", "total_tokens": 16655909}
135
+ {"current_steps": 675, "total_steps": 1505, "loss": 0.025, "learning_rate": 2.9026790779853874e-05, "epoch": 15.652173913043478, "percentage": 44.85, "elapsed_time": "4:40:27", "remaining_time": "5:44:51", "throughput": "998.26", "total_tokens": 16798263}
136
+ {"current_steps": 680, "total_steps": 1505, "loss": 0.0218, "learning_rate": 2.876905353760459e-05, "epoch": 15.768115942028986, "percentage": 45.18, "elapsed_time": "4:42:22", "remaining_time": "5:42:35", "throughput": "998.47", "total_tokens": 16916827}
137
+ {"current_steps": 685, "total_steps": 1505, "loss": 0.0231, "learning_rate": 2.8510905717881614e-05, "epoch": 15.884057971014492, "percentage": 45.51, "elapsed_time": "4:44:20", "remaining_time": "5:40:23", "throughput": "998.80", "total_tokens": 17040247}
138
+ {"current_steps": 690, "total_steps": 1505, "loss": 0.0228, "learning_rate": 2.8252375441718137e-05, "epoch": 16.0, "percentage": 45.85, "elapsed_time": "4:45:58", "remaining_time": "5:37:46", "throughput": "999.30", "total_tokens": 17146160}
139
+ {"current_steps": 695, "total_steps": 1505, "loss": 0.029, "learning_rate": 2.7993490871809808e-05, "epoch": 16.115942028985508, "percentage": 46.18, "elapsed_time": "4:48:18", "remaining_time": "5:36:01", "throughput": "999.19", "total_tokens": 17284643}
140
+ {"current_steps": 700, "total_steps": 1505, "loss": 0.0199, "learning_rate": 2.7734280209446865e-05, "epoch": 16.231884057971016, "percentage": 46.51, "elapsed_time": "4:50:40", "remaining_time": "5:34:16", "throughput": "999.18", "total_tokens": 17426644}
141
+ {"current_steps": 705, "total_steps": 1505, "loss": 0.0259, "learning_rate": 2.7474771691442018e-05, "epoch": 16.347826086956523, "percentage": 46.84, "elapsed_time": "4:53:00", "remaining_time": "5:32:29", "throughput": "997.81", "total_tokens": 17541812}
142
+ {"current_steps": 710, "total_steps": 1505, "loss": 0.021, "learning_rate": 2.721499358705458e-05, "epoch": 16.463768115942027, "percentage": 47.18, "elapsed_time": "4:55:06", "remaining_time": "5:30:25", "throughput": "997.83", "total_tokens": 17667755}
143
+ {"current_steps": 715, "total_steps": 1505, "loss": 0.0199, "learning_rate": 2.6954974194910888e-05, "epoch": 16.579710144927535, "percentage": 47.51, "elapsed_time": "4:56:59", "remaining_time": "5:28:08", "throughput": "998.23", "total_tokens": 17788162}
144
+ {"current_steps": 720, "total_steps": 1505, "loss": 0.0189, "learning_rate": 2.6694741839921732e-05, "epoch": 16.695652173913043, "percentage": 47.84, "elapsed_time": "4:58:57", "remaining_time": "5:25:57", "throughput": "998.55", "total_tokens": 17911718}
145
+ {"current_steps": 725, "total_steps": 1505, "loss": 0.0169, "learning_rate": 2.6434324870196748e-05, "epoch": 16.81159420289855, "percentage": 48.17, "elapsed_time": "5:00:37", "remaining_time": "5:23:26", "throughput": "998.93", "total_tokens": 18018729}
146
+ {"current_steps": 730, "total_steps": 1505, "loss": 0.0209, "learning_rate": 2.617375165395634e-05, "epoch": 16.92753623188406, "percentage": 48.5, "elapsed_time": "5:02:31", "remaining_time": "5:21:10", "throughput": "999.34", "total_tokens": 18139681}
147
+ {"current_steps": 735, "total_steps": 1505, "loss": 0.0201, "learning_rate": 2.5913050576441477e-05, "epoch": 17.043478260869566, "percentage": 48.84, "elapsed_time": "5:04:46", "remaining_time": "5:19:16", "throughput": "999.59", "total_tokens": 18278544}
148
+ {"current_steps": 740, "total_steps": 1505, "loss": 0.017, "learning_rate": 2.5652250036821523e-05, "epoch": 17.159420289855074, "percentage": 49.17, "elapsed_time": "5:06:53", "remaining_time": "5:17:15", "throughput": "999.09", "total_tokens": 18396700}
149
+ {"current_steps": 745, "total_steps": 1505, "loss": 0.0187, "learning_rate": 2.5391378445100644e-05, "epoch": 17.27536231884058, "percentage": 49.5, "elapsed_time": "5:08:38", "remaining_time": "5:14:50", "throughput": "999.36", "total_tokens": 18506229}
150
+ {"current_steps": 750, "total_steps": 1505, "loss": 0.0242, "learning_rate": 2.5130464219022992e-05, "epoch": 17.391304347826086, "percentage": 49.83, "elapsed_time": "5:10:26", "remaining_time": "5:12:31", "throughput": "999.72", "total_tokens": 18621580}
151
+ {"current_steps": 755, "total_steps": 1505, "loss": 0.0153, "learning_rate": 2.486953578097702e-05, "epoch": 17.507246376811594, "percentage": 50.17, "elapsed_time": "5:12:27", "remaining_time": "5:10:23", "throughput": "1000.04", "total_tokens": 18748382}
152
+ {"current_steps": 760, "total_steps": 1505, "loss": 0.0182, "learning_rate": 2.4608621554899362e-05, "epoch": 17.6231884057971, "percentage": 50.5, "elapsed_time": "5:14:42", "remaining_time": "5:08:29", "throughput": "1000.12", "total_tokens": 18884730}
153
+ {"current_steps": 765, "total_steps": 1505, "loss": 0.0143, "learning_rate": 2.4347749963178486e-05, "epoch": 17.73913043478261, "percentage": 50.83, "elapsed_time": "5:16:37", "remaining_time": "5:06:16", "throughput": "1000.34", "total_tokens": 19003589}
154
+ {"current_steps": 770, "total_steps": 1505, "loss": 0.0164, "learning_rate": 2.4086949423558526e-05, "epoch": 17.855072463768117, "percentage": 51.16, "elapsed_time": "5:18:47", "remaining_time": "5:04:18", "throughput": "1000.46", "total_tokens": 19136411}
155
+ {"current_steps": 775, "total_steps": 1505, "loss": 0.0157, "learning_rate": 2.3826248346043663e-05, "epoch": 17.971014492753625, "percentage": 51.5, "elapsed_time": "5:20:52", "remaining_time": "5:02:14", "throughput": "1000.43", "total_tokens": 19260436}
156
+ {"current_steps": 780, "total_steps": 1505, "loss": 0.0304, "learning_rate": 2.356567512980326e-05, "epoch": 18.08695652173913, "percentage": 51.83, "elapsed_time": "5:23:02", "remaining_time": "5:00:15", "throughput": "1000.34", "total_tokens": 19388733}
157
+ {"current_steps": 785, "total_steps": 1505, "loss": 0.009, "learning_rate": 2.3305258160078274e-05, "epoch": 18.202898550724637, "percentage": 52.16, "elapsed_time": "5:25:22", "remaining_time": "4:58:25", "throughput": "1000.45", "total_tokens": 19531204}
158
+ {"current_steps": 790, "total_steps": 1505, "loss": 0.0105, "learning_rate": 2.3045025805089118e-05, "epoch": 18.318840579710145, "percentage": 52.49, "elapsed_time": "5:26:47", "remaining_time": "4:55:45", "throughput": "1000.89", "total_tokens": 19624608}
159
+ {"current_steps": 795, "total_steps": 1505, "loss": 0.0104, "learning_rate": 2.278500641294543e-05, "epoch": 18.434782608695652, "percentage": 52.82, "elapsed_time": "5:29:00", "remaining_time": "4:53:49", "throughput": "1000.55", "total_tokens": 19751062}
160
+ {"current_steps": 800, "total_steps": 1505, "loss": 0.0103, "learning_rate": 2.252522830855798e-05, "epoch": 18.55072463768116, "percentage": 53.16, "elapsed_time": "5:31:08", "remaining_time": "4:51:49", "throughput": "1000.55", "total_tokens": 19879837}
161
+ {"current_steps": 805, "total_steps": 1505, "loss": 0.0107, "learning_rate": 2.2265719790553147e-05, "epoch": 18.666666666666668, "percentage": 53.49, "elapsed_time": "5:33:50", "remaining_time": "4:50:17", "throughput": "999.47", "total_tokens": 20019385}
162
+ {"current_steps": 810, "total_steps": 1505, "loss": 0.0269, "learning_rate": 2.2006509128190195e-05, "epoch": 18.782608695652176, "percentage": 53.82, "elapsed_time": "5:35:48", "remaining_time": "4:48:07", "throughput": "999.49", "total_tokens": 20138003}
163
+ {"current_steps": 815, "total_steps": 1505, "loss": 0.0086, "learning_rate": 2.174762455828187e-05, "epoch": 18.89855072463768, "percentage": 54.15, "elapsed_time": "5:37:46", "remaining_time": "4:45:58", "throughput": "999.70", "total_tokens": 20260523}
164
+ {"current_steps": 820, "total_steps": 1505, "loss": 0.0133, "learning_rate": 2.1489094282118395e-05, "epoch": 19.014492753623188, "percentage": 54.49, "elapsed_time": "5:39:36", "remaining_time": "4:43:41", "throughput": "999.96", "total_tokens": 20375322}
165
+ {"current_steps": 825, "total_steps": 1505, "loss": 0.0114, "learning_rate": 2.123094646239541e-05, "epoch": 19.130434782608695, "percentage": 54.82, "elapsed_time": "5:41:09", "remaining_time": "4:41:11", "throughput": "1000.41", "total_tokens": 20477407}
166
+ {"current_steps": 830, "total_steps": 1505, "loss": 0.007, "learning_rate": 2.0973209220146135e-05, "epoch": 19.246376811594203, "percentage": 55.15, "elapsed_time": "5:43:12", "remaining_time": "4:39:06", "throughput": "1000.64", "total_tokens": 20605728}
167
+ {"current_steps": 835, "total_steps": 1505, "loss": 0.0088, "learning_rate": 2.0715910631677968e-05, "epoch": 19.36231884057971, "percentage": 55.48, "elapsed_time": "5:45:09", "remaining_time": "4:36:57", "throughput": "1000.77", "total_tokens": 20725799}
168
+ {"current_steps": 840, "total_steps": 1505, "loss": 0.007, "learning_rate": 2.0459078725514092e-05, "epoch": 19.47826086956522, "percentage": 55.81, "elapsed_time": "5:47:35", "remaining_time": "4:35:10", "throughput": "1000.49", "total_tokens": 20865534}
169
+ {"current_steps": 845, "total_steps": 1505, "loss": 0.0059, "learning_rate": 2.020274147934019e-05, "epoch": 19.594202898550726, "percentage": 56.15, "elapsed_time": "5:49:21", "remaining_time": "4:32:52", "throughput": "1000.78", "total_tokens": 20977913}
170
+ {"current_steps": 850, "total_steps": 1505, "loss": 0.0069, "learning_rate": 1.9946926816956743e-05, "epoch": 19.71014492753623, "percentage": 56.48, "elapsed_time": "5:51:21", "remaining_time": "4:30:45", "throughput": "1000.99", "total_tokens": 21102848}
171
+ {"current_steps": 855, "total_steps": 1505, "loss": 0.008, "learning_rate": 1.9691662605237166e-05, "epoch": 19.82608695652174, "percentage": 56.81, "elapsed_time": "5:53:40", "remaining_time": "4:28:52", "throughput": "1001.10", "total_tokens": 21243679}
172
+ {"current_steps": 860, "total_steps": 1505, "loss": 0.0127, "learning_rate": 1.9436976651092144e-05, "epoch": 19.942028985507246, "percentage": 57.14, "elapsed_time": "5:55:37", "remaining_time": "4:26:43", "throughput": "1001.23", "total_tokens": 21364202}
173
+ {"current_steps": 865, "total_steps": 1505, "loss": 0.0059, "learning_rate": 1.9182896698440584e-05, "epoch": 20.057971014492754, "percentage": 57.48, "elapsed_time": "5:57:53", "remaining_time": "4:24:47", "throughput": "1001.07", "total_tokens": 21496089}
174
+ {"current_steps": 870, "total_steps": 1505, "loss": 0.0046, "learning_rate": 1.89294504251873e-05, "epoch": 20.17391304347826, "percentage": 57.81, "elapsed_time": "5:59:40", "remaining_time": "4:22:30", "throughput": "1001.07", "total_tokens": 21603193}
175
+ {"current_steps": 875, "total_steps": 1505, "loss": 0.0058, "learning_rate": 1.867666544020798e-05, "epoch": 20.28985507246377, "percentage": 58.14, "elapsed_time": "6:01:57", "remaining_time": "4:20:36", "throughput": "1001.15", "total_tokens": 21742062}
176
+ {"current_steps": 880, "total_steps": 1505, "loss": 0.0082, "learning_rate": 1.8424569280341653e-05, "epoch": 20.405797101449274, "percentage": 58.47, "elapsed_time": "6:04:10", "remaining_time": "4:18:38", "throughput": "1000.86", "total_tokens": 21869307}
177
+ {"current_steps": 885, "total_steps": 1505, "loss": 0.0148, "learning_rate": 1.817318940739098e-05, "epoch": 20.52173913043478, "percentage": 58.8, "elapsed_time": "6:06:08", "remaining_time": "4:16:30", "throughput": "1001.09", "total_tokens": 21992573}
178
+ {"current_steps": 890, "total_steps": 1505, "loss": 0.0064, "learning_rate": 1.7922553205130707e-05, "epoch": 20.63768115942029, "percentage": 59.14, "elapsed_time": "6:07:50", "remaining_time": "4:14:10", "throughput": "1001.43", "total_tokens": 22101845}
179
+ {"current_steps": 895, "total_steps": 1505, "loss": 0.008, "learning_rate": 1.767268797632472e-05, "epoch": 20.753623188405797, "percentage": 59.47, "elapsed_time": "6:09:59", "remaining_time": "4:12:10", "throughput": "1001.37", "total_tokens": 22230253}
180
+ {"current_steps": 900, "total_steps": 1505, "loss": 0.0053, "learning_rate": 1.7423620939751788e-05, "epoch": 20.869565217391305, "percentage": 59.8, "elapsed_time": "6:12:22", "remaining_time": "4:10:19", "throughput": "1001.39", "total_tokens": 22373454}
181
+ {"current_steps": 905, "total_steps": 1505, "loss": 0.0054, "learning_rate": 1.7175379227240523e-05, "epoch": 20.985507246376812, "percentage": 60.13, "elapsed_time": "6:14:33", "remaining_time": "4:08:19", "throughput": "1000.86", "total_tokens": 22493123}
182
+ {"current_steps": 910, "total_steps": 1505, "loss": 0.0044, "learning_rate": 1.692798988071385e-05, "epoch": 21.10144927536232, "percentage": 60.47, "elapsed_time": "6:16:48", "remaining_time": "4:06:22", "throughput": "1000.89", "total_tokens": 22629005}
183
+ {"current_steps": 915, "total_steps": 1505, "loss": 0.0043, "learning_rate": 1.6681479849243153e-05, "epoch": 21.217391304347824, "percentage": 60.8, "elapsed_time": "6:18:57", "remaining_time": "4:04:21", "throughput": "1000.65", "total_tokens": 22752358}
184
+ {"current_steps": 920, "total_steps": 1505, "loss": 0.0035, "learning_rate": 1.6435875986112685e-05, "epoch": 21.333333333333332, "percentage": 61.13, "elapsed_time": "6:21:01", "remaining_time": "4:02:17", "throughput": "1000.81", "total_tokens": 22880349}
185
+ {"current_steps": 925, "total_steps": 1505, "loss": 0.0044, "learning_rate": 1.6191205045894283e-05, "epoch": 21.44927536231884, "percentage": 61.46, "elapsed_time": "6:22:39", "remaining_time": "3:59:56", "throughput": "1001.21", "total_tokens": 22987343}
186
+ {"current_steps": 930, "total_steps": 1505, "loss": 0.0178, "learning_rate": 1.594749368153292e-05, "epoch": 21.565217391304348, "percentage": 61.79, "elapsed_time": "6:24:39", "remaining_time": "3:57:49", "throughput": "1001.45", "total_tokens": 23113462}
187
+ {"current_steps": 935, "total_steps": 1505, "loss": 0.0089, "learning_rate": 1.570476844144329e-05, "epoch": 21.681159420289855, "percentage": 62.13, "elapsed_time": "6:26:20", "remaining_time": "3:55:31", "throughput": "1001.76", "total_tokens": 23221714}
188
+ {"current_steps": 940, "total_steps": 1505, "loss": 0.004, "learning_rate": 1.546305576661776e-05, "epoch": 21.797101449275363, "percentage": 62.46, "elapsed_time": "6:29:03", "remaining_time": "3:53:50", "throughput": "1001.09", "total_tokens": 23368857}
189
+ {"current_steps": 945, "total_steps": 1505, "loss": 0.004, "learning_rate": 1.5222381987746104e-05, "epoch": 21.91304347826087, "percentage": 62.79, "elapsed_time": "6:31:12", "remaining_time": "3:51:49", "throughput": "1000.93", "total_tokens": 23494483}
190
+ {"current_steps": 950, "total_steps": 1505, "loss": 0.0034, "learning_rate": 1.4982773322347144e-05, "epoch": 22.028985507246375, "percentage": 63.12, "elapsed_time": "6:33:01", "remaining_time": "3:49:36", "throughput": "1001.01", "total_tokens": 23605463}
191
+ {"current_steps": 955, "total_steps": 1505, "loss": 0.0066, "learning_rate": 1.4744255871912823e-05, "epoch": 22.144927536231883, "percentage": 63.46, "elapsed_time": "6:34:42", "remaining_time": "3:47:19", "throughput": "1001.40", "total_tokens": 23715776}
192
+ {"current_steps": 960, "total_steps": 1505, "loss": 0.0034, "learning_rate": 1.4506855619064846e-05, "epoch": 22.26086956521739, "percentage": 63.79, "elapsed_time": "6:36:44", "remaining_time": "3:45:14", "throughput": "1001.54", "total_tokens": 23841669}
193
+ {"current_steps": 965, "total_steps": 1505, "loss": 0.0032, "learning_rate": 1.4270598424724292e-05, "epoch": 22.3768115942029, "percentage": 64.12, "elapsed_time": "6:38:36", "remaining_time": "3:43:03", "throughput": "1001.84", "total_tokens": 23960567}
194
+ {"current_steps": 970, "total_steps": 1505, "loss": 0.0124, "learning_rate": 1.4035510025294462e-05, "epoch": 22.492753623188406, "percentage": 64.45, "elapsed_time": "6:40:23", "remaining_time": "3:40:50", "throughput": "1002.12", "total_tokens": 24074628}
195
+ {"current_steps": 975, "total_steps": 1505, "loss": 0.0027, "learning_rate": 1.3801616029857378e-05, "epoch": 22.608695652173914, "percentage": 64.78, "elapsed_time": "6:42:53", "remaining_time": "3:39:00", "throughput": "1001.67", "total_tokens": 24214324}
196
+ {"current_steps": 980, "total_steps": 1505, "loss": 0.0037, "learning_rate": 1.3568941917384036e-05, "epoch": 22.72463768115942, "percentage": 65.12, "elapsed_time": "6:44:40", "remaining_time": "3:36:47", "throughput": "1001.90", "total_tokens": 24326727}
197
+ {"current_steps": 985, "total_steps": 1505, "loss": 0.0029, "learning_rate": 1.3337513033958904e-05, "epoch": 22.840579710144926, "percentage": 65.45, "elapsed_time": "6:46:51", "remaining_time": "3:34:47", "throughput": "1001.88", "total_tokens": 24456961}
198
+ {"current_steps": 990, "total_steps": 1505, "loss": 0.0035, "learning_rate": 1.310735459001884e-05, "epoch": 22.956521739130434, "percentage": 65.78, "elapsed_time": "6:49:42", "remaining_time": "3:33:07", "throughput": "1001.00", "total_tokens": 24606652}
199
+ {"current_steps": 995, "total_steps": 1505, "loss": 0.002, "learning_rate": 1.2878491657606872e-05, "epoch": 23.07246376811594, "percentage": 66.11, "elapsed_time": "6:51:17", "remaining_time": "3:30:48", "throughput": "1001.32", "total_tokens": 24710410}
200
+ {"current_steps": 1000, "total_steps": 1505, "loss": 0.0023, "learning_rate": 1.2650949167640993e-05, "epoch": 23.18840579710145, "percentage": 66.45, "elapsed_time": "6:53:12", "remaining_time": "3:28:40", "throughput": "1001.57", "total_tokens": 24831908}
201
+ {"current_steps": 1005, "total_steps": 1505, "loss": 0.0031, "learning_rate": 1.2424751907198312e-05, "epoch": 23.304347826086957, "percentage": 66.78, "elapsed_time": "6:55:34", "remaining_time": "3:26:45", "throughput": "1000.67", "total_tokens": 24951342}
202
+ {"current_steps": 1010, "total_steps": 1505, "loss": 0.0027, "learning_rate": 1.2199924516814939e-05, "epoch": 23.420289855072465, "percentage": 67.11, "elapsed_time": "6:57:55", "remaining_time": "3:24:49", "throughput": "1000.50", "total_tokens": 25088309}
203
+ {"current_steps": 1015, "total_steps": 1505, "loss": 0.0124, "learning_rate": 1.1976491487801748e-05, "epoch": 23.536231884057973, "percentage": 67.44, "elapsed_time": "6:59:58", "remaining_time": "3:22:44", "throughput": "1000.70", "total_tokens": 25216080}
204
+ {"current_steps": 1020, "total_steps": 1505, "loss": 0.0023, "learning_rate": 1.1754477159576499e-05, "epoch": 23.652173913043477, "percentage": 67.77, "elapsed_time": "7:01:40", "remaining_time": "3:20:30", "throughput": "1001.02", "total_tokens": 25326581}
205
+ {"current_steps": 1025, "total_steps": 1505, "loss": 0.0027, "learning_rate": 1.1533905717012428e-05, "epoch": 23.768115942028984, "percentage": 68.11, "elapsed_time": "7:04:12", "remaining_time": "3:18:39", "throughput": "1000.96", "total_tokens": 25477500}
206
+ {"current_steps": 1030, "total_steps": 1505, "loss": 0.0041, "learning_rate": 1.1314801187803686e-05, "epoch": 23.884057971014492, "percentage": 68.44, "elapsed_time": "7:06:11", "remaining_time": "3:16:32", "throughput": "1001.18", "total_tokens": 25601354}
207
+ {"current_steps": 1035, "total_steps": 1505, "loss": 0.0021, "learning_rate": 1.1097187439847939e-05, "epoch": 24.0, "percentage": 68.77, "elapsed_time": "7:08:03", "remaining_time": "3:14:23", "throughput": "1001.39", "total_tokens": 25719240}
208
+ {"current_steps": 1040, "total_steps": 1505, "loss": 0.0039, "learning_rate": 1.088108817864629e-05, "epoch": 24.115942028985508, "percentage": 69.1, "elapsed_time": "7:09:52", "remaining_time": "3:12:12", "throughput": "1001.66", "total_tokens": 25834910}
209
+ {"current_steps": 1045, "total_steps": 1505, "loss": 0.0025, "learning_rate": 1.0666526944721016e-05, "epoch": 24.231884057971016, "percentage": 69.44, "elapsed_time": "7:12:13", "remaining_time": "3:10:15", "throughput": "1001.60", "total_tokens": 25974530}
210
+ {"current_steps": 1050, "total_steps": 1505, "loss": 0.002, "learning_rate": 1.0453527111051184e-05, "epoch": 24.347826086956523, "percentage": 69.77, "elapsed_time": "7:14:17", "remaining_time": "3:08:11", "throughput": "1001.79", "total_tokens": 26104464}
211
+ {"current_steps": 1055, "total_steps": 1505, "loss": 0.0024, "learning_rate": 1.0242111880526495e-05, "epoch": 24.463768115942027, "percentage": 70.1, "elapsed_time": "7:16:46", "remaining_time": "3:06:18", "throughput": "1001.71", "total_tokens": 26251334}
212
+ {"current_steps": 1060, "total_steps": 1505, "loss": 0.0031, "learning_rate": 1.003230428341979e-05, "epoch": 24.579710144927535, "percentage": 70.43, "elapsed_time": "7:18:35", "remaining_time": "3:04:07", "throughput": "1001.93", "total_tokens": 26366561}
213
+ {"current_steps": 1065, "total_steps": 1505, "loss": 0.0022, "learning_rate": 9.824127174878195e-06, "epoch": 24.695652173913043, "percentage": 70.76, "elapsed_time": "7:20:30", "remaining_time": "3:01:59", "throughput": "1002.12", "total_tokens": 26486437}
214
+ {"current_steps": 1070, "total_steps": 1505, "loss": 0.0022, "learning_rate": 9.617603232433475e-06, "epoch": 24.81159420289855, "percentage": 71.1, "elapsed_time": "7:22:16", "remaining_time": "2:59:48", "throughput": "1002.46", "total_tokens": 26601526}
215
+ {"current_steps": 1075, "total_steps": 1505, "loss": 0.0109, "learning_rate": 9.412754953531663e-06, "epoch": 24.92753623188406, "percentage": 71.43, "elapsed_time": "7:24:45", "remaining_time": "2:57:54", "throughput": "1001.58", "total_tokens": 26727922}
216
+ {"current_steps": 1080, "total_steps": 1505, "loss": 0.0019, "learning_rate": 9.209604653082326e-06, "epoch": 25.043478260869566, "percentage": 71.76, "elapsed_time": "7:26:27", "remaining_time": "2:55:41", "throughput": "1001.78", "total_tokens": 26835621}
217
+ {"current_steps": 1085, "total_steps": 1505, "loss": 0.0016, "learning_rate": 9.008174461027724e-06, "epoch": 25.159420289855074, "percentage": 72.09, "elapsed_time": "7:28:21", "remaining_time": "2:53:33", "throughput": "1002.01", "total_tokens": 26955101}
218
+ {"current_steps": 1090, "total_steps": 1505, "loss": 0.002, "learning_rate": 8.808486319932083e-06, "epoch": 25.27536231884058, "percentage": 72.43, "elapsed_time": "7:30:19", "remaining_time": "2:51:27", "throughput": "1002.15", "total_tokens": 27077833}
219
+ {"current_steps": 1095, "total_steps": 1505, "loss": 0.0018, "learning_rate": 8.610561982591357e-06, "epoch": 25.391304347826086, "percentage": 72.76, "elapsed_time": "7:32:08", "remaining_time": "2:49:17", "throughput": "1002.35", "total_tokens": 27192758}
220
+ {"current_steps": 1100, "total_steps": 1505, "loss": 0.0028, "learning_rate": 8.414423009663563e-06, "epoch": 25.507246376811594, "percentage": 73.09, "elapsed_time": "7:34:18", "remaining_time": "2:47:16", "throughput": "1002.44", "total_tokens": 27324970}
221
+ {"current_steps": 1105, "total_steps": 1505, "loss": 0.0021, "learning_rate": 8.220090767320137e-06, "epoch": 25.6231884057971, "percentage": 73.42, "elapsed_time": "7:38:48", "remaining_time": "2:46:05", "throughput": "998.15", "total_tokens": 27477531}
222
+ {"current_steps": 1110, "total_steps": 1505, "loss": 0.0057, "learning_rate": 8.027586424918412e-06, "epoch": 25.73913043478261, "percentage": 73.75, "elapsed_time": "7:40:36", "remaining_time": "2:43:54", "throughput": "998.39", "total_tokens": 27592035}
223
+ {"current_steps": 1115, "total_steps": 1505, "loss": 0.0067, "learning_rate": 7.836930952695533e-06, "epoch": 25.855072463768117, "percentage": 74.09, "elapsed_time": "7:42:31", "remaining_time": "2:41:46", "throughput": "998.59", "total_tokens": 27712377}
224
+ {"current_steps": 1120, "total_steps": 1505, "loss": 0.002, "learning_rate": 7.648145119484153e-06, "epoch": 25.971014492753625, "percentage": 74.42, "elapsed_time": "7:44:31", "remaining_time": "2:39:40", "throughput": "998.67", "total_tokens": 27834613}
225
+ {"current_steps": 1125, "total_steps": 1505, "loss": 0.0021, "learning_rate": 7.461249490449954e-06, "epoch": 26.08695652173913, "percentage": 74.75, "elapsed_time": "7:46:42", "remaining_time": "2:37:38", "throughput": "998.73", "total_tokens": 27966996}
226
+ {"current_steps": 1130, "total_steps": 1505, "loss": 0.002, "learning_rate": 7.276264424851423e-06, "epoch": 26.202898550724637, "percentage": 75.08, "elapsed_time": "7:48:44", "remaining_time": "2:35:33", "throughput": "998.92", "total_tokens": 28093538}
227
+ {"current_steps": 1135, "total_steps": 1505, "loss": 0.0017, "learning_rate": 7.0932100738220265e-06, "epoch": 26.318840579710145, "percentage": 75.42, "elapsed_time": "7:50:47", "remaining_time": "2:33:28", "throughput": "998.88", "total_tokens": 28215579}
228
+ {"current_steps": 1140, "total_steps": 1505, "loss": 0.0014, "learning_rate": 6.912106378175098e-06, "epoch": 26.434782608695652, "percentage": 75.75, "elapsed_time": "7:52:59", "remaining_time": "2:31:26", "throughput": "998.77", "total_tokens": 28344144}
229
+ {"current_steps": 1145, "total_steps": 1505, "loss": 0.0022, "learning_rate": 6.732973066231563e-06, "epoch": 26.55072463768116, "percentage": 76.08, "elapsed_time": "7:55:12", "remaining_time": "2:29:24", "throughput": "998.82", "total_tokens": 28478650}
230
+ {"current_steps": 1150, "total_steps": 1505, "loss": 0.0023, "learning_rate": 6.555829651670911e-06, "epoch": 26.666666666666668, "percentage": 76.41, "elapsed_time": "7:57:16", "remaining_time": "2:27:19", "throughput": "998.48", "total_tokens": 28593004}
231
+ {"current_steps": 1155, "total_steps": 1505, "loss": 0.0028, "learning_rate": 6.380695431405456e-06, "epoch": 26.782608695652176, "percentage": 76.74, "elapsed_time": "7:59:05", "remaining_time": "2:25:10", "throughput": "998.66", "total_tokens": 28707392}
232
+ {"current_steps": 1160, "total_steps": 1505, "loss": 0.006, "learning_rate": 6.207589483478266e-06, "epoch": 26.89855072463768, "percentage": 77.08, "elapsed_time": "8:01:12", "remaining_time": "2:23:07", "throughput": "998.69", "total_tokens": 28834902}
233
+ {"current_steps": 1165, "total_steps": 1505, "loss": 0.0045, "learning_rate": 6.0365306649849214e-06, "epoch": 27.014492753623188, "percentage": 77.41, "elapsed_time": "8:02:58", "remaining_time": "2:20:57", "throughput": "998.96", "total_tokens": 28948812}
234
+ {"current_steps": 1170, "total_steps": 1505, "loss": 0.0019, "learning_rate": 5.867537610019317e-06, "epoch": 27.130434782608695, "percentage": 77.74, "elapsed_time": "8:05:02", "remaining_time": "2:18:52", "throughput": "999.15", "total_tokens": 29078309}
235
+ {"current_steps": 1175, "total_steps": 1505, "loss": 0.002, "learning_rate": 5.700628727643806e-06, "epoch": 27.246376811594203, "percentage": 78.07, "elapsed_time": "8:07:18", "remaining_time": "2:16:51", "throughput": "999.08", "total_tokens": 29211503}
236
+ {"current_steps": 1180, "total_steps": 1505, "loss": 0.0019, "learning_rate": 5.53582219988382e-06, "epoch": 27.36231884057971, "percentage": 78.41, "elapsed_time": "8:09:40", "remaining_time": "2:14:52", "throughput": "998.78", "total_tokens": 29344489}
237
+ {"current_steps": 1185, "total_steps": 1505, "loss": 0.006, "learning_rate": 5.373135979747227e-06, "epoch": 27.47826086956522, "percentage": 78.74, "elapsed_time": "8:12:02", "remaining_time": "2:12:52", "throughput": "998.02", "total_tokens": 29464082}
238
+ {"current_steps": 1190, "total_steps": 1505, "loss": 0.0043, "learning_rate": 5.2125877892686496e-06, "epoch": 27.594202898550726, "percentage": 79.07, "elapsed_time": "8:13:55", "remaining_time": "2:10:44", "throughput": "998.17", "total_tokens": 29581124}
239
+ {"current_steps": 1195, "total_steps": 1505, "loss": 0.0019, "learning_rate": 5.054195117578914e-06, "epoch": 27.71014492753623, "percentage": 79.4, "elapsed_time": "8:15:43", "remaining_time": "2:08:35", "throughput": "998.41", "total_tokens": 29696346}
240
+ {"current_steps": 1200, "total_steps": 1505, "loss": 0.002, "learning_rate": 4.897975218999926e-06, "epoch": 27.82608695652174, "percentage": 79.73, "elapsed_time": "8:17:35", "remaining_time": "2:06:28", "throughput": "998.66", "total_tokens": 29815117}
241
+ {"current_steps": 1205, "total_steps": 1505, "loss": 0.0022, "learning_rate": 4.743945111165068e-06, "epoch": 27.942028985507246, "percentage": 80.07, "elapsed_time": "8:19:56", "remaining_time": "2:04:27", "throughput": "998.10", "total_tokens": 29939175}
242
+ {"current_steps": 1210, "total_steps": 1505, "loss": 0.0016, "learning_rate": 4.592121573165414e-06, "epoch": 28.057971014492754, "percentage": 80.4, "elapsed_time": "8:22:34", "remaining_time": "2:02:31", "throughput": "997.51", "total_tokens": 30079840}
243
+ {"current_steps": 1215, "total_steps": 1505, "loss": 0.0033, "learning_rate": 4.442521143721892e-06, "epoch": 28.17391304347826, "percentage": 80.73, "elapsed_time": "8:24:23", "remaining_time": "2:00:23", "throughput": "997.65", "total_tokens": 30192219}
244
+ {"current_steps": 1220, "total_steps": 1505, "loss": 0.0018, "learning_rate": 4.295160119383712e-06, "epoch": 28.28985507246377, "percentage": 81.06, "elapsed_time": "8:26:48", "remaining_time": "1:58:23", "throughput": "997.44", "total_tokens": 30330969}
245
+ {"current_steps": 1225, "total_steps": 1505, "loss": 0.0018, "learning_rate": 4.150054552753055e-06, "epoch": 28.405797101449274, "percentage": 81.4, "elapsed_time": "8:28:52", "remaining_time": "1:56:18", "throughput": "997.40", "total_tokens": 30453302}
246
+ {"current_steps": 1230, "total_steps": 1505, "loss": 0.0078, "learning_rate": 4.007220250736454e-06, "epoch": 28.52173913043478, "percentage": 81.73, "elapsed_time": "8:30:43", "remaining_time": "1:54:11", "throughput": "997.56", "total_tokens": 30568943}
247
+ {"current_steps": 1235, "total_steps": 1505, "loss": 0.0019, "learning_rate": 3.866672772822863e-06, "epoch": 28.63768115942029, "percentage": 82.06, "elapsed_time": "8:32:59", "remaining_time": "1:52:09", "throughput": "997.29", "total_tokens": 30696057}
248
+ {"current_steps": 1240, "total_steps": 1505, "loss": 0.0019, "learning_rate": 3.7284274293887115e-06, "epoch": 28.753623188405797, "percentage": 82.39, "elapsed_time": "8:35:23", "remaining_time": "1:50:08", "throughput": "996.51", "total_tokens": 30815506}
249
+ {"current_steps": 1245, "total_steps": 1505, "loss": 0.0027, "learning_rate": 3.592499280030057e-06, "epoch": 28.869565217391305, "percentage": 82.72, "elapsed_time": "8:36:55", "remaining_time": "1:47:57", "throughput": "996.81", "total_tokens": 30916446}
250
+ {"current_steps": 1250, "total_steps": 1505, "loss": 0.0023, "learning_rate": 3.458903131922134e-06, "epoch": 28.985507246376812, "percentage": 83.06, "elapsed_time": "8:39:08", "remaining_time": "1:45:54", "throughput": "996.97", "total_tokens": 31054242}
251
+ {"current_steps": 1255, "total_steps": 1505, "loss": 0.0029, "learning_rate": 3.3276535382063213e-06, "epoch": 29.10144927536232, "percentage": 83.39, "elapsed_time": "8:41:22", "remaining_time": "1:43:51", "throughput": "997.00", "total_tokens": 31189078}
252
+ {"current_steps": 1260, "total_steps": 1505, "loss": 0.0018, "learning_rate": 3.198764796404807e-06, "epoch": 29.217391304347824, "percentage": 83.72, "elapsed_time": "8:43:21", "remaining_time": "1:41:45", "throughput": "997.14", "total_tokens": 31311374}
253
+ {"current_steps": 1265, "total_steps": 1505, "loss": 0.0018, "learning_rate": 3.0722509468631392e-06, "epoch": 29.333333333333332, "percentage": 84.05, "elapsed_time": "8:45:30", "remaining_time": "1:39:42", "throughput": "997.27", "total_tokens": 31444681}
254
+ {"current_steps": 1270, "total_steps": 1505, "loss": 0.0018, "learning_rate": 2.948125771220697e-06, "epoch": 29.44927536231884, "percentage": 84.39, "elapsed_time": "8:47:28", "remaining_time": "1:37:36", "throughput": "997.43", "total_tokens": 31567569}
255
+ {"current_steps": 1275, "total_steps": 1505, "loss": 0.0019, "learning_rate": 2.8264027909094715e-06, "epoch": 29.565217391304348, "percentage": 84.72, "elapsed_time": "8:49:51", "remaining_time": "1:35:34", "throughput": "997.05", "total_tokens": 31697338}
256
+ {"current_steps": 1280, "total_steps": 1505, "loss": 0.0018, "learning_rate": 2.707095265681081e-06, "epoch": 29.681159420289855, "percentage": 85.05, "elapsed_time": "8:51:55", "remaining_time": "1:33:30", "throughput": "997.23", "total_tokens": 31826661}
257
+ {"current_steps": 1285, "total_steps": 1505, "loss": 0.0023, "learning_rate": 2.5902161921623454e-06, "epoch": 29.797101449275363, "percentage": 85.38, "elapsed_time": "8:53:53", "remaining_time": "1:31:24", "throughput": "997.24", "total_tokens": 31944680}
258
+ {"current_steps": 1290, "total_steps": 1505, "loss": 0.0078, "learning_rate": 2.475778302439524e-06, "epoch": 29.91304347826087, "percentage": 85.71, "elapsed_time": "8:55:51", "remaining_time": "1:29:18", "throughput": "997.37", "total_tokens": 32067106}
259
+ {"current_steps": 1295, "total_steps": 1505, "loss": 0.0018, "learning_rate": 2.3637940626713346e-06, "epoch": 30.028985507246375, "percentage": 86.05, "elapsed_time": "8:57:49", "remaining_time": "1:27:12", "throughput": "997.36", "total_tokens": 32184526}
260
+ {"current_steps": 1300, "total_steps": 1505, "loss": 0.0017, "learning_rate": 2.254275671731007e-06, "epoch": 30.144927536231883, "percentage": 86.38, "elapsed_time": "8:59:48", "remaining_time": "1:25:07", "throughput": "997.56", "total_tokens": 32309423}
261
+ {"current_steps": 1305, "total_steps": 1505, "loss": 0.0071, "learning_rate": 2.14723505987737e-06, "epoch": 30.26086956521739, "percentage": 86.71, "elapsed_time": "9:02:06", "remaining_time": "1:23:04", "throughput": "997.03", "total_tokens": 32429445}
262
+ {"current_steps": 1310, "total_steps": 1505, "loss": 0.0016, "learning_rate": 2.0426838874552714e-06, "epoch": 30.3768115942029, "percentage": 87.04, "elapsed_time": "9:03:48", "remaining_time": "1:20:56", "throughput": "997.31", "total_tokens": 32540571}
263
+ {"current_steps": 1315, "total_steps": 1505, "loss": 0.0018, "learning_rate": 1.9406335436253724e-06, "epoch": 30.492753623188406, "percentage": 87.38, "elapsed_time": "9:05:50", "remaining_time": "1:18:52", "throughput": "997.40", "total_tokens": 32665528}
264
+ {"current_steps": 1320, "total_steps": 1505, "loss": 0.0017, "learning_rate": 1.8410951451234533e-06, "epoch": 30.608695652173914, "percentage": 87.71, "elapsed_time": "9:08:19", "remaining_time": "1:16:50", "throughput": "997.00", "total_tokens": 32800773}
265
+ {"current_steps": 1325, "total_steps": 1505, "loss": 0.0017, "learning_rate": 1.7440795350494588e-06, "epoch": 30.72463768115942, "percentage": 88.04, "elapsed_time": "9:10:29", "remaining_time": "1:14:47", "throughput": "996.94", "total_tokens": 32928397}
266
+ {"current_steps": 1330, "total_steps": 1505, "loss": 0.0019, "learning_rate": 1.649597281686302e-06, "epoch": 30.840579710144926, "percentage": 88.37, "elapsed_time": "9:12:31", "remaining_time": "1:12:41", "throughput": "997.09", "total_tokens": 33054819}
267
+ {"current_steps": 1335, "total_steps": 1505, "loss": 0.0018, "learning_rate": 1.5576586773486195e-06, "epoch": 30.956521739130434, "percentage": 88.7, "elapsed_time": "9:14:42", "remaining_time": "1:10:38", "throughput": "996.94", "total_tokens": 33180616}
268
+ {"current_steps": 1340, "total_steps": 1505, "loss": 0.0038, "learning_rate": 1.4682737372615967e-06, "epoch": 31.07246376811594, "percentage": 89.04, "elapsed_time": "9:16:33", "remaining_time": "1:08:31", "throughput": "997.13", "total_tokens": 33298041}
269
+ {"current_steps": 1345, "total_steps": 1505, "loss": 0.0052, "learning_rate": 1.3814521984699596e-06, "epoch": 31.18840579710145, "percentage": 89.37, "elapsed_time": "9:18:18", "remaining_time": "1:06:24", "throughput": "997.30", "total_tokens": 33408343}
270
+ {"current_steps": 1350, "total_steps": 1505, "loss": 0.0018, "learning_rate": 1.297203518777293e-06, "epoch": 31.304347826086957, "percentage": 89.7, "elapsed_time": "9:20:36", "remaining_time": "1:04:21", "throughput": "997.30", "total_tokens": 33545364}
271
+ {"current_steps": 1355, "total_steps": 1505, "loss": 0.0019, "learning_rate": 1.2155368757157643e-06, "epoch": 31.420289855072465, "percentage": 90.03, "elapsed_time": "9:22:15", "remaining_time": "1:02:14", "throughput": "997.55", "total_tokens": 33652900}
272
+ {"current_steps": 1360, "total_steps": 1505, "loss": 0.0019, "learning_rate": 1.1364611655463736e-06, "epoch": 31.536231884057973, "percentage": 90.37, "elapsed_time": "9:24:02", "remaining_time": "1:00:08", "throughput": "997.81", "total_tokens": 33768791}
273
+ {"current_steps": 1365, "total_steps": 1505, "loss": 0.0017, "learning_rate": 1.0599850022898539e-06, "epoch": 31.652173913043477, "percentage": 90.7, "elapsed_time": "9:26:18", "remaining_time": "0:58:04", "throughput": "997.48", "total_tokens": 33892837}
274
+ {"current_steps": 1370, "total_steps": 1505, "loss": 0.0022, "learning_rate": 9.861167167883046e-07, "epoch": 31.768115942028984, "percentage": 91.03, "elapsed_time": "9:28:18", "remaining_time": "0:56:00", "throughput": "997.57", "total_tokens": 34015288}
275
+ {"current_steps": 1375, "total_steps": 1505, "loss": 0.0037, "learning_rate": 9.148643557976955e-07, "epoch": 31.884057971014492, "percentage": 91.36, "elapsed_time": "9:30:49", "remaining_time": "0:53:58", "throughput": "997.25", "total_tokens": 34154884}
276
+ {"current_steps": 1380, "total_steps": 1505, "loss": 0.0019, "learning_rate": 8.462356811112987e-07, "epoch": 32.0, "percentage": 91.69, "elapsed_time": "9:33:07", "remaining_time": "0:51:54", "throughput": "997.23", "total_tokens": 34292320}
277
+ {"current_steps": 1385, "total_steps": 1505, "loss": 0.0017, "learning_rate": 7.802381687141535e-07, "epoch": 32.11594202898551, "percentage": 92.03, "elapsed_time": "9:35:06", "remaining_time": "0:49:49", "throughput": "997.32", "total_tokens": 34413850}
278
+ {"current_steps": 1390, "total_steps": 1505, "loss": 0.0018, "learning_rate": 7.168790079686932e-07, "epoch": 32.231884057971016, "percentage": 92.36, "elapsed_time": "9:37:22", "remaining_time": "0:47:46", "throughput": "997.25", "total_tokens": 34547127}
279
+ {"current_steps": 1395, "total_steps": 1505, "loss": 0.0035, "learning_rate": 6.561651008315738e-07, "epoch": 32.34782608695652, "percentage": 92.69, "elapsed_time": "9:39:51", "remaining_time": "0:45:43", "throughput": "996.95", "total_tokens": 34685112}
280
+ {"current_steps": 1400, "total_steps": 1505, "loss": 0.0063, "learning_rate": 5.981030611018234e-07, "epoch": 32.46376811594203, "percentage": 93.02, "elapsed_time": "9:41:58", "remaining_time": "0:43:38", "throughput": "996.92", "total_tokens": 34810484}
281
+ {"current_steps": 1405, "total_steps": 1505, "loss": 0.0018, "learning_rate": 5.426992137003622e-07, "epoch": 32.57971014492754, "percentage": 93.36, "elapsed_time": "9:43:53", "remaining_time": "0:41:33", "throughput": "996.79", "total_tokens": 34920531}
282
+ {"current_steps": 1410, "total_steps": 1505, "loss": 0.002, "learning_rate": 4.899595939810236e-07, "epoch": 32.69565217391305, "percentage": 93.69, "elapsed_time": "9:45:40", "remaining_time": "0:39:27", "throughput": "997.02", "total_tokens": 35035657}
283
+ {"current_steps": 1415, "total_steps": 1505, "loss": 0.0017, "learning_rate": 4.398899470730827e-07, "epoch": 32.81159420289855, "percentage": 94.02, "elapsed_time": "9:47:49", "remaining_time": "0:37:23", "throughput": "997.12", "total_tokens": 35167466}
284
+ {"current_steps": 1420, "total_steps": 1505, "loss": 0.0016, "learning_rate": 3.9249572725543196e-07, "epoch": 32.927536231884055, "percentage": 94.35, "elapsed_time": "9:49:57", "remaining_time": "0:35:18", "throughput": "997.16", "total_tokens": 35296818}
285
+ {"current_steps": 1425, "total_steps": 1505, "loss": 0.0015, "learning_rate": 3.477820973624063e-07, "epoch": 33.04347826086956, "percentage": 94.68, "elapsed_time": "9:52:14", "remaining_time": "0:33:14", "throughput": "997.07", "total_tokens": 35430399}
286
+ {"current_steps": 1430, "total_steps": 1505, "loss": 0.0057, "learning_rate": 3.0575392822139726e-07, "epoch": 33.15942028985507, "percentage": 95.02, "elapsed_time": "9:54:09", "remaining_time": "0:31:09", "throughput": "997.25", "total_tokens": 35551540}
287
+ {"current_steps": 1435, "total_steps": 1505, "loss": 0.0016, "learning_rate": 2.664157981222437e-07, "epoch": 33.27536231884058, "percentage": 95.35, "elapsed_time": "9:56:30", "remaining_time": "0:29:05", "throughput": "996.81", "total_tokens": 35676077}
288
+ {"current_steps": 1440, "total_steps": 1505, "loss": 0.0016, "learning_rate": 2.297719923185032e-07, "epoch": 33.391304347826086, "percentage": 95.68, "elapsed_time": "9:58:09", "remaining_time": "0:27:00", "throughput": "997.10", "total_tokens": 35785127}
289
+ {"current_steps": 1445, "total_steps": 1505, "loss": 0.0019, "learning_rate": 1.9582650256064205e-07, "epoch": 33.507246376811594, "percentage": 96.01, "elapsed_time": "10:00:15", "remaining_time": "0:24:55", "throughput": "997.13", "total_tokens": 35911682}
290
+ {"current_steps": 1450, "total_steps": 1505, "loss": 0.0017, "learning_rate": 1.645830266611914e-07, "epoch": 33.6231884057971, "percentage": 96.35, "elapsed_time": "10:02:13", "remaining_time": "0:22:50", "throughput": "997.16", "total_tokens": 36030754}
291
+ {"current_steps": 1455, "total_steps": 1505, "loss": 0.0042, "learning_rate": 1.3604496809195288e-07, "epoch": 33.73913043478261, "percentage": 96.68, "elapsed_time": "10:04:04", "remaining_time": "0:20:45", "throughput": "997.29", "total_tokens": 36146749}
292
+ {"current_steps": 1460, "total_steps": 1505, "loss": 0.0017, "learning_rate": 1.1021543561322012e-07, "epoch": 33.85507246376812, "percentage": 97.01, "elapsed_time": "10:06:10", "remaining_time": "0:18:41", "throughput": "997.47", "total_tokens": 36278454}
293
+ {"current_steps": 1465, "total_steps": 1505, "loss": 0.0017, "learning_rate": 8.709724293513854e-08, "epoch": 33.971014492753625, "percentage": 97.34, "elapsed_time": "10:08:27", "remaining_time": "0:16:36", "throughput": "997.29", "total_tokens": 36408834}
294
+ {"current_steps": 1470, "total_steps": 1505, "loss": 0.0015, "learning_rate": 6.66929084112089e-08, "epoch": 34.08695652173913, "percentage": 97.67, "elapsed_time": "10:10:58", "remaining_time": "0:14:32", "throughput": "997.05", "total_tokens": 36550538}
295
+ {"current_steps": 1475, "total_steps": 1505, "loss": 0.0018, "learning_rate": 4.900465476393168e-08, "epoch": 34.20289855072464, "percentage": 98.01, "elapsed_time": "10:12:25", "remaining_time": "0:12:27", "throughput": "997.33", "total_tokens": 36647436}
296
+ {"current_steps": 1480, "total_steps": 1505, "loss": 0.0024, "learning_rate": 3.403440884269526e-08, "epoch": 34.31884057971015, "percentage": 98.34, "elapsed_time": "10:14:46", "remaining_time": "0:10:23", "throughput": "997.25", "total_tokens": 36785387}
297
+ {"current_steps": 1485, "total_steps": 1505, "loss": 0.0021, "learning_rate": 2.1783801413866046e-08, "epoch": 34.43478260869565, "percentage": 98.67, "elapsed_time": "10:16:51", "remaining_time": "0:08:18", "throughput": "997.41", "total_tokens": 36915606}
298
+ {"current_steps": 1490, "total_steps": 1505, "loss": 0.0035, "learning_rate": 1.2254166983152737e-08, "epoch": 34.55072463768116, "percentage": 99.0, "elapsed_time": "10:18:46", "remaining_time": "0:06:13", "throughput": "997.57", "total_tokens": 37036117}
299
+ {"current_steps": 1495, "total_steps": 1505, "loss": 0.0016, "learning_rate": 5.446543650219904e-09, "epoch": 34.666666666666664, "percentage": 99.34, "elapsed_time": "10:20:49", "remaining_time": "0:04:09", "throughput": "997.75", "total_tokens": 37165587}
300
+ {"current_steps": 1500, "total_steps": 1505, "loss": 0.0015, "learning_rate": 1.3616729956228425e-09, "epoch": 34.78260869565217, "percentage": 99.67, "elapsed_time": "10:22:54", "remaining_time": "0:02:04", "throughput": "997.77", "total_tokens": 37290827}
301
+ {"current_steps": 1505, "total_steps": 1505, "loss": 0.0053, "learning_rate": 0.0, "epoch": 34.89855072463768, "percentage": 100.0, "elapsed_time": "10:25:10", "remaining_time": "0:00:00", "throughput": "997.38", "total_tokens": 37412688}
302
+ {"current_steps": 1505, "total_steps": 1505, "epoch": 34.89855072463768, "percentage": 100.0, "elapsed_time": "10:25:10", "remaining_time": "0:00:00", "throughput": "997.38", "total_tokens": 37412688}
trainer_state.json ADDED
@@ -0,0 +1,2451 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 34.89855072463768,
5
+ "eval_steps": 500,
6
+ "global_step": 1505,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.11594202898550725,
13
+ "grad_norm": 7.532149791717529,
14
+ "learning_rate": 4.999863832700438e-05,
15
+ "loss": 3.8116,
16
+ "num_input_tokens_seen": 106929,
17
+ "step": 5
18
+ },
19
+ {
20
+ "epoch": 0.2318840579710145,
21
+ "grad_norm": 4.141716480255127,
22
+ "learning_rate": 4.999455345634978e-05,
23
+ "loss": 3.6928,
24
+ "num_input_tokens_seen": 225964,
25
+ "step": 10
26
+ },
27
+ {
28
+ "epoch": 0.34782608695652173,
29
+ "grad_norm": 3.470097780227661,
30
+ "learning_rate": 4.9987745833016855e-05,
31
+ "loss": 3.6227,
32
+ "num_input_tokens_seen": 362264,
33
+ "step": 15
34
+ },
35
+ {
36
+ "epoch": 0.463768115942029,
37
+ "grad_norm": 3.4544646739959717,
38
+ "learning_rate": 4.9978216198586135e-05,
39
+ "loss": 3.601,
40
+ "num_input_tokens_seen": 477807,
41
+ "step": 20
42
+ },
43
+ {
44
+ "epoch": 0.5797101449275363,
45
+ "grad_norm": 3.249224901199341,
46
+ "learning_rate": 4.996596559115731e-05,
47
+ "loss": 3.539,
48
+ "num_input_tokens_seen": 588900,
49
+ "step": 25
50
+ },
51
+ {
52
+ "epoch": 0.6956521739130435,
53
+ "grad_norm": 3.395056962966919,
54
+ "learning_rate": 4.995099534523607e-05,
55
+ "loss": 3.4956,
56
+ "num_input_tokens_seen": 706077,
57
+ "step": 30
58
+ },
59
+ {
60
+ "epoch": 0.8115942028985508,
61
+ "grad_norm": 3.997875213623047,
62
+ "learning_rate": 4.9933307091588796e-05,
63
+ "loss": 3.5044,
64
+ "num_input_tokens_seen": 853504,
65
+ "step": 35
66
+ },
67
+ {
68
+ "epoch": 0.927536231884058,
69
+ "grad_norm": 3.5168681144714355,
70
+ "learning_rate": 4.991290275706486e-05,
71
+ "loss": 3.4324,
72
+ "num_input_tokens_seen": 990472,
73
+ "step": 40
74
+ },
75
+ {
76
+ "epoch": 1.0434782608695652,
77
+ "grad_norm": 7.144646167755127,
78
+ "learning_rate": 4.988978456438678e-05,
79
+ "loss": 3.2542,
80
+ "num_input_tokens_seen": 1125870,
81
+ "step": 45
82
+ },
83
+ {
84
+ "epoch": 1.1594202898550725,
85
+ "grad_norm": 3.257103681564331,
86
+ "learning_rate": 4.986395503190805e-05,
87
+ "loss": 2.9024,
88
+ "num_input_tokens_seen": 1249877,
89
+ "step": 50
90
+ },
91
+ {
92
+ "epoch": 1.2753623188405796,
93
+ "grad_norm": 3.3208603858947754,
94
+ "learning_rate": 4.983541697333881e-05,
95
+ "loss": 2.8069,
96
+ "num_input_tokens_seen": 1375193,
97
+ "step": 55
98
+ },
99
+ {
100
+ "epoch": 1.391304347826087,
101
+ "grad_norm": 4.378167629241943,
102
+ "learning_rate": 4.980417349743936e-05,
103
+ "loss": 2.75,
104
+ "num_input_tokens_seen": 1489716,
105
+ "step": 60
106
+ },
107
+ {
108
+ "epoch": 1.5072463768115942,
109
+ "grad_norm": 4.321849822998047,
110
+ "learning_rate": 4.9770228007681494e-05,
111
+ "loss": 2.7329,
112
+ "num_input_tokens_seen": 1600483,
113
+ "step": 65
114
+ },
115
+ {
116
+ "epoch": 1.6231884057971016,
117
+ "grad_norm": 3.6365067958831787,
118
+ "learning_rate": 4.973358420187776e-05,
119
+ "loss": 2.8212,
120
+ "num_input_tokens_seen": 1731315,
121
+ "step": 70
122
+ },
123
+ {
124
+ "epoch": 1.7391304347826086,
125
+ "grad_norm": 3.74035906791687,
126
+ "learning_rate": 4.9694246071778604e-05,
127
+ "loss": 2.7935,
128
+ "num_input_tokens_seen": 1858269,
129
+ "step": 75
130
+ },
131
+ {
132
+ "epoch": 1.855072463768116,
133
+ "grad_norm": 23.26426124572754,
134
+ "learning_rate": 4.9652217902637596e-05,
135
+ "loss": 2.7305,
136
+ "num_input_tokens_seen": 1984587,
137
+ "step": 80
138
+ },
139
+ {
140
+ "epoch": 1.971014492753623,
141
+ "grad_norm": 4.870578289031982,
142
+ "learning_rate": 4.9607504272744575e-05,
143
+ "loss": 2.6482,
144
+ "num_input_tokens_seen": 2109391,
145
+ "step": 85
146
+ },
147
+ {
148
+ "epoch": 2.0869565217391304,
149
+ "grad_norm": 4.419096946716309,
150
+ "learning_rate": 4.956011005292692e-05,
151
+ "loss": 2.4292,
152
+ "num_input_tokens_seen": 2246413,
153
+ "step": 90
154
+ },
155
+ {
156
+ "epoch": 2.2028985507246377,
157
+ "grad_norm": 18.216915130615234,
158
+ "learning_rate": 4.951004040601898e-05,
159
+ "loss": 2.1416,
160
+ "num_input_tokens_seen": 2386890,
161
+ "step": 95
162
+ },
163
+ {
164
+ "epoch": 2.318840579710145,
165
+ "grad_norm": 4.4814581871032715,
166
+ "learning_rate": 4.945730078629964e-05,
167
+ "loss": 2.2847,
168
+ "num_input_tokens_seen": 2522302,
169
+ "step": 100
170
+ },
171
+ {
172
+ "epoch": 2.4347826086956523,
173
+ "grad_norm": 406.9701232910156,
174
+ "learning_rate": 4.9401896938898185e-05,
175
+ "loss": 2.0944,
176
+ "num_input_tokens_seen": 2642208,
177
+ "step": 105
178
+ },
179
+ {
180
+ "epoch": 2.550724637681159,
181
+ "grad_norm": 3.5890581607818604,
182
+ "learning_rate": 4.934383489916843e-05,
183
+ "loss": 2.2862,
184
+ "num_input_tokens_seen": 2780587,
185
+ "step": 110
186
+ },
187
+ {
188
+ "epoch": 2.6666666666666665,
189
+ "grad_norm": 5.334541320800781,
190
+ "learning_rate": 4.928312099203131e-05,
191
+ "loss": 2.105,
192
+ "num_input_tokens_seen": 2885320,
193
+ "step": 115
194
+ },
195
+ {
196
+ "epoch": 2.782608695652174,
197
+ "grad_norm": 5.464664936065674,
198
+ "learning_rate": 4.921976183128585e-05,
199
+ "loss": 2.0287,
200
+ "num_input_tokens_seen": 2996923,
201
+ "step": 120
202
+ },
203
+ {
204
+ "epoch": 2.898550724637681,
205
+ "grad_norm": 4.113780975341797,
206
+ "learning_rate": 4.9153764318888706e-05,
207
+ "loss": 2.0162,
208
+ "num_input_tokens_seen": 3102391,
209
+ "step": 125
210
+ },
211
+ {
212
+ "epoch": 3.0144927536231885,
213
+ "grad_norm": 6.009971618652344,
214
+ "learning_rate": 4.908513564420231e-05,
215
+ "loss": 2.2464,
216
+ "num_input_tokens_seen": 3233443,
217
+ "step": 130
218
+ },
219
+ {
220
+ "epoch": 3.130434782608696,
221
+ "grad_norm": 10.397327423095703,
222
+ "learning_rate": 4.90138832832117e-05,
223
+ "loss": 1.6561,
224
+ "num_input_tokens_seen": 3358966,
225
+ "step": 135
226
+ },
227
+ {
228
+ "epoch": 3.246376811594203,
229
+ "grad_norm": 4.9139556884765625,
230
+ "learning_rate": 4.894001499771015e-05,
231
+ "loss": 1.6113,
232
+ "num_input_tokens_seen": 3490069,
233
+ "step": 140
234
+ },
235
+ {
236
+ "epoch": 3.36231884057971,
237
+ "grad_norm": 4.146034240722656,
238
+ "learning_rate": 4.886353883445363e-05,
239
+ "loss": 1.6235,
240
+ "num_input_tokens_seen": 3609842,
241
+ "step": 145
242
+ },
243
+ {
244
+ "epoch": 3.4782608695652173,
245
+ "grad_norm": 4.301880359649658,
246
+ "learning_rate": 4.878446312428424e-05,
247
+ "loss": 1.7873,
248
+ "num_input_tokens_seen": 3751570,
249
+ "step": 150
250
+ },
251
+ {
252
+ "epoch": 3.5942028985507246,
253
+ "grad_norm": 3.9485158920288086,
254
+ "learning_rate": 4.8702796481222714e-05,
255
+ "loss": 1.3723,
256
+ "num_input_tokens_seen": 3865303,
257
+ "step": 155
258
+ },
259
+ {
260
+ "epoch": 3.710144927536232,
261
+ "grad_norm": 4.183668613433838,
262
+ "learning_rate": 4.861854780153004e-05,
263
+ "loss": 1.6512,
264
+ "num_input_tokens_seen": 3991347,
265
+ "step": 160
266
+ },
267
+ {
268
+ "epoch": 3.8260869565217392,
269
+ "grad_norm": 5.1000471115112305,
270
+ "learning_rate": 4.853172626273841e-05,
271
+ "loss": 1.5524,
272
+ "num_input_tokens_seen": 4113654,
273
+ "step": 165
274
+ },
275
+ {
276
+ "epoch": 3.942028985507246,
277
+ "grad_norm": 4.142239570617676,
278
+ "learning_rate": 4.8442341322651385e-05,
279
+ "loss": 1.5954,
280
+ "num_input_tokens_seen": 4236348,
281
+ "step": 170
282
+ },
283
+ {
284
+ "epoch": 4.057971014492754,
285
+ "grad_norm": 3.8976669311523438,
286
+ "learning_rate": 4.83504027183137e-05,
287
+ "loss": 1.1652,
288
+ "num_input_tokens_seen": 4340378,
289
+ "step": 175
290
+ },
291
+ {
292
+ "epoch": 4.173913043478261,
293
+ "grad_norm": 5.923389911651611,
294
+ "learning_rate": 4.825592046495054e-05,
295
+ "loss": 1.1995,
296
+ "num_input_tokens_seen": 4473601,
297
+ "step": 180
298
+ },
299
+ {
300
+ "epoch": 4.2898550724637685,
301
+ "grad_norm": 4.220530033111572,
302
+ "learning_rate": 4.8158904854876555e-05,
303
+ "loss": 0.9431,
304
+ "num_input_tokens_seen": 4586911,
305
+ "step": 185
306
+ },
307
+ {
308
+ "epoch": 4.405797101449275,
309
+ "grad_norm": 5.896139144897461,
310
+ "learning_rate": 4.805936645637463e-05,
311
+ "loss": 1.1136,
312
+ "num_input_tokens_seen": 4702445,
313
+ "step": 190
314
+ },
315
+ {
316
+ "epoch": 4.521739130434782,
317
+ "grad_norm": 4.467094421386719,
318
+ "learning_rate": 4.795731611254473e-05,
319
+ "loss": 1.1509,
320
+ "num_input_tokens_seen": 4831301,
321
+ "step": 195
322
+ },
323
+ {
324
+ "epoch": 4.63768115942029,
325
+ "grad_norm": 4.232386112213135,
326
+ "learning_rate": 4.785276494012263e-05,
327
+ "loss": 0.9962,
328
+ "num_input_tokens_seen": 4941656,
329
+ "step": 200
330
+ },
331
+ {
332
+ "epoch": 4.753623188405797,
333
+ "grad_norm": 4.829892635345459,
334
+ "learning_rate": 4.7745724328269e-05,
335
+ "loss": 1.2377,
336
+ "num_input_tokens_seen": 5088437,
337
+ "step": 205
338
+ },
339
+ {
340
+ "epoch": 4.869565217391305,
341
+ "grad_norm": 4.1343913078308105,
342
+ "learning_rate": 4.763620593732867e-05,
343
+ "loss": 1.234,
344
+ "num_input_tokens_seen": 5219806,
345
+ "step": 210
346
+ },
347
+ {
348
+ "epoch": 4.9855072463768115,
349
+ "grad_norm": 4.9217729568481445,
350
+ "learning_rate": 4.752422169756048e-05,
351
+ "loss": 1.1453,
352
+ "num_input_tokens_seen": 5340222,
353
+ "step": 215
354
+ },
355
+ {
356
+ "epoch": 5.101449275362318,
357
+ "grad_norm": 4.4605865478515625,
358
+ "learning_rate": 4.740978380783765e-05,
359
+ "loss": 0.9056,
360
+ "num_input_tokens_seen": 5476315,
361
+ "step": 220
362
+ },
363
+ {
364
+ "epoch": 5.217391304347826,
365
+ "grad_norm": 4.396484375,
366
+ "learning_rate": 4.7292904734318924e-05,
367
+ "loss": 0.7349,
368
+ "num_input_tokens_seen": 5589951,
369
+ "step": 225
370
+ },
371
+ {
372
+ "epoch": 5.333333333333333,
373
+ "grad_norm": 4.053436279296875,
374
+ "learning_rate": 4.7173597209090534e-05,
375
+ "loss": 0.6968,
376
+ "num_input_tokens_seen": 5711449,
377
+ "step": 230
378
+ },
379
+ {
380
+ "epoch": 5.449275362318841,
381
+ "grad_norm": 5.303736209869385,
382
+ "learning_rate": 4.70518742287793e-05,
383
+ "loss": 0.851,
384
+ "num_input_tokens_seen": 5852650,
385
+ "step": 235
386
+ },
387
+ {
388
+ "epoch": 5.565217391304348,
389
+ "grad_norm": 3.70810866355896,
390
+ "learning_rate": 4.6927749053136866e-05,
391
+ "loss": 0.716,
392
+ "num_input_tokens_seen": 5972289,
393
+ "step": 240
394
+ },
395
+ {
396
+ "epoch": 5.681159420289855,
397
+ "grad_norm": 3.9204599857330322,
398
+ "learning_rate": 4.6801235203595195e-05,
399
+ "loss": 0.6384,
400
+ "num_input_tokens_seen": 6088707,
401
+ "step": 245
402
+ },
403
+ {
404
+ "epoch": 5.797101449275362,
405
+ "grad_norm": 4.06931209564209,
406
+ "learning_rate": 4.667234646179368e-05,
407
+ "loss": 0.7799,
408
+ "num_input_tokens_seen": 6215471,
409
+ "step": 250
410
+ },
411
+ {
412
+ "epoch": 5.913043478260869,
413
+ "grad_norm": 4.283618450164795,
414
+ "learning_rate": 4.654109686807787e-05,
415
+ "loss": 0.7923,
416
+ "num_input_tokens_seen": 6335935,
417
+ "step": 255
418
+ },
419
+ {
420
+ "epoch": 6.028985507246377,
421
+ "grad_norm": 4.719886302947998,
422
+ "learning_rate": 4.640750071996995e-05,
423
+ "loss": 0.7452,
424
+ "num_input_tokens_seen": 6463689,
425
+ "step": 260
426
+ },
427
+ {
428
+ "epoch": 6.144927536231884,
429
+ "grad_norm": 3.8415334224700928,
430
+ "learning_rate": 4.6271572570611296e-05,
431
+ "loss": 0.4085,
432
+ "num_input_tokens_seen": 6576954,
433
+ "step": 265
434
+ },
435
+ {
436
+ "epoch": 6.260869565217392,
437
+ "grad_norm": 4.19309663772583,
438
+ "learning_rate": 4.613332722717714e-05,
439
+ "loss": 0.5777,
440
+ "num_input_tokens_seen": 6714404,
441
+ "step": 270
442
+ },
443
+ {
444
+ "epoch": 6.3768115942028984,
445
+ "grad_norm": 5.686235427856445,
446
+ "learning_rate": 4.5992779749263546e-05,
447
+ "loss": 0.4718,
448
+ "num_input_tokens_seen": 6840385,
449
+ "step": 275
450
+ },
451
+ {
452
+ "epoch": 6.492753623188406,
453
+ "grad_norm": 3.2365808486938477,
454
+ "learning_rate": 4.584994544724695e-05,
455
+ "loss": 0.3723,
456
+ "num_input_tokens_seen": 6954269,
457
+ "step": 280
458
+ },
459
+ {
460
+ "epoch": 6.608695652173913,
461
+ "grad_norm": 3.530801296234131,
462
+ "learning_rate": 4.5704839880616296e-05,
463
+ "loss": 0.4453,
464
+ "num_input_tokens_seen": 7076143,
465
+ "step": 285
466
+ },
467
+ {
468
+ "epoch": 6.72463768115942,
469
+ "grad_norm": 3.2134931087493896,
470
+ "learning_rate": 4.5557478856278114e-05,
471
+ "loss": 0.5742,
472
+ "num_input_tokens_seen": 7201833,
473
+ "step": 290
474
+ },
475
+ {
476
+ "epoch": 6.840579710144928,
477
+ "grad_norm": 6.281985282897949,
478
+ "learning_rate": 4.5407878426834596e-05,
479
+ "loss": 0.5291,
480
+ "num_input_tokens_seen": 7330479,
481
+ "step": 295
482
+ },
483
+ {
484
+ "epoch": 6.956521739130435,
485
+ "grad_norm": 17.072542190551758,
486
+ "learning_rate": 4.5256054888834934e-05,
487
+ "loss": 0.4968,
488
+ "num_input_tokens_seen": 7449244,
489
+ "step": 300
490
+ },
491
+ {
492
+ "epoch": 7.072463768115942,
493
+ "grad_norm": 3.737456798553467,
494
+ "learning_rate": 4.5102024781000077e-05,
495
+ "loss": 0.421,
496
+ "num_input_tokens_seen": 7578947,
497
+ "step": 305
498
+ },
499
+ {
500
+ "epoch": 7.188405797101449,
501
+ "grad_norm": 2.6586523056030273,
502
+ "learning_rate": 4.4945804882421086e-05,
503
+ "loss": 0.2767,
504
+ "num_input_tokens_seen": 7691948,
505
+ "step": 310
506
+ },
507
+ {
508
+ "epoch": 7.304347826086957,
509
+ "grad_norm": 2.027702808380127,
510
+ "learning_rate": 4.478741221073136e-05,
511
+ "loss": 0.2922,
512
+ "num_input_tokens_seen": 7815786,
513
+ "step": 315
514
+ },
515
+ {
516
+ "epoch": 7.420289855072464,
517
+ "grad_norm": 3.4651787281036377,
518
+ "learning_rate": 4.4626864020252774e-05,
519
+ "loss": 0.2768,
520
+ "num_input_tokens_seen": 7925106,
521
+ "step": 320
522
+ },
523
+ {
524
+ "epoch": 7.536231884057971,
525
+ "grad_norm": 4.559577941894531,
526
+ "learning_rate": 4.446417780011618e-05,
527
+ "loss": 0.3281,
528
+ "num_input_tokens_seen": 8057202,
529
+ "step": 325
530
+ },
531
+ {
532
+ "epoch": 7.6521739130434785,
533
+ "grad_norm": 2.5885751247406006,
534
+ "learning_rate": 4.42993712723562e-05,
535
+ "loss": 0.3374,
536
+ "num_input_tokens_seen": 8187865,
537
+ "step": 330
538
+ },
539
+ {
540
+ "epoch": 7.768115942028985,
541
+ "grad_norm": 2.792222023010254,
542
+ "learning_rate": 4.413246238998069e-05,
543
+ "loss": 0.2491,
544
+ "num_input_tokens_seen": 8304605,
545
+ "step": 335
546
+ },
547
+ {
548
+ "epoch": 7.884057971014493,
549
+ "grad_norm": 3.610206127166748,
550
+ "learning_rate": 4.3963469335015085e-05,
551
+ "loss": 0.3893,
552
+ "num_input_tokens_seen": 8437319,
553
+ "step": 340
554
+ },
555
+ {
556
+ "epoch": 8.0,
557
+ "grad_norm": 4.31643533706665,
558
+ "learning_rate": 4.379241051652174e-05,
559
+ "loss": 0.3761,
560
+ "num_input_tokens_seen": 8573080,
561
+ "step": 345
562
+ },
563
+ {
564
+ "epoch": 8.115942028985508,
565
+ "grad_norm": 2.2834160327911377,
566
+ "learning_rate": 4.361930456859456e-05,
567
+ "loss": 0.236,
568
+ "num_input_tokens_seen": 8707741,
569
+ "step": 350
570
+ },
571
+ {
572
+ "epoch": 8.231884057971014,
573
+ "grad_norm": 2.6929121017456055,
574
+ "learning_rate": 4.34441703483291e-05,
575
+ "loss": 0.1584,
576
+ "num_input_tokens_seen": 8825774,
577
+ "step": 355
578
+ },
579
+ {
580
+ "epoch": 8.347826086956522,
581
+ "grad_norm": 3.8095011711120605,
582
+ "learning_rate": 4.326702693376844e-05,
583
+ "loss": 0.1481,
584
+ "num_input_tokens_seen": 8932249,
585
+ "step": 360
586
+ },
587
+ {
588
+ "epoch": 8.46376811594203,
589
+ "grad_norm": 2.6493489742279053,
590
+ "learning_rate": 4.308789362182492e-05,
591
+ "loss": 0.1743,
592
+ "num_input_tokens_seen": 9051548,
593
+ "step": 365
594
+ },
595
+ {
596
+ "epoch": 8.579710144927537,
597
+ "grad_norm": 30.796459197998047,
598
+ "learning_rate": 4.290678992617798e-05,
599
+ "loss": 0.3162,
600
+ "num_input_tokens_seen": 9197232,
601
+ "step": 370
602
+ },
603
+ {
604
+ "epoch": 8.695652173913043,
605
+ "grad_norm": 3.3164985179901123,
606
+ "learning_rate": 4.272373557514858e-05,
607
+ "loss": 0.2235,
608
+ "num_input_tokens_seen": 9317650,
609
+ "step": 375
610
+ },
611
+ {
612
+ "epoch": 8.81159420289855,
613
+ "grad_norm": 3.1515417098999023,
614
+ "learning_rate": 4.2538750509550054e-05,
615
+ "loss": 0.2504,
616
+ "num_input_tokens_seen": 9450765,
617
+ "step": 380
618
+ },
619
+ {
620
+ "epoch": 8.927536231884059,
621
+ "grad_norm": 3.3926901817321777,
622
+ "learning_rate": 4.235185488051585e-05,
623
+ "loss": 0.2136,
624
+ "num_input_tokens_seen": 9582961,
625
+ "step": 385
626
+ },
627
+ {
628
+ "epoch": 9.043478260869565,
629
+ "grad_norm": 4.670753002166748,
630
+ "learning_rate": 4.216306904730447e-05,
631
+ "loss": 0.1047,
632
+ "num_input_tokens_seen": 9678616,
633
+ "step": 390
634
+ },
635
+ {
636
+ "epoch": 9.159420289855072,
637
+ "grad_norm": 2.166652202606201,
638
+ "learning_rate": 4.1972413575081595e-05,
639
+ "loss": 0.1015,
640
+ "num_input_tokens_seen": 9788512,
641
+ "step": 395
642
+ },
643
+ {
644
+ "epoch": 9.27536231884058,
645
+ "grad_norm": 2.1161272525787354,
646
+ "learning_rate": 4.177990923267986e-05,
647
+ "loss": 0.1505,
648
+ "num_input_tokens_seen": 9916229,
649
+ "step": 400
650
+ },
651
+ {
652
+ "epoch": 9.391304347826088,
653
+ "grad_norm": 2.378105401992798,
654
+ "learning_rate": 4.158557699033644e-05,
655
+ "loss": 0.1135,
656
+ "num_input_tokens_seen": 10042697,
657
+ "step": 405
658
+ },
659
+ {
660
+ "epoch": 9.507246376811594,
661
+ "grad_norm": 2.5567331314086914,
662
+ "learning_rate": 4.138943801740865e-05,
663
+ "loss": 0.1832,
664
+ "num_input_tokens_seen": 10171849,
665
+ "step": 410
666
+ },
667
+ {
668
+ "epoch": 9.623188405797102,
669
+ "grad_norm": 2.022610902786255,
670
+ "learning_rate": 4.119151368006793e-05,
671
+ "loss": 0.1178,
672
+ "num_input_tokens_seen": 10281924,
673
+ "step": 415
674
+ },
675
+ {
676
+ "epoch": 9.73913043478261,
677
+ "grad_norm": 2.5578079223632812,
678
+ "learning_rate": 4.099182553897229e-05,
679
+ "loss": 0.1426,
680
+ "num_input_tokens_seen": 10418758,
681
+ "step": 420
682
+ },
683
+ {
684
+ "epoch": 9.855072463768115,
685
+ "grad_norm": 2.7287228107452393,
686
+ "learning_rate": 4.079039534691767e-05,
687
+ "loss": 0.1603,
688
+ "num_input_tokens_seen": 10558322,
689
+ "step": 425
690
+ },
691
+ {
692
+ "epoch": 9.971014492753623,
693
+ "grad_norm": 2.361532688140869,
694
+ "learning_rate": 4.058724504646834e-05,
695
+ "loss": 0.1548,
696
+ "num_input_tokens_seen": 10679536,
697
+ "step": 430
698
+ },
699
+ {
700
+ "epoch": 10.08695652173913,
701
+ "grad_norm": 1.8757002353668213,
702
+ "learning_rate": 4.0382396767566536e-05,
703
+ "loss": 0.1407,
704
+ "num_input_tokens_seen": 10821076,
705
+ "step": 435
706
+ },
707
+ {
708
+ "epoch": 10.202898550724637,
709
+ "grad_norm": 2.352725028991699,
710
+ "learning_rate": 4.017587282512181e-05,
711
+ "loss": 0.0791,
712
+ "num_input_tokens_seen": 10949771,
713
+ "step": 440
714
+ },
715
+ {
716
+ "epoch": 10.318840579710145,
717
+ "grad_norm": 1.7948939800262451,
718
+ "learning_rate": 3.9967695716580224e-05,
719
+ "loss": 0.0722,
720
+ "num_input_tokens_seen": 11072044,
721
+ "step": 445
722
+ },
723
+ {
724
+ "epoch": 10.434782608695652,
725
+ "grad_norm": 1.954727292060852,
726
+ "learning_rate": 3.975788811947351e-05,
727
+ "loss": 0.0655,
728
+ "num_input_tokens_seen": 11182627,
729
+ "step": 450
730
+ },
731
+ {
732
+ "epoch": 10.55072463768116,
733
+ "grad_norm": 2.143941640853882,
734
+ "learning_rate": 3.954647288894883e-05,
735
+ "loss": 0.0723,
736
+ "num_input_tokens_seen": 11303028,
737
+ "step": 455
738
+ },
739
+ {
740
+ "epoch": 10.666666666666666,
741
+ "grad_norm": 2.0527164936065674,
742
+ "learning_rate": 3.933347305527898e-05,
743
+ "loss": 0.0655,
744
+ "num_input_tokens_seen": 11415868,
745
+ "step": 460
746
+ },
747
+ {
748
+ "epoch": 10.782608695652174,
749
+ "grad_norm": 1.6390535831451416,
750
+ "learning_rate": 3.911891182135371e-05,
751
+ "loss": 0.1534,
752
+ "num_input_tokens_seen": 11555653,
753
+ "step": 465
754
+ },
755
+ {
756
+ "epoch": 10.898550724637682,
757
+ "grad_norm": 2.3848719596862793,
758
+ "learning_rate": 3.8902812560152066e-05,
759
+ "loss": 0.0947,
760
+ "num_input_tokens_seen": 11681065,
761
+ "step": 470
762
+ },
763
+ {
764
+ "epoch": 11.014492753623188,
765
+ "grad_norm": 2.2094757556915283,
766
+ "learning_rate": 3.868519881219631e-05,
767
+ "loss": 0.0868,
768
+ "num_input_tokens_seen": 11809957,
769
+ "step": 475
770
+ },
771
+ {
772
+ "epoch": 11.130434782608695,
773
+ "grad_norm": 4.137216567993164,
774
+ "learning_rate": 3.846609428298757e-05,
775
+ "loss": 0.0467,
776
+ "num_input_tokens_seen": 11937881,
777
+ "step": 480
778
+ },
779
+ {
780
+ "epoch": 11.246376811594203,
781
+ "grad_norm": 1.6658189296722412,
782
+ "learning_rate": 3.824552284042351e-05,
783
+ "loss": 0.0521,
784
+ "num_input_tokens_seen": 12048905,
785
+ "step": 485
786
+ },
787
+ {
788
+ "epoch": 11.36231884057971,
789
+ "grad_norm": 1.5732171535491943,
790
+ "learning_rate": 3.8023508512198256e-05,
791
+ "loss": 0.051,
792
+ "num_input_tokens_seen": 12185453,
793
+ "step": 490
794
+ },
795
+ {
796
+ "epoch": 11.478260869565217,
797
+ "grad_norm": 1.8459701538085938,
798
+ "learning_rate": 3.780007548318507e-05,
799
+ "loss": 0.0753,
800
+ "num_input_tokens_seen": 12310911,
801
+ "step": 495
802
+ },
803
+ {
804
+ "epoch": 11.594202898550725,
805
+ "grad_norm": 1.4724109172821045,
806
+ "learning_rate": 3.7575248092801686e-05,
807
+ "loss": 0.0601,
808
+ "num_input_tokens_seen": 12439708,
809
+ "step": 500
810
+ },
811
+ {
812
+ "epoch": 11.710144927536232,
813
+ "grad_norm": 2.4690322875976562,
814
+ "learning_rate": 3.734905083235901e-05,
815
+ "loss": 0.0533,
816
+ "num_input_tokens_seen": 12554467,
817
+ "step": 505
818
+ },
819
+ {
820
+ "epoch": 11.826086956521738,
821
+ "grad_norm": 2.369218111038208,
822
+ "learning_rate": 3.712150834239313e-05,
823
+ "loss": 0.064,
824
+ "num_input_tokens_seen": 12682329,
825
+ "step": 510
826
+ },
827
+ {
828
+ "epoch": 11.942028985507246,
829
+ "grad_norm": 1.6901100873947144,
830
+ "learning_rate": 3.689264540998116e-05,
831
+ "loss": 0.0755,
832
+ "num_input_tokens_seen": 12800852,
833
+ "step": 515
834
+ },
835
+ {
836
+ "epoch": 12.057971014492754,
837
+ "grad_norm": 1.303114414215088,
838
+ "learning_rate": 3.66624869660411e-05,
839
+ "loss": 0.0553,
840
+ "num_input_tokens_seen": 12917527,
841
+ "step": 520
842
+ },
843
+ {
844
+ "epoch": 12.173913043478262,
845
+ "grad_norm": 1.1986353397369385,
846
+ "learning_rate": 3.6431058082615964e-05,
847
+ "loss": 0.0355,
848
+ "num_input_tokens_seen": 13044774,
849
+ "step": 525
850
+ },
851
+ {
852
+ "epoch": 12.289855072463768,
853
+ "grad_norm": 1.5653026103973389,
854
+ "learning_rate": 3.619838397014263e-05,
855
+ "loss": 0.0413,
856
+ "num_input_tokens_seen": 13175692,
857
+ "step": 530
858
+ },
859
+ {
860
+ "epoch": 12.405797101449275,
861
+ "grad_norm": 1.0767664909362793,
862
+ "learning_rate": 3.5964489974705553e-05,
863
+ "loss": 0.0596,
864
+ "num_input_tokens_seen": 13293164,
865
+ "step": 535
866
+ },
867
+ {
868
+ "epoch": 12.521739130434783,
869
+ "grad_norm": 1.6005312204360962,
870
+ "learning_rate": 3.572940157527572e-05,
871
+ "loss": 0.0479,
872
+ "num_input_tokens_seen": 13417894,
873
+ "step": 540
874
+ },
875
+ {
876
+ "epoch": 12.63768115942029,
877
+ "grad_norm": 1.627121925354004,
878
+ "learning_rate": 3.549314438093515e-05,
879
+ "loss": 0.047,
880
+ "num_input_tokens_seen": 13551913,
881
+ "step": 545
882
+ },
883
+ {
884
+ "epoch": 12.753623188405797,
885
+ "grad_norm": 2.239276647567749,
886
+ "learning_rate": 3.525574412808717e-05,
887
+ "loss": 0.0492,
888
+ "num_input_tokens_seen": 13675309,
889
+ "step": 550
890
+ },
891
+ {
892
+ "epoch": 12.869565217391305,
893
+ "grad_norm": 1.5702998638153076,
894
+ "learning_rate": 3.501722667765286e-05,
895
+ "loss": 0.0471,
896
+ "num_input_tokens_seen": 13797691,
897
+ "step": 555
898
+ },
899
+ {
900
+ "epoch": 12.985507246376812,
901
+ "grad_norm": 1.8216972351074219,
902
+ "learning_rate": 3.47776180122539e-05,
903
+ "loss": 0.1041,
904
+ "num_input_tokens_seen": 13919770,
905
+ "step": 560
906
+ },
907
+ {
908
+ "epoch": 13.101449275362318,
909
+ "grad_norm": 0.9026144742965698,
910
+ "learning_rate": 3.453694423338225e-05,
911
+ "loss": 0.0282,
912
+ "num_input_tokens_seen": 14037673,
913
+ "step": 565
914
+ },
915
+ {
916
+ "epoch": 13.217391304347826,
917
+ "grad_norm": 1.4504765272140503,
918
+ "learning_rate": 3.4295231558556715e-05,
919
+ "loss": 0.0272,
920
+ "num_input_tokens_seen": 14167090,
921
+ "step": 570
922
+ },
923
+ {
924
+ "epoch": 13.333333333333334,
925
+ "grad_norm": 1.4278969764709473,
926
+ "learning_rate": 3.4052506318467084e-05,
927
+ "loss": 0.0342,
928
+ "num_input_tokens_seen": 14311710,
929
+ "step": 575
930
+ },
931
+ {
932
+ "epoch": 13.44927536231884,
933
+ "grad_norm": 1.1284997463226318,
934
+ "learning_rate": 3.3808794954105716e-05,
935
+ "loss": 0.0855,
936
+ "num_input_tokens_seen": 14404322,
937
+ "step": 580
938
+ },
939
+ {
940
+ "epoch": 13.565217391304348,
941
+ "grad_norm": 1.4915614128112793,
942
+ "learning_rate": 3.356412401388732e-05,
943
+ "loss": 0.0378,
944
+ "num_input_tokens_seen": 14530794,
945
+ "step": 585
946
+ },
947
+ {
948
+ "epoch": 13.681159420289855,
949
+ "grad_norm": 1.372157096862793,
950
+ "learning_rate": 3.3318520150756846e-05,
951
+ "loss": 0.0457,
952
+ "num_input_tokens_seen": 14637342,
953
+ "step": 590
954
+ },
955
+ {
956
+ "epoch": 13.797101449275363,
957
+ "grad_norm": 1.6492116451263428,
958
+ "learning_rate": 3.307201011928616e-05,
959
+ "loss": 0.0453,
960
+ "num_input_tokens_seen": 14787534,
961
+ "step": 595
962
+ },
963
+ {
964
+ "epoch": 13.91304347826087,
965
+ "grad_norm": 1.3583859205245972,
966
+ "learning_rate": 3.282462077275947e-05,
967
+ "loss": 0.0378,
968
+ "num_input_tokens_seen": 14909175,
969
+ "step": 600
970
+ },
971
+ {
972
+ "epoch": 14.028985507246377,
973
+ "grad_norm": 1.0751795768737793,
974
+ "learning_rate": 3.257637906024822e-05,
975
+ "loss": 0.0296,
976
+ "num_input_tokens_seen": 15030530,
977
+ "step": 605
978
+ },
979
+ {
980
+ "epoch": 14.144927536231885,
981
+ "grad_norm": 1.474602222442627,
982
+ "learning_rate": 3.2327312023675287e-05,
983
+ "loss": 0.0216,
984
+ "num_input_tokens_seen": 15148359,
985
+ "step": 610
986
+ },
987
+ {
988
+ "epoch": 14.26086956521739,
989
+ "grad_norm": 1.0749961137771606,
990
+ "learning_rate": 3.2077446794869295e-05,
991
+ "loss": 0.0299,
992
+ "num_input_tokens_seen": 15280749,
993
+ "step": 615
994
+ },
995
+ {
996
+ "epoch": 14.376811594202898,
997
+ "grad_norm": 1.4042794704437256,
998
+ "learning_rate": 3.1826810592609036e-05,
999
+ "loss": 0.0247,
1000
+ "num_input_tokens_seen": 15397167,
1001
+ "step": 620
1002
+ },
1003
+ {
1004
+ "epoch": 14.492753623188406,
1005
+ "grad_norm": 1.2280118465423584,
1006
+ "learning_rate": 3.157543071965835e-05,
1007
+ "loss": 0.0455,
1008
+ "num_input_tokens_seen": 15522794,
1009
+ "step": 625
1010
+ },
1011
+ {
1012
+ "epoch": 14.608695652173914,
1013
+ "grad_norm": 1.2819784879684448,
1014
+ "learning_rate": 3.132333455979202e-05,
1015
+ "loss": 0.0262,
1016
+ "num_input_tokens_seen": 15637987,
1017
+ "step": 630
1018
+ },
1019
+ {
1020
+ "epoch": 14.72463768115942,
1021
+ "grad_norm": 1.2691748142242432,
1022
+ "learning_rate": 3.107054957481271e-05,
1023
+ "loss": 0.0281,
1024
+ "num_input_tokens_seen": 15773163,
1025
+ "step": 635
1026
+ },
1027
+ {
1028
+ "epoch": 14.840579710144928,
1029
+ "grad_norm": 1.2752504348754883,
1030
+ "learning_rate": 3.081710330155942e-05,
1031
+ "loss": 0.0294,
1032
+ "num_input_tokens_seen": 15892659,
1033
+ "step": 640
1034
+ },
1035
+ {
1036
+ "epoch": 14.956521739130435,
1037
+ "grad_norm": 1.3479197025299072,
1038
+ "learning_rate": 3.056302334890786e-05,
1039
+ "loss": 0.0291,
1040
+ "num_input_tokens_seen": 16024576,
1041
+ "step": 645
1042
+ },
1043
+ {
1044
+ "epoch": 15.072463768115941,
1045
+ "grad_norm": 1.3151382207870483,
1046
+ "learning_rate": 3.030833739476285e-05,
1047
+ "loss": 0.0216,
1048
+ "num_input_tokens_seen": 16151987,
1049
+ "step": 650
1050
+ },
1051
+ {
1052
+ "epoch": 15.18840579710145,
1053
+ "grad_norm": 2.3882877826690674,
1054
+ "learning_rate": 3.0053073183043256e-05,
1055
+ "loss": 0.0218,
1056
+ "num_input_tokens_seen": 16278639,
1057
+ "step": 655
1058
+ },
1059
+ {
1060
+ "epoch": 15.304347826086957,
1061
+ "grad_norm": 0.9794278144836426,
1062
+ "learning_rate": 2.979725852065981e-05,
1063
+ "loss": 0.0283,
1064
+ "num_input_tokens_seen": 16414743,
1065
+ "step": 660
1066
+ },
1067
+ {
1068
+ "epoch": 15.420289855072463,
1069
+ "grad_norm": 0.8964869976043701,
1070
+ "learning_rate": 2.954092127448591e-05,
1071
+ "loss": 0.0259,
1072
+ "num_input_tokens_seen": 16529298,
1073
+ "step": 665
1074
+ },
1075
+ {
1076
+ "epoch": 15.53623188405797,
1077
+ "grad_norm": 1.1441810131072998,
1078
+ "learning_rate": 2.9284089368322045e-05,
1079
+ "loss": 0.0716,
1080
+ "num_input_tokens_seen": 16655909,
1081
+ "step": 670
1082
+ },
1083
+ {
1084
+ "epoch": 15.652173913043478,
1085
+ "grad_norm": 1.0959213972091675,
1086
+ "learning_rate": 2.9026790779853874e-05,
1087
+ "loss": 0.025,
1088
+ "num_input_tokens_seen": 16798263,
1089
+ "step": 675
1090
+ },
1091
+ {
1092
+ "epoch": 15.768115942028986,
1093
+ "grad_norm": 1.0119343996047974,
1094
+ "learning_rate": 2.876905353760459e-05,
1095
+ "loss": 0.0218,
1096
+ "num_input_tokens_seen": 16916827,
1097
+ "step": 680
1098
+ },
1099
+ {
1100
+ "epoch": 15.884057971014492,
1101
+ "grad_norm": 1.1373978853225708,
1102
+ "learning_rate": 2.8510905717881614e-05,
1103
+ "loss": 0.0231,
1104
+ "num_input_tokens_seen": 17040247,
1105
+ "step": 685
1106
+ },
1107
+ {
1108
+ "epoch": 16.0,
1109
+ "grad_norm": 1.2512497901916504,
1110
+ "learning_rate": 2.8252375441718137e-05,
1111
+ "loss": 0.0228,
1112
+ "num_input_tokens_seen": 17146160,
1113
+ "step": 690
1114
+ },
1115
+ {
1116
+ "epoch": 16.115942028985508,
1117
+ "grad_norm": 0.7410117387771606,
1118
+ "learning_rate": 2.7993490871809808e-05,
1119
+ "loss": 0.029,
1120
+ "num_input_tokens_seen": 17284643,
1121
+ "step": 695
1122
+ },
1123
+ {
1124
+ "epoch": 16.231884057971016,
1125
+ "grad_norm": 1.0934263467788696,
1126
+ "learning_rate": 2.7734280209446865e-05,
1127
+ "loss": 0.0199,
1128
+ "num_input_tokens_seen": 17426644,
1129
+ "step": 700
1130
+ },
1131
+ {
1132
+ "epoch": 16.347826086956523,
1133
+ "grad_norm": 1.0034395456314087,
1134
+ "learning_rate": 2.7474771691442018e-05,
1135
+ "loss": 0.0259,
1136
+ "num_input_tokens_seen": 17541812,
1137
+ "step": 705
1138
+ },
1139
+ {
1140
+ "epoch": 16.463768115942027,
1141
+ "grad_norm": 1.4287781715393066,
1142
+ "learning_rate": 2.721499358705458e-05,
1143
+ "loss": 0.021,
1144
+ "num_input_tokens_seen": 17667755,
1145
+ "step": 710
1146
+ },
1147
+ {
1148
+ "epoch": 16.579710144927535,
1149
+ "grad_norm": 1.0989606380462646,
1150
+ "learning_rate": 2.6954974194910888e-05,
1151
+ "loss": 0.0199,
1152
+ "num_input_tokens_seen": 17788162,
1153
+ "step": 715
1154
+ },
1155
+ {
1156
+ "epoch": 16.695652173913043,
1157
+ "grad_norm": 0.9687130451202393,
1158
+ "learning_rate": 2.6694741839921732e-05,
1159
+ "loss": 0.0189,
1160
+ "num_input_tokens_seen": 17911718,
1161
+ "step": 720
1162
+ },
1163
+ {
1164
+ "epoch": 16.81159420289855,
1165
+ "grad_norm": 1.143617033958435,
1166
+ "learning_rate": 2.6434324870196748e-05,
1167
+ "loss": 0.0169,
1168
+ "num_input_tokens_seen": 18018729,
1169
+ "step": 725
1170
+ },
1171
+ {
1172
+ "epoch": 16.92753623188406,
1173
+ "grad_norm": 1.1395140886306763,
1174
+ "learning_rate": 2.617375165395634e-05,
1175
+ "loss": 0.0209,
1176
+ "num_input_tokens_seen": 18139681,
1177
+ "step": 730
1178
+ },
1179
+ {
1180
+ "epoch": 17.043478260869566,
1181
+ "grad_norm": 0.881986677646637,
1182
+ "learning_rate": 2.5913050576441477e-05,
1183
+ "loss": 0.0201,
1184
+ "num_input_tokens_seen": 18278544,
1185
+ "step": 735
1186
+ },
1187
+ {
1188
+ "epoch": 17.159420289855074,
1189
+ "grad_norm": 0.8654409050941467,
1190
+ "learning_rate": 2.5652250036821523e-05,
1191
+ "loss": 0.017,
1192
+ "num_input_tokens_seen": 18396700,
1193
+ "step": 740
1194
+ },
1195
+ {
1196
+ "epoch": 17.27536231884058,
1197
+ "grad_norm": 0.9699842929840088,
1198
+ "learning_rate": 2.5391378445100644e-05,
1199
+ "loss": 0.0187,
1200
+ "num_input_tokens_seen": 18506229,
1201
+ "step": 745
1202
+ },
1203
+ {
1204
+ "epoch": 17.391304347826086,
1205
+ "grad_norm": 0.8799194693565369,
1206
+ "learning_rate": 2.5130464219022992e-05,
1207
+ "loss": 0.0242,
1208
+ "num_input_tokens_seen": 18621580,
1209
+ "step": 750
1210
+ },
1211
+ {
1212
+ "epoch": 17.507246376811594,
1213
+ "grad_norm": 0.9715821146965027,
1214
+ "learning_rate": 2.486953578097702e-05,
1215
+ "loss": 0.0153,
1216
+ "num_input_tokens_seen": 18748382,
1217
+ "step": 755
1218
+ },
1219
+ {
1220
+ "epoch": 17.6231884057971,
1221
+ "grad_norm": 0.8819458484649658,
1222
+ "learning_rate": 2.4608621554899362e-05,
1223
+ "loss": 0.0182,
1224
+ "num_input_tokens_seen": 18884730,
1225
+ "step": 760
1226
+ },
1227
+ {
1228
+ "epoch": 17.73913043478261,
1229
+ "grad_norm": 0.8835431933403015,
1230
+ "learning_rate": 2.4347749963178486e-05,
1231
+ "loss": 0.0143,
1232
+ "num_input_tokens_seen": 19003589,
1233
+ "step": 765
1234
+ },
1235
+ {
1236
+ "epoch": 17.855072463768117,
1237
+ "grad_norm": 0.780754566192627,
1238
+ "learning_rate": 2.4086949423558526e-05,
1239
+ "loss": 0.0164,
1240
+ "num_input_tokens_seen": 19136411,
1241
+ "step": 770
1242
+ },
1243
+ {
1244
+ "epoch": 17.971014492753625,
1245
+ "grad_norm": 0.7591371536254883,
1246
+ "learning_rate": 2.3826248346043663e-05,
1247
+ "loss": 0.0157,
1248
+ "num_input_tokens_seen": 19260436,
1249
+ "step": 775
1250
+ },
1251
+ {
1252
+ "epoch": 18.08695652173913,
1253
+ "grad_norm": 0.673797070980072,
1254
+ "learning_rate": 2.356567512980326e-05,
1255
+ "loss": 0.0304,
1256
+ "num_input_tokens_seen": 19388733,
1257
+ "step": 780
1258
+ },
1259
+ {
1260
+ "epoch": 18.202898550724637,
1261
+ "grad_norm": 0.4008718729019165,
1262
+ "learning_rate": 2.3305258160078274e-05,
1263
+ "loss": 0.009,
1264
+ "num_input_tokens_seen": 19531204,
1265
+ "step": 785
1266
+ },
1267
+ {
1268
+ "epoch": 18.318840579710145,
1269
+ "grad_norm": 0.6676005125045776,
1270
+ "learning_rate": 2.3045025805089118e-05,
1271
+ "loss": 0.0105,
1272
+ "num_input_tokens_seen": 19624608,
1273
+ "step": 790
1274
+ },
1275
+ {
1276
+ "epoch": 18.434782608695652,
1277
+ "grad_norm": 0.6956990957260132,
1278
+ "learning_rate": 2.278500641294543e-05,
1279
+ "loss": 0.0104,
1280
+ "num_input_tokens_seen": 19751062,
1281
+ "step": 795
1282
+ },
1283
+ {
1284
+ "epoch": 18.55072463768116,
1285
+ "grad_norm": 0.80479896068573,
1286
+ "learning_rate": 2.252522830855798e-05,
1287
+ "loss": 0.0103,
1288
+ "num_input_tokens_seen": 19879837,
1289
+ "step": 800
1290
+ },
1291
+ {
1292
+ "epoch": 18.666666666666668,
1293
+ "grad_norm": 0.7206840515136719,
1294
+ "learning_rate": 2.2265719790553147e-05,
1295
+ "loss": 0.0107,
1296
+ "num_input_tokens_seen": 20019385,
1297
+ "step": 805
1298
+ },
1299
+ {
1300
+ "epoch": 18.782608695652176,
1301
+ "grad_norm": 0.6994977593421936,
1302
+ "learning_rate": 2.2006509128190195e-05,
1303
+ "loss": 0.0269,
1304
+ "num_input_tokens_seen": 20138003,
1305
+ "step": 810
1306
+ },
1307
+ {
1308
+ "epoch": 18.89855072463768,
1309
+ "grad_norm": 0.5642988681793213,
1310
+ "learning_rate": 2.174762455828187e-05,
1311
+ "loss": 0.0086,
1312
+ "num_input_tokens_seen": 20260523,
1313
+ "step": 815
1314
+ },
1315
+ {
1316
+ "epoch": 19.014492753623188,
1317
+ "grad_norm": 0.5547834038734436,
1318
+ "learning_rate": 2.1489094282118395e-05,
1319
+ "loss": 0.0133,
1320
+ "num_input_tokens_seen": 20375322,
1321
+ "step": 820
1322
+ },
1323
+ {
1324
+ "epoch": 19.130434782608695,
1325
+ "grad_norm": 0.48678871989250183,
1326
+ "learning_rate": 2.123094646239541e-05,
1327
+ "loss": 0.0114,
1328
+ "num_input_tokens_seen": 20477407,
1329
+ "step": 825
1330
+ },
1331
+ {
1332
+ "epoch": 19.246376811594203,
1333
+ "grad_norm": 0.4791460633277893,
1334
+ "learning_rate": 2.0973209220146135e-05,
1335
+ "loss": 0.007,
1336
+ "num_input_tokens_seen": 20605728,
1337
+ "step": 830
1338
+ },
1339
+ {
1340
+ "epoch": 19.36231884057971,
1341
+ "grad_norm": 1.1198338270187378,
1342
+ "learning_rate": 2.0715910631677968e-05,
1343
+ "loss": 0.0088,
1344
+ "num_input_tokens_seen": 20725799,
1345
+ "step": 835
1346
+ },
1347
+ {
1348
+ "epoch": 19.47826086956522,
1349
+ "grad_norm": 0.6645247936248779,
1350
+ "learning_rate": 2.0459078725514092e-05,
1351
+ "loss": 0.007,
1352
+ "num_input_tokens_seen": 20865534,
1353
+ "step": 840
1354
+ },
1355
+ {
1356
+ "epoch": 19.594202898550726,
1357
+ "grad_norm": 0.5324479341506958,
1358
+ "learning_rate": 2.020274147934019e-05,
1359
+ "loss": 0.0059,
1360
+ "num_input_tokens_seen": 20977913,
1361
+ "step": 845
1362
+ },
1363
+ {
1364
+ "epoch": 19.71014492753623,
1365
+ "grad_norm": 0.6183504462242126,
1366
+ "learning_rate": 1.9946926816956743e-05,
1367
+ "loss": 0.0069,
1368
+ "num_input_tokens_seen": 21102848,
1369
+ "step": 850
1370
+ },
1371
+ {
1372
+ "epoch": 19.82608695652174,
1373
+ "grad_norm": 0.6665703058242798,
1374
+ "learning_rate": 1.9691662605237166e-05,
1375
+ "loss": 0.008,
1376
+ "num_input_tokens_seen": 21243679,
1377
+ "step": 855
1378
+ },
1379
+ {
1380
+ "epoch": 19.942028985507246,
1381
+ "grad_norm": 0.3298584222793579,
1382
+ "learning_rate": 1.9436976651092144e-05,
1383
+ "loss": 0.0127,
1384
+ "num_input_tokens_seen": 21364202,
1385
+ "step": 860
1386
+ },
1387
+ {
1388
+ "epoch": 20.057971014492754,
1389
+ "grad_norm": 0.2818591296672821,
1390
+ "learning_rate": 1.9182896698440584e-05,
1391
+ "loss": 0.0059,
1392
+ "num_input_tokens_seen": 21496089,
1393
+ "step": 865
1394
+ },
1395
+ {
1396
+ "epoch": 20.17391304347826,
1397
+ "grad_norm": 0.6906440258026123,
1398
+ "learning_rate": 1.89294504251873e-05,
1399
+ "loss": 0.0046,
1400
+ "num_input_tokens_seen": 21603193,
1401
+ "step": 870
1402
+ },
1403
+ {
1404
+ "epoch": 20.28985507246377,
1405
+ "grad_norm": 0.33482542634010315,
1406
+ "learning_rate": 1.867666544020798e-05,
1407
+ "loss": 0.0058,
1408
+ "num_input_tokens_seen": 21742062,
1409
+ "step": 875
1410
+ },
1411
+ {
1412
+ "epoch": 20.405797101449274,
1413
+ "grad_norm": 2.443847417831421,
1414
+ "learning_rate": 1.8424569280341653e-05,
1415
+ "loss": 0.0082,
1416
+ "num_input_tokens_seen": 21869307,
1417
+ "step": 880
1418
+ },
1419
+ {
1420
+ "epoch": 20.52173913043478,
1421
+ "grad_norm": 0.43886587023735046,
1422
+ "learning_rate": 1.817318940739098e-05,
1423
+ "loss": 0.0148,
1424
+ "num_input_tokens_seen": 21992573,
1425
+ "step": 885
1426
+ },
1427
+ {
1428
+ "epoch": 20.63768115942029,
1429
+ "grad_norm": 0.93570876121521,
1430
+ "learning_rate": 1.7922553205130707e-05,
1431
+ "loss": 0.0064,
1432
+ "num_input_tokens_seen": 22101845,
1433
+ "step": 890
1434
+ },
1435
+ {
1436
+ "epoch": 20.753623188405797,
1437
+ "grad_norm": 1176.9595947265625,
1438
+ "learning_rate": 1.767268797632472e-05,
1439
+ "loss": 0.008,
1440
+ "num_input_tokens_seen": 22230253,
1441
+ "step": 895
1442
+ },
1443
+ {
1444
+ "epoch": 20.869565217391305,
1445
+ "grad_norm": 0.35642215609550476,
1446
+ "learning_rate": 1.7423620939751788e-05,
1447
+ "loss": 0.0053,
1448
+ "num_input_tokens_seen": 22373454,
1449
+ "step": 900
1450
+ },
1451
+ {
1452
+ "epoch": 20.985507246376812,
1453
+ "grad_norm": 0.39736178517341614,
1454
+ "learning_rate": 1.7175379227240523e-05,
1455
+ "loss": 0.0054,
1456
+ "num_input_tokens_seen": 22493123,
1457
+ "step": 905
1458
+ },
1459
+ {
1460
+ "epoch": 21.10144927536232,
1461
+ "grad_norm": 0.5092463493347168,
1462
+ "learning_rate": 1.692798988071385e-05,
1463
+ "loss": 0.0044,
1464
+ "num_input_tokens_seen": 22629005,
1465
+ "step": 910
1466
+ },
1467
+ {
1468
+ "epoch": 21.217391304347824,
1469
+ "grad_norm": 0.26361697912216187,
1470
+ "learning_rate": 1.6681479849243153e-05,
1471
+ "loss": 0.0043,
1472
+ "num_input_tokens_seen": 22752358,
1473
+ "step": 915
1474
+ },
1475
+ {
1476
+ "epoch": 21.333333333333332,
1477
+ "grad_norm": 0.19933666288852692,
1478
+ "learning_rate": 1.6435875986112685e-05,
1479
+ "loss": 0.0035,
1480
+ "num_input_tokens_seen": 22880349,
1481
+ "step": 920
1482
+ },
1483
+ {
1484
+ "epoch": 21.44927536231884,
1485
+ "grad_norm": 0.22622954845428467,
1486
+ "learning_rate": 1.6191205045894283e-05,
1487
+ "loss": 0.0044,
1488
+ "num_input_tokens_seen": 22987343,
1489
+ "step": 925
1490
+ },
1491
+ {
1492
+ "epoch": 21.565217391304348,
1493
+ "grad_norm": 0.30199098587036133,
1494
+ "learning_rate": 1.594749368153292e-05,
1495
+ "loss": 0.0178,
1496
+ "num_input_tokens_seen": 23113462,
1497
+ "step": 930
1498
+ },
1499
+ {
1500
+ "epoch": 21.681159420289855,
1501
+ "grad_norm": 0.9627483487129211,
1502
+ "learning_rate": 1.570476844144329e-05,
1503
+ "loss": 0.0089,
1504
+ "num_input_tokens_seen": 23221714,
1505
+ "step": 935
1506
+ },
1507
+ {
1508
+ "epoch": 21.797101449275363,
1509
+ "grad_norm": 0.27791452407836914,
1510
+ "learning_rate": 1.546305576661776e-05,
1511
+ "loss": 0.004,
1512
+ "num_input_tokens_seen": 23368857,
1513
+ "step": 940
1514
+ },
1515
+ {
1516
+ "epoch": 21.91304347826087,
1517
+ "grad_norm": 0.3269965648651123,
1518
+ "learning_rate": 1.5222381987746104e-05,
1519
+ "loss": 0.004,
1520
+ "num_input_tokens_seen": 23494483,
1521
+ "step": 945
1522
+ },
1523
+ {
1524
+ "epoch": 22.028985507246375,
1525
+ "grad_norm": 0.15966826677322388,
1526
+ "learning_rate": 1.4982773322347144e-05,
1527
+ "loss": 0.0034,
1528
+ "num_input_tokens_seen": 23605463,
1529
+ "step": 950
1530
+ },
1531
+ {
1532
+ "epoch": 22.144927536231883,
1533
+ "grad_norm": 0.3009255826473236,
1534
+ "learning_rate": 1.4744255871912823e-05,
1535
+ "loss": 0.0066,
1536
+ "num_input_tokens_seen": 23715776,
1537
+ "step": 955
1538
+ },
1539
+ {
1540
+ "epoch": 22.26086956521739,
1541
+ "grad_norm": 0.4215935170650482,
1542
+ "learning_rate": 1.4506855619064846e-05,
1543
+ "loss": 0.0034,
1544
+ "num_input_tokens_seen": 23841669,
1545
+ "step": 960
1546
+ },
1547
+ {
1548
+ "epoch": 22.3768115942029,
1549
+ "grad_norm": 0.20214155316352844,
1550
+ "learning_rate": 1.4270598424724292e-05,
1551
+ "loss": 0.0032,
1552
+ "num_input_tokens_seen": 23960567,
1553
+ "step": 965
1554
+ },
1555
+ {
1556
+ "epoch": 22.492753623188406,
1557
+ "grad_norm": 7.0683207511901855,
1558
+ "learning_rate": 1.4035510025294462e-05,
1559
+ "loss": 0.0124,
1560
+ "num_input_tokens_seen": 24074628,
1561
+ "step": 970
1562
+ },
1563
+ {
1564
+ "epoch": 22.608695652173914,
1565
+ "grad_norm": 0.20178793370723724,
1566
+ "learning_rate": 1.3801616029857378e-05,
1567
+ "loss": 0.0027,
1568
+ "num_input_tokens_seen": 24214324,
1569
+ "step": 975
1570
+ },
1571
+ {
1572
+ "epoch": 22.72463768115942,
1573
+ "grad_norm": 1.3855236768722534,
1574
+ "learning_rate": 1.3568941917384036e-05,
1575
+ "loss": 0.0037,
1576
+ "num_input_tokens_seen": 24326727,
1577
+ "step": 980
1578
+ },
1579
+ {
1580
+ "epoch": 22.840579710144926,
1581
+ "grad_norm": 0.18420317769050598,
1582
+ "learning_rate": 1.3337513033958904e-05,
1583
+ "loss": 0.0029,
1584
+ "num_input_tokens_seen": 24456961,
1585
+ "step": 985
1586
+ },
1587
+ {
1588
+ "epoch": 22.956521739130434,
1589
+ "grad_norm": 0.15907694399356842,
1590
+ "learning_rate": 1.310735459001884e-05,
1591
+ "loss": 0.0035,
1592
+ "num_input_tokens_seen": 24606652,
1593
+ "step": 990
1594
+ },
1595
+ {
1596
+ "epoch": 23.07246376811594,
1597
+ "grad_norm": 0.2548115849494934,
1598
+ "learning_rate": 1.2878491657606872e-05,
1599
+ "loss": 0.002,
1600
+ "num_input_tokens_seen": 24710410,
1601
+ "step": 995
1602
+ },
1603
+ {
1604
+ "epoch": 23.18840579710145,
1605
+ "grad_norm": 0.36587971448898315,
1606
+ "learning_rate": 1.2650949167640993e-05,
1607
+ "loss": 0.0023,
1608
+ "num_input_tokens_seen": 24831908,
1609
+ "step": 1000
1610
+ },
1611
+ {
1612
+ "epoch": 23.304347826086957,
1613
+ "grad_norm": 0.13662408292293549,
1614
+ "learning_rate": 1.2424751907198312e-05,
1615
+ "loss": 0.0031,
1616
+ "num_input_tokens_seen": 24951342,
1617
+ "step": 1005
1618
+ },
1619
+ {
1620
+ "epoch": 23.420289855072465,
1621
+ "grad_norm": 0.19979843497276306,
1622
+ "learning_rate": 1.2199924516814939e-05,
1623
+ "loss": 0.0027,
1624
+ "num_input_tokens_seen": 25088309,
1625
+ "step": 1010
1626
+ },
1627
+ {
1628
+ "epoch": 23.536231884057973,
1629
+ "grad_norm": 0.14170995354652405,
1630
+ "learning_rate": 1.1976491487801748e-05,
1631
+ "loss": 0.0124,
1632
+ "num_input_tokens_seen": 25216080,
1633
+ "step": 1015
1634
+ },
1635
+ {
1636
+ "epoch": 23.652173913043477,
1637
+ "grad_norm": 0.06863216310739517,
1638
+ "learning_rate": 1.1754477159576499e-05,
1639
+ "loss": 0.0023,
1640
+ "num_input_tokens_seen": 25326581,
1641
+ "step": 1020
1642
+ },
1643
+ {
1644
+ "epoch": 23.768115942028984,
1645
+ "grad_norm": 0.25133436918258667,
1646
+ "learning_rate": 1.1533905717012428e-05,
1647
+ "loss": 0.0027,
1648
+ "num_input_tokens_seen": 25477500,
1649
+ "step": 1025
1650
+ },
1651
+ {
1652
+ "epoch": 23.884057971014492,
1653
+ "grad_norm": 0.28348398208618164,
1654
+ "learning_rate": 1.1314801187803686e-05,
1655
+ "loss": 0.0041,
1656
+ "num_input_tokens_seen": 25601354,
1657
+ "step": 1030
1658
+ },
1659
+ {
1660
+ "epoch": 24.0,
1661
+ "grad_norm": 0.5024954676628113,
1662
+ "learning_rate": 1.1097187439847939e-05,
1663
+ "loss": 0.0021,
1664
+ "num_input_tokens_seen": 25719240,
1665
+ "step": 1035
1666
+ },
1667
+ {
1668
+ "epoch": 24.115942028985508,
1669
+ "grad_norm": 0.1774568408727646,
1670
+ "learning_rate": 1.088108817864629e-05,
1671
+ "loss": 0.0039,
1672
+ "num_input_tokens_seen": 25834910,
1673
+ "step": 1040
1674
+ },
1675
+ {
1676
+ "epoch": 24.231884057971016,
1677
+ "grad_norm": 0.08105342090129852,
1678
+ "learning_rate": 1.0666526944721016e-05,
1679
+ "loss": 0.0025,
1680
+ "num_input_tokens_seen": 25974530,
1681
+ "step": 1045
1682
+ },
1683
+ {
1684
+ "epoch": 24.347826086956523,
1685
+ "grad_norm": 0.13048779964447021,
1686
+ "learning_rate": 1.0453527111051184e-05,
1687
+ "loss": 0.002,
1688
+ "num_input_tokens_seen": 26104464,
1689
+ "step": 1050
1690
+ },
1691
+ {
1692
+ "epoch": 24.463768115942027,
1693
+ "grad_norm": 0.10774020105600357,
1694
+ "learning_rate": 1.0242111880526495e-05,
1695
+ "loss": 0.0024,
1696
+ "num_input_tokens_seen": 26251334,
1697
+ "step": 1055
1698
+ },
1699
+ {
1700
+ "epoch": 24.579710144927535,
1701
+ "grad_norm": 0.7494776248931885,
1702
+ "learning_rate": 1.003230428341979e-05,
1703
+ "loss": 0.0031,
1704
+ "num_input_tokens_seen": 26366561,
1705
+ "step": 1060
1706
+ },
1707
+ {
1708
+ "epoch": 24.695652173913043,
1709
+ "grad_norm": 0.3580308258533478,
1710
+ "learning_rate": 9.824127174878195e-06,
1711
+ "loss": 0.0022,
1712
+ "num_input_tokens_seen": 26486437,
1713
+ "step": 1065
1714
+ },
1715
+ {
1716
+ "epoch": 24.81159420289855,
1717
+ "grad_norm": 0.1473228931427002,
1718
+ "learning_rate": 9.617603232433475e-06,
1719
+ "loss": 0.0022,
1720
+ "num_input_tokens_seen": 26601526,
1721
+ "step": 1070
1722
+ },
1723
+ {
1724
+ "epoch": 24.92753623188406,
1725
+ "grad_norm": 0.11716706305742264,
1726
+ "learning_rate": 9.412754953531663e-06,
1727
+ "loss": 0.0109,
1728
+ "num_input_tokens_seen": 26727922,
1729
+ "step": 1075
1730
+ },
1731
+ {
1732
+ "epoch": 25.043478260869566,
1733
+ "grad_norm": 0.12043190747499466,
1734
+ "learning_rate": 9.209604653082326e-06,
1735
+ "loss": 0.0019,
1736
+ "num_input_tokens_seen": 26835621,
1737
+ "step": 1080
1738
+ },
1739
+ {
1740
+ "epoch": 25.159420289855074,
1741
+ "grad_norm": 0.1277165412902832,
1742
+ "learning_rate": 9.008174461027724e-06,
1743
+ "loss": 0.0016,
1744
+ "num_input_tokens_seen": 26955101,
1745
+ "step": 1085
1746
+ },
1747
+ {
1748
+ "epoch": 25.27536231884058,
1749
+ "grad_norm": 0.08892516791820526,
1750
+ "learning_rate": 8.808486319932083e-06,
1751
+ "loss": 0.002,
1752
+ "num_input_tokens_seen": 27077833,
1753
+ "step": 1090
1754
+ },
1755
+ {
1756
+ "epoch": 25.391304347826086,
1757
+ "grad_norm": 0.30754807591438293,
1758
+ "learning_rate": 8.610561982591357e-06,
1759
+ "loss": 0.0018,
1760
+ "num_input_tokens_seen": 27192758,
1761
+ "step": 1095
1762
+ },
1763
+ {
1764
+ "epoch": 25.507246376811594,
1765
+ "grad_norm": 0.7194050550460815,
1766
+ "learning_rate": 8.414423009663563e-06,
1767
+ "loss": 0.0028,
1768
+ "num_input_tokens_seen": 27324970,
1769
+ "step": 1100
1770
+ },
1771
+ {
1772
+ "epoch": 25.6231884057971,
1773
+ "grad_norm": 4777.61328125,
1774
+ "learning_rate": 8.220090767320137e-06,
1775
+ "loss": 0.0021,
1776
+ "num_input_tokens_seen": 27477531,
1777
+ "step": 1105
1778
+ },
1779
+ {
1780
+ "epoch": 25.73913043478261,
1781
+ "grad_norm": 2.280327081680298,
1782
+ "learning_rate": 8.027586424918412e-06,
1783
+ "loss": 0.0057,
1784
+ "num_input_tokens_seen": 27592035,
1785
+ "step": 1110
1786
+ },
1787
+ {
1788
+ "epoch": 25.855072463768117,
1789
+ "grad_norm": 0.13882993161678314,
1790
+ "learning_rate": 7.836930952695533e-06,
1791
+ "loss": 0.0067,
1792
+ "num_input_tokens_seen": 27712377,
1793
+ "step": 1115
1794
+ },
1795
+ {
1796
+ "epoch": 25.971014492753625,
1797
+ "grad_norm": 0.20987676084041595,
1798
+ "learning_rate": 7.648145119484153e-06,
1799
+ "loss": 0.002,
1800
+ "num_input_tokens_seen": 27834613,
1801
+ "step": 1120
1802
+ },
1803
+ {
1804
+ "epoch": 26.08695652173913,
1805
+ "grad_norm": 0.09795770049095154,
1806
+ "learning_rate": 7.461249490449954e-06,
1807
+ "loss": 0.0021,
1808
+ "num_input_tokens_seen": 27966996,
1809
+ "step": 1125
1810
+ },
1811
+ {
1812
+ "epoch": 26.202898550724637,
1813
+ "grad_norm": 0.14506971836090088,
1814
+ "learning_rate": 7.276264424851423e-06,
1815
+ "loss": 0.002,
1816
+ "num_input_tokens_seen": 28093538,
1817
+ "step": 1130
1818
+ },
1819
+ {
1820
+ "epoch": 26.318840579710145,
1821
+ "grad_norm": 0.08091314136981964,
1822
+ "learning_rate": 7.0932100738220265e-06,
1823
+ "loss": 0.0017,
1824
+ "num_input_tokens_seen": 28215579,
1825
+ "step": 1135
1826
+ },
1827
+ {
1828
+ "epoch": 26.434782608695652,
1829
+ "grad_norm": 0.22550061345100403,
1830
+ "learning_rate": 6.912106378175098e-06,
1831
+ "loss": 0.0014,
1832
+ "num_input_tokens_seen": 28344144,
1833
+ "step": 1140
1834
+ },
1835
+ {
1836
+ "epoch": 26.55072463768116,
1837
+ "grad_norm": 0.23987355828285217,
1838
+ "learning_rate": 6.732973066231563e-06,
1839
+ "loss": 0.0022,
1840
+ "num_input_tokens_seen": 28478650,
1841
+ "step": 1145
1842
+ },
1843
+ {
1844
+ "epoch": 26.666666666666668,
1845
+ "grad_norm": 0.1993756741285324,
1846
+ "learning_rate": 6.555829651670911e-06,
1847
+ "loss": 0.0023,
1848
+ "num_input_tokens_seen": 28593004,
1849
+ "step": 1150
1850
+ },
1851
+ {
1852
+ "epoch": 26.782608695652176,
1853
+ "grad_norm": 0.7184757590293884,
1854
+ "learning_rate": 6.380695431405456e-06,
1855
+ "loss": 0.0028,
1856
+ "num_input_tokens_seen": 28707392,
1857
+ "step": 1155
1858
+ },
1859
+ {
1860
+ "epoch": 26.89855072463768,
1861
+ "grad_norm": 0.06247011199593544,
1862
+ "learning_rate": 6.207589483478266e-06,
1863
+ "loss": 0.006,
1864
+ "num_input_tokens_seen": 28834902,
1865
+ "step": 1160
1866
+ },
1867
+ {
1868
+ "epoch": 27.014492753623188,
1869
+ "grad_norm": 0.11046591401100159,
1870
+ "learning_rate": 6.0365306649849214e-06,
1871
+ "loss": 0.0045,
1872
+ "num_input_tokens_seen": 28948812,
1873
+ "step": 1165
1874
+ },
1875
+ {
1876
+ "epoch": 27.130434782608695,
1877
+ "grad_norm": 0.12309098988771439,
1878
+ "learning_rate": 5.867537610019317e-06,
1879
+ "loss": 0.0019,
1880
+ "num_input_tokens_seen": 29078309,
1881
+ "step": 1170
1882
+ },
1883
+ {
1884
+ "epoch": 27.246376811594203,
1885
+ "grad_norm": 0.11428932845592499,
1886
+ "learning_rate": 5.700628727643806e-06,
1887
+ "loss": 0.002,
1888
+ "num_input_tokens_seen": 29211503,
1889
+ "step": 1175
1890
+ },
1891
+ {
1892
+ "epoch": 27.36231884057971,
1893
+ "grad_norm": 0.1093268170952797,
1894
+ "learning_rate": 5.53582219988382e-06,
1895
+ "loss": 0.0019,
1896
+ "num_input_tokens_seen": 29344489,
1897
+ "step": 1180
1898
+ },
1899
+ {
1900
+ "epoch": 27.47826086956522,
1901
+ "grad_norm": 0.2166384607553482,
1902
+ "learning_rate": 5.373135979747227e-06,
1903
+ "loss": 0.006,
1904
+ "num_input_tokens_seen": 29464082,
1905
+ "step": 1185
1906
+ },
1907
+ {
1908
+ "epoch": 27.594202898550726,
1909
+ "grad_norm": 0.15387850999832153,
1910
+ "learning_rate": 5.2125877892686496e-06,
1911
+ "loss": 0.0043,
1912
+ "num_input_tokens_seen": 29581124,
1913
+ "step": 1190
1914
+ },
1915
+ {
1916
+ "epoch": 27.71014492753623,
1917
+ "grad_norm": 0.11962082982063293,
1918
+ "learning_rate": 5.054195117578914e-06,
1919
+ "loss": 0.0019,
1920
+ "num_input_tokens_seen": 29696346,
1921
+ "step": 1195
1922
+ },
1923
+ {
1924
+ "epoch": 27.82608695652174,
1925
+ "grad_norm": 0.18724732100963593,
1926
+ "learning_rate": 4.897975218999926e-06,
1927
+ "loss": 0.002,
1928
+ "num_input_tokens_seen": 29815117,
1929
+ "step": 1200
1930
+ },
1931
+ {
1932
+ "epoch": 27.942028985507246,
1933
+ "grad_norm": 0.09917350113391876,
1934
+ "learning_rate": 4.743945111165068e-06,
1935
+ "loss": 0.0022,
1936
+ "num_input_tokens_seen": 29939175,
1937
+ "step": 1205
1938
+ },
1939
+ {
1940
+ "epoch": 28.057971014492754,
1941
+ "grad_norm": 0.08235369622707367,
1942
+ "learning_rate": 4.592121573165414e-06,
1943
+ "loss": 0.0016,
1944
+ "num_input_tokens_seen": 30079840,
1945
+ "step": 1210
1946
+ },
1947
+ {
1948
+ "epoch": 28.17391304347826,
1949
+ "grad_norm": 0.20488996803760529,
1950
+ "learning_rate": 4.442521143721892e-06,
1951
+ "loss": 0.0033,
1952
+ "num_input_tokens_seen": 30192219,
1953
+ "step": 1215
1954
+ },
1955
+ {
1956
+ "epoch": 28.28985507246377,
1957
+ "grad_norm": 0.05383768677711487,
1958
+ "learning_rate": 4.295160119383712e-06,
1959
+ "loss": 0.0018,
1960
+ "num_input_tokens_seen": 30330969,
1961
+ "step": 1220
1962
+ },
1963
+ {
1964
+ "epoch": 28.405797101449274,
1965
+ "grad_norm": 0.14237363636493683,
1966
+ "learning_rate": 4.150054552753055e-06,
1967
+ "loss": 0.0018,
1968
+ "num_input_tokens_seen": 30453302,
1969
+ "step": 1225
1970
+ },
1971
+ {
1972
+ "epoch": 28.52173913043478,
1973
+ "grad_norm": 0.12487669289112091,
1974
+ "learning_rate": 4.007220250736454e-06,
1975
+ "loss": 0.0078,
1976
+ "num_input_tokens_seen": 30568943,
1977
+ "step": 1230
1978
+ },
1979
+ {
1980
+ "epoch": 28.63768115942029,
1981
+ "grad_norm": 0.1423855572938919,
1982
+ "learning_rate": 3.866672772822863e-06,
1983
+ "loss": 0.0019,
1984
+ "num_input_tokens_seen": 30696057,
1985
+ "step": 1235
1986
+ },
1987
+ {
1988
+ "epoch": 28.753623188405797,
1989
+ "grad_norm": 0.1543101817369461,
1990
+ "learning_rate": 3.7284274293887115e-06,
1991
+ "loss": 0.0019,
1992
+ "num_input_tokens_seen": 30815506,
1993
+ "step": 1240
1994
+ },
1995
+ {
1996
+ "epoch": 28.869565217391305,
1997
+ "grad_norm": 0.1402539610862732,
1998
+ "learning_rate": 3.592499280030057e-06,
1999
+ "loss": 0.0027,
2000
+ "num_input_tokens_seen": 30916446,
2001
+ "step": 1245
2002
+ },
2003
+ {
2004
+ "epoch": 28.985507246376812,
2005
+ "grad_norm": 0.26191645860671997,
2006
+ "learning_rate": 3.458903131922134e-06,
2007
+ "loss": 0.0023,
2008
+ "num_input_tokens_seen": 31054242,
2009
+ "step": 1250
2010
+ },
2011
+ {
2012
+ "epoch": 29.10144927536232,
2013
+ "grad_norm": 0.09874732792377472,
2014
+ "learning_rate": 3.3276535382063213e-06,
2015
+ "loss": 0.0029,
2016
+ "num_input_tokens_seen": 31189078,
2017
+ "step": 1255
2018
+ },
2019
+ {
2020
+ "epoch": 29.217391304347824,
2021
+ "grad_norm": 0.11677820980548859,
2022
+ "learning_rate": 3.198764796404807e-06,
2023
+ "loss": 0.0018,
2024
+ "num_input_tokens_seen": 31311374,
2025
+ "step": 1260
2026
+ },
2027
+ {
2028
+ "epoch": 29.333333333333332,
2029
+ "grad_norm": 0.05459802597761154,
2030
+ "learning_rate": 3.0722509468631392e-06,
2031
+ "loss": 0.0018,
2032
+ "num_input_tokens_seen": 31444681,
2033
+ "step": 1265
2034
+ },
2035
+ {
2036
+ "epoch": 29.44927536231884,
2037
+ "grad_norm": 0.1113714948296547,
2038
+ "learning_rate": 2.948125771220697e-06,
2039
+ "loss": 0.0018,
2040
+ "num_input_tokens_seen": 31567569,
2041
+ "step": 1270
2042
+ },
2043
+ {
2044
+ "epoch": 29.565217391304348,
2045
+ "grad_norm": 0.1816156655550003,
2046
+ "learning_rate": 2.8264027909094715e-06,
2047
+ "loss": 0.0019,
2048
+ "num_input_tokens_seen": 31697338,
2049
+ "step": 1275
2050
+ },
2051
+ {
2052
+ "epoch": 29.681159420289855,
2053
+ "grad_norm": 0.13639949262142181,
2054
+ "learning_rate": 2.707095265681081e-06,
2055
+ "loss": 0.0018,
2056
+ "num_input_tokens_seen": 31826661,
2057
+ "step": 1280
2058
+ },
2059
+ {
2060
+ "epoch": 29.797101449275363,
2061
+ "grad_norm": 0.05292365327477455,
2062
+ "learning_rate": 2.5902161921623454e-06,
2063
+ "loss": 0.0023,
2064
+ "num_input_tokens_seen": 31944680,
2065
+ "step": 1285
2066
+ },
2067
+ {
2068
+ "epoch": 29.91304347826087,
2069
+ "grad_norm": 0.16608740389347076,
2070
+ "learning_rate": 2.475778302439524e-06,
2071
+ "loss": 0.0078,
2072
+ "num_input_tokens_seen": 32067106,
2073
+ "step": 1290
2074
+ },
2075
+ {
2076
+ "epoch": 30.028985507246375,
2077
+ "grad_norm": 0.09277443587779999,
2078
+ "learning_rate": 2.3637940626713346e-06,
2079
+ "loss": 0.0018,
2080
+ "num_input_tokens_seen": 32184526,
2081
+ "step": 1295
2082
+ },
2083
+ {
2084
+ "epoch": 30.144927536231883,
2085
+ "grad_norm": 0.18832191824913025,
2086
+ "learning_rate": 2.254275671731007e-06,
2087
+ "loss": 0.0017,
2088
+ "num_input_tokens_seen": 32309423,
2089
+ "step": 1300
2090
+ },
2091
+ {
2092
+ "epoch": 30.26086956521739,
2093
+ "grad_norm": 0.1828456073999405,
2094
+ "learning_rate": 2.14723505987737e-06,
2095
+ "loss": 0.0071,
2096
+ "num_input_tokens_seen": 32429445,
2097
+ "step": 1305
2098
+ },
2099
+ {
2100
+ "epoch": 30.3768115942029,
2101
+ "grad_norm": 0.07503814995288849,
2102
+ "learning_rate": 2.0426838874552714e-06,
2103
+ "loss": 0.0016,
2104
+ "num_input_tokens_seen": 32540571,
2105
+ "step": 1310
2106
+ },
2107
+ {
2108
+ "epoch": 30.492753623188406,
2109
+ "grad_norm": 0.19047732651233673,
2110
+ "learning_rate": 1.9406335436253724e-06,
2111
+ "loss": 0.0018,
2112
+ "num_input_tokens_seen": 32665528,
2113
+ "step": 1315
2114
+ },
2115
+ {
2116
+ "epoch": 30.608695652173914,
2117
+ "grad_norm": 0.17791509628295898,
2118
+ "learning_rate": 1.8410951451234533e-06,
2119
+ "loss": 0.0017,
2120
+ "num_input_tokens_seen": 32800773,
2121
+ "step": 1320
2122
+ },
2123
+ {
2124
+ "epoch": 30.72463768115942,
2125
+ "grad_norm": 0.10698456317186356,
2126
+ "learning_rate": 1.7440795350494588e-06,
2127
+ "loss": 0.0017,
2128
+ "num_input_tokens_seen": 32928397,
2129
+ "step": 1325
2130
+ },
2131
+ {
2132
+ "epoch": 30.840579710144926,
2133
+ "grad_norm": 0.0963551327586174,
2134
+ "learning_rate": 1.649597281686302e-06,
2135
+ "loss": 0.0019,
2136
+ "num_input_tokens_seen": 33054819,
2137
+ "step": 1330
2138
+ },
2139
+ {
2140
+ "epoch": 30.956521739130434,
2141
+ "grad_norm": 0.24703514575958252,
2142
+ "learning_rate": 1.5576586773486195e-06,
2143
+ "loss": 0.0018,
2144
+ "num_input_tokens_seen": 33180616,
2145
+ "step": 1335
2146
+ },
2147
+ {
2148
+ "epoch": 31.07246376811594,
2149
+ "grad_norm": 0.12497910857200623,
2150
+ "learning_rate": 1.4682737372615967e-06,
2151
+ "loss": 0.0038,
2152
+ "num_input_tokens_seen": 33298041,
2153
+ "step": 1340
2154
+ },
2155
+ {
2156
+ "epoch": 31.18840579710145,
2157
+ "grad_norm": 0.18260960280895233,
2158
+ "learning_rate": 1.3814521984699596e-06,
2159
+ "loss": 0.0052,
2160
+ "num_input_tokens_seen": 33408343,
2161
+ "step": 1345
2162
+ },
2163
+ {
2164
+ "epoch": 31.304347826086957,
2165
+ "grad_norm": 0.13422255218029022,
2166
+ "learning_rate": 1.297203518777293e-06,
2167
+ "loss": 0.0018,
2168
+ "num_input_tokens_seen": 33545364,
2169
+ "step": 1350
2170
+ },
2171
+ {
2172
+ "epoch": 31.420289855072465,
2173
+ "grad_norm": 0.1285027116537094,
2174
+ "learning_rate": 1.2155368757157643e-06,
2175
+ "loss": 0.0019,
2176
+ "num_input_tokens_seen": 33652900,
2177
+ "step": 1355
2178
+ },
2179
+ {
2180
+ "epoch": 31.536231884057973,
2181
+ "grad_norm": 0.12832242250442505,
2182
+ "learning_rate": 1.1364611655463736e-06,
2183
+ "loss": 0.0019,
2184
+ "num_input_tokens_seen": 33768791,
2185
+ "step": 1360
2186
+ },
2187
+ {
2188
+ "epoch": 31.652173913043477,
2189
+ "grad_norm": 0.12093157321214676,
2190
+ "learning_rate": 1.0599850022898539e-06,
2191
+ "loss": 0.0017,
2192
+ "num_input_tokens_seen": 33892837,
2193
+ "step": 1365
2194
+ },
2195
+ {
2196
+ "epoch": 31.768115942028984,
2197
+ "grad_norm": 0.7227018475532532,
2198
+ "learning_rate": 9.861167167883046e-07,
2199
+ "loss": 0.0022,
2200
+ "num_input_tokens_seen": 34015288,
2201
+ "step": 1370
2202
+ },
2203
+ {
2204
+ "epoch": 31.884057971014492,
2205
+ "grad_norm": 2.143653631210327,
2206
+ "learning_rate": 9.148643557976955e-07,
2207
+ "loss": 0.0037,
2208
+ "num_input_tokens_seen": 34154884,
2209
+ "step": 1375
2210
+ },
2211
+ {
2212
+ "epoch": 32.0,
2213
+ "grad_norm": 0.17518474161624908,
2214
+ "learning_rate": 8.462356811112987e-07,
2215
+ "loss": 0.0019,
2216
+ "num_input_tokens_seen": 34292320,
2217
+ "step": 1380
2218
+ },
2219
+ {
2220
+ "epoch": 32.11594202898551,
2221
+ "grad_norm": 0.1274159997701645,
2222
+ "learning_rate": 7.802381687141535e-07,
2223
+ "loss": 0.0017,
2224
+ "num_input_tokens_seen": 34413850,
2225
+ "step": 1385
2226
+ },
2227
+ {
2228
+ "epoch": 32.231884057971016,
2229
+ "grad_norm": 0.11443401873111725,
2230
+ "learning_rate": 7.168790079686932e-07,
2231
+ "loss": 0.0018,
2232
+ "num_input_tokens_seen": 34547127,
2233
+ "step": 1390
2234
+ },
2235
+ {
2236
+ "epoch": 32.34782608695652,
2237
+ "grad_norm": 0.08239752799272537,
2238
+ "learning_rate": 6.561651008315738e-07,
2239
+ "loss": 0.0035,
2240
+ "num_input_tokens_seen": 34685112,
2241
+ "step": 1395
2242
+ },
2243
+ {
2244
+ "epoch": 32.46376811594203,
2245
+ "grad_norm": 0.7361220717430115,
2246
+ "learning_rate": 5.981030611018234e-07,
2247
+ "loss": 0.0063,
2248
+ "num_input_tokens_seen": 34810484,
2249
+ "step": 1400
2250
+ },
2251
+ {
2252
+ "epoch": 32.57971014492754,
2253
+ "grad_norm": 0.20323431491851807,
2254
+ "learning_rate": 5.426992137003622e-07,
2255
+ "loss": 0.0018,
2256
+ "num_input_tokens_seen": 34920531,
2257
+ "step": 1405
2258
+ },
2259
+ {
2260
+ "epoch": 32.69565217391305,
2261
+ "grad_norm": 0.11165229231119156,
2262
+ "learning_rate": 4.899595939810236e-07,
2263
+ "loss": 0.002,
2264
+ "num_input_tokens_seen": 35035657,
2265
+ "step": 1410
2266
+ },
2267
+ {
2268
+ "epoch": 32.81159420289855,
2269
+ "grad_norm": 0.15023387968540192,
2270
+ "learning_rate": 4.398899470730827e-07,
2271
+ "loss": 0.0017,
2272
+ "num_input_tokens_seen": 35167466,
2273
+ "step": 1415
2274
+ },
2275
+ {
2276
+ "epoch": 32.927536231884055,
2277
+ "grad_norm": 0.18479810655117035,
2278
+ "learning_rate": 3.9249572725543196e-07,
2279
+ "loss": 0.0016,
2280
+ "num_input_tokens_seen": 35296818,
2281
+ "step": 1420
2282
+ },
2283
+ {
2284
+ "epoch": 33.04347826086956,
2285
+ "grad_norm": 0.08527754247188568,
2286
+ "learning_rate": 3.477820973624063e-07,
2287
+ "loss": 0.0015,
2288
+ "num_input_tokens_seen": 35430399,
2289
+ "step": 1425
2290
+ },
2291
+ {
2292
+ "epoch": 33.15942028985507,
2293
+ "grad_norm": 0.16888481378555298,
2294
+ "learning_rate": 3.0575392822139726e-07,
2295
+ "loss": 0.0057,
2296
+ "num_input_tokens_seen": 35551540,
2297
+ "step": 1430
2298
+ },
2299
+ {
2300
+ "epoch": 33.27536231884058,
2301
+ "grad_norm": 0.18187086284160614,
2302
+ "learning_rate": 2.664157981222437e-07,
2303
+ "loss": 0.0016,
2304
+ "num_input_tokens_seen": 35676077,
2305
+ "step": 1435
2306
+ },
2307
+ {
2308
+ "epoch": 33.391304347826086,
2309
+ "grad_norm": 0.15047162771224976,
2310
+ "learning_rate": 2.297719923185032e-07,
2311
+ "loss": 0.0016,
2312
+ "num_input_tokens_seen": 35785127,
2313
+ "step": 1440
2314
+ },
2315
+ {
2316
+ "epoch": 33.507246376811594,
2317
+ "grad_norm": 0.12288761883974075,
2318
+ "learning_rate": 1.9582650256064205e-07,
2319
+ "loss": 0.0019,
2320
+ "num_input_tokens_seen": 35911682,
2321
+ "step": 1445
2322
+ },
2323
+ {
2324
+ "epoch": 33.6231884057971,
2325
+ "grad_norm": 0.22509098052978516,
2326
+ "learning_rate": 1.645830266611914e-07,
2327
+ "loss": 0.0017,
2328
+ "num_input_tokens_seen": 36030754,
2329
+ "step": 1450
2330
+ },
2331
+ {
2332
+ "epoch": 33.73913043478261,
2333
+ "grad_norm": 2.0408618450164795,
2334
+ "learning_rate": 1.3604496809195288e-07,
2335
+ "loss": 0.0042,
2336
+ "num_input_tokens_seen": 36146749,
2337
+ "step": 1455
2338
+ },
2339
+ {
2340
+ "epoch": 33.85507246376812,
2341
+ "grad_norm": 0.10705255717039108,
2342
+ "learning_rate": 1.1021543561322012e-07,
2343
+ "loss": 0.0017,
2344
+ "num_input_tokens_seen": 36278454,
2345
+ "step": 1460
2346
+ },
2347
+ {
2348
+ "epoch": 33.971014492753625,
2349
+ "grad_norm": 1876.0384521484375,
2350
+ "learning_rate": 8.709724293513854e-08,
2351
+ "loss": 0.0017,
2352
+ "num_input_tokens_seen": 36408834,
2353
+ "step": 1465
2354
+ },
2355
+ {
2356
+ "epoch": 34.08695652173913,
2357
+ "grad_norm": 0.1927630454301834,
2358
+ "learning_rate": 6.66929084112089e-08,
2359
+ "loss": 0.0015,
2360
+ "num_input_tokens_seen": 36550538,
2361
+ "step": 1470
2362
+ },
2363
+ {
2364
+ "epoch": 34.20289855072464,
2365
+ "grad_norm": 0.1668202131986618,
2366
+ "learning_rate": 4.900465476393168e-08,
2367
+ "loss": 0.0018,
2368
+ "num_input_tokens_seen": 36647436,
2369
+ "step": 1475
2370
+ },
2371
+ {
2372
+ "epoch": 34.31884057971015,
2373
+ "grad_norm": 0.7123565673828125,
2374
+ "learning_rate": 3.403440884269526e-08,
2375
+ "loss": 0.0024,
2376
+ "num_input_tokens_seen": 36785387,
2377
+ "step": 1480
2378
+ },
2379
+ {
2380
+ "epoch": 34.43478260869565,
2381
+ "grad_norm": 0.16973845660686493,
2382
+ "learning_rate": 2.1783801413866046e-08,
2383
+ "loss": 0.0021,
2384
+ "num_input_tokens_seen": 36915606,
2385
+ "step": 1485
2386
+ },
2387
+ {
2388
+ "epoch": 34.55072463768116,
2389
+ "grad_norm": 2.034724473953247,
2390
+ "learning_rate": 1.2254166983152737e-08,
2391
+ "loss": 0.0035,
2392
+ "num_input_tokens_seen": 37036117,
2393
+ "step": 1490
2394
+ },
2395
+ {
2396
+ "epoch": 34.666666666666664,
2397
+ "grad_norm": 0.155415877699852,
2398
+ "learning_rate": 5.446543650219904e-09,
2399
+ "loss": 0.0016,
2400
+ "num_input_tokens_seen": 37165587,
2401
+ "step": 1495
2402
+ },
2403
+ {
2404
+ "epoch": 34.78260869565217,
2405
+ "grad_norm": 0.10199662297964096,
2406
+ "learning_rate": 1.3616729956228425e-09,
2407
+ "loss": 0.0015,
2408
+ "num_input_tokens_seen": 37290827,
2409
+ "step": 1500
2410
+ },
2411
+ {
2412
+ "epoch": 34.89855072463768,
2413
+ "grad_norm": 0.14740267395973206,
2414
+ "learning_rate": 0.0,
2415
+ "loss": 0.0053,
2416
+ "num_input_tokens_seen": 37412688,
2417
+ "step": 1505
2418
+ },
2419
+ {
2420
+ "epoch": 34.89855072463768,
2421
+ "num_input_tokens_seen": 37412688,
2422
+ "step": 1505,
2423
+ "total_flos": 8.033958240027034e+16,
2424
+ "train_loss": 0.3889684765070578,
2425
+ "train_runtime": 37510.9602,
2426
+ "train_samples_per_second": 0.322,
2427
+ "train_steps_per_second": 0.04
2428
+ }
2429
+ ],
2430
+ "logging_steps": 5,
2431
+ "max_steps": 1505,
2432
+ "num_input_tokens_seen": 37412688,
2433
+ "num_train_epochs": 35,
2434
+ "save_steps": 100,
2435
+ "stateful_callbacks": {
2436
+ "TrainerControl": {
2437
+ "args": {
2438
+ "should_epoch_stop": false,
2439
+ "should_evaluate": false,
2440
+ "should_log": false,
2441
+ "should_save": true,
2442
+ "should_training_stop": false
2443
+ },
2444
+ "attributes": {}
2445
+ }
2446
+ },
2447
+ "total_flos": 8.033958240027034e+16,
2448
+ "train_batch_size": 1,
2449
+ "trial_name": null,
2450
+ "trial_params": null
2451
+ }
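The learning-rate trace above ends at exactly 0.0 on the final step (1505), as expected for a cosine schedule with no warmup over max_steps = 1505. The logged values can be reproduced from the closed form used by transformers' cosine scheduler (get_cosine_schedule_with_warmup with num_cycles = 0.5); a minimal sketch, assuming trainer_state.json has been downloaded locally:

```python
import json
import math

def cosine_lr(step, base_lr=5e-05, max_steps=1505, warmup=0):
    """Closed-form cosine decay with no warmup, matching
    transformers.get_cosine_schedule_with_warmup at num_cycles=0.5."""
    progress = (step - warmup) / (max_steps - warmup)
    return 0.5 * base_lr * (1.0 + math.cos(math.pi * progress))

with open("trainer_state.json", encoding="utf-8") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "learning_rate" in entry:  # the final entry is a summary without it
        expected = cosine_lr(entry["step"])
        assert math.isclose(entry["learning_rate"], expected, rel_tol=1e-6), entry

print("all logged learning rates match the cosine schedule")
```

As a cross-check, the per-step epoch increment in the log (8/345 ≈ 0.0231884 epochs per optimizer step) is consistent with the effective batch size of 8 over roughly 345 training samples, i.e. 43 optimizer steps per epoch (1505 steps / 35 epochs). The loss column is what plot_loss: true renders into the training_loss.png added below.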
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d1bf7e450e4a6d37229f59b5902ed4c9f65d4b8a73fe748056cfd0dc9b1d2f6
+ size 5304
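Note that this diff shows only a Git LFS pointer, not the serialized arguments themselves: the 5304-byte binary must first be fetched with `git lfs pull`. Once fetched, it can be inspected; a minimal sketch, assuming transformers is importable so the pickled class resolves (the exact class name is an assumption here):

```python
import torch

# training_args.bin is written with torch.save by the HF Trainer, so
# transformers must be installed for unpickling to resolve the class.
# On torch >= 2.6, weights_only defaults to True, hence the explicit flag.
args = torch.load("training_args.bin", weights_only=False)

print(type(args).__name__)           # e.g. Seq2SeqTrainingArguments (assumption)
print(args.learning_rate)            # 5e-05, matching the model card
print(args.lr_scheduler_type.value)  # "cosine"
```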
training_args.yaml ADDED
@@ -0,0 +1,27 @@
+ cutoff_len: 6100
+ dataset: longzu
+ dataset_dir: C:\AI\LLaMA-Factory\data
+ ddp_timeout: 180000000
+ do_train: true
+ finetuning_type: full
+ flash_attn: auto
+ gradient_accumulation_steps: 8
+ include_num_input_tokens_seen: true
+ learning_rate: 5.0e-05
+ logging_steps: 5
+ lr_scheduler_type: cosine
+ max_grad_norm: 1.0
+ max_samples: 100000
+ model_name_or_path: C:\AI\Qwen2_0.5B
+ num_train_epochs: 35.0
+ optim: adamw_torch
+ output_dir: saves\Qwen2-0.5B\full\train_2024-06-08-23-23-14
+ packing: false
+ per_device_train_batch_size: 1
+ plot_loss: true
+ preprocessing_num_workers: 16
+ report_to: none
+ save_steps: 100
+ stage: pt
+ template: default
+ warmup_steps: 0
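This YAML is the LLaMA-Factory configuration for the run: stage: pt selects continued pre-training on plain text (so the template setting is effectively unused), and the absolute Windows paths in dataset_dir, model_name_or_path, and output_dir would need to be adapted on another machine. A hedged sketch of relaunching the run, assuming a LLaMA-Factory installation recent enough to ship the llamafactory-cli entry point:

```python
import subprocess

# Equivalent to running `llamafactory-cli train training_args.yaml` in a
# shell; check=True raises CalledProcessError if training exits non-zero.
subprocess.run(["llamafactory-cli", "train", "training_args.yaml"], check=True)
```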
training_loss.png ADDED
vocab.json ADDED
The diff for this file is too large to render. See raw diff