Safetensors
qwen3
twilightsnow committed on
Commit 11bd08e · verified · 1 Parent(s): 79e746f

update files

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "</think>": 151668,
+ "</tool_call>": 151658,
+ "</tool_response>": 151666,
+ "<think>": 151667,
+ "<tool_call>": 151657,
+ "<tool_response>": 151665,
+ "<|box_end|>": 151649,
+ "<|box_start|>": 151648,
+ "<|endoftext|>": 151643,
+ "<|file_sep|>": 151664,
+ "<|fim_middle|>": 151660,
+ "<|fim_pad|>": 151662,
+ "<|fim_prefix|>": 151659,
+ "<|fim_suffix|>": 151661,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644,
+ "<|image_pad|>": 151655,
+ "<|object_ref_end|>": 151647,
+ "<|object_ref_start|>": 151646,
+ "<|quad_end|>": 151651,
+ "<|quad_start|>": 151650,
+ "<|repo_name|>": 151663,
+ "<|video_pad|>": 151656,
+ "<|vision_end|>": 151653,
+ "<|vision_pad|>": 151654,
+ "<|vision_start|>": 151652
+ }
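
A quick way to confirm these mappings is to round-trip a few of the added tokens through the tokenizer once the checkpoint is downloaded. The sketch below is a minimal check, assuming a local checkout at `./sft17` (a hypothetical path) and the `transformers.AutoTokenizer` API; the expected ids come directly from added_tokens.json above.

```python
from transformers import AutoTokenizer

# Hypothetical local path to this repo; substitute the actual model id or checkout directory.
tokenizer = AutoTokenizer.from_pretrained("./sft17")

# Each added token should map back to the id recorded in added_tokens.json.
for token, expected_id in [("<think>", 151667), ("</think>", 151668), ("<|im_end|>", 151645)]:
    assert tokenizer.convert_tokens_to_ids(token) == expected_id, token
print("added token ids match added_tokens.json")
```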
args.json ADDED
@@ -0,0 +1,367 @@
1
+ {
2
+ "output_dir": "/c23474/home/zhuminfeng/Newclid/models/sft17",
3
+ "overwrite_output_dir": false,
4
+ "do_train": false,
5
+ "do_eval": false,
6
+ "do_predict": false,
7
+ "eval_strategy": "no",
8
+ "prediction_loss_only": false,
9
+ "per_device_train_batch_size": 8,
10
+ "per_device_eval_batch_size": 1,
11
+ "per_gpu_train_batch_size": null,
12
+ "per_gpu_eval_batch_size": null,
13
+ "gradient_accumulation_steps": 1,
14
+ "eval_accumulation_steps": null,
15
+ "eval_delay": 0,
16
+ "torch_empty_cache_steps": null,
17
+ "learning_rate": 0.0001,
18
+ "weight_decay": 0.1,
19
+ "adam_beta1": 0.9,
20
+ "adam_beta2": 0.999,
21
+ "adam_epsilon": 1e-08,
22
+ "max_grad_norm": 1.0,
23
+ "num_train_epochs": 1.0,
24
+ "max_steps": -1,
25
+ "lr_scheduler_type": "cosine",
26
+ "lr_scheduler_kwargs": null,
27
+ "warmup_ratio": 0.05,
28
+ "warmup_steps": 0,
29
+ "log_level": "passive",
30
+ "log_level_replica": "warning",
31
+ "log_on_each_node": true,
32
+ "logging_dir": "/c23474/home/zhuminfeng/Newclid/models/sft17/runs",
33
+ "logging_strategy": "steps",
34
+ "logging_first_step": true,
35
+ "logging_steps": 500,
36
+ "logging_nan_inf_filter": true,
37
+ "save_strategy": "steps",
38
+ "save_steps": 10000.0,
39
+ "save_total_limit": null,
40
+ "save_safetensors": true,
41
+ "save_on_each_node": false,
42
+ "save_only_model": true,
43
+ "restore_callback_states_from_checkpoint": false,
44
+ "no_cuda": false,
45
+ "use_cpu": false,
46
+ "use_mps_device": false,
47
+ "seed": 42,
48
+ "data_seed": 42,
49
+ "jit_mode_eval": false,
50
+ "use_ipex": false,
51
+ "bf16": true,
52
+ "fp16": false,
53
+ "fp16_opt_level": "O1",
54
+ "half_precision_backend": "auto",
55
+ "bf16_full_eval": false,
56
+ "fp16_full_eval": false,
57
+ "tf32": null,
58
+ "local_rank": 0,
59
+ "ddp_backend": null,
60
+ "tpu_num_cores": null,
61
+ "tpu_metrics_debug": false,
62
+ "debug": null,
63
+ "dataloader_drop_last": false,
64
+ "eval_steps": 10000.0,
65
+ "dataloader_num_workers": 4,
66
+ "dataloader_prefetch_factor": null,
67
+ "past_index": -1,
68
+ "run_name": "/c23474/home/zhuminfeng/Newclid/models/sft17",
69
+ "disable_tqdm": null,
70
+ "remove_unused_columns": true,
71
+ "label_names": null,
72
+ "load_best_model_at_end": false,
73
+ "metric_for_best_model": "loss",
74
+ "greater_is_better": false,
75
+ "ignore_data_skip": false,
76
+ "fsdp": "",
77
+ "fsdp_min_num_params": 0,
78
+ "fsdp_config": null,
79
+ "fsdp_transformer_layer_cls_to_wrap": null,
80
+ "accelerator_config": {
81
+ "dispatch_batches": false
82
+ },
83
+ "deepspeed": {
84
+ "fp16": {
85
+ "enabled": "auto",
86
+ "loss_scale": 0,
87
+ "loss_scale_window": 1000,
88
+ "initial_scale_power": 16,
89
+ "hysteresis": 2,
90
+ "min_loss_scale": 1
91
+ },
92
+ "bf16": {
93
+ "enabled": "auto"
94
+ },
95
+ "zero_optimization": {
96
+ "stage": 1,
97
+ "offload_optimizer": {
98
+ "device": "none",
99
+ "pin_memory": true
100
+ },
101
+ "allgather_partitions": true,
102
+ "allgather_bucket_size": 200000000.0,
103
+ "overlap_comm": false,
104
+ "reduce_scatter": true,
105
+ "reduce_bucket_size": 200000000.0,
106
+ "contiguous_gradients": true
107
+ },
108
+ "gradient_accumulation_steps": "auto",
109
+ "gradient_clipping": "auto",
110
+ "steps_per_print": 2000,
111
+ "train_batch_size": "auto",
112
+ "train_micro_batch_size_per_gpu": "auto",
113
+ "wall_clock_breakdown": false
114
+ },
115
+ "label_smoothing_factor": 0.0,
116
+ "optim": "adamw_torch",
117
+ "optim_args": null,
118
+ "adafactor": false,
119
+ "group_by_length": false,
120
+ "length_column_name": "length",
121
+ "report_to": [
122
+ "tensorboard"
123
+ ],
124
+ "ddp_find_unused_parameters": null,
125
+ "ddp_bucket_cap_mb": null,
126
+ "ddp_broadcast_buffers": null,
127
+ "dataloader_pin_memory": true,
128
+ "dataloader_persistent_workers": false,
129
+ "skip_memory_metrics": true,
130
+ "use_legacy_prediction_loop": false,
131
+ "push_to_hub": false,
132
+ "resume_from_checkpoint": null,
133
+ "hub_model_id": null,
134
+ "hub_strategy": "every_save",
135
+ "hub_token": null,
136
+ "hub_private_repo": null,
137
+ "hub_always_push": false,
138
+ "gradient_checkpointing": true,
139
+ "gradient_checkpointing_kwargs": null,
140
+ "include_inputs_for_metrics": false,
141
+ "include_for_metrics": [],
142
+ "eval_do_concat_batches": true,
143
+ "fp16_backend": "auto",
144
+ "push_to_hub_model_id": null,
145
+ "push_to_hub_organization": null,
146
+ "push_to_hub_token": null,
147
+ "mp_parameters": "",
148
+ "auto_find_batch_size": false,
149
+ "full_determinism": false,
150
+ "torchdynamo": null,
151
+ "ray_scope": "last",
152
+ "ddp_timeout": 18000000,
153
+ "torch_compile": false,
154
+ "torch_compile_backend": null,
155
+ "torch_compile_mode": null,
156
+ "include_tokens_per_second": false,
157
+ "include_num_input_tokens_seen": false,
158
+ "neftune_noise_alpha": null,
159
+ "optim_target_modules": null,
160
+ "batch_eval_metrics": false,
161
+ "eval_on_start": false,
162
+ "use_liger_kernel": true,
163
+ "eval_use_gather_object": false,
164
+ "average_tokens_across_devices": false,
165
+ "sortish_sampler": false,
166
+ "predict_with_generate": false,
167
+ "generation_max_length": null,
168
+ "generation_num_beams": null,
169
+ "generation_config": null,
170
+ "vit_gradient_checkpointing": null,
171
+ "check_model": true,
172
+ "acc_strategy": "token",
173
+ "train_dataloader_shuffle": true,
174
+ "max_epochs": null,
175
+ "aligner_lr": null,
176
+ "vit_lr": null,
177
+ "optimizer": null,
178
+ "use_logits_to_keep": null,
179
+ "channels": null,
180
+ "ds3_gather_for_generation": true,
181
+ "metric_warmup_step": 0,
182
+ "fsdp_num": 1,
183
+ "acc_steps": 1,
184
+ "eval_use_evalscope": false,
185
+ "eval_dataset": [],
186
+ "eval_dataset_args": null,
187
+ "eval_limit": null,
188
+ "eval_generation_config": null,
189
+ "model": "Qwen/Qwen3-0.6B-Base",
190
+ "model_type": "qwen3",
191
+ "model_revision": null,
192
+ "task_type": "causal_lm",
193
+ "torch_dtype": "bfloat16",
194
+ "attn_impl": "flash_attn",
195
+ "num_labels": null,
196
+ "problem_type": null,
197
+ "rope_scaling": null,
198
+ "device_map": null,
199
+ "max_memory": {},
200
+ "local_repo_path": null,
201
+ "init_strategy": null,
202
+ "template": "qwen3",
203
+ "system": "You are a helpful assistant.",
204
+ "max_length": 2048,
205
+ "truncation_strategy": "delete",
206
+ "max_pixels": null,
207
+ "agent_template": null,
208
+ "norm_bbox": null,
209
+ "use_chat_template": true,
210
+ "padding_free": false,
211
+ "padding_side": "right",
212
+ "loss_scale": "default",
213
+ "sequence_parallel_size": 1,
214
+ "response_prefix": null,
215
+ "template_backend": "swift",
216
+ "dataset": [
217
+ "datasets/0801/geometry_all_filtered2.jsonl",
218
+ "datasets/0901/geometry_clauses25-30_all_filtered2.jsonl"
219
+ ],
220
+ "val_dataset": [],
221
+ "split_dataset_ratio": 0.0,
222
+ "dataset_num_proc": 16,
223
+ "load_from_cache_file": true,
224
+ "dataset_shuffle": true,
225
+ "val_dataset_shuffle": false,
226
+ "streaming": false,
227
+ "interleave_prob": null,
228
+ "stopping_strategy": "first_exhausted",
229
+ "shuffle_buffer_size": 1000,
230
+ "download_mode": "reuse_dataset_if_exists",
231
+ "columns": {
232
+ "llm_input_renamed": "query",
233
+ "llm_output_renamed": "response"
234
+ },
235
+ "strict": false,
236
+ "model_name": null,
237
+ "model_author": null,
238
+ "custom_dataset_info": [],
239
+ "quant_method": null,
240
+ "quant_bits": null,
241
+ "hqq_axis": null,
242
+ "bnb_4bit_compute_dtype": "bfloat16",
243
+ "bnb_4bit_quant_type": "nf4",
244
+ "bnb_4bit_use_double_quant": true,
245
+ "bnb_4bit_quant_storage": null,
246
+ "max_new_tokens": 64,
247
+ "temperature": 0.0,
248
+ "top_k": null,
249
+ "top_p": null,
250
+ "repetition_penalty": null,
251
+ "num_beams": 1,
252
+ "stream": false,
253
+ "stop_words": [],
254
+ "logprobs": false,
255
+ "top_logprobs": null,
256
+ "ckpt_dir": null,
257
+ "lora_modules": [],
258
+ "tuner_backend": "peft",
259
+ "train_type": "full",
260
+ "adapters": [],
261
+ "external_plugins": [],
262
+ "model_kwargs": {},
263
+ "load_args": false,
264
+ "load_data_args": false,
265
+ "packing": true,
266
+ "packing_cache": null,
267
+ "custom_register_path": [],
268
+ "use_hf": false,
269
+ "ignore_args_error": false,
270
+ "use_swift_lora": false,
271
+ "freeze_parameters": [],
272
+ "freeze_parameters_regex": null,
273
+ "freeze_parameters_ratio": 0.0,
274
+ "trainable_parameters": [],
275
+ "trainable_parameters_regex": null,
276
+ "freeze_llm": false,
277
+ "freeze_vit": true,
278
+ "freeze_aligner": true,
279
+ "target_modules": [
280
+ "all-linear"
281
+ ],
282
+ "target_regex": null,
283
+ "modules_to_save": [],
284
+ "lora_rank": 8,
285
+ "lora_alpha": 32,
286
+ "lora_dropout": 0.05,
287
+ "lora_bias": "none",
288
+ "lora_dtype": null,
289
+ "lorap_lr_ratio": null,
290
+ "use_rslora": false,
291
+ "use_dora": false,
292
+ "lora_ga_batch_size": 2,
293
+ "lora_ga_iters": 2,
294
+ "lora_ga_max_length": 1024,
295
+ "lora_ga_direction": "ArB2r",
296
+ "lora_ga_scale": "stable",
297
+ "lora_ga_stable_gamma": 16,
298
+ "init_weights": true,
299
+ "fourier_n_frequency": 2000,
300
+ "fourier_scaling": 300.0,
301
+ "boft_block_size": 4,
302
+ "boft_block_num": 0,
303
+ "boft_n_butterfly_factor": 1,
304
+ "boft_dropout": 0.0,
305
+ "vera_rank": 256,
306
+ "vera_projection_prng_key": 0,
307
+ "vera_dropout": 0.0,
308
+ "vera_d_initial": 0.1,
309
+ "adapter_act": "gelu",
310
+ "adapter_length": 128,
311
+ "use_galore": false,
312
+ "galore_target_modules": null,
313
+ "galore_rank": 128,
314
+ "galore_update_proj_gap": 50,
315
+ "galore_scale": 1.0,
316
+ "galore_proj_type": "std",
317
+ "galore_optim_per_parameter": false,
318
+ "galore_with_embedding": false,
319
+ "galore_quantization": false,
320
+ "galore_proj_quant": false,
321
+ "galore_proj_bits": 4,
322
+ "galore_proj_group_size": 256,
323
+ "galore_cos_threshold": 0.4,
324
+ "galore_gamma_proj": 2,
325
+ "galore_queue_size": 5,
326
+ "adalora_target_r": 8,
327
+ "adalora_init_r": 12,
328
+ "adalora_tinit": 0,
329
+ "adalora_tfinal": 0,
330
+ "adalora_deltaT": 1,
331
+ "adalora_beta1": 0.85,
332
+ "adalora_beta2": 0.85,
333
+ "adalora_orth_reg_weight": 0.5,
334
+ "llamapro_num_new_blocks": 4,
335
+ "llamapro_num_groups": null,
336
+ "lisa_activated_layers": 0,
337
+ "lisa_step_interval": 20,
338
+ "reft_layer_key": null,
339
+ "reft_layers": null,
340
+ "reft_rank": 4,
341
+ "reft_intervention_type": "LoreftIntervention",
342
+ "reft_args": null,
343
+ "swanlab_token": null,
344
+ "swanlab_project": null,
345
+ "swanlab_workspace": null,
346
+ "swanlab_exp_name": null,
347
+ "swanlab_lark_webhook_url": null,
348
+ "swanlab_lark_secret": null,
349
+ "swanlab_mode": "cloud",
350
+ "add_version": false,
351
+ "resume_only_model": false,
352
+ "create_checkpoint_symlink": false,
353
+ "lazy_tokenize": false,
354
+ "loss_type": null,
355
+ "metric": null,
356
+ "zero_hpz_partition_size": null,
357
+ "rank": 0,
358
+ "global_world_size": 8,
359
+ "local_world_size": 8,
360
+ "model_suffix": "Qwen3-0.6B-Base",
361
+ "model_info": "ModelInfo(model_type='qwen3', model_dir='/c23474/home/zhuminfeng/.cache/modelscope/hub/models/Qwen/Qwen3-0___6B-Base', torch_dtype=torch.bfloat16, max_model_len=32768, quant_method=None, quant_bits=None, rope_scaling=None, config=None, task_type='causal_lm', num_labels=None)",
362
+ "model_meta": "ModelMeta(model_type='qwen3', model_groups=[ModelGroup(models=[Model(ms_model_id='Qwen/Qwen3-0.6B-Base', hf_model_id='Qwen/Qwen3-0.6B-Base', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-1.7B-Base', hf_model_id='Qwen/Qwen3-1.7B-Base', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-4B-Base', hf_model_id='Qwen/Qwen3-4B-Base', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-8B-Base', hf_model_id='Qwen/Qwen3-8B-Base', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-14B-Base', hf_model_id='Qwen/Qwen3-14B-Base', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-0.6B', hf_model_id='Qwen/Qwen3-0.6B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-1.7B', hf_model_id='Qwen/Qwen3-1.7B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-4B', hf_model_id='Qwen/Qwen3-4B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-8B', hf_model_id='Qwen/Qwen3-8B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-14B', hf_model_id='Qwen/Qwen3-14B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-32B', hf_model_id='Qwen/Qwen3-32B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-0.6B-FP8', hf_model_id='Qwen/Qwen3-0.6B-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-1.7B-FP8', hf_model_id='Qwen/Qwen3-1.7B-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-4B-FP8', hf_model_id='Qwen/Qwen3-4B-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-8B-FP8', hf_model_id='Qwen/Qwen3-8B-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-14B-FP8', hf_model_id='Qwen/Qwen3-14B-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-32B-FP8', hf_model_id='Qwen/Qwen3-32B-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-4B-AWQ', hf_model_id='Qwen/Qwen3-4B-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-8B-AWQ', hf_model_id='Qwen/Qwen3-8B-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-14B-AWQ', hf_model_id='Qwen/Qwen3-14B-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-32B-AWQ', hf_model_id='Qwen/Qwen3-32B-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='swift/Qwen3-32B-AWQ', hf_model_id=None, model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='qwen3', get_function=<function get_model_tokenizer_with_flash_attn at 0x7fa66e946cb0>, model_arch='llama', architectures=['Qwen3ForCausalLM'], additional_saved_files=[], torch_dtype=None, is_multimodal=False, is_reward=False, task_type=None, ignore_patterns=None, requires=['transformers>=4.51'], tags=[])",
363
+ "model_dir": "/c23474/home/zhuminfeng/.cache/modelscope/hub/models/Qwen/Qwen3-0___6B-Base",
364
+ "hub": "<class 'swift.hub.hub.MSHub'>",
365
+ "evaluation_strategy": "steps",
366
+ "training_args": "Seq2SeqTrainingArguments(output_dir='/c23474/home/zhuminfeng/Newclid/models/sft17', overwrite_output_dir=False, do_train=False, do_eval=False, do_predict=False, eval_strategy=<IntervalStrategy.NO: 'no'>, prediction_loss_only=False, per_device_train_batch_size=8, per_device_eval_batch_size=1, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=1, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=0.0001, weight_decay=0.1, adam_beta1=0.9, adam_beta2=0.999, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=1.0, max_steps=-1, lr_scheduler_type=<SchedulerType.COSINE: 'cosine'>, lr_scheduler_kwargs=None, warmup_ratio=0.05, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/c23474/home/zhuminfeng/Newclid/models/sft17/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=500, logging_nan_inf_filter=True, save_strategy=<SaveStrategy.STEPS: 'steps'>, save_steps=10000, save_total_limit=None, save_safetensors=True, save_on_each_node=False, save_only_model=True, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=10000.0, dataloader_num_workers=4, dataloader_prefetch_factor=10, past_index=-1, run_name='/c23474/home/zhuminfeng/Newclid/models/sft17', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=False, metric_for_best_model='loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed={'fp16': {'enabled': 'auto', 'loss_scale': 0, 'loss_scale_window': 1000, 'initial_scale_power': 16, 'hysteresis': 2, 'min_loss_scale': 1}, 'bf16': {'enabled': 'auto'}, 'zero_optimization': {'stage': 1, 'offload_optimizer': {'device': 'none', 'pin_memory': True}, 'allgather_partitions': True, 'allgather_bucket_size': 200000000.0, 'overlap_comm': False, 'reduce_scatter': True, 'reduce_bucket_size': 200000000.0, 'contiguous_gradients': True}, 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'steps_per_print': 2000, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'wall_clock_breakdown': False}, label_smoothing_factor=0.0, optim=<OptimizerNames.ADAMW_TORCH: 'adamw_torch'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['tensorboard'], ddp_find_unused_parameters=None, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=None, hub_always_push=False, gradient_checkpointing=True, 
gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, include_for_metrics=[], eval_do_concat_batches=True, fp16_backend='auto', push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=18000000, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, include_tokens_per_second=None, include_num_input_tokens_seen=None, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=True, eval_use_gather_object=False, average_tokens_across_devices=None, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=None, vit_gradient_checkpointing=True, check_model=True, acc_strategy='token', train_dataloader_shuffle=True, max_epochs=None, aligner_lr=None, vit_lr=None, optimizer=None, use_logits_to_keep=None, channels=None, ds3_gather_for_generation=True, metric_warmup_step=0, fsdp_num=1, acc_steps=1, eval_use_evalscope=False, eval_dataset=[], eval_dataset_args=None, eval_limit=None, eval_generation_config=None, sft_alpha=0, train_type='full', local_repo_path=None, galore_config=None)"
367
+ }
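
args.json records the full training configuration for this run (a full-parameter SFT of Qwen/Qwen3-0.6B-Base in bf16 with packing, DeepSpeed ZeRO-1, and two geometry JSONL datasets). A minimal sketch for pulling out the key hyperparameters with the standard library, assuming the file sits next to the checkpoint:

```python
import json

# Hypothetical path; args.json is saved alongside the checkpoint by the training framework.
with open("args.json") as f:
    args = json.load(f)

keys = [
    "model", "train_type", "torch_dtype", "num_train_epochs",
    "learning_rate", "per_device_train_batch_size", "max_length", "packing",
]
for k in keys:
    print(f"{k}: {args[k]}")
# e.g. model: Qwen/Qwen3-0.6B-Base, train_type: full, learning_rate: 0.0001
```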
chat_template.jinja ADDED
@@ -0,0 +1,85 @@
+ {%- if tools %}
+ {{- '<|im_start|>system\n' }}
+ {%- if messages[0].role == 'system' %}
+ {{- messages[0].content + '\n\n' }}
+ {%- endif %}
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+ {%- for tool in tools %}
+ {{- "\n" }}
+ {{- tool | tojson }}
+ {%- endfor %}
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+ {%- if messages[0].role == 'system' %}
+ {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
+ {%- for message in messages[::-1] %}
+ {%- set index = (messages|length - 1) - loop.index0 %}
+ {%- if ns.multi_step_tool and message.role == "user" and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
+ {%- set ns.multi_step_tool = false %}
+ {%- set ns.last_query_index = index %}
+ {%- endif %}
+ {%- endfor %}
+ {%- for message in messages %}
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "assistant" %}
+ {%- set content = message.content %}
+ {%- set reasoning_content = '' %}
+ {%- if message.reasoning_content is defined and message.reasoning_content is not none %}
+ {%- set reasoning_content = message.reasoning_content %}
+ {%- else %}
+ {%- if '</think>' in message.content %}
+ {%- set content = message.content.split('</think>')[-1].lstrip('\n') %}
+ {%- set reasoning_content = message.content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
+ {%- endif %}
+ {%- endif %}
+ {%- if loop.index0 > ns.last_query_index %}
+ {%- if loop.last or (not loop.last and reasoning_content) %}
+ {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- endif %}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- endif %}
+ {%- if message.tool_calls %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if (loop.first and content) or (not loop.first) %}
+ {{- '\n' }}
+ {%- endif %}
+ {%- if tool_call.function %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '<tool_call>\n{"name": "' }}
+ {{- tool_call.name }}
+ {{- '", "arguments": ' }}
+ {%- if tool_call.arguments is string %}
+ {{- tool_call.arguments }}
+ {%- else %}
+ {{- tool_call.arguments | tojson }}
+ {%- endif %}
+ {{- '}\n</tool_call>' }}
+ {%- endfor %}
+ {%- endif %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|im_start|>user' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {{- message.content }}
+ {{- '\n</tool_response>' }}
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n' }}
+ {%- if enable_thinking is defined and enable_thinking is false %}
+ {{- '<think>\n\n</think>\n\n' }}
+ {%- endif %}
+ {%- endif %}
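
This is the standard Qwen3 chat format: `<|im_start|>`/`<|im_end|>` turns, optional `<think>` reasoning blocks, and XML-wrapped tool calls. A minimal rendering sketch, assuming the tokenizer loaded from this repo picks up chat_template.jinja (the `./sft17` path is hypothetical); passing `enable_thinking=False` triggers the empty think block emitted at the end of the template.

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./sft17")  # hypothetical local checkout

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "a b c = triangle a b c; ..."},  # placeholder problem statement
]
prompt = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,   # appends '<|im_start|>assistant\n'
    enable_thinking=False,        # template then emits the empty '<think>\n\n</think>' block
)
print(prompt)
```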
config.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "architectures": [
+ "Qwen3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 151643,
+ "eos_token_id": 151643,
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "max_position_embeddings": 32768,
+ "max_window_layers": 28,
+ "model_type": "qwen3",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 8,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.52.4",
+ "use_cache": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+ }
generation_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "bos_token_id": 151643,
+ "eos_token_id": [
+ 151643,
+ 151645
+ ],
+ "max_new_tokens": 2048,
+ "transformers_version": "4.52.4"
+ }
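
config.json describes a 28-layer Qwen3 model with hidden size 1024, grouped-query attention (16 query heads, 8 KV heads), and tied embeddings, stored in bfloat16; generation_config.json stops decoding on either `<|endoftext|>` (151643) or `<|im_end|>` (151645). A minimal loading and generation sketch with transformers, assuming a hypothetical local checkout path:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "./sft17"  # hypothetical local checkout of this repo
tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.bfloat16)

prompt = "<|im_start|>user\nState the midpoint theorem.<|im_end|>\n<|im_start|>assistant\n"
inputs = tokenizer(prompt, return_tensors="pt")

# generation_config.json supplies max_new_tokens=2048 and both eos ids (151643, 151645) by default.
outputs = model.generate(**inputs, max_new_tokens=256)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
```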
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:77406d21e84699b3d0d123653e40b7f48f3642beae10c0b608f58249223b8099
+ size 1503300328
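
The weights ship as a single ~1.5 GB safetensors shard behind Git LFS, so the file above is only the LFS pointer. A sketch for inspecting the shard with the `safetensors` library once the LFS object has been fetched (e.g. via `git lfs pull`):

```python
from safetensors import safe_open

# Assumes the LFS object has been downloaded; otherwise model.safetensors is just a small pointer file.
with safe_open("model.safetensors", framework="pt") as f:
    total = 0
    for name in f.keys():
        tensor = f.get_tensor(name)
        total += tensor.numel()  # typical entries look like model.layers.0.self_attn.q_proj.weight
print(f"parameters in shard: {total:,}")
```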
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "eos_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
+ size 11422654
tokenizer_config.json ADDED
@@ -0,0 +1,239 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ },
181
+ "151665": {
182
+ "content": "<tool_response>",
183
+ "lstrip": false,
184
+ "normalized": false,
185
+ "rstrip": false,
186
+ "single_word": false,
187
+ "special": false
188
+ },
189
+ "151666": {
190
+ "content": "</tool_response>",
191
+ "lstrip": false,
192
+ "normalized": false,
193
+ "rstrip": false,
194
+ "single_word": false,
195
+ "special": false
196
+ },
197
+ "151667": {
198
+ "content": "<think>",
199
+ "lstrip": false,
200
+ "normalized": false,
201
+ "rstrip": false,
202
+ "single_word": false,
203
+ "special": false
204
+ },
205
+ "151668": {
206
+ "content": "</think>",
207
+ "lstrip": false,
208
+ "normalized": false,
209
+ "rstrip": false,
210
+ "single_word": false,
211
+ "special": false
212
+ }
213
+ },
214
+ "additional_special_tokens": [
215
+ "<|im_start|>",
216
+ "<|im_end|>",
217
+ "<|object_ref_start|>",
218
+ "<|object_ref_end|>",
219
+ "<|box_start|>",
220
+ "<|box_end|>",
221
+ "<|quad_start|>",
222
+ "<|quad_end|>",
223
+ "<|vision_start|>",
224
+ "<|vision_end|>",
225
+ "<|vision_pad|>",
226
+ "<|image_pad|>",
227
+ "<|video_pad|>"
228
+ ],
229
+ "bos_token": null,
230
+ "clean_up_tokenization_spaces": false,
231
+ "eos_token": "<|endoftext|>",
232
+ "errors": "replace",
233
+ "extra_special_tokens": {},
234
+ "model_max_length": 131072,
235
+ "pad_token": "<|endoftext|>",
236
+ "split_special_tokens": false,
237
+ "tokenizer_class": "Qwen2Tokenizer",
238
+ "unk_token": null
239
+ }
trainer_state.json ADDED
@@ -0,0 +1,1663 @@
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 0.7684295008623486,
6
+ "eval_steps": 10000.0,
7
+ "global_step": 90000,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 8.538105565137207e-06,
14
+ "grad_norm": 6.5811052322387695,
15
+ "learning_rate": 1.7073587160662455e-08,
16
+ "loss": 0.9425575733184814,
17
+ "memory(GiB)": 4.21,
18
+ "step": 1,
19
+ "train_speed(iter/s)": 0.107952
20
+ },
21
+ {
22
+ "epoch": 0.004269052782568603,
23
+ "grad_norm": 0.775414764881134,
24
+ "learning_rate": 8.536793580331227e-06,
25
+ "loss": 0.2293025076030968,
26
+ "memory(GiB)": 5.71,
27
+ "step": 500,
28
+ "train_speed(iter/s)": 0.938128
29
+ },
30
+ {
31
+ "epoch": 0.008538105565137207,
32
+ "grad_norm": 0.5124035477638245,
33
+ "learning_rate": 1.7073587160662455e-05,
34
+ "loss": 0.05007815933227539,
35
+ "memory(GiB)": 5.71,
36
+ "step": 1000,
37
+ "train_speed(iter/s)": 0.946051
38
+ },
39
+ {
40
+ "epoch": 0.012807158347705811,
41
+ "grad_norm": 0.34229570627212524,
42
+ "learning_rate": 2.5610380740993682e-05,
43
+ "loss": 0.036965576171875,
44
+ "memory(GiB)": 5.71,
45
+ "step": 1500,
46
+ "train_speed(iter/s)": 0.980261
47
+ },
48
+ {
49
+ "epoch": 0.017076211130274414,
50
+ "grad_norm": 0.29022017121315,
51
+ "learning_rate": 3.414717432132491e-05,
52
+ "loss": 0.030718547821044922,
53
+ "memory(GiB)": 5.71,
54
+ "step": 2000,
55
+ "train_speed(iter/s)": 1.013563
56
+ },
57
+ {
58
+ "epoch": 0.02134526391284302,
59
+ "grad_norm": 0.2701531946659088,
60
+ "learning_rate": 4.268396790165614e-05,
61
+ "loss": 0.026950979232788087,
62
+ "memory(GiB)": 5.71,
63
+ "step": 2500,
64
+ "train_speed(iter/s)": 1.038614
65
+ },
66
+ {
67
+ "epoch": 0.025614316695411622,
68
+ "grad_norm": 0.1944938600063324,
69
+ "learning_rate": 5.1220761481987364e-05,
70
+ "loss": 0.025618917465209962,
71
+ "memory(GiB)": 5.71,
72
+ "step": 3000,
73
+ "train_speed(iter/s)": 1.081032
74
+ },
75
+ {
76
+ "epoch": 0.029883369477980225,
77
+ "grad_norm": 0.15646104514598846,
78
+ "learning_rate": 5.9757555062318595e-05,
79
+ "loss": 0.0239105281829834,
80
+ "memory(GiB)": 5.71,
81
+ "step": 3500,
82
+ "train_speed(iter/s)": 1.023247
83
+ },
84
+ {
85
+ "epoch": 0.03415242226054883,
86
+ "grad_norm": 0.12373720854520798,
87
+ "learning_rate": 6.829434864264982e-05,
88
+ "loss": 0.021082000732421877,
89
+ "memory(GiB)": 5.71,
90
+ "step": 4000,
91
+ "train_speed(iter/s)": 0.971265
92
+ },
93
+ {
94
+ "epoch": 0.03842147504311743,
95
+ "grad_norm": 0.11245039105415344,
96
+ "learning_rate": 7.683114222298106e-05,
97
+ "loss": 0.020011337280273437,
98
+ "memory(GiB)": 5.71,
99
+ "step": 4500,
100
+ "train_speed(iter/s)": 0.934224
101
+ },
102
+ {
103
+ "epoch": 0.04269052782568604,
104
+ "grad_norm": 0.12453507632017136,
105
+ "learning_rate": 8.536793580331228e-05,
106
+ "loss": 0.019226245880126953,
107
+ "memory(GiB)": 5.71,
108
+ "step": 5000,
109
+ "train_speed(iter/s)": 0.906199
110
+ },
111
+ {
112
+ "epoch": 0.04695958060825464,
113
+ "grad_norm": 0.08364573866128922,
114
+ "learning_rate": 9.39047293836435e-05,
115
+ "loss": 0.018570756912231444,
116
+ "memory(GiB)": 5.71,
117
+ "step": 5500,
118
+ "train_speed(iter/s)": 0.887629
119
+ },
120
+ {
121
+ "epoch": 0.051228633390823244,
122
+ "grad_norm": 0.08422534167766571,
123
+ "learning_rate": 9.999959243761552e-05,
124
+ "loss": 0.018008022308349608,
125
+ "memory(GiB)": 5.71,
126
+ "step": 6000,
127
+ "train_speed(iter/s)": 0.882382
128
+ },
129
+ {
130
+ "epoch": 0.05549768617339185,
131
+ "grad_norm": 0.0722428560256958,
132
+ "learning_rate": 9.999175989726746e-05,
133
+ "loss": 0.016920005798339844,
134
+ "memory(GiB)": 5.71,
135
+ "step": 6500,
136
+ "train_speed(iter/s)": 0.887128
137
+ },
138
+ {
139
+ "epoch": 0.05976673895596045,
140
+ "grad_norm": 0.0696873590350151,
141
+ "learning_rate": 9.997396381339795e-05,
142
+ "loss": 0.016275758743286132,
143
+ "memory(GiB)": 5.71,
144
+ "step": 7000,
145
+ "train_speed(iter/s)": 0.896874
146
+ },
147
+ {
148
+ "epoch": 0.06403579173852905,
149
+ "grad_norm": 0.0715385377407074,
150
+ "learning_rate": 9.994620773283261e-05,
151
+ "loss": 0.03211669921875,
152
+ "memory(GiB)": 5.71,
153
+ "step": 7500,
154
+ "train_speed(iter/s)": 0.906916
155
+ },
156
+ {
157
+ "epoch": 0.06830484452109765,
158
+ "grad_norm": 0.07314470410346985,
159
+ "learning_rate": 9.990849718746144e-05,
160
+ "loss": 0.014735573768615723,
161
+ "memory(GiB)": 5.71,
162
+ "step": 8000,
163
+ "train_speed(iter/s)": 0.915054
164
+ },
165
+ {
166
+ "epoch": 0.07257389730366626,
167
+ "grad_norm": 0.0624830424785614,
168
+ "learning_rate": 9.986083969313632e-05,
169
+ "loss": 0.014247347831726074,
170
+ "memory(GiB)": 5.71,
171
+ "step": 8500,
172
+ "train_speed(iter/s)": 0.923566
173
+ },
174
+ {
175
+ "epoch": 0.07684295008623486,
176
+ "grad_norm": 0.054315660148859024,
177
+ "learning_rate": 9.980324474817292e-05,
178
+ "loss": 0.014038642883300782,
179
+ "memory(GiB)": 5.71,
180
+ "step": 9000,
181
+ "train_speed(iter/s)": 0.931957
182
+ },
183
+ {
184
+ "epoch": 0.08111200286880348,
185
+ "grad_norm": 0.04966143146157265,
186
+ "learning_rate": 9.973572383145782e-05,
187
+ "loss": 0.013697422027587891,
188
+ "memory(GiB)": 5.71,
189
+ "step": 9500,
190
+ "train_speed(iter/s)": 0.939331
191
+ },
192
+ {
193
+ "epoch": 0.08538105565137208,
194
+ "grad_norm": 0.055357128381729126,
195
+ "learning_rate": 9.965829040016061e-05,
196
+ "loss": 0.013380534172058105,
197
+ "memory(GiB)": 5.71,
198
+ "step": 10000,
199
+ "train_speed(iter/s)": 0.946434
200
+ },
201
+ {
202
+ "epoch": 0.08965010843394068,
203
+ "grad_norm": 0.05234465003013611,
204
+ "learning_rate": 9.957095988705193e-05,
205
+ "loss": 0.013177488327026367,
206
+ "memory(GiB)": 5.71,
207
+ "step": 10500,
208
+ "train_speed(iter/s)": 0.960222
209
+ },
210
+ {
211
+ "epoch": 0.09391916121650928,
212
+ "grad_norm": 0.053024690598249435,
213
+ "learning_rate": 9.947374969742755e-05,
214
+ "loss": 0.013030742645263672,
215
+ "memory(GiB)": 5.71,
216
+ "step": 11000,
217
+ "train_speed(iter/s)": 0.973308
218
+ },
219
+ {
220
+ "epoch": 0.09818821399907789,
221
+ "grad_norm": 0.04746083542704582,
222
+ "learning_rate": 9.936667920563951e-05,
223
+ "loss": 0.012802671432495118,
224
+ "memory(GiB)": 5.71,
225
+ "step": 11500,
226
+ "train_speed(iter/s)": 0.985248
227
+ },
228
+ {
229
+ "epoch": 0.10245726678164649,
230
+ "grad_norm": 0.045355405658483505,
231
+ "learning_rate": 9.924976975123472e-05,
232
+ "loss": 0.012366548538208007,
233
+ "memory(GiB)": 5.71,
234
+ "step": 12000,
235
+ "train_speed(iter/s)": 0.996787
236
+ },
237
+ {
238
+ "epoch": 0.10672631956421509,
239
+ "grad_norm": 0.0430605448782444,
240
+ "learning_rate": 9.912304463470185e-05,
241
+ "loss": 0.018324718475341797,
242
+ "memory(GiB)": 5.71,
243
+ "step": 12500,
244
+ "train_speed(iter/s)": 1.007619
245
+ },
246
+ {
247
+ "epoch": 0.1109953723467837,
248
+ "grad_norm": 0.03760723024606705,
249
+ "learning_rate": 9.89865291128276e-05,
250
+ "loss": 0.011288459777832032,
251
+ "memory(GiB)": 5.71,
252
+ "step": 13000,
253
+ "train_speed(iter/s)": 1.017831
254
+ },
255
+ {
256
+ "epoch": 0.1152644251293523,
257
+ "grad_norm": 0.04123725742101669,
258
+ "learning_rate": 9.884025039366274e-05,
259
+ "loss": 0.011067386627197265,
260
+ "memory(GiB)": 5.71,
261
+ "step": 13500,
262
+ "train_speed(iter/s)": 1.027479
263
+ },
264
+ {
265
+ "epoch": 0.1195334779119209,
266
+ "grad_norm": 0.046554189175367355,
267
+ "learning_rate": 9.868423763109962e-05,
268
+ "loss": 0.010972289085388184,
269
+ "memory(GiB)": 5.71,
270
+ "step": 14000,
271
+ "train_speed(iter/s)": 1.0366
272
+ },
273
+ {
274
+ "epoch": 0.1238025306944895,
275
+ "grad_norm": 0.04529291018843651,
276
+ "learning_rate": 9.851852191906155e-05,
277
+ "loss": 0.010918002128601074,
278
+ "memory(GiB)": 5.71,
279
+ "step": 14500,
280
+ "train_speed(iter/s)": 1.045276
281
+ },
282
+ {
283
+ "epoch": 0.1280715834770581,
284
+ "grad_norm": 0.039391227066516876,
285
+ "learning_rate": 9.834313628530574e-05,
286
+ "loss": 0.010901000022888184,
287
+ "memory(GiB)": 5.71,
288
+ "step": 15000,
289
+ "train_speed(iter/s)": 1.053467
290
+ },
291
+ {
292
+ "epoch": 0.13234063625962672,
293
+ "grad_norm": 0.050082143396139145,
294
+ "learning_rate": 9.81581156848408e-05,
295
+ "loss": 0.010822629928588868,
296
+ "memory(GiB)": 5.71,
297
+ "step": 15500,
298
+ "train_speed(iter/s)": 1.061046
299
+ },
300
+ {
301
+ "epoch": 0.1366096890421953,
302
+ "grad_norm": 0.04181528836488724,
303
+ "learning_rate": 9.79634969929599e-05,
304
+ "loss": 0.01061639404296875,
305
+ "memory(GiB)": 5.71,
306
+ "step": 16000,
307
+ "train_speed(iter/s)": 1.068454
308
+ },
309
+ {
310
+ "epoch": 0.14087874182476393,
311
+ "grad_norm": 0.037736013531684875,
312
+ "learning_rate": 9.775931899789159e-05,
313
+ "loss": 0.01050139808654785,
314
+ "memory(GiB)": 5.71,
315
+ "step": 16500,
316
+ "train_speed(iter/s)": 1.075513
317
+ },
318
+ {
319
+ "epoch": 0.14514779460733251,
320
+ "grad_norm": 0.03446267917752266,
321
+ "learning_rate": 9.754562239306902e-05,
322
+ "loss": 0.010301560401916503,
323
+ "memory(GiB)": 5.71,
324
+ "step": 17000,
325
+ "train_speed(iter/s)": 1.08225
326
+ },
327
+ {
328
+ "epoch": 0.14941684738990113,
329
+ "grad_norm": 0.035218626260757446,
330
+ "learning_rate": 9.732244976901965e-05,
331
+ "loss": 0.010299022674560548,
332
+ "memory(GiB)": 5.71,
333
+ "step": 17500,
334
+ "train_speed(iter/s)": 1.088656
335
+ },
336
+ {
337
+ "epoch": 0.15368590017246972,
338
+ "grad_norm": 0.034140028059482574,
339
+ "learning_rate": 9.708984560487677e-05,
340
+ "loss": 0.010085094451904296,
341
+ "memory(GiB)": 5.71,
342
+ "step": 18000,
343
+ "train_speed(iter/s)": 1.094769
344
+ },
345
+ {
346
+ "epoch": 0.15795495295503834,
347
+ "grad_norm": 0.040227197110652924,
348
+ "learning_rate": 9.684785625951468e-05,
349
+ "loss": 0.009981593132019044,
350
+ "memory(GiB)": 5.71,
351
+ "step": 18500,
352
+ "train_speed(iter/s)": 1.100633
353
+ },
354
+ {
355
+ "epoch": 0.16222400573760695,
356
+ "grad_norm": 0.03376320004463196,
357
+ "learning_rate": 9.659652996230917e-05,
358
+ "loss": 0.009874713897705079,
359
+ "memory(GiB)": 5.71,
360
+ "step": 19000,
361
+ "train_speed(iter/s)": 1.106243
362
+ },
363
+ {
364
+ "epoch": 0.16649305852017554,
365
+ "grad_norm": 0.03337237238883972,
366
+ "learning_rate": 9.633591680352522e-05,
367
+ "loss": 0.009621439933776855,
368
+ "memory(GiB)": 5.71,
369
+ "step": 19500,
370
+ "train_speed(iter/s)": 1.111407
371
+ },
372
+ {
373
+ "epoch": 0.17076211130274416,
374
+ "grad_norm": 0.03156784921884537,
375
+ "learning_rate": 9.606606872433384e-05,
376
+ "loss": 0.01175856876373291,
377
+ "memory(GiB)": 5.73,
378
+ "step": 20000,
379
+ "train_speed(iter/s)": 1.116578
380
+ },
381
+ {
382
+ "epoch": 0.17503116408531275,
383
+ "grad_norm": 0.06789804250001907,
384
+ "learning_rate": 9.578703950645998e-05,
385
+ "loss": 0.008876850128173828,
386
+ "memory(GiB)": 5.73,
387
+ "step": 20500,
388
+ "train_speed(iter/s)": 1.121386
389
+ },
390
+ {
391
+ "epoch": 0.17930021686788136,
392
+ "grad_norm": 0.029559865593910217,
393
+ "learning_rate": 9.549888476146366e-05,
394
+ "loss": 0.008808825492858887,
395
+ "memory(GiB)": 5.73,
396
+ "step": 21000,
397
+ "train_speed(iter/s)": 1.126144
398
+ },
399
+ {
400
+ "epoch": 0.18356926965044995,
401
+ "grad_norm": 0.027149997651576996,
402
+ "learning_rate": 9.52016619196564e-05,
403
+ "loss": 0.008746042251586914,
404
+ "memory(GiB)": 5.73,
405
+ "step": 21500,
406
+ "train_speed(iter/s)": 1.130706
407
+ },
408
+ {
409
+ "epoch": 0.18783832243301857,
410
+ "grad_norm": 0.031270887702703476,
411
+ "learning_rate": 9.489543021865507e-05,
412
+ "loss": 0.008727970123291016,
413
+ "memory(GiB)": 5.73,
414
+ "step": 22000,
415
+ "train_speed(iter/s)": 1.135093
416
+ },
417
+ {
418
+ "epoch": 0.19210737521558716,
419
+ "grad_norm": 0.03134565427899361,
420
+ "learning_rate": 9.458025069157563e-05,
421
+ "loss": 0.008822738647460937,
422
+ "memory(GiB)": 5.73,
423
+ "step": 22500,
424
+ "train_speed(iter/s)": 1.139318
425
+ },
426
+ {
427
+ "epoch": 0.19637642799815577,
428
+ "grad_norm": 0.02970048598945141,
429
+ "learning_rate": 9.425618615486908e-05,
430
+ "loss": 0.008724775314331055,
431
+ "memory(GiB)": 5.73,
432
+ "step": 23000,
433
+ "train_speed(iter/s)": 1.143306
434
+ },
435
+ {
436
+ "epoch": 0.20064548078072436,
437
+ "grad_norm": 0.037413984537124634,
438
+ "learning_rate": 9.392330119580186e-05,
439
+ "loss": 0.008617961883544922,
440
+ "memory(GiB)": 5.73,
441
+ "step": 23500,
442
+ "train_speed(iter/s)": 1.147167
443
+ },
444
+ {
445
+ "epoch": 0.20491453356329298,
446
+ "grad_norm": 0.031085532158613205,
447
+ "learning_rate": 9.358166215958333e-05,
448
+ "loss": 0.008613507270812988,
449
+ "memory(GiB)": 5.73,
450
+ "step": 24000,
451
+ "train_speed(iter/s)": 1.150974
452
+ },
453
+ {
454
+ "epoch": 0.2091835863458616,
455
+ "grad_norm": 0.030068758875131607,
456
+ "learning_rate": 9.323133713614297e-05,
457
+ "loss": 0.008516620635986329,
458
+ "memory(GiB)": 5.73,
459
+ "step": 24500,
460
+ "train_speed(iter/s)": 1.154635
461
+ },
462
+ {
463
+ "epoch": 0.21345263912843018,
464
+ "grad_norm": 0.030049536377191544,
465
+ "learning_rate": 9.287239594655976e-05,
466
+ "loss": 0.00915114688873291,
467
+ "memory(GiB)": 5.73,
468
+ "step": 25000,
469
+ "train_speed(iter/s)": 1.158172
470
+ },
471
+ {
472
+ "epoch": 0.2177216919109988,
473
+ "grad_norm": 0.03236347809433937,
474
+ "learning_rate": 9.250491012914668e-05,
475
+ "loss": 0.008387946128845214,
476
+ "memory(GiB)": 5.73,
477
+ "step": 25500,
478
+ "train_speed(iter/s)": 1.161598
479
+ },
480
+ {
481
+ "epoch": 0.2219907446935674,
482
+ "grad_norm": 0.028227701783180237,
483
+ "learning_rate": 9.212895292519276e-05,
484
+ "loss": 0.008091423034667969,
485
+ "memory(GiB)": 5.73,
486
+ "step": 26000,
487
+ "train_speed(iter/s)": 1.164914
488
+ },
489
+ {
490
+ "epoch": 0.226259797476136,
491
+ "grad_norm": 0.026642831042408943,
492
+ "learning_rate": 9.17445992643658e-05,
493
+ "loss": 0.008073025703430176,
494
+ "memory(GiB)": 5.73,
495
+ "step": 26500,
496
+ "train_speed(iter/s)": 1.168129
497
+ },
498
+ {
499
+ "epoch": 0.2305288502587046,
500
+ "grad_norm": 0.029216019436717033,
501
+ "learning_rate": 9.135192574977873e-05,
502
+ "loss": 0.008088951110839843,
503
+ "memory(GiB)": 5.73,
504
+ "step": 27000,
505
+ "train_speed(iter/s)": 1.169593
506
+ },
507
+ {
508
+ "epoch": 0.2347979030412732,
509
+ "grad_norm": 0.02682262659072876,
510
+ "learning_rate": 9.09510106427222e-05,
511
+ "loss": 0.007971211433410645,
512
+ "memory(GiB)": 5.73,
513
+ "step": 27500,
514
+ "train_speed(iter/s)": 1.172618
515
+ },
516
+ {
517
+ "epoch": 0.2390669558238418,
518
+ "grad_norm": 0.027296727523207664,
519
+ "learning_rate": 9.054193384706688e-05,
520
+ "loss": 0.007928550243377686,
521
+ "memory(GiB)": 5.73,
522
+ "step": 28000,
523
+ "train_speed(iter/s)": 1.165921
524
+ },
525
+ {
526
+ "epoch": 0.24333600860641041,
527
+ "grad_norm": 0.03066374734044075,
528
+ "learning_rate": 9.012477689333834e-05,
529
+ "loss": 0.007805256366729736,
530
+ "memory(GiB)": 5.73,
531
+ "step": 28500,
532
+ "train_speed(iter/s)": 1.155767
533
+ },
534
+ {
535
+ "epoch": 0.247605061388979,
536
+ "grad_norm": 0.027467776089906693,
537
+ "learning_rate": 8.96996229224676e-05,
538
+ "loss": 0.007825798034667968,
539
+ "memory(GiB)": 5.73,
540
+ "step": 29000,
541
+ "train_speed(iter/s)": 1.147611
542
+ },
543
+ {
544
+ "epoch": 0.2518741141715476,
545
+ "grad_norm": 0.028940001502633095,
546
+ "learning_rate": 8.926655666922102e-05,
547
+ "loss": 0.007748476028442383,
548
+ "memory(GiB)": 5.73,
549
+ "step": 29500,
550
+ "train_speed(iter/s)": 1.139656
551
+ },
552
+ {
553
+ "epoch": 0.2561431669541162,
554
+ "grad_norm": 0.0281364805996418,
555
+ "learning_rate": 8.882566444531216e-05,
556
+ "loss": 0.007644564628601074,
557
+ "memory(GiB)": 5.73,
558
+ "step": 30000,
559
+ "train_speed(iter/s)": 1.131607
560
+ },
561
+ {
562
+ "epoch": 0.2604122197366848,
563
+ "grad_norm": 0.03272758424282074,
564
+ "learning_rate": 8.837703412219962e-05,
565
+ "loss": 0.007614383697509766,
566
+ "memory(GiB)": 5.73,
567
+ "step": 30500,
568
+ "train_speed(iter/s)": 1.123907
569
+ },
570
+ {
571
+ "epoch": 0.26468127251925344,
572
+ "grad_norm": 0.033637482672929764,
573
+ "learning_rate": 8.7920755113574e-05,
574
+ "loss": 0.007485725402832031,
575
+ "memory(GiB)": 5.73,
576
+ "step": 31000,
577
+ "train_speed(iter/s)": 1.118652
578
+ },
579
+ {
580
+ "epoch": 0.26895032530182206,
581
+ "grad_norm": 0.028677962720394135,
582
+ "learning_rate": 8.745691835753724e-05,
583
+ "loss": 0.007466458320617676,
584
+ "memory(GiB)": 5.73,
585
+ "step": 31500,
586
+ "train_speed(iter/s)": 1.114283
587
+ },
588
+ {
589
+ "epoch": 0.2732193780843906,
590
+ "grad_norm": 0.026501238346099854,
591
+ "learning_rate": 8.698561629847851e-05,
592
+ "loss": 0.00739455795288086,
593
+ "memory(GiB)": 5.73,
594
+ "step": 32000,
595
+ "train_speed(iter/s)": 1.110807
596
+ },
597
+ {
598
+ "epoch": 0.27748843086695923,
599
+ "grad_norm": 0.03185174614191055,
600
+ "learning_rate": 8.650694286864957e-05,
601
+ "loss": 0.007317279815673828,
602
+ "memory(GiB)": 5.73,
603
+ "step": 32500,
604
+ "train_speed(iter/s)": 1.109657
605
+ },
606
+ {
607
+ "epoch": 0.28175748364952785,
608
+ "grad_norm": 0.031619079411029816,
609
+ "learning_rate": 8.602099346944379e-05,
610
+ "loss": 0.007236574649810791,
611
+ "memory(GiB)": 5.73,
612
+ "step": 33000,
613
+ "train_speed(iter/s)": 1.109762
614
+ },
615
+ {
616
+ "epoch": 0.28602653643209647,
617
+ "grad_norm": 0.028590602800250053,
618
+ "learning_rate": 8.552786495238226e-05,
619
+ "loss": 0.00712824535369873,
620
+ "memory(GiB)": 5.73,
621
+ "step": 33500,
622
+ "train_speed(iter/s)": 1.10959
623
+ },
624
+ {
625
+ "epoch": 0.29029558921466503,
626
+ "grad_norm": 0.026914609596133232,
627
+ "learning_rate": 8.502765559981091e-05,
628
+ "loss": 0.007133237838745117,
629
+ "memory(GiB)": 5.73,
630
+ "step": 34000,
631
+ "train_speed(iter/s)": 1.108437
632
+ },
633
+ {
634
+ "epoch": 0.29456464199723364,
635
+ "grad_norm": 0.03186658397316933,
636
+ "learning_rate": 8.452046510531258e-05,
637
+ "loss": 0.00705194091796875,
638
+ "memory(GiB)": 5.73,
639
+ "step": 34500,
640
+ "train_speed(iter/s)": 1.111376
641
+ },
642
+ {
643
+ "epoch": 0.29883369477980226,
644
+ "grad_norm": 0.022866345942020416,
645
+ "learning_rate": 8.400639455383754e-05,
646
+ "loss": 0.006991560935974121,
647
+ "memory(GiB)": 5.73,
648
+ "step": 35000,
649
+ "train_speed(iter/s)": 1.103171
650
+ },
651
+ {
652
+ "epoch": 0.3031027475623709,
653
+ "grad_norm": 0.027075253427028656,
654
+ "learning_rate": 8.348554640155709e-05,
655
+ "loss": 0.006916217803955078,
656
+ "memory(GiB)": 5.73,
657
+ "step": 35500,
658
+ "train_speed(iter/s)": 1.094784
659
+ },
660
+ {
661
+ "epoch": 0.30737180034493944,
662
+ "grad_norm": 0.027598075568675995,
663
+ "learning_rate": 8.295802445544345e-05,
664
+ "loss": 0.0068712844848632815,
665
+ "memory(GiB)": 5.73,
666
+ "step": 36000,
667
+ "train_speed(iter/s)": 1.087037
668
+ },
669
+ {
670
+ "epoch": 0.31164085312750806,
671
+ "grad_norm": 0.022508256137371063,
672
+ "learning_rate": 8.242393385258083e-05,
673
+ "loss": 0.006878099918365479,
674
+ "memory(GiB)": 5.73,
675
+ "step": 36500,
676
+ "train_speed(iter/s)": 1.080097
677
+ },
678
+ {
679
+ "epoch": 0.31590990591007667,
680
+ "grad_norm": 0.02388549968600273,
681
+ "learning_rate": 8.188338103921109e-05,
682
+ "loss": 0.006974416732788086,
683
+ "memory(GiB)": 5.73,
684
+ "step": 37000,
685
+ "train_speed(iter/s)": 1.073446
686
+ },
687
+ {
688
+ "epoch": 0.3201789586926453,
689
+ "grad_norm": 0.025459734722971916,
690
+ "learning_rate": 8.13364737495187e-05,
691
+ "loss": 0.0067239184379577635,
692
+ "memory(GiB)": 5.73,
693
+ "step": 37500,
694
+ "train_speed(iter/s)": 1.067642
695
+ },
696
+ {
697
+ "epoch": 0.3244480114752139,
698
+ "grad_norm": 0.023768454790115356,
699
+ "learning_rate": 8.078332098415881e-05,
700
+ "loss": 0.006635515213012695,
701
+ "memory(GiB)": 5.73,
702
+ "step": 38000,
703
+ "train_speed(iter/s)": 1.067385
704
+ },
705
+ {
706
+ "epoch": 0.32871706425778247,
707
+ "grad_norm": 0.028179064393043518,
708
+ "learning_rate": 8.022403298853317e-05,
709
+ "loss": 0.00661515998840332,
710
+ "memory(GiB)": 5.73,
711
+ "step": 38500,
712
+ "train_speed(iter/s)": 1.067485
713
+ },
714
+ {
715
+ "epoch": 0.3329861170403511,
716
+ "grad_norm": 0.026124022901058197,
717
+ "learning_rate": 7.965872123081765e-05,
718
+ "loss": 0.006523737907409668,
719
+ "memory(GiB)": 5.73,
720
+ "step": 39000,
721
+ "train_speed(iter/s)": 1.067501
722
+ },
723
+ {
724
+ "epoch": 0.3372551698229197,
725
+ "grad_norm": 0.02668868564069271,
726
+ "learning_rate": 7.908749837974632e-05,
727
+ "loss": 0.006474626541137695,
728
+ "memory(GiB)": 5.73,
729
+ "step": 39500,
730
+ "train_speed(iter/s)": 1.067703
731
+ },
732
+ {
733
+ "epoch": 0.3415242226054883,
734
+ "grad_norm": 0.02441154234111309,
735
+ "learning_rate": 7.851047828215611e-05,
736
+ "loss": 0.006419078826904297,
737
+ "memory(GiB)": 5.73,
738
+ "step": 40000,
739
+ "train_speed(iter/s)": 1.06808
740
+ },
741
+ {
742
+ "epoch": 0.3457932753880569,
743
+ "grad_norm": 0.0254750307649374,
744
+ "learning_rate": 7.792777594029674e-05,
745
+ "loss": 0.006350691795349121,
746
+ "memory(GiB)": 5.73,
747
+ "step": 40500,
748
+ "train_speed(iter/s)": 1.068683
749
+ },
750
+ {
751
+ "epoch": 0.3500623281706255,
752
+ "grad_norm": 0.027933409437537193,
753
+ "learning_rate": 7.73395074889103e-05,
754
+ "loss": 0.006355803966522217,
755
+ "memory(GiB)": 5.73,
756
+ "step": 41000,
757
+ "train_speed(iter/s)": 1.064808
758
+ },
759
+ {
760
+ "epoch": 0.3543313809531941,
761
+ "grad_norm": 0.022553391754627228,
762
+ "learning_rate": 7.67457901720852e-05,
763
+ "loss": 0.006336944103240967,
764
+ "memory(GiB)": 5.73,
765
+ "step": 41500,
766
+ "train_speed(iter/s)": 1.060284
767
+ },
768
+ {
769
+ "epoch": 0.3586004337357627,
770
+ "grad_norm": 0.027581321075558662,
771
+ "learning_rate": 7.614674231988903e-05,
772
+ "loss": 0.00619974422454834,
773
+ "memory(GiB)": 5.73,
774
+ "step": 42000,
775
+ "train_speed(iter/s)": 1.055658
776
+ },
777
+ {
778
+ "epoch": 0.3628694865183313,
779
+ "grad_norm": 0.02141967974603176,
780
+ "learning_rate": 7.554248332478485e-05,
781
+ "loss": 0.006249521732330322,
782
+ "memory(GiB)": 5.73,
783
+ "step": 42500,
784
+ "train_speed(iter/s)": 1.051341
785
+ },
786
+ {
787
+ "epoch": 0.3671385393008999,
788
+ "grad_norm": 0.025843387469649315,
789
+ "learning_rate": 7.49331336178358e-05,
790
+ "loss": 0.006162589550018311,
791
+ "memory(GiB)": 5.73,
792
+ "step": 43000,
793
+ "train_speed(iter/s)": 1.046866
794
+ },
795
+ {
796
+ "epoch": 0.3714075920834685,
797
+ "grad_norm": 0.02431940846145153,
798
+ "learning_rate": 7.431881464470293e-05,
799
+ "loss": 0.0060729503631591795,
800
+ "memory(GiB)": 5.73,
801
+ "step": 43500,
802
+ "train_speed(iter/s)": 1.042554
803
+ },
804
+ {
805
+ "epoch": 0.37567664486603713,
806
+ "grad_norm": 0.0244905948638916,
807
+ "learning_rate": 7.369964884144047e-05,
808
+ "loss": 0.006033665180206299,
809
+ "memory(GiB)": 5.73,
810
+ "step": 44000,
811
+ "train_speed(iter/s)": 1.041578
812
+ },
813
+ {
814
+ "epoch": 0.37994569764860575,
815
+ "grad_norm": 0.02309691719710827,
816
+ "learning_rate": 7.307575961009385e-05,
817
+ "loss": 0.006005731582641602,
818
+ "memory(GiB)": 5.73,
819
+ "step": 44500,
820
+ "train_speed(iter/s)": 1.041875
821
+ },
822
+ {
823
+ "epoch": 0.3842147504311743,
824
+ "grad_norm": 0.023321352899074554,
825
+ "learning_rate": 7.24472712941053e-05,
826
+ "loss": 0.005931224346160889,
827
+ "memory(GiB)": 5.73,
828
+ "step": 45000,
829
+ "train_speed(iter/s)": 1.044453
830
+ },
831
+ {
832
+ "epoch": 0.38848380321374293,
833
+ "grad_norm": 0.024199847131967545,
834
+ "learning_rate": 7.181430915353171e-05,
835
+ "loss": 0.0059114408493041995,
836
+ "memory(GiB)": 5.73,
837
+ "step": 45500,
838
+ "train_speed(iter/s)": 1.047147
839
+ },
840
+ {
841
+ "epoch": 0.39275285599631155,
842
+ "grad_norm": 0.02660815231502056,
843
+ "learning_rate": 7.117699934007987e-05,
844
+ "loss": 0.005867915630340576,
845
+ "memory(GiB)": 5.73,
846
+ "step": 46000,
847
+ "train_speed(iter/s)": 1.049798
848
+ },
849
+ {
850
+ "epoch": 0.39702190877888016,
851
+ "grad_norm": 0.02538706362247467,
852
+ "learning_rate": 7.053546887196391e-05,
853
+ "loss": 0.005895719528198242,
854
+ "memory(GiB)": 5.73,
855
+ "step": 46500,
856
+ "train_speed(iter/s)": 1.052318
857
+ },
858
+ {
859
+ "epoch": 0.4012909615614487,
860
+ "grad_norm": 0.023992260918021202,
861
+ "learning_rate": 6.988984560859009e-05,
862
+ "loss": 0.005823767662048339,
863
+ "memory(GiB)": 5.73,
864
+ "step": 47000,
865
+ "train_speed(iter/s)": 1.054874
866
+ },
867
+ {
868
+ "epoch": 0.40556001434401734,
869
+ "grad_norm": 0.024961460381746292,
870
+ "learning_rate": 6.924025822507398e-05,
871
+ "loss": 0.005796549797058105,
872
+ "memory(GiB)": 5.73,
873
+ "step": 47500,
874
+ "train_speed(iter/s)": 1.057392
875
+ },
876
+ {
877
+ "epoch": 0.40982906712658596,
878
+ "grad_norm": 0.026839323341846466,
879
+ "learning_rate": 6.858683618659509e-05,
880
+ "loss": 0.0057229394912719726,
881
+ "memory(GiB)": 5.73,
882
+ "step": 48000,
883
+ "train_speed(iter/s)": 1.059871
884
+ },
885
+ {
886
+ "epoch": 0.41409811990915457,
887
+ "grad_norm": 0.026930488646030426,
888
+ "learning_rate": 6.792970972259381e-05,
889
+ "loss": 0.005688785552978515,
890
+ "memory(GiB)": 5.73,
891
+ "step": 48500,
892
+ "train_speed(iter/s)": 1.062309
893
+ },
894
+ {
895
+ "epoch": 0.4183671726917232,
896
+ "grad_norm": 0.024773526936769485,
897
+ "learning_rate": 6.726900980081639e-05,
898
+ "loss": 0.005612356185913086,
899
+ "memory(GiB)": 5.73,
900
+ "step": 49000,
901
+ "train_speed(iter/s)": 1.06471
902
+ },
903
+ {
904
+ "epoch": 0.42263622547429175,
905
+ "grad_norm": 0.025835830718278885,
906
+ "learning_rate": 6.660486810121244e-05,
907
+ "loss": 0.005570381164550781,
908
+ "memory(GiB)": 5.73,
909
+ "step": 49500,
910
+ "train_speed(iter/s)": 1.067072
911
+ },
912
+ {
913
+ "epoch": 0.42690527825686037,
914
+ "grad_norm": 0.028116557747125626,
915
+ "learning_rate": 6.593741698969073e-05,
916
+ "loss": 0.005553098201751709,
917
+ "memory(GiB)": 5.73,
918
+ "step": 50000,
919
+ "train_speed(iter/s)": 1.069395
920
+ },
921
+ {
922
+ "epoch": 0.431174331039429,
923
+ "grad_norm": 0.026658741757273674,
924
+ "learning_rate": 6.526678949173808e-05,
925
+ "loss": 0.005453477859497071,
926
+ "memory(GiB)": 5.73,
927
+ "step": 50500,
928
+ "train_speed(iter/s)": 1.065559
929
+ },
930
+ {
931
+ "epoch": 0.4354433838219976,
932
+ "grad_norm": 0.02522198110818863,
933
+ "learning_rate": 6.459311926590695e-05,
934
+ "loss": 0.005405562877655029,
935
+ "memory(GiB)": 5.73,
936
+ "step": 51000,
937
+ "train_speed(iter/s)": 1.061202
938
+ },
939
+ {
940
+ "epoch": 0.43971243660456616,
941
+ "grad_norm": 0.019938671961426735,
942
+ "learning_rate": 6.391654057717676e-05,
943
+ "loss": 0.005375346183776855,
944
+ "memory(GiB)": 5.73,
945
+ "step": 51500,
946
+ "train_speed(iter/s)": 1.05697
947
+ },
948
+ {
949
+ "epoch": 0.4439814893871348,
950
+ "grad_norm": 0.02449255809187889,
951
+ "learning_rate": 6.32371882701944e-05,
952
+ "loss": 0.00538975715637207,
953
+ "memory(GiB)": 5.73,
954
+ "step": 52000,
955
+ "train_speed(iter/s)": 1.053086
956
+ },
957
+ {
958
+ "epoch": 0.4482505421697034,
959
+ "grad_norm": 0.027349578216671944,
960
+ "learning_rate": 6.25551977423992e-05,
961
+ "loss": 0.005338613510131836,
962
+ "memory(GiB)": 5.73,
963
+ "step": 52500,
964
+ "train_speed(iter/s)": 1.049655
965
+ },
966
+ {
967
+ "epoch": 0.452519594952272,
968
+ "grad_norm": 0.02677008882164955,
969
+ "learning_rate": 6.187070491703767e-05,
970
+ "loss": 0.005392338752746582,
971
+ "memory(GiB)": 5.73,
972
+ "step": 53000,
973
+ "train_speed(iter/s)": 1.046434
974
+ },
975
+ {
976
+ "epoch": 0.45678864773484057,
977
+ "grad_norm": 0.021387379616498947,
978
+ "learning_rate": 6.118384621607356e-05,
979
+ "loss": 0.0052757196426391605,
980
+ "memory(GiB)": 5.73,
981
+ "step": 53500,
982
+ "train_speed(iter/s)": 1.043484
983
+ },
984
+ {
985
+ "epoch": 0.4610577005174092,
986
+ "grad_norm": 0.021920237690210342,
987
+ "learning_rate": 6.0494758532998397e-05,
988
+ "loss": 0.0052754092216491695,
989
+ "memory(GiB)": 5.73,
990
+ "step": 54000,
991
+ "train_speed(iter/s)": 1.040652
992
+ },
993
+ {
994
+ "epoch": 0.4653267532999778,
995
+ "grad_norm": 0.02255011908710003,
996
+ "learning_rate": 5.980357920554813e-05,
997
+ "loss": 0.005176177024841308,
998
+ "memory(GiB)": 5.73,
999
+ "step": 54500,
1000
+ "train_speed(iter/s)": 1.03761
1001
+ },
1002
+ {
1003
+ "epoch": 0.4695958060825464,
1004
+ "grad_norm": 0.023933693766593933,
1005
+ "learning_rate": 5.91104459883312e-05,
1006
+ "loss": 0.00518220043182373,
1007
+ "memory(GiB)": 5.73,
1008
+ "step": 55000,
1009
+ "train_speed(iter/s)": 1.03464
1010
+ },
1011
+ {
1012
+ "epoch": 0.47386485886511504,
1013
+ "grad_norm": 0.02640974149107933,
1014
+ "learning_rate": 5.8415497025373545e-05,
1015
+ "loss": 0.0051289405822753905,
1016
+ "memory(GiB)": 5.73,
1017
+ "step": 55500,
1018
+ "train_speed(iter/s)": 1.032562
1019
+ },
1020
+ {
1021
+ "epoch": 0.4781339116476836,
1022
+ "grad_norm": 0.027417296543717384,
1023
+ "learning_rate": 5.771887082258598e-05,
1024
+ "loss": 0.005091516494750976,
1025
+ "memory(GiB)": 5.73,
1026
+ "step": 56000,
1027
+ "train_speed(iter/s)": 1.031309
1028
+ },
1029
+ {
1030
+ "epoch": 0.4824029644302522,
1031
+ "grad_norm": 0.02626318484544754,
1032
+ "learning_rate": 5.7020706220159446e-05,
1033
+ "loss": 0.005014698505401611,
1034
+ "memory(GiB)": 5.73,
1035
+ "step": 56500,
1036
+ "train_speed(iter/s)": 1.030475
1037
+ },
1038
+ {
1039
+ "epoch": 0.48667201721282083,
1040
+ "grad_norm": 0.022486470639705658,
1041
+ "learning_rate": 5.6321142364893655e-05,
1042
+ "loss": 0.00502289867401123,
1043
+ "memory(GiB)": 5.73,
1044
+ "step": 57000,
1045
+ "train_speed(iter/s)": 1.029867
1046
+ },
1047
+ {
1048
+ "epoch": 0.49094106999538945,
1049
+ "grad_norm": 0.024762239307165146,
1050
+ "learning_rate": 5.562031868246459e-05,
1051
+ "loss": 0.004976710319519043,
1052
+ "memory(GiB)": 5.73,
1053
+ "step": 57500,
1054
+ "train_speed(iter/s)": 1.029133
1055
+ },
1056
+ {
1057
+ "epoch": 0.495210122777958,
1058
+ "grad_norm": 0.02197747305035591,
1059
+ "learning_rate": 5.49183748496365e-05,
1060
+ "loss": 0.004930309295654297,
1061
+ "memory(GiB)": 5.73,
1062
+ "step": 58000,
1063
+ "train_speed(iter/s)": 1.03126
1064
+ },
1065
+ {
1066
+ "epoch": 0.4994791755605266,
1067
+ "grad_norm": 0.017993444576859474,
1068
+ "learning_rate": 5.421545076642376e-05,
1069
+ "loss": 0.004885564804077149,
1070
+ "memory(GiB)": 5.73,
1071
+ "step": 58500,
1072
+ "train_speed(iter/s)": 1.033407
1073
+ },
1074
+ {
1075
+ "epoch": 0.5037482283430952,
1076
+ "grad_norm": 0.023290056735277176,
1077
+ "learning_rate": 5.351168652820825e-05,
1078
+ "loss": 0.004815481662750244,
1079
+ "memory(GiB)": 5.73,
1080
+ "step": 59000,
1081
+ "train_speed(iter/s)": 1.035534
1082
+ },
1083
+ {
1084
+ "epoch": 0.5080172811256638,
1085
+ "grad_norm": 0.02278745174407959,
1086
+ "learning_rate": 5.2807222397817946e-05,
1087
+ "loss": 0.0048018951416015625,
1088
+ "memory(GiB)": 5.73,
1089
+ "step": 59500,
1090
+ "train_speed(iter/s)": 1.037635
1091
+ },
1092
+ {
1093
+ "epoch": 0.5122863339082324,
1094
+ "grad_norm": 0.01709616929292679,
1095
+ "learning_rate": 5.210219877757185e-05,
1096
+ "loss": 0.004790943622589111,
1097
+ "memory(GiB)": 5.73,
1098
+ "step": 60000,
1099
+ "train_speed(iter/s)": 1.039708
1100
+ },
1101
+ {
1102
+ "epoch": 0.516555386690801,
1103
+ "grad_norm": 0.024141253903508186,
1104
+ "learning_rate": 5.139675618129741e-05,
1105
+ "loss": 0.0047971105575561526,
1106
+ "memory(GiB)": 5.73,
1107
+ "step": 60500,
1108
+ "train_speed(iter/s)": 1.041714
1109
+ },
1110
+ {
1111
+ "epoch": 0.5208244394733696,
1112
+ "grad_norm": 0.025403697043657303,
1113
+ "learning_rate": 5.069103520632558e-05,
1114
+ "loss": 0.0046922645568847655,
1115
+ "memory(GiB)": 5.73,
1116
+ "step": 61000,
1117
+ "train_speed(iter/s)": 1.043735
1118
+ },
1119
+ {
1120
+ "epoch": 0.5250934922559383,
1121
+ "grad_norm": 0.023161958903074265,
1122
+ "learning_rate": 4.998517650546916e-05,
1123
+ "loss": 0.0046929998397827145,
1124
+ "memory(GiB)": 5.73,
1125
+ "step": 61500,
1126
+ "train_speed(iter/s)": 1.045718
1127
+ },
1128
+ {
1129
+ "epoch": 0.5293625450385069,
1130
+ "grad_norm": 0.022052627056837082,
1131
+ "learning_rate": 4.927932075899032e-05,
1132
+ "loss": 0.004638696193695068,
1133
+ "memory(GiB)": 5.73,
1134
+ "step": 62000,
1135
+ "train_speed(iter/s)": 1.043334
1136
+ },
1137
+ {
1138
+ "epoch": 0.5336315978210755,
1139
+ "grad_norm": 0.023017114028334618,
1140
+ "learning_rate": 4.857360864656229e-05,
1141
+ "loss": 0.004680471420288086,
1142
+ "memory(GiB)": 5.73,
1143
+ "step": 62500,
1144
+ "train_speed(iter/s)": 1.041039
1145
+ },
1146
+ {
1147
+ "epoch": 0.5379006506036441,
1148
+ "grad_norm": 0.023162037134170532,
1149
+ "learning_rate": 4.7868180819231614e-05,
1150
+ "loss": 0.004635006904602051,
1151
+ "memory(GiB)": 5.73,
1152
+ "step": 63000,
1153
+ "train_speed(iter/s)": 1.039508
1154
+ },
1155
+ {
1156
+ "epoch": 0.5421697033862126,
1157
+ "grad_norm": 0.02154356613755226,
1158
+ "learning_rate": 4.7163177871385713e-05,
1159
+ "loss": 0.004594725131988525,
1160
+ "memory(GiB)": 5.73,
1161
+ "step": 63500,
1162
+ "train_speed(iter/s)": 1.037869
1163
+ },
1164
+ {
1165
+ "epoch": 0.5464387561687812,
1166
+ "grad_norm": 0.024489399045705795,
1167
+ "learning_rate": 4.6458740312731915e-05,
1168
+ "loss": 0.004505970001220703,
1169
+ "memory(GiB)": 5.73,
1170
+ "step": 64000,
1171
+ "train_speed(iter/s)": 1.036001
1172
+ },
1173
+ {
1174
+ "epoch": 0.5507078089513499,
1175
+ "grad_norm": 0.02230563387274742,
1176
+ "learning_rate": 4.575500854029343e-05,
1177
+ "loss": 0.004512208938598633,
1178
+ "memory(GiB)": 5.73,
1179
+ "step": 64500,
1180
+ "train_speed(iter/s)": 1.034042
1181
+ },
1182
+ {
1183
+ "epoch": 0.5549768617339185,
1184
+ "grad_norm": 0.024222563952207565,
1185
+ "learning_rate": 4.5052122810427655e-05,
1186
+ "loss": 0.004453976154327393,
1187
+ "memory(GiB)": 5.73,
1188
+ "step": 65000,
1189
+ "train_speed(iter/s)": 1.033383
1190
+ },
1191
+ {
1192
+ "epoch": 0.5592459145164871,
1193
+ "grad_norm": 0.024108612909913063,
1194
+ "learning_rate": 4.435022321087251e-05,
1195
+ "loss": 0.004433969497680664,
1196
+ "memory(GiB)": 5.73,
1197
+ "step": 65500,
1198
+ "train_speed(iter/s)": 1.031504
1199
+ },
1200
+ {
1201
+ "epoch": 0.5635149672990557,
1202
+ "grad_norm": 0.02404128573834896,
1203
+ "learning_rate": 4.3649449632826524e-05,
1204
+ "loss": 0.004369840621948242,
1205
+ "memory(GiB)": 5.73,
1206
+ "step": 66000,
1207
+ "train_speed(iter/s)": 1.029515
1208
+ },
1209
+ {
1210
+ "epoch": 0.5677840200816243,
1211
+ "grad_norm": 0.02122694067656994,
1212
+ "learning_rate": 4.294994174306796e-05,
1213
+ "loss": 0.00436569881439209,
1214
+ "memory(GiB)": 5.73,
1215
+ "step": 66500,
1216
+ "train_speed(iter/s)": 1.027863
1217
+ },
1218
+ {
1219
+ "epoch": 0.5720530728641929,
1220
+ "grad_norm": 0.02231895923614502,
1221
+ "learning_rate": 4.2251838956118646e-05,
1222
+ "loss": 0.004324491500854492,
1223
+ "memory(GiB)": 5.73,
1224
+ "step": 67000,
1225
+ "train_speed(iter/s)": 1.026447
1226
+ },
1227
+ {
1228
+ "epoch": 0.5763221256467616,
1229
+ "grad_norm": 0.01995609700679779,
1230
+ "learning_rate": 4.1555280406458243e-05,
1231
+ "loss": 0.004273086071014404,
1232
+ "memory(GiB)": 5.73,
1233
+ "step": 67500,
1234
+ "train_speed(iter/s)": 1.025095
1235
+ },
1236
+ {
1237
+ "epoch": 0.5805911784293301,
1238
+ "grad_norm": 0.023028602823615074,
1239
+ "learning_rate": 4.086040492079418e-05,
1240
+ "loss": 0.004247576713562012,
1241
+ "memory(GiB)": 5.73,
1242
+ "step": 68000,
1243
+ "train_speed(iter/s)": 1.024105
1244
+ },
1245
+ {
1246
+ "epoch": 0.5848602312118987,
1247
+ "grad_norm": 0.02473682351410389,
1248
+ "learning_rate": 4.016735099039299e-05,
1249
+ "loss": 0.004212839603424072,
1250
+ "memory(GiB)": 5.73,
1251
+ "step": 68500,
1252
+ "train_speed(iter/s)": 1.025833
1253
+ },
1254
+ {
1255
+ "epoch": 0.5891292839944673,
1256
+ "grad_norm": 0.02591153420507908,
1257
+ "learning_rate": 3.947625674347842e-05,
1258
+ "loss": 0.004188227653503418,
1259
+ "memory(GiB)": 5.73,
1260
+ "step": 69000,
1261
+ "train_speed(iter/s)": 1.027676
1262
+ },
1263
+ {
1264
+ "epoch": 0.5933983367770359,
1265
+ "grad_norm": 0.022372225299477577,
1266
+ "learning_rate": 3.878725991770206e-05,
1267
+ "loss": 0.00420154619216919,
1268
+ "memory(GiB)": 5.73,
1269
+ "step": 69500,
1270
+ "train_speed(iter/s)": 1.029494
1271
+ },
1272
+ {
1273
+ "epoch": 0.5976673895596045,
1274
+ "grad_norm": 0.020848704501986504,
1275
+ "learning_rate": 3.810049783269169e-05,
1276
+ "loss": 0.004149648189544677,
1277
+ "memory(GiB)": 5.73,
1278
+ "step": 70000,
1279
+ "train_speed(iter/s)": 1.031265
1280
+ },
1281
+ {
1282
+ "epoch": 0.6019364423421731,
1283
+ "grad_norm": 0.020646043121814728,
1284
+ "learning_rate": 3.7416107362682874e-05,
1285
+ "loss": 0.004120903968811035,
1286
+ "memory(GiB)": 5.73,
1287
+ "step": 70500,
1288
+ "train_speed(iter/s)": 1.03288
1289
+ },
1290
+ {
1291
+ "epoch": 0.6062054951247418,
1292
+ "grad_norm": 0.02318960428237915,
1293
+ "learning_rate": 3.673422490923957e-05,
1294
+ "loss": 0.004070096492767334,
1295
+ "memory(GiB)": 5.73,
1296
+ "step": 71000,
1297
+ "train_speed(iter/s)": 1.031156
1298
+ },
1299
+ {
1300
+ "epoch": 0.6104745479073104,
1301
+ "grad_norm": 0.01929691806435585,
1302
+ "learning_rate": 3.605498637406871e-05,
1303
+ "loss": 0.0040385212898254395,
1304
+ "memory(GiB)": 5.73,
1305
+ "step": 71500,
1306
+ "train_speed(iter/s)": 1.029013
1307
+ },
1308
+ {
1309
+ "epoch": 0.6147436006898789,
1310
+ "grad_norm": 0.0221713837236166,
1311
+ "learning_rate": 3.5378527131934415e-05,
1312
+ "loss": 0.004040939807891846,
1313
+ "memory(GiB)": 5.73,
1314
+ "step": 72000,
1315
+ "train_speed(iter/s)": 1.027147
1316
+ },
1317
+ {
1318
+ "epoch": 0.6190126534724475,
1319
+ "grad_norm": 0.026295281946659088,
1320
+ "learning_rate": 3.470498200367745e-05,
1321
+ "loss": 0.003968184471130371,
1322
+ "memory(GiB)": 5.73,
1323
+ "step": 72500,
1324
+ "train_speed(iter/s)": 1.025599
1325
+ },
1326
+ {
1327
+ "epoch": 0.6232817062550161,
1328
+ "grad_norm": 0.022878218442201614,
1329
+ "learning_rate": 3.403448522934484e-05,
1330
+ "loss": 0.00394676160812378,
1331
+ "memory(GiB)": 5.73,
1332
+ "step": 73000,
1333
+ "train_speed(iter/s)": 1.024666
1334
+ },
1335
+ {
1336
+ "epoch": 0.6275507590375847,
1337
+ "grad_norm": 0.017653649672865868,
1338
+ "learning_rate": 3.3367170441435326e-05,
1339
+ "loss": 0.003906076669692993,
1340
+ "memory(GiB)": 5.73,
1341
+ "step": 73500,
1342
+ "train_speed(iter/s)": 1.023811
1343
+ },
1344
+ {
1345
+ "epoch": 0.6318198118201533,
1346
+ "grad_norm": 0.021848097443580627,
1347
+ "learning_rate": 3.270317063826594e-05,
1348
+ "loss": 0.0038814377784729005,
1349
+ "memory(GiB)": 5.73,
1350
+ "step": 74000,
1351
+ "train_speed(iter/s)": 1.023855
1352
+ },
1353
+ {
1354
+ "epoch": 0.636088864602722,
1355
+ "grad_norm": 0.022029753774404526,
1356
+ "learning_rate": 3.204261815746496e-05,
1357
+ "loss": 0.003879170894622803,
1358
+ "memory(GiB)": 5.73,
1359
+ "step": 74500,
1360
+ "train_speed(iter/s)": 1.024885
1361
+ },
1362
+ {
1363
+ "epoch": 0.6403579173852906,
1364
+ "grad_norm": 0.02403407171368599,
1365
+ "learning_rate": 3.1385644649596445e-05,
1366
+ "loss": 0.003841569900512695,
1367
+ "memory(GiB)": 5.73,
1368
+ "step": 75000,
1369
+ "train_speed(iter/s)": 1.026583
1370
+ },
1371
+ {
1372
+ "epoch": 0.6446269701678592,
1373
+ "grad_norm": 0.023609979078173637,
1374
+ "learning_rate": 3.073238105192191e-05,
1375
+ "loss": 0.0038005766868591307,
1376
+ "memory(GiB)": 5.73,
1377
+ "step": 75500,
1378
+ "train_speed(iter/s)": 1.028269
1379
+ },
1380
+ {
1381
+ "epoch": 0.6488960229504278,
1382
+ "grad_norm": 0.01760837249457836,
1383
+ "learning_rate": 3.008295756230397e-05,
1384
+ "loss": 0.0037522752285003664,
1385
+ "memory(GiB)": 5.73,
1386
+ "step": 76000,
1387
+ "train_speed(iter/s)": 1.029938
1388
+ },
1389
+ {
1390
+ "epoch": 0.6531650757329963,
1391
+ "grad_norm": 0.021126747131347656,
1392
+ "learning_rate": 2.943750361325739e-05,
1393
+ "loss": 0.003741382837295532,
1394
+ "memory(GiB)": 5.73,
1395
+ "step": 76500,
1396
+ "train_speed(iter/s)": 1.03159
1397
+ },
1398
+ {
1399
+ "epoch": 0.6574341285155649,
1400
+ "grad_norm": 0.02278253622353077,
1401
+ "learning_rate": 2.879614784615281e-05,
1402
+ "loss": 0.0037315216064453126,
1403
+ "memory(GiB)": 5.73,
1404
+ "step": 77000,
1405
+ "train_speed(iter/s)": 1.033224
1406
+ },
1407
+ {
1408
+ "epoch": 0.6617031812981335,
1409
+ "grad_norm": 0.023599898442626,
1410
+ "learning_rate": 2.8159018085577936e-05,
1411
+ "loss": 0.0037167372703552247,
1412
+ "memory(GiB)": 5.73,
1413
+ "step": 77500,
1414
+ "train_speed(iter/s)": 1.034809
1415
+ },
1416
+ {
1417
+ "epoch": 0.6659722340807022,
1418
+ "grad_norm": 0.02341049537062645,
1419
+ "learning_rate": 2.752624131386169e-05,
1420
+ "loss": 0.0036745924949645997,
1421
+ "memory(GiB)": 5.73,
1422
+ "step": 78000,
1423
+ "train_speed(iter/s)": 1.036412
1424
+ },
1425
+ {
1426
+ "epoch": 0.6702412868632708,
1427
+ "grad_norm": 0.021224385127425194,
1428
+ "learning_rate": 2.68979436457661e-05,
1429
+ "loss": 0.0036270735263824465,
1430
+ "memory(GiB)": 5.73,
1431
+ "step": 78500,
1432
+ "train_speed(iter/s)": 1.035171
1433
+ },
1434
+ {
1435
+ "epoch": 0.6745103396458394,
1436
+ "grad_norm": 0.02113701030611992,
1437
+ "learning_rate": 2.6274250303351277e-05,
1438
+ "loss": 0.003653192758560181,
1439
+ "memory(GiB)": 5.73,
1440
+ "step": 79000,
1441
+ "train_speed(iter/s)": 1.033322
1442
+ },
1443
+ {
1444
+ "epoch": 0.678779392428408,
1445
+ "grad_norm": 0.02273395285010338,
1446
+ "learning_rate": 2.5655285591018053e-05,
1447
+ "loss": 0.003600950241088867,
1448
+ "memory(GiB)": 5.73,
1449
+ "step": 79500,
1450
+ "train_speed(iter/s)": 1.03175
1451
+ },
1452
+ {
1453
+ "epoch": 0.6830484452109766,
1454
+ "grad_norm": 0.01843477226793766,
1455
+ "learning_rate": 2.5041172870733688e-05,
1456
+ "loss": 0.003576310634613037,
1457
+ "memory(GiB)": 5.73,
1458
+ "step": 80000,
1459
+ "train_speed(iter/s)": 1.030732
1460
+ },
1461
+ {
1462
+ "epoch": 0.6873174979935452,
1463
+ "grad_norm": 0.021858269348740578,
1464
+ "learning_rate": 2.4432034537445504e-05,
1465
+ "loss": 0.0035532989501953125,
1466
+ "memory(GiB)": 5.73,
1467
+ "step": 80500,
1468
+ "train_speed(iter/s)": 1.030234
1469
+ },
1470
+ {
1471
+ "epoch": 0.6915865507761138,
1472
+ "grad_norm": 0.02257091924548149,
1473
+ "learning_rate": 2.3827991994686855e-05,
1474
+ "loss": 0.0034713072776794435,
1475
+ "memory(GiB)": 5.73,
1476
+ "step": 81000,
1477
+ "train_speed(iter/s)": 1.029758
1478
+ },
1479
+ {
1480
+ "epoch": 0.6958556035586824,
1481
+ "grad_norm": 0.02086802013218403,
1482
+ "learning_rate": 2.3229165630381254e-05,
1483
+ "loss": 0.0035013933181762694,
1484
+ "memory(GiB)": 5.73,
1485
+ "step": 81500,
1486
+ "train_speed(iter/s)": 1.029855
1487
+ },
1488
+ {
1489
+ "epoch": 0.700124656341251,
1490
+ "grad_norm": 0.02159390039741993,
1491
+ "learning_rate": 2.263567479284836e-05,
1492
+ "loss": 0.0034512946605682374,
1493
+ "memory(GiB)": 5.73,
1494
+ "step": 82000,
1495
+ "train_speed(iter/s)": 1.031396
1496
+ },
1497
+ {
1498
+ "epoch": 0.7043937091238196,
1499
+ "grad_norm": 0.02238837257027626,
1500
+ "learning_rate": 2.2047637767017594e-05,
1501
+ "loss": 0.0034342200756073,
1502
+ "memory(GiB)": 5.73,
1503
+ "step": 82500,
1504
+ "train_speed(iter/s)": 1.029892
1505
+ },
1506
+ {
1507
+ "epoch": 0.7086627619063882,
1508
+ "grad_norm": 0.022957606241106987,
1509
+ "learning_rate": 2.1465171750853386e-05,
1510
+ "loss": 0.003412749528884888,
1511
+ "memory(GiB)": 5.73,
1512
+ "step": 83000,
1513
+ "train_speed(iter/s)": 1.02831
1514
+ },
1515
+ {
1516
+ "epoch": 0.7129318146889568,
1517
+ "grad_norm": 0.019689923152327538,
1518
+ "learning_rate": 2.0888392831997238e-05,
1519
+ "loss": 0.00341141414642334,
1520
+ "memory(GiB)": 5.73,
1521
+ "step": 83500,
1522
+ "train_speed(iter/s)": 1.026956
1523
+ },
1524
+ {
1525
+ "epoch": 0.7172008674715254,
1526
+ "grad_norm": 0.023503178730607033,
1527
+ "learning_rate": 2.03174159646311e-05,
1528
+ "loss": 0.0033840060234069822,
1529
+ "memory(GiB)": 5.73,
1530
+ "step": 84000,
1531
+ "train_speed(iter/s)": 1.025661
1532
+ },
1533
+ {
1534
+ "epoch": 0.7214699202540941,
1535
+ "grad_norm": 0.02037668041884899,
1536
+ "learning_rate": 1.9752354946566354e-05,
1537
+ "loss": 0.0033505113124847412,
1538
+ "memory(GiB)": 5.73,
1539
+ "step": 84500,
1540
+ "train_speed(iter/s)": 1.02442
1541
+ },
1542
+ {
1543
+ "epoch": 0.7257389730366626,
1544
+ "grad_norm": 0.025251047685742378,
1545
+ "learning_rate": 1.9193322396563785e-05,
1546
+ "loss": 0.0033303892612457277,
1547
+ "memory(GiB)": 5.73,
1548
+ "step": 85000,
1549
+ "train_speed(iter/s)": 1.023539
1550
+ },
1551
+ {
1552
+ "epoch": 0.7300080258192312,
1553
+ "grad_norm": 0.01954658329486847,
1554
+ "learning_rate": 1.8640429731887998e-05,
1555
+ "loss": 0.003283708333969116,
1556
+ "memory(GiB)": 5.73,
1557
+ "step": 85500,
1558
+ "train_speed(iter/s)": 1.023297
1559
+ },
1560
+ {
1561
+ "epoch": 0.7342770786017998,
1562
+ "grad_norm": 0.0188963171094656,
1563
+ "learning_rate": 1.809378714610167e-05,
1564
+ "loss": 0.003271867275238037,
1565
+ "memory(GiB)": 5.73,
1566
+ "step": 86000,
1567
+ "train_speed(iter/s)": 1.023267
1568
+ },
1569
+ {
1570
+ "epoch": 0.7385461313843684,
1571
+ "grad_norm": 0.02359418198466301,
1572
+ "learning_rate": 1.7553503587103505e-05,
1573
+ "loss": 0.0032482266426086424,
1574
+ "memory(GiB)": 5.73,
1575
+ "step": 86500,
1576
+ "train_speed(iter/s)": 1.023221
1577
+ },
1578
+ {
1579
+ "epoch": 0.742815184166937,
1580
+ "grad_norm": 0.020105060189962387,
1581
+ "learning_rate": 1.701968673541458e-05,
1582
+ "loss": 0.003266146183013916,
1583
+ "memory(GiB)": 5.73,
1584
+ "step": 87000,
1585
+ "train_speed(iter/s)": 1.023208
1586
+ },
1587
+ {
1588
+ "epoch": 0.7470842369495057,
1589
+ "grad_norm": 0.018053073436021805,
1590
+ "learning_rate": 1.649244298271714e-05,
1591
+ "loss": 0.003204747676849365,
1592
+ "memory(GiB)": 5.73,
1593
+ "step": 87500,
1594
+ "train_speed(iter/s)": 1.023206
1595
+ },
1596
+ {
1597
+ "epoch": 0.7513532897320743,
1598
+ "grad_norm": 0.021992964670062065,
1599
+ "learning_rate": 1.5971877410650354e-05,
1600
+ "loss": 0.0031999170780181883,
1601
+ "memory(GiB)": 5.73,
1602
+ "step": 88000,
1603
+ "train_speed(iter/s)": 1.023363
1604
+ },
1605
+ {
1606
+ "epoch": 0.7556223425146429,
1607
+ "grad_norm": 0.022893013432621956,
1608
+ "learning_rate": 1.545809376986727e-05,
1609
+ "loss": 0.0031597645282745363,
1610
+ "memory(GiB)": 5.73,
1611
+ "step": 88500,
1612
+ "train_speed(iter/s)": 1.023915
1613
+ },
1614
+ {
1615
+ "epoch": 0.7598913952972115,
1616
+ "grad_norm": 0.021105078980326653,
1617
+ "learning_rate": 1.4951194459356693e-05,
1618
+ "loss": 0.003171279191970825,
1619
+ "memory(GiB)": 5.73,
1620
+ "step": 89000,
1621
+ "train_speed(iter/s)": 1.024754
1622
+ },
1623
+ {
1624
+ "epoch": 0.76416044807978,
1625
+ "grad_norm": 0.01919909007847309,
1626
+ "learning_rate": 1.445128050603493e-05,
1627
+ "loss": 0.0031237168312072752,
1628
+ "memory(GiB)": 5.73,
1629
+ "step": 89500,
1630
+ "train_speed(iter/s)": 1.026165
1631
+ },
1632
+ {
1633
+ "epoch": 0.7684295008623486,
1634
+ "grad_norm": 0.02016974799335003,
1635
+ "learning_rate": 1.39584515446106e-05,
1636
+ "loss": 0.003100724697113037,
1637
+ "memory(GiB)": 5.73,
1638
+ "step": 90000,
1639
+ "train_speed(iter/s)": 1.027578
1640
+ }
1641
+ ],
1642
+ "logging_steps": 500,
1643
+ "max_steps": 117122,
1644
+ "num_input_tokens_seen": 0,
1645
+ "num_train_epochs": 1,
1646
+ "save_steps": 10000,
1647
+ "stateful_callbacks": {
1648
+ "TrainerControl": {
1649
+ "args": {
1650
+ "should_epoch_stop": false,
1651
+ "should_evaluate": false,
1652
+ "should_log": false,
1653
+ "should_save": true,
1654
+ "should_training_stop": false
1655
+ },
1656
+ "attributes": {}
1657
+ }
1658
+ },
1659
+ "total_flos": 3.069758781932123e+19,
1660
+ "train_batch_size": 8,
1661
+ "trial_name": null,
1662
+ "trial_params": null
1663
+ }
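The JSON that ends above appears to be the Trainer's trainer_state.json: the long list of records is its log_history, one entry per logging_steps=500 interval, each carrying the step, loss, learning_rate, grad_norm, memory and throughput readings shown in the diff. As a minimal sketch (assuming the file has been downloaded next to the script and that matplotlib is available), the loss and learning-rate curves can be recovered from it like this:

```python
# Minimal sketch: plot loss and learning rate from the log_history above.
# The file name/path is an assumption -- point it at the downloaded
# trainer_state.json (often found inside a checkpoint-*/ folder).
import json

import matplotlib.pyplot as plt  # assumed available in the environment

with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# Keep only training records (they carry a "loss" key, as in the entries above).
train_logs = [e for e in state["log_history"] if "loss" in e]

steps = [e["step"] for e in train_logs]
losses = [e["loss"] for e in train_logs]
lrs = [e["learning_rate"] for e in train_logs]

fig, (ax_loss, ax_lr) = plt.subplots(2, 1, sharex=True, figsize=(8, 6))
ax_loss.plot(steps, losses)
ax_loss.set_ylabel("train loss")
ax_lr.plot(steps, lrs)
ax_lr.set_ylabel("learning rate")
ax_lr.set_xlabel("step")
fig.tight_layout()
fig.savefig("training_curves.png")
```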
training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f06904dd14a5f081e106cc2dd29d26cb53fc4d95430b80a46f0c4b1883b25d60
3
+ size 8401
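training_args.bin is tracked through Git LFS, so the three lines above are only the pointer (an oid and an 8.4 kB size), not the binary itself. That binary is normally the pickled TrainingArguments object saved by the Trainer; a hedged sketch for inspecting it after fetching the real file (e.g. via git lfs pull or a hub download) could look like this:

```python
# Minimal sketch: inspect the downloaded training_args.bin.
# weights_only=False is needed on recent PyTorch because the file is a
# pickled arguments object rather than a plain tensor payload; only do
# this for files you trust.
import torch

args = torch.load("training_args.bin", weights_only=False)

print(type(args).__name__)               # e.g. TrainingArguments (or a subclass)
print(args.per_device_train_batch_size)  # should line up with train_batch_size above
print(args.learning_rate)
```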
vocab.json ADDED
The diff for this file is too large to render. See raw diff
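Since vocab.json is too large for the diff viewer, the practical way to check it is to load the tokenizer rather than read the raw diff. A minimal sketch, with the repository id below as a placeholder for this repo (or a local checkout of this commit):

```python
# Minimal sketch: load the tokenizer shipped in this commit and sanity-check
# the vocabulary instead of inspecting vocab.json by hand.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path-or-repo-id-of-this-model")  # placeholder

print(len(tok))                     # vocabulary size, including added special tokens
print(tok.tokenize("hello world"))  # quick check that the vocab/merges resolve
```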