ZeroUniqueness committed on
Commit 745de28
1 parent: 5c3ef38

Training in progress step 5900

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. checkpoint-5700/adapter_model/README.md +20 -0
  2. checkpoint-5700/adapter_model/adapter_config.json +26 -0
  3. checkpoint-5700/adapter_model/adapter_model.bin +3 -0
  4. checkpoint-5800/README.md +20 -0
  5. checkpoint-5800/adapter_config.json +26 -0
  6. checkpoint-5800/adapter_model.bin +3 -0
  7. checkpoint-5800/adapter_model/README.md +20 -0
  8. checkpoint-5800/adapter_model/adapter_config.json +26 -0
  9. checkpoint-5800/adapter_model/adapter_model.bin +3 -0
  10. checkpoint-5800/optimizer.pt +3 -0
  11. checkpoint-5800/rng_state_0.pth +3 -0
  12. checkpoint-5800/rng_state_1.pth +3 -0
  13. checkpoint-5800/rng_state_10.pth +3 -0
  14. checkpoint-5800/rng_state_11.pth +3 -0
  15. checkpoint-5800/rng_state_12.pth +3 -0
  16. checkpoint-5800/rng_state_13.pth +3 -0
  17. checkpoint-5800/rng_state_2.pth +3 -0
  18. checkpoint-5800/rng_state_3.pth +3 -0
  19. checkpoint-5800/rng_state_4.pth +3 -0
  20. checkpoint-5800/rng_state_5.pth +3 -0
  21. checkpoint-5800/rng_state_6.pth +3 -0
  22. checkpoint-5800/rng_state_7.pth +3 -0
  23. checkpoint-5800/rng_state_8.pth +3 -0
  24. checkpoint-5800/rng_state_9.pth +3 -0
  25. checkpoint-5800/scheduler.pt +3 -0
  26. checkpoint-5800/trainer_state.json +1424 -0
  27. checkpoint-5800/training_args.bin +3 -0
  28. checkpoint-5900/README.md +20 -0
  29. checkpoint-5900/adapter_config.json +26 -0
  30. checkpoint-5900/adapter_model.bin +3 -0
  31. checkpoint-5900/adapter_model/README.md +20 -0
  32. checkpoint-5900/adapter_model/adapter_config.json +26 -0
  33. checkpoint-5900/adapter_model/adapter_model.bin +3 -0
  34. checkpoint-5900/optimizer.pt +3 -0
  35. checkpoint-5900/rng_state_0.pth +3 -0
  36. checkpoint-5900/rng_state_1.pth +3 -0
  37. checkpoint-5900/rng_state_10.pth +3 -0
  38. checkpoint-5900/rng_state_11.pth +3 -0
  39. checkpoint-5900/rng_state_12.pth +3 -0
  40. checkpoint-5900/rng_state_13.pth +3 -0
  41. checkpoint-5900/rng_state_2.pth +3 -0
  42. checkpoint-5900/rng_state_3.pth +3 -0
  43. checkpoint-5900/rng_state_4.pth +3 -0
  44. checkpoint-5900/rng_state_5.pth +3 -0
  45. checkpoint-5900/rng_state_6.pth +3 -0
  46. checkpoint-5900/rng_state_7.pth +3 -0
  47. checkpoint-5900/rng_state_8.pth +3 -0
  48. checkpoint-5900/rng_state_9.pth +3 -0
  49. checkpoint-5900/scheduler.pt +3 -0
  50. checkpoint-5900/trainer_state.json +1448 -0
checkpoint-5700/adapter_model/README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: bfloat16
+ ### Framework versions
+
+
+ - PEFT 0.5.0.dev0
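The README above only records the `bitsandbytes` settings as key-value pairs. For reference, a minimal sketch of the equivalent quantization setup in Python, assuming the `transformers`/`bitsandbytes` stack that PEFT 0.5.0.dev0 trains against; only the field values come from the README (and the base-model path from adapter_config.json below), the variable names and loading call are illustrative.

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Quantization settings copied from the checkpoint README: 4-bit NF4,
# double quantization, bfloat16 compute. Everything else is an assumption.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# Base-model path as recorded in adapter_config.json; device_map is illustrative.
base_model = AutoModelForCausalLM.from_pretrained(
    "/workspace/webui/models/TheBloke_Llama-2-13B-fp16",
    quantization_config=bnb_config,
    device_map="auto",
)
```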
checkpoint-5700/adapter_model/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "/workspace/webui/models/TheBloke_Llama-2-13B-fp16",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj",
+ "gate_proj",
+ "up_proj",
+ "o_proj",
+ "k_proj",
+ "down_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
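The adapter_config.json above maps one-to-one onto a `peft.LoraConfig`. A minimal sketch with every value taken from that file; only the variable name is invented.

```python
from peft import LoraConfig

# Mirrors checkpoint-5700/adapter_model/adapter_config.json:
# rank 32, alpha 16, dropout 0.05, LoRA on all attention and MLP projections.
lora_config = LoraConfig(
    r=32,
    lora_alpha=16,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "q_proj", "v_proj", "gate_proj", "up_proj",
        "o_proj", "k_proj", "down_proj",
    ],
)
```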
checkpoint-5700/adapter_model/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6bdd5402d919c62a264000ec96b1dd621956e56ddd66679cf8a429f111552d95
+ size 500897101
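The `.bin`, `.pt`, and `.pth` entries in this commit are Git LFS pointers (version, oid, size), not the weights themselves; this adapter resolves to roughly 500 MB. Once the LFS objects are pulled, attaching the adapter to a loaded base model would look roughly like the sketch below; `base_model` is assumed to be the quantized Llama-2-13B from the earlier snippet, and the path is this checkpoint's adapter directory.

```python
from peft import PeftModel

# Attach the saved LoRA adapter to an already-loaded base model (assumed above).
model = PeftModel.from_pretrained(base_model, "checkpoint-5700/adapter_model")
model.eval()
```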
checkpoint-5800/README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: bfloat16
+ ### Framework versions
+
+
+ - PEFT 0.5.0.dev0
checkpoint-5800/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "/workspace/webui/models/TheBloke_Llama-2-13B-fp16",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj",
+ "gate_proj",
+ "up_proj",
+ "o_proj",
+ "k_proj",
+ "down_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-5800/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c6c8948b163d7faf80bcb4ea3c791311da3d9b14d4257794aeb4a2b35935a025
+ size 500897101
checkpoint-5800/adapter_model/README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: bfloat16
+ ### Framework versions
+
+
+ - PEFT 0.5.0.dev0
checkpoint-5800/adapter_model/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "/workspace/webui/models/TheBloke_Llama-2-13B-fp16",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj",
+ "gate_proj",
+ "up_proj",
+ "o_proj",
+ "k_proj",
+ "down_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-5800/adapter_model/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c6c8948b163d7faf80bcb4ea3c791311da3d9b14d4257794aeb4a2b35935a025
+ size 500897101
checkpoint-5800/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fdafcf17dcbaf493ce7420b5efc55cc37f121d6d18e479f66229dfb7d3ebfe9a
+ size 1001752701
checkpoint-5800/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fdcc97c3d5ae4ead7b5285c5c5b8dddcbec730d6aced698514214e40163f6c80
+ size 27772
checkpoint-5800/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f0dfb5773126adda2e927345797eb8babdfc8aa673a963e413f06bda803f6b8
+ size 27772
checkpoint-5800/rng_state_10.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:481ee2d6343e3ee249f8f13ba72e97689ec745d34ca3eb731c5977d455e68087
+ size 27789
checkpoint-5800/rng_state_11.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56d4288a9e908f82added428e5a14ff5dd6e86473ebd372733e241f3c2a4e833
+ size 27789
checkpoint-5800/rng_state_12.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5749fda2bb629d9c3e4a0bb894676f98047fc123c7ada64e2385c1692f194369
+ size 27789
checkpoint-5800/rng_state_13.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ac848792be0fda751cfdebaa1419f845e441f823e55d97dc332cb6ebecab888
+ size 27789
checkpoint-5800/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:004ca3f2557bec1000ed01ad0ac091380c145ff4d9054e495042d9673b164cd3
+ size 27772
checkpoint-5800/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:83a6e03a2771bdfefb81f6b70a469b27bf46d9bac7c91cec25fbd8fc2e1bb9fd
+ size 27772
checkpoint-5800/rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:027cc230cd1577ece8a43abc1ed88fcc51ac874f84b8bcbe865b8338b17825dc
+ size 27772
checkpoint-5800/rng_state_5.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e15c6848b49b0375761f21215e2d56ea5a16e27885192cc4af7b1a3f9c325a2
+ size 27772
checkpoint-5800/rng_state_6.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0290c211df0a25ca2b739876f096befd7a2f50010f23fcfb700c648fc7c37b88
+ size 27772
checkpoint-5800/rng_state_7.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe7bdf51b5f2867802a2660bdbfa211be18d28a9f5e7325d4957f7ee65895a3a
+ size 27772
checkpoint-5800/rng_state_8.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c7043986c3cfa7014abbd2eb87d4f8ccda04f6a243deda922c9522e5c66f2fd
+ size 27772
checkpoint-5800/rng_state_9.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a3371033bac6e8e8c6fb2001ca45e33bd941b91fd370cce2d2edc6bac7df0e55
+ size 27772
checkpoint-5800/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d7e2a15463250851f8f0353fd93191418e2b4e589eb68be6185d12012957a33f
+ size 627
checkpoint-5800/trainer_state.json ADDED
@@ -0,0 +1,1424 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 2.2489336952307095,
5
+ "global_step": 5800,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 0.02,
12
+ "learning_rate": 0.0001999867761371633,
13
+ "loss": 1.0435,
14
+ "step": 50
15
+ },
16
+ {
17
+ "epoch": 0.04,
18
+ "learning_rate": 0.00019993306018843102,
19
+ "loss": 0.8918,
20
+ "step": 100
21
+ },
22
+ {
23
+ "epoch": 0.06,
24
+ "learning_rate": 0.00019983804784290833,
25
+ "loss": 0.8874,
26
+ "step": 150
27
+ },
28
+ {
29
+ "epoch": 0.08,
30
+ "learning_rate": 0.00019970177836355307,
31
+ "loss": 0.8839,
32
+ "step": 200
33
+ },
34
+ {
35
+ "epoch": 0.09,
36
+ "learning_rate": 0.00019961818913082012,
37
+ "loss": 0.8801,
38
+ "step": 225
39
+ },
40
+ {
41
+ "epoch": 0.1,
42
+ "learning_rate": 0.00019952430806244534,
43
+ "loss": 0.8753,
44
+ "step": 250
45
+ },
46
+ {
47
+ "epoch": 0.11,
48
+ "learning_rate": 0.00019942014485754635,
49
+ "loss": 0.8754,
50
+ "step": 275
51
+ },
52
+ {
53
+ "epoch": 0.12,
54
+ "learning_rate": 0.00019930571027751713,
55
+ "loss": 0.8751,
56
+ "step": 300
57
+ },
58
+ {
59
+ "epoch": 0.13,
60
+ "learning_rate": 0.0001991810161449164,
61
+ "loss": 0.8819,
62
+ "step": 325
63
+ },
64
+ {
65
+ "epoch": 0.14,
66
+ "learning_rate": 0.00019904607534224612,
67
+ "loss": 0.8744,
68
+ "step": 350
69
+ },
70
+ {
71
+ "epoch": 0.15,
72
+ "learning_rate": 0.00019890090181062063,
73
+ "loss": 0.8735,
74
+ "step": 375
75
+ },
76
+ {
77
+ "epoch": 0.16,
78
+ "learning_rate": 0.00019874551054832625,
79
+ "loss": 0.8703,
80
+ "step": 400
81
+ },
82
+ {
83
+ "epoch": 0.16,
84
+ "learning_rate": 0.00019857991760927193,
85
+ "loss": 0.8715,
86
+ "step": 425
87
+ },
88
+ {
89
+ "epoch": 0.17,
90
+ "learning_rate": 0.00019840414010133045,
91
+ "loss": 0.8714,
92
+ "step": 450
93
+ },
94
+ {
95
+ "epoch": 0.18,
96
+ "learning_rate": 0.00019821819618457114,
97
+ "loss": 0.8653,
98
+ "step": 475
99
+ },
100
+ {
101
+ "epoch": 0.19,
102
+ "learning_rate": 0.0001980221050693837,
103
+ "loss": 0.8716,
104
+ "step": 500
105
+ },
106
+ {
107
+ "epoch": 0.2,
108
+ "learning_rate": 0.00019781588701449338,
109
+ "loss": 0.8695,
110
+ "step": 525
111
+ },
112
+ {
113
+ "epoch": 0.21,
114
+ "learning_rate": 0.0001975995633248682,
115
+ "loss": 0.8746,
116
+ "step": 550
117
+ },
118
+ {
119
+ "epoch": 0.22,
120
+ "learning_rate": 0.00019737315634951762,
121
+ "loss": 0.8731,
122
+ "step": 575
123
+ },
124
+ {
125
+ "epoch": 0.23,
126
+ "learning_rate": 0.00019713668947918386,
127
+ "loss": 0.867,
128
+ "step": 600
129
+ },
130
+ {
131
+ "epoch": 0.24,
132
+ "learning_rate": 0.0001968901871439252,
133
+ "loss": 0.8706,
134
+ "step": 625
135
+ },
136
+ {
137
+ "epoch": 0.25,
138
+ "learning_rate": 0.000196633674810592,
139
+ "loss": 0.8595,
140
+ "step": 650
141
+ },
142
+ {
143
+ "epoch": 0.26,
144
+ "learning_rate": 0.0001963671789801958,
145
+ "loss": 0.8627,
146
+ "step": 675
147
+ },
148
+ {
149
+ "epoch": 0.27,
150
+ "learning_rate": 0.0001960907271851712,
151
+ "loss": 0.8607,
152
+ "step": 700
153
+ },
154
+ {
155
+ "epoch": 0.28,
156
+ "learning_rate": 0.00019580434798653173,
157
+ "loss": 0.858,
158
+ "step": 725
159
+ },
160
+ {
161
+ "epoch": 0.29,
162
+ "learning_rate": 0.00019550807097091876,
163
+ "loss": 0.8589,
164
+ "step": 750
165
+ },
166
+ {
167
+ "epoch": 0.3,
168
+ "learning_rate": 0.00019520192674754515,
169
+ "loss": 0.8561,
170
+ "step": 775
171
+ },
172
+ {
173
+ "epoch": 0.31,
174
+ "learning_rate": 0.00019488594694503264,
175
+ "loss": 0.8576,
176
+ "step": 800
177
+ },
178
+ {
179
+ "epoch": 0.32,
180
+ "learning_rate": 0.00019456016420814446,
181
+ "loss": 0.8597,
182
+ "step": 825
183
+ },
184
+ {
185
+ "epoch": 0.33,
186
+ "learning_rate": 0.00019422461219441254,
187
+ "loss": 0.862,
188
+ "step": 850
189
+ },
190
+ {
191
+ "epoch": 0.34,
192
+ "learning_rate": 0.00019387932557066035,
193
+ "loss": 0.8577,
194
+ "step": 875
195
+ },
196
+ {
197
+ "epoch": 0.35,
198
+ "learning_rate": 0.00019352434000942127,
199
+ "loss": 0.8632,
200
+ "step": 900
201
+ },
202
+ {
203
+ "epoch": 0.36,
204
+ "learning_rate": 0.00019315969218525333,
205
+ "loss": 0.8567,
206
+ "step": 925
207
+ },
208
+ {
209
+ "epoch": 0.37,
210
+ "learning_rate": 0.00019278541977095005,
211
+ "loss": 0.8501,
212
+ "step": 950
213
+ },
214
+ {
215
+ "epoch": 0.38,
216
+ "learning_rate": 0.00019240156143364844,
217
+ "loss": 0.8596,
218
+ "step": 975
219
+ },
220
+ {
221
+ "epoch": 0.39,
222
+ "learning_rate": 0.00019200815683083434,
223
+ "loss": 0.8556,
224
+ "step": 1000
225
+ },
226
+ {
227
+ "epoch": 0.39,
228
+ "eval_loss": 0.8521950244903564,
229
+ "eval_runtime": 59.8838,
230
+ "eval_samples_per_second": 12.19,
231
+ "eval_steps_per_second": 0.885,
232
+ "step": 1000
233
+ },
234
+ {
235
+ "epoch": 0.4,
236
+ "learning_rate": 0.00019160524660624505,
237
+ "loss": 0.8531,
238
+ "step": 1025
239
+ },
240
+ {
241
+ "epoch": 0.41,
242
+ "learning_rate": 0.00019119287238567045,
243
+ "loss": 0.8513,
244
+ "step": 1050
245
+ },
246
+ {
247
+ "epoch": 0.42,
248
+ "learning_rate": 0.00019077107677265253,
249
+ "loss": 0.8502,
250
+ "step": 1075
251
+ },
252
+ {
253
+ "epoch": 0.43,
254
+ "learning_rate": 0.00019033990334408384,
255
+ "loss": 0.8469,
256
+ "step": 1100
257
+ },
258
+ {
259
+ "epoch": 0.44,
260
+ "learning_rate": 0.00018989939664570545,
261
+ "loss": 0.8495,
262
+ "step": 1125
263
+ },
264
+ {
265
+ "epoch": 0.45,
266
+ "learning_rate": 0.00018944960218750484,
267
+ "loss": 0.8485,
268
+ "step": 1150
269
+ },
270
+ {
271
+ "epoch": 0.46,
272
+ "learning_rate": 0.00018899056643901404,
273
+ "loss": 0.8534,
274
+ "step": 1175
275
+ },
276
+ {
277
+ "epoch": 0.47,
278
+ "learning_rate": 0.00018852233682450893,
279
+ "loss": 0.8531,
280
+ "step": 1200
281
+ },
282
+ {
283
+ "epoch": 0.47,
284
+ "learning_rate": 0.00018804496171810948,
285
+ "loss": 0.8509,
286
+ "step": 1225
287
+ },
288
+ {
289
+ "epoch": 0.48,
290
+ "learning_rate": 0.00018755849043878222,
291
+ "loss": 0.8445,
292
+ "step": 1250
293
+ },
294
+ {
295
+ "epoch": 0.49,
296
+ "learning_rate": 0.0001870629732452449,
297
+ "loss": 0.8548,
298
+ "step": 1275
299
+ },
300
+ {
301
+ "epoch": 0.5,
302
+ "learning_rate": 0.00018655846133077417,
303
+ "loss": 0.8441,
304
+ "step": 1300
305
+ },
306
+ {
307
+ "epoch": 0.51,
308
+ "learning_rate": 0.00018604500681791656,
309
+ "loss": 0.8533,
310
+ "step": 1325
311
+ },
312
+ {
313
+ "epoch": 0.52,
314
+ "learning_rate": 0.00018552266275310373,
315
+ "loss": 0.8505,
316
+ "step": 1350
317
+ },
318
+ {
319
+ "epoch": 0.53,
320
+ "learning_rate": 0.0001849914831011719,
321
+ "loss": 0.8544,
322
+ "step": 1375
323
+ },
324
+ {
325
+ "epoch": 0.54,
326
+ "learning_rate": 0.00018445152273978668,
327
+ "loss": 0.845,
328
+ "step": 1400
329
+ },
330
+ {
331
+ "epoch": 0.55,
332
+ "learning_rate": 0.00018390283745377354,
333
+ "loss": 0.8376,
334
+ "step": 1425
335
+ },
336
+ {
337
+ "epoch": 0.56,
338
+ "learning_rate": 0.0001833454839293545,
339
+ "loss": 0.847,
340
+ "step": 1450
341
+ },
342
+ {
343
+ "epoch": 0.57,
344
+ "learning_rate": 0.00018277951974829163,
345
+ "loss": 0.8473,
346
+ "step": 1475
347
+ },
348
+ {
349
+ "epoch": 0.58,
350
+ "learning_rate": 0.0001822050033819382,
351
+ "loss": 0.8438,
352
+ "step": 1500
353
+ },
354
+ {
355
+ "epoch": 0.59,
356
+ "learning_rate": 0.00018162199418519785,
357
+ "loss": 0.8418,
358
+ "step": 1525
359
+ },
360
+ {
361
+ "epoch": 0.6,
362
+ "learning_rate": 0.00018103055239039243,
363
+ "loss": 0.842,
364
+ "step": 1550
365
+ },
366
+ {
367
+ "epoch": 0.61,
368
+ "learning_rate": 0.0001804307391010393,
369
+ "loss": 0.8435,
370
+ "step": 1575
371
+ },
372
+ {
373
+ "epoch": 0.62,
374
+ "learning_rate": 0.00017982261628553842,
375
+ "loss": 0.8349,
376
+ "step": 1600
377
+ },
378
+ {
379
+ "epoch": 0.63,
380
+ "learning_rate": 0.0001792062467707703,
381
+ "loss": 0.8483,
382
+ "step": 1625
383
+ },
384
+ {
385
+ "epoch": 0.64,
386
+ "learning_rate": 0.0001785816942356052,
387
+ "loss": 0.8387,
388
+ "step": 1650
389
+ },
390
+ {
391
+ "epoch": 0.65,
392
+ "learning_rate": 0.00017794902320432429,
393
+ "loss": 0.843,
394
+ "step": 1675
395
+ },
396
+ {
397
+ "epoch": 0.66,
398
+ "learning_rate": 0.00017730829903995333,
399
+ "loss": 0.8424,
400
+ "step": 1700
401
+ },
402
+ {
403
+ "epoch": 0.67,
404
+ "learning_rate": 0.00017665958793751006,
405
+ "loss": 0.8418,
406
+ "step": 1725
407
+ },
408
+ {
409
+ "epoch": 0.68,
410
+ "learning_rate": 0.00017600295691716522,
411
+ "loss": 0.8384,
412
+ "step": 1750
413
+ },
414
+ {
415
+ "epoch": 0.69,
416
+ "learning_rate": 0.00017533847381731856,
417
+ "loss": 0.8445,
418
+ "step": 1775
419
+ },
420
+ {
421
+ "epoch": 0.7,
422
+ "learning_rate": 0.00017466620728759033,
423
+ "loss": 0.8446,
424
+ "step": 1800
425
+ },
426
+ {
427
+ "epoch": 0.71,
428
+ "learning_rate": 0.00017398622678172878,
429
+ "loss": 0.838,
430
+ "step": 1825
431
+ },
432
+ {
433
+ "epoch": 0.72,
434
+ "learning_rate": 0.0001732986025504348,
435
+ "loss": 0.8415,
436
+ "step": 1850
437
+ },
438
+ {
439
+ "epoch": 0.73,
440
+ "learning_rate": 0.000172603405634104,
441
+ "loss": 0.8357,
442
+ "step": 1875
443
+ },
444
+ {
445
+ "epoch": 0.74,
446
+ "learning_rate": 0.00017190070785548755,
447
+ "loss": 0.8311,
448
+ "step": 1900
449
+ },
450
+ {
451
+ "epoch": 0.75,
452
+ "learning_rate": 0.0001711905818122717,
453
+ "loss": 0.8333,
454
+ "step": 1925
455
+ },
456
+ {
457
+ "epoch": 0.76,
458
+ "learning_rate": 0.0001704731008695777,
459
+ "loss": 0.8387,
460
+ "step": 1950
461
+ },
462
+ {
463
+ "epoch": 0.77,
464
+ "learning_rate": 0.0001697483391523821,
465
+ "loss": 0.8442,
466
+ "step": 1975
467
+ },
468
+ {
469
+ "epoch": 0.78,
470
+ "learning_rate": 0.00016901637153785885,
471
+ "loss": 0.8399,
472
+ "step": 2000
473
+ },
474
+ {
475
+ "epoch": 0.78,
476
+ "eval_loss": 0.8339959383010864,
477
+ "eval_runtime": 58.5829,
478
+ "eval_samples_per_second": 12.461,
479
+ "eval_steps_per_second": 0.905,
480
+ "step": 2000
481
+ },
482
+ {
483
+ "epoch": 0.79,
484
+ "learning_rate": 0.0001682772736476434,
485
+ "loss": 0.8334,
486
+ "step": 2025
487
+ },
488
+ {
489
+ "epoch": 0.79,
490
+ "learning_rate": 0.0001675311218400201,
491
+ "loss": 0.835,
492
+ "step": 2050
493
+ },
494
+ {
495
+ "epoch": 0.8,
496
+ "learning_rate": 0.00016677799320203332,
497
+ "loss": 0.8368,
498
+ "step": 2075
499
+ },
500
+ {
501
+ "epoch": 0.81,
502
+ "learning_rate": 0.00016601796554152344,
503
+ "loss": 0.8278,
504
+ "step": 2100
505
+ },
506
+ {
507
+ "epoch": 0.82,
508
+ "learning_rate": 0.00016525111737908827,
509
+ "loss": 0.8334,
510
+ "step": 2125
511
+ },
512
+ {
513
+ "epoch": 0.83,
514
+ "learning_rate": 0.00016447752793997096,
515
+ "loss": 0.8416,
516
+ "step": 2150
517
+ },
518
+ {
519
+ "epoch": 0.84,
520
+ "learning_rate": 0.00016369727714587483,
521
+ "loss": 0.8297,
522
+ "step": 2175
523
+ },
524
+ {
525
+ "epoch": 0.85,
526
+ "learning_rate": 0.0001629104456067066,
527
+ "loss": 0.8327,
528
+ "step": 2200
529
+ },
530
+ {
531
+ "epoch": 0.86,
532
+ "learning_rate": 0.00016211711461224825,
533
+ "loss": 0.8324,
534
+ "step": 2225
535
+ },
536
+ {
537
+ "epoch": 0.87,
538
+ "learning_rate": 0.0001613173661237589,
539
+ "loss": 0.8313,
540
+ "step": 2250
541
+ },
542
+ {
543
+ "epoch": 0.88,
544
+ "learning_rate": 0.0001605112827655069,
545
+ "loss": 0.8292,
546
+ "step": 2275
547
+ },
548
+ {
549
+ "epoch": 0.89,
550
+ "learning_rate": 0.0001596989478162339,
551
+ "loss": 0.8334,
552
+ "step": 2300
553
+ },
554
+ {
555
+ "epoch": 0.9,
556
+ "learning_rate": 0.00015888044520055106,
557
+ "loss": 0.8352,
558
+ "step": 2325
559
+ },
560
+ {
561
+ "epoch": 0.91,
562
+ "learning_rate": 0.00015805585948026852,
563
+ "loss": 0.823,
564
+ "step": 2350
565
+ },
566
+ {
567
+ "epoch": 0.92,
568
+ "learning_rate": 0.000157225275845659,
569
+ "loss": 0.8293,
570
+ "step": 2375
571
+ },
572
+ {
573
+ "epoch": 0.93,
574
+ "learning_rate": 0.00015638878010665672,
575
+ "loss": 0.8289,
576
+ "step": 2400
577
+ },
578
+ {
579
+ "epoch": 0.94,
580
+ "learning_rate": 0.00015554645868399205,
581
+ "loss": 0.832,
582
+ "step": 2425
583
+ },
584
+ {
585
+ "epoch": 0.95,
586
+ "learning_rate": 0.00015469839860026308,
587
+ "loss": 0.8294,
588
+ "step": 2450
589
+ },
590
+ {
591
+ "epoch": 0.96,
592
+ "learning_rate": 0.0001538446874709452,
593
+ "loss": 0.8281,
594
+ "step": 2475
595
+ },
596
+ {
597
+ "epoch": 0.97,
598
+ "learning_rate": 0.00015298541349533925,
599
+ "loss": 0.8314,
600
+ "step": 2500
601
+ },
602
+ {
603
+ "epoch": 0.98,
604
+ "learning_rate": 0.00015212066544745926,
605
+ "loss": 0.831,
606
+ "step": 2525
607
+ },
608
+ {
609
+ "epoch": 0.99,
610
+ "learning_rate": 0.00015125053266686124,
611
+ "loss": 0.8319,
612
+ "step": 2550
613
+ },
614
+ {
615
+ "epoch": 1.0,
616
+ "learning_rate": 0.00015037510504941303,
617
+ "loss": 0.8259,
618
+ "step": 2575
619
+ },
620
+ {
621
+ "epoch": 1.01,
622
+ "learning_rate": 0.00014949447303800695,
623
+ "loss": 0.8133,
624
+ "step": 2600
625
+ },
626
+ {
627
+ "epoch": 1.02,
628
+ "learning_rate": 0.00014860872761321593,
629
+ "loss": 0.8139,
630
+ "step": 2625
631
+ },
632
+ {
633
+ "epoch": 1.03,
634
+ "learning_rate": 0.00014771796028389405,
635
+ "loss": 0.804,
636
+ "step": 2650
637
+ },
638
+ {
639
+ "epoch": 1.04,
640
+ "learning_rate": 0.0001468222630777225,
641
+ "loss": 0.8011,
642
+ "step": 2675
643
+ },
644
+ {
645
+ "epoch": 1.05,
646
+ "learning_rate": 0.00014592172853170193,
647
+ "loss": 0.8037,
648
+ "step": 2700
649
+ },
650
+ {
651
+ "epoch": 1.06,
652
+ "learning_rate": 0.00014501644968259212,
653
+ "loss": 0.8063,
654
+ "step": 2725
655
+ },
656
+ {
657
+ "epoch": 1.07,
658
+ "learning_rate": 0.00014410652005730025,
659
+ "loss": 0.8155,
660
+ "step": 2750
661
+ },
662
+ {
663
+ "epoch": 1.08,
664
+ "learning_rate": 0.00014319203366321826,
665
+ "loss": 0.8066,
666
+ "step": 2775
667
+ },
668
+ {
669
+ "epoch": 1.09,
670
+ "learning_rate": 0.0001422730849785107,
671
+ "loss": 0.8091,
672
+ "step": 2800
673
+ },
674
+ {
675
+ "epoch": 1.1,
676
+ "learning_rate": 0.0001413497689423539,
677
+ "loss": 0.8067,
678
+ "step": 2825
679
+ },
680
+ {
681
+ "epoch": 1.11,
682
+ "learning_rate": 0.00014042218094512755,
683
+ "loss": 0.8046,
684
+ "step": 2850
685
+ },
686
+ {
687
+ "epoch": 1.11,
688
+ "learning_rate": 0.00013949041681855985,
689
+ "loss": 0.8053,
690
+ "step": 2875
691
+ },
692
+ {
693
+ "epoch": 1.12,
694
+ "learning_rate": 0.0001385545728258264,
695
+ "loss": 0.8075,
696
+ "step": 2900
697
+ },
698
+ {
699
+ "epoch": 1.13,
700
+ "learning_rate": 0.0001376147456516055,
701
+ "loss": 0.8015,
702
+ "step": 2925
703
+ },
704
+ {
705
+ "epoch": 1.14,
706
+ "learning_rate": 0.00013667103239208903,
707
+ "loss": 0.8016,
708
+ "step": 2950
709
+ },
710
+ {
711
+ "epoch": 1.15,
712
+ "learning_rate": 0.00013572353054495126,
713
+ "loss": 0.8029,
714
+ "step": 2975
715
+ },
716
+ {
717
+ "epoch": 1.16,
718
+ "learning_rate": 0.0001347723379992762,
719
+ "loss": 0.8017,
720
+ "step": 3000
721
+ },
722
+ {
723
+ "epoch": 1.16,
724
+ "eval_loss": 0.8229297995567322,
725
+ "eval_runtime": 59.3398,
726
+ "eval_samples_per_second": 12.302,
727
+ "eval_steps_per_second": 0.893,
728
+ "step": 3000
729
+ },
730
+ {
731
+ "epoch": 1.17,
732
+ "learning_rate": 0.0001338175530254443,
733
+ "loss": 0.8049,
734
+ "step": 3025
735
+ },
736
+ {
737
+ "epoch": 1.18,
738
+ "learning_rate": 0.00013285927426497985,
739
+ "loss": 0.8027,
740
+ "step": 3050
741
+ },
742
+ {
743
+ "epoch": 1.19,
744
+ "learning_rate": 0.00013189760072036008,
745
+ "loss": 0.8028,
746
+ "step": 3075
747
+ },
748
+ {
749
+ "epoch": 1.2,
750
+ "learning_rate": 0.0001309326317447869,
751
+ "loss": 0.8021,
752
+ "step": 3100
753
+ },
754
+ {
755
+ "epoch": 1.21,
756
+ "learning_rate": 0.00012996446703192257,
757
+ "loss": 0.8033,
758
+ "step": 3125
759
+ },
760
+ {
761
+ "epoch": 1.22,
762
+ "learning_rate": 0.00012899320660558986,
763
+ "loss": 0.8016,
764
+ "step": 3150
765
+ },
766
+ {
767
+ "epoch": 1.23,
768
+ "learning_rate": 0.00012801895080943846,
769
+ "loss": 0.7995,
770
+ "step": 3175
771
+ },
772
+ {
773
+ "epoch": 1.24,
774
+ "learning_rate": 0.0001270418002965782,
775
+ "loss": 0.799,
776
+ "step": 3200
777
+ },
778
+ {
779
+ "epoch": 1.25,
780
+ "learning_rate": 0.0001260618560191802,
781
+ "loss": 0.8002,
782
+ "step": 3225
783
+ },
784
+ {
785
+ "epoch": 1.26,
786
+ "learning_rate": 0.00012507921921804717,
787
+ "loss": 0.8068,
788
+ "step": 3250
789
+ },
790
+ {
791
+ "epoch": 1.27,
792
+ "learning_rate": 0.00012409399141215423,
793
+ "loss": 0.8041,
794
+ "step": 3275
795
+ },
796
+ {
797
+ "epoch": 1.28,
798
+ "learning_rate": 0.0001231062743881603,
799
+ "loss": 0.7999,
800
+ "step": 3300
801
+ },
802
+ {
803
+ "epoch": 1.29,
804
+ "learning_rate": 0.0001221161701898926,
805
+ "loss": 0.7995,
806
+ "step": 3325
807
+ },
808
+ {
809
+ "epoch": 1.3,
810
+ "learning_rate": 0.00012112378110780391,
811
+ "loss": 0.7959,
812
+ "step": 3350
813
+ },
814
+ {
815
+ "epoch": 1.31,
816
+ "learning_rate": 0.00012012920966840486,
817
+ "loss": 0.7999,
818
+ "step": 3375
819
+ },
820
+ {
821
+ "epoch": 1.32,
822
+ "learning_rate": 0.00011913255862367151,
823
+ "loss": 0.8016,
824
+ "step": 3400
825
+ },
826
+ {
827
+ "epoch": 1.33,
828
+ "learning_rate": 0.00011813393094042993,
829
+ "loss": 0.7944,
830
+ "step": 3425
831
+ },
832
+ {
833
+ "epoch": 1.34,
834
+ "learning_rate": 0.0001171334297897181,
835
+ "loss": 0.8026,
836
+ "step": 3450
837
+ },
838
+ {
839
+ "epoch": 1.35,
840
+ "learning_rate": 0.00011613115853612734,
841
+ "loss": 0.8004,
842
+ "step": 3475
843
+ },
844
+ {
845
+ "epoch": 1.36,
846
+ "learning_rate": 0.00011512722072712321,
847
+ "loss": 0.7992,
848
+ "step": 3500
849
+ },
850
+ {
851
+ "epoch": 1.37,
852
+ "learning_rate": 0.00011412172008234785,
853
+ "loss": 0.8004,
854
+ "step": 3525
855
+ },
856
+ {
857
+ "epoch": 1.38,
858
+ "learning_rate": 0.0001131147604829043,
859
+ "loss": 0.8009,
860
+ "step": 3550
861
+ },
862
+ {
863
+ "epoch": 1.39,
864
+ "learning_rate": 0.00011210644596062439,
865
+ "loss": 0.7993,
866
+ "step": 3575
867
+ },
868
+ {
869
+ "epoch": 1.4,
870
+ "learning_rate": 0.00011109688068732081,
871
+ "loss": 0.7965,
872
+ "step": 3600
873
+ },
874
+ {
875
+ "epoch": 1.41,
876
+ "learning_rate": 0.00011008616896402482,
877
+ "loss": 0.7991,
878
+ "step": 3625
879
+ },
880
+ {
881
+ "epoch": 1.42,
882
+ "learning_rate": 0.00010907441521021072,
883
+ "loss": 0.8026,
884
+ "step": 3650
885
+ },
886
+ {
887
+ "epoch": 1.42,
888
+ "learning_rate": 0.00010806172395300789,
889
+ "loss": 0.7941,
890
+ "step": 3675
891
+ },
892
+ {
893
+ "epoch": 1.43,
894
+ "learning_rate": 0.00010704819981640186,
895
+ "loss": 0.7989,
896
+ "step": 3700
897
+ },
898
+ {
899
+ "epoch": 1.44,
900
+ "learning_rate": 0.00010603394751042522,
901
+ "loss": 0.7981,
902
+ "step": 3725
903
+ },
904
+ {
905
+ "epoch": 1.45,
906
+ "learning_rate": 0.00010501907182033979,
907
+ "loss": 0.7985,
908
+ "step": 3750
909
+ },
910
+ {
911
+ "epoch": 1.46,
912
+ "learning_rate": 0.000104003677595811,
913
+ "loss": 0.7921,
914
+ "step": 3775
915
+ },
916
+ {
917
+ "epoch": 1.47,
918
+ "learning_rate": 0.00010298786974007555,
919
+ "loss": 0.8012,
920
+ "step": 3800
921
+ },
922
+ {
923
+ "epoch": 1.48,
924
+ "learning_rate": 0.00010197175319910343,
925
+ "loss": 0.7906,
926
+ "step": 3825
927
+ },
928
+ {
929
+ "epoch": 1.49,
930
+ "learning_rate": 0.00010095543295075593,
931
+ "loss": 0.7928,
932
+ "step": 3850
933
+ },
934
+ {
935
+ "epoch": 1.5,
936
+ "learning_rate": 9.993901399393979e-05,
937
+ "loss": 0.8018,
938
+ "step": 3875
939
+ },
940
+ {
941
+ "epoch": 1.51,
942
+ "learning_rate": 9.892260133775968e-05,
943
+ "loss": 0.7991,
944
+ "step": 3900
945
+ },
946
+ {
947
+ "epoch": 1.52,
948
+ "learning_rate": 9.79062999906693e-05,
949
+ "loss": 0.795,
950
+ "step": 3925
951
+ },
952
+ {
953
+ "epoch": 1.53,
954
+ "learning_rate": 9.68902149496227e-05,
955
+ "loss": 0.7977,
956
+ "step": 3950
957
+ },
958
+ {
959
+ "epoch": 1.54,
960
+ "learning_rate": 9.587445118922674e-05,
961
+ "loss": 0.8013,
962
+ "step": 3975
963
+ },
964
+ {
965
+ "epoch": 1.55,
966
+ "learning_rate": 9.485911365089589e-05,
967
+ "loss": 0.7978,
968
+ "step": 4000
969
+ },
970
+ {
971
+ "epoch": 1.55,
972
+ "eval_loss": 0.8142631649971008,
973
+ "eval_runtime": 59.4108,
974
+ "eval_samples_per_second": 12.287,
975
+ "eval_steps_per_second": 0.892,
976
+ "step": 4000
977
+ },
978
+ {
979
+ "epoch": 1.56,
980
+ "learning_rate": 9.384430723201036e-05,
981
+ "loss": 0.7912,
982
+ "step": 4025
983
+ },
984
+ {
985
+ "epoch": 1.57,
986
+ "learning_rate": 9.283013677507902e-05,
987
+ "loss": 0.7919,
988
+ "step": 4050
989
+ },
990
+ {
991
+ "epoch": 1.58,
992
+ "learning_rate": 9.181670705690761e-05,
993
+ "loss": 0.7919,
994
+ "step": 4075
995
+ },
996
+ {
997
+ "epoch": 1.59,
998
+ "learning_rate": 9.080412277777413e-05,
999
+ "loss": 0.8018,
1000
+ "step": 4100
1001
+ },
1002
+ {
1003
+ "epoch": 1.6,
1004
+ "learning_rate": 8.979248855061188e-05,
1005
+ "loss": 0.7811,
1006
+ "step": 4125
1007
+ },
1008
+ {
1009
+ "epoch": 1.61,
1010
+ "learning_rate": 8.878190889020159e-05,
1011
+ "loss": 0.7919,
1012
+ "step": 4150
1013
+ },
1014
+ {
1015
+ "epoch": 1.62,
1016
+ "learning_rate": 8.777248820237376e-05,
1017
+ "loss": 0.7994,
1018
+ "step": 4175
1019
+ },
1020
+ {
1021
+ "epoch": 1.63,
1022
+ "learning_rate": 8.676433077322215e-05,
1023
+ "loss": 0.7956,
1024
+ "step": 4200
1025
+ },
1026
+ {
1027
+ "epoch": 1.64,
1028
+ "learning_rate": 8.575754075832973e-05,
1029
+ "loss": 0.7968,
1030
+ "step": 4225
1031
+ },
1032
+ {
1033
+ "epoch": 1.65,
1034
+ "learning_rate": 8.475222217200801e-05,
1035
+ "loss": 0.7905,
1036
+ "step": 4250
1037
+ },
1038
+ {
1039
+ "epoch": 1.66,
1040
+ "learning_rate": 8.374847887655112e-05,
1041
+ "loss": 0.7889,
1042
+ "step": 4275
1043
+ },
1044
+ {
1045
+ "epoch": 1.67,
1046
+ "learning_rate": 8.274641457150543e-05,
1047
+ "loss": 0.7988,
1048
+ "step": 4300
1049
+ },
1050
+ {
1051
+ "epoch": 1.68,
1052
+ "learning_rate": 8.174613278295608e-05,
1053
+ "loss": 0.7947,
1054
+ "step": 4325
1055
+ },
1056
+ {
1057
+ "epoch": 1.69,
1058
+ "learning_rate": 8.074773685283137e-05,
1059
+ "loss": 0.7929,
1060
+ "step": 4350
1061
+ },
1062
+ {
1063
+ "epoch": 1.7,
1064
+ "learning_rate": 7.97513299282264e-05,
1065
+ "loss": 0.7949,
1066
+ "step": 4375
1067
+ },
1068
+ {
1069
+ "epoch": 1.71,
1070
+ "learning_rate": 7.875701495074638e-05,
1071
+ "loss": 0.7925,
1072
+ "step": 4400
1073
+ },
1074
+ {
1075
+ "epoch": 1.72,
1076
+ "learning_rate": 7.776489464587158e-05,
1077
+ "loss": 0.7917,
1078
+ "step": 4425
1079
+ },
1080
+ {
1081
+ "epoch": 1.73,
1082
+ "learning_rate": 7.677507151234448e-05,
1083
+ "loss": 0.7905,
1084
+ "step": 4450
1085
+ },
1086
+ {
1087
+ "epoch": 1.74,
1088
+ "learning_rate": 7.578764781158034e-05,
1089
+ "loss": 0.7912,
1090
+ "step": 4475
1091
+ },
1092
+ {
1093
+ "epoch": 1.74,
1094
+ "learning_rate": 7.480272555710227e-05,
1095
+ "loss": 0.8006,
1096
+ "step": 4500
1097
+ },
1098
+ {
1099
+ "epoch": 1.75,
1100
+ "learning_rate": 7.382040650400185e-05,
1101
+ "loss": 0.7937,
1102
+ "step": 4525
1103
+ },
1104
+ {
1105
+ "epoch": 1.76,
1106
+ "learning_rate": 7.28407921384267e-05,
1107
+ "loss": 0.794,
1108
+ "step": 4550
1109
+ },
1110
+ {
1111
+ "epoch": 1.77,
1112
+ "learning_rate": 7.186398366709545e-05,
1113
+ "loss": 0.7931,
1114
+ "step": 4575
1115
+ },
1116
+ {
1117
+ "epoch": 1.78,
1118
+ "learning_rate": 7.089008200684197e-05,
1119
+ "loss": 0.7982,
1120
+ "step": 4600
1121
+ },
1122
+ {
1123
+ "epoch": 1.79,
1124
+ "learning_rate": 6.991918777418928e-05,
1125
+ "loss": 0.7916,
1126
+ "step": 4625
1127
+ },
1128
+ {
1129
+ "epoch": 1.8,
1130
+ "learning_rate": 6.895140127495455e-05,
1131
+ "loss": 0.7919,
1132
+ "step": 4650
1133
+ },
1134
+ {
1135
+ "epoch": 1.81,
1136
+ "learning_rate": 6.798682249388631e-05,
1137
+ "loss": 0.7863,
1138
+ "step": 4675
1139
+ },
1140
+ {
1141
+ "epoch": 1.82,
1142
+ "learning_rate": 6.702555108433461e-05,
1143
+ "loss": 0.789,
1144
+ "step": 4700
1145
+ },
1146
+ {
1147
+ "epoch": 1.83,
1148
+ "learning_rate": 6.606768635795574e-05,
1149
+ "loss": 0.7902,
1150
+ "step": 4725
1151
+ },
1152
+ {
1153
+ "epoch": 1.84,
1154
+ "learning_rate": 6.511332727445191e-05,
1155
+ "loss": 0.7924,
1156
+ "step": 4750
1157
+ },
1158
+ {
1159
+ "epoch": 1.85,
1160
+ "learning_rate": 6.416257243134747e-05,
1161
+ "loss": 0.7957,
1162
+ "step": 4775
1163
+ },
1164
+ {
1165
+ "epoch": 1.86,
1166
+ "learning_rate": 6.321552005380256e-05,
1167
+ "loss": 0.7916,
1168
+ "step": 4800
1169
+ },
1170
+ {
1171
+ "epoch": 1.87,
1172
+ "learning_rate": 6.22722679844652e-05,
1173
+ "loss": 0.7867,
1174
+ "step": 4825
1175
+ },
1176
+ {
1177
+ "epoch": 1.88,
1178
+ "learning_rate": 6.133291367336284e-05,
1179
+ "loss": 0.7944,
1180
+ "step": 4850
1181
+ },
1182
+ {
1183
+ "epoch": 1.89,
1184
+ "learning_rate": 6.039755416783457e-05,
1185
+ "loss": 0.7982,
1186
+ "step": 4875
1187
+ },
1188
+ {
1189
+ "epoch": 1.9,
1190
+ "learning_rate": 5.946628610250484e-05,
1191
+ "loss": 0.7918,
1192
+ "step": 4900
1193
+ },
1194
+ {
1195
+ "epoch": 1.91,
1196
+ "learning_rate": 5.853920568929996e-05,
1197
+ "loss": 0.7921,
1198
+ "step": 4925
1199
+ },
1200
+ {
1201
+ "epoch": 1.92,
1202
+ "learning_rate": 5.761640870750799e-05,
1203
+ "loss": 0.7878,
1204
+ "step": 4950
1205
+ },
1206
+ {
1207
+ "epoch": 1.93,
1208
+ "learning_rate": 5.669799049388375e-05,
1209
+ "loss": 0.7901,
1210
+ "step": 4975
1211
+ },
1212
+ {
1213
+ "epoch": 1.94,
1214
+ "learning_rate": 5.578404593279911e-05,
1215
+ "loss": 0.7858,
1216
+ "step": 5000
1217
+ },
1218
+ {
1219
+ "epoch": 1.94,
1220
+ "eval_loss": 0.807844877243042,
1221
+ "eval_runtime": 59.586,
1222
+ "eval_samples_per_second": 12.251,
1223
+ "eval_steps_per_second": 0.889,
1224
+ "step": 5000
1225
+ },
1226
+ {
1227
+ "epoch": 1.95,
1228
+ "learning_rate": 5.487466944644033e-05,
1229
+ "loss": 0.7902,
1230
+ "step": 5025
1231
+ },
1232
+ {
1233
+ "epoch": 1.96,
1234
+ "learning_rate": 5.3969954985052996e-05,
1235
+ "loss": 0.7979,
1236
+ "step": 5050
1237
+ },
1238
+ {
1239
+ "epoch": 1.97,
1240
+ "learning_rate": 5.306999601723579e-05,
1241
+ "loss": 0.7931,
1242
+ "step": 5075
1243
+ },
1244
+ {
1245
+ "epoch": 1.98,
1246
+ "learning_rate": 5.21748855202839e-05,
1247
+ "loss": 0.7868,
1248
+ "step": 5100
1249
+ },
1250
+ {
1251
+ "epoch": 1.99,
1252
+ "learning_rate": 5.128471597058342e-05,
1253
+ "loss": 0.7993,
1254
+ "step": 5125
1255
+ },
1256
+ {
1257
+ "epoch": 2.0,
1258
+ "learning_rate": 5.03995793340572e-05,
1259
+ "loss": 0.7892,
1260
+ "step": 5150
1261
+ },
1262
+ {
1263
+ "epoch": 2.01,
1264
+ "learning_rate": 4.9519567056663694e-05,
1265
+ "loss": 0.7788,
1266
+ "step": 5175
1267
+ },
1268
+ {
1269
+ "epoch": 2.02,
1270
+ "learning_rate": 4.864477005494938e-05,
1271
+ "loss": 0.7654,
1272
+ "step": 5200
1273
+ },
1274
+ {
1275
+ "epoch": 2.03,
1276
+ "learning_rate": 4.777527870665592e-05,
1277
+ "loss": 0.7468,
1278
+ "step": 5225
1279
+ },
1280
+ {
1281
+ "epoch": 2.04,
1282
+ "learning_rate": 4.691118284138296e-05,
1283
+ "loss": 0.7359,
1284
+ "step": 5250
1285
+ },
1286
+ {
1287
+ "epoch": 2.05,
1288
+ "learning_rate": 4.605257173130763e-05,
1289
+ "loss": 0.7422,
1290
+ "step": 5275
1291
+ },
1292
+ {
1293
+ "epoch": 2.06,
1294
+ "learning_rate": 4.519953408196152e-05,
1295
+ "loss": 0.7424,
1296
+ "step": 5300
1297
+ },
1298
+ {
1299
+ "epoch": 2.06,
1300
+ "learning_rate": 4.435215802306635e-05,
1301
+ "loss": 0.7521,
1302
+ "step": 5325
1303
+ },
1304
+ {
1305
+ "epoch": 2.07,
1306
+ "learning_rate": 4.351053109942894e-05,
1307
+ "loss": 0.7477,
1308
+ "step": 5350
1309
+ },
1310
+ {
1311
+ "epoch": 2.08,
1312
+ "learning_rate": 4.2674740261896776e-05,
1313
+ "loss": 0.7456,
1314
+ "step": 5375
1315
+ },
1316
+ {
1317
+ "epoch": 2.09,
1318
+ "learning_rate": 4.1844871858374844e-05,
1319
+ "loss": 0.766,
1320
+ "step": 5400
1321
+ },
1322
+ {
1323
+ "epoch": 2.1,
1324
+ "learning_rate": 4.1021011624904814e-05,
1325
+ "loss": 0.7664,
1326
+ "step": 5425
1327
+ },
1328
+ {
1329
+ "epoch": 2.11,
1330
+ "learning_rate": 4.0203244676807353e-05,
1331
+ "loss": 0.7703,
1332
+ "step": 5450
1333
+ },
1334
+ {
1335
+ "epoch": 2.12,
1336
+ "learning_rate": 3.939165549988873e-05,
1337
+ "loss": 0.7674,
1338
+ "step": 5475
1339
+ },
1340
+ {
1341
+ "epoch": 2.13,
1342
+ "learning_rate": 3.858632794171222e-05,
1343
+ "loss": 0.7722,
1344
+ "step": 5500
1345
+ },
1346
+ {
1347
+ "epoch": 2.14,
1348
+ "learning_rate": 3.778734520293562e-05,
1349
+ "loss": 0.7716,
1350
+ "step": 5525
1351
+ },
1352
+ {
1353
+ "epoch": 2.15,
1354
+ "learning_rate": 3.699478982871561e-05,
1355
+ "loss": 0.7795,
1356
+ "step": 5550
1357
+ },
1358
+ {
1359
+ "epoch": 2.16,
1360
+ "learning_rate": 3.62087437001797e-05,
1361
+ "loss": 0.7728,
1362
+ "step": 5575
1363
+ },
1364
+ {
1365
+ "epoch": 2.17,
1366
+ "learning_rate": 3.5429288025966944e-05,
1367
+ "loss": 0.7709,
1368
+ "step": 5600
1369
+ },
1370
+ {
1371
+ "epoch": 2.18,
1372
+ "learning_rate": 3.4656503333837956e-05,
1373
+ "loss": 0.7682,
1374
+ "step": 5625
1375
+ },
1376
+ {
1377
+ "epoch": 2.19,
1378
+ "learning_rate": 3.389046946235542e-05,
1379
+ "loss": 0.7734,
1380
+ "step": 5650
1381
+ },
1382
+ {
1383
+ "epoch": 2.2,
1384
+ "learning_rate": 3.313126555263576e-05,
1385
+ "loss": 0.7716,
1386
+ "step": 5675
1387
+ },
1388
+ {
1389
+ "epoch": 2.21,
1390
+ "learning_rate": 3.237897004017276e-05,
1391
+ "loss": 0.7716,
1392
+ "step": 5700
1393
+ },
1394
+ {
1395
+ "epoch": 2.22,
1396
+ "learning_rate": 3.163366064673427e-05,
1397
+ "loss": 0.7721,
1398
+ "step": 5725
1399
+ },
1400
+ {
1401
+ "epoch": 2.23,
1402
+ "learning_rate": 3.089541437233252e-05,
1403
+ "loss": 0.7658,
1404
+ "step": 5750
1405
+ },
1406
+ {
1407
+ "epoch": 2.24,
1408
+ "learning_rate": 3.0164307487268996e-05,
1409
+ "loss": 0.7716,
1410
+ "step": 5775
1411
+ },
1412
+ {
1413
+ "epoch": 2.25,
1414
+ "learning_rate": 2.944041552425475e-05,
1415
+ "loss": 0.7687,
1416
+ "step": 5800
1417
+ }
1418
+ ],
1419
+ "max_steps": 7737,
1420
+ "num_train_epochs": 3,
1421
+ "total_flos": 2.4980935510562177e+19,
1422
+ "trial_name": null,
1423
+ "trial_params": null
1424
+ }
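trainer_state.json carries the full `log_history` (learning rate, loss, and periodic eval metrics per logging step), which is what the long diff above adds. A small sketch, assuming the checkpoint directory is available locally, that pulls out just the evaluation losses; the path and variable names are illustrative.

```python
import json

# Load the trainer state saved alongside this checkpoint.
with open("checkpoint-5800/trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training logs and eval logs; keep only the eval entries.
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
for step, loss in evals:
    print(f"step {step}: eval_loss {loss:.4f}")
```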
checkpoint-5800/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df5d13b3f1b9942f80afde79010ef0947feee3df761d245fef1699bc397648b2
+ size 4027
checkpoint-5900/README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: bfloat16
+ ### Framework versions
+
+
+ - PEFT 0.5.0.dev0
checkpoint-5900/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "/workspace/webui/models/TheBloke_Llama-2-13B-fp16",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj",
+ "gate_proj",
+ "up_proj",
+ "o_proj",
+ "k_proj",
+ "down_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-5900/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d3b3da1c54329907c1913ad730b0e8c5d24d4c2d49e4f95aa7976664afc0a98c
+ size 500897101
checkpoint-5900/adapter_model/README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: bfloat16
+ ### Framework versions
+
+
+ - PEFT 0.5.0.dev0
checkpoint-5900/adapter_model/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "/workspace/webui/models/TheBloke_Llama-2-13B-fp16",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj",
+ "gate_proj",
+ "up_proj",
+ "o_proj",
+ "k_proj",
+ "down_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-5900/adapter_model/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d3b3da1c54329907c1913ad730b0e8c5d24d4c2d49e4f95aa7976664afc0a98c
+ size 500897101
checkpoint-5900/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1197bafa9ae84ea7a453b741f03a67406157df8c322c0f2e87f2e9c99c7e6415
+ size 1001752701
checkpoint-5900/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c6b970d7fefc137c6fa7c1f3ec5751405f46258d274a85e6d4372f5c1b430100
+ size 27772
checkpoint-5900/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7c346a11c7d7489d10f88696f09c70a4abe9b4d22e70d081daf287152ba6758
+ size 27772
checkpoint-5900/rng_state_10.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28cc2f86fa3f892a60fcd12ec1c513fe4892758b164d8b6dcaf3ffad379de4bf
+ size 27789
checkpoint-5900/rng_state_11.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7e9eaa5d2c6ecd694c6005f784826c1ef125cd869dc4ae397dc6622d332750e
+ size 27789
checkpoint-5900/rng_state_12.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:38acd1e9cddbb15c52d3524a7f3e3b0813e9d4cd4aa950fe53314f0b60178040
+ size 27789
checkpoint-5900/rng_state_13.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0c474d8b008c60069849057cd5597c90d8f9e8db24500adaab180041a48449f
+ size 27789
checkpoint-5900/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b70d819dbeb60759d92edd51269d8cee2700d47a9ed7c6a47d97f93849e55bb0
+ size 27772
checkpoint-5900/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d28ba4337f3d0ed31c728bf8fbbd7c00c8014b3f57ffbf29863e421ce59080c
+ size 27772
checkpoint-5900/rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:00c43f507f234617c2e9bcf5dd72636cec19310803bce07ad6514e0e90814858
+ size 27772
checkpoint-5900/rng_state_5.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:29979a24bd6bef289a10c8e120d56f27b5f058b309d98caf16f123a7a861dce9
+ size 27772
checkpoint-5900/rng_state_6.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:78aa8d7c2497285c4d5739872ca5ceac97b5b636a89d97a0185297b5bbd049d6
+ size 27772
checkpoint-5900/rng_state_7.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:04a7b8a1abaea1776e2ddd83a1b3d050ed23897ede63223f377eb777702f7a0f
+ size 27772
checkpoint-5900/rng_state_8.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:acca6763654c1dff02cf8af98fff17739233e457ad75b6ff4297b68efc173984
+ size 27772
checkpoint-5900/rng_state_9.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a71a0b91f398dff81646dcd42cd8df3dfca54a277c75bab4cc51332fdf4a4768
+ size 27772
checkpoint-5900/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f769241bb104f866dbb417f19005672d382c2f1d72ce0cc8284c37b0a8dd3e0e
+ size 627
checkpoint-5900/trainer_state.json ADDED
@@ -0,0 +1,1448 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 2.2877084141139976,
5
+ "global_step": 5900,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 0.02,
12
+ "learning_rate": 0.0001999867761371633,
13
+ "loss": 1.0435,
14
+ "step": 50
15
+ },
16
+ {
17
+ "epoch": 0.04,
18
+ "learning_rate": 0.00019993306018843102,
19
+ "loss": 0.8918,
20
+ "step": 100
21
+ },
22
+ {
23
+ "epoch": 0.06,
24
+ "learning_rate": 0.00019983804784290833,
25
+ "loss": 0.8874,
26
+ "step": 150
27
+ },
28
+ {
29
+ "epoch": 0.08,
30
+ "learning_rate": 0.00019970177836355307,
31
+ "loss": 0.8839,
32
+ "step": 200
33
+ },
34
+ {
35
+ "epoch": 0.09,
36
+ "learning_rate": 0.00019961818913082012,
37
+ "loss": 0.8801,
38
+ "step": 225
39
+ },
40
+ {
41
+ "epoch": 0.1,
42
+ "learning_rate": 0.00019952430806244534,
43
+ "loss": 0.8753,
44
+ "step": 250
45
+ },
46
+ {
47
+ "epoch": 0.11,
48
+ "learning_rate": 0.00019942014485754635,
49
+ "loss": 0.8754,
50
+ "step": 275
51
+ },
52
+ {
53
+ "epoch": 0.12,
54
+ "learning_rate": 0.00019930571027751713,
55
+ "loss": 0.8751,
56
+ "step": 300
57
+ },
58
+ {
59
+ "epoch": 0.13,
60
+ "learning_rate": 0.0001991810161449164,
61
+ "loss": 0.8819,
62
+ "step": 325
63
+ },
64
+ {
65
+ "epoch": 0.14,
66
+ "learning_rate": 0.00019904607534224612,
67
+ "loss": 0.8744,
68
+ "step": 350
69
+ },
70
+ {
71
+ "epoch": 0.15,
72
+ "learning_rate": 0.00019890090181062063,
73
+ "loss": 0.8735,
74
+ "step": 375
75
+ },
76
+ {
77
+ "epoch": 0.16,
78
+ "learning_rate": 0.00019874551054832625,
79
+ "loss": 0.8703,
80
+ "step": 400
81
+ },
82
+ {
83
+ "epoch": 0.16,
84
+ "learning_rate": 0.00019857991760927193,
85
+ "loss": 0.8715,
86
+ "step": 425
87
+ },
88
+ {
89
+ "epoch": 0.17,
90
+ "learning_rate": 0.00019840414010133045,
91
+ "loss": 0.8714,
92
+ "step": 450
93
+ },
94
+ {
95
+ "epoch": 0.18,
96
+ "learning_rate": 0.00019821819618457114,
97
+ "loss": 0.8653,
98
+ "step": 475
99
+ },
100
+ {
101
+ "epoch": 0.19,
102
+ "learning_rate": 0.0001980221050693837,
103
+ "loss": 0.8716,
104
+ "step": 500
105
+ },
106
+ {
107
+ "epoch": 0.2,
108
+ "learning_rate": 0.00019781588701449338,
109
+ "loss": 0.8695,
110
+ "step": 525
111
+ },
112
+ {
113
+ "epoch": 0.21,
114
+ "learning_rate": 0.0001975995633248682,
115
+ "loss": 0.8746,
116
+ "step": 550
117
+ },
118
+ {
119
+ "epoch": 0.22,
120
+ "learning_rate": 0.00019737315634951762,
121
+ "loss": 0.8731,
122
+ "step": 575
123
+ },
124
+ {
125
+ "epoch": 0.23,
126
+ "learning_rate": 0.00019713668947918386,
127
+ "loss": 0.867,
128
+ "step": 600
129
+ },
130
+ {
131
+ "epoch": 0.24,
132
+ "learning_rate": 0.0001968901871439252,
133
+ "loss": 0.8706,
134
+ "step": 625
135
+ },
136
+ {
137
+ "epoch": 0.25,
138
+ "learning_rate": 0.000196633674810592,
139
+ "loss": 0.8595,
140
+ "step": 650
141
+ },
142
+ {
143
+ "epoch": 0.26,
144
+ "learning_rate": 0.0001963671789801958,
145
+ "loss": 0.8627,
146
+ "step": 675
147
+ },
148
+ {
149
+ "epoch": 0.27,
150
+ "learning_rate": 0.0001960907271851712,
151
+ "loss": 0.8607,
152
+ "step": 700
153
+ },
154
+ {
155
+ "epoch": 0.28,
156
+ "learning_rate": 0.00019580434798653173,
157
+ "loss": 0.858,
158
+ "step": 725
159
+ },
160
+ {
161
+ "epoch": 0.29,
162
+ "learning_rate": 0.00019550807097091876,
163
+ "loss": 0.8589,
164
+ "step": 750
165
+ },
166
+ {
167
+ "epoch": 0.3,
168
+ "learning_rate": 0.00019520192674754515,
169
+ "loss": 0.8561,
170
+ "step": 775
171
+ },
172
+ {
173
+ "epoch": 0.31,
174
+ "learning_rate": 0.00019488594694503264,
175
+ "loss": 0.8576,
176
+ "step": 800
177
+ },
178
+ {
179
+ "epoch": 0.32,
180
+ "learning_rate": 0.00019456016420814446,
181
+ "loss": 0.8597,
182
+ "step": 825
183
+ },
184
+ {
185
+ "epoch": 0.33,
186
+ "learning_rate": 0.00019422461219441254,
187
+ "loss": 0.862,
188
+ "step": 850
189
+ },
190
+ {
191
+ "epoch": 0.34,
192
+ "learning_rate": 0.00019387932557066035,
193
+ "loss": 0.8577,
194
+ "step": 875
195
+ },
196
+ {
197
+ "epoch": 0.35,
198
+ "learning_rate": 0.00019352434000942127,
199
+ "loss": 0.8632,
200
+ "step": 900
201
+ },
202
+ {
203
+ "epoch": 0.36,
204
+ "learning_rate": 0.00019315969218525333,
205
+ "loss": 0.8567,
206
+ "step": 925
207
+ },
208
+ {
209
+ "epoch": 0.37,
210
+ "learning_rate": 0.00019278541977095005,
211
+ "loss": 0.8501,
212
+ "step": 950
213
+ },
214
+ {
215
+ "epoch": 0.38,
216
+ "learning_rate": 0.00019240156143364844,
217
+ "loss": 0.8596,
218
+ "step": 975
219
+ },
220
+ {
221
+ "epoch": 0.39,
222
+ "learning_rate": 0.00019200815683083434,
223
+ "loss": 0.8556,
224
+ "step": 1000
225
+ },
226
+ {
227
+ "epoch": 0.39,
228
+ "eval_loss": 0.8521950244903564,
229
+ "eval_runtime": 59.8838,
230
+ "eval_samples_per_second": 12.19,
231
+ "eval_steps_per_second": 0.885,
232
+ "step": 1000
233
+ },
234
+ {
235
+ "epoch": 0.4,
236
+ "learning_rate": 0.00019160524660624505,
237
+ "loss": 0.8531,
238
+ "step": 1025
239
+ },
240
+ {
241
+ "epoch": 0.41,
242
+ "learning_rate": 0.00019119287238567045,
243
+ "loss": 0.8513,
244
+ "step": 1050
245
+ },
246
+ {
247
+ "epoch": 0.42,
248
+ "learning_rate": 0.00019077107677265253,
249
+ "loss": 0.8502,
250
+ "step": 1075
251
+ },
252
+ {
253
+ "epoch": 0.43,
254
+ "learning_rate": 0.00019033990334408384,
255
+ "loss": 0.8469,
256
+ "step": 1100
257
+ },
258
+ {
259
+ "epoch": 0.44,
260
+ "learning_rate": 0.00018989939664570545,
261
+ "loss": 0.8495,
262
+ "step": 1125
263
+ },
264
+ {
265
+ "epoch": 0.45,
266
+ "learning_rate": 0.00018944960218750484,
267
+ "loss": 0.8485,
268
+ "step": 1150
269
+ },
270
+ {
271
+ "epoch": 0.46,
272
+ "learning_rate": 0.00018899056643901404,
273
+ "loss": 0.8534,
274
+ "step": 1175
275
+ },
276
+ {
277
+ "epoch": 0.47,
278
+ "learning_rate": 0.00018852233682450893,
279
+ "loss": 0.8531,
280
+ "step": 1200
281
+ },
282
+ {
283
+ "epoch": 0.47,
284
+ "learning_rate": 0.00018804496171810948,
285
+ "loss": 0.8509,
286
+ "step": 1225
287
+ },
288
+ {
289
+ "epoch": 0.48,
290
+ "learning_rate": 0.00018755849043878222,
291
+ "loss": 0.8445,
292
+ "step": 1250
293
+ },
294
+ {
295
+ "epoch": 0.49,
296
+ "learning_rate": 0.0001870629732452449,
297
+ "loss": 0.8548,
298
+ "step": 1275
299
+ },
300
+ {
301
+ "epoch": 0.5,
302
+ "learning_rate": 0.00018655846133077417,
303
+ "loss": 0.8441,
304
+ "step": 1300
305
+ },
306
+ {
307
+ "epoch": 0.51,
308
+ "learning_rate": 0.00018604500681791656,
309
+ "loss": 0.8533,
310
+ "step": 1325
311
+ },
312
+ {
313
+ "epoch": 0.52,
314
+ "learning_rate": 0.00018552266275310373,
315
+ "loss": 0.8505,
316
+ "step": 1350
317
+ },
318
+ {
319
+ "epoch": 0.53,
320
+ "learning_rate": 0.0001849914831011719,
321
+ "loss": 0.8544,
322
+ "step": 1375
323
+ },
324
+ {
325
+ "epoch": 0.54,
326
+ "learning_rate": 0.00018445152273978668,
327
+ "loss": 0.845,
328
+ "step": 1400
329
+ },
330
+ {
331
+ "epoch": 0.55,
332
+ "learning_rate": 0.00018390283745377354,
333
+ "loss": 0.8376,
334
+ "step": 1425
335
+ },
336
+ {
337
+ "epoch": 0.56,
338
+ "learning_rate": 0.0001833454839293545,
339
+ "loss": 0.847,
340
+ "step": 1450
341
+ },
342
+ {
343
+ "epoch": 0.57,
344
+ "learning_rate": 0.00018277951974829163,
345
+ "loss": 0.8473,
346
+ "step": 1475
347
+ },
348
+ {
349
+ "epoch": 0.58,
350
+ "learning_rate": 0.0001822050033819382,
351
+ "loss": 0.8438,
352
+ "step": 1500
353
+ },
354
+ {
355
+ "epoch": 0.59,
356
+ "learning_rate": 0.00018162199418519785,
357
+ "loss": 0.8418,
358
+ "step": 1525
359
+ },
360
+ {
361
+ "epoch": 0.6,
362
+ "learning_rate": 0.00018103055239039243,
363
+ "loss": 0.842,
364
+ "step": 1550
365
+ },
366
+ {
367
+ "epoch": 0.61,
368
+ "learning_rate": 0.0001804307391010393,
369
+ "loss": 0.8435,
370
+ "step": 1575
371
+ },
372
+ {
373
+ "epoch": 0.62,
374
+ "learning_rate": 0.00017982261628553842,
375
+ "loss": 0.8349,
376
+ "step": 1600
377
+ },
378
+ {
379
+ "epoch": 0.63,
380
+ "learning_rate": 0.0001792062467707703,
381
+ "loss": 0.8483,
382
+ "step": 1625
383
+ },
384
+ {
385
+ "epoch": 0.64,
386
+ "learning_rate": 0.0001785816942356052,
387
+ "loss": 0.8387,
388
+ "step": 1650
389
+ },
390
+ {
391
+ "epoch": 0.65,
392
+ "learning_rate": 0.00017794902320432429,
393
+ "loss": 0.843,
394
+ "step": 1675
395
+ },
396
+ {
397
+ "epoch": 0.66,
398
+ "learning_rate": 0.00017730829903995333,
399
+ "loss": 0.8424,
400
+ "step": 1700
401
+ },
402
+ {
403
+ "epoch": 0.67,
404
+ "learning_rate": 0.00017665958793751006,
405
+ "loss": 0.8418,
406
+ "step": 1725
407
+ },
408
+ {
409
+ "epoch": 0.68,
410
+ "learning_rate": 0.00017600295691716522,
411
+ "loss": 0.8384,
412
+ "step": 1750
413
+ },
414
+ {
415
+ "epoch": 0.69,
416
+ "learning_rate": 0.00017533847381731856,
417
+ "loss": 0.8445,
418
+ "step": 1775
419
+ },
420
+ {
421
+ "epoch": 0.7,
422
+ "learning_rate": 0.00017466620728759033,
423
+ "loss": 0.8446,
424
+ "step": 1800
425
+ },
426
+ {
427
+ "epoch": 0.71,
428
+ "learning_rate": 0.00017398622678172878,
429
+ "loss": 0.838,
430
+ "step": 1825
431
+ },
432
+ {
433
+ "epoch": 0.72,
434
+ "learning_rate": 0.0001732986025504348,
435
+ "loss": 0.8415,
436
+ "step": 1850
437
+ },
438
+ {
439
+ "epoch": 0.73,
440
+ "learning_rate": 0.000172603405634104,
441
+ "loss": 0.8357,
442
+ "step": 1875
443
+ },
444
+ {
445
+ "epoch": 0.74,
446
+ "learning_rate": 0.00017190070785548755,
447
+ "loss": 0.8311,
448
+ "step": 1900
449
+ },
450
+ {
451
+ "epoch": 0.75,
452
+ "learning_rate": 0.0001711905818122717,
453
+ "loss": 0.8333,
454
+ "step": 1925
455
+ },
456
+ {
457
+ "epoch": 0.76,
458
+ "learning_rate": 0.0001704731008695777,
459
+ "loss": 0.8387,
460
+ "step": 1950
461
+ },
462
+ {
463
+ "epoch": 0.77,
464
+ "learning_rate": 0.0001697483391523821,
465
+ "loss": 0.8442,
466
+ "step": 1975
467
+ },
468
+ {
469
+ "epoch": 0.78,
470
+ "learning_rate": 0.00016901637153785885,
471
+ "loss": 0.8399,
472
+ "step": 2000
473
+ },
474
+ {
475
+ "epoch": 0.78,
476
+ "eval_loss": 0.8339959383010864,
477
+ "eval_runtime": 58.5829,
478
+ "eval_samples_per_second": 12.461,
479
+ "eval_steps_per_second": 0.905,
480
+ "step": 2000
481
+ },
482
+ {
483
+ "epoch": 0.79,
484
+ "learning_rate": 0.0001682772736476434,
485
+ "loss": 0.8334,
486
+ "step": 2025
487
+ },
488
+ {
489
+ "epoch": 0.79,
490
+ "learning_rate": 0.0001675311218400201,
491
+ "loss": 0.835,
492
+ "step": 2050
493
+ },
494
+ {
495
+ "epoch": 0.8,
496
+ "learning_rate": 0.00016677799320203332,
497
+ "loss": 0.8368,
498
+ "step": 2075
499
+ },
500
+ {
501
+ "epoch": 0.81,
502
+ "learning_rate": 0.00016601796554152344,
503
+ "loss": 0.8278,
504
+ "step": 2100
505
+ },
506
+ {
507
+ "epoch": 0.82,
508
+ "learning_rate": 0.00016525111737908827,
509
+ "loss": 0.8334,
510
+ "step": 2125
511
+ },
512
+ {
513
+ "epoch": 0.83,
514
+ "learning_rate": 0.00016447752793997096,
515
+ "loss": 0.8416,
516
+ "step": 2150
517
+ },
518
+ {
519
+ "epoch": 0.84,
520
+ "learning_rate": 0.00016369727714587483,
521
+ "loss": 0.8297,
522
+ "step": 2175
523
+ },
524
+ {
525
+ "epoch": 0.85,
526
+ "learning_rate": 0.0001629104456067066,
527
+ "loss": 0.8327,
528
+ "step": 2200
529
+ },
530
+ {
531
+ "epoch": 0.86,
532
+ "learning_rate": 0.00016211711461224825,
533
+ "loss": 0.8324,
534
+ "step": 2225
535
+ },
536
+ {
537
+ "epoch": 0.87,
538
+ "learning_rate": 0.0001613173661237589,
539
+ "loss": 0.8313,
540
+ "step": 2250
541
+ },
542
+ {
543
+ "epoch": 0.88,
544
+ "learning_rate": 0.0001605112827655069,
545
+ "loss": 0.8292,
546
+ "step": 2275
547
+ },
548
+ {
549
+ "epoch": 0.89,
550
+ "learning_rate": 0.0001596989478162339,
551
+ "loss": 0.8334,
552
+ "step": 2300
553
+ },
554
+ {
555
+ "epoch": 0.9,
556
+ "learning_rate": 0.00015888044520055106,
557
+ "loss": 0.8352,
558
+ "step": 2325
559
+ },
560
+ {
561
+ "epoch": 0.91,
562
+ "learning_rate": 0.00015805585948026852,
563
+ "loss": 0.823,
564
+ "step": 2350
565
+ },
566
+ {
567
+ "epoch": 0.92,
568
+ "learning_rate": 0.000157225275845659,
569
+ "loss": 0.8293,
570
+ "step": 2375
571
+ },
572
+ {
573
+ "epoch": 0.93,
574
+ "learning_rate": 0.00015638878010665672,
575
+ "loss": 0.8289,
576
+ "step": 2400
577
+ },
578
+ {
579
+ "epoch": 0.94,
580
+ "learning_rate": 0.00015554645868399205,
581
+ "loss": 0.832,
582
+ "step": 2425
583
+ },
584
+ {
585
+ "epoch": 0.95,
586
+ "learning_rate": 0.00015469839860026308,
587
+ "loss": 0.8294,
588
+ "step": 2450
589
+ },
590
+ {
591
+ "epoch": 0.96,
592
+ "learning_rate": 0.0001538446874709452,
593
+ "loss": 0.8281,
594
+ "step": 2475
595
+ },
596
+ {
597
+ "epoch": 0.97,
598
+ "learning_rate": 0.00015298541349533925,
599
+ "loss": 0.8314,
600
+ "step": 2500
601
+ },
602
+ {
603
+ "epoch": 0.98,
604
+ "learning_rate": 0.00015212066544745926,
605
+ "loss": 0.831,
606
+ "step": 2525
607
+ },
608
+ {
609
+ "epoch": 0.99,
610
+ "learning_rate": 0.00015125053266686124,
611
+ "loss": 0.8319,
612
+ "step": 2550
613
+ },
614
+ {
615
+ "epoch": 1.0,
616
+ "learning_rate": 0.00015037510504941303,
617
+ "loss": 0.8259,
618
+ "step": 2575
619
+ },
620
+ {
621
+ "epoch": 1.01,
622
+ "learning_rate": 0.00014949447303800695,
623
+ "loss": 0.8133,
624
+ "step": 2600
625
+ },
626
+ {
627
+ "epoch": 1.02,
628
+ "learning_rate": 0.00014860872761321593,
629
+ "loss": 0.8139,
630
+ "step": 2625
631
+ },
632
+ {
633
+ "epoch": 1.03,
634
+ "learning_rate": 0.00014771796028389405,
635
+ "loss": 0.804,
636
+ "step": 2650
637
+ },
638
+ {
639
+ "epoch": 1.04,
640
+ "learning_rate": 0.0001468222630777225,
641
+ "loss": 0.8011,
642
+ "step": 2675
643
+ },
644
+ {
645
+ "epoch": 1.05,
646
+ "learning_rate": 0.00014592172853170193,
647
+ "loss": 0.8037,
648
+ "step": 2700
649
+ },
650
+ {
651
+ "epoch": 1.06,
652
+ "learning_rate": 0.00014501644968259212,
653
+ "loss": 0.8063,
654
+ "step": 2725
655
+ },
656
+ {
657
+ "epoch": 1.07,
658
+ "learning_rate": 0.00014410652005730025,
659
+ "loss": 0.8155,
660
+ "step": 2750
661
+ },
662
+ {
663
+ "epoch": 1.08,
664
+ "learning_rate": 0.00014319203366321826,
665
+ "loss": 0.8066,
666
+ "step": 2775
667
+ },
668
+ {
669
+ "epoch": 1.09,
670
+ "learning_rate": 0.0001422730849785107,
671
+ "loss": 0.8091,
672
+ "step": 2800
673
+ },
674
+ {
675
+ "epoch": 1.1,
676
+ "learning_rate": 0.0001413497689423539,
677
+ "loss": 0.8067,
678
+ "step": 2825
679
+ },
680
+ {
681
+ "epoch": 1.11,
682
+ "learning_rate": 0.00014042218094512755,
683
+ "loss": 0.8046,
684
+ "step": 2850
685
+ },
686
+ {
687
+ "epoch": 1.11,
688
+ "learning_rate": 0.00013949041681855985,
689
+ "loss": 0.8053,
690
+ "step": 2875
691
+ },
692
+ {
693
+ "epoch": 1.12,
694
+ "learning_rate": 0.0001385545728258264,
695
+ "loss": 0.8075,
696
+ "step": 2900
697
+ },
698
+ {
699
+ "epoch": 1.13,
700
+ "learning_rate": 0.0001376147456516055,
701
+ "loss": 0.8015,
702
+ "step": 2925
703
+ },
704
+ {
705
+ "epoch": 1.14,
706
+ "learning_rate": 0.00013667103239208903,
707
+ "loss": 0.8016,
708
+ "step": 2950
709
+ },
710
+ {
711
+ "epoch": 1.15,
712
+ "learning_rate": 0.00013572353054495126,
713
+ "loss": 0.8029,
714
+ "step": 2975
715
+ },
716
+ {
717
+ "epoch": 1.16,
718
+ "learning_rate": 0.0001347723379992762,
719
+ "loss": 0.8017,
720
+ "step": 3000
721
+ },
722
+ {
723
+ "epoch": 1.16,
724
+ "eval_loss": 0.8229297995567322,
725
+ "eval_runtime": 59.3398,
726
+ "eval_samples_per_second": 12.302,
727
+ "eval_steps_per_second": 0.893,
728
+ "step": 3000
729
+ },
730
+ {
731
+ "epoch": 1.17,
732
+ "learning_rate": 0.0001338175530254443,
733
+ "loss": 0.8049,
734
+ "step": 3025
735
+ },
736
+ {
737
+ "epoch": 1.18,
738
+ "learning_rate": 0.00013285927426497985,
739
+ "loss": 0.8027,
740
+ "step": 3050
741
+ },
742
+ {
743
+ "epoch": 1.19,
744
+ "learning_rate": 0.00013189760072036008,
745
+ "loss": 0.8028,
746
+ "step": 3075
747
+ },
748
+ {
749
+ "epoch": 1.2,
750
+ "learning_rate": 0.0001309326317447869,
751
+ "loss": 0.8021,
752
+ "step": 3100
753
+ },
754
+ {
755
+ "epoch": 1.21,
756
+ "learning_rate": 0.00012996446703192257,
757
+ "loss": 0.8033,
758
+ "step": 3125
759
+ },
760
+ {
761
+ "epoch": 1.22,
762
+ "learning_rate": 0.00012899320660558986,
763
+ "loss": 0.8016,
764
+ "step": 3150
765
+ },
766
+ {
767
+ "epoch": 1.23,
768
+ "learning_rate": 0.00012801895080943846,
769
+ "loss": 0.7995,
770
+ "step": 3175
771
+ },
772
+ {
773
+ "epoch": 1.24,
774
+ "learning_rate": 0.0001270418002965782,
775
+ "loss": 0.799,
776
+ "step": 3200
777
+ },
778
+ {
779
+ "epoch": 1.25,
780
+ "learning_rate": 0.0001260618560191802,
781
+ "loss": 0.8002,
782
+ "step": 3225
783
+ },
784
+ {
785
+ "epoch": 1.26,
786
+ "learning_rate": 0.00012507921921804717,
787
+ "loss": 0.8068,
788
+ "step": 3250
789
+ },
790
+ {
791
+ "epoch": 1.27,
792
+ "learning_rate": 0.00012409399141215423,
793
+ "loss": 0.8041,
794
+ "step": 3275
795
+ },
796
+ {
797
+ "epoch": 1.28,
798
+ "learning_rate": 0.0001231062743881603,
799
+ "loss": 0.7999,
800
+ "step": 3300
801
+ },
802
+ {
803
+ "epoch": 1.29,
804
+ "learning_rate": 0.0001221161701898926,
805
+ "loss": 0.7995,
806
+ "step": 3325
807
+ },
808
+ {
809
+ "epoch": 1.3,
810
+ "learning_rate": 0.00012112378110780391,
811
+ "loss": 0.7959,
812
+ "step": 3350
813
+ },
814
+ {
815
+ "epoch": 1.31,
816
+ "learning_rate": 0.00012012920966840486,
817
+ "loss": 0.7999,
818
+ "step": 3375
819
+ },
820
+ {
821
+ "epoch": 1.32,
822
+ "learning_rate": 0.00011913255862367151,
823
+ "loss": 0.8016,
824
+ "step": 3400
825
+ },
826
+ {
827
+ "epoch": 1.33,
828
+ "learning_rate": 0.00011813393094042993,
829
+ "loss": 0.7944,
830
+ "step": 3425
831
+ },
832
+ {
833
+ "epoch": 1.34,
834
+ "learning_rate": 0.0001171334297897181,
835
+ "loss": 0.8026,
836
+ "step": 3450
837
+ },
838
+ {
839
+ "epoch": 1.35,
840
+ "learning_rate": 0.00011613115853612734,
841
+ "loss": 0.8004,
842
+ "step": 3475
843
+ },
844
+ {
845
+ "epoch": 1.36,
846
+ "learning_rate": 0.00011512722072712321,
847
+ "loss": 0.7992,
848
+ "step": 3500
849
+ },
850
+ {
851
+ "epoch": 1.37,
852
+ "learning_rate": 0.00011412172008234785,
853
+ "loss": 0.8004,
854
+ "step": 3525
855
+ },
856
+ {
857
+ "epoch": 1.38,
858
+ "learning_rate": 0.0001131147604829043,
859
+ "loss": 0.8009,
860
+ "step": 3550
861
+ },
862
+ {
863
+ "epoch": 1.39,
864
+ "learning_rate": 0.00011210644596062439,
865
+ "loss": 0.7993,
866
+ "step": 3575
867
+ },
868
+ {
869
+ "epoch": 1.4,
870
+ "learning_rate": 0.00011109688068732081,
871
+ "loss": 0.7965,
872
+ "step": 3600
873
+ },
874
+ {
875
+ "epoch": 1.41,
876
+ "learning_rate": 0.00011008616896402482,
877
+ "loss": 0.7991,
878
+ "step": 3625
879
+ },
880
+ {
881
+ "epoch": 1.42,
882
+ "learning_rate": 0.00010907441521021072,
883
+ "loss": 0.8026,
884
+ "step": 3650
885
+ },
886
+ {
887
+ "epoch": 1.42,
888
+ "learning_rate": 0.00010806172395300789,
889
+ "loss": 0.7941,
890
+ "step": 3675
891
+ },
892
+ {
893
+ "epoch": 1.43,
894
+ "learning_rate": 0.00010704819981640186,
895
+ "loss": 0.7989,
896
+ "step": 3700
897
+ },
898
+ {
899
+ "epoch": 1.44,
900
+ "learning_rate": 0.00010603394751042522,
901
+ "loss": 0.7981,
902
+ "step": 3725
903
+ },
904
+ {
905
+ "epoch": 1.45,
906
+ "learning_rate": 0.00010501907182033979,
907
+ "loss": 0.7985,
908
+ "step": 3750
909
+ },
910
+ {
911
+ "epoch": 1.46,
912
+ "learning_rate": 0.000104003677595811,
913
+ "loss": 0.7921,
914
+ "step": 3775
915
+ },
916
+ {
917
+ "epoch": 1.47,
918
+ "learning_rate": 0.00010298786974007555,
919
+ "loss": 0.8012,
920
+ "step": 3800
921
+ },
922
+ {
923
+ "epoch": 1.48,
924
+ "learning_rate": 0.00010197175319910343,
925
+ "loss": 0.7906,
926
+ "step": 3825
927
+ },
928
+ {
929
+ "epoch": 1.49,
930
+ "learning_rate": 0.00010095543295075593,
931
+ "loss": 0.7928,
932
+ "step": 3850
933
+ },
934
+ {
935
+ "epoch": 1.5,
936
+ "learning_rate": 9.993901399393979e-05,
937
+ "loss": 0.8018,
938
+ "step": 3875
939
+ },
940
+ {
941
+ "epoch": 1.51,
942
+ "learning_rate": 9.892260133775968e-05,
943
+ "loss": 0.7991,
944
+ "step": 3900
945
+ },
946
+ {
947
+ "epoch": 1.52,
948
+ "learning_rate": 9.79062999906693e-05,
949
+ "loss": 0.795,
950
+ "step": 3925
951
+ },
952
+ {
953
+ "epoch": 1.53,
954
+ "learning_rate": 9.68902149496227e-05,
955
+ "loss": 0.7977,
956
+ "step": 3950
957
+ },
958
+ {
959
+ "epoch": 1.54,
960
+ "learning_rate": 9.587445118922674e-05,
961
+ "loss": 0.8013,
962
+ "step": 3975
963
+ },
964
+ {
965
+ "epoch": 1.55,
966
+ "learning_rate": 9.485911365089589e-05,
967
+ "loss": 0.7978,
968
+ "step": 4000
969
+ },
970
+ {
971
+ "epoch": 1.55,
972
+ "eval_loss": 0.8142631649971008,
973
+ "eval_runtime": 59.4108,
974
+ "eval_samples_per_second": 12.287,
975
+ "eval_steps_per_second": 0.892,
976
+ "step": 4000
977
+ },
978
+ {
979
+ "epoch": 1.56,
980
+ "learning_rate": 9.384430723201036e-05,
981
+ "loss": 0.7912,
982
+ "step": 4025
983
+ },
984
+ {
985
+ "epoch": 1.57,
986
+ "learning_rate": 9.283013677507902e-05,
987
+ "loss": 0.7919,
988
+ "step": 4050
989
+ },
990
+ {
991
+ "epoch": 1.58,
992
+ "learning_rate": 9.181670705690761e-05,
993
+ "loss": 0.7919,
994
+ "step": 4075
995
+ },
996
+ {
997
+ "epoch": 1.59,
998
+ "learning_rate": 9.080412277777413e-05,
999
+ "loss": 0.8018,
1000
+ "step": 4100
1001
+ },
1002
+ {
1003
+ "epoch": 1.6,
1004
+ "learning_rate": 8.979248855061188e-05,
1005
+ "loss": 0.7811,
1006
+ "step": 4125
1007
+ },
1008
+ {
1009
+ "epoch": 1.61,
1010
+ "learning_rate": 8.878190889020159e-05,
1011
+ "loss": 0.7919,
1012
+ "step": 4150
1013
+ },
1014
+ {
1015
+ "epoch": 1.62,
1016
+ "learning_rate": 8.777248820237376e-05,
1017
+ "loss": 0.7994,
1018
+ "step": 4175
1019
+ },
1020
+ {
1021
+ "epoch": 1.63,
1022
+ "learning_rate": 8.676433077322215e-05,
1023
+ "loss": 0.7956,
1024
+ "step": 4200
1025
+ },
1026
+ {
1027
+ "epoch": 1.64,
1028
+ "learning_rate": 8.575754075832973e-05,
1029
+ "loss": 0.7968,
1030
+ "step": 4225
1031
+ },
1032
+ {
1033
+ "epoch": 1.65,
1034
+ "learning_rate": 8.475222217200801e-05,
1035
+ "loss": 0.7905,
1036
+ "step": 4250
1037
+ },
1038
+ {
1039
+ "epoch": 1.66,
1040
+ "learning_rate": 8.374847887655112e-05,
1041
+ "loss": 0.7889,
1042
+ "step": 4275
1043
+ },
1044
+ {
1045
+ "epoch": 1.67,
1046
+ "learning_rate": 8.274641457150543e-05,
1047
+ "loss": 0.7988,
1048
+ "step": 4300
1049
+ },
1050
+ {
1051
+ "epoch": 1.68,
1052
+ "learning_rate": 8.174613278295608e-05,
1053
+ "loss": 0.7947,
1054
+ "step": 4325
1055
+ },
1056
+ {
1057
+ "epoch": 1.69,
1058
+ "learning_rate": 8.074773685283137e-05,
1059
+ "loss": 0.7929,
1060
+ "step": 4350
1061
+ },
1062
+ {
1063
+ "epoch": 1.7,
1064
+ "learning_rate": 7.97513299282264e-05,
1065
+ "loss": 0.7949,
1066
+ "step": 4375
1067
+ },
1068
+ {
1069
+ "epoch": 1.71,
1070
+ "learning_rate": 7.875701495074638e-05,
1071
+ "loss": 0.7925,
1072
+ "step": 4400
1073
+ },
1074
+ {
1075
+ "epoch": 1.72,
1076
+ "learning_rate": 7.776489464587158e-05,
1077
+ "loss": 0.7917,
1078
+ "step": 4425
1079
+ },
1080
+ {
1081
+ "epoch": 1.73,
1082
+ "learning_rate": 7.677507151234448e-05,
1083
+ "loss": 0.7905,
1084
+ "step": 4450
1085
+ },
1086
+ {
1087
+ "epoch": 1.74,
1088
+ "learning_rate": 7.578764781158034e-05,
1089
+ "loss": 0.7912,
1090
+ "step": 4475
1091
+ },
1092
+ {
1093
+ "epoch": 1.74,
1094
+ "learning_rate": 7.480272555710227e-05,
1095
+ "loss": 0.8006,
1096
+ "step": 4500
1097
+ },
1098
+ {
1099
+ "epoch": 1.75,
1100
+ "learning_rate": 7.382040650400185e-05,
1101
+ "loss": 0.7937,
1102
+ "step": 4525
1103
+ },
1104
+ {
1105
+ "epoch": 1.76,
1106
+ "learning_rate": 7.28407921384267e-05,
1107
+ "loss": 0.794,
1108
+ "step": 4550
1109
+ },
1110
+ {
1111
+ "epoch": 1.77,
1112
+ "learning_rate": 7.186398366709545e-05,
1113
+ "loss": 0.7931,
1114
+ "step": 4575
1115
+ },
1116
+ {
1117
+ "epoch": 1.78,
1118
+ "learning_rate": 7.089008200684197e-05,
1119
+ "loss": 0.7982,
1120
+ "step": 4600
1121
+ },
1122
+ {
1123
+ "epoch": 1.79,
1124
+ "learning_rate": 6.991918777418928e-05,
1125
+ "loss": 0.7916,
1126
+ "step": 4625
1127
+ },
1128
+ {
1129
+ "epoch": 1.8,
1130
+ "learning_rate": 6.895140127495455e-05,
1131
+ "loss": 0.7919,
1132
+ "step": 4650
1133
+ },
1134
+ {
1135
+ "epoch": 1.81,
1136
+ "learning_rate": 6.798682249388631e-05,
1137
+ "loss": 0.7863,
1138
+ "step": 4675
1139
+ },
1140
+ {
1141
+ "epoch": 1.82,
1142
+ "learning_rate": 6.702555108433461e-05,
1143
+ "loss": 0.789,
1144
+ "step": 4700
1145
+ },
1146
+ {
1147
+ "epoch": 1.83,
1148
+ "learning_rate": 6.606768635795574e-05,
1149
+ "loss": 0.7902,
1150
+ "step": 4725
1151
+ },
1152
+ {
1153
+ "epoch": 1.84,
1154
+ "learning_rate": 6.511332727445191e-05,
1155
+ "loss": 0.7924,
1156
+ "step": 4750
1157
+ },
1158
+ {
1159
+ "epoch": 1.85,
1160
+ "learning_rate": 6.416257243134747e-05,
1161
+ "loss": 0.7957,
1162
+ "step": 4775
1163
+ },
1164
+ {
1165
+ "epoch": 1.86,
1166
+ "learning_rate": 6.321552005380256e-05,
1167
+ "loss": 0.7916,
1168
+ "step": 4800
1169
+ },
1170
+ {
1171
+ "epoch": 1.87,
1172
+ "learning_rate": 6.22722679844652e-05,
1173
+ "loss": 0.7867,
1174
+ "step": 4825
1175
+ },
1176
+ {
1177
+ "epoch": 1.88,
1178
+ "learning_rate": 6.133291367336284e-05,
1179
+ "loss": 0.7944,
1180
+ "step": 4850
1181
+ },
1182
+ {
1183
+ "epoch": 1.89,
1184
+ "learning_rate": 6.039755416783457e-05,
1185
+ "loss": 0.7982,
1186
+ "step": 4875
1187
+ },
1188
+ {
1189
+ "epoch": 1.9,
1190
+ "learning_rate": 5.946628610250484e-05,
1191
+ "loss": 0.7918,
1192
+ "step": 4900
1193
+ },
1194
+ {
1195
+ "epoch": 1.91,
1196
+ "learning_rate": 5.853920568929996e-05,
1197
+ "loss": 0.7921,
1198
+ "step": 4925
1199
+ },
1200
+ {
1201
+ "epoch": 1.92,
1202
+ "learning_rate": 5.761640870750799e-05,
1203
+ "loss": 0.7878,
1204
+ "step": 4950
1205
+ },
1206
+ {
1207
+ "epoch": 1.93,
1208
+ "learning_rate": 5.669799049388375e-05,
1209
+ "loss": 0.7901,
1210
+ "step": 4975
1211
+ },
1212
+ {
1213
+ "epoch": 1.94,
1214
+ "learning_rate": 5.578404593279911e-05,
1215
+ "loss": 0.7858,
1216
+ "step": 5000
1217
+ },
1218
+ {
1219
+ "epoch": 1.94,
1220
+ "eval_loss": 0.807844877243042,
1221
+ "eval_runtime": 59.586,
1222
+ "eval_samples_per_second": 12.251,
1223
+ "eval_steps_per_second": 0.889,
1224
+ "step": 5000
1225
+ },
1226
+ {
1227
+ "epoch": 1.95,
1228
+ "learning_rate": 5.487466944644033e-05,
1229
+ "loss": 0.7902,
1230
+ "step": 5025
1231
+ },
1232
+ {
1233
+ "epoch": 1.96,
1234
+ "learning_rate": 5.3969954985052996e-05,
1235
+ "loss": 0.7979,
1236
+ "step": 5050
1237
+ },
1238
+ {
1239
+ "epoch": 1.97,
1240
+ "learning_rate": 5.306999601723579e-05,
1241
+ "loss": 0.7931,
1242
+ "step": 5075
1243
+ },
1244
+ {
1245
+ "epoch": 1.98,
1246
+ "learning_rate": 5.21748855202839e-05,
1247
+ "loss": 0.7868,
1248
+ "step": 5100
1249
+ },
1250
+ {
1251
+ "epoch": 1.99,
1252
+ "learning_rate": 5.128471597058342e-05,
1253
+ "loss": 0.7993,
1254
+ "step": 5125
1255
+ },
1256
+ {
1257
+ "epoch": 2.0,
1258
+ "learning_rate": 5.03995793340572e-05,
1259
+ "loss": 0.7892,
1260
+ "step": 5150
1261
+ },
1262
+ {
1263
+ "epoch": 2.01,
1264
+ "learning_rate": 4.9519567056663694e-05,
1265
+ "loss": 0.7788,
1266
+ "step": 5175
1267
+ },
1268
+ {
1269
+ "epoch": 2.02,
1270
+ "learning_rate": 4.864477005494938e-05,
1271
+ "loss": 0.7654,
1272
+ "step": 5200
1273
+ },
1274
+ {
1275
+ "epoch": 2.03,
1276
+ "learning_rate": 4.777527870665592e-05,
1277
+ "loss": 0.7468,
1278
+ "step": 5225
1279
+ },
1280
+ {
1281
+ "epoch": 2.04,
1282
+ "learning_rate": 4.691118284138296e-05,
1283
+ "loss": 0.7359,
1284
+ "step": 5250
1285
+ },
1286
+ {
1287
+ "epoch": 2.05,
1288
+ "learning_rate": 4.605257173130763e-05,
1289
+ "loss": 0.7422,
1290
+ "step": 5275
1291
+ },
1292
+ {
1293
+ "epoch": 2.06,
1294
+ "learning_rate": 4.519953408196152e-05,
1295
+ "loss": 0.7424,
1296
+ "step": 5300
1297
+ },
1298
+ {
1299
+ "epoch": 2.06,
1300
+ "learning_rate": 4.435215802306635e-05,
1301
+ "loss": 0.7521,
1302
+ "step": 5325
1303
+ },
1304
+ {
1305
+ "epoch": 2.07,
1306
+ "learning_rate": 4.351053109942894e-05,
1307
+ "loss": 0.7477,
1308
+ "step": 5350
1309
+ },
1310
+ {
1311
+ "epoch": 2.08,
1312
+ "learning_rate": 4.2674740261896776e-05,
1313
+ "loss": 0.7456,
1314
+ "step": 5375
1315
+ },
1316
+ {
1317
+ "epoch": 2.09,
1318
+ "learning_rate": 4.1844871858374844e-05,
1319
+ "loss": 0.766,
1320
+ "step": 5400
1321
+ },
1322
+ {
1323
+ "epoch": 2.1,
1324
+ "learning_rate": 4.1021011624904814e-05,
1325
+ "loss": 0.7664,
1326
+ "step": 5425
1327
+ },
1328
+ {
1329
+ "epoch": 2.11,
1330
+ "learning_rate": 4.0203244676807353e-05,
1331
+ "loss": 0.7703,
1332
+ "step": 5450
1333
+ },
1334
+ {
1335
+ "epoch": 2.12,
1336
+ "learning_rate": 3.939165549988873e-05,
1337
+ "loss": 0.7674,
1338
+ "step": 5475
1339
+ },
1340
+ {
1341
+ "epoch": 2.13,
1342
+ "learning_rate": 3.858632794171222e-05,
1343
+ "loss": 0.7722,
1344
+ "step": 5500
1345
+ },
1346
+ {
1347
+ "epoch": 2.14,
1348
+ "learning_rate": 3.778734520293562e-05,
1349
+ "loss": 0.7716,
1350
+ "step": 5525
1351
+ },
1352
+ {
1353
+ "epoch": 2.15,
1354
+ "learning_rate": 3.699478982871561e-05,
1355
+ "loss": 0.7795,
1356
+ "step": 5550
1357
+ },
1358
+ {
1359
+ "epoch": 2.16,
1360
+ "learning_rate": 3.62087437001797e-05,
1361
+ "loss": 0.7728,
1362
+ "step": 5575
1363
+ },
1364
+ {
1365
+ "epoch": 2.17,
1366
+ "learning_rate": 3.5429288025966944e-05,
1367
+ "loss": 0.7709,
1368
+ "step": 5600
1369
+ },
1370
+ {
1371
+ "epoch": 2.18,
1372
+ "learning_rate": 3.4656503333837956e-05,
1373
+ "loss": 0.7682,
1374
+ "step": 5625
1375
+ },
1376
+ {
1377
+ "epoch": 2.19,
1378
+ "learning_rate": 3.389046946235542e-05,
1379
+ "loss": 0.7734,
1380
+ "step": 5650
1381
+ },
1382
+ {
1383
+ "epoch": 2.2,
1384
+ "learning_rate": 3.313126555263576e-05,
1385
+ "loss": 0.7716,
1386
+ "step": 5675
1387
+ },
1388
+ {
1389
+ "epoch": 2.21,
1390
+ "learning_rate": 3.237897004017276e-05,
1391
+ "loss": 0.7716,
1392
+ "step": 5700
1393
+ },
1394
+ {
1395
+ "epoch": 2.22,
1396
+ "learning_rate": 3.163366064673427e-05,
1397
+ "loss": 0.7721,
1398
+ "step": 5725
1399
+ },
1400
+ {
1401
+ "epoch": 2.23,
1402
+ "learning_rate": 3.089541437233252e-05,
1403
+ "loss": 0.7658,
1404
+ "step": 5750
1405
+ },
1406
+ {
1407
+ "epoch": 2.24,
1408
+ "learning_rate": 3.0164307487268996e-05,
1409
+ "loss": 0.7716,
1410
+ "step": 5775
1411
+ },
1412
+ {
1413
+ "epoch": 2.25,
1414
+ "learning_rate": 2.944041552425475e-05,
1415
+ "loss": 0.7687,
1416
+ "step": 5800
1417
+ },
1418
+ {
1419
+ "epoch": 2.26,
1420
+ "learning_rate": 2.8723813270606982e-05,
1421
+ "loss": 0.7698,
1422
+ "step": 5825
1423
+ },
1424
+ {
1425
+ "epoch": 2.27,
1426
+ "learning_rate": 2.8014574760522416e-05,
1427
+ "loss": 0.7641,
1428
+ "step": 5850
1429
+ },
1430
+ {
1431
+ "epoch": 2.28,
1432
+ "learning_rate": 2.731277326742876e-05,
1433
+ "loss": 0.7746,
1434
+ "step": 5875
1435
+ },
1436
+ {
1437
+ "epoch": 2.29,
1438
+ "learning_rate": 2.6618481296414522e-05,
1439
+ "loss": 0.7722,
1440
+ "step": 5900
1441
+ }
1442
+ ],
1443
+ "max_steps": 7737,
1444
+ "num_train_epochs": 3,
1445
+ "total_flos": 2.5410887248997515e+19,
1446
+ "trial_name": null,
1447
+ "trial_params": null
1448
+ }
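
The trainer_state.json added above is plain JSON: "log_history" holds one entry per logging step (keys "epoch", "learning_rate", "loss", "step") plus one entry per evaluation run (keys "eval_loss", "eval_runtime", "eval_samples_per_second", "eval_steps_per_second"), with the overall "epoch", "global_step", "max_steps" and related fields at the top level. A minimal sketch of reading it, assuming the checkpoint directory (here checkpoint-5900/) has been downloaded locally; the file path and the printed summary are illustrative, not part of this repository's tooling:

```python
# Sketch: load a downloaded trainer_state.json and split log_history into
# training-loss and eval-loss series. The path below is an assumption made
# for illustration only.
import json

with open("checkpoint-5900/trainer_state.json") as f:
    state = json.load(f)

# Training log entries carry "loss"; evaluation entries carry "eval_loss".
train_points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_points = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"epoch {state['epoch']:.2f}, step {state['global_step']} of {state['max_steps']}")
print("last logged train loss:", train_points[-1])
print("last logged eval loss:", eval_points[-1])
```

For this checkpoint the last entries would be (5900, 0.7722) for training loss and (5000, 0.807844877243042) for eval loss, matching the tail of the log above.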