Upload 14 files
- README.md +59 -3
- adapter_config.json +29 -0
- adapter_model.safetensors +3 -0
- all_results.json +8 -0
- running_log.txt +1137 -0
- special_tokens_map.json +24 -0
- tokenizer.json +0 -0
- tokenizer.model +3 -0
- tokenizer_config.json +42 -0
- train_results.json +8 -0
- trainer_config.yaml +28 -0
- trainer_log.jsonl +254 -0
- trainer_state.json +1801 -0
- training_args.bin +3 -0
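
"Upload 14 files" is the web UI's default commit message for a drag-and-drop upload. As a minimal sketch (not something this commit confirms), an equivalent commit could be pushed from the training machine with `huggingface_hub`, using the LLaMA-Factory output directory that appears in running_log.txt; the repo id here is a placeholder:

```python
from huggingface_hub import HfApi

# Sketch only: "your-username/custom1" is a placeholder repo id; the folder
# path is the LLaMA-Factory output directory seen in running_log.txt.
api = HfApi()
api.upload_folder(
    folder_path="saves/LLaMA-7B/lora/custom1",
    repo_id="your-username/custom1",
    repo_type="model",
    commit_message="Upload 14 files",
)
```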
README.md
CHANGED
@@ -1,3 +1,59 @@
----
-license:
-
+---
+license: other
+library_name: peft
+tags:
+- llama-factory
+- lora
+- generated_from_trainer
+base_model: huggyllama/llama-7b
+model-index:
+- name: custom1
+  results: []
+---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+# custom1
+
+This model is a fine-tuned version of [huggyllama/llama-7b](https://huggingface.co/huggyllama/llama-7b) on the identity dataset.
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 5e-05
+- train_batch_size: 2
+- eval_batch_size: 8
+- seed: 42
+- gradient_accumulation_steps: 8
+- total_train_batch_size: 16
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- lr_scheduler_type: cosine
+- num_epochs: 3.0
+- mixed_precision_training: Native AMP
+
+### Training results
+
+
+
+### Framework versions
+
+- PEFT 0.10.0
+- Transformers 4.40.1
+- Pytorch 2.2.1+cu121
+- Datasets 2.19.0
+- Tokenizers 0.19.1
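
The card above has no usage section yet. As a hedged sketch (not part of the commit), the LoRA adapter this commit uploads could be applied on top of the base model with PEFT along these lines; "your-username/custom1" is a placeholder for wherever this repo lives:

```python
# Sketch only: loads huggyllama/llama-7b in fp16 and applies this adapter.
# device_map="auto" assumes `accelerate` is installed.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained(
    "huggyllama/llama-7b", torch_dtype=torch.float16, device_map="auto"
)
model = PeftModel.from_pretrained(base, "your-username/custom1")
tokenizer = AutoTokenizer.from_pretrained("your-username/custom1")

inputs = tokenizer("Who are you?", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```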
adapter_config.json
ADDED
@@ -0,0 +1,29 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "huggyllama/llama-7b",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 16,
+  "lora_dropout": 0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "v_proj",
+    "q_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
+}
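
Read as a PEFT `LoraConfig`, this is rank-8 LoRA on the attention query and value projections only. A minimal sketch of the equivalent config, plus a check that it accounts for the parameter and file sizes reported elsewhere in this commit (the arithmetic assumes LLaMA-7B's 32 layers and 4096 hidden size, both confirmed in the config dumps in running_log.txt below):

```python
from peft import LoraConfig

# Equivalent config, as a sketch; field names mirror adapter_config.json.
config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.0,
    target_modules=["v_proj", "q_proj"],
    bias="none",
    task_type="CAUSAL_LM",
)

# Each targeted projection gains two low-rank matrices, A (8 x 4096) and
# B (4096 x 8). With q_proj and v_proj adapted in each of the 32 layers:
hidden_size, r, num_layers, targets = 4096, 8, 32, 2
trainable = num_layers * targets * 2 * hidden_size * r
print(trainable)  # 4194304, matching "trainable params: 4194304" in the log
# Stored in fp32 that is 4194304 * 4 = 16777216 bytes, consistent with the
# 16794200-byte adapter_model.safetensors once the safetensors header is added.
```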
adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d754527c09091f5c684d740413ac5fda645f387b868be523e3787f15e614ea73
+size 16794200
all_results.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "epoch": 2.9982238010657194,
+  "total_flos": 6.019503790030848e+16,
+  "train_loss": 1.9697479162170988,
+  "train_runtime": 4932.4291,
+  "train_samples_per_second": 4.109,
+  "train_steps_per_second": 0.257
+}
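
These numbers are internally consistent with the run in running_log.txt below ("Num examples = 6,755", per-device batch size 2, gradient accumulation 8, "Total optimization steps = 1,266"). A quick sanity check, assuming the HF Trainer reports throughput as examples × epochs over runtime:

```python
import math

num_examples, num_epochs, runtime_s = 6755, 3, 4932.4291
batch_size, grad_accum = 2, 8

# ceil(6755 / 2) = 3378 micro-batches per epoch; 3378 // 8 = 422 optimizer
# steps per epoch; 422 * 3 = 1266 total. The reported final epoch
# 2.9982... also matches 1266 / (3378 / 8).
steps_per_epoch = math.ceil(num_examples / batch_size) // grad_accum
total_steps = steps_per_epoch * num_epochs
print(total_steps)                                      # 1266

print(round(num_examples * num_epochs / runtime_s, 3))  # 4.109 samples/s
print(round(total_steps / runtime_s, 3))                # 0.257 steps/s
```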
running_log.txt
ADDED
@@ -0,0 +1,1137 @@
+05/05/2024 16:36:00 - INFO - transformers.tokenization_utils_base - loading file tokenizer.model from cache at /root/.cache/huggingface/hub/models--huggyllama--llama-7b/snapshots/8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16/tokenizer.model
+
+05/05/2024 16:36:00 - INFO - transformers.tokenization_utils_base - loading file tokenizer.json from cache at /root/.cache/huggingface/hub/models--huggyllama--llama-7b/snapshots/8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16/tokenizer.json
+
+05/05/2024 16:36:00 - INFO - transformers.tokenization_utils_base - loading file added_tokens.json from cache at None
+
+05/05/2024 16:36:00 - INFO - transformers.tokenization_utils_base - loading file special_tokens_map.json from cache at /root/.cache/huggingface/hub/models--huggyllama--llama-7b/snapshots/8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16/special_tokens_map.json
+
+05/05/2024 16:36:00 - INFO - transformers.tokenization_utils_base - loading file tokenizer_config.json from cache at /root/.cache/huggingface/hub/models--huggyllama--llama-7b/snapshots/8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16/tokenizer_config.json
+
+05/05/2024 16:36:00 - INFO - llmtuner.data.template - Add pad token: </s>
+
+05/05/2024 16:36:00 - INFO - llmtuner.data.loader - Loading dataset identity.json...
+
+05/05/2024 16:36:00 - WARNING - llmtuner.data.utils - Checksum failed: mismatched SHA-1 hash value at data/identity.json.
+
+05/05/2024 16:36:03 - INFO - transformers.configuration_utils - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--huggyllama--llama-7b/snapshots/8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16/config.json
+
+05/05/2024 16:36:03 - INFO - transformers.configuration_utils - Model config LlamaConfig {
+  "_name_or_path": "huggyllama/llama-7b",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 2048,
+  "max_sequence_length": 2048,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 32,
+  "pad_token_id": 0,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.40.1",
+  "use_cache": true,
+  "vocab_size": 32000
+}
+
+
+05/05/2024 16:36:04 - INFO - transformers.modeling_utils - loading weights file model.safetensors from cache at /root/.cache/huggingface/hub/models--huggyllama--llama-7b/snapshots/8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16/model.safetensors.index.json
+
+05/05/2024 16:38:24 - INFO - transformers.modeling_utils - Instantiating LlamaForCausalLM model under default dtype torch.float16.
+
+05/05/2024 16:38:24 - INFO - transformers.generation.configuration_utils - Generate config GenerationConfig {
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "pad_token_id": 0
+}
+
+
+05/05/2024 16:39:24 - INFO - transformers.modeling_utils - All model checkpoint weights were used when initializing LlamaForCausalLM.
+
+
+05/05/2024 16:39:24 - INFO - transformers.modeling_utils - All the weights of LlamaForCausalLM were initialized from the model checkpoint at huggyllama/llama-7b.
+If your task is similar to the task the model of the checkpoint was trained on, you can already use LlamaForCausalLM for predictions without further training.
+
+05/05/2024 16:39:24 - INFO - transformers.generation.configuration_utils - loading configuration file generation_config.json from cache at /root/.cache/huggingface/hub/models--huggyllama--llama-7b/snapshots/8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16/generation_config.json
+
+05/05/2024 16:39:24 - INFO - transformers.generation.configuration_utils - Generate config GenerationConfig {
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "pad_token_id": 0
+}
+
+
+05/05/2024 16:39:24 - INFO - llmtuner.model.utils.checkpointing - Gradient checkpointing enabled.
+
+05/05/2024 16:39:24 - INFO - llmtuner.model.utils.attention - Using torch SDPA for faster training and inference.
+
+05/05/2024 16:39:24 - INFO - llmtuner.model.adapter - Fine-tuning method: LoRA
+
+05/05/2024 16:39:24 - INFO - llmtuner.model.loader - trainable params: 4194304 || all params: 6742609920 || trainable%: 0.0622
+
+05/05/2024 16:39:25 - INFO - transformers.trainer - Using auto half precision backend
+
+05/05/2024 16:39:25 - INFO - transformers.trainer - ***** Running training *****
+
+05/05/2024 16:39:25 - INFO - transformers.trainer - Num examples = 6,755
+
+05/05/2024 16:39:25 - INFO - transformers.trainer - Num Epochs = 3
+
+05/05/2024 16:39:25 - INFO - transformers.trainer - Instantaneous batch size per device = 2
+
+05/05/2024 16:39:25 - INFO - transformers.trainer - Total train batch size (w. parallel, distributed & accumulation) = 16
+
+05/05/2024 16:39:25 - INFO - transformers.trainer - Gradient Accumulation steps = 8
+
+05/05/2024 16:39:25 - INFO - transformers.trainer - Total optimization steps = 1,266
+
+05/05/2024 16:39:25 - INFO - transformers.trainer - Number of trainable parameters = 4,194,304
+
+05/05/2024 16:39:43 - INFO - llmtuner.extras.callbacks - {'loss': 3.8760, 'learning_rate': 4.9998e-05, 'epoch': 0.01}
+
+05/05/2024 16:40:01 - INFO - llmtuner.extras.callbacks - {'loss': 3.8538, 'learning_rate': 4.9992e-05, 'epoch': 0.02}
+
+05/05/2024 16:40:18 - INFO - llmtuner.extras.callbacks - {'loss': 3.5742, 'learning_rate': 4.9983e-05, 'epoch': 0.04}
+
+05/05/2024 16:40:36 - INFO - llmtuner.extras.callbacks - {'loss': 3.5193, 'learning_rate': 4.9969e-05, 'epoch': 0.05}
+
+05/05/2024 16:40:52 - INFO - llmtuner.extras.callbacks - {'loss': 2.9465, 'learning_rate': 4.9952e-05, 'epoch': 0.06}
+
+05/05/2024 16:41:09 - INFO - llmtuner.extras.callbacks - {'loss': 3.0208, 'learning_rate': 4.9931e-05, 'epoch': 0.07}
+
+05/05/2024 16:41:27 - INFO - llmtuner.extras.callbacks - {'loss': 2.7173, 'learning_rate': 4.9906e-05, 'epoch': 0.08}
+
+05/05/2024 16:41:45 - INFO - llmtuner.extras.callbacks - {'loss': 2.6235, 'learning_rate': 4.9877e-05, 'epoch': 0.09}
+
+05/05/2024 16:42:05 - INFO - llmtuner.extras.callbacks - {'loss': 2.4021, 'learning_rate': 4.9844e-05, 'epoch': 0.11}
+
+05/05/2024 16:42:24 - INFO - llmtuner.extras.callbacks - {'loss': 2.1688, 'learning_rate': 4.9808e-05, 'epoch': 0.12}
+
+05/05/2024 16:42:44 - INFO - llmtuner.extras.callbacks - {'loss': 2.2943, 'learning_rate': 4.9768e-05, 'epoch': 0.13}
+
+05/05/2024 16:43:03 - INFO - llmtuner.extras.callbacks - {'loss': 1.9571, 'learning_rate': 4.9723e-05, 'epoch': 0.14}
+
+05/05/2024 16:43:21 - INFO - llmtuner.extras.callbacks - {'loss': 2.1024, 'learning_rate': 4.9675e-05, 'epoch': 0.15}
+
+05/05/2024 16:43:41 - INFO - llmtuner.extras.callbacks - {'loss': 1.9145, 'learning_rate': 4.9624e-05, 'epoch': 0.17}
+
+05/05/2024 16:44:01 - INFO - llmtuner.extras.callbacks - {'loss': 2.1472, 'learning_rate': 4.9568e-05, 'epoch': 0.18}
+
+05/05/2024 16:44:21 - INFO - llmtuner.extras.callbacks - {'loss': 2.0441, 'learning_rate': 4.9509e-05, 'epoch': 0.19}
+
+05/05/2024 16:44:39 - INFO - llmtuner.extras.callbacks - {'loss': 2.1605, 'learning_rate': 4.9446e-05, 'epoch': 0.20}
+
+05/05/2024 16:44:57 - INFO - llmtuner.extras.callbacks - {'loss': 2.2801, 'learning_rate': 4.9379e-05, 'epoch': 0.21}
+
+05/05/2024 16:45:16 - INFO - llmtuner.extras.callbacks - {'loss': 2.2327, 'learning_rate': 4.9309e-05, 'epoch': 0.22}
+
+05/05/2024 16:45:36 - INFO - llmtuner.extras.callbacks - {'loss': 2.0031, 'learning_rate': 4.9234e-05, 'epoch': 0.24}
+
+05/05/2024 16:45:36 - INFO - transformers.trainer - Saving model checkpoint to saves/LLaMA-7B/lora/custom1/checkpoint-100
+
+05/05/2024 16:45:36 - INFO - transformers.configuration_utils - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--huggyllama--llama-7b/snapshots/8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16/config.json
+
+05/05/2024 16:45:36 - INFO - transformers.configuration_utils - Model config LlamaConfig {
+  "_name_or_path": "/home/sgugger/tmp/llama/llama-7b/",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 2048,
+  "max_sequence_length": 2048,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 32,
+  "pad_token_id": 0,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.40.1",
+  "use_cache": true,
+  "vocab_size": 32000
+}
+
+
+05/05/2024 16:45:36 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves/LLaMA-7B/lora/custom1/checkpoint-100/tokenizer_config.json
+
+05/05/2024 16:45:36 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves/LLaMA-7B/lora/custom1/checkpoint-100/special_tokens_map.json
+
+05/05/2024 16:45:55 - INFO - llmtuner.extras.callbacks - {'loss': 1.8637, 'learning_rate': 4.9156e-05, 'epoch': 0.25}
+
+05/05/2024 16:46:15 - INFO - llmtuner.extras.callbacks - {'loss': 1.9813, 'learning_rate': 4.9074e-05, 'epoch': 0.26}
+
+05/05/2024 16:46:34 - INFO - llmtuner.extras.callbacks - {'loss': 2.1698, 'learning_rate': 4.8989e-05, 'epoch': 0.27}
+
+05/05/2024 16:46:52 - INFO - llmtuner.extras.callbacks - {'loss': 2.1691, 'learning_rate': 4.8900e-05, 'epoch': 0.28}
+
+05/05/2024 16:47:15 - INFO - llmtuner.extras.callbacks - {'loss': 2.1437, 'learning_rate': 4.8807e-05, 'epoch': 0.30}
+
+05/05/2024 16:47:34 - INFO - llmtuner.extras.callbacks - {'loss': 2.0780, 'learning_rate': 4.8710e-05, 'epoch': 0.31}
+
+05/05/2024 16:47:56 - INFO - llmtuner.extras.callbacks - {'loss': 2.0338, 'learning_rate': 4.8610e-05, 'epoch': 0.32}
+
+05/05/2024 16:48:16 - INFO - llmtuner.extras.callbacks - {'loss': 2.1387, 'learning_rate': 4.8506e-05, 'epoch': 0.33}
+
+05/05/2024 16:48:35 - INFO - llmtuner.extras.callbacks - {'loss': 2.0853, 'learning_rate': 4.8399e-05, 'epoch': 0.34}
+
+05/05/2024 16:48:55 - INFO - llmtuner.extras.callbacks - {'loss': 1.9919, 'learning_rate': 4.8288e-05, 'epoch': 0.36}
+
+05/05/2024 16:49:17 - INFO - llmtuner.extras.callbacks - {'loss': 2.1078, 'learning_rate': 4.8173e-05, 'epoch': 0.37}
+
+05/05/2024 16:49:38 - INFO - llmtuner.extras.callbacks - {'loss': 1.9385, 'learning_rate': 4.8055e-05, 'epoch': 0.38}
+
+05/05/2024 16:49:58 - INFO - llmtuner.extras.callbacks - {'loss': 1.8268, 'learning_rate': 4.7934e-05, 'epoch': 0.39}
+
+05/05/2024 16:50:18 - INFO - llmtuner.extras.callbacks - {'loss': 1.8595, 'learning_rate': 4.7808e-05, 'epoch': 0.40}
+
+05/05/2024 16:50:38 - INFO - llmtuner.extras.callbacks - {'loss': 2.0050, 'learning_rate': 4.7679e-05, 'epoch': 0.41}
+
+05/05/2024 16:50:57 - INFO - llmtuner.extras.callbacks - {'loss': 2.0088, 'learning_rate': 4.7547e-05, 'epoch': 0.43}
+
+05/05/2024 16:51:17 - INFO - llmtuner.extras.callbacks - {'loss': 2.1591, 'learning_rate': 4.7412e-05, 'epoch': 0.44}
+
+05/05/2024 16:51:37 - INFO - llmtuner.extras.callbacks - {'loss': 1.9772, 'learning_rate': 4.7272e-05, 'epoch': 0.45}
+
+05/05/2024 16:51:56 - INFO - llmtuner.extras.callbacks - {'loss': 1.9476, 'learning_rate': 4.7130e-05, 'epoch': 0.46}
+
+05/05/2024 16:52:16 - INFO - llmtuner.extras.callbacks - {'loss': 2.0495, 'learning_rate': 4.6984e-05, 'epoch': 0.47}
+
+05/05/2024 16:52:16 - INFO - transformers.trainer - Saving model checkpoint to saves/LLaMA-7B/lora/custom1/checkpoint-200
+
+05/05/2024 16:52:16 - INFO - transformers.configuration_utils - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--huggyllama--llama-7b/snapshots/8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16/config.json
+
+05/05/2024 16:52:16 - INFO - transformers.configuration_utils - Model config LlamaConfig {
+  "_name_or_path": "/home/sgugger/tmp/llama/llama-7b/",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 2048,
+  "max_sequence_length": 2048,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 32,
+  "pad_token_id": 0,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.40.1",
+  "use_cache": true,
+  "vocab_size": 32000
+}
+
+
+05/05/2024 16:52:16 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves/LLaMA-7B/lora/custom1/checkpoint-200/tokenizer_config.json
+
+05/05/2024 16:52:16 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves/LLaMA-7B/lora/custom1/checkpoint-200/special_tokens_map.json
+
+05/05/2024 16:52:36 - INFO - llmtuner.extras.callbacks - {'loss': 2.0267, 'learning_rate': 4.6834e-05, 'epoch': 0.49}
+
+05/05/2024 16:52:55 - INFO - llmtuner.extras.callbacks - {'loss': 1.8998, 'learning_rate': 4.6682e-05, 'epoch': 0.50}
+
+05/05/2024 16:53:14 - INFO - llmtuner.extras.callbacks - {'loss': 1.9095, 'learning_rate': 4.6525e-05, 'epoch': 0.51}
+
+05/05/2024 16:53:34 - INFO - llmtuner.extras.callbacks - {'loss': 1.9350, 'learning_rate': 4.6366e-05, 'epoch': 0.52}
+
+05/05/2024 16:53:53 - INFO - llmtuner.extras.callbacks - {'loss': 2.0840, 'learning_rate': 4.6203e-05, 'epoch': 0.53}
+
+05/05/2024 16:54:13 - INFO - llmtuner.extras.callbacks - {'loss': 1.9387, 'learning_rate': 4.6037e-05, 'epoch': 0.54}
+
+05/05/2024 16:54:34 - INFO - llmtuner.extras.callbacks - {'loss': 1.8710, 'learning_rate': 4.5868e-05, 'epoch': 0.56}
+
+05/05/2024 16:54:54 - INFO - llmtuner.extras.callbacks - {'loss': 1.9629, 'learning_rate': 4.5696e-05, 'epoch': 0.57}
+
+05/05/2024 16:55:13 - INFO - llmtuner.extras.callbacks - {'loss': 2.1182, 'learning_rate': 4.5520e-05, 'epoch': 0.58}
+
+05/05/2024 16:55:31 - INFO - llmtuner.extras.callbacks - {'loss': 1.8874, 'learning_rate': 4.5341e-05, 'epoch': 0.59}
+
+05/05/2024 16:55:49 - INFO - llmtuner.extras.callbacks - {'loss': 1.8281, 'learning_rate': 4.5160e-05, 'epoch': 0.60}
+
+05/05/2024 16:56:07 - INFO - llmtuner.extras.callbacks - {'loss': 2.0387, 'learning_rate': 4.4975e-05, 'epoch': 0.62}
+
+05/05/2024 16:56:27 - INFO - llmtuner.extras.callbacks - {'loss': 2.1307, 'learning_rate': 4.4787e-05, 'epoch': 0.63}
+
+05/05/2024 16:56:47 - INFO - llmtuner.extras.callbacks - {'loss': 2.0755, 'learning_rate': 4.4595e-05, 'epoch': 0.64}
+
+05/05/2024 16:57:07 - INFO - llmtuner.extras.callbacks - {'loss': 1.9149, 'learning_rate': 4.4401e-05, 'epoch': 0.65}
+
+05/05/2024 16:57:30 - INFO - llmtuner.extras.callbacks - {'loss': 1.9840, 'learning_rate': 4.4204e-05, 'epoch': 0.66}
+
+05/05/2024 16:57:48 - INFO - llmtuner.extras.callbacks - {'loss': 1.8697, 'learning_rate': 4.4004e-05, 'epoch': 0.67}
+
+05/05/2024 16:58:10 - INFO - llmtuner.extras.callbacks - {'loss': 1.7951, 'learning_rate': 4.3801e-05, 'epoch': 0.69}
+
+05/05/2024 16:58:29 - INFO - llmtuner.extras.callbacks - {'loss': 1.6276, 'learning_rate': 4.3595e-05, 'epoch': 0.70}
+
+05/05/2024 16:58:50 - INFO - llmtuner.extras.callbacks - {'loss': 1.8438, 'learning_rate': 4.3386e-05, 'epoch': 0.71}
+
+05/05/2024 16:58:50 - INFO - transformers.trainer - Saving model checkpoint to saves/LLaMA-7B/lora/custom1/checkpoint-300
+
+05/05/2024 16:58:50 - INFO - transformers.configuration_utils - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--huggyllama--llama-7b/snapshots/8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16/config.json
+
+05/05/2024 16:58:50 - INFO - transformers.configuration_utils - Model config LlamaConfig {
+  "_name_or_path": "/home/sgugger/tmp/llama/llama-7b/",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 2048,
+  "max_sequence_length": 2048,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 32,
+  "pad_token_id": 0,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.40.1",
+  "use_cache": true,
+  "vocab_size": 32000
+}
+
+
+05/05/2024 16:58:50 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves/LLaMA-7B/lora/custom1/checkpoint-300/tokenizer_config.json
+
+05/05/2024 16:58:50 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves/LLaMA-7B/lora/custom1/checkpoint-300/special_tokens_map.json
+
+05/05/2024 16:59:10 - INFO - llmtuner.extras.callbacks - {'loss': 1.7839, 'learning_rate': 4.3175e-05, 'epoch': 0.72}
+
+05/05/2024 16:59:31 - INFO - llmtuner.extras.callbacks - {'loss': 1.8604, 'learning_rate': 4.2960e-05, 'epoch': 0.73}
+
+05/05/2024 16:59:49 - INFO - llmtuner.extras.callbacks - {'loss': 1.9514, 'learning_rate': 4.2743e-05, 'epoch': 0.75}
+
+05/05/2024 17:00:08 - INFO - llmtuner.extras.callbacks - {'loss': 1.9439, 'learning_rate': 4.2523e-05, 'epoch': 0.76}
+
+05/05/2024 17:00:28 - INFO - llmtuner.extras.callbacks - {'loss': 2.0266, 'learning_rate': 4.2301e-05, 'epoch': 0.77}
+
+05/05/2024 17:00:47 - INFO - llmtuner.extras.callbacks - {'loss': 1.9851, 'learning_rate': 4.2076e-05, 'epoch': 0.78}
+
+05/05/2024 17:01:06 - INFO - llmtuner.extras.callbacks - {'loss': 1.9760, 'learning_rate': 4.1848e-05, 'epoch': 0.79}
+
+05/05/2024 17:01:26 - INFO - llmtuner.extras.callbacks - {'loss': 1.9039, 'learning_rate': 4.1617e-05, 'epoch': 0.81}
+
+05/05/2024 17:01:45 - INFO - llmtuner.extras.callbacks - {'loss': 1.9577, 'learning_rate': 4.1384e-05, 'epoch': 0.82}
+
+05/05/2024 17:02:03 - INFO - llmtuner.extras.callbacks - {'loss': 1.9182, 'learning_rate': 4.1149e-05, 'epoch': 0.83}
+
+05/05/2024 17:02:23 - INFO - llmtuner.extras.callbacks - {'loss': 1.9574, 'learning_rate': 4.0911e-05, 'epoch': 0.84}
+
+05/05/2024 17:02:41 - INFO - llmtuner.extras.callbacks - {'loss': 1.9129, 'learning_rate': 4.0670e-05, 'epoch': 0.85}
+
+05/05/2024 17:03:04 - INFO - llmtuner.extras.callbacks - {'loss': 2.0108, 'learning_rate': 4.0427e-05, 'epoch': 0.86}
+
+05/05/2024 17:03:23 - INFO - llmtuner.extras.callbacks - {'loss': 2.1509, 'learning_rate': 4.0182e-05, 'epoch': 0.88}
+
+05/05/2024 17:03:43 - INFO - llmtuner.extras.callbacks - {'loss': 2.0100, 'learning_rate': 3.9934e-05, 'epoch': 0.89}
+
+05/05/2024 17:04:02 - INFO - llmtuner.extras.callbacks - {'loss': 2.0312, 'learning_rate': 3.9685e-05, 'epoch': 0.90}
+
+05/05/2024 17:04:21 - INFO - llmtuner.extras.callbacks - {'loss': 1.9705, 'learning_rate': 3.9432e-05, 'epoch': 0.91}
+
+05/05/2024 17:04:41 - INFO - llmtuner.extras.callbacks - {'loss': 2.1293, 'learning_rate': 3.9178e-05, 'epoch': 0.92}
+
+05/05/2024 17:05:01 - INFO - llmtuner.extras.callbacks - {'loss': 2.1897, 'learning_rate': 3.8921e-05, 'epoch': 0.94}
+
+05/05/2024 17:05:20 - INFO - llmtuner.extras.callbacks - {'loss': 1.9660, 'learning_rate': 3.8663e-05, 'epoch': 0.95}
+
+05/05/2024 17:05:20 - INFO - transformers.trainer - Saving model checkpoint to saves/LLaMA-7B/lora/custom1/checkpoint-400
+
+05/05/2024 17:05:20 - INFO - transformers.configuration_utils - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--huggyllama--llama-7b/snapshots/8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16/config.json
+
+05/05/2024 17:05:20 - INFO - transformers.configuration_utils - Model config LlamaConfig {
+  "_name_or_path": "/home/sgugger/tmp/llama/llama-7b/",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 2048,
+  "max_sequence_length": 2048,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 32,
+  "pad_token_id": 0,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.40.1",
+  "use_cache": true,
+  "vocab_size": 32000
+}
+
+
+05/05/2024 17:05:21 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves/LLaMA-7B/lora/custom1/checkpoint-400/tokenizer_config.json
+
+05/05/2024 17:05:21 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves/LLaMA-7B/lora/custom1/checkpoint-400/special_tokens_map.json
+
+05/05/2024 17:05:39 - INFO - llmtuner.extras.callbacks - {'loss': 1.8980, 'learning_rate': 3.8402e-05, 'epoch': 0.96}
+
+05/05/2024 17:05:59 - INFO - llmtuner.extras.callbacks - {'loss': 2.1288, 'learning_rate': 3.8139e-05, 'epoch': 0.97}
+
+05/05/2024 17:06:18 - INFO - llmtuner.extras.callbacks - {'loss': 1.8449, 'learning_rate': 3.7874e-05, 'epoch': 0.98}
+
+05/05/2024 17:06:37 - INFO - llmtuner.extras.callbacks - {'loss': 1.8785, 'learning_rate': 3.7607e-05, 'epoch': 0.99}
+
+05/05/2024 17:06:56 - INFO - llmtuner.extras.callbacks - {'loss': 1.8010, 'learning_rate': 3.7338e-05, 'epoch': 1.01}
+
+05/05/2024 17:07:14 - INFO - llmtuner.extras.callbacks - {'loss': 1.9475, 'learning_rate': 3.7068e-05, 'epoch': 1.02}
+
+05/05/2024 17:07:34 - INFO - llmtuner.extras.callbacks - {'loss': 1.9996, 'learning_rate': 3.6795e-05, 'epoch': 1.03}
+
+05/05/2024 17:07:56 - INFO - llmtuner.extras.callbacks - {'loss': 1.9598, 'learning_rate': 3.6521e-05, 'epoch': 1.04}
+
+05/05/2024 17:08:17 - INFO - llmtuner.extras.callbacks - {'loss': 1.9598, 'learning_rate': 3.6245e-05, 'epoch': 1.05}
+
+05/05/2024 17:08:38 - INFO - llmtuner.extras.callbacks - {'loss': 1.9089, 'learning_rate': 3.5967e-05, 'epoch': 1.07}
+
+05/05/2024 17:08:59 - INFO - llmtuner.extras.callbacks - {'loss': 1.9496, 'learning_rate': 3.5687e-05, 'epoch': 1.08}
+
+05/05/2024 17:09:17 - INFO - llmtuner.extras.callbacks - {'loss': 1.9467, 'learning_rate': 3.5406e-05, 'epoch': 1.09}
+
+05/05/2024 17:09:37 - INFO - llmtuner.extras.callbacks - {'loss': 2.0235, 'learning_rate': 3.5123e-05, 'epoch': 1.10}
+
+05/05/2024 17:09:56 - INFO - llmtuner.extras.callbacks - {'loss': 1.8844, 'learning_rate': 3.4839e-05, 'epoch': 1.11}
+
+05/05/2024 17:10:16 - INFO - llmtuner.extras.callbacks - {'loss': 1.8287, 'learning_rate': 3.4553e-05, 'epoch': 1.12}
+
+05/05/2024 17:10:35 - INFO - llmtuner.extras.callbacks - {'loss': 2.0754, 'learning_rate': 3.4265e-05, 'epoch': 1.14}
+
+05/05/2024 17:10:54 - INFO - llmtuner.extras.callbacks - {'loss': 1.9269, 'learning_rate': 3.3977e-05, 'epoch': 1.15}
+
+05/05/2024 17:11:13 - INFO - llmtuner.extras.callbacks - {'loss': 2.0613, 'learning_rate': 3.3686e-05, 'epoch': 1.16}
+
+05/05/2024 17:11:33 - INFO - llmtuner.extras.callbacks - {'loss': 1.8047, 'learning_rate': 3.3395e-05, 'epoch': 1.17}
+
+05/05/2024 17:11:55 - INFO - llmtuner.extras.callbacks - {'loss': 1.8639, 'learning_rate': 3.3102e-05, 'epoch': 1.18}
+
+05/05/2024 17:11:55 - INFO - transformers.trainer - Saving model checkpoint to saves/LLaMA-7B/lora/custom1/checkpoint-500
+
+05/05/2024 17:11:55 - INFO - transformers.configuration_utils - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--huggyllama--llama-7b/snapshots/8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16/config.json
+
+05/05/2024 17:11:55 - INFO - transformers.configuration_utils - Model config LlamaConfig {
+  "_name_or_path": "/home/sgugger/tmp/llama/llama-7b/",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 2048,
+  "max_sequence_length": 2048,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 32,
+  "pad_token_id": 0,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.40.1",
+  "use_cache": true,
+  "vocab_size": 32000
+}
+
+
+05/05/2024 17:11:55 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves/LLaMA-7B/lora/custom1/checkpoint-500/tokenizer_config.json
+
+05/05/2024 17:11:55 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves/LLaMA-7B/lora/custom1/checkpoint-500/special_tokens_map.json
+
+05/05/2024 17:12:15 - INFO - llmtuner.extras.callbacks - {'loss': 1.9545, 'learning_rate': 3.2808e-05, 'epoch': 1.20}
+
+05/05/2024 17:12:34 - INFO - llmtuner.extras.callbacks - {'loss': 1.8740, 'learning_rate': 3.2513e-05, 'epoch': 1.21}
+
+05/05/2024 17:12:54 - INFO - llmtuner.extras.callbacks - {'loss': 1.9178, 'learning_rate': 3.2216e-05, 'epoch': 1.22}
+
+05/05/2024 17:13:13 - INFO - llmtuner.extras.callbacks - {'loss': 1.9190, 'learning_rate': 3.1919e-05, 'epoch': 1.23}
+
+05/05/2024 17:13:32 - INFO - llmtuner.extras.callbacks - {'loss': 1.9128, 'learning_rate': 3.1620e-05, 'epoch': 1.24}
+
+05/05/2024 17:13:50 - INFO - llmtuner.extras.callbacks - {'loss': 2.0132, 'learning_rate': 3.1321e-05, 'epoch': 1.26}
+
+05/05/2024 17:14:10 - INFO - llmtuner.extras.callbacks - {'loss': 1.9014, 'learning_rate': 3.1020e-05, 'epoch': 1.27}
+
+05/05/2024 17:14:30 - INFO - llmtuner.extras.callbacks - {'loss': 1.8482, 'learning_rate': 3.0718e-05, 'epoch': 1.28}
+
+05/05/2024 17:14:49 - INFO - llmtuner.extras.callbacks - {'loss': 1.7475, 'learning_rate': 3.0416e-05, 'epoch': 1.29}
+
+05/05/2024 17:15:08 - INFO - llmtuner.extras.callbacks - {'loss': 2.0185, 'learning_rate': 3.0113e-05, 'epoch': 1.30}
+
+05/05/2024 17:15:27 - INFO - llmtuner.extras.callbacks - {'loss': 1.8825, 'learning_rate': 2.9809e-05, 'epoch': 1.31}
+
+05/05/2024 17:15:49 - INFO - llmtuner.extras.callbacks - {'loss': 1.8319, 'learning_rate': 2.9504e-05, 'epoch': 1.33}
+
+05/05/2024 17:16:08 - INFO - llmtuner.extras.callbacks - {'loss': 1.9613, 'learning_rate': 2.9199e-05, 'epoch': 1.34}
+
+05/05/2024 17:16:29 - INFO - llmtuner.extras.callbacks - {'loss': 1.9367, 'learning_rate': 2.8892e-05, 'epoch': 1.35}
+
+05/05/2024 17:16:48 - INFO - llmtuner.extras.callbacks - {'loss': 1.8950, 'learning_rate': 2.8586e-05, 'epoch': 1.36}
+
+05/05/2024 17:17:09 - INFO - llmtuner.extras.callbacks - {'loss': 2.0023, 'learning_rate': 2.8279e-05, 'epoch': 1.37}
+
+05/05/2024 17:17:27 - INFO - llmtuner.extras.callbacks - {'loss': 1.9469, 'learning_rate': 2.7971e-05, 'epoch': 1.39}
+
+05/05/2024 17:17:46 - INFO - llmtuner.extras.callbacks - {'loss': 1.7915, 'learning_rate': 2.7663e-05, 'epoch': 1.40}
+
+05/05/2024 17:18:06 - INFO - llmtuner.extras.callbacks - {'loss': 1.8253, 'learning_rate': 2.7354e-05, 'epoch': 1.41}
+
+05/05/2024 17:18:26 - INFO - llmtuner.extras.callbacks - {'loss': 2.0741, 'learning_rate': 2.7045e-05, 'epoch': 1.42}
+
+05/05/2024 17:18:26 - INFO - transformers.trainer - Saving model checkpoint to saves/LLaMA-7B/lora/custom1/checkpoint-600
+
+05/05/2024 17:18:26 - INFO - transformers.configuration_utils - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--huggyllama--llama-7b/snapshots/8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16/config.json
+
+05/05/2024 17:18:26 - INFO - transformers.configuration_utils - Model config LlamaConfig {
+  "_name_or_path": "/home/sgugger/tmp/llama/llama-7b/",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 2048,
+  "max_sequence_length": 2048,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 32,
+  "pad_token_id": 0,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.40.1",
+  "use_cache": true,
+  "vocab_size": 32000
+}
+
+
+05/05/2024 17:18:26 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves/LLaMA-7B/lora/custom1/checkpoint-600/tokenizer_config.json
+
+05/05/2024 17:18:26 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves/LLaMA-7B/lora/custom1/checkpoint-600/special_tokens_map.json
+
+05/05/2024 17:18:45 - INFO - llmtuner.extras.callbacks - {'loss': 1.9010, 'learning_rate': 2.6736e-05, 'epoch': 1.43}
+
+05/05/2024 17:19:06 - INFO - llmtuner.extras.callbacks - {'loss': 2.0099, 'learning_rate': 2.6426e-05, 'epoch': 1.44}
+
+05/05/2024 17:19:25 - INFO - llmtuner.extras.callbacks - {'loss': 1.8853, 'learning_rate': 2.6116e-05, 'epoch': 1.46}
+
+05/05/2024 17:19:43 - INFO - llmtuner.extras.callbacks - {'loss': 1.8205, 'learning_rate': 2.5806e-05, 'epoch': 1.47}
+
+05/05/2024 17:20:03 - INFO - llmtuner.extras.callbacks - {'loss': 2.0851, 'learning_rate': 2.5496e-05, 'epoch': 1.48}
+
+05/05/2024 17:20:24 - INFO - llmtuner.extras.callbacks - {'loss': 2.0913, 'learning_rate': 2.5186e-05, 'epoch': 1.49}
+
+05/05/2024 17:20:43 - INFO - llmtuner.extras.callbacks - {'loss': 1.9521, 'learning_rate': 2.4876e-05, 'epoch': 1.50}
+
+05/05/2024 17:21:04 - INFO - llmtuner.extras.callbacks - {'loss': 1.8525, 'learning_rate': 2.4566e-05, 'epoch': 1.52}
+
+05/05/2024 17:21:25 - INFO - llmtuner.extras.callbacks - {'loss': 1.8034, 'learning_rate': 2.4256e-05, 'epoch': 1.53}
+
+05/05/2024 17:21:44 - INFO - llmtuner.extras.callbacks - {'loss': 2.0530, 'learning_rate': 2.3946e-05, 'epoch': 1.54}
+
+05/05/2024 17:22:03 - INFO - llmtuner.extras.callbacks - {'loss': 1.7877, 'learning_rate': 2.3636e-05, 'epoch': 1.55}
+
+05/05/2024 17:22:22 - INFO - llmtuner.extras.callbacks - {'loss': 1.9865, 'learning_rate': 2.3326e-05, 'epoch': 1.56}
+
+05/05/2024 17:22:41 - INFO - llmtuner.extras.callbacks - {'loss': 1.8799, 'learning_rate': 2.3017e-05, 'epoch': 1.57}
+
+05/05/2024 17:23:00 - INFO - llmtuner.extras.callbacks - {'loss': 1.9329, 'learning_rate': 2.2708e-05, 'epoch': 1.59}
+
+05/05/2024 17:23:18 - INFO - llmtuner.extras.callbacks - {'loss': 1.7828, 'learning_rate': 2.2399e-05, 'epoch': 1.60}
+
+05/05/2024 17:23:37 - INFO - llmtuner.extras.callbacks - {'loss': 2.0114, 'learning_rate': 2.2091e-05, 'epoch': 1.61}
+
+05/05/2024 17:23:56 - INFO - llmtuner.extras.callbacks - {'loss': 1.8671, 'learning_rate': 2.1783e-05, 'epoch': 1.62}
+
+05/05/2024 17:24:14 - INFO - llmtuner.extras.callbacks - {'loss': 1.9453, 'learning_rate': 2.1476e-05, 'epoch': 1.63}
+
+05/05/2024 17:24:36 - INFO - llmtuner.extras.callbacks - {'loss': 1.7151, 'learning_rate': 2.1169e-05, 'epoch': 1.65}
+
+05/05/2024 17:24:54 - INFO - llmtuner.extras.callbacks - {'loss': 1.9591, 'learning_rate': 2.0863e-05, 'epoch': 1.66}
+
+05/05/2024 17:24:54 - INFO - transformers.trainer - Saving model checkpoint to saves/LLaMA-7B/lora/custom1/checkpoint-700
+
+05/05/2024 17:24:54 - INFO - transformers.configuration_utils - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--huggyllama--llama-7b/snapshots/8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16/config.json
+
+05/05/2024 17:24:54 - INFO - transformers.configuration_utils - Model config LlamaConfig {
+  "_name_or_path": "/home/sgugger/tmp/llama/llama-7b/",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 2048,
+  "max_sequence_length": 2048,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 32,
+  "pad_token_id": 0,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.40.1",
+  "use_cache": true,
+  "vocab_size": 32000
+}
+
+
+05/05/2024 17:24:55 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves/LLaMA-7B/lora/custom1/checkpoint-700/tokenizer_config.json
+
+05/05/2024 17:24:55 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves/LLaMA-7B/lora/custom1/checkpoint-700/special_tokens_map.json
+
+05/05/2024 17:25:14 - INFO - llmtuner.extras.callbacks - {'loss': 1.8999, 'learning_rate': 2.0557e-05, 'epoch': 1.67}
+
+05/05/2024 17:25:33 - INFO - llmtuner.extras.callbacks - {'loss': 2.0037, 'learning_rate': 2.0252e-05, 'epoch': 1.68}
+
+05/05/2024 17:25:54 - INFO - llmtuner.extras.callbacks - {'loss': 1.7028, 'learning_rate': 1.9948e-05, 'epoch': 1.69}
+
+05/05/2024 17:26:11 - INFO - llmtuner.extras.callbacks - {'loss': 2.1283, 'learning_rate': 1.9645e-05, 'epoch': 1.71}
+
+05/05/2024 17:26:30 - INFO - llmtuner.extras.callbacks - {'loss': 2.0017, 'learning_rate': 1.9342e-05, 'epoch': 1.72}
+
+05/05/2024 17:26:50 - INFO - llmtuner.extras.callbacks - {'loss': 1.7329, 'learning_rate': 1.9040e-05, 'epoch': 1.73}
+
+05/05/2024 17:27:09 - INFO - llmtuner.extras.callbacks - {'loss': 1.9965, 'learning_rate': 1.8739e-05, 'epoch': 1.74}
+
+05/05/2024 17:27:30 - INFO - llmtuner.extras.callbacks - {'loss': 1.9321, 'learning_rate': 1.8440e-05, 'epoch': 1.75}
+
+05/05/2024 17:27:48 - INFO - llmtuner.extras.callbacks - {'loss': 1.9739, 'learning_rate': 1.8141e-05, 'epoch': 1.76}
+
+05/05/2024 17:28:06 - INFO - llmtuner.extras.callbacks - {'loss': 1.8864, 'learning_rate': 1.7843e-05, 'epoch': 1.78}
+
+05/05/2024 17:28:26 - INFO - llmtuner.extras.callbacks - {'loss': 1.9227, 'learning_rate': 1.7546e-05, 'epoch': 1.79}
+
+05/05/2024 17:28:46 - INFO - llmtuner.extras.callbacks - {'loss': 2.0025, 'learning_rate': 1.7251e-05, 'epoch': 1.80}
+
+05/05/2024 17:29:05 - INFO - llmtuner.extras.callbacks - {'loss': 1.8039, 'learning_rate': 1.6957e-05, 'epoch': 1.81}
+
+05/05/2024 17:29:23 - INFO - llmtuner.extras.callbacks - {'loss': 1.8008, 'learning_rate': 1.6664e-05, 'epoch': 1.82}
+
+05/05/2024 17:29:41 - INFO - llmtuner.extras.callbacks - {'loss': 1.9609, 'learning_rate': 1.6372e-05, 'epoch': 1.84}
+
+05/05/2024 17:30:00 - INFO - llmtuner.extras.callbacks - {'loss': 2.0843, 'learning_rate': 1.6081e-05, 'epoch': 1.85}
+
+05/05/2024 17:30:23 - INFO - llmtuner.extras.callbacks - {'loss': 1.8281, 'learning_rate': 1.5792e-05, 'epoch': 1.86}
+
+05/05/2024 17:30:43 - INFO - llmtuner.extras.callbacks - {'loss': 1.9628, 'learning_rate': 1.5505e-05, 'epoch': 1.87}
+
+05/05/2024 17:31:02 - INFO - llmtuner.extras.callbacks - {'loss': 1.9867, 'learning_rate': 1.5218e-05, 'epoch': 1.88}
+
+05/05/2024 17:31:22 - INFO - llmtuner.extras.callbacks - {'loss': 1.8819, 'learning_rate': 1.4934e-05, 'epoch': 1.89}
+
+05/05/2024 17:31:22 - INFO - transformers.trainer - Saving model checkpoint to saves/LLaMA-7B/lora/custom1/checkpoint-800
+
+05/05/2024 17:31:22 - INFO - transformers.configuration_utils - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--huggyllama--llama-7b/snapshots/8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16/config.json
+
+05/05/2024 17:31:22 - INFO - transformers.configuration_utils - Model config LlamaConfig {
+  "_name_or_path": "/home/sgugger/tmp/llama/llama-7b/",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 2048,
+  "max_sequence_length": 2048,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 32,
+  "pad_token_id": 0,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.40.1",
+  "use_cache": true,
+  "vocab_size": 32000
+}
+
+
+05/05/2024 17:31:22 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves/LLaMA-7B/lora/custom1/checkpoint-800/tokenizer_config.json
+
+05/05/2024 17:31:22 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves/LLaMA-7B/lora/custom1/checkpoint-800/special_tokens_map.json
+
+05/05/2024 17:31:43 - INFO - llmtuner.extras.callbacks - {'loss': 1.8938, 'learning_rate': 1.4651e-05, 'epoch': 1.91}
+
+05/05/2024 17:32:02 - INFO - llmtuner.extras.callbacks - {'loss': 1.6936, 'learning_rate': 1.4369e-05, 'epoch': 1.92}
+
+05/05/2024 17:32:20 - INFO - llmtuner.extras.callbacks - {'loss': 2.1922, 'learning_rate': 1.4089e-05, 'epoch': 1.93}
+
+05/05/2024 17:32:41 - INFO - llmtuner.extras.callbacks - {'loss': 1.8547, 'learning_rate': 1.3811e-05, 'epoch': 1.94}
+
+05/05/2024 17:33:01 - INFO - llmtuner.extras.callbacks - {'loss': 1.9437, 'learning_rate': 1.3534e-05, 'epoch': 1.95}
+
+05/05/2024 17:33:24 - INFO - llmtuner.extras.callbacks - {'loss': 1.7016, 'learning_rate': 1.3260e-05, 'epoch': 1.97}
+
+05/05/2024 17:33:44 - INFO - llmtuner.extras.callbacks - {'loss': 1.8801, 'learning_rate': 1.2987e-05, 'epoch': 1.98}
+
+05/05/2024 17:34:02 - INFO - llmtuner.extras.callbacks - {'loss': 1.9408, 'learning_rate': 1.2716e-05, 'epoch': 1.99}
+
+05/05/2024 17:34:21 - INFO - llmtuner.extras.callbacks - {'loss': 1.6631, 'learning_rate': 1.2446e-05, 'epoch': 2.00}
+
+05/05/2024 17:34:41 - INFO - llmtuner.extras.callbacks - {'loss': 1.8590, 'learning_rate': 1.2179e-05, 'epoch': 2.01}
+
+05/05/2024 17:35:03 - INFO - llmtuner.extras.callbacks - {'loss': 1.9671, 'learning_rate': 1.1914e-05, 'epoch': 2.02}
+
+05/05/2024 17:35:21 - INFO - llmtuner.extras.callbacks - {'loss': 1.8109, 'learning_rate': 1.1650e-05, 'epoch': 2.04}
+
+05/05/2024 17:35:41 - INFO - llmtuner.extras.callbacks - {'loss': 1.8024, 'learning_rate': 1.1389e-05, 'epoch': 2.05}
+
+05/05/2024 17:35:58 - INFO - llmtuner.extras.callbacks - {'loss': 1.8327, 'learning_rate': 1.1130e-05, 'epoch': 2.06}
+
+05/05/2024 17:36:17 - INFO - llmtuner.extras.callbacks - {'loss': 1.9797, 'learning_rate': 1.0873e-05, 'epoch': 2.07}
+
+05/05/2024 17:36:37 - INFO - llmtuner.extras.callbacks - {'loss': 1.8164, 'learning_rate': 1.0618e-05, 'epoch': 2.08}
+
+05/05/2024 17:36:57 - INFO - llmtuner.extras.callbacks - {'loss': 1.7994, 'learning_rate': 1.0366e-05, 'epoch': 2.10}
+
+05/05/2024 17:37:16 - INFO - llmtuner.extras.callbacks - {'loss': 1.9428, 'learning_rate': 1.0115e-05, 'epoch': 2.11}
+
+05/05/2024 17:37:36 - INFO - llmtuner.extras.callbacks - {'loss': 1.9297, 'learning_rate': 9.8672e-06, 'epoch': 2.12}
+
+05/05/2024 17:37:55 - INFO - llmtuner.extras.callbacks - {'loss': 1.8185, 'learning_rate': 9.6215e-06, 'epoch': 2.13}
+
+05/05/2024 17:37:55 - INFO - transformers.trainer - Saving model checkpoint to saves/LLaMA-7B/lora/custom1/checkpoint-900
+
+05/05/2024 17:37:55 - INFO - transformers.configuration_utils - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--huggyllama--llama-7b/snapshots/8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16/config.json
+
+05/05/2024 17:37:55 - INFO - transformers.configuration_utils - Model config LlamaConfig {
+  "_name_or_path": "/home/sgugger/tmp/llama/llama-7b/",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 2048,
+  "max_sequence_length": 2048,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 32,
+  "pad_token_id": 0,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.40.1",
+  "use_cache": true,
+  "vocab_size": 32000
+}
+
+
+05/05/2024 17:37:55 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves/LLaMA-7B/lora/custom1/checkpoint-900/tokenizer_config.json
+
+05/05/2024 17:37:55 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves/LLaMA-7B/lora/custom1/checkpoint-900/special_tokens_map.json
+
+05/05/2024 17:38:16 - INFO - llmtuner.extras.callbacks - {'loss': 1.7902, 'learning_rate': 9.3781e-06, 'epoch': 2.14}
+
+05/05/2024 17:38:37 - INFO - llmtuner.extras.callbacks - {'loss': 1.7111, 'learning_rate': 9.1372e-06, 'epoch': 2.16}
+
+05/05/2024 17:38:58 - INFO - llmtuner.extras.callbacks - {'loss': 2.0400, 'learning_rate': 8.8986e-06, 'epoch': 2.17}
+
+05/05/2024 17:39:16 - INFO - llmtuner.extras.callbacks - {'loss': 1.9046, 'learning_rate': 8.6626e-06, 'epoch': 2.18}
+
+05/05/2024 17:39:34 - INFO - llmtuner.extras.callbacks - {'loss': 2.1426, 'learning_rate': 8.4291e-06, 'epoch': 2.19}
+
+05/05/2024 17:39:55 - INFO - llmtuner.extras.callbacks - {'loss': 2.0743, 'learning_rate': 8.1981e-06, 'epoch': 2.20}
+
+05/05/2024 17:40:15 - INFO - llmtuner.extras.callbacks - {'loss': 1.7135, 'learning_rate': 7.9697e-06, 'epoch': 2.21}
+
+05/05/2024 17:40:34 - INFO - llmtuner.extras.callbacks - {'loss': 1.8054, 'learning_rate': 7.7439e-06, 'epoch': 2.23}
+
+05/05/2024 17:40:52 - INFO - llmtuner.extras.callbacks - {'loss': 1.8244, 'learning_rate': 7.5208e-06, 'epoch': 2.24}
+
+05/05/2024 17:41:14 - INFO - llmtuner.extras.callbacks - {'loss': 1.8097, 'learning_rate': 7.3004e-06, 'epoch': 2.25}
+
+05/05/2024 17:41:32 - INFO - llmtuner.extras.callbacks - {'loss': 1.9919, 'learning_rate': 7.0827e-06, 'epoch': 2.26}
+
+05/05/2024 17:41:51 - INFO - llmtuner.extras.callbacks - {'loss': 1.9084, 'learning_rate': 6.8678e-06, 'epoch': 2.27}
+
+05/05/2024 17:42:11 - INFO - llmtuner.extras.callbacks - {'loss': 1.7835, 'learning_rate': 6.6556e-06, 'epoch': 2.29}
+
+05/05/2024 17:42:30 - INFO - llmtuner.extras.callbacks - {'loss': 1.7542, 'learning_rate': 6.4463e-06, 'epoch': 2.30}
+
+05/05/2024 17:42:49 - INFO - llmtuner.extras.callbacks - {'loss': 2.0351, 'learning_rate': 6.2398e-06, 'epoch': 2.31}
+
+05/05/2024 17:43:10 - INFO - llmtuner.extras.callbacks - {'loss': 1.7908, 'learning_rate': 6.0363e-06, 'epoch': 2.32}
+
+05/05/2024 17:43:29 - INFO - llmtuner.extras.callbacks - {'loss': 1.7511, 'learning_rate': 5.8356e-06, 'epoch': 2.33}
+
+05/05/2024 17:43:48 - INFO - llmtuner.extras.callbacks - {'loss': 1.9621, 'learning_rate': 5.6379e-06, 'epoch': 2.34}
+
+05/05/2024 17:44:07 - INFO - llmtuner.extras.callbacks - {'loss': 2.0578, 'learning_rate': 5.4432e-06, 'epoch': 2.36}
+
+05/05/2024 17:44:29 - INFO - llmtuner.extras.callbacks - {'loss': 1.9198, 'learning_rate': 5.2514e-06, 'epoch': 2.37}
+
863 |
+
05/05/2024 17:44:29 - INFO - transformers.trainer - Saving model checkpoint to saves/LLaMA-7B/lora/custom1/checkpoint-1000
|
864 |
+
|
865 |
+
05/05/2024 17:44:29 - INFO - transformers.configuration_utils - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--huggyllama--llama-7b/snapshots/8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16/config.json
|
866 |
+
|
867 |
+
05/05/2024 17:44:29 - INFO - transformers.configuration_utils - Model config LlamaConfig {
|
868 |
+
"_name_or_path": "/home/sgugger/tmp/llama/llama-7b/",
|
869 |
+
"architectures": [
|
870 |
+
"LlamaForCausalLM"
|
871 |
+
],
|
872 |
+
"attention_bias": false,
|
873 |
+
"attention_dropout": 0.0,
|
874 |
+
"bos_token_id": 1,
|
875 |
+
"eos_token_id": 2,
|
876 |
+
"hidden_act": "silu",
|
877 |
+
"hidden_size": 4096,
|
878 |
+
"initializer_range": 0.02,
|
879 |
+
"intermediate_size": 11008,
|
880 |
+
"max_position_embeddings": 2048,
|
881 |
+
"max_sequence_length": 2048,
|
882 |
+
"model_type": "llama",
|
883 |
+
"num_attention_heads": 32,
|
884 |
+
"num_hidden_layers": 32,
|
885 |
+
"num_key_value_heads": 32,
|
886 |
+
"pad_token_id": 0,
|
887 |
+
"pretraining_tp": 1,
|
888 |
+
"rms_norm_eps": 1e-06,
|
889 |
+
"rope_scaling": null,
|
890 |
+
"rope_theta": 10000.0,
|
891 |
+
"tie_word_embeddings": false,
|
892 |
+
"torch_dtype": "float16",
|
893 |
+
"transformers_version": "4.40.1",
|
894 |
+
"use_cache": true,
|
895 |
+
"vocab_size": 32000
|
896 |
+
}
|
897 |
+
|
898 |
+
|
899 |
+
05/05/2024 17:44:29 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves/LLaMA-7B/lora/custom1/checkpoint-1000/tokenizer_config.json
|
900 |
+
|
901 |
+
05/05/2024 17:44:29 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves/LLaMA-7B/lora/custom1/checkpoint-1000/special_tokens_map.json
|
902 |
+
|
903 |
+
05/05/2024 17:44:48 - INFO - llmtuner.extras.callbacks - {'loss': 1.6089, 'learning_rate': 5.0628e-06, 'epoch': 2.38}
|
904 |
+
|
905 |
+
05/05/2024 17:45:08 - INFO - llmtuner.extras.callbacks - {'loss': 1.9499, 'learning_rate': 4.8772e-06, 'epoch': 2.39}
|
906 |
+
|
907 |
+
05/05/2024 17:45:27 - INFO - llmtuner.extras.callbacks - {'loss': 2.0044, 'learning_rate': 4.6947e-06, 'epoch': 2.40}
|
908 |
+
|
909 |
+
05/05/2024 17:45:47 - INFO - llmtuner.extras.callbacks - {'loss': 2.0896, 'learning_rate': 4.5153e-06, 'epoch': 2.42}
|
910 |
+
|
911 |
+
05/05/2024 17:46:06 - INFO - llmtuner.extras.callbacks - {'loss': 1.8803, 'learning_rate': 4.3390e-06, 'epoch': 2.43}
|
912 |
+
|
913 |
+
05/05/2024 17:46:24 - INFO - llmtuner.extras.callbacks - {'loss': 2.0795, 'learning_rate': 4.1660e-06, 'epoch': 2.44}
|
914 |
+
|
915 |
+
05/05/2024 17:46:43 - INFO - llmtuner.extras.callbacks - {'loss': 1.7221, 'learning_rate': 3.9961e-06, 'epoch': 2.45}
|
916 |
+
|
917 |
+
05/05/2024 17:47:02 - INFO - llmtuner.extras.callbacks - {'loss': 1.6881, 'learning_rate': 3.8295e-06, 'epoch': 2.46}
|
918 |
+
|
919 |
+
05/05/2024 17:47:22 - INFO - llmtuner.extras.callbacks - {'loss': 1.8039, 'learning_rate': 3.6662e-06, 'epoch': 2.47}
|
920 |
+
|
921 |
+
05/05/2024 17:47:40 - INFO - llmtuner.extras.callbacks - {'loss': 1.9333, 'learning_rate': 3.5061e-06, 'epoch': 2.49}
|
922 |
+
|
923 |
+
05/05/2024 17:48:00 - INFO - llmtuner.extras.callbacks - {'loss': 1.6731, 'learning_rate': 3.3494e-06, 'epoch': 2.50}
|
924 |
+
|
925 |
+
05/05/2024 17:48:19 - INFO - llmtuner.extras.callbacks - {'loss': 1.8437, 'learning_rate': 3.1959e-06, 'epoch': 2.51}
|
926 |
+
|
927 |
+
05/05/2024 17:48:38 - INFO - llmtuner.extras.callbacks - {'loss': 1.9577, 'learning_rate': 3.0459e-06, 'epoch': 2.52}
|
928 |
+
|
929 |
+
05/05/2024 17:48:59 - INFO - llmtuner.extras.callbacks - {'loss': 2.0162, 'learning_rate': 2.8992e-06, 'epoch': 2.53}
|
930 |
+
|
931 |
+
05/05/2024 17:49:19 - INFO - llmtuner.extras.callbacks - {'loss': 1.8091, 'learning_rate': 2.7559e-06, 'epoch': 2.55}
|
932 |
+
|
933 |
+
05/05/2024 17:49:40 - INFO - llmtuner.extras.callbacks - {'loss': 1.7485, 'learning_rate': 2.6160e-06, 'epoch': 2.56}
|
934 |
+
|
935 |
+
05/05/2024 17:49:59 - INFO - llmtuner.extras.callbacks - {'loss': 1.9556, 'learning_rate': 2.4796e-06, 'epoch': 2.57}
|
936 |
+
|
937 |
+
05/05/2024 17:50:19 - INFO - llmtuner.extras.callbacks - {'loss': 1.7762, 'learning_rate': 2.3467e-06, 'epoch': 2.58}
|
938 |
+
|
939 |
+
05/05/2024 17:50:38 - INFO - llmtuner.extras.callbacks - {'loss': 1.7463, 'learning_rate': 2.2172e-06, 'epoch': 2.59}
|
940 |
+
|
941 |
+
05/05/2024 17:50:56 - INFO - llmtuner.extras.callbacks - {'loss': 1.9825, 'learning_rate': 2.0913e-06, 'epoch': 2.61}
|
942 |
+
|
943 |
+
05/05/2024 17:50:56 - INFO - transformers.trainer - Saving model checkpoint to saves/LLaMA-7B/lora/custom1/checkpoint-1100
|
944 |
+
|
945 |
+
05/05/2024 17:50:56 - INFO - transformers.configuration_utils - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--huggyllama--llama-7b/snapshots/8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16/config.json
|
946 |
+
|
947 |
+
05/05/2024 17:50:56 - INFO - transformers.configuration_utils - Model config LlamaConfig {
|
948 |
+
"_name_or_path": "/home/sgugger/tmp/llama/llama-7b/",
|
949 |
+
"architectures": [
|
950 |
+
"LlamaForCausalLM"
|
951 |
+
],
|
952 |
+
"attention_bias": false,
|
953 |
+
"attention_dropout": 0.0,
|
954 |
+
"bos_token_id": 1,
|
955 |
+
"eos_token_id": 2,
|
956 |
+
"hidden_act": "silu",
|
957 |
+
"hidden_size": 4096,
|
958 |
+
"initializer_range": 0.02,
|
959 |
+
"intermediate_size": 11008,
|
960 |
+
"max_position_embeddings": 2048,
|
961 |
+
"max_sequence_length": 2048,
|
962 |
+
"model_type": "llama",
|
963 |
+
"num_attention_heads": 32,
|
964 |
+
"num_hidden_layers": 32,
|
965 |
+
"num_key_value_heads": 32,
|
966 |
+
"pad_token_id": 0,
|
967 |
+
"pretraining_tp": 1,
|
968 |
+
"rms_norm_eps": 1e-06,
|
969 |
+
"rope_scaling": null,
|
970 |
+
"rope_theta": 10000.0,
|
971 |
+
"tie_word_embeddings": false,
|
972 |
+
"torch_dtype": "float16",
|
973 |
+
"transformers_version": "4.40.1",
|
974 |
+
"use_cache": true,
|
975 |
+
"vocab_size": 32000
|
976 |
+
}
|
977 |
+
|
978 |
+
|
979 |
+
05/05/2024 17:50:56 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves/LLaMA-7B/lora/custom1/checkpoint-1100/tokenizer_config.json
|
980 |
+
|
981 |
+
05/05/2024 17:50:56 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves/LLaMA-7B/lora/custom1/checkpoint-1100/special_tokens_map.json
|
982 |
+
|
983 |
+
05/05/2024 17:51:15 - INFO - llmtuner.extras.callbacks - {'loss': 1.8317, 'learning_rate': 1.9688e-06, 'epoch': 2.62}
|
984 |
+
|
985 |
+
05/05/2024 17:51:33 - INFO - llmtuner.extras.callbacks - {'loss': 1.7377, 'learning_rate': 1.8500e-06, 'epoch': 2.63}
|
986 |
+
|
987 |
+
05/05/2024 17:51:53 - INFO - llmtuner.extras.callbacks - {'loss': 1.8165, 'learning_rate': 1.7346e-06, 'epoch': 2.64}
|
988 |
+
|
989 |
+
05/05/2024 17:52:11 - INFO - llmtuner.extras.callbacks - {'loss': 1.8233, 'learning_rate': 1.6229e-06, 'epoch': 2.65}
|
990 |
+
|
991 |
+
05/05/2024 17:52:29 - INFO - llmtuner.extras.callbacks - {'loss': 1.9029, 'learning_rate': 1.5148e-06, 'epoch': 2.66}
|
992 |
+
|
993 |
+
05/05/2024 17:52:50 - INFO - llmtuner.extras.callbacks - {'loss': 1.8527, 'learning_rate': 1.4102e-06, 'epoch': 2.68}
|
994 |
+
|
995 |
+
05/05/2024 17:53:08 - INFO - llmtuner.extras.callbacks - {'loss': 1.8855, 'learning_rate': 1.3094e-06, 'epoch': 2.69}
|
996 |
+
|
997 |
+
05/05/2024 17:53:27 - INFO - llmtuner.extras.callbacks - {'loss': 1.8746, 'learning_rate': 1.2121e-06, 'epoch': 2.70}
|
998 |
+
|
999 |
+
05/05/2024 17:53:46 - INFO - llmtuner.extras.callbacks - {'loss': 1.6960, 'learning_rate': 1.1185e-06, 'epoch': 2.71}
|
1000 |
+
|
1001 |
+
05/05/2024 17:54:07 - INFO - llmtuner.extras.callbacks - {'loss': 1.7904, 'learning_rate': 1.0286e-06, 'epoch': 2.72}
|
1002 |
+
|
1003 |
+
05/05/2024 17:54:27 - INFO - llmtuner.extras.callbacks - {'loss': 1.9620, 'learning_rate': 9.4241e-07, 'epoch': 2.74}
|
1004 |
+
|
1005 |
+
05/05/2024 17:54:44 - INFO - llmtuner.extras.callbacks - {'loss': 1.9262, 'learning_rate': 8.5990e-07, 'epoch': 2.75}
|
1006 |
+
|
1007 |
+
05/05/2024 17:55:04 - INFO - llmtuner.extras.callbacks - {'loss': 1.8789, 'learning_rate': 7.8111e-07, 'epoch': 2.76}
|
1008 |
+
|
1009 |
+
05/05/2024 17:55:22 - INFO - llmtuner.extras.callbacks - {'loss': 1.7546, 'learning_rate': 7.0604e-07, 'epoch': 2.77}
|
1010 |
+
|
1011 |
+
05/05/2024 17:55:43 - INFO - llmtuner.extras.callbacks - {'loss': 1.7979, 'learning_rate': 6.3472e-07, 'epoch': 2.78}
|
1012 |
+
|
1013 |
+
05/05/2024 17:56:02 - INFO - llmtuner.extras.callbacks - {'loss': 1.7874, 'learning_rate': 5.6714e-07, 'epoch': 2.79}
|
1014 |
+
|
1015 |
+
05/05/2024 17:56:21 - INFO - llmtuner.extras.callbacks - {'loss': 1.6197, 'learning_rate': 5.0333e-07, 'epoch': 2.81}
|
1016 |
+
|
1017 |
+
05/05/2024 17:56:39 - INFO - llmtuner.extras.callbacks - {'loss': 1.5225, 'learning_rate': 4.4328e-07, 'epoch': 2.82}
|
1018 |
+
|
1019 |
+
05/05/2024 17:56:57 - INFO - llmtuner.extras.callbacks - {'loss': 1.9811, 'learning_rate': 3.8702e-07, 'epoch': 2.83}
|
1020 |
+
|
1021 |
+
05/05/2024 17:57:16 - INFO - llmtuner.extras.callbacks - {'loss': 1.9999, 'learning_rate': 3.3455e-07, 'epoch': 2.84}
|
1022 |
+
|
1023 |
+
05/05/2024 17:57:16 - INFO - transformers.trainer - Saving model checkpoint to saves/LLaMA-7B/lora/custom1/checkpoint-1200
|
1024 |
+
|
1025 |
+
05/05/2024 17:57:17 - INFO - transformers.configuration_utils - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--huggyllama--llama-7b/snapshots/8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16/config.json
|
1026 |
+
|
1027 |
+
05/05/2024 17:57:17 - INFO - transformers.configuration_utils - Model config LlamaConfig {
|
1028 |
+
"_name_or_path": "/home/sgugger/tmp/llama/llama-7b/",
|
1029 |
+
"architectures": [
|
1030 |
+
"LlamaForCausalLM"
|
1031 |
+
],
|
1032 |
+
"attention_bias": false,
|
1033 |
+
"attention_dropout": 0.0,
|
1034 |
+
"bos_token_id": 1,
|
1035 |
+
"eos_token_id": 2,
|
1036 |
+
"hidden_act": "silu",
|
1037 |
+
"hidden_size": 4096,
|
1038 |
+
"initializer_range": 0.02,
|
1039 |
+
"intermediate_size": 11008,
|
1040 |
+
"max_position_embeddings": 2048,
|
1041 |
+
"max_sequence_length": 2048,
|
1042 |
+
"model_type": "llama",
|
1043 |
+
"num_attention_heads": 32,
|
1044 |
+
"num_hidden_layers": 32,
|
1045 |
+
"num_key_value_heads": 32,
|
1046 |
+
"pad_token_id": 0,
|
1047 |
+
"pretraining_tp": 1,
|
1048 |
+
"rms_norm_eps": 1e-06,
|
1049 |
+
"rope_scaling": null,
|
1050 |
+
"rope_theta": 10000.0,
|
1051 |
+
"tie_word_embeddings": false,
|
1052 |
+
"torch_dtype": "float16",
|
1053 |
+
"transformers_version": "4.40.1",
|
1054 |
+
"use_cache": true,
|
1055 |
+
"vocab_size": 32000
|
1056 |
+
}
|
1057 |
+
|
1058 |
+
|
1059 |
+
05/05/2024 17:57:17 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves/LLaMA-7B/lora/custom1/checkpoint-1200/tokenizer_config.json
|
1060 |
+
|
1061 |
+
05/05/2024 17:57:17 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves/LLaMA-7B/lora/custom1/checkpoint-1200/special_tokens_map.json
|
1062 |
+
|
1063 |
+
05/05/2024 17:57:36 - INFO - llmtuner.extras.callbacks - {'loss': 2.1399, 'learning_rate': 2.8587e-07, 'epoch': 2.85}
|
1064 |
+
|
1065 |
+
05/05/2024 17:57:57 - INFO - llmtuner.extras.callbacks - {'loss': 1.8664, 'learning_rate': 2.4100e-07, 'epoch': 2.87}
|
1066 |
+
|
1067 |
+
05/05/2024 17:58:16 - INFO - llmtuner.extras.callbacks - {'loss': 1.9633, 'learning_rate': 1.9994e-07, 'epoch': 2.88}
|
1068 |
+
|
1069 |
+
05/05/2024 17:58:36 - INFO - llmtuner.extras.callbacks - {'loss': 2.0106, 'learning_rate': 1.6270e-07, 'epoch': 2.89}
|
1070 |
+
|
1071 |
+
05/05/2024 17:58:55 - INFO - llmtuner.extras.callbacks - {'loss': 1.7896, 'learning_rate': 1.2928e-07, 'epoch': 2.90}
|
1072 |
+
|
1073 |
+
05/05/2024 17:59:16 - INFO - llmtuner.extras.callbacks - {'loss': 1.8759, 'learning_rate': 9.9692e-08, 'epoch': 2.91}
|
1074 |
+
|
1075 |
+
05/05/2024 17:59:35 - INFO - llmtuner.extras.callbacks - {'loss': 1.9413, 'learning_rate': 7.3935e-08, 'epoch': 2.92}
|
1076 |
+
|
1077 |
+
05/05/2024 17:59:55 - INFO - llmtuner.extras.callbacks - {'loss': 1.8404, 'learning_rate': 5.2016e-08, 'epoch': 2.94}
|
1078 |
+
|
1079 |
+
05/05/2024 18:00:15 - INFO - llmtuner.extras.callbacks - {'loss': 2.0654, 'learning_rate': 3.3938e-08, 'epoch': 2.95}
|
1080 |
+
|
1081 |
+
05/05/2024 18:00:36 - INFO - llmtuner.extras.callbacks - {'loss': 1.8111, 'learning_rate': 1.9703e-08, 'epoch': 2.96}
|
1082 |
+
|
1083 |
+
05/05/2024 18:00:55 - INFO - llmtuner.extras.callbacks - {'loss': 1.8708, 'learning_rate': 9.3132e-09, 'epoch': 2.97}
|
1084 |
+
|
1085 |
+
05/05/2024 18:01:15 - INFO - llmtuner.extras.callbacks - {'loss': 1.8842, 'learning_rate': 2.7710e-09, 'epoch': 2.98}
|
1086 |
+
|
1087 |
+
05/05/2024 18:01:34 - INFO - llmtuner.extras.callbacks - {'loss': 1.9665, 'learning_rate': 7.6974e-11, 'epoch': 3.00}
|
1088 |
+
|
1089 |
+
05/05/2024 18:01:37 - INFO - transformers.trainer -
|
1090 |
+
|
1091 |
+
Training completed. Do not forget to share your model on huggingface.co/models =)
|
1092 |
+
|
1093 |
+
|
1094 |
+
|
1095 |
+
05/05/2024 18:01:37 - INFO - transformers.trainer - Saving model checkpoint to saves/LLaMA-7B/lora/custom1
|
1096 |
+
|
1097 |
+
05/05/2024 18:01:37 - INFO - transformers.configuration_utils - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--huggyllama--llama-7b/snapshots/8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16/config.json
|
1098 |
+
|
1099 |
+
05/05/2024 18:01:37 - INFO - transformers.configuration_utils - Model config LlamaConfig {
|
1100 |
+
"_name_or_path": "/home/sgugger/tmp/llama/llama-7b/",
|
1101 |
+
"architectures": [
|
1102 |
+
"LlamaForCausalLM"
|
1103 |
+
],
|
1104 |
+
"attention_bias": false,
|
1105 |
+
"attention_dropout": 0.0,
|
1106 |
+
"bos_token_id": 1,
|
1107 |
+
"eos_token_id": 2,
|
1108 |
+
"hidden_act": "silu",
|
1109 |
+
"hidden_size": 4096,
|
1110 |
+
"initializer_range": 0.02,
|
1111 |
+
"intermediate_size": 11008,
|
1112 |
+
"max_position_embeddings": 2048,
|
1113 |
+
"max_sequence_length": 2048,
|
1114 |
+
"model_type": "llama",
|
1115 |
+
"num_attention_heads": 32,
|
1116 |
+
"num_hidden_layers": 32,
|
1117 |
+
"num_key_value_heads": 32,
|
1118 |
+
"pad_token_id": 0,
|
1119 |
+
"pretraining_tp": 1,
|
1120 |
+
"rms_norm_eps": 1e-06,
|
1121 |
+
"rope_scaling": null,
|
1122 |
+
"rope_theta": 10000.0,
|
1123 |
+
"tie_word_embeddings": false,
|
1124 |
+
"torch_dtype": "float16",
|
1125 |
+
"transformers_version": "4.40.1",
|
1126 |
+
"use_cache": true,
|
1127 |
+
"vocab_size": 32000
|
1128 |
+
}
|
1129 |
+
|
1130 |
+
|
1131 |
+
05/05/2024 18:01:37 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves/LLaMA-7B/lora/custom1/tokenizer_config.json
|
1132 |
+
|
1133 |
+
05/05/2024 18:01:37 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves/LLaMA-7B/lora/custom1/special_tokens_map.json
|
1134 |
+
|
1135 |
+
05/05/2024 18:01:37 - INFO - transformers.modelcard - Dropping the following result as it does not have all the necessary fields:
|
1136 |
+
{'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}}
|
1137 |
+
|
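The log ends with the final adapter written to saves/LLaMA-7B/lora/custom1. As a minimal sketch of using that output (assuming the adapter directory is available locally and that fp16 inference on a GPU is acceptable; device placement and generation settings are assumptions, not part of the run above):

```python
# Minimal sketch: load the LoRA adapter saved by this run on top of the base model.
# Paths and model IDs come from the log; dtype/device choices are assumptions.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained(
    "huggyllama/llama-7b", torch_dtype=torch.float16, device_map="auto"
)
model = PeftModel.from_pretrained(base, "saves/LLaMA-7B/lora/custom1")
tokenizer = AutoTokenizer.from_pretrained("saves/LLaMA-7B/lora/custom1")

# The template in tokenizer_config.json below uses "Human:" / "Assistant:" turns.
prompt = "Human: Who are you?\nAssistant: "
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```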
special_tokens_map.json
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"bos_token": {
|
3 |
+
"content": "<s>",
|
4 |
+
"lstrip": false,
|
5 |
+
"normalized": true,
|
6 |
+
"rstrip": false,
|
7 |
+
"single_word": false
|
8 |
+
},
|
9 |
+
"eos_token": {
|
10 |
+
"content": "</s>",
|
11 |
+
"lstrip": false,
|
12 |
+
"normalized": true,
|
13 |
+
"rstrip": false,
|
14 |
+
"single_word": false
|
15 |
+
},
|
16 |
+
"pad_token": "</s>",
|
17 |
+
"unk_token": {
|
18 |
+
"content": "<unk>",
|
19 |
+
"lstrip": false,
|
20 |
+
"normalized": true,
|
21 |
+
"rstrip": false,
|
22 |
+
"single_word": false
|
23 |
+
}
|
24 |
+
}
|
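Note that "pad_token" is set to "</s>", i.e. the EOS token doubles as the padding token, since the original LLaMA tokenizer ships without a dedicated pad token. A quick sanity check (a sketch, assuming the files above sit in saves/LLaMA-7B/lora/custom1):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("saves/LLaMA-7B/lora/custom1")
assert tok.pad_token == tok.eos_token == "</s>"
print(tok.pad_token_id, tok.eos_token_id)  # both 2 in this 32k vocab
```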
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
size 499723
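This is a Git LFS pointer, not the SentencePiece model itself; the 499,723-byte blob is fetched on checkout. Its integrity can be verified against the oid above (a sketch, assuming the real file has been pulled):

```python
import hashlib

# The LFS "oid sha256" is the SHA-256 digest of the stored file's contents.
with open("tokenizer.model", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
assert digest == "9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347"
```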
tokenizer_config.json
ADDED
@@ -0,0 +1,42 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message + '\\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ 'Human: ' + content + '\\nAssistant: ' }}{% elif message['role'] == 'assistant' %}{{ content + '</s>' + '\\n' }}{% endif %}{% endfor %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "model_max_length": 2048,
  "pad_token": "</s>",
  "padding_side": "right",
  "sp_model_kwargs": {},
  "split_special_tokens": false,
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": false
}
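The "chat_template" above (the "default" template selected in trainer_config.yaml) renders conversations as plain Human:/Assistant: turns with an optional leading system line, closing each assistant turn with "</s>". A sketch of how it expands, assuming the tokenizer saved above:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("saves/LLaMA-7B/lora/custom1")
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
    {"role": "assistant", "content": "Hi there."},
]
print(tok.apply_chat_template(messages, tokenize=False))
# You are a helpful assistant.
# Human: Hello!
# Assistant: Hi there.</s>
```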
train_results.json
ADDED
@@ -0,0 +1,8 @@
{
  "epoch": 2.9982238010657194,
  "total_flos": 6.019503790030848e+16,
  "train_loss": 1.9697479162170988,
  "train_runtime": 4932.4291,
  "train_samples_per_second": 4.109,
  "train_steps_per_second": 0.257
}
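These aggregates are internally consistent with trainer_log.jsonl below: 0.257 steps/s over the 4932 s run gives roughly the 1266 optimizer steps the log reports, and 4.109 samples/s implies about 20.3k samples, i.e. roughly 6.8k examples per epoch at the effective batch size of 16. A sketch of the check, assuming both files sit in the working directory:

```python
import json

results = json.load(open("train_results.json"))
steps = results["train_runtime"] * results["train_steps_per_second"]
samples = results["train_runtime"] * results["train_samples_per_second"]
print(round(steps))    # ~1268, matching total_steps=1266 up to rounding
print(round(samples))  # ~20267 samples over ~3 epochs

# Cross-check against the per-step log:
last = [json.loads(line) for line in open("trainer_log.jsonl")][-1]
print(last["total_steps"])  # 1266
```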
trainer_config.yaml
ADDED
@@ -0,0 +1,28 @@
cutoff_len: 1024
dataset: identity
dataset_dir: data
do_train: true
finetuning_type: lora
flash_attn: auto
fp16: true
gradient_accumulation_steps: 8
learning_rate: 5.0e-05
logging_steps: 5
lora_alpha: 16
lora_dropout: 0
lora_rank: 8
lora_target: q_proj,v_proj
lr_scheduler_type: cosine
max_grad_norm: 1.0
max_samples: 100000
model_name_or_path: huggyllama/llama-7b
num_train_epochs: 3.0
optim: adamw_torch
output_dir: saves/LLaMA-7B/lora/custom1
packing: false
per_device_train_batch_size: 2
report_to: none
save_steps: 100
stage: sft
template: default
warmup_steps: 0
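This YAML is the LLaMA-Factory run definition behind the log above (per_device_train_batch_size 2 × gradient_accumulation_steps 8 gives the effective batch size of 16). A minimal sketch of replaying it, under the assumption that this llmtuner version's run_exp entry point accepts a plain dict of arguments; recent LLaMA-Factory releases expose the equivalent as `llamafactory-cli train trainer_config.yaml`:

```python
# Sketch only: `run_exp(args_dict)` is an assumption about this llmtuner era,
# matching the llmtuner.* logger names seen in running_log.txt.
import yaml
from llmtuner import run_exp

with open("trainer_config.yaml") as f:
    args = yaml.safe_load(f)

run_exp(args)
```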
trainer_log.jsonl
ADDED
@@ -0,0 +1,254 @@
{"current_steps": 5, "total_steps": 1266, "loss": 3.876, "learning_rate": 4.9998075682257415e-05, "epoch": 0.011841326228537596, "percentage": 0.39, "elapsed_time": "0:00:18", "remaining_time": "1:15:45"}
{"current_steps": 10, "total_steps": 1266, "loss": 3.8538, "learning_rate": 4.9992303025269555e-05, "epoch": 0.023682652457075192, "percentage": 0.79, "elapsed_time": "0:00:36", "remaining_time": "1:15:47"}
{"current_steps": 15, "total_steps": 1266, "loss": 3.5742, "learning_rate": 4.9982682917710524e-05, "epoch": 0.035523978685612786, "percentage": 1.18, "elapsed_time": "0:00:53", "remaining_time": "1:14:04"}
{"current_steps": 20, "total_steps": 1266, "loss": 3.5193, "learning_rate": 4.996921684055182e-05, "epoch": 0.047365304914150384, "percentage": 1.58, "elapsed_time": "0:01:10", "remaining_time": "1:13:29"}
{"current_steps": 25, "total_steps": 1266, "loss": 2.9465, "learning_rate": 4.9951906866834316e-05, "epoch": 0.05920663114268798, "percentage": 1.97, "elapsed_time": "0:01:27", "remaining_time": "1:12:27"}
{"current_steps": 30, "total_steps": 1266, "loss": 3.0208, "learning_rate": 4.993075566134921e-05, "epoch": 0.07104795737122557, "percentage": 2.37, "elapsed_time": "0:01:44", "remaining_time": "1:11:46"}
{"current_steps": 35, "total_steps": 1266, "loss": 2.7173, "learning_rate": 4.990576648022768e-05, "epoch": 0.08288928359976318, "percentage": 2.76, "elapsed_time": "0:02:02", "remaining_time": "1:11:46"}
{"current_steps": 40, "total_steps": 1266, "loss": 2.6235, "learning_rate": 4.987694317043969e-05, "epoch": 0.09473060982830077, "percentage": 3.16, "elapsed_time": "0:02:19", "remaining_time": "1:11:26"}
{"current_steps": 45, "total_steps": 1266, "loss": 2.4021, "learning_rate": 4.984429016920178e-05, "epoch": 0.10657193605683836, "percentage": 3.55, "elapsed_time": "0:02:40", "remaining_time": "1:12:28"}
{"current_steps": 50, "total_steps": 1266, "loss": 2.1688, "learning_rate": 4.980781250329389e-05, "epoch": 0.11841326228537596, "percentage": 3.95, "elapsed_time": "0:02:59", "remaining_time": "1:12:33"}
{"current_steps": 55, "total_steps": 1266, "loss": 2.2943, "learning_rate": 4.976751578828562e-05, "epoch": 0.13025458851391356, "percentage": 4.34, "elapsed_time": "0:03:19", "remaining_time": "1:13:10"}
{"current_steps": 60, "total_steps": 1266, "loss": 1.9571, "learning_rate": 4.9723406227671643e-05, "epoch": 0.14209591474245115, "percentage": 4.74, "elapsed_time": "0:03:37", "remaining_time": "1:12:57"}
{"current_steps": 65, "total_steps": 1266, "loss": 2.1024, "learning_rate": 4.967549061191679e-05, "epoch": 0.15393724097098876, "percentage": 5.13, "elapsed_time": "0:03:56", "remaining_time": "1:12:52"}
{"current_steps": 70, "total_steps": 1266, "loss": 1.9145, "learning_rate": 4.96237763174106e-05, "epoch": 0.16577856719952636, "percentage": 5.53, "elapsed_time": "0:04:16", "remaining_time": "1:12:56"}
{"current_steps": 75, "total_steps": 1266, "loss": 2.1472, "learning_rate": 4.956827130533185e-05, "epoch": 0.17761989342806395, "percentage": 5.92, "elapsed_time": "0:04:36", "remaining_time": "1:13:14"}
{"current_steps": 80, "total_steps": 1266, "loss": 2.0441, "learning_rate": 4.95089841204229e-05, "epoch": 0.18946121965660154, "percentage": 6.32, "elapsed_time": "0:04:55", "remaining_time": "1:13:06"}
{"current_steps": 85, "total_steps": 1266, "loss": 2.1605, "learning_rate": 4.944592388967428e-05, "epoch": 0.20130254588513913, "percentage": 6.71, "elapsed_time": "0:05:14", "remaining_time": "1:12:52"}
{"current_steps": 90, "total_steps": 1266, "loss": 2.2801, "learning_rate": 4.937910032091968e-05, "epoch": 0.21314387211367672, "percentage": 7.11, "elapsed_time": "0:05:32", "remaining_time": "1:12:27"}
{"current_steps": 95, "total_steps": 1266, "loss": 2.2327, "learning_rate": 4.930852370134141e-05, "epoch": 0.22498519834221434, "percentage": 7.5, "elapsed_time": "0:05:51", "remaining_time": "1:12:09"}
{"current_steps": 100, "total_steps": 1266, "loss": 2.0031, "learning_rate": 4.923420489588677e-05, "epoch": 0.23682652457075193, "percentage": 7.9, "elapsed_time": "0:06:10", "remaining_time": "1:12:05"}
{"current_steps": 105, "total_steps": 1266, "loss": 1.8637, "learning_rate": 4.9156155345595445e-05, "epoch": 0.24866785079928952, "percentage": 8.29, "elapsed_time": "0:06:30", "remaining_time": "1:12:00"}
{"current_steps": 110, "total_steps": 1266, "loss": 1.9813, "learning_rate": 4.907438706583818e-05, "epoch": 0.2605091770278271, "percentage": 8.69, "elapsed_time": "0:06:50", "remaining_time": "1:11:55"}
{"current_steps": 115, "total_steps": 1266, "loss": 2.1698, "learning_rate": 4.898891264446709e-05, "epoch": 0.27235050325636473, "percentage": 9.08, "elapsed_time": "0:07:09", "remaining_time": "1:11:38"}
{"current_steps": 120, "total_steps": 1266, "loss": 2.1691, "learning_rate": 4.8899745239877845e-05, "epoch": 0.2841918294849023, "percentage": 9.48, "elapsed_time": "0:07:27", "remaining_time": "1:11:15"}
{"current_steps": 125, "total_steps": 1266, "loss": 2.1437, "learning_rate": 4.880689857898392e-05, "epoch": 0.2960331557134399, "percentage": 9.87, "elapsed_time": "0:07:49", "remaining_time": "1:11:29"}
{"current_steps": 130, "total_steps": 1266, "loss": 2.078, "learning_rate": 4.871038695510347e-05, "epoch": 0.30787448194197753, "percentage": 10.27, "elapsed_time": "0:08:09", "remaining_time": "1:11:13"}
{"current_steps": 135, "total_steps": 1266, "loss": 2.0338, "learning_rate": 4.861022522575892e-05, "epoch": 0.3197158081705151, "percentage": 10.66, "elapsed_time": "0:08:30", "remaining_time": "1:11:19"}
{"current_steps": 140, "total_steps": 1266, "loss": 2.1387, "learning_rate": 4.8506428810389696e-05, "epoch": 0.3315571343990527, "percentage": 11.06, "elapsed_time": "0:08:51", "remaining_time": "1:11:12"}
{"current_steps": 145, "total_steps": 1266, "loss": 2.0853, "learning_rate": 4.839901368797849e-05, "epoch": 0.3433984606275903, "percentage": 11.45, "elapsed_time": "0:09:10", "remaining_time": "1:10:54"}
{"current_steps": 150, "total_steps": 1266, "loss": 1.9919, "learning_rate": 4.828799639459138e-05, "epoch": 0.3552397868561279, "percentage": 11.85, "elapsed_time": "0:09:30", "remaining_time": "1:10:42"}
{"current_steps": 155, "total_steps": 1266, "loss": 2.1078, "learning_rate": 4.8173394020832164e-05, "epoch": 0.36708111308466546, "percentage": 12.24, "elapsed_time": "0:09:52", "remaining_time": "1:10:46"}
{"current_steps": 160, "total_steps": 1266, "loss": 1.9385, "learning_rate": 4.8055224209211316e-05, "epoch": 0.3789224393132031, "percentage": 12.64, "elapsed_time": "0:10:13", "remaining_time": "1:10:40"}
{"current_steps": 165, "total_steps": 1266, "loss": 1.8268, "learning_rate": 4.793350515143007e-05, "epoch": 0.3907637655417407, "percentage": 13.03, "elapsed_time": "0:10:33", "remaining_time": "1:10:27"}
{"current_steps": 170, "total_steps": 1266, "loss": 1.8595, "learning_rate": 4.780825558557981e-05, "epoch": 0.40260509177027826, "percentage": 13.43, "elapsed_time": "0:10:53", "remaining_time": "1:10:12"}
{"current_steps": 175, "total_steps": 1266, "loss": 2.005, "learning_rate": 4.767949479325748e-05, "epoch": 0.4144464179988159, "percentage": 13.82, "elapsed_time": "0:11:13", "remaining_time": "1:09:56"}
{"current_steps": 180, "total_steps": 1266, "loss": 2.0088, "learning_rate": 4.7547242596597274e-05, "epoch": 0.42628774422735344, "percentage": 14.22, "elapsed_time": "0:11:32", "remaining_time": "1:09:36"}
{"current_steps": 185, "total_steps": 1266, "loss": 2.1591, "learning_rate": 4.7411519355219066e-05, "epoch": 0.43812907045589106, "percentage": 14.61, "elapsed_time": "0:11:52", "remaining_time": "1:09:21"}
{"current_steps": 190, "total_steps": 1266, "loss": 1.9772, "learning_rate": 4.727234596309417e-05, "epoch": 0.4499703966844287, "percentage": 15.01, "elapsed_time": "0:12:12", "remaining_time": "1:09:05"}
{"current_steps": 195, "total_steps": 1266, "loss": 1.9476, "learning_rate": 4.71297438453288e-05, "epoch": 0.46181172291296624, "percentage": 15.4, "elapsed_time": "0:12:31", "remaining_time": "1:08:48"}
{"current_steps": 200, "total_steps": 1266, "loss": 2.0495, "learning_rate": 4.698373495486579e-05, "epoch": 0.47365304914150386, "percentage": 15.8, "elapsed_time": "0:12:51", "remaining_time": "1:08:30"}
{"current_steps": 205, "total_steps": 1266, "loss": 2.0267, "learning_rate": 4.683434176910503e-05, "epoch": 0.4854943753700414, "percentage": 16.19, "elapsed_time": "0:13:10", "remaining_time": "1:08:13"}
{"current_steps": 210, "total_steps": 1266, "loss": 1.8998, "learning_rate": 4.6681587286443146e-05, "epoch": 0.49733570159857904, "percentage": 16.59, "elapsed_time": "0:13:30", "remaining_time": "1:07:56"}
{"current_steps": 215, "total_steps": 1266, "loss": 1.9095, "learning_rate": 4.652549502273304e-05, "epoch": 0.5091770278271166, "percentage": 16.98, "elapsed_time": "0:13:49", "remaining_time": "1:07:34"}
{"current_steps": 220, "total_steps": 1266, "loss": 1.935, "learning_rate": 4.636608900766372e-05, "epoch": 0.5210183540556542, "percentage": 17.38, "elapsed_time": "0:14:09", "remaining_time": "1:07:17"}
{"current_steps": 225, "total_steps": 1266, "loss": 2.084, "learning_rate": 4.620339378106102e-05, "epoch": 0.5328596802841918, "percentage": 17.77, "elapsed_time": "0:14:27", "remaining_time": "1:06:55"}
{"current_steps": 230, "total_steps": 1266, "loss": 1.9387, "learning_rate": 4.603743438910986e-05, "epoch": 0.5447010065127295, "percentage": 18.17, "elapsed_time": "0:14:48", "remaining_time": "1:06:40"}
{"current_steps": 235, "total_steps": 1266, "loss": 1.871, "learning_rate": 4.586823638049841e-05, "epoch": 0.5565423327412671, "percentage": 18.56, "elapsed_time": "0:15:08", "remaining_time": "1:06:27"}
{"current_steps": 240, "total_steps": 1266, "loss": 1.9629, "learning_rate": 4.5695825802485085e-05, "epoch": 0.5683836589698046, "percentage": 18.96, "elapsed_time": "0:15:28", "remaining_time": "1:06:10"}
{"current_steps": 245, "total_steps": 1266, "loss": 2.1182, "learning_rate": 4.552022919688861e-05, "epoch": 0.5802249851983422, "percentage": 19.35, "elapsed_time": "0:15:48", "remaining_time": "1:05:50"}
{"current_steps": 250, "total_steps": 1266, "loss": 1.8874, "learning_rate": 4.53414735960021e-05, "epoch": 0.5920663114268798, "percentage": 19.75, "elapsed_time": "0:16:06", "remaining_time": "1:05:27"}
{"current_steps": 255, "total_steps": 1266, "loss": 1.8281, "learning_rate": 4.51595865184315e-05, "epoch": 0.6039076376554174, "percentage": 20.14, "elapsed_time": "0:16:24", "remaining_time": "1:05:03"}
{"current_steps": 260, "total_steps": 1266, "loss": 2.0387, "learning_rate": 4.497459596485924e-05, "epoch": 0.6157489638839551, "percentage": 20.54, "elapsed_time": "0:16:42", "remaining_time": "1:04:38"}
{"current_steps": 265, "total_steps": 1266, "loss": 2.1307, "learning_rate": 4.47865304137337e-05, "epoch": 0.6275902901124926, "percentage": 20.93, "elapsed_time": "0:17:02", "remaining_time": "1:04:20"}
{"current_steps": 270, "total_steps": 1266, "loss": 2.0755, "learning_rate": 4.4595418816885004e-05, "epoch": 0.6394316163410302, "percentage": 21.33, "elapsed_time": "0:17:22", "remaining_time": "1:04:06"}
{"current_steps": 275, "total_steps": 1266, "loss": 1.9149, "learning_rate": 4.440129059506808e-05, "epoch": 0.6512729425695678, "percentage": 21.72, "elapsed_time": "0:17:42", "remaining_time": "1:03:49"}
{"current_steps": 280, "total_steps": 1266, "loss": 1.984, "learning_rate": 4.420417563343346e-05, "epoch": 0.6631142687981054, "percentage": 22.12, "elapsed_time": "0:18:05", "remaining_time": "1:03:41"}
{"current_steps": 285, "total_steps": 1266, "loss": 1.8697, "learning_rate": 4.40041042769266e-05, "epoch": 0.6749555950266429, "percentage": 22.51, "elapsed_time": "0:18:23", "remaining_time": "1:03:18"}
{"current_steps": 290, "total_steps": 1266, "loss": 1.7951, "learning_rate": 4.380110732561637e-05, "epoch": 0.6867969212551805, "percentage": 22.91, "elapsed_time": "0:18:44", "remaining_time": "1:03:05"}
{"current_steps": 295, "total_steps": 1266, "loss": 1.6276, "learning_rate": 4.3595216029953575e-05, "epoch": 0.6986382474837182, "percentage": 23.3, "elapsed_time": "0:19:04", "remaining_time": "1:02:47"}
{"current_steps": 300, "total_steps": 1266, "loss": 1.8438, "learning_rate": 4.3386462085960086e-05, "epoch": 0.7104795737122558, "percentage": 23.7, "elapsed_time": "0:19:25", "remaining_time": "1:02:32"}
{"current_steps": 305, "total_steps": 1266, "loss": 1.7839, "learning_rate": 4.3174877630349366e-05, "epoch": 0.7223208999407934, "percentage": 24.09, "elapsed_time": "0:19:45", "remaining_time": "1:02:15"}
{"current_steps": 310, "total_steps": 1266, "loss": 1.8604, "learning_rate": 4.296049523557917e-05, "epoch": 0.7341622261693309, "percentage": 24.49, "elapsed_time": "0:20:06", "remaining_time": "1:01:59"}
{"current_steps": 315, "total_steps": 1266, "loss": 1.9514, "learning_rate": 4.2743347904837176e-05, "epoch": 0.7460035523978685, "percentage": 24.88, "elapsed_time": "0:20:24", "remaining_time": "1:01:37"}
{"current_steps": 320, "total_steps": 1266, "loss": 1.9439, "learning_rate": 4.2523469066960295e-05, "epoch": 0.7578448786264061, "percentage": 25.28, "elapsed_time": "0:20:43", "remaining_time": "1:01:16"}
{"current_steps": 325, "total_steps": 1266, "loss": 2.0266, "learning_rate": 4.230089257128842e-05, "epoch": 0.7696862048549438, "percentage": 25.67, "elapsed_time": "0:21:03", "remaining_time": "1:00:58"}
{"current_steps": 330, "total_steps": 1266, "loss": 1.9851, "learning_rate": 4.2075652682453554e-05, "epoch": 0.7815275310834814, "percentage": 26.07, "elapsed_time": "0:21:21", "remaining_time": "1:00:36"}
{"current_steps": 335, "total_steps": 1266, "loss": 1.976, "learning_rate": 4.184778407510484e-05, "epoch": 0.7933688573120189, "percentage": 26.46, "elapsed_time": "0:21:41", "remaining_time": "1:00:15"}
{"current_steps": 340, "total_steps": 1266, "loss": 1.9039, "learning_rate": 4.16173218285706e-05, "epoch": 0.8052101835405565, "percentage": 26.86, "elapsed_time": "0:22:00", "remaining_time": "0:59:57"}
{"current_steps": 345, "total_steps": 1266, "loss": 1.9577, "learning_rate": 4.138430142145805e-05, "epoch": 0.8170515097690941, "percentage": 27.25, "elapsed_time": "0:22:20", "remaining_time": "0:59:39"}
{"current_steps": 350, "total_steps": 1266, "loss": 1.9182, "learning_rate": 4.114875872619147e-05, "epoch": 0.8288928359976317, "percentage": 27.65, "elapsed_time": "0:22:38", "remaining_time": "0:59:15"}
{"current_steps": 355, "total_steps": 1266, "loss": 1.9574, "learning_rate": 4.0910730003489894e-05, "epoch": 0.8407341622261694, "percentage": 28.04, "elapsed_time": "0:22:58", "remaining_time": "0:58:56"}
{"current_steps": 360, "total_steps": 1266, "loss": 1.9129, "learning_rate": 4.067025189678485e-05, "epoch": 0.8525754884547069, "percentage": 28.44, "elapsed_time": "0:23:16", "remaining_time": "0:58:35"}
{"current_steps": 365, "total_steps": 1266, "loss": 2.0108, "learning_rate": 4.042736142657935e-05, "epoch": 0.8644168146832445, "percentage": 28.83, "elapsed_time": "0:23:39", "remaining_time": "0:58:23"}
{"current_steps": 370, "total_steps": 1266, "loss": 2.1509, "learning_rate": 4.018209598474869e-05, "epoch": 0.8762581409117821, "percentage": 29.23, "elapsed_time": "0:23:58", "remaining_time": "0:58:03"}
{"current_steps": 375, "total_steps": 1266, "loss": 2.01, "learning_rate": 3.993449332878418e-05, "epoch": 0.8880994671403197, "percentage": 29.62, "elapsed_time": "0:24:18", "remaining_time": "0:57:44"}
{"current_steps": 380, "total_steps": 1266, "loss": 2.0312, "learning_rate": 3.9684591575980546e-05, "epoch": 0.8999407933688574, "percentage": 30.02, "elapsed_time": "0:24:37", "remaining_time": "0:57:24"}
{"current_steps": 385, "total_steps": 1266, "loss": 1.9705, "learning_rate": 3.943242919756792e-05, "epoch": 0.9117821195973949, "percentage": 30.41, "elapsed_time": "0:24:55", "remaining_time": "0:57:02"}
{"current_steps": 390, "total_steps": 1266, "loss": 2.1293, "learning_rate": 3.917804501278942e-05, "epoch": 0.9236234458259325, "percentage": 30.81, "elapsed_time": "0:25:16", "remaining_time": "0:56:46"}
{"current_steps": 395, "total_steps": 1266, "loss": 2.1897, "learning_rate": 3.8921478182925055e-05, "epoch": 0.9354647720544701, "percentage": 31.2, "elapsed_time": "0:25:36", "remaining_time": "0:56:28"}
{"current_steps": 400, "total_steps": 1266, "loss": 1.966, "learning_rate": 3.8662768205263044e-05, "epoch": 0.9473060982830077, "percentage": 31.6, "elapsed_time": "0:25:55", "remaining_time": "0:56:07"}
{"current_steps": 405, "total_steps": 1266, "loss": 1.898, "learning_rate": 3.8401954907019424e-05, "epoch": 0.9591474245115453, "percentage": 31.99, "elapsed_time": "0:26:14", "remaining_time": "0:55:46"}
{"current_steps": 410, "total_steps": 1266, "loss": 2.1288, "learning_rate": 3.813907843920675e-05, "epoch": 0.9709887507400828, "percentage": 32.39, "elapsed_time": "0:26:34", "remaining_time": "0:55:28"}
{"current_steps": 415, "total_steps": 1266, "loss": 1.8449, "learning_rate": 3.787417927045315e-05, "epoch": 0.9828300769686205, "percentage": 32.78, "elapsed_time": "0:26:52", "remaining_time": "0:55:07"}
{"current_steps": 420, "total_steps": 1266, "loss": 1.8785, "learning_rate": 3.7607298180772236e-05, "epoch": 0.9946714031971581, "percentage": 33.18, "elapsed_time": "0:27:12", "remaining_time": "0:54:47"}
{"current_steps": 425, "total_steps": 1266, "loss": 1.801, "learning_rate": 3.733847625528529e-05, "epoch": 1.0065127294256957, "percentage": 33.57, "elapsed_time": "0:27:31", "remaining_time": "0:54:27"}
{"current_steps": 430, "total_steps": 1266, "loss": 1.9475, "learning_rate": 3.706775487789639e-05, "epoch": 1.0183540556542332, "percentage": 33.97, "elapsed_time": "0:27:49", "remaining_time": "0:54:05"}
{"current_steps": 435, "total_steps": 1266, "loss": 1.9996, "learning_rate": 3.679517572492151e-05, "epoch": 1.030195381882771, "percentage": 34.36, "elapsed_time": "0:28:09", "remaining_time": "0:53:47"}
{"current_steps": 440, "total_steps": 1266, "loss": 1.9598, "learning_rate": 3.652078075867267e-05, "epoch": 1.0420367081113084, "percentage": 34.76, "elapsed_time": "0:28:30", "remaining_time": "0:53:31"}
{"current_steps": 445, "total_steps": 1266, "loss": 1.9598, "learning_rate": 3.624461222099804e-05, "epoch": 1.0538780343398462, "percentage": 35.15, "elapsed_time": "0:28:52", "remaining_time": "0:53:15"}
{"current_steps": 450, "total_steps": 1266, "loss": 1.9089, "learning_rate": 3.596671262677898e-05, "epoch": 1.0657193605683837, "percentage": 35.55, "elapsed_time": "0:29:13", "remaining_time": "0:52:59"}
{"current_steps": 455, "total_steps": 1266, "loss": 1.9496, "learning_rate": 3.568712475738508e-05, "epoch": 1.0775606867969212, "percentage": 35.94, "elapsed_time": "0:29:33", "remaining_time": "0:52:41"}
{"current_steps": 460, "total_steps": 1266, "loss": 1.9467, "learning_rate": 3.5405891654088154e-05, "epoch": 1.089402013025459, "percentage": 36.33, "elapsed_time": "0:29:52", "remaining_time": "0:52:21"}
{"current_steps": 465, "total_steps": 1266, "loss": 2.0235, "learning_rate": 3.5123056611436224e-05, "epoch": 1.1012433392539964, "percentage": 36.73, "elapsed_time": "0:30:12", "remaining_time": "0:52:01"}
{"current_steps": 470, "total_steps": 1266, "loss": 1.8844, "learning_rate": 3.483866317058857e-05, "epoch": 1.1130846654825342, "percentage": 37.12, "elapsed_time": "0:30:31", "remaining_time": "0:51:41"}
{"current_steps": 475, "total_steps": 1266, "loss": 1.8287, "learning_rate": 3.4552755112612714e-05, "epoch": 1.1249259917110717, "percentage": 37.52, "elapsed_time": "0:30:51", "remaining_time": "0:51:23"}
{"current_steps": 480, "total_steps": 1266, "loss": 2.0754, "learning_rate": 3.4265376451744565e-05, "epoch": 1.1367673179396092, "percentage": 37.91, "elapsed_time": "0:31:10", "remaining_time": "0:51:02"}
{"current_steps": 485, "total_steps": 1266, "loss": 1.9269, "learning_rate": 3.397657142861258e-05, "epoch": 1.148608644168147, "percentage": 38.31, "elapsed_time": "0:31:29", "remaining_time": "0:50:43"}
{"current_steps": 490, "total_steps": 1266, "loss": 2.0613, "learning_rate": 3.3686384503427174e-05, "epoch": 1.1604499703966844, "percentage": 38.7, "elapsed_time": "0:31:48", "remaining_time": "0:50:22"}
{"current_steps": 495, "total_steps": 1266, "loss": 1.8047, "learning_rate": 3.339486034913627e-05, "epoch": 1.1722912966252221, "percentage": 39.1, "elapsed_time": "0:32:08", "remaining_time": "0:50:03"}
{"current_steps": 500, "total_steps": 1266, "loss": 1.8639, "learning_rate": 3.3102043844548044e-05, "epoch": 1.1841326228537596, "percentage": 39.49, "elapsed_time": "0:32:30", "remaining_time": "0:49:48"}
{"current_steps": 505, "total_steps": 1266, "loss": 1.9545, "learning_rate": 3.280798006742213e-05, "epoch": 1.1959739490822971, "percentage": 39.89, "elapsed_time": "0:32:49", "remaining_time": "0:49:28"}
{"current_steps": 510, "total_steps": 1266, "loss": 1.874, "learning_rate": 3.2512714287530006e-05, "epoch": 1.2078152753108349, "percentage": 40.28, "elapsed_time": "0:33:09", "remaining_time": "0:49:09"}
{"current_steps": 515, "total_steps": 1266, "loss": 1.9178, "learning_rate": 3.2216291959686006e-05, "epoch": 1.2196566015393724, "percentage": 40.68, "elapsed_time": "0:33:28", "remaining_time": "0:48:49"}
{"current_steps": 520, "total_steps": 1266, "loss": 1.919, "learning_rate": 3.191875871674971e-05, "epoch": 1.2314979277679101, "percentage": 41.07, "elapsed_time": "0:33:48", "remaining_time": "0:48:30"}
{"current_steps": 525, "total_steps": 1266, "loss": 1.9128, "learning_rate": 3.1620160362600984e-05, "epoch": 1.2433392539964476, "percentage": 41.47, "elapsed_time": "0:34:06", "remaining_time": "0:48:09"}
{"current_steps": 530, "total_steps": 1266, "loss": 2.0132, "learning_rate": 3.1320542865088696e-05, "epoch": 1.2551805802249851, "percentage": 41.86, "elapsed_time": "0:34:25", "remaining_time": "0:47:48"}
{"current_steps": 535, "total_steps": 1266, "loss": 1.9014, "learning_rate": 3.101995234895416e-05, "epoch": 1.2670219064535229, "percentage": 42.26, "elapsed_time": "0:34:45", "remaining_time": "0:47:29"}
{"current_steps": 540, "total_steps": 1266, "loss": 1.8482, "learning_rate": 3.071843508873046e-05, "epoch": 1.2788632326820604, "percentage": 42.65, "elapsed_time": "0:35:05", "remaining_time": "0:47:10"}
{"current_steps": 545, "total_steps": 1266, "loss": 1.7475, "learning_rate": 3.0416037501618677e-05, "epoch": 1.290704558910598, "percentage": 43.05, "elapsed_time": "0:35:24", "remaining_time": "0:46:49"}
{"current_steps": 550, "total_steps": 1266, "loss": 2.0185, "learning_rate": 3.0112806140342176e-05, "epoch": 1.3025458851391356, "percentage": 43.44, "elapsed_time": "0:35:43", "remaining_time": "0:46:29"}
{"current_steps": 555, "total_steps": 1266, "loss": 1.8825, "learning_rate": 2.9808787685980054e-05, "epoch": 1.3143872113676731, "percentage": 43.84, "elapsed_time": "0:36:02", "remaining_time": "0:46:10"}
{"current_steps": 560, "total_steps": 1266, "loss": 1.8319, "learning_rate": 2.9504028940780776e-05, "epoch": 1.3262285375962108, "percentage": 44.23, "elapsed_time": "0:36:24", "remaining_time": "0:45:53"}
{"current_steps": 565, "total_steps": 1266, "loss": 1.9613, "learning_rate": 2.9198576820957187e-05, "epoch": 1.3380698638247484, "percentage": 44.63, "elapsed_time": "0:36:43", "remaining_time": "0:45:34"}
{"current_steps": 570, "total_steps": 1266, "loss": 1.9367, "learning_rate": 2.8892478349463986e-05, "epoch": 1.349911190053286, "percentage": 45.02, "elapsed_time": "0:37:04", "remaining_time": "0:45:15"}
{"current_steps": 575, "total_steps": 1266, "loss": 1.895, "learning_rate": 2.858578064875874e-05, "epoch": 1.3617525162818236, "percentage": 45.42, "elapsed_time": "0:37:23", "remaining_time": "0:44:56"}
{"current_steps": 580, "total_steps": 1266, "loss": 2.0023, "learning_rate": 2.8278530933547624e-05, "epoch": 1.373593842510361, "percentage": 45.81, "elapsed_time": "0:37:44", "remaining_time": "0:44:37"}
{"current_steps": 585, "total_steps": 1266, "loss": 1.9469, "learning_rate": 2.79707765035169e-05, "epoch": 1.3854351687388988, "percentage": 46.21, "elapsed_time": "0:38:02", "remaining_time": "0:44:17"}
{"current_steps": 590, "total_steps": 1266, "loss": 1.7915, "learning_rate": 2.7662564736051377e-05, "epoch": 1.3972764949674363, "percentage": 46.6, "elapsed_time": "0:38:20", "remaining_time": "0:43:56"}
{"current_steps": 595, "total_steps": 1266, "loss": 1.8253, "learning_rate": 2.7353943078940875e-05, "epoch": 1.409117821195974, "percentage": 47.0, "elapsed_time": "0:38:41", "remaining_time": "0:43:38"}
{"current_steps": 600, "total_steps": 1266, "loss": 2.0741, "learning_rate": 2.7044959043075814e-05, "epoch": 1.4209591474245116, "percentage": 47.39, "elapsed_time": "0:39:01", "remaining_time": "0:43:18"}
{"current_steps": 605, "total_steps": 1266, "loss": 1.901, "learning_rate": 2.67356601951332e-05, "epoch": 1.432800473653049, "percentage": 47.79, "elapsed_time": "0:39:20", "remaining_time": "0:42:59"}
{"current_steps": 610, "total_steps": 1266, "loss": 2.0099, "learning_rate": 2.64260941502539e-05, "epoch": 1.4446417998815868, "percentage": 48.18, "elapsed_time": "0:39:41", "remaining_time": "0:42:40"}
{"current_steps": 615, "total_steps": 1266, "loss": 1.8853, "learning_rate": 2.611630856471252e-05, "epoch": 1.4564831261101243, "percentage": 48.58, "elapsed_time": "0:40:00", "remaining_time": "0:42:20"}
{"current_steps": 620, "total_steps": 1266, "loss": 1.8205, "learning_rate": 2.5806351128580964e-05, "epoch": 1.468324452338662, "percentage": 48.97, "elapsed_time": "0:40:18", "remaining_time": "0:41:59"}
{"current_steps": 625, "total_steps": 1266, "loss": 2.0851, "learning_rate": 2.5496269558386725e-05, "epoch": 1.4801657785671996, "percentage": 49.37, "elapsed_time": "0:40:38", "remaining_time": "0:41:40"}
{"current_steps": 630, "total_steps": 1266, "loss": 2.0913, "learning_rate": 2.5186111589767187e-05, "epoch": 1.492007104795737, "percentage": 49.76, "elapsed_time": "0:40:59", "remaining_time": "0:41:22"}
{"current_steps": 635, "total_steps": 1266, "loss": 1.9521, "learning_rate": 2.487592497012089e-05, "epoch": 1.5038484310242746, "percentage": 50.16, "elapsed_time": "0:41:18", "remaining_time": "0:41:02"}
{"current_steps": 640, "total_steps": 1266, "loss": 1.8525, "learning_rate": 2.4565757451257128e-05, "epoch": 1.5156897572528123, "percentage": 50.55, "elapsed_time": "0:41:39", "remaining_time": "0:40:44"}
{"current_steps": 645, "total_steps": 1266, "loss": 1.8034, "learning_rate": 2.4255656782044644e-05, "epoch": 1.52753108348135, "percentage": 50.95, "elapsed_time": "0:42:00", "remaining_time": "0:40:26"}
{"current_steps": 650, "total_steps": 1266, "loss": 2.053, "learning_rate": 2.3945670701061033e-05, "epoch": 1.5393724097098875, "percentage": 51.34, "elapsed_time": "0:42:18", "remaining_time": "0:40:06"}
{"current_steps": 655, "total_steps": 1266, "loss": 1.7877, "learning_rate": 2.3635846929243537e-05, "epoch": 1.551213735938425, "percentage": 51.74, "elapsed_time": "0:42:38", "remaining_time": "0:39:46"}
{"current_steps": 660, "total_steps": 1266, "loss": 1.9865, "learning_rate": 2.3326233162542655e-05, "epoch": 1.5630550621669625, "percentage": 52.13, "elapsed_time": "0:42:57", "remaining_time": "0:39:26"}
{"current_steps": 665, "total_steps": 1266, "loss": 1.8799, "learning_rate": 2.3016877064579564e-05, "epoch": 1.5748963883955003, "percentage": 52.53, "elapsed_time": "0:43:15", "remaining_time": "0:39:06"}
{"current_steps": 670, "total_steps": 1266, "loss": 1.9329, "learning_rate": 2.2707826259308492e-05, "epoch": 1.586737714624038, "percentage": 52.92, "elapsed_time": "0:43:35", "remaining_time": "0:38:46"}
{"current_steps": 675, "total_steps": 1266, "loss": 1.7828, "learning_rate": 2.2399128323685286e-05, "epoch": 1.5985790408525755, "percentage": 53.32, "elapsed_time": "0:43:53", "remaining_time": "0:38:25"}
{"current_steps": 680, "total_steps": 1266, "loss": 2.0114, "learning_rate": 2.2090830780343113e-05, "epoch": 1.610420367081113, "percentage": 53.71, "elapsed_time": "0:44:12", "remaining_time": "0:38:05"}
{"current_steps": 685, "total_steps": 1266, "loss": 1.8671, "learning_rate": 2.1782981090276585e-05, "epoch": 1.6222616933096505, "percentage": 54.11, "elapsed_time": "0:44:31", "remaining_time": "0:37:45"}
{"current_steps": 690, "total_steps": 1266, "loss": 1.9453, "learning_rate": 2.147562664553537e-05, "epoch": 1.6341030195381883, "percentage": 54.5, "elapsed_time": "0:44:49", "remaining_time": "0:37:25"}
{"current_steps": 695, "total_steps": 1266, "loss": 1.7151, "learning_rate": 2.1168814761928336e-05, "epoch": 1.645944345766726, "percentage": 54.9, "elapsed_time": "0:45:11", "remaining_time": "0:37:07"}
{"current_steps": 700, "total_steps": 1266, "loss": 1.9591, "learning_rate": 2.0862592671739608e-05, "epoch": 1.6577856719952635, "percentage": 55.29, "elapsed_time": "0:45:29", "remaining_time": "0:36:46"}
{"current_steps": 705, "total_steps": 1266, "loss": 1.8999, "learning_rate": 2.0557007516457288e-05, "epoch": 1.669626998223801, "percentage": 55.69, "elapsed_time": "0:45:48", "remaining_time": "0:36:27"}
{"current_steps": 710, "total_steps": 1266, "loss": 2.0037, "learning_rate": 2.0252106339516272e-05, "epoch": 1.6814683244523385, "percentage": 56.08, "elapsed_time": "0:46:08", "remaining_time": "0:36:07"}
{"current_steps": 715, "total_steps": 1266, "loss": 1.7028, "learning_rate": 1.9947936079056117e-05, "epoch": 1.6933096506808762, "percentage": 56.48, "elapsed_time": "0:46:29", "remaining_time": "0:35:49"}
{"current_steps": 720, "total_steps": 1266, "loss": 2.1283, "learning_rate": 1.964454356069514e-05, "epoch": 1.705150976909414, "percentage": 56.87, "elapsed_time": "0:46:46", "remaining_time": "0:35:28"}
{"current_steps": 725, "total_steps": 1266, "loss": 2.0017, "learning_rate": 1.9341975490321827e-05, "epoch": 1.7169923031379515, "percentage": 57.27, "elapsed_time": "0:47:05", "remaining_time": "0:35:08"}
{"current_steps": 730, "total_steps": 1266, "loss": 1.7329, "learning_rate": 1.9040278446904677e-05, "epoch": 1.728833629366489, "percentage": 57.66, "elapsed_time": "0:47:25", "remaining_time": "0:34:49"}
{"current_steps": 735, "total_steps": 1266, "loss": 1.9965, "learning_rate": 1.873949887532156e-05, "epoch": 1.7406749555950265, "percentage": 58.06, "elapsed_time": "0:47:44", "remaining_time": "0:34:29"}
{"current_steps": 740, "total_steps": 1266, "loss": 1.9321, "learning_rate": 1.8439683079209787e-05, "epoch": 1.7525162818235642, "percentage": 58.45, "elapsed_time": "0:48:05", "remaining_time": "0:34:10"}
{"current_steps": 745, "total_steps": 1266, "loss": 1.9739, "learning_rate": 1.8140877213837823e-05, "epoch": 1.764357608052102, "percentage": 58.85, "elapsed_time": "0:48:23", "remaining_time": "0:33:50"}
{"current_steps": 750, "total_steps": 1266, "loss": 1.8864, "learning_rate": 1.7843127278999943e-05, "epoch": 1.7761989342806395, "percentage": 59.24, "elapsed_time": "0:48:40", "remaining_time": "0:33:29"}
{"current_steps": 755, "total_steps": 1266, "loss": 1.9227, "learning_rate": 1.754647911193473e-05, "epoch": 1.788040260509177, "percentage": 59.64, "elapsed_time": "0:49:01", "remaining_time": "0:33:10"}
{"current_steps": 760, "total_steps": 1266, "loss": 2.0025, "learning_rate": 1.7250978380268694e-05, "epoch": 1.7998815867377145, "percentage": 60.03, "elapsed_time": "0:49:20", "remaining_time": "0:32:51"}
{"current_steps": 765, "total_steps": 1266, "loss": 1.8039, "learning_rate": 1.6956670574985908e-05, "epoch": 1.8117229129662522, "percentage": 60.43, "elapsed_time": "0:49:39", "remaining_time": "0:32:31"}
{"current_steps": 770, "total_steps": 1266, "loss": 1.8008, "learning_rate": 1.6663601003424883e-05, "epoch": 1.82356423919479, "percentage": 60.82, "elapsed_time": "0:49:58", "remaining_time": "0:32:11"}
{"current_steps": 775, "total_steps": 1266, "loss": 1.9609, "learning_rate": 1.6371814782303722e-05, "epoch": 1.8354055654233274, "percentage": 61.22, "elapsed_time": "0:50:16", "remaining_time": "0:31:51"}
{"current_steps": 780, "total_steps": 1266, "loss": 2.0843, "learning_rate": 1.6081356830774625e-05, "epoch": 1.847246891651865, "percentage": 61.61, "elapsed_time": "0:50:35", "remaining_time": "0:31:31"}
{"current_steps": 785, "total_steps": 1266, "loss": 1.8281, "learning_rate": 1.579227186350875e-05, "epoch": 1.8590882178804025, "percentage": 62.01, "elapsed_time": "0:50:58", "remaining_time": "0:31:13"}
{"current_steps": 790, "total_steps": 1266, "loss": 1.9628, "learning_rate": 1.5504604383812646e-05, "epoch": 1.8709295441089402, "percentage": 62.4, "elapsed_time": "0:51:17", "remaining_time": "0:30:54"}
{"current_steps": 795, "total_steps": 1266, "loss": 1.9867, "learning_rate": 1.5218398676777102e-05, "epoch": 1.882770870337478, "percentage": 62.8, "elapsed_time": "0:51:37", "remaining_time": "0:30:35"}
{"current_steps": 800, "total_steps": 1266, "loss": 1.8819, "learning_rate": 1.4933698802459731e-05, "epoch": 1.8946121965660154, "percentage": 63.19, "elapsed_time": "0:51:56", "remaining_time": "0:30:15"}
{"current_steps": 805, "total_steps": 1266, "loss": 1.8938, "learning_rate": 1.4650548589102092e-05, "epoch": 1.906453522794553, "percentage": 63.59, "elapsed_time": "0:52:17", "remaining_time": "0:29:56"}
{"current_steps": 810, "total_steps": 1266, "loss": 1.6936, "learning_rate": 1.436899162638255e-05, "epoch": 1.9182948490230904, "percentage": 63.98, "elapsed_time": "0:52:36", "remaining_time": "0:29:37"}
{"current_steps": 815, "total_steps": 1266, "loss": 2.1922, "learning_rate": 1.4089071258705783e-05, "epoch": 1.9301361752516282, "percentage": 64.38, "elapsed_time": "0:52:55", "remaining_time": "0:29:17"}
{"current_steps": 820, "total_steps": 1266, "loss": 1.8547, "learning_rate": 1.3810830578530225e-05, "epoch": 1.941977501480166, "percentage": 64.77, "elapsed_time": "0:53:16", "remaining_time": "0:28:58"}
{"current_steps": 825, "total_steps": 1266, "loss": 1.9437, "learning_rate": 1.3534312419734066e-05, "epoch": 1.9538188277087034, "percentage": 65.17, "elapsed_time": "0:53:36", "remaining_time": "0:28:39"}
{"current_steps": 830, "total_steps": 1266, "loss": 1.7016, "learning_rate": 1.3259559351021247e-05, "epoch": 1.965660153937241, "percentage": 65.56, "elapsed_time": "0:53:59", "remaining_time": "0:28:21"}
{"current_steps": 835, "total_steps": 1266, "loss": 1.8801, "learning_rate": 1.2986613669368158e-05, "epoch": 1.9775014801657784, "percentage": 65.96, "elapsed_time": "0:54:18", "remaining_time": "0:28:02"}
{"current_steps": 840, "total_steps": 1266, "loss": 1.9408, "learning_rate": 1.271551739351224e-05, "epoch": 1.9893428063943162, "percentage": 66.35, "elapsed_time": "0:54:37", "remaining_time": "0:27:41"}
{"current_steps": 845, "total_steps": 1266, "loss": 1.6631, "learning_rate": 1.2446312257483358e-05, "epoch": 2.001184132622854, "percentage": 66.75, "elapsed_time": "0:54:56", "remaining_time": "0:27:22"}
{"current_steps": 850, "total_steps": 1266, "loss": 1.859, "learning_rate": 1.2179039704179118e-05, "epoch": 2.0130254588513914, "percentage": 67.14, "elapsed_time": "0:55:16", "remaining_time": "0:27:03"}
{"current_steps": 855, "total_steps": 1266, "loss": 1.9671, "learning_rate": 1.1913740878984816e-05, "epoch": 2.024866785079929, "percentage": 67.54, "elapsed_time": "0:55:37", "remaining_time": "0:26:44"}
{"current_steps": 860, "total_steps": 1266, "loss": 1.8109, "learning_rate": 1.1650456623439367e-05, "epoch": 2.0367081113084664, "percentage": 67.93, "elapsed_time": "0:55:56", "remaining_time": "0:26:24"}
{"current_steps": 865, "total_steps": 1266, "loss": 1.8024, "learning_rate": 1.1389227468947906e-05, "epoch": 2.0485494375370044, "percentage": 68.33, "elapsed_time": "0:56:15", "remaining_time": "0:26:05"}
{"current_steps": 870, "total_steps": 1266, "loss": 1.8327, "learning_rate": 1.1130093630542198e-05, "epoch": 2.060390763765542, "percentage": 68.72, "elapsed_time": "0:56:33", "remaining_time": "0:25:44"}
{"current_steps": 875, "total_steps": 1266, "loss": 1.9797, "learning_rate": 1.0873095000689675e-05, "epoch": 2.0722320899940794, "percentage": 69.12, "elapsed_time": "0:56:51", "remaining_time": "0:25:24"}
{"current_steps": 880, "total_steps": 1266, "loss": 1.8164, "learning_rate": 1.0618271143152184e-05, "epoch": 2.084073416222617, "percentage": 69.51, "elapsed_time": "0:57:11", "remaining_time": "0:25:05"}
{"current_steps": 885, "total_steps": 1266, "loss": 1.7994, "learning_rate": 1.0365661286895365e-05, "epoch": 2.0959147424511544, "percentage": 69.91, "elapsed_time": "0:57:32", "remaining_time": "0:24:46"}
{"current_steps": 890, "total_steps": 1266, "loss": 1.9428, "learning_rate": 1.0115304320049479e-05, "epoch": 2.1077560686796923, "percentage": 70.3, "elapsed_time": "0:57:51", "remaining_time": "0:24:26"}
|
179 |
+
{"current_steps": 895, "total_steps": 1266, "loss": 1.9297, "learning_rate": 9.867238783922789e-06, "epoch": 2.11959739490823, "percentage": 70.7, "elapsed_time": "0:58:10", "remaining_time": "0:24:07"}
|
180 |
+
{"current_steps": 900, "total_steps": 1266, "loss": 1.8185, "learning_rate": 9.621502867068285e-06, "epoch": 2.1314387211367674, "percentage": 71.09, "elapsed_time": "0:58:30", "remaining_time": "0:23:47"}
|
181 |
+
{"current_steps": 905, "total_steps": 1266, "loss": 1.7902, "learning_rate": 9.378134399404767e-06, "epoch": 2.143280047365305, "percentage": 71.48, "elapsed_time": "0:58:51", "remaining_time": "0:23:28"}
|
182 |
+
{"current_steps": 910, "total_steps": 1266, "loss": 1.7111, "learning_rate": 9.137170846393054e-06, "epoch": 2.1551213735938424, "percentage": 71.88, "elapsed_time": "0:59:12", "remaining_time": "0:23:09"}
|
183 |
+
{"current_steps": 915, "total_steps": 1266, "loss": 2.04, "learning_rate": 8.898649303268372e-06, "epoch": 2.1669626998223803, "percentage": 72.27, "elapsed_time": "0:59:33", "remaining_time": "0:22:50"}
|
184 |
+
{"current_steps": 920, "total_steps": 1266, "loss": 1.9046, "learning_rate": 8.662606489329711e-06, "epoch": 2.178804026050918, "percentage": 72.67, "elapsed_time": "0:59:51", "remaining_time": "0:22:30"}
|
185 |
+
{"current_steps": 925, "total_steps": 1266, "loss": 2.1426, "learning_rate": 8.429078742287073e-06, "epoch": 2.1906453522794553, "percentage": 73.06, "elapsed_time": "1:00:09", "remaining_time": "0:22:10"}
|
186 |
+
{"current_steps": 930, "total_steps": 1266, "loss": 2.0743, "learning_rate": 8.198102012667407e-06, "epoch": 2.202486678507993, "percentage": 73.46, "elapsed_time": "1:00:30", "remaining_time": "0:21:51"}
|
187 |
+
{"current_steps": 935, "total_steps": 1266, "loss": 1.7135, "learning_rate": 7.969711858280252e-06, "epoch": 2.2143280047365304, "percentage": 73.85, "elapsed_time": "1:00:50", "remaining_time": "0:21:32"}
|
188 |
+
{"current_steps": 940, "total_steps": 1266, "loss": 1.8054, "learning_rate": 7.743943438743676e-06, "epoch": 2.2261693309650683, "percentage": 74.25, "elapsed_time": "1:01:08", "remaining_time": "0:21:12"}
|
189 |
+
{"current_steps": 945, "total_steps": 1266, "loss": 1.8244, "learning_rate": 7.520831510071744e-06, "epoch": 2.238010657193606, "percentage": 74.64, "elapsed_time": "1:01:27", "remaining_time": "0:20:52"}
|
190 |
+
{"current_steps": 950, "total_steps": 1266, "loss": 1.8097, "learning_rate": 7.300410419323869e-06, "epoch": 2.2498519834221433, "percentage": 75.04, "elapsed_time": "1:01:49", "remaining_time": "0:20:33"}
|
191 |
+
{"current_steps": 955, "total_steps": 1266, "loss": 1.9919, "learning_rate": 7.082714099317334e-06, "epoch": 2.261693309650681, "percentage": 75.43, "elapsed_time": "1:02:07", "remaining_time": "0:20:13"}
|
192 |
+
{"current_steps": 960, "total_steps": 1266, "loss": 1.9084, "learning_rate": 6.867776063403411e-06, "epoch": 2.2735346358792183, "percentage": 75.83, "elapsed_time": "1:02:26", "remaining_time": "0:19:54"}
|
193 |
+
{"current_steps": 965, "total_steps": 1266, "loss": 1.7835, "learning_rate": 6.6556294003081914e-06, "epoch": 2.2853759621077563, "percentage": 76.22, "elapsed_time": "1:02:46", "remaining_time": "0:19:34"}
|
194 |
+
{"current_steps": 970, "total_steps": 1266, "loss": 1.7542, "learning_rate": 6.44630676903869e-06, "epoch": 2.297217288336294, "percentage": 76.62, "elapsed_time": "1:03:05", "remaining_time": "0:19:15"}
|
195 |
+
{"current_steps": 975, "total_steps": 1266, "loss": 2.0351, "learning_rate": 6.239840393855184e-06, "epoch": 2.3090586145648313, "percentage": 77.01, "elapsed_time": "1:03:23", "remaining_time": "0:18:55"}
|
196 |
+
{"current_steps": 980, "total_steps": 1266, "loss": 1.7908, "learning_rate": 6.036262059310382e-06, "epoch": 2.320899940793369, "percentage": 77.41, "elapsed_time": "1:03:45", "remaining_time": "0:18:36"}
|
197 |
+
{"current_steps": 985, "total_steps": 1266, "loss": 1.7511, "learning_rate": 5.835603105356396e-06, "epoch": 2.3327412670219063, "percentage": 77.8, "elapsed_time": "1:04:04", "remaining_time": "0:18:16"}
|
198 |
+
{"current_steps": 990, "total_steps": 1266, "loss": 1.9621, "learning_rate": 5.637894422520027e-06, "epoch": 2.3445825932504443, "percentage": 78.2, "elapsed_time": "1:04:23", "remaining_time": "0:17:57"}
|
199 |
+
{"current_steps": 995, "total_steps": 1266, "loss": 2.0578, "learning_rate": 5.443166447147391e-06, "epoch": 2.3564239194789818, "percentage": 78.59, "elapsed_time": "1:04:42", "remaining_time": "0:17:37"}
|
200 |
+
{"current_steps": 1000, "total_steps": 1266, "loss": 1.9198, "learning_rate": 5.251449156718313e-06, "epoch": 2.3682652457075193, "percentage": 78.99, "elapsed_time": "1:05:04", "remaining_time": "0:17:18"}
|
201 |
+
{"current_steps": 1005, "total_steps": 1266, "loss": 1.6089, "learning_rate": 5.062772065231491e-06, "epoch": 2.380106571936057, "percentage": 79.38, "elapsed_time": "1:05:23", "remaining_time": "0:16:58"}
|
202 |
+
{"current_steps": 1010, "total_steps": 1266, "loss": 1.9499, "learning_rate": 4.877164218660901e-06, "epoch": 2.3919478981645943, "percentage": 79.78, "elapsed_time": "1:05:43", "remaining_time": "0:16:39"}
|
203 |
+
{"current_steps": 1015, "total_steps": 1266, "loss": 2.0044, "learning_rate": 4.694654190484327e-06, "epoch": 2.4037892243931323, "percentage": 80.17, "elapsed_time": "1:06:02", "remaining_time": "0:16:19"}
|
204 |
+
{"current_steps": 1020, "total_steps": 1266, "loss": 2.0896, "learning_rate": 4.515270077284595e-06, "epoch": 2.4156305506216698, "percentage": 80.57, "elapsed_time": "1:06:21", "remaining_time": "0:16:00"}
|
205 |
+
{"current_steps": 1025, "total_steps": 1266, "loss": 1.8803, "learning_rate": 4.339039494424263e-06, "epoch": 2.4274718768502073, "percentage": 80.96, "elapsed_time": "1:06:41", "remaining_time": "0:15:40"}
|
206 |
+
{"current_steps": 1030, "total_steps": 1266, "loss": 2.0795, "learning_rate": 4.16598957179431e-06, "epoch": 2.4393132030787448, "percentage": 81.36, "elapsed_time": "1:06:59", "remaining_time": "0:15:20"}
|
207 |
+
{"current_steps": 1035, "total_steps": 1266, "loss": 1.7221, "learning_rate": 3.996146949637658e-06, "epoch": 2.4511545293072823, "percentage": 81.75, "elapsed_time": "1:07:18", "remaining_time": "0:15:01"}
|
208 |
+
{"current_steps": 1040, "total_steps": 1266, "loss": 1.6881, "learning_rate": 3.8295377744479995e-06, "epoch": 2.4629958555358202, "percentage": 82.15, "elapsed_time": "1:07:37", "remaining_time": "0:14:41"}
|
209 |
+
{"current_steps": 1045, "total_steps": 1266, "loss": 1.8039, "learning_rate": 3.6661876949447007e-06, "epoch": 2.4748371817643577, "percentage": 82.54, "elapsed_time": "1:07:56", "remaining_time": "0:14:22"}
|
210 |
+
{"current_steps": 1050, "total_steps": 1266, "loss": 1.9333, "learning_rate": 3.5061218581242535e-06, "epoch": 2.4866785079928952, "percentage": 82.94, "elapsed_time": "1:08:15", "remaining_time": "0:14:02"}
|
211 |
+
{"current_steps": 1055, "total_steps": 1266, "loss": 1.6731, "learning_rate": 3.3493649053890326e-06, "epoch": 2.4985198342214328, "percentage": 83.33, "elapsed_time": "1:08:35", "remaining_time": "0:13:43"}
|
212 |
+
{"current_steps": 1060, "total_steps": 1266, "loss": 1.8437, "learning_rate": 3.1959409687538853e-06, "epoch": 2.5103611604499703, "percentage": 83.73, "elapsed_time": "1:08:54", "remaining_time": "0:13:23"}
|
213 |
+
{"current_steps": 1065, "total_steps": 1266, "loss": 1.9577, "learning_rate": 3.04587366713108e-06, "epoch": 2.522202486678508, "percentage": 84.12, "elapsed_time": "1:09:13", "remaining_time": "0:13:03"}
|
214 |
+
{"current_steps": 1070, "total_steps": 1266, "loss": 2.0162, "learning_rate": 2.8991861026943014e-06, "epoch": 2.5340438129070457, "percentage": 84.52, "elapsed_time": "1:09:33", "remaining_time": "0:12:44"}
|
215 |
+
{"current_steps": 1075, "total_steps": 1266, "loss": 1.8091, "learning_rate": 2.7559008573221717e-06, "epoch": 2.5458851391355832, "percentage": 84.91, "elapsed_time": "1:09:54", "remaining_time": "0:12:25"}
|
216 |
+
{"current_steps": 1080, "total_steps": 1266, "loss": 1.7485, "learning_rate": 2.6160399891218988e-06, "epoch": 2.5577264653641207, "percentage": 85.31, "elapsed_time": "1:10:14", "remaining_time": "0:12:05"}
|
217 |
+
{"current_steps": 1085, "total_steps": 1266, "loss": 1.9556, "learning_rate": 2.4796250290334887e-06, "epoch": 2.5695677915926582, "percentage": 85.7, "elapsed_time": "1:10:34", "remaining_time": "0:11:46"}
|
218 |
+
{"current_steps": 1090, "total_steps": 1266, "loss": 1.7762, "learning_rate": 2.346676977515189e-06, "epoch": 2.581409117821196, "percentage": 86.1, "elapsed_time": "1:10:54", "remaining_time": "0:11:26"}
|
219 |
+
{"current_steps": 1095, "total_steps": 1266, "loss": 1.7463, "learning_rate": 2.21721630131054e-06, "epoch": 2.5932504440497337, "percentage": 86.49, "elapsed_time": "1:11:13", "remaining_time": "0:11:07"}
|
220 |
+
{"current_steps": 1100, "total_steps": 1266, "loss": 1.9825, "learning_rate": 2.0912629302976493e-06, "epoch": 2.605091770278271, "percentage": 86.89, "elapsed_time": "1:11:31", "remaining_time": "0:10:47"}
|
221 |
+
{"current_steps": 1105, "total_steps": 1266, "loss": 1.8317, "learning_rate": 1.968836254421036e-06, "epoch": 2.6169330965068087, "percentage": 87.28, "elapsed_time": "1:11:49", "remaining_time": "0:10:27"}
|
222 |
+
{"current_steps": 1110, "total_steps": 1266, "loss": 1.7377, "learning_rate": 1.849955120706673e-06, "epoch": 2.6287744227353462, "percentage": 87.68, "elapsed_time": "1:12:08", "remaining_time": "0:10:08"}
|
223 |
+
{"current_steps": 1115, "total_steps": 1266, "loss": 1.8165, "learning_rate": 1.7346378303605359e-06, "epoch": 2.640615748963884, "percentage": 88.07, "elapsed_time": "1:12:27", "remaining_time": "0:09:48"}
|
224 |
+
{"current_steps": 1120, "total_steps": 1266, "loss": 1.8233, "learning_rate": 1.6229021359512624e-06, "epoch": 2.6524570751924217, "percentage": 88.47, "elapsed_time": "1:12:46", "remaining_time": "0:09:29"}
|
225 |
+
{"current_steps": 1125, "total_steps": 1266, "loss": 1.9029, "learning_rate": 1.5147652386771848e-06, "epoch": 2.664298401420959, "percentage": 88.86, "elapsed_time": "1:13:04", "remaining_time": "0:09:09"}
|
226 |
+
{"current_steps": 1130, "total_steps": 1266, "loss": 1.8527, "learning_rate": 1.4102437857183155e-06, "epoch": 2.6761397276494967, "percentage": 89.26, "elapsed_time": "1:13:25", "remaining_time": "0:08:50"}
|
227 |
+
{"current_steps": 1135, "total_steps": 1266, "loss": 1.8855, "learning_rate": 1.3093538676735601e-06, "epoch": 2.687981053878034, "percentage": 89.65, "elapsed_time": "1:13:43", "remaining_time": "0:08:30"}
|
228 |
+
{"current_steps": 1140, "total_steps": 1266, "loss": 1.8746, "learning_rate": 1.2121110160836696e-06, "epoch": 2.699822380106572, "percentage": 90.05, "elapsed_time": "1:14:02", "remaining_time": "0:08:11"}
|
229 |
+
{"current_steps": 1145, "total_steps": 1266, "loss": 1.696, "learning_rate": 1.1185302010402105e-06, "epoch": 2.7116637063351097, "percentage": 90.44, "elapsed_time": "1:14:21", "remaining_time": "0:07:51"}
|
230 |
+
{"current_steps": 1150, "total_steps": 1266, "loss": 1.7904, "learning_rate": 1.0286258288810107e-06, "epoch": 2.723505032563647, "percentage": 90.84, "elapsed_time": "1:14:41", "remaining_time": "0:07:32"}
|
231 |
+
{"current_steps": 1155, "total_steps": 1266, "loss": 1.962, "learning_rate": 9.424117399723431e-07, "epoch": 2.7353463587921847, "percentage": 91.23, "elapsed_time": "1:15:01", "remaining_time": "0:07:12"}
|
232 |
+
{"current_steps": 1160, "total_steps": 1266, "loss": 1.9262, "learning_rate": 8.599012065782924e-07, "epoch": 2.747187685020722, "percentage": 91.63, "elapsed_time": "1:15:19", "remaining_time": "0:06:52"}
|
233 |
+
{"current_steps": 1165, "total_steps": 1266, "loss": 1.8789, "learning_rate": 7.811069308175156e-07, "epoch": 2.75902901124926, "percentage": 92.02, "elapsed_time": "1:15:38", "remaining_time": "0:06:33"}
|
234 |
+
{"current_steps": 1170, "total_steps": 1266, "loss": 1.7546, "learning_rate": 7.060410427078473e-07, "epoch": 2.7708703374777977, "percentage": 92.42, "elapsed_time": "1:15:57", "remaining_time": "0:06:13"}
|
235 |
+
{"current_steps": 1175, "total_steps": 1266, "loss": 1.7979, "learning_rate": 6.347150982989159e-07, "epoch": 2.782711663706335, "percentage": 92.81, "elapsed_time": "1:16:18", "remaining_time": "0:05:54"}
|
236 |
+
{"current_steps": 1180, "total_steps": 1266, "loss": 1.7874, "learning_rate": 5.671400778931468e-07, "epoch": 2.7945529899348727, "percentage": 93.21, "elapsed_time": "1:16:37", "remaining_time": "0:05:35"}
|
237 |
+
{"current_steps": 1185, "total_steps": 1266, "loss": 1.6197, "learning_rate": 5.033263843554015e-07, "epoch": 2.80639431616341, "percentage": 93.6, "elapsed_time": "1:16:56", "remaining_time": "0:05:15"}
|
238 |
+
{"current_steps": 1190, "total_steps": 1266, "loss": 1.5225, "learning_rate": 4.4328384151149095e-07, "epoch": 2.818235642391948, "percentage": 94.0, "elapsed_time": "1:17:13", "remaining_time": "0:04:55"}
|
239 |
+
{"current_steps": 1195, "total_steps": 1266, "loss": 1.9811, "learning_rate": 3.8702169263585554e-07, "epoch": 2.8300769686204856, "percentage": 94.39, "elapsed_time": "1:17:32", "remaining_time": "0:04:36"}
|
240 |
+
{"current_steps": 1200, "total_steps": 1266, "loss": 1.9999, "learning_rate": 3.345485990286029e-07, "epoch": 2.841918294849023, "percentage": 94.79, "elapsed_time": "1:17:51", "remaining_time": "0:04:16"}
|
241 |
+
{"current_steps": 1205, "total_steps": 1266, "loss": 2.1399, "learning_rate": 2.8587263868213585e-07, "epoch": 2.8537596210775606, "percentage": 95.18, "elapsed_time": "1:18:11", "remaining_time": "0:03:57"}
|
242 |
+
{"current_steps": 1210, "total_steps": 1266, "loss": 1.8664, "learning_rate": 2.410013050375859e-07, "epoch": 2.865600947306098, "percentage": 95.58, "elapsed_time": "1:18:32", "remaining_time": "0:03:38"}
|
243 |
+
{"current_steps": 1215, "total_steps": 1266, "loss": 1.9633, "learning_rate": 1.999415058312276e-07, "epoch": 2.877442273534636, "percentage": 95.97, "elapsed_time": "1:18:51", "remaining_time": "0:03:18"}
|
244 |
+
{"current_steps": 1220, "total_steps": 1266, "loss": 2.0106, "learning_rate": 1.6269956203107117e-07, "epoch": 2.8892835997631736, "percentage": 96.37, "elapsed_time": "1:19:10", "remaining_time": "0:02:59"}
|
245 |
+
{"current_steps": 1225, "total_steps": 1266, "loss": 1.7896, "learning_rate": 1.2928120686377388e-07, "epoch": 2.901124925991711, "percentage": 96.76, "elapsed_time": "1:19:29", "remaining_time": "0:02:39"}
|
246 |
+
{"current_steps": 1230, "total_steps": 1266, "loss": 1.8759, "learning_rate": 9.969158493204067e-08, "epoch": 2.9129662522202486, "percentage": 97.16, "elapsed_time": "1:19:50", "remaining_time": "0:02:20"}
|
247 |
+
{"current_steps": 1235, "total_steps": 1266, "loss": 1.9413, "learning_rate": 7.393525142262991e-08, "epoch": 2.924807578448786, "percentage": 97.55, "elapsed_time": "1:20:09", "remaining_time": "0:02:00"}
|
248 |
+
{"current_steps": 1240, "total_steps": 1266, "loss": 1.8404, "learning_rate": 5.2016171405103174e-08, "epoch": 2.936648904677324, "percentage": 97.95, "elapsed_time": "1:20:30", "remaining_time": "0:01:41"}
|
249 |
+
{"current_steps": 1245, "total_steps": 1266, "loss": 2.0654, "learning_rate": 3.393771922142741e-08, "epoch": 2.9484902309058616, "percentage": 98.34, "elapsed_time": "1:20:50", "remaining_time": "0:01:21"}
|
250 |
+
{"current_steps": 1250, "total_steps": 1266, "loss": 1.8111, "learning_rate": 1.9702677966507154e-08, "epoch": 2.960331557134399, "percentage": 98.74, "elapsed_time": "1:21:11", "remaining_time": "0:01:02"}
|
251 |
+
{"current_steps": 1255, "total_steps": 1266, "loss": 1.8708, "learning_rate": 9.31323905974113e-09, "epoch": 2.9721728833629366, "percentage": 99.13, "elapsed_time": "1:21:30", "remaining_time": "0:00:42"}
|
252 |
+
{"current_steps": 1260, "total_steps": 1266, "loss": 1.8842, "learning_rate": 2.771001907653226e-09, "epoch": 2.984014209591474, "percentage": 99.53, "elapsed_time": "1:21:49", "remaining_time": "0:00:23"}
|
253 |
+
{"current_steps": 1265, "total_steps": 1266, "loss": 1.9665, "learning_rate": 7.697365768943864e-11, "epoch": 2.995855535820012, "percentage": 99.92, "elapsed_time": "1:22:08", "remaining_time": "0:00:03"}
|
254 |
+
{"current_steps": 1266, "total_steps": 1266, "epoch": 2.9982238010657194, "percentage": 100.0, "elapsed_time": "1:22:12", "remaining_time": "0:00:00"}
|
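Note: trainer_log.jsonl is plain JSON-lines, so the run above can be re-plotted directly. Below is a minimal sketch (not part of this upload) that assumes the file sits in the working directory and that matplotlib is installed; the field names are exactly those in the log lines above.

```python
# Minimal sketch: recover the loss and learning-rate curves from trainer_log.jsonl.
# Assumes trainer_log.jsonl is in the current directory and matplotlib is available.
import json

import matplotlib.pyplot as plt

records = []
with open("trainer_log.jsonl") as f:
    for line in f:
        line = line.strip()
        if line:
            records.append(json.loads(line))

# The final record (step 1266) carries no loss/learning_rate, so filter on key presence.
steps = [r["current_steps"] for r in records if "loss" in r]
losses = [r["loss"] for r in records if "loss" in r]
lrs = [r["learning_rate"] for r in records if "learning_rate" in r]

fig, (ax_loss, ax_lr) = plt.subplots(2, 1, sharex=True, figsize=(8, 6))
ax_loss.plot(steps, losses)
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)
ax_lr.set_ylabel("learning rate")
ax_lr.set_xlabel("step")
fig.tight_layout()
fig.savefig("loss_curve.png")
```

The learning_rate column is consistent with the cosine schedule declared in the README: it decays from 5e-05 toward zero roughly as lr(step) ≈ 2.5e-05 · (1 + cos(π · step / 1266)).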
trainer_state.json
ADDED
@@ -0,0 +1,1801 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 2.9982238010657194,
+  "eval_steps": 500,
+  "global_step": 1266,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {"epoch": 0.011841326228537596, "grad_norm": 0.6710847020149231, "learning_rate": 4.9998075682257415e-05, "loss": 3.876, "step": 5},
+    {"epoch": 0.023682652457075192, "grad_norm": 0.8348745107650757, "learning_rate": 4.9992303025269555e-05, "loss": 3.8538, "step": 10},
+    {"epoch": 0.035523978685612786, "grad_norm": 2.387429714202881, "learning_rate": 4.9982682917710524e-05, "loss": 3.5742, "step": 15},
+    {"epoch": 0.047365304914150384, "grad_norm": 1.0946344137191772, "learning_rate": 4.996921684055182e-05, "loss": 3.5193, "step": 20},
+    {"epoch": 0.05920663114268798, "grad_norm": 1.1773927211761475, "learning_rate": 4.9951906866834316e-05, "loss": 2.9465, "step": 25},
+    {"epoch": 0.07104795737122557, "grad_norm": 1.147207260131836, "learning_rate": 4.993075566134921e-05, "loss": 3.0208, "step": 30},
+    {"epoch": 0.08288928359976318, "grad_norm": 1.8781776428222656, "learning_rate": 4.990576648022768e-05, "loss": 2.7173, "step": 35},
+    {"epoch": 0.09473060982830077, "grad_norm": 1.009155511856079, "learning_rate": 4.987694317043969e-05, "loss": 2.6235, "step": 40},
+    {"epoch": 0.10657193605683836, "grad_norm": 0.9919832944869995, "learning_rate": 4.984429016920178e-05, "loss": 2.4021, "step": 45},
+    {"epoch": 0.11841326228537596, "grad_norm": 0.8563632965087891, "learning_rate": 4.980781250329389e-05, "loss": 2.1688, "step": 50},
+    {"epoch": 0.13025458851391356, "grad_norm": 0.7374653816223145, "learning_rate": 4.976751578828562e-05, "loss": 2.2943, "step": 55},
+    {"epoch": 0.14209591474245115, "grad_norm": 0.9438661336898804, "learning_rate": 4.9723406227671643e-05, "loss": 1.9571, "step": 60},
+    {"epoch": 0.15393724097098876, "grad_norm": 0.9622551202774048, "learning_rate": 4.967549061191679e-05, "loss": 2.1024, "step": 65},
+    {"epoch": 0.16577856719952636, "grad_norm": 0.6480829119682312, "learning_rate": 4.96237763174106e-05, "loss": 1.9145, "step": 70},
+    {"epoch": 0.17761989342806395, "grad_norm": 1.2615852355957031, "learning_rate": 4.956827130533185e-05, "loss": 2.1472, "step": 75},
+    {"epoch": 0.18946121965660154, "grad_norm": 1.0979194641113281, "learning_rate": 4.95089841204229e-05, "loss": 2.0441, "step": 80},
+    {"epoch": 0.20130254588513913, "grad_norm": 0.8610992431640625, "learning_rate": 4.944592388967428e-05, "loss": 2.1605, "step": 85},
+    {"epoch": 0.21314387211367672, "grad_norm": 1.0579572916030884, "learning_rate": 4.937910032091968e-05, "loss": 2.2801, "step": 90},
+    {"epoch": 0.22498519834221434, "grad_norm": 1.0034611225128174, "learning_rate": 4.930852370134141e-05, "loss": 2.2327, "step": 95},
+    {"epoch": 0.23682652457075193, "grad_norm": 0.8670480847358704, "learning_rate": 4.923420489588677e-05, "loss": 2.0031, "step": 100},
+    {"epoch": 0.24866785079928952, "grad_norm": 0.8441548347473145, "learning_rate": 4.9156155345595445e-05, "loss": 1.8637, "step": 105},
+    {"epoch": 0.2605091770278271, "grad_norm": 0.8399316072463989, "learning_rate": 4.907438706583818e-05, "loss": 1.9813, "step": 110},
+    {"epoch": 0.27235050325636473, "grad_norm": 0.9002792835235596, "learning_rate": 4.898891264446709e-05, "loss": 2.1698, "step": 115},
+    {"epoch": 0.2841918294849023, "grad_norm": 1.1735421419143677, "learning_rate": 4.8899745239877845e-05, "loss": 2.1691, "step": 120},
+    {"epoch": 0.2960331557134399, "grad_norm": 1.2177324295043945, "learning_rate": 4.880689857898392e-05, "loss": 2.1437, "step": 125},
+    {"epoch": 0.30787448194197753, "grad_norm": 1.062441349029541, "learning_rate": 4.871038695510347e-05, "loss": 2.078, "step": 130},
+    {"epoch": 0.3197158081705151, "grad_norm": 0.8719960451126099, "learning_rate": 4.861022522575892e-05, "loss": 2.0338, "step": 135},
+    {"epoch": 0.3315571343990527, "grad_norm": 1.042143702507019, "learning_rate": 4.8506428810389696e-05, "loss": 2.1387, "step": 140},
+    {"epoch": 0.3433984606275903, "grad_norm": 0.8554458022117615, "learning_rate": 4.839901368797849e-05, "loss": 2.0853, "step": 145},
+    {"epoch": 0.3552397868561279, "grad_norm": 1.033553957939148, "learning_rate": 4.828799639459138e-05, "loss": 1.9919, "step": 150},
+    {"epoch": 0.36708111308466546, "grad_norm": 0.7688170075416565, "learning_rate": 4.8173394020832164e-05, "loss": 2.1078, "step": 155},
+    {"epoch": 0.3789224393132031, "grad_norm": 0.9199577569961548, "learning_rate": 4.8055224209211316e-05, "loss": 1.9385, "step": 160},
+    {"epoch": 0.3907637655417407, "grad_norm": 1.0168030261993408, "learning_rate": 4.793350515143007e-05, "loss": 1.8268, "step": 165},
+    {"epoch": 0.40260509177027826, "grad_norm": 1.2735998630523682, "learning_rate": 4.780825558557981e-05, "loss": 1.8595, "step": 170},
+    {"epoch": 0.4144464179988159, "grad_norm": 0.7815925478935242, "learning_rate": 4.767949479325748e-05, "loss": 2.005, "step": 175},
+    {"epoch": 0.42628774422735344, "grad_norm": 0.9479308724403381, "learning_rate": 4.7547242596597274e-05, "loss": 2.0088, "step": 180},
+    {"epoch": 0.43812907045589106, "grad_norm": 1.5356885194778442, "learning_rate": 4.7411519355219066e-05, "loss": 2.1591, "step": 185},
+    {"epoch": 0.4499703966844287, "grad_norm": 1.3683360815048218, "learning_rate": 4.727234596309417e-05, "loss": 1.9772, "step": 190},
+    {"epoch": 0.46181172291296624, "grad_norm": 1.2764394283294678, "learning_rate": 4.71297438453288e-05, "loss": 1.9476, "step": 195},
+    {"epoch": 0.47365304914150386, "grad_norm": 1.4485411643981934, "learning_rate": 4.698373495486579e-05, "loss": 2.0495, "step": 200},
+    {"epoch": 0.4854943753700414, "grad_norm": 0.9207140207290649, "learning_rate": 4.683434176910503e-05, "loss": 2.0267, "step": 205},
+    {"epoch": 0.49733570159857904, "grad_norm": 1.0169957876205444, "learning_rate": 4.6681587286443146e-05, "loss": 1.8998, "step": 210},
+    {"epoch": 0.5091770278271166, "grad_norm": 1.0399326086044312, "learning_rate": 4.652549502273304e-05, "loss": 1.9095, "step": 215},
+    {"epoch": 0.5210183540556542, "grad_norm": 1.6732031106948853, "learning_rate": 4.636608900766372e-05, "loss": 1.935, "step": 220},
+    {"epoch": 0.5328596802841918, "grad_norm": 1.1295055150985718, "learning_rate": 4.620339378106102e-05, "loss": 2.084, "step": 225},
+    {"epoch": 0.5447010065127295, "grad_norm": 1.196055293083191, "learning_rate": 4.603743438910986e-05, "loss": 1.9387, "step": 230},
+    {"epoch": 0.5565423327412671, "grad_norm": 1.519239902496338, "learning_rate": 4.586823638049841e-05, "loss": 1.871, "step": 235},
+    {"epoch": 0.5683836589698046, "grad_norm": 1.1272022724151611, "learning_rate": 4.5695825802485085e-05, "loss": 1.9629, "step": 240},
+    {"epoch": 0.5802249851983422, "grad_norm": 1.135977864265442, "learning_rate": 4.552022919688861e-05, "loss": 2.1182, "step": 245},
+    {"epoch": 0.5920663114268798, "grad_norm": 1.3236933946609497, "learning_rate": 4.53414735960021e-05, "loss": 1.8874, "step": 250},
+    {"epoch": 0.6039076376554174, "grad_norm": 2.230280637741089, "learning_rate": 4.51595865184315e-05, "loss": 1.8281, "step": 255},
+    {"epoch": 0.6157489638839551, "grad_norm": 1.3642871379852295, "learning_rate": 4.497459596485924e-05, "loss": 2.0387, "step": 260},
+    {"epoch": 0.6275902901124926, "grad_norm": 0.9770427942276001, "learning_rate": 4.47865304137337e-05, "loss": 2.1307, "step": 265},
+    {"epoch": 0.6394316163410302, "grad_norm": 3.981346845626831, "learning_rate": 4.4595418816885004e-05, "loss": 2.0755, "step": 270},
+    {"epoch": 0.6512729425695678, "grad_norm": 1.1023027896881104, "learning_rate": 4.440129059506808e-05, "loss": 1.9149, "step": 275},
+    {"epoch": 0.6631142687981054, "grad_norm": 1.648698091506958, "learning_rate": 4.420417563343346e-05, "loss": 1.984, "step": 280},
+    {"epoch": 0.6749555950266429, "grad_norm": 1.157861590385437, "learning_rate": 4.40041042769266e-05, "loss": 1.8697, "step": 285},
+    {"epoch": 0.6867969212551805, "grad_norm": 1.0009181499481201, "learning_rate": 4.380110732561637e-05, "loss": 1.7951, "step": 290},
+    {"epoch": 0.6986382474837182, "grad_norm": 2.11307692527771, "learning_rate": 4.3595216029953575e-05, "loss": 1.6276, "step": 295},
+    {"epoch": 0.7104795737122558, "grad_norm": 1.0800515413284302, "learning_rate": 4.3386462085960086e-05, "loss": 1.8438, "step": 300},
+    {"epoch": 0.7223208999407934, "grad_norm": 0.9078517556190491, "learning_rate": 4.3174877630349366e-05, "loss": 1.7839, "step": 305},
+    {"epoch": 0.7341622261693309, "grad_norm": 1.0842117071151733, "learning_rate": 4.296049523557917e-05, "loss": 1.8604, "step": 310},
+    {"epoch": 0.7460035523978685, "grad_norm": 1.2695246934890747, "learning_rate": 4.2743347904837176e-05, "loss": 1.9514, "step": 315},
+    {"epoch": 0.7578448786264061, "grad_norm": 1.0501705408096313, "learning_rate": 4.2523469066960295e-05, "loss": 1.9439, "step": 320},
+    {"epoch": 0.7696862048549438, "grad_norm": 1.3608386516571045, "learning_rate": 4.230089257128842e-05, "loss": 2.0266, "step": 325},
+    {"epoch": 0.7815275310834814, "grad_norm": 1.1087582111358643, "learning_rate": 4.2075652682453554e-05, "loss": 1.9851, "step": 330},
+    {"epoch": 0.7933688573120189, "grad_norm": 1.4247711896896362, "learning_rate": 4.184778407510484e-05, "loss": 1.976, "step": 335},
+    {"epoch": 0.8052101835405565, "grad_norm": 1.5246973037719727, "learning_rate": 4.16173218285706e-05, "loss": 1.9039, "step": 340},
+    {"epoch": 0.8170515097690941, "grad_norm": 0.8650221824645996, "learning_rate": 4.138430142145805e-05, "loss": 1.9577, "step": 345},
+    {"epoch": 0.8288928359976317, "grad_norm": 1.5152937173843384, "learning_rate": 4.114875872619147e-05, "loss": 1.9182, "step": 350},
+    {"epoch": 0.8407341622261694, "grad_norm": 1.2927278280258179, "learning_rate": 4.0910730003489894e-05, "loss": 1.9574, "step": 355},
+    {"epoch": 0.8525754884547069, "grad_norm": 1.193464756011963, "learning_rate": 4.067025189678485e-05, "loss": 1.9129, "step": 360},
+    {"epoch": 0.8644168146832445, "grad_norm": 1.1596301794052124, "learning_rate": 4.042736142657935e-05, "loss": 2.0108, "step": 365},
+    {"epoch": 0.8762581409117821, "grad_norm": 1.278910756111145, "learning_rate": 4.018209598474869e-05, "loss": 2.1509, "step": 370},
+    {"epoch": 0.8880994671403197, "grad_norm": 0.9644595384597778, "learning_rate": 3.993449332878418e-05, "loss": 2.01, "step": 375},
+    {"epoch": 0.8999407933688574, "grad_norm": 1.2030296325683594, "learning_rate": 3.9684591575980546e-05, "loss": 2.0312, "step": 380},
+    {"epoch": 0.9117821195973949, "grad_norm": 1.4285073280334473, "learning_rate": 3.943242919756792e-05, "loss": 1.9705, "step": 385},
+    {"epoch": 0.9236234458259325, "grad_norm": 1.1423532962799072, "learning_rate": 3.917804501278942e-05, "loss": 2.1293, "step": 390},
+    {"epoch": 0.9354647720544701, "grad_norm": 1.3577874898910522, "learning_rate": 3.8921478182925055e-05, "loss": 2.1897, "step": 395},
+    {"epoch": 0.9473060982830077, "grad_norm": 1.166759967803955, "learning_rate": 3.8662768205263044e-05, "loss": 1.966, "step": 400},
+    {"epoch": 0.9591474245115453, "grad_norm": 0.8779304623603821, "learning_rate": 3.8401954907019424e-05, "loss": 1.898, "step": 405},
+    {"epoch": 0.9709887507400828, "grad_norm": 0.9446001648902893, "learning_rate": 3.813907843920675e-05, "loss": 2.1288, "step": 410},
+    {"epoch": 0.9828300769686205, "grad_norm": 1.2340161800384521, "learning_rate": 3.787417927045315e-05, "loss": 1.8449, "step": 415},
+    {"epoch": 0.9946714031971581, "grad_norm": 1.0638560056686401, "learning_rate": 3.7607298180772236e-05, "loss": 1.8785, "step": 420},
+    {"epoch": 1.0065127294256957, "grad_norm": 1.046241044998169, "learning_rate": 3.733847625528529e-05, "loss": 1.801, "step": 425},
+    {"epoch": 1.0183540556542332, "grad_norm": 0.9987891316413879, "learning_rate": 3.706775487789639e-05, "loss": 1.9475, "step": 430},
+    {"epoch": 1.030195381882771, "grad_norm": 1.4792598485946655, "learning_rate": 3.679517572492151e-05, "loss": 1.9996, "step": 435},
+    {"epoch": 1.0420367081113084, "grad_norm": 1.3502384424209595, "learning_rate": 3.652078075867267e-05, "loss": 1.9598, "step": 440},
+    {"epoch": 1.0538780343398462, "grad_norm": 0.9646159410476685, "learning_rate": 3.624461222099804e-05, "loss": 1.9598, "step": 445},
+    {"epoch": 1.0657193605683837, "grad_norm": 1.464591145515442, "learning_rate": 3.596671262677898e-05, "loss": 1.9089, "step": 450},
+    {"epoch": 1.0775606867969212, "grad_norm": 0.9556955099105835, "learning_rate": 3.568712475738508e-05, "loss": 1.9496, "step": 455},
+    {"epoch": 1.089402013025459, "grad_norm": 1.2917728424072266, "learning_rate": 3.5405891654088154e-05, "loss": 1.9467, "step": 460},
+    {"epoch": 1.1012433392539964, "grad_norm": 1.3475714921951294, "learning_rate": 3.5123056611436224e-05, "loss": 2.0235, "step": 465},
+    {"epoch": 1.1130846654825342, "grad_norm": 0.9648019671440125, "learning_rate": 3.483866317058857e-05, "loss": 1.8844, "step": 470},
+    {"epoch": 1.1249259917110717, "grad_norm": 1.3338149785995483, "learning_rate": 3.4552755112612714e-05, "loss": 1.8287, "step": 475},
+    {"epoch": 1.1367673179396092, "grad_norm": 1.1770730018615723, "learning_rate": 3.4265376451744565e-05, "loss": 2.0754, "step": 480},
+    {"epoch": 1.148608644168147, "grad_norm": 1.0296565294265747, "learning_rate": 3.397657142861258e-05, "loss": 1.9269, "step": 485},
+    {"epoch": 1.1604499703966844, "grad_norm": 1.1220016479492188, "learning_rate": 3.3686384503427174e-05, "loss": 2.0613, "step": 490},
+    {"epoch": 1.1722912966252221, "grad_norm": 1.0247845649719238, "learning_rate": 3.339486034913627e-05, "loss": 1.8047, "step": 495},
+    {"epoch": 1.1841326228537596, "grad_norm": 0.9551325440406799, "learning_rate": 3.3102043844548044e-05, "loss": 1.8639, "step": 500},
+    {"epoch": 1.1959739490822971, "grad_norm": 1.7456352710723877, "learning_rate": 3.280798006742213e-05, "loss": 1.9545, "step": 505},
+    {"epoch": 1.2078152753108349, "grad_norm": 1.260713815689087, "learning_rate": 3.2512714287530006e-05, "loss": 1.874, "step": 510},
+    {"epoch": 1.2196566015393724, "grad_norm": 1.4492847919464111, "learning_rate": 3.2216291959686006e-05, "loss": 1.9178, "step": 515},
+    {"epoch": 1.2314979277679101, "grad_norm": 1.4412329196929932, "learning_rate": 3.191875871674971e-05, "loss": 1.919, "step": 520},
+    {"epoch": 1.2433392539964476, "grad_norm": 1.2013176679611206, "learning_rate": 3.1620160362600984e-05, "loss": 1.9128, "step": 525},
+    {"epoch": 1.2551805802249851, "grad_norm": 1.0584838390350342, "learning_rate": 3.1320542865088696e-05, "loss": 2.0132, "step": 530},
+    {"epoch": 1.2670219064535229, "grad_norm": 0.9713219404220581, "learning_rate": 3.101995234895416e-05, "loss": 1.9014, "step": 535},
+    {"epoch": 1.2788632326820604, "grad_norm": 1.167724609375, "learning_rate": 3.071843508873046e-05, "loss": 1.8482, "step": 540},
+    {"epoch": 1.290704558910598, "grad_norm": 1.43012273311615, "learning_rate": 3.0416037501618677e-05, "loss": 1.7475, "step": 545},
+    {"epoch": 1.3025458851391356, "grad_norm": 1.8622283935546875, "learning_rate": 3.0112806140342176e-05, "loss": 2.0185, "step": 550},
+    {"epoch": 1.3143872113676731, "grad_norm": 1.513700008392334, "learning_rate": 2.9808787685980054e-05, "loss": 1.8825, "step": 555},
+    {"epoch": 1.3262285375962108, "grad_norm": 1.4935449361801147, "learning_rate": 2.9504028940780776e-05, "loss": 1.8319, "step": 560},
+    {"epoch": 1.3380698638247484, "grad_norm": 1.4678127765655518, "learning_rate": 2.9198576820957187e-05, "loss": 1.9613, "step": 565},
+    {"epoch": 1.349911190053286, "grad_norm": 1.2753045558929443, "learning_rate": 2.8892478349463986e-05, "loss": 1.9367, "step": 570},
+    {"epoch": 1.3617525162818236, "grad_norm": 1.5118064880371094, "learning_rate": 2.858578064875874e-05, "loss": 1.895, "step": 575},
+    {"epoch": 1.373593842510361, "grad_norm": 1.3416668176651, "learning_rate": 2.8278530933547624e-05, "loss": 2.0023, "step": 580},
+    {"epoch": 1.3854351687388988, "grad_norm": 1.6278949975967407, "learning_rate": 2.79707765035169e-05, "loss": 1.9469, "step": 585},
+    {"epoch": 1.3972764949674363, "grad_norm": 1.2708017826080322, "learning_rate": 2.7662564736051377e-05, "loss": 1.7915, "step": 590},
+    {"epoch": 1.409117821195974, "grad_norm": 1.0602233409881592, "learning_rate": 2.7353943078940875e-05, "loss": 1.8253, "step": 595},
+    {"epoch": 1.4209591474245116, "grad_norm": 1.3578535318374634, "learning_rate": 2.7044959043075814e-05, "loss": 2.0741, "step": 600},
+    {"epoch": 1.432800473653049, "grad_norm": 1.1489042043685913, "learning_rate": 2.67356601951332e-05, "loss": 1.901, "step": 605},
+    {"epoch": 1.4446417998815868, "grad_norm": 1.680655598640442, "learning_rate": 2.64260941502539e-05, "loss": 2.0099, "step": 610},
+    {"epoch": 1.4564831261101243, "grad_norm": 1.7020906209945679, "learning_rate": 2.611630856471252e-05, "loss": 1.8853, "step": 615},
+    {"epoch": 1.468324452338662, "grad_norm": 0.9535301923751831, "learning_rate": 2.5806351128580964e-05, "loss": 1.8205, "step": 620},
+    {"epoch": 1.4801657785671996, "grad_norm": 1.0446418523788452, "learning_rate": 2.5496269558386725e-05, "loss": 2.0851, "step": 625},
+    {"epoch": 1.492007104795737, "grad_norm": 1.1212449073791504, "learning_rate": 2.5186111589767187e-05, "loss": 2.0913, "step": 630},
+    {"epoch": 1.5038484310242746, "grad_norm": 1.5539659261703491, "learning_rate": 2.487592497012089e-05, "loss": 1.9521, "step": 635},
+    {"epoch": 1.5156897572528123, "grad_norm": 1.3954375982284546, "learning_rate": 2.4565757451257128e-05, "loss": 1.8525, "step": 640},
+    {"epoch": 1.52753108348135, "grad_norm": 1.5919753313064575, "learning_rate": 2.4255656782044644e-05, "loss": 1.8034, "step": 645},
+    {"epoch": 1.5393724097098875, "grad_norm": 1.420255184173584, "learning_rate": 2.3945670701061033e-05, "loss": 2.053, "step": 650},
+    {"epoch": 1.551213735938425, "grad_norm": 1.0267295837402344, "learning_rate": 2.3635846929243537e-05, "loss": 1.7877, "step": 655},
+    {"epoch": 1.5630550621669625, "grad_norm": 1.778398036956787, "learning_rate": 2.3326233162542655e-05, "loss": 1.9865, "step": 660},
+    {"epoch": 1.5748963883955003, "grad_norm": 1.0454083681106567, "learning_rate": 2.3016877064579564e-05, "loss": 1.8799, "step": 665},
+    {"epoch": 1.586737714624038, "grad_norm": 1.4005528688430786, "learning_rate": 2.2707826259308492e-05, "loss": 1.9329, "step": 670},
+    {"epoch": 1.5985790408525755, "grad_norm": 1.240485429763794, "learning_rate": 2.2399128323685286e-05, "loss": 1.7828, "step": 675},
+    {"epoch": 1.610420367081113, "grad_norm": 1.2078522443771362, "learning_rate": 2.2090830780343113e-05, "loss": 2.0114, "step": 680},
+    {"epoch": 1.6222616933096505, "grad_norm": 1.1760393381118774, "learning_rate": 2.1782981090276585e-05, "loss": 1.8671, "step": 685},
+    {"epoch": 1.6341030195381883, "grad_norm": 1.362109661102295, "learning_rate": 2.147562664553537e-05, "loss": 1.9453, "step": 690},
+    {"epoch": 1.645944345766726, "grad_norm": 2.057487964630127, "learning_rate": 2.1168814761928336e-05, "loss": 1.7151, "step": 695},
+    {"epoch": 1.6577856719952635, "grad_norm": 1.5289130210876465, "learning_rate": 2.0862592671739608e-05, "loss": 1.9591, "step": 700},
+    {"epoch": 1.669626998223801, "grad_norm": 1.42037832736969, "learning_rate": 2.0557007516457288e-05, "loss": 1.8999, "step": 705},
+    {"epoch": 1.6814683244523385, "grad_norm": 1.101355791091919, "learning_rate": 2.0252106339516272e-05, "loss": 2.0037, "step": 710},
+    {"epoch": 1.6933096506808762, "grad_norm": 5.125020980834961, "learning_rate": 1.9947936079056117e-05, "loss": 1.7028, "step": 715},
+    {"epoch": 1.705150976909414, "grad_norm": 1.3062840700149536, "learning_rate": 1.964454356069514e-05, "loss": 2.1283, "step": 720},
+    {"epoch": 1.7169923031379515, "grad_norm": 1.273743987083435, "learning_rate": 1.9341975490321827e-05, "loss": 2.0017, "step": 725},
+    {"epoch": 1.728833629366489, "grad_norm": 1.3251115083694458, "learning_rate": 1.9040278446904677e-05, "loss": 1.7329, "step": 730},
+    {"epoch": 1.7406749555950265, "grad_norm": 1.5570122003555298, "learning_rate": 1.873949887532156e-05, "loss": 1.9965, "step": 735},
+    {"epoch": 1.7525162818235642, "grad_norm": 1.3525424003601074, "learning_rate": 1.8439683079209787e-05, "loss": 1.9321, "step": 740},
+    {"epoch": 1.764357608052102, "grad_norm": 1.776936411857605, "learning_rate": 1.8140877213837823e-05, "loss": 1.9739, "step": 745},
+    {"epoch": 1.7761989342806395, "grad_norm": 1.317015290260315, "learning_rate": 1.7843127278999943e-05, "loss": 1.8864, "step": 750},
+    {"epoch": 1.788040260509177, "grad_norm": 1.6961033344268799, "learning_rate": 1.754647911193473e-05, "loss": 1.9227, "step": 755},
+    {"epoch": 1.7998815867377145, "grad_norm": 1.448065161705017, "learning_rate": 1.7250978380268694e-05, "loss": 2.0025, "step": 760},
+    {"epoch": 1.8117229129662522, "grad_norm": 1.4134864807128906, "learning_rate": 1.6956670574985908e-05, "loss": 1.8039, "step": 765},
+    {"epoch": 1.82356423919479, "grad_norm": 1.3354402780532837, "learning_rate": 1.6663601003424883e-05, "loss": 1.8008, "step": 770},
+    {"epoch": 1.8354055654233274, "grad_norm": 1.2575969696044922, "learning_rate": 1.6371814782303722e-05, "loss": 1.9609, "step": 775},
+    {"epoch": 1.847246891651865, "grad_norm": 1.4361883401870728, "learning_rate": 1.6081356830774625e-05, "loss": 2.0843, "step": 780},
+    {"epoch": 1.8590882178804025, "grad_norm": 1.320430874824524, "learning_rate": 1.579227186350875e-05, "loss": 1.8281, "step": 785},
+    {"epoch": 1.8709295441089402, "grad_norm": 1.2178723812103271, "learning_rate": 1.5504604383812646e-05, "loss": 1.9628, "step": 790},
+    {"epoch": 1.882770870337478, "grad_norm": 1.3220632076263428, "learning_rate": 1.5218398676777102e-05, "loss": 1.9867, "step": 795},
+    {"epoch": 1.8946121965660154, "grad_norm": 0.9402835965156555, "learning_rate": 1.4933698802459731e-05, "loss": 1.8819,
"step": 800
|
1130 |
+
},
|
1131 |
+
{
|
1132 |
+
"epoch": 1.906453522794553,
|
1133 |
+
"grad_norm": 1.3736234903335571,
|
1134 |
+
"learning_rate": 1.4650548589102092e-05,
|
1135 |
+
"loss": 1.8938,
|
1136 |
+
"step": 805
|
1137 |
+
},
|
1138 |
+
{
|
1139 |
+
"epoch": 1.9182948490230904,
|
1140 |
+
"grad_norm": 0.9985255002975464,
|
1141 |
+
"learning_rate": 1.436899162638255e-05,
|
1142 |
+
"loss": 1.6936,
|
1143 |
+
"step": 810
|
1144 |
+
},
|
1145 |
+
{
|
1146 |
+
"epoch": 1.9301361752516282,
|
1147 |
+
"grad_norm": 1.5471998453140259,
|
1148 |
+
"learning_rate": 1.4089071258705783e-05,
|
1149 |
+
"loss": 2.1922,
|
1150 |
+
"step": 815
|
1151 |
+
},
|
1152 |
+
{
|
1153 |
+
"epoch": 1.941977501480166,
|
1154 |
+
"grad_norm": 1.515426754951477,
|
1155 |
+
"learning_rate": 1.3810830578530225e-05,
|
1156 |
+
"loss": 1.8547,
|
1157 |
+
"step": 820
|
1158 |
+
},
|
1159 |
+
{
|
1160 |
+
"epoch": 1.9538188277087034,
|
1161 |
+
"grad_norm": 1.2444261312484741,
|
1162 |
+
"learning_rate": 1.3534312419734066e-05,
|
1163 |
+
"loss": 1.9437,
|
1164 |
+
"step": 825
|
1165 |
+
},
|
1166 |
+
{
|
1167 |
+
"epoch": 1.965660153937241,
|
1168 |
+
"grad_norm": 1.3579230308532715,
|
1169 |
+
"learning_rate": 1.3259559351021247e-05,
|
1170 |
+
"loss": 1.7016,
|
1171 |
+
"step": 830
|
1172 |
+
},
|
1173 |
+
{
|
1174 |
+
"epoch": 1.9775014801657784,
|
1175 |
+
"grad_norm": 1.436259388923645,
|
1176 |
+
"learning_rate": 1.2986613669368158e-05,
|
1177 |
+
"loss": 1.8801,
|
1178 |
+
"step": 835
|
1179 |
+
},
|
1180 |
+
{
|
1181 |
+
"epoch": 1.9893428063943162,
|
1182 |
+
"grad_norm": 1.196303367614746,
|
1183 |
+
"learning_rate": 1.271551739351224e-05,
|
1184 |
+
"loss": 1.9408,
|
1185 |
+
"step": 840
|
1186 |
+
},
|
1187 |
+
{
|
1188 |
+
"epoch": 2.001184132622854,
|
1189 |
+
"grad_norm": 1.1158932447433472,
|
1190 |
+
"learning_rate": 1.2446312257483358e-05,
|
1191 |
+
"loss": 1.6631,
|
1192 |
+
"step": 845
|
1193 |
+
},
|
1194 |
+
{
|
1195 |
+
"epoch": 2.0130254588513914,
|
1196 |
+
"grad_norm": 1.18464195728302,
|
1197 |
+
"learning_rate": 1.2179039704179118e-05,
|
1198 |
+
"loss": 1.859,
|
1199 |
+
"step": 850
|
1200 |
+
},
|
1201 |
+
{
|
1202 |
+
"epoch": 2.024866785079929,
|
1203 |
+
"grad_norm": 1.7019284963607788,
|
1204 |
+
"learning_rate": 1.1913740878984816e-05,
|
1205 |
+
"loss": 1.9671,
|
1206 |
+
"step": 855
|
1207 |
+
},
|
1208 |
+
{
|
1209 |
+
"epoch": 2.0367081113084664,
|
1210 |
+
"grad_norm": 1.4283084869384766,
|
1211 |
+
"learning_rate": 1.1650456623439367e-05,
|
1212 |
+
"loss": 1.8109,
|
1213 |
+
"step": 860
|
1214 |
+
},
|
1215 |
+
{
|
1216 |
+
"epoch": 2.0485494375370044,
|
1217 |
+
"grad_norm": 1.2812663316726685,
|
1218 |
+
"learning_rate": 1.1389227468947906e-05,
|
1219 |
+
"loss": 1.8024,
|
1220 |
+
"step": 865
|
1221 |
+
},
|
1222 |
+
{
|
1223 |
+
"epoch": 2.060390763765542,
|
1224 |
+
"grad_norm": 1.4143946170806885,
|
1225 |
+
"learning_rate": 1.1130093630542198e-05,
|
1226 |
+
"loss": 1.8327,
|
1227 |
+
"step": 870
|
1228 |
+
},
|
1229 |
+
{
|
1230 |
+
"epoch": 2.0722320899940794,
|
1231 |
+
"grad_norm": 1.2379989624023438,
|
1232 |
+
"learning_rate": 1.0873095000689675e-05,
|
1233 |
+
"loss": 1.9797,
|
1234 |
+
"step": 875
|
1235 |
+
},
|
1236 |
+
{
|
1237 |
+
"epoch": 2.084073416222617,
|
1238 |
+
"grad_norm": 1.524370551109314,
|
1239 |
+
"learning_rate": 1.0618271143152184e-05,
|
1240 |
+
"loss": 1.8164,
|
1241 |
+
"step": 880
|
1242 |
+
},
|
1243 |
+
{
|
1244 |
+
"epoch": 2.0959147424511544,
|
1245 |
+
"grad_norm": 1.7849695682525635,
|
1246 |
+
"learning_rate": 1.0365661286895365e-05,
|
1247 |
+
"loss": 1.7994,
|
1248 |
+
"step": 885
|
1249 |
+
},
|
1250 |
+
{
|
1251 |
+
"epoch": 2.1077560686796923,
|
1252 |
+
"grad_norm": 2.3028218746185303,
|
1253 |
+
"learning_rate": 1.0115304320049479e-05,
|
1254 |
+
"loss": 1.9428,
|
1255 |
+
"step": 890
|
1256 |
+
},
|
1257 |
+
{
|
1258 |
+
"epoch": 2.11959739490823,
|
1259 |
+
"grad_norm": 1.4404668807983398,
|
1260 |
+
"learning_rate": 9.867238783922789e-06,
|
1261 |
+
"loss": 1.9297,
|
1262 |
+
"step": 895
|
1263 |
+
},
|
1264 |
+
{
|
1265 |
+
"epoch": 2.1314387211367674,
|
1266 |
+
"grad_norm": 2.096019744873047,
|
1267 |
+
"learning_rate": 9.621502867068285e-06,
|
1268 |
+
"loss": 1.8185,
|
1269 |
+
"step": 900
|
1270 |
+
},
|
1271 |
+
{
|
1272 |
+
"epoch": 2.143280047365305,
|
1273 |
+
"grad_norm": 1.393315076828003,
|
1274 |
+
"learning_rate": 9.378134399404767e-06,
|
1275 |
+
"loss": 1.7902,
|
1276 |
+
"step": 905
|
1277 |
+
},
|
1278 |
+
{
|
1279 |
+
"epoch": 2.1551213735938424,
|
1280 |
+
"grad_norm": 1.4423267841339111,
|
1281 |
+
"learning_rate": 9.137170846393054e-06,
|
1282 |
+
"loss": 1.7111,
|
1283 |
+
"step": 910
|
1284 |
+
},
|
1285 |
+
{
|
1286 |
+
"epoch": 2.1669626998223803,
|
1287 |
+
"grad_norm": 1.1944553852081299,
|
1288 |
+
"learning_rate": 8.898649303268372e-06,
|
1289 |
+
"loss": 2.04,
|
1290 |
+
"step": 915
|
1291 |
+
},
|
1292 |
+
{
|
1293 |
+
"epoch": 2.178804026050918,
|
1294 |
+
"grad_norm": 1.407127857208252,
|
1295 |
+
"learning_rate": 8.662606489329711e-06,
|
1296 |
+
"loss": 1.9046,
|
1297 |
+
"step": 920
|
1298 |
+
},
|
1299 |
+
{
|
1300 |
+
"epoch": 2.1906453522794553,
|
1301 |
+
"grad_norm": 1.855188012123108,
|
1302 |
+
"learning_rate": 8.429078742287073e-06,
|
1303 |
+
"loss": 2.1426,
|
1304 |
+
"step": 925
|
1305 |
+
},
|
1306 |
+
{
|
1307 |
+
"epoch": 2.202486678507993,
|
1308 |
+
"grad_norm": 1.5526237487792969,
|
1309 |
+
"learning_rate": 8.198102012667407e-06,
|
1310 |
+
"loss": 2.0743,
|
1311 |
+
"step": 930
|
1312 |
+
},
|
1313 |
+
{
|
1314 |
+
"epoch": 2.2143280047365304,
|
1315 |
+
"grad_norm": 1.1409013271331787,
|
1316 |
+
"learning_rate": 7.969711858280252e-06,
|
1317 |
+
"loss": 1.7135,
|
1318 |
+
"step": 935
|
1319 |
+
},
|
1320 |
+
{
|
1321 |
+
"epoch": 2.2261693309650683,
|
1322 |
+
"grad_norm": 1.7926199436187744,
|
1323 |
+
"learning_rate": 7.743943438743676e-06,
|
1324 |
+
"loss": 1.8054,
|
1325 |
+
"step": 940
|
1326 |
+
},
|
1327 |
+
{
|
1328 |
+
"epoch": 2.238010657193606,
|
1329 |
+
"grad_norm": 2.43799090385437,
|
1330 |
+
"learning_rate": 7.520831510071744e-06,
|
1331 |
+
"loss": 1.8244,
|
1332 |
+
"step": 945
|
1333 |
+
},
|
1334 |
+
{
|
1335 |
+
"epoch": 2.2498519834221433,
|
1336 |
+
"grad_norm": 1.3174521923065186,
|
1337 |
+
"learning_rate": 7.300410419323869e-06,
|
1338 |
+
"loss": 1.8097,
|
1339 |
+
"step": 950
|
1340 |
+
},
|
1341 |
+
{
|
1342 |
+
"epoch": 2.261693309650681,
|
1343 |
+
"grad_norm": 1.5044059753417969,
|
1344 |
+
"learning_rate": 7.082714099317334e-06,
|
1345 |
+
"loss": 1.9919,
|
1346 |
+
"step": 955
|
1347 |
+
},
|
1348 |
+
{
|
1349 |
+
"epoch": 2.2735346358792183,
|
1350 |
+
"grad_norm": 1.3440165519714355,
|
1351 |
+
"learning_rate": 6.867776063403411e-06,
|
1352 |
+
"loss": 1.9084,
|
1353 |
+
"step": 960
|
1354 |
+
},
|
1355 |
+
{
|
1356 |
+
"epoch": 2.2853759621077563,
|
1357 |
+
"grad_norm": 1.5049198865890503,
|
1358 |
+
"learning_rate": 6.6556294003081914e-06,
|
1359 |
+
"loss": 1.7835,
|
1360 |
+
"step": 965
|
1361 |
+
},
|
1362 |
+
{
|
1363 |
+
"epoch": 2.297217288336294,
|
1364 |
+
"grad_norm": 1.3639992475509644,
|
1365 |
+
"learning_rate": 6.44630676903869e-06,
|
1366 |
+
"loss": 1.7542,
|
1367 |
+
"step": 970
|
1368 |
+
},
|
1369 |
+
{
|
1370 |
+
"epoch": 2.3090586145648313,
|
1371 |
+
"grad_norm": 1.5013291835784912,
|
1372 |
+
"learning_rate": 6.239840393855184e-06,
|
1373 |
+
"loss": 2.0351,
|
1374 |
+
"step": 975
|
1375 |
+
},
|
1376 |
+
{
|
1377 |
+
"epoch": 2.320899940793369,
|
1378 |
+
"grad_norm": 1.4973437786102295,
|
1379 |
+
"learning_rate": 6.036262059310382e-06,
|
1380 |
+
"loss": 1.7908,
|
1381 |
+
"step": 980
|
1382 |
+
},
|
1383 |
+
{
|
1384 |
+
"epoch": 2.3327412670219063,
|
1385 |
+
"grad_norm": 2.3206512928009033,
|
1386 |
+
"learning_rate": 5.835603105356396e-06,
|
1387 |
+
"loss": 1.7511,
|
1388 |
+
"step": 985
|
1389 |
+
},
|
1390 |
+
{
|
1391 |
+
"epoch": 2.3445825932504443,
|
1392 |
+
"grad_norm": 1.5731827020645142,
|
1393 |
+
"learning_rate": 5.637894422520027e-06,
|
1394 |
+
"loss": 1.9621,
|
1395 |
+
"step": 990
|
1396 |
+
},
|
1397 |
+
{
|
1398 |
+
"epoch": 2.3564239194789818,
|
1399 |
+
"grad_norm": 1.6597813367843628,
|
1400 |
+
"learning_rate": 5.443166447147391e-06,
|
1401 |
+
"loss": 2.0578,
|
1402 |
+
"step": 995
|
1403 |
+
},
|
1404 |
+
{
|
1405 |
+
"epoch": 2.3682652457075193,
|
1406 |
+
"grad_norm": 1.659856915473938,
|
1407 |
+
"learning_rate": 5.251449156718313e-06,
|
1408 |
+
"loss": 1.9198,
|
1409 |
+
"step": 1000
|
1410 |
+
},
|
1411 |
+
{
|
1412 |
+
"epoch": 2.380106571936057,
|
1413 |
+
"grad_norm": 1.1122225522994995,
|
1414 |
+
"learning_rate": 5.062772065231491e-06,
|
1415 |
+
"loss": 1.6089,
|
1416 |
+
"step": 1005
|
1417 |
+
},
|
1418 |
+
{
|
1419 |
+
"epoch": 2.3919478981645943,
|
1420 |
+
"grad_norm": 1.6533218622207642,
|
1421 |
+
"learning_rate": 4.877164218660901e-06,
|
1422 |
+
"loss": 1.9499,
|
1423 |
+
"step": 1010
|
1424 |
+
},
|
1425 |
+
{
|
1426 |
+
"epoch": 2.4037892243931323,
|
1427 |
+
"grad_norm": 1.2786110639572144,
|
1428 |
+
"learning_rate": 4.694654190484327e-06,
|
1429 |
+
"loss": 2.0044,
|
1430 |
+
"step": 1015
|
1431 |
+
},
|
1432 |
+
{
|
1433 |
+
"epoch": 2.4156305506216698,
|
1434 |
+
"grad_norm": 1.542189359664917,
|
1435 |
+
"learning_rate": 4.515270077284595e-06,
|
1436 |
+
"loss": 2.0896,
|
1437 |
+
"step": 1020
|
1438 |
+
},
|
1439 |
+
{
|
1440 |
+
"epoch": 2.4274718768502073,
|
1441 |
+
"grad_norm": 1.5293947458267212,
|
1442 |
+
"learning_rate": 4.339039494424263e-06,
|
1443 |
+
"loss": 1.8803,
|
1444 |
+
"step": 1025
|
1445 |
+
},
|
1446 |
+
{
|
1447 |
+
"epoch": 2.4393132030787448,
|
1448 |
+
"grad_norm": 1.390453815460205,
|
1449 |
+
"learning_rate": 4.16598957179431e-06,
|
1450 |
+
"loss": 2.0795,
|
1451 |
+
"step": 1030
|
1452 |
+
},
|
1453 |
+
{
|
1454 |
+
"epoch": 2.4511545293072823,
|
1455 |
+
"grad_norm": 2.7319912910461426,
|
1456 |
+
"learning_rate": 3.996146949637658e-06,
|
1457 |
+
"loss": 1.7221,
|
1458 |
+
"step": 1035
|
1459 |
+
},
|
1460 |
+
{
|
1461 |
+
"epoch": 2.4629958555358202,
|
1462 |
+
"grad_norm": 1.080298900604248,
|
1463 |
+
"learning_rate": 3.8295377744479995e-06,
|
1464 |
+
"loss": 1.6881,
|
1465 |
+
"step": 1040
|
1466 |
+
},
|
1467 |
+
{
|
1468 |
+
"epoch": 2.4748371817643577,
|
1469 |
+
"grad_norm": 1.4576321840286255,
|
1470 |
+
"learning_rate": 3.6661876949447007e-06,
|
1471 |
+
"loss": 1.8039,
|
1472 |
+
"step": 1045
|
1473 |
+
},
|
1474 |
+
{
|
1475 |
+
"epoch": 2.4866785079928952,
|
1476 |
+
"grad_norm": 1.5691012144088745,
|
1477 |
+
"learning_rate": 3.5061218581242535e-06,
|
1478 |
+
"loss": 1.9333,
|
1479 |
+
"step": 1050
|
1480 |
+
},
|
1481 |
+
{
|
1482 |
+
"epoch": 2.4985198342214328,
|
1483 |
+
"grad_norm": 1.2626398801803589,
|
1484 |
+
"learning_rate": 3.3493649053890326e-06,
|
1485 |
+
"loss": 1.6731,
|
1486 |
+
"step": 1055
|
1487 |
+
},
|
1488 |
+
{
|
1489 |
+
"epoch": 2.5103611604499703,
|
1490 |
+
"grad_norm": 1.5094714164733887,
|
1491 |
+
"learning_rate": 3.1959409687538853e-06,
|
1492 |
+
"loss": 1.8437,
|
1493 |
+
"step": 1060
|
1494 |
+
},
|
1495 |
+
{
|
1496 |
+
"epoch": 2.522202486678508,
|
1497 |
+
"grad_norm": 2.1249232292175293,
|
1498 |
+
"learning_rate": 3.04587366713108e-06,
|
1499 |
+
"loss": 1.9577,
|
1500 |
+
"step": 1065
|
1501 |
+
},
|
1502 |
+
{
|
1503 |
+
"epoch": 2.5340438129070457,
|
1504 |
+
"grad_norm": 1.563010334968567,
|
1505 |
+
"learning_rate": 2.8991861026943014e-06,
|
1506 |
+
"loss": 2.0162,
|
1507 |
+
"step": 1070
|
1508 |
+
},
|
1509 |
+
{
|
1510 |
+
"epoch": 2.5458851391355832,
|
1511 |
+
"grad_norm": 2.769664764404297,
|
1512 |
+
"learning_rate": 2.7559008573221717e-06,
|
1513 |
+
"loss": 1.8091,
|
1514 |
+
"step": 1075
|
1515 |
+
},
|
1516 |
+
{
|
1517 |
+
"epoch": 2.5577264653641207,
|
1518 |
+
"grad_norm": 1.178745985031128,
|
1519 |
+
"learning_rate": 2.6160399891218988e-06,
|
1520 |
+
"loss": 1.7485,
|
1521 |
+
"step": 1080
|
1522 |
+
},
|
1523 |
+
{
|
1524 |
+
"epoch": 2.5695677915926582,
|
1525 |
+
"grad_norm": 1.479054570198059,
|
1526 |
+
"learning_rate": 2.4796250290334887e-06,
|
1527 |
+
"loss": 1.9556,
|
1528 |
+
"step": 1085
|
1529 |
+
},
|
1530 |
+
{
|
1531 |
+
"epoch": 2.581409117821196,
|
1532 |
+
"grad_norm": 1.1433284282684326,
|
1533 |
+
"learning_rate": 2.346676977515189e-06,
|
1534 |
+
"loss": 1.7762,
|
1535 |
+
"step": 1090
|
1536 |
+
},
|
1537 |
+
{
|
1538 |
+
"epoch": 2.5932504440497337,
|
1539 |
+
"grad_norm": 1.6979899406433105,
|
1540 |
+
"learning_rate": 2.21721630131054e-06,
|
1541 |
+
"loss": 1.7463,
|
1542 |
+
"step": 1095
|
1543 |
+
},
|
1544 |
+
{
|
1545 |
+
"epoch": 2.605091770278271,
|
1546 |
+
"grad_norm": 2.4911599159240723,
|
1547 |
+
"learning_rate": 2.0912629302976493e-06,
|
1548 |
+
"loss": 1.9825,
|
1549 |
+
"step": 1100
|
1550 |
+
},
|
1551 |
+
{
|
1552 |
+
"epoch": 2.6169330965068087,
|
1553 |
+
"grad_norm": 1.3858684301376343,
|
1554 |
+
"learning_rate": 1.968836254421036e-06,
|
1555 |
+
"loss": 1.8317,
|
1556 |
+
"step": 1105
|
1557 |
+
},
|
1558 |
+
{
|
1559 |
+
"epoch": 2.6287744227353462,
|
1560 |
+
"grad_norm": 1.304112195968628,
|
1561 |
+
"learning_rate": 1.849955120706673e-06,
|
1562 |
+
"loss": 1.7377,
|
1563 |
+
"step": 1110
|
1564 |
+
},
|
1565 |
+
{
|
1566 |
+
"epoch": 2.640615748963884,
|
1567 |
+
"grad_norm": 1.253029465675354,
|
1568 |
+
"learning_rate": 1.7346378303605359e-06,
|
1569 |
+
"loss": 1.8165,
|
1570 |
+
"step": 1115
|
1571 |
+
},
|
1572 |
+
{
|
1573 |
+
"epoch": 2.6524570751924217,
|
1574 |
+
"grad_norm": 1.166486382484436,
|
1575 |
+
"learning_rate": 1.6229021359512624e-06,
|
1576 |
+
"loss": 1.8233,
|
1577 |
+
"step": 1120
|
1578 |
+
},
|
1579 |
+
{
|
1580 |
+
"epoch": 2.664298401420959,
|
1581 |
+
"grad_norm": 1.5361779928207397,
|
1582 |
+
"learning_rate": 1.5147652386771848e-06,
|
1583 |
+
"loss": 1.9029,
|
1584 |
+
"step": 1125
|
1585 |
+
},
|
1586 |
+
{
|
1587 |
+
"epoch": 2.6761397276494967,
|
1588 |
+
"grad_norm": 1.3075627088546753,
|
1589 |
+
"learning_rate": 1.4102437857183155e-06,
|
1590 |
+
"loss": 1.8527,
|
1591 |
+
"step": 1130
|
1592 |
+
},
|
1593 |
+
{
|
1594 |
+
"epoch": 2.687981053878034,
|
1595 |
+
"grad_norm": 1.133543610572815,
|
1596 |
+
"learning_rate": 1.3093538676735601e-06,
|
1597 |
+
"loss": 1.8855,
|
1598 |
+
"step": 1135
|
1599 |
+
},
|
1600 |
+
{
|
1601 |
+
"epoch": 2.699822380106572,
|
1602 |
+
"grad_norm": 1.6549872159957886,
|
1603 |
+
"learning_rate": 1.2121110160836696e-06,
|
1604 |
+
"loss": 1.8746,
|
1605 |
+
"step": 1140
|
1606 |
+
},
|
1607 |
+
{
|
1608 |
+
"epoch": 2.7116637063351097,
|
1609 |
+
"grad_norm": 1.2035839557647705,
|
1610 |
+
"learning_rate": 1.1185302010402105e-06,
|
1611 |
+
"loss": 1.696,
|
1612 |
+
"step": 1145
|
1613 |
+
},
|
1614 |
+
{
|
1615 |
+
"epoch": 2.723505032563647,
|
1616 |
+
"grad_norm": 1.338542103767395,
|
1617 |
+
"learning_rate": 1.0286258288810107e-06,
|
1618 |
+
"loss": 1.7904,
|
1619 |
+
"step": 1150
|
1620 |
+
},
|
1621 |
+
{
|
1622 |
+
"epoch": 2.7353463587921847,
|
1623 |
+
"grad_norm": 1.4907745122909546,
|
1624 |
+
"learning_rate": 9.424117399723431e-07,
|
1625 |
+
"loss": 1.962,
|
1626 |
+
"step": 1155
|
1627 |
+
},
|
1628 |
+
{
|
1629 |
+
"epoch": 2.747187685020722,
|
1630 |
+
"grad_norm": 1.6321779489517212,
|
1631 |
+
"learning_rate": 8.599012065782924e-07,
|
1632 |
+
"loss": 1.9262,
|
1633 |
+
"step": 1160
|
1634 |
+
},
|
1635 |
+
{
|
1636 |
+
"epoch": 2.75902901124926,
|
1637 |
+
"grad_norm": 1.2635369300842285,
|
1638 |
+
"learning_rate": 7.811069308175156e-07,
|
1639 |
+
"loss": 1.8789,
|
1640 |
+
"step": 1165
|
1641 |
+
},
|
1642 |
+
{
|
1643 |
+
"epoch": 2.7708703374777977,
|
1644 |
+
"grad_norm": 1.5785208940505981,
|
1645 |
+
"learning_rate": 7.060410427078473e-07,
|
1646 |
+
"loss": 1.7546,
|
1647 |
+
"step": 1170
|
1648 |
+
},
|
1649 |
+
{
|
1650 |
+
"epoch": 2.782711663706335,
|
1651 |
+
"grad_norm": 1.1907869577407837,
|
1652 |
+
"learning_rate": 6.347150982989159e-07,
|
1653 |
+
"loss": 1.7979,
|
1654 |
+
"step": 1175
|
1655 |
+
},
|
1656 |
+
{
|
1657 |
+
"epoch": 2.7945529899348727,
|
1658 |
+
"grad_norm": 1.3581962585449219,
|
1659 |
+
"learning_rate": 5.671400778931468e-07,
|
1660 |
+
"loss": 1.7874,
|
1661 |
+
"step": 1180
|
1662 |
+
},
|
1663 |
+
{
|
1664 |
+
"epoch": 2.80639431616341,
|
1665 |
+
"grad_norm": 1.0370872020721436,
|
1666 |
+
"learning_rate": 5.033263843554015e-07,
|
1667 |
+
"loss": 1.6197,
|
1668 |
+
"step": 1185
|
1669 |
+
},
|
1670 |
+
{
|
1671 |
+
"epoch": 2.818235642391948,
|
1672 |
+
"grad_norm": 1.2103910446166992,
|
1673 |
+
"learning_rate": 4.4328384151149095e-07,
|
1674 |
+
"loss": 1.5225,
|
1675 |
+
"step": 1190
|
1676 |
+
},
|
1677 |
+
{
|
1678 |
+
"epoch": 2.8300769686204856,
|
1679 |
+
"grad_norm": 1.4887464046478271,
|
1680 |
+
"learning_rate": 3.8702169263585554e-07,
|
1681 |
+
"loss": 1.9811,
|
1682 |
+
"step": 1195
|
1683 |
+
},
|
1684 |
+
{
|
1685 |
+
"epoch": 2.841918294849023,
|
1686 |
+
"grad_norm": 1.4641401767730713,
|
1687 |
+
"learning_rate": 3.345485990286029e-07,
|
1688 |
+
"loss": 1.9999,
|
1689 |
+
"step": 1200
|
1690 |
+
},
|
1691 |
+
{
|
1692 |
+
"epoch": 2.8537596210775606,
|
1693 |
+
"grad_norm": 1.529491901397705,
|
1694 |
+
"learning_rate": 2.8587263868213585e-07,
|
1695 |
+
"loss": 2.1399,
|
1696 |
+
"step": 1205
|
1697 |
+
},
|
1698 |
+
{
|
1699 |
+
"epoch": 2.865600947306098,
|
1700 |
+
"grad_norm": 1.5479165315628052,
|
1701 |
+
"learning_rate": 2.410013050375859e-07,
|
1702 |
+
"loss": 1.8664,
|
1703 |
+
"step": 1210
|
1704 |
+
},
|
1705 |
+
{
|
1706 |
+
"epoch": 2.877442273534636,
|
1707 |
+
"grad_norm": 1.1747933626174927,
|
1708 |
+
"learning_rate": 1.999415058312276e-07,
|
1709 |
+
"loss": 1.9633,
|
1710 |
+
"step": 1215
|
1711 |
+
},
|
1712 |
+
{
|
1713 |
+
"epoch": 2.8892835997631736,
|
1714 |
+
"grad_norm": 1.358820915222168,
|
1715 |
+
"learning_rate": 1.6269956203107117e-07,
|
1716 |
+
"loss": 2.0106,
|
1717 |
+
"step": 1220
|
1718 |
+
},
|
1719 |
+
{
|
1720 |
+
"epoch": 2.901124925991711,
|
1721 |
+
"grad_norm": 1.554287314414978,
|
1722 |
+
"learning_rate": 1.2928120686377388e-07,
|
1723 |
+
"loss": 1.7896,
|
1724 |
+
"step": 1225
|
1725 |
+
},
|
1726 |
+
{
|
1727 |
+
"epoch": 2.9129662522202486,
|
1728 |
+
"grad_norm": 1.5915781259536743,
|
1729 |
+
"learning_rate": 9.969158493204067e-08,
|
1730 |
+
"loss": 1.8759,
|
1731 |
+
"step": 1230
|
1732 |
+
},
|
1733 |
+
{
|
1734 |
+
"epoch": 2.924807578448786,
|
1735 |
+
"grad_norm": 1.6586300134658813,
|
1736 |
+
"learning_rate": 7.393525142262991e-08,
|
1737 |
+
"loss": 1.9413,
|
1738 |
+
"step": 1235
|
1739 |
+
},
|
1740 |
+
{
|
1741 |
+
"epoch": 2.936648904677324,
|
1742 |
+
"grad_norm": 1.416338562965393,
|
1743 |
+
"learning_rate": 5.2016171405103174e-08,
|
1744 |
+
"loss": 1.8404,
|
1745 |
+
"step": 1240
|
1746 |
+
},
|
1747 |
+
{
|
1748 |
+
"epoch": 2.9484902309058616,
|
1749 |
+
"grad_norm": 1.3145766258239746,
|
1750 |
+
"learning_rate": 3.393771922142741e-08,
|
1751 |
+
"loss": 2.0654,
|
1752 |
+
"step": 1245
|
1753 |
+
},
|
1754 |
+
{
|
1755 |
+
"epoch": 2.960331557134399,
|
1756 |
+
"grad_norm": 1.3440285921096802,
|
1757 |
+
"learning_rate": 1.9702677966507154e-08,
|
1758 |
+
"loss": 1.8111,
|
1759 |
+
"step": 1250
|
1760 |
+
},
|
1761 |
+
{
|
1762 |
+
"epoch": 2.9721728833629366,
|
1763 |
+
"grad_norm": 1.3247921466827393,
|
1764 |
+
"learning_rate": 9.31323905974113e-09,
|
1765 |
+
"loss": 1.8708,
|
1766 |
+
"step": 1255
|
1767 |
+
},
|
1768 |
+
{
|
1769 |
+
"epoch": 2.984014209591474,
|
1770 |
+
"grad_norm": 1.7957427501678467,
|
1771 |
+
"learning_rate": 2.771001907653226e-09,
|
1772 |
+
"loss": 1.8842,
|
1773 |
+
"step": 1260
|
1774 |
+
},
|
1775 |
+
{
|
1776 |
+
"epoch": 2.995855535820012,
|
1777 |
+
"grad_norm": 1.4069582223892212,
|
1778 |
+
"learning_rate": 7.697365768943864e-11,
|
1779 |
+
"loss": 1.9665,
|
1780 |
+
"step": 1265
|
1781 |
+
},
|
1782 |
+
{
|
1783 |
+
"epoch": 2.9982238010657194,
|
1784 |
+
"step": 1266,
|
1785 |
+
"total_flos": 6.019503790030848e+16,
|
1786 |
+
"train_loss": 1.9697479162170988,
|
1787 |
+
"train_runtime": 4932.4291,
|
1788 |
+
"train_samples_per_second": 4.109,
|
1789 |
+
"train_steps_per_second": 0.257
|
1790 |
+
}
|
1791 |
+
],
|
1792 |
+
"logging_steps": 5,
|
1793 |
+
"max_steps": 1266,
|
1794 |
+
"num_input_tokens_seen": 0,
|
1795 |
+
"num_train_epochs": 3,
|
1796 |
+
"save_steps": 100,
|
1797 |
+
"total_flos": 6.019503790030848e+16,
|
1798 |
+
"train_batch_size": 2,
|
1799 |
+
"trial_name": null,
|
1800 |
+
"trial_params": null
|
1801 |
+
}
|
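The `learning_rate` values logged above decay smoothly to about 7.7e-11 at step 1265 of 1266, which is exactly what a zero-warmup cosine schedule with a 5e-05 peak produces (at the halfway point, step 635, the logged 2.4876e-05 matches the closed form to all printed digits). A minimal sketch for checking this against the file; the local path is an assumption, and `log_history` is the key `transformers.Trainer` normally uses for these records in `trainer_state.json`:

```python
import json
import math

# Load the trainer state saved alongside the adapter (path is an assumption).
with open("trainer_state.json") as f:
    state = json.load(f)

peak_lr = 5e-05                 # peak rate implied by the logged values
max_steps = state["max_steps"]  # 1266 for this run

for record in state["log_history"]:
    if "learning_rate" not in record:
        continue  # the final summary entry carries no learning_rate
    step = record["step"]
    # Cosine decay with no warmup: lr(t) = peak * 0.5 * (1 + cos(pi * t / T))
    expected = peak_lr * 0.5 * (1.0 + math.cos(math.pi * step / max_steps))
    print(f"step {step:4d}  logged {record['learning_rate']:.3e}  cosine {expected:.3e}")
```

The logged column lines up with this closed form throughout (e.g. ~2.49e-05 at step 635 and ~7.7e-11 at step 1265), so the run appears to have used no warmup steps.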
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc5b4b5d8d6561d6afb3707121328f6240013c31041ab528cec3f7233d9e0d08
+size 5112
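The `training_args.bin` entry above is only a Git LFS pointer (a hash plus a 5112-byte size); the actual payload is a pickled `TrainingArguments` object that `transformers.Trainer` writes with `torch.save`. A minimal sketch for inspecting the downloaded file, assuming a compatible `transformers` version is installed; note that unpickling executes arbitrary code, so only load files you trust:

```python
import torch

# Not a tensor checkpoint: training_args.bin is a pickled TrainingArguments
# object, so it must be fully unpickled (trusted files only).
args = torch.load("training_args.bin", weights_only=False)

print(type(args).__name__)               # expected: TrainingArguments
print(args.per_device_train_batch_size)  # should match "train_batch_size": 2 above
print(args.lr_scheduler_type)            # the scheduler used for this run
```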