abdulelahagr committed on
Commit
bf3d770
1 Parent(s): ac0f211

initial commit

README.md CHANGED
@@ -1,3 +1,91 @@
 ---
 license: apache-2.0
+ base_model: google/vit-base-patch16-224-in21k
+ tags:
+ - image-classification
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ model-index:
+ - name: vit-base-chest-xray
+   results: []
 ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # vit-base-chest-xray
+
+ This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the trpakov/chest-xray-classification dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0856
+ - Accuracy: 0.9742
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0002
+ - train_batch_size: 16
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 4
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
+ |:-------------:|:------:|:----:|:---------------:|:--------:|
+ | 0.1891 | 0.1307 | 100 | 0.1028 | 0.9665 |
+ | 0.2123 | 0.2614 | 200 | 0.1254 | 0.9562 |
+ | 0.0536 | 0.3922 | 300 | 0.1142 | 0.9691 |
+ | 0.0799 | 0.5229 | 400 | 0.1173 | 0.9648 |
+ | 0.0537 | 0.6536 | 500 | 0.0856 | 0.9742 |
+ | 0.0911 | 0.7843 | 600 | 0.2005 | 0.9425 |
+ | 0.1027 | 0.9150 | 700 | 0.0869 | 0.9708 |
+ | 0.1011 | 1.0458 | 800 | 0.1063 | 0.9631 |
+ | 0.0717 | 1.1765 | 900 | 0.1424 | 0.9588 |
+ | 0.0605 | 1.3072 | 1000 | 0.1525 | 0.9648 |
+ | 0.0573 | 1.4379 | 1100 | 0.0970 | 0.9700 |
+ | 0.024 | 1.5686 | 1200 | 0.0867 | 0.9751 |
+ | 0.0056 | 1.6993 | 1300 | 0.0888 | 0.9760 |
+ | 0.0051 | 1.8301 | 1400 | 0.1054 | 0.9768 |
+ | 0.063 | 1.9608 | 1500 | 0.1896 | 0.9571 |
+ | 0.002 | 2.0915 | 1600 | 0.1886 | 0.9588 |
+ | 0.005 | 2.2222 | 1700 | 0.1184 | 0.9734 |
+ | 0.0083 | 2.3529 | 1800 | 0.1084 | 0.9760 |
+ | 0.0013 | 2.4837 | 1900 | 0.0903 | 0.9777 |
+ | 0.0298 | 2.6144 | 2000 | 0.1023 | 0.9734 |
+ | 0.0008 | 2.7451 | 2100 | 0.1104 | 0.9768 |
+ | 0.0011 | 2.8758 | 2200 | 0.1128 | 0.9785 |
+ | 0.0006 | 3.0065 | 2300 | 0.1395 | 0.9734 |
+ | 0.0059 | 3.1373 | 2400 | 0.1419 | 0.9725 |
+ | 0.0005 | 3.2680 | 2500 | 0.1335 | 0.9777 |
+ | 0.0005 | 3.3987 | 2600 | 0.1249 | 0.9768 |
+ | 0.0007 | 3.5294 | 2700 | 0.1157 | 0.9777 |
+ | 0.0005 | 3.6601 | 2800 | 0.1202 | 0.9785 |
+ | 0.001 | 3.7908 | 2900 | 0.1239 | 0.9777 |
+ | 0.0004 | 3.9216 | 3000 | 0.1231 | 0.9768 |
+
+
+ ### Framework versions
+
+ - Transformers 4.40.0
+ - Pytorch 2.2.1+cu121
+ - Datasets 2.19.0
+ - Tokenizers 0.19.1
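
The card above does not yet include a usage snippet. Below is a minimal inference sketch (not part of the commit), assuming this checkpoint is published as `abdulelahagr/vit-base-chest-xray` (repo id inferred from the committer and model name, so treat it as an assumption) and using the standard `transformers` image-classification pipeline; the input file name is hypothetical.

```python
# Minimal inference sketch; repo id and image path are illustrative assumptions.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="abdulelahagr/vit-base-chest-xray",  # assumed repo id for this checkpoint
)

# Returns the two labels from config.json (PNEUMONIA / NORMAL) with scores.
predictions = classifier("chest_xray_example.png")  # hypothetical chest X-ray image
print(predictions)
```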
all_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+     "epoch": 4.0,
+     "eval_accuracy": 0.9742489270386266,
+     "eval_loss": 0.08559587597846985,
+     "eval_runtime": 15.5649,
+     "eval_samples_per_second": 74.848,
+     "eval_steps_per_second": 9.38,
+     "total_flos": 3.7909081319458406e+18,
+     "train_loss": 0.047429592626662374,
+     "train_runtime": 1590.6048,
+     "train_samples_per_second": 30.756,
+     "train_steps_per_second": 1.924
+ }
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+     "_name_or_path": "google/vit-base-patch16-224-in21k",
+     "architectures": [
+         "ViTForImageClassification"
+     ],
+     "attention_probs_dropout_prob": 0.0,
+     "encoder_stride": 16,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.0,
+     "hidden_size": 768,
+     "id2label": {
+         "0": "PNEUMONIA",
+         "1": "NORMAL"
+     },
+     "image_size": 224,
+     "initializer_range": 0.02,
+     "intermediate_size": 3072,
+     "label2id": {
+         "NORMAL": "1",
+         "PNEUMONIA": "0"
+     },
+     "layer_norm_eps": 1e-12,
+     "model_type": "vit",
+     "num_attention_heads": 12,
+     "num_channels": 3,
+     "num_hidden_layers": 12,
+     "patch_size": 16,
+     "problem_type": "single_label_classification",
+     "qkv_bias": true,
+     "torch_dtype": "float32",
+     "transformers_version": "4.40.0"
+ }
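
For reference, a short sketch (not part of the commit) of how the `id2label` / `label2id` tables above map raw model outputs to class names; the repo id is assumed, and the zero tensor is only a stand-in for real preprocessed pixel values.

```python
# Sketch: resolve a predicted class id through the id2label table in config.json.
# The repo id is an assumption; the zero tensor stands in for a real image batch.
import torch
from transformers import AutoConfig, AutoModelForImageClassification

repo_id = "abdulelahagr/vit-base-chest-xray"  # assumed repo id
config = AutoConfig.from_pretrained(repo_id)
model = AutoModelForImageClassification.from_pretrained(repo_id)

pixel_values = torch.zeros(1, 3, 224, 224)  # one 224x224 RGB image, as config.json expects
with torch.no_grad():
    logits = model(pixel_values=pixel_values).logits  # shape (1, 2)

predicted_id = logits.argmax(-1).item()
print(config.id2label[predicted_id])  # "PNEUMONIA" or "NORMAL"
```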
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 4.0,
+     "eval_accuracy": 0.9742489270386266,
+     "eval_loss": 0.08559587597846985,
+     "eval_runtime": 15.5649,
+     "eval_samples_per_second": 74.848,
+     "eval_steps_per_second": 9.38
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9bcb42bee7381aa20ffba641ad5e9ae2b83011862860f9bc5729090cdcefae1
+ size 343223968
preprocessor_config.json ADDED
@@ -0,0 +1,36 @@
+ {
+     "_valid_processor_keys": [
+         "images",
+         "do_resize",
+         "size",
+         "resample",
+         "do_rescale",
+         "rescale_factor",
+         "do_normalize",
+         "image_mean",
+         "image_std",
+         "return_tensors",
+         "data_format",
+         "input_data_format"
+     ],
+     "do_normalize": true,
+     "do_rescale": true,
+     "do_resize": true,
+     "image_mean": [
+         0.5,
+         0.5,
+         0.5
+     ],
+     "image_processor_type": "ViTImageProcessor",
+     "image_std": [
+         0.5,
+         0.5,
+         0.5
+     ],
+     "resample": 2,
+     "rescale_factor": 0.00392156862745098,
+     "size": {
+         "height": 224,
+         "width": 224
+     }
+ }
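
The processor settings above (resize to 224x224, rescale by 1/255, normalize with mean/std 0.5) can be applied through `ViTImageProcessor`; a minimal sketch follows, with an assumed repo id and a hypothetical input file.

```python
# Sketch: apply the preprocessing from preprocessor_config.json via ViTImageProcessor.
# Repo id and image path are assumptions for illustration.
from PIL import Image
from transformers import ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("abdulelahagr/vit-base-chest-xray")
image = Image.open("chest_xray_example.png").convert("RGB")

inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```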
runs/Apr22_18-43-01_7118ffc10d0b/events.out.tfevents.1713811381.7118ffc10d0b.14620.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a718059f0a2c96b3fc39481dec5dc4ff8cfe0e9bb7e19d0bdd9dd58078fa10a1
+ size 79238
runs/Apr22_18-43-01_7118ffc10d0b/events.out.tfevents.1713812995.7118ffc10d0b.14620.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ae1b2bae53d20332f2c057b673006991ff3c5a6a35d6b84d71708b4c6a7a1301
+ size 411
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 4.0,
+     "total_flos": 3.7909081319458406e+18,
+     "train_loss": 0.047429592626662374,
+     "train_runtime": 1590.6048,
+     "train_samples_per_second": 30.756,
+     "train_steps_per_second": 1.924
+ }
trainer_state.json ADDED
@@ -0,0 +1,2442 @@
1
+ {
2
+ "best_metric": 0.08559587597846985,
3
+ "best_model_checkpoint": "./vit-base-chest-xray/checkpoint-500",
4
+ "epoch": 4.0,
5
+ "eval_steps": 100,
6
+ "global_step": 3060,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.013071895424836602,
13
+ "grad_norm": 1.1339586973190308,
14
+ "learning_rate": 0.0001993464052287582,
15
+ "loss": 0.4681,
16
+ "step": 10
17
+ },
18
+ {
19
+ "epoch": 0.026143790849673203,
20
+ "grad_norm": 3.4900288581848145,
21
+ "learning_rate": 0.00019869281045751635,
22
+ "loss": 0.2909,
23
+ "step": 20
24
+ },
25
+ {
26
+ "epoch": 0.0392156862745098,
27
+ "grad_norm": 4.287875652313232,
28
+ "learning_rate": 0.00019803921568627454,
29
+ "loss": 0.2672,
30
+ "step": 30
31
+ },
32
+ {
33
+ "epoch": 0.05228758169934641,
34
+ "grad_norm": 0.31267789006233215,
35
+ "learning_rate": 0.0001973856209150327,
36
+ "loss": 0.1288,
37
+ "step": 40
38
+ },
39
+ {
40
+ "epoch": 0.06535947712418301,
41
+ "grad_norm": 0.20445282757282257,
42
+ "learning_rate": 0.00019673202614379085,
43
+ "loss": 0.2329,
44
+ "step": 50
45
+ },
46
+ {
47
+ "epoch": 0.0784313725490196,
48
+ "grad_norm": 1.763299584388733,
49
+ "learning_rate": 0.000196078431372549,
50
+ "loss": 0.1666,
51
+ "step": 60
52
+ },
53
+ {
54
+ "epoch": 0.0915032679738562,
55
+ "grad_norm": 4.610985279083252,
56
+ "learning_rate": 0.0001954248366013072,
57
+ "loss": 0.1897,
58
+ "step": 70
59
+ },
60
+ {
61
+ "epoch": 0.10457516339869281,
62
+ "grad_norm": 0.2082081139087677,
63
+ "learning_rate": 0.00019477124183006535,
64
+ "loss": 0.1638,
65
+ "step": 80
66
+ },
67
+ {
68
+ "epoch": 0.11764705882352941,
69
+ "grad_norm": 2.0618653297424316,
70
+ "learning_rate": 0.00019411764705882354,
71
+ "loss": 0.1161,
72
+ "step": 90
73
+ },
74
+ {
75
+ "epoch": 0.13071895424836602,
76
+ "grad_norm": 2.996493101119995,
77
+ "learning_rate": 0.0001934640522875817,
78
+ "loss": 0.1891,
79
+ "step": 100
80
+ },
81
+ {
82
+ "epoch": 0.13071895424836602,
83
+ "eval_accuracy": 0.9665236051502146,
84
+ "eval_loss": 0.10277536511421204,
85
+ "eval_runtime": 24.9127,
86
+ "eval_samples_per_second": 46.763,
87
+ "eval_steps_per_second": 5.86,
88
+ "step": 100
89
+ },
90
+ {
91
+ "epoch": 0.1437908496732026,
92
+ "grad_norm": 1.4748905897140503,
93
+ "learning_rate": 0.00019281045751633988,
94
+ "loss": 0.181,
95
+ "step": 110
96
+ },
97
+ {
98
+ "epoch": 0.1568627450980392,
99
+ "grad_norm": 2.811959743499756,
100
+ "learning_rate": 0.00019215686274509807,
101
+ "loss": 0.2343,
102
+ "step": 120
103
+ },
104
+ {
105
+ "epoch": 0.16993464052287582,
106
+ "grad_norm": 1.9155172109603882,
107
+ "learning_rate": 0.00019150326797385623,
108
+ "loss": 0.1792,
109
+ "step": 130
110
+ },
111
+ {
112
+ "epoch": 0.1830065359477124,
113
+ "grad_norm": 0.3125442564487457,
114
+ "learning_rate": 0.0001908496732026144,
115
+ "loss": 0.1898,
116
+ "step": 140
117
+ },
118
+ {
119
+ "epoch": 0.19607843137254902,
120
+ "grad_norm": 0.10497033596038818,
121
+ "learning_rate": 0.00019019607843137254,
122
+ "loss": 0.0737,
123
+ "step": 150
124
+ },
125
+ {
126
+ "epoch": 0.20915032679738563,
127
+ "grad_norm": 0.12402518093585968,
128
+ "learning_rate": 0.00018954248366013073,
129
+ "loss": 0.0448,
130
+ "step": 160
131
+ },
132
+ {
133
+ "epoch": 0.2222222222222222,
134
+ "grad_norm": 3.1576356887817383,
135
+ "learning_rate": 0.00018888888888888888,
136
+ "loss": 0.2302,
137
+ "step": 170
138
+ },
139
+ {
140
+ "epoch": 0.23529411764705882,
141
+ "grad_norm": 0.15159478783607483,
142
+ "learning_rate": 0.00018823529411764707,
143
+ "loss": 0.094,
144
+ "step": 180
145
+ },
146
+ {
147
+ "epoch": 0.24836601307189543,
148
+ "grad_norm": 0.0732378214597702,
149
+ "learning_rate": 0.00018758169934640523,
150
+ "loss": 0.1228,
151
+ "step": 190
152
+ },
153
+ {
154
+ "epoch": 0.26143790849673204,
155
+ "grad_norm": 3.1713297367095947,
156
+ "learning_rate": 0.0001869281045751634,
157
+ "loss": 0.2123,
158
+ "step": 200
159
+ },
160
+ {
161
+ "epoch": 0.26143790849673204,
162
+ "eval_accuracy": 0.9562231759656652,
163
+ "eval_loss": 0.12540382146835327,
164
+ "eval_runtime": 15.5185,
165
+ "eval_samples_per_second": 75.072,
166
+ "eval_steps_per_second": 9.408,
167
+ "step": 200
168
+ },
169
+ {
170
+ "epoch": 0.27450980392156865,
171
+ "grad_norm": 0.42199578881263733,
172
+ "learning_rate": 0.00018633986928104577,
173
+ "loss": 0.1039,
174
+ "step": 210
175
+ },
176
+ {
177
+ "epoch": 0.2875816993464052,
178
+ "grad_norm": 1.0121151208877563,
179
+ "learning_rate": 0.00018568627450980392,
180
+ "loss": 0.146,
181
+ "step": 220
182
+ },
183
+ {
184
+ "epoch": 0.3006535947712418,
185
+ "grad_norm": 0.16740572452545166,
186
+ "learning_rate": 0.0001850326797385621,
187
+ "loss": 0.1232,
188
+ "step": 230
189
+ },
190
+ {
191
+ "epoch": 0.3137254901960784,
192
+ "grad_norm": 0.7064481973648071,
193
+ "learning_rate": 0.0001843790849673203,
194
+ "loss": 0.2105,
195
+ "step": 240
196
+ },
197
+ {
198
+ "epoch": 0.32679738562091504,
199
+ "grad_norm": 0.23291558027267456,
200
+ "learning_rate": 0.00018372549019607842,
201
+ "loss": 0.1186,
202
+ "step": 250
203
+ },
204
+ {
205
+ "epoch": 0.33986928104575165,
206
+ "grad_norm": 0.3717576861381531,
207
+ "learning_rate": 0.0001830718954248366,
208
+ "loss": 0.1671,
209
+ "step": 260
210
+ },
211
+ {
212
+ "epoch": 0.35294117647058826,
213
+ "grad_norm": 2.2546300888061523,
214
+ "learning_rate": 0.00018241830065359477,
215
+ "loss": 0.1799,
216
+ "step": 270
217
+ },
218
+ {
219
+ "epoch": 0.3660130718954248,
220
+ "grad_norm": 1.6236902475357056,
221
+ "learning_rate": 0.00018176470588235295,
222
+ "loss": 0.5579,
223
+ "step": 280
224
+ },
225
+ {
226
+ "epoch": 0.3790849673202614,
227
+ "grad_norm": 0.9936646819114685,
228
+ "learning_rate": 0.0001811111111111111,
229
+ "loss": 0.1809,
230
+ "step": 290
231
+ },
232
+ {
233
+ "epoch": 0.39215686274509803,
234
+ "grad_norm": 4.444874286651611,
235
+ "learning_rate": 0.0001804575163398693,
236
+ "loss": 0.0536,
237
+ "step": 300
238
+ },
239
+ {
240
+ "epoch": 0.39215686274509803,
241
+ "eval_accuracy": 0.9690987124463519,
242
+ "eval_loss": 0.11418166011571884,
243
+ "eval_runtime": 14.394,
244
+ "eval_samples_per_second": 80.937,
245
+ "eval_steps_per_second": 10.143,
246
+ "step": 300
247
+ },
248
+ {
249
+ "epoch": 0.40522875816993464,
250
+ "grad_norm": 0.05227786302566528,
251
+ "learning_rate": 0.00017980392156862745,
252
+ "loss": 0.0896,
253
+ "step": 310
254
+ },
255
+ {
256
+ "epoch": 0.41830065359477125,
257
+ "grad_norm": 0.06666386872529984,
258
+ "learning_rate": 0.00017915032679738564,
259
+ "loss": 0.0992,
260
+ "step": 320
261
+ },
262
+ {
263
+ "epoch": 0.43137254901960786,
264
+ "grad_norm": 0.2057139277458191,
265
+ "learning_rate": 0.0001784967320261438,
266
+ "loss": 0.1225,
267
+ "step": 330
268
+ },
269
+ {
270
+ "epoch": 0.4444444444444444,
271
+ "grad_norm": 1.088228702545166,
272
+ "learning_rate": 0.00017784313725490195,
273
+ "loss": 0.0892,
274
+ "step": 340
275
+ },
276
+ {
277
+ "epoch": 0.45751633986928103,
278
+ "grad_norm": 0.7964250445365906,
279
+ "learning_rate": 0.00017718954248366014,
280
+ "loss": 0.1447,
281
+ "step": 350
282
+ },
283
+ {
284
+ "epoch": 0.47058823529411764,
285
+ "grad_norm": 0.7459284663200378,
286
+ "learning_rate": 0.0001765359477124183,
287
+ "loss": 0.1589,
288
+ "step": 360
289
+ },
290
+ {
291
+ "epoch": 0.48366013071895425,
292
+ "grad_norm": 0.40795373916625977,
293
+ "learning_rate": 0.00017588235294117648,
294
+ "loss": 0.2556,
295
+ "step": 370
296
+ },
297
+ {
298
+ "epoch": 0.49673202614379086,
299
+ "grad_norm": 2.7890219688415527,
300
+ "learning_rate": 0.00017522875816993464,
301
+ "loss": 0.1933,
302
+ "step": 380
303
+ },
304
+ {
305
+ "epoch": 0.5098039215686274,
306
+ "grad_norm": 1.0104824304580688,
307
+ "learning_rate": 0.00017457516339869283,
308
+ "loss": 0.1709,
309
+ "step": 390
310
+ },
311
+ {
312
+ "epoch": 0.5228758169934641,
313
+ "grad_norm": 0.8147939443588257,
314
+ "learning_rate": 0.00017392156862745098,
315
+ "loss": 0.0799,
316
+ "step": 400
317
+ },
318
+ {
319
+ "epoch": 0.5228758169934641,
320
+ "eval_accuracy": 0.9648068669527897,
321
+ "eval_loss": 0.1173361986875534,
322
+ "eval_runtime": 14.4858,
323
+ "eval_samples_per_second": 80.423,
324
+ "eval_steps_per_second": 10.079,
325
+ "step": 400
326
+ },
327
+ {
328
+ "epoch": 0.5359477124183006,
329
+ "grad_norm": 0.25372856855392456,
330
+ "learning_rate": 0.00017326797385620917,
331
+ "loss": 0.1718,
332
+ "step": 410
333
+ },
334
+ {
335
+ "epoch": 0.5490196078431373,
336
+ "grad_norm": 0.6395456194877625,
337
+ "learning_rate": 0.00017261437908496733,
338
+ "loss": 0.1003,
339
+ "step": 420
340
+ },
341
+ {
342
+ "epoch": 0.5620915032679739,
343
+ "grad_norm": 1.1706054210662842,
344
+ "learning_rate": 0.0001719607843137255,
345
+ "loss": 0.2023,
346
+ "step": 430
347
+ },
348
+ {
349
+ "epoch": 0.5751633986928104,
350
+ "grad_norm": 1.8857362270355225,
351
+ "learning_rate": 0.00017130718954248367,
352
+ "loss": 0.1288,
353
+ "step": 440
354
+ },
355
+ {
356
+ "epoch": 0.5882352941176471,
357
+ "grad_norm": 0.07465291023254395,
358
+ "learning_rate": 0.00017065359477124183,
359
+ "loss": 0.0458,
360
+ "step": 450
361
+ },
362
+ {
363
+ "epoch": 0.6013071895424836,
364
+ "grad_norm": 0.05449380353093147,
365
+ "learning_rate": 0.00017,
366
+ "loss": 0.112,
367
+ "step": 460
368
+ },
369
+ {
370
+ "epoch": 0.6143790849673203,
371
+ "grad_norm": 2.8383517265319824,
372
+ "learning_rate": 0.00016934640522875817,
373
+ "loss": 0.1434,
374
+ "step": 470
375
+ },
376
+ {
377
+ "epoch": 0.6274509803921569,
378
+ "grad_norm": 2.2419204711914062,
379
+ "learning_rate": 0.00016869281045751636,
380
+ "loss": 0.1108,
381
+ "step": 480
382
+ },
383
+ {
384
+ "epoch": 0.6405228758169934,
385
+ "grad_norm": 1.1179159879684448,
386
+ "learning_rate": 0.0001680392156862745,
387
+ "loss": 0.0392,
388
+ "step": 490
389
+ },
390
+ {
391
+ "epoch": 0.6535947712418301,
392
+ "grad_norm": 1.6863864660263062,
393
+ "learning_rate": 0.0001673856209150327,
394
+ "loss": 0.0537,
395
+ "step": 500
396
+ },
397
+ {
398
+ "epoch": 0.6535947712418301,
399
+ "eval_accuracy": 0.9742489270386266,
400
+ "eval_loss": 0.08559587597846985,
401
+ "eval_runtime": 15.4,
402
+ "eval_samples_per_second": 75.649,
403
+ "eval_steps_per_second": 9.481,
404
+ "step": 500
405
+ },
406
+ {
407
+ "epoch": 0.6666666666666666,
408
+ "grad_norm": 2.383035659790039,
409
+ "learning_rate": 0.00016673202614379086,
410
+ "loss": 0.2613,
411
+ "step": 510
412
+ },
413
+ {
414
+ "epoch": 0.6797385620915033,
415
+ "grad_norm": 0.652946412563324,
416
+ "learning_rate": 0.00016607843137254904,
417
+ "loss": 0.1499,
418
+ "step": 520
419
+ },
420
+ {
421
+ "epoch": 0.6928104575163399,
422
+ "grad_norm": 1.1430766582489014,
423
+ "learning_rate": 0.0001654248366013072,
424
+ "loss": 0.082,
425
+ "step": 530
426
+ },
427
+ {
428
+ "epoch": 0.7058823529411765,
429
+ "grad_norm": 0.6167986989021301,
430
+ "learning_rate": 0.00016477124183006536,
431
+ "loss": 0.017,
432
+ "step": 540
433
+ },
434
+ {
435
+ "epoch": 0.7189542483660131,
436
+ "grad_norm": 0.06140501797199249,
437
+ "learning_rate": 0.00016411764705882354,
438
+ "loss": 0.0802,
439
+ "step": 550
440
+ },
441
+ {
442
+ "epoch": 0.7320261437908496,
443
+ "grad_norm": 0.047364696860313416,
444
+ "learning_rate": 0.0001634640522875817,
445
+ "loss": 0.1183,
446
+ "step": 560
447
+ },
448
+ {
449
+ "epoch": 0.7450980392156863,
450
+ "grad_norm": 0.4764581024646759,
451
+ "learning_rate": 0.00016281045751633989,
452
+ "loss": 0.0451,
453
+ "step": 570
454
+ },
455
+ {
456
+ "epoch": 0.7581699346405228,
457
+ "grad_norm": 0.05738784372806549,
458
+ "learning_rate": 0.00016215686274509804,
459
+ "loss": 0.0137,
460
+ "step": 580
461
+ },
462
+ {
463
+ "epoch": 0.7712418300653595,
464
+ "grad_norm": 2.4237992763519287,
465
+ "learning_rate": 0.00016150326797385623,
466
+ "loss": 0.0707,
467
+ "step": 590
468
+ },
469
+ {
470
+ "epoch": 0.7843137254901961,
471
+ "grad_norm": 0.10018886625766754,
472
+ "learning_rate": 0.00016084967320261439,
473
+ "loss": 0.0911,
474
+ "step": 600
475
+ },
476
+ {
477
+ "epoch": 0.7843137254901961,
478
+ "eval_accuracy": 0.9424892703862661,
479
+ "eval_loss": 0.20054155588150024,
480
+ "eval_runtime": 14.614,
481
+ "eval_samples_per_second": 79.718,
482
+ "eval_steps_per_second": 9.99,
483
+ "step": 600
484
+ },
485
+ {
486
+ "epoch": 0.7973856209150327,
487
+ "grad_norm": 0.5596518516540527,
488
+ "learning_rate": 0.00016019607843137257,
489
+ "loss": 0.0513,
490
+ "step": 610
491
+ },
492
+ {
493
+ "epoch": 0.8104575163398693,
494
+ "grad_norm": 0.052388403564691544,
495
+ "learning_rate": 0.00015954248366013073,
496
+ "loss": 0.0463,
497
+ "step": 620
498
+ },
499
+ {
500
+ "epoch": 0.8235294117647058,
501
+ "grad_norm": 0.567359983921051,
502
+ "learning_rate": 0.0001588888888888889,
503
+ "loss": 0.0966,
504
+ "step": 630
505
+ },
506
+ {
507
+ "epoch": 0.8366013071895425,
508
+ "grad_norm": 2.3820440769195557,
509
+ "learning_rate": 0.00015823529411764707,
510
+ "loss": 0.0887,
511
+ "step": 640
512
+ },
513
+ {
514
+ "epoch": 0.8496732026143791,
515
+ "grad_norm": 0.05958991125226021,
516
+ "learning_rate": 0.00015758169934640523,
517
+ "loss": 0.1156,
518
+ "step": 650
519
+ },
520
+ {
521
+ "epoch": 0.8627450980392157,
522
+ "grad_norm": 0.09057465195655823,
523
+ "learning_rate": 0.00015692810457516342,
524
+ "loss": 0.0545,
525
+ "step": 660
526
+ },
527
+ {
528
+ "epoch": 0.8758169934640523,
529
+ "grad_norm": 0.6337572932243347,
530
+ "learning_rate": 0.00015627450980392157,
531
+ "loss": 0.0771,
532
+ "step": 670
533
+ },
534
+ {
535
+ "epoch": 0.8888888888888888,
536
+ "grad_norm": 0.7565958499908447,
537
+ "learning_rate": 0.00015562091503267976,
538
+ "loss": 0.1179,
539
+ "step": 680
540
+ },
541
+ {
542
+ "epoch": 0.9019607843137255,
543
+ "grad_norm": 3.6693177223205566,
544
+ "learning_rate": 0.00015496732026143792,
545
+ "loss": 0.2026,
546
+ "step": 690
547
+ },
548
+ {
549
+ "epoch": 0.9150326797385621,
550
+ "grad_norm": 0.13483676314353943,
551
+ "learning_rate": 0.0001543137254901961,
552
+ "loss": 0.1027,
553
+ "step": 700
554
+ },
555
+ {
556
+ "epoch": 0.9150326797385621,
557
+ "eval_accuracy": 0.9708154506437768,
558
+ "eval_loss": 0.08694365620613098,
559
+ "eval_runtime": 14.2568,
560
+ "eval_samples_per_second": 81.715,
561
+ "eval_steps_per_second": 10.241,
562
+ "step": 700
563
+ },
564
+ {
565
+ "epoch": 0.9281045751633987,
566
+ "grad_norm": 0.8454955220222473,
567
+ "learning_rate": 0.00015366013071895426,
568
+ "loss": 0.0757,
569
+ "step": 710
570
+ },
571
+ {
572
+ "epoch": 0.9411764705882353,
573
+ "grad_norm": 4.228348255157471,
574
+ "learning_rate": 0.00015300653594771242,
575
+ "loss": 0.0729,
576
+ "step": 720
577
+ },
578
+ {
579
+ "epoch": 0.954248366013072,
580
+ "grad_norm": 0.35448652505874634,
581
+ "learning_rate": 0.00015235294117647057,
582
+ "loss": 0.0605,
583
+ "step": 730
584
+ },
585
+ {
586
+ "epoch": 0.9673202614379085,
587
+ "grad_norm": 0.04271356761455536,
588
+ "learning_rate": 0.00015169934640522876,
589
+ "loss": 0.0747,
590
+ "step": 740
591
+ },
592
+ {
593
+ "epoch": 0.9803921568627451,
594
+ "grad_norm": 0.11471935361623764,
595
+ "learning_rate": 0.00015104575163398694,
596
+ "loss": 0.0345,
597
+ "step": 750
598
+ },
599
+ {
600
+ "epoch": 0.9934640522875817,
601
+ "grad_norm": 0.3417186439037323,
602
+ "learning_rate": 0.0001503921568627451,
603
+ "loss": 0.1179,
604
+ "step": 760
605
+ },
606
+ {
607
+ "epoch": 1.0065359477124183,
608
+ "grad_norm": 0.07955777645111084,
609
+ "learning_rate": 0.0001497385620915033,
610
+ "loss": 0.0091,
611
+ "step": 770
612
+ },
613
+ {
614
+ "epoch": 1.0196078431372548,
615
+ "grad_norm": 0.07078742980957031,
616
+ "learning_rate": 0.00014908496732026145,
617
+ "loss": 0.0229,
618
+ "step": 780
619
+ },
620
+ {
621
+ "epoch": 1.0326797385620916,
622
+ "grad_norm": 0.11356745660305023,
623
+ "learning_rate": 0.00014843137254901963,
624
+ "loss": 0.0351,
625
+ "step": 790
626
+ },
627
+ {
628
+ "epoch": 1.0457516339869282,
629
+ "grad_norm": 1.0611521005630493,
630
+ "learning_rate": 0.0001477777777777778,
631
+ "loss": 0.1011,
632
+ "step": 800
633
+ },
634
+ {
635
+ "epoch": 1.0457516339869282,
636
+ "eval_accuracy": 0.9630901287553648,
637
+ "eval_loss": 0.10628381371498108,
638
+ "eval_runtime": 14.4937,
639
+ "eval_samples_per_second": 80.38,
640
+ "eval_steps_per_second": 10.073,
641
+ "step": 800
642
+ },
643
+ {
644
+ "epoch": 1.0588235294117647,
645
+ "grad_norm": 0.5016999244689941,
646
+ "learning_rate": 0.00014712418300653597,
647
+ "loss": 0.0779,
648
+ "step": 810
649
+ },
650
+ {
651
+ "epoch": 1.0718954248366013,
652
+ "grad_norm": 0.2654229402542114,
653
+ "learning_rate": 0.0001464705882352941,
654
+ "loss": 0.1233,
655
+ "step": 820
656
+ },
657
+ {
658
+ "epoch": 1.0849673202614378,
659
+ "grad_norm": 0.16079501807689667,
660
+ "learning_rate": 0.0001458169934640523,
661
+ "loss": 0.07,
662
+ "step": 830
663
+ },
664
+ {
665
+ "epoch": 1.0980392156862746,
666
+ "grad_norm": 0.08982982486486435,
667
+ "learning_rate": 0.00014516339869281045,
668
+ "loss": 0.0525,
669
+ "step": 840
670
+ },
671
+ {
672
+ "epoch": 1.1111111111111112,
673
+ "grad_norm": 0.029190940782427788,
674
+ "learning_rate": 0.00014450980392156863,
675
+ "loss": 0.0844,
676
+ "step": 850
677
+ },
678
+ {
679
+ "epoch": 1.1241830065359477,
680
+ "grad_norm": 0.035515930503606796,
681
+ "learning_rate": 0.0001438562091503268,
682
+ "loss": 0.0231,
683
+ "step": 860
684
+ },
685
+ {
686
+ "epoch": 1.1372549019607843,
687
+ "grad_norm": 0.19514258205890656,
688
+ "learning_rate": 0.00014320261437908498,
689
+ "loss": 0.0435,
690
+ "step": 870
691
+ },
692
+ {
693
+ "epoch": 1.1503267973856208,
694
+ "grad_norm": 0.04219866171479225,
695
+ "learning_rate": 0.00014254901960784316,
696
+ "loss": 0.0517,
697
+ "step": 880
698
+ },
699
+ {
700
+ "epoch": 1.1633986928104576,
701
+ "grad_norm": 0.0360812209546566,
702
+ "learning_rate": 0.00014189542483660132,
703
+ "loss": 0.0311,
704
+ "step": 890
705
+ },
706
+ {
707
+ "epoch": 1.1764705882352942,
708
+ "grad_norm": 0.4207318425178528,
709
+ "learning_rate": 0.0001412418300653595,
710
+ "loss": 0.0717,
711
+ "step": 900
712
+ },
713
+ {
714
+ "epoch": 1.1764705882352942,
715
+ "eval_accuracy": 0.9587982832618026,
716
+ "eval_loss": 0.14242781698703766,
717
+ "eval_runtime": 15.8491,
718
+ "eval_samples_per_second": 73.506,
719
+ "eval_steps_per_second": 9.212,
720
+ "step": 900
721
+ },
722
+ {
723
+ "epoch": 1.1895424836601307,
724
+ "grad_norm": 2.1643173694610596,
725
+ "learning_rate": 0.00014058823529411763,
726
+ "loss": 0.0269,
727
+ "step": 910
728
+ },
729
+ {
730
+ "epoch": 1.2026143790849673,
731
+ "grad_norm": 0.03318187966942787,
732
+ "learning_rate": 0.00013993464052287582,
733
+ "loss": 0.0272,
734
+ "step": 920
735
+ },
736
+ {
737
+ "epoch": 1.215686274509804,
738
+ "grad_norm": 0.7758973240852356,
739
+ "learning_rate": 0.00013928104575163398,
740
+ "loss": 0.0633,
741
+ "step": 930
742
+ },
743
+ {
744
+ "epoch": 1.2287581699346406,
745
+ "grad_norm": 1.6854296922683716,
746
+ "learning_rate": 0.00013862745098039216,
747
+ "loss": 0.0678,
748
+ "step": 940
749
+ },
750
+ {
751
+ "epoch": 1.2418300653594772,
752
+ "grad_norm": 3.3588085174560547,
753
+ "learning_rate": 0.00013797385620915032,
754
+ "loss": 0.1088,
755
+ "step": 950
756
+ },
757
+ {
758
+ "epoch": 1.2549019607843137,
759
+ "grad_norm": 0.2561059892177582,
760
+ "learning_rate": 0.0001373202614379085,
761
+ "loss": 0.0657,
762
+ "step": 960
763
+ },
764
+ {
765
+ "epoch": 1.2679738562091503,
766
+ "grad_norm": 1.3019442558288574,
767
+ "learning_rate": 0.00013666666666666666,
768
+ "loss": 0.0405,
769
+ "step": 970
770
+ },
771
+ {
772
+ "epoch": 1.2810457516339868,
773
+ "grad_norm": 0.23876748979091644,
774
+ "learning_rate": 0.00013601307189542485,
775
+ "loss": 0.0277,
776
+ "step": 980
777
+ },
778
+ {
779
+ "epoch": 1.2941176470588236,
780
+ "grad_norm": 0.21840398013591766,
781
+ "learning_rate": 0.00013535947712418303,
782
+ "loss": 0.0356,
783
+ "step": 990
784
+ },
785
+ {
786
+ "epoch": 1.3071895424836601,
787
+ "grad_norm": 0.05377864092588425,
788
+ "learning_rate": 0.0001347058823529412,
789
+ "loss": 0.0605,
790
+ "step": 1000
791
+ },
792
+ {
793
+ "epoch": 1.3071895424836601,
794
+ "eval_accuracy": 0.9648068669527897,
795
+ "eval_loss": 0.1524645835161209,
796
+ "eval_runtime": 14.9355,
797
+ "eval_samples_per_second": 78.002,
798
+ "eval_steps_per_second": 9.775,
799
+ "step": 1000
800
+ },
801
+ {
802
+ "epoch": 1.3202614379084967,
803
+ "grad_norm": 0.13815192878246307,
804
+ "learning_rate": 0.00013405228758169935,
805
+ "loss": 0.0715,
806
+ "step": 1010
807
+ },
808
+ {
809
+ "epoch": 1.3333333333333333,
810
+ "grad_norm": 3.1915783882141113,
811
+ "learning_rate": 0.0001333986928104575,
812
+ "loss": 0.0762,
813
+ "step": 1020
814
+ },
815
+ {
816
+ "epoch": 1.34640522875817,
817
+ "grad_norm": 0.12432447075843811,
818
+ "learning_rate": 0.0001327450980392157,
819
+ "loss": 0.0514,
820
+ "step": 1030
821
+ },
822
+ {
823
+ "epoch": 1.3594771241830066,
824
+ "grad_norm": 0.029387619346380234,
825
+ "learning_rate": 0.00013209150326797385,
826
+ "loss": 0.0094,
827
+ "step": 1040
828
+ },
829
+ {
830
+ "epoch": 1.3725490196078431,
831
+ "grad_norm": 0.02788469009101391,
832
+ "learning_rate": 0.00013143790849673204,
833
+ "loss": 0.0128,
834
+ "step": 1050
835
+ },
836
+ {
837
+ "epoch": 1.3856209150326797,
838
+ "grad_norm": 0.38828742504119873,
839
+ "learning_rate": 0.0001307843137254902,
840
+ "loss": 0.1181,
841
+ "step": 1060
842
+ },
843
+ {
844
+ "epoch": 1.3986928104575163,
845
+ "grad_norm": 0.029526783153414726,
846
+ "learning_rate": 0.00013013071895424838,
847
+ "loss": 0.0701,
848
+ "step": 1070
849
+ },
850
+ {
851
+ "epoch": 1.4117647058823528,
852
+ "grad_norm": 8.863903999328613,
853
+ "learning_rate": 0.00012947712418300654,
854
+ "loss": 0.0349,
855
+ "step": 1080
856
+ },
857
+ {
858
+ "epoch": 1.4248366013071896,
859
+ "grad_norm": 0.05002022534608841,
860
+ "learning_rate": 0.00012882352941176472,
861
+ "loss": 0.0111,
862
+ "step": 1090
863
+ },
864
+ {
865
+ "epoch": 1.4379084967320261,
866
+ "grad_norm": 0.04968211427330971,
867
+ "learning_rate": 0.00012816993464052288,
868
+ "loss": 0.0573,
869
+ "step": 1100
870
+ },
871
+ {
872
+ "epoch": 1.4379084967320261,
873
+ "eval_accuracy": 0.9699570815450643,
874
+ "eval_loss": 0.09700144827365875,
875
+ "eval_runtime": 14.9347,
876
+ "eval_samples_per_second": 78.006,
877
+ "eval_steps_per_second": 9.776,
878
+ "step": 1100
879
+ },
880
+ {
881
+ "epoch": 1.4509803921568627,
882
+ "grad_norm": 0.021018337458372116,
883
+ "learning_rate": 0.00012751633986928104,
884
+ "loss": 0.0338,
885
+ "step": 1110
886
+ },
887
+ {
888
+ "epoch": 1.4640522875816995,
889
+ "grad_norm": 0.05960996448993683,
890
+ "learning_rate": 0.00012686274509803922,
891
+ "loss": 0.0577,
892
+ "step": 1120
893
+ },
894
+ {
895
+ "epoch": 1.477124183006536,
896
+ "grad_norm": 0.02618427947163582,
897
+ "learning_rate": 0.00012620915032679738,
898
+ "loss": 0.055,
899
+ "step": 1130
900
+ },
901
+ {
902
+ "epoch": 1.4901960784313726,
903
+ "grad_norm": 0.05656788498163223,
904
+ "learning_rate": 0.00012555555555555557,
905
+ "loss": 0.1561,
906
+ "step": 1140
907
+ },
908
+ {
909
+ "epoch": 1.5032679738562091,
910
+ "grad_norm": 0.046731386333703995,
911
+ "learning_rate": 0.00012490196078431372,
912
+ "loss": 0.0314,
913
+ "step": 1150
914
+ },
915
+ {
916
+ "epoch": 1.5163398692810457,
917
+ "grad_norm": 0.06606756895780563,
918
+ "learning_rate": 0.0001242483660130719,
919
+ "loss": 0.0359,
920
+ "step": 1160
921
+ },
922
+ {
923
+ "epoch": 1.5294117647058822,
924
+ "grad_norm": 0.065465547144413,
925
+ "learning_rate": 0.00012359477124183007,
926
+ "loss": 0.0312,
927
+ "step": 1170
928
+ },
929
+ {
930
+ "epoch": 1.5424836601307188,
931
+ "grad_norm": 0.05767370015382767,
932
+ "learning_rate": 0.00012294117647058825,
933
+ "loss": 0.0548,
934
+ "step": 1180
935
+ },
936
+ {
937
+ "epoch": 1.5555555555555556,
938
+ "grad_norm": 0.02790243923664093,
939
+ "learning_rate": 0.0001222875816993464,
940
+ "loss": 0.0483,
941
+ "step": 1190
942
+ },
943
+ {
944
+ "epoch": 1.5686274509803921,
945
+ "grad_norm": 0.03534754365682602,
946
+ "learning_rate": 0.00012163398692810457,
947
+ "loss": 0.024,
948
+ "step": 1200
949
+ },
950
+ {
951
+ "epoch": 1.5686274509803921,
952
+ "eval_accuracy": 0.975107296137339,
953
+ "eval_loss": 0.08669988811016083,
954
+ "eval_runtime": 16.5277,
955
+ "eval_samples_per_second": 70.488,
956
+ "eval_steps_per_second": 8.834,
957
+ "step": 1200
958
+ },
959
+ {
960
+ "epoch": 1.581699346405229,
961
+ "grad_norm": 0.03731567785143852,
962
+ "learning_rate": 0.00012098039215686274,
963
+ "loss": 0.0143,
964
+ "step": 1210
965
+ },
966
+ {
967
+ "epoch": 1.5947712418300655,
968
+ "grad_norm": 0.05173814296722412,
969
+ "learning_rate": 0.00012032679738562091,
970
+ "loss": 0.0096,
971
+ "step": 1220
972
+ },
973
+ {
974
+ "epoch": 1.607843137254902,
975
+ "grad_norm": 0.07455731183290482,
976
+ "learning_rate": 0.00011967320261437908,
977
+ "loss": 0.0251,
978
+ "step": 1230
979
+ },
980
+ {
981
+ "epoch": 1.6209150326797386,
982
+ "grad_norm": 0.13736297190189362,
983
+ "learning_rate": 0.00011901960784313725,
984
+ "loss": 0.011,
985
+ "step": 1240
986
+ },
987
+ {
988
+ "epoch": 1.6339869281045751,
989
+ "grad_norm": 2.5091629028320312,
990
+ "learning_rate": 0.00011836601307189544,
991
+ "loss": 0.0372,
992
+ "step": 1250
993
+ },
994
+ {
995
+ "epoch": 1.6470588235294117,
996
+ "grad_norm": 0.048637136816978455,
997
+ "learning_rate": 0.00011771241830065361,
998
+ "loss": 0.0373,
999
+ "step": 1260
1000
+ },
1001
+ {
1002
+ "epoch": 1.6601307189542482,
1003
+ "grad_norm": 0.019804753363132477,
1004
+ "learning_rate": 0.00011705882352941178,
1005
+ "loss": 0.0171,
1006
+ "step": 1270
1007
+ },
1008
+ {
1009
+ "epoch": 1.673202614379085,
1010
+ "grad_norm": 2.910708427429199,
1011
+ "learning_rate": 0.00011640522875816995,
1012
+ "loss": 0.0118,
1013
+ "step": 1280
1014
+ },
1015
+ {
1016
+ "epoch": 1.6862745098039216,
1017
+ "grad_norm": 0.02344467304646969,
1018
+ "learning_rate": 0.00011575163398692812,
1019
+ "loss": 0.0259,
1020
+ "step": 1290
1021
+ },
1022
+ {
1023
+ "epoch": 1.6993464052287581,
1024
+ "grad_norm": 0.0272055696696043,
1025
+ "learning_rate": 0.00011509803921568627,
1026
+ "loss": 0.0056,
1027
+ "step": 1300
1028
+ },
1029
+ {
1030
+ "epoch": 1.6993464052287581,
1031
+ "eval_accuracy": 0.9759656652360515,
1032
+ "eval_loss": 0.08883309364318848,
1033
+ "eval_runtime": 14.3413,
1034
+ "eval_samples_per_second": 81.234,
1035
+ "eval_steps_per_second": 10.18,
1036
+ "step": 1300
1037
+ },
1038
+ {
1039
+ "epoch": 1.712418300653595,
1040
+ "grad_norm": 0.12058671563863754,
1041
+ "learning_rate": 0.00011444444444444444,
1042
+ "loss": 0.0078,
1043
+ "step": 1310
1044
+ },
1045
+ {
1046
+ "epoch": 1.7254901960784315,
1047
+ "grad_norm": 0.012682071886956692,
1048
+ "learning_rate": 0.00011379084967320261,
1049
+ "loss": 0.014,
1050
+ "step": 1320
1051
+ },
1052
+ {
1053
+ "epoch": 1.738562091503268,
1054
+ "grad_norm": 0.011953895911574364,
1055
+ "learning_rate": 0.00011313725490196078,
1056
+ "loss": 0.0195,
1057
+ "step": 1330
1058
+ },
1059
+ {
1060
+ "epoch": 1.7516339869281046,
1061
+ "grad_norm": 4.999667167663574,
1062
+ "learning_rate": 0.00011248366013071895,
1063
+ "loss": 0.0472,
1064
+ "step": 1340
1065
+ },
1066
+ {
1067
+ "epoch": 1.7647058823529411,
1068
+ "grad_norm": 0.011710714548826218,
1069
+ "learning_rate": 0.00011183006535947713,
1070
+ "loss": 0.0014,
1071
+ "step": 1350
1072
+ },
1073
+ {
1074
+ "epoch": 1.7777777777777777,
1075
+ "grad_norm": 2.8777430057525635,
1076
+ "learning_rate": 0.0001111764705882353,
1077
+ "loss": 0.0422,
1078
+ "step": 1360
1079
+ },
1080
+ {
1081
+ "epoch": 1.7908496732026142,
1082
+ "grad_norm": 0.15594060719013214,
1083
+ "learning_rate": 0.00011052287581699348,
1084
+ "loss": 0.0046,
1085
+ "step": 1370
1086
+ },
1087
+ {
1088
+ "epoch": 1.803921568627451,
1089
+ "grad_norm": 0.01740155927836895,
1090
+ "learning_rate": 0.00010986928104575165,
1091
+ "loss": 0.015,
1092
+ "step": 1380
1093
+ },
1094
+ {
1095
+ "epoch": 1.8169934640522876,
1096
+ "grad_norm": 0.010606693103909492,
1097
+ "learning_rate": 0.0001092156862745098,
1098
+ "loss": 0.022,
1099
+ "step": 1390
1100
+ },
1101
+ {
1102
+ "epoch": 1.8300653594771243,
1103
+ "grad_norm": 0.02794831059873104,
1104
+ "learning_rate": 0.00010856209150326797,
1105
+ "loss": 0.0051,
1106
+ "step": 1400
1107
+ },
1108
+ {
1109
+ "epoch": 1.8300653594771243,
1110
+ "eval_accuracy": 0.976824034334764,
1111
+ "eval_loss": 0.10543067008256912,
1112
+ "eval_runtime": 16.6018,
1113
+ "eval_samples_per_second": 70.173,
1114
+ "eval_steps_per_second": 8.794,
1115
+ "step": 1400
1116
+ },
1117
+ {
1118
+ "epoch": 1.843137254901961,
1119
+ "grad_norm": 0.038915157318115234,
1120
+ "learning_rate": 0.00010790849673202614,
1121
+ "loss": 0.0068,
1122
+ "step": 1410
1123
+ },
1124
+ {
1125
+ "epoch": 1.8562091503267975,
1126
+ "grad_norm": 0.01105284970253706,
1127
+ "learning_rate": 0.00010725490196078431,
1128
+ "loss": 0.0011,
1129
+ "step": 1420
1130
+ },
1131
+ {
1132
+ "epoch": 1.869281045751634,
1133
+ "grad_norm": 0.00978641677647829,
1134
+ "learning_rate": 0.00010660130718954248,
1135
+ "loss": 0.0286,
1136
+ "step": 1430
1137
+ },
1138
+ {
1139
+ "epoch": 1.8823529411764706,
1140
+ "grad_norm": 2.092639923095703,
1141
+ "learning_rate": 0.00010594771241830066,
1142
+ "loss": 0.0618,
1143
+ "step": 1440
1144
+ },
1145
+ {
1146
+ "epoch": 1.8954248366013071,
1147
+ "grad_norm": 0.11766193807125092,
1148
+ "learning_rate": 0.00010529411764705883,
1149
+ "loss": 0.0048,
1150
+ "step": 1450
1151
+ },
1152
+ {
1153
+ "epoch": 1.9084967320261437,
1154
+ "grad_norm": 0.013952341862022877,
1155
+ "learning_rate": 0.000104640522875817,
1156
+ "loss": 0.0041,
1157
+ "step": 1460
1158
+ },
1159
+ {
1160
+ "epoch": 1.9215686274509802,
1161
+ "grad_norm": 0.019551385194063187,
1162
+ "learning_rate": 0.00010398692810457517,
1163
+ "loss": 0.0297,
1164
+ "step": 1470
1165
+ },
1166
+ {
1167
+ "epoch": 1.934640522875817,
1168
+ "grad_norm": 0.020047994330525398,
1169
+ "learning_rate": 0.00010333333333333334,
1170
+ "loss": 0.0029,
1171
+ "step": 1480
1172
+ },
1173
+ {
1174
+ "epoch": 1.9477124183006536,
1175
+ "grad_norm": 1.58188796043396,
1176
+ "learning_rate": 0.0001026797385620915,
1177
+ "loss": 0.0436,
1178
+ "step": 1490
1179
+ },
1180
+ {
1181
+ "epoch": 1.9607843137254903,
1182
+ "grad_norm": 5.982904434204102,
1183
+ "learning_rate": 0.00010202614379084967,
1184
+ "loss": 0.063,
1185
+ "step": 1500
1186
+ },
1187
+ {
1188
+ "epoch": 1.9607843137254903,
1189
+ "eval_accuracy": 0.9570815450643777,
1190
+ "eval_loss": 0.18960346281528473,
1191
+ "eval_runtime": 14.9121,
1192
+ "eval_samples_per_second": 78.124,
1193
+ "eval_steps_per_second": 9.791,
1194
+ "step": 1500
1195
+ },
1196
+ {
1197
+ "epoch": 1.973856209150327,
1198
+ "grad_norm": 0.1691681146621704,
1199
+ "learning_rate": 0.00010137254901960784,
1200
+ "loss": 0.0505,
1201
+ "step": 1510
1202
+ },
1203
+ {
1204
+ "epoch": 1.9869281045751634,
1205
+ "grad_norm": 0.16387327015399933,
1206
+ "learning_rate": 0.00010071895424836601,
1207
+ "loss": 0.0139,
1208
+ "step": 1520
1209
+ },
1210
+ {
1211
+ "epoch": 2.0,
1212
+ "grad_norm": 0.01866912469267845,
1213
+ "learning_rate": 0.00010006535947712419,
1214
+ "loss": 0.0318,
1215
+ "step": 1530
1216
+ },
1217
+ {
1218
+ "epoch": 2.0130718954248366,
1219
+ "grad_norm": 0.43450427055358887,
1220
+ "learning_rate": 9.941176470588236e-05,
1221
+ "loss": 0.0491,
1222
+ "step": 1540
1223
+ },
1224
+ {
1225
+ "epoch": 2.026143790849673,
1226
+ "grad_norm": 0.12364810705184937,
1227
+ "learning_rate": 9.875816993464053e-05,
1228
+ "loss": 0.0289,
1229
+ "step": 1550
1230
+ },
1231
+ {
1232
+ "epoch": 2.0392156862745097,
1233
+ "grad_norm": 0.008522446267306805,
1234
+ "learning_rate": 9.810457516339869e-05,
1235
+ "loss": 0.0225,
1236
+ "step": 1560
1237
+ },
1238
+ {
1239
+ "epoch": 2.052287581699346,
1240
+ "grad_norm": 0.047463927417993546,
1241
+ "learning_rate": 9.745098039215686e-05,
1242
+ "loss": 0.0038,
1243
+ "step": 1570
1244
+ },
1245
+ {
1246
+ "epoch": 2.065359477124183,
1247
+ "grad_norm": 0.008024625480175018,
1248
+ "learning_rate": 9.679738562091504e-05,
1249
+ "loss": 0.0014,
1250
+ "step": 1580
1251
+ },
1252
+ {
1253
+ "epoch": 2.0784313725490198,
1254
+ "grad_norm": 0.008238660171627998,
1255
+ "learning_rate": 9.614379084967322e-05,
1256
+ "loss": 0.0084,
1257
+ "step": 1590
1258
+ },
1259
+ {
1260
+ "epoch": 2.0915032679738563,
1261
+ "grad_norm": 0.007536654360592365,
1262
+ "learning_rate": 9.549019607843139e-05,
1263
+ "loss": 0.002,
1264
+ "step": 1600
1265
+ },
1266
+ {
1267
+ "epoch": 2.0915032679738563,
1268
+ "eval_accuracy": 0.9587982832618026,
1269
+ "eval_loss": 0.18859457969665527,
1270
+ "eval_runtime": 15.741,
1271
+ "eval_samples_per_second": 74.01,
1272
+ "eval_steps_per_second": 9.275,
1273
+ "step": 1600
1274
+ },
1275
+ {
1276
+ "epoch": 2.104575163398693,
1277
+ "grad_norm": 0.012532038614153862,
1278
+ "learning_rate": 9.483660130718954e-05,
1279
+ "loss": 0.0028,
1280
+ "step": 1610
1281
+ },
1282
+ {
1283
+ "epoch": 2.1176470588235294,
1284
+ "grad_norm": 0.006995632313191891,
1285
+ "learning_rate": 9.418300653594772e-05,
1286
+ "loss": 0.0012,
1287
+ "step": 1620
1288
+ },
1289
+ {
1290
+ "epoch": 2.130718954248366,
1291
+ "grad_norm": 0.007673881947994232,
1292
+ "learning_rate": 9.352941176470589e-05,
1293
+ "loss": 0.0008,
1294
+ "step": 1630
1295
+ },
1296
+ {
1297
+ "epoch": 2.1437908496732025,
1298
+ "grad_norm": 0.008022445254027843,
1299
+ "learning_rate": 9.287581699346406e-05,
1300
+ "loss": 0.0625,
1301
+ "step": 1640
1302
+ },
1303
+ {
1304
+ "epoch": 2.156862745098039,
1305
+ "grad_norm": 0.008978744968771935,
1306
+ "learning_rate": 9.222222222222223e-05,
1307
+ "loss": 0.0328,
1308
+ "step": 1650
1309
+ },
1310
+ {
1311
+ "epoch": 2.1699346405228757,
1312
+ "grad_norm": 0.008565125055611134,
1313
+ "learning_rate": 9.156862745098039e-05,
1314
+ "loss": 0.0429,
1315
+ "step": 1660
1316
+ },
1317
+ {
1318
+ "epoch": 2.183006535947712,
1319
+ "grad_norm": 0.009053260087966919,
1320
+ "learning_rate": 9.091503267973856e-05,
1321
+ "loss": 0.0227,
1322
+ "step": 1670
1323
+ },
1324
+ {
1325
+ "epoch": 2.196078431372549,
1326
+ "grad_norm": 0.008361272513866425,
1327
+ "learning_rate": 9.026143790849673e-05,
1328
+ "loss": 0.0011,
1329
+ "step": 1680
1330
+ },
1331
+ {
1332
+ "epoch": 2.2091503267973858,
1333
+ "grad_norm": 0.008024726063013077,
1334
+ "learning_rate": 8.96078431372549e-05,
1335
+ "loss": 0.001,
1336
+ "step": 1690
1337
+ },
1338
+ {
1339
+ "epoch": 2.2222222222222223,
1340
+ "grad_norm": 0.00964928325265646,
1341
+ "learning_rate": 8.895424836601307e-05,
1342
+ "loss": 0.005,
1343
+ "step": 1700
1344
+ },
1345
+ {
1346
+ "epoch": 2.2222222222222223,
1347
+ "eval_accuracy": 0.9733905579399141,
1348
+ "eval_loss": 0.11838679015636444,
1349
+ "eval_runtime": 15.5963,
1350
+ "eval_samples_per_second": 74.697,
1351
+ "eval_steps_per_second": 9.361,
1352
+ "step": 1700
1353
+ },
1354
+ {
1355
+ "epoch": 2.235294117647059,
1356
+ "grad_norm": 0.00880915205925703,
1357
+ "learning_rate": 8.830065359477125e-05,
1358
+ "loss": 0.0011,
1359
+ "step": 1710
1360
+ },
1361
+ {
1362
+ "epoch": 2.2483660130718954,
1363
+ "grad_norm": 0.655288577079773,
1364
+ "learning_rate": 8.764705882352942e-05,
1365
+ "loss": 0.0019,
1366
+ "step": 1720
1367
+ },
1368
+ {
1369
+ "epoch": 2.261437908496732,
1370
+ "grad_norm": 0.0070320917293429375,
1371
+ "learning_rate": 8.699346405228759e-05,
1372
+ "loss": 0.0009,
1373
+ "step": 1730
1374
+ },
1375
+ {
1376
+ "epoch": 2.2745098039215685,
1377
+ "grad_norm": 0.006975400261580944,
1378
+ "learning_rate": 8.633986928104576e-05,
1379
+ "loss": 0.0017,
1380
+ "step": 1740
1381
+ },
1382
+ {
1383
+ "epoch": 2.287581699346405,
1384
+ "grad_norm": 0.007141157519072294,
1385
+ "learning_rate": 8.568627450980392e-05,
1386
+ "loss": 0.001,
1387
+ "step": 1750
1388
+ },
1389
+ {
1390
+ "epoch": 2.3006535947712417,
1391
+ "grad_norm": 0.007537210825830698,
1392
+ "learning_rate": 8.503267973856209e-05,
1393
+ "loss": 0.0012,
1394
+ "step": 1760
1395
+ },
1396
+ {
1397
+ "epoch": 2.313725490196078,
1398
+ "grad_norm": 0.764282763004303,
1399
+ "learning_rate": 8.437908496732026e-05,
1400
+ "loss": 0.009,
1401
+ "step": 1770
1402
+ },
1403
+ {
1404
+ "epoch": 2.326797385620915,
1405
+ "grad_norm": 0.009738568216562271,
1406
+ "learning_rate": 8.372549019607843e-05,
1407
+ "loss": 0.0899,
1408
+ "step": 1780
1409
+ },
1410
+ {
1411
+ "epoch": 2.3398692810457518,
1412
+ "grad_norm": 0.05287899821996689,
1413
+ "learning_rate": 8.30718954248366e-05,
1414
+ "loss": 0.0027,
1415
+ "step": 1790
1416
+ },
1417
+ {
1418
+ "epoch": 2.3529411764705883,
1419
+ "grad_norm": 0.0178971029818058,
1420
+ "learning_rate": 8.241830065359478e-05,
1421
+ "loss": 0.0083,
1422
+ "step": 1800
1423
+ },
1424
+ {
1425
+ "epoch": 2.3529411764705883,
1426
+ "eval_accuracy": 0.9759656652360515,
1427
+ "eval_loss": 0.10839741677045822,
1428
+ "eval_runtime": 14.7686,
1429
+ "eval_samples_per_second": 78.884,
1430
+ "eval_steps_per_second": 9.886,
1431
+ "step": 1800
1432
+ },
1433
+ {
1434
+ "epoch": 2.366013071895425,
1435
+ "grad_norm": 0.01301596313714981,
1436
+ "learning_rate": 8.176470588235295e-05,
1437
+ "loss": 0.0016,
1438
+ "step": 1810
1439
+ },
1440
+ {
1441
+ "epoch": 2.3790849673202614,
1442
+ "grad_norm": 0.01324276439845562,
1443
+ "learning_rate": 8.111111111111112e-05,
1444
+ "loss": 0.0135,
1445
+ "step": 1820
1446
+ },
1447
+ {
1448
+ "epoch": 2.392156862745098,
1449
+ "grad_norm": 0.013326325453817844,
1450
+ "learning_rate": 8.045751633986929e-05,
1451
+ "loss": 0.0334,
1452
+ "step": 1830
1453
+ },
1454
+ {
1455
+ "epoch": 2.4052287581699345,
1456
+ "grad_norm": 0.011139987967908382,
1457
+ "learning_rate": 7.980392156862746e-05,
1458
+ "loss": 0.0014,
1459
+ "step": 1840
1460
+ },
1461
+ {
1462
+ "epoch": 2.418300653594771,
1463
+ "grad_norm": 0.0077317566610872746,
1464
+ "learning_rate": 7.915032679738562e-05,
1465
+ "loss": 0.0011,
1466
+ "step": 1850
1467
+ },
1468
+ {
1469
+ "epoch": 2.431372549019608,
1470
+ "grad_norm": 0.03215256333351135,
1471
+ "learning_rate": 7.849673202614379e-05,
1472
+ "loss": 0.0354,
1473
+ "step": 1860
1474
+ },
1475
+ {
1476
+ "epoch": 2.4444444444444446,
1477
+ "grad_norm": 0.009134942665696144,
1478
+ "learning_rate": 7.784313725490196e-05,
1479
+ "loss": 0.0018,
1480
+ "step": 1870
1481
+ },
1482
+ {
1483
+ "epoch": 2.457516339869281,
1484
+ "grad_norm": 0.008007184602320194,
1485
+ "learning_rate": 7.718954248366013e-05,
1486
+ "loss": 0.001,
1487
+ "step": 1880
1488
+ },
1489
+ {
1490
+ "epoch": 2.4705882352941178,
1491
+ "grad_norm": 0.015561921522021294,
1492
+ "learning_rate": 7.653594771241829e-05,
1493
+ "loss": 0.0009,
1494
+ "step": 1890
1495
+ },
1496
+ {
1497
+ "epoch": 2.4836601307189543,
1498
+ "grad_norm": 0.04056130349636078,
1499
+ "learning_rate": 7.588235294117648e-05,
1500
+ "loss": 0.0013,
1501
+ "step": 1900
1502
+ },
1503
+ {
1504
+ "epoch": 2.4836601307189543,
1505
+ "eval_accuracy": 0.9776824034334763,
1506
+ "eval_loss": 0.0902954488992691,
1507
+ "eval_runtime": 14.9792,
1508
+ "eval_samples_per_second": 77.774,
1509
+ "eval_steps_per_second": 9.747,
1510
+ "step": 1900
1511
+ },
1512
+ {
1513
+ "epoch": 2.496732026143791,
1514
+ "grad_norm": 0.00714436499401927,
1515
+ "learning_rate": 7.522875816993465e-05,
1516
+ "loss": 0.0434,
1517
+ "step": 1910
1518
+ },
1519
+ {
1520
+ "epoch": 2.5098039215686274,
1521
+ "grad_norm": 0.009256112389266491,
1522
+ "learning_rate": 7.457516339869282e-05,
1523
+ "loss": 0.001,
1524
+ "step": 1920
1525
+ },
1526
+ {
1527
+ "epoch": 2.522875816993464,
1528
+ "grad_norm": 0.016420746222138405,
1529
+ "learning_rate": 7.392156862745099e-05,
1530
+ "loss": 0.0282,
1531
+ "step": 1930
1532
+ },
1533
+ {
1534
+ "epoch": 2.5359477124183005,
1535
+ "grad_norm": 0.013019098900258541,
1536
+ "learning_rate": 7.326797385620915e-05,
1537
+ "loss": 0.0017,
1538
+ "step": 1940
1539
+ },
1540
+ {
1541
+ "epoch": 2.549019607843137,
1542
+ "grad_norm": 3.201817274093628,
1543
+ "learning_rate": 7.261437908496732e-05,
1544
+ "loss": 0.0274,
1545
+ "step": 1950
1546
+ },
1547
+ {
1548
+ "epoch": 2.5620915032679736,
1549
+ "grad_norm": 0.009792659431695938,
1550
+ "learning_rate": 7.196078431372549e-05,
1551
+ "loss": 0.0011,
1552
+ "step": 1960
1553
+ },
1554
+ {
1555
+ "epoch": 2.57516339869281,
1556
+ "grad_norm": 0.010890874080359936,
1557
+ "learning_rate": 7.130718954248366e-05,
1558
+ "loss": 0.0329,
1559
+ "step": 1970
1560
+ },
1561
+ {
1562
+ "epoch": 2.588235294117647,
1563
+ "grad_norm": 0.02582782320678234,
1564
+ "learning_rate": 7.065359477124184e-05,
1565
+ "loss": 0.0024,
1566
+ "step": 1980
1567
+ },
1568
+ {
1569
+ "epoch": 2.6013071895424837,
1570
+ "grad_norm": 4.812190055847168,
1571
+ "learning_rate": 7e-05,
1572
+ "loss": 0.0134,
1573
+ "step": 1990
1574
+ },
1575
+ {
1576
+ "epoch": 2.6143790849673203,
1577
+ "grad_norm": 0.05194231495261192,
1578
+ "learning_rate": 6.934640522875817e-05,
1579
+ "loss": 0.0298,
1580
+ "step": 2000
1581
+ },
1582
+ {
1583
+ "epoch": 2.6143790849673203,
1584
+ "eval_accuracy": 0.9733905579399141,
1585
+ "eval_loss": 0.10234156996011734,
1586
+ "eval_runtime": 15.3135,
1587
+ "eval_samples_per_second": 76.077,
1588
+ "eval_steps_per_second": 9.534,
1589
+ "step": 2000
1590
+ },
1591
+ {
1592
+ "epoch": 2.627450980392157,
1593
+ "grad_norm": 0.01054445281624794,
1594
+ "learning_rate": 6.869281045751634e-05,
1595
+ "loss": 0.0021,
1596
+ "step": 2010
1597
+ },
1598
+ {
1599
+ "epoch": 2.6405228758169934,
1600
+ "grad_norm": 0.010878126136958599,
1601
+ "learning_rate": 6.803921568627452e-05,
1602
+ "loss": 0.0015,
1603
+ "step": 2020
1604
+ },
1605
+ {
1606
+ "epoch": 2.65359477124183,
1607
+ "grad_norm": 0.032522134482860565,
1608
+ "learning_rate": 6.73856209150327e-05,
1609
+ "loss": 0.035,
1610
+ "step": 2030
1611
+ },
1612
+ {
1613
+ "epoch": 2.6666666666666665,
1614
+ "grad_norm": 0.02457094006240368,
1615
+ "learning_rate": 6.673202614379085e-05,
1616
+ "loss": 0.0032,
1617
+ "step": 2040
1618
+ },
1619
+ {
1620
+ "epoch": 2.6797385620915035,
1621
+ "grad_norm": 0.014886660501360893,
1622
+ "learning_rate": 6.607843137254902e-05,
1623
+ "loss": 0.003,
1624
+ "step": 2050
1625
+ },
1626
+ {
1627
+ "epoch": 2.69281045751634,
1628
+ "grad_norm": 0.020414328202605247,
1629
+ "learning_rate": 6.54248366013072e-05,
1630
+ "loss": 0.0017,
1631
+ "step": 2060
1632
+ },
1633
+ {
1634
+ "epoch": 2.7058823529411766,
1635
+ "grad_norm": 0.009341062046587467,
1636
+ "learning_rate": 6.477124183006537e-05,
1637
+ "loss": 0.0013,
1638
+ "step": 2070
1639
+ },
1640
+ {
1641
+ "epoch": 2.718954248366013,
1642
+ "grad_norm": 0.0061281765811145306,
1643
+ "learning_rate": 6.411764705882354e-05,
1644
+ "loss": 0.0009,
1645
+ "step": 2080
1646
+ },
1647
+ {
1648
+ "epoch": 2.7320261437908497,
1649
+ "grad_norm": 0.009815551340579987,
1650
+ "learning_rate": 6.34640522875817e-05,
1651
+ "loss": 0.0009,
1652
+ "step": 2090
1653
+ },
1654
+ {
1655
+ "epoch": 2.7450980392156863,
1656
+ "grad_norm": 0.0064156195148825645,
1657
+ "learning_rate": 6.281045751633987e-05,
1658
+ "loss": 0.0008,
1659
+ "step": 2100
1660
+ },
1661
+ {
1662
+ "epoch": 2.7450980392156863,
1663
+ "eval_accuracy": 0.976824034334764,
1664
+ "eval_loss": 0.110395647585392,
1665
+ "eval_runtime": 15.8536,
1666
+ "eval_samples_per_second": 73.485,
1667
+ "eval_steps_per_second": 9.209,
1668
+ "step": 2100
1669
+ },
1670
+ {
1671
+ "epoch": 2.758169934640523,
1672
+ "grad_norm": 0.009114363230764866,
1673
+ "learning_rate": 6.215686274509804e-05,
1674
+ "loss": 0.0008,
1675
+ "step": 2110
1676
+ },
1677
+ {
1678
+ "epoch": 2.7712418300653594,
1679
+ "grad_norm": 0.012108026072382927,
1680
+ "learning_rate": 6.150326797385621e-05,
1681
+ "loss": 0.0425,
1682
+ "step": 2120
1683
+ },
1684
+ {
1685
+ "epoch": 2.784313725490196,
1686
+ "grad_norm": 0.02400955744087696,
1687
+ "learning_rate": 6.0849673202614375e-05,
1688
+ "loss": 0.0011,
1689
+ "step": 2130
1690
+ },
1691
+ {
1692
+ "epoch": 2.7973856209150325,
1693
+ "grad_norm": 0.006131911184638739,
1694
+ "learning_rate": 6.0196078431372546e-05,
1695
+ "loss": 0.001,
1696
+ "step": 2140
1697
+ },
1698
+ {
1699
+ "epoch": 2.810457516339869,
1700
+ "grad_norm": 0.01375108677893877,
1701
+ "learning_rate": 5.9542483660130724e-05,
1702
+ "loss": 0.0014,
1703
+ "step": 2150
1704
+ },
1705
+ {
1706
+ "epoch": 2.8235294117647056,
1707
+ "grad_norm": 0.27725741267204285,
1708
+ "learning_rate": 5.8888888888888896e-05,
1709
+ "loss": 0.001,
1710
+ "step": 2160
1711
+ },
1712
+ {
1713
+ "epoch": 2.8366013071895426,
1714
+ "grad_norm": 0.005803197622299194,
1715
+ "learning_rate": 5.823529411764707e-05,
1716
+ "loss": 0.0012,
1717
+ "step": 2170
1718
+ },
1719
+ {
1720
+ "epoch": 2.849673202614379,
1721
+ "grad_norm": 0.005719684064388275,
1722
+ "learning_rate": 5.7581699346405225e-05,
1723
+ "loss": 0.0007,
1724
+ "step": 2180
1725
+ },
1726
+ {
1727
+ "epoch": 2.8627450980392157,
1728
+ "grad_norm": 0.049487028270959854,
1729
+ "learning_rate": 5.69281045751634e-05,
1730
+ "loss": 0.0013,
1731
+ "step": 2190
1732
+ },
1733
+ {
1734
+ "epoch": 2.8758169934640523,
1735
+ "grad_norm": 0.005333769600838423,
1736
+ "learning_rate": 5.627450980392157e-05,
1737
+ "loss": 0.0011,
1738
+ "step": 2200
1739
+ },
1740
+ {
1741
+ "epoch": 2.8758169934640523,
1742
+ "eval_accuracy": 0.9785407725321889,
1743
+ "eval_loss": 0.11278601735830307,
1744
+ "eval_runtime": 15.3235,
1745
+ "eval_samples_per_second": 76.027,
1746
+ "eval_steps_per_second": 9.528,
1747
+ "step": 2200
1748
+ },
1749
+ {
1750
+ "epoch": 2.888888888888889,
1751
+ "grad_norm": 0.013878793455660343,
1752
+ "learning_rate": 5.5620915032679746e-05,
1753
+ "loss": 0.0191,
1754
+ "step": 2210
1755
+ },
1756
+ {
1757
+ "epoch": 2.9019607843137254,
1758
+ "grad_norm": 0.005247410852462053,
1759
+ "learning_rate": 5.496732026143792e-05,
1760
+ "loss": 0.0008,
1761
+ "step": 2220
1762
+ },
1763
+ {
1764
+ "epoch": 2.915032679738562,
1765
+ "grad_norm": 0.010724911466240883,
1766
+ "learning_rate": 5.4313725490196076e-05,
1767
+ "loss": 0.0006,
1768
+ "step": 2230
1769
+ },
1770
+ {
1771
+ "epoch": 2.928104575163399,
1772
+ "grad_norm": 0.014705362729728222,
1773
+ "learning_rate": 5.366013071895425e-05,
1774
+ "loss": 0.0006,
1775
+ "step": 2240
1776
+ },
1777
+ {
1778
+ "epoch": 2.9411764705882355,
1779
+ "grad_norm": 0.007574844174087048,
1780
+ "learning_rate": 5.300653594771242e-05,
1781
+ "loss": 0.0007,
1782
+ "step": 2250
1783
+ },
1784
+ {
1785
+ "epoch": 2.954248366013072,
1786
+ "grad_norm": 0.004934164695441723,
1787
+ "learning_rate": 5.235294117647059e-05,
1788
+ "loss": 0.0015,
1789
+ "step": 2260
1790
+ },
1791
+ {
1792
+ "epoch": 2.9673202614379086,
1793
+ "grad_norm": 0.005061435047537088,
1794
+ "learning_rate": 5.169934640522877e-05,
1795
+ "loss": 0.0006,
1796
+ "step": 2270
1797
+ },
1798
+ {
1799
+ "epoch": 2.980392156862745,
1800
+ "grad_norm": 0.005057324655354023,
1801
+ "learning_rate": 5.104575163398693e-05,
1802
+ "loss": 0.0006,
1803
+ "step": 2280
1804
+ },
1805
+ {
1806
+ "epoch": 2.9934640522875817,
1807
+ "grad_norm": 0.004673287738114595,
1808
+ "learning_rate": 5.045751633986928e-05,
1809
+ "loss": 0.0127,
1810
+ "step": 2290
1811
+ },
1812
+ {
1813
+ "epoch": 3.0065359477124183,
1814
+ "grad_norm": 0.004593541845679283,
1815
+ "learning_rate": 4.980392156862745e-05,
1816
+ "loss": 0.0006,
1817
+ "step": 2300
1818
+ },
1819
+ {
1820
+ "epoch": 3.0065359477124183,
1821
+ "eval_accuracy": 0.9733905579399141,
1822
+ "eval_loss": 0.139481782913208,
1823
+ "eval_runtime": 14.0319,
1824
+ "eval_samples_per_second": 83.025,
1825
+ "eval_steps_per_second": 10.405,
1826
+ "step": 2300
1827
+ },
1828
+ {
1829
+ "epoch": 3.019607843137255,
1830
+ "grad_norm": 0.004796142224222422,
1831
+ "learning_rate": 4.915032679738562e-05,
1832
+ "loss": 0.0006,
1833
+ "step": 2310
1834
+ },
1835
+ {
1836
+ "epoch": 3.0326797385620914,
1837
+ "grad_norm": 0.005047705490142107,
1838
+ "learning_rate": 4.8496732026143794e-05,
1839
+ "loss": 0.0006,
1840
+ "step": 2320
1841
+ },
1842
+ {
1843
+ "epoch": 3.045751633986928,
1844
+ "grad_norm": 0.004708459600806236,
1845
+ "learning_rate": 4.7843137254901966e-05,
1846
+ "loss": 0.0005,
1847
+ "step": 2330
1848
+ },
1849
+ {
1850
+ "epoch": 3.0588235294117645,
1851
+ "grad_norm": 0.004462133627384901,
1852
+ "learning_rate": 4.718954248366013e-05,
1853
+ "loss": 0.0006,
1854
+ "step": 2340
1855
+ },
1856
+ {
1857
+ "epoch": 3.0718954248366015,
1858
+ "grad_norm": 0.004772055894136429,
1859
+ "learning_rate": 4.65359477124183e-05,
1860
+ "loss": 0.0009,
1861
+ "step": 2350
1862
+ },
1863
+ {
1864
+ "epoch": 3.084967320261438,
1865
+ "grad_norm": 0.004368507768958807,
1866
+ "learning_rate": 4.588235294117647e-05,
1867
+ "loss": 0.0005,
1868
+ "step": 2360
1869
+ },
1870
+ {
1871
+ "epoch": 3.0980392156862746,
1872
+ "grad_norm": 0.004448315594345331,
1873
+ "learning_rate": 4.5228758169934645e-05,
1874
+ "loss": 0.0005,
1875
+ "step": 2370
1876
+ },
1877
+ {
1878
+ "epoch": 3.111111111111111,
1879
+ "grad_norm": 0.004565922077745199,
1880
+ "learning_rate": 4.4575163398692816e-05,
1881
+ "loss": 0.0173,
1882
+ "step": 2380
1883
+ },
1884
+ {
1885
+ "epoch": 3.1241830065359477,
1886
+ "grad_norm": 0.004459939897060394,
1887
+ "learning_rate": 4.392156862745098e-05,
1888
+ "loss": 0.0069,
1889
+ "step": 2390
1890
+ },
1891
+ {
1892
+ "epoch": 3.1372549019607843,
1893
+ "grad_norm": 0.004509753547608852,
1894
+ "learning_rate": 4.326797385620915e-05,
1895
+ "loss": 0.0059,
1896
+ "step": 2400
1897
+ },
1898
+ {
1899
+ "epoch": 3.1372549019607843,
1900
+ "eval_accuracy": 0.9725321888412017,
1901
+ "eval_loss": 0.14194443821907043,
1902
+ "eval_runtime": 13.8686,
1903
+ "eval_samples_per_second": 84.002,
1904
+ "eval_steps_per_second": 10.527,
1905
+ "step": 2400
1906
+ },
1907
+ {
1908
+ "epoch": 3.150326797385621,
1909
+ "grad_norm": 0.004385701846331358,
1910
+ "learning_rate": 4.2614379084967324e-05,
1911
+ "loss": 0.0005,
1912
+ "step": 2410
1913
+ },
1914
+ {
1915
+ "epoch": 3.1633986928104574,
1916
+ "grad_norm": 0.5638505220413208,
1917
+ "learning_rate": 4.1960784313725496e-05,
1918
+ "loss": 0.0234,
1919
+ "step": 2420
1920
+ },
1921
+ {
1922
+ "epoch": 3.176470588235294,
1923
+ "grad_norm": 0.004813672509044409,
1924
+ "learning_rate": 4.130718954248366e-05,
1925
+ "loss": 0.0005,
1926
+ "step": 2430
1927
+ },
1928
+ {
1929
+ "epoch": 3.189542483660131,
1930
+ "grad_norm": 0.0054300627671182156,
1931
+ "learning_rate": 4.065359477124183e-05,
1932
+ "loss": 0.004,
1933
+ "step": 2440
1934
+ },
1935
+ {
1936
+ "epoch": 3.2026143790849675,
1937
+ "grad_norm": 0.004265918862074614,
1938
+ "learning_rate": 4e-05,
1939
+ "loss": 0.0005,
1940
+ "step": 2450
1941
+ },
1942
+ {
1943
+ "epoch": 3.215686274509804,
1944
+ "grad_norm": 0.005545614752918482,
1945
+ "learning_rate": 3.934640522875817e-05,
1946
+ "loss": 0.0005,
1947
+ "step": 2460
1948
+ },
1949
+ {
1950
+ "epoch": 3.2287581699346406,
1951
+ "grad_norm": 0.004381006117910147,
1952
+ "learning_rate": 3.8692810457516346e-05,
1953
+ "loss": 0.0005,
1954
+ "step": 2470
1955
+ },
1956
+ {
1957
+ "epoch": 3.241830065359477,
1958
+ "grad_norm": 0.004227633588016033,
1959
+ "learning_rate": 3.803921568627451e-05,
1960
+ "loss": 0.0005,
1961
+ "step": 2480
1962
+ },
1963
+ {
1964
+ "epoch": 3.2549019607843137,
1965
+ "grad_norm": 0.016775546595454216,
1966
+ "learning_rate": 3.738562091503268e-05,
1967
+ "loss": 0.0005,
1968
+ "step": 2490
1969
+ },
1970
+ {
1971
+ "epoch": 3.2679738562091503,
1972
+ "grad_norm": 0.024223702028393745,
1973
+ "learning_rate": 3.6732026143790854e-05,
1974
+ "loss": 0.0005,
1975
+ "step": 2500
1976
+ },
1977
+ {
1978
+ "epoch": 3.2679738562091503,
1979
+ "eval_accuracy": 0.9776824034334763,
1980
+ "eval_loss": 0.13354463875293732,
1981
+ "eval_runtime": 14.2248,
1982
+ "eval_samples_per_second": 81.899,
1983
+ "eval_steps_per_second": 10.264,
1984
+ "step": 2500
1985
+ },
1986
+ {
1987
+ "epoch": 3.281045751633987,
1988
+ "grad_norm": 0.0041252183727920055,
1989
+ "learning_rate": 3.607843137254902e-05,
1990
+ "loss": 0.0005,
1991
+ "step": 2510
1992
+ },
1993
+ {
1994
+ "epoch": 3.2941176470588234,
1995
+ "grad_norm": 0.004066206980496645,
1996
+ "learning_rate": 3.542483660130719e-05,
1997
+ "loss": 0.0008,
1998
+ "step": 2520
1999
+ },
2000
+ {
2001
+ "epoch": 3.30718954248366,
2002
+ "grad_norm": 0.004196736495941877,
2003
+ "learning_rate": 3.477124183006536e-05,
2004
+ "loss": 0.0036,
2005
+ "step": 2530
2006
+ },
2007
+ {
2008
+ "epoch": 3.3202614379084965,
2009
+ "grad_norm": 0.004051230847835541,
2010
+ "learning_rate": 3.411764705882353e-05,
2011
+ "loss": 0.0005,
2012
+ "step": 2540
2013
+ },
2014
+ {
2015
+ "epoch": 3.3333333333333335,
2016
+ "grad_norm": 0.004058164078742266,
2017
+ "learning_rate": 3.34640522875817e-05,
2018
+ "loss": 0.0175,
2019
+ "step": 2550
2020
+ },
2021
+ {
2022
+ "epoch": 3.34640522875817,
2023
+ "grad_norm": 0.0038333344273269176,
2024
+ "learning_rate": 3.281045751633987e-05,
2025
+ "loss": 0.0005,
2026
+ "step": 2560
2027
+ },
2028
+ {
2029
+ "epoch": 3.3594771241830066,
2030
+ "grad_norm": 0.004068716894835234,
2031
+ "learning_rate": 3.215686274509804e-05,
2032
+ "loss": 0.0005,
2033
+ "step": 2570
2034
+ },
2035
+ {
2036
+ "epoch": 3.372549019607843,
2037
+ "grad_norm": 0.004003152251243591,
2038
+ "learning_rate": 3.150326797385621e-05,
2039
+ "loss": 0.001,
2040
+ "step": 2580
2041
+ },
2042
+ {
2043
+ "epoch": 3.3856209150326797,
2044
+ "grad_norm": 0.0039999475702643394,
2045
+ "learning_rate": 3.0849673202614384e-05,
2046
+ "loss": 0.0005,
2047
+ "step": 2590
2048
+ },
2049
+ {
2050
+ "epoch": 3.3986928104575163,
2051
+ "grad_norm": 0.003976929467171431,
2052
+ "learning_rate": 3.019607843137255e-05,
2053
+ "loss": 0.0005,
2054
+ "step": 2600
2055
+ },
2056
+ {
2057
+ "epoch": 3.3986928104575163,
2058
+ "eval_accuracy": 0.976824034334764,
2059
+ "eval_loss": 0.12492600083351135,
2060
+ "eval_runtime": 13.7805,
2061
+ "eval_samples_per_second": 84.54,
2062
+ "eval_steps_per_second": 10.595,
2063
+ "step": 2600
2064
+ },
2065
+ {
2066
+ "epoch": 3.411764705882353,
2067
+ "grad_norm": 0.0038215164095163345,
2068
+ "learning_rate": 2.954248366013072e-05,
2069
+ "loss": 0.0004,
2070
+ "step": 2610
2071
+ },
2072
+ {
2073
+ "epoch": 3.4248366013071894,
2074
+ "grad_norm": 0.0037740464322268963,
2075
+ "learning_rate": 2.8888888888888888e-05,
2076
+ "loss": 0.0004,
2077
+ "step": 2620
2078
+ },
2079
+ {
2080
+ "epoch": 3.4379084967320264,
2081
+ "grad_norm": 0.003947110380977392,
2082
+ "learning_rate": 2.823529411764706e-05,
2083
+ "loss": 0.0005,
2084
+ "step": 2630
2085
+ },
2086
+ {
2087
+ "epoch": 3.450980392156863,
2088
+ "grad_norm": 0.003939367830753326,
2089
+ "learning_rate": 2.758169934640523e-05,
2090
+ "loss": 0.0004,
2091
+ "step": 2640
2092
+ },
2093
+ {
2094
+ "epoch": 3.4640522875816995,
2095
+ "grad_norm": 0.0037511338014155626,
2096
+ "learning_rate": 2.69281045751634e-05,
2097
+ "loss": 0.0004,
2098
+ "step": 2650
2099
+ },
2100
+ {
2101
+ "epoch": 3.477124183006536,
2102
+ "grad_norm": 0.004019754473119974,
2103
+ "learning_rate": 2.627450980392157e-05,
2104
+ "loss": 0.0367,
2105
+ "step": 2660
2106
+ },
2107
+ {
2108
+ "epoch": 3.4901960784313726,
2109
+ "grad_norm": 0.009803508408367634,
2110
+ "learning_rate": 2.562091503267974e-05,
2111
+ "loss": 0.0005,
2112
+ "step": 2670
2113
+ },
2114
+ {
2115
+ "epoch": 3.503267973856209,
2116
+ "grad_norm": 0.013077986426651478,
2117
+ "learning_rate": 2.496732026143791e-05,
2118
+ "loss": 0.0007,
2119
+ "step": 2680
2120
+ },
2121
+ {
2122
+ "epoch": 3.5163398692810457,
2123
+ "grad_norm": 0.007836658507585526,
2124
+ "learning_rate": 2.431372549019608e-05,
2125
+ "loss": 0.0008,
2126
+ "step": 2690
2127
+ },
2128
+ {
2129
+ "epoch": 3.5294117647058822,
2130
+ "grad_norm": 0.004085828084498644,
2131
+ "learning_rate": 2.366013071895425e-05,
2132
+ "loss": 0.0007,
2133
+ "step": 2700
2134
+ },
2135
+ {
2136
+ "epoch": 3.5294117647058822,
2137
+ "eval_accuracy": 0.9776824034334763,
2138
+ "eval_loss": 0.1157020702958107,
2139
+ "eval_runtime": 14.8164,
2140
+ "eval_samples_per_second": 78.629,
2141
+ "eval_steps_per_second": 9.854,
2142
+ "step": 2700
2143
+ },
2144
+ {
2145
+ "epoch": 3.542483660130719,
2146
+ "grad_norm": 0.004049411043524742,
2147
+ "learning_rate": 2.3006535947712418e-05,
2148
+ "loss": 0.0005,
2149
+ "step": 2710
2150
+ },
2151
+ {
2152
+ "epoch": 3.5555555555555554,
2153
+ "grad_norm": 0.015368789434432983,
2154
+ "learning_rate": 2.235294117647059e-05,
2155
+ "loss": 0.0006,
2156
+ "step": 2720
2157
+ },
2158
+ {
2159
+ "epoch": 3.568627450980392,
2160
+ "grad_norm": 0.003844345221295953,
2161
+ "learning_rate": 2.169934640522876e-05,
2162
+ "loss": 0.0008,
2163
+ "step": 2730
2164
+ },
2165
+ {
2166
+ "epoch": 3.581699346405229,
2167
+ "grad_norm": 0.004043512977659702,
2168
+ "learning_rate": 2.104575163398693e-05,
2169
+ "loss": 0.0005,
2170
+ "step": 2740
2171
+ },
2172
+ {
2173
+ "epoch": 3.5947712418300655,
2174
+ "grad_norm": 0.003938615787774324,
2175
+ "learning_rate": 2.0392156862745097e-05,
2176
+ "loss": 0.0005,
2177
+ "step": 2750
2178
+ },
2179
+ {
2180
+ "epoch": 3.607843137254902,
2181
+ "grad_norm": 0.008323022164404392,
2182
+ "learning_rate": 1.973856209150327e-05,
2183
+ "loss": 0.0005,
2184
+ "step": 2760
2185
+ },
2186
+ {
2187
+ "epoch": 3.6209150326797386,
2188
+ "grad_norm": 0.004000243730843067,
2189
+ "learning_rate": 1.9084967320261437e-05,
2190
+ "loss": 0.0005,
2191
+ "step": 2770
2192
+ },
2193
+ {
2194
+ "epoch": 3.633986928104575,
2195
+ "grad_norm": 0.004011669661849737,
2196
+ "learning_rate": 1.843137254901961e-05,
2197
+ "loss": 0.0005,
2198
+ "step": 2780
2199
+ },
2200
+ {
2201
+ "epoch": 3.6470588235294117,
2202
+ "grad_norm": 0.01243202667683363,
2203
+ "learning_rate": 1.777777777777778e-05,
2204
+ "loss": 0.0139,
2205
+ "step": 2790
2206
+ },
2207
+ {
2208
+ "epoch": 3.6601307189542482,
2209
+ "grad_norm": 0.0176347978413105,
2210
+ "learning_rate": 1.7124183006535948e-05,
2211
+ "loss": 0.0005,
2212
+ "step": 2800
2213
+ },
2214
+ {
2215
+ "epoch": 3.6601307189542482,
2216
+ "eval_accuracy": 0.9785407725321889,
2217
+ "eval_loss": 0.12019691616296768,
2218
+ "eval_runtime": 14.517,
2219
+ "eval_samples_per_second": 80.251,
2220
+ "eval_steps_per_second": 10.057,
2221
+ "step": 2800
2222
+ },
2223
+ {
2224
+ "epoch": 3.6732026143790852,
2225
+ "grad_norm": 0.0037740224506706,
2226
+ "learning_rate": 1.647058823529412e-05,
2227
+ "loss": 0.0005,
2228
+ "step": 2810
2229
+ },
2230
+ {
2231
+ "epoch": 3.686274509803922,
2232
+ "grad_norm": 0.004195007495582104,
2233
+ "learning_rate": 1.5816993464052288e-05,
2234
+ "loss": 0.0008,
2235
+ "step": 2820
2236
+ },
2237
+ {
2238
+ "epoch": 3.6993464052287583,
2239
+ "grad_norm": 0.0038069712463766336,
2240
+ "learning_rate": 1.5163398692810458e-05,
2241
+ "loss": 0.0007,
2242
+ "step": 2830
2243
+ },
2244
+ {
2245
+ "epoch": 3.712418300653595,
2246
+ "grad_norm": 0.010378316044807434,
2247
+ "learning_rate": 1.4509803921568629e-05,
2248
+ "loss": 0.0006,
2249
+ "step": 2840
2250
+ },
2251
+ {
2252
+ "epoch": 3.7254901960784315,
2253
+ "grad_norm": 0.003901832504197955,
2254
+ "learning_rate": 1.3856209150326799e-05,
2255
+ "loss": 0.0005,
2256
+ "step": 2850
2257
+ },
2258
+ {
2259
+ "epoch": 3.738562091503268,
2260
+ "grad_norm": 0.003902917029336095,
2261
+ "learning_rate": 1.3202614379084969e-05,
2262
+ "loss": 0.0005,
2263
+ "step": 2860
2264
+ },
2265
+ {
2266
+ "epoch": 3.7516339869281046,
2267
+ "grad_norm": 0.004125463310629129,
2268
+ "learning_rate": 1.2549019607843138e-05,
2269
+ "loss": 0.001,
2270
+ "step": 2870
2271
+ },
2272
+ {
2273
+ "epoch": 3.764705882352941,
2274
+ "grad_norm": 0.003765885718166828,
2275
+ "learning_rate": 1.1895424836601307e-05,
2276
+ "loss": 0.0006,
2277
+ "step": 2880
2278
+ },
2279
+ {
2280
+ "epoch": 3.7777777777777777,
2281
+ "grad_norm": 0.0036050116177648306,
2282
+ "learning_rate": 1.1241830065359478e-05,
2283
+ "loss": 0.001,
2284
+ "step": 2890
2285
+ },
2286
+ {
2287
+ "epoch": 3.7908496732026142,
2288
+ "grad_norm": 0.003705250099301338,
2289
+ "learning_rate": 1.0588235294117648e-05,
2290
+ "loss": 0.001,
2291
+ "step": 2900
2292
+ },
2293
+ {
2294
+ "epoch": 3.7908496732026142,
2295
+ "eval_accuracy": 0.9776824034334763,
2296
+ "eval_loss": 0.12394391000270844,
2297
+ "eval_runtime": 14.3108,
2298
+ "eval_samples_per_second": 81.407,
2299
+ "eval_steps_per_second": 10.202,
2300
+ "step": 2900
2301
+ },
2302
+ {
2303
+ "epoch": 3.803921568627451,
2304
+ "grad_norm": 0.01087539829313755,
2305
+ "learning_rate": 9.934640522875818e-06,
2306
+ "loss": 0.0005,
2307
+ "step": 2910
2308
+ },
2309
+ {
2310
+ "epoch": 3.8169934640522873,
2311
+ "grad_norm": 0.004286112263798714,
2312
+ "learning_rate": 9.281045751633987e-06,
2313
+ "loss": 0.0004,
2314
+ "step": 2920
2315
+ },
2316
+ {
2317
+ "epoch": 3.8300653594771243,
2318
+ "grad_norm": 0.005555053241550922,
2319
+ "learning_rate": 8.627450980392157e-06,
2320
+ "loss": 0.0006,
2321
+ "step": 2930
2322
+ },
2323
+ {
2324
+ "epoch": 3.843137254901961,
2325
+ "grad_norm": 0.05337900295853615,
2326
+ "learning_rate": 7.973856209150327e-06,
2327
+ "loss": 0.0007,
2328
+ "step": 2940
2329
+ },
2330
+ {
2331
+ "epoch": 3.8562091503267975,
2332
+ "grad_norm": 0.0040720016695559025,
2333
+ "learning_rate": 7.320261437908498e-06,
2334
+ "loss": 0.0004,
2335
+ "step": 2950
2336
+ },
2337
+ {
2338
+ "epoch": 3.869281045751634,
2339
+ "grad_norm": 0.0036330316215753555,
2340
+ "learning_rate": 6.666666666666667e-06,
2341
+ "loss": 0.0006,
2342
+ "step": 2960
2343
+ },
2344
+ {
2345
+ "epoch": 3.8823529411764706,
2346
+ "grad_norm": 0.00944702047854662,
2347
+ "learning_rate": 6.013071895424837e-06,
2348
+ "loss": 0.0009,
2349
+ "step": 2970
2350
+ },
2351
+ {
2352
+ "epoch": 3.895424836601307,
2353
+ "grad_norm": 0.0034994047600775957,
2354
+ "learning_rate": 5.359477124183006e-06,
2355
+ "loss": 0.0006,
2356
+ "step": 2980
2357
+ },
2358
+ {
2359
+ "epoch": 3.9084967320261437,
2360
+ "grad_norm": 0.030153293162584305,
2361
+ "learning_rate": 4.705882352941177e-06,
2362
+ "loss": 0.0007,
2363
+ "step": 2990
2364
+ },
2365
+ {
2366
+ "epoch": 3.9215686274509802,
2367
+ "grad_norm": 0.0036808131262660027,
2368
+ "learning_rate": 4.052287581699347e-06,
2369
+ "loss": 0.0004,
2370
+ "step": 3000
2371
+ },
2372
+ {
2373
+ "epoch": 3.9215686274509802,
2374
+ "eval_accuracy": 0.976824034334764,
2375
+ "eval_loss": 0.12309978902339935,
2376
+ "eval_runtime": 13.6907,
2377
+ "eval_samples_per_second": 85.094,
2378
+ "eval_steps_per_second": 10.664,
2379
+ "step": 3000
2380
+ },
2381
+ {
2382
+ "epoch": 3.9346405228758172,
2383
+ "grad_norm": 0.0034610480070114136,
2384
+ "learning_rate": 3.398692810457516e-06,
2385
+ "loss": 0.0005,
2386
+ "step": 3010
2387
+ },
2388
+ {
2389
+ "epoch": 3.947712418300654,
2390
+ "grad_norm": 0.0036175919231027365,
2391
+ "learning_rate": 2.7450980392156863e-06,
2392
+ "loss": 0.0005,
2393
+ "step": 3020
2394
+ },
2395
+ {
2396
+ "epoch": 3.9607843137254903,
2397
+ "grad_norm": 0.003583298297598958,
2398
+ "learning_rate": 2.091503267973856e-06,
2399
+ "loss": 0.0005,
2400
+ "step": 3030
2401
+ },
2402
+ {
2403
+ "epoch": 3.973856209150327,
2404
+ "grad_norm": 0.003492480143904686,
2405
+ "learning_rate": 1.4379084967320261e-06,
2406
+ "loss": 0.0005,
2407
+ "step": 3040
2408
+ },
2409
+ {
2410
+ "epoch": 3.9869281045751634,
2411
+ "grad_norm": 0.003486211644485593,
2412
+ "learning_rate": 7.843137254901962e-07,
2413
+ "loss": 0.0004,
2414
+ "step": 3050
2415
+ },
2416
+ {
2417
+ "epoch": 4.0,
2418
+ "grad_norm": 0.0035035167820751667,
2419
+ "learning_rate": 1.30718954248366e-07,
2420
+ "loss": 0.0006,
2421
+ "step": 3060
2422
+ },
2423
+ {
2424
+ "epoch": 4.0,
2425
+ "step": 3060,
2426
+ "total_flos": 3.7909081319458406e+18,
2427
+ "train_loss": 0.047429592626662374,
2428
+ "train_runtime": 1590.6048,
2429
+ "train_samples_per_second": 30.756,
2430
+ "train_steps_per_second": 1.924
2431
+ }
2432
+ ],
2433
+ "logging_steps": 10,
2434
+ "max_steps": 3060,
2435
+ "num_input_tokens_seen": 0,
2436
+ "num_train_epochs": 4,
2437
+ "save_steps": 100,
2438
+ "total_flos": 3.7909081319458406e+18,
2439
+ "train_batch_size": 16,
2440
+ "trial_name": null,
2441
+ "trial_params": null
2442
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd0d85758fda32fffa3b8b691425a8e637d32f3a0e2a2d76d647bf00ebb2b21c
+ size 4984
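
Side note: the entries shown above are the tail of the `log_history` array inside `trainer_state.json`. A minimal sketch of how those evaluation entries could be inspected after downloading the file — the local path `trainer_state.json` is an assumption, not part of this commit:

```python
# Sketch: pick the best evaluation checkpoint from trainer_state.json
# (assumes the file from this repo has been downloaded to the working directory).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the log entries that carry evaluation metrics.
evals = [e for e in state["log_history"] if "eval_accuracy" in e]

# Report the step with the highest validation accuracy.
best = max(evals, key=lambda e: e["eval_accuracy"])
print(f"best step: {best['step']}, "
      f"eval_accuracy: {best['eval_accuracy']:.4f}, "
      f"eval_loss: {best['eval_loss']:.4f}")
```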