imda-lseokmin committed on
Commit 691b03d
1 Parent(s): 717255d

Upload 159 files

README.md CHANGED
@@ -17,8 +17,8 @@ should probably proofread and complete it, then remove this comment. -->
 
  This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 2.5745
- - Accuracy: 0.4842
+ - Loss: 3.0447
+ - Accuracy: 0.3878
 
  ## Model description
 
all_results.json CHANGED
@@ -1,16 +1,16 @@
  {
  "epoch": 20.0,
- "eval_accuracy": 0.4841968067774519,
- "eval_loss": 2.5744614601135254,
- "eval_runtime": 0.6172,
- "eval_samples": 9,
- "eval_samples_per_second": 14.581,
- "eval_steps_per_second": 8.101,
- "perplexity": 13.124247324302939,
- "total_flos": 1442332016640000.0,
- "train_loss": 1.6038585939269134,
- "train_runtime": 546.7087,
- "train_samples": 138,
- "train_samples_per_second": 5.048,
- "train_steps_per_second": 2.524
+ "eval_accuracy": 0.3878299120234604,
+ "eval_loss": 3.044700860977173,
+ "eval_runtime": 0.5419,
+ "eval_samples": 8,
+ "eval_samples_per_second": 14.762,
+ "eval_steps_per_second": 7.381,
+ "perplexity": 21.00374722261463,
+ "total_flos": 1181039984640000.0,
+ "train_loss": 2.0282494394402755,
+ "train_runtime": 450.0172,
+ "train_samples": 113,
+ "train_samples_per_second": 5.022,
+ "train_steps_per_second": 2.533
  }
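The old and new `perplexity` fields are consistent with the corresponding `eval_loss` values: in both versions perplexity is simply `exp(eval_loss)`. A minimal sketch to verify this, using only the numbers from the diff above (nothing repo-specific is assumed beyond that relationship):

```python
# Sanity check: the "perplexity" fields above equal exp(eval_loss).
import math

old_eval_loss = 2.5744614601135254
new_eval_loss = 3.044700860977173

print(math.exp(old_eval_loss))  # ~13.1242, matches the old "perplexity"
print(math.exp(new_eval_loss))  # ~21.0037, matches the new "perplexity"
```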
checkpoint-1000/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f0a3045c4618f75a91e43e2b4079b4f70ddfea71b549212fe00c8bc207fce25d
+ oid sha256:0eafae2b6ff425a68e1dff3b58217192755c5552624bec6e0a5a24c2cfeef0d7
  size 497774208
checkpoint-1000/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a5737a324483690f9aadd907a2e48f602e54024c812e42d2a6c0c28878d67400
+ oid sha256:da3865fd609302881be01c29801902af8955f2eaf6a500b6eddbdb1ecbccff3c
  size 995642298
checkpoint-1000/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fad09794a0db580c98cf06d0ebd5b66e92d3f86f4f7bd4a728e24f1a05467f0c
+ oid sha256:e15521664bcf9955b1a2b66e810668f825d186fef974c4b18a31c7cd19e0437a
  size 14244
checkpoint-1000/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7e4ac6e2694cbb9ef21cdf8af6f8d76becd952779eb03e00001aef172a063804
+ oid sha256:56c9dff28a9d4ea03847c10914973912c6d11b9e39b2afb53832948fd687a713
  size 1064
checkpoint-1000/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 14.492753623188406,
+ "epoch": 17.54385964912281,
  "eval_steps": 500,
  "global_step": 1000,
  "is_hyper_param_search": false,
@@ -9,26 +9,26 @@
  "is_world_process_zero": true,
  "log_history": [
  {
- "epoch": 7.246376811594203,
- "grad_norm": 4.941216468811035,
- "learning_rate": 3.188405797101449e-05,
- "loss": 1.9657,
+ "epoch": 8.771929824561404,
+ "grad_norm": 5.5376715660095215,
+ "learning_rate": 2.8070175438596492e-05,
+ "loss": 2.3748,
  "step": 500
  },
  {
- "epoch": 14.492753623188406,
- "grad_norm": 4.604403495788574,
- "learning_rate": 1.3768115942028985e-05,
- "loss": 1.4853,
+ "epoch": 17.54385964912281,
+ "grad_norm": 5.5406880378723145,
+ "learning_rate": 6.140350877192982e-06,
+ "loss": 1.7925,
  "step": 1000
  }
  ],
  "logging_steps": 500,
- "max_steps": 1380,
+ "max_steps": 1140,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 500,
- "total_flos": 1045168128000000.0,
+ "total_flos": 1036284198912000.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
checkpoint-1000/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d74f83bec7ea349d722f90da656054543e87196b485a7f973f3b2bf816599956
+ oid sha256:cb06b4254316e1ab5e3df832bc95c5eed217d0c6e05ef2ece9717f8db05462b9
  size 4984
checkpoint-500/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:675128bde697f16287cd94a082b72bc459c336abc30693b18347577766156deb
+ oid sha256:d6f130cbd8a4515242330284192392edc8462adce4467e1764fab022c447c13a
  size 497774208
checkpoint-500/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6e92d1b34033798c941cd79408ba58778a2e495f100658eb08b65531a075ad4f
+ oid sha256:291721543b1bea75dc67b7f4214f7e495d5792dc17f7bbe192b2c5c500d1ce54
  size 995642298
checkpoint-500/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2468835bf602a0fb3a2f44ad85744a106df5db55b9864b0626319bec2d54fb1d
+ oid sha256:777137f7b6438e37de19223f30fb87cd1210813798ec473dff745146fb62b610
  size 14244
checkpoint-500/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cf5b518ffefd08bf15650baa38cbbf384a2122ba91aea9d95cd9aa0329fa2678
+ oid sha256:d9df7e73f7009fee1832457c31aeab89c9411a8aad568d49bf516ca4a4b45d88
  size 1064
checkpoint-500/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 7.246376811594203,
+ "epoch": 8.771929824561404,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
@@ -9,19 +9,19 @@
  "is_world_process_zero": true,
  "log_history": [
  {
- "epoch": 7.246376811594203,
- "grad_norm": 4.941216468811035,
- "learning_rate": 3.188405797101449e-05,
- "loss": 1.9657,
+ "epoch": 8.771929824561404,
+ "grad_norm": 5.5376715660095215,
+ "learning_rate": 2.8070175438596492e-05,
+ "loss": 2.3748,
  "step": 500
  }
  ],
  "logging_steps": 500,
- "max_steps": 1380,
+ "max_steps": 1140,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 500,
- "total_flos": 522584064000000.0,
+ "total_flos": 518403391488000.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
checkpoint-500/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d74f83bec7ea349d722f90da656054543e87196b485a7f973f3b2bf816599956
+ oid sha256:cb06b4254316e1ab5e3df832bc95c5eed217d0c6e05ef2ece9717f8db05462b9
  size 4984
eval_results.json CHANGED
@@ -1,10 +1,10 @@
  {
  "epoch": 20.0,
- "eval_accuracy": 0.4841968067774519,
- "eval_loss": 2.5744614601135254,
- "eval_runtime": 0.6172,
- "eval_samples": 9,
- "eval_samples_per_second": 14.581,
- "eval_steps_per_second": 8.101,
- "perplexity": 13.124247324302939
+ "eval_accuracy": 0.3878299120234604,
+ "eval_loss": 3.044700860977173,
+ "eval_runtime": 0.5419,
+ "eval_samples": 8,
+ "eval_samples_per_second": 14.762,
+ "eval_steps_per_second": 7.381,
+ "perplexity": 21.00374722261463
  }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:80b827b98d26451ec1300044ca990f9d2c0a2e66d6928b453bb6246c6e355525
+ oid sha256:ed42548dc68d3a36c9021a2620c13fbad3926a37f8f73cc0176886c465edf1ad
  size 497774208
train_results.json CHANGED
@@ -1,9 +1,9 @@
  {
  "epoch": 20.0,
- "total_flos": 1442332016640000.0,
- "train_loss": 1.6038585939269134,
- "train_runtime": 546.7087,
- "train_samples": 138,
- "train_samples_per_second": 5.048,
- "train_steps_per_second": 2.524
+ "total_flos": 1181039984640000.0,
+ "train_loss": 2.0282494394402755,
+ "train_runtime": 450.0172,
+ "train_samples": 113,
+ "train_samples_per_second": 5.022,
+ "train_steps_per_second": 2.533
  }
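The new throughput figures line up with the other fields in this file. A rough consistency check, assuming the usual Trainer convention that the `*_per_second` fields are totals (all epochs) divided by `train_runtime`:

```python
# Consistency check for the new train_results.json values.
train_samples = 113
num_epochs = 20
train_runtime = 450.0172
max_steps = 1140  # from trainer_state.json in this commit

print(train_samples * num_epochs / train_runtime)  # ~5.022 -> "train_samples_per_second"
print(max_steps / train_runtime)                   # ~2.533 -> "train_steps_per_second"
```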
trainer_state.json CHANGED
@@ -3,41 +3,41 @@
  "best_model_checkpoint": null,
  "epoch": 20.0,
  "eval_steps": 500,
- "global_step": 1380,
+ "global_step": 1140,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
  {
- "epoch": 7.246376811594203,
- "grad_norm": 4.941216468811035,
- "learning_rate": 3.188405797101449e-05,
- "loss": 1.9657,
+ "epoch": 8.771929824561404,
+ "grad_norm": 5.5376715660095215,
+ "learning_rate": 2.8070175438596492e-05,
+ "loss": 2.3748,
  "step": 500
  },
  {
- "epoch": 14.492753623188406,
- "grad_norm": 4.604403495788574,
- "learning_rate": 1.3768115942028985e-05,
- "loss": 1.4853,
+ "epoch": 17.54385964912281,
+ "grad_norm": 5.5406880378723145,
+ "learning_rate": 6.140350877192982e-06,
+ "loss": 1.7925,
  "step": 1000
  },
  {
  "epoch": 20.0,
- "step": 1380,
- "total_flos": 1442332016640000.0,
- "train_loss": 1.6038585939269134,
- "train_runtime": 546.7087,
- "train_samples_per_second": 5.048,
- "train_steps_per_second": 2.524
+ "step": 1140,
+ "total_flos": 1181039984640000.0,
+ "train_loss": 2.0282494394402755,
+ "train_runtime": 450.0172,
+ "train_samples_per_second": 5.022,
+ "train_steps_per_second": 2.533
  }
  ],
  "logging_steps": 500,
- "max_steps": 1380,
+ "max_steps": 1140,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 500,
- "total_flos": 1442332016640000.0,
+ "total_flos": 1181039984640000.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d74f83bec7ea349d722f90da656054543e87196b485a7f973f3b2bf816599956
+ oid sha256:cb06b4254316e1ab5e3df832bc95c5eed217d0c6e05ef2ece9717f8db05462b9
  size 4984