stevenjunliu committed
Commit 369d72f
1 Parent(s): c716f69

Model save

README.md CHANGED
```diff
@@ -2,15 +2,11 @@
 license: mit
 base_model: gpt2
 tags:
-- alignment-handbook
-- trl
-- sft
-- generated_from_trainer
 - trl
 - sft
 - generated_from_trainer
 datasets:
-- shahadalkhalifa/Crypto_Whitepaper_Labeled
+- generator
 model-index:
 - name: gpt2-cpt-white
   results: []
@@ -21,7 +17,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # gpt2-cpt-white
 
-This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on the shahadalkhalifa/Crypto_Whitepaper_Labeled dataset.
+This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on the generator dataset.
 
 ## Model description
 
@@ -45,9 +41,9 @@ The following hyperparameters were used during training:
 - eval_batch_size: 8
 - seed: 42
 - distributed_type: multi-GPU
-- num_devices: 2
-- total_train_batch_size: 32
-- total_eval_batch_size: 16
+- num_devices: 4
+- total_train_batch_size: 64
+- total_eval_batch_size: 32
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
 - lr_scheduler_warmup_ratio: 0.1
@@ -60,6 +56,6 @@ The following hyperparameters were used during training:
 ### Framework versions
 
 - Transformers 4.39.0.dev0
-- Pytorch 2.2.2+cu121
+- Pytorch 2.3.1+cu121
 - Datasets 2.14.6
 - Tokenizers 0.15.2
```
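The doubled totals in the hyperparameter hunk follow directly from the device count. A quick sanity check of the arithmetic; only `eval_batch_size: 8` and `num_devices: 4` appear in the visible diff, so the per-device train batch size of 16 and `gradient_accumulation_steps` of 1 are inferred assumptions:

```python
# Effective batch sizes reported in the updated card. Values marked "assumed"
# are not shown in the hunk; they are the simplest settings that reproduce the totals.
num_devices = 4
per_device_train_batch_size = 16   # assumed
per_device_eval_batch_size = 8     # from the card
gradient_accumulation_steps = 1    # assumed

total_train_batch_size = num_devices * per_device_train_batch_size * gradient_accumulation_steps
total_eval_batch_size = num_devices * per_device_eval_batch_size

assert total_train_batch_size == 64  # matches total_train_batch_size in the card
assert total_eval_batch_size == 32   # matches total_eval_batch_size in the card
```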
all_results.json CHANGED
```diff
@@ -1,8 +1,8 @@
 {
     "epoch": 4.0,
-    "train_loss": 5.970038026571274,
-    "train_runtime": 52.1808,
+    "train_loss": 6.083620756864548,
+    "train_runtime": 28.044,
     "train_samples": 91,
-    "train_samples_per_second": 19.241,
-    "train_steps_per_second": 0.613
+    "train_samples_per_second": 35.801,
+    "train_steps_per_second": 0.571
 }
```
config.json CHANGED
```diff
@@ -34,6 +34,6 @@
   },
   "torch_dtype": "bfloat16",
   "transformers_version": "4.39.0.dev0",
-  "use_cache": true,
+  "use_cache": false,
   "vocab_size": 50257
 }
```
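Flipping `use_cache` to `false` is commonly done when gradient checkpointing is enabled during training (whether that was the reason here is an assumption); for generation the KV cache can simply be turned back on. A minimal sketch, with the repo id `stevenjunliu/gpt2-cpt-white` assumed from the committer and model name rather than stated in this commit:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "stevenjunliu/gpt2-cpt-white"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)
model.config.use_cache = True  # re-enable the KV cache for faster autoregressive decoding

inputs = tokenizer("Bitcoin is", return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```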
model.safetensors CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:730443de84302ebc475f18fd8d665c46b3a6b824b37d233143186fc579c891b7
+oid sha256:fc60df8d071f4d74360fa1e0475e5ce996bd669ad8ba1313e5d9ca9385cf2c88
 size 248894656
```
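Only the LFS pointer changes here; the weights themselves live in LFS storage. A sketch for checking that a locally downloaded `model.safetensors` matches the new pointer's oid and size:

```python
import hashlib
import os

path = "model.safetensors"  # assumes the file has already been pulled from LFS
expected_oid = "fc60df8d071f4d74360fa1e0475e5ce996bd669ad8ba1313e5d9ca9385cf2c88"
expected_size = 248894656

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

assert os.path.getsize(path) == expected_size
assert sha.hexdigest() == expected_oid
```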
train_results.json CHANGED
```diff
@@ -1,8 +1,8 @@
 {
     "epoch": 4.0,
-    "train_loss": 5.970038026571274,
-    "train_runtime": 52.1808,
+    "train_loss": 6.083620756864548,
+    "train_runtime": 28.044,
     "train_samples": 91,
-    "train_samples_per_second": 19.241,
-    "train_steps_per_second": 0.613
+    "train_samples_per_second": 35.801,
+    "train_steps_per_second": 0.571
 }
```
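After this commit `train_results.json` carries the same summary as `all_results.json` above (there is no eval split, so nothing else is added). A trivial check, assuming both files are in the working directory:

```python
import json

with open("all_results.json") as f, open("train_results.json") as g:
    assert json.load(f) == json.load(g)  # identical training summaries in this commit
```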
trainer_state.json CHANGED
```diff
@@ -3,72 +3,51 @@
   "best_model_checkpoint": null,
   "epoch": 4.0,
   "eval_steps": 500,
-  "global_step": 32,
+  "global_step": 16,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.12,
-      "grad_norm": 6.875,
-      "learning_rate": 5e-05,
-      "loss": 6.4878,
+      "epoch": 0.25,
+      "grad_norm": 6.53125,
+      "learning_rate": 0.0001,
+      "loss": 6.4013,
       "step": 1
     },
-    {
-      "epoch": 0.62,
-      "grad_norm": 5.0,
-      "learning_rate": 0.00019937122098932428,
-      "loss": 6.2983,
-      "step": 5
-    },
     {
       "epoch": 1.25,
-      "grad_norm": 3.078125,
+      "grad_norm": 3.84375,
       "learning_rate": 0.000178183148246803,
-      "loss": 6.1058,
-      "step": 10
-    },
-    {
-      "epoch": 1.88,
-      "grad_norm": 2.71875,
-      "learning_rate": 0.00013302790619551674,
-      "loss": 5.931,
-      "step": 15
+      "loss": 6.2911,
+      "step": 5
     },
     {
       "epoch": 2.5,
-      "grad_norm": 2.609375,
+      "grad_norm": 2.765625,
       "learning_rate": 7.774790660436858e-05,
-      "loss": 5.9051,
-      "step": 20
-    },
-    {
-      "epoch": 3.12,
-      "grad_norm": 2.4375,
-      "learning_rate": 2.9289321881345254e-05,
-      "loss": 5.806,
-      "step": 25
+      "loss": 6.0613,
+      "step": 10
     },
     {
       "epoch": 3.75,
-      "grad_norm": 2.390625,
+      "grad_norm": 2.5625,
       "learning_rate": 2.5072087818176382e-06,
-      "loss": 5.859,
-      "step": 30
+      "loss": 5.9747,
+      "step": 15
     },
     {
       "epoch": 4.0,
-      "step": 32,
+      "step": 16,
       "total_flos": 535126081536000.0,
-      "train_loss": 5.970038026571274,
-      "train_runtime": 52.1808,
-      "train_samples_per_second": 19.241,
-      "train_steps_per_second": 0.613
+      "train_loss": 6.083620756864548,
+      "train_runtime": 28.044,
+      "train_samples_per_second": 35.801,
+      "train_steps_per_second": 0.571
     }
   ],
   "logging_steps": 5,
-  "max_steps": 32,
+  "max_steps": 16,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 4,
   "save_steps": 100,
```
training_args.bin CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6a89b73d58e0158c0fea2ad4e8756257b4b11fe853a873525284f5dfda4b1694
+oid sha256:7ba7aad08f79b4b82f8d2e7069e009b925897569aebe186ceb0472cbbb86765b
 size 4984
```
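`training_args.bin` is a pickled `TrainingArguments` object rather than a tensor checkpoint; a hedged sketch for inspecting it locally (recent PyTorch versions need `weights_only=False` to unpickle arbitrary objects):

```python
import torch

# Full pickle, not a plain tensor file, so weights_only must be False on newer PyTorch.
args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.learning_rate, args.lr_scheduler_type)
```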