wdli committed on
Commit 18f08e2
1 Parent(s): 9dcf66a

Model save

README.md ADDED
@@ -0,0 +1,58 @@
+ ---
+ license: mit
+ base_model: gpt2
+ tags:
+ - trl
+ - sft
+ - generated_from_trainer
+ datasets:
+ - generator
+ model-index:
+ - name: gpt2-cpt-dutch
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # gpt2-cpt-dutch
+
+ This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on the generator dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0002
+ - train_batch_size: 16
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 1
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.40.2
+ - Pytorch 2.1.2+cu121
+ - Datasets 2.19.1
+ - Tokenizers 0.19.1
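
The hyperparameter list above maps directly onto `transformers.TrainingArguments`. Below is a minimal sketch of how a run like this could be set up with TRL's `SFTTrainer`; the toy dataset and the `text` column name are illustrative assumptions, since the card does not record how the "generator" dataset was built.

```python
# Minimal sketch reproducing the card's hyperparameters with TRL's SFTTrainer.
# Assumptions (not recorded in the card): the dataset text column is "text",
# and the toy dataset below stands in for the actual "generator" dataset.
from datasets import Dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import SFTTrainer

model = AutoModelForCausalLM.from_pretrained("gpt2")
tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token  # GPT-2 ships without a pad token

# Stand-in for the Dutch corpus used in the original run.
dataset = Dataset.from_dict({"text": ["Dit is een voorbeeldzin.", "Nog een zin."]})

args = TrainingArguments(
    output_dir="gpt2-cpt-dutch",
    learning_rate=2e-4,              # learning_rate: 0.0002
    per_device_train_batch_size=16,  # train_batch_size: 16
    per_device_eval_batch_size=8,    # eval_batch_size: 8
    seed=42,                         # seed: 42
    lr_scheduler_type="cosine",      # lr_scheduler_type: cosine
    warmup_ratio=0.1,                # lr_scheduler_warmup_ratio: 0.1
    num_train_epochs=1,              # num_epochs: 1
    bf16=True,                       # matches torch_dtype: bfloat16 in config.json
)

trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    args=args,
    train_dataset=dataset,
    dataset_text_field="text",  # assumption; the original run may also have packed sequences
    max_seq_length=1024,
)
trainer.train()
```

The Adam betas (0.9, 0.999) and epsilon (1e-08) listed in the card are the `TrainingArguments` defaults, so they need no explicit flags.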
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 1.0,
+   "total_flos": 684585123840000.0,
+   "train_loss": 4.120773559663354,
+   "train_runtime": 120.6544,
+   "train_samples": 32152,
+   "train_samples_per_second": 10.857,
+   "train_steps_per_second": 0.68
+ }
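
As a quick sanity check, the reported mean training loss corresponds to a training-set perplexity of about exp(4.1208) ≈ 61.6; note this is measured on the training data, not a held-out set.

```python
import json
import math

# Perplexity implied by the mean training loss in all_results.json.
with open("all_results.json") as f:
    results = json.load(f)

print(math.exp(results["train_loss"]))  # ~61.6
```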
config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "gpt2",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.40.2",
+   "use_cache": false,
+   "vocab_size": 50257
+ }
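
The config records `torch_dtype: bfloat16` and `use_cache: false` (caching is typically disabled during training). A small sketch of loading the checkpoint accordingly; the hub id `wdli/gpt2-cpt-dutch` is an assumption based on this repo's owner and name.

```python
import torch
from transformers import AutoModelForCausalLM

# Load in the dtype recorded in config.json. The hub id is an assumption.
model = AutoModelForCausalLM.from_pretrained(
    "wdli/gpt2-cpt-dutch",
    torch_dtype=torch.bfloat16,
)
model.config.use_cache = True  # re-enable KV caching for generation
```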
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.40.2"
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d247bcd3ccd7f7ec05f948ad46f8c74999182be2346aeb1fb98f1b5f879f7e1
+ size 248894656
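
The file size is consistent with GPT-2 small's roughly 124M parameters stored in bfloat16 at 2 bytes each; the small remainder is the safetensors metadata header.

```python
# Rough size check: 124,439,808 parameters (GPT-2 small as implemented in
# transformers) times 2 bytes (bfloat16) is 248,879,616 bytes, close to the
# 248,894,656-byte file; the difference is the safetensors header.
n_params = 124_439_808
print(n_params * 2)  # 248879616
```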
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "50256": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1024,
+   "pad_token": "<|endoftext|>",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
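
All four special tokens point at `<|endoftext|>`, GPT-2's single special token; mapping `pad` to `eos` is the usual workaround for batched training with GPT-2. A quick check (the hub id is again an assumption):

```python
from transformers import AutoTokenizer

# The tokenizer config maps bos/eos/pad/unk to GPT-2's one special token.
tokenizer = AutoTokenizer.from_pretrained("wdli/gpt2-cpt-dutch")  # assumed hub id
assert tokenizer.pad_token == tokenizer.eos_token == "<|endoftext|>"
print(tokenizer.model_max_length)  # 1024
```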
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 1.0,
+   "total_flos": 684585123840000.0,
+   "train_loss": 4.120773559663354,
+   "train_runtime": 120.6544,
+   "train_samples": 32152,
+   "train_samples_per_second": 10.857,
+   "train_steps_per_second": 0.68
+ }
trainer_state.json ADDED
@@ -0,0 +1,149 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.0,
+   "eval_steps": 500,
+   "global_step": 82,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.012195121951219513,
+       "grad_norm": 10.0,
+       "learning_rate": 2.2222222222222223e-05,
+       "loss": 4.6342,
+       "step": 1
+     },
+     {
+       "epoch": 0.06097560975609756,
+       "grad_norm": 8.5,
+       "learning_rate": 0.00011111111111111112,
+       "loss": 4.6731,
+       "step": 5
+     },
+     {
+       "epoch": 0.12195121951219512,
+       "grad_norm": 3.34375,
+       "learning_rate": 0.00019990741151022301,
+       "loss": 4.3963,
+       "step": 10
+     },
+     {
+       "epoch": 0.18292682926829268,
+       "grad_norm": 2.890625,
+       "learning_rate": 0.00019668478136052774,
+       "loss": 4.2375,
+       "step": 15
+     },
+     {
+       "epoch": 0.24390243902439024,
+       "grad_norm": 2.28125,
+       "learning_rate": 0.00018900275764346768,
+       "loss": 4.1537,
+       "step": 20
+     },
+     {
+       "epoch": 0.3048780487804878,
+       "grad_norm": 2.4375,
+       "learning_rate": 0.00017721565844991643,
+       "loss": 4.1163,
+       "step": 25
+     },
+     {
+       "epoch": 0.36585365853658536,
+       "grad_norm": 2.328125,
+       "learning_rate": 0.00016186714032625035,
+       "loss": 4.0943,
+       "step": 30
+     },
+     {
+       "epoch": 0.4268292682926829,
+       "grad_norm": 1.9375,
+       "learning_rate": 0.0001436651231956064,
+       "loss": 4.0725,
+       "step": 35
+     },
+     {
+       "epoch": 0.4878048780487805,
+       "grad_norm": 1.8515625,
+       "learning_rate": 0.00012344913895704097,
+       "loss": 4.0395,
+       "step": 40
+     },
+     {
+       "epoch": 0.5487804878048781,
+       "grad_norm": 1.6328125,
+       "learning_rate": 0.00010215160974362223,
+       "loss": 4.0439,
+       "step": 45
+     },
+     {
+       "epoch": 0.6097560975609756,
+       "grad_norm": 1.625,
+       "learning_rate": 8.075484180291701e-05,
+       "loss": 4.0223,
+       "step": 50
+     },
+     {
+       "epoch": 0.6707317073170732,
+       "grad_norm": 1.7265625,
+       "learning_rate": 6.024571857174443e-05,
+       "loss": 4.0318,
+       "step": 55
+     },
+     {
+       "epoch": 0.7317073170731707,
+       "grad_norm": 1.78125,
+       "learning_rate": 4.1570182637163155e-05,
+       "loss": 4.0052,
+       "step": 60
+     },
+     {
+       "epoch": 0.7926829268292683,
+       "grad_norm": 1.7421875,
+       "learning_rate": 2.5589606012863963e-05,
+       "loss": 4.0278,
+       "step": 65
+     },
+     {
+       "epoch": 0.8536585365853658,
+       "grad_norm": 1.7734375,
+       "learning_rate": 1.30410610653389e-05,
+       "loss": 4.019,
+       "step": 70
+     },
+     {
+       "epoch": 0.9146341463414634,
+       "grad_norm": 1.609375,
+       "learning_rate": 4.503324514474483e-06,
+       "loss": 4.0281,
+       "step": 75
+     },
+     {
+       "epoch": 0.975609756097561,
+       "grad_norm": 1.609375,
+       "learning_rate": 3.701825065392184e-07,
+       "loss": 4.0339,
+       "step": 80
+     },
+     {
+       "epoch": 1.0,
+       "step": 82,
+       "total_flos": 684585123840000.0,
+       "train_loss": 4.120773559663354,
+       "train_runtime": 120.6544,
+       "train_samples_per_second": 10.857,
+       "train_steps_per_second": 0.68
+     }
+   ],
+   "logging_steps": 5,
+   "max_steps": 82,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 100,
+   "total_flos": 684585123840000.0,
+   "train_batch_size": 16,
+   "trial_name": null,
+   "trial_params": null
+ }
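
The `log_history` shows the loss falling from 4.63 at step 1 to about 4.03 by the end of the cosine schedule. A minimal sketch for pulling the curve out of this file:

```python
import json

# Extract the logged loss curve; the final summary record has no "loss" key
# and is skipped by the filter.
with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:
        print(f'step {entry["step"]:3d}: loss {entry["loss"]:.4f}')
```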
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:14a58caf7cfe2d00aff9be0bbb296e554d9ad71d260085b3e7f500ade161affb
+ size 5048
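
`training_args.bin` is the pickled `TrainingArguments` object that the `Trainer` saves alongside each run; it can be inspected with `torch.load`:

```python
import torch
from transformers import TrainingArguments  # class must be importable to unpickle

# Pickled TrainingArguments saved by the Trainer. On torch >= 2.6, pass
# weights_only=False, since pickled Python objects are refused by default.
args = torch.load("training_args.bin")
print(args.learning_rate, args.lr_scheduler_type, args.warmup_ratio)
```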
vocab.json ADDED
The diff for this file is too large to render. See raw diff