alakxender committed
Commit b1e2ac9 • 1 Parent(s): b2ea6f9
Upload folder using huggingface_hub
- added_tokens.json +3 -0
- checkpoint-100/config.json +39 -0
- checkpoint-100/generation_config.json +6 -0
- checkpoint-100/model.safetensors +3 -0
- checkpoint-100/optimizer.pt +3 -0
- checkpoint-100/rng_state.pth +3 -0
- checkpoint-100/scheduler.pt +3 -0
- checkpoint-100/trainer_state.json +111 -0
- checkpoint-100/training_args.bin +3 -0
- checkpoint-123/config.json +39 -0
- checkpoint-123/generation_config.json +6 -0
- checkpoint-123/model.safetensors +3 -0
- checkpoint-123/optimizer.pt +3 -0
- checkpoint-123/rng_state.pth +3 -0
- checkpoint-123/scheduler.pt +3 -0
- checkpoint-123/trainer_state.json +125 -0
- checkpoint-123/training_args.bin +3 -0
- config.json +39 -0
- generation_config.json +6 -0
- merges.txt +0 -0
- model.safetensors +3 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +22 -0
- tokenizer.json +0 -0
- tokenizer_config.json +32 -0
- training_args.bin +3 -0
- vocab.json +0 -0
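The commit message says the folder was pushed with huggingface_hub. As a rough illustration, a commit like this is typically produced with HfApi.upload_folder; the repo id and local folder path below are assumptions, not taken from the commit.

```python
# Hedged sketch: how an "Upload folder using huggingface_hub" commit is typically made.
# The repo_id and folder_path are assumptions for illustration only.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="dhivehi-gpt2-base",          # assumed local training output directory
    repo_id="alakxender/dhivehi-gpt2-base",   # assumed target model repo
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```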
added_tokens.json
ADDED
@@ -0,0 +1,3 @@
+{
+  "<|startoftext|>": 50257
+}
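added_tokens.json registers one extra token, <|startoftext|>, at id 50257. A minimal sketch of how such a token is usually added on top of the base GPT-2 tokenizer, and why vocab_size becomes 50258 in the configs below; the training script itself is not part of this commit, so this is an assumed workflow.

```python
# Sketch (assumed workflow): extend the stock GPT-2 tokenizer with <|startoftext|>.
from transformers import GPT2LMHeadModel, GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
tokenizer.add_special_tokens({"additional_special_tokens": ["<|startoftext|>"]})
print(tokenizer.convert_tokens_to_ids("<|startoftext|>"))  # 50257

model = GPT2LMHeadModel.from_pretrained("gpt2")
model.resize_token_embeddings(len(tokenizer))  # embedding matrix grows to 50258 rows
```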
checkpoint-100/config.json
ADDED
@@ -0,0 +1,39 @@
+{
+  "_name_or_path": "gpt2",
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.46.3",
+  "use_cache": true,
+  "vocab_size": 50258
+}
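This config is a stock GPT-2 base architecture (12 layers, 12 heads, 768-dim embeddings, 1024-token context) with the vocabulary widened to 50258 for the added token. A sketch of instantiating the same architecture from these values:

```python
# Sketch: rebuild the architecture described by config.json above.
from transformers import GPT2Config, GPT2LMHeadModel

config = GPT2Config(
    n_layer=12,
    n_head=12,
    n_embd=768,
    n_positions=1024,
    vocab_size=50258,
)
model = GPT2LMHeadModel(config)
print(f"{sum(p.numel() for p in model.parameters()) / 1e6:.1f}M parameters")  # ~124M
```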
checkpoint-100/generation_config.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 50256,
+  "eos_token_id": 50256,
+  "transformers_version": "4.46.3"
+}
checkpoint-100/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1256181a39adbe068aa87574a1d09ac9415c0ba76c9cb697990d4de36c71f148
+size 497777280
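The three-line entry above (like the other binary files in this commit) is a Git LFS pointer, not the weights themselves; the actual ~498 MB safetensors file lives in LFS storage. A sketch of fetching the real file with hf_hub_download; the repo id is an assumption.

```python
# Sketch: resolve the LFS pointer and download the actual checkpoint file.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="alakxender/dhivehi-gpt2-base",        # assumed repo id
    filename="checkpoint-100/model.safetensors",
)
print(path)  # local cache path of the ~498 MB weights file
```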
checkpoint-100/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:478e104bba614a49a6837a2ff7a185b39473b491b02ca126849cf5b547aa0743
+size 995648442
checkpoint-100/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22204b8ec7253f135580d3475a2867c1ed40c03f554f6e4956370281b08bef33
+size 14244
checkpoint-100/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:84edd34d7d0f0f006b96588fbd196800facfb78577f2aa279cb574a7d9facb10
+size 1064
checkpoint-100/trainer_state.json
ADDED
@@ -0,0 +1,111 @@
+{
+  "best_metric": 1.382002830505371,
+  "best_model_checkpoint": "dhivehi-gpt2-base/checkpoint-100",
+  "epoch": 2.4390243902439024,
+  "eval_steps": 100,
+  "global_step": 100,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.24390243902439024,
+      "grad_norm": 83.27034759521484,
+      "learning_rate": 2.5000000000000004e-07,
+      "loss": 2.4436,
+      "step": 10
+    },
+    {
+      "epoch": 0.4878048780487805,
+      "grad_norm": 60.58613967895508,
+      "learning_rate": 7.5e-07,
+      "loss": 2.321,
+      "step": 20
+    },
+    {
+      "epoch": 0.7317073170731707,
+      "grad_norm": 29.228893280029297,
+      "learning_rate": 1.25e-06,
+      "loss": 2.0685,
+      "step": 30
+    },
+    {
+      "epoch": 0.975609756097561,
+      "grad_norm": 18.66552734375,
+      "learning_rate": 1.7500000000000002e-06,
+      "loss": 1.8913,
+      "step": 40
+    },
+    {
+      "epoch": 1.2195121951219512,
+      "grad_norm": 10.73972225189209,
+      "learning_rate": 2.25e-06,
+      "loss": 1.7331,
+      "step": 50
+    },
+    {
+      "epoch": 1.4634146341463414,
+      "grad_norm": 6.437626361846924,
+      "learning_rate": 2.7500000000000004e-06,
+      "loss": 1.6317,
+      "step": 60
+    },
+    {
+      "epoch": 1.7073170731707317,
+      "grad_norm": 3.9424028396606445,
+      "learning_rate": 3.2500000000000002e-06,
+      "loss": 1.5445,
+      "step": 70
+    },
+    {
+      "epoch": 1.951219512195122,
+      "grad_norm": 2.4789206981658936,
+      "learning_rate": 3.75e-06,
+      "loss": 1.4638,
+      "step": 80
+    },
+    {
+      "epoch": 2.1951219512195124,
+      "grad_norm": 1.7527034282684326,
+      "learning_rate": 4.250000000000001e-06,
+      "loss": 1.3972,
+      "step": 90
+    },
+    {
+      "epoch": 2.4390243902439024,
+      "grad_norm": 1.9903717041015625,
+      "learning_rate": 4.75e-06,
+      "loss": 1.3272,
+      "step": 100
+    },
+    {
+      "epoch": 2.4390243902439024,
+      "eval_loss": 1.382002830505371,
+      "eval_runtime": 5.1108,
+      "eval_samples_per_second": 56.742,
+      "eval_steps_per_second": 1.957,
+      "step": 100
+    }
+  ],
+  "logging_steps": 10,
+  "max_steps": 123,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 3,
+  "save_steps": 100,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 3322589478912000.0,
+  "train_batch_size": 32,
+  "trial_name": null,
+  "trial_params": null
+}
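The trainer state records logging every 10 steps, evaluation and checkpointing every 100 steps, a train batch size of 32, 3 epochs (123 steps total), and the best checkpoint selected by eval_loss. Below is a hedged reconstruction of TrainingArguments consistent with those values; the author's actual arguments live in training_args.bin and may differ.

```python
# Hedged reconstruction of TrainingArguments matching trainer_state.json above.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="dhivehi-gpt2-base",        # matches best_model_checkpoint path
    num_train_epochs=3,
    per_device_train_batch_size=32,
    logging_steps=10,
    eval_strategy="steps",
    eval_steps=100,
    save_steps=100,
    load_best_model_at_end=True,
    metric_for_best_model="eval_loss",
)
```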
checkpoint-100/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33253810238ac5f6e19d65eaf18a7b5652de403635f7f896af35a9b7b932f10a
+size 5240
checkpoint-123/config.json
ADDED
@@ -0,0 +1,39 @@
+{
+  "_name_or_path": "gpt2",
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.46.3",
+  "use_cache": true,
+  "vocab_size": 50258
+}
checkpoint-123/generation_config.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 50256,
+  "eos_token_id": 50256,
+  "transformers_version": "4.46.3"
+}
checkpoint-123/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf64a93419d4aeb9079da0eb6f33276c930b740d901b4378ae9f11c5aa786d0f
+size 497777280
checkpoint-123/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:47fe9f7fcec27cf0397fed0aea7d3d4b1d7b7b7d1a8e0ac31f5ed4f19e45be2e
+size 995648442
checkpoint-123/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f9c2abd33f8421dbe1f81a265d65c5e9b7976e8c94fcbf0c6fa98d24a1e34d4
+size 14244
checkpoint-123/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4447704abe5a3947412c6e315c46fd7fa1c7b8d0c8048f8a9031f4f62c23812
+size 1064
checkpoint-123/trainer_state.json
ADDED
@@ -0,0 +1,125 @@
+{
+  "best_metric": 1.382002830505371,
+  "best_model_checkpoint": "dhivehi-gpt2-base/checkpoint-100",
+  "epoch": 3.0,
+  "eval_steps": 100,
+  "global_step": 123,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.24390243902439024,
+      "grad_norm": 83.27034759521484,
+      "learning_rate": 2.5000000000000004e-07,
+      "loss": 2.4436,
+      "step": 10
+    },
+    {
+      "epoch": 0.4878048780487805,
+      "grad_norm": 60.58613967895508,
+      "learning_rate": 7.5e-07,
+      "loss": 2.321,
+      "step": 20
+    },
+    {
+      "epoch": 0.7317073170731707,
+      "grad_norm": 29.228893280029297,
+      "learning_rate": 1.25e-06,
+      "loss": 2.0685,
+      "step": 30
+    },
+    {
+      "epoch": 0.975609756097561,
+      "grad_norm": 18.66552734375,
+      "learning_rate": 1.7500000000000002e-06,
+      "loss": 1.8913,
+      "step": 40
+    },
+    {
+      "epoch": 1.2195121951219512,
+      "grad_norm": 10.73972225189209,
+      "learning_rate": 2.25e-06,
+      "loss": 1.7331,
+      "step": 50
+    },
+    {
+      "epoch": 1.4634146341463414,
+      "grad_norm": 6.437626361846924,
+      "learning_rate": 2.7500000000000004e-06,
+      "loss": 1.6317,
+      "step": 60
+    },
+    {
+      "epoch": 1.7073170731707317,
+      "grad_norm": 3.9424028396606445,
+      "learning_rate": 3.2500000000000002e-06,
+      "loss": 1.5445,
+      "step": 70
+    },
+    {
+      "epoch": 1.951219512195122,
+      "grad_norm": 2.4789206981658936,
+      "learning_rate": 3.75e-06,
+      "loss": 1.4638,
+      "step": 80
+    },
+    {
+      "epoch": 2.1951219512195124,
+      "grad_norm": 1.7527034282684326,
+      "learning_rate": 4.250000000000001e-06,
+      "loss": 1.3972,
+      "step": 90
+    },
+    {
+      "epoch": 2.4390243902439024,
+      "grad_norm": 1.9903717041015625,
+      "learning_rate": 4.75e-06,
+      "loss": 1.3272,
+      "step": 100
+    },
+    {
+      "epoch": 2.4390243902439024,
+      "eval_loss": 1.382002830505371,
+      "eval_runtime": 5.1108,
+      "eval_samples_per_second": 56.742,
+      "eval_steps_per_second": 1.957,
+      "step": 100
+    },
+    {
+      "epoch": 2.682926829268293,
+      "grad_norm": 3.0437378883361816,
+      "learning_rate": 5.25e-06,
+      "loss": 1.292,
+      "step": 110
+    },
+    {
+      "epoch": 2.926829268292683,
+      "grad_norm": 5.521629810333252,
+      "learning_rate": 5.750000000000001e-06,
+      "loss": 1.2725,
+      "step": 120
+    }
+  ],
+  "logging_steps": 10,
+  "max_steps": 123,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 3,
+  "save_steps": 100,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 4080858955776000.0,
+  "train_batch_size": 32,
+  "trial_name": null,
+  "trial_params": null
+}
checkpoint-123/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33253810238ac5f6e19d65eaf18a7b5652de403635f7f896af35a9b7b932f10a
+size 5240
config.json
ADDED
@@ -0,0 +1,39 @@
+{
+  "_name_or_path": "gpt2",
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.46.3",
+  "use_cache": true,
+  "vocab_size": 50258
+}
generation_config.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 50256,
+  "eos_token_id": 50256,
+  "transformers_version": "4.46.3"
+}
merges.txt
ADDED
The diff for this file is too large to render.
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1256181a39adbe068aa87574a1d09ac9415c0ba76c9cb697990d4de36c71f148
+size 497777280
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2cefc1ca7e5a412a741f84b3a88bcbd1d4b84204bb464de843c221eb346ad022
+size 497818650
special_tokens_map.json
ADDED
@@ -0,0 +1,22 @@
+{
+  "additional_special_tokens": [
+    {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<|startoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    }
+  ],
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "pad_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
+}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,32 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "50256": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50257": {
+      "content": "<|startoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [
+    "<|endoftext|>",
+    "<|startoftext|>"
+  ],
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|endoftext|>",
+  "model_max_length": 1024,
+  "pad_token": "<|endoftext|>",
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}
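With the tokenizer files above in place, the uploaded model can be loaded and sampled directly. A minimal usage sketch, assuming the repo id alakxender/dhivehi-gpt2-base (not stated in this commit) and reusing the do_sample/max_length defaults from task_specific_params in config.json:

```python
# Minimal usage sketch; repo_id is an assumption, the prompt is illustrative only.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "alakxender/dhivehi-gpt2-base"  # assumed
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

inputs = tokenizer("<|startoftext|>", return_tensors="pt")
outputs = model.generate(
    **inputs,
    do_sample=True,
    max_length=50,                        # task_specific_params in config.json
    pad_token_id=tokenizer.pad_token_id,  # <|endoftext|> per tokenizer_config.json
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```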
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33253810238ac5f6e19d65eaf18a7b5652de403635f7f896af35a9b7b932f10a
+size 5240
vocab.json
ADDED
The diff for this file is too large to render.