TinyPixel committed on
Commit b258255
1 Parent(s): 1712d6a

Upload folder using huggingface_hub

adapter_config.json CHANGED
@@ -19,10 +19,10 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "dense_4h_to_h",
-    "dense_h_to_4h",
     "query_key_value",
-    "dense"
+    "dense",
+    "dense_h_to_4h",
+    "dense_4h_to_h"
   ],
   "task_type": "CAUSAL_LM",
   "use_rslora": false
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:881f2340c02d4d32c10f3e9d78bb208dcbce12b61a20156f43514be608ff8250
+oid sha256:74d3c94430da216e49b2f1edfd4d47a96775b1caf13c23787dde2966ddb28ff6
 size 134235712
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b97ad97485f7553240a4bd1e25597aeed805c4600a59cc77473faa5f96a97efa
+oid sha256:db0cfcb41260dc3ac3f0d2d7b951d685f8708331d34c01bad2d150ab37755fec
 size 268514874
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e5c75855cec5126735fc2188235300e2c81959c32df6ee02c5ff30127dcdc657
+oid sha256:e8400da4ebd11656aee565d96885e25350dc287eec4ad76194986bfeef69bcc7
 size 14244
special_tokens_map.json CHANGED
@@ -30,7 +30,7 @@
     "single_word": false
   },
   "pad_token": {
-    "content": "<|endoftext|>",
+    "content": "[PAD]",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
tokenizer.json CHANGED
@@ -250,6 +250,15 @@
       "rstrip": false,
       "normalized": false,
       "special": true
+    },
+    {
+      "id": 50279,
+      "content": "[PAD]",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
     }
   ],
   "normalizer": {
tokenizer_config.json CHANGED
@@ -216,6 +216,14 @@
       "rstrip": false,
       "single_word": false,
       "special": true
+    },
+    "50279": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
     }
   },
   "additional_special_tokens": [
@@ -226,7 +234,7 @@
   "clean_up_tokenization_spaces": true,
   "eos_token": "<|endoftext|>",
   "model_max_length": 1000000000000000019884624838656,
-  "pad_token": "<|endoftext|>",
+  "pad_token": "[PAD]",
   "tokenizer_class": "GPTNeoXTokenizer",
   "unk_token": "<|endoftext|>"
 }
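
The three tokenizer files above change together because padding stops reusing <|endoftext|> and instead gets a dedicated [PAD] special token appended at id 50279. A change like this is typically made along the lines of the sketch below; this is a minimal illustration using the transformers tokenizer API, and "base-model" / "output-dir" are placeholders, not names recorded in this commit.

from transformers import AutoTokenizer

# "base-model" is a placeholder: the base checkpoint is not named in this commit.
tok = AutoTokenizer.from_pretrained("base-model")

# Register a dedicated pad token; it is appended after the existing vocabulary,
# which is how [PAD] can end up with a new id such as 50279 here.
tok.add_special_tokens({"pad_token": "[PAD]"})

# The model's embedding matrix must grow to cover the new id before training, e.g.:
# model.resize_token_embeddings(len(tok))

# Saving rewrites tokenizer.json, tokenizer_config.json and special_tokens_map.json,
# which is exactly the set of tokenizer files touched by this commit.
tok.save_pretrained("output-dir")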
trainer_state.json CHANGED
@@ -11,145 +11,145 @@
   {
     "epoch": 0.04,
    "learning_rate": 0.0002,
-    "loss": 2.3604,
+    "loss": 2.3426,
     "step": 2
   },
   {
     "epoch": 0.08,
     "learning_rate": 0.0002,
-    "loss": 2.4466,
+    "loss": 2.3786,
     "step": 4
   },
   {
     "epoch": 0.12,
     "learning_rate": 0.0002,
-    "loss": 2.8212,
+    "loss": 2.6959,
     "step": 6
   },
   {
     "epoch": 0.16,
     "learning_rate": 0.0002,
-    "loss": 2.638,
+    "loss": 2.792,
     "step": 8
   },
   {
     "epoch": 0.21,
     "learning_rate": 0.0002,
-    "loss": 2.8046,
+    "loss": 2.8302,
     "step": 10
   },
   {
     "epoch": 0.25,
     "learning_rate": 0.0002,
-    "loss": 3.2636,
+    "loss": 3.3717,
     "step": 12
   },
   {
     "epoch": 0.29,
     "learning_rate": 0.0002,
-    "loss": 2.2681,
+    "loss": 2.2267,
     "step": 14
   },
   {
     "epoch": 0.33,
     "learning_rate": 0.0002,
-    "loss": 2.3826,
+    "loss": 2.5093,
     "step": 16
   },
   {
     "epoch": 0.37,
     "learning_rate": 0.0002,
-    "loss": 2.6718,
+    "loss": 2.7368,
     "step": 18
   },
   {
     "epoch": 0.41,
     "learning_rate": 0.0002,
-    "loss": 2.6197,
+    "loss": 2.5378,
     "step": 20
   },
   {
     "epoch": 0.45,
     "learning_rate": 0.0002,
-    "loss": 2.573,
+    "loss": 2.6,
     "step": 22
   },
   {
     "epoch": 0.49,
     "learning_rate": 0.0002,
-    "loss": 3.1306,
+    "loss": 3.0639,
     "step": 24
   },
   {
     "epoch": 0.53,
     "learning_rate": 0.0002,
-    "loss": 2.2594,
+    "loss": 2.1673,
     "step": 26
   },
   {
     "epoch": 0.57,
     "learning_rate": 0.0002,
-    "loss": 2.4572,
+    "loss": 2.4645,
     "step": 28
   },
   {
     "epoch": 0.62,
     "learning_rate": 0.0002,
-    "loss": 2.4869,
+    "loss": 2.4987,
     "step": 30
   },
   {
     "epoch": 0.66,
     "learning_rate": 0.0002,
-    "loss": 2.6306,
+    "loss": 2.3191,
     "step": 32
   },
   {
     "epoch": 0.7,
     "learning_rate": 0.0002,
-    "loss": 2.6139,
+    "loss": 2.7003,
     "step": 34
   },
   {
     "epoch": 0.74,
     "learning_rate": 0.0002,
-    "loss": 2.8307,
+    "loss": 2.8433,
     "step": 36
   },
   {
     "epoch": 0.78,
     "learning_rate": 0.0002,
-    "loss": 2.204,
+    "loss": 2.2216,
     "step": 38
   },
   {
     "epoch": 0.82,
     "learning_rate": 0.0002,
-    "loss": 2.2423,
+    "loss": 2.346,
     "step": 40
   },
   {
     "epoch": 0.86,
     "learning_rate": 0.0002,
-    "loss": 2.3021,
+    "loss": 2.3354,
     "step": 42
   },
   {
     "epoch": 0.9,
     "learning_rate": 0.0002,
-    "loss": 2.3643,
+    "loss": 2.4828,
     "step": 44
   },
   {
     "epoch": 0.94,
     "learning_rate": 0.0002,
-    "loss": 2.6392,
+    "loss": 2.6882,
     "step": 46
   },
   {
     "epoch": 0.98,
     "learning_rate": 0.0002,
-    "loss": 2.9109,
+    "loss": 2.7263,
     "step": 48
   }
 ],
@@ -158,7 +158,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 500,
-  "total_flos": 2058945513185280.0,
+  "total_flos": 2061789415243776.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e3eeebac40f73e5cebd2ccfc5dbebb5cb8c8e9abdf4f8fae6ceab6d2b7c8eb9f
+oid sha256:2d2faca9fc2994cb010b4af1851a2b88208d18c68e81e002db967e23a8aae6e1
 size 4728
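
The binary files in this commit (adapter_model.safetensors, optimizer.pt, rng_state.pth, training_args.bin) are stored via Git LFS, so the diffs above only show pointer files: a spec version, a sha256 oid, and a size. A downloaded copy can be checked against the new pointer locally; a plain-Python sketch, shown here for training_args.bin:

import hashlib

def sha256_of(path: str) -> str:
    # Stream the file in 1 MiB chunks so large weight files don't need to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# New oid for training_args.bin, taken from the pointer above.
expected_oid = "2d2faca9fc2994cb010b4af1851a2b88208d18c68e81e002db967e23a8aae6e1"
assert sha256_of("training_args.bin") == expected_oid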