TinyPixel committed on
Commit 39f2e9c
1 Parent(s): 799e660

Upload folder using huggingface_hub

README.md CHANGED
@@ -1,6 +1,6 @@
 ---
 library_name: peft
-base_model: EleutherAI/pythia-1b
+base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
 ---
 
 # Model Card for Model ID
adapter_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "EleutherAI/pythia-1b",
+  "base_model_name_or_path": "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
@@ -19,10 +19,9 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "dense_4h_to_h",
-    "dense_h_to_4h",
-    "query_key_value",
-    "dense"
+    "gate_proj",
+    "down_proj",
+    "up_proj"
   ],
   "task_type": "CAUSAL_LM"
 }
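
The adapter config now points at the TinyLlama base model and targets its Llama-style MLP projections (`gate_proj`, `down_proj`, `up_proj`) instead of the GPT-NeoX module names. A minimal usage sketch, assuming the adapter in this repo is attached to that base model; `TinyPixel/adapter-repo` is a placeholder for this repository's id:

```python
# Minimal sketch (not from this repo's README): attach the LoRA adapter to the
# TinyLlama base model named in adapter_config.json.
# "TinyPixel/adapter-repo" is a placeholder for this repository's id.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T"
adapter_id = "TinyPixel/adapter-repo"  # placeholder id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id)
model = PeftModel.from_pretrained(base_model, adapter_id)  # LoRA on gate/up/down_proj

prompt = "The quick brown fox"
inputs = tokenizer(prompt, return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```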
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:01f246a04293608017a7ce637adc72e865ce93286d13286382ff9af6a81f2841
-size 33572288
+oid sha256:5cf4126a093cc2744b811791cb49b4bb554a88d7e17d7a455c4e4f43f620892b
+size 32457544
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ccf51e786cdb56065c858b7f0f7a7efd17c6b4a2cd490f129f0790a702e4a60f
-size 67185658
+oid sha256:077681757457c2caab161f15957793e472c6d16b2cbfd25132fe62544cd967a8
+size 64958970
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:24ef0df5b83694239f6a83e58e420db881fdd0e0442c1d73b2233f6b05ee3044
+oid sha256:c9055ac9db4835b33df3880467ea6560e442fcdfc1be9cf492c952ca01ce18a9
 size 14244
scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4372b8bc129cb832d61f89de539110be4b375eeb5fe693144c4dd880623a9aaf
+oid sha256:3bf1e902c3114bb5312c2c178dc4d942be3a06cb5931cabbb4ddb7748d0f53ee
 size 1064
special_tokens_map.json CHANGED
@@ -1,21 +1,21 @@
 {
   "bos_token": {
-    "content": "<|endoftext|>",
+    "content": "<s>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "eos_token": {
-    "content": "<|endoftext|>",
+    "content": "</s>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": "<|endoftext|>",
+  "pad_token": "</s>",
   "unk_token": {
-    "content": "<|endoftext|>",
+    "content": "<unk>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
tokenizer_config.json CHANGED
@@ -1,8 +1,9 @@
 {
-  "add_prefix_space": false,
+  "add_bos_token": true,
+  "add_eos_token": false,
   "added_tokens_decoder": {
     "0": {
-      "content": "<|endoftext|>",
+      "content": "<unk>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -10,203 +11,31 @@
       "special": true
     },
     "1": {
-      "content": "<|padding|>",
+      "content": "<s>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "50254": {
-      "content": " ",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "50255": {
-      "content": " ",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "50256": {
-      "content": " ",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "50257": {
-      "content": " ",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "50258": {
-      "content": " ",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "50259": {
-      "content": " ",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "50260": {
-      "content": " ",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "50261": {
-      "content": " ",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "50262": {
-      "content": " ",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "50263": {
-      "content": " ",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "50264": {
-      "content": " ",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "50265": {
-      "content": " ",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "50266": {
-      "content": " ",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "50267": {
-      "content": " ",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "50268": {
-      "content": " ",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "50269": {
-      "content": " ",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "50270": {
-      "content": " ",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "50271": {
-      "content": " ",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "50272": {
-      "content": " ",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "50273": {
-      "content": " ",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "50274": {
-      "content": " ",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "50275": {
-      "content": " ",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "50276": {
-      "content": " ",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    }
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
   },
-  "bos_token": "<|endoftext|>",
-  "clean_up_tokenization_spaces": true,
-  "eos_token": "<|endoftext|>",
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": false,
   "model_max_length": 1000000000000000019884624838656,
-  "pad_token": "<|endoftext|>",
-  "tokenizer_class": "GPTNeoXTokenizer",
-  "unk_token": "<|endoftext|>"
+  "pad_token": "</s>",
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
 }
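
With this change the repository ships a SentencePiece `tokenizer.model` and a Llama-style tokenizer config (BOS `<s>`, EOS `</s>`, UNK `<unk>`, right-side padding with `</s>` reused as the pad token) in place of the GPT-NeoX setup. A minimal check, assuming the tokenizer is loaded from this repo; `TinyPixel/adapter-repo` is a placeholder for the repository id:

```python
# Minimal sketch: confirm the updated config resolves to a Llama tokenizer with the
# special tokens declared above. "TinyPixel/adapter-repo" is a placeholder repo id.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("TinyPixel/adapter-repo")
print(type(tok).__name__)                                          # LlamaTokenizer / LlamaTokenizerFast
print(tok.bos_token, tok.eos_token, tok.unk_token, tok.pad_token)  # <s> </s> <unk> </s>
print(tok.padding_side)                                            # right

# add_bos_token=True, add_eos_token=False: encodings start with <s> (id 1) and no </s> is appended.
print(tok("hello").input_ids[0])                                   # 1
```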
trainer_state.json CHANGED
@@ -1,164 +1,218 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.9846153846153847,
+  "epoch": 0.9990680335507922,
   "eval_steps": 500,
-  "global_step": 48,
+  "global_step": 67,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.04,
+      "epoch": 0.03,
       "learning_rate": 2e-05,
-      "loss": 2.3303,
+      "loss": 2.1412,
       "step": 2
     },
     {
-      "epoch": 0.08,
+      "epoch": 0.06,
       "learning_rate": 2e-05,
-      "loss": 2.5495,
+      "loss": 1.606,
       "step": 4
     },
     {
-      "epoch": 0.12,
+      "epoch": 0.09,
       "learning_rate": 2e-05,
-      "loss": 2.5854,
+      "loss": 1.3632,
       "step": 6
     },
     {
-      "epoch": 0.16,
+      "epoch": 0.12,
       "learning_rate": 2e-05,
-      "loss": 2.612,
+      "loss": 1.3368,
       "step": 8
     },
     {
-      "epoch": 0.21,
+      "epoch": 0.15,
       "learning_rate": 2e-05,
-      "loss": 2.9175,
+      "loss": 1.4371,
       "step": 10
     },
     {
-      "epoch": 0.25,
+      "epoch": 0.18,
       "learning_rate": 2e-05,
-      "loss": 3.2592,
+      "loss": 1.4571,
       "step": 12
     },
     {
-      "epoch": 0.29,
+      "epoch": 0.21,
       "learning_rate": 2e-05,
-      "loss": 2.2769,
+      "loss": 1.9018,
       "step": 14
     },
     {
-      "epoch": 0.33,
+      "epoch": 0.24,
       "learning_rate": 2e-05,
-      "loss": 2.2051,
+      "loss": 2.2,
       "step": 16
     },
     {
-      "epoch": 0.37,
+      "epoch": 0.27,
       "learning_rate": 2e-05,
-      "loss": 2.5131,
+      "loss": 2.1347,
       "step": 18
     },
     {
-      "epoch": 0.41,
+      "epoch": 0.3,
       "learning_rate": 2e-05,
-      "loss": 2.4979,
+      "loss": 1.4048,
       "step": 20
     },
     {
-      "epoch": 0.45,
+      "epoch": 0.33,
       "learning_rate": 2e-05,
-      "loss": 2.7152,
+      "loss": 1.4807,
       "step": 22
     },
     {
-      "epoch": 0.49,
+      "epoch": 0.36,
       "learning_rate": 2e-05,
-      "loss": 2.9744,
+      "loss": 1.4868,
       "step": 24
     },
     {
-      "epoch": 0.53,
+      "epoch": 0.39,
       "learning_rate": 2e-05,
-      "loss": 2.166,
+      "loss": 1.2677,
       "step": 26
     },
     {
-      "epoch": 0.57,
+      "epoch": 0.42,
       "learning_rate": 2e-05,
-      "loss": 2.4005,
+      "loss": 1.6585,
       "step": 28
     },
     {
-      "epoch": 0.62,
+      "epoch": 0.45,
       "learning_rate": 2e-05,
-      "loss": 2.651,
+      "loss": 1.8421,
       "step": 30
     },
     {
-      "epoch": 0.66,
+      "epoch": 0.48,
       "learning_rate": 2e-05,
-      "loss": 2.3863,
+      "loss": 1.9776,
       "step": 32
     },
     {
-      "epoch": 0.7,
+      "epoch": 0.51,
       "learning_rate": 2e-05,
-      "loss": 2.4171,
+      "loss": 1.7642,
       "step": 34
     },
     {
-      "epoch": 0.74,
+      "epoch": 0.54,
       "learning_rate": 2e-05,
-      "loss": 2.7263,
+      "loss": 1.72,
       "step": 36
     },
     {
-      "epoch": 0.78,
+      "epoch": 0.57,
       "learning_rate": 2e-05,
-      "loss": 2.2354,
+      "loss": 1.3232,
       "step": 38
     },
     {
-      "epoch": 0.82,
+      "epoch": 0.6,
       "learning_rate": 2e-05,
-      "loss": 2.4808,
+      "loss": 1.4866,
       "step": 40
     },
     {
-      "epoch": 0.86,
+      "epoch": 0.63,
       "learning_rate": 2e-05,
-      "loss": 2.5683,
+      "loss": 1.2556,
       "step": 42
     },
     {
-      "epoch": 0.9,
+      "epoch": 0.66,
       "learning_rate": 2e-05,
-      "loss": 2.5998,
+      "loss": 1.3743,
       "step": 44
     },
     {
-      "epoch": 0.94,
+      "epoch": 0.69,
       "learning_rate": 2e-05,
-      "loss": 2.3712,
+      "loss": 1.7033,
       "step": 46
     },
     {
-      "epoch": 0.98,
+      "epoch": 0.72,
       "learning_rate": 2e-05,
-      "loss": 2.6373,
+      "loss": 1.8748,
       "step": 48
+    },
+    {
+      "epoch": 0.75,
+      "learning_rate": 2e-05,
+      "loss": 2.1102,
+      "step": 50
+    },
+    {
+      "epoch": 0.78,
+      "learning_rate": 2e-05,
+      "loss": 1.4274,
+      "step": 52
+    },
+    {
+      "epoch": 0.81,
+      "learning_rate": 2e-05,
+      "loss": 1.317,
+      "step": 54
+    },
+    {
+      "epoch": 0.84,
+      "learning_rate": 2e-05,
+      "loss": 1.2868,
+      "step": 56
+    },
+    {
+      "epoch": 0.86,
+      "learning_rate": 2e-05,
+      "loss": 1.4609,
+      "step": 58
+    },
+    {
+      "epoch": 0.89,
+      "learning_rate": 2e-05,
+      "loss": 1.4965,
+      "step": 60
+    },
+    {
+      "epoch": 0.92,
+      "learning_rate": 2e-05,
+      "loss": 1.6175,
+      "step": 62
+    },
+    {
+      "epoch": 0.95,
+      "learning_rate": 2e-05,
+      "loss": 1.6328,
+      "step": 64
+    },
+    {
+      "epoch": 0.98,
+      "learning_rate": 2e-05,
+      "loss": 1.4841,
+      "step": 66
     }
   ],
   "logging_steps": 2,
-  "max_steps": 48,
+  "max_steps": 67,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 500,
-  "total_flos": 2029337749118976.0,
+  "total_flos": 3677439948509184.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:37cd9403e23e3499bfa37285945b68a8b825a9d9793d8f63ae57f0317364c5f2
+oid sha256:52fb4bad19dcae53734968d3ad050ff304f1677c89d912c30a8116ba2a464ba1
 size 4728