Committed by Globaly
Commit 532cf7f · 1 parent: 5552377

Upload 33 files

README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - autotrain
+ - text-generation
+ widget:
+ - text: "I love AutoTrain because "
+ ---
+
+ # Model Trained Using AutoTrain
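
The README above carries only the AutoTrain tags and the widget prompt. A minimal inference sketch for this adapter could look like the following, assuming `transformers`, `peft`, `accelerate` and enough GPU memory; the base model id comes from `adapter_config.json` and the repo id `Globaly/Globaly-1-es-families-164k` is taken from `training_params.json` below. This is illustrative, not the project's own code.

```python
# Sketch: load the base model, resize embeddings for the added <PAD> token,
# then attach the LoRA adapter from this repository.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "marianbasti/Llama-2-13b-fp16-alpaca-spanish"   # base_model_name_or_path
adapter_id = "Globaly/Globaly-1-es-families-164k"         # repo_id from training_params.json

tokenizer = AutoTokenizer.from_pretrained(adapter_id)
model = AutoModelForCausalLM.from_pretrained(
    base_id, torch_dtype=torch.float16, device_map="auto"
)
model.resize_token_embeddings(len(tokenizer))             # account for the extra <PAD> token (id 32000)
model = PeftModel.from_pretrained(model, adapter_id)

inputs = tokenizer("I love AutoTrain because ", return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```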
adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "marianbasti/Llama-2-13b-fp16-alpaca-spanish",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 16,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
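
For reference, the adapter configuration above maps onto a PEFT `LoraConfig` roughly as in this sketch; the field names correspond one-to-one to the JSON keys. It is illustrative, not the code that produced the file.

```python
from peft import LoraConfig

# Sketch of the LoRA settings recorded in adapter_config.json.
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)
```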
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da8a4f46c2c781fa1a9bcc56f85a753e09301b946def97ccac2375f65d785ee2
+ size 52450328
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+ "<PAD>": 32000
+ }
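
`added_tokens.json` records a single extra token, `<PAD>`, appended after the 32,000-token Llama vocabulary. A file like this is typically produced by registering a pad token on the tokenizer, roughly as in this sketch (not the project's own code):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("marianbasti/Llama-2-13b-fp16-alpaca-spanish")
num_added = tokenizer.add_special_tokens({"pad_token": "<PAD>"})  # <PAD> gets the next free id, 32000
print(num_added, tokenizer.pad_token_id)                          # 1 32000
```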
checkpoint-2484/README.md ADDED
@@ -0,0 +1,34 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float16
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float16
+ ### Framework versions
+
+ - PEFT 0.5.0
+
+ - PEFT 0.5.0
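
The quantization settings listed in this checkpoint README correspond to a `transformers` `BitsAndBytesConfig` along these lines — a sketch built from the values above, not the original training script:

```python
import torch
from transformers import BitsAndBytesConfig

# 4-bit NF4 quantization with fp16 compute, matching the values listed above.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float16,
    llm_int8_threshold=6.0,
)
```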
checkpoint-2484/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "marianbasti/Llama-2-13b-fp16-alpaca-spanish",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 16,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-2484/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9cea6bacf694d81926e90f6a8cd05289da266e6e0bd8576e394c72b80bed1d1e
+ size 52486922
checkpoint-2484/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da8a4f46c2c781fa1a9bcc56f85a753e09301b946def97ccac2375f65d785ee2
+ size 52450328
checkpoint-2484/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+ "<PAD>": 32000
+ }
checkpoint-2484/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d5934fc7e26f1e252766582d1e0afc50c9f433505c019a64c8df67c3648c2065
+ size 104992698
checkpoint-2484/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:049c26b844b79121ddd8379f7f69194e63f6fbf6aa007eeac0c66f17eebb8893
+ size 888
checkpoint-2484/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8814df3ba3b52b9222c4109317254c31df8dbe8bf57c5cc9f0a907032ad2b66b
+ size 15984
checkpoint-2484/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ec334893fff5e1856d6df56cdedf625b6162d19979868ead3562682319a2a1a
+ size 15984
checkpoint-2484/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:20cbcadc3faabf35c69ded742a181d6008e991111cdd6a48d4d0dcec4bc6946a
+ size 15984
checkpoint-2484/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:52bcb8c349feed4a1824a95215ad3b330aef467cc1e40ca8be2850a8986b2586
+ size 15984
checkpoint-2484/rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e66ae24f272166ea81e1105ee8a940d5bc3a157dd52d28369eb3d7f28555f2d
+ size 15984
checkpoint-2484/rng_state_5.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d65bd5e151081c4b85ec14777894163baa3df74abf2b5b633872b8412de675f6
+ size 15984
checkpoint-2484/rng_state_6.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33b2c574e4ff5b66ceb95a757e9839be25f0144b9c4f84f51b863c76fdbcd8a4
+ size 15984
checkpoint-2484/rng_state_7.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0fb004b94e1517cbfc7e4b8c7958a2b339016bbf3694aaa725a1ec39a1201ea9
+ size 15984
checkpoint-2484/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25d3f7dacb5b11fae0cadb2fc862d1abc669ad2e8a86254d6c9a75d0e1f4200b
+ size 1064
checkpoint-2484/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<PAD>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
checkpoint-2484/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2484/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
checkpoint-2484/tokenizer_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "32000": {
+ "content": "<PAD>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": true,
+ "model_max_length": 1024,
+ "pad_token": "<PAD>",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+ }
checkpoint-2484/trainer_state.json ADDED
@@ -0,0 +1,763 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 3.9951749095295535,
+ "eval_steps": 500,
+ "global_step": 2484,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ { "epoch": 0.03, "learning_rate": 1.606425702811245e-05, "loss": 1.1216, "step": 20 },
+ { "epoch": 0.06, "learning_rate": 3.132530120481928e-05, "loss": 0.9435, "step": 40 },
+ { "epoch": 0.1, "learning_rate": 4.738955823293173e-05, "loss": 0.7337, "step": 60 },
+ { "epoch": 0.13, "learning_rate": 6.345381526104418e-05, "loss": 0.4553, "step": 80 },
+ { "epoch": 0.16, "learning_rate": 7.951807228915663e-05, "loss": 0.2854, "step": 100 },
+ { "epoch": 0.19, "learning_rate": 9.558232931726909e-05, "loss": 0.2014, "step": 120 },
+ { "epoch": 0.23, "learning_rate": 0.00011164658634538152, "loss": 0.1777, "step": 140 },
+ { "epoch": 0.26, "learning_rate": 0.00012771084337349396, "loss": 0.164, "step": 160 },
+ { "epoch": 0.29, "learning_rate": 0.00014377510040160642, "loss": 0.1561, "step": 180 },
+ { "epoch": 0.32, "learning_rate": 0.00015983935742971888, "loss": 0.1513, "step": 200 },
+ { "epoch": 0.35, "learning_rate": 0.00017590361445783134, "loss": 0.1481, "step": 220 },
+ { "epoch": 0.39, "learning_rate": 0.00019196787148594377, "loss": 0.1468, "step": 240 },
+ { "epoch": 0.42, "learning_rate": 0.00019910514541387027, "loss": 0.1437, "step": 260 },
+ { "epoch": 0.45, "learning_rate": 0.00019731543624161075, "loss": 0.1408, "step": 280 },
+ { "epoch": 0.48, "learning_rate": 0.00019552572706935123, "loss": 0.1414, "step": 300 },
+ { "epoch": 0.51, "learning_rate": 0.00019373601789709173, "loss": 0.137, "step": 320 },
+ { "epoch": 0.55, "learning_rate": 0.0001919463087248322, "loss": 0.1371, "step": 340 },
+ { "epoch": 0.58, "learning_rate": 0.0001901565995525727, "loss": 0.1367, "step": 360 },
+ { "epoch": 0.61, "learning_rate": 0.0001883668903803132, "loss": 0.1358, "step": 380 },
+ { "epoch": 0.64, "learning_rate": 0.0001865771812080537, "loss": 0.1362, "step": 400 },
+ { "epoch": 0.68, "learning_rate": 0.0001847874720357942, "loss": 0.1339, "step": 420 },
+ { "epoch": 0.71, "learning_rate": 0.0001829977628635347, "loss": 0.1345, "step": 440 },
+ { "epoch": 0.74, "learning_rate": 0.00018120805369127517, "loss": 0.1309, "step": 460 },
+ { "epoch": 0.77, "learning_rate": 0.00017941834451901567, "loss": 0.1315, "step": 480 },
+ { "epoch": 0.8, "learning_rate": 0.00017762863534675615, "loss": 0.1353, "step": 500 },
+ { "epoch": 0.84, "learning_rate": 0.00017583892617449665, "loss": 0.1339, "step": 520 },
+ { "epoch": 0.87, "learning_rate": 0.00017404921700223716, "loss": 0.1319, "step": 540 },
+ { "epoch": 0.9, "learning_rate": 0.00017225950782997763, "loss": 0.1328, "step": 560 },
+ { "epoch": 0.93, "learning_rate": 0.00017046979865771814, "loss": 0.1332, "step": 580 },
+ { "epoch": 0.97, "learning_rate": 0.0001686800894854586, "loss": 0.131, "step": 600 },
+ { "epoch": 1.0, "learning_rate": 0.00016689038031319912, "loss": 0.1278, "step": 620 },
+ { "epoch": 1.03, "learning_rate": 0.00016510067114093962, "loss": 0.1331, "step": 640 },
+ { "epoch": 1.06, "learning_rate": 0.0001633109619686801, "loss": 0.1286, "step": 660 },
+ { "epoch": 1.09, "learning_rate": 0.0001615212527964206, "loss": 0.1276, "step": 680 },
+ { "epoch": 1.13, "learning_rate": 0.00015973154362416107, "loss": 0.1287, "step": 700 },
+ { "epoch": 1.16, "learning_rate": 0.00015794183445190158, "loss": 0.1312, "step": 720 },
+ { "epoch": 1.19, "learning_rate": 0.00015615212527964208, "loss": 0.1298, "step": 740 },
+ { "epoch": 1.22, "learning_rate": 0.00015436241610738256, "loss": 0.1288, "step": 760 },
+ { "epoch": 1.25, "learning_rate": 0.00015257270693512303, "loss": 0.1297, "step": 780 },
+ { "epoch": 1.29, "learning_rate": 0.00015078299776286354, "loss": 0.129, "step": 800 },
+ { "epoch": 1.32, "learning_rate": 0.00014899328859060404, "loss": 0.1286, "step": 820 },
+ { "epoch": 1.35, "learning_rate": 0.00014720357941834454, "loss": 0.1289, "step": 840 },
+ { "epoch": 1.38, "learning_rate": 0.00014541387024608502, "loss": 0.1285, "step": 860 },
+ { "epoch": 1.42, "learning_rate": 0.0001436241610738255, "loss": 0.1283, "step": 880 },
+ { "epoch": 1.45, "learning_rate": 0.00014183445190156602, "loss": 0.1247, "step": 900 },
+ { "epoch": 1.48, "learning_rate": 0.0001400447427293065, "loss": 0.1279, "step": 920 },
+ { "epoch": 1.51, "learning_rate": 0.00013825503355704698, "loss": 0.1249, "step": 940 },
+ { "epoch": 1.54, "learning_rate": 0.00013646532438478748, "loss": 0.1244, "step": 960 },
+ { "epoch": 1.58, "learning_rate": 0.00013467561521252796, "loss": 0.1259, "step": 980 },
+ { "epoch": 1.61, "learning_rate": 0.00013288590604026846, "loss": 0.1257, "step": 1000 },
+ { "epoch": 1.64, "learning_rate": 0.00013109619686800896, "loss": 0.1249, "step": 1020 },
+ { "epoch": 1.67, "learning_rate": 0.00012930648769574944, "loss": 0.1244, "step": 1040 },
+ { "epoch": 1.7, "learning_rate": 0.00012751677852348994, "loss": 0.125, "step": 1060 },
+ { "epoch": 1.74, "learning_rate": 0.00012572706935123044, "loss": 0.1213, "step": 1080 },
+ { "epoch": 1.77, "learning_rate": 0.00012393736017897092, "loss": 0.1235, "step": 1100 },
+ { "epoch": 1.8, "learning_rate": 0.00012214765100671142, "loss": 0.1265, "step": 1120 },
+ { "epoch": 1.83, "learning_rate": 0.0001203579418344519, "loss": 0.1245, "step": 1140 },
+ { "epoch": 1.87, "learning_rate": 0.00011856823266219239, "loss": 0.1245, "step": 1160 },
+ { "epoch": 1.9, "learning_rate": 0.0001167785234899329, "loss": 0.1249, "step": 1180 },
+ { "epoch": 1.93, "learning_rate": 0.00011498881431767338, "loss": 0.1252, "step": 1200 },
+ { "epoch": 1.96, "learning_rate": 0.00011319910514541387, "loss": 0.1236, "step": 1220 },
+ { "epoch": 1.99, "learning_rate": 0.00011140939597315436, "loss": 0.1213, "step": 1240 },
+ { "epoch": 2.03, "learning_rate": 0.00010961968680089485, "loss": 0.1254, "step": 1260 },
+ { "epoch": 2.06, "learning_rate": 0.00010782997762863535, "loss": 0.1221, "step": 1280 },
+ { "epoch": 2.09, "learning_rate": 0.00010604026845637584, "loss": 0.1207, "step": 1300 },
+ { "epoch": 2.12, "learning_rate": 0.00010425055928411633, "loss": 0.1218, "step": 1320 },
+ { "epoch": 2.16, "learning_rate": 0.00010246085011185682, "loss": 0.1244, "step": 1340 },
+ { "epoch": 2.19, "learning_rate": 0.00010067114093959733, "loss": 0.1218, "step": 1360 },
+ { "epoch": 2.22, "learning_rate": 9.888143176733782e-05, "loss": 0.1245, "step": 1380 },
+ { "epoch": 2.25, "learning_rate": 9.70917225950783e-05, "loss": 0.1229, "step": 1400 },
+ { "epoch": 2.28, "learning_rate": 9.53020134228188e-05, "loss": 0.123, "step": 1420 },
+ { "epoch": 2.32, "learning_rate": 9.351230425055928e-05, "loss": 0.1214, "step": 1440 },
+ { "epoch": 2.35, "learning_rate": 9.172259507829977e-05, "loss": 0.1231, "step": 1460 },
+ { "epoch": 2.38, "learning_rate": 8.993288590604028e-05, "loss": 0.123, "step": 1480 },
+ { "epoch": 2.41, "learning_rate": 8.814317673378077e-05, "loss": 0.123, "step": 1500 },
+ { "epoch": 2.44, "learning_rate": 8.635346756152126e-05, "loss": 0.1179, "step": 1520 },
+ { "epoch": 2.48, "learning_rate": 8.456375838926175e-05, "loss": 0.1233, "step": 1540 },
+ { "epoch": 2.51, "learning_rate": 8.277404921700224e-05, "loss": 0.1195, "step": 1560 },
+ { "epoch": 2.54, "learning_rate": 8.098434004474274e-05, "loss": 0.1195, "step": 1580 },
+ { "epoch": 2.57, "learning_rate": 7.919463087248322e-05, "loss": 0.1204, "step": 1600 },
+ { "epoch": 2.61, "learning_rate": 7.740492170022372e-05, "loss": 0.1197, "step": 1620 },
+ { "epoch": 2.64, "learning_rate": 7.561521252796421e-05, "loss": 0.1195, "step": 1640 },
+ { "epoch": 2.67, "learning_rate": 7.382550335570471e-05, "loss": 0.1205, "step": 1660 },
+ { "epoch": 2.7, "learning_rate": 7.203579418344519e-05, "loss": 0.1205, "step": 1680 },
+ { "epoch": 2.73, "learning_rate": 7.024608501118568e-05, "loss": 0.116, "step": 1700 },
+ { "epoch": 2.77, "learning_rate": 6.845637583892618e-05, "loss": 0.1184, "step": 1720 },
+ { "epoch": 2.8, "learning_rate": 6.666666666666667e-05, "loss": 0.1225, "step": 1740 },
+ { "epoch": 2.83, "learning_rate": 6.487695749440716e-05, "loss": 0.1195, "step": 1760 },
+ { "epoch": 2.86, "learning_rate": 6.308724832214765e-05, "loss": 0.1189, "step": 1780 },
+ { "epoch": 2.9, "learning_rate": 6.129753914988815e-05, "loss": 0.1206, "step": 1800 },
+ { "epoch": 2.93, "learning_rate": 5.9507829977628635e-05, "loss": 0.1199, "step": 1820 },
+ { "epoch": 2.96, "learning_rate": 5.771812080536914e-05, "loss": 0.1189, "step": 1840 },
+ { "epoch": 2.99, "learning_rate": 5.592841163310962e-05, "loss": 0.1174, "step": 1860 },
+ { "epoch": 3.02, "learning_rate": 5.413870246085011e-05, "loss": 0.1197, "step": 1880 },
+ { "epoch": 3.06, "learning_rate": 5.234899328859061e-05, "loss": 0.119, "step": 1900 },
+ { "epoch": 3.09, "learning_rate": 5.05592841163311e-05, "loss": 0.1161, "step": 1920 },
+ { "epoch": 3.12, "learning_rate": 4.8769574944071586e-05, "loss": 0.1171, "step": 1940 },
+ { "epoch": 3.15, "learning_rate": 4.697986577181208e-05, "loss": 0.1201, "step": 1960 },
+ { "epoch": 3.18, "learning_rate": 4.519015659955257e-05, "loss": 0.1191, "step": 1980 },
+ { "epoch": 3.22, "learning_rate": 4.340044742729307e-05, "loss": 0.1193, "step": 2000 },
+ { "epoch": 3.25, "learning_rate": 4.161073825503356e-05, "loss": 0.119, "step": 2020 },
+ { "epoch": 3.28, "learning_rate": 3.9821029082774055e-05, "loss": 0.1188, "step": 2040 },
+ { "epoch": 3.31, "learning_rate": 3.8031319910514545e-05, "loss": 0.1165, "step": 2060 },
+ { "epoch": 3.35, "learning_rate": 3.6241610738255034e-05, "loss": 0.1204, "step": 2080 },
+ { "epoch": 3.38, "learning_rate": 3.4451901565995524e-05, "loss": 0.1196, "step": 2100 },
+ { "epoch": 3.41, "learning_rate": 3.266219239373602e-05, "loss": 0.1186, "step": 2120 },
+ { "epoch": 3.44, "learning_rate": 3.087248322147651e-05, "loss": 0.1135, "step": 2140 },
+ { "epoch": 3.47, "learning_rate": 2.9082774049217003e-05, "loss": 0.1196, "step": 2160 },
+ { "epoch": 3.51, "learning_rate": 2.7293064876957496e-05, "loss": 0.1161, "step": 2180 },
+ { "epoch": 3.54, "learning_rate": 2.550335570469799e-05, "loss": 0.1162, "step": 2200 },
+ { "epoch": 3.57, "learning_rate": 2.371364653243848e-05, "loss": 0.1165, "step": 2220 },
+ { "epoch": 3.6, "learning_rate": 2.192393736017897e-05, "loss": 0.1164, "step": 2240 },
+ { "epoch": 3.63, "learning_rate": 2.013422818791946e-05, "loss": 0.116, "step": 2260 },
+ { "epoch": 3.67, "learning_rate": 1.8344519015659954e-05, "loss": 0.1172, "step": 2280 },
+ { "epoch": 3.7, "learning_rate": 1.6554809843400447e-05, "loss": 0.117, "step": 2300 },
+ { "epoch": 3.73, "learning_rate": 1.4765100671140942e-05, "loss": 0.1127, "step": 2320 },
+ { "epoch": 3.76, "learning_rate": 1.2975391498881432e-05, "loss": 0.1157, "step": 2340 },
+ { "epoch": 3.8, "learning_rate": 1.1185682326621925e-05, "loss": 0.1193, "step": 2360 },
+ { "epoch": 3.83, "learning_rate": 9.395973154362418e-06, "loss": 0.1153, "step": 2380 },
+ { "epoch": 3.86, "learning_rate": 7.606263982102908e-06, "loss": 0.1164, "step": 2400 },
+ { "epoch": 3.89, "learning_rate": 5.8165548098434e-06, "loss": 0.1181, "step": 2420 },
+ { "epoch": 3.92, "learning_rate": 4.026845637583892e-06, "loss": 0.1166, "step": 2440 },
+ { "epoch": 3.96, "learning_rate": 2.237136465324385e-06, "loss": 0.1156, "step": 2460 },
+ { "epoch": 3.99, "learning_rate": 4.4742729306487696e-07, "loss": 0.1148, "step": 2480 }
+ ],
+ "logging_steps": 20,
+ "max_steps": 2484,
+ "num_train_epochs": 4,
+ "save_steps": 500,
+ "total_flos": 1.2566003090639028e+19,
+ "trial_name": null,
+ "trial_params": null
+ }
checkpoint-2484/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4806a1f13e6c7021172324a73521b8881ff3059df131797f624b5fa428509ad
+ size 4536
runs/Dec07_16-48-41_globaly/events.out.tfevents.1701989333.globaly.3970.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a7adcb2951311f285f0e9903d8b30a9c22a56d9892a70caa0dde9b37f940fe8
+ size 24431
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<PAD>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "32000": {
+ "content": "<PAD>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": true,
+ "model_max_length": 1024,
+ "pad_token": "<PAD>",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4806a1f13e6c7021172324a73521b8881ff3059df131797f624b5fa428509ad
+ size 4536
training_params.json ADDED
@@ -0,0 +1 @@
+ {"model": "marianbasti/Llama-2-13b-fp16-alpaca-spanish", "data_path": "Globaly/families-es-etapa2", "project_name": "gl_autotrain", "train_split": "train", "valid_split": null, "text_column": "text", "rejected_text_column": "rejected", "lr": 0.0002, "epochs": 4, "batch_size": 2, "warmup_ratio": 0.1, "gradient_accumulation": 4, "optimizer": "adamw_torch", "scheduler": "linear", "weight_decay": 0.01, "max_grad_norm": 1.0, "seed": 42, "add_eos_token": false, "block_size": 1024, "use_peft": true, "lora_r": 16, "lora_alpha": 32, "lora_dropout": 0.05, "logging_steps": 20, "evaluation_strategy": "epoch", "save_total_limit": 1, "save_strategy": "epoch", "auto_find_batch_size": false, "fp16": true, "push_to_hub": true, "use_int8": false, "model_max_length": 1024, "repo_id": "Globaly/Globaly-1-es-families-164k", "use_int4": true, "trainer": "default", "target_modules": null, "merge_adapter": false, "username": null, "use_flash_attention_2": false, "log": "tensorboard", "disable_gradient_checkpointing": false}