sizhkhy committed · verified
Commit a4159b1 · 1 Parent(s): aa3e8c8
README.md CHANGED
@@ -1,25 +1,25 @@
  ---
  library_name: peft
  license: other
- base_model: unsloth/Llama-3.2-3B-Instruct
+ base_model: unsloth/llama-3.2-3b-instruct-bnb-4bit
  tags:
  - llama-factory
  - lora
  - unsloth
  - generated_from_trainer
  model-index:
- - name: llm3br256
+ - name: llm3br256-v1.5
    results: []
  ---

  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
  should probably proofread and complete it, then remove this comment. -->

- # llm3br256
+ # llm3br256-v1.5

  This model is a fine-tuned version of [meta-llama/Llama-3.2-3B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct) on the asianpaints dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.0114
+ - Loss: 0.0157

  ## Model description

@@ -39,128 +39,94 @@ More information needed

  The following hyperparameters were used during training:
  - learning_rate: 0.0001
- - train_batch_size: 4
- - eval_batch_size: 4
+ - train_batch_size: 48
+ - eval_batch_size: 48
  - seed: 42
- - gradient_accumulation_steps: 8
- - total_train_batch_size: 32
  - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
  - lr_scheduler_type: cosine
  - lr_scheduler_warmup_ratio: 0.1
- - num_epochs: 5.0
+ - num_epochs: 25

  ### Training results

  | Training Loss | Epoch | Step | Validation Loss |
  |:-------------:|:------:|:----:|:---------------:|
- | 0.0567 | 0.0460 | 5 | 0.0584 |
- | 0.0378 | 0.0920 | 10 | 0.0384 |
- | 0.0301 | 0.1379 | 15 | 0.0318 |
- | 0.0248 | 0.1839 | 20 | 0.0281 |
- | 0.0241 | 0.2299 | 25 | 0.0256 |
- | 0.021 | 0.2759 | 30 | 0.0234 |
- | 0.0213 | 0.3218 | 35 | 0.0225 |
- | 0.0211 | 0.3678 | 40 | 0.0214 |
- | 0.0185 | 0.4138 | 45 | 0.0200 |
- | 0.0162 | 0.4598 | 50 | 0.0196 |
- | 0.0177 | 0.5057 | 55 | 0.0189 |
- | 0.0168 | 0.5517 | 60 | 0.0184 |
- | 0.017 | 0.5977 | 65 | 0.0182 |
- | 0.0143 | 0.6437 | 70 | 0.0177 |
- | 0.0143 | 0.6897 | 75 | 0.0176 |
- | 0.0155 | 0.7356 | 80 | 0.0176 |
- | 0.0162 | 0.7816 | 85 | 0.0169 |
- | 0.0164 | 0.8276 | 90 | 0.0164 |
- | 0.0154 | 0.8736 | 95 | 0.0162 |
- | 0.0164 | 0.9195 | 100 | 0.0159 |
- | 0.0156 | 0.9655 | 105 | 0.0160 |
- | 0.0145 | 1.0115 | 110 | 0.0159 |
- | 0.0133 | 1.0575 | 115 | 0.0156 |
- | 0.0126 | 1.1034 | 120 | 0.0155 |
- | 0.0145 | 1.1494 | 125 | 0.0154 |
- | 0.0125 | 1.1954 | 130 | 0.0150 |
- | 0.0122 | 1.2414 | 135 | 0.0148 |
- | 0.0127 | 1.2874 | 140 | 0.0147 |
- | 0.0139 | 1.3333 | 145 | 0.0144 |
- | 0.0122 | 1.3793 | 150 | 0.0144 |
- | 0.0138 | 1.4253 | 155 | 0.0139 |
- | 0.0143 | 1.4713 | 160 | 0.0139 |
- | 0.0124 | 1.5172 | 165 | 0.0138 |
- | 0.0124 | 1.5632 | 170 | 0.0135 |
- | 0.0138 | 1.6092 | 175 | 0.0132 |
- | 0.0112 | 1.6552 | 180 | 0.0136 |
- | 0.0102 | 1.7011 | 185 | 0.0135 |
- | 0.0135 | 1.7471 | 190 | 0.0133 |
- | 0.01 | 1.7931 | 195 | 0.0135 |
- | 0.0115 | 1.8391 | 200 | 0.0131 |
- | 0.0113 | 1.8851 | 205 | 0.0127 |
- | 0.0107 | 1.9310 | 210 | 0.0128 |
- | 0.0122 | 1.9770 | 215 | 0.0128 |
- | 0.0099 | 2.0230 | 220 | 0.0128 |
- | 0.0121 | 2.0690 | 225 | 0.0129 |
- | 0.0103 | 2.1149 | 230 | 0.0128 |
- | 0.01 | 2.1609 | 235 | 0.0127 |
- | 0.0089 | 2.2069 | 240 | 0.0127 |
- | 0.0089 | 2.2529 | 245 | 0.0127 |
- | 0.0105 | 2.2989 | 250 | 0.0125 |
- | 0.0093 | 2.3448 | 255 | 0.0124 |
- | 0.0097 | 2.3908 | 260 | 0.0126 |
- | 0.0091 | 2.4368 | 265 | 0.0126 |
- | 0.0095 | 2.4828 | 270 | 0.0124 |
- | 0.0094 | 2.5287 | 275 | 0.0123 |
- | 0.0092 | 2.5747 | 280 | 0.0119 |
- | 0.0084 | 2.6207 | 285 | 0.0121 |
- | 0.0098 | 2.6667 | 290 | 0.0120 |
- | 0.0097 | 2.7126 | 295 | 0.0122 |
- | 0.0093 | 2.7586 | 300 | 0.0121 |
- | 0.0096 | 2.8046 | 305 | 0.0119 |
- | 0.0097 | 2.8506 | 310 | 0.0117 |
- | 0.0101 | 2.8966 | 315 | 0.0118 |
- | 0.0088 | 2.9425 | 320 | 0.0118 |
- | 0.0096 | 2.9885 | 325 | 0.0118 |
- | 0.0078 | 3.0345 | 330 | 0.0119 |
- | 0.0064 | 3.0805 | 335 | 0.0119 |
- | 0.0073 | 3.1264 | 340 | 0.0121 |
- | 0.0066 | 3.1724 | 345 | 0.0121 |
- | 0.0067 | 3.2184 | 350 | 0.0117 |
- | 0.007 | 3.2644 | 355 | 0.0118 |
- | 0.0072 | 3.3103 | 360 | 0.0116 |
- | 0.0074 | 3.3563 | 365 | 0.0117 |
- | 0.0067 | 3.4023 | 370 | 0.0117 |
- | 0.0072 | 3.4483 | 375 | 0.0117 |
- | 0.0069 | 3.4943 | 380 | 0.0117 |
- | 0.0076 | 3.5402 | 385 | 0.0116 |
- | 0.0068 | 3.5862 | 390 | 0.0114 |
- | 0.0074 | 3.6322 | 395 | 0.0115 |
- | 0.0065 | 3.6782 | 400 | 0.0114 |
- | 0.007 | 3.7241 | 405 | 0.0112 |
- | 0.0064 | 3.7701 | 410 | 0.0112 |
- | 0.0073 | 3.8161 | 415 | 0.0111 |
- | 0.0065 | 3.8621 | 420 | 0.0113 |
- | 0.0069 | 3.9080 | 425 | 0.0111 |
- | 0.0065 | 3.9540 | 430 | 0.0111 |
- | 0.0076 | 4.0 | 435 | 0.0111 |
- | 0.0047 | 4.0460 | 440 | 0.0115 |
- | 0.0053 | 4.0920 | 445 | 0.0119 |
- | 0.0053 | 4.1379 | 450 | 0.0120 |
- | 0.0055 | 4.1839 | 455 | 0.0119 |
- | 0.0053 | 4.2299 | 460 | 0.0117 |
- | 0.0053 | 4.2759 | 465 | 0.0117 |
- | 0.0053 | 4.3218 | 470 | 0.0117 |
- | 0.0058 | 4.3678 | 475 | 0.0116 |
- | 0.0053 | 4.4138 | 480 | 0.0116 |
- | 0.0053 | 4.4598 | 485 | 0.0118 |
- | 0.0051 | 4.5057 | 490 | 0.0117 |
- | 0.0053 | 4.5517 | 495 | 0.0117 |
- | 0.0059 | 4.5977 | 500 | 0.0117 |
- | 0.0055 | 4.6437 | 505 | 0.0117 |
- | 0.0054 | 4.6897 | 510 | 0.0116 |
- | 0.0055 | 4.7356 | 515 | 0.0117 |
- | 0.0056 | 4.7816 | 520 | 0.0116 |
- | 0.0048 | 4.8276 | 525 | 0.0116 |
- | 0.0049 | 4.8736 | 530 | 0.0116 |
- | 0.0043 | 4.9195 | 535 | 0.0116 |
- | 0.0046 | 4.9655 | 540 | 0.0116 |
+ | 0.1302 | 0.1208 | 25 | 0.1429 |
+ | 0.0909 | 0.2415 | 50 | 0.0968 |
+ | 0.062 | 0.3623 | 75 | 0.0748 |
+ | 0.0602 | 0.4831 | 100 | 0.0608 |
+ | 0.057 | 0.6039 | 125 | 0.0552 |
+ | 0.0456 | 0.7246 | 150 | 0.0501 |
+ | 0.0432 | 0.8454 | 175 | 0.0470 |
+ | 0.0416 | 0.9662 | 200 | 0.0447 |
+ | 0.0441 | 1.0870 | 225 | 0.0428 |
+ | 0.0348 | 1.2077 | 250 | 0.0405 |
+ | 0.0355 | 1.3285 | 275 | 0.0386 |
+ | 0.0379 | 1.4493 | 300 | 0.0358 |
+ | 0.032 | 1.5700 | 325 | 0.0354 |
+ | 0.0342 | 1.6908 | 350 | 0.0335 |
+ | 0.0318 | 1.8116 | 375 | 0.0324 |
+ | 0.031 | 1.9324 | 400 | 0.0318 |
+ | 0.0283 | 2.0531 | 425 | 0.0321 |
+ | 0.0275 | 2.1739 | 450 | 0.0337 |
+ | 0.026 | 2.2947 | 475 | 0.0314 |
+ | 0.0244 | 2.4155 | 500 | 0.0285 |
+ | 0.0281 | 2.5362 | 525 | 0.0285 |
+ | 0.0212 | 2.6570 | 550 | 0.0268 |
+ | 0.0221 | 2.7778 | 575 | 0.0267 |
+ | 0.0225 | 2.8986 | 600 | 0.0266 |
+ | 0.0264 | 3.0193 | 625 | 0.0292 |
+ | 0.0196 | 3.1401 | 650 | 0.0280 |
+ | 0.0185 | 3.2609 | 675 | 0.0264 |
+ | 0.0161 | 3.3816 | 700 | 0.0248 |
+ | 0.0186 | 3.5024 | 725 | 0.0226 |
+ | 0.0166 | 3.6232 | 750 | 0.0213 |
+ | 0.0141 | 3.7440 | 775 | 0.0215 |
+ | 0.0186 | 3.8647 | 800 | 0.0211 |
+ | 0.0119 | 3.9855 | 825 | 0.0204 |
+ | 0.0097 | 4.1063 | 850 | 0.0210 |
+ | 0.0095 | 4.2271 | 875 | 0.0204 |
+ | 0.0119 | 4.3478 | 900 | 0.0207 |
+ | 0.0131 | 4.4686 | 925 | 0.0257 |
+ | 0.0123 | 4.5894 | 950 | 0.0228 |
+ | 0.0133 | 4.7101 | 975 | 0.0204 |
+ | 0.0115 | 4.8309 | 1000 | 0.0191 |
+ | 0.0152 | 4.9517 | 1025 | 0.0201 |
+ | 0.0075 | 5.0725 | 1050 | 0.0188 |
+ | 0.0069 | 5.1932 | 1075 | 0.0169 |
+ | 0.0073 | 5.3140 | 1100 | 0.0182 |
+ | 0.0076 | 5.4348 | 1125 | 0.0166 |
+ | 0.0084 | 5.5556 | 1150 | 0.0173 |
+ | 0.0091 | 5.6763 | 1175 | 0.0175 |
+ | 0.0081 | 5.7971 | 1200 | 0.0176 |
+ | 0.0071 | 5.9179 | 1225 | 0.0175 |
+ | 0.0058 | 6.0386 | 1250 | 0.0187 |
+ | 0.0081 | 6.1594 | 1275 | 0.0165 |
+ | 0.0057 | 6.2802 | 1300 | 0.0171 |
+ | 0.0068 | 6.4010 | 1325 | 0.0165 |
+ | 0.0059 | 6.5217 | 1350 | 0.0163 |
+ | 0.0057 | 6.6425 | 1375 | 0.0151 |
+ | 0.0061 | 6.7633 | 1400 | 0.0164 |
+ | 0.006 | 6.8841 | 1425 | 0.0156 |
+ | 0.0062 | 7.0048 | 1450 | 0.0161 |
+ | 0.006 | 7.1256 | 1475 | 0.0178 |
+ | 0.0059 | 7.2464 | 1500 | 0.0169 |
+ | 0.0043 | 7.3671 | 1525 | 0.0175 |
+ | 0.0049 | 7.4879 | 1550 | 0.0178 |
+ | 0.0058 | 7.6087 | 1575 | 0.0156 |
+ | 0.0062 | 7.7295 | 1600 | 0.0158 |
+ | 0.0045 | 7.8502 | 1625 | 0.0151 |
+ | 0.0054 | 7.9710 | 1650 | 0.0150 |
+ | 0.0042 | 8.0918 | 1675 | 0.0157 |
+ | 0.0039 | 8.2126 | 1700 | 0.0157 |
+ | 0.0046 | 8.3333 | 1725 | 0.0170 |
+ | 0.0025 | 8.4541 | 1750 | 0.0154 |
+ | 0.0047 | 8.5749 | 1775 | 0.0156 |
+ | 0.0044 | 8.6957 | 1800 | 0.0166 |
+ | 0.0031 | 8.8164 | 1825 | 0.0172 |
+ | 0.0029 | 8.9372 | 1850 | 0.0167 |
+ | 0.0032 | 9.0580 | 1875 | 0.0169 |
+ | 0.0036 | 9.1787 | 1900 | 0.0167 |


  ### Framework versions
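
For context on how these pieces fit together at load time, here is a minimal sketch of attaching a PEFT LoRA adapter such as this one to the base model named in the updated card. The adapter repo id `sizhkhy/llm3br256-v1.5` is an assumption inferred from the committer and model name in this diff, not something the diff confirms.

```python
# Minimal sketch: load the base model, then attach the LoRA adapter.
# ADAPTER is a hypothetical repo id inferred from this commit, not confirmed by it.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE = "unsloth/llama-3.2-3b-instruct-bnb-4bit"  # base_model in the updated card
ADAPTER = "sizhkhy/llm3br256-v1.5"               # assumption: committer + model name

tokenizer = AutoTokenizer.from_pretrained(BASE)
base_model = AutoModelForCausalLM.from_pretrained(BASE, device_map="auto")
# Reads adapter_config.json and adapter_model.safetensors from the adapter repo.
model = PeftModel.from_pretrained(base_model, ADAPTER)
model.eval()
```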
adapter_config.json CHANGED
@@ -1,7 +1,7 @@
  {
  "alpha_pattern": {},
  "auto_mapping": null,
- "base_model_name_or_path": "unsloth/Llama-3.2-3B-Instruct",
+ "base_model_name_or_path": "unsloth/llama-3.2-3b-instruct-bnb-4bit",
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
@@ -20,13 +20,13 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "v_proj",
- "down_proj",
- "up_proj",
  "gate_proj",
+ "up_proj",
  "k_proj",
  "q_proj",
- "o_proj"
+ "o_proj",
+ "down_proj",
+ "v_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2c0db4f9395705ada4e6f7e70d4f25423faaa24ebf979b0e99ba9ec5396e62a6
+ oid sha256:31d343f6b6b08f7edae93eab588cda7cda9d79bbbaf9fa07d5e1d6c5cb5d3b84
  size 1556140392
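
Only the LFS pointer changed here: the adapter weights get a new content hash at an identical size. A pointer file is plain text with three `key value` lines; a small sketch of parsing the new pointer shown above:

```python
# Parse a git-lfs pointer file (three "key value" lines of plain text).
pointer_text = """version https://git-lfs.github.com/spec/v1
oid sha256:31d343f6b6b08f7edae93eab588cda7cda9d79bbbaf9fa07d5e1d6c5cb5d3b84
size 1556140392"""

fields = dict(line.split(" ", 1) for line in pointer_text.splitlines())
assert fields["version"] == "https://git-lfs.github.com/spec/v1"
print(fields["oid"])        # sha256 digest of the actual weights file
print(int(fields["size"]))  # 1556140392 bytes, unchanged from the old commit
```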
all_results.json CHANGED
@@ -1,12 +1,12 @@
  {
- "epoch": 4.9655172413793105,
- "eval_loss": 0.011437705717980862,
- "eval_runtime": 7.6123,
- "eval_samples_per_second": 6.568,
- "eval_steps_per_second": 1.708,
- "total_flos": 1.0432767520256164e+18,
- "train_loss": 0.011179022066709067,
- "train_runtime": 8360.2497,
- "train_samples_per_second": 2.081,
- "train_steps_per_second": 0.065
+ "epoch": 9.178743961352657,
+ "eval_loss": 0.015713630244135857,
+ "eval_runtime": 20.5637,
+ "eval_samples_per_second": 4.863,
+ "eval_steps_per_second": 0.146,
+ "total_flos": 7.164017972028703e+18,
+ "train_loss": 0.021690809104444556,
+ "train_runtime": 50941.7483,
+ "train_samples_per_second": 4.858,
+ "train_steps_per_second": 0.102
  }
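
A quick sanity check on the scale of the two runs is to multiply the reported throughput by the runtime, using only the numbers in the JSON above:

```python
# Rough scale check computed from the all_results.json values above.
runs = {
    "old": {"train_runtime": 8360.2497, "train_samples_per_second": 2.081,
            "epoch": 4.9655172413793105},
    "new": {"train_runtime": 50941.7483, "train_samples_per_second": 4.858,
            "epoch": 9.178743961352657},
}
for name, r in runs.items():
    samples = r["train_runtime"] * r["train_samples_per_second"]
    print(f"{name}: ~{samples:,.0f} samples processed, "
          f"~{samples / r['epoch']:,.0f} per epoch")
```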
eval_results.json CHANGED
@@ -1,7 +1,7 @@
  {
- "epoch": 4.9655172413793105,
- "eval_loss": 0.011437705717980862,
- "eval_runtime": 7.6123,
- "eval_samples_per_second": 6.568,
- "eval_steps_per_second": 1.708
+ "epoch": 9.178743961352657,
+ "eval_loss": 0.015713630244135857,
+ "eval_runtime": 20.5637,
+ "eval_samples_per_second": 4.863,
+ "eval_steps_per_second": 0.146
  }
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
- "epoch": 4.9655172413793105,
- "total_flos": 1.0432767520256164e+18,
- "train_loss": 0.011179022066709067,
- "train_runtime": 8360.2497,
- "train_samples_per_second": 2.081,
- "train_steps_per_second": 0.065
+ "epoch": 9.178743961352657,
+ "total_flos": 7.164017972028703e+18,
+ "train_loss": 0.021690809104444556,
+ "train_runtime": 50941.7483,
+ "train_samples_per_second": 4.858,
+ "train_steps_per_second": 0.102
  }
trainer_log.jsonl CHANGED
The diff for this file is too large to render. See raw diff
 
trainer_state.json CHANGED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a2924fae1a3bc69dae4de109dcab3f2bf31ade53fb954148daf0bf8c3d469a49
+ oid sha256:feb201ca3a258d1e7b54c4a344a5922ba56be5d3f6ad2abb6d618d250dacc72a
  size 5432
training_eval_loss.png CHANGED
training_loss.png CHANGED