hugodk-sch committed
Commit: 4751745
Parent: e76985a

Training in progress, step 700

README.md CHANGED
@@ -20,15 +20,15 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
 It achieves the following results on the evaluation set:
- - Loss: 0.6747
- - Rewards/chosen: -0.2693
- - Rewards/rejected: -0.4359
- - Rewards/accuracies: 0.6009
- - Rewards/margins: 0.1666
- - Logps/rejected: -38.1393
- - Logps/chosen: -34.4192
- - Logits/rejected: -2.1170
- - Logits/chosen: -2.1217
+ - Loss: 0.4913
+ - Rewards/chosen: 0.1914
+ - Rewards/rejected: 0.1556
+ - Rewards/accuracies: 0.5453
+ - Rewards/margins: 0.0358
+ - Logps/rejected: -37.2944
+ - Logps/chosen: -33.7611
+ - Logits/rejected: -2.2272
+ - Logits/chosen: -2.2320
 
 ## Model description
 
@@ -57,33 +57,21 @@ The following hyperparameters were used during training:
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
 - lr_scheduler_warmup_ratio: 0.1
- - num_epochs: 4
+ - num_epochs: 1
 
 ### Training results
 
- | Training Loss | Epoch | Step | Logits/chosen | Logits/rejected | Logps/chosen | Logps/rejected | Validation Loss | Rewards/accuracies | Rewards/chosen | Rewards/margins | Rewards/rejected |
- |:-------------:|:-----:|:----:|:-------------:|:---------------:|:------------:|:--------------:|:---------------:|:------------------:|:--------------:|:---------------:|:----------------:|
- | 0.6577 | 0.26 | 100 | -2.2338 | -2.2289 | -34.0400 | -37.5357 | 0.6949 | 0.5316 | -0.0038 | 0.0095 | -0.0133 |
- | 0.6156 | 0.52 | 200 | -2.2317 | -2.2268 | -34.0535 | -37.5578 | 0.6943 | 0.5191 | -0.0132 | 0.0156 | -0.0288 |
- | 0.5468 | 0.78 | 300 | -2.2277 | -2.2229 | -34.0684 | -37.5860 | 0.6903 | 0.5191 | -0.0237 | 0.0249 | -0.0486 |
- | 0.4243 | 1.04 | 400 | 0.6886 | -0.0963 | -0.1489 | 0.5540 | 0.0526 | -37.7293 | -34.1721 | -2.1933 | -2.1981 |
- | 0.353 | 1.3 | 500 | 0.6901 | -0.1994 | -0.2743 | 0.5569 | 0.0749 | -37.9085 | -34.3194 | -2.1884 | -2.1932 |
- | 0.3554 | 1.56 | 600 | 0.6763 | -0.1468 | -0.2572 | 0.5806 | 0.1103 | -37.8840 | -34.2443 | -2.1701 | -2.1749 |
- | 0.3333 | 1.82 | 700 | 0.6813 | -0.1817 | -0.2946 | 0.5743 | 0.1129 | -37.9375 | -34.2941 | -2.1549 | -2.1596 |
- | 0.2025 | 2.08 | 800 | 0.6800 | -0.2316 | -0.3667 | 0.5660 | 0.1351 | -38.0405 | -34.3655 | -2.1413 | -2.1461 |
- | 0.2153 | 2.34 | 900 | 0.6866 | -0.2482 | -0.3826 | 0.5835 | 0.1344 | -38.0632 | -34.3891 | -2.1292 | -2.1340 |
- | 0.2381 | 2.6 | 1000 | 0.6821 | -0.2624 | -0.4162 | 0.5864 | 0.1538 | -38.1112 | -34.4094 | -2.1207 | -2.1255 |
- | 0.1898 | 2.86 | 1100 | 0.6858 | -0.2673 | -0.4161 | 0.5831 | 0.1487 | -38.1110 | -34.4164 | -2.1188 | -2.1235 |
- | 0.2231 | 3.12 | 1200 | 0.6780 | -0.2626 | -0.4264 | 0.5889 | 0.1637 | -38.1257 | -34.4097 | -2.1175 | -2.1223 |
- | 0.164 | 3.38 | 1300 | 0.6834 | -0.2678 | -0.4194 | 0.5947 | 0.1516 | -38.1158 | -34.4172 | -2.1174 | -2.1221 |
- | 0.1562 | 3.64 | 1400 | 0.6753 | -0.2666 | -0.4361 | 0.5922 | 0.1696 | -38.1396 | -34.4154 | -2.1172 | -2.1219 |
- | 0.2163 | 3.9 | 1500 | 0.6831 | -0.2684 | -0.4218 | 0.5801 | 0.1534 | -38.1192 | -34.4180 | -2.1173 | -2.1220 |
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 0.4799 | 0.26 | 100 | 0.4978 | 0.1217 | 0.1117 | 0.5341 | 0.0100 | -37.3570 | -33.8607 | -2.2333 | -2.2381 |
+ | 0.4453 | 0.52 | 200 | 0.4928 | 0.1862 | 0.1561 | 0.5370 | 0.0302 | -37.2937 | -33.7685 | -2.2299 | -2.2347 |
+ | 0.3947 | 0.78 | 300 | 0.4910 | 0.1956 | 0.1591 | 0.5565 | 0.0365 | -37.2894 | -33.7551 | -2.2274 | -2.2322 |
 
 
 ### Framework versions
 
- - PEFT 0.10.0
- - Transformers 4.39.0.dev0
+ - PEFT 0.8.2
+ - Transformers 4.37.2
 - Pytorch 2.1.2+cu121
- - Datasets 2.14.6
+ - Datasets 2.17.0
 - Tokenizers 0.15.1
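Note: the Rewards/*, Logps/*, and Logits/* fields above follow the metric naming of preference-optimization trainers (e.g. TRL's DPOTrainer). The card does not state the objective, so the sketch below only illustrates, under that assumption, how such implicit rewards are conventionally derived from sequence log-probabilities; `beta` and the numeric inputs are placeholders, not values taken from this run.

```python
# Minimal sketch, assuming a DPO-style implicit reward beta * (log p_policy - log p_ref).
def implicit_reward(policy_logp: float, ref_logp: float, beta: float = 0.1) -> float:
    """Reward assigned to one completion; beta is an assumed placeholder value."""
    return beta * (policy_logp - ref_logp)

# Purely illustrative numbers, not taken from this run:
chosen = implicit_reward(policy_logp=-33.8, ref_logp=-35.7)    # -> "Rewards/chosen"
rejected = implicit_reward(policy_logp=-37.3, ref_logp=-38.9)  # -> "Rewards/rejected"
margin = chosen - rejected                                     # -> "Rewards/margins"
accuracy = float(chosen > rejected)                            # fraction of pairs with positive margin
print(round(chosen, 4), round(rejected, 4), round(margin, 4), accuracy)
```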
adapter_config.json CHANGED
@@ -21,12 +21,12 @@
   "revision": null,
   "target_modules": [
     "k_proj",
-    "v_proj",
-    "q_proj",
-    "up_proj",
-    "down_proj",
+    "o_proj",
     "gate_proj",
-    "o_proj"
+    "up_proj",
+    "q_proj",
+    "v_proj",
+    "down_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:026a6415f2106bbe9c74c61bfa08dae685e1f6c824a2d2e66189c50a8330cb3f
+oid sha256:30d462aa3dfb9338c49917319859d4440a70aea4fafbe93d299c226a2a22aaf4
 size 176183216
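Only the LFS pointer (the sha256 oid) changes here; the adapter tensors themselves are the ~176 MB payload stored in LFS. A hedged sketch of how such an adapter is typically attached to its base model; the adapter repo id is a placeholder, the base id is the one named in the README (it may not be publicly available), and the dtype/device_map choices are assumptions:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "data/ap-gpt-j-6b-sft-qlora-04-08"  # base named in the README; may not be public
adapter_id = "<this-adapter-repo>"            # placeholder for this repository's id

base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.float16, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(base_id)

# Attach the LoRA weights stored in adapter_model.safetensors.
model = PeftModel.from_pretrained(base, adapter_id)
```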
all_results.json CHANGED
@@ -1,21 +1,21 @@
 {
-  "epoch": 4.0,
-  "eval_logits/chosen": -2.1216881275177,
-  "eval_logits/rejected": -2.1169610023498535,
-  "eval_logps/chosen": -34.41923522949219,
-  "eval_logps/rejected": -38.13933563232422,
-  "eval_loss": 0.6746898293495178,
-  "eval_rewards/accuracies": 0.6009136438369751,
-  "eval_rewards/chosen": -0.2692793905735016,
-  "eval_rewards/margins": 0.16662099957466125,
-  "eval_rewards/rejected": -0.43590039014816284,
-  "eval_runtime": 145.1817,
+  "epoch": 1.0,
+  "eval_logits/chosen": -2.2319846153259277,
+  "eval_logits/rejected": -2.227167844772339,
+  "eval_logps/chosen": -33.76113510131836,
+  "eval_logps/rejected": -37.29436492919922,
+  "eval_loss": 0.49125906825065613,
+  "eval_rewards/accuracies": 0.545265793800354,
+  "eval_rewards/chosen": 0.19139321148395538,
+  "eval_rewards/margins": 0.03581271693110466,
+  "eval_rewards/rejected": 0.15558050572872162,
+  "eval_runtime": 145.6689,
   "eval_samples": 343,
-  "eval_samples_per_second": 2.363,
-  "eval_steps_per_second": 0.296,
-  "train_loss": 0.2216110908365869,
-  "train_runtime": 10768.4727,
+  "eval_samples_per_second": 2.355,
+  "eval_steps_per_second": 0.295,
+  "train_loss": 0.45851393303313814,
+  "train_runtime": 3252.0771,
   "train_samples": 3079,
-  "train_samples_per_second": 1.144,
-  "train_steps_per_second": 0.143
+  "train_samples_per_second": 0.947,
+  "train_steps_per_second": 0.118
 }
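The throughput fields in the updated all_results.json are internally consistent with the reported runtimes and sample counts; a quick sanity check (all numbers copied from the JSON above, so this is arithmetic rather than new data):

```python
# Values from the updated all_results.json
eval_samples, eval_runtime = 343, 145.6689
train_samples, train_runtime = 3079, 3252.0771

print(round(eval_samples / eval_runtime, 3))    # ~2.355  == eval_samples_per_second
print(round(train_samples / train_runtime, 3))  # ~0.947  == train_samples_per_second
# steps_per_second * runtime gives roughly the total optimizer steps for this 1-epoch run:
print(round(0.118 * train_runtime))             # ~384 steps
```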
config.json CHANGED
@@ -18,8 +18,6 @@
   "n_layer": 28,
   "n_positions": 2048,
   "quantization_config": {
-    "_load_in_4bit": true,
-    "_load_in_8bit": false,
     "bnb_4bit_compute_dtype": "float16",
     "bnb_4bit_quant_type": "nf4",
     "bnb_4bit_use_double_quant": false,
@@ -47,7 +45,7 @@
   },
   "tie_word_embeddings": false,
   "tokenizer_class": "GPT2Tokenizer",
-  "transformers_version": "4.39.0.dev0",
+  "transformers_version": "4.37.2",
   "use_cache": true,
   "vocab_size": 50400
 }
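The dropped `_load_in_4bit` / `_load_in_8bit` keys appear to be internal fields that newer transformers releases write into the serialized quantization_config; with 4.37.2 they are not emitted. The remaining bnb_4bit_* keys describe a 4-bit NF4 setup. A minimal sketch of building that quantization config explicitly; only the three bnb_4bit_* values above are taken from the file, while `load_in_4bit=True` is an assumption implied by them:

```python
import torch
from transformers import BitsAndBytesConfig

# Sketch reconstructing the quantization settings shown in config.json.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                       # assumption implied by the bnb_4bit_* keys
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=False,
)
print(bnb_config.to_dict())
```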
eval_results.json CHANGED
@@ -1,16 +1,16 @@
 {
-  "epoch": 4.0,
-  "eval_logits/chosen": -2.1216881275177,
-  "eval_logits/rejected": -2.1169610023498535,
-  "eval_logps/chosen": -34.41923522949219,
-  "eval_logps/rejected": -38.13933563232422,
-  "eval_loss": 0.6746898293495178,
-  "eval_rewards/accuracies": 0.6009136438369751,
-  "eval_rewards/chosen": -0.2692793905735016,
-  "eval_rewards/margins": 0.16662099957466125,
-  "eval_rewards/rejected": -0.43590039014816284,
-  "eval_runtime": 145.1817,
+  "epoch": 1.0,
+  "eval_logits/chosen": -2.2319846153259277,
+  "eval_logits/rejected": -2.227167844772339,
+  "eval_logps/chosen": -33.76113510131836,
+  "eval_logps/rejected": -37.29436492919922,
+  "eval_loss": 0.49125906825065613,
+  "eval_rewards/accuracies": 0.545265793800354,
+  "eval_rewards/chosen": 0.19139321148395538,
+  "eval_rewards/margins": 0.03581271693110466,
+  "eval_rewards/rejected": 0.15558050572872162,
+  "eval_runtime": 145.6689,
   "eval_samples": 343,
-  "eval_samples_per_second": 2.363,
-  "eval_steps_per_second": 0.296
+  "eval_samples_per_second": 2.355,
+  "eval_steps_per_second": 0.295
 }
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-  "epoch": 4.0,
-  "train_loss": 0.2216110908365869,
-  "train_runtime": 10768.4727,
+  "epoch": 1.0,
+  "train_loss": 0.45851393303313814,
+  "train_runtime": 3252.0771,
   "train_samples": 3079,
-  "train_samples_per_second": 1.144,
-  "train_steps_per_second": 0.143
+  "train_samples_per_second": 0.947,
+  "train_steps_per_second": 0.118
 }
trainer_state.json CHANGED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6a7873256b464917bacb6f47f59f51881713a7cee62cb71ff848461a2de6635e
+oid sha256:8e2af54b22e0c705b7be129d81b530e212ba6e920cf50f5a6b51678e37ee371e
 size 5176