hugodk-sch committed
Commit 3d88aee
1 Parent(s): 572c514

Training in progress, step 500

README.md CHANGED
@@ -20,15 +20,15 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
 It achieves the following results on the evaluation set:
- - Loss: 0.4763
- - Rewards/chosen: 0.2774
- - Rewards/rejected: 0.1670
- - Rewards/accuracies: 0.5685
- - Rewards/margins: 0.1104
- - Logps/rejected: -37.2781
- - Logps/chosen: -33.6382
- - Logits/rejected: -2.1561
- - Logits/chosen: -2.1608
+ - Loss: 0.5837
+ - Rewards/chosen: -0.0077
+ - Rewards/rejected: -0.0206
+ - Rewards/accuracies: 0.5428
+ - Rewards/margins: 0.0128
+ - Logps/rejected: -37.5460
+ - Logps/chosen: -34.0456
+ - Logits/rejected: -2.2319
+ - Logits/chosen: -2.2368
 
 ## Model description
 
@@ -57,33 +57,21 @@ The following hyperparameters were used during training:
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
 - lr_scheduler_warmup_ratio: 0.1
- - num_epochs: 4
+ - num_epochs: 1
 
 ### Training results
 
- | Training Loss | Epoch | Step | Logits/chosen | Logits/rejected | Logps/chosen | Logps/rejected | Validation Loss | Rewards/accuracies | Rewards/chosen | Rewards/margins | Rewards/rejected |
- |:-------------:|:-----:|:----:|:-------------:|:---------------:|:------------:|:--------------:|:---------------:|:------------------:|:--------------:|:---------------:|:----------------:|
- | 0.4799 | 0.26 | 100 | -2.2381 | -2.2333 | -33.8607 | -37.3570 | 0.4978 | 0.5341 | 0.1217 | 0.0100 | 0.1117 |
- | 0.4453 | 0.52 | 200 | -2.2347 | -2.2299 | -33.7685 | -37.2937 | 0.4928 | 0.5370 | 0.1862 | 0.0302 | 0.1561 |
- | 0.3947 | 0.78 | 300 | -2.2322 | -2.2274 | -33.7551 | -37.2894 | 0.4910 | 0.5565 | 0.1956 | 0.0365 | 0.1591 |
- | 0.3136 | 1.04 | 400 | -2.2080 | -2.2032 | -33.6280 | -37.1961 | 0.4857 | 0.5797 | 0.2846 | 0.0602 | 0.2244 |
- | 0.2784 | 1.3 | 500 | -2.2098 | -2.2050 | -33.6119 | -37.1567 | 0.4891 | 0.5220 | 0.2959 | 0.0439 | 0.2519 |
- | 0.2593 | 1.56 | 600 | -2.1914 | -2.1866 | -33.5567 | -37.1682 | 0.4795 | 0.5743 | 0.3345 | 0.0906 | 0.2439 |
- | 0.2606 | 1.82 | 700 | -2.1836 | -2.1788 | -33.5791 | -37.2084 | 0.4764 | 0.6063 | 0.3188 | 0.1031 | 0.2158 |
- | 0.1758 | 2.08 | 800 | -2.1727 | -2.1680 | -33.6289 | -37.2668 | 0.4767 | 0.5860 | 0.2840 | 0.1091 | 0.1749 |
- | 0.1687 | 2.34 | 900 | -2.1674 | -2.1626 | -33.6205 | -37.2547 | 0.4770 | 0.5486 | 0.2898 | 0.1065 | 0.1833 |
- | 0.1826 | 2.6 | 1000 | -2.1625 | -2.1578 | -33.6489 | -37.2917 | 0.4764 | 0.5831 | 0.2700 | 0.1126 | 0.1574 |
- | 0.1541 | 2.86 | 1100 | -2.1608 | -2.1561 | -33.6254 | -37.2748 | 0.4751 | 0.5777 | 0.2864 | 0.1171 | 0.1692 |
- | 0.194 | 3.12 | 1200 | -2.1612 | -2.1565 | -33.6265 | -37.2803 | 0.4748 | 0.5801 | 0.2856 | 0.1202 | 0.1654 |
- | 0.1414 | 3.38 | 1300 | -2.1605 | -2.1558 | -33.6261 | -37.2751 | 0.4753 | 0.5831 | 0.2859 | 0.1169 | 0.1690 |
- | 0.1492 | 3.64 | 1400 | -2.1603 | -2.1556 | -33.6279 | -37.2842 | 0.4744 | 0.5918 | 0.2846 | 0.1220 | 0.1627 |
- | 0.1694 | 3.9 | 1500 | -2.1607 | -2.1560 | -33.6314 | -37.2860 | 0.4747 | 0.5569 | 0.2822 | 0.1208 | 0.1614 |
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 0.4711 | 0.26 | 100 | 0.5755 | 0.0163 | 0.0131 | 0.5195 | 0.0032 | -37.4979 | -34.0113 | -2.2352 | -2.2401 |
+ | 0.5061 | 0.52 | 200 | 0.5877 | -0.0108 | -0.0202 | 0.4992 | 0.0094 | -37.5455 | -34.0500 | -2.2337 | -2.2385 |
+ | 0.3371 | 0.78 | 300 | 0.5843 | 0.0001 | -0.0131 | 0.5278 | 0.0132 | -37.5353 | -34.0344 | -2.2322 | -2.2371 |
 
 
 ### Framework versions
 
- - PEFT 0.10.0
- - Transformers 4.39.0.dev0
+ - PEFT 0.8.2
+ - Transformers 4.37.2
 - Pytorch 2.1.2+cu121
- - Datasets 2.14.6
+ - Datasets 2.17.0
 - Tokenizers 0.15.1
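The `Rewards/*` and `Logps/*` rows above match the metric names that TRL's `DPOTrainer` logs during preference tuning, which is consistent with training on a pairwise `*_prefs` dataset. Assuming that convention (the card itself never defines the metrics), each reward is a β-scaled log-probability ratio between the policy and the frozen SFT reference, and the margin and accuracy columns derive from it. A minimal sketch; `beta` is a hypothetical placeholder, since its value appears nowhere in this diff:

```python
import torch

def dpo_reward_metrics(policy_chosen_logps: torch.Tensor,
                       policy_rejected_logps: torch.Tensor,
                       ref_chosen_logps: torch.Tensor,
                       ref_rejected_logps: torch.Tensor,
                       beta: float = 0.1) -> dict:
    """Recompute the Rewards/* columns from summed per-sequence log-probs."""
    # Implicit DPO reward: beta * (log pi_theta(y|x) - log pi_ref(y|x)).
    # beta=0.1 is a hypothetical default, not taken from this commit.
    chosen = beta * (policy_chosen_logps - ref_chosen_logps)
    rejected = beta * (policy_rejected_logps - ref_rejected_logps)
    return {
        "rewards/chosen": chosen.mean().item(),
        "rewards/rejected": rejected.mean().item(),
        # Margin is chosen minus rejected, e.g. 0.0163 - 0.0131 = 0.0032
        # in the step-100 row of the updated table.
        "rewards/margins": (chosen - rejected).mean().item(),
        # Accuracy is the fraction of pairs where the chosen response wins.
        "rewards/accuracies": (chosen > rejected).float().mean().item(),
    }
```

Under this reading, `Logps/*` are summed token log-probabilities of the chosen and rejected completions, which is why they are large negative numbers.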
 
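Only adapter weights are committed here (`adapter_model.safetensors`, about 176 MB), so a consumer would attach them to the base model with PEFT. A minimal loading sketch, assuming the framework versions listed above and a hypothetical repo id, since the commit does not say where the adapter is published:

```python
import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

# Hypothetical repo id: not stated anywhere in this commit.
REPO_ID = "hugodk-sch/aftonposten-title-prefs-adapter"

# AutoPeftModelForCausalLM reads adapter_config.json, fetches the base model
# it names, and attaches the LoRA weights from adapter_model.safetensors.
model = AutoPeftModelForCausalLM.from_pretrained(REPO_ID, torch_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained(REPO_ID)
```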
adapter_config.json CHANGED
@@ -20,13 +20,13 @@
 "rank_pattern": {},
 "revision": null,
 "target_modules": [
- "k_proj",
- "o_proj",
 "gate_proj",
 "up_proj",
+ "o_proj",
+ "down_proj",
 "q_proj",
 "v_proj",
- "down_proj"
+ "k_proj"
 ],
 "task_type": "CAUSAL_LM",
 "use_dora": false,
 
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:e4f8e306a73a999a41c8de964894eaa93994481de6722767ddb43b3c17557965
+ oid sha256:65a417ec51ff0410fddcaee4741abe927bf937a91102d2c02f81ad7f949d370d
 size 176183216
 
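Both binary files in this commit are Git LFS pointers: the repository stores only a `version` line, the `oid sha256:` digest, and the byte size, while the actual blob lives in LFS storage. A small sketch for checking that a downloaded artifact matches its pointer, using the new oid from this commit:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream-hash a file so large checkpoints need not fit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# oid copied from the pointer file committed above
assert sha256_of("adapter_model.safetensors") == (
    "65a417ec51ff0410fddcaee4741abe927bf937a91102d2c02f81ad7f949d370d")
```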
all_results.json CHANGED
@@ -1,21 +1,21 @@
 {
- "epoch": 4.0,
- "eval_logits/chosen": -2.1607675552368164,
- "eval_logits/rejected": -2.156055450439453,
- "eval_logps/chosen": -33.63822555541992,
- "eval_logps/rejected": -37.278076171875,
- "eval_loss": 0.4762561619281769,
- "eval_rewards/accuracies": 0.5685215592384338,
- "eval_rewards/chosen": 0.27742961049079895,
- "eval_rewards/margins": 0.11044850200414658,
- "eval_rewards/rejected": 0.16698110103607178,
- "eval_runtime": 145.2249,
+ "epoch": 1.0,
+ "eval_logits/chosen": -2.236751079559326,
+ "eval_logits/rejected": -2.2318832874298096,
+ "eval_logps/chosen": -34.04561233520508,
+ "eval_logps/rejected": -37.546016693115234,
+ "eval_loss": 0.5836836099624634,
+ "eval_rewards/accuracies": 0.5427741408348083,
+ "eval_rewards/chosen": -0.007742464076727629,
+ "eval_rewards/margins": 0.01283606793731451,
+ "eval_rewards/rejected": -0.02057853527367115,
+ "eval_runtime": 145.5744,
 "eval_samples": 343,
- "eval_samples_per_second": 2.362,
- "eval_steps_per_second": 0.296,
- "train_loss": 0.17319794591371115,
- "train_runtime": 10768.0107,
+ "eval_samples_per_second": 2.356,
+ "eval_steps_per_second": 0.295,
+ "train_loss": 0.46899499707407766,
+ "train_runtime": 3249.86,
 "train_samples": 3079,
- "train_samples_per_second": 1.144,
- "train_steps_per_second": 0.143
+ "train_samples_per_second": 0.947,
+ "train_steps_per_second": 0.118
 }
 
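The derived throughput fields are consistent with the raw counts and runtimes, as a quick check of the new values shows; the shorter `train_runtime` (10768 s down to 3250 s) also tracks the `num_epochs` drop from 4 to 1 in the README, modulo this run's lower samples-per-second:

```python
train_samples, train_runtime = 3079, 3249.86
eval_samples, eval_runtime = 343, 145.5744

assert round(train_samples / train_runtime, 3) == 0.947  # train_samples_per_second
assert round(eval_samples / eval_runtime, 3) == 2.356    # eval_samples_per_second
```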
config.json CHANGED
@@ -18,8 +18,6 @@
 "n_layer": 28,
 "n_positions": 2048,
 "quantization_config": {
- "_load_in_4bit": true,
- "_load_in_8bit": false,
 "bnb_4bit_compute_dtype": "float16",
 "bnb_4bit_quant_type": "nf4",
 "bnb_4bit_use_double_quant": false,
@@ -47,7 +45,7 @@
 },
 "tie_word_embeddings": false,
 "tokenizer_class": "GPT2Tokenizer",
- "transformers_version": "4.39.0.dev0",
+ "transformers_version": "4.37.2",
 "use_cache": true,
 "vocab_size": 50400
 }
 
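The `quantization_config` block is a bitsandbytes 4-bit (QLoRA-style) setup. A sketch of the `BitsAndBytesConfig` that serializes to these fields; `load_in_4bit=True` is inferred from the `"_load_in_4bit": true` entry visible on the removed side, where the newer transformers version wrote it out as a private attribute:

```python
import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # "_load_in_4bit": true (removed side)
    bnb_4bit_compute_dtype=torch.float16,  # "bnb_4bit_compute_dtype": "float16"
    bnb_4bit_quant_type="nf4",             # "bnb_4bit_quant_type": "nf4"
    bnb_4bit_use_double_quant=False,       # "bnb_4bit_use_double_quant": false
)
```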
eval_results.json CHANGED
@@ -1,16 +1,16 @@
 {
- "epoch": 4.0,
- "eval_logits/chosen": -2.1607675552368164,
- "eval_logits/rejected": -2.156055450439453,
- "eval_logps/chosen": -33.63822555541992,
- "eval_logps/rejected": -37.278076171875,
- "eval_loss": 0.4762561619281769,
- "eval_rewards/accuracies": 0.5685215592384338,
- "eval_rewards/chosen": 0.27742961049079895,
- "eval_rewards/margins": 0.11044850200414658,
- "eval_rewards/rejected": 0.16698110103607178,
- "eval_runtime": 145.2249,
+ "epoch": 1.0,
+ "eval_logits/chosen": -2.236751079559326,
+ "eval_logits/rejected": -2.2318832874298096,
+ "eval_logps/chosen": -34.04561233520508,
+ "eval_logps/rejected": -37.546016693115234,
+ "eval_loss": 0.5836836099624634,
+ "eval_rewards/accuracies": 0.5427741408348083,
+ "eval_rewards/chosen": -0.007742464076727629,
+ "eval_rewards/margins": 0.01283606793731451,
+ "eval_rewards/rejected": -0.02057853527367115,
+ "eval_runtime": 145.5744,
 "eval_samples": 343,
- "eval_samples_per_second": 2.362,
- "eval_steps_per_second": 0.296
+ "eval_samples_per_second": 2.356,
+ "eval_steps_per_second": 0.295
 }
 
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
- "epoch": 4.0,
- "train_loss": 0.17319794591371115,
- "train_runtime": 10768.0107,
+ "epoch": 1.0,
+ "train_loss": 0.46899499707407766,
+ "train_runtime": 3249.86,
 "train_samples": 3079,
- "train_samples_per_second": 1.144,
- "train_steps_per_second": 0.143
+ "train_samples_per_second": 0.947,
+ "train_steps_per_second": 0.118
 }
 
trainer_state.json CHANGED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:8e2af54b22e0c705b7be129d81b530e212ba6e920cf50f5a6b51678e37ee371e
+ oid sha256:42bcfdc2f497fa88a496c41b8d8bbebd3c7aa9e6b2566ebda67c8fed3e34f63b
 size 5176
 