Minbyul committed
Commit: 0b3f71d
Parent: 68ffcc1

End of training

Files changed (4)
  1. README.md +15 -9
  2. all_results.json +13 -0
  3. config.json +1 -1
  4. eval_results.json +16 -0
README.md CHANGED
@@ -2,9 +2,15 @@
  license: apache-2.0
  base_model: Minbyul/biomistral-7b-wo-medication_qa-sft
  tags:
+ - alignment-handbook
  - trl
  - dpo
  - generated_from_trainer
+ - trl
+ - dpo
+ - generated_from_trainer
+ datasets:
+ - HuggingFaceH4/ultrafeedback_binarized
  model-index:
  - name: biomistral-7b-dpo-full-sft-wo-medication_qa
    results: []
@@ -15,17 +21,17 @@ should probably proofread and complete it, then remove this comment. -->

  # biomistral-7b-dpo-full-sft-wo-medication_qa

- This model is a fine-tuned version of [Minbyul/biomistral-7b-wo-medication_qa-sft](https://huggingface.co/Minbyul/biomistral-7b-wo-medication_qa-sft) on an unknown dataset.
+ This model is a fine-tuned version of [Minbyul/biomistral-7b-wo-medication_qa-sft](https://huggingface.co/Minbyul/biomistral-7b-wo-medication_qa-sft) on the HuggingFaceH4/ultrafeedback_binarized dataset.
  It achieves the following results on the evaluation set:
- - Logits/chosen: -4.1247
- - Logits/rejected: -4.0843
- - Logps/chosen: -456.2566
- - Logps/rejected: -691.8618
- - Loss: 0.4671
+ - Loss: 0.4656
+ - Rewards/chosen: -0.5066
+ - Rewards/rejected: -1.3382
  - Rewards/accuracies: 0.75
- - Rewards/chosen: -0.5020
- - Rewards/margins: 0.8245
- - Rewards/rejected: -1.3265
+ - Rewards/margins: 0.8316
+ - Logps/rejected: -693.0344
+ - Logps/chosen: -456.7123
+ - Logits/rejected: -4.0821
+ - Logits/chosen: -4.1231

  ## Model description

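The updated card now names the DPO preference dataset (HuggingFaceH4/ultrafeedback_binarized) that the SFT base model was aligned on. For orientation, a minimal inference sketch follows; the repository id is inferred from the model-index name and the prompt is purely illustrative, so treat both as assumptions rather than part of the card.

```python
# Minimal sketch: loading the checkpoint this card describes for generation.
# Assumption: the repo id matches the model-index name "biomistral-7b-dpo-full-sft-wo-medication_qa".
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Minbyul/biomistral-7b-dpo-full-sft-wo-medication_qa"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,   # matches "torch_dtype": "bfloat16" in config.json
    device_map="auto",
)

prompt = "What should a patient know before taking ibuprofen?"  # illustrative prompt
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```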
all_results.json CHANGED
@@ -1,5 +1,18 @@
  {
  "epoch": 1.0,
+ "eval_logits/chosen": -4.123082160949707,
+ "eval_logits/rejected": -4.0820841789245605,
+ "eval_logps/chosen": -456.7123107910156,
+ "eval_logps/rejected": -693.0343627929688,
+ "eval_loss": 0.4655996561050415,
+ "eval_rewards/accuracies": 0.75,
+ "eval_rewards/chosen": -0.5065802931785583,
+ "eval_rewards/margins": 0.8316203951835632,
+ "eval_rewards/rejected": -1.338200569152832,
+ "eval_runtime": 12.1695,
+ "eval_samples": 76,
+ "eval_samples_per_second": 6.245,
+ "eval_steps_per_second": 0.247,
  "train_loss": 0.021612109477854958,
  "train_runtime": 343.2656,
  "train_samples": 7736,
config.json CHANGED
@@ -21,6 +21,6 @@
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.39.0.dev0",
- "use_cache": false,
+ "use_cache": true,
  "vocab_size": 32000
  }
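The only config.json change is `use_cache` flipping from `false` back to `true`. Training recipes typically disable the key/value cache while fine-tuning (it is not usable together with gradient checkpointing), and the end-of-training export re-enables it so generation runs at normal speed. A hedged sketch of the same toggle done in code, assuming the repo id used above and an illustrative output path:

```python
# Sketch of the equivalent one-line change done programmatically.
# Assumption: same repo id as above; the path "./export" is illustrative.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("Minbyul/biomistral-7b-dpo-full-sft-wo-medication_qa")
config.use_cache = True               # what this commit writes into config.json
config.save_pretrained("./export")    # persists the updated config.json locally
```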
eval_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+ "epoch": 1.0,
+ "eval_logits/chosen": -4.123082160949707,
+ "eval_logits/rejected": -4.0820841789245605,
+ "eval_logps/chosen": -456.7123107910156,
+ "eval_logps/rejected": -693.0343627929688,
+ "eval_loss": 0.4655996561050415,
+ "eval_rewards/accuracies": 0.75,
+ "eval_rewards/chosen": -0.5065802931785583,
+ "eval_rewards/margins": 0.8316203951835632,
+ "eval_rewards/rejected": -1.338200569152832,
+ "eval_runtime": 12.1695,
+ "eval_samples": 76,
+ "eval_samples_per_second": 6.245,
+ "eval_steps_per_second": 0.247
+ }
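For readers new to these metrics: in TRL's DPO setup the reward margin is the chosen reward minus the rejected reward, and rewards/accuracies is the fraction of preference pairs with a positive margin, so the numbers above should be mutually consistent. A small sketch (assuming the file has been downloaded locally as eval_results.json) checks that:

```python
# Sanity check: eval_rewards/margins should equal eval_rewards/chosen - eval_rewards/rejected
# up to floating-point/batch-averaging noise. Assumes eval_results.json is in the working directory.
import json

with open("eval_results.json") as f:
    results = json.load(f)

margin = results["eval_rewards/chosen"] - results["eval_rewards/rejected"]
print(margin)                                                # ≈ 0.8316
print(abs(margin - results["eval_rewards/margins"]) < 1e-4)  # True
```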