End of training

- README.md +15 -9
- all_results.json +12 -12
- config.json +1 -1
- eval_results.json +12 -12
README.md CHANGED
```diff
@@ -1,10 +1,16 @@
 ---
 base_model: Minbyul/selfbiorag-7b-wo-medication_qa-sft
 tags:
+- alignment-handbook
+- trl
+- dpo
+- generated_from_trainer
 - trl
 - dpo
 - alignment-handbook
 - generated_from_trainer
+datasets:
+- HuggingFaceH4/ultrafeedback_binarized
 model-index:
 - name: selfbiorag-7b-dpo-full-sft-wo-medication_qa
   results: []
@@ -15,17 +21,17 @@ should probably proofread and complete it, then remove this comment. -->
 
 # selfbiorag-7b-dpo-full-sft-wo-medication_qa
 
-This model is a fine-tuned version of [Minbyul/selfbiorag-7b-wo-medication_qa-sft](https://huggingface.co/Minbyul/selfbiorag-7b-wo-medication_qa-sft) on …
+This model is a fine-tuned version of [Minbyul/selfbiorag-7b-wo-medication_qa-sft](https://huggingface.co/Minbyul/selfbiorag-7b-wo-medication_qa-sft) on the HuggingFaceH4/ultrafeedback_binarized dataset.
 It achieves the following results on the evaluation set:
-- …
-- …
-- …
-- Logps/rejected: -1442.4718
-- Loss: 0.2756
+- Loss: 0.2759
+- Rewards/chosen: -1.2305
+- Rewards/rejected: -7.1000
 - Rewards/accuracies: 0.8920
-- Rewards/…
-- …
-- …
+- Rewards/margins: 5.8695
+- Logps/rejected: -1442.5582
+- Logps/chosen: -679.8936
+- Logits/rejected: -0.3285
+- Logits/chosen: -0.3524
 
 ## Model description
 
```
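The updated card now records both the training dataset and the DPO evaluation metrics. As a usage note, the checkpoint loads like any other causal LM; below is a minimal sketch, assuming the repository id `Minbyul/selfbiorag-7b-dpo-full-sft-wo-medication_qa` (inferred from the model-index name, not confirmed by this diff) and the bfloat16 dtype declared in config.json.

```python
# Minimal loading/generation sketch. The repo id is an assumption inferred from
# the model-index name in this card; adjust it if the actual repository differs.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "Minbyul/selfbiorag-7b-dpo-full-sft-wo-medication_qa"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16)

prompt = "What should I know before taking ibuprofen?"  # illustrative prompt only
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```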
all_results.json CHANGED
```diff
@@ -1,18 +1,18 @@
 {
     "epoch": 1.0,
-    "eval_logits/chosen": -0.…,
-    "eval_logits/rejected": -0.…,
-    "eval_logps/chosen": -679.…,
-    "eval_logps/rejected": -1442.…,
-    "eval_loss": 0.…,
-    "eval_rewards/accuracies": 0.…,
-    "eval_rewards/chosen": -1.…,
-    "eval_rewards/margins": 5.…,
-    "eval_rewards/rejected": -7.…,
-    "eval_runtime": …,
+    "eval_logits/chosen": -0.35237225890159607,
+    "eval_logits/rejected": -0.32851526141166687,
+    "eval_logps/chosen": -679.8935546875,
+    "eval_logps/rejected": -1442.5582275390625,
+    "eval_loss": 0.27591243386268616,
+    "eval_rewards/accuracies": 0.8920454382896423,
+    "eval_rewards/chosen": -1.230484962463379,
+    "eval_rewards/margins": 5.869514465332031,
+    "eval_rewards/rejected": -7.099999904632568,
+    "eval_runtime": 171.0958,
     "eval_samples": 1392,
-    "eval_samples_per_second": 8.…,
-    "eval_steps_per_second": 0.…,
+    "eval_samples_per_second": 8.136,
+    "eval_steps_per_second": 0.257,
     "train_loss": 0.0030385508506429234,
     "train_runtime": 144.6456,
     "train_samples": 19761,
```
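The added evaluation numbers are internally consistent: the reported throughput is simply eval_samples divided by eval_runtime. A quick check using only the values written in this commit:

```python
# Sanity check on the reported eval throughput, using values from all_results.json.
eval_samples = 1392
eval_runtime = 171.0958  # seconds

print(eval_samples / eval_runtime)  # ~8.136, matching "eval_samples_per_second": 8.136
```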
config.json CHANGED
```diff
@@ -23,6 +23,6 @@
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.39.0.dev0",
-  "use_cache": …,
+  "use_cache": true,
   "vocab_size": 32016
 }
```
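The only config change is `use_cache`, which is enabled here; the previous value is truncated in this view, but the KV cache is commonly switched off during gradient-checkpointed training and restored for inference once training ends. A minimal sketch to inspect the setting after this commit, again assuming the repo id mentioned above:

```python
# Inspect the updated config value; AutoConfig downloads only config.json.
from transformers import AutoConfig

config = AutoConfig.from_pretrained(
    "Minbyul/selfbiorag-7b-dpo-full-sft-wo-medication_qa"  # assumed repo id
)
print(config.use_cache)   # True after this commit
print(config.vocab_size)  # 32016
```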
eval_results.json CHANGED
```diff
@@ -1,16 +1,16 @@
 {
     "epoch": 1.0,
-    "eval_logits/chosen": -0.…,
-    "eval_logits/rejected": -0.…,
-    "eval_logps/chosen": -679.…,
-    "eval_logps/rejected": -1442.…,
-    "eval_loss": 0.…,
-    "eval_rewards/accuracies": 0.…,
-    "eval_rewards/chosen": -1.…,
-    "eval_rewards/margins": 5.…,
-    "eval_rewards/rejected": -7.…,
-    "eval_runtime": …,
+    "eval_logits/chosen": -0.35237225890159607,
+    "eval_logits/rejected": -0.32851526141166687,
+    "eval_logps/chosen": -679.8935546875,
+    "eval_logps/rejected": -1442.5582275390625,
+    "eval_loss": 0.27591243386268616,
+    "eval_rewards/accuracies": 0.8920454382896423,
+    "eval_rewards/chosen": -1.230484962463379,
+    "eval_rewards/margins": 5.869514465332031,
+    "eval_rewards/rejected": -7.099999904632568,
+    "eval_runtime": 171.0958,
     "eval_samples": 1392,
-    "eval_samples_per_second": 8.…,
-    "eval_steps_per_second": 0.…
+    "eval_samples_per_second": 8.136,
+    "eval_steps_per_second": 0.257
 }
```
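eval_results.json carries the same evaluation block as all_results.json. Under the usual TRL DPOTrainer convention, Rewards/margins is the chosen reward minus the rejected reward averaged over eval batches, so the stored fields should agree up to batching and rounding. A short cross-check, assuming the file has been downloaded locally as eval_results.json:

```python
# Cross-check the DPO reward margin against the chosen/rejected rewards.
import json

with open("eval_results.json") as f:
    metrics = json.load(f)

margin = metrics["eval_rewards/chosen"] - metrics["eval_rewards/rejected"]
print(margin)                           # ~5.8695
print(metrics["eval_rewards/margins"])  # 5.869514465332031
```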