dctanner committed
Commit fc9d379
1 Parent(s): 609f4da

End of training

Files changed (2):
  1. README.md +5 -1
  2. config.json +2 -2
README.md CHANGED
@@ -2,9 +2,13 @@
 license: apache-2.0
 library_name: peft
 tags:
+- alignment-handbook
+- generated_from_trainer
 - trl
 - dpo
 - generated_from_trainer
+datasets:
+- sablo/HelpSteer_binarized
 base_model: sablo/sablo-pebble-mistral
 model-index:
 - name: sablo-pebble-mistral-dpo-lora-HelpSteer_binarized
@@ -16,7 +20,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # sablo-pebble-mistral-dpo-lora-HelpSteer_binarized
 
-This model is a fine-tuned version of [sablo/sablo-pebble-mistral](https://huggingface.co/sablo/sablo-pebble-mistral) on the None dataset.
+This model is a fine-tuned version of [sablo/sablo-pebble-mistral](https://huggingface.co/sablo/sablo-pebble-mistral) on the sablo/HelpSteer_binarized dataset.
 It achieves the following results on the evaluation set:
 - Loss: 0.5371
 - Rewards/chosen: -0.9335
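The README change fills in the dataset metadata for this LoRA DPO run. As a usage note, here is a minimal sketch of loading the trained adapter with peft; the repo id is inferred from the model-index name above and is an assumption, since the diff itself does not state it:

```python
import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

# Assumption: the adapter repo id matches the model-index name in the README.
repo_id = "sablo/sablo-pebble-mistral-dpo-lora-HelpSteer_binarized"

# AutoPeftModelForCausalLM reads adapter_config.json, fetches the base model
# (sablo/sablo-pebble-mistral) and applies the LoRA weights on top of it.
model = AutoPeftModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained(repo_id)
```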
config.json CHANGED
@@ -5,7 +5,7 @@
   ],
   "attention_dropout": 0.0,
   "bos_token_id": 1,
-  "eos_token_id": 2,
+  "eos_token_id": 32000,
   "hidden_act": "silu",
   "hidden_size": 4096,
   "initializer_range": 0.02,
@@ -22,5 +22,5 @@
   "torch_dtype": "float16",
   "transformers_version": "4.36.2",
   "use_cache": true,
-  "vocab_size": 32000
+  "vocab_size": 32002
 }
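The config.json change is the usual footprint of extending the tokenizer before training: two tokens appended to Mistral's 32000-token vocabulary (hence vocab_size 32002), with the first new token (id 32000) promoted to end-of-sequence. A hypothetical sketch of how such a change arises, assuming ChatML-style chat markers (the diff does not show which tokens were actually added):

```python
# Hypothetical reconstruction: the added tokens are an assumption, since the
# diff only shows config.json.
from transformers import AutoModelForCausalLM, AutoTokenizer

base = "sablo/sablo-pebble-mistral"
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(base)

# Appending two tokens to a 32000-token vocabulary assigns them ids 32000
# and 32001, taking the vocab to 32002.
tokenizer.add_special_tokens(
    {"additional_special_tokens": ["<|im_end|>", "<|im_start|>"]}
)
model.resize_token_embeddings(len(tokenizer))  # also updates config.vocab_size

# Declaring the first added token as end-of-sequence yields eos_token_id=32000.
tokenizer.eos_token = "<|im_end|>"
model.config.eos_token_id = tokenizer.eos_token_id
```

Nothing else in config.json changed, which is consistent with a tokenizer-only extension of the base model ahead of the DPO run.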