John David Pressman committed on
Commit 0c036eb
1 Parent(s): 8ee9327
README.md CHANGED
@@ -1,3 +1,43 @@
  ---
+ library_name: peft
  license: apache-2.0
  ---
+ # minihf_evaluator_mistral_7b_v0.1
+
+ `minihf_evaluator_mistral_7b_v0.1` is a LoRA instruct fine-tune of [Mistral 7B](https://huggingface.co/mistralai/Mistral-7B-v0.1).
+
+ The sequence `<|end|>` is used to separate the prompt from the response, so the correct way to prompt the model is `Does 2 + 2 = 4?<|end|>`. The tokenizer prepends a BOS token (`<s>`) by default, and the response ends with an EOS token (`</s>`).
+
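+ A minimal sketch of sampling from the model with `transformers` and `peft` (the adapter repo id below is an assumption for illustration, not confirmed by this commit):
+
+ ```python
+ from peft import PeftModel
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ base_id = "mistralai/Mistral-7B-v0.1"
+ adapter_id = "jdpressman/minihf_evaluator_mistral_7b_v0.1"  # hypothetical repo id
+
+ tokenizer = AutoTokenizer.from_pretrained(base_id)
+ model = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
+ model = PeftModel.from_pretrained(model, adapter_id)
+
+ # BOS (<s>) is prepended automatically; append <|end|> to close the prompt.
+ inputs = tokenizer("Does 2 + 2 = 4?<|end|>", return_tensors="pt").to(model.device)
+ out = model.generate(**inputs, max_new_tokens=64)
+ # Decode only the newly generated tokens, dropping the prompt.
+ print(tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
+ ```
+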
+ ## Training procedure
+
+ `minihf_evaluator_mistral_7b_v0.1` was fine-tuned for 100,000 examples on a 90% [Muennighoff/flan](https://huggingface.co/datasets/Muennighoff/flan) / 10% [databricks/databricks-dolly-15k](https://huggingface.co/datasets/databricks/databricks-dolly-15k) mixture, using a batch size of 4 per GPU on 8 80GB H100 GPUs. Examples whose prompt and response would not fit into 4096 tokens were dropped. The fine-tuning was done using the following command:
+
+ ```bash
+ accelerate launch sft_evaluator.py --output-dir minihf_evaluator_mistral_7b_v0.1
+ ```
+
+ The following `bitsandbytes` quantization config was used during training (an equivalent `BitsAndBytesConfig` is sketched after the list):
+ - quant_method: bitsandbytes
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: bfloat16
+
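+ A minimal `BitsAndBytesConfig` sketch equivalent to the settings listed above (an illustration, not the training script itself):
+
+ ```python
+ import torch
+ from transformers import AutoModelForCausalLM, BitsAndBytesConfig
+
+ # Mirrors the quantization settings above: 4-bit NF4 with double
+ # quantization and bfloat16 compute.
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_use_double_quant=True,
+     bnb_4bit_compute_dtype=torch.bfloat16,
+     llm_int8_threshold=6.0,
+ )
+
+ model = AutoModelForCausalLM.from_pretrained(
+     "mistralai/Mistral-7B-v0.1",
+     quantization_config=bnb_config,
+     device_map="auto",
+ )
+ ```
+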
+ ### Framework versions
+
+ - PEFT 0.5.0
adapter_config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "auto_mapping": {
+     "base_model_class": "MistralForCausalLM",
+     "parent_library": "transformers.models.mistral.modeling_mistral"
+   },
+   "base_model_name_or_path": "mistralai/Mistral-7B-v0.1",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 8,
+   "lora_dropout": 0.0,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 32,
+   "revision": null,
+   "target_modules": [
+     "self_attn.q_proj",
+     "self_attn.k_proj",
+     "self_attn.v_proj",
+     "self_attn.o_proj",
+     "mlp.gate_proj",
+     "mlp.up_proj",
+     "mlp.down_proj",
+     "lm_head"
+   ],
+   "task_type": null
+ }
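
The adapter applies LoRA (r=32, alpha=8) to every attention and MLP projection plus `lm_head`. For reference, a `peft` `LoraConfig` sketch that mirrors the JSON above (field names assume PEFT 0.5.0; an illustration, not the training script):

```python
from peft import LoraConfig

# Reconstructs the settings from adapter_config.json above.
lora_config = LoraConfig(
    r=32,
    lora_alpha=8,
    lora_dropout=0.0,
    bias="none",
    target_modules=[
        "self_attn.q_proj", "self_attn.k_proj", "self_attn.v_proj", "self_attn.o_proj",
        "mlp.gate_proj", "mlp.up_proj", "mlp.down_proj", "lm_head",
    ],
)
```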
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f5b41c3b7ab4216b02d1322b62caa099df7ee319197560b9099f6beefe2d98ca
+ size 340225224
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "</s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": true,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "</s>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": true
+ }