tgaddair committed
Commit 1d86083
1 Parent(s): dd8c0b1

Upload 3 files

Files changed (3)
  1. README.md +86 -1
  2. adapter_config.json +26 -0
  3. adapter_model.safetensors +3 -0
README.md CHANGED
@@ -1,3 +1,88 @@
  ---
- license: apache-2.0
+ library_name: peft
+ base_model: mistralai/Mistral-7B-v0.1
+ datasets:
+ - gsm8k
  ---
+
+ # Mistral-7B GSM8K LoRA (r=8)
+
+ Trained with [Ludwig.ai](https://ludwig.ai) and [Predibase](https://predibase.com)!
+
+ Given a grade school math question, the adapter provides the answer, including the reasoning steps.
+
+ Try it in [LoRAX](https://github.com/predibase/lorax):
+
+ ```python
+ from lorax import Client
+
+ client = Client("http://<your_endpoint>")
+
+ question = "<your math question>"
+
+ prompt = f"""
+ Please answer the following question: {question}
+
+ Answer:
+ """
+
+ adapter_id = "tgaddair/mistral-7b-gsmk8k-lora-r8"
+ resp = client.generate(prompt, max_new_tokens=64, adapter_id=adapter_id)
+ print(resp.generated_text)
+ ```
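+
+ You can also load the adapter directly with [PEFT](https://github.com/huggingface/peft). This is a minimal sketch rather than the card's own instructions: it assumes the standard `transformers`/`peft` loading APIs and mirrors the 4-bit quantization used during training.
+
+ ```python
+ from peft import PeftModel
+ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+
+ base_model = "mistralai/Mistral-7B-v0.1"
+ adapter_id = "tgaddair/mistral-7b-gsmk8k-lora-r8"
+
+ # Load the base model in 4-bit, matching the `quantization: bits: 4` training setting.
+ model = AutoModelForCausalLM.from_pretrained(
+     base_model,
+     quantization_config=BitsAndBytesConfig(load_in_4bit=True),
+     device_map="auto",
+ )
+ tokenizer = AutoTokenizer.from_pretrained(base_model)
+
+ # Apply the LoRA weights (r=8 on q_proj/v_proj, per adapter_config.json).
+ model = PeftModel.from_pretrained(model, adapter_id)
+
+ prompt = "Please answer the following question: <your math question>\n\nAnswer:"
+ inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+ outputs = model.generate(**inputs, max_new_tokens=64)
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+ ```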
+
+ ## Model Details
+
+ ### Model Description
+
+ Ludwig config (v0.9.3):
+
+ ```yaml
+ model_type: llm
+ input_features:
+ - name: prompt
+   type: text
+   preprocessing:
+     max_sequence_length: null
+   column: prompt
+ output_features:
+ - name: answer
+   type: text
+   preprocessing:
+     max_sequence_length: null
+   column: answer
+ prompt:
+   template: |-
+     Please answer the following question: {question}
+
+     Answer:
+ preprocessing:
+   split:
+     type: fixed
+     column: split
+   global_max_sequence_length: 2048
+ adapter:
+   type: lora
+ generation:
+   max_new_tokens: 64
+ trainer:
+   type: finetune
+   epochs: 3
+   optimizer:
+     type: paged_adam
+   batch_size: 1
+   eval_steps: 100
+   learning_rate: 0.0002
+   eval_batch_size: 2
+   steps_per_checkpoint: 1000
+   learning_rate_scheduler:
+     decay: cosine
+     warmup_fraction: 0.03
+   gradient_accumulation_steps: 16
+   enable_gradient_checkpointing: true
+ base_model: mistralai/Mistral-7B-v0.1
+ quantization:
+   bits: 4
+ ```
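+
+ As a rough sketch of how such a run could be reproduced (not part of the original card), the config above can be passed to Ludwig's Python API. This assumes the config is saved as `config.yaml` and that gsm8k's `question`/`answer` columns, plus a fixed `split` column (0 = train, 2 = test), are what the prompt template and preprocessing expect:
+
+ ```python
+ import pandas as pd
+ from datasets import load_dataset
+ from ludwig.api import LudwigModel
+
+ # Build a dataframe with the columns the config references: the prompt
+ # template reads {question}, the output feature reads answer, and the
+ # fixed split reads the split column.
+ ds = load_dataset("gsm8k", "main")
+ train = ds["train"].to_pandas()
+ train["split"] = 0
+ test = ds["test"].to_pandas()
+ test["split"] = 2
+ df = pd.concat([train, test], ignore_index=True)
+
+ model = LudwigModel(config="config.yaml")
+ results = model.train(dataset=df)
+ ```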
adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "mistralai/Mistral-7B-v0.1",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17baada90dfd19618d8515f5fb56a82fb120d72dc74ad8993cb14b428e73339f
+ size 13648432