Update README.md
README.md (changed)
@@ -108,7 +108,7 @@ _Note: first prompt may be slower as there is a slight warmup time_
 #### Install

 ```bash
-git clone https://github.com/
+git clone --branch ibm_7b_instruct_lab_variant --single-branch https://github.com/JRosenkranz/fms-extras.git
 (cd fms-extras && pip install -e .)
 pip install transformers==4.35.0 sentencepiece numpy
 ```
@@ -120,12 +120,13 @@ pip install transformers==4.35.0 sentencepiece numpy
 ```bash
 MODEL_PATH=/path/to/instructlab/granite-7b-lab
 python fms-extras/scripts/paged_speculative_inference.py \
-    --variant=
+    --variant=7b.ibm_instruct_lab \
     --model_path=$MODEL_PATH \
     --model_source=hf \
     --tokenizer=$MODEL_PATH \
     --speculator_path=ibm/granite-7b-lab-accelerator \
     --speculator_source=hf \
+    --speculator_variant=1.4b \
     --top_k_tokens_per_head=4,3,2,2,2 \
     --compile \
     --compile_mode=reduce-overhead
@@ -136,12 +137,13 @@ python fms-extras/scripts/paged_speculative_inference.py \
 ```bash
 MODEL_PATH=/path/to/instructlab/granite-7b-lab
 python fms-extras/scripts/paged_speculative_inference.py \
-    --variant=
+    --variant=7b.ibm_instruct_lab \
     --model_path=$MODEL_PATH \
     --model_source=hf \
     --tokenizer=$MODEL_PATH \
     --speculator_path=ibm/granite-7b-lab-accelerator \
     --speculator_source=hf \
+    --speculator_variant=1.4b \
     --top_k_tokens_per_head=4,3,2,2,2 \
     --compile \
 ```
@@ -151,12 +153,13 @@ python fms-extras/scripts/paged_speculative_inference.py \
 ```bash
 MODEL_PATH=/path/to/instructlab/granite-7b-lab
 python fms-extras/scripts/paged_speculative_inference.py \
-    --variant=
+    --variant=7b.ibm_instruct_lab \
     --model_path=$MODEL_PATH \
     --model_source=hf \
     --tokenizer=$MODEL_PATH \
     --speculator_path=ibm/granite-7b-lab-accelerator \
     --speculator_source=hf \
+    --speculator_variant=1.4b \
     --top_k_tokens_per_head=4,3,2,2,2 \
     --batch_input \
     --compile \
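For quick reference, the updated instructions after this change read roughly as follows. This is only a consolidation of the new lines above, shown for the first run variant; MODEL_PATH is the placeholder path used in the README, and all flags are copied verbatim from the diff.

```bash
# Clone the variant branch of fms-extras, install it, and install the pinned dependencies.
git clone --branch ibm_7b_instruct_lab_variant --single-branch https://github.com/JRosenkranz/fms-extras.git
(cd fms-extras && pip install -e .)
pip install transformers==4.35.0 sentencepiece numpy

# Run paged speculative inference with the instruct-lab model variant and the 1.4b speculator.
MODEL_PATH=/path/to/instructlab/granite-7b-lab
python fms-extras/scripts/paged_speculative_inference.py \
    --variant=7b.ibm_instruct_lab \
    --model_path=$MODEL_PATH \
    --model_source=hf \
    --tokenizer=$MODEL_PATH \
    --speculator_path=ibm/granite-7b-lab-accelerator \
    --speculator_source=hf \
    --speculator_variant=1.4b \
    --top_k_tokens_per_head=4,3,2,2,2 \
    --compile \
    --compile_mode=reduce-overhead
```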