Upload README.md
README.md CHANGED
@@ -7,7 +7,7 @@ license: apache-2.0
model_creator: YeungNLP
model_name: Firefly Mixtral 8X7B
model_type: mixtral
-prompt_template: '{prompt}
+prompt_template: '[INST] {prompt} [/INST]

'
quantized_by: TheBloke
@@ -76,10 +76,10 @@ AWQ models are supported by (note that not all of these may support Mixtral mode
<!-- repositories-available end -->

<!-- prompt-template start -->
-## Prompt template:
+## Prompt template: Mistral

```
-{prompt}
+[INST] {prompt} [/INST]

```

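The hunk above swaps the bare `{prompt}` template for the Mistral-style `[INST] ... [/INST]` wrapper. A minimal sketch of how the new template is filled in, using plain string formatting the same way the README's own examples do further down:

```python
# Updated Mistral-style prompt template from this change
prompt_template = "[INST] {prompt} [/INST]\n"

prompt = "Tell me about AI"
formatted_prompt = prompt_template.format(prompt=prompt)
# -> "[INST] Tell me about AI [/INST]\n"
```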
@@ -145,7 +145,7 @@ prompts = [
    "What is 291 - 150?",
    "How much wood would a woodchuck chuck if a woodchuck could chuck wood?",
]
-prompt_template=f'''{prompt}
+prompt_template=f'''[INST] {prompt} [/INST]
'''

prompts = [prompt_template.format(prompt=prompt) for prompt in prompts]
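This hunk updates the batched prompts in the README's vLLM example. For context, a minimal end-to-end sketch of that call with the new template; the repo id `TheBloke/firefly-mixtral-8x7b-AWQ` and the sampling settings are assumptions, not taken from this diff:

```python
from vllm import LLM, SamplingParams

prompts = [
    "What is 291 - 150?",
    "How much wood would a woodchuck chuck if a woodchuck could chuck wood?",
]
# Wrap every prompt in the updated Mistral-style template
prompt_template = "[INST] {prompt} [/INST]\n"
prompts = [prompt_template.format(prompt=prompt) for prompt in prompts]

sampling_params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=512)

# Repo id is assumed here; vLLM loads AWQ weights with quantization="awq"
llm = LLM(model="TheBloke/firefly-mixtral-8x7b-AWQ", quantization="awq")

outputs = llm.generate(prompts, sampling_params)
for output in outputs:
    print(output.prompt)
    print(output.outputs[0].text)
```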
@@ -187,7 +187,7 @@ from huggingface_hub import InferenceClient
endpoint_url = "https://your-endpoint-url-here"

prompt = "Tell me about AI"
-prompt_template=f'''{prompt}
+prompt_template=f'''[INST] {prompt} [/INST]
'''

client = InferenceClient(endpoint_url)
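This hunk touches the Text Generation Inference example. A short sketch of the surrounding call with the updated template, assuming `huggingface_hub`'s `InferenceClient.text_generation` as in TheBloke's usual TGI section; the endpoint URL stays the placeholder from the README and the generation parameters are illustrative:

```python
from huggingface_hub import InferenceClient

# Placeholder endpoint, exactly as in the README
endpoint_url = "https://your-endpoint-url-here"

prompt = "Tell me about AI"
prompt_template = "[INST] {prompt} [/INST]\n"

client = InferenceClient(endpoint_url)
response = client.text_generation(
    prompt_template.format(prompt=prompt),
    max_new_tokens=128,
    do_sample=True,
    temperature=0.7,
)
print(response)
```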
@@ -250,7 +250,7 @@ model = AutoModelForCausalLM.from_pretrained(
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

prompt = "Tell me about AI"
-prompt_template=f'''{prompt}
+prompt_template=f'''[INST] {prompt} [/INST]
'''

# Convert prompt to tokens
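The final hunk updates the Transformers example around the `TextStreamer`. A minimal sketch of how the reformatted prompt would be tokenized and streamed; the repo id and generation parameters are assumptions, and loading the AWQ weights may additionally require AutoAWQ as the full README describes:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

# Repo id assumed; not stated in this diff
model_name_or_path = "TheBloke/firefly-mixtral-8x7b-AWQ"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
model = AutoModelForCausalLM.from_pretrained(
    model_name_or_path,
    low_cpu_mem_usage=True,
    device_map="auto",
)

streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

prompt = "Tell me about AI"
prompt_template = "[INST] {prompt} [/INST]\n"

# Convert the formatted prompt to tokens, then stream the generation
input_ids = tokenizer(
    prompt_template.format(prompt=prompt), return_tensors="pt"
).input_ids.to(model.device)

model.generate(
    input_ids,
    streamer=streamer,
    max_new_tokens=512,
    do_sample=True,
    temperature=0.7,
)
```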