TheBloke committed on
Commit
1aa380b
1 Parent(s): 4ede146

Upload README.md

Browse files
Files changed (1) hide show
  1. README.md +29 -5
README.md CHANGED
@@ -11,10 +11,34 @@ license: apache-2.0
11
  model_creator: Mistral AI_
12
  model_name: Mixtral 8X7B Instruct v0.1
13
  model_type: mixtral
14
- prompt_template: '<s>[INST] {prompt} [/INST]
15
 
16
  '
17
  quantized_by: TheBloke
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
  ---
19
  <!-- markdownlint-disable MD041 -->
20
 
@@ -73,7 +97,7 @@ It is supported by:
73
  ## Prompt template: Mistral
74
 
75
  ```
76
- <s>[INST] {prompt} [/INST]
77
 
78
  ```
79
 
@@ -139,7 +163,7 @@ prompts = [
139
  "What is 291 - 150?",
140
  "How much wood would a woodchuck chuck if a woodchuck could chuck wood?",
141
  ]
142
- prompt_template=f'''<s>[INST] {prompt} [/INST]
143
  '''
144
 
145
  prompts = [prompt_template.format(prompt=prompt) for prompt in prompts]
@@ -181,7 +205,7 @@ from huggingface_hub import InferenceClient
181
  endpoint_url = "https://your-endpoint-url-here"
182
 
183
  prompt = "Tell me about AI"
184
- prompt_template=f'''<s>[INST] {prompt} [/INST]
185
  '''
186
 
187
  client = InferenceClient(endpoint_url)
@@ -244,7 +268,7 @@ model = AutoModelForCausalLM.from_pretrained(
244
  streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
245
 
246
  prompt = "Tell me about AI"
247
- prompt_template=f'''<s>[INST] {prompt} [/INST]
248
  '''
249
 
250
  # Convert prompt to tokens
 
11
  model_creator: Mistral AI_
12
  model_name: Mixtral 8X7B Instruct v0.1
13
  model_type: mixtral
14
+ prompt_template: '[INST] {prompt} [/INST]
15
 
16
  '
17
  quantized_by: TheBloke
18
+ widget:
19
+ - output:
20
+ text: 'Arr, shiver me timbers! Ye have a llama on yer lawn, ye say? Well, that
21
+ be a new one for me! Here''s what I''d suggest, arr:
22
+
23
+
24
+ 1. Firstly, ensure yer safety. Llamas may look gentle, but they can be protective
25
+ if they feel threatened.
26
+
27
+ 2. Try to make the area less appealing to the llama. Remove any food sources
28
+ or water that might be attracting it.
29
+
30
+ 3. Contact local animal control or a wildlife rescue organization. They be the
31
+ experts and can provide humane ways to remove the llama from yer property.
32
+
33
+ 4. If ye have any experience with animals, you could try to gently herd the
34
+ llama towards a nearby field or open space. But be careful, arr!
35
+
36
+
37
+ Remember, arr, it be important to treat the llama with respect and care. It
38
+ be a creature just trying to survive, like the rest of us.'
39
+ text: '[INST] You are a pirate chatbot who always responds with Arr and pirate speak!
40
+
41
+ There''s a llama on my lawn, how can I get rid of him? [/INST]'
42
  ---
43
  <!-- markdownlint-disable MD041 -->
44
 
 
97
  ## Prompt template: Mistral
98
 
99
  ```
100
+ [INST] {prompt} [/INST]
101
 
102
  ```
103
 
 
163
  "What is 291 - 150?",
164
  "How much wood would a woodchuck chuck if a woodchuck could chuck wood?",
165
  ]
166
+ prompt_template=f'''[INST] {prompt} [/INST]
167
  '''
168
 
169
  prompts = [prompt_template.format(prompt=prompt) for prompt in prompts]
 
205
  endpoint_url = "https://your-endpoint-url-here"
206
 
207
  prompt = "Tell me about AI"
208
+ prompt_template=f'''[INST] {prompt} [/INST]
209
  '''
210
 
211
  client = InferenceClient(endpoint_url)
 
268
  streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
269
 
270
  prompt = "Tell me about AI"
271
+ prompt_template=f'''[INST] {prompt} [/INST]
272
  '''
273
 
274
  # Convert prompt to tokens