TheBloke committed
Commit 0a13ba0
1 Parent(s): 09a2bcf

Upload README.md

Files changed (1):
  1. README.md +20 -27
README.md CHANGED
@@ -13,15 +13,12 @@ model_creator: Jeonghwan Park
 model_name: Pivot 0.1 Evil A
 model_type: mistral
 pipeline_tag: text-generation
-prompt_template: '<|im_start|>system
-
-  {system_message}<|im_end|>
-
-  <|im_start|>user
-
-  {prompt}<|im_end|>
-
-  <|im_start|>assistant
-
+prompt_template: '### Instruction:
+
+  {prompt}
+
+
+  ### Response:
+
   '
 quantized_by: TheBloke
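The `prompt_template` scalar above relies on YAML flow-scalar folding: inside the single quotes, one blank line folds to a newline, two blank lines fold to two, and continuation indentation is stripped. A minimal sketch of what the new value parses to, assuming PyYAML (the `yaml` import is illustration only, not part of this commit):

```python
import yaml

# Frontmatter field as it reads after this commit; blank lines inside the
# single-quoted scalar fold into newlines, continuation indent is dropped.
frontmatter = """\
prompt_template: '### Instruction:

  {prompt}


  ### Response:

  '
"""

meta = yaml.safe_load(frontmatter)
print(repr(meta["prompt_template"]))
# Expected: '### Instruction:\n{prompt}\n\n### Response:\n'
```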
@@ -82,14 +79,13 @@ It is supported by:
 <!-- repositories-available end -->
 
 <!-- prompt-template start -->
-## Prompt template: ChatML
+## Prompt template: Alpaca-InstructOnly2
 
 ```
-<|im_start|>system
-{system_message}<|im_end|>
-<|im_start|>user
-{prompt}<|im_end|>
-<|im_start|>assistant
+### Instruction:
+{prompt}
+
+### Response:
 
 ```
 
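Rendered out, the change swaps ChatML's role markers (and its `{system_message}` slot) for the bare instruction/response framing. A quick sketch in plain Python, no model needed, of what a formatted prompt now looks like:

```python
# New Alpaca-InstructOnly2 template from this commit; note there is no
# {system_message} placeholder, unlike the ChatML template it replaces.
PROMPT_TEMPLATE = """### Instruction:
{prompt}

### Response:
"""

print(PROMPT_TEMPLATE.format(prompt="Tell me about AI"))
# ### Instruction:
# Tell me about AI
#
# ### Response:
```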
@@ -155,11 +151,10 @@ prompts = [
     "What is 291 - 150?",
     "How much wood would a woodchuck chuck if a woodchuck could chuck wood?",
 ]
-prompt_template=f'''<|im_start|>system
-{system_message}<|im_end|>
-<|im_start|>user
-{prompt}<|im_end|>
-<|im_start|>assistant
+prompt_template=f'''### Instruction:
+{prompt}
+
+### Response:
 '''
 
 prompts = [prompt_template.format(prompt=prompt) for prompt in prompts]
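In the vLLM example this hunk sits in, the formatted prompts go straight to `LLM.generate`. A minimal end-to-end sketch with the new template; the repo id and sampling values are assumptions, not part of the diff, and a plain string replaces the README's `f'''` literal, since an f-string would try to interpolate `{prompt}` at definition time:

```python
from vllm import LLM, SamplingParams

prompts = [
    "Tell me about AI",
    "What is 291 - 150?",
]

# Plain (non-f) string so that .format() fills {prompt} later.
prompt_template = '''### Instruction:
{prompt}

### Response:
'''
prompts = [prompt_template.format(prompt=prompt) for prompt in prompts]

sampling_params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=512)
llm = LLM(model="TheBloke/PiVoT-0.1-Evil-a-AWQ", quantization="awq")  # repo id assumed

for output in llm.generate(prompts, sampling_params):
    print(output.outputs[0].text)
```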
@@ -201,11 +196,10 @@ from huggingface_hub import InferenceClient
 endpoint_url = "https://your-endpoint-url-here"
 
 prompt = "Tell me about AI"
-prompt_template=f'''<|im_start|>system
-{system_message}<|im_end|>
-<|im_start|>user
-{prompt}<|im_end|>
-<|im_start|>assistant
+prompt_template=f'''### Instruction:
+{prompt}
+
+### Response:
 '''
 
 client = InferenceClient(endpoint_url)
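The TGI section around this hunk sends the formatted prompt through `huggingface_hub`'s `InferenceClient`. A minimal sketch of that call with the new template; the generation parameters are illustrative assumptions:

```python
from huggingface_hub import InferenceClient

endpoint_url = "https://your-endpoint-url-here"  # placeholder kept from the README

prompt = "Tell me about AI"
prompt_template = '''### Instruction:
{prompt}

### Response:
'''

client = InferenceClient(endpoint_url)
response = client.text_generation(
    prompt_template.format(prompt=prompt),
    max_new_tokens=128,   # illustrative values, not from the diff
    do_sample=True,
    temperature=0.7,
)
print(response)
```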
@@ -268,11 +262,10 @@ model = AutoModelForCausalLM.from_pretrained(
 streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
 
 prompt = "Tell me about AI"
-prompt_template=f'''<|im_start|>system
-{system_message}<|im_end|>
-<|im_start|>user
-{prompt}<|im_end|>
-<|im_start|>assistant
+prompt_template=f'''### Instruction:
+{prompt}
+
+### Response:
 '''
 
 # Convert prompt to tokens
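Finally, the Transformers example tokenizes the formatted prompt and streams the generation. A minimal sketch of the steps that follow the `# Convert prompt to tokens` context line, with the model/tokenizer loading assumed from the surrounding README (the repo id is an assumption):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

model_name_or_path = "TheBloke/PiVoT-0.1-Evil-a-AWQ"  # repo id assumed
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto")

streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

prompt = "Tell me about AI"
prompt_template = '''### Instruction:
{prompt}

### Response:
'''

# Convert prompt to tokens, then stream the completion token by token.
input_ids = tokenizer(prompt_template.format(prompt=prompt), return_tensors="pt").input_ids.to(model.device)
model.generate(input_ids, streamer=streamer, max_new_tokens=512)
```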
 