TheBloke committed
Commit d15afa9
1 Parent(s): 75a4c48

Upload README.md

Files changed (1): README.md (+12 -6)
README.md CHANGED
@@ -7,7 +7,9 @@ license: mit
 model_creator: Nobody.png
 model_name: Yi 34B GiftedConvo Llama
 model_type: llama
-prompt_template: '{prompt}
+prompt_template: 'USER: {prompt}
+
+ASSISTANT:

  '
 quantized_by: TheBloke
@@ -65,10 +67,11 @@ It is supported by:
 <!-- repositories-available end -->

 <!-- prompt-template start -->
-## Prompt template: Unknown
+## Prompt template: User-Assistant

 ```
-{prompt}
+USER: {prompt}
+ASSISTANT:

 ```

@@ -141,7 +144,8 @@ prompts = [
     "What is 291 - 150?",
     "How much wood would a woodchuck chuck if a woodchuck could chuck wood?",
 ]
-prompt_template=f'''{prompt}
+prompt_template=f'''USER: {prompt}
+ASSISTANT:
 '''

 prompts = [prompt_template.format(prompt=prompt) for prompt in prompts]
@@ -183,7 +187,8 @@ from huggingface_hub import InferenceClient
 endpoint_url = "https://your-endpoint-url-here"

 prompt = "Tell me about AI"
-prompt_template=f'''{prompt}
+prompt_template=f'''USER: {prompt}
+ASSISTANT:
 '''

 client = InferenceClient(endpoint_url)
@@ -234,7 +239,8 @@ model = AutoAWQForCausalLM.from_quantized(model_name_or_path, fuse_layers=True,
                                           trust_remote_code=True, safetensors=True)

 prompt = "Tell me about AI"
-prompt_template=f'''{prompt}
+prompt_template=f'''USER: {prompt}
+ASSISTANT:
 '''

 print("*** Running model.generate:")