TheBloke committed
Commit f154af3
1 Parent(s): 81840d4

Upload README.md

Files changed (1)
  1. README.md +6 -23
README.md CHANGED
@@ -5,16 +5,9 @@ license: llama2
 model_creator: Xwin-LM
 model_name: Xwin-LM 70B V0.1
 model_type: llama
-prompt_template: 'Below is an instruction that describes a task. Write a response
-  that appropriately completes the request.
-
-
-  ### Instruction:
-
-  {prompt}
-
-
-  ### Response:
+prompt_template: 'A chat between a curious user and an artificial intelligence assistant.
+  The assistant gives helpful, detailed, and polite answers to the user''s questions.
+  USER: {prompt} ASSISTANT:
 
   '
 quantized_by: TheBloke
@@ -63,15 +56,10 @@ It is also now supported by continuous batching server [vLLM](https://github.com
 <!-- repositories-available end -->
 
 <!-- prompt-template start -->
-## Prompt template: Alpaca
+## Prompt template: Vicuna
 
 ```
-Below is an instruction that describes a task. Write a response that appropriately completes the request.
-
-### Instruction:
-{prompt}
-
-### Response:
+A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: {prompt} ASSISTANT:
 
 ```
 
@@ -161,12 +149,7 @@ model = AutoAWQForCausalLM.from_quantized(model_name_or_path, fuse_layers=True,
 tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=False)
 
 prompt = "Tell me about AI"
-prompt_template=f'''Below is an instruction that describes a task. Write a response that appropriately completes the request.
-
-### Instruction:
-{prompt}
-
-### Response:
+prompt_template=f'''A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: {prompt} ASSISTANT:
 
 '''
 
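For context, the sketch below shows how the Vicuna-style template introduced by this commit slots into the AWQ inference example that the last hunk edits. It is a minimal illustration, assuming the `autoawq` and `transformers` packages and the `TheBloke/Xwin-LM-70B-V0.1-AWQ` repo id implied by the surrounding README; the sampling parameters are illustrative assumptions, not taken from this commit.

```python
from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer

# Repo id assumed from the surrounding README context.
model_name_or_path = "TheBloke/Xwin-LM-70B-V0.1-AWQ"

# Load the AWQ-quantised model and tokenizer, as in the README example this commit edits.
model = AutoAWQForCausalLM.from_quantized(model_name_or_path, fuse_layers=True,
                                          trust_remote_code=False, safetensors=True)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=False)

prompt = "Tell me about AI"
# The new Vicuna-style prompt template introduced by this commit.
prompt_template = f'''A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: {prompt} ASSISTANT:
'''

# Tokenize the formatted prompt and generate; the sampling settings are illustrative.
tokens = tokenizer(prompt_template, return_tensors="pt").input_ids.cuda()
output = model.generate(tokens, do_sample=True, temperature=0.7, top_p=0.95,
                        top_k=40, max_new_tokens=512)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```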