hiyouga committed on
Commit
11fa3e4
1 Parent(s): 29e92e7

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +4 -6
README.md CHANGED
@@ -35,12 +35,10 @@ tokenizer = AutoTokenizer.from_pretrained("hiyouga/Qwen-14B-Chat-LLaMAfied")
35
  model = AutoModelForCausalLM.from_pretrained("hiyouga/Qwen-14B-Chat-LLaMAfied", torch_dtype="auto", device_map="auto")
36
  streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
37
 
38
- query = (
39
- "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
40
- "<|im_start|>user\nWho are you?<|im_end|>\n"
41
- "<|im_start|>assistant\n"
42
- )
43
- inputs = tokenizer([query], return_tensors="pt")
44
  inputs = inputs.to("cuda")
45
  generate_ids = model.generate(**inputs, eos_token_id=[151643, 151645], max_new_tokens=256, streamer=streamer)
46
  ```
 
35
  model = AutoModelForCausalLM.from_pretrained("hiyouga/Qwen-14B-Chat-LLaMAfied", torch_dtype="auto", device_map="auto")
36
  streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
37
 
38
+ messages = [
39
+ {"role": "user", "content": "Who are you?"}
40
+ ]
41
+ inputs = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
 
 
42
  inputs = inputs.to("cuda")
43
  generate_ids = model.generate(**inputs, eos_token_id=[151643, 151645], max_new_tokens=256, streamer=streamer)
44
  ```