JustinLin610 committed on
Commit 7cbf3e3
1 Parent(s): 19bd3e0

Update README.md

Files changed (1): README.md +11 -7
README.md CHANGED
@@ -50,19 +50,23 @@ model = AutoModelForCausalLM.from_pretrained("Qwen2/Qwen2-beta-4B-Chat", device_
 tokenizer = AutoTokenizer.from_pretrained("Qwen2/Qwen2-beta-4B-Chat")
 
 prompt = "Give me a short introduction to large language model."
-
 messages = [
     {"role": "system", "content": "You are a helpful assistant."},
     {"role": "user", "content": prompt}
 ]
-
-text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-
+text = tokenizer.apply_chat_template(
+    messages, tokenize=False,
+    add_generation_prompt=True
+)
 model_inputs = tokenizer([text], return_tensors="pt").to(device)
 
-generated_ids = model.generate(model_inputs.input_ids, max_new_tokens=512, do_sample=True)
-
-generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)]
+generated_ids = model.generate(
+    model_inputs.input_ids,
+    max_new_tokens=512
+)
+generated_ids = [
+    output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
+]
 
 response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
 ```
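
For reference, the updated snippet assembled into a runnable form. This is a minimal sketch, not part of the commit: the import lines, the `device` variable, and the `device_map="auto"` argument are assumptions about README lines outside this hunk (the hunk header is truncated at `device_`).

```python
# Minimal runnable sketch of the updated example. The imports, `device`,
# and `device_map="auto"` are assumptions about README lines outside this hunk.
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda"  # assumption: the README targets a CUDA device

model = AutoModelForCausalLM.from_pretrained(
    "Qwen2/Qwen2-beta-4B-Chat",
    device_map="auto"  # assumption: completes the truncated hunk header
)
tokenizer = AutoTokenizer.from_pretrained("Qwen2/Qwen2-beta-4B-Chat")

prompt = "Give me a short introduction to large language model."
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": prompt}
]
# Render the chat turns into a single prompt string; add_generation_prompt
# appends the assistant header so the model continues as the assistant.
text = tokenizer.apply_chat_template(
    messages, tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(device)

generated_ids = model.generate(
    model_inputs.input_ids,
    max_new_tokens=512
)
# Drop the prompt tokens so only the newly generated tokens are decoded.
generated_ids = [
    output_ids[len(input_ids):]
    for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]

response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(response)
```

Note that the commit also drops `do_sample=True` from the `model.generate` call, so sampling behavior now falls back to the model's generation config.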