pglo committed on
Commit
5237d8d
1 Parent(s): 8a76261

Update README.md

Files changed (1)
  1. README.md +1 -1
README.md CHANGED

@@ -53,7 +53,7 @@ sample = [{'role': 'user', 'content': input_string}, {'role': 'assistant', 'cont
 chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
 
 # Tokenize input and generate output
-input_ids = tokenizer(chat_sample, return_tensors='pt', padding=True, add_special_tokens=False).to("cuda")
+input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
 outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
 print((tokenizer.decode(outputs[0])))
 ```
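For context, a minimal self-contained sketch of the README snippet this hunk touches, assuming a standard Hugging Face transformers causal LM. The model ID and prompt below are hypothetical placeholders not taken from this commit, and the tokenizer call follows the new version of the line; `padding=True` has no effect when tokenizing a single sequence, which is presumably why it was dropped.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "<your-model-id>"  # hypothetical placeholder; the commit does not name the checkpoint here
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16).to("cuda")

input_string = "Write a short haiku about autumn."  # illustrative prompt, not from the README
sample = [{'role': 'user', 'content': input_string}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)

# Tokenize the single chat string and move the tensors to the GPU;
# with one sequence there is nothing to pad, so padding=True is unnecessary.
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")

# Greedy decoding, mirroring the README's generate() call
outputs = model.generate(**input_ids, max_new_tokens=150, use_cache=True, num_beams=1, do_sample=False)
print(tokenizer.decode(outputs[0]))
```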