UnicomAI committed on
Commit
26c60fc
1 Parent(s): 3fee206

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +16 -13
README.md CHANGED
@@ -38,24 +38,27 @@ messages = [
38
  {"role": "user", "content": "Who are you?"},
39
  ]
40
 
41
- input_ids = tokenizer.apply_chat_template(
42
- messages,
43
- add_generation_prompt=True,
44
- return_tensors="pt"
45
- ).to(model.device)
 
46
 
47
  terminators = [
48
- tokenizer.eos_token_id,
49
- tokenizer.convert_tokens_to_ids("<|eot_id|>")
50
  ]
51
 
 
52
  outputs = model.generate(
53
- input_ids,
54
- max_new_tokens=256,
55
- eos_token_id=terminators,
56
- do_sample=True,
57
- temperature=0.6,
58
- top_p=0.9,
 
59
  )
60
  response = outputs[0][input_ids.shape[-1]:]
61
  print(tokenizer.decode(response, skip_special_tokens=True))
 
38
  {"role": "user", "content": "Who are you?"},
39
  ]
40
 
41
+
42
+ prompt = pipeline.tokenizer.apply_chat_template(
43
+ messages,
44
+ tokenize=False,
45
+ add_generation_prompt=True
46
+ )
47
 
48
  terminators = [
49
+ pipeline.tokenizer.eos_token_id,
50
+ pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>")
51
  ]
52
 
53
+
54
  outputs = model.generate(
55
+ prompt,
56
+ max_new_tokens=2048,
57
+ eos_token_id=terminators,
58
+ do_sample=False,
59
+ temperature=0.6,
60
+ top_p=1,
61
+ repetition_penalty=1.05
62
  )
63
  response = outputs[0][input_ids.shape[-1]:]
64
  print(tokenizer.decode(response, skip_special_tokens=True))