limhyeonseok commited on
Commit
5e12374
1 Parent(s): f1a2785

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -1
README.md CHANGED
@@ -121,7 +121,8 @@ generation_kwargs = {
121
  "max_tokens":512,
122
  "stop":["<|eot_id|>"],
123
  "echo":True, # Echo the prompt in the output
124
- "top_k":1 # This is essentially greedy decoding, since the model will always return the highest-probability token. Set this value > 1 for sampling decoding
 
125
  }
126
 
127
  response_msg = model(prompt, **generation_kwargs)
 
121
  "max_tokens":512,
122
  "stop":["<|eot_id|>"],
123
  "echo":True, # Echo the prompt in the output
124
+ "top_p":0.9,
125
+ "temperature":0.6,
126
  }
127
 
128
  response_msg = model(prompt, **generation_kwargs)