jackangel committed on
Commit
48024cb
1 Parent(s): de8d85a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -1
app.py CHANGED
@@ -24,8 +24,10 @@ llm = Llama(
24
  generation_kwargs = {
25
  "max_tokens":1000,
26
  "stop":["</s>"],
 
27
  "echo":False, # Echo the prompt in the output
28
- "top_k":1 # This is essentially greedy decoding, since the model will always return the highest-probability token. Set this value > 1 for sampling decoding
 
29
  }
30
 
31
 
 
24
  generation_kwargs = {
25
  "max_tokens":1000,
26
  "stop":["</s>"],
27
+ "temperature":0.2,
28
  "echo":False, # Echo the prompt in the output
29
+ "top_k":20,
30
+ "top_p":0.7
31
  }
32
 
33