Sethblocks committed on
Commit f019c26
Parent: 9fda673

patch output (again) and fix token limit

Files changed (1):
app.py  +2 -2
app.py CHANGED
@@ -34,13 +34,13 @@ def llm(msgs):
     ]
     outputs = llama(
         prompt,
-        max_new_tokens=10,
+        max_new_tokens=100,
         eos_token_id=terminators,
         do_sample=True,
         temperature=0.6,
         top_p=0.9,
     )
-    return outputs[0]["generated_text"]#[len(prompt):]
+    return outputs[0]["generated_text"][len(prompt):]#for the 801100128909120989534879th time remember to transfer changes between test and app.py -_-
 
 def randomize():
     global randtxt
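
For context on the two fixes: a transformers text-generation pipeline returns the prompt echoed back at the start of generated_text, so the reply has to be sliced off with [len(prompt):], and max_new_tokens=10 was cutting replies off after a few words. Below is a minimal sketch of the patched behaviour; it is an assumption-laden stand-in, using gpt2 in place of the Space's actual Llama checkpoint and omitting the terminators list that app.py builds elsewhere.

from transformers import pipeline

# Stand-in model: the real Space loads a Llama checkpoint and passes
# eos_token_id=terminators, neither of which is shown in this diff.
llama = pipeline("text-generation", model="gpt2")

prompt = "Q: Name one primary colour.\nA:"
outputs = llama(
    prompt,
    max_new_tokens=100,  # was 10, which truncated replies mid-sentence
    do_sample=True,
    temperature=0.6,
    top_p=0.9,
)

# generated_text begins with the prompt verbatim, so drop the first
# len(prompt) characters to keep only the model's reply.
reply = outputs[0]["generated_text"][len(prompt):]
print(reply)

Passing return_full_text=False to the pipeline call would achieve the same thing without the manual slice.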