jbochi committed
Commit 38479f3
1 Parent(s): 1eab707

Include max_length in generation config

Files changed (1)
  app.py +1 -1
app.py CHANGED
@@ -19,7 +19,7 @@ def inference(max_length, input_text, history=[]):
     input_ids = tokenizer(input_text, return_tensors="pt").input_ids
     outputs = model.generate(
         input_ids=input_ids,
-        generation_config=GenerationConfig(decoder_start_token_id=2),
+        generation_config=GenerationConfig(max_length=max_length, decoder_start_token_id=2),
     )
     result = tokenizer.decode(outputs[0], skip_special_tokens=True)
     history.append((input_text, result))
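
For context: generate() falls back to GenerationConfig's default max_length (historically 20 tokens) when none is supplied, so before this change the max_length argument that inference() receives was silently ignored and longer outputs were cut off. Below is a minimal runnable sketch of the pattern after the fix; the "t5-small" checkpoint and the return line are illustrative assumptions, not taken from this Space, and decoder_start_token_id=2 is copied from the diff (it is model-specific).

# Sketch only: "t5-small" is a placeholder checkpoint, not necessarily
# the model this Space loads.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, GenerationConfig

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

def inference(max_length, input_text, history=[]):
    input_ids = tokenizer(input_text, return_tensors="pt").input_ids
    outputs = model.generate(
        input_ids=input_ids,
        # Passing max_length overrides the library default (20 tokens),
        # so generation can run as long as the caller asks for.
        # decoder_start_token_id=2 is kept from the diff and is
        # specific to the Space's model.
        generation_config=GenerationConfig(
            max_length=max_length, decoder_start_token_id=2
        ),
    )
    result = tokenizer.decode(outputs[0], skip_special_tokens=True)
    history.append((input_text, result))
    return result, history  # assumed return shape; not shown in this hunk

print(inference(64, "translate English to German: How are you?")[0])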