merve (HF staff) committed
Commit 04f9f37
1 Parent(s): 23aee34

Update app.py

Files changed (1): app.py (+2 -2)
app.py CHANGED
@@ -14,7 +14,7 @@ def infer(input_ids, max_length, temperature, top_k, top_p, num_return_sequences
 
     output_sequences = model.generate(
         input_ids=input_ids,
-        max_length=args.length + len(encoded_prompt[0]),
+        max_length=max_length,
         temperature=temperature,
         top_k=top_k,
         top_p=top_p,
@@ -55,7 +55,7 @@ for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
     text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
 
     # Remove all text after the stop token
-    text = text[: text.find(args.stop_token) if args.stop_token else None]
+    #text = text[: text.find(args.stop_token) if args.stop_token else None]
 
     # Add the prompt at the beginning of the sequence. Remove the excess text that was used for pre-processing
     total_sequence = (
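
For context, a minimal sketch of what the generate call looks like after this change: max_length is now taken directly from the function argument instead of being computed as args.length + len(encoded_prompt[0]), and the stop-token truncation is commented out. The checkpoint name, prompt, and sampling values below are assumptions for illustration, not the Space's actual configuration.

from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed checkpoint and prompt for illustration only; the Space loads its own model.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

input_ids = tokenizer.encode("Hello, I'm a language model,", return_tensors="pt")

output_sequences = model.generate(
    input_ids=input_ids,
    max_length=50,        # passed through as-is, no longer args.length + len(encoded_prompt[0])
    temperature=0.9,
    top_k=50,
    top_p=0.95,
    do_sample=True,
    num_return_sequences=1,
)

# Decoding mirrors the loop in app.py; the stop-token truncation step is now skipped.
text = tokenizer.decode(output_sequences[0], clean_up_tokenization_spaces=True)
print(text)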