zetavg committed on
Commit
ae567a3
1 Parent(s): 6148b7c
Files changed (1) hide show
  1. llama_lora/lib/inference.py +0 -2
llama_lora/lib/inference.py CHANGED
@@ -67,8 +67,6 @@ def generate(
67
  for output in generator:
68
  decoded_output = tokenizer.decode(output, skip_special_tokens=skip_special_tokens)
69
  yield decoded_output, output, False
70
- if output[-1] in [tokenizer.eos_token_id]:
71
- break
72
 
73
  if generation_output:
74
  output = generation_output.sequences[0]
 
67
  for output in generator:
68
  decoded_output = tokenizer.decode(output, skip_special_tokens=skip_special_tokens)
69
  yield decoded_output, output, False
 
 
70
 
71
  if generation_output:
72
  output = generation_output.sequences[0]