Files changed (1)
  1. README.md +1 -1
README.md
@@ -95,7 +95,7 @@ for sample in inputs:
     input_ids = tokenizer(
         input_sequences, return_tensors="pt", truncation=True
     ).input_ids
-    output = model.generate(input_ids, no_repeat_ngram_size=3, num_beams=4)
+    output = model.generate(input_ids, no_repeat_ngram_size=3, num_beams=20)
     predicted = tokenizer.decode(output[0], skip_special_tokens=True)
     print(sample, "\n --->", predicted)
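
For reviewers who want to try the changed line in isolation, below is a minimal runnable sketch of the loop this hunk sits in. The checkpoint name (`t5-small`) and the example `inputs` list are assumptions for illustration, not taken from this repo; the README's `input_sequences` is defined outside the visible hunk, so the sketch tokenizes `sample` directly.

```python
# Minimal sketch of the loop this diff touches -- not the repo's exact setup.
# Assumptions: the "t5-small" checkpoint and the example `inputs` list; the
# README's `input_sequences` is defined outside the visible hunk, so we
# tokenize `sample` directly here.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

inputs = ["translate English to German: The house is wonderful."]

for sample in inputs:
    input_ids = tokenizer(
        sample, return_tensors="pt", truncation=True
    ).input_ids
    # num_beams=20 (was 4) keeps more beam-search candidates alive per step;
    # no_repeat_ngram_size=3 blocks any trigram from repeating in the output.
    output = model.generate(input_ids, no_repeat_ngram_size=3, num_beams=20)
    predicted = tokenizer.decode(output[0], skip_special_tokens=True)
    print(sample, "\n --->", predicted)
```

Note the trade-off behind the change: beam search cost grows roughly linearly with the beam count, so `num_beams=20` decodes about five times slower than `num_beams=4` in exchange for searching more candidate sequences.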