Suva committed
Commit: 8c5411c
Parent: 282e18f

Update README.md

Files changed (1):
  1. README.md (+1 -1)
README.md CHANGED
@@ -39,7 +39,7 @@ from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
 model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 input_ids = tokenizer.encode("summarize: " + abstract, return_tensors="pt", add_special_tokens=True)
-generated_ids = model.generate(input_ids=input_ids,num_beams=5,max_length=50,repetition_penalty=2.5,length_penalty=1,early_stopping=True,num_return_sequences=3)
+generated_ids = model.generate(input_ids=input_ids,num_beams=5,max_length=100,repetition_penalty=2.5,length_penalty=1,early_stopping=True,num_return_sequences=3)
 preds = [tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=True) for g in generated_ids]
 print(preds)
 # output
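
For context, the change raises the generation cap from 50 to 100 tokens so beam-search summaries are no longer cut short. Below is a minimal runnable sketch of the updated snippet; `model_name` and `abstract` are placeholders here, not this repository's actual values (the README defines both earlier in the file).

```python
# Sketch of the README snippet after this commit. model_name and abstract
# are placeholders; the README supplies the real checkpoint and input text.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_name = "t5-small"  # placeholder checkpoint, not this repo's model
abstract = "Transformer models process sequences with self-attention."  # placeholder input

model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Prepend the task prefix and tokenize to a PyTorch tensor.
input_ids = tokenizer.encode("summarize: " + abstract, return_tensors="pt", add_special_tokens=True)

# Beam search with the raised max_length=100 cap; repetition_penalty
# discourages repeated phrases in the generated summary.
generated_ids = model.generate(
    input_ids=input_ids,
    num_beams=5,
    max_length=100,
    repetition_penalty=2.5,
    length_penalty=1.0,
    early_stopping=True,
    num_return_sequences=3,
)

# Decode the three returned beam candidates back to text.
preds = [
    tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=True)
    for g in generated_ids
]
print(preds)
```

With num_beams=5 and num_return_sequences=3, generate returns the three highest-scoring of the five beams; max_length bounds the output in tokens, not words.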