data-silence committed
Commit eb57418 · Parent: 89ddec0

Update README.md

Files changed (1):
  1. README.md (+2, -3)
README.md CHANGED
@@ -87,15 +87,14 @@ model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 
-def generate_summary_with_special_tokens(text, max_length=600):
-    inputs = tokenizer(text, return_tensors="pt", max_length=512, truncation=True).to(device)
+def generate_summary_with_special_tokens(text, max_length=512):
+    inputs = tokenizer(text, return_tensors="pt", max_length=max_length, truncation=True).to(device)
 
     outputs = model.generate(
         **inputs,
         max_length=max_length,
         num_return_sequences=1,
         no_repeat_ngram_size=4,
-        # early_stopping=True
     )
 
     generated_text = tokenizer.decode(outputs[0], skip_special_tokens=False)
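For context, a minimal runnable sketch of the README example as it reads after this commit. The imports, the `model_name` placeholder, the `model.to(device)` call, and the final `return` are assumptions filled in for completeness; only the function body shown in the hunk above comes from the actual diff.

```python
# Sketch of the updated README example (assumptions noted in comments).
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_name = "your-summarization-model"  # hypothetical placeholder, not from the diff
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)  # assumed, so inputs and model sit on the same device


def generate_summary_with_special_tokens(text, max_length=512):
    # After this commit, the tokenizer truncation limit and the generation
    # limit both follow the single max_length parameter.
    inputs = tokenizer(text, return_tensors="pt", max_length=max_length, truncation=True).to(device)

    outputs = model.generate(
        **inputs,
        max_length=max_length,
        num_return_sequences=1,
        no_repeat_ngram_size=4,
    )

    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=False)
    return generated_text  # assumed; the return statement is outside the shown hunk
```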