abdulmatinomotoso committed on
Commit
471b76d
1 Parent(s): bdece0b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -7
app.py CHANGED
@@ -35,17 +35,17 @@ from transformers import (
35
  )
36
 
37
  #initializing the tokenizer and the model
38
- tokenizer = AutoTokenizer.from_pretrained("valurank/headline_generator_baseline")
39
- model = AutoModelForSeq2SeqLM.from_pretrained("valurank/headline_generator_baseline")
40
 
41
  #Defining a function to generate the headlines
42
  def headline_generator_2(file):
43
- input_text = clean_text(file)
44
- input_text = sent_tokenize(input_text)
45
- text = ''.join(input_text[:6])
46
 
47
- inputs = tokenizer(text,truncation=True, return_tensors="pt")
48
- summary_ids = model.generate(inputs["input_ids"])
49
  summary = tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
50
 
51
  return summary
 
35
  )
36
 
37
  #initializing the tokenizer and the model
38
+ tokenizer = AutoTokenizer.from_pretrained("valurank/final_headline_generator")
39
+ model = AutoModelForSeq2SeqLM.from_pretrained("valurank/final_headline_generator")
40
 
41
  #Defining a function to generate the headlines
42
  def headline_generator_2(file):
43
+ input_text = file
44
+ #input_text = sent_tokenize(input_text)
45
+ #text = ''.join(input_text[:6])
46
 
47
+ inputs = tokenizer(input_text,truncation=True, return_tensors="pt")
48
+ summary_ids = model.generate(inputs["input_ids"],min_length=20, max_length=40)
49
  summary = tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
50
 
51
  return summary