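"""NewsHelper: a Gradio app for news articles.

Given the text of an article, the app generates a headline and a summary, and
can optionally classify which newspaper the article came from, using the
rg089/t5-headline-generation, rg089/distilbart-summarization and
rg089/bert_newspaper_source models.
"""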
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification
import gradio

# Label id for each supported newspaper source, plus the reverse lookup.
mapper = {"India Today": 0, "NDTV": 1, "The Indian Express": 2, "The Times Of India": 3, "The Hindu": 4}
rev_mapper = {v: k for k, v in mapper.items()}  # label id -> source name

source_path = "rg089/bert_newspaper_source"      # source classification (sequence classification)
title_path = "rg089/t5-headline-generation"      # headline generation (seq2seq)
summary_path = "rg089/distilbart-summarization"  # summarization (seq2seq)

device = "cuda" if torch.cuda.is_available() else "cpu"

source_model = AutoModelForSequenceClassification.from_pretrained(source_path).to(device)
source_tokenizer = AutoTokenizer.from_pretrained(source_path)
title_model = AutoModelForSeq2SeqLM.from_pretrained(title_path).to(device)
title_tokenizer = AutoTokenizer.from_pretrained(title_path)
summary_model = AutoModelForSeq2SeqLM.from_pretrained(summary_path).to(device)
summary_tokenizer = AutoTokenizer.from_pretrained(summary_path)


def generate(model, tokenizer, test_samples, prefix="", max_length=256):
    """Generate a headline or summary for the given article text via beam search."""
    model.eval()
    with torch.no_grad():
        # Prepend the task prefix (e.g. "headline: " for the T5 headline model).
        if isinstance(test_samples, str):
            test_samples = prefix + test_samples
        else:
            test_samples = [prefix + sample for sample in test_samples]
        # Tokenize the (prefixed) article text.
        inputs = tokenizer(
            test_samples,
            truncation=True,
            padding="max_length",
            max_length=max_length,
            return_tensors="pt")
        input_ids = inputs.input_ids.to(device)
        attention_mask = inputs.attention_mask.to(device)
        outputs = model.generate(input_ids, attention_mask=attention_mask, num_beams=10, max_length=max_length)
        output_str = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        return output_str[0]


def classify(model, tokenizer, content, title):
    """Predict which newspaper the (title, content) pair most likely came from."""
    model.eval()
    with torch.no_grad():
        model_inputs = tokenizer(title, content, padding=True, truncation=True, return_tensors="pt").to(device)
        outputs = model(**model_inputs)
        logits = outputs.logits
        selected = logits.argmax(dim=-1).cpu().tolist()
        answers = [rev_mapper[sel] for sel in selected]
        return answers[0]


def main(content, classify_source=False):
    """Build the text shown in the Gradio output box."""
    output = ""
    title = generate(title_model, title_tokenizer, content, prefix="headline: ")
    output += f"Title: {title}\n"
    if classify_source:
        source = classify(source_model, source_tokenizer, content, title)
        output += f"Source: {source}\n\n"
    else:
        output += "\n"
    summary = generate(summary_model, summary_tokenizer, content, prefix="")
    output += f"Summary: {summary}"
    return output
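
# Quick sanity check outside the Gradio UI (hypothetical article text):
#   print(main("The government announced a new renewable energy policy today ...", classify_source=True))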
title = "News Helper: Generate Headlines, Summary and Classify the Newspaper Source!"
description = """
The current sources supported for classification are: The Times of India, The Indian Express, NDTV, The Hindu and India Today.
"""
placeholder = "Enter the content of the article here."
iface = gradio.Interface(fn=main, inputs=[gradio.inputs.Textbox(lines=10, placeholder=placeholder, label='Article Content:'),
gradio.inputs.Checkbox(default=True, label='Classify the Source:')], outputs="textbox", title=title,
description=description)
iface.launch()