# Summarization / app.py
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import gradio as gr
# Load the fine-tuned summarization checkpoint from the Hugging Face Hub
checkpoint = "Mr-Vicky-01/conversational_sumarization"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
def generate_summary(text):
    """Summarize the input text with the seq2seq model (greedy decoding)."""
    inputs = tokenizer([text], max_length=1024, return_tensors='pt', truncation=True)
    summary_ids = model.generate(inputs['input_ids'], max_new_tokens=100, do_sample=False)
    summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
    return summary
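
# Illustrative quick check (an assumption, not part of the Space's behavior):
# uncomment to print a sample summary before launching the interface.
# sample_text = ("The James Webb Space Telescope was launched in December 2021 "
#                "and observes the universe in the infrared.")
# print(generate_summary(sample_text))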
# examples = [
# ["hello everyone"],
# ["hardwork never fails."],
# ["A room without books is like a body without a soul."],
# ["The Sun is approximately 4.6 billion years older than Earth."],
# ]
demo = gr.Interface(fn=generate_summary, inputs='text', outputs='text', title='Text Summarization')  # examples=examples
demo.launch(debug=True, share=True)