|
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM |
|
import gradio as gr |
|
|
|
# Hugging Face Hub checkpoint for a small conversational-summarization
# seq2seq model (used by generate_summary below).
checkpoint = "Mr-Vicky-01/conversational_sumarization"

# Tokenizer matching the checkpoint; converts raw text to input ids.
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

# Encoder-decoder model loaded once at import time so every request
# reuses the same weights (no per-call download/load cost).
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
|
|
|
def generate_summary(text: str) -> str:
    """Summarize *text* with the module-level seq2seq model.

    Args:
        text: Raw input text to summarize. Inputs longer than 1024
            tokens are truncated by the tokenizer.

    Returns:
        The generated summary as a plain string (special tokens removed).
    """
    inputs = tokenizer([text], max_length=1024, return_tensors='pt', truncation=True)

    # Forward the attention mask alongside the input ids: omitting it makes
    # generate() assume all positions are real tokens, which emits warnings
    # and can degrade output whenever padding is present.
    summary_ids = model.generate(
        inputs['input_ids'],
        attention_mask=inputs['attention_mask'],
        max_new_tokens=100,
        do_sample=False,  # greedy decoding: deterministic output for the same input
    )

    summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)

    return summary
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Build the Gradio UI around the summarizer.
# Bug fixes vs. the original:
#   1. `fn` pointed at `language_translator`, which is never defined in this
#      file — the defined handler is `generate_summary` (NameError at import).
#   2. A trailing comma made `demo` a 1-tuple, so `demo.launch(...)` would
#      raise AttributeError; the comma is removed.
demo = gr.Interface(
    fn=generate_summary,
    inputs='text',
    outputs='text',
    title='Text Summarization',
)

# debug=True surfaces tracebacks in the UI; share=True creates a public link.
demo.launch(debug=True, share=True)