"""SummarizeEasy — a Streamlit app that summarizes user-provided text with the
Hugging Face Transformers pipeline, T5, or BART."""
import time

import streamlit as st
from transformers import pipeline
from transformers import T5Tokenizer, T5ForConditionalGeneration
from transformers import BartTokenizer, BartForConditionalGeneration
#from transformers import AutoTokenizer, EncoderDecoderModel
#from transformers import AutoTokenizer, LEDForConditionalGeneration
#from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration
## Initializing models
#Transformers Approach
def transform_summarize(text, max_length=100):
    """Summarize *text* with the default Hugging Face summarization pipeline.

    Returns the raw pipeline output: a list of the form
    [{"summary_text": "..."}]. `max_length` caps the generated summary
    length (default 100, matching the original hard-coded value).
    """
    # NOTE(review): the pipeline (and its underlying model download) is
    # rebuilt on every call; consider caching it (e.g. st.cache_resource).
    summarizer = pipeline("summarization")
    return summarizer(text, max_length=max_length, do_sample=False)
#T5
def t5_summarize(text):
    """Return a beam-search summary of *text* using the pretrained t5-small model."""
    tok = T5Tokenizer.from_pretrained("t5-small")
    t5 = T5ForConditionalGeneration.from_pretrained("t5-small")
    # T5 is multi-task: the "summarize: " prefix selects the summarization task.
    encoded = tok.encode(
        "summarize: " + text, return_tensors="pt", max_length=1024, truncation=True
    )
    generated = t5.generate(
        encoded,
        max_length=200,
        min_length=50,
        length_penalty=2.0,
        num_beams=4,
        early_stopping=True,
    )
    return tok.decode(generated[0], skip_special_tokens=True)
#BART
def bart_summarize(text):
    """Return a beam-search summary of *text* using facebook/bart-large-cnn."""
    tok = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
    bart = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")
    # Truncate to BART's 1024-token input limit before generating.
    batch = tok([text], max_length=1024, return_tensors="pt", truncation=True)
    ids = bart.generate(
        batch["input_ids"], num_beams=4, max_length=150, early_stopping=True
    )
    return tok.decode(ids[0], skip_special_tokens=True)
#Encoder-Decoder (disabled: requires the commented-out imports above)
# def encoder_decoder(text):
#     model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert_cnn_daily_mail")
#     tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/bert2bert_cnn_daily_mail")
#     # let's perform inference on a long piece of text
#     input_ids = tokenizer(text, return_tensors="pt").input_ids
#     # autoregressively generate summary (uses greedy decoding by default)
#     generated_ids = model.generate(input_ids)
#     generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
#     return generated_text
# Result
def result(summary):
    """Render an already-computed summary in the Streamlit page.

    BUG FIX: the original showed a "please wait" banner and slept for a
    fixed 12 seconds *after* the summary had already been produced by the
    caller, needlessly blocking the UI. Display the result immediately.
    """
    st.subheader(":violet[Your summarized text is:]")
    st.write(summary)
#Title
st.title("SummarizeEasy")
st.header(":violet[Summarize your text with ease!]")
st.divider()
st.write("Enter your text below and click on the button to summarize it.")
text = st.text_area("Enter your text here", height=200)
model = st.radio("Select the model you want to use", ("Transformer", "T5", "BART", "Encoder-Decoder"))
st.write("Click on the button to summarize your text.")
button = st.button("Summarize")
st.divider()
st.info("Please note that this is a beta version and summarized content may not be accurate. To get an accurate content the models need to be fined tuned and trained on respective context which requires GPUS. Please feel free to share your feedback with us.")
st.divider()

# Shown when a summarizer raises — typically because the input exceeds the
# model's maximum sequence length. (Emoji repaired from mojibake.)
_TOO_LONG_MSG = (
    "🚨 Your input text is quite lengthy. For better results, consider "
    "providing a shorter text or breaking it into smaller chunks."
)

if button:
    if text:
        if model == "Transformer":
            st.write("You have selected Transformer model.")
            try:
                summary = transform_summarize(text)
                # BUG FIX: the pipeline returns [{"summary_text": ...}];
                # display the summary text itself, not the raw list.
                result(summary[0]["summary_text"])
            except Exception:
                st.warning(_TOO_LONG_MSG)
        elif model == "T5":
            st.write("You have selected T5 model.")
            try:
                summary = t5_summarize(text)
                # BUG FIX: the original computed the T5 summary but never
                # displayed it; render it like the other branches do.
                result(summary)
            except Exception:
                st.warning(_TOO_LONG_MSG)
        elif model == "BART":
            st.write("You have selected BART model.")
            try:
                summary = bart_summarize(text)
                result(summary)
            except Exception:
                st.warning(_TOO_LONG_MSG)
        # NOTE(review): "Encoder-Decoder" is offered in the radio but its
        # implementation is commented out above; selecting it does nothing.
    else:
        st.warning("Please enter the text !!")