# SummarizeEasy — Streamlit app for text summarization (Transformer / T5 / BART).
import streamlit as st
import time
from transformers import pipeline
from transformers import T5Tokenizer, T5ForConditionalGeneration
from transformers import BartTokenizer, BartForConditionalGeneration
#from transformers import AutoTokenizer, EncoderDecoderModel
#from transformers import AutoTokenizer, LEDForConditionalGeneration
#from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration
##initializing models
#Transformers Approach
def transform_summarize(text):
    """Summarize *text* with the default Hugging Face summarization pipeline.

    Args:
        text: The text to summarize.

    Returns:
        The summary as a plain string.
    """
    # NOTE(review): the pipeline (and its underlying model) is re-created on
    # every call; acceptable for a demo, but consider caching it (e.g. with
    # st.cache_resource) to avoid reloading the model per request.
    summarizer = pipeline("summarization")
    outputs = summarizer(text, max_length=100, do_sample=False)
    # The pipeline returns a list of dicts like [{"summary_text": ...}];
    # unwrap it so callers display a clean string rather than the raw list.
    return outputs[0]["summary_text"]
#T5
def t5_summarize(text):
    """Summarize *text* using the pretrained t5-small model.

    Args:
        text: The text to summarize.

    Returns:
        The decoded summary string.
    """
    tok = T5Tokenizer.from_pretrained("t5-small")
    t5 = T5ForConditionalGeneration.from_pretrained("t5-small")
    # T5 is a text-to-text model: the task is selected via the input prefix.
    encoded = tok.encode(
        "summarize: " + text,
        return_tensors="pt",
        max_length=1024,
        truncation=True,
    )
    generated = t5.generate(
        encoded,
        max_length=200,
        min_length=50,
        length_penalty=2.0,
        num_beams=4,
        early_stopping=True,
    )
    return tok.decode(generated[0], skip_special_tokens=True)
#BART
def bart_summarize(text):
    """Summarize *text* using the pretrained facebook/bart-large-cnn model.

    Args:
        text: The text to summarize.

    Returns:
        The decoded summary string.
    """
    tok = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
    bart = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")
    encoded = tok([text], max_length=1024, return_tensors="pt", truncation=True)
    generated = bart.generate(
        encoded["input_ids"],
        num_beams=4,
        max_length=150,
        early_stopping=True,
    )
    return tok.decode(generated[0], skip_special_tokens=True)
#Encoder-Decoder
# def encoder_decoder(text):
# model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert_cnn_daily_mail")
# tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/bert2bert_cnn_daily_mail")
# # let's perform inference on a long piece of text
# input_ids = tokenizer(text, return_tensors="pt").input_ids
# # autoregressively generate summary (uses greedy decoding by default)
# generated_ids = model.generate(input_ids)
# generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
# return generated_text
# Result
def result(summary):
    """Render the finished summary in the Streamlit page.

    Args:
        summary: The summary text to display.
    """
    # The original showed a "Please wait while we process and summarize"
    # message followed by time.sleep(12) — but summarization has already
    # completed by the time this is called, so that was 12 seconds of dead
    # time plus a misleading message. Display the result immediately.
    st.subheader(":violet[Your summarized text is:]")
    st.write(summary)
# --- UI layout ---
st.title("SummarizeEasy")
st.header(":violet[Summarize your text with ease!]")
st.divider()
st.write("Enter your text below and click on the button to summarize it.")
text = st.text_area("Enter your text here", height=200)
model = st.radio("Select the model you want to use", ("Transformer", "T5", "BART", "Encoder-Decoder"))
st.write("Click on the button to summarize your text.")
button = st.button("Summarize")
st.divider()
st.info("Please note that this is a beta version and summarized content may not be accurate. To get an accurate content the models need to be fined tuned and trained on respective context which requires GPUS. Please feel free to share your feedback with us.")
st.divider()

# Map each radio option to its summarizer; replaces the triplicated
# if/elif branches that repeated the same try/except and warning string.
_SUMMARIZERS = {
    "Transformer": transform_summarize,
    "T5": t5_summarize,
    "BART": bart_summarize,
}
# Original string contained mojibake ("π¨") where the siren emoji was intended.
_LONG_TEXT_WARNING = (
    "\U0001f6a8 Your input text is quite lengthy. For better results, "
    "consider providing a shorter text or breaking it into smaller chunks."
)

if button:
    if text:
        summarizer = _SUMMARIZERS.get(model)
        if summarizer is None:
            # "Encoder-Decoder" is shown in the radio but its implementation
            # is commented out above; tell the user instead of doing nothing.
            st.info("The Encoder-Decoder model is not available yet. Please choose another model.")
        else:
            st.write(f"You have selected {model} model.")
            try:
                # Fixes the T5 branch, which computed the summary but never
                # called result(), so nothing was ever displayed.
                summary = summarizer(text)
                result(summary)
            except Exception:
                # NOTE(review): broad catch keeps the app alive, but it also
                # hides genuine errors (e.g. model download failures) behind a
                # "text too long" warning — consider logging the exception.
                st.warning(_LONG_TEXT_WARNING)
    else:
        st.warning("Please enter the text !!")