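# Streamlit news-article summarizer built on the sshleifer/distilbart-cnn-12-6 checkpoint.
# Launch with `streamlit run <this file>` (the original filename is not shown here).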
import streamlit as st
from transformers import AutoTokenizer, BartForConditionalGeneration

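# Cache the summarization model and tokenizer so they are loaded once, not on every rerun.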
@st.cache_resource
def load_model():
    summarizer = BartForConditionalGeneration.from_pretrained("sshleifer/distilbart-cnn-12-6")
    tokenizer_sum = AutoTokenizer.from_pretrained("sshleifer/distilbart-cnn-12-6")
    return summarizer, tokenizer_sum

summarizer, tokenizer_sum = load_model()

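# Summarize `text` and render the result in the app; `length` caps the summary's token count.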
def generate_summary(text, length):
    # Truncate to the model's 1024-token limit so very long articles don't overflow the encoder.
    inputs = tokenizer_sum([text], max_length=1024, truncation=True, return_tensors="pt")
    summary_ids = summarizer.generate(inputs["input_ids"], num_beams=2, min_length=1, max_length=length)
    out = tokenizer_sum.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
    st.write(out)

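# Page layout: title, short description, summary-length slider, and the text input form.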
st.title('Summarizer')
st.write('Submit a news article in the field below, and the BART-based model will provide a summary.')

length = st.slider('Maximum length of summary', value=50, min_value=15, max_value=150, step=1)
user_input = st.text_area("Enter your text:")
if st.button("Submit the article for processing"):
    if user_input:
        generate_summary(user_input, length)
    else:
        st.warning("Please enter some text before processing.")