import streamlit as st
from transformers import T5Tokenizer, T5ForConditionalGeneration
from transformers import pipeline

# Model and tokenizer loading
checkpoint = "t5-small"  # Use the smaller "t5-small" model
tokenizer = T5Tokenizer.from_pretrained(checkpoint)
base_model = T5ForConditionalGeneration.from_pretrained(checkpoint)


# LLM pipeline
def llm_pipeline(text):
    # Use the pipeline to generate the summary
    pipe_sum = pipeline(
        'summarization',
        model=base_model,
        tokenizer=tokenizer,
        max_length=500,
        min_length=50
    )
    result = pipe_sum(text)
    summary = result[0]['summary_text']
    return summary


# Streamlit code
st.set_page_config(layout="wide")


def main():
    st.title("Document Summarization App using a Smaller Model")

    # Text input area
    uploaded_text = st.text_area("Paste your document text here:")

    if uploaded_text:
        if st.button("Summarize"):
            summary = llm_pipeline(uploaded_text)

            # Display the summary
            st.info("Summarization Complete")
            st.success(summary)


if __name__ == "__main__":
    main()
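
# Usage note (a sketch; the filename "app.py" below is an assumption, not given in the source):
# install the dependencies the script relies on, then launch the app with Streamlit.
#
#   pip install streamlit transformers torch sentencepiece
#   streamlit run app.py
#
# T5Tokenizer requires the sentencepiece package, and the summarization pipeline
# needs a backend such as PyTorch to run the t5-small model.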