import streamlit as st
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
import sentencepiece as spm  # the Pegasus tokenizer depends on sentencepiece
import webbrowser
# Sidebar navigation
activities = ["Home", "Summarize", "Training Dataset", "Model info", "About Us"]
choice = st.sidebar.selectbox("OPTIONS", activities)
##################################
# Fine-tuned Pegasus checkpoint for dialogue summarization, paired with the
# base pegasus-cnn_dailymail tokenizer.
model = AutoModelForSeq2SeqLM.from_pretrained("SmonF/YTFineTunePegasus")
tokenizer = AutoTokenizer.from_pretrained("google/pegasus-cnn_dailymail")
##################################
# Alternative checkpoints tried during development ("tuner007/pegasus_paraphrase",
# "Nattiman/CHATSUMMARY", and a pickled fine-tuned model under src/) were replaced
# by the Hugging Face checkpoint loaded above.
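
# NOTE (suggested sketch, not part of the original app): Streamlit reruns this
# script on every interaction, so the model and tokenizer above are re-created
# on each rerun. On Streamlit >= 1.18 they could be cached once per process
# with st.cache_resource, for example:
#
#   @st.cache_resource
#   def load_summarizer():
#       mdl = AutoModelForSeq2SeqLM.from_pretrained("SmonF/YTFineTunePegasus")
#       tok = AutoTokenizer.from_pretrained("google/pegasus-cnn_dailymail")
#       return pipeline("summarization", model=mdl, tokenizer=tok)
#
#   summarizer = load_summarizer()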
def generate_summary(text, max_length=100, min_length=30):
    """Summarize a dialogue with the fine-tuned Pegasus model."""
    summarizer = pipeline("summarization", model=model, tokenizer=tokenizer)
    summary = summarizer(text, max_length=max_length,
                         min_length=min_length, do_sample=True)
    return summary[0]["summary_text"]
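# NOTE (observation, not a change): the summarization pipeline is rebuilt on
# every call to generate_summary; with a cached loader like the sketch above
# it could be created once and reused across button clicks.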
####################################################################
def main():
    if choice == "Home":
        # st.image("src/samsung.jpg", width=680)  # optional banner image
        st.markdown("<h1 style='color: #FF9900; font-size: 3em;'>SAMSUNG INNOVATION CAMPUS</h1>", unsafe_allow_html=True)
        st.subheader("Welcome to Our Dialogue Summarizer App!")
        st.markdown(">*This is a capstone project developed by Group-6 under the supervision of the SIC team.*")
        st.markdown("---")
elif choice=="Summarize":
st.markdown("<h1 style='color: #FF9900; font-size: 2em;'>Dialog Summarizing Tool</h1>", unsafe_allow_html=True)
input_dialogue=st.text_area("Enter Your Dialogue Below","Type here")
if st.button("Summarize"):
summary = generate_summary(input_dialogue)
st.markdown("*<h1 style='color: #9925be; font-size: 1.2em;'>Here is your summarized dialogue*</h1>", unsafe_allow_html=True)
st.write(summary)
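        # NOTE (suggested, not part of the original app): the default value
        # "Type here" is summarized as-is when the button is clicked without
        # any input; a simple guard could warn instead, e.g.:
        #
        #   if not input_dialogue.strip() or input_dialogue == "Type here":
        #       st.warning("Please enter a dialogue to summarize.")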
elif choice=="Training Dataset":
st.subheader("")
st.markdown("<h1 style='color: #FF9900; font-size: 3em;'>TRAINING DATASET Info</h1>", unsafe_allow_html=True)
st.header("Dataset Card for SAMSum Corpus")
st.markdown("> *Dataset Summary\n The SAMSum dataset contains about 16k messenger-like conversations with summaries. Conversations were created and written down by linguists fluent in English. Linguists were asked to create conversations similar to those they write on a daily basis, reflecting the proportion of topics of their real-life messenger convesations. The style and register are diversified - conversations could be informal, semi-formal or formal, they may contain slang words, emoticons and typos. Then, the conversations were annotated with summaries. It was assumed that summaries should be a concise brief of what people talked about in the conversation in third person. The SAMSum dataset was prepared by Samsung R&D Institute Poland and is distributed for research purposes* (non-commercial licence: CC BY-NC-ND 4.0)",unsafe_allow_html=True)
#st.button("Read more")
url = 'https://huggingface.co/datasets/samsum'
# Create a button with the label 'Go to Google'
if st.button('Read More'):
webbrowser.open_new_tab(url)
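        # NOTE (suggested, not part of the original app): webbrowser.open_new_tab
        # opens a browser on the machine running the script, so on a deployed
        # server it never reaches the user's browser. A client-side link could be
        # rendered instead, e.g.:
        #
        #   st.markdown(f"[Read More]({url})")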
elif choice=="Model info":
st.markdown("<h1 style='color: #FF9900; font-size: 3em;'>PEGASUS MODEL Info</h1>", unsafe_allow_html=True)
st.markdown(">*The Pegasus model was proposed in PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu on Dec 18, 2019. According to the abstract, Pegasus’ pretraining task is intentionally similar to summarization: important sentences are removed/masked from an input document and are generated together as one output sequence from the remaining sentences, similar to an extractive summary. Pegasus achieves SOTA summarization performance on all 12 downstream tasks, as measured by ROUGE and human eval. This model was contributed by sshleifer. The Authors’ code can be found here. Tips: Sequence-to-sequence model with the same encoder-decoder model architecture as BART. Pegasus is pre-trained jointly on two self-supervised objective functions: Masked Language Modeling (MLM) and a novel summarization specific pretraining objective, called Gap Sentence Generation (GSG). MLM: encoder input tokens are randomly replaced by a mask tokens and have to be predicted by the encoder (like in BERT) GSG: whole encoder input sentences are replaced by a second mask token and fed to the decoder, but which has a causal mask to hide the future words like a regular auto-regressive transformer decoder.*")
url = 'https://huggingface.co/google/pegasus-cnn_dailymail'
# Create a button with the label 'Go to Google'
if st.button('Read More'):
webbrowser.open_new_tab(url)
elif choice=="About Us":
st.markdown("<h1 style='color: #FF9900; font-size: 3em;'>ABOUT US</h1>", unsafe_allow_html=True)
st.markdown("> *Welcome to our website! We are a team of passionate individuals dedicated to providing new NLP based services to our customers. Our goal is to create a positive impact in the world by leveraging our expertise and innovative solutions. With passion and resilence and through experience, we strive to exceed expectations and build lasting relationships with our clients. We, the developers of this capstone project are from Fujairah Emirate. We proudly own this project as it was the product of our hectic crash course that was offered by Samsung Innovation Campus. Thank you for choosing us, and we look forward to serving you!*")
st.markdown(">*<h1 style='color: #EA8770; font-size: 2em;'>Developers Name List</h1>*", unsafe_allow_html=True)
st.markdown("*<h1 style='color: #EA8790; font-size: 1.2em;'>This project was developed by: Nathan Berhe, Smon Fitwi, Dawit Andebrhan, Bereket Kibreab, Eyasu Tesfamichael, Milkias Butsuamlak</h1>*", unsafe_allow_html=True)
st.markdown("*<h1 style='color: #EA8790; font-size: 1.2em;'>This project was developed under the supervision of Mrs. Rabab, Mr.Mrad and Mr. Marc, honourable staffs of SIC program</h1>*", unsafe_allow_html=True)

if __name__ == '__main__':
    main()