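# Streamlit app: a writing assistant that detects and corrects grammatical
# mistakes using a T5-base model fine-tuned on the JFLEG dataset.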
import streamlit as st
from PIL import Image
from transformers import pipeline
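# Model ID of the fine-tuned grammar-correction checkpoint on the Hugging Face Hub.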
model_checkpoint = "Modfiededition/t5-base-fine-tuned-on-jfleg"
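# Cache the pipeline so the model is downloaded and initialised only once per session.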
@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def load_model():
    return pipeline("text2text-generation", model=model_checkpoint)

model = load_model()
# prompts
st.title("Writing Assistant for you πŸ€–")
st.markdown("This writing assistant detects and corrects grammatical mistakes for you! This assitant uses **T5-base model ✍️** fine-tuned on jfleg dataset.")
#image = Image.open('new_grammar.jpg')
#st.image(image, caption='Image Credit: https://abrc.org.au/wp-content/uploads/2020/12/Grammar-checker.jpg')
st.subheader("Some examples: ")
example_1 = st.button("I am write on AI")
example_2 = st.button("This sentence has, bads grammar mistake!")
textbox = st.text_area('Write your text in this box:', '', height=100, max_chars=500)
button = st.button('Detect grammar mistakes:')
# output
st.subheader("Correct sentence: ")
if example_1:
    with st.spinner('In progress.......'):
        output_text = model("I am write on AI")[0]["generated_text"]
    st.markdown("## " + output_text)

if example_2:
    with st.spinner('In progress.......'):
        output_text = model("This sentence has, bads grammar mistake!")[0]["generated_text"]
    st.markdown("## " + output_text)

if button:
    with st.spinner('In progress.......'):
        if textbox:
            output_text = model(textbox)[0]["generated_text"]
        else:
            output_text = " "
    st.markdown("## " + output_text)