Spaces:
Sleeping
Sleeping
File size: 1,575 Bytes
144f980 6555dd1 144f980 6555dd1 144f980 6555dd1 b90214d 144f980 b90214d 144f980 b90214d 144f980 b90214d 144f980 b90214d 6555dd1 b90214d 6555dd1 b90214d 6555dd1 b90214d 144f980 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 |
import streamlit as st
from transformers import T5Tokenizer, AutoModelForSeq2SeqLM
# Load the Hugging Face model with SentencePiece tokenizer
@st.cache_resource
def load_model():
    """Download (on first run) and return the (tokenizer, model) pair.

    Decorated with ``st.cache_resource`` so Streamlit reruns of the script
    reuse the already-loaded objects instead of re-downloading the weights.
    """
    sp_tokenizer = T5Tokenizer.from_pretrained("Vamsi/T5_Paraphrase_Paws")
    seq2seq_model = AutoModelForSeq2SeqLM.from_pretrained("Vamsi/T5_Paraphrase_Paws")
    return sp_tokenizer, seq2seq_model
# Load the model and tokenizer
tokenizer, model = load_model()

# Streamlit app interface
st.title("Paraphrasing Tool - AI to Human")
st.write("Paste your AI-generated text below, and the tool will humanize it:")

# Input text box
input_text = st.text_area("Enter text here (no word limit):")

if st.button("Paraphrase"):
    if input_text.strip():
        with st.spinner("Paraphrasing... Please wait."):
            try:
                # Prepare input for the model. T5's context window is 512
                # tokens, so truncate explicitly instead of letting the
                # tokenizer warn and silently overflow on long pastes.
                inputs = tokenizer.encode(
                    "paraphrase: " + input_text,
                    return_tensors="pt",
                    max_length=512,
                    truncation=True,
                )
                # Generate paraphrased output.
                # FIX: `temperature` is ignored by generate() unless
                # `do_sample=True` — the original ran plain beam search and
                # the 0.7 setting had no effect. Sampling with beams makes
                # the configured temperature actually apply. `max_length`
                # is set explicitly so output isn't capped at the model's
                # short default.
                outputs = model.generate(
                    inputs,
                    max_length=512,
                    num_beams=5,
                    do_sample=True,
                    temperature=0.7,
                    early_stopping=True,
                )
                paraphrased_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
                st.success("Here is the paraphrased text:")
                st.write(paraphrased_text)
            except Exception as e:
                # Surface the failure in the UI rather than crashing the app.
                st.error(f"An error occurred: {e}")
    else:
        st.error("Please enter some text to paraphrase.")
|