# brian-challenge / app.py
# Author: dosenbiiir
# Commit: "Create app.py" (f48bfec)
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# T5-small fine-tuned on Quora question pairs for paraphrasing.
MODEL_NAME = "mrm8488/t5-small-finetuned-quora-for-paraphrasing"


@st.cache_resource
def _load_model():
    """Load and cache the tokenizer and seq2seq model.

    Streamlit re-executes the entire script on every widget interaction;
    without caching, the model would be re-instantiated on each rerun.
    Returns a (tokenizer, model) tuple.
    """
    tok = AutoTokenizer.from_pretrained(MODEL_NAME)
    mdl = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
    return tok, mdl


tokenizer, model = _load_model()
st.title('Question Generator by Eddevs')

# Two side-by-side dropdowns: task type and model choice.
# NOTE(review): the selectbox return values were previously discarded;
# they are captured here so later logic can branch on them.
left_column, right_column = st.columns(2)
task = left_column.selectbox('Type', ['Question Generator', 'Paraphrasing'])
model_choice = right_column.selectbox('Question Generator', ['T5', 'GPT Neo-X'])

# Renamed from `input` to avoid shadowing the builtin.
input_text = st.text_area("Input Text")

if st.button('Generate'):
    # Echo the submitted text and show a placeholder success message;
    # actual question generation is not wired up to this button yet.
    st.write(input_text)
    st.success("We have generated 105 Questions for you")
    st.snow()
def paraphrase(text, max_length=128, num_return_sequences=5, num_beams=5):
    """Generate paraphrases of *text* with the fine-tuned T5 model.

    Args:
        text: Input string; this checkpoint expects a ``"paraphrase: ..."``
            prefix (see the demo call below the function).
        max_length: Maximum token length of each generated sequence.
        num_return_sequences: Number of candidate paraphrases to return.
        num_beams: Beam-search width; must be >= ``num_return_sequences``.

    Returns:
        A list of ``num_return_sequences`` decoded paraphrase strings.
    """
    input_ids = tokenizer.encode(text, return_tensors="pt", add_special_tokens=True)
    generated_ids = model.generate(
        input_ids=input_ids,
        num_return_sequences=num_return_sequences,
        num_beams=num_beams,
        max_length=max_length,
        no_repeat_ngram_size=2,   # forbid repeating any 2-gram
        repetition_penalty=3.5,
        length_penalty=1.0,
        early_stopping=True,
    )
    return [
        tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        for g in generated_ids
    ]
# Demo: paraphrase a fixed example question and render every candidate.
# NOTE(review): this runs on every script rerun, independent of the
# Generate button above.
example = "paraphrase: What is the best framework for dealing with a huge text dataset?"
for candidate in paraphrase(example):
    st.write(candidate)