dosenbiiir committed on
Commit
f48bfec
1 Parent(s): f480398

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +38 -0
app.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
3
+
4
+
# Single source of truth for the checkpoint: the same identifier was
# previously duplicated in both from_pretrained() calls, risking drift.
MODEL_NAME = "mrm8488/t5-small-finetuned-quora-for-paraphrasing"

# Loaded at module import time. NOTE(review): Streamlit re-runs the whole
# script on every interaction, so this reloads the model each time —
# consider st.cache_resource; verify against the Streamlit version in use.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
8
+
9
+ st.title('Question Generator by Eddevs')
10
+
11
+ left_column, right_column = st.columns(2)
12
+ left_column.selectbox('Type', ['Question Generator', 'Paraphrasing'])
13
+ right_column.selectbox('Question Generator', ['T5', 'GPT Neo-X'])
14
+
15
+ input = st.text_area("Input Text")
16
+
17
+
18
+ if st.button('Generate'):
19
+ st.write(input)
20
+ st.success("We have generated 105 Questions for you")
21
+ st.snow()
22
+ ##else:
23
+ ##nothing here
24
+
def paraphrase(text, max_length=128, num_return_sequences=5, num_beams=5):
    """Generate candidate paraphrases of *text* with the module-level T5 model.

    Args:
        text: Input string; the fine-tuned checkpoint expects it to be
            prefixed with "paraphrase: " (see the demo call below).
        max_length: Maximum length, in tokens, of each generated sequence.
        num_return_sequences: Number of candidate paraphrases to return
            (default 5, matching the original hard-coded value).
        num_beams: Beam-search width; must be >= num_return_sequences
            (default 5, matching the original hard-coded value).

    Returns:
        A list of decoded paraphrase strings, one per returned sequence.
    """
    input_ids = tokenizer.encode(text, return_tensors="pt", add_special_tokens=True)
    generated_ids = model.generate(
        input_ids=input_ids,
        num_return_sequences=num_return_sequences,
        num_beams=num_beams,
        max_length=max_length,
        no_repeat_ngram_size=2,   # forbid repeating any bigram within a sequence
        repetition_penalty=3.5,
        length_penalty=1.0,
        early_stopping=True,
    )
    # Decode every beam result back to plain text.
    return [
        tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        for g in generated_ids
    ]
34
+
35
+ preds = paraphrase("paraphrase: What is the best framework for dealing with a huge text dataset?")
36
+
37
+ for pred in preds:
38
+ st.write(pred)