aditi2222 committed on
Commit
246c337
1 Parent(s): c1d459d

Upload app.py

Files changed (1)
  1. app.py +32 -0
app.py ADDED
@@ -0,0 +1,32 @@
+ import torch
+ from transformers import T5ForConditionalGeneration, T5Tokenizer
+ import gradio as gr
+
+ best_model_path = "aditi2222/t5_paraphrase_updated"
+ model = T5ForConditionalGeneration.from_pretrained(best_model_path)
+ tokenizer = T5Tokenizer.from_pretrained("aditi2222/t5_paraphrase_updated")
+
+ def tokenize_data(text):
+     # Build the T5 input: task prefix, the text to paraphrase, and an explicit end-of-sequence token
+     input_ = "paraphrase: " + str(text) + " </s>"
+     max_len = 64
+     # Tokenize, padding/truncating to max_len, and return PyTorch tensors
+     tokenized_inputs = tokenizer(input_, padding='max_length', truncation=True, max_length=max_len, return_attention_mask=True, return_tensors='pt')
+
+     inputs = {"input_ids": tokenized_inputs['input_ids'],
+               "attention_mask": tokenized_inputs['attention_mask']}
+     return inputs
+
+ def generate_answers(text):
+     inputs = tokenize_data(text)
+     results = model.generate(input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'], do_sample=True,
+                              max_length=64,
+                              top_k=120,
+                              top_p=0.98,
+                              early_stopping=True,
+                              num_return_sequences=1)
+     answer = tokenizer.decode(results[0], skip_special_tokens=True)
+     return answer
+
+ iface = gr.Interface(fn=generate_answers, inputs=['text'], outputs=["text"])
+ iface.launch(inline=False, share=True)