File size: 2,938 Bytes
4d53442
 
 
 
 
 
 
 
 
ee18ca1
 
eec124c
ee18ca1
eec124c
 
6cb7fcd
4d53442
 
 
 
 
1b0a1ba
4d53442
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
# -*- coding: utf-8 -*-
"""Ai Re-Phraser.py

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/18bvmXQqMIkk7G0gY_1dUolI08RK6Ajrf
"""

# --- Model / filter setup (module-level side effects: downloads weights) ---
from parrot import Parrot
import warnings

# Silence the many deprecation/UX warnings emitted by parrot and transformers.
warnings.filterwarnings("ignore")


# Load the Parrot T5 paraphraser (downloads the checkpoint on first run).
# NOTE(review): `parrot` is never used below -- only the Pegasus pipeline is;
# presumably left over from the Colab notebook. Verify before removing.
parrot = Parrot(model_tag="prithivida/parrot_paraphraser_on_T5")
import os
from parrot import Parrot
import torch
import warnings
import nltk
#!pip install sentence-splitter   
from sentence_splitter import SentenceSplitter, split_text_into_sentences
warnings.filterwarnings("ignore")
# Sentence splitter used by get_fun() to paraphrase one sentence at a time.
splitter = SentenceSplitter(language='en')
from transformers import PegasusForConditionalGeneration, PegasusTokenizer
from transformers import AutoTokenizer
from transformers import AutoModelForSeq2SeqLM
import pandas as pd
# Parrot's candidate-ranking filters: semantic adequacy, fluency, diversity.
from parrot.filters import Adequacy
from parrot.filters import Fluency
from parrot.filters import Diversity
adequacy_score = Adequacy()
fluency_score  = Fluency()
diversity_score= Diversity()
# Device passed to the Parrot filters.
# NOTE(review): hard-coded to CUDA while `torch_device` below falls back to
# CPU -- this will fail on CPU-only hosts; confirm and align the two.
device= "cuda:0"
# Candidates scoring below these thresholds are discarded by the filters.
adequacy_threshold = 0.90
fluency_threshold = 0.90
diversity_ranker="levenshtein"

# Pegasus paraphrase model used by get_response() for candidate generation.
model_name = 'tuner007/pegasus_paraphrase'
torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = PegasusTokenizer.from_pretrained(model_name)
model_pegasus = PegasusForConditionalGeneration.from_pretrained(model_name).to(torch_device)

def get_max_str(lst):
    """Return the longest element of *lst* (first one wins on length ties).

    Raises ValueError when *lst* is empty, exactly like ``max`` does.
    """
    def _length(item):
        return len(item)

    return max(lst, key=_length)
def get_response(input_text,num_return_sequences=10,num_beams=10):
    """Paraphrase *input_text* with Pegasus and return the single best phrase.

    Generates ``num_return_sequences`` beam-search candidates, then narrows
    them through the Parrot adequacy -> fluency -> diversity filters; the
    longest phrase surviving the deepest filter stage is returned.  If any
    filter stage raises, the longest raw candidate is returned instead.

    :param input_text: sentence to paraphrase.
    :param num_return_sequences: number of candidates to decode.
    :param num_beams: beam width for generation (must be >= num_return_sequences).
    :return: the selected paraphrase string.
    """
    # prepare_seq2seq_batch() was deprecated and later removed from
    # transformers; calling the tokenizer directly is the supported equivalent.
    batch = tokenizer([input_text], truncation=True, padding='longest',
                      max_length=60, return_tensors='pt').to(torch_device)
    # Beam search. The original passed temperature=1.5, but temperature has no
    # effect unless do_sample=True, so it is omitted here (identical output).
    translated = model_pegasus.generate(**batch, max_length=60,
                                        num_beams=num_beams,
                                        num_return_sequences=num_return_sequences)
    tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=True)
    try:
        adequacy_filtered_phrases = adequacy_score.filter(input_text, tgt_text, adequacy_threshold, device)
        if len(adequacy_filtered_phrases) > 0:
            fluency_filtered_phrases = fluency_score.filter(adequacy_filtered_phrases, fluency_threshold, device)
            if len(fluency_filtered_phrases) > 0:
                diversity_scored_phrases = diversity_score.rank(input_text, fluency_filtered_phrases, diversity_ranker)
                return get_max_str(diversity_scored_phrases)
            else:
                return get_max_str(fluency_filtered_phrases)
        else:
            # When no phrase passes adequacy this calls get_max_str on an
            # empty list, which raises ValueError and lands in the handler
            # below -- i.e. it deliberately falls back to the raw candidates.
            return get_max_str(adequacy_filtered_phrases)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any filter failure falls back to the longest
        # unfiltered candidate.
        return get_max_str(tgt_text)

import gradio as gr

def get_fun(txt):
    """Paraphrase *txt* sentence-by-sentence and return the recombined text.

    Splits the input into sentences, paraphrases each one via
    ``get_response``, and joins the results with single spaces.  Uses
    ``str.join`` instead of repeated `+` concatenation, which also fixes
    the stray leading space the original accumulator produced.

    :param txt: free text of one or more sentences.
    :return: paraphrased text, sentences separated by single spaces.
    """
    sentences = splitter.split(text=txt)
    return ' '.join(
        get_response(sentence, num_return_sequences=10, num_beams=10)
        for sentence in sentences
    )

# Expose the paraphraser as a simple text-in / text-out Gradio web UI.
iface = gr.Interface(fn=get_fun, inputs="text", outputs="text", title = " Ai Re-Phraser - Quotient Hackathon")
# inline=False: open the app in a separate browser tab instead of embedding
# it in the (Colab) notebook output cell.
iface.launch(inline=False)

"""# New Section"""