import itertools
import random

import gradio as gr
from gradio.mix import Parallel
import nltk
import torch
from nltk.tokenize import sent_tokenize
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers import PegasusForConditionalGeneration, PegasusTokenizer

nltk.download("punkt")

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# T5 paraphraser: rewrites the whole input in one pass.
tokenizer = AutoTokenizer.from_pretrained("jaimin/T5-Large")
model = AutoModelForSeq2SeqLM.from_pretrained("jaimin/T5-Large").to(device)

# Pegasus paraphraser: applied sentence by sentence below.
tokenizer1 = PegasusTokenizer.from_pretrained("jaimin/pegasus")
model1 = PegasusForConditionalGeneration.from_pretrained("jaimin/pegasus").to(device)


def get_paraphrases(sentence, n_predictions=10, max_length=256):
    """Generate up to n_predictions paraphrases of the full input with T5."""
    text = "paraphrase: " + sentence + " </s>"
    encoding = tokenizer.encode_plus(
        text,
        padding="max_length",
        max_length=max_length,
        truncation=True,
        return_tensors="pt",
    )
    input_ids = encoding["input_ids"].to(device)
    attention_masks = encoding["attention_mask"].to(device)

    # Diverse beam search: 15 beams split into 3 groups, with a diversity
    # penalty so the returned sequences differ from one another.
    model_output = model.generate(
        input_ids=input_ids,
        attention_mask=attention_masks,
        max_length=512,
        early_stopping=True,
        num_beams=15,
        num_beam_groups=3,
        num_return_sequences=n_predictions,
        diversity_penalty=0.70,
    )

    outputs = []
    for output in model_output:
        generated_sent = tokenizer.decode(
            output, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        # Drop exact echoes of the input and duplicates.
        if generated_sent.lower() != sentence.lower() and generated_sent not in outputs:
            outputs.append(generated_sent)
    return outputs


def get_paraphrases_pytorchlight(sentence, n_predictions=5):
    """Paraphrase each sentence with Pegasus, then recombine the variants."""
    para = []
    for sent in sent_tokenize(sentence):
        text = "paraphrase: " + sent + " </s>"
        encoding = tokenizer1.encode_plus(
            text, padding=True, truncation=True, return_tensors="pt"
        )
        input_ids = encoding["input_ids"].to(device)
        attention_masks = encoding["attention_mask"].to(device)

        model_output = model1.generate(
            input_ids=input_ids,
            attention_mask=attention_masks,
            max_length=512,
            early_stopping=True,
            num_beams=15,
            num_beam_groups=3,
            num_return_sequences=n_predictions,
            diversity_penalty=0.70,
        )

        outputs = []
        for output in model_output:
            generated_sent = tokenizer1.decode(
                output, skip_special_tokens=True, clean_up_tokenization_spaces=True
            )
            # Compare against the current sentence, not the whole input.
            if generated_sent.lower() != sent.lower() and generated_sent not in outputs:
                outputs.append(generated_sent)
        para.append(outputs)

    # Take the Cartesian product of per-sentence paraphrases, shuffle, and
    # join each combination back into a full-text paraphrase.
    combinations = list(itertools.product(*para))
    random.shuffle(combinations)
    return [" ".join(combo) for combo in combinations]


iface = gr.Interface(
    fn=get_paraphrases, inputs=[gr.inputs.Textbox(lines=5)], outputs=["text"]
)
iface1 = gr.Interface(
    fn=get_paraphrases_pytorchlight, inputs=[gr.inputs.Textbox(lines=5)], outputs=["text"]
)

# Run both paraphrasers side by side on the same input.
Parallel(iface, iface1).launch()
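
# --- Usage sketch -------------------------------------------------------------
# A minimal example of calling the two paraphrasers directly (e.g. from a
# Python shell), bypassing the Gradio UI. The sample sentences are illustrative
# and not from the original script; run calls like these before
# Parallel(...).launch(), since launch() blocks.
#
#   paraphrases = get_paraphrases("The weather is nice today.", n_predictions=3)
#   variants = get_paraphrases_pytorchlight("It is raining. We stayed inside.")
#   print(paraphrases)
#   print(variants)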