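"""Gradio demo that replies to a user's message with a joke.

Three strategies are tried: a (currently stubbed) generative model, a
WordNet-based word-substitution gag, and retrieval of the most semantically
similar joke from the pyjokes collection using BERT sentence embeddings.
"""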
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
import nltk
nltk.download('wordnet')  # only the WordNet data used by wn.synsets is needed, not 'all'
nltk.download('omw-1.4')  # multilingual wordnet data; some NLTK releases ask for it
from nltk.corpus import wordnet as wn
import numpy as np
import gradio as gr
import pyjokes
def similarity(input, joke):
    # Cosine similarity between two 1-D sentence embeddings, returned as a scalar.
    return cosine_similarity(input.reshape(1, -1), joke.reshape(1, -1))[0][0]
def get_best(input):
    # Return the pyjokes joke whose embedding is most similar to the input.
    # NOTE: the model is re-loaded on every request; for a long-running app
    # it could be created once at module level instead.
    model = SentenceTransformer('bert-base-nli-mean-tokens')
    max_similarity = -1
    max_idx = 0
    jokes = pyjokes.get_jokes(language='en', category='all')
    jokes_embedding = model.encode(jokes)
    input_embedding = model.encode(input)
    for idx, joke_embedding in enumerate(jokes_embedding):
        sim = similarity(joke_embedding, input_embedding)
        if sim > max_similarity:
            max_idx = idx
            max_similarity = sim
    if max_similarity != -1:
        return jokes[max_idx]
    else:
        return None
def generate_list(input):
    # All contiguous substrings of the input with length >= 2,
    # e.g. generate_list("cat") -> ['ca', 'at', 'cat'].
    result = []
    n = len(input)
    for Len in range(2, n + 1):
        for i in range(n - Len + 1):
            j = i + Len - 1
            tem = ""
            for k in range(i, j + 1):
                tem += input[k]
            result.append(tem)
    return result
def pattern(input):
    # Try to build a pun by swapping substrings of the input for related
    # WordNet lemmas (a hypernym's hyponym), upper-cased for emphasis.
    response = input
    for substr in generate_list(input):
        try:
            syn = wn.synsets(substr)[1].hypernyms()[0].hyponyms()[0].hyponyms()[0].lemmas()[0].name()
        except IndexError:
            # Not every substring has a synset (or enough related senses); skip it.
            continue
        if syn is not None:
            response = response.replace(substr, syn.upper())
    if input == response:
        return None
    else:
        return response
def GPT(input):
    # Placeholder for a generative-model backend; not implemented yet.
    return None
def generator(input=None):
    # Collect candidate responses from each strategy and return them as one string.
    response = []
    if input:
        out1 = GPT(input)
        if out1:
            for out in out1:
                response.append(out)
        out2 = pattern(input)
        if out2:
            response.append(out2)
        out3 = get_best(input)
        if out3:
            response.append(out3)
    else:
        # No input: greet via the (stubbed) GPT backend and fall back to a random joke.
        out1 = GPT("Hi, what's the matter")
        if out1:
            for out in out1:
                response.append(out)
        out2 = pyjokes.get_joke(language='en', category='all')
        if out2:
            response.append(out2)
    return "\n".join(response)  # the text output expects a single string
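# Quick sanity check without the UI (assumes the sentence-transformers model can be downloaded):
# print(generator("Why do programmers prefer dark mode?"))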
iface = gr.Interface(fn=generator, inputs="text", outputs="text")
iface.launch()