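# Feature engineering for the duplicate-question detector: text cleaning plus
# bag-of-words, embedding, token, length and fuzzy similarity features.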
import json
import re
import string

import distance
import numpy as np
from fuzzywuzzy import fuzz
from numpy.linalg import norm

WORD_EMBEDDINGS_PATH = "./src/dup_ques/word_embeddings.json"
STOPWORDS_PATH = "./src/utils/stopwords.json"

with open(STOPWORDS_PATH, 'r') as f:
    STOPWORDS = json.load(f)


def text_preprocess(q, allow_stopwords=True):
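    """Clean and normalize a question string.

    Strips HTML, URLs and emoji, spells out common symbols, shortens large
    numbers, expands contractions and chat abbreviations, removes punctuation,
    and drops stopwords when allow_stopwords is False.
    """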
    # Common chat abbreviations -> their expansions (keys are uppercase)
    abbv = {
        "AFAIK": "as far as I know", "IMO": "in my opinion", "IMHO": "in my humble opinion",
        "LGTM": "looks good to me", "AKA": "also known as", "ASAP": "as soon as possible",
        "BTW": "by the way", "FAQ": "frequently asked questions", "DIY": "do it yourself",
        "DM": "direct message", "FYI": "for your information", "IC": "i see",
        "IOW": "in other words", "IIRC": "if I remember correctly", "ICYMI": "in case you missed it",
        "CUZ": "because", "COS": "because", "NV": "nevermind", "PLZ": "please",
    }
    # Contraction -> expansion mapping; keys are lowercase to match the
    # already-lowercased text
    contractions = {
        "ain't": "am not", "aren't": "are not", "can't": "can not", "can't've": "can not have", "'cause": "because", "could've": "could have",
        "couldn't": "could not", "couldn't've": "could not have", "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not",
        "hadn't've": "had not have", "hasn't": "has not", "haven't": "have not", "he'd": "he would", "he'd've": "he would have", "he'll": "he will",
        "he'll've": "he will have", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is",
        "i'd": "i would", "i'd've": "i would have", "i'll": "i will", "i'll've": "i will have", "i'm": "i am", "i've": "i have",
        "isn't": "is not", "it'd": "it would", "it'd've": "it would have", "it'll": "it will", "it'll've": "it will have", "it's": "it is",
        "let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have", "mightn't": "might not", "mightn't've": "might not have",
        "must've": "must have", "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have", "o'clock": "of the clock",
        "oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have", "she'd": "she would",
        "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is", "should've": "should have", "shouldn't": "should not",
        "shouldn't've": "should not have", "so've": "so have", "so's": "so as", "that'd": "that would", "that'd've": "that would have", "that's": "that is",
        "there'd": "there would", "there'd've": "there would have", "there's": "there is", "they'd": "they would", "they'd've": "they would have", "they'll": "they will",
        "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have", "wasn't": "was not", "we'd": "we would",
        "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are", "we've": "we have", "weren't": "were not",
        "what'll": "what will", "what'll've": "what will have", "what're": "what are", "what's": "what is", "what've": "what have", "when's": "when is",
        "when've": "when have", "where'd": "where did", "where's": "where is", "where've": "where have", "who'll": "who will", "who'll've": "who will have",
        "who's": "who is", "who've": "who have", "why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not",
        "won't've": "will not have", "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all", "y'all'd": "you all would",
        "y'all'd've": "you all would have", "y'all're": "you all are", "y'all've": "you all have", "you'd": "you would", "you'd've": "you would have", "you'll": "you will",
        "you'll've": "you will have", "you're": "you are", "you've": "you have", "'ve": " have", "n't": " not", "'re": " are", "'ll": " will",
    }
    # Patterns for stripping HTML tags, URLs and emoji
    html_pattern = re.compile('<.*?>')
    urls_pattern = re.compile(r'https?://\S+|www\.\S+')
    emoji_pattern = re.compile("["
                               u"\U0001F600-\U0001F64F"  # emoticons
                               u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                               u"\U0001F680-\U0001F6FF"  # transport & map symbols
                               u"\U0001F1E0-\U0001F1FF"  # flags (regional indicators)
                               "]+", flags=re.UNICODE)

    punc = string.punctuation
    # Lowercase and trim the input
    q = str(q).lower().strip()

    # Strip HTML tags, URLs and emoji
    q = html_pattern.sub(r'', q)
    q = urls_pattern.sub(r'', q)
    q = emoji_pattern.sub(r'', q)

    # Spell out common symbols
    q = q.replace('%', ' percent')
    q = q.replace('$', ' dollar ')
    q = q.replace('₹', ' rupee ')
    q = q.replace('€', ' euro ')
    q = q.replace('@', ' at ')

    # Drop the '[math]' markup
    q = q.replace('[math]', '')

    # Shorten large numbers: 1,000,000,000 -> 1b, 1,000,000 -> 1m, 1,000 -> 1k
    q = q.replace(',000,000,000 ', 'b ')
    q = q.replace(',000,000 ', 'm ')
    q = q.replace(',000 ', 'k ')
    q = re.sub(r'([0-9]+)000000000', r'\1b', q)
    q = re.sub(r'([0-9]+)000000', r'\1m', q)
    q = re.sub(r'([0-9]+)000', r'\1k', q)

    # Expand contractions and abbreviations, and drop stopwords unless they
    # are explicitly allowed
    new_text = []
    for word in q.split():
        word = contractions.get(word, word)
        word = abbv.get(word.upper(), word)
        if word not in STOPWORDS or allow_stopwords:
            new_text.append(word)
    q = ' '.join(new_text)

    # Remove punctuation last, so the contraction and ',000' rules above still
    # see apostrophes and commas
    q = q.translate(str.maketrans("", "", punc))

    return q


# Small constant used to avoid division by zero in similarity/ratio features
SAFE_DIV = 0.0001


def cos_sim(q1, q2, allow_stopwords=True):
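    """Smoothed cosine similarity between the bag-of-words count vectors of
    q1 and q2, optionally ignoring stopwords."""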
    q1 = [i for i in q1.split() if i not in STOPWORDS or allow_stopwords]
    q2 = [i for i in q2.split() if i not in STOPWORDS or allow_stopwords]

    vocab = set(q1 + q2)

    vocab1 = dict(zip(vocab, [0] * len(vocab)))
    vocab2 = dict(zip(vocab, [0] * len(vocab)))

    for w in q1:
        vocab1[w] += 1
    for w in q2:
        vocab2[w] += 1

    v1 = list(vocab1.values())
    v2 = list(vocab2.values())

    return (np.dot(v1, v2) + SAFE_DIV) / (norm(v1) * norm(v2) + SAFE_DIV)


def cos_sim_vec(v1, v2):
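    """Smoothed cosine similarity between two dense vectors."""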
    return (np.dot(v1, v2) + SAFE_DIV) / (norm(v1) * norm(v2) + SAFE_DIV)


def euler_distance(v1, v2):
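    """Squared Euclidean distance between two embedding vectors."""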
    return sum((v1 - v2) ** 2)


def sentence_emb(sent):
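    """Average of the 100-dimensional word embeddings of the known words in a
    sentence; returns the zero vector if no word is found."""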
    embs = np.zeros(100)
    counter = 0
    with open(WORD_EMBEDDINGS_PATH, 'r') as f:
        WORD_EMBEDDINGS = json.load(f)
    for word in sent.split():
        emb = WORD_EMBEDDINGS.get(word)
        if emb is not None:
            embs += emb
            counter += 1
    # Avoid dividing by zero for sentences with no known words
    if counter == 0:
        counter = 1
    return embs / counter


def test_common_words(q1, q2):
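    """Number of distinct words shared by q1 and q2."""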
    w1 = set(map(lambda word: word.lower().strip(), q1.split(" ")))
    w2 = set(map(lambda word: word.lower().strip(), q2.split(" ")))
    return len(w1 & w2)


def test_total_words(q1, q2):
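    """Sum of the distinct word counts of q1 and q2."""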
    w1 = set(map(lambda word: word.lower().strip(), q1.split(" ")))
    w2 = set(map(lambda word: word.lower().strip(), q2.split(" ")))
    return len(w1) + len(w2)


def test_fetch_token_features(q1, q2):
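    """Eight token-based features: common word, stopword and token ratios
    (relative to the shorter and the longer question) and whether the first
    and last tokens match."""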
    token_features = [0.0] * 8

    q1_tokens = q1.split()
    q2_tokens = q2.split()

    if len(q1_tokens) == 0 or len(q2_tokens) == 0:
        return token_features

    # Non-stopwords and stopwords in each question
    q1_words = set([word for word in q1_tokens if word not in STOPWORDS])
    q2_words = set([word for word in q2_tokens if word not in STOPWORDS])
    q1_stops = set([word for word in q1_tokens if word in STOPWORDS])
    q2_stops = set([word for word in q2_tokens if word in STOPWORDS])

    common_word_count = len(q1_words.intersection(q2_words))
    common_stop_count = len(q1_stops.intersection(q2_stops))
    common_token_count = len(set(q1_tokens).intersection(set(q2_tokens)))

    token_features[0] = common_word_count / (min(len(q1_words), len(q2_words)) + SAFE_DIV)
    token_features[1] = common_word_count / (max(len(q1_words), len(q2_words)) + SAFE_DIV)
    token_features[2] = common_stop_count / (min(len(q1_stops), len(q2_stops)) + SAFE_DIV)
    token_features[3] = common_stop_count / (max(len(q1_stops), len(q2_stops)) + SAFE_DIV)
    token_features[4] = common_token_count / (min(len(q1_tokens), len(q2_tokens)) + SAFE_DIV)
    token_features[5] = common_token_count / (max(len(q1_tokens), len(q2_tokens)) + SAFE_DIV)

    # Whether the last and first tokens are identical
    token_features[6] = int(q1_tokens[-1] == q2_tokens[-1])
    token_features[7] = int(q1_tokens[0] == q2_tokens[0])

    return token_features


def test_fetch_length_features(q1, q2):
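    """Three length-based features: absolute token-count difference, mean
    token count, and longest-common-substring length relative to the shorter
    question."""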
    length_features = [0.0] * 3

    q1_tokens = q1.split()
    q2_tokens = q2.split()

    if len(q1_tokens) == 0 or len(q2_tokens) == 0:
        return length_features

    # Absolute difference and mean of the token counts
    length_features[0] = abs(len(q1_tokens) - len(q2_tokens))
    length_features[1] = (len(q1_tokens) + len(q2_tokens)) / 2

    # Longest common substring length, normalized by the shorter question
    strs = list(distance.lcsubstrings(q1, q2))
    if len(strs) > 0:
        length_features[2] = len(strs[0]) / (min(len(q1), len(q2)) + 1)

    return length_features


def test_fetch_fuzzy_features(q1, q2):
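    """Four fuzzywuzzy similarity scores: QRatio, partial_ratio,
    token_sort_ratio and token_set_ratio."""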
    fuzzy_features = [0.0] * 4

    fuzzy_features[0] = fuzz.QRatio(q1, q2)
    fuzzy_features[1] = fuzz.partial_ratio(q1, q2)
    fuzzy_features[2] = fuzz.token_sort_ratio(q1, q2)
    fuzzy_features[3] = fuzz.token_set_ratio(q1, q2)

    return fuzzy_features


def query_point_creator(q1, q2, allow_stopwords):
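    """Build the handcrafted feature list for a question pair: bag-of-words
    cosine similarity, length and word-overlap statistics, plus the token,
    length and fuzzy feature groups."""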
    input_query = []

    q1 = text_preprocess(q1, allow_stopwords)
    q2 = text_preprocess(q2, allow_stopwords)

    # Bag-of-words cosine similarity
    input_query.append(cos_sim(q1, q2))

    # Character and word counts
    input_query.append(len(q1))
    input_query.append(len(q2))
    input_query.append(len(q1.split(" ")))
    input_query.append(len(q2.split(" ")))

    # Word-overlap statistics
    input_query.append(test_common_words(q1, q2))
    input_query.append(test_total_words(q1, q2))
    input_query.append(round(test_common_words(q1, q2) / test_total_words(q1, q2), 2))

    # Token, length and fuzzy feature groups
    input_query.extend(test_fetch_token_features(q1, q2))
    input_query.extend(test_fetch_length_features(q1, q2))
    input_query.extend(test_fetch_fuzzy_features(q1, q2))

    return input_query


def get_x(q1, q2):
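    """Assemble the full model input for a question pair: handcrafted features
    with and without stopwords, embedding cosine similarity and squared
    distance, and both averaged sentence embeddings."""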
    x = []

    # Handcrafted features computed twice: with stopwords removed and kept
    x.extend(query_point_creator(q1, q2, False))
    x.extend(query_point_creator(q1, q2, True))

    q1 = text_preprocess(q1, allow_stopwords=True)
    q2 = text_preprocess(q2, allow_stopwords=True)

    # Averaged word-embedding representation of each question
    emb1 = sentence_emb(q1)
    emb2 = sentence_emb(q2)

    x.append(cos_sim_vec(emb1, emb2))
    x.append(euler_distance(emb1, emb2))

    x.extend(emb1)
    x.extend(emb2)

    # A single example of shape (1, n_features)
    return np.expand_dims(x, axis=0)
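

# Illustrative usage sketch only: the question strings below are made up, and
# running this requires the stopwords and word-embeddings JSON files to exist
# at the paths configured above.
if __name__ == "__main__":
    x = get_x("How do I learn Python?", "What is the best way to learn Python?")
    print(x.shape)  # (1, n_features)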