# extractive_summaries/paraphraser.py
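"""Extractive summarization utilities: keyword/keyphrase extraction with
sentence-transformer embeddings, selection of the sentences containing those
phrases, and T5-based paraphrasing of the selected sentences."""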
import re
import numpy as np
import itertools
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from sentence_transformers import SentenceTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
class KeywordExtraction:
def __init__(self, n_gram_range=(1, 1), stop_words='english', model_name='distilbert-base-nli-mean-tokens'):
self.n_gram_range = n_gram_range
self.stop_words = stop_words
self.model_name = model_name
self.model = SentenceTransformer(self.model_name)
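    # `diversity` is a (method, value) pair: ('mmr', weight) for maximal marginal
    # relevance, ('mss', nr_candidates) for max-sum similarity, or anything else
    # to fall back to plain cosine-similarity ranking.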
def __call__(self, doc, top_n=5, diversity=('mmr', 0.7)):
doc_embedding = self.get_document_embeddings(doc)
candidates = self.get_candidates(doc)
candidate_embeddings = self.get_candidate_embeddings(candidates)
try:
if diversity[0] == 'mmr':
# print('using maximal marginal relevance method...')
return self.maximal_marginal_relevance(doc_embedding,
candidate_embeddings,
candidates,
top_n=top_n,
diversity=diversity[1])
elif diversity[0] == 'mss':
# print('using max sum similarity method...')
return self.max_sum_similarity(doc_embedding,
candidate_embeddings,
candidates,
top_n=top_n,
nr_candidates=diversity[1])
else:
# print('using default method...')
return self.get_keywords(doc_embedding, candidate_embeddings, candidates, top_n)
        except Exception as e:
            # Log the error and fall through to return None; callers
            # (e.g. get_key_sentences) check for a None result.
            print(e)
            return None
def get_candidates(self, doc):
# Extract candidate words/phrases
count = CountVectorizer(ngram_range=self.n_gram_range, stop_words=self.stop_words).fit([doc])
return count.get_feature_names_out()
def get_candidate_embeddings(self, candidates):
return self.model.encode(candidates)
def get_document_embeddings(self, doc):
return self.model.encode([doc])
def get_keywords(self, doc_embedding, candidate_embeddings, candidates, top_n=5):
distances = cosine_similarity(doc_embedding, candidate_embeddings)
keywords = [candidates[index] for index in distances.argsort()[0][-top_n:]]
return keywords
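    # Max-sum similarity: take the nr_candidates phrases closest to the document,
    # then brute-force the top_n subset whose members are least similar to each other.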
def max_sum_similarity(self, doc_embedding, candidate_embeddings, candidates, top_n, nr_candidates):
# Calculate distances and extract keywords
distances = cosine_similarity(doc_embedding, candidate_embeddings)
distances_candidates = cosine_similarity(candidate_embeddings,
candidate_embeddings)
# Get top_n words as candidates based on cosine similarity
words_idx = list(distances.argsort()[0][-nr_candidates:])
words_vals = [candidates[index] for index in words_idx]
distances_candidates = distances_candidates[np.ix_(words_idx, words_idx)]
# Calculate the combination of words that are the least similar to each other
min_sim = np.inf
candidate = None
for combination in itertools.combinations(range(len(words_idx)), top_n):
sim = sum([distances_candidates[i][j] for i in combination for j in combination if i != j])
if sim < min_sim:
candidate = combination
min_sim = sim
return [words_vals[idx] for idx in candidate]
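    # Maximal marginal relevance: start from the phrase closest to the document,
    # then greedily add the candidate that maximizes
    #   (1 - diversity) * sim(candidate, doc) - diversity * max_sim(candidate, selected)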
def maximal_marginal_relevance(self, doc_embedding, word_embeddings, words, top_n, diversity):
# Extract similarity within words, and between words and the document
word_doc_similarity = cosine_similarity(word_embeddings, doc_embedding)
word_similarity = cosine_similarity(word_embeddings)
        # Initialize candidates and pick the single best keyword/keyphrase first
keywords_idx = [np.argmax(word_doc_similarity)]
candidates_idx = [i for i in range(len(words)) if i != keywords_idx[0]]
for _ in range(top_n - 1):
# Extract similarities within candidates and
# between candidates and selected keywords/phrases
candidate_similarities = word_doc_similarity[candidates_idx, :]
target_similarities = np.max(word_similarity[candidates_idx][:, keywords_idx], axis=1)
# Calculate MMR
mmr = (1-diversity) * candidate_similarities - diversity * target_similarities.reshape(-1, 1)
mmr_idx = candidates_idx[np.argmax(mmr)]
# Update keywords & candidates
keywords_idx.append(mmr_idx)
candidates_idx.remove(mmr_idx)
return [words[idx] for idx in keywords_idx]
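# Build a loose regex from a key phrase: consecutive phrase tokens may be separated
# by m..n short alphanumeric words, so slightly reworded sentences still match.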
def regex(phrase, m=0, n=3):
    strng = r"([\s]*[a-zA-Z0-9]*[\s]*){%d,%d}" % (m, n)
    return strng.join(phrase.split())
def remove_square_brackets(text):
    return re.sub(r'\[[0-9]+\]', '', text)
def remove_extra_spaces(text):
    return re.sub(r'[\s]{2,}', ' ', text)
def preprocess_text(text):
    text = re.sub(r'\[[0-9]+\]', '', text)
    text = re.sub(r'[\s]{2,}', ' ', text)
    text = text.strip()
    return text
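# Naive sentence splitter: splits on periods only, so abbreviations and decimal
# numbers will also be split.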
def sent_tokenize(text):
sents = text.split('.')
    sents = [s.strip() for s in sents if s.strip()]
return sents
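# Select the sentences that (loosely) contain the extracted key phrases, returned
# as {sentence_index: sentence}; returns None if keyword extraction failed.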
def get_key_sentences(text, top_n=5, diversity=('mmr', 0.6)):
kw_extractor = KeywordExtraction(n_gram_range=(1,3))
text = preprocess_text(text)
sentences = sent_tokenize(text)
key_phrases = kw_extractor(text, top_n=top_n, diversity=diversity)
if key_phrases is None:
return None
key_sents = dict()
for phrase in key_phrases:
found = False
for i, sent in enumerate(sentences):
if re.search(regex(phrase), sent):
found = True
if i not in key_sents:
key_sents[i] = sent
if not found:
print(f'The phrase "{phrase}" was not matched!')
return key_sents
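# Seq2seq paraphraser; by default it uses the Vamsi/T5_Paraphrase_Paws checkpoint,
# wraps inputs in its "paraphrase: ... </s>" prompt format, and decodes with
# top-k / nucleus sampling.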
class ParaphraseModel:
def __init__(self, model_name="Vamsi/T5_Paraphrase_Paws"):
self.model_name = model_name
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.model = AutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = self.model.to(self.device)
def __call__(self, inputs, top_k=200, top_p=0.95, num_sequences=5):
        text = self.prepare_list_input(inputs) if isinstance(inputs, list) else [f"paraphrase: {inputs} </s>"]
        encoding = self.tokenizer.batch_encode_plus(text, padding=True, return_tensors="pt")
input_ids = encoding["input_ids"].to(self.device)
attention_masks = encoding["attention_mask"].to(self.device)
outputs = self.model.generate(
input_ids=input_ids, attention_mask=attention_masks,
max_length=256,
do_sample=True,
top_k=top_k,
top_p=top_p,
early_stopping=True,
num_return_sequences=num_sequences
)
lines = []
for output in outputs:
line = self.tokenizer.decode(output,
skip_special_tokens=True,
clean_up_tokenization_spaces=True)
lines.append(line)
return lines
def prepare_list_input(self, lst):
sentences = []
for sent in lst:
sentences.append(f"paraphrase: {sent} </s>")
return sentences
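
# Minimal usage sketch (illustrative only): both models are downloaded from the
# Hugging Face Hub on first use, and the sample text below is just an example.
if __name__ == "__main__":
    sample = (
        "The Eiffel Tower is a wrought-iron lattice tower in Paris. "
        "It was designed by Gustave Eiffel's company and completed in 1889. "
        "It is one of the most visited monuments in the world."
    )
    key_sents = get_key_sentences(sample, top_n=3, diversity=('mmr', 0.6))
    if key_sents:
        paraphraser = ParaphraseModel()
        for idx in sorted(key_sents):
            sentence = key_sents[idx]
            print(f"[{idx}] {sentence}")
            # One sampled paraphrase per key sentence.
            print("   ->", paraphraser(sentence, num_sequences=1)[0])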