import spacy
import wikipedia
from wikipedia.exceptions import DisambiguationError, PageError
from transformers import TFAutoModel, AutoTokenizer
import numpy as np
import pandas as pd
import faiss
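
# Load spaCy's small English pipeline (used for noun chunks and sentence splitting).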
try:
    nlp = spacy.load("en_core_web_sm")
except OSError:
    # Model not installed yet; fetch it on first run.
    spacy.cli.download("en_core_web_sm")
    nlp = spacy.load("en_core_web_sm")
wh_words = ['what', 'who', 'whom', 'whose', 'where', 'when', 'which', 'why', 'how']
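
# Candidate concepts are the noun chunks of the lower-cased question,
# minus bare question words; they become Wikipedia search terms.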
def get_concepts(text):
    doc = nlp(text.lower())
    concepts = []
    for chunk in doc.noun_chunks:
        if chunk.text not in wh_words:
            concepts.append(chunk.text)
    return concepts
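
# Greedily pack consecutive sentences into passages of roughly k tokens,
# splitting only on sentence boundaries.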
def get_passages(text, k=100):
    doc = nlp(text)
    passages = []
    passage = ""
    passage_len = 0
    for sen in doc.sents:
        passage_len += len(sen)
        if passage_len >= k:
            # Flush the accumulated passage and start a new one with this sentence.
            if passage:
                passages.append(passage.strip())
            passage = sen.text
            passage_len = len(sen)
        else:
            passage += " " + sen.text
    # Flush the remainder after the last sentence.
    if passage:
        passages.append(passage.strip())
    return passages
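
# Search Wikipedia for every concept, chunk the retrieved pages, and return
# one {'title', 'text'} dict per passage for the DPR context encoder.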
def get_dicts_for_dpr(concepts, n_results=20, k=100):
    dicts = []
    for concept in concepts:
        wikis = wikipedia.search(concept, results=n_results)
        print(concept, "no. of wikis:", len(wikis))
        for wiki in wikis:
            try:
                html_page = wikipedia.page(title=wiki, auto_suggest=False)
            except (DisambiguationError, PageError):
                continue
            for passage in get_passages(html_page.content, k=k):
                dicts.append({'title': wiki, 'text': passage})
    return dicts
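
# Tiny DPR encoders (2 layers, 128-dim hidden size) and their tokenizers:
# one encoder for passages (contexts), one for questions.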
passage_encoder = TFAutoModel.from_pretrained("nlpconnect/dpr-ctx_encoder_bert_uncased_L-2_H-128_A-2")
query_encoder = TFAutoModel.from_pretrained("nlpconnect/dpr-question_encoder_bert_uncased_L-2_H-128_A-2")
p_tokenizer = AutoTokenizer.from_pretrained("nlpconnect/dpr-ctx_encoder_bert_uncased_L-2_H-128_A-2")
q_tokenizer = AutoTokenizer.from_pretrained("nlpconnect/dpr-question_encoder_bert_uncased_L-2_H-128_A-2")
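
# The context encoder takes (title, text) pairs, encoded as two-segment inputs.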
def get_title_text_combined(passage_dicts):
    return [(p['title'], p['text']) for p in passage_dicts]
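
# Tokenize the (title, text) pairs and embed them with the context encoder;
# the 128-dim pooler output is the passage vector.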
def extracted_passage_embeddings(processed_passages, max_length=156):
    passage_inputs = p_tokenizer.batch_encode_plus(
        processed_passages,
        add_special_tokens=True,
        truncation=True,
        padding="max_length",
        max_length=max_length,
        return_token_type_ids=True,
    )
    passage_embeddings = passage_encoder.predict(
        [np.array(passage_inputs['input_ids']),
         np.array(passage_inputs['attention_mask']),
         np.array(passage_inputs['token_type_ids'])],
        batch_size=64,
        verbose=1,
    )
    return passage_embeddings
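
# Embed the question the same way, using the query encoder.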
def extracted_query_embeddings(queries, max_length=64):
    query_inputs = q_tokenizer.batch_encode_plus(
        queries,
        add_special_tokens=True,
        truncation=True,
        padding="max_length",
        max_length=max_length,
        return_token_type_ids=True,
    )
    query_embeddings = query_encoder.predict(
        [np.array(query_inputs['input_ids']),
         np.array(query_inputs['attention_mask']),
         np.array(query_inputs['token_type_ids'])],
        batch_size=1,
        verbose=1,
    )
    return query_embeddings
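
# End-to-end retrieval: mine concepts from the question, build a passage pool
# from Wikipedia, embed everything, and return the nearest passages from an
# exact L2 FAISS index (dimension 128 matches the encoders' pooler output).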
def search(question):
    concepts = get_concepts(question)
    print("concepts:", concepts)
    dicts = get_dicts_for_dpr(concepts, n_results=1)
    print("dicts len:", len(dicts))
    processed_passages = get_title_text_combined(dicts)
    passage_embeddings = extracted_passage_embeddings(processed_passages)
    query_embeddings = extracted_query_embeddings([question])
    faiss_index = faiss.IndexFlatL2(128)
    faiss_index.add(passage_embeddings.pooler_output)
    # FAISS returns (distances, indices); cap k so it never exceeds the
    # number of indexed passages.
    distances, indices = faiss_index.search(query_embeddings.pooler_output, k=min(10, len(dicts)))
    return pd.DataFrame([dicts[i] for i in indices[0]])
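
# Minimal Gradio UI: type a question, get back the retrieved passages.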
import gradio as gr

inp = gr.Textbox(lines=2, value="Who is aamir khan?", label="Question")
out = gr.Dataframe(label="Answers")

gr.Interface(fn=search, inputs=inp, outputs=out).launch()