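"""Gradio demo for semantic FAQ search over a local FAISS index.

Pipeline: extract keywords from the user query, retrieve candidate questions
from FAISS, optionally filter the candidates by keyword overlap, rerank the
remaining candidates, and return the best answer together with its score and
the matched question.
"""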
from langchain.vectorstores import FAISS
from langchain.embeddings import SentenceTransformerEmbeddings
import gradio as gr
import reranking
#from extract_keywords import init_keyword_extractor, extract_keywords
from extract_keywords import extract_keywords2
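
# The modules `reranking` and `extract_keywords` imported above are local to
# this project and are not shipped with this file; the signatures below are
# only inferred from how they are used further down (an assumption, not their
# documented API):
#   extract_keywords2(text) -> (keywords: set[str], cleaned_text: str)
#   reranking.search(query, sentences) -> (best_score: float, best_index: int)
#
# A hypothetical reranking.search could look like this cross-encoder sketch
# (model name and implementation are assumptions, kept commented out):
#
#   from sentence_transformers import CrossEncoder
#   _model = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2")
#
#   def search(query, sentences):
#       scores = _model.predict([(query, s) for s in sentences])
#       best_index = int(scores.argmax())
#       return float(scores[best_index]), best_index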

# The embedding model must match the one used when the FAISS index was built.
embeddings = SentenceTransformerEmbeddings(model_name="multi-qa-MiniLM-L6-cos-v1")
# Load the prebuilt index from the local directory 'faiss_qa_2023-08-20'.
db = FAISS.load_local('faiss_qa_2023-08-20', embeddings)

def search_filter_function(query_keywords):
    """Return a predicate that keeps docs whose keywords cover all query keywords."""
    def fn(doc):
        # doc is a (Document, score) pair from similarity_search_with_score.
        doc_keywords = extract_keywords2(doc[0].page_content)[0]
        intersection_keywords = doc_keywords.intersection(query_keywords)
        if len(query_keywords) == 0:
            # Query without keywords: only match docs that also have no keywords.
            # (Not reached from main(), which applies the filter only when the
            # query has at least one keyword.)
            return len(doc_keywords) == 0
        else:
            # The intersection can never be larger than query_keywords, so ">="
            # effectively means "every query keyword appears in the doc keywords".
            return len(intersection_keywords) >= len(query_keywords)
    return fn


def main(query):
    query = query.lower()
    # extract_keywords2 returns the extracted keywords and the cleaned query text.
    query_keywords, query = extract_keywords2(query)
    # Retrieve the 50 nearest stored questions; each result is a (Document, score) pair.
    result_docs = db.similarity_search_with_score(query, k=50)

    # If the query contains keywords, keep only docs whose keywords cover them.
    if len(query_keywords) > 0:
        result_docs = list(filter(search_filter_function(query_keywords), result_docs))

    if len(result_docs) == 0:
        return 'Ответ не найден', 0, ''  # "Answer not found"

    # Rerank the candidate questions against the query and pick the best one.
    sentences = [doc[0].page_content for doc in result_docs]
    score, index = reranking.search(query, sentences)

    # Return the stored answer, the rerank score and the matched question text.
    return result_docs[index][0].metadata['answer'], score, result_docs[index][0].page_content

demo = gr.Interface(fn=main, inputs="text", outputs=[
  gr.Textbox(label="Ответ, который будет показан клиенту"),  # "Answer that will be shown to the client"
  gr.Textbox(label="Score"),
  gr.Textbox(label="Вопрос, по которому был найден ответ"),  # "Question for which the answer was found"
])

if __name__ == "__main__":
    demo.launch()
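
# Note: launch() also accepts options such as share=True (temporary public URL)
# or server_name="0.0.0.0" (listen on all network interfaces) if the demo needs
# to be reachable from other machines.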