import json

import numpy as np
import pandas as pd
import gradio as gr
import nltk

from text_utils import *
from qa_model import *
from bm25_utils import *
from pairwise_model import *

# Download NLTK's Punkt tokenizer data used during text preprocessing
nltk.download('punkt')


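# Preprocessed Wikipedia corpora: windowed passages for retrieval and a short
# article table used to map titles to document indices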
df_wiki_windows = pd.read_csv("./processed/wikipedia_chungta_cleaned.csv")
df_wiki = pd.read_csv("./processed/wikipedia_chungta_short.csv")
df_wiki.title = df_wiki.title.apply(str)


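# Build an entity alias dictionary: strip the "wiki/" prefix and underscores from
# the targets, and also key each alias by its preprocessed, lower-cased form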
entity_dict = json.load(open("./processed/entities.json"))
new_dict = dict()
for key, val in entity_dict.items():
    val = val.replace("wiki/", "").replace("_", " ")
    entity_dict[key] = val
    key = preprocess(key)
    new_dict[key.lower()] = val
entity_dict.update(new_dict)
title2idx = dict([(x.strip(), y) for x, y in zip(df_wiki.title, df_wiki.index.values)])


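# Pipeline components: extractive QA ensemble, pairwise re-ranker (stage 1), and BM25 index (stage 1)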
qa_model = QAEnsembleModel_modify("letrunglinh/qa_pnc", entity_dict)
pairwise_model_stage1 = PairwiseModel_modify("nguyenvulebinh/vi-mrc-base")

bm25_model_stage1 = BM25Gensim("./outputs/bm25_stage1/", entity_dict, title2idx)


def get_answer_e2e(question):
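    """Answer a question end-to-end: BM25 retrieval -> pairwise re-ranking -> extractive QA.

    Falls back to the first retrieved passage when the reader returns no answer.
    """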
    # BM25 retrieval for the top-200 candidate passages
    query = preprocess(question).lower()
    top_n, bm25_scores = bm25_model_stage1.get_topk_stage1(query, topk=200)
    titles = [preprocess(df_wiki_windows.title.values[i]) for i in top_n]
    pre_texts = [preprocess(df_wiki_windows.text.values[i]) for i in top_n]

    # Re-rank the candidates with the pairwise model; the top 10 go to the reader
    question = preprocess(question)
    ranking_preds = pairwise_model_stage1.stage1_ranking(question, pre_texts)

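    # Fuse lexical and neural evidence by multiplying BM25 and re-ranker scores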
    ranking_scores = ranking_preds * bm25_scores
    
    # Extractive question answering over the 10 highest-scoring passages
    best_idxs = np.argsort(ranking_scores)[-10:]
    ranking_scores = np.array(ranking_scores)[best_idxs]
    texts = np.array(pre_texts)[best_idxs]

    best_answer = qa_model(question, texts, ranking_scores)
    
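    # Fall back to the first retrieved passage if the reader extracts no answer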
    if best_answer is None:
        return pre_texts[0]
    
    return best_answer

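# Gradio chat UI: a chatbot panel, a question textbox, and a clear button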
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="pink", neutral_hue="slate")) as demo:
    gr.Markdown("<h1><center>CHATBOT - I know what you want 💬</center></h1>")
    chatbot = gr.Chatbot(show_label=True, value=[[None, 'Hi 👋, how can I help you?']])
    msg = gr.Textbox(label="Question", placeholder="Enter your question and press Enter...")
    clear = gr.Button("Clear")

    def user(user_message, history):
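        # Clear the textbox and append the new user message (answer pending) to the history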
        return "", history + [[user_message, None]]

    def bot(history):
        # Answer the latest user message and write it back into the chat history
        best_answer = get_answer_e2e(history[-1][0])
        history[-1][1] = best_answer
        return history

    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False, scroll_to_output=True, show_progress=True).then(
        bot, chatbot, chatbot
    )
    clear.click(lambda: None, None, chatbot, queue=False)

demo.launch()