import sys
sys.path.append('..')
# sys.path.append(r'F:\DataCenter\Study\京东NLP高端训练营\assignments\chatbot_system')
from site_packages.utils.job import DataOp
import numpy as np
from gensim.summarization import bm25
from site_packages.utils.job import DataOp
import jieba
from site_packages.ml_libs.nlp.stopwords import Stopwords
from collections import Counter
import pandas as pd

# Widen pandas display so debug prints of DataFrames show up to 10 columns.
pd.set_option('display.max_columns', 10)

# Module-level stopword filter used by BM25.preprocess.
# NOTE(review): 'hit' presumably selects the Harbin Institute of Technology
# stopword list — confirm against the Stopwords implementation.
stopwords = Stopwords(source='hit')


class BM25:
    """BM25 relevance ranking over a pre-tokenized corpus.

    Each corpus entry is expected to be a sequence of tokens (a list of
    words).  # NOTE(review): a raw-string entry would be treated as a
    sequence of characters by Counter/set — confirm upstream cleaning
    produces token lists.

    This implementation uses length-normalized term frequencies
    (count / doc_len) rather than raw counts, a deliberate variation on
    the textbook Okapi BM25 formula.
    """

    def __init__(self, corpus, k1=1.2, k2=2, b=3, topk=5):
        """Precompute corpus statistics.

        Args:
            corpus: list of token lists.
            k1: document term-frequency saturation parameter.
            k2: query term-frequency saturation parameter.
            b: length-normalization weight.  NOTE(review): standard BM25
                requires b in [0, 1]; the inherited default of 3 makes K
                negative for documents shorter than 2/3 of the average
                length and can corrupt scores.  Kept for backward
                compatibility — pass b=0.75 for conventional behavior.
            topk: number of documents returned by get_score
                (previously hard-coded to 5).
        """
        self.k1 = k1
        self.k2 = k2
        self.b = b
        # Guard the empty corpus: np.mean([]) would emit a warning and nan.
        self.avg_lens = float(np.mean([len(text) for text in corpus])) if len(corpus) else 0.0
        self.corpus = corpus
        self.counters = self.build_word_counters(corpus)
        self.total = len(self.corpus)
        self.indices = np.arange(self.total)
        self.inverted_index = self.build_inverted_index()
        self.topk = topk

    def build_word_counters(self, corpus):
        """Return {doc_index: Counter(tokens)} for every document."""
        return {idx: Counter(doc) for idx, doc in enumerate(corpus)}

    def build_inverted_index(self):
        """Map each token to the set of document indices containing it."""
        inverted_index = {}
        for i, doc in enumerate(self.corpus):
            for word in doc:
                # setdefault replaces the original manual seen-set bookkeeping.
                inverted_index.setdefault(word, set()).add(i)
        return inverted_index

    def idf(self, word=None):
        """Smoothed inverse document frequency of *word*.

        idf = log2((N - df + 0.5) / (df + 0.5)), negative for terms in
        more than half the corpus.  Originally an unimplemented stub;
        calling with no argument still returns None for backward
        compatibility.
        """
        if word is None:
            return None
        df = len(self.inverted_index.get(word, set()))
        return np.log2((self.total - df + 0.5) / (df + 0.5))

    def preprocess(self, text):
        """Segment *text* with jieba and drop stopwords; returns a token list."""
        words = jieba.lcut(text)
        return stopwords.clean(words)

    def count(self, item, target_words, doc_len):
        """Length-normalized frequencies of target_words.

        Args:
            item: a token list (query case; doc_len is then ignored and
                replaced by len(item)) or a document index into
                self.counters.
            target_words: words to look up.
            doc_len: divisor for the document case.

        Returns:
            {word: count / doc_len}; all 0.0 when the length is 0
            (previously a ZeroDivisionError on empty documents).
        """
        if isinstance(item, list):
            source = Counter(item)
            doc_len = len(item)
        else:
            source = self.counters[item]
        if doc_len == 0:
            return {word: 0.0 for word in target_words}
        return {word: source[word] / doc_len for word in target_words}

    def R(self, f, qf, doc):
        """Term-saturation factor combining document and query frequencies.

        K = k1 * (1 - b + b * |doc| / avgdl) is the length-normalized
        saturation denominator from Okapi BM25.
        """
        K = self.k1 * (1 - self.b + self.b * len(doc) / self.avg_lens)
        return f * (1 + self.k1) / (f + K) * qf * (1 + self.k2) / (qf + self.k2)

    def score_per_word(self, q_word, f, qf, doc):
        """One query term's contribution: idf(q_word) * R(f, qf, doc)."""
        return self.idf(q_word) * self.R(f, qf, doc)

    def score_per_doc(self, query, doc, doc_idx, tokenized=False):
        """BM25 score of one document against the query.

        Args:
            query: raw query string, or a token list when tokenized=True.
            doc: the document's token sequence.
            doc_idx: index of doc in self.corpus / self.counters.
            tokenized: pass True when query is already segmented so the
                expensive jieba call is skipped (used by get_score to
                avoid re-segmenting once per document).
        """
        words = list(query) if tokenized else self.preprocess(query)
        common_words = set(words) & set(doc)
        doc_len = len(doc)
        qf = self.count(words, common_words, doc_len)
        f = self.count(doc_idx, common_words, doc_len)
        scores = [self.score_per_word(word, f[word], qf[word], doc)
                  for word in common_words]
        return np.sum(scores)

    def get_score(self, query):
        """Rank the corpus against a raw query string.

        Returns:
            The self.topk highest-scoring documents (token lists), best
            first.
        """
        query_words = self.preprocess(query)  # segment once, not once per document
        score_list = [self.score_per_doc(query_words, doc, idx, tokenized=True)
                      for idx, doc in enumerate(self.corpus)]
        result = sorted(zip(self.indices, score_list), key=lambda x: x[1], reverse=True)
        result = result[:self.topk]
        print("result:", result)
        return [self.corpus[idx] for idx, score in result]


def main():
    """Demo: rank the training corpus against one sample query with BM25."""
    data_train = DataOp.load_data('data_train')

    # Both cleaned question columns are pooled into a single corpus.
    corpus = (data_train['question1_clean'].tolist()
              + data_train['question2_clean'].tolist())
    sample_idx = 8000
    query = data_train['question1'][sample_idx]

    print("data_train:", data_train.shape)
    print("len(corpus):", len(corpus))
    print("query:", query)
    print("corpus[{}]:".format(sample_idx), corpus[sample_idx])

    ranker = BM25(corpus)
    matches = ranker.get_score(query)
    print(matches)


if __name__ == '__main__':
    # Run the demo only when executed as a script, not on import.
    main()