import json
import pysparnn.cluster_index as ci
import pickle
import os
from sklearn.feature_extraction.text import TfidfVectorizer

import config
from dnn.recall.Bm25Vectorizer import Bm25Vectorizer
from dnn.recall.fasttext_vectorizer import FasttextVectorizer


class Sentence2Vector:
    """Build sentence vectors over a QA corpus and a pysparnn search index.

    Supported vectorization methods (case-insensitive): "bm25", "tfidf",
    "fasttext". The fitted vectorizer must also be applied to user queries
    at search time so query and corpus vectors live in the same space.
    """

    def __init__(self, method="BM25"):
        """
        :param method: name of the term-weighting / embedding method,
                       one of "BM25", "tfidf", "fasttext" (any case).
        """
        # Normalize once so every later comparison is simple.
        self.method = method.lower()

        # Pick the corpus file for the chosen method.
        # NOTE(review): "fasttext" currently falls through to the tfidf
        # corpus path — confirm this is intended and not a missing branch.
        data_path = config.recall_corpus_bm25_path if self.method == "bm25" \
            else config.recall_corpus_tfidf_path
        # Context manager ensures the corpus file handle is closed
        # (the original leaked it via json.load(open(...))).
        with open(data_path, mode="r", encoding="UTF-8") as f:
            self.qa_dict = json.load(f)

        # Path where the serialized search index is cached on disk.
        if self.method == "bm25":
            self.model_path = config.recall_search_index_bm25_model_path
        elif self.method == "tfidf":
            self.model_path = config.recall_search_index_tfidf_model_path
        else:
            self.model_path = config.recall_search_index_fasttext_model_path

    def build_vector(self):
        """Vectorize the corpus questions and build/load the search index.

        :return: tuple (vectorizer, features_vector, lines_cut, search_index)
                 where lines_cut is the list of space-joined tokenized
                 questions used to fit the vectorizer.
        """
        # 1. Collect the questions (the dict keys), preserving dict order.
        # lines: [q1, q2, ...]
        lines = list(self.qa_dict)
        # self.qa_dict[q]["cut"] is the word-level token list for question q;
        # the vectorizers expect whitespace-separated token strings, so join.
        lines_cut = [" ".join(self.qa_dict[q]["cut"]) for q in lines]

        # 2. Create the vectorizer matching the configured method.
        if self.method == "bm25":
            vectorizer = Bm25Vectorizer()
        elif self.method == "tfidf":
            vectorizer = TfidfVectorizer()
        else:
            vectorizer = FasttextVectorizer()

        # 3. Fit on the corpus and transform it into feature vectors.
        # fit_transform takes a list of (tokenized, space-joined) strings.
        features_vector = vectorizer.fit_transform(lines_cut)

        # 4. Build (or load from cache) the cluster index over the vectors.
        search_index = self.get_cp(features_vector, lines)

        return vectorizer, features_vector, lines_cut, search_index

    def get_cp(self, vectors, data):
        """Return the search index: load the cached one if it exists on
        disk, otherwise build and cache a new one."""
        if os.path.exists(self.model_path):
            # SECURITY NOTE: pickle.load executes arbitrary code during
            # deserialization — only load index files this project produced.
            with open(self.model_path, "rb") as f:
                search_index = pickle.load(f)
        else:
            search_index = self.build_cp(vectors, data)

        return search_index

    def build_cp(self, vectors, data):
        """Build a MultiClusterIndex over (vectors, data) and cache it
        to self.model_path for reuse on subsequent runs."""
        search_index = ci.MultiClusterIndex(vectors, data)
        # Binary mode — no encoding argument for pickle files; the context
        # manager closes the handle the original left open.
        with open(self.model_path, mode="wb") as f:
            pickle.dump(search_index, f)
        return search_index
