#!/usr/bin/python3
# -*- coding:utf-8 -*-
# Copyright 2022 The Chinaunicom Software Team. All rights reserved.
# @FileName: Semantic_search.py
# @Author  : Dyu
# @Time    : 2022/8/11

##################################################################################################
##                                       Predict model                                          ##
##                                         预测模型                                              ##
##################################################################################################


import numpy as np
from annoy import AnnoyIndex
from collections import Counter
from bert4keras.backend import keras, K
from bert4keras.models import build_transformer_model
from bert4keras.tokenizers import Tokenizer
from bert4keras.snippets import sequence_padding
from bert4keras.snippets import uniout, open
from tensorflow.keras.models import Model
import tensorflow as tf
import pandas as pd
import json
from tqdm import tqdm
from src.intelligent_interaction.engine.sem_conf import *


class SimbertEmbeding:
    """
    SimBERT embedding: transforms texts into L2-normalized sentence vectors.

    Loads a pre-trained SimBERT model (via bert4keras) using the paths in the
    module-level ``config`` and exposes single- and batch-text encoding.
    """

    def __init__(self):
        # Maximum token length; the tokenizer truncates longer texts.
        self.maxlen = config['simbert_path']['maxlen']
        # BERT configuration / checkpoint / vocabulary file paths.
        self.config_path = config['simbert_path']['config_path']
        self.checkpoint_path = config['simbert_path']['checkpoint_path']
        self.dict_path = config['simbert_path']['dict_path']

        # Build the tokenizer (lower-cases input to match the vocabulary).
        self.tokenizer = Tokenizer(self.dict_path, do_lower_case=True)

        # Build and load the SimBERT model. ``with_pool='linear'`` makes the
        # first model output the pooled sentence representation, which we
        # expose as the encoder.
        self.bert = build_transformer_model(
            self.config_path,
            self.checkpoint_path,
            with_pool='linear',
            application='unilm',
            return_keras_model=False,
        )
        self.encoder = keras.models.Model(self.bert.model.inputs, self.bert.model.outputs[0])

    def single_embedding(self, text):
        """
        Encode one text into an L2-normalized sentence vector.

        :param text: a single text string
        :return: 1-D numpy vector with unit L2 norm
        """
        token_ids, segment_ids = self.tokenizer.encode(text, maxlen=self.maxlen)
        vec = self.encoder.predict([[token_ids], [segment_ids]])[0]
        # Normalize to unit length so dot products equal cosine similarity.
        vec /= (vec ** 2).sum() ** 0.5
        return vec

    def batch_embeding(self, texts):
        """
        Encode multiple texts into L2-normalized sentence vectors.

        :param texts: iterable of text strings
        :return: 2-D numpy array, one unit-norm row vector per input text
        """
        # Reuse single_embedding so the two encoding paths cannot drift apart.
        vecs = [self.single_embedding(text) for text in tqdm(texts)]
        return np.array(vecs)


if __name__ == '__main__':
    # Smoke test: embed every question from the QA dataset and inspect results.
    simbert = SimbertEmbeding()

    qa_frame = pd.read_csv(r"data/沃灵通_qa_data.csv", encoding='utf-8')
    question_vectors = simbert.batch_embeding(qa_frame['question'])

    print(len(question_vectors))
    print(question_vectors)
    print(type(question_vectors))
