# -*- coding: utf-8 -*-
"""
@date: 2021/8/20 19:09
@file: sent_sim.py
@author: lilong
@desc: 
"""
import json
import numpy as np
import pandas as pd
from tqdm import tqdm

from keras.models import Model
from keras.layers import *
from keras.constraints import unit_norm
from keras.callbacks import Callback
import keras.backend as K
import tensorflow as tf

from marginTextSimilarity.margin_softmax import sparse_amsoftmax_loss
from marginTextSimilarity.utils import strQ2B


class Config:
    """Static hyper-parameters and data paths for the similarity model."""

    # Corpus path (tab-separated: group_id \t sentence)
    # file_path = './data/few-shot-44k-class.csv'
    file_path = '../data/mar_text_sim/data/tt.csv'

    # Hyper-parameters
    num_train_groups = 90  # first N question groups used for training (original run: 90000)
    maxlen = 32  # maximum sentence length in characters
    word_size = 128  # character embedding dimension
    min_count = 5  # minimum character frequency kept in the vocabulary
    batch_size = 100  # batch size (original run: 100)
    epochs = 3  # amsoftmax needs ~25 epochs, other losses ~20 (original run: 30)


def data_process(file_path):
    """Load the tab-separated corpus and normalize its text column.

    Reads a headerless TSV file, converts full-width characters to
    half-width via ``strQ2B`` and lowercases the sentences (column 1).
    """
    corpus = pd.read_csv(file_path, encoding='utf-8', header=None, delimiter='\t')
    corpus[1] = corpus[1].apply(strQ2B).str.lower()
    return corpus


class Evaluate(Callback):
    """Keras callback: computes validation top-k accuracy after each epoch
    and saves the weights of the best (top-1) model seen so far."""

    def __init__(self, evaluate, model):
        # Fix: Keras Callback subclasses must initialize the base class so
        # the callback machinery (set_model/set_params) works correctly.
        super().__init__()
        self.accs = {'top1': [], 'top5': [], 'top10': []}
        self.evaluate = evaluate  # evaluation function returning (top1, top5, top10)
        self.model = model  # model whose weights are saved on improvement
        self.highest = 0.  # best top-1 accuracy observed so far

    def on_epoch_end(self, epoch, logs=None):
        top1_acc, top5_acc, top10_acc = self.evaluate()
        self.accs['top1'].append(top1_acc)
        self.accs['top5'].append(top5_acc)
        self.accs['top10'].append(top10_acc)
        if top1_acc >= self.highest:  # save the best model weights
            self.highest = top1_acc
            self.model.save_weights('sent_sim_amsoftmax.model')
        # Fix: use a context manager so the log file handle is always closed
        # (the original passed a bare open() to json.dump and leaked it).
        with open('valid_amsoftmax.log', 'w') as f:
            json.dump({'accs': self.accs, 'highest_top1': self.highest}, f, indent=4)
        print('top1_acc: %s, top5_acc: %s, top10_acc: %s' % (top1_acc, top5_acc, top10_acc))


class MarginSim:
    """Short-text similarity model trained with AM-softmax.

    Pipeline: build a character vocabulary, train a GRU encoder as a
    classifier over question groups, then rank validation sentences by
    dot product of the L2-normalized sentence vectors.
    """

    def __init__(self):
        self.padding = 0  # vocabulary id 0 is reserved as the padding marker
        self.unk = 1  # vocabulary id 1 is reserved as the unknown-character marker
        self.data = data_process(Config.file_path)  # preprocessed corpus

    def string2id(self, s):
        """Map a string to a fixed-length list of character ids.

        Truncates to ``Config.maxlen``; out-of-vocabulary characters map
        to ``self.unk`` and the sequence is right-padded with ``self.padding``.
        """

        # Limit sentence to maxlen; unknown char -> 1; padding -> 0
        _ = [self.char2id.get(i, self.unk) for i in s[:Config.maxlen]]
        _ = _ + [self.padding] * (Config.maxlen - len(_))  # padding appended at the end
        return _

    def static(self):
        """Count character frequencies and build the vocabulary mappings."""
        chars = {}
        for s in tqdm(iter(self.data[1])):
            for c in s:
                if c not in chars:
                    chars[c] = 0
                chars[c] += 1
        self.chars = {i: j for i, j in chars.items() if j >= Config.min_count}  # keep chars with frequency >= min_count
        self.id2char = {i + 2: j for i, j in enumerate(self.chars)}  # the first 2 ids are reserved markers
        self.char2id = {j: i for i, j in self.id2char.items()}  # character -> id mapping

    def train_data(self):
        """Build the training arrays and the held-out validation frame."""

        # Number of training groups == number of classes
        num_train_groups = Config.num_train_groups

        # Add a new column with the id-mapped sentences
        self.data[2] = self.data[1].apply(self.string2id)

        # Training set: groups below the threshold
        train_data = self.data[self.data[0] < num_train_groups]
        train_data = train_data.sample(frac=1)  # shuffle (sample 100% of rows)
        self.x_train = np.array(list(train_data[2]))
        self.y_train = np.array(list(train_data[0])).reshape((-1, 1))

        # Validation set: disjoint from the training groups
        self.valid_data = self.data[self.data[0] >= num_train_groups]

    def build_model(self):
        """Build the classifier, the sentence encoder and the ranking helper."""

        # Main model: GRU-based classifier; input is a sentence of up to 32 ids
        x_in = Input(shape=(Config.maxlen,))

        # Embedding layer (+2 for the reserved padding/unk ids)
        x_embedded = Embedding(len(self.chars) + 2, Config.word_size)(x_in)

        # x = CuDNNGRU(Config.word_size)(x_embedded)  # GPU version of GRU
        x = GRU(Config.word_size)(x_embedded)  # CPU version of GRU

        # Row-wise L2 normalization of the sentence vector
        x = Lambda(lambda x: K.l2_normalize(x, 1))(x)

        # Fully connected layer
        pred = Dense(Config.num_train_groups,  # number of classes
                     use_bias=False,  # no bias term
                     kernel_constraint=unit_norm())(x)  # weight constraint, see: https://ai.51cto.com/art/202007/620947.htm

        # Model 1: classification model used for training
        model = Model(x_in, pred)
        model.compile(loss=sparse_amsoftmax_loss,
                      optimizer='adam',
                      metrics=['sparse_categorical_accuracy'])
        self.cls_model = model

        # Model 2: the real goal is the sentence encoder
        self.encoder = Model(x_in, x)

        # Model 3: ranking helper for the validation set; numpy would work
        # too, but a Keras model can leverage the GPU — it is pure computation
        x_in = Input(shape=(Config.word_size,))  # 128-dim sentence vector input
        x = Dense(len(self.valid_data), use_bias=False)(x_in)  # pure dot products, no activation
        x = Lambda(lambda x: tf.nn.top_k(x, 11)[1])(x)  # indices of the top-k scores
        self.model_sort = Model(x_in, x)

        # Mapping from 0-based row index to group number (class label)
        self.id2g = dict(zip(self.valid_data.index - self.valid_data.index[0], self.valid_data[0]))

    def evaluate(self):
        """Compute top-1/5/10 retrieval accuracy over the validation set."""

        print('validing...')

        # Encode all validation sentences into vectors
        valid_vec = self.encoder.predict(np.array(list(self.valid_data[2])),
                                         verbose=True,
                                         batch_size=Config.batch_size)

        # Load the sentence vectors as the Dense weights so the model
        # computes all pairwise dot products in one pass
        self.model_sort.set_weights([valid_vec.T])

        # Top-k neighbour indices for every validation sentence
        sorted_result = self.model_sort.predict(valid_vec,
                                                verbose=True,
                                                batch_size=Config.batch_size)

        new_result = np.vectorize(lambda s: self.id2g[s])(sorted_result)

        # Start with an all-False vector
        _ = new_result[:, 0] != new_result[:, 0]

        # When sorted by similarity, the first hit is the query itself
        # (exact match), so neighbours are taken from position 1 onward
        for i in range(10):
            _ = _ + (new_result[:, 0] == new_result[:, i + 1])
            if i + 1 == 1:
                top1_acc = 1. * _.sum() / len(_)
            elif i + 1 == 5:
                top5_acc = 1. * _.sum() / len(_)
            elif i + 1 == 10:
                top10_acc = 1. * _.sum() / len(_)

        return top1_acc, top5_acc, top10_acc

    def train(self):
        """Train the classifier and cache the validation sentence vectors."""

        evaluator = Evaluate(self.evaluate, self.cls_model)

        history = self.cls_model.fit(self.x_train,
                                     self.y_train,
                                     batch_size=Config.batch_size,
                                     epochs=Config.epochs,
                                     callbacks=[evaluator])

        # Encode validation sentences for later similarity queries
        self.valid_vec = self.encoder.predict(np.array(list(self.valid_data[2])),
                                         verbose=True,
                                         batch_size=Config.batch_size)

    def most_similar(self, s):
        """Print the 10 validation sentences most similar to ``s``."""
        v = self.encoder.predict(np.array([self.string2id(s)]))[0]
        sims = np.dot(self.valid_vec, v)
        for i in sims.argsort()[-10:][::-1]:
            print(self.valid_data.iloc[i][1], sims[i])


if __name__ == "__main__":
    ms = MarginSim()
    ms.static()  # build the character vocabulary
    ms.train_data()  # build the train/validation splits
    ms.build_model()
    ms.train()

    # # Prediction examples
    # ms.most_similar(u'ps格式可以转换成ai格式吗')
    # ms.most_similar(u'广州的客运站的数目')
    # ms.most_similar(u'沙发一般有多高')
