# -*- coding: utf-8 -*-
"""
@date: 2021/6/2 19:52
@file: model.py
@author: lilong
@desc:  模型的构建
"""

import os
import json
import numpy as np

from keras.layers import *
import keras.backend as K
from keras.callbacks import Callback


class OurLayer(Layer):
    """Base Layer subclass that adds a `reuse` method, allowing a layer
    to invoke off-the-shelf Keras layers inside its own `call` while
    correctly registering their weights on the wrapper.
    """

    def reuse(self, layer, *args, **kwargs):
        """Build (if necessary) and invoke `layer`, tracking its weights.

        Args:
            layer: inner Keras layer to call.
            *args, **kwargs: forwarded to `layer.call`; the first positional
                argument (or the `inputs` keyword) provides the tensor(s)
                whose static shape is used to build the inner layer.
        Returns:
            The inner layer's output tensor(s).
        """
        # Build the inner layer from the static shape of its inputs.
        if not layer.built:
            if len(args) > 0:
                inputs = args[0]
            else:
                inputs = kwargs['inputs']
            if isinstance(inputs, list):
                input_shape = [K.int_shape(x) for x in inputs]
            else:
                input_shape = K.int_shape(inputs)   # static shape tuple
            layer.build(input_shape)

        # BUG FIX: the original line was truncated (`layer.call(,`), a
        # syntax error. Forward the call arguments to the inner layer.
        outputs = layer.call(*args, **kwargs)

        # Register the inner layer's weights on this wrapper so they are
        # trained and serialized with the enclosing model.
        for w in layer.trainable_weights:
            if w not in self._trainable_weights:
                self._trainable_weights.append(w)
        for w in layer.non_trainable_weights:
            if w not in self._non_trainable_weights:
                self._non_trainable_weights.append(w)
        return outputs


class AttentionPooling1D(OurLayer):
    """Additive attention pooling: collapse a (masked) vector sequence
    into one fixed-length vector via a learned softmax over timesteps.
    """

    def __init__(self, h_dim=None, **kwargs):
        super(AttentionPooling1D, self).__init__(**kwargs)
        self.h_dim = h_dim  # hidden size of the key projection

    def build(self, input_shape):
        super(AttentionPooling1D, self).build(input_shape)
        # Default the hidden size to the sequence's feature dimension.
        if self.h_dim is None:
            self.h_dim = input_shape[0][-1]
        # Key projection (tanh-activated), then a scalar score per step.
        self.k_dense = Dense(self.h_dim, use_bias=False, activation='tanh')
        self.o_dense = Dense(1, use_bias=False)

    def call(self, inputs):
        """Score each timestep, softmax over time, return the weighted sum."""
        seq, mask = inputs
        scores = self.reuse(self.o_dense, self.reuse(self.k_dense, seq))
        # Push masked positions towards -inf so softmax gives them ~0 weight.
        scores = scores - (1 - mask) * 1e12
        weights = K.softmax(scores, 1)
        return K.sum(weights * seq, 1)

    def compute_output_shape(self, input_shape):
        return (None, input_shape[0][-1])


class DilatedGatedConv1D(OurLayer):
    """Dilated gated 1D convolution (DGCNN block)."""

    def __init__(self,
                 o_dim=None,
                 k_size=3,
                 rate=1,
                 skip_connect=True,
                 drop_gate=None,
                 **kwargs):
        super(DilatedGatedConv1D, self).__init__(**kwargs)
        self.o_dim = o_dim              # output channels
        self.k_size = k_size            # kernel size
        self.rate = rate                # dilation rate
        self.skip_connect = skip_connect
        self.drop_gate = drop_gate      # optional dropout rate on the gate

    def build(self, input_shape):
        super(DilatedGatedConv1D, self).build(input_shape)
        if self.o_dim is None:
            self.o_dim = input_shape[0][-1]
        # One convolution produces both the candidate output and the gate.
        self.conv1d = Conv1D(
            self.o_dim * 2,
            self.k_size,
            dilation_rate=self.rate,
            padding='same'
        )
        # 1x1 conv to align channel counts for the residual branch.
        if self.skip_connect and self.o_dim != input_shape[0][-1]:
            self.conv1d_1x1 = Conv1D(self.o_dim, 1)

    def call(self, inputs):
        seq, mask = inputs
        conv_out = self.reuse(self.conv1d, seq * mask)
        candidate = conv_out[..., :self.o_dim]
        gate = conv_out[..., self.o_dim:]
        if self.drop_gate is not None:
            # Dropout on the gate pre-activation, training phase only.
            gate = K.in_train_phase(K.dropout(gate, self.drop_gate), gate)
        gate = K.sigmoid(gate)
        if not self.skip_connect:
            return candidate * gate * mask
        residual = seq
        if self.o_dim != K.int_shape(seq)[-1]:
            residual = self.reuse(self.conv1d_1x1, residual)
        # Gated residual mix, re-masked.
        return (residual * (1 - gate) + candidate * gate) * mask

    def compute_output_shape(self, input_shape):
        return input_shape[0][:-1] + (self.o_dim,)


class MixEmbedding(OurLayer):
    """Mixed embedding: char ids go through an embedding table, word
    embeddings are projected by a bias-free dense layer to the same
    dimension, and the two are summed elementwise.
    """

    def __init__(self, i_dim, o_dim, **kwargs):
        super(MixEmbedding, self).__init__(**kwargs)
        self.i_dim = i_dim  # char vocabulary size
        self.o_dim = o_dim  # output embedding dimension

    def build(self, input_shape):
        super(MixEmbedding, self).build(input_shape)
        self.char_embeddings = Embedding(self.i_dim, self.o_dim)
        self.word_dense = Dense(self.o_dim, use_bias=False)

    def call(self, inputs):
        """inputs = [char ids, word embeddings]; returns their sum."""
        char_ids, word_vecs = inputs
        char_emb = self.reuse(self.char_embeddings, char_ids)
        word_proj = self.reuse(self.word_dense, word_vecs)
        return char_emb + word_proj

    def compute_output_shape(self, input_shape):
        return input_shape[0] + (self.o_dim,)


class ExponentialMovingAverage:
    """Maintain an exponential moving average (EMA) of model weights.

    Usage: construct after `model.compile` and before the first training
    step, then call `inject()`. Around evaluation, use
    `apply_ema_weights()` / `reset_old_weights()` to swap the averaged
    weights in and out.
    """

    def __init__(self, model, momentum=0.9999):
        self.momentum = momentum
        self.model = model
        # One shadow variable per model weight.
        # NOTE(review): K.shape returns a tensor, not a tuple; whether
        # K.zeros accepts it is backend-dependent — confirm, or consider
        # K.int_shape if this ever raises.
        self.ema_weights = [K.zeros(K.shape(w)) for w in model.weights]

    def inject(self):
        """Register the EMA update ops on model.metrics_updates so they
        execute on every training step.
        """
        self.initialize()
        for shadow, weight in zip(self.ema_weights, self.model.weights):
            update_op = K.moving_average_update(shadow, weight, self.momentum)
            self.model.metrics_updates.append(update_op)

    def initialize(self):
        """Sync the shadow weights to the model's current weights."""
        self.old_weights = K.batch_get_value(self.model.weights)
        K.batch_set_value(zip(self.ema_weights, self.old_weights))

    def apply_ema_weights(self):
        """Back up the live weights, then load the EMA weights into the model."""
        self.old_weights = K.batch_get_value(self.model.weights)
        averaged = K.batch_get_value(self.ema_weights)
        K.batch_set_value(zip(self.model.weights, averaged))

    def reset_old_weights(self):
        """Restore the weights backed up by apply_ema_weights()/initialize()."""
        K.batch_set_value(zip(self.model.weights, self.old_weights))


class Evaluate(Callback):
    """Per-epoch evaluator: applies the EMA weights, evaluates on the dev
    set via an external script, tracks the best score, and performs a
    one-time learning-rate drop when training stalls or diverges.
    """

    def __init__(self):
        # FIX: initialize the Keras Callback base class so its inherited
        # state is set up correctly (the original skipped this).
        super(Evaluate, self).__init__()
        self.metrics = []  # accumulated (epoch, acc, f1, final) tuples
        self.best = 0.     # best `final` score seen so far
        self.stage = 0     # 0 = initial LR stage, 1 = after the LR drop

    def objTransport(self, train_model, dev_data, predict):
        """Late injection of the objects this callback needs.

        Args:
            train_model: the compiled model being trained.
            dev_data: dev-set data passed through to `predict`.
            predict: callable(data, out_path, threshold=...) writing results.
        """
        self.dev_data = dev_data
        self.train_model = train_model
        self.predict = predict

        # Exponential moving average of the training weights.
        EMAer = ExponentialMovingAverage(train_model)
        EMAer.inject()
        self.EMAer = EMAer

    def on_epoch_end(self, epoch, logs=None):
        # Evaluate with the averaged weights swapped in.
        self.EMAer.apply_ema_weights()
        acc, f1, final = self.evaluate()
        self.metrics.append((epoch, acc, f1, final))
        json.dump(self.metrics, open('train.log', 'w'), indent=4)
        if final > self.best:
            self.best = final
            self.train_model.save_weights('best_model.weights')
        print('learning rate: %s' % (K.eval(self.model.optimizer.lr)))
        print('acc: %.4f, f1: %.4f, final: %.4f, best final: %.4f\n' % (acc, f1, final, self.best))
        # Restore the live training weights before the next epoch.
        self.EMAer.reset_old_weights()
        if epoch + 1 == 30 or (
                self.stage == 0 and epoch > 15 and
                (final < 0.5 or np.argmax(self.metrics, 0)[3] < len(self.metrics) - 5)
        ):
            # Drop the LR once when: epoch 30 is reached, or `final` falls
            # below 0.5 (divergence), or there has been no improvement for
            # 5 consecutive epochs. Reload the best weights and reset the
            # optimizer state so training resumes cleanly at the lower LR.
            self.stage = 1
            self.train_model.load_weights('best_model.weights')
            self.EMAer.initialize()
            K.set_value(self.model.optimizer.lr, 1e-4)
            K.set_value(self.model.optimizer.iterations, 0)
            opt_weights = K.batch_get_value(self.model.optimizer.weights)
            opt_weights = [w * 0. for w in opt_weights]
            K.batch_set_value(zip(self.model.optimizer.weights, opt_weights))

    def evaluate(self, threshold=0.1):
        """Run prediction on the dev set and score it with the external
        evaluation script; returns (acc, f1, final).
        """
        self.predict(self.dev_data, 'tmp_result.txt', threshold=threshold)
        # NOTE(review): assumes the script prints a JSON list of three
        # numbers and that tmp_output.txt is the gold file — confirm.
        acc, f1, final = json.loads(
            os.popen(
                'python ../evaluate_tool/evaluate.py tmp_result.txt tmp_output.txt'
            ).read().strip()
        )
        return acc, f1, final
