#!/usr/bin/python3
# -*- coding: utf-8 -*-
# File  : onlstm.py
# Author: anyongjin
# Date  : 2020/10/13

from tensorflow.keras.layers import Layer
import tensorflow.keras.backend as K
from DistillBert.students.base import *
import tensorflow as tf


def cumsoftmax(x, mode='l2r'):
    """Apply softmax, then a cumulative sum, along the last axis.

    The accumulation direction depends on ``mode``: ``'l2r'`` accumulates
    left-to-right, ``'r2l'`` right-to-left (implemented by reversing the last
    axis before and after). Any other mode returns ``x`` unchanged.
    """
    last_axis = K.ndim(x) - 1
    if mode == 'r2l':
        flipped = x[..., ::-1]
        probs = K.softmax(flipped, axis=last_axis)
        return K.cumsum(probs, axis=last_axis)[..., ::-1]
    if mode == 'l2r':
        probs = K.softmax(x, axis=last_axis)
        return K.cumsum(probs, axis=last_axis)
    return x


class ONLSTM(Layer):
    """Ordered-Neurons LSTM layer, from the paper
    "Ordered Neurons: Integrating Tree Structures into Recurrent Neural Networks".

    The ``units`` hidden neurons are split into ``levels`` chunks of
    ``units // levels`` neurons each.  In addition to the four standard LSTM
    gate blocks (``units * 4`` outputs), the projection produces two "master"
    gates of width ``levels`` each (hence the ``units * 4 + levels * 2`` weight
    width), which control how coarse chunks of the cell state are kept/updated.
    """

    def __init__(self,
                 units,
                 levels,
                 return_sequences=False,
                 dropconnect=None,
                 in_train=False,
                 **kwargs):
        # The hidden size must split evenly into `levels` chunks.
        assert units % levels == 0
        self.in_train = in_train  # only when True is dropconnect actually applied (see build)
        self.units = units
        self.levels = levels
        self.chunk_size = units // levels
        self.return_sequences = return_sequences
        self.dropconnect = dropconnect  # DropConnect rate for both kernels, or None to disable
        self.kernel, self.recurrent_kernel, self.bias = None, None, None
        # _kernel/_recurrent_kernel are the (possibly dropped) weights used by one_step.
        self._kernel, self._recurrent_kernel = None, None
        super(ONLSTM, self).__init__(**kwargs)

    def build(self, input_shape):
        """Create the input/recurrent kernels and bias.

        All three weights have output width ``units * 4 + levels * 2``:
        four LSTM gate blocks plus the two master gates.
        """
        with tf.init_scope():
            input_dim = input_shape[-1]
            self.kernel = self.add_weight(
                shape=(input_dim, self.units * 4 + self.levels * 2),
                name='kernel',
                initializer='glorot_uniform')
            self.recurrent_kernel = self.add_weight(
                shape=(self.units, self.units * 4 + self.levels * 2),
                name='recurrent_kernel',
                initializer='orthogonal')
            self.bias = self.add_weight(
                shape=(self.units * 4 + self.levels * 2,),
                name='bias',
                initializer='zeros')
            self.built = True
            if self.dropconnect and self.in_train:
                # NOTE(review): K.dropout is applied once at build time, so the
                # DropConnect mask is fixed for the layer's lifetime rather than
                # resampled per step/batch — confirm this is intended.
                self._kernel = K.dropout(self.kernel, self.dropconnect)
                self._recurrent_kernel = K.dropout(self.recurrent_kernel, self.dropconnect)
            else:
                self._kernel = self.kernel
                self._recurrent_kernel = self.recurrent_kernel

    def one_step(self, inputs, states):
        """Single recurrence step for K.rnn.

        ``states`` is ``[c_last, h_last]``; returns the step output together
        with the new ``[c_out, h_out]``.  The step output concatenates the
        hidden state with both master gates so call() can derive the level
        "distances" afterwards.
        """
        x_in, (c_last, h_last) = inputs, states
        # Joint projection: (batch, units * 4 + levels * 2).
        x_out = K.dot(x_in, self._kernel) + K.dot(h_last, self._recurrent_kernel)
        x_out = K.bias_add(x_out, self.bias)
        # Master forget gate: cumax left-to-right over the first `levels` columns,
        # expanded to (batch, levels, 1) for broadcasting against the chunks.
        f_master_gate = cumsoftmax(x_out[:, :self.levels], 'l2r')
        f_master_gate = K.expand_dims(f_master_gate, 2)
        # Master input gate: cumax right-to-left over the next `levels` columns.
        i_master_gate = cumsoftmax(x_out[:, self.levels: self.levels * 2], 'r2l')
        i_master_gate = K.expand_dims(i_master_gate, 2)
        # The remaining units * 4 columns are the standard LSTM gate blocks,
        # regrouped into (batch, levels * 4, chunk_size).
        x_out = x_out[:, self.levels * 2:]
        x_out = K.reshape(x_out, (-1, self.levels * 4, self.chunk_size))
        f_gate = K.sigmoid(x_out[:, :self.levels])
        i_gate = K.sigmoid(x_out[:, self.levels: self.levels * 2])
        o_gate = K.sigmoid(x_out[:, self.levels * 2: self.levels * 3])
        c_in = K.tanh(x_out[:, self.levels * 3:])
        # View the previous cell state as (batch, levels, chunk_size) chunks.
        c_last = K.reshape(c_last, (-1, self.levels, self.chunk_size))
        # Where the master gates overlap, do a regular LSTM update; outside the
        # overlap, chunks are either carried over (forget side) or freshly
        # written (input side) — the paper's ordered-neuron cell update.
        overlap = f_master_gate * i_master_gate
        c_out = overlap * (f_gate * c_last + i_gate * c_in) + \
                (f_master_gate - overlap) * c_last + \
                (i_master_gate - overlap) * c_in
        h_out = o_gate * K.tanh(c_out)
        # Flatten chunks back to (batch, units).
        c_out = K.reshape(c_out, (-1, self.units))
        h_out = K.reshape(h_out, (-1, self.units))
        # Append both master gates (dropping the broadcast dim) to the output.
        out = K.concatenate([h_out, f_master_gate[..., 0], i_master_gate[..., 0]], 1)
        return out, [c_out, h_out]

    def call(self, inputs):
        # Zero-initialized [c, h] states matching the dynamic batch size.
        initial_states = [
            K.zeros((K.shape(inputs)[0], self.units)),
            K.zeros((K.shape(inputs)[0], self.units))
        ]  # initial states (all zeros)
        # K.rnn returns (last_output, full_output_sequence, new_states).
        outputs = K.rnn(self.one_step, inputs, initial_states)
        # Per-step syntactic "distances" recovered from the master-gate means
        # that one_step appended after the hidden state; stored as side-channel
        # attributes rather than returned.
        self.distance = 1 - K.mean(outputs[1][..., self.units: self.units + self.levels], -1)
        self.distance_in = K.mean(outputs[1][..., self.units + self.levels:], -1)
        if self.return_sequences:
            # Full sequence of hidden states, master gates stripped.
            return outputs[1][..., :self.units]
        else:
            # Hidden state of the last step only.
            return outputs[0][..., :self.units]

    def compute_output_shape(self, input_shape):
        if self.return_sequences:
            return (input_shape[0], input_shape[1], self.units)
        else:
            return (input_shape[0], self.units)


class OnLstmStudent(StudentModel):
    """Distillation student built from a stack of ONLSTM layers."""

    def __init__(self, in_pretrain=False, model_dir=None, distill_from_teacher=True, do_quantize=False):
        super(OnLstmStudent, self).__init__(
            in_pretrain=in_pretrain,
            model_dir=model_dir,
            distill_from_teacher=distill_from_teacher,
            do_quantize=do_quantize)

    def create_student_model(self):
        """Build the Keras student model: embedding -> dropout -> ONLSTM stack -> dense classifier."""
        logger.warning('creating onlstm model...')
        assert 'distill' in self.config and self.config['distill'], 'section of distill in task_config.yml is invalid!'
        emb_dim = self.hidden_size
        n_tokens = self.tokenizer.vocab_size
        # Pretraining uses the base model's maximum positions; otherwise the task max_len.
        if self.in_pretrain:
            self.seq_len = self.model_config.max_position_embeddings
        else:
            self.seq_len = self.config['max_len']
        # The embedding is only trainable while pretraining or fine-tuning the base.
        base_trainable = self.in_pretrain or self.fine_tune_base
        input_ids = tf.keras.layers.Input((self.seq_len,), name='input_ids', dtype='int32')
        hidden = tf.keras.layers.Embedding(n_tokens, emb_dim, trainable=base_trainable,
                                           name='embedding')(input_ids)
        hidden = tf.keras.layers.Dropout(self.config['distill_train']['dropout_prob'])(hidden)
        # Stack one ONLSTM per configured layer size; -1 means "use the hidden size".
        for idx, layer_size in enumerate(self.config['distill']['layer_sizes']):
            onlstm_units = emb_dim if layer_size == -1 else layer_size
            hidden = ONLSTM(onlstm_units, 16, return_sequences=True, dropconnect=0.25,
                            in_train=self.train_mode, name=f'base_{idx}')(hidden)
        logits = tf.keras.layers.Dense(
            self.model_config.num_labels, name="classifier",
            kernel_initializer=get_initializer(self.model_config.initializer_range)
        )(hidden)
        return tf.keras.models.Model(inputs=[input_ids], outputs=[logits], name=self.model_type)
