#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Created by Ross on 19-3-19

import numpy as np
import tensorflow as tf
from model.AttentionLayer import AttentionLayer
tf.enable_eager_execution()


# class Embedding(tf.keras.layers.Layer):
#
#     def __init__(self, vocab_size, embedding_size, **kwargs):
#         super(Embedding, self).__init__(**kwargs)
#         self.vocab_size = vocab_size
#         self.embedding_size = embedding_size
#
#     def build(self, input_shape):
#         self.embedding = self.add_variable('embedding_matrix', (self.vocab_size, self.embedding_size), tf.float32,
#                                            tf.initializers.random_uniform(-0.1, 0.1), trainable=True)
#
#     def call(self, inputs):
#         return tf.nn.embedding_lookup(self.embedding, inputs, name='embedded')


class JointModel(tf.keras.Model):
    """Joint domain-classification model with an optional auxiliary tagging task.

    A Bi-LSTM encodes the input sequence; its outputs feed
      * a sentence-level domain classifier over ``num_class`` domains, and
      * optionally a token-level POS tagger over ``ntags`` tags (when
        ``fake_task == 'POS'``), trained/decoded with a linear-chain CRF
        when ``use_crf`` is set.

    The sentence representation comes from an external ``AttentionLayer``,
    optionally preceded by scaled dot-product self-attention over the
    Bi-LSTM outputs.
    """

    def __init__(self, seq_maxlen, emb_size, rnn_size, rnn_keep_prob, domain_keep_prob, use_self_att, soft_att_size=64,
                 fake_task='None',
                 num_class=31, use_crf=False,
                 **kwargs):
        """
        :param seq_maxlen: maximum (padded) sequence length
        :param emb_size: embedding dimensionality (stored; not used directly in this class)
        :param rnn_size: hidden size of each LSTM direction
        :param rnn_keep_prob: keep probability for dropout over the Bi-LSTM outputs
        :param domain_keep_prob: keep probability for dropout before the domain logits
        :param use_self_att: whether to apply self-attention before the pooling attention
        :param soft_att_size: projection size of the soft-attention layer
        :param fake_task: auxiliary ("fake") task name, 'POS' or 'None'
        :param num_class: number of domain classes
        :param use_crf: whether the POS head uses a linear-chain CRF
        :param kwargs: must provide ``ntags`` when ``fake_task == 'POS'``
        :raises ValueError: if ``fake_task == 'POS'`` and ``ntags`` is missing
        """
        super(JointModel, self).__init__()
        self.seq_maxlen = seq_maxlen
        self.emb_size = emb_size
        self.rnn_size = rnn_size
        self.rnn_keep_prob = rnn_keep_prob
        self.domain_keep_prob = domain_keep_prob
        self.use_self_att = use_self_att
        self.fake_task = fake_task
        self.num_class = num_class
        self.use_crf = use_crf
        self.global_step = tf.Variable(0, trainable=False, name='global_step')
        self.soft_att_size = soft_att_size
        self.self_att_output = None  # last self-attention matrix (numpy), saved on demand
        self.soft_att_output = None  # last soft-attention weights (numpy), saved on demand
        if use_crf:
            self.trans_params = None  # CRF transition matrix, learned in tag_loss_func
        if fake_task == 'POS':
            if 'ntags' in kwargs:
                self.ntags = kwargs['ntags']
            else:
                raise ValueError('POS task should have the ntags keyword in **kwargs')

    def build(self, input_shape):
        """Create the Bi-LSTM cells, the output projections and the attention variables."""
        # batch_shape, seq_len_shape = input_shape
        # with tf.device("/cpu:0"):
        self.cell_fw = tf.nn.rnn_cell.LSTMCell(self.rnn_size)
        self.cell_bw = tf.nn.rnn_cell.LSTMCell(self.rnn_size)
        # Domain-classifier projection: [2*rnn_size] -> [num_class]
        self.W = tf.get_variable('logits_W', (2 * self.rnn_size, self.num_class), dtype=tf.float32,
                                 initializer=tf.initializers.random_normal())
        self.bias = tf.get_variable('logits_bias', (self.num_class,), dtype=tf.float32,
                                    initializer=tf.zeros_initializer())

        # Soft-attention parameters (Yang et al., 2016), used by soft_attention().
        self.w_omega = tf.Variable(tf.random_normal([2 * self.rnn_size, self.soft_att_size], stddev=1.0))
        self.b_omega = tf.Variable(tf.random_normal([self.soft_att_size], stddev=1.0))
        self.u_omega = tf.Variable(tf.random_normal([self.soft_att_size], stddev=1.0))

        if self.fake_task == 'POS':
            # POS-tagger projection: [2*rnn_size] -> [ntags]
            self.Wpos = tf.get_variable('pos_W', (2 * self.rnn_size, self.ntags), dtype=tf.float32,
                                        initializer=tf.initializers.random_normal())
            self.bpos = tf.get_variable('pos_bias', (self.ntags,), dtype=tf.float32, initializer=tf.zeros_initializer())

    def self_attention_fun(self, Q, K, seq_len, scaled_=True, masked_=False):
        """Compute scaled dot-product self-attention weights.

        :param Q: queries `[batch, seq_len, dim]`
        :param K: keys `[batch, seq_len, dim]`
        :param seq_len: true sequence lengths `[batch]`, used only when ``masked_``
        :param scaled_: divide the scores by sqrt(d_k)
        :param masked_: mask attention over padded key positions
        :return: attention matrix `[batch, seq_len, seq_len]`
        """
        Q_ = tf.layers.dense(Q, self.rnn_size * 2, use_bias=False)
        K_ = tf.layers.dense(K, self.rnn_size * 2, use_bias=False)

        attention = tf.matmul(Q_, K_, transpose_b=True)  # [batch_size, sequence_length, sequence_length]
        # attention = tf.matmul(Q, tf.transpose(K, [0, 2, 1]), transpose_b=True)  # [batch_size, sequence_length, sequence_length]

        if scaled_:
            d_k = tf.cast(tf.shape(K)[-1], dtype=tf.float32)
            attention = tf.divide(attention, tf.sqrt(d_k))  # [batch_size, sequence_length, sequence_length]

        if masked_:
            # Push padded key positions to -inf so softmax assigns them ~0 weight.
            mask = tf.sequence_mask(seq_len, self.seq_maxlen, tf.float32)
            mask = tf.expand_dims(mask, axis=1)  # [batch, 1, seq_len]
            mask = tf.broadcast_to(mask, attention.shape)  # [batch, seq_len, seq_len]
            attention = attention - (1 - mask) * 1e10

        # FIX: `dim` is deprecated in tf.nn.softmax; use `axis` instead.
        attention = tf.nn.softmax(attention, axis=-1)  # [batch_size, sequence_length, sequence_length]
        return attention

    def soft_attention(self, inputs, seq_len, time_major=False, return_alphas=False, masked_=False, save_att=False):
        """
        Attention mechanism layer which reduces RNN/Bi-RNN outputs with Attention vector.
        The idea was proposed in the article by Z. Yang et al., "Hierarchical Attention Networks
         for Document Classification", 2016: http://www.aclweb.org/anthology/N16-1174.
        Variables notation is also inherited from the article.

        The attention projection size is ``self.soft_att_size`` and the trained
        parameters are ``self.w_omega`` / ``self.b_omega`` / ``self.u_omega``
        (created in ``build``).

        Args:
            inputs: The Attention inputs.
                Matches outputs of RNN/Bi-RNN layer (not final state):
                    In case of RNN, this must be RNN outputs `Tensor`:
                        If time_major == False (default), this must be a tensor of shape:
                            `[batch_size, max_time, cell.output_size]`.
                        If time_major == True, this must be a tensor of shape:
                            `[max_time, batch_size, cell.output_size]`.
                    In case of Bidirectional RNN, this must be a tuple (outputs_fw, outputs_bw) containing the forward and
                    the backward RNN outputs `Tensor`.
            seq_len: true sequence lengths `[batch]`, used only when ``masked_``.
            time_major: The shape format of the `inputs` Tensors.
                If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
                If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
            return_alphas: Whether to return attention coefficients variable along with layer's output.
                Used for visualization purpose.
            masked_: mask the attention scores of padded positions before softmax.
            save_att: store the attention weights as numpy in ``self.soft_att_output``
                (requires eager execution, since ``.numpy()`` is called).
        Returns:
            The Attention output `Tensor`.
            In case of RNN, this will be a `Tensor` shaped:
                `[batch_size, cell.output_size]`.
            In case of Bidirectional RNN, this will be a `Tensor` shaped:
                `[batch_size, cell_fw.output_size + cell_bw.output_size]`.
        """

        if isinstance(inputs, tuple):
            # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
            inputs = tf.concat(inputs, 2)

        if time_major:
            # (T,B,D) => (B,T,D)
            inputs = tf.transpose(inputs, [1, 0, 2])

        with tf.name_scope('v'):
            # Applying fully connected layer with non-linear activation to each of the B*T timestamps;
            #  the shape of `v` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
            v = tf.tanh(tf.tensordot(inputs, self.w_omega, axes=1) + self.b_omega)

        # For each of the timestamps its vector of size A from `v` is reduced with `u` vector
        vu = tf.tensordot(v, self.u_omega, axes=1, name='vu')  # (B,T) shape
        if masked_:
            # Suppress padded positions before softmax.
            mask = tf.sequence_mask(seq_len, self.seq_maxlen, tf.float32)
            mask = tf.broadcast_to(mask, vu.shape)  # [batch, seq_len]
            vu = vu - (1 - mask) * 1e10
        alphas = tf.nn.softmax(vu, name='alphas')  # (B,T) shape
        if save_att:
            self.soft_att_output = alphas.numpy()
        # Output of (Bi-)RNN is reduced with attention vector; the result_and_model has (B,D) shape
        output = tf.reduce_sum(inputs * tf.expand_dims(alphas, -1), 1)

        if not return_alphas:
            return output
        else:
            return output, alphas

    # Currently unused (kept for reference).
    def attention(self, sequence: tf.Tensor):
        """Dense soft-attention over time steps; reweights `sequence` in place."""
        # [batch, dim, seq_time] * [seq_time, seq_time] ->[batch, dim, seq_time]
        alpha = tf.layers.dense(tf.transpose(sequence, [0, 2, 1]), self.seq_maxlen,
                                activation=tf.nn.softmax,
                                kernel_initializer=tf.initializers.random_normal(),
                                bias_initializer=tf.zeros_initializer())
        att = tf.reduce_mean(alpha, axis=1)  # [batch, dim, seq_time] -> [batch, seq_time]
        att = tf.expand_dims(att, axis=2)
        att = tf.broadcast_to(att, sequence.shape)
        return tf.multiply(sequence, att)  # [batch, seq_time, dim] x [batch, seq_time] -> [batch, seq_time, dim]

    def tag_loss_func(self, scores, y, seq_len):
        """
        Compute the loss of the fake (auxiliary tagging) task.

        :param scores: predicted per-timestep scores [batch, maxLen, ntags]
        :param y: gold tag ids [batch, maxLen]
        :param seq_len: sentence lengths [batch]
        :return: scalar fake-task loss
        """
        if self.use_crf:
            # Maximize the CRF log-likelihood; the learned transition matrix is
            # cached for Viterbi decoding at prediction time. CRF already
            # handles variable lengths via seq_len.
            log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(scores, y, seq_len, self.trans_params)
            self.trans_params = transition_params
            loss = tf.reduce_mean(-log_likelihood)
        else:
            # FIX: mask out padded time steps so they do not contribute to the
            # cross-entropy (this was flagged "TODO: NEED TO MASK" before).
            mask = tf.sequence_mask(seq_len, self.seq_maxlen, dtype=tf.float32)
            loss = tf.losses.sparse_softmax_cross_entropy(y, scores, weights=mask)
        return loss

    def domain_loss_func(self, logits, y):
        """
        Compute the domain-classification loss.

        :param logits: predicted scores [batch, nDomain]
        :param y: gold domain ids [batch]
        :return: scalar domain loss
        """
        return tf.losses.sparse_softmax_cross_entropy(y, logits)

    def call(self, input, is_training, save_att=False):
        """Forward pass.

        :param input: pair ``(batch_input, seq_len)`` — embedded inputs
            `[batch, seq_maxlen, emb_dim]` and true lengths `[batch]`
        :param is_training: enables dropout when True
        :param save_att: store attention matrices as numpy for visualization
        :return: ``logits`` `[batch, num_class]`, or ``(POS_scores, logits)``
            when ``fake_task == 'POS'``
        """
        batch_input, seq_len = input
        batch_input = tf.convert_to_tensor(batch_input, dtype=tf.float32)
        outputs, output_states = tf.nn.bidirectional_dynamic_rnn(self.cell_fw,
                                                                 self.cell_bw,
                                                                 batch_input,
                                                                 sequence_length=seq_len,
                                                                 dtype=tf.float32)
        H = tf.concat(outputs, 2)
        H = tf.layers.dropout(H, rate=1 - self.rnn_keep_prob, training=is_training)
        fw, bw = output_states
        output_h = tf.concat([fw.h, bw.h], 1)  # [batch, 2*rnn_size]

        if self.use_self_att:

            # Self-attention: Q, K, V are all the Bi-LSTM outputs.
            att = self.self_attention_fun(H, H, seq_len, masked_=True)  # [batch, maxlen, maxlen]
            if save_att:
                self.self_att_output = att.numpy()
            # [batch, maxlen, maxlen] x [batch, maxlen, 2*rnn_size] -> [batch, maxlen, 2*rnn_size]
            H_self_att = tf.matmul(att, H)

            # output_h_att = self.soft_attention(H_self_att, seq_len, masked_=False, save_att=save_att)  # [batch, 2*rnn_size]
            output_h_att, alpha = AttentionLayer(name='AttentionLayer')(H_self_att)
        else:
            # output_h_att = self.soft_attention(H, seq_len, masked_=False, save_att=save_att)  # [batch, 2*rnn_size]
            output_h_att, alpha = AttentionLayer(name='AttentionLayer')(H)

        # output_h = output_h + output_h_att  # residual connection
        output_h = output_h_att  # residual connection removed
        # output_h = tf.concat([output_h, output_h_att], axis=1)  # plain concatenation

        # Domain output layer.
        output_h_drop = tf.layers.dropout(tf.layers.flatten(output_h), rate=1 - self.domain_keep_prob,
                                          training=is_training)
        logits = tf.nn.bias_add(tf.matmul(output_h_drop, self.W), self.bias)

        # Predict POS tags.
        if self.fake_task == 'POS':
            POS_scores = tf.map_fn(lambda x: tf.nn.bias_add(tf.matmul(x, self.Wpos), self.bpos), H)
            return POS_scores, logits
        return logits

    def predict_domain(self, inputs, top_k=1, batch_size=32, save_att=False):
        """Predict domain labels (or top-k scores) for `inputs`, batch by batch.

        NOTE(review): each batch yielded by generate_batch is fed directly to
        ``call``, which unpacks it as ``(batch_input, seq_len)`` — confirm the
        caller passes data structured accordingly.

        :param inputs: iterable of model inputs (a ``zip`` is materialized first)
        :param top_k: return top-k (values, indices) when > 1, else argmax ids
        :param batch_size: prediction batch size
        :param save_att: store attention matrices for visualization
        """
        logits = []
        if isinstance(inputs, zip):
            inputs_ = [_ for _ in inputs]
        else:
            inputs_ = inputs
        for batch in self.generate_batch(inputs_, batch_size):
            if self.fake_task == 'None':
                logits_ = self(batch, is_training=False, save_att=save_att)
            else:
                _, logits_ = self(batch, is_training=False, save_att=save_att)
            if top_k > 1:
                logits.extend(logits_)
            else:
                logits.extend(tf.argmax(logits_, axis=1))
        if top_k > 1:
            return tf.nn.top_k(logits, top_k)
        return logits

    def predict_fake_task(self, inputs):
        """Predict the auxiliary-task (POS) tag sequences for `inputs`.

        :return: Viterbi paths (list of lists) when ``use_crf``, else argmax
            tag ids `[batch, maxLen]`
        :raises RuntimeError: if no fake task was configured
        :raises NotImplementedError: for fake tasks other than 'POS'
        """
        if self.fake_task == 'None':
            raise RuntimeError('No fake task configured; prediction is impossible')
        else:
            if self.fake_task == 'POS':
                tags, _ = self(inputs, is_training=False)
                if self.use_crf:
                    # With CRF, decode with Viterbi.
                    seq_lens = inputs[1]
                    viterbi_sequences = []

                    for tag, seq_len in zip(tags, seq_lens):
                        tag = tag[:seq_len]  # keep only the valid-length prefix
                        viterbi_seq, viterbi_score = tf.contrib.crf.viterbi_decode(
                            tag, self.trans_params)
                        viterbi_sequences.append(viterbi_seq)
                    return viterbi_sequences
                else:
                    return tf.argmax(tags, axis=2)
            else:
                # FIX: raising the NotImplemented constant is invalid; raise the
                # NotImplementedError exception class instead.
                raise NotImplementedError

    def generate_batch(self, data, batch_size=32):
        """
        Yield `data` in consecutive slices of at most `batch_size` items.
        """
        length = len(data)

        for start in range(0, length, batch_size):
            end = min(start + batch_size, length)
            yield data[start:end]
        # raise StopIteration()
        return  # make it compatible with python3.7


if __name__ == '__main__':
    # Smoke test: one forward pass on random data, then both loss functions.
    n_tags = 6
    max_len = 20
    batch = 10
    features = np.random.random((batch, max_len, 30)).astype(np.float32)
    gold_tags = tf.convert_to_tensor(np.random.randint(0, 6, (batch, max_len,), dtype=np.int32), dtype=tf.int32)
    gold_domains = np.random.randint(0, 31, (batch,), dtype=np.int32)
    lengths = np.random.randint(1, 10, (batch,), dtype=np.int32)
    model = JointModel(max_len, 2, 3, 0.9, 0.1, True, fake_task='POS', use_crf=True, ntags=n_tags)
    tag_scores, domain_logits = model([features, lengths], is_training=True, save_att=True)
    print(model.self_att_output)
    print(model.soft_att_output)
    model.tag_loss_func(tag_scores, gold_tags, lengths)
    model.domain_loss_func(domain_logits, gold_domains)