#!/usr/bin/python3
# -*- coding: utf-8 -*-
# https://github.com/dennissm/mtgru
# modify by Ross
import abc
import os
import re

import numpy as np
import tensorflow as tf
from keras.utils import to_categorical
from tensorflow.python.ops.rnn_cell import RNNCell, DropoutWrapper, LayerRNNCell
from tensorflow.contrib.layers import fully_connected


class MTGRUCell(RNNCell):
    """A Multi-Timescale GRU (MTGRU) cell.

    A GRU variant with an additional trainable scalar timescale ``tau`` that
    blends the previous hidden state with the GRU candidate state:

        new_h = (1 - tau_clipped) * h + tau_clipped * gru_update(x, h)

    where ``tau_clipped`` is ``tau`` clipped to [1.0, 9999].

    NOTE(review): MTGRU formulations in the literature typically mix with
    ``1/tau`` rather than ``tau`` itself; with tau > 1 this update puts a
    negative weight on ``h``. Preserved as-is — confirm against the reference
    implementation before changing.
    """

    def __init__(self, num_units, tau_init, reuse=None, name=None, dtype=tf.float32, **kwargs):
        """
        :param num_units: number of hidden units (state size == output size)
        :param tau_init: initial value for the trainable timescale scalar
        :param reuse: variable-reuse flag forwarded to the base RNNCell
        :param name: layer name forwarded to the base RNNCell
        :param dtype: dtype of the cell's variables
        """
        super(MTGRUCell, self).__init__(
            _reuse=reuse, name=name, dtype=dtype, **kwargs)

        self._num_units = num_units
        self.tau_init = tau_init

    @property
    def state_size(self):
        return self._num_units

    @property
    def output_size(self):
        return self._num_units

    def build(self, inputs_shape):
        """Create the cell's variables once the input depth is known."""
        if inputs_shape[-1] is None:
            raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
                             % inputs_shape)
        input_depth = inputs_shape[-1]

        # Trainable scalar timescale, clipped into [1.0, 9999] in the forward pass.
        self.tau = self.add_variable('MTGRUCell%s' % 'tau',
                                     [],
                                     tf.float32,
                                     initializer=tf.constant_initializer(self.tau_init, dtype=tf.float32))
        self.tau_clipped = tf.clip_by_value(self.tau, 1.0, 9999)
        # Fused input->(z, r) and hidden->(z, r) weights for the two GRU gates.
        self.W_xh = self.add_variable('MTGRUCell%s' % 'W_xh',
                                      [input_depth, 2 * self._num_units],
                                      initializer=orthogonal_initializer())
        self.W_hh = self.add_variable('MTGRUCell%s' % 'W_hh',
                                      [self._num_units, 2 * self._num_units],
                                      initializer=orthogonal_initializer())
        self.bias = self.add_variable('MTGRUCell%s' % 'bias', [2 * self._num_units],
                                      initializer=tf.initializers.zeros())
        # Weights for the candidate-state ("u") computation.
        self.W_xu = self.add_variable('MTGRUCell%s' % 'W_xu',
                                      [input_depth, 1 * self._num_units],
                                      initializer=orthogonal_initializer())
        self.W_hu = self.add_variable('MTGRUCell%s' % 'W_hu',
                                      [self._num_units, 1 * self._num_units],
                                      initializer=orthogonal_initializer())
        self.bias1 = self.add_variable('MTGRUCell%s' % 'bias1', [1 * self._num_units],
                                       initializer=tf.initializers.zeros())

        self.built = True

    def call(self, inputs, state):
        """Run one step: standard GRU update, then timescale mixing with tau."""
        h = state
        # Compute both gates in a single fused matmul over [inputs, h].
        concat = tf.concat([inputs, h], 1)
        W_both = tf.concat([self.W_xh, self.W_hh], 0)

        hidden = tf.matmul(concat, W_both) + self.bias

        z, r = tf.split(hidden, 2, axis=1)
        z, r = tf.sigmoid(z), tf.sigmoid(r)  # update gate z, reset gate r

        # Candidate state: reset gate r modulates the recurrent contribution.
        u = tf.tanh(tf.nn.bias_add((tf.matmul(inputs, self.W_xu) + tf.matmul(h * r, self.W_hu)), self.bias1))
        new_h = z * h + (1 - z) * u

        # Timescale mixing (see class docstring for the tau semantics caveat).
        new_h = (1 - self.tau_clipped) * h + self.tau_clipped * new_h
        return new_h, new_h

    def get_config(self):
        """Return the config needed to rebuild the cell via from_config()."""
        config = {
            "num_units": self._num_units,
            # BUG FIX: tau_init was missing from the config, so from_config()
            # failed — it is a required positional argument of __init__.
            "tau_init": self.tau_init,
            "reuse": self._reuse,
        }
        base_config = super(MTGRUCell, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


def orthogonal(shape):
    """Sample a random (row-)orthonormal matrix and reshape it to *shape*.

    Uses the SVD of an i.i.d. Gaussian matrix, whose U and V factors are
    orthonormal; whichever factor matches the flattened shape is returned.
    """
    rows = shape[0]
    cols = np.prod(shape[1:])
    gaussian = np.random.normal(0.0, 1.0, (rows, cols))
    u, _, v = np.linalg.svd(gaussian, full_matrices=False)
    picked = u if u.shape == (rows, cols) else v
    return picked.reshape(shape)


def orthogonal_initializer():
    """Return a TF1-style initializer callable yielding orthogonal weights.

    The inner callable follows the TensorFlow initializer signature
    (shape, dtype, partition_info); partition_info is accepted but unused.
    """
    def _initializer(shape, dtype=tf.float32, partition_info=None):
        matrix = orthogonal(shape)
        return tf.constant(matrix, dtype)

    return _initializer


class CNNRNNBase:
    """Base class for CNN+RNN text models.

    Owns the input placeholder, the global-step variable, and the shared
    convolution + max-pooling front end (get_cnn).

    NOTE(review): the @abc.abstractmethod decorators below are not enforced
    because this class does not use abc.ABC / abc.ABCMeta; subclasses that
    omit an override will still instantiate without error.
    """

    def __init__(self, num_filters: int, filter_sizes: list, sequence_length: int, embedding_size: int, max_pool_size=3,
                 use_embedding_layer=False):
        # CNN hyper-parameters.
        self.filter_sizes = filter_sizes
        self.sequence_length = sequence_length
        self.embedding_size = embedding_size
        self.num_filters = num_filters
        self.max_pool_size = max_pool_size
        self.use_embedding_layer = use_embedding_layer
        # With an embedding layer the input is token ids; otherwise it is
        # already-embedded float vectors of shape (batch, seq, embedding).
        if self.use_embedding_layer:
            self.input = tf.placeholder(tf.int32, (None, self.sequence_length), name='input')
        else:
            self.input = tf.placeholder(tf.float32, (None, self.sequence_length, self.embedding_size), name='input')
        self.output = None
        self.global_step = tf.Variable(0, trainable=False, name='global_step')

    @abc.abstractmethod
    def build(self):
        """Construct the model graph; implemented by subclasses."""
        return

    def get_cnn(self, embedded):
        """
        Convolution + max-pool front end, adapted from:
        https://github.com/dennybritz/cnn-text-classification-tf/blob/master/text_cnn.py

        :param embedded: the embedded input tensor
        :return: list of max-pooled convolution outputs, one per filter size
        """
        pooled_outputs = []
        for i, filter_size in enumerate(self.filter_sizes):
            with tf.name_scope("conv-maxpool-%s" % filter_size):
                # Convolution Layer
                filter_shape = [filter_size, self.embedding_size, 1, self.num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[self.num_filters]), name="b")
                conv = tf.nn.conv2d(
                    embedded,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")

                # Pad the time axis back to its original length (VALID conv
                # shrinks it by filter_size - 1):
                # [batch_size , time_step - filter_size + 1, embedding_dim, 1] -> [batch_size , time_step, embedding_dim, 1]
                h = tf.pad(h, [[0, 0], [filter_size - 1, 0], [0, 0], [0, 0]])
                # pooled = tf.reduce_max(h, 3, name='pool')

                # Maxpooling over the outputs
                # NOTE(review): pooling is pinned to CPU here — presumably a
                # workaround for a GPU kernel issue; confirm before removing.
                with tf.device("/cpu:0"):
                    pooled = tf.nn.max_pool(
                        h,
                        ksize=[1, self.max_pool_size, 1, 1],
                        strides=[1, self.max_pool_size, 1, 1],
                        padding='VALID',
                        name="pool")
                pooled_outputs.append(pooled)
        return pooled_outputs

    @abc.abstractmethod
    def predict(self, sess: tf.Session, x):
        """Return predictions for x; implemented by subclasses."""
        return

    @abc.abstractmethod
    def compute_accuracy(self, sess: tf.Session, x, y):
        """Return accuracy of predictions on (x, y); implemented by subclasses."""
        return

    @abc.abstractmethod
    def compute_cost(self, sess: tf.Session, x, y):
        """Return the loss on (x, y); implemented by subclasses."""
        return

    @abc.abstractmethod
    def train(self, sess, x, y):
        """Run one optimization step on (x, y); implemented by subclasses."""
        return


class CNNLMTGRU(CNNRNNBase):
    """CNN front end followed by two parallel MTGRU branches (slow/fast),
    concatenated into a 31-class softmax classifier."""

    def __init__(self, num_filters, filter_sizes, max_pool_size, rnn_units, sequence_length, embedding_size,
                 learning_rate=0.0001, dropout_keep_prob=0.5, use_embedding_layer=False, vocab_size=-1):
        """
        :param num_filters: number of CNN convolution filters
        :param filter_sizes: convolution window sizes
        :param max_pool_size: max-pooling window size
        :param rnn_units: number of RNN units
        :param sequence_length: length of the time dimension
        :param embedding_size: embedding dimensionality
        :param learning_rate: learning rate, default 0.0001
        :param dropout_keep_prob: keep probability for CNN and RNN dropout
        :param use_embedding_layer: whether the model uses an embedding layer
        :param vocab_size: vocabulary size (required when use_embedding_layer)
        """
        super(CNNLMTGRU, self).__init__(num_filters, filter_sizes, sequence_length, embedding_size, max_pool_size,
                                        use_embedding_layer)
        # One-hot labels over a hard-coded 31 classes.
        self.label = tf.placeholder(tf.int32, shape=(None, 31), name='label')
        self.dropout_keep_prob_tensor = tf.placeholder(tf.float32, name='dropout_keep_prob')
        self.rnn_units = rnn_units
        self.learning_rate = learning_rate
        self.dropout_keep_prob = dropout_keep_prob
        self.vocab_size = vocab_size
        self.build()
        self.saver = tf.train.Saver()

    def build(self):
        """Build the full graph: CNN -> slow/fast MTGRU -> dense -> softmax loss."""
        # Optional embedding-lookup layer for integer token-id input.
        if self.use_embedding_layer:
            assert self.vocab_size > 0
            word_embeddings = tf.get_variable('embedding_W', [self.vocab_size, self.embedding_size], tf.float32,
                                tf.initializers.random_uniform(-1, 1))
            embedded_word_ids = tf.nn.embedding_lookup(word_embeddings, self.input)
            x_expanded = tf.expand_dims(embedded_word_ids, -1)
        else:
            x_expanded = tf.expand_dims(self.input, -1)
        cnn_outputs = self.get_cnn(x_expanded)
        # [ <tf.Tensor 'conv-maxpool-3/pool:0' shape=(?, 1, 1, 64) dtype=float32>] * 3
        cnn_outputs = tf.concat(cnn_outputs, -1)
        cnn_outputs = tf.squeeze(cnn_outputs, 2)
        cnn_dropped = tf.nn.dropout(cnn_outputs, self.dropout_keep_prob_tensor)

        # Slow LMTGRU branch: tau initialized to 1.25 (tau itself is a
        # trainable variable inside the cell).
        # slow_tau = tf.Variable(tf.constant(1.25, dtype=tf.float32), trainable=True, name='slow_tau')
        # slow_tau = tf.clip_by_value(slow_tau, 1.0, 999999999, name='slow_tau_clipped')
        # print(id(slow_tau))
        slow_tau = 1.25
        slow_cell = MTGRUCell(self.rnn_units, slow_tau)

        # Fast LMTGRU branch: tau initialized to 1.0.
        # fast_tau = tf.Variable(tf.constant(1.0, dtype=tf.float32), trainable=True, name='fast_tau')
        # fast_tau = tf.get_variable('fast_tau', 1, tf.float32, tf.constant_initializer(1.0), trainable=True)
        # fast_tau = tf.clip_by_value(fast_tau, 1.0, 999999999, name='fast_tau_clipped')
        fast_tau = 1.0
        fast_cell = MTGRUCell(self.rnn_units, fast_tau)

        # The two branches run independently over the same CNN features;
        # distinct scopes keep their variables separate.
        slow_outputs, slow_states = tf.nn.dynamic_rnn(
            DropoutWrapper(slow_cell, state_keep_prob=self.dropout_keep_prob_tensor), cnn_dropped,
            dtype=tf.float32, scope='slow')
        fast_outputs, fast_states = tf.nn.dynamic_rnn(
            DropoutWrapper(fast_cell, state_keep_prob=self.dropout_keep_prob_tensor), cnn_dropped, dtype=tf.float32,
            scope='fast')
        # multirnn_cell = tf.nn.rnn_cell.MultiRNNCell([slow_cell, fast_cell])
        # rnn_outputs, rnn_states = tf.nn.dynamic_rnn(
        #     DropoutWrapper(multirnn_cell, state_keep_prob=self.dropout_keep_prob_tensor), cnn_dropped, dtype=tf.float32)

        # Classify from the concatenated final states of both branches.
        states_concat = tf.concat([slow_states, fast_states], axis=1)

        # self.logits = tf.layers.dense(states_concat, 2, kernel_initializer=tf.initializers.random_uniform(-0.5, 0.5),
        #                               bias_initializer=tf.initializers.zeros())
        # self.logits = tf.layers.Dense(2)(states_concat)
        # self.logits = fully_connected(states_concat, 2)
        logits_W = tf.get_variable('logits_W', [states_concat.get_shape()[-1], 31], tf.float32,
                                   initializer=tf.initializers.glorot_normal())
        logits_b = tf.get_variable('logits_b', [31], tf.float32, initializer=tf.initializers.zeros())

        self.logits = tf.nn.bias_add(tf.matmul(states_concat, logits_W), logits_b)
        self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.label, logits=self.logits))
        self.output = tf.argmax(self.logits, axis=1)
        self.acc = tf.reduce_mean(tf.cast(tf.equal(self.output, tf.argmax(self.label, 1)), tf.float32))

        self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate)
        self.opt_step = self.optimizer.minimize(self.loss, self.global_step)

        # Track the loss and both (trainable) tau scalars in TensorBoard.
        # tf.summary.scalar('fast_tau', fast_tau)
        # tf.summary.scalar('slow_tau', slow_tau)
        tf.summary.scalar('loss', self.loss)
        tf.summary.scalar('slow_tau', slow_cell.tau)
        tf.summary.scalar('fast_tau', fast_cell.tau)
        self.merge = tf.summary.merge_all()

    def train(self, sess: tf.Session, x, y, summary: tf.summary.FileWriter = None):
        """Run one optimization step; optionally record merged summaries."""
        if summary:
            _, history = sess.run([self.opt_step, self.merge], feed_dict={self.input: x, self.label: y,
                                                                          self.dropout_keep_prob_tensor: self.dropout_keep_prob})
            summary.add_summary(history, sess.run(self.global_step))
        else:
            _ = sess.run(self.opt_step, feed_dict={self.input: x, self.label: y,
                                                   self.dropout_keep_prob_tensor: self.dropout_keep_prob})

    def compute_loss(self, sess: tf.Session, x, y):
        """Return the mean softmax cross-entropy loss on (x, y), dropout off."""
        loss = sess.run(self.loss, feed_dict={self.input: x, self.label: y, self.dropout_keep_prob_tensor: 1.0})
        return loss

    def predict(self, sess: tf.Session, x):
        """Return predicted class indices (argmax of logits), dropout off."""
        pred = sess.run(self.output, feed_dict={self.input: x, self.dropout_keep_prob_tensor: 1.0})
        return pred

    def compute_accuracy(self, sess: tf.Session, x, y):
        """Return classification accuracy on (x, y), dropout off."""
        acc = sess.run(self.acc, feed_dict={self.input: x, self.label: y, self.dropout_keep_prob_tensor: 1.0})
        return acc

    def start_or_continue_training(self, sess: tf.Session, checkpoint_dir):
        """
        Load the latest checkpoint from the directory if one exists; otherwise
        initialize all variables.
        :param sess: active session
        :param checkpoint_dir: directory holding the checkpoints
        :return: the best dev accuracy so far (parsed from the checkpoint
                 filename, which save() embeds as 'model-acc<float>.ckpt'),
                 or 0 when starting fresh
        """
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt:
            model_path = ckpt.model_checkpoint_path
            # Recover the dev accuracy encoded in the checkpoint filename.
            max_dev_acc = float(re.findall(r'\d+\.\d+', model_path)[0])
            # for v in tf.global_variables():
            #     print(v)

            self.saver.restore(sess, model_path)
            print("load checkpoint:", model_path)

        else:
            max_dev_acc = 0
            sess.run(tf.global_variables_initializer())
        return max_dev_acc

    def save(self, sess, acc, save_dir='saved_model'):
        """Save a checkpoint named after the given accuracy (see
        start_or_continue_training, which parses it back out)."""
        os.makedirs(save_dir, exist_ok=True)
        save_path = os.path.join(save_dir, 'model-acc{:4f}.ckpt'.format(acc))
        self.saver.save(sess, save_path, global_step=self.global_step)
        print('saved model to:', save_path)


if __name__ == '__main__':
    # Smoke test: build the graph and create random data matching the
    # model's placeholders.
    #
    # BUG FIX: the original call CNNLMTGRU(64, [3, 4, 5], 64, [20, 400])
    # raised TypeError — sequence_length and embedding_size are required
    # positional parameters and were missing, and [20, 400] landed in
    # rnn_units. Also, labels must be one-hot over 31 classes to match the
    # (None, 31) label placeholder, not 2.
    model = CNNLMTGRU(num_filters=64, filter_sizes=[3, 4, 5], max_pool_size=3,
                      rnn_units=64, sequence_length=20, embedding_size=400)
    Xs = np.random.random((512, 20, 400))
    Ys = np.random.randint(0, 31, (512, 1))
    Ys = to_categorical(Ys, 31)

    # with tf.Session() as sess:
    #     sess.run(tf.global_variables_initializer())
    #     for _ in range(1000):
    #         model.train(sess, Xs, Ys)
    #
    #         if _ % 10 == 0:
    #             acc = model.compute_accuracy(sess, Xs, Ys)
    #             loss = model.compute_loss(sess, Xs, Ys)
    #             print('acc:', acc)
    #             print('loss:', loss)
