# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.rnn_cell_impl import RNNCell
import collections

class BNLSTMCell(RNNCell):
    """LSTM cell with optional per-timestep batch (re)normalization.

    The recurrent state is an ``LSTMStateTuple(t, c, h)`` where ``t`` is a
    step counter used to index per-timestep population statistics, and
    ``c`` / ``h`` are the usual LSTM cell and hidden states.

    Supported modes:
      * ``"std"``  -- plain LSTM, no normalization.
      * ``"bn"``   -- batch normalization of the input-to-hidden and
        hidden-to-hidden projections and of the new cell state, keeping
        separate population statistics for each of the first
        ``num_steps`` timesteps.
      * ``"bren"`` -- batch renormalization: same three normalizations,
        with clipped r/d correction terms and debiased running
        statistics.
    """

    def __init__(self, num_units, num_steps, training,
        forget_bias=1., mode="std", momentum=.8, gamma_scale=.1,
        epsilon=1e-3, r_max=3., d_max=5., renorm_mom=.9):
        """Create the cell.

        Args:
          num_units: int, number of LSTM units.
          num_steps: int, number of timesteps for which separate
            population statistics are stored.
          training: Python bool or scalar boolean tensor; selects batch
            statistics (and EMA updates) vs. frozen population statistics.
          forget_bias: float added to the forget-gate pre-activation.
          mode: one of "std", "bn", "bren" (see class docstring).
          momentum: EMA momentum for the population mean/variance.
          gamma_scale: initial value of the BN scale parameter (a small
            gamma keeps tanh/sigmoid out of saturation early in training).
          epsilon: numerical-stability constant for normalization.
          r_max: clip bound for the renorm scale correction r.
          d_max: clip bound for the renorm shift correction d.
          renorm_mom: EMA momentum for the renorm running mean/std.
        """
        self._num_units = num_units
        self._training = training
        self._num_steps = num_steps
        self._mode = mode
        self._forget_bias = forget_bias
        self._momentum = momentum
        self._gamma_scale = gamma_scale
        self._epsilon = epsilon
        self._r_max = r_max
        self._d_max = d_max
        self._renorm_mom = renorm_mom

    @property
    def state_size(self):
        # First slot is the (size-1) step counter, then cell and hidden state.
        return LSTMStateTuple(1, self._num_units, self._num_units)

    @property
    def output_size(self):
        return self._num_units

    def __call__(self, x, state, scope=None):
        """Run one LSTM step.

        Args:
          x: input tensor of shape [batch, input_size].
          state: LSTMStateTuple(step, c, h) from the previous timestep.
          scope: optional variable scope name.

        Returns:
          (output, new_state) where output is the new hidden state and
          new_state is an LSTMStateTuple with the step counter advanced.
        """
        with tf.variable_scope(scope or type(self).__name__):
            step, c, h = state

            x_size = x.get_shape().as_list()[1]
            W_xh = tf.get_variable('W_xh',
                [x_size, 4 * self._num_units],
                initializer=tf.contrib.keras.initializers.glorot_uniform())
            W_hh = tf.get_variable('W_hh',
                [self._num_units, 4 * self._num_units],
                initializer=tf.contrib.keras.initializers.glorot_uniform())
            # One shared bias is enough: the two projections are summed, and
            # any per-projection bias would be absorbed by the BN beta anyway.
            bias_xh = tf.get_variable('bias_xh', [4 * self._num_units],
                initializer=tf.zeros_initializer())

            xh = tf.matmul(x, W_xh)
            hh = tf.matmul(h, W_hh)

            # Normalize the two projections separately (as in the recurrent
            # BN literature), then add the bias after normalization.
            normed_xh = self._batch_norm(xh, step, 'xh')
            normed_hh = self._batch_norm(hh, step, 'hh')

            hidden = normed_xh + normed_hh + bias_xh

            # Gate order (i, g, f, o) is internal; only consistency matters.
            i, g, f, o = tf.split(hidden, 4, axis=1)

            new_c = c * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i) * tf.tanh(g)
            # The cell state is normalized only on the output path; the
            # recurrent carry `new_c` itself stays un-normalized.
            normed_new_c = self._batch_norm(new_c, step, 'c')
            new_h = tf.tanh(normed_new_c) * tf.sigmoid(o)

            return new_h, LSTMStateTuple(step + 1, new_c, new_h)


    def _batch_norm(self, x, step, name_scope):
        """Normalize `x` according to `self._mode`, using per-step statistics.

        Args:
          x: tensor of shape [batch, size] to normalize.
          step: timestep index used to select the row of the per-step
            population-statistics variables.
            NOTE(review): `step` comes from the recurrent state, whose
            declared size is [batch, 1]; indexing `pop_mean_all[step]`
            only works for a scalar step -- confirm callers feed a scalar
            initial step value.
          name_scope: suffix distinguishing the 'xh' / 'hh' / 'c' variables.

        Returns:
          The (possibly) normalized tensor, same shape as `x`.

        Raises:
          ValueError: if `self._mode` is not "std", "bn" or "bren".
        """
        if self._mode == "std":
            # No normalization at all.
            return x
        elif self._mode == "bn":
            with tf.variable_scope("%s_%s" % (self._mode, name_scope)):
                size = x.get_shape().as_list()[1]

                gamma = tf.get_variable('gamma', [size],
                    initializer=tf.constant_initializer(self._gamma_scale))
                beta = tf.get_variable('beta', [size],
                    initializer=tf.zeros_initializer())

                # One row of population statistics per timestep.
                pop_mean_all = tf.get_variable('pop_mean_all',
                    [self._num_steps, size], initializer=tf.zeros_initializer(), trainable=False)
                pop_var_all = tf.get_variable('pop_var_all',
                    [self._num_steps, size], initializer=tf.ones_initializer(), trainable=False)

                pop_mean = pop_mean_all[step]
                pop_var =  pop_var_all[step]
                batch_mean, batch_var = tf.nn.moments(x, [0])

                # EMA updates write back into the sliced variable row
                # (TF1 supports assigning to a variable strided slice).
                train_mean_op = tf.assign(pop_mean,
                    pop_mean * self._momentum + batch_mean * (1 - self._momentum))
                train_var_op = tf.assign(pop_var,
                    pop_var * self._momentum + batch_var * (1 - self._momentum))

            def batch_statistics():
                # Training path: use batch stats and tie in the EMA updates.
                with tf.control_dependencies([train_mean_op, train_var_op]):
                    return tf.nn.batch_normalization(x, batch_mean, batch_var,
                        beta, gamma, self._epsilon)
            def population_statistics():
                # Inference path: frozen per-step population stats.
                return tf.nn.batch_normalization(x, pop_mean, pop_var,
                    beta, gamma, self._epsilon)

            if isinstance(self._training, bool):
                # Static flag: build only the branch we need.
                if self._training:
                    return batch_statistics()
                else:
                    return population_statistics()
            else:
                # Dynamic flag: select the branch at run time.
                return tf.cond(self._training, batch_statistics, population_statistics)

        elif self._mode == "bren":
            with tf.variable_scope("%s_%s" % (self._mode, name_scope)):
                size = x.get_shape().as_list()[1]

                gamma = tf.get_variable('gamma', [size],
                    initializer=tf.constant_initializer(self._gamma_scale))
                beta = tf.get_variable('beta', [size],
                    initializer=tf.zeros_initializer())
                # Scalar debiasing weight (as in EMA bias correction): starts
                # at 0 and approaches 1, so early renorm stats lean on the
                # current batch.
                renorm_weight = tf.get_variable('renorm_weight', [],
                    initializer=tf.zeros_initializer(), trainable=False)

                pop_mean_all = tf.get_variable('pop_mean_all',
                    [self._num_steps, size], initializer=tf.zeros_initializer(), trainable=False)
                pop_var_all = tf.get_variable('pop_var_all',
                    [self._num_steps, size], initializer=tf.ones_initializer(), trainable=False)
                renorm_mean_all = tf.get_variable('renorm_mean_all',
                    [self._num_steps, size], initializer=tf.zeros_initializer(), trainable=False)
                renorm_std_all = tf.get_variable('renorm_std_all',
                    [self._num_steps, size], initializer=tf.ones_initializer(), trainable=False)

                pop_mean = pop_mean_all[step]
                pop_var =  pop_var_all[step]
                renorm_mean = renorm_mean_all[step]
                renorm_std =  renorm_std_all[step]
                batch_mean, batch_var = tf.nn.moments(x, [0])

                batch_std = tf.sqrt(batch_var + self._epsilon)
                # Debiased blend of the running stats with the current batch;
                # with weight ~0 this reduces to the batch statistics.
                mixed_renorm_mean = renorm_mean + (1 - renorm_weight) * batch_mean
                mixed_renorm_std = renorm_std + (1 - renorm_weight) * batch_std

                # Renorm correction terms: r rescales, d shifts.  Both are
                # clipped and excluded from backprop, per the batch-renorm
                # recipe.
                r = batch_std / mixed_renorm_std
                r = tf.stop_gradient(tf.clip_by_value(r, 1 / self._r_max, self._r_max))
                d = (batch_mean - mixed_renorm_mean) / mixed_renorm_std
                d = tf.stop_gradient(tf.clip_by_value(d, -self._d_max, self._d_max))

                renorm_new_mean = renorm_mean * self._renorm_mom + batch_mean * (1 - self._renorm_mom)
                renorm_new_std = renorm_std * self._renorm_mom + batch_std * (1 - self._renorm_mom)
                renorm_new_weight = renorm_weight * self._renorm_mom + 1 - self._renorm_mom

                # Debias the running estimates before folding them into the
                # population statistics.
                new_mean = renorm_new_mean / renorm_new_weight
                new_std = renorm_new_std / renorm_new_weight
                new_var = tf.square(new_std) - self._epsilon

                # Fold the affine BN parameters into the r/d terms:
                # y = gamma * (x_hat * r + d) + beta
                #   = (r * gamma) * x_hat + (d * gamma + beta).
                scale = r
                center = d
                if gamma is not None:
                    scale *= gamma
                    center *= gamma
                if beta is not None:
                    center += beta

                if isinstance(self._training, bool):
                    if self._training:
                        m = batch_mean
                        v = batch_var
                    else:
                        m = pop_mean
                        v = pop_var
                else:
                    m = tf.cond(self._training, lambda: batch_mean, lambda: pop_mean)
                    v = tf.cond(self._training, lambda: batch_var, lambda: pop_var)

                update_pop_mean = tf.assign(pop_mean,
                    pop_mean * self._momentum + new_mean * (1 - self._momentum))
                update_pop_var = tf.assign(pop_var,
                    pop_var * self._momentum + new_var * (1 - self._momentum))
                update_renorm_mean = tf.assign(renorm_mean, renorm_new_mean)
                update_renorm_std = tf.assign(renorm_std, renorm_new_std)
                update_weight = tf.assign(renorm_weight, renorm_new_weight)
                updates = [update_pop_mean, update_pop_var,
                    update_renorm_mean, update_renorm_std,
                    update_weight]

            def batch_statistics():
                # Training path: renormalized output plus all EMA updates.
                with tf.control_dependencies(updates):
                    return tf.nn.batch_normalization(x, m, v, center, scale, self._epsilon)
            def population_statistics():
                # Inference path: plain BN with population stats (no r/d).
                return tf.nn.batch_normalization(x, m, v, beta, gamma, self._epsilon)
            if isinstance(self._training, bool):
                if self._training:
                    return batch_statistics()
                else:
                    return population_statistics()
            else:
                return tf.cond(self._training, batch_statistics, population_statistics)

        else:
            raise ValueError("Unknown LSTM mode")

_LSTMStateTuple = collections.namedtuple("LSTMStateTuple", ("t", "c", "h"))


class LSTMStateTuple(_LSTMStateTuple):
  """Recurrent state of BNLSTMCell: (t, c, h).

  `t` is the step counter, `c` the cell state and `h` the hidden state.
  """
  __slots__ = ()

  @property
  def dtype(self):
    """dtype shared by `c` and `h`.

    Raises:
      TypeError: if `c` and `h` have different dtypes.
    """
    # BUGFIX: the tuple has three fields; unpacking into two names
    # raised "ValueError: too many values to unpack" on every access.
    (t, c, h) = self
    if not c.dtype == h.dtype:
      raise TypeError("Inconsistent internal state: %s vs %s" %
                      (str(c.dtype), str(h.dtype)))
    return c.dtype