# -*- coding: utf-8 -*-
# @Author: lidongdong
# @time  : 18-12-16 10:33 PM
# @file  : operations.py

import tensorflow as tf


class BatchNorm(object):
    """Batch-normalization layer that tracks exponential moving averages of
    the batch mean/variance for use at inference time.

    Code modification of http://stackoverflow.com/a/33950177
    """
    def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm"):
        """
        Args:
            epsilon: small float added to the variance to avoid division by zero.
            momentum: decay rate for the moving averages of mean and variance.
            name: variable-scope name under which beta/gamma are created.
        """
        with tf.variable_scope(name):
            self.epsilon = epsilon
            self.momentum = momentum

            self.ema = tf.train.ExponentialMovingAverage(decay=self.momentum, name=name+"expontial")
            self.name = name

    def __call__(self, x, train=True, reuse=False):
        """Apply batch normalization to ``x``.

        Args:
            x: input tensor; statistics are reduced over every axis except
                the last (channel) axis.
            train: if True, normalize with the current batch statistics and
                update the moving averages; if False, normalize with the
                stored moving averages.
            reuse: if True, reuse previously created beta/gamma variables
                instead of creating new ones.

        Returns:
            The normalized tensor, same shape as ``x``.

        NOTE(review): calling with train=False before any train=True call
        will fail, because beta/gamma and the moving averages are only
        created on the training path — confirm callers always train first.
        """
        shape = x.get_shape().as_list()

        if train:
            with tf.variable_scope(self.name) as scope:
                if reuse:
                    scope.reuse_variables()
                self.beta = tf.get_variable("beta", [shape[-1]],
                                            initializer=tf.constant_initializer(0.))
                self.gamma = tf.get_variable("gamma", [shape[-1]],
                                             initializer=tf.random_normal_initializer(1., 0.02))

                # Reduce over every axis except the channel axis. The original
                # code tried axes [0, 1, 2] and fell back to [0, 1] under a
                # bare `except:`; deriving the axes from the input rank gives
                # the same result for 4-D and 3-D inputs, supports other
                # ranks, and no longer swallows unrelated errors.
                reduce_axes = list(range(len(shape) - 1))
                batch_mean, batch_var = tf.nn.moments(x, reduce_axes, name='moments')

                ema_apply_op = self.ema.apply([batch_mean, batch_var])
                self.ema_mean, self.ema_var = self.ema.average(batch_mean), self.ema.average(batch_var)

                # Tie the EMA update to the use of the batch statistics so the
                # averages are refreshed on every training step.
                with tf.control_dependencies([ema_apply_op]):
                    mean, var = tf.identity(batch_mean), tf.identity(batch_var)
        else:
            mean, var = self.ema_mean, self.ema_var

        normed = tf.nn.batch_norm_with_global_normalization(
            x, mean, var, self.beta, self.gamma, self.epsilon, scale_after_normalization=True)

        return normed


def deconv2d(input, output_shape, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name="deconv2d", with_w=False):
    """Transposed ("de-")convolution layer.

    Args:
        input: 4-D tensor [batch, height, width, in_channels].
        output_shape: target output shape [batch, height, width, out_channels].
        k_h, k_w: kernel height and width.
        d_h, d_w: strides along height and width.
        stddev: unused; kept for signature compatibility (the kernel uses
            glorot-normal initialization instead).
        name: variable-scope name.
        with_w: if True, also return the kernel and bias variables.

    Returns:
        The transposed-convolution output tensor, or (output, w, bias)
        when ``with_w`` is True.
    """
    with tf.variable_scope(name):
        # BUG FIX: the kernel shape was [k_h, k_h, ...], so k_w was ignored
        # and non-square kernels were silently wrong; use [k_h, k_w, ...].
        w = tf.get_variable("W", [k_h, k_w, output_shape[-1], input.get_shape()[-1]],
                            initializer=tf.glorot_normal_initializer())
        deconv = tf.nn.conv2d_transpose(input, w, output_shape=output_shape, strides=[1, d_h, d_w, 1])
        bias = tf.get_variable("b", [output_shape[-1]], initializer=tf.constant_initializer(0.0))
        # Reshape bias to [1, 1, out_channels] so it broadcasts over the
        # spatial dimensions when added below.
        bias = tf.expand_dims(bias, axis=0)
        bias = tf.expand_dims(bias, axis=0)

        deconv = deconv + bias
        if not with_w:
            return deconv
        return deconv, w, bias


def conv2d(input, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name="conv2d"):
    """2-D convolution layer with SAME padding and a learned bias.

    Args:
        input: 4-D tensor [batch, height, width, in_channels].
        output_dim: number of output channels.
        k_h, k_w: kernel height and width.
        d_h, d_w: strides along height and width.
        stddev: unused; kept for signature compatibility (the kernel uses
            glorot-normal initialization instead).
        name: variable-scope name.

    Returns:
        Tensor of shape [batch, height/d_h, width/d_w, output_dim].
    """
    with tf.variable_scope(name):
        # Renamed from `filter` to avoid shadowing the Python builtin.
        kernel = tf.get_variable("w", [k_h, k_w, input.shape[-1], output_dim],
                                 initializer=tf.glorot_normal_initializer())
        conv = tf.nn.conv2d(input, kernel, strides=[1, d_h, d_w, 1], padding="SAME")
        # Bias shaped [1, 1, output_dim] broadcasts over batch and spatial dims.
        bias = tf.get_variable("bias", [1, 1, output_dim], initializer=tf.constant_initializer(0.))
        return conv + bias
