import math
import numpy as np
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import batch_norm

# Randomly-initialized weights
def get_weight(name,shape,stddev=0.02,trainable=True):
   """Create (or reuse, under variable scope) a float32 weight variable
   initialized from a zero-mean normal distribution with the given stddev."""
   initializer = tf.random_normal_initializer(stddev=stddev, dtype=tf.float32)
   return tf.get_variable(
       name,
       shape,
       dtype=tf.float32,
       trainable=trainable,
       initializer=initializer)

# Constant-initialized bias
def get_bias(name,shape,bias_start=0.0,trainable=True):
    """Create (or reuse, under variable scope) a float32 bias variable
    filled with the constant `bias_start`."""
    initializer = tf.constant_initializer(bias_start, dtype=tf.float32)
    return tf.get_variable(
        name,
        shape,
        dtype=tf.float32,
        trainable=trainable,
        initializer=initializer)

# Fully-connected layer
def full_connected(name,value,output_shape,with_w=False):
    """Fully-connected (dense) layer: value @ W + b.

    Args:
        name: variable scope name.
        value: 2-D input tensor, assumed shape [batch, in_dim] — the
               in_dim is read from value.get_shape()[1].
        output_shape: number of output units (int).
        with_w: if True, also return the weight and bias variables.

    Returns:
        The activation tensor, or (weights, biases, activation) when
        with_w is True.
    """
    with tf.variable_scope(name):
        shape=value.get_shape().as_list()
        weights=get_weight('weights_fc',[shape[1],output_shape],0.02)
        biases=get_bias('biases_fc',[output_shape],0.0)

        # BUG FIX: the original computed tf.matmul(weights, value), which
        # is dimensionally wrong for value [batch, in] x weights [in, out].
        # The correct product is value @ weights.
        output=tf.nn.bias_add(tf.matmul(value,weights),biases)

        if with_w:
            return weights,biases,output
        else:
            return output

# Convolution
def conv2d(input_data,output_shape,k_h,k_w,d_h,d_w,name='conv2d'):
    """2-D convolution with SAME padding plus a per-channel bias.

    Args:
        input_data: NHWC input tensor.
        output_shape: number of output channels (int).
        k_h, k_w: kernel height/width.
        d_h, d_w: vertical/horizontal strides.
        name: variable scope name.

    Returns:
        The biased convolution output (same static shape as the conv).
    """
    with tf.variable_scope(name):
        weights=get_weight('weights_conv',[k_h,k_w,input_data.get_shape()[-1],output_shape],0.02)
        biases=get_bias('biases_conv',[output_shape],0.0)

        conv=tf.nn.conv2d(input_data,weights,strides=(1,d_h,d_w,1),padding='SAME')
        # bias_add preserves the tensor shape, so the original
        # tf.reshape back to conv.get_shape() was a no-op and is dropped.
        return tf.nn.bias_add(conv,biases)

# Transposed convolution (deconvolution)
def conv2d_transform(input_data,output_shape,k_h,k_w,d_h,d_w,name='transform_conv2d',with_w=False):
    """2-D transposed convolution plus a per-channel bias.

    Args:
        input_data: NHWC input tensor.
        output_shape: full output shape list [batch, h, w, out_channels].
        k_h, k_w: kernel height/width.
        d_h, d_w: vertical/horizontal strides.
        name: variable scope name.
        with_w: if True, also return the weight and bias variables.

    Returns:
        The biased deconv output, or (output, weights, biases) when
        with_w is True.
    """
    with tf.variable_scope(name):
        # BUG FIX: conv2d_transpose filters are [h, w, out_ch, in_ch];
        # the original built [h, w, in_ch, out_ch] (the conv2d layout).
        weights=get_weight('weights_deconv',
                           [k_h,k_w,output_shape[-1],input_data.get_shape()[-1]],0.02)
        # BUG FIX: the bias must have one element per output channel;
        # the original wrapped the whole output_shape list in another list.
        biases=get_bias('biases_deconv',[output_shape[-1]],0.0)

        # The original try/except had two byte-identical branches; removed.
        deconv=tf.nn.conv2d_transpose(input_data,weights,output_shape,strides=[1,d_h,d_w,1])

        result=tf.reshape(tf.nn.bias_add(deconv,biases),deconv.get_shape())

        # BUG FIX: the original computed `result` but returned the
        # un-biased `deconv`, silently dropping the bias.
        if with_w:
           return result,weights,biases
        else:
           return result

#batch-norm
# Normalization
# decay: moving-average decay
# scale: if True the result is multiplied by gamma, otherwise not
# is_train: True means training — statistics are updated;
# False means inference — statistics are frozen
def batch_norm(name,value,is_train=True):
    """Batch normalization wrapper around tf.contrib's batch_norm layer."""
    # BUG FIX: this function shadows the module-level `batch_norm` import,
    # so the original body called itself recursively until stack overflow.
    # Import the contrib layer under an alias at function scope instead.
    from tensorflow.contrib.layers.python.layers import batch_norm as bn_layer
    with tf.variable_scope(name) as scope:
        # The original if/else branches were byte-identical; is_training
        # already carries the train/inference distinction, so collapse them.
        return bn_layer(value,decay=0.9,scale=True,epsilon=1e-5,
                        is_training=is_train,scope=scope)

def relu(value):
    """Element-wise rectified-linear activation: max(value, 0)."""
    return tf.nn.relu(value)

def lrelu(value):
    """Element-wise leaky ReLU (tf.nn.leaky_relu default slope, 0.2)."""
    return tf.nn.leaky_relu(value)

def conv_out_size_same(size,stride):
    """Spatial output size of a SAME-padded, strided conv: ceil(size/stride)."""
    ratio = float(size) / float(stride)
    return int(math.ceil(ratio))

def element_wise_sum(value1,value2):
    """Element-wise sum via numpy broadcasting; returns an ndarray."""
    total = np.add(value1, value2)
    return total

def pixel_shuffer(input_data):
    """2x pixel shuffle (sub-pixel upsampling) for an NHWC tensor:
    rearranges channel blocks of 4 into 2x2 spatial blocks, halving the
    channel count and doubling height and width."""
    # BUG FIX: tf.nn.sufficient_statistics computes count/mean/variance
    # statistics — it is not a pixel shuffle. depth_to_space is TF's
    # sub-pixel (pixel-shuffle) op. The original op name also contained
    # a space, which is not a valid TensorFlow op name.
    return tf.depth_to_space(input_data, 2, name='pixel_shuffer_x2')

