# -*- coding: utf-8 -*-
"""
Created on Sun Feb 21 14:38:46 2021

@author: LI

#    tensorboard --logdir=./log --bind_all
"""
#import tensorflow as tf
import tensorflow.compat.v1 as tf   #2.x version
tf.disable_v2_behavior()
import numpy as np
import sys, os
from tensorflow.keras import layers



def variable_summaries(var):
    """Attach a TensorBoard histogram summary to *var* (used for weights/biases)."""
    with tf.name_scope('summaries'):
        tf.summary.histogram('histogram', var)


class CNNNET():
    """
    VGG-style CNN base class built on the tf.compat.v1 graph API.

    Holds the input/label tensors and hyper-parameters; a subclass (or an
    override of buildCNN_net) is expected to build the layers and set
    ``self.netout`` (pre-softmax logits), after which ``computeOut()``
    attaches the prediction, accuracy, loss and train ops to the graph.
    """
    def __init__(self, x, y, keepPro, learnrate, optimizer, is_bn, is_train, version, weightRate, weight_decay=0):
        '''
        Args:
            x: input tensor; assumed NHWC [batch, height, width, channel] -- TODO confirm
            y: one-hot label tensor, shape [batch, num_classes]
            keepPro: dropout keep probability
            learnrate: learning rate for the optimizer
            optimizer: one of "Adam" / "SGD" / "Adagrad" / "Momentum" /
                "MomentumNAG" / "RMSProp" / "Adadelta"
            is_bn: master switch for batch normalization (e.g. is_bn=False)
            is_train: training-phase flag (e.g. is_train=True); drives BN statistics
            version: numeric model version, stored as a graph constant
            weightRate: per-class loss weights, e.g. [1,1,1,1,1]
            weight_decay: L2 regularization factor; 0 disables L2
        '''
        self.X = x
        self.Y = y
        self.C = int(x.get_shape()[-1])    # input channel count
        self.KEEPPRO = keepPro
        self.learnRate = learnrate
        self.CLASSNUM = y.get_shape()[-1]
        self.optimizer = optimizer
        self.version = tf.constant(version, dtype=float, name="version")

        self.weightRate = weightRate    # [1,1,1,1,1]
        # build CNN
        # self.buildCNN_net4()
        activation_method_list = ['relu', 'sigmoid']
        # NOTE(review): index 1 deliberately selects 'sigmoid' as the default
        # hidden activation -- confirm this is intended rather than 'relu'.
        self.activation_method = activation_method_list[1]

        self.global_step = tf.Variable(initial_value=0, name='global_step', trainable=False)

        self.weight_decay = weight_decay   # for weight L2 normalization,    =0 not L2
        self.is_bn = is_bn
        self.is_train = is_train

    def buildCNN_net(self):
        """Build the model graph (to be provided by a subclass); must set self.netout."""
        pass
###########################################################################################
    # convolution layer
    def convLayer(self, x, kHeight, kWidth, strideX, strideY,
              featureNum, name, padding = "SAME", is_bn=False, is_training=True):
        """Convolution (+ optional BN) followed by the configured activation.

        BUGFIX: the batch-normalized tensor used to be computed and then
        discarded (the activation was applied to the raw conv output, making
        is_bn a silent no-op); the activation is now applied to the BN result.
        """
        channel = int(x.get_shape()[-1])
        with tf.variable_scope(name) as scope:
            w = self.weight_variable(shape = [kHeight, kWidth, channel, featureNum], name=scope)
            b = self.bias_variable(shape = [featureNum], name=scope)
            # NHWC strides are [1, stride_height(Y), stride_width(X), 1]
            featureMap = tf.nn.conv2d(x, w, strides = [1, strideY, strideX, 1], padding = padding) + b
            if is_bn:
                featureMap = self.bnLayer(featureMap, name=name + '_bn', is_train=is_training)
            return self.activation_func(featureMap, method=self.activation_method)

    def convBasic(self, X, kHeight, kWidth, strideX, strideY, kernelNum, padding='SAME',
                  activation='relu',
                  batch_normal=True,
                  name='conv'):
        """
        Convolution with a hand-rolled batch normalization (per-batch moments,
        no moving averages -- NOTE(review): this normalizes with the current
        batch statistics even at inference time).

        BUGFIX: strides were [1, strideX, strideY, 1]; NHWC expects the height
        stride first, so this now matches convLayer: [1, strideY, strideX, 1].
        (No effect for the common strideX == strideY case.)
        eg.
            weight_decay = 1e-4
            shape=[n_size, n_size, X[3], n_filter]
        """
        w, h, c = int(X.get_shape()[1]), int(X.get_shape()[2]), int(X.get_shape()[3])
        with tf.variable_scope(name) as scope:
            # weight matrix; He-style stddev derived from the input volume
            channel = int(X.get_shape()[-1])
            shape = [kHeight, kWidth, channel, kernelNum]
            stddev = np.sqrt(2.0 / (w * h * c))
            weight = self.weight_variable(shape=shape, name=name, stddev=stddev)

            # bias vector (doubles as the BN beta/shift term below)
            shape = [kernelNum]
            bias = self.bias_variable(shape=shape, name=name, init_val=0.0)

            # hidden states
            conv = tf.nn.conv2d(input=X, filter=weight, strides=[1, strideY, strideX, 1], padding=padding)
            # batch normalization
            if batch_normal and self.is_bn:
                # BN parameters
                self.epsilon = 1e-5
                self.gamma = tf.Variable(initial_value=tf.constant(1.0, shape=[kernelNum]),name='gamma_%s' % (name))
                mean, variance = tf.nn.moments(conv, axes=[0, 1, 2], keep_dims=False)
                hidden = tf.nn.batch_normalization(conv, mean, variance, bias, self.gamma, self.epsilon)
            else:
                hidden = conv + bias
            # activation
            return self.activation_func(hidden, method=activation)

    def conv2d_bn_keras(self, x, kHeight, kWidth, stride, kernelNum, padding='SAME',
                  activation='relu',
                  batch_normal=True,
                  name='conv'):
        """Conv2D + BN via tf.keras layers (no conv bias; BN provides the shift)."""
        with tf.variable_scope(name) as scope:
            x = layers.Conv2D(kernelNum, (kHeight, kWidth), strides=stride, padding=padding, use_bias=False, name=scope)(x)
            if batch_normal:
                x = layers.BatchNormalization(axis=3, scale=False, name='bn')(x)
            return self.activation_func(x, method=activation)

    # pooling layers
    def maxPoolLayer(self,x, kHeight, kWidth, strideX, strideY, name, padding = "SAME"):
        """Max pooling.

        BUGFIX: strides were [1, strideX, strideY, 1]; reordered to
        [1, strideY, strideX, 1] so the height stride comes first (NHWC),
        consistent with convLayer. No effect when strideX == strideY.
        """
        return tf.nn.max_pool(x, ksize = [1, kHeight, kWidth, 1],
                          strides = [1, strideY, strideX, 1], padding = padding, name = name)

    def avgPoolLayer(self,x, strideX, strideY, name, padding = "VALID"):
        """Global average pooling: the kernel covers the full spatial extent of x."""
        _, kHeight, kWidth, _2 = x.get_shape()
        return tf.nn.avg_pool(x, ksize = [1, kHeight, kWidth, 1], strides = [1, strideX, strideY, 1], padding = padding, name = name)

    def activation_func(self, x, method='relu'):
        """Apply the named activation ('sigmoid'/'relu'/'tanh'); exits the process on an unknown name."""
        if method == 'sigmoid':
            return tf.nn.sigmoid(x)
        elif method=='relu':
            return tf.nn.relu(x)
        elif method == 'tanh':
            return tf.nn.tanh(x)
        else:
            print('activation method error !!!')
            sys.exit(0)

    def dropout(self,x, keepPro, name = None):
        """Dropout wrapper.

        BUGFIX: `name` was previously passed positionally, which fed it into
        tf.nn.dropout's third parameter (noise_shape), not `name`; it is now
        passed by keyword.
        """
        return tf.nn.dropout(x, keepPro, name=name)

    def fcLayer(self, x, inputD, outputD, reluFlag, name):
        """Fully-connected layer: x @ w + b, with the configured activation when reluFlag."""
        with tf.variable_scope(name) as scope:
            w = self.weight_variable(shape = [inputD, outputD], name=scope)
            b = self.bias_variable(shape=[outputD], name=scope)
            out = tf.matmul(x,w)+b   # matrix multiply + bias
            if reluFlag:
                return self.activation_func(out, method=self.activation_method)
            else:
                return out

    # weight initialization
    def weight_variable(self, shape, name, mean=0.0, stddev=0.1, is_L2=True):
        """Create a truncated-normal weight variable.

        When self.weight_decay > 0 and is_L2, also registers an L2 penalty
        (tf.nn.l2_loss(w) * weight_decay) in REGULARIZATION_LOSSES, which
        computeOut() later adds to the loss.
        """
        with tf.variable_scope(name) as scope:
            initial = tf.truncated_normal(shape, mean=mean, stddev=stddev)
            w = tf.Variable(initial, dtype=tf.float32, name="weights")
            if self.weight_decay > 0 and is_L2:
                weight_loss = tf.nn.l2_loss(w) * self.weight_decay
                tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, value=weight_loss)
        return w

    # bias initialization
    def bias_variable(self, shape, name, init_val=0.0):
        """Create a constant-initialized bias variable named 'biases' under `name`."""
        with tf.variable_scope(name) as scope:
            initial = tf.constant(init_val, shape=shape)
            return tf.Variable(initial, dtype=tf.float32, name="biases")

    def get_weights_varibale(self, layer_name):
        """Fetch the 'weights' variable created under `layer_name`.
        (Method name typo 'varibale' kept for caller compatibility.)"""
        with tf.variable_scope(layer_name, reuse=True):
            variable = tf.get_variable('weights')
        return variable

    ########   bn  with update ###
    def bnLayer(self, X, name, is_train):
        """Batch normalization over the channel axis of an NHWC tensor.

        BUGFIX: axis was 4, which is out of range for a rank-4
        [batch, height, width, channel] input; the channel axis is 3 (as the
        original inline comment already stated). Also removed an unreachable
        `return self.bn_layer_top(...)` that followed the first return.
        """
        return tf.layers.batch_normalization(X, axis=3, name=name, training=is_train)

    def bn_layer(self, x, scope, is_training, epsilon=0.001, decay=0.99, reuse=None):
        """
        Performs a batch normalization layer
        Args:
            x: input tensor
            scope: scope name
            is_training: python boolean value
            epsilon: the variance epsilon - a small float number to avoid dividing by 0
            decay: the moving average decay
            reuse: passed to tf.variable_scope (True when re-entering the scope)
        Returns:
            The ops of a batch normalization layer
        """
        with tf.variable_scope(scope, reuse=reuse):
            shape = x.get_shape().as_list()
            # gamma: a trainable scale factor
            gamma = tf.get_variable("gamma", shape[-1], initializer=tf.constant_initializer(1.0), trainable=True)
            # beta: a trainable shift value
            beta = tf.get_variable("beta", shape[-1], initializer=tf.constant_initializer(0.0), trainable=True)
            # non-trainable moving statistics used at inference time
            moving_avg = tf.get_variable("moving_avg", shape[-1], initializer=tf.constant_initializer(0.0),
                                         trainable=False)
            moving_var = tf.get_variable("moving_var", shape[-1], initializer=tf.constant_initializer(1.0),
                                         trainable=False)
            if is_training:
                # tf.nn.moments == Calculate the mean and the variance of the tensor x
                avg, var = tf.nn.moments(x, np.arange(len(shape) - 1), keep_dims=True)
                avg = tf.reshape(avg, [avg.shape.as_list()[-1]])
                var = tf.reshape(var, [var.shape.as_list()[-1]])
                # exponential moving average update of the stored statistics
                update_moving_avg = tf.assign(moving_avg, moving_avg * decay + avg * (1 - decay))
                update_moving_var = tf.assign(moving_var, moving_var * decay + var * (1 - decay))
                control_inputs = [update_moving_avg, update_moving_var]
            else:
                avg = moving_avg
                var = moving_var
                control_inputs = []
            # ensure the moving statistics are updated whenever the output is used
            with tf.control_dependencies(control_inputs):
                output = tf.nn.batch_normalization(x, avg, var, offset=beta, scale=gamma, variance_epsilon=epsilon)
        return output

    def bn_layer_top(self, x, scope, is_training, epsilon=0.001, decay=0.99):
        """
        Returns a batch normalization layer that automatically switch between train and test phases based on the
        tensor is_training
        Args:
            x: input tensor
            scope: scope name
            is_training: boolean tensor or variable
            epsilon: epsilon parameter - see bn_layer
            decay: decay parameter - see bn_layer
        Returns:
            The correct batch normalization layer based on the value of is_training
        """
        # assert isinstance(is_training, (ops.Tensor, variables.Variable)) and is_training.dtype == tf.bool
        return tf.cond(
            is_training,
            lambda: self.bn_layer(x=x, scope=scope, epsilon=epsilon, decay=decay, is_training=True, reuse=None),
            lambda: self.bn_layer(x=x, scope=scope, epsilon=epsilon, decay=decay, is_training=False, reuse=True),
        )

    def computeOut(self):
        """Attach prediction, accuracy, weighted cross-entropy loss (+ optional
        L2 penalty) and the chosen optimizer's train op to the graph.
        Requires self.netout (pre-softmax logits) to have been set."""
        # output layer: class probabilities
        self.prediction=tf.nn.softmax(self.netout, name="softmax")
        # per-sample correctness: argmax of prediction vs argmax of one-hot label
        correct_prediction = tf.equal(tf.argmax(self.prediction, 1), tf.argmax(self.Y, 1))
        # accuracy = mean of the boolean vector cast to float
        # NOTE(review): scope name 'accurary' is a typo but is kept so existing
        # checkpoints/summaries keep their node names.
        with tf.name_scope('accurary') as scope:
            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),name='accuracy')
            tf.summary.scalar(scope, self.accuracy)

        # cross-entropy loss, weighted per class by self.weightRate
        with tf.name_scope('cross_entropy'):
            cost = tf.multiply(tf.nn.softmax_cross_entropy_with_logits(labels=self.Y, logits=self.netout), self.weightRate) #  right
            self.cross_entropy=tf.reduce_mean(cost, name='loss_value')
            tf.summary.scalar('cross_entropy_scalar', self.cross_entropy)

            if self.weight_decay > 0:
                # add the accumulated L2 penalties registered by weight_variable
                weight_loss_op = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
                weight_loss_op = tf.add_n(weight_loss_op)
                self.cross_entropy += weight_loss_op

        # run BN moving-average updates (UPDATE_OPS) together with every train step
        update_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_op):
            if self.optimizer == "Adam":
                self.train_step=tf.train.AdamOptimizer(self.learnRate).minimize(self.cross_entropy, global_step=self.global_step, name='train_opt')
                #self.train_step = tf.train.AdamOptimizer(0.00001).minimize(cross_entropy)      #0.001
            elif self.optimizer == "SGD":
                self.train_step = tf.train.GradientDescentOptimizer(0.2).minimize(self.cross_entropy, global_step=self.global_step, name='train_opt')
            elif self.optimizer == "Adagrad":
                self.train_step = tf.train.AdagradOptimizer(0.01).minimize(self.cross_entropy, global_step=self.global_step, name='train_opt')
            elif self.optimizer == "Momentum":  # lr = 0.01
                self.train_step = tf.train.MomentumOptimizer(learning_rate=self.learnRate, momentum=0.9).minimize(self.cross_entropy, name='train_opt')
            elif self.optimizer == "MomentumNAG":
                self.train_step = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9, use_nesterov=True).minimize(self.cross_entropy, name='train_opt')
            elif self.optimizer == "RMSProp":
                self.train_step = tf.train.RMSPropOptimizer(0.01).minimize(self.cross_entropy, name='train_opt')
            elif self.optimizer == "Adadelta":
                self.train_step = tf.train.AdadeltaOptimizer(1).minimize(self.cross_entropy, name='train_opt')
            else:
                print("optimizer error:  ", self.optimizer )
                sys.exit()


    # def load_weights(self, weight_file, sess):
    #     weights = np.load(weight_file)
    #     keys = sorted(weights.keys())
    #     for i, k in enumerate(keys):
    #         print(i, k, np.shape(weights[k]))
    #         sess.run(self.parameters[i].assign(weights[k]))