

import numpy as np
import sys, os, time

#import tensorflow as tf    #1.x
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
print("tensorflow:  ", tf.__version__)
from tensorflow.python.training import moving_averages


# MOVING_AVERAGE_DECAY = 0.9997
# BN_DECAY = MOVING_AVERAGE_DECAY         #0.9997
# BN_EPSILON = 0.001
# CONV_WEIGHT_DECAY = 0.00004
# CONV_WEIGHT_STDDEV = 0.1
# FC_WEIGHT_DECAY = 0.00004
# FC_WEIGHT_STDDEV = 0.01
# UPDATE_OPS_COLLECTION = 'resnet_update_ops'  # must be grouped with training op

# RESNET_VARIABLES = 'resnet_variables'

class RESIDUAL_NET():
    """ResNet-style residual network built with TF1 graph-mode ops.

    After construction, call one of the ResNet* builder methods to create
    the graph: they set self.prediction and (via optimizerLayer) the loss,
    accuracy and training ops.
    """
    def __init__(self, x,y, learnrate,optimizer, version, is_train, is_bn):
        # x: input image tensor, NHWC layout; y: one-hot labels [batch, classes]
        self.X = x
        self.Y = y
        # self.C = int(x.get_shape()[-1])

        self.learnRate = learnrate
        # number of classes taken from the static shape of the label tensor
        _, self.CLASSNUM = y.get_shape()
        self.CLASSNUM = int(self.CLASSNUM)
        self.optimizer = optimizer  # optimizer name, dispatched in optimizerLayer
        self.version = tf.constant(version, dtype=float, name="version")

        self.Is_bn = is_bn        # whether residual convs apply batch norm
        self.Is_train = is_train  # train/eval switch handed to batch norm
    def ResNet18_32x32(self):
        """Build an 18-layer-style residual network for 32x32 inputs.

        Reads self.X, sets self.prediction (softmax over self.CLASSNUM
        classes) and wires loss/accuracy/training ops via optimizerLayer().
        Aborts the process if the input is not 32x32.
        """
        n, h, w, c = self.X.get_shape()
        if h != 32 or w != 32:
            print("X input dim error: %d, %d, %d" % (h, w, c))
            sys.exit(0)

        # stage 1:  32x32 -> 16x16
        stage = 'stage1'
        with tf.name_scope(stage) as scope:
            X = self.residual_conv(self.X, w_size=[3,3], w_outchannel=16, stride=1, name=stage+"_1",
                                 padding='SAME', is_relu=True, in_training=self.Is_train, is_bn=self.Is_bn)
            X = self.maxPoolLayer(X, 3, 3, 2, 2, name=stage+'_max')

        # stage 2:  16x16 -> 8x8
        stage = 'stage2'
        with tf.name_scope(stage) as scope:
            filters = [32,32,64]
            X = self.convolutional_block2(X, 3, filters=filters, stage=stage, block="a", stride=2)
            X = self.identity_block2(X, 3, filters=filters, stage=stage, block="b")

        # stage 3:  8x8 -> 4x4
        stage = 'stage3'
        with tf.name_scope(stage) as scope:
            filters = [64,64,128]
            X = self.convolutional_block2(X, 3, filters=filters, stage=stage, block="a", stride=2)
            X = self.identity_block2(X, 3, filters=filters, stage=stage, block="b")

        # stage 4:  4x4 -> 2x2
        stage = 'stage4'
        with tf.name_scope(stage) as scope:
            filters = [128, 128, 256]
            X = self.convolutional_block2(X, 3, filters=filters, stage=stage, block="a", stride=2)
            X = self.identity_block2(X, 3, filters=filters, stage=stage, block="b")

        # final stage: global average pool -> flatten -> fully connected -> softmax
        stage = 'stage_final'
        with tf.name_scope(stage) as scope:
            # BUG FIX: the original used a fixed 7x7 VALID pooling window, but
            # the feature map at this point is only 2x2 (32->16->8->4->2), so
            # graph construction fails.  Pool over the map's actual spatial
            # extent instead — i.e. global average pooling, which is what the
            # fixed window achieves in the 112x112 builder.
            _, ph, pw, pc = X.get_shape()
            ph, pw, pc = int(ph), int(pw), int(pc)
            X = self.avgPoolLayer(X, ph, pw, 1, 1, name=stage, padding='VALID')   # -> 1x1xpc

            # BUG FIX: flat_len was computed as H*H*C via per-sample slicing
            # (X[0].shape[0]*X[1].shape[0]*X[0].shape[2]); after a full-extent
            # VALID pool the flattened length is just the channel count.
            flat_len = pc
            X = tf.reshape(X, [-1, flat_len])

            # fully connected classifier head
            X = self.fcLayer(X, int(flat_len), self.CLASSNUM, False, name='fc')
            self.prediction = tf.nn.softmax(X, name='softmax')

        self.optimizerLayer()

    def ResNet18_112x112(self):
        """Build an 18-layer-style residual network for 112x112 inputs.

        Reads self.X, sets self.prediction (softmax over self.CLASSNUM
        classes) and wires loss/accuracy/training ops via optimizerLayer().
        Aborts the process if the input is not 112x112.
        """
        X = self.X
        n, h, w, c = X.get_shape()
        if h != 112 or w != 112:
            print("X input dim error: %d, %d, %d" % (h, w, c))
            # BUG FIX: the 32x32 builder aborts on a bad input size; this one
            # silently continued and would fail later — abort for consistency.
            sys.exit(0)

        # stage 1:  112 -> 56
        stage = 'stage1'
        with tf.name_scope(stage) as scope:
            X = self.residual_conv(X, w_size=[3,3], w_outchannel=32, stride=1, name=stage+"_1",
                                 padding='SAME', is_relu=True, in_training=self.Is_train, is_bn=self.Is_bn)
            X = self.maxPoolLayer(X, 3, 3, 2, 2, name=stage+'_max')

        # stage 2:  56 -> 28
        stage = 'stage2'
        with tf.name_scope(stage) as scope:
            filters = [32,32,64]
            X = self.convolutional_block2(X, 3, filters=filters, stage=stage, block="a", stride=2)
            X = self.identity_block2(X, 3, filters=filters, stage=stage, block="b")

        # stage 3:  28 -> 14
        stage = 'stage3'
        with tf.name_scope(stage) as scope:
            filters = [64,64,128]
            X = self.convolutional_block2(X, 3, filters=filters, stage=stage, block="a", stride=2)
            X = self.identity_block2(X, 3, filters=filters, stage=stage, block="b")

        # stage 4:  14 -> 7
        stage = 'stage4'
        with tf.name_scope(stage) as scope:
            filters = [128, 128, 256]
            X = self.convolutional_block2(X, 3, filters=filters, stage=stage, block="a", stride=2)
            X = self.identity_block2(X, 3, filters=filters, stage=stage, block="b")

        # final stage:  7x7 average pool -> flatten -> fully connected -> softmax
        stage = 'stage_final'
        with tf.name_scope(stage) as scope:
            X = self.avgPoolLayer(X, 7, 7, 1, 1, name=stage, padding='VALID')    # -> 1x1x256

            # BUG FIX: flat_len was computed as H*H*C via per-sample slicing
            # (X[0].shape[0]*X[1].shape[0]*X[0].shape[2]), which is obscure and
            # only correct for square feature maps; derive it from the static
            # shape instead (same value here: 1*1*256).
            _, fh, fw, fc = X.get_shape()
            flat_len = int(fh) * int(fw) * int(fc)
            X = tf.reshape(X, [-1, flat_len])

            # fully connected classifier head
            X = self.fcLayer(X, int(flat_len), self.CLASSNUM, False, name='fc')
            self.prediction = tf.nn.softmax(X, name='softmax')

        self.optimizerLayer()

    def ResNet50(self):
        """Build a bottleneck-block (ResNet50-style) network for 28x28 inputs.

        Reads self.X (expected [-1, 28, 28, C]), sets self.prediction
        (softmax over self.CLASSNUM classes) and wires loss/accuracy/training
        ops via optimizerLayer().  Aborts the process on a wrong input size.
        """
        X = self.X
        n, h, w, c = X.get_shape()
        if h != 28 or w != 28:
            print("X input dim error: %d, %d, %d" % (h, w, c))
            # BUG FIX: abort instead of continuing to build a mis-sized graph
            # (matches the ResNet18_32x32 builder).
            sys.exit(0)

        # stage 1:  28x28 -> 14x14x64
        stage = 'stage1'
        with tf.name_scope(stage) as scope:
            X = self.residual_conv(X, w_size=[2,2], w_outchannel=64, stride=1, name=stage,
                                 padding='SAME', is_relu=True, in_training=self.Is_train, is_bn=self.Is_bn)
            X = self.maxPoolLayer(X, 3, 3, 2, 2, name=stage+'_max')

        # stage 2:  14x14x64 -> 14x14x128
        stage = 'stage2'
        with tf.name_scope(stage) as scope:
            filters = [64,64,128]
            X = self.convolutional_block(X, 3, filters=filters, stage=stage, block="a", stride=1)
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="b")
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="c")

        # stage 3:  14x14 -> 7x7x256
        stage = 'stage3'
        with tf.name_scope(stage) as scope:
            filters = [128,128,256]
            X = self.convolutional_block(X, 3, filters=filters, stage=stage, block="a", stride=2)
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="b")
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="c")
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="d")
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="e")

        # stage 4:  7x7x256 -> 7x7x64
        stage = 'stage4'
        with tf.name_scope(stage) as scope:
            filters = [256, 256, 64]
            X = self.convolutional_block(X, 3, filters=filters, stage=stage, block="a", stride=1)
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="b")
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="c")
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="d")

        # final stage:  average pool -> flatten -> fully connected -> softmax
        stage = 'stage_final'
        with tf.name_scope(stage) as scope:
            X = self.avgPoolLayer(X, 2, 2, 2, 2, name=stage)

            # BUG FIX: flat_len was computed as H*H*C via per-sample slicing
            # (X[0].shape[0]*X[1].shape[0]*X[0].shape[2]), which is obscure and
            # only correct for square feature maps; derive it from the static
            # shape instead (same value here).
            _, fh, fw, fc = X.get_shape()
            flat_len = int(fh) * int(fw) * int(fc)
            X = tf.reshape(X, [-1, flat_len])

            # fully connected classifier head
            X = self.fcLayer(X, int(flat_len), self.CLASSNUM, False, name='fc')
            self.prediction = tf.nn.softmax(X, name='softmax')

        self.optimizerLayer()
          
    def ResNet50_28x28(self):
        """Build a reduced bottleneck-block residual network for 28x28 inputs.

        Same layout as ResNet50() but with only two identity blocks in
        stage 3.  Reads self.X (expected [-1, 28, 28, C]), sets
        self.prediction and wires the training ops via optimizerLayer().
        Aborts the process on a wrong input size.
        """
        X = self.X
        n, h, w, c = X.get_shape()
        if h != 28 or w != 28:
            print("X input dim error: %d, %d, %d" % (h, w, c))
            # BUG FIX: abort instead of continuing to build a mis-sized graph
            # (matches the ResNet18_32x32 builder).
            sys.exit(0)

        # stage 1:  28x28 -> 14x14x64
        stage = 'stage1'
        with tf.name_scope(stage) as scope:
            X = self.residual_conv(X, w_size=[2,2], w_outchannel=64, stride=1, name=stage,
                                 padding='SAME', is_relu=True, in_training=self.Is_train, is_bn=self.Is_bn)
            X = self.maxPoolLayer(X, 3, 3, 2, 2, name=stage+'_max')

        # stage 2:  14x14x64 -> 14x14x128
        stage = 'stage2'
        with tf.name_scope(stage) as scope:
            filters = [64,64,128]
            X = self.convolutional_block(X, 3, filters=filters, stage=stage, block="a", stride=1)
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="b")
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="c")

        # stage 3:  14x14 -> 7x7x256
        stage = 'stage3'
        with tf.name_scope(stage) as scope:
            filters = [128,128,256]
            X = self.convolutional_block(X, 3, filters=filters, stage=stage, block="a", stride=2)
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="b")
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="c")

        # stage 4:  7x7x256 -> 7x7x64
        stage = 'stage4'
        with tf.name_scope(stage) as scope:
            filters = [256, 256, 64]
            X = self.convolutional_block(X, 3, filters=filters, stage=stage, block="a", stride=1)
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="b")
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="c")
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="d")

        # final stage:  average pool -> flatten -> fully connected -> softmax
        stage = 'stage_final'
        with tf.name_scope(stage) as scope:
            X = self.avgPoolLayer(X, 2, 2, 2, 2, name=stage)

            # BUG FIX: flat_len was computed as H*H*C via per-sample slicing
            # (X[0].shape[0]*X[1].shape[0]*X[0].shape[2]), which is obscure and
            # only correct for square feature maps; derive it from the static
            # shape instead (same value here).
            _, fh, fw, fc = X.get_shape()
            flat_len = int(fh) * int(fw) * int(fc)
            X = tf.reshape(X, [-1, flat_len])

            # fully connected classifier head
            X = self.fcLayer(X, int(flat_len), self.CLASSNUM, False, name='fc')
            self.prediction = tf.nn.softmax(X, name='softmax')

        self.optimizerLayer()
        
    

    def optimizerLayer(self):
        """Attach loss, accuracy and the training op for self.optimizer.

        Defines self.cross_entropy, self.correct_prediction, self.accuracy
        and self.train_step.  Exits the process on an unknown optimizer name.
        """
        # cross-entropy loss
        with tf.name_scope('cross_entropy'):
            # NOTE(review): self.prediction is already softmax output, yet it is
            # fed to softmax_cross_entropy_with_logits, which applies softmax
            # again.  Kept as-is to preserve compatibility with existing
            # checkpoints/training runs — confirm whether this double softmax
            # is intended.
            self.cross_entropy=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.Y,logits=self.prediction), name='loss')
            tf.summary.histogram('cross_entropy_histogram', self.cross_entropy)
            tf.summary.scalar('cross_entropy_scalar', self.cross_entropy)

        # per-sample correctness as a boolean vector (argmax = predicted class index)
        self.correct_prediction=tf.equal(tf.argmax(self.prediction,1),tf.argmax(self.Y,1), name='correct_bool')
        # accuracy = mean of the boolean vector cast to float
        with tf.name_scope('accurary_scalar') as scope:
            self.accuracy=tf.reduce_mean(tf.cast(self.correct_prediction,tf.float32), name='accuracy')
            tf.summary.scalar(scope, self.accuracy)

        # run pending UPDATE_OPS (e.g. batch-norm moving averages) with every train step
        update_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_op):
            if self.optimizer == "Adam":
                self.train_step=tf.train.AdamOptimizer(self.learnRate).minimize(self.cross_entropy, name='train_opt')
            elif self.optimizer == "SGD":
                # BUG FIX: the original passed global_step=self.global_step, but
                # self.global_step is never defined anywhere in this class, so
                # selecting SGD raised AttributeError.
                self.train_step = tf.train.GradientDescentOptimizer(self.learnRate).minimize(self.cross_entropy, name='train_opt')
            elif self.optimizer == "Adagrad":
                # BUG FIX: same undefined self.global_step removed here.
                self.train_step = tf.train.AdagradOptimizer(0.01).minimize(self.cross_entropy, name='train_opt')
            elif self.optimizer == "Momentum":
                self.train_step = tf.train.MomentumOptimizer(learning_rate=self.learnRate, momentum=0.9).minimize(self.cross_entropy, name='train_opt')
            elif self.optimizer == "MomentumNAG":
                self.train_step = tf.train.MomentumOptimizer(learning_rate=self.learnRate, momentum=0.9, use_nesterov=True).minimize(self.cross_entropy, name='train_opt')
            elif self.optimizer == "RMSProp":
                self.train_step = tf.train.RMSPropOptimizer(0.01).minimize(self.cross_entropy, name='train_opt')
            elif self.optimizer == "Adadelta":
                self.train_step = tf.train.AdadeltaOptimizer(1).minimize(self.cross_entropy, name='train_opt')
            else:
                print("optimizer error:  ", self.optimizer )
                sys.exit()
            

    def bnLayer(self, X, name, is_train):
        """Batch-normalize X over the channel axis (NHWC, axis=3).

        `is_train` selects training vs. inference mode of
        tf.layers.batch_normalization; its moving-average update ops are
        collected from tf.GraphKeys.UPDATE_OPS in optimizerLayer.
        """
        # BUG FIX: removed the unreachable second
        # `return self.bn_layer_top(...)` that followed the first return
        # (dead code), along with stale commented-out experiments.
        return tf.layers.batch_normalization(X, axis=3, name=name, training=is_train)


    # weight-initialization helper
    def weight_variable(self, shape, name, reuse=None):
        """Create a weight Variable of `shape` under variable scope `name`."""
        with tf.variable_scope(name, reuse=reuse):
            init_val = tf.truncated_normal(shape=shape, mean=0, stddev=1)
            return tf.Variable(init_val, dtype=tf.float32, name="weights")
    # bias-initialization helper
    def bias_variable(self,shape, name, reuse=None):
        """Create a bias Variable of `shape`, initialized to 0.1."""
        with tf.variable_scope(name, reuse=reuse):
            init_val = tf.constant(0.1, shape=shape)
            return tf.Variable(init_val, dtype=tf.float32, name="biases")
    # convolution layer
    def convLayer(self,x, kHeight, kWidth, strideX, strideY,
                featureNum, name, padding = "SAME"):
        """Convolution + bias + ReLU, with weights created under `name`."""
        in_channels = int(x.get_shape()[-1])
        with tf.variable_scope(name) as scope:
            kernel = self.weight_variable(shape=[kHeight, kWidth, in_channels, featureNum], name=scope)
            bias = self.bias_variable(shape=[featureNum], name=scope)
            # NOTE(review): strides here are [1, strideY, strideX, 1] while the
            # pooling layers below use [1, strideX, strideY, 1] — confirm the
            # intended (height, width) ordering.
            conv_out = tf.nn.conv2d(x, kernel, strides=[1, strideY, strideX, 1], padding=padding) + bias
            tf.summary.histogram('featureMap', conv_out)
            return tf.nn.relu(conv_out)
    # pooling layers
    def maxPoolLayer(self,x, kHeight, kWidth, strideX, strideY, name, padding = "SAME"):
        """Max pooling over a kHeight x kWidth window."""
        window = [1, kHeight, kWidth, 1]
        steps = [1, strideX, strideY, 1]
        return tf.nn.max_pool(x, ksize=window, strides=steps, padding=padding, name=name)
    def avgPoolLayer(self,x, kHeight, kWidth, strideX, strideY, name, padding = "SAME"):
        """Average pooling over a kHeight x kWidth window."""
        window = [1, kHeight, kWidth, 1]
        steps = [1, strideX, strideY, 1]
        return tf.nn.avg_pool(x, ksize=window, strides=steps, padding=padding, name=name)

    def dropout(self,x, keepPro, name = None):
        """Dropout wrapper; keepPro is the keep probability."""
        return tf.nn.dropout(x, keepPro, name)

    def fcLayer(self,x, inputD, outputD, reluFlag, name):
        """Fully-connected layer: x @ W + b, optionally followed by ReLU.

        inputD/outputD are the input and output feature sizes; variables are
        created under variable scope `name`.
        """
        with tf.variable_scope(name) as scope:
            weights = self.weight_variable(shape=[inputD, outputD], name=scope)
            biases = self.bias_variable(shape=[outputD], name=scope)
            linear = tf.matmul(x, weights) + biases
            return tf.nn.relu(linear) if reluFlag else linear
        


    def identity_block(self,X, f, filters, stage, block):
        """Bottleneck residual block with an identity shortcut.

        The input channel count must equal filters[2] so the element-wise
        add with the shortcut is valid.  Returns a [-, H, W, C] tensor.
        """
        name_base = stage + '_' + block + '_branch'
        F1, F2, F3 = filters
        shortcut = X  # saved input, added back after the main path

        # main path: 1x1 reduce -> fxf -> 1x1 expand (no ReLU on the last conv)
        X = self.residual_conv(X, w_size=[1,1], w_outchannel=F1, stride=1, name=name_base+'2a',
                                 padding='VALID', is_relu=True, in_training=self.Is_train, is_bn=self.Is_bn)
        X = self.residual_conv(X, w_size=[f,f], w_outchannel=F2, stride=1, name=name_base+'2b',
                                 padding='SAME', is_relu=True, in_training=self.Is_train, is_bn=self.Is_bn)
        X = self.residual_conv(X, w_size=[1,1], w_outchannel=F3, stride=1, name=name_base+'2c',
                                 padding='VALID', is_relu=False, in_training=self.Is_train, is_bn=self.Is_bn)

        # add the skip connection, then activate
        return tf.nn.relu(tf.add(X, shortcut))
    def identity_block2(self,X, f, filters, stage, block):
        """Two-conv (basic) residual block with an identity shortcut.

        Only filters[2] is used — both convs output F3 channels; filters[0]
        and filters[1] are unpacked but unused (kept for signature symmetry
        with identity_block).  Returns a [-, H, W, C] tensor.
        """
        name_base = stage + '_' + block + '_branch'
        F1, F2, F3 = filters
        shortcut = X  # saved input, added back after the main path

        # main path: two fxf convs, no ReLU on the second one
        X = self.residual_conv(X, w_size=[f,f], w_outchannel=F3, stride=1, name=name_base+'2a',
                                 padding='SAME', is_relu=True, in_training=self.Is_train, is_bn=self.Is_bn)
        X = self.residual_conv(X, w_size=[f,f], w_outchannel=F3, stride=1, name=name_base+'2b',
                                 padding='SAME', is_relu=False, in_training=self.Is_train, is_bn=self.Is_bn)

        # add the skip connection, then activate
        return tf.nn.relu(tf.add(X, shortcut))
      
    def convolutional_block(self, X, f, filters, stage, block, stride=2):
        """Bottleneck residual block with a projection (1x1 conv) shortcut.

        The first main-path conv and the shortcut conv both use `stride`, so
        this block may change spatial size as well as channel count.
        """
        name_base = stage + '_' + block + '_branch'
        F1, F2, F3 = filters
        shortcut = X

        # main path: strided 1x1 reduce -> fxf -> 1x1 expand (no ReLU last)
        X = self.residual_conv(X, w_size=[1,1], w_outchannel=F1, stride=stride, name=name_base+'2a',
                                 padding='VALID', is_relu=True, in_training=self.Is_train, is_bn=self.Is_bn)
        X = self.residual_conv(X, w_size=[f,f], w_outchannel=F2, stride=1, name=name_base+'2b',
                                 padding='SAME', is_relu=True, in_training=self.Is_train, is_bn=self.Is_bn)
        X = self.residual_conv(X, w_size=[1,1], w_outchannel=F3, stride=1, name=name_base+'2c',
                                padding='VALID', is_relu=False, in_training=self.Is_train, is_bn=self.Is_bn)

        # projection shortcut: match the main path's spatial size and channels
        shortcut = self.residual_conv(shortcut, w_size=[1,1], w_outchannel=F3, stride=stride, name=name_base+'1',
                                 padding='VALID', is_relu=False, in_training=self.Is_train, is_bn=self.Is_bn)

        # add the projected skip connection, then activate
        return tf.nn.relu(tf.add(X, shortcut))

    def convolutional_block2(self, X, f, filters, stage, block, stride=2):
        """Two-conv (basic) residual block with a projection shortcut.

        Used where the input and output channel counts differ: both the first
        main-path conv and the 1x1 shortcut conv use `stride` and output
        filters[2] channels.  filters[0]/filters[1] are unpacked but unused
        (kept for signature symmetry).
        """
        name_base = stage + '_' + block + '_branch'
        F1, F2, F3 = filters
        shortcut = X

        # main path: strided fxf conv, then fxf conv (no ReLU on the second)
        X = self.residual_conv(X, w_size=[f,f], w_outchannel=F3, stride=stride, name=name_base+'2a',
                                 padding='SAME', is_relu=True, in_training=self.Is_train, is_bn=self.Is_bn)
        X = self.residual_conv(X, w_size=[f,f], w_outchannel=F3, stride=1, name=name_base+'2b',
                                 padding='SAME', is_relu=False, in_training=self.Is_train, is_bn=self.Is_bn)

        # projection shortcut: match the main path's spatial size and channels
        shortcut = self.residual_conv(shortcut, w_size=[1,1], w_outchannel=F3, stride=stride, name=name_base+'1',
                                 padding='VALID', is_relu=False, in_training=self.Is_train, is_bn=self.Is_bn)

        # add the projected skip connection, then activate
        return tf.nn.relu(tf.add(X, shortcut))

    def residual_conv(self, X, w_size, w_outchannel, stride, name, padding, is_relu, is_bn, in_training, reuse=None):
        """conv2d + bias, followed by optional batch norm and optional ReLU.

        w_size is the (kernel_h, kernel_w) pair; the same `stride` is used in
        both spatial dimensions.  Variables are scoped by `name`.
        """
        in_channels = int(X.get_shape()[-1])

        kernel = self.weight_variable(shape=[w_size[0], w_size[1], in_channels, w_outchannel], name=name+'_conv', reuse=reuse)
        bias = self.bias_variable(shape=[w_outchannel], name=name+'_b', reuse=reuse)
        out = tf.nn.conv2d(X, kernel, strides=[1, stride, stride, 1], padding=padding) + bias

        if is_bn:
            out = self.bnLayer(out, name=name+'_bn', is_train=in_training)
        return tf.nn.relu(out) if is_relu else out

#######################################33


########   bn  with updata ###
    def bn_layer(self, x, scope, is_training, epsilon=0.001, decay=0.99, reuse=None):
        """
        Performs a batch normalization layer
        Args:
            x: input tensor
            scope: scope name
            is_training: python boolean value
            epsilon: the variance epsilon - a small float number to avoid dividing by 0
            decay: the moving average decay
            reuse: passed to tf.variable_scope; True shares variables with a
                previously built instance of this layer (see bn_layer_top)
        Returns:
            The ops of a batch normalization layer
        """
        with tf.variable_scope(scope, reuse=reuse):
            shape = x.get_shape().as_list()
            # gamma: a trainable scale factor
            gamma = tf.get_variable("gamma", shape[-1], initializer=tf.constant_initializer(1.0), trainable=True)
            # beta: a trainable shift value
            beta = tf.get_variable("beta", shape[-1], initializer=tf.constant_initializer(0.0), trainable=True)
            # non-trainable running statistics, used at inference time
            moving_avg = tf.get_variable("moving_avg", shape[-1], initializer=tf.constant_initializer(0.0), trainable=False)
            moving_var = tf.get_variable("moving_var", shape[-1], initializer=tf.constant_initializer(1.0), trainable=False)
            if is_training:
                # tf.nn.moments == Calculate the mean and the variance of the tensor x
                # (moments over every axis except the last/channel axis)
                avg, var = tf.nn.moments(x, np.arange(len(shape)-1), keep_dims=True)
                # squeeze the kept dims down to a flat per-channel vector
                avg=tf.reshape(avg, [avg.shape.as_list()[-1]])
                var=tf.reshape(var, [var.shape.as_list()[-1]])
                #update_moving_avg = moving_averages.assign_moving_average(moving_avg, avg, decay)
                # exponential moving average update of the running mean
                update_moving_avg=tf.assign(moving_avg, moving_avg*decay+avg*(1-decay))
                #update_moving_var = moving_averages.assign_moving_average(moving_var, var, decay)
                # exponential moving average update of the running variance
                update_moving_var=tf.assign(moving_var, moving_var*decay+var*(1-decay))
                control_inputs = [update_moving_avg, update_moving_var]
            else:
                # inference: normalize with the stored running statistics
                avg = moving_avg
                var = moving_var
                control_inputs = []
            # ensure the moving-average updates run whenever the output is computed
            with tf.control_dependencies(control_inputs):
                output = tf.nn.batch_normalization(x, avg, var, offset=beta, scale=gamma, variance_epsilon=epsilon)
    
        return output
    def bn_layer_top(self, x, scope, is_training, epsilon=0.001, decay=0.99):
        """
        Returns a batch normalization layer that automatically switch between train and test phases based on the 
        tensor is_training
        Args:
            x: input tensor
            scope: scope name
            is_training: boolean tensor or variable
            epsilon: epsilon parameter - see batch_norm_layer
            decay: epsilon parameter - see batch_norm_layer
        Returns:
            The correct batch normalization layer based on the value of is_training
        """
        #assert isinstance(is_training, (ops.Tensor, variables.Variable)) and is_training.dtype == tf.bool
    
        # Both branches are built under the same variable scope; the second
        # uses reuse=True so train and test share gamma/beta/moving stats.
        return tf.cond(
            is_training,
            lambda: self.bn_layer(x=x, scope=scope, epsilon=epsilon, decay=decay, is_training=True, reuse=None),
            lambda: self.bn_layer(x=x, scope=scope, epsilon=epsilon, decay=decay, is_training=False, reuse=True),
        )










################################################################################3
'''
import numpy as np
import sys, os, time

#import tensorflow as tf    #1.x
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
print("tensorflow:  ", tf.__version__)
from tensorflow.python.training import moving_averages

import imageReady



def lossAndAcc(labels, logits):
    #交叉熵代价函数
    with tf.name_scope('cross_entropy'):
        cross_entropy=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels,logits=logits), name='loss')
        tf.summary.histogram('cross_entropy_histogram', cross_entropy)
        tf.summary.scalar('cross_entropy_scalar', cross_entropy)


    #求准确率(tf.cast将布尔值转换为float型)
    with tf.name_scope('accurary_scalar') as scope:
        #结果存放在一个布尔列表中(argmax函数返回一维张量中最大的值所在的位置)
        correct_prediction=tf.equal(tf.argmax(logits,1),tf.argmax(labels,1), name='correct_bool')
        accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32), name='accuracy')   #tf.cast将目标转换为指定类型
        tf.summary.scalar(scope, accuracy)

    return cross_entropy, accuracy

# def optimizerLayer(optimizer, loss, lr):
#     update_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
#     with tf.control_dependencies(update_op):
#         #使用AdamOptimizer进行优化
#         if optimizer == "Adam":
#             train_step=tf.train.AdamOptimizer(lr).minimize(loss)
#             #self.train_step = tf.train.AdamOptimizer(0.00001).minimize(cross_entropy)      #0.001
#         elif optimizer == "SGD":
#             train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss, global_step=global_step)
#         elif optimizer == "Adagrad":
#             train_step = tf.train.AdagradOptimizer(0.01).minimize(loss, name='train_step')
#         elif optimizer == "Momentum":
#             train_step = tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9).minimize(loss) #0.01
#         elif optimizer == "MomentumNAG":
#             train_step = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9, use_nesterov=True).minimize(loss)
#         elif optimizer == "RMSProp":
#             train_step = tf.train.RMSPropOptimizer(0.01).minimize(loss)
#         elif optimizer == "Adadelta":
#             train_step = tf.train.AdadeltaOptimizer(1).minimize(loss)
#         else:
#             print("optimizer error:  ", optimizer )
#             sys.exit()

#         return train_step

class RESIDUAL_NET():
    """ResNet-50-style residual network for small (28x28) image inputs.

    (The original docstring said "VGG model"; the graph built here is a
    residual network assembled from convolutional/identity bottleneck
    blocks.)  Uses TF1.x graph-mode ops (``tf.compat.v1``) and a
    hand-rolled batch-norm (``bn_layer`` / ``bn_layer_top``) that switches
    between batch statistics and moving averages via ``tf.cond``.
    """

    def __init__(self, x, y, version, is_train, is_bn, is_reuse=None):
        """
        Args:
            x: input image batch, NHWC tensor.
            y: one-hot labels, shape (batch, num_classes).
            version: model version, stored as a float graph constant.
            is_train: boolean tensor/placeholder; selects BN train vs.
                inference behaviour (see bn_layer_top).
            is_bn: python bool; if True, conv layers apply batch norm.
            is_reuse: forwarded to variable scopes for weight sharing.
        """
        self.X = x
        self.Y = y

        _, self.CLASSNUM = y.get_shape()
        # Dimension -> plain int so it can be used directly as an FC width
        # (consistent with the other RESIDUAL_NET class in this file).
        self.CLASSNUM = int(self.CLASSNUM)
        self.version = tf.constant(version, dtype=float, name="version")

        self.Is_bn = is_bn
        self.Is_train = is_train
        self.Is_reuse = is_reuse

        # Previously a discarded local; keep a handle so training code can
        # actually use the step counter.  Same graph side effect as before.
        self.global_step = tf.Variable(0, trainable=False)

    def _flatten(self, X):
        """Flatten an NHWC tensor to (batch, h*w*c); returns (tensor, h*w*c).

        Bug fix: the original computed ``X[0].shape[0]*X[1].shape[0]*
        X[0].shape[2]`` which is h*h*c — only correct for square feature
        maps — and needlessly indexed the second batch element.
        """
        _, h, w, c = X.get_shape().as_list()
        flat_len = int(h) * int(w) * int(c)
        return tf.reshape(X, [-1, flat_len]), flat_len

    def ResNet50(self):
        """Build the wide (128-channel stem) ResNet-50-style graph.

        Expects ``self.X`` to be (batch, 28, 28, C).
        Returns the softmax prediction tensor, shape (batch, CLASSNUM).
        """
        X = self.X
        n, h, w, c = X.get_shape()
        if h != 28 or w != 28:
            # NOTE(review): only warns; graph construction continues and
            # will likely fail later on a shape mismatch.
            print("X input dim error: %d, %d, %d" % (h, w, c))

        # stage 1:  28x28xC -> conv stem -> 14x14x128 after max-pool
        stage = 'stage1'
        with tf.name_scope(stage):
            X = self.residual_conv(X, w_size=[2, 2], w_outchannel=128, stride=1, name=stage,
                                   padding='SAME', is_relu=True, in_training=self.Is_train,
                                   is_bn=self.Is_bn, reuse=self.Is_reuse)
            X = self.maxPoolLayer(X, 3, 3, 2, 2, name=stage + '_max')

        # stage 2: three residual blocks at 14x14 (stride 1)
        stage = 'stage2'
        with tf.name_scope(stage):
            filters = [128, 128, 256]
            X = self.convolutional_block(X, 3, filters=filters, stage=stage, block="a", stride=1)
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="b")
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="c")

        # stage 3: downsample to 7x7, five residual blocks
        stage = 'stage3'
        with tf.name_scope(stage):
            filters = [256, 256, 512]
            X = self.convolutional_block(X, 3, filters=filters, stage=stage, block="a", stride=2)
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="b")
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="c")
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="d")
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="e")

        # stage 4: four residual blocks at 7x7 (stride 1)
        stage = 'stage4'
        with tf.name_scope(stage):
            filters = [512, 512, 1024]
            X = self.convolutional_block(X, 3, filters=filters, stage=stage, block="a", stride=1)
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="b")
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="c")
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="d")

        # final stage: average-pool, flatten, fully-connected, softmax
        stage = 'stage_final'
        with tf.name_scope(stage):
            X = self.avgPoolLayer(X, 2, 2, 2, 2, name=stage)
            X, flat_len = self._flatten(X)
            X = self.fcLayer(X, flat_len, self.CLASSNUM, False, name='fc')
            self.prediction = tf.nn.softmax(X, name='softmax')
        return self.prediction

    def ResNet50_28x28(self):
        """Build the narrow (64-channel stem) ResNet-50-style graph.

        Same topology as :meth:`ResNet50` with halved channel counts.
        Returns the softmax prediction tensor, shape (batch, CLASSNUM).
        """
        X = self.X
        n, h, w, c = X.get_shape()
        if h != 28 or w != 28:
            # NOTE(review): warn-only, same as ResNet50.
            print("X input dim error: %d, %d, %d" % (h, w, c))

        # stage 1:  28x28xC -> 14x14x64 after max-pool
        stage = 'stage1'
        with tf.name_scope(stage):
            # NOTE(review): unlike ResNet50, the original did not forward
            # `reuse` here; preserved as-is.
            X = self.residual_conv(X, w_size=[2, 2], w_outchannel=64, stride=1, name=stage,
                                   padding='SAME', is_relu=True, in_training=self.Is_train,
                                   is_bn=self.Is_bn)
            X = self.maxPoolLayer(X, 3, 3, 2, 2, name=stage + '_max')

        # stage 2: three residual blocks at 14x14 (stride 1)
        stage = 'stage2'
        with tf.name_scope(stage):
            filters = [64, 64, 128]
            X = self.convolutional_block(X, 3, filters=filters, stage=stage, block="a", stride=1)
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="b")
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="c")

        # stage 3: downsample to 7x7, five residual blocks
        stage = 'stage3'
        with tf.name_scope(stage):
            filters = [128, 128, 256]
            X = self.convolutional_block(X, 3, filters=filters, stage=stage, block="a", stride=2)
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="b")
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="c")
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="d")
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="e")

        # stage 4: four residual blocks at 7x7 (stride 1)
        stage = 'stage4'
        with tf.name_scope(stage):
            filters = [256, 256, 512]
            X = self.convolutional_block(X, 3, filters=filters, stage=stage, block="a", stride=1)
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="b")
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="c")
            X = self.identity_block(X, 3, filters=filters, stage=stage, block="d")

        # final stage: average-pool, flatten, fully-connected, softmax
        stage = 'stage_final'
        with tf.name_scope(stage):
            X = self.avgPoolLayer(X, 2, 2, 2, 2, name=stage)
            X, flat_len = self._flatten(X)
            X = self.fcLayer(X, flat_len, self.CLASSNUM, False, name='fc')
            self.prediction = tf.nn.softmax(X, name='softmax')

        return self.prediction

    def bnLayer(self, X, name, is_train):
        """Batch-norm wrapper; delegates to the cond-switched bn_layer_top."""
        return self.bn_layer_top(X, scope=name, is_training=is_train)

    def weight_variable(self, shape, name, reuse=None):
        """Create a weight tensor initialized truncated-normal (stddev=0.1).

        NOTE(review): ``tf.Variable`` does not participate in variable-scope
        reuse (only ``tf.get_variable`` does), so `reuse` currently has no
        sharing effect here — confirm before relying on it.
        """
        with tf.variable_scope(name, reuse=reuse):
            initial = tf.truncated_normal(shape=shape, stddev=0.1)
            w = tf.Variable(initial, dtype=tf.float32, name="weights")
        return w

    def bias_variable(self, shape, name, reuse=None):
        """Create a bias tensor initialized to the constant 0.1.

        Same reuse caveat as :meth:`weight_variable`.
        """
        with tf.variable_scope(name, reuse=reuse):
            initial = tf.constant(0.1, shape=shape)
            b = tf.Variable(initial, dtype=tf.float32, name="biases")
        return b

    def maxPoolLayer(self, x, kHeight, kWidth, strideX, strideY, name, padding="SAME"):
        """Max pooling.

        NOTE(review): strides are laid out [1, strideX, strideY, 1], i.e.
        strideX is applied along the height axis — preserved as-is since all
        call sites pass symmetric strides.
        """
        return tf.nn.max_pool(x, ksize=[1, kHeight, kWidth, 1],
                              strides=[1, strideX, strideY, 1], padding=padding, name=name)

    def avgPoolLayer(self, x, kHeight, kWidth, strideX, strideY, name, padding="SAME"):
        """Average pooling (same stride-layout caveat as maxPoolLayer)."""
        return tf.nn.avg_pool(x, ksize=[1, kHeight, kWidth, 1],
                              strides=[1, strideX, strideY, 1], padding=padding, name=name)

    def dropout(self, x, keepPro, name=None):
        """Dropout with keep probability `keepPro`.

        Bug fix: `name` was previously passed positionally into
        ``tf.nn.dropout``'s third parameter, which is ``noise_shape`` (not
        ``name``); it is now passed as a keyword argument.
        """
        return tf.nn.dropout(x, keepPro, name=name)

    def fcLayer(self, x, inputD, outputD, reluFlag, name, reuse=None):
        """Fully-connected layer: x @ w + b, optionally ReLU-activated.

        Args:
            x: input tensor, shape (batch, inputD).
            inputD / outputD: input and output widths.
            reluFlag: apply ReLU when True.
            name: variable-scope name.
            reuse: forwarded to the weight/bias scopes.
        """
        with tf.variable_scope(name) as scope:
            w = self.weight_variable(shape=[inputD, outputD], name=scope, reuse=reuse)
            b = self.bias_variable(shape=[outputD], name=scope, reuse=reuse)
            out = tf.matmul(x, w) + b
            if reluFlag:
                return tf.nn.relu(out)
            else:
                return out

    def identity_block(self, X, f, filters, stage, block):
        """Bottleneck block whose shortcut is the unchanged input.

        Assumes the input channel count equals filters[2] so the residual
        add is well-formed (guaranteed by the preceding convolutional_block).

        Args:
            X: input tensor (NHWC).
            f: kernel size of the middle conv.
            filters: [F1, F2, F3] channel counts for the three convs.
            stage / block: used to build the layer name prefix.
        """
        name_base = stage + '_' + block + '_branch'
        F1, F2, F3 = filters

        # keep the input for the residual add
        X_shorCut = X

        # 1x1 reduce -> fxf -> 1x1 expand (no ReLU before the add)
        X = self.residual_conv(X, w_size=[1, 1], w_outchannel=F1, stride=1, name=name_base + '2a',
                               padding='VALID', is_relu=True, in_training=self.Is_train,
                               is_bn=self.Is_bn, reuse=self.Is_reuse)
        X = self.residual_conv(X, w_size=[f, f], w_outchannel=F2, stride=1, name=name_base + '2b',
                               padding='SAME', is_relu=True, in_training=self.Is_train,
                               is_bn=self.Is_bn, reuse=self.Is_reuse)
        X = self.residual_conv(X, w_size=[1, 1], w_outchannel=F3, stride=1, name=name_base + '2c',
                               padding='VALID', is_relu=False, in_training=self.Is_train,
                               is_bn=self.Is_bn, reuse=self.Is_reuse)

        # residual add, then activation
        X = tf.add(X, X_shorCut)
        X = tf.nn.relu(X)
        return X

    def convolutional_block(self, X, f, filters, stage, block, stride=2):
        """Bottleneck block with a 1x1 projection shortcut.

        Used when the channel count (and optionally the spatial size, via
        `stride`) changes between stages.

        Args:
            X: input tensor (NHWC).
            f: kernel size of the middle conv.
            filters: [F1, F2, F3] channel counts.
            stage / block: name prefix parts.
            stride: stride of the first conv and of the shortcut projection.
        """
        name_base = stage + '_' + block + '_branch'
        F1, F2, F3 = filters

        X_shorCut = X

        # main path: 1x1 (strided) -> fxf -> 1x1 expand (no ReLU before add)
        X = self.residual_conv(X, w_size=[1, 1], w_outchannel=F1, stride=stride, name=name_base + '2a',
                               padding='VALID', is_relu=True, in_training=self.Is_train,
                               is_bn=self.Is_bn, reuse=self.Is_reuse)
        X = self.residual_conv(X, w_size=[f, f], w_outchannel=F2, stride=1, name=name_base + '2b',
                               padding='SAME', is_relu=True, in_training=self.Is_train,
                               is_bn=self.Is_bn, reuse=self.Is_reuse)
        X = self.residual_conv(X, w_size=[1, 1], w_outchannel=F3, stride=1, name=name_base + '2c',
                               padding='VALID', is_relu=False, in_training=self.Is_train,
                               is_bn=self.Is_bn, reuse=self.Is_reuse)

        # shortcut path: 1x1 projection to F3 channels at the same stride
        X_shorCut = self.residual_conv(X_shorCut, w_size=[1, 1], w_outchannel=F3, stride=stride,
                                       name=name_base + '1', padding='VALID', is_relu=False,
                                       in_training=self.Is_train, is_bn=self.Is_bn,
                                       reuse=self.Is_reuse)

        X = tf.add(X, X_shorCut)
        X = tf.nn.relu(X)
        return X

    def residual_conv(self, X, w_size, w_outchannel, stride, name, padding, is_relu, is_bn, in_training, reuse=None):
        """conv2d + bias, then optional batch-norm and optional ReLU.

        Args:
            X: input tensor (NHWC); input channels inferred from its shape.
            w_size: [kh, kw] kernel size.
            w_outchannel: number of output channels.
            stride: spatial stride (same in both dims).
            name: base name for the weight/bias/bn scopes.
            padding: 'SAME' or 'VALID'.
            is_relu: apply ReLU after (optional) BN.
            is_bn: apply batch normalization.
            in_training: boolean tensor driving BN mode.
            reuse: forwarded to the variable scopes.
        """
        w_inchannel = int(X.get_shape()[-1])

        w = self.weight_variable(shape=[w_size[0], w_size[1], w_inchannel, w_outchannel],
                                 name=name + '_conv', reuse=reuse)
        b = self.bias_variable(shape=[w_outchannel], name=name + '_b', reuse=reuse)
        X = tf.nn.conv2d(X, w, strides=[1, stride, stride, 1], padding=padding) + b

        if is_bn:
            X = self.bnLayer(X, name=name + '_bn', is_train=in_training)
        if is_relu:
            X = tf.nn.relu(X)
        return X

    def bn_layer(self, x, scope, is_training, epsilon=0.001, decay=0.99, reuse=None):
        """
        Performs a batch normalization layer
        Args:
            x: input tensor
            scope: scope name
            is_training: python boolean value
            epsilon: the variance epsilon - a small float number to avoid dividing by 0
            decay: the moving average decay
            reuse: variable-scope reuse flag (True on the inference branch
                so both tf.cond branches share one set of variables)
        Returns:
            The ops of a batch normalization layer
        """
        with tf.variable_scope(scope, reuse=reuse):
            shape = x.get_shape().as_list()
            # gamma: a trainable scale factor
            gamma = tf.get_variable("gamma", shape[-1], initializer=tf.constant_initializer(1.0), trainable=True)
            # beta: a trainable shift value
            beta = tf.get_variable("beta", shape[-1], initializer=tf.constant_initializer(0.0), trainable=True)
            moving_avg = tf.get_variable("moving_mean", shape[-1], initializer=tf.constant_initializer(0.0), trainable=False)
            moving_var = tf.get_variable("moving_variance", shape[-1], initializer=tf.constant_initializer(1.0), trainable=False)
            if is_training:
                # batch statistics over all axes except the channel axis
                avg, var = tf.nn.moments(x, np.arange(len(shape) - 1), keep_dims=True)
                avg = tf.reshape(avg, [avg.shape.as_list()[-1]])
                var = tf.reshape(var, [var.shape.as_list()[-1]])
                # exponential-moving-average updates, forced to run before
                # the normalization via control_dependencies below
                update_moving_avg = tf.assign(moving_avg, moving_avg * decay + avg * (1 - decay))
                update_moving_var = tf.assign(moving_var, moving_var * decay + var * (1 - decay))
                control_inputs = [update_moving_avg, update_moving_var]
            else:
                # inference: use the accumulated moving statistics
                avg = moving_avg
                var = moving_var
                control_inputs = []
            with tf.control_dependencies(control_inputs):
                output = tf.nn.batch_normalization(x, avg, var, offset=beta, scale=gamma, variance_epsilon=epsilon)

        return output

    def bn_layer_top(self, x, scope, is_training, epsilon=0.001, decay=0.99):
        """
        Returns a batch normalization layer that automatically switch between train and test phases based on the
        tensor is_training
        Args:
            x: input tensor
            scope: scope name
            is_training: boolean tensor or variable
            epsilon: epsilon parameter - see bn_layer
            decay: decay parameter - see bn_layer
        Returns:
            The correct batch normalization layer based on the value of is_training
        """
        # The train branch is built first (reuse=None creates the variables);
        # the test branch then reuses them (reuse=True).
        return tf.cond(
            is_training,
            lambda: self.bn_layer(x=x, scope=scope, epsilon=epsilon, decay=decay, is_training=True, reuse=None),
            lambda: self.bn_layer(x=x, scope=scope, epsilon=epsilon, decay=decay, is_training=False, reuse=True),
        )

'''

'''

    def fc(self,x, c):
        num_units_in = x.get_shape()[1]
        num_units_out = c['fc_units_out']
        weights_initializer = tf.truncated_normal_initializer(
            stddev=FC_WEIGHT_STDDEV)

        weights = self._get_variable('weights',
                                shape=[num_units_in, num_units_out],
                                initializer=weights_initializer,
                                weight_decay=FC_WEIGHT_STDDEV)
        biases = self._get_variable('biases',
                            shape=[num_units_out],
                            initializer=tf.zeros_initializer)
        x = tf.nn.xw_plus_b(x, weights, biases)
        return x



    def bn(self, x, c):
        x_shape = x.get_shape()
        params_shape = x_shape[-1:]

        # if c['use_bias']:
        #     bias = self._get_variable('bias', params_shape,
        #                         initializer=tf.zeros_initializer)
        #     return x + bias

        axis = list(range(len(x_shape) - 1))

        beta = self._get_variable('beta',
                            params_shape,
                            initializer=tf.zeros_initializer)
        gamma = self._get_variable('gamma',
                            params_shape,
                            initializer=tf.ones_initializer)

        moving_mean = self._get_variable('moving_mean',
                                    params_shape,
                                    initializer=tf.zeros_initializer,
                                    trainable=False)
        moving_variance = self._get_variable('moving_variance',
                                        params_shape,
                                        initializer=tf.ones_initializer,
                                        trainable=False)

        # These ops will only be preformed when training.
        mean, variance = tf.nn.moments(x, axis)
        update_moving_mean     = moving_averages.assign_moving_average(moving_mean, mean, BN_DECAY)
        update_moving_variance = moving_averages.assign_moving_average(moving_variance, variance, BN_DECAY)

        tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
        tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)

        mean, variance = tf.cond(self.Is_train, lambda: (mean, variance),
                                                lambda: (moving_mean, moving_variance))

        x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON)       #BN_EPSILON  0.001
        #x.set_shape(inputs.get_shape()) ??

        return x

    def _get_variable(self, name,
                    shape,
                    initializer,
                    weight_decay=0.0,
                    dtype='float',
                    trainable=True):
        "A little wrapper around tf.get_variable to do weight decay and add to"
        "resnet collection"
        if weight_decay > 0:
            regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
        else:
            regularizer = None
        collections = [tf.GraphKeys.VARIABLES, RESNET_VARIABLES]
        return tf.get_variable(name,
                            shape=shape,
                            initializer=initializer,
                            dtype=dtype,
                            regularizer=regularizer,
                            collections=collections,
                            trainable=trainable)


    def conv(self, x, ksize, knum, stride,):
        ksize = ksize
        stride = stride
        filters_out = knum

        filters_in = x.get_shape()[-1]
        shape = [ksize, ksize, filters_in, filters_out]
        initializer = tf.truncated_normal_initializer(stddev=CONV_WEIGHT_STDDEV)
        weights = self._get_variable('weights',
                                shape=shape,
                                dtype='float',
                                initializer=initializer,
                                weight_decay=CONV_WEIGHT_DECAY)
        return tf.nn.conv2d(x, weights, [1, stride, stride, 1], padding='SAME')


    def _max_pool(self, x, ksize=3, stride=2, padding='SAME'):
        return tf.nn.max_pool(x,
                            ksize=[1, ksize, ksize, 1],
                            strides=[1, stride, stride, 1],
                            padding=padding)




'''





'''
    def convolutional_block(self, X, f, filters, stage, block, in_training, stride=2, is_bn=False):
        #defining name basis
        conv_name_base = 'res' + str(stage) + block + '_branch'
        bn_name_base = 'bn' +str(stage) + block + '_branch'

        #Retrieve Filters
        F1, F2, F3 = filters

        #save the input value, you'll need this later to add back to the main path
        X_shorCut = X

        #First componet of main path
        channel = int(X.get_shape()[-1])
        featureNum = F1
        padding = 'VALID'
        name = conv_name_base+'2a'
        with tf.variable_scope(name) as scope:
            w = self.weight_variable(shape = [1, 1, channel, featureNum], name=scope)
            b = self.bias_variable(shape = [featureNum], name=scope)
            X = tf.nn.conv2d(X, w, strides = [1, stride, stride, 1], padding = padding) + b
            X = tf.layers.batch_normalization(X, axis=0, name=scope, training=in_training)
            X = tf.nn.relu(X)

        #Second componet of main path
        channel = int(X.get_shape()[-1])
        featureNum = F2
        padding = 'SAME'
        name = conv_name_base+'2b'
        with tf.variable_scope(name) as scope:
            w = self.weight_variable(shape = [f, f, channel, featureNum], name=scope)
            b = self.bias_variable(shape = [featureNum], name=scope)
            X = tf.nn.conv2d(X, w, strides = [1, 1, 1, 1], padding = padding) + b
            X = tf.layers.batch_normalization(X, axis=0,name=scope, training=in_training)
            X = tf.nn.relu(X)

        #third componet of ma, input_shape=(64, 64, 3), classes=6
        padding = 'VALID'
        name = conv_name_base+'2c'
        with tf.variable_scope(name) as scope:
            w = self.weight_variable(shape = [1, 1, channel, featureNum], name=scope)
            b = self.bias_variable(shape = [featureNum], name=scope)
            X = tf.nn.conv2d(X, w, strides = [1, 1, 1, 1], padding = padding) + b
            X = tf.layers.batch_normalization(X, axis=0,name=scope, training=in_training)

        #SHORTCUT PATH
        X = self.residual_conv(X, w_size=[1,1],w_outchannel=F3, name=conv_name_base+'1', padding='VALID', in_training=in_training, stride=stride, is_bn=is_bn)
        # channel = int(X.get_shape()[-1])
        # featureNum = F3
        # padding = 'VALID'
        # name = conv_name_base+'1'
        # with tf.variable_scope(name) as scope:
        #     w = self.weight_variable(shape = [1, 1, channel, featureNum], name=scope)
        #     b = self.bias_variable(shape = [featureNum], name=scope)
        #     X = tf.nn.conv2d(X, w, strides = [1, stride, stride, 1], padding = padding) + b
        #     X = tf.layers.batch_normalization(X, axis=0,name=scope, training=in_training)

        #
        X = tf.add(X, X_shorCut)
        X = tf.nn.relu(X)
        return X
'''

