
from cnn_net_and_third_part.CNNNET import CNNNET

import tensorflow.compat.v1 as tf   #2.x version
tf.disable_v2_behavior()

class net3(CNNNET):
    def buildCNN_net(self):
        """Build the CNN graph: three conv/pool stages plus an FC output layer.

        Assumes ``self.X`` holds a batch of 32x32 single-channel images
        (per-layer spatial sizes noted below) and ``self.CLASSNUM`` gives
        the number of output classes. Sets ``self.netout`` to the
        ``[None, CLASSNUM]`` logits tensor and finishes via
        ``self.computeOut()``.
        """
        # Stage 1: 32x32 -> 28x28 (5x5 conv, VALID) -> 14x14 (2x2/2 max pool).
        net = self.convBasic(self.X, 5, 5, 1, 1, 32, name="conv1", padding="VALID")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool1")

        # Stage 2: 14x14 -> 14x14 (3x3 conv, SAME) -> 7x7 (2x2/2 max pool).
        net = self.convBasic(net, 3, 3, 1, 1, 64, name="conv2_1", padding="SAME")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool2")

        # Stage 3: 3x3 conv, then a 1x1 average pool (effectively a pass-through
        # with kernel/stride 1 -- kept to preserve the original graph layout).
        net = self.convBasic(net, 3, 3, 1, 1, 128, name="conv3_1")
        net = self.avgPoolLayer(net, 1, 1, "avgPool")

        # Flatten each sample's feature map (H * W * C) and project to class scores.
        feat_shape = net[0].shape
        fc_len_value = feat_shape[0] * feat_shape[1] * feat_shape[2]
        pool_flat = tf.reshape(net, [-1, fc_len_value])
        self.netout = self.fcLayer(pool_flat, int(fc_len_value), self.CLASSNUM, False, "netout")  # [None, classNum]

        self.computeOut()

    def inception_block(self, x, branch_i_out, inception_name):
        """Inception-style block: parallel 1x1 / 3x3 / 5x5 convs plus a
        padded max-pool branch, concatenated along the channel axis.

        Args:
            x: 4-D input tensor, assumed NHWC ``[batch, height, width, channels]``.
            branch_i_out: sequence of at least three ints -- output channel
                counts for the 1x1, 3x3 and 5x5 conv branches respectively.
            inception_name: variable-scope name for the block, e.g. ``'inception_1'``.

        Returns:
            4-D tensor with all four branch outputs concatenated on axis 3.
        """
        with tf.variable_scope(inception_name):
            with tf.variable_scope(inception_name + 'branch_0'):
                branch_0 = tf.layers.conv2d(x, branch_i_out[0], (1, 1), strides=(1, 1), padding='same', activation=tf.nn.relu)

            with tf.variable_scope(inception_name + 'branch_1'):
                branch_1 = tf.layers.conv2d(x, branch_i_out[1], (3, 3), strides=(1, 1), padding='same', activation=tf.nn.relu)

            with tf.variable_scope(inception_name + 'branch_2'):
                branch_2 = tf.layers.conv2d(x, branch_i_out[2], (5, 5), strides=(1, 1), padding='same', activation=tf.nn.relu)

            with tf.variable_scope(inception_name + 'branch_3'):
                # The 2x2/stride-2 max pool halves the spatial dims; pad the
                # result back up to the input size so it can be concatenated
                # with the 'same'-padded conv branches.
                branch_3 = tf.layers.max_pooling2d(x, (2, 2), (2, 2))

                pooled_shape = branch_3.get_shape().as_list()[1:]
                input_shape = x.get_shape().as_list()[1:]
                # NHWC: axis 1 is height, axis 2 is width (the original code had
                # the names swapped). Split each deficit across both sides with
                # the extra row/column on the trailing side -- the original used
                # (deficit // 2) on BOTH sides, which left the padded branch one
                # element short whenever the deficit was odd and broke tf.concat.
                height_deficit = input_shape[0] - pooled_shape[0]
                width_deficit = input_shape[1] - pooled_shape[1]
                padded_pooling = tf.pad(
                    branch_3,
                    [[0, 0],
                     [height_deficit // 2, height_deficit - height_deficit // 2],
                     [width_deficit // 2, width_deficit - width_deficit // 2],
                     [0, 0]])

            concat_layer = tf.concat([branch_0, branch_1, branch_2, padded_pooling], axis=3)
        return concat_layer



