# -*- coding: utf-8 -*-
"""
Created on Sun Feb 21 14:38:46 2021

@author: LI

#    tensorboard --logdir=./log --bind_all
"""
#import tensorflow as tf
from os import name
import tensorflow.compat.v1 as tf   #2.x version

tf.disable_v2_behavior()

from cnn_net_and_third_part.CNNnetBasic import CNNnetBasic
import numpy as np
import sys, os



def variable_summaries(var):
    """Attach a TensorBoard histogram summary to *var* (used for weights/biases)."""
    with tf.name_scope('summaries'):
        tf.summary.histogram('histogram', var)


class CNNNET_VGG16(CNNnetBasic):
    """
    VGG-style convolutional network builder (TF1 graph mode).

    Construct with input/label tensors, then call exactly one ``buildCNN_*``
    method matching the input resolution.  Each build method sets
    ``self.netout`` (pre-softmax logits) and calls :meth:`computeOut`, which
    creates the softmax prediction, accuracy, weighted cross-entropy loss,
    summaries and the training op (``self.train_step``).
    """

    def __init__(self, x, y, keepPro, learnrate, optimizer, version, weightRate):
        """
        Args:
            x: input image tensor/placeholder, shape [batch, H, W, C].
            y: one-hot label tensor, shape [batch, classNum].
            keepPro: dropout keep probability (scalar or placeholder).
            learnrate: learning rate (honoured by the "Adam" branch only;
                the other optimizers use hard-coded rates in computeOut).
            optimizer: one of "Adam", "SGD", "Adagrad", "Momentum",
                "MomentumNAG", "RMSProp", "Adadelta".
            version: model version number, stored as a constant in the graph.
            weightRate: per-class loss weights, e.g. [1, 1, 1, 1, 1].
        """
        self.X = x
        self.Y = y
        self.C = int(x.get_shape()[-1])      # input channel count
        self.KEEPPRO = keepPro
        self.learnRate = learnrate
        self.CLASSNUM = y.get_shape()[-1]    # number of output classes
        self.optimizer = optimizer
        self.version = tf.constant(version, dtype=float, name="version")

        self.weightRate = weightRate         # per-class loss weights, e.g. [1,1,1,1,1]
        # NOTE: index 1 selects 'sigmoid'; switch to index 0 for ReLU.
        activation_method_list = ['relu', 'sigmoid']
        self.activation_method = activation_method_list[1]

        self.global_step = tf.Variable(initial_value=0, name='global_step', trainable=False)

    def _flatten(self, net):
        """Flatten a [batch, h, w, c] feature map to [batch, h*w*c].

        Returns:
            (flat_tensor, flat_length): the reshaped tensor and the static
            per-sample length as an int.
        """
        per_sample = net[0].shape            # static shape of one sample: (h, w, c)
        length = int(per_sample[0] * per_sample[1] * per_sample[2])
        return tf.reshape(net, [-1, length]), length

    def buildCNN_net_28x28(self):
        """Build a small two-conv-stage model for 28x28 input."""
        # conv / activation / pool
        # 28x28x1 -> 14x14x32
        conv1 = self.convLayer(self.X, 5, 5, 1, 1, 32, "conv1")
        pool1 = self.maxPoolLayer(conv1, 2, 2, 2, 2, "pool1")

        # 14x14x32 -> 7x7x64
        conv2 = self.convLayer(pool1, 5, 5, 1, 1, 64, "conv2")
        h_pool2 = self.maxPoolLayer(conv2, 2, 2, 2, 2, "pool2")

        # flatten pooled features (7*7*64) to 1-D for the fully connected layers
        h_pool2_flat, fc_len_value = self._flatten(h_pool2)
        fc1 = self.fcLayer(h_pool2_flat, fc_len_value, 1024, True, "fc1")
        fc1_drop = tf.nn.dropout(fc1, self.KEEPPRO)

        # output layer (raw logits)
        self.fc_out = self.fcLayer(fc1_drop, 1024, self.CLASSNUM, False, "netout")
        # BUG FIX: computeOut() takes no arguments; publish logits via self.netout.
        self.netout = self.fc_out
        self.computeOut()

    def buildCNN_net2_32x32(self):
        """Build a two-conv-stage model for 32x32 input (first conv VALID -> 28x28)."""
        conv1 = self.convLayer(self.X, 5, 5, 1, 1, 32, "conv1", padding="VALID")  # 28x28
        pool1 = self.maxPoolLayer(conv1, 2, 2, 2, 2, "pool1")                     # 14x14

        conv2 = self.convLayer(pool1, 5, 5, 1, 1, 64, "conv2")                    # 14x14x64
        h_pool2 = self.maxPoolLayer(conv2, 2, 2, 2, 2, "pool2")                   # 7x7x64

        # flatten to 1-D for the fully connected layers
        h_pool2_flat, fc_len_value = self._flatten(h_pool2)
        fc1 = self.fcLayer(h_pool2_flat, fc_len_value, 1024, True, "fc1")
        fc1_drop = tf.nn.dropout(fc1, self.KEEPPRO)

        self.fc_out = self.fcLayer(fc1_drop, 1024, self.CLASSNUM, False, "netout")
        # BUG FIX: computeOut() takes no arguments; publish logits via self.netout.
        self.netout = self.fc_out
        self.computeOut()

    def buildCNN_net_32x32_gap(self):
        """Build a 32x32 model ending in global average pooling + FC output."""
        net = self.convLayer(self.X, 5, 5, 1, 1, 32, "conv1", padding="VALID")  # 28x28
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool1")                       # 14x14

        net = self.convLayer(net, 3, 3, 1, 1, 64, "conv2_1", padding="SAME")
        net = self.convLayer(net, 3, 3, 1, 1, 64, "conv2_2", padding="SAME")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool2")                       # 7x7

        net = self.convLayer(net, 3, 3, 1, 1, 128, "conv3_1")
        net = self.convLayer(net, 3, 3, 1, 1, 128, "conv3_2")
        # global average pooling collapses the spatial dims -> 1x1x128
        net = self.avgPoolLayer(net, 1, 1, "avgPool")

        # (dead `if 0:` 1x1-conv head removed; FC head is the live path)
        pool_flat, fc_len_value = self._flatten(net)
        self.netout = self.fcLayer(pool_flat, fc_len_value, self.CLASSNUM, False, "netout")
        self.computeOut()

    def buildCNN_net_32x32_gap_test(self):
        """Lightweight GAP variant built with convBasic (from CNNnetBasic) for experiments."""
        # NOTE(review): convBasic is defined in the base class — presumably a
        # conv layer without the extra bookkeeping of convLayer; confirm there.
        net = self.convBasic(self.X, 5, 5, 1, 1, 32, name="conv1", padding="VALID")  # 28x28
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool1")                            # 14x14

        net = self.convBasic(net, 3, 3, 1, 1, 64, name="conv2_1", padding="SAME")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool2")                            # 7x7

        net = self.convBasic(net, 3, 3, 1, 1, 128, name="conv3_1")
        net = self.avgPoolLayer(net, 1, 1, "avgPool")                                # 1x1x128

        pool_flat, fc_len_value = self._flatten(net)
        self.netout = self.fcLayer(pool_flat, fc_len_value, self.CLASSNUM, False, "netout")
        self.computeOut()

    def buildCNN_net9_32x32(self):
        """Build three double-conv stages + two dropout FC layers for 32x32 input."""
        X = self.convLayer(self.X, 3, 3, 1, 1, 32, "conv11", padding="SAME")
        X = self.convLayer(X, 3, 3, 1, 1, 32, "conv12", padding="SAME")
        X = self.maxPoolLayer(X, 2, 2, 2, 2, "pool1")     # 16x16

        X = self.convLayer(X, 3, 3, 1, 1, 64, "conv21", padding="SAME")
        X = self.convLayer(X, 3, 3, 1, 1, 64, "conv22", padding="SAME")
        X = self.maxPoolLayer(X, 2, 2, 2, 2, "pool2")     # 8x8

        X = self.convLayer(X, 3, 3, 1, 1, 128, "conv31")
        X = self.convLayer(X, 3, 3, 1, 1, 128, "con32" if False else "conv32")
        X = self.maxPoolLayer(X, 2, 2, 2, 2, "pool3")     # 4x4

        # flatten 4*4*128 features for the FC head
        h_pool_flat, fc_len_value = self._flatten(X)
        fc1 = self.fcLayer(h_pool_flat, fc_len_value, 1024, True, "fc1")
        fc1 = tf.nn.dropout(fc1, self.KEEPPRO)

        fc2 = self.fcLayer(fc1, 1024, 1024, True, "fc2")
        fc2 = tf.nn.dropout(fc2, self.KEEPPRO)

        self.fc_out = self.fcLayer(fc2, 1024, self.CLASSNUM, False, "netout")
        # BUG FIX: computeOut() takes no arguments; publish logits via self.netout.
        self.netout = self.fc_out
        self.computeOut()

    def buildCNN_net2_56x56(self):
        """Build three conv/pool stages for 56x56 input."""
        net = self.convLayer(self.X, 5, 5, 1, 1, 32, "conv1", padding="SAME")  # 56x56
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool1")                      # 28x28

        net = self.convLayer(net, 3, 3, 1, 1, 64, "conv2")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool2")                      # 14x14

        net = self.convLayer(net, 3, 3, 1, 1, 128, "conv3")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool3")                      # 7x7

        h_pool_flat, fc_len_value = self._flatten(net)
        fc1 = self.fcLayer(h_pool_flat, fc_len_value, 1024, True, "fc1")
        fc1_drop = tf.nn.dropout(fc1, self.KEEPPRO)

        self.fc_out = self.fcLayer(fc1_drop, 1024, self.CLASSNUM, False, "netout")
        # BUG FIX: computeOut() takes no arguments; publish logits via self.netout.
        self.netout = self.fc_out
        self.computeOut()

    def buildCNN_net5_56x56(self):
        """Build five conv layers over three pooling stages for 56x56 input."""
        net = self.convLayer(self.X, 5, 5, 1, 1, 32, "conv1", padding="SAME")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool1")   # 28x28

        net = self.convLayer(net, 3, 3, 1, 1, 64, "conv2_1")
        net = self.convLayer(net, 3, 3, 1, 1, 64, "conv2_2")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool2")   # 14x14

        net = self.convLayer(net, 3, 3, 1, 1, 128, "conv3_1")
        net = self.convLayer(net, 3, 3, 1, 1, 128, "conv3_2")
        net = self.convLayer(net, 1, 1, 1, 1, 128, "conv3_3", padding="VALID")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool3")   # 7x7

        h_pool_flat, fc_len_value = self._flatten(net)
        fc1 = self.fcLayer(h_pool_flat, fc_len_value, 1024, True, "fc1")
        fc1_drop = tf.nn.dropout(fc1, self.KEEPPRO)

        self.netout = self.fcLayer(fc1_drop, 1024, self.CLASSNUM, False, "netout")
        self.computeOut()

    def buildCNN_net5_56x56_gap(self):
        """Build the 56x56 model with an extra conv stage and global average pooling."""
        net = self.convLayer(self.X, 5, 5, 1, 1, 32, "conv1", padding="SAME")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool1")   # 28x28

        net = self.convLayer(net, 3, 3, 1, 1, 64, "conv2_1")
        net = self.convLayer(net, 3, 3, 1, 1, 64, "conv2_2")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool2")   # 14x14

        net = self.convLayer(net, 3, 3, 1, 1, 128, "conv3_1")
        net = self.convLayer(net, 3, 3, 1, 1, 128, "conv3_2")
        net = self.convLayer(net, 1, 1, 1, 1, 128, "conv3_3", padding="VALID")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool3")   # 7x7

        net = self.convLayer(net, 3, 3, 1, 1, 256, "conv4_1")
        net = self.convLayer(net, 3, 3, 1, 1, 256, "conv4_2")

        # global average pooling -> 1x1x256
        net = self.avgPoolLayer(net, 1, 1, "avgPool")
        pool_flat, fc_len_value = self._flatten(net)

        self.netout = self.fcLayer(pool_flat, fc_len_value, self.CLASSNUM, False, "netout")
        self.computeOut()

    def buildCNN_net14_112x112(self):
        """Build a VGG-like model for 112x112 input with a dropout FC head."""
        n, row, col, c = self.X.shape
        if row != 112 or col != 112:
            print("CNN: input row and col error")
            sys.exit()

        net = self.convLayer(self.X, 5, 5, 1, 1, 32, "conv11")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool1")   # 56x56x32

        net = self.convLayer(net, 3, 3, 1, 1, 64, "conv21")
        net = self.convLayer(net, 3, 3, 1, 1, 64, "conv22")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool2")   # 28x28x64

        net = self.convLayer(net, 3, 3, 1, 1, 128, "conv31")
        net = self.convLayer(net, 3, 3, 1, 1, 128, "conv32")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool3")   # 14x14x128

        net = self.convLayer(net, 3, 3, 1, 1, 256, "conv41")
        net = self.convLayer(net, 3, 3, 1, 1, 256, "conv42")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool4")   # 7x7x256

        # flatten 7*7*256 features for the FC head
        pool_flat, fc_len_value = self._flatten(net)
        net = self.fcLayer(pool_flat, fc_len_value, 1024, True, "fc1")
        net = tf.nn.dropout(net, self.KEEPPRO)

        self.netout = self.fcLayer(net, 1024, self.CLASSNUM, False, "netout")
        self.computeOut()

    def buildCNN_net14_112x112_gap(self):
        """Build the 112x112 model variant ending in a 1x1 conv + global average pool."""
        n, row, col, c = self.X.shape
        if row != 112 or col != 112:
            print("CNN: input row and col error")
            sys.exit()

        net = self.convLayer(self.X, 7, 7, 1, 1, 32, "conv11")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool1")   # 56x56x32

        net = self.convLayer(net, 3, 3, 1, 1, 64, "conv21")
        net = self.convLayer(net, 3, 3, 1, 1, 64, "conv22")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool2")   # 28x28x64

        net = self.convLayer(net, 3, 3, 1, 1, 128, "conv31")
        net = self.convLayer(net, 3, 3, 1, 1, 128, "conv32")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool3")   # 14x14x128

        net = self.convLayer(net, 3, 3, 1, 1, 256, "conv41")
        net = self.convLayer(net, 3, 3, 1, 1, 256, "conv42")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool4")   # 7x7x256

        # 1x1 conv to widen channels, then global average pool -> 1x1x512
        net = self.convLayer(net, 1, 1, 1, 1, 512, "conv51")
        net = self.avgPoolLayer(net, 1, 1, "avgPool")

        pool_flat, fc_len_value = self._flatten(net)
        self.netout = self.fcLayer(pool_flat, fc_len_value, self.CLASSNUM, False, "netout")
        self.computeOut()

    def buildCNN_net16_224x224(self):
        """Build a VGG16-like model for 224x224 input with two dropout FC layers."""
        n, row, col, c = self.X.shape
        if row != 224 or col != 224:
            print("CNN: input row and col error")
            sys.exit()

        # 224x224xC
        net = self.convLayer(self.X, 5, 5, 1, 1, 64, "conv11")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool1")   # 112x112x64

        net = self.convLayer(net, 3, 3, 1, 1, 128, "conv21")
        net = self.convLayer(net, 3, 3, 1, 1, 128, "conv22")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool2")   # 56x56x128

        net = self.convLayer(net, 3, 3, 1, 1, 256, "conv31")
        net = self.convLayer(net, 3, 3, 1, 1, 256, "conv32")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool3")   # 28x28x256

        net = self.convLayer(net, 3, 3, 1, 1, 512, "conv41")
        net = self.convLayer(net, 3, 3, 1, 1, 512, "conv42")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool4")   # 14x14x512

        net = self.convLayer(net, 3, 3, 1, 1, 512, "conv51")
        net = self.convLayer(net, 3, 3, 1, 1, 512, "conv52")
        net = self.maxPoolLayer(net, 2, 2, 2, 2, "pool5")   # 7x7x512

        # flatten 7*7*512 features for the FC head
        pool_flat, fc_len_value = self._flatten(net)
        fc1 = self.fcLayer(pool_flat, fc_len_value, 1024, True, "fc1")
        fc1_drop = tf.nn.dropout(fc1, self.KEEPPRO)

        fc2 = self.fcLayer(fc1_drop, 1024, 1024, True, "fc2")
        fc2_drop = tf.nn.dropout(fc2, self.KEEPPRO)

        self.netout = self.fcLayer(fc2_drop, 1024, self.CLASSNUM, False, "netout")
        self.computeOut()

###########################################################################################

    def convLayer(self, x, kHeight, kWidth, strideX, strideY,
                  featureNum, name, padding="SAME"):
        """Conv2D + bias + activation (activation chosen by self.activation_method).

        Strides are [1, strideY, strideX, 1] — NHWC, height stride first.
        """
        channel = int(x.get_shape()[-1])
        with tf.variable_scope(name) as scope:
            # weight_variable / bias_variable are provided by CNNnetBasic
            w = self.weight_variable(shape=[kHeight, kWidth, channel, featureNum], name=scope)
            b = self.bias_variable(shape=[featureNum], name=scope)
            featureMap = tf.nn.conv2d(x, w, strides=[1, strideY, strideX, 1], padding=padding) + b
            return self.activation_func(featureMap, method=self.activation_method)

    def maxPoolLayer(self, x, kHeight, kWidth, strideX, strideY, name, padding="SAME"):
        """Max-pooling.

        BUG FIX: strides were [1, strideX, strideY, 1]; NHWC requires the
        height (Y) stride first, matching convLayer.  All current call sites
        use strideX == strideY, so their behaviour is unchanged.
        """
        return tf.nn.max_pool(x, ksize=[1, kHeight, kWidth, 1],
                              strides=[1, strideY, strideX, 1], padding=padding, name=name)

    def avgPoolLayer(self, x, strideX, strideY, name, padding="VALID"):
        """Global average pooling: the kernel spans the full spatial extent of x."""
        _, kHeight, kWidth, _c = x.get_shape()
        return tf.nn.avg_pool(x, ksize=[1, kHeight, kWidth, 1],
                              strides=[1, strideX, strideY, 1], padding=padding, name=name)

    def activation_func(self, x, method='relu'):
        """Apply the selected activation; exits the process on an unknown name."""
        if method == 'sigmoid':
            return tf.nn.sigmoid(x)
        elif method == 'relu':
            return tf.nn.relu(x)
        else:
            print('activation method error !!!')
            sys.exit(0)

    def dropout(self, x, keepPro, name=None):
        """Dropout wrapper.

        BUG FIX: the third positional argument of TF1 tf.nn.dropout is
        noise_shape, not name — pass name as a keyword.
        """
        return tf.nn.dropout(x, keepPro, name=name)

    def fcLayer(self, x, inputD, outputD, reluFlag, name):
        """Fully-connected layer: x @ w + b, optionally followed by the activation."""
        with tf.variable_scope(name) as scope:
            w = self.weight_variable(shape=[inputD, outputD], name=scope)
            b = self.bias_variable(shape=[outputD], name=scope)
            out = tf.matmul(x, w) + b
            if reluFlag:
                return self.activation_func(out, method=self.activation_method)
            else:
                return out

    def convBlock(self, input, kHeight, kWidth, sX, sY, number, name):
        """Two stacked conv layers followed by 2x2 max-pooling.

        BUG FIX: the kernel width argument previously passed kHeight twice;
        kWidth is now honoured.
        """
        conv1 = self.convLayer(input, kHeight, kWidth, sX, sY, number, name + "1")
        conv2 = self.convLayer(conv1, kHeight, kWidth, sX, sY, number, name + "2")
        pool = self.maxPoolLayer(conv2, 2, 2, 2, 2, name + "pool")
        return pool

    def computeOut(self):
        """Create prediction, accuracy, weighted cross-entropy loss and the train op.

        Requires ``self.netout`` (pre-softmax logits) to have been set by a
        build method.  Defines self.prediction, self.accuracy,
        self.cross_entropy and self.train_step.
        """
        self.prediction = tf.nn.softmax(self.netout, name="softmax")
        # boolean vector: predicted class index == label class index (argmax over classes)
        correct_prediction = tf.equal(tf.argmax(self.prediction, 1), tf.argmax(self.Y, 1))
        # accuracy = mean of the boolean vector cast to float
        with tf.name_scope('accurary') as scope:  # (sic) misspelled scope kept for graph-name compatibility
            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
            tf.summary.scalar(scope, self.accuracy)

        # weighted cross-entropy loss; it must be computed from the raw logits
        # (self.netout), NOT from the softmax output.
        with tf.name_scope('cross_entropy'):
            cost = tf.multiply(
                tf.nn.softmax_cross_entropy_with_logits(labels=self.Y, logits=self.netout),
                self.weightRate)
            self.cross_entropy = tf.reduce_mean(cost, name='loss_value')
            tf.summary.scalar('cross_entropy_scalar', self.cross_entropy)

        # run any pending update ops (e.g. batch-norm moving averages) before the step
        update_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_op):
            # NOTE: only the "Adam" branch honours self.learnRate; the other
            # optimizers use hard-coded learning rates.
            if self.optimizer == "Adam":
                self.train_step = tf.train.AdamOptimizer(self.learnRate).minimize(
                    self.cross_entropy, global_step=self.global_step, name='train_opt')
            elif self.optimizer == "SGD":
                self.train_step = tf.train.GradientDescentOptimizer(0.2).minimize(
                    self.cross_entropy, global_step=self.global_step, name='train_opt')
            elif self.optimizer == "Adagrad":
                self.train_step = tf.train.AdagradOptimizer(0.01).minimize(
                    self.cross_entropy, global_step=self.global_step, name='train_opt')
            elif self.optimizer == "Momentum":
                self.train_step = tf.train.MomentumOptimizer(
                    learning_rate=0.01, momentum=0.9).minimize(
                        self.cross_entropy, name='train_opt')
            elif self.optimizer == "MomentumNAG":
                self.train_step = tf.train.MomentumOptimizer(
                    learning_rate=0.01, momentum=0.9, use_nesterov=True).minimize(
                        self.cross_entropy, name='train_opt')
            elif self.optimizer == "RMSProp":
                self.train_step = tf.train.RMSPropOptimizer(0.01).minimize(
                    self.cross_entropy, name='train_opt')
            elif self.optimizer == "Adadelta":
                self.train_step = tf.train.AdadeltaOptimizer(1).minimize(
                    self.cross_entropy, name='train_opt')
            else:
                print("optimizer error:  ", self.optimizer)
                sys.exit()

    def load_weights(self, weight_file, sess):
        """Load a numpy weight archive and assign each array into self.parameters.

        NOTE(review): self.parameters is not defined anywhere in this class —
        presumably it is created by CNNnetBasic or a subclass; confirm before use.
        """
        weights = np.load(weight_file)
        keys = sorted(weights.keys())
        for i, k in enumerate(keys):
            print(i, k, np.shape(weights[k]))
            sess.run(self.parameters[i].assign(weights[k]))
