#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 19 14:09:35 2021

@author: ljl
"""

import tensorflow as tf
import numpy as np
# Layer helper functions.
# Note: convolution and pooling are not applied across the batch and channel dimensions.
class ALAXNET(object):
    """Small LeNet/AlexNet-style CNN: two conv+max-pool stages followed by
    two fully-connected layers with dropout.

    Builds, on construction, the forward graph plus loss, train op and
    accuracy op (TensorFlow 1.x graph mode).

    Args:
        x: input image batch, NHWC float tensor (comments below assume
           28x28 inputs, e.g. [batch, 28, 28, channels] -- confirm with caller).
        y: one-hot label batch, shape [batch, classNum].
        keepPro: dropout keep probability applied after the first FC layer.
        learnrate: learning rate handed to the (default Adam) optimizer.
        classNum: number of output classes.
    """
    def __init__(self, x, y, keepPro, learnrate, classNum):
        self.X = x
        self.Y = y
        self.C = int(x.get_shape()[-1])   # input channel count
        self.KEEPPRO = keepPro
        self.learnRate = learnrate
        self.CLASSNUM = classNum
        self.scal = tf.constant(5.0, name="scal")
        self.version = tf.constant(1.0, name="version")

        # step counter, incremented by optimizers that receive global_step
        self.global_step = tf.Variable(0, trainable=False)

        # build the whole graph eagerly at construction time
        self.buildCNN()

    def weight_variable(self, shape):
        """Create a weight Variable initialized from a truncated normal (std 0.1)."""
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial)

    def bias_variable(self, shape):
        """Create a bias Variable initialized to a small positive constant (0.1)."""
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)

    def convLayer(self, x, kHeight, kWidth, strideX, strideY,
                  featureNum, name, padding="SAME"):
        """2-D convolution + bias, followed by ReLU.

        Args:
            x: NHWC input tensor.
            kHeight, kWidth: kernel size.
            strideX, strideY: horizontal / vertical strides.
            featureNum: number of output channels.
            name: variable scope name.
            padding: "SAME" (default) or "VALID".
        """
        channel = int(x.get_shape()[-1])
        with tf.variable_scope(name):
            w = self.weight_variable(shape=[kHeight, kWidth, channel, featureNum])
            b = self.bias_variable(shape=[featureNum])
            # NHWC stride layout: [batch, height, width, channel]
            featureMap = tf.nn.conv2d(x, w, strides=[1, strideY, strideX, 1],
                                      padding=padding) + b
            return tf.nn.relu(featureMap)

    def maxPoolLayer(self, x, kHeight, kWidth, strideX, strideY, name, padding="SAME"):
        """Max pooling.

        FIX: strides were previously passed as [1, strideX, strideY, 1],
        inconsistent with the NHWC order used in convLayer; corrected to
        [1, strideY, strideX, 1].  No behavioral change for the square
        strides used in buildCNN.
        """
        return tf.nn.max_pool(x, ksize=[1, kHeight, kWidth, 1],
                              strides=[1, strideY, strideX, 1],
                              padding=padding, name=name)

    def dropout(self, x, keepPro, name=None):
        """Dropout wrapper.

        BUGFIX: `name` must be passed by keyword -- the third positional
        argument of TF1 tf.nn.dropout is `noise_shape`, so the previous
        positional call would fail whenever a name string was supplied.
        """
        return tf.nn.dropout(x, keepPro, name=name)

    def fcLayer(self, x, inputD, outputD, reluFlag, name):
        """Fully-connected layer via tf.layers.dense.

        The dead hand-rolled `if 0:` implementation was removed.
        `inputD` and `name` are unused by tf.layers.dense but kept for
        interface compatibility with existing callers.
        """
        activation = tf.nn.relu if reluFlag else None
        return tf.layers.dense(x, outputD, activation=activation)

    def buildCNN(self):
        """Assemble the graph: conv/pool x2, FC head, loss, train op, accuracy."""
        # Stage 1: SAME conv keeps 28x28, 2x2 pool halves it to 14x14.
        conv1 = self.convLayer(self.X, 5, 5, 1, 1, 32, "conv1")
        pool1 = self.maxPoolLayer(conv1, 2, 2, 2, 2, "pool1")

        # Stage 2: 14x14 -> 14x14 -> 7x7, now with 64 feature maps.
        conv2 = self.convLayer(pool1, 5, 5, 1, 1, 64, "conv2")
        h_pool2 = self.maxPoolLayer(conv2, 2, 2, 2, 2, "pool2")

        # Flatten everything except the batch dimension (e.g. 7*7*64).
        # Equivalent to the old h_pool2[0].shape product, without the slice op;
        # requires a statically known spatial shape, as before.
        fc_len_value = int(np.prod(h_pool2.get_shape().as_list()[1:]))
        h_pool2_flat = tf.reshape(h_pool2, [-1, fc_len_value])

        # FC head: 1024-unit hidden layer with dropout, then class logits.
        fc1 = self.fcLayer(h_pool2_flat, fc_len_value, 1024, True, "fc1")
        fc1_drop = tf.nn.dropout(fc1, self.KEEPPRO)
        fc2 = self.fcLayer(fc1_drop, 1024, self.CLASSNUM, False, "fc2")

        # Class probabilities, used for inference and the accuracy op.
        self.prediction = tf.nn.softmax(fc2, name='softmax')

        # BUGFIX: the loss must be computed from the raw logits (fc2), NOT
        # from self.prediction -- softmax_cross_entropy_with_logits applies
        # softmax internally, so feeding already-softmaxed values applied
        # softmax twice and flattened the gradients.
        self.cross_entropy = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=self.Y, logits=fc2))

        # Optimizer selection; "R" matches no branch, so Adam is used.
        optimizer = "R"
        if optimizer == "SGD":
            self.train_step = tf.train.GradientDescentOptimizer(0.2).minimize(
                self.cross_entropy, global_step=self.global_step)
        elif optimizer == "Adagrad":
            self.train_step = tf.train.AdagradOptimizer(0.01).minimize(
                self.cross_entropy, global_step=self.global_step)
        elif optimizer == "Momentum":
            self.train_step = tf.train.MomentumOptimizer(
                learning_rate=0.01, momentum=0.9).minimize(self.cross_entropy)
        elif optimizer == "MomentumNAG":
            self.train_step = tf.train.MomentumOptimizer(
                learning_rate=0.01, momentum=0.9,
                use_nesterov=True).minimize(self.cross_entropy)
        elif optimizer == "RMSProp":
            self.train_step = tf.train.RMSPropOptimizer(self.learnRate).minimize(
                self.cross_entropy)
        elif optimizer == "Adadelta":
            self.train_step = tf.train.AdadeltaOptimizer(1).minimize(
                self.cross_entropy)
        else:
            self.train_step = tf.train.AdamOptimizer(self.learnRate).minimize(
                self.cross_entropy)

        # Accuracy: fraction of samples whose argmax prediction matches the label.
        correct_prediction = tf.equal(tf.argmax(self.prediction, 1),
                                      tf.argmax(self.Y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        
