#-*-coding:utf-8-*-
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Shorthand for the slim API and a truncated-normal initializer factory.
slim=tf.contrib.slim
trunc_normal=lambda stddev:tf.truncated_normal_initializer(0.0,stddev)

# Interactive session so tensor.eval()/op.run() below work without an
# explicit session argument.
sess=tf.InteractiveSession()

# Downloads (if needed) and loads MNIST with one-hot encoded labels.
mnist=input_data.read_data_sets("MNIST_data/",one_hot=True )

# x: flattened 28x28 grayscale images; y_: one-hot labels over 10 digits.
x=tf.placeholder(tf.float32,[None,784])
y_=tf.placeholder(tf.float32,[None,10])
# Reshape to NHWC image tensors for the convolutional network.
inputs=tf.reshape(x,[-1,28,28,1])

# Generate default arguments for the commonly-used slim layer functions.
def inception_v3_arg_scope(weight_decay=0.00004,
                           stddev=0.1,
                           batch_norm_var_collection='moving_vars'):
    """Return a slim arg_scope holding Inception V3 layer defaults.

    Args:
        weight_decay: L2 regularization strength applied to conv and
            fully-connected weights.
        stddev: stddev of the truncated-normal weight initializer for convs.
        batch_norm_var_collection: name of the graph collection that receives
            the batch-norm moving statistics (default fixed from the
            misspelled 'moving_varts').

    Returns:
        An arg_scope object to be entered with ``with slim.arg_scope(sc):``.

    NOTE(review): this script builds the network below WITHOUT entering the
    returned scope, so these defaults are currently unused — confirm whether
    that is intentional.
    """
    batch_norm_params={
        'decay':0.9997,  # moving-average decay for the BN statistics
        'epsilon':0.001,
        # BN update ops are collected here; they should be run alongside the
        # training op. (Originally the variables dict below was incorrectly
        # nested under this key.)
        'updates_collections':tf.GraphKeys.UPDATE_OPS,
        # Route BN variables into collections; 'moving_mean' was previously
        # misspelled 'noving_mean'.
        'variables_collections':{
            'beta':None,
            'gamma':None,
            'moving_mean':[batch_norm_var_collection],
            'moving_variance':[batch_norm_var_collection],
            }
        }
    # slim.arg_scope supplies default parameter values to the listed ops.
    with slim.arg_scope([slim.conv2d,slim.fully_connected],
                        weights_regularizer=slim.l2_regularizer(weight_decay)):
        # Defaults specific to slim.conv2d: initializer, ReLU, batch norm.
        with slim.arg_scope(
            [slim.conv2d],
            weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params) as sc:
            return sc

scope=None
end_points={}#saves references to key intermediate tensors

# Stem: five conv layers and two pooling layers. With stride 1 and SAME
# padding throughout, the 28x28 spatial size of the MNIST input is preserved.
with tf.variable_scope(scope,'Inception',[inputs]):
    with slim.arg_scope([slim.conv2d,slim.max_pool2d,slim.avg_pool2d],
                        stride=1,padding='SAME'):
        #28*28*1
        net=slim.conv2d(inputs,32,[3,3],scope='Conv2d_1a_3x3')
        #28*28*32
        net=slim.conv2d(net,32,[3,3],scope='Conv2d_2a_3x3')
        #28*28*32
        net=slim.conv2d(net,64,[3,3],scope='Conv2d_2b_3x3')
        #28*28*64

        net=slim.max_pool2d(net,[3,3],scope='MaxPool_3a_3x3')

        # 1x1 conv expands channels cheaply before the 3x3 conv.
        net=slim.conv2d(net,80,[1,1],scope='Conv2d_3b_1x1')
        #28*28*80
        net=slim.conv2d(net,192,[3,3],scope='Conv2d_4a_3x3')
        #28*28*192

        net=slim.max_pool2d(net,[3,3],scope='MaxPool_5a_3x3')

    #Inception Modules: each mixes four parallel branches (1x1, 5x5, double
    #3x3, and pooled) and concatenates them along the channel axis.
    with slim.arg_scope([slim.conv2d,slim.max_pool2d,slim.avg_pool2d],
                        stride=1,padding='SAME'):

        #One-first Mixed_5b 28*28*256
        with tf.variable_scope('Mixed_5b'):
            with tf.variable_scope('Branch_0'):
                branch_0=slim.conv2d(net,64,[1,1],scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1=slim.conv2d(net,48,[1,1],scope='Conv2d_0a_1x1')
                branch_1=slim.conv2d(branch_1,64,[5,5],
                                        scope='Conv2d_0b_5x5')
            with tf.variable_scope('Branch_2'):
                branch_2=slim.conv2d(net,64,[1,1],scope='Conv2d_0a_1x1')
                branch_2=slim.conv2d(branch_2,96,[3,3],
                                        scope='Conv2d_0b_3x3')
                branch_2=slim.conv2d(branch_2,96,[3,3],
                                        scope='Conv2d_0c_3x3')
            with tf.variable_scope('Branch_3'):
                # Average pool followed by a 1x1 conv projection.
                branch_3=slim.avg_pool2d(net,[3,3],scope='AvgPool_0a_3x3')
                branch_3=slim.conv2d(branch_3,32,[1,1],
                                        scope='Conv2d_0b_1x1')
            # 64+64+96+32 = 256 output channels.
            net=tf.concat([branch_0,branch_1,branch_2,branch_3],3)

        #One-second Mixed_5c 28*28*288
        with tf.variable_scope('Mixed_5c'):
            with tf.variable_scope('Branch_0'):
                branch_0=slim.conv2d(net,64,[1,1],scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1=slim.conv2d(net,48,[1,1],scope='Conv2d_0b_1x1')
                branch_1=slim.conv2d(branch_1,64,[5,5],
                                    scope='Conv_1_0c_5x5')
            with tf.variable_scope('Branch_2'):
                branch_2=slim.conv2d(net,64,[1,1],scope='Conv2d_0a_1x1')
                branch_2=slim.conv2d(branch_2,64,[3,3],
                                        scope='Conv2d_0b_3x3')
                branch_2=slim.conv2d(branch_2,96,[3,3],
                                        scope='Conv2d_0c_3x3')
            with tf.variable_scope('Branch_3'):
                branch_3=slim.avg_pool2d(net,[3,3],scope='AvgPool_0a_3x3')
                branch_3=slim.conv2d(branch_3,64,[1,1],
                                         scope='Conv2d_0b_1x1')
            # 64+64+96+64 = 288 output channels.
            net=tf.concat([branch_0,branch_1,branch_2,branch_3],3)
     
        #One-third Mixed_5d 28*28*288
        with tf.variable_scope('Mixed_5d'):
            # NOTE: the first two scopes were lowercase 'branch_0'/'branch_1',
            # inconsistent with every other module; normalized to 'Branch_*'.
            with tf.variable_scope('Branch_0'):
                branch_0=slim.conv2d(net,64,[1,1],scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1=slim.conv2d(net,48,[1,1],scope='Conv2d_0a_1x1')
                branch_1=slim.conv2d(branch_1,64,[5,5],
                                         scope='Conv2d_0b_5x5')
            with tf.variable_scope('Branch_2'):
                branch_2=slim.conv2d(net,64,[1,1],scope='Conv2d_0a_1x1')
                branch_2=slim.conv2d(branch_2,96,[3,3],
                                         scope='Conv2d_0b_3x3')
                branch_2=slim.conv2d(branch_2,96,[3,3],
                                        scope='Conv2d_0c_3x3')
            with tf.variable_scope('Branch_3'):
                # Average pool followed by a 1x1 conv projection.
                branch_3=slim.avg_pool2d(net,[3,3],scope='AvgPool_0a_3x3')
                branch_3=slim.conv2d(branch_3,64,[1,1],
                                        scope='Conv2d_0b_1x1')
            # 64+64+96+64 = 288 output channels.
            net=tf.concat([branch_0,branch_1,branch_2,branch_3],3)


        #Two-first Mixed_6a: grid reduction, 28x28 -> 13*13*768
        with tf.variable_scope('Mixed_6a'):
            with tf.variable_scope('Branch_0'):
                # stride-2 VALID conv halves the spatial size.
                branch_0 = slim.conv2d(net,384,[3,3],stride=2,
                                        padding='VALID', scope='Conv2d_1a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1=slim.conv2d(net,64,[1, 1],scope='Conv2d_0a_1x1')
                branch_1=slim.conv2d(branch_1,96,[3, 3],scope='Conv2d_0b_3x3')
                branch_1=slim.conv2d(branch_1,96,[3, 3],stride=2,
                                        padding='VALID',scope='Conv2d_1a_1x1')
            with tf.variable_scope('Branch_2'):
                branch_2=slim.max_pool2d(net,[3, 3],stride=2,padding='VALID',
                                            scope='MaxPool_1a_3x3')
            # 384+96+288(pooled) = 768 output channels.
            net=tf.concat([branch_0,branch_1,branch_2],3)

        #Two-second Mixed_6b 13*13*768: factorized 7x7 convs (1x7 then 7x1).
        with tf.variable_scope('Mixed_6b'):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(net, 192, [1, 1],
                                        scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(net, 128, [1, 1],
                                        scope='Conv2d_0a_1x1')
                branch_1 = slim.conv2d(branch_1, 128, [1, 7],
                                        scope='Conv2d_0b_1x7')
                branch_1 = slim.conv2d(branch_1, 192, [7, 1],
                                        scope='Conv2d_0c_7x1')
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.conv2d(net, 128, [1, 1],
                                      scope='Conv2d_0a_1x1')
                branch_2 = slim.conv2d(branch_2, 128, [7, 1],
                                      scope='Conv2d_0b_7x1')
                branch_2 = slim.conv2d(branch_2, 128, [1, 7],
                                      scope='Conv2d_0c_1x7')
                branch_2 = slim.conv2d(branch_2, 128, [7, 1],
                                         scope='Conv2d_0d_7x1')
                branch_2 = slim.conv2d(branch_2, 192, [1, 7],
                                         scope='Conv2d_0e_1x7')
            with tf.variable_scope('Branch_3'):
                branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                branch_3 = slim.conv2d(branch_3, 192, [1, 1],
                                      scope='Conv2d_0b_1x1')
            net = tf.concat([branch_0, branch_1, branch_2, branch_3],3)

        #Two-third Mixed_6c 13*13*768: like Mixed_6b with 160-channel bottleneck.
        with tf.variable_scope('Mixed_6c'):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(net, 192, [1, 1],
                                       scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(net, 160, [1, 1],
                                       scope='Conv2d_0a_1x1')
                branch_1 = slim.conv2d(branch_1, 160, [1, 7],
                                         scope='Conv2d_0b_1x7')
                branch_1 = slim.conv2d(branch_1, 192, [7, 1],
                                       scope='Conv2d_0c_7x1')
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.conv2d(net, 160, [1, 1],
                                       scope='Conv2d_0a_1x1')
                branch_2 = slim.conv2d(branch_2, 160, [7, 1],
                                       scope='Conv2d_0b_7x1')
                branch_2 = slim.conv2d(branch_2, 160, [1, 7],
                                       scope='Conv2d_0c_1x7')
                branch_2 = slim.conv2d(branch_2, 160, [7, 1],
                                       scope='Conv2d_0d_7x1')
                branch_2 = slim.conv2d(branch_2, 192, [1, 7],
                                         scope='Conv2d_0e_1x7')
            with tf.variable_scope('Branch_3'):
                branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                branch_3 = slim.conv2d(branch_3, 192, [1, 1],
                                      scope='Conv2d_0b_1x1')
            net = tf.concat([branch_0, branch_1, branch_2, branch_3],3)

        #Two-forth Mixed_6d 13*13*768: structurally identical to Mixed_6c.
        with tf.variable_scope('Mixed_6d'):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(net, 192, [1, 1],
                                       scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(net, 160, [1, 1],
                                       scope='Conv2d_0a_1x1')
                branch_1 = slim.conv2d(branch_1, 160, [1, 7],
                                       scope='Conv2d_0b_1x7')
                branch_1 = slim.conv2d(branch_1, 192, [7, 1],
                                      scope='Conv2d_0c_7x1')
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.conv2d(net, 160, [1, 1],
                                       scope='Conv2d_0a_1x1')
                branch_2 = slim.conv2d(branch_2, 160, [7, 1],
                                      scope='Conv2d_0b_7x1')
                branch_2 = slim.conv2d(branch_2, 160, [1, 7],
                                     scope='Conv2d_0c_1x7')
                branch_2 = slim.conv2d(branch_2, 160, [7, 1],
                                     scope='Conv2d_0d_7x1')
                branch_2 = slim.conv2d(branch_2, 192, [1, 7],
                                     scope='Conv2d_0e_1x7')
            with tf.variable_scope('Branch_3'):
                branch_3 = slim.avg_pool2d(net, [3, 3],
                                            scope='AvgPool_0a_3x3')
                branch_3 = slim.conv2d(branch_3, 192, [1, 1],
                                        scope='Conv2d_0b_1x1')
            net = tf.concat([branch_0, branch_1, branch_2, branch_3],3)

        #Two-fifth Mixed_6e 13*13*768: 192-channel bottleneck variant.
        with tf.variable_scope('Mixed_6e'):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(net, 192, [1, 1],
                                       scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(net, 192, [1, 1],
                                      scope='Conv2d_0a_1x1')
                branch_1 = slim.conv2d(branch_1, 192, [1, 7],
                                       scope='Conv2d_0b_1x7')
                branch_1 = slim.conv2d(branch_1, 192, [7, 1],
                                      scope='Conv2d_0c_7x1')
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.conv2d(net, 192, [1, 1],
                                      scope='Conv2d_0a_1x1')
                branch_2 = slim.conv2d(branch_2, 192, [7, 1],
                                      scope='Conv2d_0b_7x1')
                branch_2 = slim.conv2d(branch_2, 192, [1, 7],
                                      scope='Conv2d_0c_1x7')
                branch_2 = slim.conv2d(branch_2, 192, [7, 1],
                                      scope='Conv2d_0d_7x1')
                branch_2 = slim.conv2d(branch_2, 192, [1, 7],
                                         scope='Conv2d_0e_1x7')
            with tf.variable_scope('Branch_3'):
                branch_3 = slim.avg_pool2d(net, [3, 3],
                                            scope='AvgPool_0a_3x3')
                branch_3 = slim.conv2d(branch_3, 192, [1, 1],
                                       scope='Conv2d_0b_1x1')
            net = tf.concat([branch_0, branch_1, branch_2, branch_3],3)

            # Saved for the (currently disabled) auxiliary classifier below.
            end_points['Mixed_6e'] = net


        #Three-first Mixed_7a: grid reduction, 13x13 -> 6*6*1280
        with tf.variable_scope('Mixed_7a'):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(net, 192, [1, 1],
                                      scope='Conv2d_0a_1x1')
                # stride-2 VALID conv halves the spatial size.
                branch_0 = slim.conv2d(branch_0, 320, [3, 3], stride=2,
                                     padding='VALID',
                                     scope='Conv2d_1a_3x3')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(net, 192, [1, 1],
                                         scope='Conv2d_0a_1x1')
                branch_1 = slim.conv2d(branch_1, 192, [1, 7],
                                         scope='Conv2d_0b_1x7')
                branch_1 = slim.conv2d(branch_1, 192, [7, 1],
                                         scope='Conv2d_0c_7x1')
                branch_1 = slim.conv2d(branch_1, 192, [3, 3],
                                         stride=2,padding='VALID',
                                         scope='Conv2d_1a_3x3')
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.max_pool2d(net, [3, 3],
                                             stride=2, padding='VALID',
                                             scope='MaxPool_1a_3x3')
            # 320+192+768(pooled) = 1280 output channels.
            net = tf.concat([branch_0, branch_1, branch_2],3)

        #Three-second Mixed_7b 6*6*2048: branches fan out into parallel
        #1x3 / 3x1 convs that are themselves concatenated.
        with tf.variable_scope('Mixed_7b'):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(net, 320, [1, 1],
                                     scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(net, 384, [1, 1],
                                       scope='Conv2d_0a_1x1')
                branch_1 = tf.concat([
                      slim.conv2d(branch_1, 384, [1, 3],
                                  scope='Conv2d_0b_1x3'),
                      slim.conv2d(branch_1, 384, [3, 1],
                                  scope='Conv2d_0b_3x1')],3)
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.conv2d(net, 448, [1, 1],
                                         scope='Conv2d_0a_1x1')
                branch_2 = slim.conv2d(
                      branch_2, 384, [3, 3], scope='Conv2d_0b_3x3')
                branch_2 = tf.concat([
                      slim.conv2d(branch_2, 384, [1, 3],
                                  scope='Conv2d_0c_1x3'),
                      slim.conv2d(branch_2, 384, [3, 1],
                                  scope='Conv2d_0d_3x1')],3)
            with tf.variable_scope('Branch_3'):
                branch_3 = slim.avg_pool2d(net, [3, 3],
                                             scope='AvgPool_0a_3x3')
                branch_3 = slim.conv2d(
                      branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')
            # 320+768+768+192 = 2048 output channels.
            net = tf.concat([branch_0, branch_1, branch_2, branch_3],3)

        #Three-third Mixed_7c 6*6*2048: structurally identical to Mixed_7b.
        with tf.variable_scope('Mixed_7c'):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(net, 320, [1, 1],
                                         scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(net, 384, [1, 1],
                                         scope='Conv2d_0a_1x1')
                branch_1 = tf.concat([
                      slim.conv2d(branch_1, 384, [1, 3],
                                  scope='Conv2d_0b_1x3'),
                      slim.conv2d(branch_1, 384, [3, 1],
                                  scope='Conv2d_0c_3x1')],3)
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.conv2d(net, 448, [1, 1],
                                         scope='Conv2d_0a_1x1')
                branch_2 = slim.conv2d(
                      branch_2, 384, [3, 3], scope='Conv2d_0b_3x3')
                branch_2 = tf.concat([
                      slim.conv2d(branch_2, 384, [1, 3],
                                  scope='Conv2d_0c_1x3'),
                      slim.conv2d(branch_2, 384, [3, 1],
                                  scope='Conv2d_0d_3x1')],3)
            with tf.variable_scope('Branch_3'):
                branch_3 = slim.avg_pool2d(net, [3, 3],
                                             scope='AvgPool_0a_3x3')
                branch_3 = slim.conv2d(
                      branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')
            net = tf.concat([branch_0, branch_1, branch_2, branch_3],3)


# Configuration for the classification head.
num_classes=10#number of output classes (10 MNIST digits)
is_training=True
dropout_keep_prob=0.8
prediction_fn=slim.softmax
spatial_squeeze=True  # drop the 1x1 spatial dims from the logits
reuse=None
scope='InceptionV3'


# Classification head: global pooling, dropout, 1x1 conv to class logits.
with tf.variable_scope(scope,'InceptionV3',[inputs,num_classes]
                        ) as scope:

    # Auxiliary logits
    with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                        stride=1, padding='SAME'):

        aux_logits = end_points['Mixed_6e']

        # The block below is a bare triple-quoted string, i.e. disabled
        # auxiliary-classifier code kept for reference; it is never executed.
        '''with tf.variable_scope('AuxLogits'):
            aux_logits = slim.avg_pool2d(
                aux_logits, [5, 5], stride=2, padding='VALID',
                scope='AvgPool_1a_5x5')
            #5*5*768
            aux_logits = slim.conv2d(aux_logits, 128, [1, 1],
                                        scope='Conv2d_1b_1x1')

            aux_logits = slim.conv2d(
                aux_logits, 768, [5,5],
                weights_initializer=trunc_normal(0.01),
                padding='VALID', scope='Conv2d_2a_5x5')
            #1*1*768
            aux_logits = slim.conv2d(
                aux_logits, num_classes, [1, 1], activation_fn=None,
                normalizer_fn=None, weights_initializer=trunc_normal(0.001),
                scope='Conv2d_2b_1x1')
            #1*1*10
            if spatial_squeeze:
                aux_logits = tf.squeeze(aux_logits, [1, 2],
                                        name='SpatialSqueeze')
                
            end_points['AuxLogits'] = aux_logits'''

    # Final pooling and prediction
    with tf.variable_scope('Logits'):
        # 6x6 VALID average pool collapses the spatial grid to 1x1.
        net = slim.avg_pool2d(net, [6, 6], padding='VALID',
                                  scope='AvgPool_1a_6x6')
        #1*1*2048
        net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
        end_points['PreLogits'] = net

        # 1x1 conv acts as the final fully-connected layer; no activation
        # or batch norm so these are raw logits.
        logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                                  normalizer_fn=None, scope='Conv2d_1c_1x1')

        if spatial_squeeze:
            # [N,1,1,10] -> [N,10]
            logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')

    end_points['Logits'] = logits
    # Softmax over the logits gives the class probabilities.
    Predictions=prediction_fn(logits, scope='Predictions')
    end_points['Predictions'] = Predictions
    
# Cross-entropy loss computed from the raw logits via the fused, numerically
# stable op. The original form, -sum(y_ * log(Predictions)), evaluates
# log(0) = -inf once the softmax saturates, producing NaN losses.
cross_entropy=tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
# NOTE(review): if the batch-norm arg scope were applied to the network,
# the UPDATE_OPS collection would also need to run with this train step.
train_step=tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

# Accuracy: fraction of samples whose arg-max prediction matches the label.
correct_prediction=tf.equal(tf.argmax(Predictions,1),
                            tf.argmax(y_,1))
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))

tf.global_variables_initializer().run()

for i in range(20000):
    batch=mnist.train.next_batch(50)

    if i%5==0:
        # Periodically report accuracy on the current training batch.
        train_accuracy=accuracy.eval(feed_dict={x:batch[0],y_:batch[1]})
        print('step %d ,training accuracy %g'%(i,train_accuracy))
    train_step.run(feed_dict={x:batch[0],y_:batch[1]})

print("test accuracy %g"%accuracy.eval(feed_dict={
    x:mnist.test.images,y_:mnist.test.labels}))

    

