from __future__ import division, print_function, absolute_import  

import tensorflow as tf
import numpy as np
import pandas as pd

# Training Parameters  
learning_rate = 0.001  
num_steps = 2000  
batch_size = 128  
  
# Network Parameters  
num_input = 784 # MNIST data input (img shape: 28*28)  
num_classes = 10 # MNIST total classes (0-9 digits)  
dropout = 0.25 # Dropout, probability to drop a unit (the 'rate' argument of tf.layers.dropout)

# Create the neural network  
def conv_net(x_dict, n_classes, dropout, reuse, is_training):  
      
    # Define a scope for reusing the variables  
    with tf.variable_scope('ConvNet', reuse=reuse):  
        # TF Estimator input is a dict, in case of multiple inputs  
        # MNIST data input is a 1-D vector of 784 features (28*28 pixels)  
        # Reshape to match picture format [Height x Width x Channel]  
        # Tensor input become 4-D: [Batch Size, Height, Width, Channel]  
        x = x_dict['images']
        x = tf.reshape(x, shape=[-1, 28, 28, 1])  
  
        # Convolution Layer with 32 filters and a kernel size of 5  
        conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)  
        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2  
        conv1 = tf.layers.max_pooling2d(conv1, 2, 2)  
  
        # Convolution Layer with 64 filters and a kernel size of 3  
        conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)  
        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2  
        conv2 = tf.layers.max_pooling2d(conv2, 2, 2)  
  
        # Flatten the data to a 1-D vector for the fully connected layer  
        fc1 = tf.contrib.layers.flatten(conv2)  
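        # Shape trace (with the default 'valid' padding of tf.layers.conv2d):
        # [?, 28, 28, 1] -> conv 5x5 -> [?, 24, 24, 32] -> pool -> [?, 12, 12, 32]
        # -> conv 3x3 -> [?, 10, 10, 64] -> pool -> [?, 5, 5, 64] -> flatten -> [?, 1600]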
  
        # Fully connected layer
        fc1 = tf.layers.dense(fc1, 512)  
        # Apply Dropout (if is_training is False, dropout is not applied)  
        fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)  
  
        # Output layer, class prediction  
        out = tf.layers.dense(fc1, n_classes)  
  
    return out  

def model_fn(features, labels, mode):  
      
    # Build the neural network  
    # Because dropout behaves differently at training and prediction time, we
    # need to create two distinct computation graphs that still share the same weights.
    logits_train = conv_net(features, num_classes, dropout, reuse=False, is_training=True)  
    logits_test = conv_net(features, num_classes, dropout, reuse=True, is_training=False)  
      
    # Predictions  
    pred_classes = tf.argmax(logits_test, axis=1)  
    pred_probas = tf.nn.softmax(logits_test)  
      
    # If prediction mode, early return  
    if mode == tf.estimator.ModeKeys.PREDICT:  
        return tf.estimator.EstimatorSpec(mode, predictions=pred_classes)   
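        # (If probabilities are also needed at inference time, 'predictions' could
        # instead be a dict, e.g. {'class_ids': pred_classes, 'probabilities': pred_probas}.)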
          
    # Define loss and optimizer  
    loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(  
        logits=logits_train, labels=tf.cast(labels, dtype=tf.int32)))  
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)  
    train_op = optimizer.minimize(loss_op, global_step=tf.train.get_global_step())  
      
    # Evaluate the accuracy of the model  
    acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes)  
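    # tf.metrics.accuracy returns an (accuracy, update_op) pair, which is
    # exactly the format eval_metric_ops expects below.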
      
    # TF Estimators require an EstimatorSpec that specifies
    # the different ops for training, evaluating, etc.
    estim_specs = tf.estimator.EstimatorSpec(  
      mode=mode,  
      predictions=pred_classes,  
      loss=loss_op,  
      train_op=train_op,  
      eval_metric_ops={'accuracy': acc_op})  
    return estim_specs

# Build the Estimator
model = tf.estimator.Estimator(model_fn=model_fn, model_dir='D:/kaggle/mnist2/model1/')
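# model_dir stores checkpoints and TensorBoard event files; if a checkpoint
# already exists there, training resumes from it instead of starting fresh.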

# Data import (Kaggle digit-recognizer train.csv: column 0 is the label, columns 1-784 the pixels)
data_train = pd.read_csv('D:/kaggle/mnist/train.csv')
data_np = data_train.values
# Training set: first 40000 rows
data_y = data_np[0:40000, 0]
data_x = data_np[0:40000, 1:]  # pixel columns
data_x = np.multiply(data_x, 1.0 / 255)  # scale pixels from [0, 255] to [0, 1]
data_x = data_x.astype(np.float32)
# Hold-out test set: last 2000 rows
test_x = data_np[40000:42000, 1:]
test_x = np.multiply(test_x, 1.0 / 255)
test_x = test_x.astype(np.float32)
test_y = data_np[40000:42000, 0]
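
# Optional sanity check: with the 40000/2000 split above, the shapes should be
# (40000, 784)/(40000,) for training and (2000, 784)/(2000,) for the hold-out.
print('train:', data_x.shape, data_y.shape, '| test:', test_x.shape, test_y.shape)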

# Define the input function for training  
input_fn = tf.estimator.inputs.numpy_input_fn(  
    x={'images': data_x}, y=data_y,  
    batch_size=batch_size, num_epochs=20, shuffle=True)  
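# Note: training stops at whichever limit is hit first: the num_steps batches
# passed to train() below, or num_epochs passes over the training data.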
# Train the Model  
model.train(input_fn, steps=num_steps)  
# Evaluate the Model  
# Define the input function for evaluating  
input_fn = tf.estimator.inputs.numpy_input_fn(  
    x={'images': test_x}, y=test_y,  
    batch_size=batch_size, shuffle=False)  
# Use the Estimator's 'evaluate' method and report the hold-out accuracy
e = model.evaluate(input_fn)
print("Testing Accuracy:", e['accuracy'])
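
# A minimal inference sketch (assumption: we reuse the hold-out test_x defined
# above; Estimator.predict yields one predicted class per input row, since
# model_fn returns predictions=pred_classes).
pred_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': test_x}, batch_size=batch_size, shuffle=False)
predicted = list(model.predict(pred_input_fn))
print('First 10 predicted digits:', predicted[:10])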
