import tensorflow as tf
from tensorflow import keras as ks
from tensorflow.keras import Sequential, layers, utils, losses, optimizers
import numpy as np
import matplotlib.pyplot as plt

def get_datasets():
    """Load MNIST and return batched tf.data pipelines.

    Returns:
        (train_dataset, test_dataset):
            train_dataset yields (image, one-hot label) batches of 1000,
            shuffled over the full training set each epoch;
            test_dataset yields (image, integer label) batches of 1000.
            Images are scaled to [0, 1] float32 with shape (28, 28).
    """
    (x_train, y_train), (x_test, y_test) = ks.datasets.mnist.load_data('./mnist.npz')
    # Cast before scaling: plain `uint8 / 255` would silently produce
    # float64 and double the memory footprint for no benefit.
    x_train = x_train.astype('float32') / 255
    x_test = x_test.astype('float32') / 255
    # Training labels are one-hot for CategoricalCrossentropy; test labels
    # stay as integers for SparseCategoricalAccuracy in evaluation.
    y_train = utils.to_categorical(y_train)

    train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    # A shuffle buffer covering the whole training set gives a uniform
    # shuffle; the previous buffer of 6000 only shuffled within a sliding
    # window of one tenth of the 60000 samples.
    train_dataset = train_dataset.shuffle(len(x_train)).batch(1000)

    test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
    test_dataset = test_dataset.batch(1000)

    return train_dataset, test_dataset

# def load_data():
#     (x_train, y_train), (x_test, y_test) = mnist.load_data('./mnist.npz')
#     number = 10000
#     x_train = x_train[0:number]
#     y_train = y_train[0:number]
#     x_train = x_train.reshape(number, 28*28)
#     x_test = x_test.reshape(x_test.shape[0], 28*28)
#     x_train = x_train.astype('float32')
#     x_test = x_test.astype('float32')
#     y_train = np_utils.to_categorical(y_train, 10)
#     # y = tf.one_hot(y, depth=10)
#     y_test = np_utils.to_categorical(y_test, 10)
#
#     x_train = x_train / 255
#     x_test = x_test / 255
#     return (x_train, y_train), (x_test, y_test)

def get_model():
    """Build a LeNet-style CNN for 28x28x1 MNIST digits.

    Returns:
        A built (and summarized) Sequential model whose output is a softmax
        probability distribution over the 10 digit classes — this matches a
        cross-entropy loss configured with from_logits=False.
    """
    net_model = Sequential([
        layers.Conv2D(6, 3, (1, 1)),                   # 6 filters, 3x3 -> 26x26x6
        layers.MaxPooling2D((2, 2), 2),                # halve H and W -> 13x13x6
        layers.ReLU(),
        layers.Conv2D(16, kernel_size=3, strides=1),   # second conv: 16 3x3 kernels -> 11x11x16
        layers.MaxPooling2D(pool_size=2, strides=2),   # halve H and W -> 5x5x16
        layers.ReLU(),                                 # activation
        layers.Conv2D(120, kernel_size=5, strides=1),  # 5x5 conv collapses to 1x1x120
        layers.ReLU(),
        layers.Flatten(),
        layers.Dense(84, activation='relu'),           # fully connected, 84 units
        # BUG FIX: the original applied a ReLU after the final Dense(10),
        # which zeroes every negative logit and breaks the probability-based
        # cross-entropy used in training (from_logits=False). A softmax
        # output makes the model emit proper class probabilities.
        layers.Dense(10, activation='softmax'),        # fully connected, 10 classes
    ])
    # `None` batch dimension accepts any batch size instead of hard-coding 1000.
    net_model.build(input_shape=(None, 28, 28, 1))
    net_model.summary()
    return net_model


def train_model(epoch=3):
    """Train the CNN on MNIST with a custom loop and report test accuracy.

    Args:
        epoch: number of full passes over the training set (default 3).

    Side effects: prints per-step progress, shows a loss scatter plot via
    draw(), and prints the final test-set accuracy.
    """
    train_dataset, test_dataset = get_datasets()
    net_model = get_model()
    # Model output is treated as probabilities, hence from_logits=False.
    # (The original also created an identical unused `criteon` loss — removed.)
    ls_fn = losses.CategoricalCrossentropy(from_logits=False)
    optimizer = optimizers.SGD(learning_rate=1e-3)
    loss_set = []
    for e in range(epoch):
        for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
            # Add the trailing channel axis: (B, 28, 28) -> (B, 28, 28, 1).
            # No gradient w.r.t. the input is needed, so this can live
            # outside the tape.
            x_batch_train = tf.expand_dims(x_batch_train, axis=3)
            with tf.GradientTape() as tape:
                out = net_model(x_batch_train)
                loss = ls_fn(y_batch_train, out)
            loss_set.append(float(loss))
            grads = tape.gradient(loss, net_model.trainable_variables)
            optimizer.apply_gradients(zip(grads, net_model.trainable_variables))
            print('{}.{}'.format(e, step))
        print('epoch over:', e)
    draw(loss_set)

    # Evaluation. BUG FIX: SparseCategoricalAccuracy expects
    # (y_true, y_pred) — integer labels first, per-class score vectors
    # second. The original swapped the arguments AND argmax-ed the
    # predictions first, so the metric compared class ids as if they were
    # score vectors and reported a meaningless number.
    val_acc_metric = ks.metrics.SparseCategoricalAccuracy()
    for step, (x_test, y_test) in enumerate(test_dataset):
        x_test = tf.expand_dims(x_test, axis=3)
        out = net_model(x_test)
        val_acc_metric(y_test, out)
    val_acc = val_acc_metric.result()
    val_acc_metric.reset_states()
    print('result: {}'.format(val_acc))

def draw(y):
    """Scatter-plot a sequence of values against their indices and show it."""
    indices = range(len(y))
    plt.scatter(indices, y)
    plt.show()

if __name__ == '__main__':
    # Script entry point: run the full train-and-evaluate pipeline
    # with the default number of epochs.
    train_model()