import tensorflow as tf
from tensorflow.keras import layers, optimizers, datasets, Sequential
import tensorflow.keras as keras
import numpy as np
import GoogLeNet.InceptionV4.inception_V4 as inception

# Uncomment to enable GPU memory growth (prevents TF from reserving all GPU memory up front).
#physical_devices = tf.config.experimental.list_physical_devices('GPU')
#tf.config.experimental.set_memory_growth(physical_devices[0], True)

# Fix the global random seed for reproducibility.
tf.random.set_seed(22)

# Samples per batch.
# NOTE(review): 5000 is unusually large for a conv net on 28x28 images — confirm it fits in memory.
batchsize = 5000


def preprocess(x, y):
    """Map one (image, label) pair: scale images to floats in [-0.5, 0.5], labels to int32."""
    image = tf.cast(x, dtype=tf.float32) / 255. - 0.5
    label = tf.cast(y, dtype=tf.int32)
    return image, label


# Load MNIST: x_* are uint8 images [N, 28, 28]; y_* are integer class labels [N].
(x_train, y_train), (x_test, y_test) = datasets.mnist.load_data()

# [b, 28, 28] => [b, 28, 28, 1]: add an explicit channel axis for the conv layers.
x_train, x_test = np.expand_dims(x_train, axis=3), np.expand_dims(x_test, axis=3)

# Training pipeline: normalize via preprocess, shuffle with a 10-batch buffer, then batch.
db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
db_train = db_train.map(preprocess).shuffle(10*batchsize).batch(batchsize)

# Test pipeline: normalize and batch only.
# Fix: the test set was previously shuffled with a 10*batchsize (50k-element)
# buffer — pointless work and memory, since evaluation accuracy is
# order-independent. The shuffle is dropped here.
db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))
db_test = db_test.map(preprocess).batch(batchsize)

# Sanity-check one batch's shapes: expect (batchsize, 28, 28, 1) and (batchsize,).
db_iter = iter(db_train)
sample = next(db_iter)
print("batch: ", sample[0].shape, sample[1].shape)

# Instantiate the project's Inception model.
model = inception.Inception(2, 10)  # per original note: first arg = number of residual blocks, second = number of classes
# Build with a concrete input shape so every layer can derive its weight/output shapes.
model.build(input_shape=(None, 28, 28, 1))
model.summary()

# Plain SGD; learning rate 1e-2.
optimizer = optimizers.SGD(learning_rate=1e-2)
# from_logits=True: the loss applies softmax internally, so the model is expected
# to emit raw (unnormalized) scores.
criteon = keras.losses.CategoricalCrossentropy(from_logits=True)  # classifier loss

# Running accuracy metric, reset and re-accumulated each evaluation pass.
acc_meter = keras.metrics.Accuracy()

for epoch in range(200):

    # --- Training pass over the shuffled training set ---
    for step, (x, y) in enumerate(db_train):

        with tf.GradientTape() as tape:
            # [b, 10] raw logits.
            # Fix: pass training=True explicitly. In a custom training loop the
            # unset `training` flag does not resolve to True the way it does
            # under model.fit(), so BatchNorm/Dropout layers were running in
            # inference mode while training.
            logits = model(x, training=True)
            # One-hot targets [b, 10] vs logits [b, 10].
            loss = criteon(tf.one_hot(y, depth=10), logits)

        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

        if step % 20 == 0:
            print(epoch, step, 'loss:', loss.numpy())

    # --- Evaluation pass over the test set ---
    acc_meter.reset_states()
    for x, y in db_test:
        # [b, 10] logits, inference mode.
        logits = model(x, training=False)
        # [b, 10] => [b] predicted class indices.
        pred = tf.argmax(logits, axis=1)
        # Integer labels [b] vs predicted indices [b].
        acc_meter.update_state(y, pred)

    print(epoch, 'evaluation acc:', acc_meter.result().numpy())
