import numpy as np
import tensorflow as tf

import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import warnings

import os
import gzip
import lzma
import codecs

from tensorflow.keras.datasets import mnist

# Silence library warnings and use a large default figure size for image grids.
warnings.filterwarnings("ignore")
plt.rcParams['figure.figsize'] = [20, 20]


def unpickle(file):
    """Load and return one pickled object from *file* (bytes-keyed, as used
    by CIFAR-style dataset archives).

    NOTE(review): unpickling untrusted files can execute arbitrary code —
    only use this on dataset files you trust.
    """
    import pickle
    with open(file, 'rb') as fo:
        # Renamed from `dict`, which shadowed the builtin.
        data = pickle.load(fo, encoding='bytes')
    return data


def plot_images(images, labels, shape=(3, 3)):
    """Display the first shape[0]*shape[1] entries of *images* in a grid,
    titling each axis with the corresponding entry of *labels*.

    squeeze=False keeps the axes array 2-D, so 1xN and 1x1 grids work too
    (the original iterated rows-of-axes and crashed for those shapes).
    """
    fig, axes = plt.subplots(shape[0], shape[1], squeeze=False)
    for i, ax in enumerate(axes.flat):
        ax.imshow(images[i])
        ax.set_title(labels[i])


def plot_images_no_title(images, shape=(3, 3)):
    """Display the first shape[0]*shape[1] entries of *images* in a grid,
    without titles.

    squeeze=False keeps the axes array 2-D, so 1xN and 1x1 grids work too
    (the original iterated rows-of-axes and crashed for those shapes).
    """
    fig, axes = plt.subplots(shape[0], shape[1], squeeze=False)
    for i, ax in enumerate(axes.flat):
        ax.imshow(images[i])


def get_int(b):
    """Interpret the bytes *b* as a big-endian unsigned integer.

    IDX file headers store all fields as 32-bit big-endian integers;
    int.from_bytes replaces the old hex-encode round trip through codecs.
    """
    return int.from_bytes(b, 'big')


def open_maybe_compressed_file(path):
    """Open *path* for binary reading, transparently decompressing
    .gz and .xz files; anything else is opened as a plain file."""
    openers = {'.gz': gzip.open, '.xz': lzma.open}
    for suffix, opener in openers.items():
        if path.endswith(suffix):
            return opener(path, 'rb')
    return open(path, 'rb')


def read_idx3_ubyte(path):
    """Read an IDX3-ubyte image file (the MNIST/QMNIST image format).

    Returns a flat 1-D uint8 tf tensor of length n_images * rows * cols;
    callers reshape it to (n, 28, 28, 1) themselves.

    Raises ValueError on a wrong magic number or a truncated payload
    (the original used `assert`, which is silently stripped under -O).
    """
    with open_maybe_compressed_file(path) as f:
        data = f.read()
    # IDX magic for unsigned-byte, 3-dimensional data is 0x0803.
    magic = get_int(data[:4])
    if magic != 8 * 256 + 3:
        raise ValueError("not an idx3-ubyte file (magic=%d): %s" % (magic, path))
    length = get_int(data[4:8])
    num_rows = get_int(data[8:12])
    num_cols = get_int(data[12:16])
    parsed = np.frombuffer(data, dtype=np.uint8, offset=16)
    if parsed.size != length * num_rows * num_cols:
        raise ValueError("truncated idx3-ubyte file: %s" % path)
    return tf.convert_to_tensor(parsed)


def read_idx2_int(path):
    """Read an IDX2-int label file (the QMNIST extended-label format).

    Returns a flat 1-D int32 tf tensor of length n_rows * width; callers
    reshape it to (n, 8) and take column 0 as the class digit.

    Raises ValueError on a wrong magic number or a truncated payload
    (the original used `assert`, which is silently stripped under -O).
    """
    with open_maybe_compressed_file(path) as f:
        data = f.read()
    # IDX magic for 32-bit-int, 2-dimensional data is 0x0C02.
    magic = get_int(data[:4])
    if magic != 12 * 256 + 2:
        raise ValueError("not an idx2-int file (magic=%d): %s" % (magic, path))
    length = get_int(data[4:8])
    width = get_int(data[8:12])
    # The payload is stored big-endian; astype('i4') converts to native order.
    parsed = np.frombuffer(data, dtype=np.dtype('>i4'), offset=12)
    if parsed.size != length * width:
        raise ValueError("truncated idx2-int file: %s" % path)
    return tf.convert_to_tensor(parsed.astype('i4'))


# Load MNIST data via Keras (downloads/caches automatically).
(X_train_mnist, y_train_mnist), (X_test_mnist, y_test_mnist) = mnist.load_data()

#X_mnist = np.concatenate((X_train_mnist, X_test_mnist))
#y_mnist = np.concatenate((y_train_mnist, y_test_mnist))

# Preprocess MNIST to match our preprocessing:
# add a trailing channel axis and scale pixels from [0, 255] to [0, 1].
X_mnist = X_train_mnist.reshape(-1, 28, 28, 1)
X_mnist = X_mnist.astype(np.float32) / 255
y_mnist = y_train_mnist

# Preprocess the held-out MNIST test split the same way
# (used for the final evaluation at the bottom of the script).
X_test_mnist = X_test_mnist.reshape(-1, 28, 28, 1)
X_test_mnist = X_test_mnist.astype(np.float32) / 255
#y_test_mnist = tf.keras.utils.to_categorical(y_test_mnist, num_classes=10)
# final dataset shape
print("MNIST image dataset shape:", X_mnist.shape)

# plot_images(X_mnist[:9], y_mnist[:9], shape=(3, 3))
# lt.show()


# Read QMNIST training data from local IDX files.
# NOTE(review): "qminst" below is a typo for "qmnist" — rename when convenient.
qmnist_data = "d:/qmnist-main/qmnist-train-images-idx3-ubyte.gz"
qminst_label = "d:/qmnist-main/qmnist-train-labels-idx2-int.gz"

qmnist = read_idx3_ubyte(qmnist_data)
y_qmnist = read_idx2_int(qminst_label)

# we reshape and normalize the data
X_qmnist = np.array(qmnist, dtype="float32") / 255
X_qmnist = X_qmnist.reshape(-1, 28, 28, 1)

# Convert the EagerTensor to a NumPy array, reshape to 2-D
# (QMNIST stores 8 label/metadata columns per image), and keep
# only column 0, the class digit 0-9.
y_qmnist = np.array(y_qmnist)
y_qmnist = y_qmnist.reshape(-1, 8)
y_qmnist = y_qmnist[:, 0]

print("QMNIST image dataset shape:", X_qmnist.shape)

# plot_images(X_qmnist[6000:6009], y_qmnist[6000:6009], shape=(3, 3))


# Read QMNIST test data (same IDX layout as the training files).
qmnist_test_data = "d:/qmnist-main/qmnist-test-images-idx3-ubyte.gz"
qminst_test_label = "d:/qmnist-main/qmnist-test-labels-idx2-int.gz"

qmnist_test = read_idx3_ubyte(qmnist_test_data)
y_qmnist_test = read_idx2_int(qminst_test_label)

# we reshape and normalize the data
X_qmnist_test = np.array(qmnist_test, dtype="float32") / 255
X_qmnist_test = X_qmnist_test.reshape(-1, 28, 28, 1)

# Convert the EagerTensor to a NumPy array, reshape to 2-D
# (8 label/metadata columns per image), and keep only column 0,
# the class digit 0-9.
y_qmnist_test = np.array(y_qmnist_test)
y_qmnist_test = y_qmnist_test.reshape(-1, 8)
y_qmnist_test = y_qmnist_test[:, 0]

print("QMNIST test image dataset shape:", X_qmnist_test.shape)
# plot_images(X_qmnist_test[:9], y_qmnist_test[:9], shape=(3, 3))
# plt.show()

# Combine MNIST and QMNIST (train + test splits of QMNIST are both used
# for training; the final evaluation uses the MNIST test split instead).
x_train = np.concatenate((X_mnist, X_qmnist, X_qmnist_test))
y_train = np.concatenate((y_mnist, y_qmnist, y_qmnist_test))

print("Train image dataset shape:", x_train.shape)


# Real-time data augmentation; validation_split holds out 10% of the
# combined dataset for validation.
datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    rotation_range=20,        # random rotations up to +/-20 degrees
    width_shift_range=0.20,   # horizontal shifts up to 20% of image width
    shear_range=15,           # shear angle in degrees
    zoom_range=0.10,          # random zoom up to +/-10%
    validation_split=0.1,
    horizontal_flip=False     # digits are not mirror-symmetric
)

# Training stream: augmented batches drawn from the 'training' subset.
train_generator = datagen.flow(
    x_train,
    y_train,
    batch_size=256,
    subset='training',
)

# Validation stream: drawn from the held-out 'validation' subset.
# NOTE(review): flow() applies the same augmentations to validation
# batches — consider a separate, augmentation-free generator. Confirm
# whether that was intentional.
validation_generator = datagen.flow(
    x_train,
    y_train,
    batch_size=64,
    subset='validation',
)


def create_model():
    """Build and compile the digit-classification CNN.

    Architecture: three conv stages (32 -> 64x2 -> 128x2 filters, each
    followed by 2x2 max-pooling), then three dense layers with dropout,
    ending in a 10-way classification head.

    Fix: the output layer previously used a sigmoid activation, but
    sparse_categorical_crossentropy (with from_logits=False, the default)
    expects a probability distribution over classes — softmax provides
    that; independent sigmoids do not sum to 1.
    """
    model = tf.keras.Sequential([
        # Redundant when inputs are already (28, 28, 1), but harmless and
        # lets the model also accept flat (784,) inputs.
        tf.keras.layers.Reshape((28, 28, 1)),
        tf.keras.layers.Conv2D(filters=32, kernel_size=(5, 5), activation="relu", padding="same",
                               input_shape=(28, 28, 1)),
        tf.keras.layers.MaxPool2D((2, 2)),

        tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation="relu", padding="same"),
        tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation="relu", padding="same"),
        tf.keras.layers.MaxPool2D((2, 2)),

        tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation="relu", padding="same"),
        tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation="relu", padding="same"),
        tf.keras.layers.MaxPool2D((2, 2)),

        tf.keras.layers.Flatten(),
        # NOTE(review): sigmoid hidden activations are unusual for a CNN
        # head (ReLU is typical) — left unchanged to preserve training
        # behavior; confirm before switching.
        tf.keras.layers.Dense(512, activation="sigmoid"),
        tf.keras.layers.Dropout(0.25),

        tf.keras.layers.Dense(512, activation="sigmoid"),
        tf.keras.layers.Dropout(0.25),

        tf.keras.layers.Dense(256, activation="sigmoid"),
        tf.keras.layers.Dropout(0.1),

        # softmax: proper class distribution for the sparse CE loss below.
        tf.keras.layers.Dense(10, activation="softmax")
    ])

    model.compile(
        optimizer="adam",
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy']
    )

    return model


model = create_model()

# Shrink the learning rate 10x whenever validation loss plateaus for
# 5 epochs, down to a floor of 1e-6.
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                 factor=0.1,
                                                 patience=5,
                                                 min_lr=0.000001,
                                                 verbose=1)

# Keep only the best weights (lowest validation loss) on disk.
checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath='model_qmnist.hdf5',
                                                monitor='val_loss',
                                                save_best_only=True,
                                                save_weights_only=True,
                                                verbose=1)

history = model.fit(train_generator,
                              epochs=100,
                              validation_data=validation_generator,
                              callbacks=[reduce_lr, checkpoint],
                              verbose=1)
model.summary()

# Step 5: evaluate the model on the untouched MNIST test split.
# NOTE(review): this evaluates the final-epoch weights, not the
# checkpointed best — load 'model_qmnist.hdf5' first if the best
# weights are wanted. Confirm intent.
loss, acc = model.evaluate(X_test_mnist, y_test_mnist)
print("train model, accuracy:{:5.2f}%".format(100 * acc))