import numpy as np
import matplotlib.pyplot as plt
import struct
import os
import LeNet5         # 引入自己构建的LeNet5神经网络模型;
import normalization  # 引入标准化模块;


def load_mnist(file_dir, is_images=True):
    """Parse an MNIST idx binary file and return its payload as a 2-D array.

    Parameters
    ----------
    file_dir : str
        Path to the idx-format binary file.
    is_images : bool, optional
        True for an image file (idx3 header: magic, count, rows, cols),
        False for a label file (idx1 header: magic, count).
        BUGFIX: the default used to be the *string* 'True', which is always
        truthy — even is_images='False' would have parsed an image header.

    Returns
    -------
    numpy.ndarray
        Shape (num_items, rows * cols) for images, (num_items, 1) for labels.
    """
    # Read the whole file; 'with' guarantees the handle is closed on error too.
    with open(file_dir, 'rb') as bin_file:
        bin_data = bin_file.read()
    # Parse the big-endian file header.
    if is_images:
        # Image file: magic number, image count, row count, column count.
        fmt_header = '>iiii'
        magic, num_images, num_rows, num_cols = struct.unpack_from(fmt_header, bin_data, 0)
    else:
        # Label file: magic number and count only; treat each label as a 1x1 item.
        fmt_header = '>ii'
        magic, num_images = struct.unpack_from(fmt_header, bin_data, 0)
        num_rows, num_cols = 1, 1
    data_size = num_images * num_rows * num_cols
    # Payload is a run of unsigned bytes immediately after the header.
    mat_data = struct.unpack_from('>' + str(data_size) + 'B', bin_data, struct.calcsize(fmt_header))
    mat_data = np.reshape(mat_data, [num_images, num_rows * num_cols])
    print('Load images from %s, number: %d, data shape: %s' % (file_dir, num_images, str(mat_data.shape)))
    return mat_data


def load_data(mnist_dir, train_data_dir, train_label_dir, test_data_dir, test_label_dir):
    """Load the four MNIST files under *mnist_dir*.

    Returns a 4-tuple: (train_images, train_labels, test_images, test_labels).
    """
    print('Loading MNIST data from files...')
    # (filename, is_images) in the exact order the caller unpacks them.
    file_specs = [
        (train_data_dir, True),
        (train_label_dir, False),
        (test_data_dir, True),
        (test_label_dir, False),
    ]
    datasets = [load_mnist(os.path.join(mnist_dir, name), flag) for name, flag in file_specs]
    return tuple(datasets)


if __name__ == '__main__':
    # Paths to the raw MNIST idx files.
    mnist_dir = "mnist_data/"
    train_data_dir = "train-images.idx3-ubyte"
    train_label_dir = "train-labels.idx1-ubyte"
    test_data_dir = "t10k-images.idx3-ubyte"
    test_label_dir = "t10k-labels.idx1-ubyte"

    # Load all four datasets (images + labels, train + test).
    train_images, train_labels, test_images, test_labels = load_data(mnist_dir, train_data_dir, train_label_dir,
                                                                     test_data_dir, test_label_dir)
    print("Got data. ")

    # Hyper-parameters:
    batch_size = 64       # batch size during training
    test_batch = 50       # batch size during testing
    epoch = 50            # number of training epochs
    learning_rate = 1e-3  # learning rate passed to backward()

    # Records for plot:
    train_times = []      # x-axis for training curves: one point per 50-batch window
    train_loss = []       # mean per-sample loss over each 50-batch window
    train_acc = []        # mean accuracy over each 50-batch window
    test_times = []       # x-axis for the test curve: 0-based epoch index
    test_loss = []        # NOTE(review): declared but never filled or plotted
    test_acc = []         # test-set accuracy measured after each epoch
    iteration_times = 0  # number of 50-batch windows completed so far

    myLeNet5 = LeNet5.LeNet5()   # build a LeNet5 network instance

    print("Training begin... ")
    # Train for `epoch` full passes over the training set:
    for E in range(epoch):
        # Running statistics. batch_* reset every 50 batches;
        # epoch_* are accumulated but never read back afterwards.
        batch_loss = 0
        batch_acc = 0
        epoch_loss = 0
        epoch_acc = 0

        # Mini-batch training; any trailing partial batch is dropped by //.
        for i in range(train_images.shape[0] // batch_size):
            img = train_images[i * batch_size:(i + 1) * batch_size].reshape(batch_size, 1, 28, 28) # reshape flat rows to (N, 1, 28, 28)
            img = normalization.normalization(img)  # normalize inputs (see normalization module)
            label = train_labels[i * batch_size:(i + 1) * batch_size]  # labels for this batch
            loss, prediction = myLeNet5.forward(img, label, is_train = True)  # forward pass: loss and per-class scores

            # Accumulate loss for epoch- and window-level statistics:
            epoch_loss += loss
            batch_loss += loss

            # Count correct predictions: argmax over class scores vs. label.
            for j in range(prediction.shape[0]):
                if np.argmax(prediction[j]) == label[j]:
                    epoch_acc += 1
                    batch_acc += 1

            # Backward pass: update the network parameters.
            myLeNet5.backward(learning_rate)

            # Every 50 batches, log and record the windowed averages:
            if (i + 1) % 50 == 0:
                print("epoch: %2d , batch: %3d , avg_batch_acc: %.4f , avg_batch_loss: %.4f , learning_rate: %f "
                      % (E + 1, i + 1, batch_acc / (batch_size * 50), batch_loss / (batch_size * 50), learning_rate))

                iteration_times += 1
                train_times.append(iteration_times)
                train_loss.append(batch_loss / (batch_size * 50))  # per-sample average over the window
                train_acc.append(batch_acc / (batch_size * 50))

                # Reset the window accumulators.
                batch_loss = 0
                batch_acc = 0

        # Evaluate on the test set after every epoch:
        epoch_test_acc = 0
        for j in range(test_images.shape[0] // test_batch):
            img = test_images[j * test_batch:(j + 1) * test_batch].reshape(test_batch, 1, 28, 28)
            img = normalization.normalization(img)
            label = test_labels[j * test_batch:(j + 1) * test_batch]
            _, prediction = myLeNet5.forward(img, label, is_train=False)  # inference only; loss ignored

            for k in range(prediction.shape[0]):
                if np.argmax(prediction[k]) == label[k]:
                    epoch_test_acc += 1
        # Record this epoch's test accuracy (x value is the 0-based epoch index E):
        test_times.append(E)
        test_acc.append(epoch_test_acc / test_images.shape[0])
        print("Test_set_accuracy: %.4f" % (epoch_test_acc / test_images.shape[0]))

    print("Training finished. ")
    # Plot the loss / accuracy curves (SimHei font so CJK labels render):
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False    # keep minus signs rendering with a CJK font

    plt.figure(1)
    plt.plot(train_times, train_loss, color = 'orange', marker = ' ', linestyle = 'dashed', linewidth = 1, markersize = 4)
    plt.xlabel('迭代次数', fontsize = 10)
    plt.ylabel('损失（loss）', fontsize = 10)
    plt.title('训练损失（loss）')
    plt.show()

    plt.figure(2)
    plt.plot(train_times, train_acc, color='orange', marker=' ', linestyle='dashed', linewidth=1, markersize=4)
    plt.xlabel('迭代次数', fontsize = 10)
    plt.ylabel('准确率（acc）', fontsize = 10)
    plt.title('训练准确率（acc）')
    plt.show()

    # print(len(test_times))
    # print(len(test_acc))
    plt.figure(3)
    plt.plot(test_times, test_acc, color='orange', marker=' ', linestyle='dashed', linewidth=1, markersize=4)
    plt.xlabel('迭代次数', fontsize = 10)
    plt.ylabel('准确率（acc）', fontsize = 10)
    plt.title('测试集准确率（acc）')
    plt.show()
