# cnn - MNIST
#
import os
import random
import torch
import torch.nn as nn
import torch.utils.data as Data     # 小批量
import torchvision                  # 提供图像数据集
import matplotlib.pyplot as plt     # 绘图
from matplotlib import cm
import utils
from cnn import CNN

# Hyper-parameters
EPOCH = 1               # number of passes over the training set
BATCH_SIZE = 50         # mini-batch size for the DataLoader
LR = 0.001              # learning rate for the Adam optimizer

MNIST_ROOT_DIR = 'e:/sfxData/DeepLearning'  # local root where the MNIST data lives


def main():
    """Train a CNN on MNIST, printing loss/test accuracy during training
    and a sample of 10 predictions after training finishes."""
    train_data = utils.get_mnist(MNIST_ROOT_DIR, train=True)
    # utils.disp_mnist(train_data)
    # utils.mnist_image(train_data, idx=random.randint(0, 60000))

    # Mini-batch loader; shuffle randomizes sample order each epoch.
    train_loader = Data.DataLoader(
        dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)

    # Evaluate on the first 1000 test samples only (cheap, fast feedback).
    test_data = utils.get_mnist(MNIST_ROOT_DIR, train=False)
    # shape from (1000, 28, 28) to (1000, 1, 28, 28); scale pixels into [0, 1]
    test_x = torch.unsqueeze(test_data.data, dim=1).type(
        torch.FloatTensor)[:1000]/255.0
    test_y = test_data.targets[:1000]  # ground-truth digit labels

    # Build the network.
    cnn = CNN()
    # print(cnn)

    # Optimizer and loss function.
    optimizer = torch.optim.Adam(cnn.parameters(), lr=LR)
    loss_fun = nn.CrossEntropyLoss()

    # Optional t-SNE visualization of the flatten layer (requires sklearn).
    try:
        from sklearn.manifold import TSNE
        # Visualization is deliberately disabled; set to True to enable it.
        HAS_SK = False
    except ImportError:  # only a missing sklearn is expected here
        HAS_SK = False
        print('Please install sklearn for layer visualization')

    plt.ion()  # enable interactive mode so figures update during training

    for epoch in range(EPOCH):
        for step, (b_x, b_y) in enumerate(train_loader):
            output = cnn(b_x)[0]    # cnn returns (logits, flatten-layer)
            loss = loss_fun(output, b_y)
            optimizer.zero_grad()   # clear gradients from the previous step
            loss.backward()         # back-propagate
            optimizer.step()        # apply the gradient update

            if step % 50 == 0:
                # Periodic evaluation on the held-out slice.
                test_output, last_layer = cnn(test_x)
                pred_y = torch.max(test_output, 1)[1].data.numpy()
                accuracy = float((pred_y == test_y.data.numpy()).astype(
                    int).sum()) / float(test_y.size(0))
                print('Epoch: ', epoch, '| train loss: %.4f' %
                      loss.data.numpy(), '| test accuracy: %.2f' % accuracy)

                if HAS_SK:
                    # Project the trained flatten layer to 2-D with t-SNE.
                    tsne = TSNE(perplexity=30, n_components=2,
                                init='pca', n_iter=5000)
                    plot_only = 500
                    low_dim_embs = tsne.fit_transform(
                        last_layer.data.numpy()[:plot_only, :])
                    labels = test_y.numpy()[:plot_only]
                    utils.plot_with_labels(low_dim_embs, labels)
                # if HAS_SK:
            # if step % 50 == 0:
        # for step, (b_x, b_y) in enumerate(train_loader):
    # for epoch in range(EPOCH):

    # BUG FIX: the two sections below were previously indented inside the
    # training loop, turning off interactive mode and printing predictions
    # on every single batch step; they belong after training completes.
    plt.ioff()  # disable interactive mode (interactive off)

    # Print 10 predictions from the test data against the ground truth.
    test_output, _ = cnn(test_x[:10])
    pred_y = torch.max(test_output, 1)[1].data.numpy()
    print(pred_y, 'prediction number')
    print(test_y[:10].numpy(), 'real number')


if __name__ == '__main__':
    # BUG FIX: the call was commented out, so running the script did nothing.
    main()
