import numpy as np
import torch as pt
from torch.utils.data import DataLoader, TensorDataset
import torchvision as ptv
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
import sys
import os
from sklearn.preprocessing import StandardScaler

# Reproducibility: seed both NumPy (shuffling/splits) and PyTorch (weight init).
np.random.seed(777)
pt.manual_seed(777)

VER = 'v2.0'        # model version tag, becomes part of the checkpoint path
ALPHA = 0.001       # Adam learning rate
N_EPOCHS = 1        # number of passes over the training set
BATCH_SIZE = 64     # mini-batch size for the DataLoader
VAL_RATE = 0.1      # fraction of the MNIST test set held out for validation
FILE_NAME = os.path.basename(__file__)
# Checkpoint location: _save/<script name>/<version>/model_stat.tmp.dat
SAVE_PATH = os.path.join('_save', FILE_NAME, VER, 'model_stat.tmp.dat')

# --- prepare data: load MNIST from a local directory (no download) ---
ROOT_DIR = '../../../../../large_data/DL2/pt/mnist'
if not os.path.exists(ROOT_DIR):
    print('Data directory wrong!')
    sys.exit(1)

print('Loading ...')
# ToTensor is stateless, so one instance can serve both datasets.
to_tensor = ptv.transforms.ToTensor()
mnist_train = ptv.datasets.MNIST(ROOT_DIR, train=True,
                                 transform=to_tensor, download=False)
mnist_test = ptv.datasets.MNIST(ROOT_DIR, train=False,
                                transform=to_tensor, download=False)

M_TRAIN = len(mnist_train.targets)
print('x_train len', M_TRAIN)
N_CLS = len(pt.unique(mnist_train.targets))
print('N_CLS', N_CLS)

ds = pt.utils.data.DataLoader(dataset=mnist_train,
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              drop_last=True)
# drop_last=True discards the remainder batch, so floor division matches
# the number of batches the loader actually yields.
N_BATCHES = M_TRAIN // BATCH_SIZE

# Build test/validation tensors directly from the raw uint8 images,
# scaled to [0, 1] to match what ToTensor does for the training loader.
x_test = mnist_test.data.double() / 255.
y_test = mnist_test.targets
M_TEST, PIC_H, PIC_W = x_test.shape
print('x_test.shape', M_TEST, PIC_H, PIC_W)
perm = pt.randperm(M_TEST)  # shuffle before splitting so the split is random
x_test, y_test = x_test[perm], y_test[perm]
# Carve off the last VAL_RATE fraction as the validation set.
M_VAL = int(np.ceil(M_TEST * VAL_RATE))
M_TEST -= M_VAL
x_test, x_val = pt.split(x_test, (M_TEST, M_VAL))
y_test, y_val = pt.split(y_test, (M_TEST, M_VAL))
# Add the single channel dimension Conv2d expects: (N, 1, H, W).
x_test = x_test.view(-1, 1, PIC_H, PIC_W).double()
x_val = x_val.view(-1, 1, PIC_H, PIC_W).double()

# LeNet-5-style CNN: two conv/BN/ReLU/pool feature stages, then an MLP head.
# For 28x28 input: conv5 -> 24x24 -> pool -> 12x12 -> conv5 -> 8x8 -> pool
# -> 4x4, so the flattened feature size is 16 * 4 * 4 = 256.
_features = [
    pt.nn.Conv2d(1, 6, kernel_size=5, stride=1, padding=0),
    pt.nn.BatchNorm2d(6),
    pt.nn.ReLU(),
    pt.nn.MaxPool2d(kernel_size=2, stride=2, padding=0),
    pt.nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),
    pt.nn.BatchNorm2d(16),
    pt.nn.ReLU(),
    pt.nn.MaxPool2d(kernel_size=2, stride=2, padding=0),
]
_classifier = [
    pt.nn.Flatten(),
    pt.nn.Linear(256, 120, bias=True),
    pt.nn.Sigmoid(),
    pt.nn.Linear(120, 84, bias=True),
    pt.nn.Sigmoid(),
    pt.nn.Linear(84, N_CLS, bias=True),  # raw logits; CrossEntropyLoss applies softmax
]
model = pt.nn.Sequential(*_features, *_classifier).double()

criterion = pt.nn.CrossEntropyLoss()
optim = pt.optim.Adam(model.parameters(), lr=ALPHA)


def acc(ht, yt):
    """Mean classification accuracy.

    `ht` is a (N, C) tensor of scores/logits, `yt` a length-N label tensor.
    Returns a 0-dim double tensor: the fraction of rows whose argmax
    matches the label.
    """
    preds = ht.argmax(dim=1)
    return (preds == yt.long()).double().mean()


if os.path.exists(SAVE_PATH):
    # Resume: restore weights from a previous run instead of retraining.
    stat = pt.load(SAVE_PATH)
    model.load_state_dict(stat)
    print('Loaded.')
else:
    print('Training start')
    GROUP = int(np.ceil(N_BATCHES / 10))  # report progress ~10 times per epoch
    g_step = -1  # global batch counter indexing the history arrays
    ALL_BATCHES = N_BATCHES * N_EPOCHS
    cost_his = np.zeros(ALL_BATCHES)
    acc_his = np.zeros(ALL_BATCHES)
    val_cost_his = np.zeros(ALL_BATCHES)
    val_acc_his = np.zeros(ALL_BATCHES)
    for epoch in range(N_EPOCHS):
        avg_cost = 0.0
        avg_acc = 0.0
        for i, (bx, by) in enumerate(ds):
            g_step += 1
            model.train(True)  # train mode: BatchNorm uses batch statistics
            optim.zero_grad()
            bx = bx.view(-1, 1, PIC_H, PIC_W).double()
            ht = model(bx)
            cost = criterion(ht, by)
            cost.backward()
            optim.step()
            model.train(False)  # eval mode for the validation forward pass
            # BUGFIX: take .item() before accumulating/recording. Summing the
            # raw `cost` tensor kept every batch's autograd graph alive for
            # the whole epoch (memory leak); history arrays need plain floats.
            cost_v = cost.item()
            acc_v = acc(ht, by).item()
            avg_cost += cost_v
            avg_acc += acc_v
            # BUGFIX: the validation pass needs no gradients — without
            # no_grad() a fresh graph was built over the entire validation
            # set on every single batch.
            with pt.no_grad():
                ht_val = model(x_val)
                val_cost = criterion(ht_val, y_val).item()
                val_acc = acc(ht_val, y_val).item()
            cost_his[g_step] = cost_v
            acc_his[g_step] = acc_v
            val_cost_his[g_step] = val_cost
            val_acc_his[g_step] = val_acc
            if i % GROUP == 0:
                print(f'epoch#{epoch + 1} batch#{i + 1}: cost: {cost_v}, acc: {acc_v}, val_cost: {val_cost}, val_acc: {val_acc}')
        if i % GROUP != 0:
            # Always report the last batch of the epoch, even off-cycle.
            print(f'epoch#{epoch + 1} batch#{i + 1}: cost: {cost_v}, acc: {acc_v}, val_cost: {val_cost}, val_acc: {val_acc}')
        avg_cost /= N_BATCHES
        avg_acc /= N_BATCHES
        model.eval()  # eval mode for the end-of-epoch validation pass
        with pt.no_grad():
            ht_val = model(x_val)
            val_cost = criterion(ht_val, y_val).item()
            val_acc = acc(ht_val, y_val).item()
        print(f'epoch#{epoch + 1}: avg cost: {avg_cost}, avg acc: {avg_acc}, val_cost: {val_cost}, val_acc: {val_acc}')
    print('Training over')

    # Plot per-batch training vs. validation curves side by side.
    spr = 1
    spc = 2
    spn = 0
    plt.figure(figsize=(12, 6))

    spn += 1
    plt.subplot(spr, spc, spn)
    plt.plot(cost_his, label='Training cost')
    plt.plot(val_cost_his, label='Validation cost')
    plt.legend()

    spn += 1
    plt.subplot(spr, spc, spn)
    plt.plot(acc_his, label='Training accuracy')
    plt.plot(val_acc_his, label='Validation accuracy')
    plt.legend()

    # Persist only the weights (state dict), not the whole module object.
    os.makedirs(os.path.split(SAVE_PATH)[0], exist_ok=True)
    pt.save(model.state_dict(), SAVE_PATH)
    print('Saved')

# Final evaluation on the held-out test split.
model.eval()  # eval mode: BatchNorm uses its running statistics
# BUGFIX: no_grad() — the original built an autograd graph over the full
# test set just to compute metrics.
with pt.no_grad():
    ht_test = model(x_test)
    test_cost = criterion(ht_test, y_test)
    test_acc = acc(ht_test, y_test)
print(f'Testing: cost = {test_cost}, acc = {test_acc}')
