import numpy as np
import torch as pt
from torch.utils.data import DataLoader
from torch.nn import functional
import torchvision as ptv
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
import sys
import os
from sklearn.preprocessing import StandardScaler

# Fix RNG seeds for reproducibility (NumPy and PyTorch).
np.random.seed(777)
pt.manual_seed(777)

VER = 'v2.0'       # script version tag (not referenced elsewhere in this chunk)
ALPHA = 0.001      # learning rate for the Adam optimizer
N_EPOCHS = 2       # full passes over the training set
BATCH_SIZE = 64    # mini-batch size

# prepare data
ROOT_DIR = '../../../../../large_data/DL2/pt/mnist'
if not os.path.exists(ROOT_DIR):
    print('Data directory wrong!')
    sys.exit(1)

mnist_train = ptv.datasets.MNIST(root=ROOT_DIR,
                                 train=True,
                                 download=False,
                                 transform=ptv.transforms.ToTensor())
mnist_test = ptv.datasets.MNIST(root=ROOT_DIR,
                                train=False,
                                download=False,
                                transform=ptv.transforms.ToTensor())
# NOTE: the ToTensor transform above is bypassed — the raw uint8 tensors are
# read directly below, so the [0, 1] scaling is done by hand instead.
# `.data` / `.targets` replace the deprecated `train_data` / `train_labels`
# and `test_data` / `test_labels` accessors.
x_train = mnist_train.data.double()
x_train /= 255.0
y_train = mnist_train.targets
x_test = mnist_test.data.double()
x_test /= 255.0
y_test = mnist_test.targets

M_TRAIN, PIC_H, PIC_W = x_train.shape
M_TEST = len(x_test)
# Reserve ~10% of the test set as a validation split.
M_VAL = int(np.ceil(0.1 * M_TEST))
M_TEST -= M_VAL

N_BATCHES = int(np.floor(M_TRAIN / BATCH_SIZE))  # Discard remainder.


def ds():
    """Yield successive (features, labels) mini-batches from the training set.

    Produces N_BATCHES batches of BATCH_SIZE rows each; the trailing
    remainder of the training set is never yielded.
    """
    for start in range(0, N_BATCHES * BATCH_SIZE, BATCH_SIZE):
        stop = start + BATCH_SIZE
        yield x_train[start:stop], y_train[start:stop]


# Carve the first M_VAL examples off the test set as a validation split
# (M_VAL + M_TEST equals the original test-set size), then flatten the
# 2-D images into (n, PIC_H * PIC_W) row vectors for the MLP.
x_val, x_test = x_test[:M_VAL], x_test[M_VAL:]
y_val, y_test = y_test[:M_VAL], y_test[M_VAL:]
x_val = x_val.reshape(-1, PIC_H * PIC_W)
x_test = x_test.reshape(-1, PIC_H * PIC_W)

# model: 784 -> 256 -> 512 -> N_CLS fully-connected classifier
L1 = 256
L2 = 512
# Number of output classes, inferred from the labels actually present.
# (Uses y_train rather than the deprecated mnist_train.train_labels.)
N_CLS = len(pt.unique(y_train))
model = pt.nn.Sequential(pt.nn.Linear(PIC_H * PIC_W, L1, bias=True),
                         pt.nn.ReLU(),
                         pt.nn.Linear(L1, L2, bias=True),
                         pt.nn.ReLU(),
                         pt.nn.Linear(L2, N_CLS, bias=True),
                         )
# Match the double-precision inputs prepared above.
model = model.double()


def criterion(ht, yt):
    """Mean cross-entropy loss over a batch.

    Args:
        ht: raw (unnormalized) class scores, shape (batch, n_classes).
        yt: integer class labels, shape (batch,).

    Returns:
        0-dim tensor with the mean negative log-likelihood.

    Replaces the original Softmax()+log formulation: Softmax without an
    explicit `dim` is deprecated, and log(softmax(x)) is numerically
    unstable (underflows to -inf for saturated logits).  cross_entropy
    computes the identical quantity via log-softmax in one stable call,
    and no longer depends on the global N_CLS.
    """
    return functional.cross_entropy(ht, yt)

optim = pt.optim.Adam(model.parameters(), lr=ALPHA)


def acc(ht, yt):
    """Fraction of rows in `ht` whose argmax matches the label in `yt`.

    Args:
        ht: class scores, shape (batch, n_classes).
        yt: integer labels, shape (batch,).

    Returns:
        0-dim double tensor in [0, 1].
    """
    preds = ht.argmax(dim=1)
    hits = (preds == yt.long()).double()
    return hits.mean()


print('Training start')
GROUP = int(np.ceil(N_BATCHES / 10))  # report roughly 10 times per epoch
g_step = -1  # global batch counter across epochs; indexes the history arrays
ALL_BATCHES = N_BATCHES * N_EPOCHS
cost_his = np.zeros(ALL_BATCHES)
acc_his = np.zeros(ALL_BATCHES)
val_cost_his = np.zeros(ALL_BATCHES)
val_acc_his = np.zeros(ALL_BATCHES)
for epoch in range(N_EPOCHS):
    avg_cost = 0.0
    avg_acc = 0.0
    for i, (bx, by) in enumerate(ds()):
        g_step += 1
        model.train(True)
        optim.zero_grad()
        bx = bx.view(-1, PIC_H * PIC_W)  # flatten images to row vectors
        ht = model(bx)
        cost = criterion(ht, by)
        cost.backward()
        optim.step()
        model.train(False)
        accv = acc(ht, by)
        # Accumulate Python floats: summing the tensors themselves would
        # keep every batch's autograd graph alive until the end of the epoch.
        avg_cost += cost.item()
        avg_acc += accv.item()
        # Per-batch validation pass; no_grad avoids building a throwaway graph.
        with pt.no_grad():
            ht_val = model(x_val)
            val_cost = criterion(ht_val, y_val)
            val_acc = acc(ht_val, y_val)
        cost_his[g_step] = cost.item()
        acc_his[g_step] = accv.item()
        val_cost_his[g_step] = val_cost.item()
        val_acc_his[g_step] = val_acc.item()
        if i % GROUP == 0:
            print(f'epoch#{epoch + 1} batch#{i + 1}: cost: {cost}, acc: {accv}, val_cost: {val_cost}, val_acc: {val_acc}')
    # `i` leaks from the inner loop: print a line for the final batch
    # unless it already got one above.
    if i % GROUP != 0:
        print(f'epoch#{epoch + 1} batch#{i + 1}: cost: {cost}, acc: {accv}, val_cost: {val_cost}, val_acc: {val_acc}')
    avg_cost /= N_BATCHES
    avg_acc /= N_BATCHES
    # End-of-epoch validation in eval mode, gradients disabled.
    model.eval()
    with pt.no_grad():
        ht_val = model(x_val)
        val_cost = criterion(ht_val, y_val)
        val_acc = acc(ht_val, y_val)
    print(f'epoch#{epoch + 1}: avg cost: {avg_cost}, avg acc: {avg_acc}, val_cost: {val_cost}, val_acc: {val_acc}')
print('Training over')

# Final evaluation on the held-out test split; no_grad skips building
# an autograd graph that would never be used.
model.eval()
with pt.no_grad():
    ht_test = model(x_test)
    test_cost = criterion(ht_test, y_test)
    test_acc = acc(ht_test, y_test)
print(f'Testing: cost = {test_cost}, acc = {test_acc}')

# Plot per-batch training/validation curves: cost on the left, accuracy
# on the right.
spr = 1  # subplot rows
spc = 2  # subplot cols
spn = 0  # running subplot index
plt.figure(figsize=(12, 6))

spn += 1
plt.subplot(spr, spc, spn)
plt.plot(cost_his, label='Training cost')
plt.plot(val_cost_his, label='Validation cost')
plt.xlabel('Batch')
plt.legend()

spn += 1
plt.subplot(spr, spc, spn)
plt.plot(acc_his, label='Training accuracy')
plt.plot(val_acc_his, label='Validation accuracy')
plt.xlabel('Batch')
plt.legend()

# Without this the figure is silently discarded when the script exits.
plt.show()