import numpy as np
import torch as pt
from torch.utils.data import DataLoader, TensorDataset
import torchvision as ptv
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
import sys
import os
from sklearn.preprocessing import StandardScaler
from torch.utils.data import DataLoader, Dataset, TensorDataset
import datetime as dt

# Seed both NumPy and PyTorch RNGs up front so shuffling, the randperm split
# below, and weight initialization are reproducible across runs.
np.random.seed(777)
pt.manual_seed(777)
# Prefer the first CUDA GPU; fall back to CPU.
device = pt.device("cuda:0" if pt.cuda.is_available() else "cpu")
print(device)

VER = 'v6.10'      # experiment version tag, used in log/save paths
ALPHA = 0.001      # Adam learning rate
N_EPOCHS = 1       # number of passes over the training set
BATCH_SIZE = 64    # mini-batch size for training and testing
VAL_RATE = 0.03    # fraction of the MNIST test set carved off for validation
FILE_NAME = os.path.basename(__file__)
LOG_PATH = os.path.join('_log', FILE_NAME, VER)  # tensorboard log dir (writer currently commented out)
SAVE_PATH = os.path.join('_save', FILE_NAME, VER, 'model_stat.tmp.dat')  # saved state_dict path

# prepare data
ROOT_DIR = '../../../../../large_data/DL2/pt/mnist'  # expected MNIST cache/download dir
if not os.path.exists(ROOT_DIR):
    print('Data directory wrong!')
    sys.exit(1)

print('Loading ...')
# Download (if missing) and load MNIST; ToTensor converts PIL images to
# float tensors scaled to [0, 1].
mnist_train = ptv.datasets.MNIST(ROOT_DIR,
                                 train=True,
                                 transform=ptv.transforms.ToTensor(),
                                 download=True)
mnist_test = ptv.datasets.MNIST(ROOT_DIR,
                                train=False,
                                transform=ptv.transforms.ToTensor(),
                                download=True)
M_TRAIN = len(mnist_train.targets)  # number of training examples (60000 for MNIST)
print('x_train len', M_TRAIN)
N_CLS = len(pt.unique(mnist_train.targets))  # number of distinct labels (10 digits)
print('N_CLS', N_CLS)

# Shuffled training loader; drop_last discards the final partial batch so every
# batch has exactly BATCH_SIZE examples.
ds = pt.utils.data.DataLoader(dataset=mnist_train,
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              drop_last=True,
                              pin_memory=True)
N_BATCHES = int(np.floor(M_TRAIN / BATCH_SIZE))  # full batches only; remainder is dropped

# Normalize raw uint8 test pixels to [0, 1] floats.
x_test = mnist_test.data.float() / 255.
y_test = mnist_test.targets
M_TEST, PIC_H, PIC_W = x_test.shape  # (10000, 28, 28) for MNIST
print('x_test.shape', M_TEST, PIC_H, PIC_W)
# Shuffle the test set before splitting off validation data so the validation
# subset is a random sample rather than the original file order.
a = pt.randperm(M_TEST)  # ATTENTION pt.randperm
x_test = x_test[a]
y_test = y_test[a]
M_VAL = int(np.ceil(M_TEST * VAL_RATE))  # validation size (e.g. 300 of 10000 at 3%)
M_TEST -= M_VAL  # remaining examples stay in the test set
x_test, x_val = pt.split(x_test, (M_TEST, M_VAL))
y_test, y_val = pt.split(y_test, (M_TEST, M_VAL))
# Add the single input-channel dimension expected by Conv2d: (N, 1, H, W).
x_test = x_test.view(-1, 1, PIC_H, PIC_W).float()
# Validation tensors are small enough to live on the device permanently.
x_val = x_val.view(-1, 1, PIC_H, PIC_W).float().to(device)
y_val = y_val.to(device)


# (A hand-written Dataset subclass was previously sketched here; TensorDataset
# below covers the same need.)


# Batched test loader; no shuffling needed for evaluation.
ds_test = DataLoader(TensorDataset(x_test, y_test),
                     batch_size=BATCH_SIZE,
                     pin_memory=True)

# model
def my_conv(in_side, in_ch, out_ch, kernel, stride, padding='same'):
    """Build a Conv2d layer with TF-style ``'same'`` padding.

    Args:
        in_side: spatial side length of the input. Unused here; kept for
            call-site compatibility (callers track spatial sizes with it).
        in_ch: number of input channels.
        out_ch: number of output channels.
        kernel: square kernel size.
        stride: convolution stride.
        padding: ``'same'`` pads so that stride-1 convolutions with an odd
            kernel preserve the spatial size; any other value means no padding.

    Returns:
        A configured ``pt.nn.Conv2d`` module.
    """
    if padding == 'same':
        # For an odd kernel, (kernel - 1) // 2 on each side keeps the output
        # the same spatial size as the input when stride == 1.
        pad = (kernel - 1) // 2
    else:
        pad = 0
    # The leftover debug print of the padding value was removed — it spammed
    # stdout once per constructed layer.
    return pt.nn.Conv2d(in_ch, out_ch, kernel_size=kernel, stride=stride, padding=pad)


class MyResnetBlock(pt.nn.Module):
    """Basic two-convolution ResNet block.

    Main branch: conv -> BN -> ReLU -> conv -> BN, then the shortcut is added
    and a final ReLU applied.  When ``residual`` is True the shortcut is a
    projection convolution (``conv_down``) matching the main branch's shape;
    otherwise the input is passed through unchanged (identity shortcut).
    """

    def __init__(self, residual, in_side, in_ch, out_ch, kernel=3, stride=1, **kwargs):
        super().__init__(**kwargs)
        # Keep the configuration around for introspection/debugging.
        self.residual = residual
        self.in_side = in_side
        self.in_ch = in_ch
        self.out_ch = out_ch
        self.kernel = kernel
        self.stride = stride

        # First half of the main branch (may downsample via stride).
        self.conv1 = my_conv(in_side, in_ch, out_ch, kernel, stride)
        self.bn1 = pt.nn.BatchNorm2d(out_ch)
        self.relu1 = pt.nn.ReLU()

        # Second half operates on the (possibly) downsampled feature map.
        self.conv2 = my_conv(np.ceil(in_side / stride), out_ch, out_ch, kernel, 1)
        self.bn2 = pt.nn.BatchNorm2d(out_ch)
        self.relu2 = pt.nn.ReLU()

        # Projection shortcut, only built when the shapes must be matched.
        if residual:
            self.conv_down = my_conv(in_side, in_ch, out_ch, kernel, stride)

    def forward(self, input):
        # Main branch.
        out = self.relu1(self.bn1(self.conv1(input)))
        out = self.bn2(self.conv2(out))

        # Shortcut: projected when configured, identity otherwise.
        shortcut = self.conv_down(input) if self.residual else input
        out += shortcut

        return self.relu2(out)


class MyResnetByPt(pt.nn.Module):
    """Small ResNet: stem conv, stacked block stages, global avg-pool, FC head.

    ``blocks_spec_list`` gives the number of ``MyResnetBlock``s per stage.
    The first block of every stage downsamples spatially by 2 (stride 2 with a
    projection shortcut) and — except in the very first stage — doubles the
    channel count; the remaining blocks of a stage keep the shape.
    """

    def __init__(self, blocks_spec_list, in_side, init_in_ch, init_out_ch, **kwargs):
        super().__init__(**kwargs)

        # Stem: a 3x3 stride-1 convolution up to the initial channel width.
        self.conv1 = my_conv(in_side, init_in_ch, init_out_ch, 3, 1)
        in_ch = out_ch = init_out_ch

        stage_blocks = []
        for stage_idx, n_blocks in enumerate(blocks_spec_list):
            for block_idx in range(n_blocks):
                if block_idx == 0:
                    # Stage entry: downsample with a projection shortcut;
                    # double channels from the second stage onwards.
                    if stage_idx != 0:
                        out_ch *= 2
                    stage_blocks.append(MyResnetBlock(True, in_side, in_ch, out_ch, 3, 2))
                    in_ch = out_ch
                    in_side = int(np.ceil(in_side / 2))
                else:
                    # Shape-preserving block with identity shortcut.
                    stage_blocks.append(MyResnetBlock(False, in_side, in_ch, out_ch, 3, 1))
        self.blocks = pt.nn.Sequential(*stage_blocks)
        self.final_ch = out_ch
        # Pool the remaining spatial extent down to 1x1 (global average pool).
        self.avg_pool = pt.nn.AvgPool2d(kernel_size=(in_side, in_side),
                                        stride=(1, 1),
                                        padding=(0, 0))
        self.fc = pt.nn.Linear(out_ch, N_CLS)

    def forward(self, input):
        out = self.conv1(input)
        out = self.blocks(out)
        out = self.avg_pool(out)
        # Flatten (N, C, 1, 1) -> (N, C) before the classifier head.
        out = out.view(-1, self.final_ch)
        return self.fc(out)


# Build the network: 4 stages of 2 blocks each (ResNet-18-like layout),
# 28x28 single-channel input, 16 initial channels.
model = MyResnetByPt([2, 2, 2, 2], PIC_H, 1, 16)
model = model.float().to(device)

# from torch.utils.tensorboard import SummaryWriter
# writer = SummaryWriter(LOG_PATH)  # create the summary writer
# writer.add_graph(model, input_to_model=x_val)  # add the model graph; input_to_model supplies sample input
# writer.close()  # close the writer
# print('Graph generated.')
# # sys.exit(0)

# Cross-entropy over raw logits (the model has no softmax layer) + Adam.
criterion = pt.nn.CrossEntropyLoss().to(device)
optim = pt.optim.Adam(model.parameters(), lr=ALPHA)


def acc(ht, yt):
    """Mean classification accuracy of logits ``ht`` against labels ``yt``."""
    predictions = pt.argmax(ht, dim=1)
    hits = predictions == yt.long()
    return hits.float().mean()


# Resume from a saved state_dict when one exists; otherwise train from scratch.
if os.path.exists(SAVE_PATH):
    stat = pt.load(SAVE_PATH)
    model.load_state_dict(stat)
    print('Loaded.')
else:
    print('Training start')
    dt1 = dt.datetime.now()
    # Print progress roughly 10 times per epoch.
    GROUP = int(np.ceil(N_BATCHES / 10))
    g_step = -1  # global step counter across epochs (only incremented, never read)
    ALL_BATCHES = N_BATCHES * N_EPOCHS
    cost_his = []       # per-batch training cost
    acc_his = []        # per-batch training accuracy
    val_cost_his = []   # per-epoch validation cost
    val_acc_his = []    # per-epoch validation accuracy
    for epoch in range(N_EPOCHS):
        avg_cost = 0
        avg_acc = 0
        for i, (bx, by) in enumerate(ds):
            bx = bx.to(device)
            by = by.to(device)
            g_step += 1
            model.train(True)  # ATTENTION: train mode (BatchNorm uses batch statistics)
            optim.zero_grad()
            # The loader already yields (N, 1, 28, 28); this reshape just enforces it.
            bx = bx.view(-1, 1, PIC_H, PIC_W).float()
            ht = model(bx)
            cost = criterion(ht, by)
            cost.backward()
            optim.step()
            model.train(False)  # ATTENTION: back to eval mode outside the update
            accv = acc(ht, by)
            cost = cost.cpu().item()
            accv = accv.cpu().item()
            avg_cost += cost
            avg_acc += accv
            # Per-batch validation was disabled (too slow); validation now runs
            # once per epoch after the batch loop.
            # ht_val = model(x_val)
            # val_cost = criterion(ht_val, y_val)
            # val_acc = acc(ht_val, y_val)
            # val_cost = val_cost.cpu().item()
            # val_acc = val_acc.cpu().item()
            cost_his.append(cost)
            acc_his.append(accv)
            # val_cost_his.append(val_cost)
            # val_acc_his.append(val_acc)

            pt.cuda.empty_cache()  # ATTENTION: keep GPU memory in check

            if i % GROUP == 0:
                print(f'epoch#{epoch + 1} batch#{i + 1}: cost: {cost}, acc: {accv}')
        # Print the final batch's stats if the loop didn't just print them.
        if i % GROUP != 0:
            print(f'epoch#{epoch + 1} batch#{i + 1}: cost: {cost}, acc: {accv}')
        avg_cost /= i + 1
        avg_acc /= i + 1
        # Per-epoch validation pass.
        # NOTE(review): this forward pass runs without pt.no_grad(), so an
        # autograd graph is built for the whole validation set — consider
        # wrapping it in no_grad to save memory.
        model.eval()  # ATTENTION: eval mode for validation
        ht_val = model(x_val)
        val_cost = criterion(ht_val, y_val)
        val_acc = acc(ht_val, y_val)
        val_cost = val_cost.cpu().item()
        val_acc = val_acc.cpu().item()
        val_cost_his.append(val_cost)
        val_acc_his.append(val_acc)
        print(f'epoch#{epoch + 1}: avg cost: {avg_cost}, avg acc: {avg_acc}, val_cost: {val_cost}, val_acc: {val_acc}')
        # Early stop once validation accuracy clears 95%.
        if val_acc > 0.95:
            break
    print('Training over')
    dt2 = dt.datetime.now()
    diff = dt2 - dt1
    print(f'Training used time: {diff}')

    # 2x2 grid: per-batch training curves (top) and per-epoch validation curves (bottom).
    spr = 2  # subplot rows
    spc = 2  # subplot cols
    spn = 0  # running subplot index
    plt.figure(figsize=(6, 6))

    spn += 1
    plt.subplot(spr, spc, spn)
    plt.plot(cost_his, label='Training cost')
    # plt.plot(val_cost_his, label='Validation cost')
    plt.legend()

    spn += 1
    plt.subplot(spr, spc, spn)
    plt.plot(acc_his, label='Training accuracy')
    # plt.plot(val_acc_his, label='Validation accuracy')
    plt.legend()

    spn += 1
    plt.subplot(spr, spc, spn)
    plt.plot(val_cost_his, label='Validation cost')
    plt.legend()

    spn += 1
    plt.subplot(spr, spc, spn)
    plt.plot(val_acc_his, label='Validation accuracy')
    plt.legend()

    # save
    # Persist the trained weights so future runs take the load branch above.
    os.makedirs(os.path.split(SAVE_PATH)[0], exist_ok=True)
    pt.save(model.state_dict(), SAVE_PATH)
    print('Saved')

print('Testing start')
dt1 = dt.datetime.now()
model.eval()  # ATTENTION: eval mode (BatchNorm uses running statistics)
test_cost_avg = 0.
test_acc_avg = 0.
# Print progress roughly 10 times over the test run.
GROUP = int(np.ceil(M_TEST / BATCH_SIZE / 10))
n_batches = 0  # counted explicitly so an empty loader can't leave `i` undefined
with pt.no_grad():  # no gradients needed for evaluation; avoids building autograd graphs
    for i, (bx, by) in enumerate(ds_test):
        bx = bx.to(device)
        by = by.to(device)
        ht = model(bx)
        test_cost_avg += criterion(ht, by).cpu().item()
        test_acc_avg += acc(ht, by).cpu().item()
        n_batches = i + 1

        pt.cuda.empty_cache()

        if i % GROUP == 0:
            print(f'Testing # {i + 1}')
# Print the last batch number if the loop didn't just print it.
if (n_batches - 1) % GROUP != 0:
    print(f'Testing # {n_batches}')
test_cost_avg /= n_batches
test_acc_avg /= n_batches
print(f'Testing: cost = {test_cost_avg}, acc = {test_acc_avg}')
print('Testing over.')
dt2 = dt.datetime.now()
diff = dt2 - dt1
# Fixed: this message previously said "Training used time" in the testing section.
print(f'Testing used time: {diff}')

pt.cuda.empty_cache()
print('Cache cleared')
plt.show()
print('Over')

