import numpy as np
import torch as pt
from torch.utils.data import DataLoader, TensorDataset
import torchvision as ptv
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
import sys
import os
from sklearn.preprocessing import StandardScaler
from torch.utils.data import DataLoader, Dataset, TensorDataset
from python_ai.common.xcommon import *

# Fix RNG seeds so runs are reproducible.
np.random.seed(777)
pt.manual_seed(777)

VER = 'v11.2'            # experiment version tag, used in log/save paths below
ALPHA = 0.001            # Adam learning rate
N_EPOCHS = 2             # number of training epochs
BATCH_SIZE = 64          # mini-batch size for all data loaders
VAL_RATE = 0.1           # fraction of the test set carved off for validation
SAMPLE_RATE = 1.0        # fraction of the training set actually used
SAMPLE_RATE_TEST = 1.0   # fraction of the test set actually used
FILE_NAME = os.path.basename(__file__)
LOG_PATH = os.path.join('_log', FILE_NAME, VER)
SAVE_PATH = os.path.join('_save', FILE_NAME, VER, 'model_stat.tmp.dat')

# prepare data
ROOT_DIR = '../../../../../large_data/DL2/pt/mnist'
if not os.path.exists(ROOT_DIR):
    print('Data directory wrong!')
    sys.exit(1)

print('Loading ...')
mnist_train = ptv.datasets.MNIST(ROOT_DIR,
                                 train=True,
                                 transform=ptv.transforms.ToTensor(),
                                 download=False)
N_CLS = len(pt.unique(mnist_train.targets))

M = len(mnist_train.targets)
M2 = int(np.ceil(M * SAMPLE_RATE))
idx = pt.randperm(M)[:M2]
mnist_train = pt.utils.data.Subset(mnist_train, idx)

mnist_test_ori = mnist_test = ptv.datasets.MNIST(ROOT_DIR,
                                train=False,
                                transform=ptv.transforms.ToTensor(),
                                download=False)
M_TEST, PIC_H, PIC_W = mnist_test.data.shape
M = len(mnist_test.targets)
M2 = int(np.ceil(M * SAMPLE_RATE_TEST))
idx = pt.randperm(M)[:M2]
mnist_test = pt.utils.data.Subset(mnist_test, idx)

M_TRAIN = len(mnist_train)
M_TEST = len(mnist_test)
print('x_train len', M_TRAIN)
print('x_test len', M_TEST)
print('N_CLS', N_CLS)

# Training loader: shuffled, drops the last partial batch.
dl = pt.utils.data.DataLoader(dataset=mnist_train,
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              drop_last=True)
N_BATCHES = int(np.floor(M_TRAIN / BATCH_SIZE))  # Discard reminder.

# Test loader: deterministic order, keeps the final partial batch.
dl_test = pt.utils.data.DataLoader(dataset=mnist_test,
                              batch_size=BATCH_SIZE,
                              shuffle=False,
                              drop_last=False)



# Build the validation split from the raw (un-subsampled) test tensors:
# scale to [0, 1], shuffle, then split off the last M_VAL samples.
# NOTE(review): if SAMPLE_RATE_TEST < 1 then randperm(M_TEST) only indexes
# the first M_TEST rows of the full original tensors — confirm intended.
x_test = mnist_test_ori.data.double() / 255.
y_test = mnist_test_ori.targets
print('x_test.shape', M_TEST, PIC_H, PIC_W)
a = pt.randperm(M_TEST)  # ATTENTION pt.randperm
x_test = x_test[a]
y_test = y_test[a]
M_VAL = int(np.ceil(M_TEST * VAL_RATE))
M = M_TEST - M_VAL
x_test, x_val = pt.split(x_test, (M, M_VAL))
y_test, y_val = pt.split(y_test, (M, M_VAL))
x_test = x_test.view(-1, 1, PIC_H, PIC_W).double()  # not used
x_val = x_val.view(-1, 1, PIC_H, PIC_W).double()  # add channel dim: (N, 1, H, W)
print('x_val', x_val.shape)

mnist_val = pt.utils.data.TensorDataset(x_val, y_val)
dl_val = pt.utils.data.DataLoader(dataset=mnist_val,
                              batch_size=BATCH_SIZE,
                              shuffle=False,
                              drop_last=False)
# The TensorDataset keeps its own references; drop the module-level names.
del x_val, y_val, x_test, y_test

# model
def my_conv(in_side, in_ch, out_ch, kernel, stride, padding='same'):
    """Build a square ``Conv2d`` with simplified 'same'-style padding.

    Parameters
    ----------
    in_side : int
        Spatial side length of the input. Unused here; kept so existing
        callers (which all pass it) are unaffected.
    in_ch, out_ch : int
        Input / output channel counts.
    kernel : int
        Square kernel side length.
    stride : int
        Stride applied to both spatial dimensions.
    padding : str
        ``'same'`` pads by ``(kernel - 1) // 2`` (true 'same' only for odd
        kernels); any other value means no padding.

    Returns
    -------
    pt.nn.Conv2d
    """
    if padding == 'same':
        # Symmetric padding; for odd kernels the output side is
        # ceil(in_side / stride).
        pad = (kernel - 1) // 2
    else:
        pad = 0
    # BUGFIX: removed leftover debug `print(padding)` that spammed stdout
    # on every layer construction.
    return pt.nn.Conv2d(in_ch, out_ch, kernel_size=kernel, stride=stride, padding=pad)


class MyResnetBlock(pt.nn.Module):
    """Basic two-conv residual block: conv-BN-ReLU, conv-BN, skip-add, ReLU.

    When ``residual`` is True the skip path is projected through a strided
    convolution so its shape matches the main path; otherwise the input is
    added through unchanged (only valid when the shapes already agree).
    """

    def __init__(self, residual, in_side, in_ch, out_ch, kernel=3, stride=1, **kwargs):
        super().__init__(**kwargs)
        # Remember the configuration for introspection/debugging.
        self.residual = residual
        self.in_side = in_side
        self.in_ch = in_ch
        self.out_ch = out_ch
        self.kernel = kernel
        self.stride = stride

        # Main path, first half: strided conv + BN + ReLU.
        self.conv1 = my_conv(in_side, in_ch, out_ch, kernel, stride)
        self.bn1 = pt.nn.BatchNorm2d(out_ch)
        self.relu1 = pt.nn.ReLU()

        # Main path, second half: stride-1 conv + BN (activation after merge).
        self.conv2 = my_conv(np.ceil(in_side / stride), out_ch, out_ch, kernel, 1)
        self.bn2 = pt.nn.BatchNorm2d(out_ch)
        self.relu2 = pt.nn.ReLU()

        # Projection for the skip connection when channels/stride change.
        if residual:
            self.conv_down = my_conv(in_side, in_ch, out_ch, kernel, stride)

    def forward(self, input):
        # Main path.
        out = self.relu1(self.bn1(self.conv1(input)))
        out = self.bn2(self.conv2(out))

        # Skip path: project when configured, else pass through as-is.
        shortcut = self.conv_down(input) if self.residual else input

        # Merge (in-place add) and final activation.
        out.add_(shortcut)
        return self.relu2(out)


class MyResnetByPt(pt.nn.Module):
    """Small ResNet: stem conv, stages of residual blocks, global avg pool, FC.

    ``blocks_spec_list`` gives the number of blocks per stage. The first
    block of every stage halves the spatial size (and, from the second stage
    on, doubles the channel count); the remaining blocks keep the shape.
    """

    def __init__(self, blocks_spec_list, in_side, init_in_ch, init_out_ch, **kwargs):
        super().__init__(**kwargs)

        # Stem convolution ('same' padding, stride 1).
        self.conv1 = my_conv(in_side, init_in_ch, init_out_ch, 3, 1)

        in_ch = out_ch = init_out_ch
        stage_blocks = []
        for stage_idx, n_blocks in enumerate(blocks_spec_list):
            for blk_idx in range(n_blocks):
                if blk_idx == 0:
                    # Stage entry: downsampling block with projected shortcut.
                    if stage_idx != 0:
                        out_ch *= 2
                    stage_blocks.append(MyResnetBlock(True, in_side, in_ch, out_ch, 3, 2))
                    in_ch = out_ch
                    in_side = int(np.ceil(in_side / 2))
                else:
                    # Shape-preserving block with identity shortcut.
                    stage_blocks.append(MyResnetBlock(False, in_side, in_ch, out_ch, 3, 1))
        self.blocks = pt.nn.Sequential(*stage_blocks)

        # After the stages the feature map is in_side x in_side; averaging it
        # down to 1x1 lets the FC layer see a flat (batch, out_ch) tensor.
        self.final_ch = out_ch
        self.avg_pool = pt.nn.AvgPool2d(kernel_size=(in_side, in_side),
                                        stride=(1, 1),
                                        padding=(0, 0))
        self.fc = pt.nn.Linear(out_ch, N_CLS)

    def forward(self, input):
        x = self.conv1(input)
        x = self.blocks(x)
        x = self.avg_pool(x)
        return self.fc(x.view(-1, self.final_ch))


# ResNet-18-style layout (2+2+2+2 blocks), 1 input channel, 16 stem channels.
model = MyResnetByPt([2, 2, 2, 2], PIC_H, 1, 16)
model = model.double()  # inputs are float64 below, so weights must be too

# from torch.utils.tensorboard import SummaryWriter
# writer = SummaryWriter(LOG_PATH)  # create the summary writer
# writer.add_graph(model, input_to_model=x_val)  # add_graph records the model graph; input_to_model supplies example input
# writer.close()  # close the writer
# print('Graph generated.')
# sys.exit(0)

# CrossEntropyLoss expects raw logits (it applies log-softmax internally).
criterion = pt.nn.CrossEntropyLoss()
optim = pt.optim.Adam(model.parameters(), lr=ALPHA)


def acc(ht, yt):
    """Mean accuracy: fraction of rows of ``ht`` whose argmax equals ``yt``."""
    preds = pt.argmax(ht, 1)
    hits = preds == yt.long()
    return hits.double().mean()


if os.path.exists(SAVE_PATH):
    # Resume from a previously saved checkpoint instead of retraining.
    print('Loading...')
    stat = pt.load(SAVE_PATH)
    model.load_state_dict(stat)
    print('Loaded.')
else:
    print('Training start')
    GROUP = int(np.ceil(N_BATCHES / 10))  # progress print roughly 10x per epoch
    g_step = -1
    ALL_BATCHES = N_BATCHES * N_EPOCHS
    cost_his = []      # per-epoch average training cost
    acc_his = []       # per-epoch average training accuracy
    val_cost_his = []  # per-epoch average validation cost
    val_acc_his = []   # per-epoch average validation accuracy
    for epoch in range(N_EPOCHS):
        avg_cost = 0
        avg_acc = 0
        for i, (bx, by) in enumerate(dl):
            g_step += 1

            model.train(True)  # ATTENTION: BN layers must be in train mode
            optim.zero_grad()
            bx = bx.view(-1, 1, PIC_H, PIC_W).double()
            ht = model(bx)
            cost = criterion(ht, by)
            cost.backward()
            optim.step()
            model.train(False)  # ATTENTION
            cost = cost.detach().numpy()
            accv = acc(ht, by).detach().numpy()
            avg_cost += cost
            avg_acc += accv

            if i % GROUP == 0:
                print(f'epoch#{epoch + 1} batch#{i + 1}: cost: {cost}, acc: {accv}')

        if i % GROUP != 0:
            print(f'epoch#{epoch + 1} batch#{i + 1}: cost: {cost}, acc: {accv}')
        avg_cost /= i + 1
        avg_acc /= i + 1
        # BUGFIX: these histories were declared but never appended to, so the
        # training cost/accuracy subplots below always came out empty.
        cost_his.append(avg_cost)
        acc_his.append(avg_acc)
        model.eval()  # ATTENTION

        # Validation pass; no_grad avoids building an autograd graph.
        # (Removed leftover per-batch print_numpy_ndarray_info debug calls.)
        avg_cost_val = 0.
        avg_acc_val = 0.
        with pt.no_grad():
            for i, (bx, by) in enumerate(dl_val):
                ht_val = model(bx)
                val_cost = criterion(ht_val, by).numpy()
                val_acc = acc(ht_val, by).numpy()
                avg_cost_val += val_cost
                avg_acc_val += val_acc
        avg_cost_val /= i + 1
        avg_acc_val /= i + 1
        val_cost_his.append(avg_cost_val)
        val_acc_his.append(avg_acc_val)

        print(f'epoch#{epoch + 1}: avg cost: {avg_cost}, avg acc: {avg_acc}, val_cost: {avg_cost_val}, val_acc: {avg_acc_val}')
        if avg_acc_val > 0.95:
            print('early stop')
            break
    print('Training over')

    # save
    os.makedirs(os.path.split(SAVE_PATH)[0], exist_ok=True)
    pt.save(model.state_dict(), SAVE_PATH)
    print('Saved')

    # Plot training/validation curves in a 2x2 grid.
    spr = 2
    spc = 2
    spn = 0
    plt.figure(figsize=(12, 12))

    spn += 1
    plt.subplot(spr, spc, spn)
    plt.plot(cost_his, label='Training cost')
    plt.legend()

    spn += 1
    plt.subplot(spr, spc, spn)
    plt.plot(acc_his, label='Training accuracy')
    plt.legend()

    spn += 1
    plt.subplot(spr, spc, spn)
    plt.plot(val_cost_his, label='Validation cost')
    plt.legend()

    spn += 1
    plt.subplot(spr, spc, spn)
    plt.plot(val_acc_his, label='Validation accuracy')
    plt.legend()
    # NOTE(review): no plt.show()/plt.savefig() follows, so in a plain
    # script run the figure is never rendered — confirm whether an
    # interactive backend is assumed.

# sys.exit(0)

model.eval()  # ATTENTION: inference mode (BN uses running statistics)

# Evaluate average cost/accuracy over the whole test loader, batch by batch.
test_cost_avg = 0.
test_acc_avg = 0.
GROUP = int(np.ceil(np.ceil(M_TEST / BATCH_SIZE) / 10))  # ~10 progress prints
# IMPROVEMENT: no_grad skips autograd bookkeeping during pure inference,
# cutting memory use; the computed values are unchanged.
with pt.no_grad():
    for i, (bx, by) in enumerate(dl_test):
        bx = bx.view(-1, 1, PIC_H, PIC_W).double()
        ht = model(bx)
        test_cost_avg += criterion(ht, by).numpy()
        test_acc_avg += acc(ht, by).numpy()
        if i % GROUP == 0:
            print(f'Testing # {i + 1}')
if i % GROUP != 0:
    print(f'Testing # {i + 1}')
test_cost_avg /= i + 1
test_acc_avg /= i + 1
print(f'Tested: cost = {test_cost_avg}, acc = {test_acc_avg}')
print('Over')
