import numpy as np
import torch as pt
import torchvision as ptv
from torch.utils.data import DataLoader, Dataset, Subset
from torchvision.datasets import MNIST
import os
import sys
import matplotlib.pyplot as plt

# Seed both NumPy and PyTorch RNGs so sampling and weight init are reproducible.
np.random.seed(777)
pt.manual_seed(777)
# Show up to 200 edge items when printing tensors (debugging convenience).
pt.set_printoptions(edgeitems=200)

# 1. Define a CNN model in PyTorch and classify the MNIST dataset
#    (original assignment note: 5 points per item, 60 in total).
# (1) Data processing
# 1) Load the MNIST dataset
# train data
DATA_DIR = '../../../../../large_data/DL2/pt/mnist'
# download=False: assumes the MNIST files already exist under DATA_DIR — TODO confirm.
ds_train_ori = ds_train = MNIST(root=DATA_DIR, train=True,
                 transform=ptv.transforms.ToTensor(),
                 download=False)
print('ds_train.data', ds_train.data.shape)
print('ds_train.targets', ds_train.targets.shape)
# The raw data tensor is (num_samples, height, width).
M_TRAIN, PIC_H, PIC_W = ds_train.data.shape
# test data
ds_test_ori = ds_test = MNIST(root=DATA_DIR, train=False,
                 transform=ptv.transforms.ToTensor(),
                 download=False)
print('ds_test.data', ds_test.data.shape)
print('ds_test.targets', ds_test.targets.shape)
M_TEST, _, _ = ds_test.data.shape

# 2) Define the hyper-parameters
ALPHA = 0.01       # learning rate for the Adam optimizer below
N_EPOCHS = 1
BATCH_SIZE = 64
SELECT_RATE = 1.0  # dataset is large; sample this fraction for a quick demo, use 1.0 for the full run
# select train data: draw a random subset of ceil(M_TRAIN * SELECT_RATE) samples
M_TRAIN_ORI = M_TRAIN
M_TRAIN = int(np.ceil(M_TRAIN_ORI * SELECT_RATE))
idx = pt.randperm(M_TRAIN_ORI)[:M_TRAIN]
ds_train = pt.utils.data.Subset(ds_train, idx)
print('ds_train len', len(ds_train))
# select test data: split the sampled portion of the original test set into
# test and validation halves.
M_TEST_ORI = M_TEST
M_TEST_VAL = int(np.ceil(M_TEST_ORI * SELECT_RATE))
M_TEST = M_TEST_VAL // 2
M_VAL = M_TEST_VAL - M_TEST
# Bug fix: the original drew idx_test and idx_val from two INDEPENDENT
# randperm calls, so the same sample could land in both the test and the
# validation subset. Slicing one shared permutation makes them disjoint.
perm_test_val = pt.randperm(M_TEST_ORI)
idx_test = perm_test_val[:M_TEST]
idx_val = perm_test_val[M_TEST:M_TEST_VAL]
ds_test = pt.utils.data.Subset(ds_test_ori, idx_test)
print('ds_test len', len(ds_test))
ds_val = pt.utils.data.Subset(ds_test_ori, idx_val)
print('ds_val len', len(ds_val))


# 3) Wrap the datasets in DataLoaders
def _make_loader(ds):
    """Shuffling DataLoader over ds; drops the last incomplete batch."""
    return pt.utils.data.DataLoader(ds,
                                    batch_size=BATCH_SIZE,
                                    shuffle=True,
                                    drop_last=True)


dl_train = _make_loader(ds_train)
dl_test = _make_loader(ds_test)
dl_val = _make_loader(ds_val)


def my_conv(in_ch, out_ch, kernel_size, strides, padding='same'):
    """Build a Conv2d layer.

    padding='same' pads by (kernel_size - 1) // 2 (size-preserving for odd
    kernels at stride 1); any other value pads by 0.
    """
    pad = (kernel_size - 1) // 2 if padding == 'same' else 0
    return pt.nn.Conv2d(in_ch, out_ch, kernel_size, strides, pad)


class ConvNbRelu(pt.nn.Module):
    """Conv2d ('same'-style padding via my_conv) -> BatchNorm2d -> ReLU."""

    def __init__(self, in_ch, out_ch, kernel_size, strides, **kwargs):
        super().__init__(**kwargs)
        # Attribute names are kept so state_dict keys stay stable.
        self.conv = my_conv(in_ch, out_ch, kernel_size, strides)
        self.bn = pt.nn.BatchNorm2d(out_ch)
        self.relu = pt.nn.ReLU()

    def forward(self, x):
        # Apply the three stages as one chained expression.
        return self.relu(self.bn(self.conv(x)))


class InceptionNetBlock(pt.nn.Module):
    """Inception-style block: four parallel branches concatenated on channels.

    With u = out_ch_unit the branches emit 2u (1x1), 4u (3x3), u (5x5) and
    u (3x3 max-pool then 1x1 conv) channels — 8u channels in total.
    """

    def __init__(self, in_ch, out_ch_unit, strides, **kwargs):
        super().__init__(**kwargs)
        self.conv1 = ConvNbRelu(in_ch, out_ch_unit * 2, 1, strides)
        self.conv2 = ConvNbRelu(in_ch, out_ch_unit * 4, 3, strides)
        self.conv3 = ConvNbRelu(in_ch, out_ch_unit, 5, strides)
        # kernel 3 / pad 1 keeps the pool branch's spatial size in step
        # with the conv branches at the same stride.
        self.pool4 = pt.nn.MaxPool2d(3, strides, 1)
        self.conv4 = ConvNbRelu(in_ch, out_ch_unit, 1, 1)

    def forward(self, x):
        branches = (
            self.conv1(x),
            self.conv2(x),
            self.conv3(x),
            self.conv4(self.pool4(x)),
        )
        return pt.cat(branches, dim=1)


class InceptionNet(pt.nn.Module):
    """Stack of InceptionNetBlocks + global average pooling + linear classifier.

    Each of the n_block_units units holds two blocks: the first halves the
    spatial side (stride 2), the second keeps it (stride 1). From the second
    block onward, every block reads the previous block's 8 * out_ch_unit
    channels and doubles out_ch_unit.
    """

    def __init__(self, n_cls, in_side, init_in_ch, init_out_ch_unit, n_block_units, **kwargs):
        super().__init__(**kwargs)
        blocks = []
        in_ch, out_ch_unit = init_in_ch, init_out_ch_unit
        first = True
        for _ in range(n_block_units):
            for strides in (2, 1):
                if strides == 2:
                    in_side //= 2
                if not first:
                    # Previous block emitted out_ch_unit * 8 channels.
                    in_ch, out_ch_unit = out_ch_unit * 8, out_ch_unit * 2
                first = False
                blocks.append(InceptionNetBlock(in_ch, out_ch_unit, strides))
        self.blocks_seq = pt.nn.Sequential(*blocks)
        self.final_out_ch = out_ch_unit * 8
        # Average over the remaining in_side x in_side feature map.
        self.global_pool = pt.nn.AvgPool2d(in_side, 1, 0)
        self.fc = pt.nn.Linear(self.final_out_ch, n_cls)

    def forward(self, x):
        x = self.blocks_seq(x)
        x = self.global_pool(x)
        # Flatten the (N, C, 1, 1) pooled map to (N, C) for the classifier.
        x = x.reshape(-1, self.final_out_ch)
        return self.fc(x)


# 10 classes, 28x28 single-channel inputs, initial out_ch_unit 16, 2 block units.
model = InceptionNet(10, 28, 1, 16, 2)
criterion = pt.nn.CrossEntropyLoss()
# NOTE(review): lr=0.01 is on the high side for Adam — confirm it converges.
optim = pt.optim.Adam(params=model.parameters(), lr=ALPHA)


def acc(h, y):
    """Return the mean accuracy of logits h against labels y as a float64 tensor."""
    predictions = pt.argmax(h, dim=1)
    correct = predictions.eq(y.long()).double()
    return correct.mean()


# Training loop: N_BATCH full batches per epoch (drop_last=True); print a
# progress line roughly ten times per epoch.
N_BATCH = int(np.floor(M_TRAIN / BATCH_SIZE))
GROUP = int(np.ceil(N_BATCH / 10))
for epoch in range(N_EPOCHS):
    i = -1
    for bx, by in dl_train:
        i += 1
        model.train(True)
        optim.zero_grad()
        h = model(bx)
        cost = criterion(h, by)
        cost.backward()
        optim.step()
        model.train(False)
        # .item() replaces the deprecated Tensor.data access of the original.
        cost = cost.item()
        accv = acc(h, by).item()
        if i % GROUP == 0:
            print(f'epoch#{epoch + 1}: batch#{i + 1}: cost = {cost}, acc = {accv}')
    # Report the epoch's final batch even when it misses a GROUP boundary.
    if i % GROUP != 0:
        print(f'epoch#{epoch + 1}: batch#{i + 1}: cost = {cost}, acc = {accv}')
    print('Validating...')
    n_batches = 0
    avg_cost = 0.
    avg_acc = 0.
    # Bug fix: the original ran validation with autograd enabled, building an
    # unused computation graph for every batch; no_grad avoids that waste.
    with pt.no_grad():
        for bx, by in dl_val:
            n_batches += 1
            h = model(bx)
            avg_cost += criterion(h, by).item()
            avg_acc += acc(h, by).item()
    avg_cost /= n_batches
    avg_acc /= n_batches
    print(f'Val: cost = {avg_cost}, acc = {avg_acc}')

# Final evaluation on the held-out test split.
print('Testing')
n_batches = 0
avg_cost = 0.
avg_acc = 0.
# Bug fix: evaluate under no_grad (no graph is needed for inference).
with pt.no_grad():
    for bx, by in dl_test:
        n_batches += 1
        h = model(bx)
        avg_cost += criterion(h, by).item()
        avg_acc += acc(h, by).item()
avg_cost /= n_batches
avg_acc /= n_batches
# Bug fix: the original printed this line with the label 'Val:', mislabeling
# the test-set metrics as validation metrics.
print(f'Test: cost = {avg_cost}, acc = {avg_acc}')
print('Over')
