import numpy as np
import torch as pt
import torchvision as ptv
from torch.utils.data import DataLoader, Dataset, Subset
from torchvision.datasets import MNIST
import os
import sys
import matplotlib.pyplot as plt

# Seed both numpy and torch RNGs so sub-sampling, shuffling and weight
# init are reproducible across runs.
np.random.seed(777)
pt.manual_seed(777)

# 1. Classify the MNIST dataset with a CNN defined in PyTorch.
# ① Data handling
# 1) Load the MNIST dataset
# train data
DATA_DIR = '../../../../large_data/DL2/pt/mnist'
# NOTE: download=False assumes the raw files already exist under DATA_DIR.
ds_train_ori = ds_train = MNIST(root=DATA_DIR, train=True,
                 transform=ptv.transforms.ToTensor(),
                 download=False)
print('ds_train.data', ds_train.data.shape)
print('ds_train.targets', ds_train.targets.shape)
# (num_samples, height, width) — MNIST images are 28x28 grayscale.
M_TRAIN, PIC_H, PIC_W = ds_train.data.shape
# test data
ds_test_ori = ds_test = MNIST(root=DATA_DIR, train=False,
                 transform=ptv.transforms.ToTensor(),
                 download=False)
print('ds_test.data', ds_test.data.shape)
print('ds_test.targets', ds_test.targets.shape)
M_TEST, _, _ = ds_test.data.shape

# 2) Define hyper-parameters
ALPHA = 0.01  # Adam learning rate
N_EPOCHS = 10
BATCH_SIZE = 64
SELECT_RATE = 0.02  # dataset is large; sample this fraction for a quick demo — set to 1.0 for a real run
# select train data
M_TRAIN_ORI = M_TRAIN
M_TRAIN = int(np.ceil(M_TRAIN_ORI * SELECT_RATE))
# Draw a random permutation and keep the first M_TRAIN indices.
idx = pt.randperm(M_TRAIN_ORI)[:M_TRAIN]
ds_train = pt.utils.data.Subset(ds_train, idx)
print('ds_train len', len(ds_train))
# select test data
M_TEST_ORI = M_TEST
M_TEST = int(np.ceil(M_TEST_ORI * SELECT_RATE))
idx = pt.randperm(M_TEST_ORI)[:M_TEST]
ds_test = pt.utils.data.Subset(ds_test, idx)
print('ds_test len', len(ds_test))

# 3) Wrap the datasets in DataLoaders
# drop_last=True: partial final batches are discarded, so the number of
# batches per epoch is exactly floor(M / BATCH_SIZE).
dl_train = pt.utils.data.DataLoader(ds_train,
                                    batch_size=BATCH_SIZE,
                                    shuffle=True,
                                    drop_last=True)
dl_test = pt.utils.data.DataLoader(ds_test,
                                    batch_size=BATCH_SIZE,
                                    shuffle=True,
                                    drop_last=True)


# ②	Model definition
# 1)	Define the conv class: three conv layers and two fully-connected layers
# 2)	First conv outputs 32 channels; each conv doubles the channel count and each pooling halves the image size
# 3)	First FC layer uses normal-distribution weight init plus a 0.3 dropout
# 4)	Forward propagation
class MyCnnClf(pt.nn.Module):
    """CNN classifier for 28x28 single-channel MNIST images.

    Per the assignment spec: three conv layers, where the first outputs
    32 channels, each conv doubles the channel count and each pooling
    halves the spatial size (28 -> 14 -> 7 -> 3); then two fully-connected
    layers, with fc1's weights drawn from a normal distribution and
    followed by a 0.3 dropout.

    forward: (N, 1, 28, 28) float tensor -> (N, 10) raw class logits.
    """

    def __init__(self):
        super().__init__()
        # CNN Layer 1: (N, 1, 28, 28) -> (N, 32, 14, 14)
        self.conv1 = pt.nn.Conv2d(in_channels=1,
                                  out_channels=32,
                                  kernel_size=3,
                                  stride=1,
                                  padding=1)
        self.bn1 = pt.nn.BatchNorm2d(32)
        self.relu1 = pt.nn.ReLU()
        self.pool1 = pt.nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        # CNN Layer 2: (N, 32, 14, 14) -> (N, 64, 7, 7)
        self.conv2 = pt.nn.Conv2d(in_channels=32,
                                  out_channels=64,
                                  kernel_size=3,
                                  stride=1,
                                  padding=1)
        self.bn2 = pt.nn.BatchNorm2d(64)
        self.relu2 = pt.nn.ReLU()
        self.pool2 = pt.nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        # CNN Layer 3: (N, 64, 7, 7) -> (N, 128, 3, 3).
        # BUG FIX: the spec asks for THREE conv layers but the original
        # implemented only two — channels keep doubling, size keeps halving.
        self.conv3 = pt.nn.Conv2d(in_channels=64,
                                  out_channels=128,
                                  kernel_size=3,
                                  stride=1,
                                  padding=1)
        self.bn3 = pt.nn.BatchNorm2d(128)
        self.relu3 = pt.nn.ReLU()
        self.pool3 = pt.nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        # FC Layer 1: 128 * 3 * 3 = 1152 features in, 400 out.
        self.fc1 = pt.nn.Linear(128 * 3 * 3, 400)
        # BUG FIX: the spec requires fc1's weights to be initialized from a
        # normal distribution — the original never applied any init.
        pt.nn.init.normal_(self.fc1.weight, mean=0.0, std=0.01)
        self.dp1 = pt.nn.Dropout(0.3)
        self.relu_fc1 = pt.nn.ReLU()
        # FC Layer 2: 400 -> 10 class logits.
        self.fc2 = pt.nn.Linear(400, 10)

    def forward(self, x):
        """Map a batch of images (N, 1, 28, 28) to class logits (N, 10)."""
        # Three conv -> batchnorm -> relu -> maxpool blocks.
        x = self.pool1(self.relu1(self.bn1(self.conv1(x))))
        x = self.pool2(self.relu2(self.bn2(self.conv2(x))))
        x = self.pool3(self.relu3(self.bn3(self.conv3(x))))
        # Flatten per sample. Using x.shape[0] (instead of -1 in the batch
        # dim) fails loudly on an unexpected input size rather than
        # silently reshaping across the batch dimension.
        x = x.reshape(x.shape[0], -1)
        # FC head: linear -> dropout -> relu -> linear (logits out;
        # CrossEntropyLoss applies softmax internally).
        x = self.relu_fc1(self.dp1(self.fc1(x)))
        return self.fc2(x)


# ③	Model training
# 1)	Instantiate the model, the loss function and the optimizer.
model = MyCnnClf()
# CrossEntropyLoss expects raw logits and integer class labels.
criterion = pt.nn.CrossEntropyLoss()
optim = pt.optim.Adam(params=model.parameters(), lr=ALPHA)


def acc(h, y):
    """Return the fraction of rows in logits `h` whose argmax equals `y`.

    Result is a 0-dim float64 tensor in [0, 1].
    """
    predictions = h.argmax(dim=1)
    hits = predictions.eq(y.long())
    return hits.double().mean()


# 2)	Mini-batch training loop, N_EPOCHS outer iterations.
N_BATCHS = int(np.floor(M_TRAIN / BATCH_SIZE))
N_BATCHS = 1 if N_BATCHS == 0 else N_BATCHS
N_GROUP = int(np.ceil(N_BATCHS / 5))  # report roughly 5 times per epoch
cost_history = np.zeros(N_EPOCHS)
for epoch in range(N_EPOCHS):
    cost_avg = 0.
    acc_avg = 0.
    n_seen = 0  # batches actually iterated this epoch
    # Train mode for the whole epoch (dropout/batchnorm active); the
    # original toggled train(True)/train(False) every batch to no effect.
    model.train(True)
    for i, (bx, by) in enumerate(dl_train):
        optim.zero_grad()
        h = model(bx)
        cost = criterion(h, by)
        cost.backward()
        optim.step()
        cost = cost.item()
        accv = acc(h, by).item()
        cost_avg += cost
        acc_avg += accv
        n_seen += 1
        if i % N_GROUP == 0:
            print(f'epoch#{epoch + 1}: batch#{i + 1}: cost = {cost}, acc = {accv}')
    model.train(False)
    # Report the last batch if it wasn't already printed above.
    # BUG FIX: guard on n_seen — with drop_last=True and M_TRAIN < BATCH_SIZE
    # the loader yields nothing and `i`/`cost`/`accv` would be undefined.
    if n_seen and (n_seen - 1) % N_GROUP != 0:
        print(f'epoch#{epoch + 1}: batch#{n_seen}: cost = {cost}, acc = {accv}')
    # BUG FIX: average over batches actually seen, not the precomputed
    # N_BATCHS (which is forced to 1 even when zero batches ran).
    denom = n_seen if n_seen else 1
    cost_avg /= denom
    acc_avg /= denom
    cost_history[epoch] = cost_avg
    print(f'epoch#{epoch + 1}: avg cost = {cost_avg}, avg acc = {acc_avg}')

# 3)	Each epoch printed its average loss; plot the per-epoch average
# loss curve collected in cost_history (figure shown by plt.show() below).
plt.plot(cost_history)
plt.xlabel('epoch')
plt.title('Cost value history')

# 4)	Compute accuracy over the (sub-sampled) test set.
N_BATCHS = int(np.floor(M_TEST / BATCH_SIZE))
N_BATCHS = 1 if N_BATCHS == 0 else N_BATCHS
N_GROUP = int(np.ceil(N_BATCHS / 5))
model.eval()
acc_avg = 0.
n_seen = 0  # batches actually iterated
# no_grad: inference only — skip building the autograd graph.
with pt.no_grad():
    for i, (bx, by) in enumerate(dl_test):
        h = model(bx)
        accv = acc(h, by).item()
        acc_avg += accv
        n_seen += 1
        if i % N_GROUP == 0:
            print(f'Testing: batch#{i + 1}: acc = {accv}')
# Report the last batch if it wasn't already printed above.
# BUG FIX: guard on n_seen — with drop_last=True and M_TEST < BATCH_SIZE the
# loader yields nothing and `i`/`accv` would be undefined here.
if n_seen and (n_seen - 1) % N_GROUP != 0:
    print(f'Testing: batch#{n_seen}: acc = {accv}')
# BUG FIX: average over batches actually seen, not the precomputed N_BATCHS.
acc_avg /= n_seen if n_seen else 1
print(f'测试集的准确率:{acc_avg}')

# 5)	Draw one random image from the test set; print the true and
# predicted labels.
model.eval()
# Keep the seeded randperm draw so the sampled index is reproducible.
idx = pt.randperm(M_TEST_ORI)[0]
# Scale raw uint8 pixels to [0, 1] to match the ToTensor() preprocessing
# used during training.
x = ds_test_ori.data[idx:idx + 1].reshape(-1, 1, 28, 28).float() / 255.
# no_grad: inference only — no autograd graph, so .detach() is unnecessary.
with pt.no_grad():
    h = model(x).numpy().argmax(1)
y = ds_test_ori.targets[idx:idx + 1].numpy()
print('在测试集的样本中随机抽取一张图片')
print(f'真实值:{y}, 预测值{h}')

# finally show all plotting
plt.show()
