"""
(1)	题目描述：
编程实现如下功能：
通过pytorch实现2个隐藏层的手写体识别（手写体识别数据集使用本地的）。
题目要求：
"""
# ① Import the required packages
import numpy as np
import torch as pt
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset, DataLoader
import matplotlib.pyplot as plt

# parameters
BATCH_SIZE = 64       # samples per mini-batch
LEARNING_RATE = 0.01  # Adam step size
N_EPOCHS = 4          # full passes over the training set


def sep(label=''):
    """Print a visual separator: 32 dashes, *label*, 32 dashes (no spaces)."""
    dashes = '-' * 32
    print(f'{dashes}{label}{dashes}')


# for GPU: use the first CUDA device when available, otherwise the CPU
device_name = 'cuda:0' if pt.cuda.is_available() else 'cpu'
print('device', device_name)
device = pt.device(device_name)

# ② Load the handwriting dataset from local text files.
# Each row of imgX.txt is one comma-separated flattened image
# (presumably 20x20 = 400 pixels, matching the Linear(400, ...) input below).
path = '../../../../large_data/ML1/hand_writing/imgX.txt'
x = np.loadtxt(path, delimiter=',')
x = np.float32(x)
# scale pixel values from [0, 255] down to [0, 1]
x /= 255.
path = '../../../../large_data/ML1/hand_writing/labely.txt'
y = np.loadtxt(path, dtype=np.int64)
# labels on disk appear to be 1-based; shift to 0-based class ids for CrossEntropyLoss
y -= 1

# ③ Split into 90% training / 10% validation data (fixed seed for reproducibility)
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.9, random_state=1, shuffle=True)
print('x_train', x_train.shape)
print('x_test', x_test.shape)
print('y_train', y_train.shape)
print('y_test', y_test.shape)
# pt.Tensor yields float32 tensors (including the labels); the labels are
# cast back to int64 via .long() inside process_data before computing the loss.
x_train = pt.Tensor(x_train)
x_test = pt.Tensor(x_test)
y_train = pt.Tensor(y_train)
y_test = pt.Tensor(y_test)
# drop_last=True keeps every training batch at exactly BATCH_SIZE;
# the test loader keeps its final partial batch.
ds_train = TensorDataset(x_train, y_train)
dl_train = DataLoader(ds_train, BATCH_SIZE, shuffle=True, drop_last=True)
ds_test = TensorDataset(x_test, y_test)
dl_test = DataLoader(ds_test, BATCH_SIZE, shuffle=True, drop_last=False)
# batches per epoch
print('dl_train', len(dl_train))
print('dl_test', len(dl_test))


# ④ Define the MNIST class that builds the network
class MNIST(pt.nn.Module):
    """MLP classifier with two hidden layers for flattened handwriting images.

    Layer sizes are parameterized with defaults matching the original
    400-128-64-10 architecture, so the same class also works for other
    input/output sizes (e.g. 28x28 = 784-pixel images).

    Args:
        n_inputs: flattened input dimension per sample.
        n_hidden1: width of the first hidden layer.
        n_hidden2: width of the second hidden layer.
        n_classes: number of output classes (logit dimension).
    """

    def __init__(self, n_inputs=400, n_hidden1=128, n_hidden2=64, n_classes=10, **kwargs):
        super().__init__(**kwargs)
        self.seq = pt.nn.Sequential(
            pt.nn.Linear(n_inputs, n_hidden1),
            pt.nn.ReLU(),
            pt.nn.Linear(n_hidden1, n_hidden2),
            pt.nn.ReLU(),
            # raw logits: CrossEntropyLoss applies log-softmax internally
            pt.nn.Linear(n_hidden2, n_classes),
        )

    # ⑤ Define the forward pass
    def forward(self, inputs):
        """Map a (batch, n_inputs) tensor to (batch, n_classes) logits."""
        return self.seq(inputs)


# ⑥ Instantiate the network and move its parameters to the selected device
model = MNIST().to(device)

# ⑦ Create the loss object: cross-entropy over raw logits
criterion = pt.nn.CrossEntropyLoss()

# ⑧ Create the optimizer: Adam over all model parameters
optim = pt.optim.Adam(model.parameters(), lr=LEARNING_RATE)


def acc(y_true, y_pred):
    """Fraction of samples whose argmax over *y_pred* rows matches *y_true*."""
    predicted = y_pred.argmax(dim=1)
    matches = pt.eq(y_true.long(), predicted)
    return matches.float().mean()


# ⑨ Batch processing: one full pass over a DataLoader (training or evaluation)
def process_data(dl, label, is_train):
    """Run one full pass of the model over ``dl``.

    Args:
        dl: DataLoader yielding (inputs, labels) batches.
        label: tag used in progress printouts (e.g. 'train', 'val', 'test').
        is_train: when True, backpropagate and update weights;
            otherwise evaluate only (no gradient tracking).

    Returns:
        (avg_loss, avg_acc): floats averaged over all batches.
    """
    n_batches = len(dl)
    avg_lossv, avg_accv = 0., 0.
    # print roughly 10 progress lines per pass
    n_group = int(np.ceil(n_batches / 10))
    # set train/eval mode once for the whole pass instead of per batch
    model.train(is_train)
    for i, (bx, by) in enumerate(dl):
        bx = bx.to(device)
        # labels arrive as float32 tensors; CrossEntropyLoss needs int64
        by = by.long().to(device)
        if is_train:
            # zero gradients
            optim.zero_grad()
            # ⑩ feed the batch through the network
            h = model(bx)
            # 11 predictions and targets into the loss function
            loss = criterion(h, by)
            # 12 backpropagate and update the weights
            loss.backward()
            optim.step()
        else:
            # evaluation: disable gradient tracking to save memory/compute
            with pt.no_grad():
                h = model(bx)
                loss = criterion(h, by)
        accv = acc(by, h).item()
        lossv = loss.item()
        if i % n_group == 0 or i == n_batches - 1:
            print(f'{label} #{i + 1}: loss = {lossv}, acc = {accv}')
        avg_lossv += lossv
        avg_accv += accv

    # leave the model in eval mode, matching the original per-batch behavior
    model.train(False)
    avg_lossv /= n_batches
    avg_accv /= n_batches
    return avg_lossv, avg_accv


# 13 Standard training loop: train, then validate, recording history per epoch
loss_his, acc_his, loss_his_val, acc_his_val = [], [], [], []
for epoch in range(1, N_EPOCHS + 1):
    sep(epoch)
    train_loss, train_acc = process_data(dl_train, 'train', True)
    print(f'Epoch#{epoch}: loss = {train_loss}, acc = {train_acc}')
    loss_his.append(train_loss)
    acc_his.append(train_acc)
    sep(f'{epoch} val')
    val_loss, val_acc = process_data(dl_test, 'val', False)
    print(f'Epoch#{epoch}: loss = {train_loss}, acc = {train_acc}, loss_val = {val_loss}, acc_val = {val_acc}')
    loss_his_val.append(val_loss)
    acc_his_val.append(val_acc)

# Final evaluation on the held-out data
sep('Test')
test_loss, test_acc = process_data(dl_test, 'test', False)
print(f'Test: loss = {test_loss}, acc = {test_acc}')

# Plot the recorded training curves: loss and accuracy, train vs. validation
plt.figure(figsize=[12, 6])
panels = [
    ('loss', loss_his, loss_his_val),
    ('acc', acc_his, acc_his_val),
]
for panel_no, (title, train_curve, val_curve) in enumerate(panels, start=1):
    plt.subplot(1, 2, panel_no)
    plt.title(title)
    plt.plot(train_curve, label='train')
    plt.plot(val_curve, label='val')
    plt.legend()

plt.show()
