# 1.手动实现前馈神经网络解决多分类任务
import random

from common import *


#  下载数据库
#  Download the MNIST train/test splits into `path_dir`.
def down_load_data(path_dir):
    """Return (train, test) torchvision MNIST datasets rooted at `path_dir`.

    Images are converted to float tensors via transforms.ToTensor().
    """
    mnist_train = torchvision.datasets.MNIST(root=path_dir, train=True, download=True, transform=transforms.ToTensor())
    # BUG FIX: allow downloading the test split too — with the default
    # download=False a missing test set raises RuntimeError even though the
    # train split downloaded fine.
    mnist_test = torchvision.datasets.MNIST(root=path_dir, train=False, download=True, transform=transforms.ToTensor())
    return mnist_train, mnist_test


#  读取小批量图集
#  Wrap the datasets in minibatch DataLoaders (train shuffled, test in order).
def load_data_iter(mnist_train, mnist_test, batch_size, num_workers):
    """Return (train_iter, test_iter) DataLoaders over the given datasets."""
    make_loader = torch.utils.data.DataLoader
    train_iter = make_loader(mnist_train, batch_size=batch_size,
                             shuffle=True, num_workers=num_workers)
    test_iter = make_loader(mnist_test, batch_size=batch_size,
                            shuffle=False, num_workers=num_workers)
    return train_iter, test_iter


# 评估模型net在数据集data_iter中的准确率
# Evaluate model `net` over `data_iter`.
def evaluate_accuracy(data_iter, net):
    """Compute accuracy and average per-sample loss of `net` on `data_iter`.

    Uses the module-level `loss` criterion (CrossEntropyLoss, mean reduction).

    Returns:
        (accuracy, avg_loss) over all samples, or (0, 0) for an empty iterator.
    """
    loss_sum, acc_sum, n = 0.0, 0.0, 0
    # No gradients are needed during evaluation — saves memory and time.
    with torch.no_grad():
        for X, y in data_iter:
            # BUG FIX: forward once and reuse y_hat for both loss and accuracy
            # (the original ran net(X) twice per batch).
            y_hat = net(X)
            l = loss(y_hat, y).sum()
            # BUG FIX: `loss` uses mean reduction (a scalar, so .sum() is a
            # no-op); weight each batch mean by its size so dividing by n
            # yields a true per-sample average even with an uneven last batch.
            loss_sum += l.item() * y.shape[0]
            acc_sum += (y_hat.argmax(dim=1) == y).float().sum().item()
            n += y.shape[0]
    if n == 0:
        return 0, 0
    return acc_sum / n, loss_sum / n


def SGD(params, lr):
    """Vanilla minibatch stochastic gradient descent.

    Updates each parameter in place: p <- p - lr * p.grad.
    Gradients must already be populated; they are not cleared here.
    """
    for p in params:
        # In-place update on .data bypasses autograd tracking.
        p.data.add_(p.grad, alpha=-lr)


# Training function for one fold of K-fold cross-validation.
def train_k(train_X, train_y, valid_X, valid_y, num_epochs, batch_size, optimizer=None):
    """Train the module-level `net` on one train/validation split.

    Args:
        train_X, train_y: training features / labels (tensors).
        valid_X, valid_y: validation features / labels (tensors).
        num_epochs: number of passes over the training split.
        batch_size: minibatch size for both splits.
        optimizer: torch optimizer; if None, falls back to the manual SGD()
            helper using the module-level `params` and `lr`.

    Returns:
        (train_loss, test_loss, train_acc_sum, test_acc_sum) accumulated
        over all epochs (caller averages across folds/epochs).
    """
    train_loss, test_loss = 0.0, 0.0
    train_acc_sum, test_acc_sum = 0.0, 0.0
    # data_iter comes from common.*; presumably yields (X, y) minibatches —
    # TODO confirm it is re-iterable across epochs.
    train_iter = data_iter(train_X, train_y, batch_size, True)
    test_iter = data_iter(valid_X, valid_y, batch_size, False)
    for epoch in range(num_epochs):
        train_l_sum, train_acc = 0.0, 0.0
        # Inner loop: one pass over the training minibatches.
        for X, y in train_iter:
            # Forward pass.
            y_hat = net(X)
            # Loss for this batch (CrossEntropyLoss, from module level).
            l = loss(y_hat, y).sum()
            # BUG FIX: clear stale gradients BEFORE backward so the very
            # first update is not polluted by earlier accumulation (the
            # original zeroed only after the step).
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            # Backpropagate.
            l.backward()
            # BUG FIX: the original called optimizer.step() unconditionally,
            # crashing when optimizer is None; fall back to the manual SGD
            # helper with the module-level params/lr in that case.
            if optimizer is not None:
                optimizer.step()
            else:
                SGD(params, lr)
            # Accumulate training loss and accuracy.
            train_l_sum += l.item()
            train_acc += (y_hat.argmax(dim=1) == y).sum().item()
        # Validation metrics for this epoch.
        test_acc, test_l = evaluate_accuracy(test_iter, net)
        train_loss += train_l_sum / train_X.shape[0]
        train_acc_sum += train_acc / train_X.shape[0]
        test_acc_sum += test_acc
        test_loss += test_l
    return train_loss, test_loss, train_acc_sum, test_acc_sum


# Network dimensions: 784 input pixels (28x28), 256 hidden units, 10 classes.
# BUG FIX: the original swapped hidden/output sizes (10 hidden, 256 outputs),
# which cannot sensibly classify MNIST's 10 digit classes.
num_inputs, num_hiddens, num_outs = 784, 256, 10
# Model definition: flatten -> linear -> ReLU -> linear (raw logits out).
net = torch.nn.Sequential(
    FlattenLayer(),
    torch.nn.Linear(num_inputs, num_hiddens),
    torch.nn.ReLU(),
    torch.nn.Linear(num_hiddens, num_outs)
)

# Initialize all weights and biases from N(0, 0.01^2).
# BUG FIX: the original loop variable was named `params`, leaving the global
# `params` bound to the LAST parameter tensor — but train_k's manual-SGD
# fallback expects a list of parameters. Rename the loop variable and bind
# `params` explicitly.
for param in net.parameters():
    torch.nn.init.normal_(param, mean=0, std=0.01)
params = list(net.parameters())

# Number of epochs and learning rate.
num_epochs, lr = 10, 0.1
# Cross-entropy loss (expects raw logits; applies log-softmax internally).
loss = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr)

# Build the training and test data.
batch_size = 32
num_workers = 0  # DataLoader worker processes (0 = load in the main process)
path_dir = '~/Datasets/MNIST'

mnist_train, mnist_test = down_load_data(path_dir)
train_iter, test_iter = load_data_iter(mnist_train, mnist_test, batch_size, num_workers)

# Run K-fold cross-validation training (k_fold_mult comes from common.*).
k_fold_mult(train_k, k=10, train_iter=train_iter, num_epochs=num_epochs, batch_size=batch_size,
        optimizer=optimizer)
