# 前馈神经网络的基本实验
import random

import matplotlib.pyplot as plt
import torch
import torchvision
from prettytable import PrettyTable
from torchvision import transforms

# Load the dataset
# Training set (downloaded into ~/Datasets/MNIST on first run)
mnist_train = torchvision.datasets.MNIST(root='~/Datasets/MNIST', train=True, download=True,
                                                transform=transforms.ToTensor())
# Test set
mnist_test = torchvision.datasets.MNIST(root='~/Datasets/MNIST', train=False, download=True,
                                               transform=transforms.ToTensor())

# Mini-batch iterators over the data
batch_size = 32
# 0 disables worker subprocesses (single-process loading)
num_workers = 0
train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)

def data_iter(feathers, lables, batch_size, shuffle):
    """Yield (feature, label) mini-batches of size `batch_size`.

    When `shuffle` is true the sample order is randomized first; the
    final batch may be smaller than `batch_size`.
    """
    sample_count = len(feathers)
    order = list(range(sample_count))
    if shuffle:
        random.shuffle(order)
    start = 0
    while start < sample_count:
        picked = torch.LongTensor(order[start:start + batch_size])
        # index_select along dim 0 gathers whole rows (samples)
        yield feathers.index_select(0, picked), lables.index_select(0, picked)
        start += batch_size

# Layer that flattens each sample into a 1-D vector
class FlattenLayer(torch.nn.Module):
    """Reshape (N, *) input into (N, features) so Linear layers can follow."""

    def __init__(self):
        super(FlattenLayer, self).__init__()

    def forward(self, x):
        # keep the batch dimension, collapse everything else
        batch = x.shape[0]
        return x.view(batch, -1)

# Define the model and initialize its parameters
# Dropout drop probability (used only by the commented-out Dropout layer)
drop_prob = 0.5
num_inputs, num_outs, num_hidden1, num_hidden2, num_hidden3, num_hidden4 = 784, 10, 256, 264, 123, 56
net = torch.nn.Sequential(
        FlattenLayer(),
        # first hidden layer
        torch.nn.Linear(num_inputs, num_hidden1),
        torch.nn.ReLU(),
        # dropout to reduce overfitting
        # torch.nn.Dropout(drop_prob),
        # second hidden layer
        # torch.nn.Linear(num_hidden1, num_hidden2),
        # torch.nn.ReLU(),
        # third hidden layer
        # torch.nn.Linear(num_hidden2, num_hidden3),
        # torch.nn.ReLU(),
        # # fourth hidden layer
        # torch.nn.Linear(num_hidden3, num_hidden4),
        # torch.nn.ReLU(),
        torch.nn.Linear(num_hidden1, num_outs)
        )
# NOTE(review): this also draws the biases from N(0, 0.01) rather than zeroing them
for params in net.parameters():
    torch.nn.init.normal_(params, mean=0, std=0.01)


# Compute the model's accuracy and average loss on a data set
def evaluate_accuracy(data_iter, net, loss):
    """Evaluate `net` over every batch yielded by `data_iter`.

    Returns (accuracy, average loss per sample); (0, 0) when the
    iterator yields no samples.
    """
    acc_sum, loss_sum, n = 0.0, 0.0, 0
    # evaluation needs no gradients
    with torch.no_grad():
        for X, y in data_iter:
            # BUG FIX: run the forward pass once per batch (original ran it twice)
            y_hat = net(X)
            acc_sum += (y_hat.argmax(dim=1) == y).float().sum().item()
            # BUG FIX: loss() returns the batch *mean*; weight it by the batch
            # size so the final division by n is a true per-sample average
            loss_sum += loss(y_hat, y).item() * y.shape[0]
            n += y.shape[0]
    if n != 0:
        return acc_sum / n, loss_sum / n
    return 0, 0

# Cross-entropy loss (combines log-softmax and NLL; reduces to batch mean)
loss = torch.nn.CrossEntropyLoss()

# Optimizer
lr = 0.01
# weight-decay coefficient; only referenced by the commented-out L2 variants below
wd = 2
optimizer = torch.optim.SGD(net.parameters(), lr)
# L2-norm regularization via torch.optim's weight_decay parameter
# optimizer_w = torch.optim.SGD(params=[net.parameters().__getattribute__(".*weight")], lr=lr, weight_decay=wd)
# optimizer_b = torch.optim.SGD(params=[net.__getattribute__(".*bias")], lr=lr)

# K-fold cross-validation data split
def get_kfold_data(k, i, X, y):
    """Return (X_train, y_train, X_valid, y_valid) for fold i of k (i in 0..k-1).

    The last fold absorbs the remainder when len(X) is not divisible by k.
    """
    fold_size = X.shape[0] // k
    start = i * fold_size
    # the final fold keeps any leftover samples
    end = X.shape[0] if i == k - 1 else start + fold_size
    X_valid, y_valid = X[start:end], y[start:end]
    X_train = torch.cat((X[:start], X[end:]), dim=0)
    y_train = torch.cat((y[:start], y[end:]), dim=0)
    return X_train, y_train, X_valid, y_valid

# Train and evaluate each fold, then report per-fold and averaged metrics
def k_fold(k, train_iter, num_epochs, batch_size, optimizer):
    """Run k-fold cross-validation over all data yielded by `train_iter`."""
    # materialize the whole data set from the batch iterator
    feature_parts, label_parts = [], []
    for X, y in train_iter:
        feature_parts.append(X)
        label_parts.append(y)
    if feature_parts:
        X_train = torch.cat(feature_parts, dim=0)
        y_train = torch.cat(label_parts, dim=0)
    else:
        # mirror the original fallback when the iterator is empty
        X_train, y_train = torch.zeros(1), torch.zeros(1)
    k_table = PrettyTable(["epoch", "train_acc", "valid_acc", "train_loss", "valid_loss"])
    # running totals: train_acc, valid_acc, train_loss, valid_loss
    totals = [0.0, 0.0, 0.0, 0.0]
    for i in range(k):
        fold = get_kfold_data(k, i, X_train, y_train)
        # train/evaluate on this fold's split
        train_loss, valid_loss, train_acc, valid_acc = train_k(*fold, num_epochs, batch_size, optimizer)
        totals[0] += train_acc
        totals[1] += valid_acc
        totals[2] += train_loss
        totals[3] += valid_loss
        k_table.add_row([(i+1), train_acc, valid_acc, train_loss, valid_loss])
    # per-fold results table
    print(k_table)
    print("\n", "最终K折交叉验证的结果：")
    print("average train loss :{:.4f}, average train acc :{:.3f}%".format(totals[2]/k, totals[0]/k*100))
    print("average valid loss :{:.4f}, average valid acc :{:.3f}%".format(totals[3]/k, totals[1]/k*100))

# Plot loss curve(s) on a log-scaled y axis
def loss_curve(x_vals, y_vals, x_lable, y_lable, x2_vals=None, y2_vals=None, legend=None, figsize=(3.5, 2.5)):
    """Draw y_vals (and optionally a second dotted series y2_vals) against x_vals."""
    plt.rcParams['figure.figsize'] = figsize
    plt.xlabel(x_lable)
    plt.ylabel(y_lable)
    plt.semilogy(x_vals, y_vals)
    has_second_series = bool(x2_vals) and bool(y2_vals)
    if has_second_series:
        # dotted style distinguishes the second curve
        plt.semilogy(x2_vals, y2_vals, linestyle=":")
        plt.legend(legend)
    plt.show()

# Training routine used by k-fold cross-validation
def train_k(train_X, train_y, valid_X, valid_y, num_epochs, batch_size, optimizer=None):
    """Train the global `net` on (train_X, train_y), validating on (valid_X, valid_y).

    Returns (train_loss, valid_loss, train_acc, valid_acc) — each the SUM of the
    per-epoch values accumulated over `num_epochs` epochs (caller averages).
    """
    train_loss, valid_loss_sum = 0.0, 0.0
    train_acc, valid_acc_sum = 0.0, 0.0
    # outer loop controls the training epochs
    for epoch in range(num_epochs):
        # BUG FIX: data_iter returns a one-shot generator; the original built both
        # iterators once OUTSIDE this loop, so every epoch after the first saw no
        # data at all. Rebuild a fresh iterator for every epoch instead.
        train_batches = data_iter(train_X, train_y, batch_size, True)
        train_l_sum, train_acc_sum = 0.0, 0.0
        # inner loop runs over mini-batches
        for X, y in train_batches:
            # forward pass
            y_hat = net(X)
            l = loss(y_hat, y).sum()
            # clear stale gradients before the backward pass
            if optimizer is not None:
                optimizer.zero_grad()
            l.backward()
            # BUG FIX: the original called optimizer.step() unconditionally (crash
            # when optimizer is None) and then referenced an undefined `params`;
            # guard the parameter update instead.
            if optimizer is not None:
                optimizer.step()
            # accumulate training loss and accuracy
            train_l_sum += l.item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
        # evaluate on a fresh validation iterator each epoch
        valid_batches = data_iter(valid_X, valid_y, batch_size, False)
        valid_acc, valid_l = evaluate_accuracy(valid_batches, net, loss)
        train_loss += train_l_sum / train_X.shape[0]
        train_acc += train_acc_sum / train_X.shape[0]
        valid_acc_sum += valid_acc
        valid_loss_sum += valid_l
    return train_loss, valid_loss_sum, train_acc, valid_acc_sum

# Plain training loop over (re-iterable) DataLoader iterators
def train(train_iter, test_iter, num_epochs, params=None, optimizer=None):
    """Train the global `net` for `num_epochs` epochs, printing per-epoch metrics.

    `params` is only consulted to zero gradients manually when no optimizer is
    supplied. Returns (train_loss, test_loss): per-epoch average-loss histories.
    """
    train_loss = []
    test_loss = []
    # outer loop controls the training epochs
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        # inner loop runs over mini-batches
        for X, y in train_iter:
            # forward pass
            y_hat = net(X)
            l = loss(y_hat, y).sum()
            # clear previous gradients
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            # zero-grad calls for the L2-norm variant
            # optimizer_w.zero_grad()
            # optimizer_b.zero_grad()

            # backward pass
            l.backward()
            # BUG FIX: the original called optimizer.step() unconditionally,
            # crashing whenever optimizer was left as None
            if optimizer is not None:
                optimizer.step()
            # optimizer_w.step()
            # optimizer_b.step()

            # BUG FIX: loss() yields the batch *mean*; weight it by the batch
            # size so train_l_sum / n is a true per-sample average
            train_l_sum += l.item() * y.shape[0]
            # training accuracy
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            # samples seen so far
            n += y.shape[0]
        # evaluate on the held-out set
        test_acc, test_l = evaluate_accuracy(test_iter, net, loss)
        train_loss.append(train_l_sum / n)
        test_loss.append(test_l)
        # report this epoch's metrics
        print("epoch % d, loss %.4f, train acc %.3f, test acc %.3f" % (epoch+1, train_l_sum/n, train_acc_sum/n, test_acc))
    return train_loss, test_loss
if __name__ == '__main__':
    # number of training epochs
    num_epochs = 15
    # model variant tag
    # modelType = "relu"
    # train the model (penalty weight controlled through the optimizer)
    # train_loss, test_loss = train(train_iter, test_iter, num_epochs, net.parameters(), optimizer)
    # plot the loss curves
    # loss_curve(range(1, num_epochs+1), train_loss, "epochs", "loss", range(1, num_epochs+1), test_loss, ['train', 'test'])
    k_fold(k=10, train_iter=train_iter, num_epochs=1, batch_size=32, optimizer=optimizer)