# 前馈神经网络的基本实验
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
from torchvision import transforms

# Load the MNIST dataset
# Training split (downloaded to ~/Datasets/MNIST on first run, converted to tensors)
mnist_train = torchvision.datasets.MNIST(root='~/Datasets/MNIST', train=True, download=True,
                                                transform=transforms.ToTensor())
# Test split
mnist_test = torchvision.datasets.MNIST(root='~/Datasets/MNIST', train=False, download=True,
                                               transform=transforms.ToTensor())

# Mini-batch data loaders
batch_size = 32
# 0 => no worker subprocesses; batches are loaded in the main process
num_workers = 0
train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)

# Initialize model parameters for one hidden layer:
# 784 inputs (28*28 flattened pixels), 10 output classes, 256 hidden units.
# Weights are drawn from N(0, 0.01); biases start at zero.
num_inputs, num_outs, num_hiddens = 784, 10, 256
W1 = torch.tensor(np.random.normal(0, 0.01, (num_hiddens, num_inputs)), dtype=torch.float)
b1 = torch.zeros(num_hiddens, dtype=torch.float)
W2 = torch.tensor(np.random.normal(0, 0.01, (num_outs, num_hiddens)), dtype=torch.float)
b2 = torch.zeros(num_outs, dtype=torch.float)
# All learnable tensors (used by SGD and gradient reset)
params = [W1, b1, W2, b2]
# Weight matrices only — fed to the L2 penalty (biases are not regularized)
params_w = [W1, W2]
for param in params:
    param.requires_grad_(True)

# Activation functions (ReLU is just an element-wise max with 0)
def Rule(X):
    """ReLU activation: max(X, 0) element-wise.

    NOTE(review): the name looks like a typo for "Relu"; kept unchanged
    because `net` dispatches to it by this name.
    """
    return torch.clamp(X, min=0.0)

def simgod(X):
    """Sigmoid activation: 1 / (1 + e^(-X)), element-wise.

    NOTE(review): the name looks like a typo for "sigmoid"; kept
    unchanged because `net` dispatches to it by this name.
    """
    denom = torch.exp(-X) + 1
    return denom.reciprocal()

def tanh(X):
    """Hyperbolic tangent activation, element-wise.

    Fix: the original computed (e^X - e^-X)/(e^X + e^-X) directly, which
    overflows to inf for moderately large X (e.g. X >= ~89 in float32)
    and yields inf/inf = NaN. torch.tanh is numerically stable over the
    whole range and mathematically identical.
    """
    return torch.tanh(X)

def leakyRelu(X):
    """Leaky ReLU activation: X where X > 0, else 0.2 * X.

    Fix: the original wrapped the result in torch.tensor(temp), which
    copies the tensor and detaches it from the autograd graph — so no
    gradient could flow back through this activation during training
    (it also triggers a "copy construct" warning). Return the
    torch.where result directly so gradients propagate.
    """
    return torch.where(X > 0, X, X * 0.2)

def elu(X):
    """ELU activation: X where X > 0, else 0.2 * (e^X - 1).

    Fix: like leakyRelu, the original wrapped the result in
    torch.tensor(temp), detaching it from the autograd graph and
    blocking gradient flow through the activation. Return the
    torch.where result directly.
    """
    return torch.where(X > 0, X, 0.2 * (torch.exp(X) - 1))

# Model definition
def net(X, f_type, is_training, drop_prob):
    """Forward pass of the two-layer feedforward network.

    Args:
        X: input batch; flattened to (batch, num_inputs). The -1 in
           view() lets the batch dimension be inferred.
        f_type: activation name ("simgod", "tanh", "leakyRelu", "elu");
                anything else falls back to ReLU (`Rule`).
        is_training: when True, dropout is applied to the hidden layer.
        drop_prob: dropout probability used in training mode.

    Returns:
        Output logits of shape (batch, num_outs).
    """
    X = X.view(-1, num_inputs)
    pre_activation = torch.matmul(X, W1.t()) + b1
    # Dispatch to the requested activation; default is ReLU.
    activations = {
        "simgod": simgod,
        "tanh": tanh,
        "leakyRelu": leakyRelu,
        "elu": elu,
    }
    H = activations.get(f_type, Rule)(pre_activation)
    if is_training:
        # Dropout is applied only during training.
        H = dropout(H, drop_prob)
    return torch.matmul(H, W2.t()) + b2

# Vanilla stochastic gradient descent
def SGD(params, lr):
    """One SGD step: update every parameter in place, p <- p - lr * p.grad.

    Operates on .data so the update itself is excluded from autograd
    tracking. Assumes each parameter already has a populated .grad.
    """
    for p in params:
        p.data.sub_(p.grad * lr)

# Compute the model's accuracy and average loss on a dataset
def evaluate_accuracy(data_iter, net, f_type):
    """Evaluate `net` on `data_iter`; return (accuracy, mean loss per batch-sum).

    The model is called in inference mode (is_training=False, dropout 0).
    Relies on the module-level `loss` criterion.

    Fix: the original ran the forward pass twice per batch (once for the
    accuracy term, once for the loss term); compute it once and reuse.
    Also wrapped in no_grad since no gradients are needed for metrics.
    """
    acc_sum, loss_sum, n = 0.0, 0.0, 0
    with torch.no_grad():
        for X, y in data_iter:
            y_hat = net(X, f_type, False, 0)
            acc_sum += (y_hat.argmax(dim=1) == y).float().sum().item()
            loss_sum += loss(y_hat, y).sum().item()
            n += y.shape[0]
    return acc_sum/n, loss_sum/n

# Reset gradients to zero
def grad_zero(optimizer, params):
    """Zero out gradients via the optimizer (if any) and on `params` directly.

    Safe to call before the first backward pass: if params have no .grad
    yet (checked on the first one), the manual loop is skipped.
    """
    if optimizer is not None:
        optimizer.zero_grad()
    if params is None or params[0].grad is None:
        return
    for p in params:
        p.grad.data.zero_()

# Cross-entropy loss criterion over class logits (shared by train/eval code)
loss = torch.nn.CrossEntropyLoss()

# Plot loss curves
def loss_curve(x_vals, y_vals, x_lable, y_lable, x2_vals=None, y2_vals=None, legend=None, figsize=(3.5, 2.5)):
    """Plot one curve (and optionally a second, dotted one) on a semilog-y axis.

    Args:
        x_vals, y_vals: primary curve (e.g. training loss per epoch).
        x_lable, y_lable: axis labels (note: parameter names keep the
            original spelling for caller compatibility).
        x2_vals, y2_vals: optional secondary curve (e.g. test loss);
            plotted dotted, with `legend` shown, only when both are given.
        figsize: matplotlib figure size.
    """
    plt.rcParams['figure.figsize'] = figsize
    plt.xlabel(x_lable)
    plt.ylabel(y_lable)
    plt.semilogy(x_vals, y_vals)
    second_curve_given = bool(x2_vals) and bool(y2_vals)
    if second_curve_given:
        plt.semilogy(x2_vals, y2_vals, linestyle=":")
        plt.legend(legend)
    plt.show()

# L2-norm penalty term (weight decay)
def l2_penalty(params_w):
    """Return the L2 penalty: sum over each weight tensor of ||W||^2 / 2.

    Fix: the original shadowed the builtin `sum` with a local accumulator;
    use the builtin with a generator expression instead.
    """
    return sum((w ** 2).sum() / 2 for w in params_w)

# Inverted dropout
def dropout(X, drop_prob):
    """Apply inverted dropout to X with probability `drop_prob`.

    Surviving elements are scaled by 1/keep_prob so the expected value
    of each element is unchanged. Raises AssertionError if drop_prob is
    outside [0, 1].
    """
    X = X.float()
    assert 0 <= drop_prob <= 1
    keep_drop = 1 - drop_prob
    # Everything dropped: short-circuit to avoid dividing by zero below.
    if keep_drop == 0:
        return torch.zeros_like(X)
    # Bernoulli keep-mask: 1.0 with probability keep_drop, else 0.0.
    keep_mask = (torch.rand(X.shape) < keep_drop).float()
    return X * keep_mask / keep_drop

# Training loop
def train(train_iter, test_iter, loss, num_epochs, params=None, params_w=None, lr=None, f_type="relu", lambd=0, drop_prob=0, optimizer=None):
    """Train the network for `num_epochs` epochs and report per-epoch metrics.

    Args:
        train_iter, test_iter: DataLoaders yielding (X, y) batches.
        loss: criterion returning a scalar loss per batch.
        num_epochs: number of full passes over the training data.
        params: all learnable tensors (for manual SGD and gradient reset).
        params_w: weight matrices only, fed to the L2 penalty.
        lr: learning rate for the manual SGD update.
        f_type: activation selector dispatched inside `net`.
        lambd: L2 penalty coefficient (0 disables regularization).
        drop_prob: dropout probability applied in training mode.
        optimizer: optional torch optimizer; when given, it replaces manual SGD.

    Returns:
        (train_loss, test_loss): lists of per-epoch average losses.

    Fix: removed the dead statement `l.sum()` — its result was discarded,
    so it had no effect (the loss is already a scalar when backward() runs).
    """
    train_loss = []
    test_loss = []
    print("lambd {}".format(lambd))
    print("f_type {}".format(f_type))
    # Outer loop: epochs
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        # Inner loop: one mini-batch per iteration
        for X, y in train_iter:
            # Forward pass in training mode (dropout active)
            y_hat = net(X, f_type, True, drop_prob)
            # Batch loss plus optional L2 weight penalty
            l = loss(y_hat, y) + lambd * l2_penalty(params_w)
            # Backpropagate
            l.backward()
            # Parameter update: manual SGD unless an optimizer was supplied
            if optimizer is None:
                SGD(params, lr)
            else:
                optimizer.step()
            # Clear gradients AFTER the update so they don't accumulate
            grad_zero(optimizer, params)
            # Accumulate training loss and accuracy
            train_l_sum += l.item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        # Evaluate on the test set after each epoch
        test_acc, test_l = evaluate_accuracy(test_iter, net, f_type)
        train_loss.append(train_l_sum / n)
        test_loss.append(test_l)
        # NOTE(review): train_l_sum accumulates per-batch *mean* losses but is
        # divided by the sample count n, so the reported value is scaled by
        # ~1/batch_size; kept as-is to preserve the original reporting.
        print("epoch % d, loss %.4f, train acc %.3f, test acc %.3f" % (epoch+1, train_l_sum/n, train_acc_sum/n, test_acc))
    return train_loss, test_loss
if __name__ == '__main__':
    # Number of training epochs
    num_epochs = 15
    # Learning rate for manual SGD
    lr = 0.01
    # L2 penalty coefficient (0 disables weight decay)
    lambd = 0
    # Dropout probability for the hidden layer
    # NOTE(review): 0.9 drops 90% of hidden units — unusually aggressive; confirm intended
    drop_prob = 0.9
    # Activation selector dispatched inside `net` ("relu" falls through to `Rule`)
    modelType = "relu"
    # Train the model (lambd controls the penalty weight)
    train_loss, test_loss = train(train_iter, test_iter, loss, num_epochs, params, params_w, lr, modelType, lambd, drop_prob)
    # Plot train/test loss curves over epochs
    loss_curve(range(1, num_epochs+1), train_loss, "epochs", "loss", range(1, num_epochs+1), test_loss, ['train', 'test'])
