# 1. Manually implement a feed-forward neural network for the binary classification task above

from common import *


def generate_data(size_, ρ_):
    """Create two Gaussian point clouds for a binary classification task.

    Class 0: `size_` samples of dimension `ρ_` drawn from N(0.1, 0.04).
    Class 1: `size_` samples of dimension `ρ_` drawn from N(-0.1, 0.04).

    Returns (class0_data, class0_labels, class1_data, class1_labels),
    where the data tensors are float32 and the labels are 0s / 1s.
    """
    # Draw class 0 first, then class 1, so the np.random stream is
    # consumed in the same order as before.
    pos_samples = torch.tensor(np.random.normal(0.1, 0.04, (size_, ρ_)), dtype=torch.float)
    neg_samples = torch.tensor(np.random.normal(-0.1, 0.04, (size_, ρ_)), dtype=torch.float)
    return pos_samples, torch.zeros(size_), neg_samples, torch.ones(size_)


def data_iter(batch_size, feathers, lables, isShuffle=False):
    """Yield (features, labels) mini-batches of size `batch_size`.

    The last batch may be smaller when the dataset size is not a
    multiple of `batch_size`.  When `isShuffle` is True the sample
    order is randomized first.

    NOTE: parameter names `feathers`/`lables` are misspellings of
    `features`/`labels`, but callers pass them as keywords, so they are
    kept for backward compatibility.
    """
    inputsLen = len(feathers)
    indices = list(range(inputsLen))
    if isShuffle:
        random.shuffle(indices)
    for i in range(0, inputsLen, batch_size):
        # torch.tensor(..., dtype=torch.long) replaces the legacy
        # torch.LongTensor constructor.
        j = torch.tensor(indices[i: min(i + batch_size, inputsLen)], dtype=torch.long)
        yield feathers.index_select(0, j), lables.index_select(0, j)


# Model definition and parameter initialization:
# a 200-dim input feeds a single linear unit followed by a Sigmoid,
# i.e. logistic regression (despite the `num_hidden1` name, the linear
# layer's output IS the network output).
num_inputs, num_hidden1, num_outs = 200, 1, 1
net = torch.nn.Sequential(
        # FlattenLayer comes from `common`; presumably it flattens each
        # sample to a 1-D vector — confirm against its definition.
        FlattenLayer(),
        torch.nn.Linear(num_inputs, num_hidden1),
        torch.nn.Sigmoid()
        )
# Initialize all weights and biases from N(0, 0.1).
for params in net.parameters():
    torch.nn.init.normal_(params, mean=0, std=0.1)


# Binary cross-entropy loss.  The original comment claimed a
# cross-entropy loss but the code used MSELoss; BCELoss is the
# cross-entropy criterion matching the Sigmoid output in [0, 1] and the
# 0/1 float labels of this binary task.  Same call signature as before.
loss = torch.nn.BCELoss()

# SGD optimizer over the network parameters.
lr = 0.001
optimizer = torch.optim.SGD(net.parameters(), lr)


# 绘制损失图像
def loss_curve(x_vals, y_vals, x_lable, y_lable, x2_vals=None, y2_vals=None, legend=None, figsize=(3.5, 2.5)):
    plt.rcParams['figure.figsize'] = figsize
    plt.xlabel(x_lable)
    plt.ylabel(y_lable)
    plt.semilogy(x_vals, y_vals)
    if x2_vals and y2_vals:
        plt.semilogy(x2_vals, y2_vals, linestyle=":")
        plt.legend(legend)
    plt.show()


# Training function
def train(feathers, lables, test_data, test_lable, num_epochs, batch_size, params=None, optimizer=None):
    """Train the module-level `net` with mini-batch SGD.

    feathers/lables: training features and 0/1 labels.
    test_data/test_lable: held-out set evaluated once per epoch via
        `evaluate_accuracy` (defined in `common`).
    params/optimizer: when `optimizer` is given it performs the update;
        otherwise `params` are updated manually using the module-level
        learning rate `lr`.  (The original code called
        `optimizer.step()` unconditionally, so the params-only path
        could never run.)
    Returns (train_loss_per_epoch, test_loss_per_epoch).
    """
    if params is not None:
        # net.parameters() is a generator at the call site; materialize
        # it so it can be indexed and iterated every batch.
        params = list(params)
    train_loss = []
    test_loss = []
    # Outer loop: epochs.
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        # Inner loop: mini-batches.
        for X, y in data_iter(batch_size=batch_size, feathers=feathers, lables=lables):
            y_hat = net(X)
            l = loss(y_hat.squeeze(), y.squeeze())
            # Clear stale gradients *before* backward so batches never
            # accumulate into each other.
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            l.backward()
            # Parameter update: prefer the optimizer, fall back to
            # manual SGD when only `params` was supplied.
            if optimizer is not None:
                optimizer.step()
            elif params is not None:
                with torch.no_grad():
                    for param in params:
                        param -= lr * param.grad
            # Accumulate training loss.
            train_l_sum += l.sum().item()
            # .item() so accuracy accumulates as a Python float, not a
            # 0-dim tensor (the original printed a tensor).
            train_acc_sum += ((y_hat.ge(0.5).float().squeeze()) == y).sum().item()
            n += y.shape[0]
        # Per-epoch held-out evaluation.
        test_acc, test_l = evaluate_accuracy(batch_size, test_data, test_lable, net, loss)
        train_loss.append(train_l_sum / n)
        test_loss.append(test_l)
        print("epoch % d, loss %.4f, train acc %.3f, test acc %.3f" % (epoch+1, train_l_sum/n, train_acc_sum/n, test_acc))
    return train_loss, test_loss


if __name__ == '__main__':
    # Build 10000 samples per class of dimension 200, then split each
    # class 70% train / 30% test.  (The original comment said 500-dim;
    # ρ is actually 200, matching num_inputs.)
    size, ρ = 10000, 200
    x_data, x_label, y_data, y_label = generate_data(size, ρ)
    x_train, y_train, x_test, y_test = train_test_split(x_data, y_data, 0.7)
    x_label_train, y_label_train, x_label_test, y_label_test = train_test_split(x_label, y_label, 0.7)

    # Concatenate the two classes into single train / test sets.
    feathers = torch.cat((x_train, y_train), dim=0).type(torch.FloatTensor)
    labels = torch.cat((x_label_train, y_label_train), dim=0).type(torch.FloatTensor)
    test_data = torch.cat((x_test, y_test), dim=0).type(torch.FloatTensor)
    test_labels = torch.cat((x_label_test, y_label_test), dim=0).type(torch.FloatTensor)

    # Hyper-parameters.
    num_epochs = 15
    batch_size = 64

    # Train the model, then plot the train/test loss curves.
    train_loss, test_loss = train(feathers, labels, test_data, test_labels, num_epochs, batch_size, net.parameters(), optimizer)
    loss_curve(range(1, num_epochs+1), train_loss, "epochs", "loss", range(1, num_epochs+1), test_loss, ['train', 'test'])