# Logistic regression for a binary classification problem, implemented as a single-layer feedforward network
import random

import matplotlib.pyplot as plt
import numpy as np
import torch

# ----------------------Generate the dataset--------------------------------------------------
# Class 0: 10000 samples x 200 features drawn from N(mean=0.1, std=0.04)
train1 = torch.tensor(data=np.random.normal(0.1, 0.04, (10000, 200)), dtype=torch.float)
# BUGFIX: the original used train1[3000:] for training and train1[7000:] for
# testing, so rows 7000-9999 appeared in BOTH sets (train/test leakage).
# Split disjointly instead: first 7000 rows train, last 3000 rows test.
train_x1 = train1[:7000]
test_x1 = train1[7000:]
train_y1 = torch.zeros(7000)   # class-0 label
test_y1 = torch.zeros(3000)

# Class 1: 10000 samples x 200 features drawn from N(mean=-0.1, std=0.04)
train2 = torch.tensor(data=np.random.normal(-0.1, 0.04, (10000, 200)), dtype=torch.float)
train_x2 = train2[:7000]
test_x2 = train2[7000:]
train_y2 = torch.ones(7000)    # class-1 label
test_y2 = torch.ones(3000)

# Concatenate the two classes (names "feathers"/"lables" kept — used elsewhere in the file)
feathers = torch.cat((train_x1, train_x2), dim=0).type(torch.FloatTensor)
lables = torch.cat((train_y1, train_y2), dim=0).type(torch.FloatTensor)

test_data = torch.cat((test_x1, test_x2), dim=0).type(torch.FloatTensor)
test_lable = torch.cat((test_y1, test_y2), dim=0).type(torch.FloatTensor)

# Debug visualization of two feature dimensions
# plt.scatter(feathers[:, 1], feathers[:, 2], c=lables.numpy())
# plt.show()

# ---------------------Mini-batch data loader----------------------------------------------
def data_iter(batch_size, feathers, lables):
    """Yield shuffled (features, labels) mini-batches of size `batch_size`.

    The visiting order is re-randomized on every call; the final batch may be
    smaller when the dataset size is not a multiple of `batch_size`.
    """
    total = len(feathers)
    order = list(range(total))
    random.shuffle(order)  # new random order per epoch
    for start in range(0, total, batch_size):
        # Python slicing clamps at the end, so the last batch may be short
        picked = torch.LongTensor(order[start:start + batch_size])
        # index_select with dim=0 gathers whole rows
        yield feathers.index_select(0, picked), lables.index_select(0, picked)


# ----------------------Initialize model parameters------------------------------
# Logistic regression: one linear layer mapping 200 inputs to 1 output
# (there is no hidden layer in this model).
num_inputs, num_outs = 200, 1
# Small Gaussian initialization, N(0, 0.01)
W1 = torch.tensor(data=np.random.normal(0, 0.01, (num_outs, num_inputs)), dtype=torch.float)
b1 = torch.tensor(data=np.random.normal(0, 0.01, num_outs), dtype=torch.float)
params = [W1, b1]
# Enable in-place gradient tracking on every parameter
for p in params:
    p.requires_grad_(True)

# ---------------------Plot the loss curves-----------------------------------------
def loss_curve(x_vals, y_vals, x_lable, y_lable, x2_vals=None, y2_vals=None, legend=None, figsize=(3.5, 2.5)):
    """Plot y-vs-x on a log-scaled y axis, optionally overlaying a second
    (dotted) curve; `legend` is applied only when the second curve is drawn.
    """
    plt.rcParams['figure.figsize'] = figsize
    plt.xlabel(x_lable)
    plt.ylabel(y_lable)
    plt.semilogy(x_vals, y_vals)
    has_second_curve = bool(x2_vals) and bool(y2_vals)
    if has_second_curve:
        plt.semilogy(x2_vals, y2_vals, linestyle=":")
        plt.legend(legend)
    plt.show()

# --------------------Feedforward model definition----------------------
# Sigmoid activation: squashes any real value into the open interval (0, 1).
def simgod(z):
    """Element-wise logistic sigmoid, 1 / (1 + e^{-z})."""
    denom = 1 + torch.exp(-z)
    return 1 / denom

# Model: a single linear layer followed by a sigmoid (logistic regression).
def net(X):
    """Forward pass: reshape X to (batch, num_inputs) and return sigmoid(X @ W1.T + b1)."""
    # BUGFIX: the original called X.view(...) without binding the result, so
    # the reshape was silently discarded (view returns a new tensor).
    X = X.view(-1, num_inputs)
    F = torch.matmul(X, W1.t()) + b1
    H = simgod(F)
    return H

# -----------------------Accuracy and loss of the model on a dataset--------------
def evaluate_accuracy(batch_size, test_data, test_lable, net):
    """Return (accuracy, summed-batch-mean loss / n) of `net` over the given set.

    Iterates in order (no shuffling) in chunks of `batch_size`; relies on the
    module-level `loss` function.
    """
    acc_sum, loss_sum, n = 0.0, 0.0, 0
    dataLen = len(test_data)
    indices = list(range(dataLen))
    for i in range(0, dataLen, batch_size):
        # The last batch may be smaller than batch_size
        j = torch.LongTensor(indices[i: min(i + batch_size, dataLen)])
        # dim 0 = select rows
        X, y = test_data.index_select(0, j), test_lable.index_select(0, j)
        # PERF: run the forward pass once per batch (the original called net(X) twice).
        y_hat = net(X)
        acc_sum += ((y_hat.ge(0.5).float().squeeze()) == y).sum().item()
        # BUGFIX: squeeze y_hat from (b, 1) to (b,) before the loss, matching the
        # training path; the original broadcast (b, 1) against (b,) into a (b, b)
        # matrix and averaged over all b*b entries, giving a wrong test loss.
        loss_sum += loss(y_hat.squeeze(), y).sum().item()
        n += y.shape[0]
    # NOTE(review): loss_sum accumulates per-batch means but divides by the sample
    # count n (original behavior, kept) — the curve's scale is off by ~batch_size.
    return acc_sum/n, loss_sum/n

# ------------------------Binary cross-entropy loss----------------------------
# y_hat: predicted probabilities in [0, 1]; y: ground-truth labels (0 or 1).
def loss(y_hat, y):
    """Mean binary cross-entropy of predictions y_hat against labels y.

    BUGFIX: clamp y_hat away from exact 0/1 — otherwise a saturated sigmoid
    output makes log(0) = -inf and 0 * -inf = nan poisons the mean.
    """
    eps = 1e-7
    y_hat = torch.clamp(y_hat, eps, 1.0 - eps)
    return -torch.mean(y*torch.log(y_hat)+(1-y)*torch.log(1-y_hat))

# --------------------------Reset accumulated gradients--------------------------
def grad_zero(params):
    """Zero every parameter's .grad buffer in place.

    Assumes each parameter already holds a gradient (backward() has run).
    """
    for p in params:
        p.grad.data.zero_()

# --------------------------Mini-batch SGD parameter update---------------------------
def optimize_param(batch_size, params, lr):
    """One SGD step: p <- p - lr * p.grad / batch_size (updates .data, so no autograd tracking).

    NOTE(review): `loss` already averages over the batch, so dividing the gradient
    by batch_size again shrinks the effective step — kept to preserve behavior.
    """
    for p in params:
        p.data -= p.grad / batch_size * lr

# ---------------------------Train the model---------------------------------
def train(epochs, batch_size, feathers, lables, lr):
    """Train the logistic-regression model with mini-batch SGD.

    Prints loss / train accuracy / test accuracy once per epoch, then plots the
    train and test loss curves. Uses the module-level net/loss/optimizer helpers,
    the global `params`, and the global test set.
    """
    train_loss = []
    test_loss = []
    for epoch in range(epochs):
        # Per-epoch running totals: summed loss, sample count, correct predictions
        l_sum, count, train_acc_sum = 0, 0, 0
        for X, y in data_iter(batch_size=batch_size, feathers=feathers, lables=lables):
            # Forward pass
            y_hat = net(X)
            # Scalar batch loss (.sum() is a no-op on the scalar mean, kept for clarity)
            l = loss(y_hat.squeeze(), y.squeeze()).sum()
            # Backward pass
            l.backward()
            # Parameter update, then clear gradients for the next batch
            optimize_param(batch_size, params, lr)
            grad_zero(params)
            count += y.shape[0]
            # BUGFIX: accumulate a plain Python float. The original `l_sum += l`
            # kept every batch's autograd graph alive (memory growth across the
            # epoch) and later handed requires-grad tensors to matplotlib, which
            # cannot convert them to numpy.
            l_sum += l.item()
            # Classify with a 0.5 threshold and count correct predictions
            mask = y_hat.ge(0.5).float().squeeze()
            train_acc_sum += (mask == y).sum().item()
        train_loss.append(l_sum/count)
        # Evaluate on the held-out test set
        test_acc, test_l = evaluate_accuracy(batch_size, test_data, test_lable, net)
        test_loss.append(test_l)
        print("epoch % d, loss %.4f, train acc %.3f, test acc %.3f" % (epoch+1, l_sum/count, train_acc_sum/count, test_acc))
    # Plot train vs. test loss
    loss_curve(range(1, epochs+1), train_loss, "epochs", "loss", range(1, epochs+1), test_loss, ['train', 'test'])

if __name__ == '__main__':
    # Entry point: 100 epochs of mini-batch (size 25) SGD with learning rate 0.01
    # over the module-level dataset.
    train(epochs=100, batch_size=25, feathers=feathers, lables=lables, lr=0.01)







