# 使用前馈神经网络实现逻辑回归（二分类问题）
import random

import matplotlib.pyplot as plt
import numpy as np
import torch
from prettytable import PrettyTable

# ---------------------- Generate the dataset ----------------------
# Class 0: 10000 samples with 200 features each, drawn from N(0.1, 0.04)
# (note: np.random.normal's second argument is the standard deviation).
train1 = torch.tensor(data=np.random.normal(0.1, 0.04, (10000, 200)), dtype=torch.float)
# BUG FIX: the original split used train1[3000:] for training and
# train1[7000:] for testing, so rows 7000-9999 appeared in BOTH sets
# (data leakage into the test set). Use a disjoint 7000/3000 split,
# which also matches the 7000/3000 label tensors below.
train_x1 = train1[:7000]
test_x1 = train1[7000:]
# print(train_x1.shape)
# print(test_x1.shape)
train_y1 = torch.zeros(7000)  # label 0 for class 0
test_y1 = torch.zeros(3000)

# Class 1: same shape, mean -0.1.
train2 = torch.tensor(data=np.random.normal(-0.1, 0.04, (10000, 200)), dtype=torch.float)
train_x2 = train2[:7000]
test_x2 = train2[7000:]
train_y2 = torch.ones(7000)  # label 1 for class 1
test_y2 = torch.ones(3000)

# Merge the two classes into single train/test feature and label tensors.
feathers = torch.cat((train_x1, train_x2), dim=0).type(torch.FloatTensor)
lables = torch.cat((train_y1, train_y2), dim=0).type(torch.FloatTensor)

test_data = torch.cat((test_x1, test_x2), dim=0).type(torch.FloatTensor)
test_lable = torch.cat((test_y1, test_y2), dim=0).type(torch.FloatTensor)

# Visualize two arbitrary feature dimensions, colored by class label.
plt.scatter(feathers[:, 1], feathers[:, 2], c=lables.numpy())
plt.show()

# 定义FlattenLayer层
class FlattenLayer(torch.nn.Module):
    """Flattens each sample in a batch into a 1-D vector: (B, ...) -> (B, -1)."""

    def __init__(self):
        super(FlattenLayer, self).__init__()

    def forward(self, x):
        # Keep the batch dimension, collapse everything else.
        batch = x.shape[0]
        return x.view(batch, -1)

# Define the model and initialize its parameters.
# 200 input features, a single unit, single output (the commented layer
# below would add a second stage but is disabled).
num_inputs, num_hidden1, num_outs = 200, 1, 1
net = torch.nn.Sequential(
        FlattenLayer(),
        # Single linear layer: 200 -> 1, followed by a sigmoid, i.e. plain
        # logistic regression.
        torch.nn.Linear(num_inputs, num_hidden1),
        torch.nn.Sigmoid(),
        # Optional second layer (disabled).
        # torch.nn.Linear(num_hidden1, num_outs),
        # torch.nn.Sigmoid(),
        )
# Initialize every parameter (weights AND biases) from N(0, 0.1).
for params in net.parameters():
    torch.nn.init.normal_(params, mean=0, std=0.1)

# ---------------------批量读取数据----------------------------------------------
def data_iter(batch_size, feathers, lables, isShuffle=False):
    """Yield (features, labels) mini-batches of size `batch_size`.

    The final batch may be smaller than `batch_size`. When `isShuffle`
    is True, sample order is randomized before batching.
    """
    total = len(feathers)
    order = list(range(total))
    if isShuffle:
        random.shuffle(order)
    for start in range(0, total, batch_size):
        # Python slicing clamps automatically, so the tail batch is short.
        batch_idx = torch.LongTensor(order[start: start + batch_size])
        # index_select along dim 0 picks whole rows (samples).
        yield feathers.index_select(0, batch_idx), lables.index_select(0, batch_idx)

# Loss function.
# NOTE(review): the original comment called this a cross-entropy loss (the
# hand-written version is kept below, commented out), but MSELoss is what is
# actually used. For a sigmoid-output binary classifier, torch.nn.BCELoss
# would match the logistic-regression setup — confirm before changing, as it
# alters all reported loss values.
# def loss(y, lables):
#     return -torch.mean(lables*torch.log(y)+(1-lables)*torch.log(1-y))
loss = torch.nn.MSELoss()

# Optimizer: plain SGD over all of the model's parameters.
lr = 0.001
optimizer = torch.optim.SGD(net.parameters(), lr)

# K折交叉验证
def get_kfold_data(k, i, X, y):
    # 返回第i+1折（i=0-->k-1）
    # 每折的个数，总数除以折数
    fold_size = X.shape[0]//k
    val_start = i*fold_size
    if i != k-1:
        val_end = (i+1)*fold_size
        X_valid, y_valid = X[val_start:val_end], y[val_start:val_end]
        X_train = torch.cat((X[0:val_start], X[val_end:]), dim=0)
        y_train = torch.cat((y[0:val_start], y[val_end:]), dim=0)
    else:
        X_valid, y_valid = X[val_start:], y[val_start:]
        X_train = X[0:val_start]
        y_train = y[0:val_start]
    return X_train, y_train, X_valid, y_valid

# 对每折数进行训练和测试并计算平均值
def k_fold(k, feathers, lables, num_epochs, batch_size, optimizer):
    train_acc_sum, valid_acc_sum = 0.0, 0.0
    train_loss_sum, valid_loss_sum = 0.0, 0.0
    # 生成数据表格
    k_table = PrettyTable(["epoch", "train_acc", "valid_acc", "train_loss", "valid_loss"])
    # print("X_train.shape{}".format(X_train.shape))
    for i in range(k):
        data = get_kfold_data(k, i, feathers, lables)
        # 对每份数据进行运算
        train_loss, valid_loss, train_acc, valid_acc = train_k(*data, num_epochs, batch_size, optimizer)
        train_acc_sum += train_acc
        valid_acc_sum += valid_acc
        train_loss_sum += train_loss
        valid_loss_sum += valid_loss
        k_table.add_row([(i+1), train_acc, valid_acc, train_loss, valid_loss])
    # 绘制每折的表格
    print(k_table)
    print("\n", "K折交叉验证的结果：")
    print("average train loss :{:.4f}, average train acc :{:.3f}%".format(train_loss_sum/k, train_acc_sum/k*100))
    print("average valid loss :{:.4f}, average valid acc :{:.3f}%".format(valid_loss_sum/k, valid_acc_sum/k*100))

# 绘制损失图像
def loss_curve(x_vals, y_vals, x_lable, y_lable, x2_vals=None, y2_vals=None, legend=None, figsize=(3.5, 2.5)):
    plt.rcParams['figure.figsize'] = figsize
    plt.xlabel(x_lable)
    plt.ylabel(y_lable)
    plt.semilogy(x_vals, y_vals)
    if x2_vals and y2_vals:
        plt.semilogy(x2_vals, y2_vals, linestyle=":")
        plt.legend(legend)
    plt.show()

# -----------------------计算模型在摸个数据集上的准确率--------------
def evaluate_accuracy(batch_size, test_data, test_lable, net):
    acc_sum, loss_sum, n = 0.0, 0.0, 0
    dataLen = len(test_data)
    indices = list(range(dataLen))
    for i in range(0, dataLen, batch_size):
        # 最后一次可能不足一个batch
        j = torch.LongTensor(indices[i: min(i + batch_size, dataLen)])
        # 参数0表示按行索引，1表示按列进行索引
        X, y = test_data.index_select(0, j), test_lable.index_select(0, j)
        acc_sum += ((net(X).ge(0.5).float().squeeze()) == y).sum().item()
        loss_sum += loss(net(X), y).sum().item()
        n += y.shape[0]
    return acc_sum/n, loss_sum/n

# K折交叉验证的训练函数
def train_k(train_X, train_y, valid_X, valid_y, num_epochs, batch_size, optimizer=None):
    # 外层训练次数控制训练轮次
    train_loss = 0.0
    test_loss = 0.0
    train_acc = 0.0
    test_acc_s = 0.0
    # 处理数据
    # print("train_X.shape {}".format(train_X.shape))
    train_iter = data_iter(batch_size, train_X, train_y, True)
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum = 0.0, 0.0
        # 内层循环控制训练批次
        for X, y in train_iter:
            # 根据模型预测y值
            # print("X.shape{}".format(X))
            y_hat = net(X)
            # 计算损失
            l = loss(y_hat.squeeze(), y.squeeze()).sum()
            # 反向传播
            l.backward()
            # 更新参数
            optimizer.step()
            # 梯度清零
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            # 训练损失
            train_l_sum += l.item()
            # 训练精确度
            train_acc_sum += ((y_hat.ge(0.5).float().squeeze()) == y).sum().item()
            # print("train_acc_sum {}".format(train_acc_sum))
        # 训练精确度
        test_acc, test_l = evaluate_accuracy(batch_size, valid_X, valid_y, net)
        train_loss += (train_l_sum/train_X.shape[0])
        train_acc += (train_acc_sum/train_X.shape[0])
        # print("train_acc_sum/train_X.shape[0] {}".format(train_acc_sum/train_X.shape[0]))
        test_acc_s += test_acc
        test_loss += test_l
    return train_loss, test_loss, train_acc, test_acc_s

# 训练函数
def train(feathers, lables, test_data, test_lable, num_epochs, batch_size, params=None, optimizer=None):
    # 外层训练次数控制训练轮次
    train_loss = []
    test_loss = []
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        # 内层循环控制训练批次
        for X, y in data_iter(batch_size=batch_size, feathers=feathers, lables=lables):
            # 根据模型预测y值
            y_hat = net(X)
            # print("y_hat {}".format(y_hat))
            # print("y.shape {}".format(torch.sum(y.view(y.shape, -1))))
            # print("y_hat.shape {}".format(torch.sum(y_hat.view(y_hat.shape, -1)).float()))
            # 计算损失（加入惩罚权重）
            l = loss(y_hat.squeeze(), y.squeeze())
            # print("l {}".format(l))
            # 反向传播
            l.backward()
            # 更新参数
            optimizer.step()
            # 梯度清零
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            # 训练损失
            train_l_sum += l.sum().item()
            # 训练精确度
            train_acc_sum += ((y_hat.ge(0.5).float().squeeze()) == y).sum()
            # 次数
            n += y.shape[0]
        # 训练精确度
        test_acc, test_l = evaluate_accuracy(batch_size, test_data, test_lable, net)
        train_loss.append(train_l_sum / n)
        test_loss.append(test_l)
        # 输出结果
        print("epoch % d, loss %.4f, train acc %.3f, test acc %.3f" % (epoch+1, train_l_sum/n, train_acc_sum/n, test_acc))
    return train_loss, test_loss
if __name__ == '__main__':
    # Number of passes over the training data and mini-batch size.
    num_epochs = 15
    batch_size = 64
    # Train on the fixed train/test split.
    # NOTE(review): the original comment mentioned a lambda penalty weight,
    # but no regularization term is applied anywhere in train().
    train_loss, test_loss = train(feathers, lables, test_data, test_lable, num_epochs, batch_size, net.parameters(), optimizer)
    # Plot train vs. test loss per epoch (log-scaled y axis).
    loss_curve(range(1, num_epochs+1), train_loss, "epochs", "loss", range(1, num_epochs+1), test_loss, ['train', 'test'])
    # 10-fold cross-validation, one epoch per fold.
    k_fold(k=10, feathers=feathers, lables=lables, num_epochs=1, batch_size=32, optimizer=optimizer)








