# 1. Manually implemented feed-forward network for the binary classification task above
import random
import numpy as np
from common import *


def generate_data(size_, ρ_):
    """Create two Gaussian point clouds for a synthetic binary task.

    Class 0: size_ samples, each coordinate drawn from N(0.1, 0.1).
    Class 1: size_ samples, each coordinate drawn from N(-0.1, 0.1).

    Returns:
        (class0_data, class0_labels, class1_data, class1_labels) where the
        data tensors have shape (size_, ρ_) and dtype float32, and the label
        tensors are all-zeros / all-ones of length size_.
    """
    # Draw class 0 first, then class 1, to keep the NumPy RNG call order.
    positive_cloud = torch.tensor(data=np.random.normal(0.1, 0.1, (size_, ρ_)), dtype=torch.float)
    negative_cloud = torch.tensor(data=np.random.normal(-0.1, 0.1, (size_, ρ_)), dtype=torch.float)
    return positive_cloud, torch.zeros(size_), negative_cloud, torch.ones(size_)


def data_iter(batch_size, features, labels):
    """Yield (features, labels) mini-batches in a freshly shuffled order.

    Labels are cast to long; the final batch may hold fewer than batch_size
    samples when the dataset size is not a multiple of batch_size.
    """
    total = len(features)
    order = list(range(total))
    random.shuffle(order)  # new random visiting order on every call
    for start in range(0, total, batch_size):
        # Python slicing clamps at the end, so the last batch just comes up short.
        chosen = torch.LongTensor(order[start:start + batch_size])
        yield features.index_select(0, chosen), labels.index_select(0, chosen).long()


# Model definition
def logic_net(X):
    """Single-layer logistic model: sigmoid(X @ W1.T + b1).

    Uses the module-level parameters W1 (num_outs x num_inputs) and b1,
    and `sigmod` from common.

    Args:
        X: input batch; flattened to shape (-1, num_inputs) first.

    Returns:
        Predicted probabilities, shape (batch, num_outs).
    """
    # BUG FIX: the original called X.view(-1, num_inputs) without binding the
    # result — torch.Tensor.view returns a new tensor, so the reshape was a
    # silent no-op. Assign it so the flatten actually takes effect.
    X = X.view(-1, num_inputs)
    F = torch.matmul(X, W1.t()) + b1
    return sigmod(F)


def train_test_k(feathers, labels, test_data_, test_labels_, num_epochs, batch_size,
                 params=None, lr=None, optimizer=None):
    """Train logic_net with mini-batch BCE and evaluate after every epoch.

    Args:
        feathers: training features ("feathers" sic — caller uses this name).
        labels: training labels.
        test_data_, test_labels_: held-out set passed to evaluate_accuracy_k.
        num_epochs: number of full passes over the training data.
        batch_size: mini-batch size handed to data_iter.
        params: [W1, b1]; updated via the manual SGD helper when optimizer is None.
        lr: learning rate for that manual SGD step.
        optimizer: optional torch optimizer used instead of manual SGD.

    Returns:
        (train_l_avg, test_l_avg, train_acc_sum, test_acc_sum): each is the SUM
        over all epochs of the per-epoch mean train loss / test loss / train
        accuracy / test accuracy (presumably averaged by the k_fold caller —
        TODO confirm in common.k_fold).
    """
    train_l_avg, train_acc_sum = 0.0, 0.0
    test_l_avg, test_acc_sum = 0.0, 0.0
    for epoch in range(num_epochs):
        train_l_sum, test_l_sum, acc, n = 0.0, 0.0, 0, 0
        for X, y in data_iter(batch_size, feathers, labels):
            # Step 1: forward pass and loss.
            """ 第一步：计算模型输出和loss """
            y_hat = logic_net(X)
            l = loss_bce(y_true=y.squeeze(), y_pred_proba=y_hat.squeeze()).sum()
            l.backward()  # backpropagate
            if optimizer is None:
                SGD(params, lr)  # manual parameter update (helper from common)
            else:
                optimizer.step()
            # Zero gradients AFTER the step so the next batch starts clean.
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                grad_zero(params)
            train_l_sum += l.item()
            n += y.shape[0]  # samples seen this epoch
            mask = y_hat.ge(0.5).float().squeeze()  # threshold at 0.5 (>= 0.5 -> class 1)
            acc += (mask == y).sum().item()  # count of correct predictions
        train_l_avg += train_l_sum / n  # accumulate this epoch's mean loss
        train_acc_sum += acc / n
        # evaluate_accuracy_k returns (accuracy, loss) — see unpacking order below.
        test_acc, test_loss = evaluate_accuracy_k(batch_size, test_data_, test_labels_, logic_net, loss_bce)
        test_l_avg += test_loss
        test_acc_sum += test_acc
        # print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
        #       % (epoch + 1, train_l_sum / n, acc / n, test_acc))
    return train_l_avg, test_l_avg, train_acc_sum, test_acc_sum


def loss_(y_hat, y):
    """Mean binary cross-entropy between probabilities y_hat and 0/1 targets y."""
    positive_term = y * torch.log(y_hat)
    negative_term = (1 - y) * torch.log(1 - y_hat)
    return -torch.mean(positive_term + negative_term)


# Evaluate on the held-out set
def test_result(test_loss_avg):
    """Append the current mean MSE on (x_test, y_test) to test_loss_avg and return it."""
    predictions = logic_net(x_test)
    test_mse = mseLoss(predictions, y_test).mean()
    test_loss_avg.append(test_mse)  # record this evaluation's loss
    return test_loss_avg


# Plotting helper
def loss_draw(x_vals, y_vals, x_lable, y_lable, x2_vals=None, y2_vals=None, legend=None, figsize=(3.5, 2.5)):
    """Show a semilog-y curve, with an optional dotted second curve and legend.

    (Parameter names "x_lable"/"y_lable" kept as-is for caller compatibility.)
    """
    plt.rcParams['figure.figsize'] = figsize
    plt.xlabel(x_lable)
    plt.ylabel(y_lable)
    plt.semilogy(x_vals, y_vals)
    has_second_curve = x2_vals and y2_vals
    if has_second_curve:
        plt.semilogy(x2_vals, y2_vals, linestyle=":")
        plt.legend(legend)
    plt.show()


# Build the dataset: `size` samples per class, ρ-dimensional; split 70% train / 30% test.
# NOTE(review): the original comment claimed 500 dimensions and a 7000/3000 split,
# but the code uses ρ=200 (split ratio 0.7 is consistent).
size, ρ = 10000, 200
x_data, x_label, y_data, y_label = generate_data(size, ρ)
x_train, y_train, x_test, y_test = train_test_split(x_data, y_data, 0.7)
x_label_train, y_label_train, x_label_test, y_label_test = train_test_split(x_label, y_label, 0.7)
# Concatenate the two classes into single train tensors.
# NOTE(review): the original comment said "shuffle the data", but no shuffling
# happens here — data_iter shuffles per epoch instead.
feathers = torch.cat((x_train, y_train), dim=0).type(torch.FloatTensor)
labels = torch.cat((x_label_train, y_label_train), dim=0).type(torch.FloatTensor)

test_data = torch.cat((x_test, y_test), dim=0).type(torch.FloatTensor)
test_labels = torch.cat((x_label_test, y_label_test), dim=0).type(torch.FloatTensor)

# Initialize model parameters: one linear layer W1 (num_outs x num_inputs) plus bias b1,
# both drawn from N(0, 0.01) and marked as requiring gradients.
num_inputs, num_outs = 200, 1
W1 = torch.tensor(data=np.random.normal(0, 0.01, (num_outs, num_inputs)), dtype=torch.float)
b1 = torch.tensor(data=np.random.normal(0, 0.01, num_outs), dtype=torch.float)
params = [W1, b1]
for param in params:
    param.requires_grad_(True)

# Training hyper-parameters.
# NOTE(review): original comments claimed lr=0.1 and 5 epochs; the code actually
# uses lr=0.01 and num_epochs=30.
batch_size = 64
num_workers = 0  # worker-thread count (not referenced in the visible code)
num_epochs, lr = 30, 0.01
if __name__ == '__main__':
    # Run 10-fold cross-validation over the training routine (k_fold from common).
    k_fold(train_test_k, k=10, feathers=feathers, labels=labels, num_epochs=num_epochs, batch_size=batch_size,
           params=params, lr=lr, optimizer=None)
