# 1. Manually implement a feed-forward network for the regression, binary- and multi-class classification tasks
import random
import numpy as np
from common import *


def generate_data(size_, ρ_):
    """Generate a synthetic linear-regression dataset.

    Features are drawn i.i.d. from N(0, 1); targets follow the linear model
        y = 0.028 + 0.0056 * sum(x) + ε,   ε ~ N(0.02, 0.16)
    (small coefficients/noise keep the problem easy to train).

    Args:
        size_: number of samples.
        ρ_: feature dimension.

    Returns:
        (features, labels): float32 tensors of shape (size_, ρ_) and (size_,).
    """
    datas = torch.tensor(data=np.random.normal(0, 1, (size_, ρ_)), dtype=torch.float)
    # Vectorized target computation replaces the original per-sample Python
    # loop (which converted each 0-dim tensor back to a float via .item()).
    noise = torch.tensor(np.random.normal(0.02, 0.16, size_), dtype=torch.float)
    y_datas = 0.028 + 0.0056 * datas.sum(dim=1) + noise
    return datas, y_datas


# Define the model
def linear_net(X):
    """Affine model X @ W1 + b1, using the module-level parameters W1 and b1."""
    return X @ W1 + b1


def data_iter(batch_size, features, labels):
    """Yield shuffled (features, labels) mini-batches along dim 0.

    Samples are visited in a fresh random order on every call; the last
    batch may contain fewer than batch_size examples.
    """
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)  # random read order each epoch
    for i in range(0, num_examples, batch_size):
        # Modern torch.tensor(...) replaces the legacy LongTensor constructor;
        # slicing already clamps at the end, so no explicit min() is needed.
        j = torch.tensor(indices[i: i + batch_size], dtype=torch.long)
        yield features.index_select(0, j), labels.index_select(0, j)


def train_test_k(train_iter, test_iter, test_data, test_lable, num_epochs, batch_size,
                 params=None, lr=None, optimizer=None):
    """Train the linear model and report average train/test loss over epochs.

    NOTE(review): despite their names, train_iter/test_iter are the training
    features and labels fed to data_iter; test_data/test_lable are the
    held-out split evaluated once per epoch.

    Returns:
        (mean_train_loss, mean_test_loss, 0, 0) — the trailing zeros match
        the tuple shape expected by k_fold.
    """
    train_l_avg = 0.0
    test_l_avg = 0.0
    for _ in range(num_epochs):
        epoch_loss, n_batches = 0.0, 0
        for X, y in data_iter(batch_size, train_iter, test_iter):
            # Forward pass and loss, then backprop.
            loss = mseLoss(linear_net(X), y)
            loss.backward()
            # Update parameters, then clear gradients for the next batch.
            if optimizer is not None:
                optimizer.step()
                optimizer.zero_grad()
            else:
                SGD(params, lr)
                if params is not None and params[0].grad is not None:
                    grad_zero(params)
            epoch_loss += loss.item()
            n_batches += 1
        # Accumulate the per-epoch mean training loss.
        train_l_avg += epoch_loss / n_batches
        test_l_avg += test_result(test_data, test_lable)
    return train_l_avg / num_epochs, test_l_avg / num_epochs, 0, 0


# Evaluate on the test set
def test_result(x_test_, y_test_):
    """Return the MSE loss of the current model on the given test set."""
    predictions = linear_net(x_test_)
    return mseLoss(predictions, y_test_).item()


# Plotting helper
def loss_draw(x_vals, y_vals, x_lable, y_lable, x2_vals=None, y2_vals=None, legend=None, figsize=(3.5, 2.5)):
    """Plot one or two loss curves on a semilog-y axis.

    Args:
        x_vals, y_vals: primary curve (e.g. training loss per epoch).
        x_lable, y_lable: axis labels.
        x2_vals, y2_vals: optional second curve, drawn dotted (e.g. test loss).
        legend: legend entries, used only when the second curve is drawn.
        figsize: matplotlib figure size.
    """
    plt.rcParams['figure.figsize'] = figsize
    plt.xlabel(x_lable)
    plt.ylabel(y_lable)
    plt.semilogy(x_vals, y_vals)
    # `is not None` instead of truthiness: numpy arrays / tensors raise
    # "ambiguous truth value" when used directly in a boolean context.
    if x2_vals is not None and y2_vals is not None:
        plt.semilogy(x2_vals, y2_vals, linestyle=":")
        plt.legend(legend)
    plt.show()


mseLoss = torch.nn.MSELoss()
# Generate 10000 samples of 500-dim data; split 7000 for training, 3000 for testing
size, ρ = 10000, 500
x_data, y_data = generate_data(size, ρ)
x_train, y_train, x_test, y_test = train_test_split(x_data, y_data, 0.7)
print(len(x_train))
print(len(x_test))

# Initialize model parameters (single linear layer: 500 inputs -> 1 output)
num_inputs, num_outs = 500, 1
W1 = torch.tensor(np.random.normal(0, 1, (num_inputs, 1)), dtype=torch.float32)
b1 = torch.zeros(1, dtype=torch.float32)
params = [W1, b1]
for param in params:
    param.requires_grad_(True)

# Training hyperparameters: learning rate 0.01, 30 epochs
batch_size = 64
num_workers = 0  # number of data-loading worker threads (0 = main thread)
num_epochs, lr = 30, 0.01
if __name__ == '__main__':
    # NOTE(review): loss was observed to decrease with learning rate 0.1
    # train(300, 80, train_x, train_y, params, 0.01)
    # train_loss_avg, test_l_avg = train_test(x_data, y_data, num_epochs, batch_size, params, lr)
    # loss_draw(range(1, num_epochs + 1), train_loss_avg, "epochs", "loss", range(1, num_epochs + 1), test_l_avg,
    #           ['train', 'test'])

    # Run 10-fold cross-validation over the full dataset with manual SGD.
    k_fold(train_test_k, k=10, feathers=x_data, labels=y_data, num_epochs=num_epochs, batch_size=32, params=params, lr=lr,
           optimizer=None)