# 前馈神经网络实现线性回归
import random

import matplotlib.pyplot as plt
import numpy as np
import torch
from prettytable import PrettyTable

# ------------------ data generation ----------------------------
x_plt = []  # kept for backward compatibility (unused in the visible code)
y_plt = []


def fun(datas):
    """Generate linear-regression targets y = 0.028 + sum(0.0056 * x) + noise.

    Args:
        datas: 2-D float tensor; each row is one sample's feature vector.

    Returns:
        1-D float tensor of labels, one per row of `datas`, with Gaussian
        white noise N(mu, sigma) added to each label.
    """
    mu = 0.02
    sigma = 0.012
    # Bug fix: accumulate into a LOCAL list. The original appended into the
    # module-level y_plt, so the second call (for the test set) returned the
    # training labels concatenated with the test labels — test_y ended up
    # with len(train) + len(test) entries.
    ys = []
    for data in datas:
        y = 0.028 + (0.0056 * data).sum() + random.gauss(mu, sigma)
        ys.append(y.item())
    return torch.tensor(ys, dtype=torch.float)

# ---------------------- training / test data ----------------------
# 7000 training and 3000 test samples, each with 500 standard-normal features.
train_x = torch.tensor(np.random.normal(0, 1, (7000, 500)), dtype=torch.float)
train_y = fun(train_x)

test_x = torch.tensor(np.random.normal(0, 1, (3000, 500)), dtype=torch.float)
test_y = fun(test_x)

# ------------------ mini-batch iterator -------------------------------------
def readData(batch_size, feathers, lables):
    """Yield (features, labels) mini-batches in a random order.

    The sample order is shuffled once per call; the final batch may hold
    fewer than `batch_size` rows when the sizes don't divide evenly.
    """
    total = len(feathers)
    order = list(range(total))
    random.shuffle(order)
    for start in range(0, total, batch_size):
        # Python slicing clamps at the end, so the short final batch is free.
        idx = torch.LongTensor(order[start:start + batch_size])
        # dim=0 selects whole rows (samples).
        yield feathers.index_select(0, idx), lables.index_select(0, idx)

# ------------------ initialise model parameters --------------------------
num_inputs, num_outs = 500, 1
# Weights drawn from N(0, 1); bias starts at zero. Both are leaf tensors
# that track gradients for the manual SGD updates below.
W1 = torch.tensor(np.random.normal(0, 1, (num_inputs, num_outs)), dtype=torch.float32).requires_grad_(True)
b1 = torch.zeros(num_outs, dtype=torch.float32).requires_grad_(True)
params = [W1, b1]

# ------------------- model: a single linear layer ---------------------------
def net(X):
    """Affine map X @ W1 + b1 using the module-level parameters."""
    return X @ W1 + b1

# -------------------- loss function -----------------------------------------
# Mean squared error; the default reduction='mean' averages over the batch,
# yielding the scalar needed for backward().
loss = torch.nn.MSELoss()

# -------------------- reset gradients ---------------------------------------
def grad_zero(params):
    """Zero every parameter's .grad in-place (call after each SGD step)."""
    for p in params:
        p.grad.zero_()

# --------------------- parameter update (mini-batch SGD) --------------------
def SGD(params, lr, batch_size):
    """Take one in-place SGD step: param <- param - lr * param.grad.

    Args:
        params: list of leaf tensors with populated .grad.
        lr: learning rate.
        batch_size: kept for interface compatibility but no longer used.

    Bug fix: the loss is torch.nn.MSELoss with the default 'mean' reduction,
    so gradients are already averaged over the batch. The original extra
    division by batch_size double-normalised the step, shrinking the
    effective learning rate to lr / batch_size.
    """
    for param in params:
        param.data -= lr * param.grad

# --------------------- plot the loss curves ---------------------------------
def loss_curve(x_vals, y_vals, x_lable, y_lable, x2_vals=None, y2_vals=None, legend=None, figsize=(3.5, 2.5)):
    """Plot one or two series on a log-scaled y axis and show the figure.

    The second series (x2_vals/y2_vals) is drawn dotted and labelled via
    `legend` when both are provided.
    """
    plt.rcParams['figure.figsize'] = figsize
    # Primary series (e.g. training loss).
    plt.semilogy(x_vals, y_vals)
    # Optional secondary series (e.g. test loss), dotted.
    if x2_vals and y2_vals:
        plt.semilogy(x2_vals, y2_vals, linestyle=":")
        plt.legend(legend)
    plt.xlabel(x_lable)
    plt.ylabel(y_lable)
    plt.show()

# K-fold cross-validation split
def get_kfold_data(k, i, X, y):
    """Return (X_train, y_train, X_valid, y_valid) for fold i of k (i is 0-based).

    Fold i's rows form the validation set; all remaining rows form the
    training set. The last fold absorbs the remainder when len(X) is not
    divisible by k.
    """
    fold_size = X.shape[0] // k
    start = i * fold_size
    # Earlier folds take exactly fold_size rows; the last fold runs to the end.
    end = X.shape[0] if i == k - 1 else start + fold_size
    X_valid, y_valid = X[start:end], y[start:end]
    # torch.cat with an empty tail slice is a no-op, so one path covers both cases.
    X_train = torch.cat((X[:start], X[end:]), dim=0)
    y_train = torch.cat((y[:start], y[end:]), dim=0)
    return X_train, y_train, X_valid, y_valid

# Run training/validation on every fold and report the averages.
def k_fold(k, feathers, lables, num_epochs, batch_size):
    """Train on each of the k folds in turn and print per-fold and mean losses."""
    train_loss_sum, valid_loss_sum = 0.0, 0.0
    # One row per fold in the summary table.
    k_table = PrettyTable(["epoch", "train_loss", "valid_loss"])
    for fold in range(k):
        split = get_kfold_data(k, fold, feathers, lables)
        # Train on this fold's split; returns per-epoch mean losses.
        fold_train, fold_valid = train_k(*split, num_epochs, batch_size)
        train_loss_sum += fold_train
        valid_loss_sum += fold_valid
        k_table.add_row([fold + 1, fold_train, fold_valid])
    print("train_loss_sum {}".format(train_loss_sum))
    # Print the per-fold table, then the averages.
    print(k_table)
    print("\n", "K折交叉验证的结果：")
    print("average train loss :{:.4f}".format(train_loss_sum/k))
    print("average valid loss :{:.4f}".format(valid_loss_sum/k))

def train_k(feathers, lables, test_data, test_lable, num_epochs, batch_size, lr=0.01):
    """Train the module-level model on one fold and evaluate its validation split.

    Args:
        feathers, lables: training features/labels for this fold.
        test_data, test_lable: validation features/labels for this fold.
        num_epochs: number of passes over the fold's training data.
        batch_size: mini-batch size for readData.
        lr: learning rate passed to SGD.

    Returns:
        (mean train loss per epoch, mean validation loss per epoch).
    """
    train_loss, test_loss = 0.0, 0.0
    for i in range(num_epochs):
        # Accumulated loss and batch count for this epoch.
        l_sum, count = 0, 0
        for X, y in readData(batch_size, feathers, lables):
            y_hat = net(X)
            # Bug fix: net(X) is (batch, 1) while y is (batch,). Passing the
            # pair straight to MSELoss broadcasts them to a (batch, batch)
            # matrix of pairwise differences, producing a wrong loss and
            # gradient. Reshape y to match.
            l = loss(y_hat, y.view(-1, 1))
            l.backward()
            l_sum += l.item()
            SGD(params, lr, batch_size)
            grad_zero(params)
            count += 1
        train_loss += l_sum / count
        # Validation loss for this epoch (same shape fix; no_grad avoids
        # building an autograd graph during evaluation).
        with torch.no_grad():
            y_hat = net(test_data)
            test_loss += loss(y_hat, test_lable.view(-1, 1)).item()
    print("train_loss/num_epochs {}".format(train_loss/num_epochs))
    return train_loss/num_epochs, test_loss/num_epochs

# -------------------- plain (non-K-fold) training loop ----------------------
def train(epochs, batch_size, feathers, lables, params, lr):
    """Train for `epochs` epochs, plot train/test loss, and print param summaries.

    Args:
        epochs: number of passes over the training data.
        batch_size: mini-batch size for readData.
        feathers, lables: training features/labels.
        params: list of model parameters updated by SGD.
        lr: learning rate.
    """
    train_loss = []
    test_loss = []
    for i in range(epochs):
        # Accumulated loss and batch count for this epoch.
        l_sum, count = 0, 0
        for X, y in readData(batch_size, feathers, lables):
            y_hat = net(X)
            # Bug fix: reshape y to (batch, 1) so MSELoss compares
            # element-wise instead of broadcasting (batch, 1) against
            # (batch,) into a (batch, batch) matrix.
            l = loss(y_hat, y.view(-1, 1))
            l.backward()
            l_sum += l.item()
            SGD(params, lr, batch_size)
            grad_zero(params)
            count += 1
        train_loss.append(l_sum / count)
        print('epoch %d, loss %.4f' % (i + 1, l_sum/count))
        # Evaluate on the module-level test set. Bug fix: store .item() —
        # the original appended the loss tensor itself, keeping the whole
        # autograd graph of every epoch alive in memory.
        with torch.no_grad():
            y_hat = net(test_x)
            test_loss.append(loss(y_hat, test_y.view(-1, 1)).item())

    # Plot both loss curves on a log scale.
    loss_curve(range(1, epochs+1), train_loss, "epochs", "loss", range(1, epochs+1), test_loss, ['train', 'test'])
    # Print a scalar summary of each learned parameter.
    for param in params:
        print(param.sum().mean())


if __name__ == '__main__':
    # Plain training run (loss decreases with a learning rate around 0.1):
    # train(300, 80, train_x, train_y, params, 0.01)
    # 10-fold cross-validation on the training data.
    k_fold(k=10, feathers=train_x, lables=train_y, num_epochs=100, batch_size=32)


