# 前馈神经网络实现线性回归
import random

import matplotlib.pyplot as plt
import numpy as np
import torch

# ------------------Generate data----------------------------
x_plt = []
y_plt = []
# Target function used for the linear-regression data
def fun(datas):
    """Generate noisy linear targets, one per row of *datas*.

    y = 0.028 + sum(0.0056 * x) + Gaussian noise N(mu, sigma).

    Args:
        datas: 2-D float tensor, one sample per row.

    Returns:
        1-D float tensor of targets with len(datas) elements.
    """
    mu = 0.02
    sigma = 0.012
    # BUG FIX: accumulate into a local list instead of the module-level
    # y_plt. The old code appended across calls, so the second call
    # (test data) returned train+test targets — 10000 values instead of
    # 3000 — corrupting test_y.
    ys = []
    for data in datas:
        # Linear response plus Gaussian white noise to make fitting non-trivial.
        y = 0.028 + (0.0056 * data).sum() + random.gauss(mu, sigma)
        ys.append(y.item())
    return torch.tensor(ys, dtype=torch.float)

# Training data: 7000 samples of 500 standard-normal features each
train_x = torch.tensor(data=np.random.normal(0, 1, (7000, 500)), dtype=torch.float)
train_y = fun(train_x)

# Test data: 3000 samples drawn from the same feature distribution
test_x = torch.tensor(data=np.random.normal(0, 1, (3000, 500)), dtype=torch.float)
test_y = fun(test_x)

# ------------------Shuffled mini-batch reader-------------------------------------
def readData(batch_size, feathers, lables):
    """Yield (features, labels) mini-batches in a random order.

    The sample order is shuffled once per call; the final batch may be
    smaller than batch_size when the dataset size is not a multiple of it.
    """
    sample_count = len(feathers)
    order = list(range(sample_count))
    # One shuffle per epoch so every pass sees a fresh sample order.
    random.shuffle(order)
    for start in range(0, sample_count, batch_size):
        stop = min(start + batch_size, sample_count)
        picked = torch.LongTensor(order[start:stop])
        # index_select along dim 0 picks whole rows, i.e. whole samples.
        yield feathers.index_select(0, picked), lables.index_select(0, picked)

# -------------------Model definition----------------------------------------
num_inputs, num_outs = 500, 1
class LinearModel(torch.nn.Module):
    """A single linear layer computing w·x + b."""

    def __init__(self):
        super().__init__()
        # nn.Linear owns the weight and bias tensors; its two arguments
        # are the input and output feature dimensions.
        self.linear = torch.nn.Linear(num_inputs, num_outs)

    def forward(self, x):
        # Overrides nn.Module.forward; runs when the instance is called.
        return self.linear(x)

# Instantiate the model; nn.Module instances are callable
model = LinearModel()

# --------------------Mean-squared-error loss---------------------------------
loss = torch.nn.MSELoss()

# --------------------Optimizer--------------------------------------
lr = 0.01  # learning rate
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)

# ---------------------Plot the loss curves-----------------------------------------
def loss_curve(x_vals, y_vals, x_lable, y_lable, x2_vals=None, y2_vals=None, legend=None, figsize=(3.5, 2.5)):
    """Plot one or two series on a log-scaled y axis and show the figure.

    Args:
        x_vals, y_vals: first series (e.g. training loss per epoch).
        x_lable, y_lable: axis labels.
        x2_vals, y2_vals: optional second series (e.g. test loss), drawn dotted.
        legend: legend entries, used only when the second series is drawn.
        figsize: figure size in inches.
    """
    plt.rcParams['figure.figsize'] = figsize
    plt.xlabel(x_lable)
    plt.ylabel(y_lable)
    plt.semilogy(x_vals, y_vals)
    # ROBUSTNESS FIX: explicit None checks. Truthiness of a sequence
    # silently skips a valid-but-empty second series, and a numpy array
    # here would raise "truth value is ambiguous".
    if x2_vals is not None and y2_vals is not None:
        plt.semilogy(x2_vals, y2_vals, linestyle=":")
        plt.legend(legend)
    plt.show()

# --------------------Training loop--------------------------------------
def train(epochs, batch_size, feathers, lables):
    """Train the module-level model with SGD and plot train/test loss.

    Args:
        epochs: number of passes over the training data.
        batch_size: mini-batch size.
        feathers: training features, shape (N, num_inputs).
        lables: training targets, 1-D tensor of length N.
    """
    train_loss = []
    test_loss = []
    for i in range(epochs):
        count, l_sum = 0, 0
        for X, y in readData(batch_size, feathers, lables):
            # Forward pass
            y_hat = model(X)
            # BUG FIX: reshape the 1-D targets to (batch, 1) to match the
            # model output. MSELoss on (B, 1) vs (B,) broadcasts to (B, B)
            # and computes a meaningless loss (PyTorch warns about this).
            l = loss(y_hat, y.view(-1, 1))
            # Record the batch loss as a plain float
            l_sum += l.item()
            # Zero stale gradients before accumulating new ones
            optimizer.zero_grad()
            # Backpropagate
            l.backward()
            # Update parameters
            optimizer.step()
            count += 1
        train_loss.append(l_sum / count)
        # Evaluate on the test set without building an autograd graph,
        # and store a float (not a live tensor) in the history.
        with torch.no_grad():
            y_hat = model(test_x)
            test_loss.append(loss(y_hat, test_y.view(-1, 1)).item())
    # Plot both loss curves
    loss_curve(range(1, epochs + 1), train_loss, "epochs", "loss",
               range(1, epochs + 1), test_loss, ['train', 'test'])
    # Report the learned parameters
    print("w = ", model.linear.weight)
    print("w mean = ", torch.mean(model.linear.weight))
    print("b = ", model.linear.bias)

if __name__ == '__main__':
    # Original note: loss decreases when the learning rate is 0.1.
    # NOTE(review): lr is set to 0.01 above, not 0.1 — confirm which was intended.
    train(epochs=200, batch_size=80, feathers=train_x, lables=train_y)


