import numpy as np
import torch


# Multi-layer binary classifier: maps 8 input features to one probability.
class NeuralNetwork(torch.nn.Module):
    def __init__(self, *args, **kwargs) -> None:
        """Build a three-layer fully-connected network (8 -> 6 -> 4 -> 1)."""
        super().__init__(*args, **kwargs)
        self.linear0 = torch.nn.Linear(8, 6)
        self.linear1 = torch.nn.Linear(6, 4)
        # Final layer narrows down to a single output feature.
        self.linear2 = torch.nn.Linear(4, 1)
        # Hidden-layer activation (a Sigmoid was tried here previously).
        self.activation = torch.nn.ReLU()

    def forward(self, x):
        """Run the forward pass; returns a probability in (0, 1)."""
        hidden = self.activation(self.linear0(x))
        hidden = self.activation(self.linear1(hidden))
        # The output layer is squashed with sigmoid instead of ReLU:
        # ReLU can produce exact zeros, and the BCE loss would then
        # evaluate log(0) and blow up.
        return torch.sigmoid(self.linear2(hidden))


# ---- Data loading ----
# The first 8 columns are the input features, the last column is the label.
raw = np.loadtxt("./data/8x_1y.csv", delimiter=',', dtype=np.float32)
x_data = torch.from_numpy(raw[:, 0:8])
y_data = torch.from_numpy(raw[:, [8]])

print(x_data.data)
print(y_data.data)

# ---- Model, loss, optimizer ----
model = NeuralNetwork()
# Binary cross-entropy, summed over the whole batch.
criterion = torch.nn.BCELoss(reduction="sum")
# Plain SGD; Rprop (lr=0.1) was observed to converge better in earlier runs.
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# ---- Training loop (full-batch gradient descent) ----
for epoch in range(1000):
    y_pred = model(x_data)            # forward pass
    loss = criterion(y_pred, y_data)  # batch loss
    optimizer.zero_grad()             # drop gradients from the previous step
    loss.backward()                   # backprop: d(loss)/d(weights)
    optimizer.step()                  # apply the weight update
    print(f'Epoch: {epoch}, Loss: {loss.item():.4f}')

# ---- Single-sample sanity check ----
y_pred = model(torch.Tensor([-0.99, -0.15, 0.08, -0.41, 0, -0.51, -0.27, -0.67]))

print(y_pred.item())
