import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader

# Read the training / test sets with pandas (CSV files with columns x1, x2, y)
train_data = pd.read_csv("./data/BPdata_tr.txt")
test_data  = pd.read_csv("./data/BPdata_te.txt")

# Select the compute device once: use the GPU when available, otherwise fall
# back to CPU so the script also runs on machines without CUDA (the original
# unconditional .cuda() calls crash there).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Extract features/labels and store them as float32 tensors on `device`
X_train = torch.tensor(train_data[['x1', 'x2']].values).float().to(device)
y_train = torch.tensor(train_data[['y']].values).float().to(device)

X_test = torch.tensor(test_data[['x1', 'x2']].values).float().to(device)
y_test = torch.tensor(test_data[['y']].values).float().to(device)

# Wrap the training tensors in a Dataset/DataLoader
# NOTE(review): batch_size=1 is pure per-sample SGD and is slow; kept as the
# original training regime since a larger batch would change the dynamics.
TrainData_set = TensorDataset(X_train, y_train)
train_loader = DataLoader(dataset=TrainData_set, batch_size=1, shuffle=True)

# 构架模型
class BP_Net(nn.Module):
    """Two-layer back-propagation network: 2 inputs -> 4 hidden -> 1 output.

    Both layers are followed by a sigmoid activation, so the network's
    output always lies in the open interval (0, 1).
    """

    def __init__(self):
        super(BP_Net, self).__init__()
        self.fc1 = nn.Linear(2, 4)   # input layer  -> hidden layer
        self.fc2 = nn.Linear(4, 1)   # hidden layer -> output layer

    def forward(self, x):
        """Run a forward pass; x has shape (batch, 2), result (batch, 1)."""
        return torch.sigmoid(self.fc2(torch.sigmoid(self.fc1(x))))
        
# Instantiate the model on the GPU when available, otherwise on the CPU
# (the original unconditional .cuda() crashes on CUDA-less machines).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = BP_Net().to(device)
# Loss function and optimizer
criterion = nn.MSELoss()  # mean squared error loss (regression target)
# NOTE(review): lr=0.85 is unusually large for SGD; kept as the original
# hyper-parameter choice so training behavior is unchanged.
optimizer = optim.SGD(model.parameters(), lr = 0.85)  # takes parameters and learning rate

# Train the model
EPOCH = 20
for epoch in range(EPOCH):
    epoch_loss = 0.0  # sum of per-batch losses, for end-of-epoch reporting
    for inputs, targets in train_loader:
        optimizer.zero_grad()  # clear accumulated gradients
        outputs = model(inputs)  # forward pass through the current model
        loss = criterion(outputs, targets)  # compute the MSE loss
        loss.backward()  # backward pass
        optimizer.step()  # update the weights
        epoch_loss += loss.item()
    # Report the epoch's mean loss so training progress is observable
    # (the original message printed the epoch number twice and no loss);
    # max(..., 1) guards against division by zero on an empty loader.
    avg_loss = epoch_loss / max(len(train_loader), 1)
    print("【已经完成{}次训练】{} / {} | avg loss: {:.6f}".format(
        epoch + 1, epoch + 1, EPOCH, avg_loss))
print('训练结束')