import torch
from torch import nn, optim
import numpy as np

# Read the raw data file
def load_data(f: str) -> np.ndarray:
    """Parse a whitespace-separated data file where each record spans two lines.

    Consecutive non-blank line pairs are concatenated into one row of the
    returned 2-D float32 array (for the Boston housing file: 13 features + 1
    target per record).

    Fix vs. original: the old ``while readline() != ""`` loop stopped at the
    FIRST blank line, silently truncating the dataset if the file contained an
    empty line before EOF; blank lines are now skipped instead.

    :param f: path to the data file.
    :return: 2-D float32 array with one record per row (empty 1-D array if the
             file holds fewer than two non-blank lines).
    """
    data, pair = np.array([]), np.array([])
    row = 0
    with open(f, 'r') as file:
        for line in file:
            # str.split() with no argument also collapses tabs/multiple spaces,
            # replacing the old split(' ') + filter(None) combination.
            tokens = line.split()
            if not tokens:  # skip blank lines instead of truncating the file
                continue
            values = np.array(tokens, dtype=np.float32)
            if row % 2 == 0:
                pair = values                       # first half of the record
            else:
                pair = np.hstack((pair, values))    # complete the record
                data = pair if row <= 1 else np.vstack((data, pair))
            row += 1
    return data

dataset = load_data(r"波士顿房价\房价数据集-未处理.txt")

# Model definition
class FC_Model(nn.Module):
    """Fully-connected regression network: 13 inputs -> 50 -> 50 -> 50 -> 1.

    Fix vs. original: the hidden-layer ReLU calls were commented out, so the
    stacked Linear layers collapsed into a single affine map (dropout does not
    add non-linearity) — the network could only ever learn a linear model.
    The activations after fc1 and fc2 are restored.

    :param p: dropout probability applied after each hidden layer.
    """

    def __init__(self, p: float = 0.1) -> None:
        super().__init__()  # initialize nn.Module bookkeeping
        self.p = p
        # Attribute names are kept identical to the original so existing
        # checkpoints / state_dicts still load.
        self.input_x = nn.Linear(13, 50, bias=False)  # input layer -> 50-unit hidden layer
        self.fc1 = nn.Linear(50, 50)    # hidden layer 2
        self.drop1 = nn.Dropout(self.p)
        self.fc2 = nn.Linear(50, 50)    # hidden layer 3
        self.drop2 = nn.Dropout(self.p)
        self.fc4 = nn.Linear(50, 1)     # output layer
        self.act4 = nn.ReLU()           # shared activation

    def forward(self, inputs):
        """Forward pass: inputs of shape (batch, 13) -> predictions (batch, 1)."""
        x = self.input_x(inputs)
        x = self.act4(self.fc1(x))  # restored non-linearity (was commented out)
        x = self.drop1(x)
        x = self.act4(self.fc2(x))  # restored non-linearity (was commented out)
        x = self.drop2(x)
        x = self.fc4(x)
        # Final ReLU kept from the original: clamps predictions to >= 0
        # (house prices are non-negative).
        return self.act4(x)

# Instantiate the model
fc_net = FC_Model(p=0.1)
print(fc_net)

# Loss function: mean squared error (standard regression objective)
loss_mse = nn.MSELoss()

# Optimizer: plain SGD. NOTE(review): lr=5e-7 is unusually small — presumably
# compensating for unnormalized input features; confirm against the dataset.
optimizer = optim.SGD(fc_net.parameters(), lr=0.0000005)

# Dataset definition: random 75% / 15% / ~10% split into train / valid / test.
from torch.utils.data import Dataset, DataLoader, random_split
train_size, valid_size = int(dataset.shape[0]*0.75), int(dataset.shape[0]*0.15)
# test_size is the remainder so the three sizes always sum to the row count,
# as required by random_split.
test_size = dataset.shape[0] - train_size - valid_size
train_data, valid_data, test_data = \
    random_split(torch.tensor(dataset), [train_size, valid_size, test_size]) # random partition of the tensorized data

train_dataloader = DataLoader(train_data, batch_size=4, shuffle=True) # training loader (shuffled mini-batches)
valid_dataloader = DataLoader(valid_data, batch_size=1) # validation loader
test_dataloader = DataLoader(test_data, batch_size=1) # test loader (unused in this chunk)


# Training loop with early stopping.
epoch = 100  # number of full passes over the training set
from EarlyStopping import EarlyStopping
early_stop = EarlyStopping(patience=12, autodelta=True)
for i in range(epoch):
    # --- training phase ---
    fc_net.train()  # enable dropout
    for k, x_y in enumerate(train_dataloader):
        # Each batch row is 14 values: 13 features + 1 target.
        x, y = torch.split(x_y, split_size_or_sections=[13, 1], dim=1)
        optimizer.zero_grad()           # clear accumulated gradients
        t = fc_net(x)                   # forward pass
        loss = loss_mse(t, y)           # batch MSE
        loss.backward()
        optimizer.step()
        # Periodic progress report (skip batch 0).
        if k % 10 == 0 and k > 0:
            # .item()/.detach() replace the deprecated .data accessor.
            print('Train Epoch:{}, Train Time:{}, loss_mse:{}, \nlabel:{}, predict:{}'.format(
                i, k, loss.item(),
                y.detach().numpy().reshape(-1).round(decimals=3),
                t.detach().numpy().reshape(-1).round(decimals=3)
            ))

    # --- validation phase ---
    fc_net.eval()  # disable dropout for evaluation
    total_loss = 0.0
    n_batches = 0
    with torch.no_grad():
        for x_y in valid_dataloader:
            x, y = torch.split(x_y, split_size_or_sections=[13, 1], dim=1)
            total_loss += loss_mse(fc_net(x), y).item()
            n_batches += 1
    # Guard against an empty validation loader: the original for/else averaged
    # with a loop variable that would be undefined (NameError) in that case.
    if n_batches:
        total_loss /= n_batches
    # Early-stopping check on the mean validation loss.
    # NOTE(review): semantics of k=0.1 come from the project EarlyStopping
    # class — kept as-is; verify against its implementation.
    early_stop(total_loss, k=0.1)
    if early_stop.early_stop:
        print('training stop')  # fixed typo: was 'trainning stop'
        break
