import torch
import torch.nn as nn
import joblib
from sklearn.preprocessing import MinMaxScaler
from tqdm import tqdm
import pandas as pd

# Load the training set. Column 0 is treated as an index/ID column,
# the last column is the regression target, everything in between is a feature.
file_path = './data-process/data/train.csv'
df = pd.read_csv(file_path)

raw_features = df.iloc[:, 1:-1].values
raw_targets = df.iloc[:, -1].values.reshape(-1, 1)

# Fit a separate min-max scaler per role so target predictions
# can be inverse-transformed back to the original scale later.
feature_scaler = MinMaxScaler()
target_scaler = MinMaxScaler()

# Scale into [0, 1] and convert straight to float32 PyTorch tensors.
features_tensor = torch.tensor(
    feature_scaler.fit_transform(raw_features), dtype=torch.float32
)
targets_tensor = torch.tensor(
    target_scaler.fit_transform(raw_targets), dtype=torch.float32
)


# 定义LSTM模型
class LSTMModel(nn.Module):
    """LSTM regressor producing one scalar prediction per sample.

    The recurrent stack uses PyTorch's default (seq_len, batch, input_size)
    layout (batch_first=False); only the last time step feeds the head.
    """

    def __init__(self, input_size, hidden_size, num_layers):
        super(LSTMModel, self).__init__()
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers)
        self.linear = nn.Linear(hidden_size, 1)

    def forward(self, x):
        # Bug fix: a 2-D (batch, input_size) tensor used to be interpreted by
        # nn.LSTM as a single UNBATCHED sequence of length `batch`, so the
        # model emitted one shared prediction for the whole mini-batch (which
        # MSELoss then silently broadcast against (batch, 1) targets).
        # Prepending a seq_len=1 axis makes each row an independent sample.
        if x.dim() == 2:
            x = x.unsqueeze(0)  # -> (1, batch, input_size)
        out, _ = self.lstm(x)
        # Keep only the last time step's output for the regression head.
        return self.linear(out[-1])


# Build the model: 9 input features, 16 hidden units, 2 stacked LSTM layers.
input_size = 9
hidden_size = 16
num_layers = 2
model = LSTMModel(input_size, hidden_size, num_layers)

# Move the model to the GPU when one is present.
if torch.cuda.is_available():
    model = model.cuda()

# Mean-squared-error loss optimised with Adam.
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# Halve the learning rate after 3 epochs without improvement,
# never going below a floor of 1e-7.
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer,
    mode='min',
    factor=0.5,
    patience=3,
    verbose=True,
    min_lr=0.0000001,
)

# Mini-batch size for training.
batch_size = 4

# Train with simple sequential mini-batching (no shuffling between epochs).
num_epochs = 300
model.train()  # explicit train mode (matters if dropout/batchnorm are added)
for epoch in tqdm(range(num_epochs)):
    sum_loss = []
    for batch_start in range(0, len(features_tensor), batch_size):
        batch_features = features_tensor[batch_start:batch_start+batch_size]
        batch_targets = targets_tensor[batch_start:batch_start+batch_size]

        # Move each mini-batch to the GPU when available.
        if torch.cuda.is_available():
            batch_features = batch_features.cuda()
            batch_targets = batch_targets.cuda()

        outputs = model(batch_features)
        loss = criterion(outputs, batch_targets)
        # Fix: `loss.data` is a deprecated autograd escape hatch; calling
        # .item() on the loss tensor is the supported way to read the scalar.
        sum_loss.append(loss.item())

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # The epoch's mean training loss drives the plateau LR scheduler.
    avg_loss = sum(sum_loss) / len(sum_loss)
    scheduler.step(avg_loss)
    if epoch == 0 or (epoch + 1) % 5 == 0:
        print('epoch:{},loss:{:.4}'.format(epoch + 1, avg_loss))

# Persist the trained weights plus both fitted scalers; the scalers are
# required at inference time to normalise inputs and un-scale predictions.
model_path = 'model.pth'
torch.save(model.state_dict(), model_path)
print('LSTM模型已保存！')

joblib.dump(feature_scaler, 'feature_scaler.pkl')
joblib.dump(target_scaler, 'target_scaler.pkl')
print('训练数据已保存！')
