import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader
from sklearn.preprocessing import MinMaxScaler


# Configure matplotlib so Chinese labels render correctly
plt.rcParams.update({
    'font.sans-serif': ['SimHei'],   # font that contains CJK glyphs
    'axes.unicode_minus': False,     # draw a real minus sign with this font
})


# Load the historical load data file.
# index_col=[0]: use the first column as the index.
dataset = pd.read_csv('data/load.csv', index_col=[0])
# Forward-fill missing readings.
# NOTE: fillna(method='pad') is deprecated (removed in pandas 2.x) — use ffill().
dataset = dataset.ffill()

# Flatten all rows into one long 1-D series so the train/val split and the
# sliding-window sample construction below can treat it as a single sequence.
# numpy's row-major flatten() visits elements in the same order as the
# original nested Python loop, but in one C-level pass.
a = np.asarray(dataset).flatten()
dataset = pd.DataFrame(a)

# Split the series: first 80% for training, the 80%-90% slice for validation.
n = len(a)
train_end = int(n * 0.8)
val_end = int(n * 0.9)
train = dataset.iloc[:train_end, [0]]
val = dataset.iloc[train_end:val_end, [0]]

# Normalise to the [0, 1] range; the scaler is fitted on the training
# data only and then applied unchanged to the validation data.
scaler = MinMaxScaler(feature_range=(0, 1))
train = scaler.fit_transform(train)
val = scaler.transform(val)

# Build the training samples by sliding a window over the series: each
# sample's features are the preceding 96 load readings and its label is
# the reading that immediately follows them.
x_train = [train[j - 96:j, :] for j in range(96, len(train))]
y_train = [train[j] for j in range(96, len(train))]

# Convert the sample lists to numpy arrays
x_train, y_train = np.array(x_train), np.array(y_train)

# Build the validation samples the same way: a window of 96 readings as
# features, the next reading as the label, sliding one step at a time.
x_val = [val[j - 96:j, :] for j in range(96, len(val))]
y_val = [val[j] for j in range(96, len(val))]

# Convert the sample lists to numpy arrays
x_val, y_val = np.array(x_val), np.array(y_val)

# Wrap the numpy arrays as float32 tensors
x_train, y_train = torch.FloatTensor(x_train), torch.FloatTensor(y_train)
x_val, y_val = torch.FloatTensor(x_val), torch.FloatTensor(y_val)

# Batch the data; only the training set is shuffled each epoch
train_dataset = TensorDataset(x_train, y_train)
val_dataset = TensorDataset(x_val, y_val)
train_loader = DataLoader(train_dataset, batch_size=512, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=512, shuffle=False)


class LSTMModel(nn.Module):
    """Two-layer LSTM regressor: the first LSTM reads the whole window,
    its last time step is fed through a second LSTM and a small MLP head
    to predict the next load value.

    Args:
        input_size: number of features per time step (1 here: the load).
        hidden_size: hidden state size of both LSTM layers.
    """

    def __init__(self, input_size, hidden_size):
        super(LSTMModel, self).__init__()
        self.lstm1 = nn.LSTM(input_size, hidden_size)
        self.lstm2 = nn.LSTM(hidden_size, hidden_size)
        self.fc1 = nn.Linear(hidden_size, 10)
        self.fc2 = nn.Linear(10, 1)

    def forward(self, x):
        # x: (batch, seq_len, input_size) -> (seq_len, batch, input_size),
        # because nn.LSTM defaults to batch_first=False (sequence-first).
        x = x.transpose(0, 1)
        out, _ = self.lstm1(x)                      # (seq_len, batch, hidden)
        # BUG FIX: in sequence-first layout the last time step is out[-1];
        # the original out[:, -1, :] selected the last *batch sample*,
        # producing a (seq_len, 1) output instead of (batch, 1).
        out = out[-1].unsqueeze(0)                  # (1, batch, hidden)
        out, _ = self.lstm2(out)                    # length-1 sequence
        out = nn.functional.relu(self.fc1(out.squeeze(0)))
        out = self.fc2(out)                         # (batch, 1)
        return out


# Instantiate the model, the loss function and the optimiser
input_size = 1       # one load value per time step
hidden_size = 10
model = LSTMModel(input_size, hidden_size)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

# Per-epoch loss history, collected for the comparison plot at the end
num_epochs = 30
train_losses, val_losses = [], []

for epoch in range(num_epochs):
    # ---- training pass ----
    model.train()
    accumulated = 0.0
    for inputs, labels in train_loader:
        optimizer.zero_grad()
        loss = criterion(model(inputs), labels)
        loss.backward()
        optimizer.step()
        accumulated += loss.item()
    train_loss = accumulated / len(train_loader)
    train_losses.append(train_loss)

    # ---- validation pass (no gradient tracking) ----
    model.eval()
    accumulated = 0.0
    with torch.no_grad():
        for inputs, labels in val_loader:
            accumulated += criterion(model(inputs), labels).item()
    val_loss = accumulated / len(val_loader)
    val_losses.append(val_loss)

    print(f'Epoch {epoch + 1}/{num_epochs}, Train Loss: {train_loss:.4f}, Val Loss: {val_loss:.4f}')

# Persist the trained weights
torch.save(model.state_dict(), 'LSTM_model.pth')

# Plot training vs validation loss on a single 12x8 figure
plt.figure(figsize=(12, 8))
plt.plot(train_losses, label='train')
plt.plot(val_losses, label='val')

# Title and axis labels
plt.title("LSTM神经网络loss值", fontsize=15)
plt.xlabel('训练轮次', fontsize=15)
plt.ylabel('loss值', fontsize=15)

# Enlarge tick labels and the legend for readability
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.legend(fontsize=15)
plt.show()
