import time
import pandas as pd
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from tqdm import tqdm
import matplotlib
import matplotlib.pyplot as plt
from sklearn.metrics import mean_absolute_error, mean_squared_error

matplotlib.use("Agg")

# Load the processed dynamic-load trace; the timestamp column is dropped
# because the model consumes only the numeric load features.
data = pd.read_csv('./dynamic_load_processed.csv').drop(columns=['sample_time'])

# NOTE: StandardScaler standardizes COLUMN-WISE — every feature column is
# centered on its own mean and divided by its own standard deviation, so each
# feature ends up with zero mean and unit variance. Rows are never normalized.
scaler = StandardScaler()
data_scaled = scaler.fit_transform(data)


def create_sequences(data, n_steps):
    """Slice a 2-D time-series array into supervised next-step pairs.

    Each sample X[i] is a window of `n_steps` consecutive rows of `data`,
    and y[i] is the single row immediately after that window (the
    next-time-step prediction target).

    Args:
        data: array of shape (T, F) — T time steps, F features.
        n_steps: window length, i.e. number of past steps per sample.

    Returns:
        (X, y): X with shape (T - n_steps, n_steps, F) and y with shape
        (T - n_steps, F). Both are empty when T <= n_steps.
    """
    X, y = [], []
    # The last valid window start is T - n_steps - 1, so iterate directly over
    # the valid range instead of scanning all rows and breaking out mid-loop.
    for start in range(len(data) - n_steps):
        end = start + n_steps
        X.append(data[start:end, :])
        y.append(data[end, :])
    return np.array(X), np.array(y)


n_steps = 50
X, y = create_sequences(data_scaled, n_steps)

# Keep only the four target metrics as prediction outputs. Index.get_loc maps
# each column name to its positional index in the original DataFrame, and that
# index list selects the matching columns out of every target row.
output_columns = ['cpu', 'memory', 'net', 'io_rate']
output_indices = [data.columns.get_loc(col) for col in output_columns]
y = y[:, output_indices]

# 70/30 split with a fixed seed, then move everything into float32 tensors.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
X_train, y_train, X_test, y_test = (
    torch.tensor(arr, dtype=torch.float32) for arr in (X_train, y_train, X_test, y_test)
)
train_loader = DataLoader(TensorDataset(X_train, y_train), batch_size=32, shuffle=True)

"""
输入数据维度
特征维度：input_size = X_train.shape[2]，从训练数据推断得出
时间步维度：n_steps = 50，即每个序列包含50个时间步
批量维度：batch_size = 32，训练时的批次大小
输出数据维度
输出维度：output_size = len(output_columns) = 4，对应cpu、memory、net、io_rate四个指标
卷积核维度
卷积核大小：kernel_size = 2，每个卷积核覆盖2个时间步
通道数：每层卷积核的输入通道数和输出通道数根据网络层变化：
第一层输入通道数 = input_size
各层输出通道数 = num_channels[i] = 25
膨胀率：按层指数增长，dilation_size = 2 ** i，分别为1, 2, 4, 8
这些维度共同决定了TCN网络的结构和参数规模。
"""

"""
输入数据形状：(batch_size, features, time_steps)
例如：(32, 12, 50) - 32个样本，12个特征，50个时间步
卷积操作后：(batch_size, out_channels, time_steps)
例如：(32, 25, 50) - 32个样本，25个新特征，50个时间步
关键点：
时间步维度：保持50不变（通过适当padding实现）
特征维度：从12个输入特征变为25个输出特征
样本维度：保持32（batch_size）不变
所以卷积操作主要是对特征维度进行变换，通过学习不同的卷积核来提取和组合特征，而时间和样本维度基本保持不变。
"""

"""
输入数据维度
特征维度：input_size = X_train.shape[2]，从训练数据推断得出
时间步维度：n_steps = 50，即每个序列包含50个时间步
批量维度：batch_size = 32，训练时的批次大小
输出数据维度
输出维度：output_size = len(output_columns) = 4，对应cpu、memory、net、io_rate四个指标
卷积核维度
卷积核大小：kernel_size = 2，每个卷积核覆盖2个时间步
通道数：每层卷积核的输入通道数和输出通道数根据网络层变化：
第一层输入通道数 = input_size
各层输出通道数 = num_channels[i] = 25
膨胀率：按层指数增长，dilation_size = 2 ** i，分别为1, 2, 4, 8
这些维度共同决定了TCN网络的结构和参数规模。
"""


class TemporalConvNet(nn.Module):
    """Stack of dilated causal 1-D convolutions (TCN feature extractor).

    Args:
        num_inputs: number of input channels (feature dimension).
        num_channels: output channel count per level; the number of levels is
            len(num_channels) and the dilation at level i is 2 ** i.
        kernel_size: temporal width of every convolution kernel.
        dropout: dropout probability applied after each ReLU.
    """

    def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
        super(TemporalConvNet, self).__init__()
        layers = []
        num_levels = len(num_channels)
        for i in range(num_levels):
            dilation_size = 2 ** i  # receptive field grows exponentially per level
            in_channels = num_inputs if i == 0 else num_channels[i - 1]
            out_channels = num_channels[i]
            # BUGFIX: Conv1d's `padding` argument pads BOTH ends of the time
            # axis, which (a) lets each output step see FUTURE time steps
            # (non-causal) and (b) grows the sequence length by
            # (kernel_size - 1) * dilation at every level. A TCN must pad only
            # on the left; then the output length equals the input length and
            # every output step depends only on past/present inputs.
            layers += [nn.ConstantPad1d(((kernel_size - 1) * dilation_size, 0), 0.0),
                       nn.Conv1d(in_channels,
                                 out_channels,
                                 kernel_size,
                                 stride=1,
                                 padding=0,
                                 dilation=dilation_size),
                       nn.ReLU(),
                       nn.Dropout(dropout)]
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        """x: (batch, num_inputs, time) -> (batch, num_channels[-1], time)."""
        return self.network(x)


class Attention(nn.Module):
    """Dot-product attention pooling over the time axis.

    A single learnable vector scores every time step; the softmax-normalized
    scores weight the steps, and the weighted sum collapses the time axis.
    """

    def __init__(self, input_dim):
        super(Attention, self).__init__()
        self.attention_weights = nn.Parameter(torch.Tensor(input_dim, 1))
        nn.init.xavier_uniform_(self.attention_weights.data)

    def forward(self, x):
        """x: (batch, time, input_dim) -> (batch, input_dim)."""
        # One scalar score per step: (batch, time, dim) @ (dim, 1) -> (batch, time).
        raw_scores = torch.matmul(x, self.attention_weights).squeeze(-1)
        # Normalize across the time dimension so weights sum to 1 per sample.
        weights = raw_scores.softmax(dim=1)
        # Batched (1, time) x (time, dim) product pools to (batch, dim).
        pooled = torch.bmm(weights.unsqueeze(1), x).squeeze(1)
        return pooled


class TCN(nn.Module):
    """TCN feature extractor + attention pooling + linear read-out head."""

    def __init__(self, input_size, output_size, num_channels, kernel_size=2, dropout=0.2):
        super(TCN, self).__init__()
        self.tcn = TemporalConvNet(input_size, num_channels, kernel_size, dropout)
        self.attention = Attention(num_channels[-1])
        self.linear = nn.Linear(num_channels[-1], output_size)

    def forward(self, x):
        """x: (batch, input_size, time) -> (batch, output_size)."""
        features = self.tcn(x)                # (batch, channels, time)
        features = features.permute(0, 2, 1)  # attention expects (batch, time, channels)
        pooled = self.attention(features)     # (batch, channels)
        return self.linear(pooled)            # (batch, output_size)


# Model hyper-parameters: the feature count comes straight from the training
# tensors; the head predicts one value per monitored metric.
input_size = X_train.shape[2]      # input features per time step
output_size = len(output_columns)  # one output per target column
kernel_size = 2                    # temporal width of every conv kernel
dropout = 0.2                      # dropout after each conv level
num_channels = [25] * 4            # four TCN levels, 25 channels each
model = TCN(input_size, output_size, num_channels, kernel_size=kernel_size, dropout=dropout)

# MSE objective with Adam; the scheduler multiplies the learning rate by 0.1
# once the monitored loss has not improved for 5 consecutive epochs.
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=5, factor=0.1)

num_epochs = 50
# Per-column training-loss history: column name -> list of per-epoch losses.
loss_history = {col: [] for col in output_columns}
epoch_losses = []
best_loss = float('inf')
best_model_wts = None
start_time = time.time()

for epoch in range(num_epochs):
    model.train()
    running_loss = {col: 0.0 for col in output_columns}

    for inputs, targets in train_loader:
        # DataLoader yields (batch, time, features); Conv1d wants (batch, features, time).
        inputs = inputs.permute(0, 2, 1)
        optimizer.zero_grad()
        outputs = model(inputs)
        # Sum the per-target MSE losses and backprop through the total so
        # every output column contributes to the gradient.
        total_loss = 0
        per_col_loss = {}
        for i, col in enumerate(output_columns):
            loss = criterion(outputs[:, i], targets[:, i])
            per_col_loss[col] = loss
            total_loss += loss

        total_loss.backward()

        optimizer.step()

        # Reuse the losses computed above; the original recomputed them here
        # from the same `outputs`/`targets` tensors — identical values,
        # redundant work.
        for col in output_columns:
            running_loss[col] += per_col_loss[col].item() * inputs.size(0)

    epoch_loss = {}
    for col in output_columns:
        epoch_loss[col] = running_loss[col] / len(train_loader.dataset)
        loss_history[col].append(epoch_loss[col])
        print(f'Epoch [{epoch + 1}/{num_epochs}], {col} Loss: {epoch_loss[col]:.4f}')

    epoch_losses.append(epoch_loss)

    # Evaluate on the held-out set; the scheduler and best-model tracking both
    # key off the mean per-column test loss.
    model.eval()
    with torch.no_grad():
        X_test_perm = X_test.permute(0, 2, 1)
        y_pred = model(X_test_perm)
        test_loss = {col: criterion(y_pred[:, i], y_test[:, i]).item() for i, col in enumerate(output_columns)}

    scheduler.step(sum(test_loss.values()) / len(test_loss))
    if sum(test_loss.values()) < best_loss:
        best_loss = sum(test_loss.values())
        # BUGFIX: state_dict() returns live references to the model's tensors,
        # so storing it directly would silently track the FINAL weights rather
        # than the best ones. Clone every tensor to snapshot the parameters.
        best_model_wts = {k: v.detach().clone() for k, v in model.state_dict().items()}

end_time = time.time()
training_time = end_time - start_time
print(f'Training Time: {training_time:.2f} seconds')

epoch_losses_df = pd.DataFrame(epoch_losses)
epoch_losses_df.to_csv("./50.csv", index=False)  # was an f-string with no placeholders

# Restore the best-performing snapshot before final evaluation.
model.load_state_dict(best_model_wts)

model.eval()
with torch.no_grad():
    # Conv1d expects (batch, features, time); X_test is rebound to the permuted view.
    X_test = X_test.permute(0, 2, 1)
    y_pred = model(X_test)
    test_loss = {}
    for i, col in enumerate(output_columns):
        test_loss[col] = criterion(y_pred[:, i], y_test[:, i]).item()
    print(f'Test Loss: {sum(test_loss.values()) / len(test_loss):.4f}')

# Plot one training-loss curve per target column (loss_history maps each
# column name to its list of per-epoch losses) and save to disk.
for col in output_columns:
    plt.plot(loss_history[col], label=f'Training {col} Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Training Loss over Epochs')
plt.legend()
plt.tight_layout()
plt.savefig("loss_curve.png", dpi=200, bbox_inches="tight")
plt.close()

# Fit a scaler on the raw target columns only. StandardScaler is column-wise,
# so the per-column mean/std here match what the full-data scaler used for
# these columns, making inverse_transform consistent with how y was scaled.
output_scaler = StandardScaler()
output_scaler.fit(data[output_columns])

y_test_rescaled = output_scaler.inverse_transform(y_test.numpy())
y_pred_rescaled = output_scaler.inverse_transform(y_pred.numpy())

# BUGFIX: y_test/y_pred already contain exactly the four target columns in
# `output_columns` order (selected via output_indices before the split), so
# the rescaled arrays need no further indexing. The original re-indexed with
# output_indices — positions in the ORIGINAL DataFrame — which is only
# correct by accident when the targets sit at positions 0..3, and otherwise
# raises IndexError or silently picks the wrong columns.
y_pred_final = y_pred_rescaled

print(y_pred_final)

# Aggregate regression metrics over all four targets in original units.
y_true = y_test_rescaled
mae = mean_absolute_error(y_true, y_pred_final)
mse = mean_squared_error(y_true, y_pred_final)
r2 = r2_score(y_true, y_pred_final)
print(f'MAE: {mae:.4f}')
print(f'MSE: {mse:.4f}')
print(f'R2: {r2:.4f}')
