import torch
import torch.nn as nn
import pandas as pd
import numpy as np
import os
# NOTE: matplotlib import removed — this module produces no plots
from tqdm import tqdm
from torch_geometric.nn import GATv2Conv, MessagePassing
import torch.nn.functional as F
from sklearn.preprocessing import MinMaxScaler
import warnings
from datetime import datetime, timedelta
from utils.logger import create_logger
import sys
from config import settings

logger = create_logger(__name__)

warnings.filterwarnings('ignore', category=RuntimeWarning)


def create_time_intervals(num_steps):
    """Build 5-minute-interval timestamps starting at now + 5 minutes.

    Args:
        num_steps (int): Number of time steps to generate.

    Returns:
        list: Timestamps formatted as "YYYY/MM/DD HH:MM:SS", spaced five
        minutes apart, with the first one five minutes after the current time.
    """
    # The first interval starts five minutes after the current wall-clock time.
    first_point = datetime.now() + timedelta(minutes=5)
    return [
        (first_point + timedelta(minutes=5 * step)).strftime("%Y/%m/%d %H:%M:%S")
        for step in range(num_steps)
    ]


# NOTE: matplotlib configuration removed — no plotting in this module

# Global feature definitions
NODE_FEATURES = ['p', 'q', 't', 'st', 'fr', 'p-', 'p+', 'q-', 'q+', 't-', 't+', 'pu', 'pd', 'tu', 'td', 'inv',
                 'NC1', 'NC2', 'NC3', 'NCO2', 'NN2', 'NNC4', 'NIC4', 'NH2S', 'NIC5', 'NNC5', 'NNC6']
CONTROL_FEATURES = ['p', 'q', 'st', 'fr', 'p+', 'pd', 'NC1', 'NC2', 'NC3', 'NCO2', 'NN2', 'NNC4', 'NIC4', 'NH2S',
                    'NIC5', 'NNC5', 'NNC6']  # operating-condition (control) features

# Feature grouping configuration
FEATURE_GROUPS = {
    'pressure': {'features': ['p', 'p-', 'p+', 'pu', 'pd']},  # pressure-related features
    'flow': {'features': ['q', 'q-', 'q+']},  # flow-related features
    'temperature': {'features': ['t', 't-', 't+', 'tu', 'td']},  # temperature-related features
    'inv': {'features': ['inv']},  # inventory feature
    'status': {'features': ['st', 'fr']},  # status features
    'gas': {'features': ['NC1', 'NC2', 'NC3', 'NCO2', 'NN2', 'NNC4', 'NIC4', 'NH2S', 'NIC5', 'NNC5', 'NNC6']}  # gas-composition features
}

# Per-node-type feature configuration
NODE_TYPE_FEATURES = {
    'E': ['p', 'q', 't', 'NC1', 'NC2', 'NC3', 'NCO2', 'NN2', 'NNC4', 'NIC4', 'NH2S', 'NIC5', 'NNC5', 'NNC6'],  # E-type node features
    'NO': ['p', 'q', 't', 'NC1', 'NC2', 'NC3', 'NCO2', 'NN2', 'NNC4', 'NIC4', 'NH2S', 'NIC5', 'NNC5', 'NNC6'],
    # NO-type node features
    'B': ['p-', 'p+', 'q-', 'q+', 't-', 't+', 'fr'],  # B-type node features
    'RG': ['pu', 'pd', 'q', 'tu', 'td'],  # RG-type node features
    'RE': ['p-', 'p+', 'q-', 'q+', 't-', 't+'],  # RE-type node features
    'KC': ['p-', 'p+', 'q-', 'q+', 't-', 't+', 'st'],  # KC-type node features ('pwr' removed)
    'T': ['p-', 'p+', 'q-', 'q+', 't-', 't+', 'inv'],  # T-type node features
    'H': ['p-', 'p+', 'q-', 'q+', 't-', 't+']  # H-type node features
}


class FeatureNormalizer:
    """Min-max normalizer keyed by (node type, feature, input/output role).

    Heavy-tailed features (flows, inventory, gas fractions) are log1p
    transformed before min-max scaling. Each fitted scaler is stored under the
    key "{node_type}_{feat}_{input|output}" with a "_log" suffix for
    log-space features.
    """

    def __init__(self, processor):
        # DataProcessor supplying node typing and internal-index lookups.
        self.processor = processor
        self.scalers = {}  # one scaler per node type + single feature (+ role)
        self.log_transform_features = ['inv', 'q', 'q-', 'q+',
                                       'NC1', 'NC2', 'NC3', 'NCO2', 'NN2', 'NNC4', 'NIC4', 'NH2S', 'NIC5', 'NNC5',
                                       'NNC6']  # features that are log-transformed before scaling

        # Input features are the control (operating-condition) features; every
        # other node feature is treated as a model output.
        self.input_features = CONTROL_FEATURES
        self.output_features = [f for f in processor.node_features if f not in self.input_features]

    def fit_transform(self, node_features, edge_features, train_size):
        """Fit per-(node type, feature) scalers and normalize the features.

        Scalers are fitted on the first `train_size` timesteps only, then
        applied to all timesteps.

        Args:
            node_features: tensor [num_nodes, num_timesteps, num_features].
            edge_features: edge feature tensor (passed through zeroed).
            train_size: number of leading timesteps used for fitting.

        Returns:
            (normalized node features, zeroed edge features).
        """
        node_features_norm = torch.zeros_like(node_features)

        # Iterate over each node type
        for node_type in set(self.processor.node_types.values()):
            # Gather internal indices of every node of this type
            node_indices = [
                self.processor.get_internal_node_index(node_id)
                for node_id, ntype in self.processor.node_types.items()
                if ntype == node_type and self.processor.get_internal_node_index(node_id) is not None
            ]

            if not node_indices:
                continue

            # Feature list this node type may carry
            type_features = self.processor.node_type_features.get(node_type, [])

            # Normalize each feature independently
            for feat in type_features:
                if feat not in self.processor.node_features:
                    continue

                # Classify the feature as model input (control) or output
                is_input = feat in self.input_features
                feat_type = "input" if is_input else "output"

                # Column index of this feature
                idx = self.processor.node_features.index(feat)

                # Whether this feature is scaled in log space
                need_log_transform = feat in self.log_transform_features

                # Collect training-window data from all nodes of this type
                feature_data = []
                for node_idx in node_indices:
                    data = node_features[node_idx, :train_size, idx].reshape(-1, 1).cpu().numpy()
                    if need_log_transform:
                        # Clamp to positive values before the log transform
                        data = np.maximum(data, 1e-10)
                    feature_data.append(data)

                if feature_data:
                    combined_data = np.concatenate(feature_data, axis=0)

                    if need_log_transform:
                        # Log transform
                        log_data = np.log1p(combined_data)

                        # Fit the scaler on log-transformed training data
                        scaler = MinMaxScaler()
                        scaler.fit(log_data)
                        self.scalers[f"{node_type}_{feat}_{feat_type}_log"] = scaler

                        # Apply log transform + scaling over ALL timesteps
                        for node_idx in node_indices:
                            data = node_features[node_idx, :, idx].reshape(-1, 1).cpu().numpy()
                            # Clamp to positive values
                            data = np.maximum(data, 1e-10)
                            # Scale after the log transform
                            log_data = np.log1p(data)
                            transformed = scaler.transform(log_data)
                            node_features_norm[node_idx, :, idx] = torch.tensor(
                                transformed.reshape(-1),
                                dtype=torch.float32
                            )

                    else:
                        # Fit a plain min-max scaler
                        scaler = MinMaxScaler()
                        scaler.fit(combined_data)
                        self.scalers[f"{node_type}_{feat}_{feat_type}"] = scaler

                        # Apply scaling
                        for node_idx in node_indices:
                            data = node_features[node_idx, :, idx].reshape(-1, 1).cpu().numpy()
                            transformed = scaler.transform(data)
                            node_features_norm[node_idx, :, idx] = torch.tensor(
                                transformed.reshape(-1),
                                dtype=torch.float32
                            )

        # Return normalized node features and an empty/zeroed edge tensor
        return node_features_norm, torch.zeros_like(edge_features) if edge_features.numel() > 0 else edge_features

    def inverse_transform_nodes(self, node_features):
        """Map normalized node features back to the original scale.

        Uses the per-(node type, feature) scalers fitted in fit_transform;
        log-space features are inverse-scaled then expm1'd.

        Args:
            node_features: tensor [batch, num_nodes, num_features] or
                [num_steps, batch, num_nodes, num_features] (flattened here).

        Returns:
            numpy array of de-normalized features, same (flattened) shape.
        """
        # Ensure the input is 3-D [batch_size, num_nodes, num_features]
        original_shape = node_features.shape
        if len(original_shape) == 4:  # [num_steps, batch_size, num_nodes, num_features]
            # Flatten to [batch_size*num_steps, num_nodes, num_features]
            node_features = node_features.reshape(-1, original_shape[2], original_shape[3])

        batch_size, num_nodes, num_features = node_features.shape
        node_features_orig = torch.zeros_like(node_features)

        # Process in small batches to limit memory use
        batch_size_limit = 32
        num_batches = (batch_size + batch_size_limit - 1) // batch_size_limit

        for batch_idx in range(num_batches):
            start_idx = batch_idx * batch_size_limit
            end_idx = min((batch_idx + 1) * batch_size_limit, batch_size)
            current_batch_size = end_idx - start_idx

            # De-normalize per node type
            for node_type in set(self.processor.node_types.values()):
                # Gather internal indices of every node of this type
                node_indices = [
                    self.processor.get_internal_node_index(node_id)
                    for node_id, ntype in self.processor.node_types.items()
                    if ntype == node_type and self.processor.get_internal_node_index(node_id) is not None
                ]

                if not node_indices:
                    continue

                # Feature list this node type may carry
                type_features = self.processor.node_type_features.get(node_type, [])

                # De-normalize each feature independently
                for feat in type_features:
                    if feat not in self.processor.node_features:
                        continue

                    # Classify the feature as model input (control) or output
                    is_input = feat in self.input_features
                    feat_type = "input" if is_input else "output"

                    # Column index of this feature
                    idx = self.processor.node_features.index(feat)

                    # Whether this feature was scaled in log space
                    need_log_transform = feat in self.log_transform_features

                    if need_log_transform:
                        # Look up the matching log-space scaler
                        log_scaler_key = f"{node_type}_{feat}_{feat_type}_log"

                        if log_scaler_key in self.scalers:
                            log_scaler = self.scalers[log_scaler_key]

                            for node_idx in node_indices:
                                if node_idx < num_nodes:  # guard against stale indices
                                    data = node_features[start_idx:end_idx, node_idx, idx].cpu().numpy()

                                    # Undo scaling first (dict form assumes a [-1, 1] range)
                                    if isinstance(log_scaler, dict) and 'min' in log_scaler and 'max' in log_scaler:
                                        x_min = log_scaler['min']
                                        x_max = log_scaler['max']
                                        inverse_scaled = (data + 1) * (x_max - x_min) / 2 + x_min
                                    else:
                                        # Legacy format: sklearn scaler object
                                        data_2d = data.reshape(-1, 1)
                                        inverse_scaled = log_scaler.inverse_transform(data_2d).reshape(-1)

                                    # Then exponentiate back to the original space
                                    original_values = np.expm1(inverse_scaled)
                                    node_features_orig[start_idx:end_idx, node_idx, idx] = torch.tensor(
                                        original_values).reshape(current_batch_size)
                    else:
                        # Look up the matching plain scaler
                        scaler_key = f"{node_type}_{feat}_{feat_type}"

                        if scaler_key in self.scalers:
                            scaler = self.scalers[scaler_key]

                            for node_idx in node_indices:
                                if node_idx < num_nodes:  # guard against stale indices
                                    data = node_features[start_idx:end_idx, node_idx, idx].cpu().numpy()

                                    # Undo scaling (dict form assumes a [-1, 1] range)
                                    if isinstance(scaler, dict) and 'min' in scaler and 'max' in scaler:
                                        x_min = scaler['min']
                                        x_max = scaler['max']
                                        transformed = (data + 1) * (x_max - x_min) / 2 + x_min
                                    else:
                                        # Legacy format: sklearn scaler object
                                        data_2d = data.reshape(-1, 1)
                                        transformed = scaler.inverse_transform(data_2d).reshape(-1)

                                    node_features_orig[start_idx:end_idx, node_idx, idx] = torch.tensor(
                                        transformed).reshape(current_batch_size)

        return node_features_orig.cpu().numpy()


class DataProcessor:
    """Holds the static graph description (features, mappings, node types).

    Provides index lookups used by the normalizer and predictor. Instances
    may also be restored via pickle (registered on __main__ by
    ModelPredictor), so lookup helpers must not assume __init__ ran.
    """

    def __init__(self, node_features, node_mapping, edge_mapping, edge_connections, node_types, control_features):
        self.node_features = node_features
        self.node_mapping = node_mapping  # internal index -> external node id
        self.edge_mapping = edge_mapping
        self.edge_connections = edge_connections
        self.node_types = node_types  # external node id -> type string
        self.control_features = control_features

        # Inside __init__ the attribute can never pre-exist, so the original
        # hasattr dance always produced an empty dict; keep that behavior.
        self.control_nodes = {}

        # Per-node-type feature configuration (module-level constant)
        self.node_type_features = NODE_TYPE_FEATURES

        # Safety check
        self._validate_initialization()

    def _validate_initialization(self):
        """Ensure control_nodes is always a dict (covers None and wrong types)."""
        if not isinstance(self.control_nodes, dict):
            self.control_nodes = {}

    def get_internal_node_index(self, node_id):
        """Return the internal index for `node_id`, or None if unmapped.

        Uses a lazily built reverse map (node id -> first internal index) so
        repeated lookups are O(1) instead of scanning node_mapping each call.
        getattr guards instances restored by pickle without __init__ running.
        """
        if self.node_mapping is None:
            return None
        reverse = getattr(self, '_reverse_node_mapping', None)
        if reverse is None or len(reverse) != len(self.node_mapping):
            reverse = {}
            # setdefault keeps the FIRST index for a duplicated node id,
            # matching the original linear-scan semantics.
            for idx, nid in self.node_mapping.items():
                reverse.setdefault(nid, idx)
            self._reverse_node_mapping = reverse
        return reverse.get(node_id)


# --- Model components ---
class NodeMessagePassingLayer(nn.Module):
    """Node message-passing layer: each node aggregates its neighbors' features."""

    def __init__(self, hidden_dim, num_heads=4, dropout=0.1):
        super(NodeMessagePassingLayer, self).__init__()
        self.hidden_dim = hidden_dim
        self.num_heads = num_heads

        # GATv2 attention over neighbor nodes only (no edge features);
        # per-head outputs are concatenated back up to hidden_dim.
        self.node_gat = GATv2Conv(
            in_channels=hidden_dim,
            out_channels=hidden_dim // num_heads,
            heads=num_heads,
            dropout=dropout,
            edge_dim=None,  # edge features unused
            concat=True,
            add_self_loops=True
        )

        # Layer normalization applied after the residual sum
        self.node_norm = nn.LayerNorm(hidden_dim)

    def forward(self, node_features, edge_index):
        """Aggregate neighbor information into each node's representation.

        Args:
            node_features: node features [num_nodes, hidden_dim]
            edge_index: edge index [2, num_edges]

        Returns:
            Updated node features [num_nodes, hidden_dim].
        """
        residual = node_features
        aggregated = self.node_gat(node_features, edge_index)
        # Residual connection followed by layer norm
        return self.node_norm(residual + aggregated)


# 新模型：滑动窗口-GAT-LSTM模型类（来自训练代码）
# Sliding-window GAT-LSTM model (from the training code)
class SlidingWindowGATLSTM(nn.Module):
    """Sliding window -> spatial aggregation (GAT) -> temporal aggregation (LSTM) -> multi-step output.

    Processing order:
    1) Receive a sliding window of window_size timesteps [t-W+1, ..., t-1, t] including the current step t
    2) Embed features and run GAT spatial aggregation on each timestep of the window separately
    3) Feed the GAT-aggregated window [t-W+1, ..., t-1, t] into an LSTM for temporal aggregation
    4) Predict the next num_steps timesteps [t+1, t+2, ..., t+num_steps] at once via the decoder head
    """

    def __init__(self, node_input_dim, hidden_dim, num_gnn_layers=2,
                 num_heads=4, dropout=0.1,
                 node_control_features=None, window_size=1, num_steps=144,
                 lstm_layers=1, decoder_lstm_layer_1=1, decoder_lstm_layer_2=1):
        super(SlidingWindowGATLSTM, self).__init__()

        self.window_size = window_size
        self.num_steps = num_steps

        # Indices of the control (operating-condition) features, if provided
        self.node_control_indices = node_control_features if node_control_features is not None else []

        # 1. Feature embedding layer — applied to every timestep of the window
        self.node_embedding = nn.Sequential(
            nn.Linear(node_input_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout)
        )

        # 2. GAT layers — spatial feature aggregation per timestep
        self.node_message_layers = nn.ModuleList([
            NodeMessagePassingLayer(
                hidden_dim=hidden_dim,
                num_heads=num_heads,
                dropout=dropout
            )
            for _ in range(num_gnn_layers)
        ])

        # 3. Encoder LSTM — aggregates the window's time series per node
        self.node_lstm = nn.LSTM(hidden_dim, hidden_dim, num_layers=lstm_layers, batch_first=True)

        # Decoder LSTMs — expand the encoder state into num_steps predictions
        self.decoder_lstm1 = nn.LSTM(hidden_dim, 2*hidden_dim, num_layers=decoder_lstm_layer_1, batch_first=True)
        self.decoder_lstm2 = nn.LSTM(hidden_dim*2, node_input_dim, num_layers=decoder_lstm_layer_2, batch_first=True)

        # 4. Multi-output prediction head (unused fallback; decoder LSTMs are used instead)
        self.multi_output_predictor = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim * 2),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim * 2, node_input_dim * self.num_steps)
        )

        # Brief structure log
        logger.info(
            f"[Model] Pipeline: SlidingWindow[t-{window_size - 1}...t] -> GAT(x{num_gnn_layers}) -> LSTM[t-{window_size - 1}...t] -> MultiOutput[t+1...t+{num_steps}]; "
            f"node_in={node_input_dim}, hidden={hidden_dim}, heads={num_heads}, "
            f"window={self.window_size}, steps={self.num_steps}, dropout={dropout}")

    def forward(self, node_x=None, edge_index=None, is_training=True, window_data=None, num_steps=None):
        """Forward pass.

        1) Takes the sliding window [t-W+1, ..., t-1, t] including step t
        2) Embeds and GAT-aggregates each timestep of the window
        3) Feeds the GAT outputs into the encoder LSTM
        4) Decodes num_steps future timesteps in one shot

        Args:
            node_x: current-step node features (legacy interface; unused here)
            window_data: sliding-window data, a sequence of window_size tensors,
                each shaped [batch_size, num_nodes, node_input_dim]
            edge_index: edge index of shape [2, num_edges]
            is_training: training-mode flag (unused here; kept for interface compatibility)
            num_steps: number of steps to predict; must equal the value given
                at construction (None means use that value)

        Returns:
            multi_step_outputs: tensor [num_steps, batch_size, num_nodes, node_input_dim]
        """
        # Default to the num_steps set at construction
        if num_steps is None:
            num_steps = self.num_steps

        # Enforce consistency with construction to avoid output-dim mismatches
        if num_steps != self.num_steps:
            raise ValueError(f"num_steps({num_steps}) 必须与初始化时的值({self.num_steps})一致")

        # Infer batch_size / num_nodes / node_input_dim from the window data
        if window_data is None or len(window_data) == 0:
            raise ValueError("window_data 不能为空且长度需等于初始化的 window_size")
        if len(window_data) != self.window_size:
            raise ValueError(f"滑动窗口长度({len(window_data)})必须等于初始化的window_size({self.window_size})")

        batch_size, num_nodes, node_input_dim = window_data[0].shape

        # Build a batched edge index (nodes of batch b are offset by b*num_nodes)
        batch_edge_indices = []
        for b in range(batch_size):
            # Offset the edge index for this batch element
            offset = b * num_nodes
            batch_edges = edge_index.clone()
            batch_edges = batch_edges + offset
            batch_edge_indices.append(batch_edges)

        # Concatenate the per-batch edge indices
        edge_index_expanded = torch.cat(batch_edge_indices, dim=1)

        # 1. Feature embedding for every timestep
        embedded_time_steps = []
        for t_data in window_data:
            t_embedded = self.node_embedding(t_data)  # [batch_size, num_nodes, hidden_dim]
            embedded_time_steps.append(t_embedded)

        # 2. GAT: spatial aggregation per timestep
        gat_processed_steps = []
        for t_embedded in embedded_time_steps:
            # Flatten node features across the batch
            t_flat = t_embedded.reshape(-1, t_embedded.size(-1))

            # Apply the stacked message-passing (GAT) layers
            for layer in self.node_message_layers:
                t_flat = layer(t_flat, edge_index_expanded)

            # Restore the original shape
            t_processed = t_flat.view(batch_size, num_nodes, -1)  # [batch_size, num_nodes, hidden_dim]
            gat_processed_steps.append(t_processed)

        # 3. LSTM: temporal aggregation of the GAT outputs
        # Stack the window into [batch_size, num_nodes, window_size, hidden_dim]
        window_stacked = torch.stack(gat_processed_steps, dim=2)

        # Reshape to [batch_size*num_nodes, window_size, hidden_dim]
        window_reshaped = window_stacked.reshape(batch_size * num_nodes, -1, window_stacked.size(-1))

        # Run the encoder LSTM over the window (including step t)
        lstm_out, (h_n, c_n) = self.node_lstm(window_reshaped)

        # Repeat the final hidden state num_steps times and decode
        lstm_hidden = h_n[-1].unsqueeze(1)
        lstm_hidden = lstm_hidden.repeat(1, self.num_steps, 1)
        multi_output, _ = self.decoder_lstm1(lstm_hidden)
        multi_output, _ = self.decoder_lstm2(multi_output)

        # Reshape to [num_steps, batch_size, num_nodes, node_input_dim]
        multi_step_outputs = multi_output.view(batch_size, num_nodes, self.num_steps, node_input_dim)
        multi_step_outputs = multi_step_outputs.permute(2, 0, 1, 3)

        return multi_step_outputs


class ModelPredictor:
    """Loads a saved SlidingWindowGATLSTM checkpoint and runs inference.

    Rebuilds the DataProcessor and model from the checkpoint's metadata,
    restores the fitted normalizer, and provides normalization and
    prediction-export helpers.
    """

    def __init__(self, model_path):
        """
        Initialize the predictor.

        Args:
            model_path: path to the saved model file (torch.save checkpoint)
        """
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        # Register the classes on __main__ so unpickling finds them even if
        # the checkpoint was saved from a script where they lived in __main__
        try:
            main_module = sys.modules.get('__main__')
            if main_module is not None:
                setattr(main_module, 'FeatureNormalizer', FeatureNormalizer)
                setattr(main_module, 'DataProcessor', DataProcessor)
                setattr(main_module, 'SlidingWindowGATLSTM', SlidingWindowGATLSTM)
        except Exception:
            pass

        # Load the checkpoint (weights_only=False: contains pickled objects;
        # NOTE(review): only load trusted model files)
        self.model_info = torch.load(model_path, map_location=self.device, weights_only=False)

        # Extract processor metadata
        self.node_features = self.model_info['processor_info']['node_features']
        self.node_mapping = self.model_info['processor_info']['node_mapping']
        self.edge_mapping = self.model_info['processor_info']['edge_mapping']
        self.edge_connections = self.model_info['processor_info']['edge_connections']
        self.node_types = self.model_info['processor_info']['node_types']
        self.control_features = self.model_info['processor_info']['control_features']

        # Rebuild the DataProcessor from the checkpoint metadata
        self.processor = DataProcessor(
            node_features=self.node_features,
            node_mapping=self.node_mapping,
            edge_mapping=self.edge_mapping,
            edge_connections=self.edge_connections,
            node_types=self.node_types,
            control_features=self.control_features
        )

        # The fitted normalizer must be present in the checkpoint
        if 'normalizer' in self.model_info:
            self.normalizer = self.model_info['normalizer']
            self.normalizer.processor = self.processor  # rebind to the fresh processor
        else:
            raise RuntimeError("模型文件未包含normalizer字段，请确认训练及保存方式一致！")

        self.model_params = self.model_info['model_params']

        # Rebuild the SlidingWindowGATLSTM model
        prediction_params = self.model_info.get('prediction_params', {})
        num_steps = prediction_params.get('num_steps', 144)  # default: 144 steps

        self.model = SlidingWindowGATLSTM(
            node_input_dim=len(self.node_features),
            hidden_dim=self.model_params['hidden_dim'],
            num_gnn_layers=self.model_params.get('num_gnn_layers', 1),
            num_heads=self.model_params.get('num_heads', 2),
            dropout=self.model_params.get('dropout', 0.1),
            window_size=self.model_params.get('window_size', 1),
            num_steps=num_steps,
            lstm_layers=self.model_params.get('lstm_layers', 1),
            decoder_lstm_layer_1=self.model_params.get('decoder_lstm_layer_1', 1),
            decoder_lstm_layer_2=self.model_params.get('decoder_lstm_layer_2', 1)
        ).to(self.device)

        # Load the trained weights and switch to eval mode
        self.model.load_state_dict(self.model_info['model_state_dict'])
        self.model.eval()

        # Sliding-window size used at inference time
        self.window_size = self.model_params['window_size']

        # -- keep original attributes --
        self.num_nodes = len(self.node_mapping)
        self.num_features = len(self.node_features)
        self.edge_index = self._build_edge_index()

    def _build_edge_index(self):
        """Build the [2, num_edges] edge index tensor from edge_connections."""
        edges = []
        for edge_id, (source, target) in self.edge_connections.items():
            source_idx = next((idx for idx, nid in self.node_mapping.items() if nid == source), None)
            target_idx = next((idx for idx, nid in self.node_mapping.items() if nid == target), None)
            if source_idx is not None and target_idx is not None:
                edges.append((source_idx, target_idx))

        # Convert to a PyTorch tensor (empty [2, 0] tensor if no edges)
        edge_index = torch.tensor(edges, dtype=torch.long).t() if edges else torch.zeros((2, 0), dtype=torch.long)
        return edge_index.to(self.device)

    def normalize_input(self, input_data):
        """
        Normalize input data with the restored per-(node type, feature) scalers.

        Args:
            input_data: input array of shape [num_timesteps, num_nodes, num_features]

        Returns:
            normalized_data: normalized tensor of the same shape
        """
        num_timesteps, num_nodes, num_features = input_data.shape
        normalized_data = torch.zeros_like(torch.tensor(input_data, dtype=torch.float32))

        # Iterate over all nodes
        for node_idx in range(num_nodes):
            original_node_id = self.node_mapping[node_idx]
            node_type = self.node_types.get(original_node_id, "Unknown")

            # Feature list this node type may carry
            type_features = self.normalizer.processor.node_type_features.get(node_type, [])

            # Normalize each feature independently
            for feat in type_features:
                if feat not in self.node_features:
                    continue

                # Classify the feature as model input (control) or output
                is_input = feat in self.normalizer.input_features
                feat_type = "input" if is_input else "output"

                # Column index of this feature
                feat_idx = self.node_features.index(feat)

                # Whether this feature is scaled in log space
                need_log_transform = feat in self.normalizer.log_transform_features

                if need_log_transform:
                    # Look up the matching log-space scaler
                    log_scaler_key = f"{node_type}_{feat}_{feat_type}_log"

                    if log_scaler_key in self.normalizer.scalers:
                        log_scaler = self.normalizer.scalers[log_scaler_key]
                        data = input_data[:, node_idx, feat_idx]
                        # Clamp to positive values
                        data = np.maximum(data, 1e-10)
                        # Log transform
                        log_data = np.log1p(data)

                        # Min-max normalization (dict form scales to [-1, 1])
                        if isinstance(log_scaler, dict) and 'min' in log_scaler and 'max' in log_scaler:
                            x_min = log_scaler['min']
                            x_max = log_scaler['max']
                            # Formula: x_norm = 2 * (x - x_min) / (x_max - x_min) - 1
                            transformed = 2 * (log_data - x_min) / (x_max - x_min + 1e-10) - 1
                        else:
                            # Legacy format: sklearn scaler object
                            transformed = log_scaler.transform(log_data.reshape(-1, 1)).reshape(-1)

                        normalized_data[:, node_idx, feat_idx] = torch.tensor(transformed, dtype=torch.float32)
                else:
                    # Look up the matching plain scaler
                    scaler_key = f"{node_type}_{feat}_{feat_type}"

                    if scaler_key in self.normalizer.scalers:
                        scaler = self.normalizer.scalers[scaler_key]
                        data = input_data[:, node_idx, feat_idx]

                        # Min-max normalization (dict form scales to [-1, 1])
                        if isinstance(scaler, dict) and 'min' in scaler and 'max' in scaler:
                            x_min = scaler['min']
                            x_max = scaler['max']
                            # Formula: x_norm = 2 * (x - x_min) / (x_max - x_min + 1e-10) - 1
                            transformed = 2 * (data - x_min) / (x_max - x_min + 1e-10) - 1
                        else:
                            # Legacy format: sklearn scaler object
                            transformed = scaler.transform(data.reshape(-1, 1)).reshape(-1)

                        normalized_data[:, node_idx, feat_idx] = torch.tensor(transformed, dtype=torch.float32)

        return normalized_data

    def _save_predictions(self, predictions, save_dir):
        """Save predictions grouped by node type, with a timestamp column.

        Writes one CSV per node type plus a combined all_predictions.csv.
        Returns the combined column dict (including the TIME column).
        """

        # Generate the timestamp column
        num_timesteps = predictions.shape[0]
        time_intervals = create_time_intervals(num_timesteps)

        # Column data grouped by node type
        node_type_data = {
            'NO': {},
            'E': {},
            'T': {},
            'H': {},
            'B': {},
            'RG': {},
            'RE': {},
            'KC': {},
            'Unknown': {}
        }

        # Map node ids to human-readable node names
        node_id_to_name = self._create_node_name_mapping()

        # Collect all columns, grouped by node type

        for node_idx in range(predictions.shape[1]):
            original_node_id = self.node_mapping[node_idx]
            node_type = self.node_types.get(original_node_id, "Unknown")

            # Make sure the node type exists in the dict
            if node_type not in node_type_data:
                node_type_data[node_type] = {}

            # Fall back to the node id when no name is known
            node_name = node_id_to_name.get(original_node_id, f"node_{original_node_id}")

            # Features this node type is allowed to have
            allowed_features = NODE_TYPE_FEATURES.get(node_type, self.node_features)

            # Only export features allowed for this node type
            for feat_idx, feat_name in enumerate(self.node_features):
                # Skip features not in the allowed list
                if feat_name in allowed_features:
                    col_name = f'{node_name}_{feat_name}'
                    node_type_data[node_type][col_name] = predictions[:, node_idx, feat_idx]

        # Combined dict holding every prediction column
        all_predictions = {'TIME': time_intervals}  # add the time column

        # Write one CSV per node type
        for node_type, data in node_type_data.items():
            if data:  # only write node types that have data
                # Data dict including the time column
                data_with_time = {'TIME': time_intervals}
                data_with_time.update(data)

                # Build the DataFrame
                df = pd.DataFrame(data_with_time)

                # Save as comma-separated CSV
                file_path = os.path.join(save_dir, f'{node_type}_predictions.csv')
                df.to_csv(file_path, index=False)

                # Merge into the combined dict (time column excluded to avoid duplication)
                all_predictions.update(data)

        # Save all predictions in one file
        all_predictions_df = pd.DataFrame(all_predictions)
        all_predictions_df.to_csv(os.path.join(save_dir, 'all_predictions.csv'), index=False)

        # Always return a dict
        return all_predictions

    def _create_node_name_mapping(self, input_data: list = None):
        """Build a node-id -> node-name map from the preprocessed input CSVs.

        Row 0 of each file holds feature headers ("name:feat" or "name_feat"),
        row 1 holds node ids. `input_data` is currently unused.
        """
        node_id_to_name = {}

        # Input file locations (configured proxy-model input directory)
        input_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), settings["filepath"]["proxy_model_input_dir"])
        node_files = {
            'yc': "after_processing_yc_data.csv",
            'B': "after_processing_B_data.csv",
            'E': "after_processing_E_data.csv",
            'KC': "after_processing_KC_data.csv",
            'NO': "after_processing_NO_data.csv",
            'RG': "after_processing_RG_data.csv",
            'RE': "after_processing_RE_data.csv",
            'T': "after_processing_T_data.csv",
            'H': "after_processing_H_data.csv"
        }

        # Extract node-name mappings from each node file
        for file_type, file_name in node_files.items():
            file_path = os.path.join(input_dir, file_name)
            if not os.path.exists(file_path):
                logger.warning(f"⚠️ 文件不存在: {file_path}")
                continue

            try:
                # Read the CSV, trying several encodings
                df = None
                for encoding in ['gbk', 'gb18030', 'utf-8', 'latin1']:
                    try:
                        df = pd.read_csv(file_path, header=None, encoding=encoding, low_memory=False)
                        break
                    except (UnicodeDecodeError, UnicodeError):
                        continue

                if df is None:
                    logger.warning(f"⚠️ 无法读取文件: {file_name}")
                    continue

                # Row 0: feature headers; row 1: node ids
                feature_headers = [str(x) for x in df.iloc[0].values]
                node_numbers = [str(float(x)).strip() if pd.notna(x) else '' for x in df.iloc[1].values]

                # Process each feature column
                for col_idx, header in enumerate(feature_headers):
                    if col_idx == 0:  # skip the TIME column
                        continue

                    # Column name format: "node:feature" or "node_feature";
                    # prefer splitting on ':' and fall back to '_'
                    if ':' in header:
                        # e.g. V_ZHEDXZD_10YONGQ_1201.02.03:FR
                        node_name = header.split(':')[0]  # node name before the colon
                    else:
                        # e.g. V_ZHEDXZD_10YONGQ_1201.02.03_p-
                        parts = header.split('_')
                        if len(parts) < 2:
                            continue
                        node_name = '_'.join(parts[:-1])  # drop the trailing feature segment

                    node_id_str = node_numbers[col_idx]

                    if not node_id_str:
                        continue

                    try:
                        node_id = int(float(node_id_str))
                        # Keep the longest name seen (a complete name is longer than a truncated one)
                        if node_id not in node_id_to_name or len(node_name) > len(node_id_to_name[node_id]):
                            node_id_to_name[node_id] = node_name
                    except ValueError:
                        continue

            except Exception as e:
                logger.warning(f"读取文件时出错: {str(e)}")
                continue

        return node_id_to_name


def load_input_data_from_files(data_dir, node_files, processor_info, window_size):
    """
    Load model input data from the per-node-type CSV files plus the operating
    condition ("yc") file, mirroring the loading scheme used in Train.py.

    Args:
        data_dir: Directory containing the CSV data files.
        node_files: Mapping from file-type key ('yc', 'B', 'E', 'KC', 'NO',
            'RG', 'RE', 'T', 'H') to a CSV file name inside ``data_dir``.
        processor_info: Dict with at least 'node_features' (ordered feature
            name list) and 'node_mapping' (node index -> node id).
        window_size: Sliding-window length; only used for a sanity warning.

    Returns:
        tuple: ``(input_data, control_nodes)`` where ``input_data`` has shape
        [num_timesteps, num_nodes, num_features] and ``control_nodes`` maps a
        node id to ``{'type', 'feature', 'column'}`` describing its control
        point ('column' is 0-based among the control columns, i.e. dataframe
        column ``column + 1``).
    """
    node_features = processor_info['node_features']
    node_mapping = processor_info['node_mapping']

    num_nodes = len(node_mapping)
    num_features = len(node_features)

    # Reverse lookup (node id -> node index) built once instead of a linear
    # scan per CSV column. setdefault keeps the FIRST index on duplicate ids,
    # matching the original first-match semantics.
    node_id_to_idx = {}
    for idx, nid in node_mapping.items():
        node_id_to_idx.setdefault(nid, idx)

    # Control-point suffix -> canonical feature name. In the original branch
    # chain the 'type' code and the feature name were identical for every
    # supported suffix, so a single value covers both.
    control_type_map = {
        'SP': 'p',      # set pressure
        'SP+': 'p+',    # outlet pressure (KC / RE nodes)
        'SPD': 'pd',    # downstream pressure (RG nodes)
        'SNQ': 'q',     # set flow
        'FR': 'fr',     # valve state (B nodes)
        'ST': 'st',     # status (KC nodes)
        # gas composition fractions
        'SC1': 'NC1', 'SC2': 'NC2', 'SC3': 'NC3', 'SCO2': 'NCO2',
        'SN2': 'NN2', 'SNC4': 'NNC4', 'SIC4': 'NIC4', 'SH2S': 'NH2S',
        'SIC5': 'NIC5', 'SNC5': 'NNC5', 'SNC6': 'NNC6',
    }

    def read_csv_multi_encoding(path):
        """Read a CSV trying several encodings; last resort is pandas' default."""
        for enc in ['utf-8', 'gbk', 'gb2312', 'latin1']:
            try:
                return pd.read_csv(path, header=None, encoding=enc, low_memory=False)
            except Exception:
                continue
        return pd.read_csv(path, header=None, low_memory=False)

    def coerce_to_floats(values, open_token):
        """Convert one raw CSV column to a list of floats.

        ``open_token``/'CLOSED' strings map to 1.0/0.0 (the node-type files
        use 'OPEN' while the yc file uses 'OPENED' — preserved as-is, TODO
        confirm the vocabulary difference is intentional); other unparsable
        strings become 0.0.
        """
        out = []
        for val in values:
            if isinstance(val, str):
                token = val.strip().upper()
                if token == open_token:
                    out.append(1.0)
                elif token == 'CLOSED':
                    out.append(0.0)
                else:
                    try:
                        out.append(float(val))
                    except ValueError:
                        out.append(0.0)
            else:
                out.append(float(val))
        return out

    # --- operating-condition (yc) file: rows 0/1 give point name / number ---
    yc_file = os.path.join(data_dir, node_files['yc'])
    yc_df = read_csv_multi_encoding(yc_file)

    # Column 0 is TIME; control points start at dataframe column 1.
    control_names = [str(x).strip() for x in yc_df.iloc[0, 1:].values]
    control_numbers = [str(x).strip() for x in yc_df.iloc[1, 1:].values]
    control_data = yc_df.iloc[2:]

    num_timesteps = len(control_data)

    # [num_nodes, num_timesteps, num_features]; transposed before returning.
    node_features_data = np.zeros((num_nodes, num_timesteps, num_features))

    # Parse control-point descriptors.
    control_nodes = {}
    for col, (name, number) in enumerate(zip(control_names, control_numbers)):
        if not (pd.notna(number) and str(number).strip()):
            continue
        try:
            # int(float('nan')) raises ValueError, filtering 'nan' strings too.
            node_id = int(float(number))
        except ValueError:
            continue

        # Control type suffix, e.g. SP, SNQ, FR, ST, SP+, SPD, SC1, ...
        control_type = name.split(':')[-1].strip().upper() if ':' in name else name[-2:].upper()
        feature = control_type_map.get(control_type)
        if feature is None:
            continue

        control_nodes[node_id] = {
            'type': feature,
            'feature': feature,
            'column': col,
        }

    # --- per-node-type feature files (B, E, KC, NO, RG, RE, T, H) ---
    # yc values are written AFTER these so control data is never overwritten.
    for file_type in ['B', 'E', 'KC', 'NO', 'RG', 'RE', 'T', 'H']:
        file_path = os.path.join(data_dir, node_files[file_type])
        if not os.path.exists(file_path):
            logger.warning(f"⚠️ 节点特征文件不存在: {file_path}")
            continue
        df = read_csv_multi_encoding(file_path)

        # Row 0: headers like NODE_NAME_<feature>; row 1: node numbers.
        feature_headers = [str(x) for x in df.iloc[0].values]
        node_numbers = [str(float(x)).strip() if pd.notna(x) else '' for x in df.iloc[1].values]
        feature_data = df.iloc[2:]

        for col_idx, header in enumerate(feature_headers):
            parts = header.split('_')
            if len(parts) < 2:  # also skips the single-token TIME column
                continue

            feature_type = parts[-1].lower()
            node_id_str = node_numbers[col_idx]
            if not node_id_str:
                continue

            try:
                node_id = int(float(node_id_str))
            except ValueError:
                continue

            node_idx = node_id_to_idx.get(node_id)
            if node_idx is None or feature_type not in node_features:
                continue

            feat_idx = node_features.index(feature_type)

            col_data = coerce_to_floats(feature_data.iloc[:, col_idx], 'OPEN')

            # Truncate or zero-pad so every column spans num_timesteps.
            if len(col_data) > num_timesteps:
                col_data = col_data[:num_timesteps]
            elif len(col_data) < num_timesteps:
                col_data.extend([0.0] * (num_timesteps - len(col_data)))

            node_features_data[node_idx, :, feat_idx] = np.array(col_data)

    # --- write yc control values last so they take priority ---
    for node_id, node_info in control_nodes.items():
        col = node_info['column']
        control_feature = node_info['feature']

        node_idx = node_id_to_idx.get(node_id)
        if node_idx is None or control_feature not in node_features:
            continue

        feat_idx = node_features.index(control_feature)

        # BUGFIX: ``col`` counts control columns only, while ``control_data``
        # still contains the TIME column at dataframe position 0, so the data
        # lives at dataframe column ``col + 1``.  The previous code indexed
        # ``col`` and read the column to the left (TIME for the first point,
        # whose timestamp strings silently coerced to 0.0).
        if col + 1 < len(control_data.columns):
            col_data = coerce_to_floats(control_data.iloc[:, col + 1], 'OPENED')
            node_features_data[node_idx, :, feat_idx] = np.array(col_data)

    # [num_nodes, T, F] -> [T, num_nodes, F]
    input_data = np.transpose(node_features_data, (1, 0, 2))

    # Sanity check only; caller decides how to handle short inputs.
    if input_data.shape[0] < window_size:
        logger.warning(f"输入数据时间步数 {input_data.shape[0]} 小于等于窗口大小 {window_size}")

    # 返回工况节点信息和输入数据
    return input_data, control_nodes
