"""
比较原始DBO和改进DBO在SOC估算方面的性能

该脚本比较原始DBO-ELM和改进DBO-ELM在电池SOC估算任务上的性能，
使用MAE和RMSE作为主要评估指标
"""

import numpy as np
import matplotlib.pyplot as plt
import time
import pandas as pd
import os
import datetime
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import matplotlib as mpl

# Configure matplotlib to render Chinese (CJK) text in figures
plt.rcParams['font.sans-serif'] = ['SimHei']  # font able to display Chinese labels
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with a CJK font
mpl.rcParams['font.family'] = 'SimHei'  # default font family for all text

from dbo import DungBeetleOptimizer
from improved_dbo import ImprovedDungBeetleOptimizer
from elm import ExtremeLearningMachine
from idbo_elm_soc import IDBO_ELM_SOC
from battery_data import load_battery_data, prepare_sequence_data


class DBO_ELM_SOC:
    """Original (baseline) DBO-ELM framework for battery SOC estimation.

    An ExtremeLearningMachine's input weights and hidden biases are tuned by
    the unmodified DungBeetleOptimizer; the ELM solves its own output weights
    in `fit`. Serves as the baseline against IDBO_ELM_SOC in the comparison.
    """
    
    def __init__(
        self,
        hidden_neurons=20,
        activation='sigmoid',
        population_size=30,
        max_iterations=100,
        random_state=None
    ):
        """
        Initialize the DBO-ELM framework.
        
        Args:
            hidden_neurons: number of ELM hidden-layer neurons
            activation: ELM activation function name
            population_size: DBO population size
            max_iterations: maximum number of DBO iterations
            random_state: random seed (also seeds the train/val/test splits)
        """
        self.hidden_neurons = hidden_neurons
        self.activation = activation
        self.population_size = population_size
        self.max_iterations = max_iterations
        self.random_state = random_state
        
        # Initialized later in fit()
        self.elm = None
        self.optimizer = None
        self.input_size = None
        self.output_size = None
        
        # Optimization results
        self.best_params = None
        self.best_fitness = None
        self.convergence_curve = None
        
        # Scalers fitted on the full data in preprocess_data()
        self.X_scaler = MinMaxScaler(feature_range=(0, 1))
        self.y_scaler = MinMaxScaler(feature_range=(0, 1))
    
    def _objective_function(self, params):
        """
        Objective minimized by DBO: ELM validation error for a candidate.
        
        Args:
            params: flat vector of ELM input weights and hidden biases
            
        Returns:
            Scalar fitness combining RMSE, MAE and R² (lower is better)
        """
        # Load the candidate input weights/biases into the ELM
        self.elm.set_params(params)
        
        # Solve the ELM output weights on the training split
        self.elm.fit(self.X_train_scaled, self.y_train_scaled)
        
        # Predict on the validation split
        y_pred = self.elm.predict(self.X_val_scaled)
        
        # Error metrics in scaled space
        rmse, mae, r2 = self._calculate_metrics(y_pred, self.y_val_scaled)
        
        # Composite fitness; the negative R² term rewards better fits
        fitness = 0.6 * rmse + 0.3 * mae - 0.5 * r2
        
        return fitness
    
    def _calculate_metrics(self, y_pred, y_true):
        """
        Compute evaluation metrics.
        
        Args:
            y_pred: predicted values
            y_true: ground-truth values
            
        Returns:
            (RMSE, MAE, R²) tuple
        """
        # Root mean squared error
        rmse = np.sqrt(np.mean((y_pred - y_true) ** 2))
        
        # Mean absolute error
        mae = np.mean(np.abs(y_pred - y_true))
        
        # R² score
        ss_total = np.sum((y_true - np.mean(y_true)) ** 2)
        ss_residual = np.sum((y_true - y_pred) ** 2)
        
        # Guard against a (near-)constant target, where R² is undefined
        if ss_total < 1e-10:
            if ss_residual < 1e-10:
                r2 = 1.0  # perfect prediction
            else:
                r2 = 0.0  # imperfect prediction
        else:
            r2 = 1 - (ss_residual / ss_total)
            # Clamp R² into [-1, 1] so it cannot dominate the fitness
            r2 = np.clip(r2, -1.0, 1.0)
        
        return rmse, mae, r2
    
    def preprocess_data(self, X, y):
        """
        Sanitize, scale and split the data into train/validation/test sets.
        
        Args:
            X: input features
            y: target values (SOC)
            
        Returns:
            (X_train, X_val, X_test, y_train, y_val, y_test) tuple
        """
        # Replace NaN values (the warning strings are user-facing output)
        if np.isnan(X).any():
            print(f"警告: 在特征中发现{np.isnan(X).sum()}个NaN值，替换为零。")
            X = np.nan_to_num(X, nan=0.0)
        
        if np.isnan(y).any():
            print(f"警告: 在目标中发现{np.isnan(y).sum()}个NaN值，替换为零。")
            y = np.nan_to_num(y, nan=0.0)
        
        # Replace infinite values with large finite bounds
        if np.isinf(X).any():
            print(f"警告: 在特征中发现{np.isinf(X).sum()}个无穷值，替换为大值。")
            X = np.nan_to_num(X, posinf=1e10, neginf=-1e10)
        
        if np.isinf(y).any():
            print(f"警告: 在目标中发现{np.isinf(y).sum()}个无穷值，替换为边界值。")
            y = np.nan_to_num(y, posinf=100.0, neginf=0.0)
        
        # Scale features and targets into [0, 1]
        X_scaled = self.X_scaler.fit_transform(X)
        y_scaled = self.y_scaler.fit_transform(y.reshape(-1, 1))
        
        # Split into training and test sets (80% train, 20% test).
        # NOTE(review): this split shuffles, discarding temporal order of the
        # sequence samples — presumably intentional; confirm for time series.
        X_train, X_test, y_train, y_test = train_test_split(
            X_scaled, y_scaled, test_size=0.2, random_state=self.random_state
        )
        
        # Further split the training data into train/validation (80%/20%)
        X_train, X_val, y_train, y_val = train_test_split(
            X_train, y_train, test_size=0.2, random_state=self.random_state
        )
        
        return X_train, X_val, X_test, y_train, y_val, y_test
    
    def fit(self, X, y):
        """
        Train the DBO-ELM framework end to end.
        
        Args:
            X: input features
            y: target values (SOC)
            
        Returns:
            dict with best fitness, convergence curve and test-set metrics
        """
        # Data dimensions
        self.input_size = X.shape[1]
        self.output_size = 1  # SOC is a single scalar per sample
        
        # Preprocess; splits are kept as attributes consumed by
        # _objective_function during optimization
        self.X_train_scaled, self.X_val_scaled, self.X_test_scaled, \
        self.y_train_scaled, self.y_val_scaled, self.y_test_scaled = self.preprocess_data(X, y)
        
        print(f"数据预处理完成。训练样本: {self.X_train_scaled.shape[0]}, "
              f"验证样本: {self.X_val_scaled.shape[0]}, "
              f"测试样本: {self.X_test_scaled.shape[0]}")
        
        # Initialize the ELM
        self.elm = ExtremeLearningMachine(
            input_size=self.input_size,
            hidden_size=self.hidden_neurons,
            output_size=self.output_size,
            activation=self.activation,
            random_state=self.random_state
        )
        
        # Search-space dimensionality: input weights plus hidden biases
        param_size = self.input_size * self.hidden_neurons + self.hidden_neurons
        print(f"参数空间维度: {param_size}")
        
        # Initialize the DBO optimizer over [-1, 1]^param_size
        self.optimizer = DungBeetleOptimizer(
            objective_function=self._objective_function,
            dimensions=param_size,
            population_size=self.population_size,
            max_iterations=self.max_iterations,
            lower_bound=-1.0,
            upper_bound=1.0
        )
        
        # Run the DBO optimization
        print("开始DBO-ELM优化进行SOC估算...")
        self.best_params, self.best_fitness, self.convergence_curve = self.optimizer.optimize()
        
        print(f"优化完成。最佳适应度: {self.best_fitness}")
        
        # Re-fit the final model with the best parameters found
        self.elm.set_params(self.best_params)
        self.elm.fit(self.X_train_scaled, self.y_train_scaled)
        
        # Evaluate on the held-out (scaled) test split
        test_rmse, test_mae, test_r2 = self.evaluate(self.X_test_scaled, self.y_test_scaled)
        
        print(f"最终模型在测试集上的评估:")
        print(f"RMSE: {test_rmse:.6f}")
        print(f"MAE: {test_mae:.6f}")
        print(f"R²: {test_r2:.6f}")
        
        # Collect results
        results = {
            'best_fitness': self.best_fitness,
            'convergence_curve': self.convergence_curve,
            'test_rmse': test_rmse,
            'test_mae': test_mae,
            'test_r2': test_r2
        }
        
        return results
    
    def predict(self, X):
        """
        Predict SOC values for new, unscaled data.
        
        Args:
            X: input features in the original (unscaled) space
            
        Returns:
            predicted SOC values mapped back to the original scale
        """
        # Replace NaN values, mirroring preprocess_data
        if np.isnan(X).any():
            X = np.nan_to_num(X, nan=0.0)
        
        # Scale inputs with the scaler fitted during training
        X_scaled = self.X_scaler.transform(X)
        
        # Predict in scaled space
        y_pred_scaled = self.elm.predict(X_scaled)
        
        # Map predictions back to the original scale
        y_pred = self.y_scaler.inverse_transform(y_pred_scaled)
        
        return y_pred
    
    def evaluate(self, X, y):
        """
        Evaluate model performance on already-scaled data.
        
        Args:
            X: test inputs (already scaled)
            y: ground-truth targets (already scaled)
            
        Returns:
            (RMSE, MAE, R²) tuple
        """
        # Predict in scaled space (no inverse transform here)
        y_pred = self.elm.predict(X)
        
        # Compute the metrics
        return self._calculate_metrics(y_pred, y)


def run_comparison(X, y, sequence_length=10, runs=3, save_dir=None):
    """
    Compare the original DBO-ELM with the improved DBO-ELM on SOC estimation.

    Both frameworks are trained `runs` times with shifted random seeds;
    RMSE, MAE, R² and wall-clock time are collected, and comparison plots,
    a CSV table and a text report are written to `save_dir`.

    Args:
        X: feature data
        y: SOC target values
        sequence_length: window length for building time-series features
        runs: number of independent runs
        save_dir: output directory; a "<timestamp>_result" directory is
            created when None

    Returns:
        dict with per-run RMSE/MAE/R²/time arrays for both algorithms
    """
    # Default to a directory named after the current time
    if save_dir is None:
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        save_dir = f"{timestamp}_result"
    
    # Create the output directory if needed
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
        print(f"创建结果保存目录: {save_dir}")
    else:
        print(f"结果将保存到目录: {save_dir}")
    print(f"\n{'=' * 60}")
    print(f"比较DBO-ELM和IDBO-ELM在SOC估算任务上的性能")
    print(f"{'=' * 60}")
    
    # Build windowed sequence features
    X_seq, y_seq = prepare_sequence_data(X, y, sequence_length)
    print(f"序列数据准备完成。特征形状: {X_seq.shape}, 目标形状: {y_seq.shape}")
    
    # Per-run result storage
    dbo_rmses = []
    dbo_maes = []
    dbo_r2s = []
    dbo_times = []
    dbo_convergence_curves = []
    
    idbo_rmses = []
    idbo_maes = []
    idbo_r2s = []
    idbo_times = []
    idbo_convergence_curves = []
    
    # Shared model/optimizer sizes
    hidden_neurons = 25  # larger hidden layer for more model capacity
    population_size = 40  # larger population for a stronger search
    max_iterations = 60  # more iterations to allow convergence
    
    # Original DBO parameters (defaults otherwise)
    dbo_params = {
        "population_size": population_size,
        "max_iterations": max_iterations,
        "random_state": 42
    }
    
    # Improved DBO parameters (tuned values)
    idbo_params = {
        "population_size": population_size,
        "max_iterations": max_iterations,
        "initial_rolling_factor": 0.8,      # larger initial rolling factor: stronger local search
        "communication_probability": 0.5,    # higher communication probability: more info sharing
        "elite_group_size": 10,             # larger elite group: retain more good solutions
        "opposition_probability": 0.2,       # higher opposition-learning probability: more exploration
        "levy_alpha": 1.2,                  # Lévy-flight exponent tuned for the SOC problem
        "golden_ratio": 1.618,              # golden ratio
        "random_state": 42
    }
    
    # Repeat to gather statistics
    for run in range(runs):
        print(f"\n运行 {run + 1}/{runs}:")
        
        # Original DBO-ELM
        print("\n训练原始DBO-ELM模型...")
        start_time = time.time()
        dbo_elm = DBO_ELM_SOC(
            hidden_neurons=hidden_neurons,
            population_size=dbo_params["population_size"],
            max_iterations=dbo_params["max_iterations"],
            random_state=dbo_params["random_state"] + run
        )
        
        dbo_results = dbo_elm.fit(X_seq, y_seq)
        dbo_time = time.time() - start_time
        
        dbo_rmses.append(dbo_results['test_rmse'])
        dbo_maes.append(dbo_results['test_mae'])
        dbo_r2s.append(dbo_results['test_r2'])
        dbo_times.append(dbo_time)
        dbo_convergence_curves.append(dbo_results['convergence_curve'])
        
        print(f"原始DBO-ELM - RMSE: {dbo_results['test_rmse']:.6f}, MAE: {dbo_results['test_mae']:.6f}, "
              f"R²: {dbo_results['test_r2']:.6f}, 时间: {dbo_time:.2f}s")
        
        # Improved DBO-ELM
        print("\n训练改进DBO-ELM模型...")
        start_time = time.time()
        idbo_elm = IDBO_ELM_SOC(
            hidden_neurons=hidden_neurons,
            population_size=idbo_params["population_size"],
            max_iterations=idbo_params["max_iterations"],
            initial_rolling_factor=idbo_params["initial_rolling_factor"],
            communication_probability=idbo_params["communication_probability"],
            elite_group_size=idbo_params["elite_group_size"],
            opposition_probability=idbo_params["opposition_probability"],
            levy_alpha=idbo_params["levy_alpha"],
            golden_ratio=idbo_params["golden_ratio"],
            random_state=idbo_params["random_state"] + run
        )
        
        idbo_results = idbo_elm.fit(X_seq, y_seq)
        idbo_time = time.time() - start_time
        
        idbo_rmses.append(idbo_results['test_rmse'])
        idbo_maes.append(idbo_results['test_mae'])
        idbo_r2s.append(idbo_results['test_r2'])
        idbo_times.append(idbo_time)
        idbo_convergence_curves.append(idbo_results['convergence_curve'])
        
        print(f"改进DBO-ELM - RMSE: {idbo_results['test_rmse']:.6f}, MAE: {idbo_results['test_mae']:.6f}, "
              f"R²: {idbo_results['test_r2']:.6f}, 时间: {idbo_time:.2f}s")
        
        # Plot predictions for the first run only
        if run == 0:
            # Rebuild a held-out segment from the raw data with the same
            # split seed, then window it into sequences
            _, X_test_orig, _, y_test_orig = train_test_split(
                X.copy(), y.copy(), test_size=0.2, random_state=42
            )
            
            X_test_seq, y_test_seq = prepare_sequence_data(X_test_orig, y_test_orig, sequence_length)
            
            # Predict on the windowed test features
            y_pred_dbo = dbo_elm.predict(X_test_seq)
            y_pred_idbo = idbo_elm.predict(X_test_seq)
            
            # BUG FIX: compare against y_test_seq — the targets aligned with
            # X_test_seq. The previous code plotted y_test_orig, which is
            # sequence_length samples longer and offset from the predictions,
            # so the curves and error plot were misaligned.
            y_true_plot = np.ravel(y_test_seq)[:100]
            dbo_plot = np.ravel(y_pred_dbo)[:100]
            idbo_plot = np.ravel(y_pred_idbo)[:100]
            
            # Actual vs. predicted SOC
            plt.figure(figsize=(12, 6))
            plt.plot(y_true_plot, 'b-', label='实际SOC')
            plt.plot(dbo_plot, 'g--', label='DBO-ELM预测')
            plt.plot(idbo_plot, 'r-.', label='IDBO-ELM预测')
            plt.title('SOC估算: 实际值与预测值对比')
            plt.xlabel('时间步')
            plt.ylabel('SOC (%)')
            plt.legend()
            plt.grid(True)
            plt.savefig(os.path.join(save_dir, 'soc_prediction_comparison.png'))
            plt.close()
            
            # Convergence-curve comparison
            plt.figure(figsize=(10, 6))
            plt.plot(dbo_results['convergence_curve'], 'g-', label='原始DBO-ELM')
            plt.plot(idbo_results['convergence_curve'], 'r-', label='改进DBO-ELM')
            plt.title('收敛曲线对比')
            plt.xlabel('迭代')
            plt.ylabel('适应度值')
            plt.grid(True)
            plt.legend()
            plt.savefig(os.path.join(save_dir, 'convergence_comparison.png'))
            plt.close()
            
            # Absolute-error comparison (now on aligned arrays)
            plt.figure(figsize=(12, 6))
            plt.plot(np.abs(y_true_plot - dbo_plot), 'g-', label='DBO-ELM误差')
            plt.plot(np.abs(y_true_plot - idbo_plot), 'r-', label='IDBO-ELM误差')
            plt.title('SOC估算绝对误差对比')
            plt.xlabel('时间步')
            plt.ylabel('绝对误差 (%)')
            plt.legend()
            plt.grid(True)
            plt.savefig(os.path.join(save_dir, 'error_comparison.png'))
            plt.close()
    
    # Convert to numpy arrays for statistics
    dbo_rmses = np.array(dbo_rmses)
    dbo_maes = np.array(dbo_maes)
    dbo_r2s = np.array(dbo_r2s)
    dbo_times = np.array(dbo_times)
    
    idbo_rmses = np.array(idbo_rmses)
    idbo_maes = np.array(idbo_maes)
    idbo_r2s = np.array(idbo_r2s)
    idbo_times = np.array(idbo_times)
    
    # Report statistics
    print("\n统计结果:")
    print("\n原始DBO-ELM:")
    print(f"平均RMSE: {np.mean(dbo_rmses):.6f} ± {np.std(dbo_rmses):.6f}")
    print(f"平均MAE: {np.mean(dbo_maes):.6f} ± {np.std(dbo_maes):.6f}")
    print(f"平均R²: {np.mean(dbo_r2s):.6f} ± {np.std(dbo_r2s):.6f}")
    print(f"平均时间: {np.mean(dbo_times):.2f}s ± {np.std(dbo_times):.2f}s")
    
    print("\n改进DBO-ELM:")
    print(f"平均RMSE: {np.mean(idbo_rmses):.6f} ± {np.std(idbo_rmses):.6f}")
    print(f"平均MAE: {np.mean(idbo_maes):.6f} ± {np.std(idbo_maes):.6f}")
    print(f"平均R²: {np.mean(idbo_r2s):.6f} ± {np.std(idbo_r2s):.6f}")
    print(f"平均时间: {np.mean(idbo_times):.2f}s ± {np.std(idbo_times):.2f}s")
    
    # Improvement percentages (R² guarded against non-positive baseline)
    rmse_improvement = (np.mean(dbo_rmses) - np.mean(idbo_rmses)) / np.mean(dbo_rmses) * 100
    mae_improvement = (np.mean(dbo_maes) - np.mean(idbo_maes)) / np.mean(dbo_maes) * 100
    r2_improvement = (np.mean(idbo_r2s) - np.mean(dbo_r2s)) / np.mean(dbo_r2s) * 100 if np.mean(dbo_r2s) > 0 else 0
    
    print(f"\n改进百分比:")
    print(f"RMSE改进: {rmse_improvement:.2f}%")
    print(f"MAE改进: {mae_improvement:.2f}%")
    print(f"R²改进: {r2_improvement:.2f}%")
    
    # Grouped bar chart of the error metrics
    plt.figure(figsize=(10, 6))
    bar_width = 0.35
    index = np.arange(2)
    
    plt.bar(index, [np.mean(dbo_maes), np.mean(idbo_maes)], bar_width, yerr=[np.std(dbo_maes), np.std(idbo_maes)], 
            label='MAE', color='blue', alpha=0.7)
    plt.bar(index + bar_width, [np.mean(dbo_rmses), np.mean(idbo_rmses)], bar_width, yerr=[np.std(dbo_rmses), np.std(idbo_rmses)], 
            label='RMSE', color='green', alpha=0.7)
    
    plt.xlabel('算法')
    plt.ylabel('误差')
    plt.title('MAE和RMSE对比')
    plt.xticks(index + bar_width / 2, ('原始DBO-ELM', '改进DBO-ELM'))
    plt.legend()
    plt.grid(True, axis='y')
    plt.savefig(os.path.join(save_dir, 'error_metrics_comparison.png'))
    plt.close()
    
    # Box plots of the per-run distributions
    plt.figure(figsize=(14, 8))
    
    # MAE box plot
    plt.subplot(1, 2, 1)
    plt.boxplot([dbo_maes, idbo_maes], tick_labels=['原始DBO-ELM', '改进DBO-ELM'])
    plt.title('MAE对比')
    plt.ylabel('平均绝对误差')
    plt.grid(True, axis='y')
    
    # RMSE box plot
    plt.subplot(1, 2, 2)
    plt.boxplot([dbo_rmses, idbo_rmses], tick_labels=['原始DBO-ELM', '改进DBO-ELM'])
    plt.title('RMSE对比')
    plt.ylabel('均方根误差')
    plt.grid(True, axis='y')
    
    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, 'error_boxplot_comparison.png'))
    plt.close()
    
    # Tabulate the statistics
    results_df = pd.DataFrame({
        '指标': ['RMSE', 'MAE', 'R²', '时间(秒)'],
        '原始DBO-ELM_平均值': [np.mean(dbo_rmses), np.mean(dbo_maes), np.mean(dbo_r2s), np.mean(dbo_times)],
        '原始DBO-ELM_标准差': [np.std(dbo_rmses), np.std(dbo_maes), np.std(dbo_r2s), np.std(dbo_times)],
        '改进DBO-ELM_平均值': [np.mean(idbo_rmses), np.mean(idbo_maes), np.mean(idbo_r2s), np.mean(idbo_times)],
        '改进DBO-ELM_标准差': [np.std(idbo_rmses), np.std(idbo_maes), np.std(idbo_r2s), np.std(idbo_times)],
        '改进百分比(%)': [rmse_improvement, mae_improvement, r2_improvement, 
                     (np.mean(dbo_times) - np.mean(idbo_times)) / np.mean(dbo_times) * 100 if np.mean(dbo_times) > 0 else 0]
    })
    
    # CSV export (utf-8-sig so Excel detects the encoding)
    results_df.to_csv(os.path.join(save_dir, 'comparison_results.csv'), index=False, encoding='utf-8-sig')
    
    # Plain-text report
    with open(os.path.join(save_dir, 'comparison_results.txt'), 'w', encoding='utf-8') as f:
        f.write(f"统计结果:\n\n")
        f.write(f"原始DBO-ELM:\n")
        f.write(f"平均RMSE: {np.mean(dbo_rmses):.6f} ± {np.std(dbo_rmses):.6f}\n")
        f.write(f"平均MAE: {np.mean(dbo_maes):.6f} ± {np.std(dbo_maes):.6f}\n")
        f.write(f"平均R²: {np.mean(dbo_r2s):.6f} ± {np.std(dbo_r2s):.6f}\n")
        f.write(f"平均时间: {np.mean(dbo_times):.2f}s ± {np.std(dbo_times):.2f}s\n\n")
        
        f.write(f"改进DBO-ELM:\n")
        f.write(f"平均RMSE: {np.mean(idbo_rmses):.6f} ± {np.std(idbo_rmses):.6f}\n")
        f.write(f"平均MAE: {np.mean(idbo_maes):.6f} ± {np.std(idbo_maes):.6f}\n")
        f.write(f"平均R²: {np.mean(idbo_r2s):.6f} ± {np.std(idbo_r2s):.6f}\n")
        f.write(f"平均时间: {np.mean(idbo_times):.2f}s ± {np.std(idbo_times):.2f}s\n\n")
        
        f.write(f"改进百分比:\n")
        f.write(f"RMSE改进: {rmse_improvement:.2f}%\n")
        f.write(f"MAE改进: {mae_improvement:.2f}%\n")
        f.write(f"R²改进: {r2_improvement:.2f}%\n")
        
    print(f"\n统计结果已保存到: {os.path.join(save_dir, 'comparison_results.txt')}")
    print(f"CSV结果已保存到: {os.path.join(save_dir, 'comparison_results.csv')}")
    print(f"图表已保存到: {save_dir}目录")
    
    # Return the raw per-run arrays
    return {
        'dbo_rmses': dbo_rmses,
        'dbo_maes': dbo_maes,
        'dbo_r2s': dbo_r2s,
        'dbo_times': dbo_times,
        'idbo_rmses': idbo_rmses,
        'idbo_maes': idbo_maes,
        'idbo_r2s': idbo_r2s,
        'idbo_times': idbo_times
    }


if __name__ == "__main__":
    import argparse
    
    # Command-line interface for the comparison experiment.
    arg_parser = argparse.ArgumentParser(description='比较DBO-ELM和IDBO-ELM在SOC估算任务上的性能')
    arg_parser.add_argument('--data_dir', type=str, default=None, help='数据目录路径')
    arg_parser.add_argument('--data_file', type=str, default=None, help='数据文件名')
    arg_parser.add_argument('--data_format', type=str, default='mat', choices=['csv', 'mat'], help='数据格式 (csv 或 mat)')
    arg_parser.add_argument('--soc_method', type=str, default='voltage_based',
                            choices=['ah_counting', 'voltage_based', 'combined'],
                            help='SOC计算方法')
    arg_parser.add_argument('--sequence_length', type=int, default=10, help='序列长度')
    arg_parser.add_argument('--runs', type=int, default=3, help='独立运行次数')
    cli_args = arg_parser.parse_args()
    
    # Load the battery data set.
    print("加载电池数据...")
    features, soc_targets = load_battery_data(
        data_dir=cli_args.data_dir,
        data_file=cli_args.data_file,
        data_format=cli_args.data_format,
        soc_method=cli_args.soc_method
    )
    print(f"数据加载完成。特征形状: {features.shape}, 目标形状: {soc_targets.shape}")
    
    # Timestamped output directory for this experiment.
    output_dir = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + "_result"
    
    # Run the comparison, reporting any failure with a full traceback.
    try:
        print("开始运行算法比较...")
        comparison_results = run_comparison(
            X=features,
            y=soc_targets,
            sequence_length=cli_args.sequence_length,
            runs=cli_args.runs,
            save_dir=output_dir
        )
        print("\n比较完成。")
    except Exception as e:
        print(f"\n运行过程中发生错误: {e}")
        import traceback
        traceback.print_exc()
