"""
Main script to run the IDBO-ELM SOC estimation framework
"""

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import argparse
import os
import time
import glob
from sklearn.model_selection import train_test_split

from battery_data import load_panasonic_data, generate_synthetic_data, prepare_sequence_data, visualize_battery_data
from idbo_elm_soc import IDBO_ELM_SOC


def parse_arguments():
    """Define and parse the command line interface of the framework."""
    p = argparse.ArgumentParser(description='Run IDBO-ELM SOC estimation framework')

    # --- data source selection ---
    p.add_argument('--data_dir', type=str,
                   default='E:/gong/plant/article/dung/wykht8y7tg-1/Panasonic 18650PF Data/-20degC/Drive cycles',
                   help='Directory containing battery data files')
    p.add_argument('--data_file', type=str, default='06-23-17_23.35 n20degC_HWFET_Pan18650PF.mat',
                   help='Name of the data file (without path). If not provided, synthetic data will be generated.')
    p.add_argument('--list_files', action='store_true',
                   help='List all .mat files in the data directory and exit')
    p.add_argument('--data_format', type=str, choices=['csv', 'mat'], default='mat',
                   help='Format of the data file: csv for standard CSV, mat for Panasonic 18650PF .mat files')
    p.add_argument('--soc_method', type=str,
                   choices=['ah_counting', 'voltage_based', 'combined'],
                   default='voltage_based',
                   help='Method to calculate SOC for Panasonic data: ah_counting (ampere-hour counting), '
                        'voltage_based (estimate from voltage), or combined (both methods)')

    # --- feature / target columns (CSV input only) ---
    p.add_argument('--feature_cols', type=str, nargs='+',
                   default=['voltage', 'current', 'temperature', 'resistance', 'power'],
                   help='Column names to use as features (for CSV data)')
    p.add_argument('--target_col', type=str, default='soc',
                   help='Column name of the target SOC (for CSV data)')

    # --- model / optimizer hyper-parameters ---
    p.add_argument('--hidden_neurons', type=int, default=20,
                   help='Number of hidden neurons in ELM')
    p.add_argument('--population_size', type=int, default=30,
                   help='Population size for IDBO')
    p.add_argument('--max_iterations', type=int, default=100,
                   help='Maximum iterations for IDBO')
    p.add_argument('--window_size', type=int, default=10,
                   help='Size of sliding window for sequence-based features')
    p.add_argument('--random_state', type=int, default=42,
                   help='Random seed for reproducibility')

    # --- output / preprocessing options ---
    p.add_argument('--output_dir', type=str, default='results',
                   help='Directory to save results')
    p.add_argument('--downsample', type=int, default=1,
                   help='Downsample factor for large datasets (take every Nth sample)')

    return p.parse_args()


def find_file_in_directory(directory, filename):
    """
    Search a directory tree for a .mat file, allowing partial name matches.

    Args:
        directory: directory to search (searched recursively)
        filename: full or partial file name to look for (case-insensitive)

    Returns:
        Full path of the first matching .mat file, or None if nothing matched
        (or the directory does not exist).
    """
    # Bail out early on a missing or non-directory path.
    if not os.path.exists(directory) or not os.path.isdir(directory):
        print(f"目录不存在或不是一个有效目录: {directory}")
        return None

    needle = filename.lower()
    # Walk the tree in os.walk order and return the first .mat file whose
    # basename contains the requested name — equivalent to collecting every
    # .mat file first and filtering afterwards.
    for root, _dirs, files in os.walk(directory):
        for candidate in files:
            if candidate.endswith('.mat') and needle in candidate.lower():
                return os.path.join(root, candidate)
    return None


def list_mat_files(directory):
    """
    Print every .mat file found under *directory*, searched recursively.

    Args:
        directory: directory to scan
    """
    if not os.path.exists(directory):
        print(f"目录不存在: {directory}")
        return

    print(f"\n在 {directory} 中找到的.mat文件:")
    print("-" * 80)

    count = 0
    for root, _dirs, files in os.walk(directory):
        # Show paths relative to the scanned directory; files directly in it
        # are printed by bare name.
        rel_path = os.path.relpath(root, directory)
        for name in files:
            if not name.endswith('.mat'):
                continue
            count += 1
            shown = name if rel_path == "." else os.path.join(rel_path, name)
            print(f"{count}. {shown}")

    if count == 0:
        print("没有找到.mat文件")
    else:
        print(f"\n共找到 {count} 个.mat文件")


def clean_data(X, y):
    """
    Clean the dataset: drop rows containing NaN or infinite values, then
    optionally remove IQR-based outliers in the target.

    Args:
        X: feature matrix
        y: target vector

    Returns:
        The cleaned (X, y) pair, cast to float64.
    """
    # Report the incoming data before touching anything.
    print(f"清理前数据形状: X={X.shape}, y={y.shape}")
    print(f"X中NaN值数量: {np.isnan(X).sum()}")
    print(f"y中NaN值数量: {np.isnan(y).sum()}")

    # Nothing can be salvaged when either array is entirely NaN.
    if np.isnan(X).all() or np.isnan(y).all():
        print("警告: 所有数据都是NaN值，无法清理。返回原始数据。")
        return X, y

    # Drop any row with a NaN in the features or the target.
    bad_nan = np.isnan(X).any(axis=1) | np.isnan(y)
    if bad_nan.any():
        print(f"发现 {np.sum(bad_nan)} 行包含NaN值，将被移除")
        if (~bad_nan).sum() > 0:
            X, y = X[~bad_nan], y[~bad_nan]
        else:
            # Every row is bad: fall back to zero-filling instead of
            # returning an empty dataset.
            print("警告: 移除NaN值后没有剩余数据。将NaN值替换为0。")
            X = np.nan_to_num(X, nan=0.0)
            y = np.nan_to_num(y, nan=0.0)

    # Drop any row containing +/- infinity.
    bad_inf = np.isinf(X).any(axis=1) | np.isinf(y)
    if bad_inf.any():
        print(f"发现 {np.sum(bad_inf)} 行包含无穷值，将被移除")
        if (~bad_inf).sum() > 0:
            X, y = X[~bad_inf], y[~bad_inf]
        else:
            # Every row is bad: clamp infinities to large finite values.
            print("警告: 移除无穷值后没有剩余数据。将无穷值替换为大数值。")
            X = np.nan_to_num(X, posinf=1e10, neginf=-1e10)
            y = np.nan_to_num(y, posinf=100.0, neginf=0.0)

    # Optional IQR outlier filter on the target; only attempted when
    # there are enough samples for the quartiles to be meaningful.
    if len(y) > 10:
        try:
            q1 = np.percentile(y, 25)
            q3 = np.percentile(y, 75)
            spread = q3 - q1
            low = q1 - 1.5 * spread
            high = q3 + 1.5 * spread
            outliers = (y < low) | (y > high)
            if outliers.any():
                print(f"发现 {np.sum(outliers)} 行包含异常值，将被移除")
                # Only apply the filter if at least 10 samples survive.
                if (~outliers).sum() > 10:
                    X, y = X[~outliers], y[~outliers]
                else:
                    print("警告: 移除异常值后数据太少，将保留所有数据。")
        except Exception as e:
            print(f"异常值检测失败: {e}")

    # Normalize dtypes for the downstream model code.
    X = X.astype(np.float64)
    y = y.astype(np.float64)

    print(f"数据清理完成。清理后的数据形状: X={X.shape}, y={y.shape}")
    return X, y


def main():
    """Main function to run the SOC estimation framework.

    Pipeline: parse CLI arguments -> locate and load battery data (or fall
    back to synthetic data) -> clean it -> optionally build sliding-window
    features -> split into train/test sets -> train the IDBO-ELM model ->
    save metrics, predictions and plots to ``args.output_dir``.  If
    IDBO-ELM training raises, a RandomForest regressor is trained as a
    fallback and its results are saved instead.
    """
    # Parse arguments
    args = parse_arguments()
    
    # If --list_files was requested, just list the .mat files and exit
    if args.list_files:
        list_mat_files(args.data_dir)
        return
    
    # Create output directory if it doesn't exist
    os.makedirs(args.output_dir, exist_ok=True)
    
    # Seed NumPy for reproducibility (affects synthetic data and the split)
    np.random.seed(args.random_state)
    
    # Resolve the full path of the data file, if one was requested
    full_path = None
    if args.data_file:
        # First try the file name as given (absolute or relative to cwd)
        if os.path.exists(args.data_file):
            full_path = args.data_file
            print(f"直接找到数据文件: {full_path}")
        # Otherwise look inside the configured data directory
        elif args.data_dir:
            # Try the directly joined path first
            direct_path = os.path.join(args.data_dir, args.data_file)
            if os.path.exists(direct_path):
                full_path = direct_path
                print(f"在指定目录中找到数据文件: {full_path}")
            else:
                # Fall back to a fuzzy (substring) search of the whole tree
                print(f"在目录 {args.data_dir} 中搜索文件 {args.data_file}...")
                found_path = find_file_in_directory(args.data_dir, args.data_file)
                if found_path:
                    full_path = found_path
                    print(f"找到匹配的数据文件: {full_path}")
                else:
                    print(f"在 {args.data_dir} 及其子目录中未找到匹配的文件")
                    # Show every available .mat file so the user can pick one
                    list_mat_files(args.data_dir)
    
    # Load or generate data
    if full_path and os.path.exists(full_path):
        # Load data based on format
        if args.data_format == 'mat':
            # Load Panasonic 18650PF data from .mat file
            try:
                # Load the Panasonic battery dataset with the chosen SOC method
                X, y = load_panasonic_data(
                    full_path, 
                    soc_calculation_method=args.soc_method
                )
                feature_names = ['Voltage', 'Current', 'Ah', 'Power', 'Battery_Temp']
                
                # Remove NaN values and outliers before training
                X, y = clean_data(X, y)
                
                # Make sure enough samples survived cleaning
                if X.shape[0] < 10:
                    print("警告: 数据量太少，无法进行有效的训练。将使用合成数据。")
                    X, y = generate_synthetic_data(n_samples=2000)
                    feature_names = ['Voltage', 'Current', 'Temperature', 'Resistance', 'Power']
                else:
                    # Downsample if needed (for large datasets)
                    if args.downsample > 1:
                        print(f"Downsampling data by factor of {args.downsample}")
                        X = X[::args.downsample]
                        y = y[::args.downsample]
                    
                    print(f"Data loaded. Features: {feature_names}")
                    print(f"Data shape: X={X.shape}, y={y.shape}")
            except Exception as e:
                # Any load failure falls back to synthetic data
                print(f"加载.mat文件失败: {e}")
                print("将使用合成数据代替。")
                X, y = generate_synthetic_data(n_samples=2000)
                feature_names = ['Voltage', 'Current', 'Temperature', 'Resistance', 'Power']
        else:
            # Load CSV data
            try:
                # Read the CSV file directly
                data = pd.read_csv(full_path)
                # Extract the feature columns and the target column
                X = data[args.feature_cols].values
                y = data[args.target_col].values
                feature_names = args.feature_cols
                
                # Clean the loaded data
                X, y = clean_data(X, y)
            except Exception as e:
                print(f"加载CSV文件失败: {e}")
                print("将使用合成数据代替。")
                X, y = generate_synthetic_data(n_samples=2000)
                feature_names = ['Voltage', 'Current', 'Temperature', 'Resistance', 'Power']
    else:
        if args.data_file:
            print(f"未能找到数据文件，将使用合成数据")
        # Generate synthetic data
        print("Generating synthetic data...")
        X, y = generate_synthetic_data(n_samples=2000)
        feature_names = ['Voltage', 'Current', 'Temperature', 'Resistance', 'Power']
    
    # Check how much the SOC target actually varies
    soc_min = np.min(y)
    soc_max = np.max(y)
    soc_range = soc_max - soc_min
    
    if soc_range < 1.0:
        print(f"警告: SOC变化范围太小 ({soc_min:.2f}% - {soc_max:.2f}%)，可能导致模型训练困难")
        
        # If SOC barely changes, retry with a different SOC calculation method
        if args.data_format == 'mat' and args.soc_method == 'ah_counting':
            print("尝试使用基于电压的方法重新计算SOC...")
            try:
                # Reload the Panasonic data using the voltage-based SOC method
                X, y = load_panasonic_data(
                    full_path, 
                    soc_calculation_method='voltage_based'
                )
                feature_names = ['Voltage', 'Current', 'Ah', 'Power', 'Battery_Temp']
                X, y = clean_data(X, y)
                
                # Re-check the SOC range after the recomputation
                new_soc_min = np.min(y)
                new_soc_max = np.max(y)
                new_soc_range = new_soc_max - new_soc_min
                
                print(f"使用基于电压的方法后，SOC范围: {new_soc_min:.2f}% - {new_soc_max:.2f}%")
                
                if new_soc_range < 1.0:
                    print("警告: 即使使用基于电压的方法，SOC变化范围仍然太小，可能影响模型性能")
            except Exception as e:
                print(f"重新计算SOC失败: {e}")
    
    # Visualize data
    try:
        visualize_battery_data(X, y, feature_names)
    except Exception as e:
        print(f"可视化数据失败: {e}")
    
    # Prepare sliding window data if window_size > 1
    if args.window_size > 1:
        print(f"Preparing sliding window data with window size {args.window_size}...")
        X, y = prepare_sequence_data(X, y, window_size=args.window_size)
    
    # Split data into training and testing sets
    # NOTE(review): train_test_split shuffles by default, so temporal order
    # is not preserved in the split — presumably intentional here.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=args.random_state
    )
    
    print(f"Training data shape: {X_train.shape}, Testing data shape: {X_test.shape}")
    
    try:
        # Initialize IDBO-ELM framework with reduced complexity
        # Cap the hidden-layer size to avoid SVD convergence problems
        hidden_neurons = min(args.hidden_neurons, 10)  # cap hidden-layer size
        print(f"Using {hidden_neurons} hidden neurons (reduced from {args.hidden_neurons} to avoid SVD convergence issues)")
        
        idbo_elm = IDBO_ELM_SOC(
            hidden_neurons=hidden_neurons,
            activation='sigmoid',
            population_size=min(args.population_size, 20),  # shrink the population
            max_iterations=args.max_iterations,
            random_state=args.random_state
        )
        
        # Train the model
        start_time = time.time()
        results = idbo_elm.fit(X_train, y_train.reshape(-1, 1))
        training_time = time.time() - start_time
        
        print(f"\nTraining completed in {training_time:.2f} seconds")
        print(f"Best fitness (validation RMSE): {results['best_fitness']:.4f}")
        print(f"Test RMSE: {results['test_rmse']:.4f}")
        print(f"Test MAE: {results['test_mae']:.4f}")
        print(f"Test R²: {results['test_r2']:.4f}")
        
        # Plot convergence curve
        idbo_elm.plot_convergence()
        
        # Make predictions on test set
        idbo_elm.plot_predictions(X_test, y_test.reshape(-1, 1))
        
        # Save results to CSV
        results_df = pd.DataFrame({
            'Metric': ['Training Time (s)', 'Best Fitness', 'Test RMSE', 'Test MAE', 'Test R²'],
            'Value': [training_time, results['best_fitness'], results['test_rmse'], 
                      results['test_mae'], results['test_r2']]
        })
        
        results_df.to_csv(os.path.join(args.output_dir, 'results.csv'), index=False)
        
        # Save model predictions
        predictions_df = pd.DataFrame({
            'Actual_SOC': y_test.flatten(),
            'Predicted_SOC': idbo_elm.predict(X_test).flatten()
        })
        predictions_df.to_csv(os.path.join(args.output_dir, 'predictions.csv'), index=False)
        
        print(f"Results saved to {args.output_dir}")
    
    except Exception as e:
        print(f"训练过程中出错: {e}")
        import traceback
        traceback.print_exc()
        
        # IDBO-ELM failed — fall back to a simpler model
        print("\n尝试使用更简单的模型...")
        try:
            from sklearn.ensemble import RandomForestRegressor
            
            print("使用随机森林回归器作为替代模型")
            rf_model = RandomForestRegressor(n_estimators=100, random_state=args.random_state)
            rf_model.fit(X_train, y_train)
            
            # Evaluate the fallback model on the held-out test set
            y_pred = rf_model.predict(X_test)
            from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
            rmse = np.sqrt(mean_squared_error(y_test, y_pred))
            mae = mean_absolute_error(y_test, y_pred)
            r2 = r2_score(y_test, y_pred)
            
            print(f"\n随机森林模型评估结果:")
            print(f"RMSE: {rmse:.4f}")
            print(f"MAE: {mae:.4f}")
            print(f"R²: {r2:.4f}")
            
            # Save the fallback metrics
            results_df = pd.DataFrame({
                'Metric': ['RMSE', 'MAE', 'R²'],
                'Value': [rmse, mae, r2]
            })
            results_df.to_csv(os.path.join(args.output_dir, 'rf_results.csv'), index=False)
            
            # Save the fallback predictions
            predictions_df = pd.DataFrame({
                'Actual_SOC': y_test,
                'Predicted_SOC': y_pred
            })
            predictions_df.to_csv(os.path.join(args.output_dir, 'rf_predictions.csv'), index=False)
            
            # Plot actual vs predicted SOC over time
            plt.figure(figsize=(12, 6))
            plt.plot(range(len(y_test)), y_test, 'b-', label='Actual SOC')
            plt.plot(range(len(y_pred)), y_pred, 'r--', label='Predicted SOC')
            plt.title('SOC Estimation: Actual vs Predicted (Random Forest)')
            plt.xlabel('Time Step')
            plt.ylabel('SOC (%)')
            plt.legend()
            plt.grid(True)
            plt.savefig(os.path.join(args.output_dir, 'rf_prediction.png'))
            
            # Scatter plot of actual vs predicted values
            plt.figure(figsize=(8, 8))
            plt.scatter(y_test, y_pred, alpha=0.5)
            plt.plot([min(y_test), max(y_test)], [min(y_test), max(y_test)], 'r--')
            plt.title('Actual vs Predicted SOC (Random Forest)')
            plt.xlabel('Actual SOC (%)')
            plt.ylabel('Predicted SOC (%)')
            plt.grid(True)
            plt.savefig(os.path.join(args.output_dir, 'rf_scatter.png'))
            
            print(f"随机森林模型结果已保存到 {args.output_dir}")
            
        except Exception as e2:
            print(f"替代模型训练失败: {e2}")

# Script entry point: run the full SOC-estimation pipeline when executed directly.
if __name__ == "__main__":
    main()