"""EDA处理器批处理示例

演示如何使用EDA处理器进行批量数据处理，包括：
- 批量文件处理
- 并行处理配置
- 进度监控
- 结果汇总和报告生成
- 错误处理和恢复

适合需要处理大量EDA数据文件的场景。
"""

import numpy as np
import pandas as pd
from pathlib import Path
import logging
from typing import Dict, Any, List, Optional, Tuple
import json
import time
from concurrent.futures import ProcessPoolExecutor, as_completed
import multiprocessing as mp

# Import the EDA processor modules
from ..utils.batch_processor import BatchProcessor, process_eda_batch, find_eda_files
from ..utils.data_io import load_eda_data, save_eda_results
from ..utils.report_generator import ReportGenerator, generate_batch_report
from ..core.pipeline import process_eda_pipeline

# Configure module-level logging for the examples.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def simple_batch_processing_example(data_dir: str, 
                                  output_dir: Optional[str] = None) -> Dict[str, Any]:
    """Simple batch-processing example.

    Demonstrates the basic bulk EDA data processing workflow: locate
    input files (generating sample data when the directory is missing or
    empty), build a per-stage processing configuration, run the batch,
    and print a summary.

    Args:
        data_dir: Directory containing EDA data files.
        output_dir: Output directory path. Defaults to a ``batch_output``
            folder next to ``data_dir``.

    Returns:
        Batch-processing result dictionary; on failure,
        ``{'success': False, 'error': <message>}``.
    """
    print("=" * 60)
    print("简单批处理示例")
    print("=" * 60)
    
    try:
        # 1. Discover EDA data files (fall back to generated sample data).
        print(f"搜索数据目录: {data_dir}")
        
        if not Path(data_dir).exists():
            print("数据目录不存在，创建示例数据...")
            data_dir = create_sample_batch_data()
        
        eda_files = find_eda_files(data_dir)
        print(f"找到 {len(eda_files)} 个EDA数据文件")
        
        if not eda_files:
            print("未找到EDA数据文件，创建示例数据...")
            data_dir = create_sample_batch_data()
            eda_files = find_eda_files(data_dir)
        
        # List at most the first five discovered files.
        for i, file_path in enumerate(eda_files[:5], 1):
            print(f"  {i}. {Path(file_path).name}")
        if len(eda_files) > 5:
            print(f"  ... 还有 {len(eda_files) - 5} 个文件")
        
        # 2. Configure processing parameters for each pipeline stage.
        processing_config = {
            'preprocessing': {
                'algorithm': 'improved',
                'params': {
                    'outlier_threshold': 3.0,
                    'smooth_window': 5,
                    'lowpass_freq': 1.0
                }
            },
            'peak_detection': {
                'algorithm': 'improved',
                'params': {
                    'adaptive_threshold': True,
                    'min_amplitude': 0.01
                }
            },
            'metrics_calculation': {
                'algorithm': 'basic',
                'params': {}
            },
            'quality_assessment': {
                'algorithm': 'basic',
                'params': {}
            }
        }
        
        print(f"\n处理配置:")
        for step, config in processing_config.items():
            print(f"  {step}: {config['algorithm']}")
        
        # 3. Run the batch job.
        print(f"\n开始批处理 {len(eda_files)} 个文件...")
        
        if output_dir is None:
            output_dir = Path(data_dir).parent / "batch_output"
        
        batch_results = process_eda_batch(
            file_paths=eda_files,
            output_dir=output_dir,
            config=processing_config,
            max_workers=2,  # use two worker processes
            save_individual_results=True,
            generate_summary=True
        )
        
        # 4. Display the results.
        print(f"\n批处理完成！")
        print_batch_summary(batch_results)
        
        return batch_results
        
    except Exception as e:
        logger.error(f"简单批处理示例执行失败: {str(e)}")
        return {'success': False, 'error': str(e)}

def advanced_batch_processing_example(data_dir: str,
                                    output_dir: Optional[str] = None) -> Dict[str, Any]:
    """Advanced batch-processing example.

    Demonstrates advanced batch features:
    - custom processing configurations
    - error handling and retries
    - detailed progress monitoring
    - result analysis and report generation

    Args:
        data_dir: Directory containing EDA data files.
        output_dir: Output directory path. Defaults to an
            ``advanced_batch_output`` folder next to ``data_dir``.

    Returns:
        Batch-processing result dictionary; on failure,
        ``{'success': False, 'error': <message>}``.
    """
    print("=" * 60)
    print("高级批处理示例")
    print("=" * 60)
    
    try:
        # 1. Prepare data and configuration.
        if not Path(data_dir).exists():
            print("创建高级示例数据...")
            data_dir = create_advanced_batch_data()
        
        eda_files = find_eda_files(data_dir)
        print(f"找到 {len(eda_files)} 个EDA数据文件")
        
        if output_dir is None:
            output_dir = Path(data_dir).parent / "advanced_batch_output"
        
        # 2. Create the batch-processor instance.
        batch_processor = BatchProcessor(
            max_workers=min(4, mp.cpu_count()),
            chunk_size=2,
            timeout=300,  # 5-minute timeout per task
            retry_attempts=2,
            error_handling='continue'  # keep processing other files on error
        )
        
        # 3. Define several processing profiles to compare.
        processing_configs = {
            'standard': {
                'preprocessing': {'algorithm': 'improved', 'params': {'adaptive_filtering': True}},
                'peak_detection': {'algorithm': 'improved', 'params': {'adaptive_threshold': True}},
                'metrics_calculation': {'algorithm': 'advanced', 'params': {}},
                'quality_assessment': {'algorithm': 'comprehensive', 'params': {}}
            },
            'fast': {
                'preprocessing': {'algorithm': 'basic', 'params': {}},
                'peak_detection': {'algorithm': 'basic', 'params': {}},
                'metrics_calculation': {'algorithm': 'basic', 'params': {}},
                'quality_assessment': {'algorithm': 'basic', 'params': {}}
            },
            'high_quality': {
                'preprocessing': {'algorithm': 'neurokit2', 'params': {}},
                'peak_detection': {'algorithm': 'neurokit2', 'params': {}},
                'metrics_calculation': {'algorithm': 'all', 'params': {}},
                'quality_assessment': {'algorithm': 'comprehensive', 'params': {}}
            }
        }
        
        # 4. Run the batch once per configuration profile.
        all_results = {}
        
        for config_name, config in processing_configs.items():
            print(f"\n使用 '{config_name}' 配置处理...")
            
            config_output_dir = Path(output_dir) / config_name
            
            start_time = time.time()
            
            results = batch_processor.process_batch(
                file_paths=eda_files,
                processing_config=config,
                output_dir=str(config_output_dir),
                save_individual_results=True,
                progress_callback=create_progress_callback(config_name)
            )
            
            processing_time = time.time() - start_time
            
            print(f"'{config_name}' 配置处理完成，耗时: {processing_time:.2f}s")
            
            all_results[config_name] = {
                'results': results,
                'processing_time': processing_time,
                'config': config
            }
        
        # 5. Generate a cross-configuration comparison report.
        print(f"\n生成配置比较报告...")
        comparison_report = generate_configuration_comparison_report(all_results)
        
        # Persist the comparison report as JSON.
        report_file = Path(output_dir) / "configuration_comparison_report.json"
        with open(report_file, 'w', encoding='utf-8') as f:
            json.dump(comparison_report, f, indent=2, ensure_ascii=False)
        
        print(f"比较报告已保存: {report_file}")
        
        return {
            'success': True,
            'all_results': all_results,
            'comparison_report': comparison_report,
            'output_dir': output_dir
        }
        
    except Exception as e:
        logger.error(f"高级批处理示例执行失败: {str(e)}")
        return {'success': False, 'error': str(e)}

def parallel_processing_example(data_dir: str,
                              max_workers: Optional[int] = None) -> Dict[str, Any]:
    """Parallel-processing benchmark example.

    Runs the same batch job with several worker-pool sizes, then reports
    throughput, speedup and parallel efficiency for each configuration.

    Args:
        data_dir: Directory containing EDA data files.
        max_workers: Largest worker-pool size to benchmark. Defaults to
            the machine's CPU count.

    Returns:
        Dict with per-worker-count performance results and the best
        configuration; on failure, ``{'success': False, 'error': <msg>}``.
    """
    print("=" * 60)
    print("并行处理性能测试")
    print("=" * 60)
    
    try:
        # 1. Prepare test data.
        if not Path(data_dir).exists():
            print("创建并行处理测试数据...")
            data_dir = create_parallel_test_data()
        
        eda_files = find_eda_files(data_dir)
        print(f"测试文件数量: {len(eda_files)}")
        
        if len(eda_files) < 4:
            print("文件数量不足，创建更多测试数据...")
            data_dir = create_parallel_test_data(num_files=8)
            eda_files = find_eda_files(data_dir)
        
        # 2. Choose the worker counts to benchmark.
        if max_workers is None:
            max_workers = mp.cpu_count()
        
        # De-duplicate so the same pool size is not benchmarked twice
        # (e.g. max_workers == 2 would otherwise test 2 workers three
        # times). A single-process run (1) is always included so the
        # speedup baseline below exists.
        worker_configs = sorted({1, 2, min(4, max_workers), max_workers})
        
        processing_config = {
            'preprocessing': {'algorithm': 'improved', 'params': {}},
            'peak_detection': {'algorithm': 'improved', 'params': {}},
            'metrics_calculation': {'algorithm': 'basic', 'params': {}},
            'quality_assessment': {'algorithm': 'basic', 'params': {}}
        }
        
        performance_results = {}
        
        for num_workers in worker_configs:
            print(f"\n测试 {num_workers} 个工作进程...")
            
            start_time = time.time()
            
            batch_processor = BatchProcessor(
                max_workers=num_workers,
                chunk_size=1,
                timeout=120
            )
            
            results = batch_processor.process_batch(
                file_paths=eda_files,
                processing_config=processing_config,
                output_dir=None,  # performance test only; skip saving files
                save_individual_results=False,
                progress_callback=lambda current, total, file_name: None  # silent mode
            )
            
            processing_time = time.time() - start_time
            
            performance_results[num_workers] = {
                'processing_time': processing_time,
                'throughput': len(eda_files) / processing_time,
                'success_rate': results['summary']['success_rate'],
                'results': results
            }
            
            print(f"  处理时间: {processing_time:.2f}s")
            print(f"  吞吐量: {len(eda_files) / processing_time:.2f} 文件/秒")
            print(f"  成功率: {results['summary']['success_rate']:.1%}")
        
        # 3. Analyze the benchmark results.
        print(f"\n并行处理性能分析:")
        print("-" * 50)
        
        best_config = max(performance_results.items(), 
                         key=lambda x: x[1]['throughput'])
        
        print(f"最佳配置: {best_config[0]} 个工作进程")
        print(f"最高吞吐量: {best_config[1]['throughput']:.2f} 文件/秒")
        
        # Speedup relative to the single-process baseline.
        baseline_time = performance_results[1]['processing_time']
        
        print(f"\n加速比分析 (相对于单进程):")
        for num_workers, result in performance_results.items():
            speedup = baseline_time / result['processing_time']
            efficiency = speedup / num_workers * 100
            print(f"  {num_workers} 进程: {speedup:.2f}x 加速, {efficiency:.1f}% 效率")
        
        return {
            'success': True,
            'performance_results': performance_results,
            'best_config': best_config,
            'test_files': eda_files
        }
        
    except Exception as e:
        logger.error(f"并行处理示例执行失败: {str(e)}")
        return {'success': False, 'error': str(e)}

def batch_report_generation_example(batch_results: Dict[str, Any],
                                  output_dir: str) -> Dict[str, Any]:
    """Batch report generation example.

    Demonstrates generating batch-processing reports in multiple
    formats: HTML, PDF (optional), Excel, a custom plain-text summary,
    and statistics charts.

    Args:
        batch_results: Result dictionary from a previous batch run.
        output_dir: Directory that receives all generated reports
            (created if missing).

    Returns:
        Dict with the generated report handles/paths; on failure,
        ``{'success': False, 'error': <message>}``.
    """
    print("=" * 60)
    print("批处理报告生成示例")
    print("=" * 60)
    
    try:
        output_path = Path(output_dir)
        output_path.mkdir(parents=True, exist_ok=True)
        
        # 1. HTML report with embedded plots.
        print("生成HTML报告...")
        html_report = generate_batch_report(
            batch_results,
            output_format='html',
            output_path=str(output_path / "batch_report.html"),
            include_plots=True,
            template='comprehensive'
        )
        
        # 2. PDF report. PDF backends are often unavailable, so failure
        # here is tolerated and reported instead of aborting the run.
        print("生成PDF报告...")
        try:
            pdf_report = generate_batch_report(
                batch_results,
                output_format='pdf',
                output_path=str(output_path / "batch_report.pdf"),
                include_plots=True,
                template='summary'
            )
        except Exception as e:
            print(f"PDF报告生成失败: {str(e)}")
            pdf_report = None
        
        # 3. Excel report (data only, no plots).
        print("生成Excel报告...")
        excel_report = generate_batch_report(
            batch_results,
            output_format='excel',
            output_path=str(output_path / "batch_report.xlsx"),
            include_plots=False,
            template='data'
        )
        
        # 4. Custom plain-text summary report.
        print("生成自定义汇总报告...")
        summary_report = create_custom_summary_report(batch_results)
        
        summary_file = output_path / "custom_summary.txt"
        with open(summary_file, 'w', encoding='utf-8') as f:
            f.write(summary_report)
        
        # 5. Statistics charts (PNG).
        print("生成统计图表...")
        create_batch_statistics_plots(batch_results, output_path)
        
        print(f"\n所有报告已生成到: {output_path}")
        
        return {
            'success': True,
            'reports': {
                'html': html_report,
                'pdf': pdf_report,
                'excel': excel_report,
                'summary': str(summary_file)
            },
            'output_dir': str(output_path)
        }
        
    except Exception as e:
        logger.error(f"批处理报告生成失败: {str(e)}")
        return {'success': False, 'error': str(e)}

# Helper functions

def create_sample_batch_data(num_files: int = 5,
                             data_dir: str = "./sample_batch_data") -> str:
    """Create sample EDA data files for batch-processing demos.

    Generates ``num_files`` synthetic recordings (each 2-5 minutes at
    4 Hz) with a randomly chosen condition, saving one CSV per subject
    plus a JSON metadata sidecar file.

    Args:
        num_files: Number of subject files to generate.
        data_dir: Target directory (created if missing). New optional
            parameter so callers can control the output location; the
            default preserves the original behavior.

    Returns:
        Path (as string) of the directory containing the files.
    """
    out_dir = Path(data_dir)
    # parents=True so nested target directories also work.
    out_dir.mkdir(parents=True, exist_ok=True)
    
    print(f"创建 {num_files} 个示例EDA数据文件...")
    
    for i in range(num_files):
        # Generate EDA data with varying characteristics per subject.
        duration = np.random.uniform(120, 300)  # 2-5 minutes
        sampling_rate = 4.0
        
        eda_data = generate_sample_eda_data(
            duration=duration,
            sampling_rate=sampling_rate,
            subject_id=f"S{i+1:03d}",
            condition=np.random.choice(['rest', 'stress', 'task'])
        )
        
        # Save the signal as CSV.
        filepath = out_dir / f"eda_subject_{i+1:03d}.csv"
        
        df = pd.DataFrame({
            'timestamp': eda_data['timestamps'],
            'eda': eda_data['signal']
        })
        df.to_csv(filepath, index=False)
        
        # Save the accompanying metadata sidecar.
        metadata = {
            'subject_id': eda_data['metadata']['subject_id'],
            'condition': eda_data['metadata']['condition'],
            'duration': duration,
            'sampling_rate': sampling_rate,
            'file_created': pd.Timestamp.now().isoformat()
        }
        
        metadata_file = out_dir / f"eda_subject_{i+1:03d}_metadata.json"
        with open(metadata_file, 'w') as f:
            json.dump(metadata, f, indent=2)
    
    print(f"示例数据已创建到: {out_dir}")
    return str(out_dir)

def create_advanced_batch_data(num_files: int = 6) -> str:
    """Create advanced batch-processing test data.

    Generates ``num_files`` CSV files whose signal characteristics
    (duration, noise level, SCR rate) depend on the experimental
    condition assigned to each file.

    Returns:
        Path (as string) of the directory holding the generated files.
    """
    target_dir = Path("./advanced_batch_data")
    target_dir.mkdir(exist_ok=True)

    print(f"创建 {num_files} 个高级示例EDA数据文件...")

    condition_cycle = ['baseline', 'stress_low', 'stress_high', 'recovery', 'task_easy', 'task_hard']

    for idx in range(num_files):
        cond = condition_cycle[idx % len(condition_cycle)]

        # Pick condition-specific generation parameters.
        if 'stress' in cond:
            dur_lo, dur_hi = 180, 360
            noise = 0.08 if 'high' in cond else 0.04
            rate = 8 if 'high' in cond else 5
        elif 'task' in cond:
            dur_lo, dur_hi = 240, 480
            noise = 0.05
            rate = 6 if 'hard' in cond else 3
        else:  # baseline / recovery
            dur_lo, dur_hi = 300, 600
            noise = 0.02
            rate = 2
        dur = np.random.uniform(dur_lo, dur_hi)

        sample = generate_condition_specific_eda_data(
            duration=dur,
            sampling_rate=4.0,
            condition=cond,
            noise_level=noise,
            scr_rate=rate,
            subject_id=f"ADV{idx+1:03d}"
        )

        # Persist the generated recording as CSV.
        csv_path = target_dir / f"eda_advanced_{cond}_{idx+1:03d}.csv"
        pd.DataFrame({
            'timestamp': sample['timestamps'],
            'eda_signal': sample['signal'],
            'condition': cond
        }).to_csv(csv_path, index=False)

    print(f"高级示例数据已创建到: {target_dir}")
    return str(target_dir)

def create_parallel_test_data(num_files: int = 8) -> str:
    """Create test data for parallel-processing benchmarks.

    Produces a mix of short (120 s), medium (300 s) and long (600 s)
    recordings so worker load is uneven across the pool.

    Returns:
        Path (as string) of the directory containing the files.
    """
    target_dir = Path("./parallel_test_data")
    target_dir.mkdir(exist_ok=True)

    print(f"创建 {num_files} 个并行处理测试文件...")

    for idx in range(num_files):
        # First third: short files; middle third: medium; rest: long.
        if idx < num_files // 3:
            seconds = 120
        elif idx < 2 * num_files // 3:
            seconds = 300
        else:
            seconds = 600

        sample = generate_sample_eda_data(
            duration=seconds,
            sampling_rate=4.0,
            subject_id=f"PAR{idx+1:03d}",
            condition='test'
        )

        csv_path = target_dir / f"eda_parallel_{idx+1:03d}_{seconds}s.csv"
        pd.DataFrame({
            'time': sample['timestamps'],
            'eda': sample['signal']
        }).to_csv(csv_path, index=False)

    print(f"并行测试数据已创建到: {target_dir}")
    return str(target_dir)

def generate_sample_eda_data(duration: float,
                           sampling_rate: float,
                           subject_id: str,
                           condition: str) -> Dict[str, Any]:
    """Generate a synthetic EDA recording for one subject.

    The signal is a slow sinusoidal tonic baseline plus randomly placed
    SCR events whose density and amplitude depend on the condition,
    with additive Gaussian noise.

    Returns:
        Dict with ``signal``, ``timestamps``, ``sampling_rate`` and a
        ``metadata`` sub-dict.
    """
    timestamps = np.arange(0, duration, 1/sampling_rate)

    # Slow sinusoidal tonic drift (2-minute period).
    tonic = 1.5 + 0.3 * np.sin(2 * np.pi * timestamps / 120)
    eda = tonic.copy()

    # Condition-dependent SCR density (seconds per event) and amplitude range.
    scr_profiles = {
        'stress': (20, (0.1, 0.4)),
        'task': (30, (0.05, 0.25)),
    }
    interval, amp_bounds = scr_profiles.get(condition, (60, (0.02, 0.15)))
    total_peaks = int(duration / interval)

    # Place SCR events away from the recording edges.
    for onset in np.random.uniform(10, duration - 10, total_peaks):
        add_scr_peak_simple(eda, timestamps, onset, np.random.uniform(*amp_bounds))

    # Additive Gaussian measurement noise.
    eda += np.random.normal(0, 0.02, len(eda))

    return {
        'signal': eda,
        'timestamps': timestamps,
        'sampling_rate': sampling_rate,
        'metadata': {
            'subject_id': subject_id,
            'condition': condition,
            'duration': duration,
            'num_peaks': total_peaks
        }
    }

def generate_condition_specific_eda_data(duration: float,
                                       sampling_rate: float,
                                       condition: str,
                                       noise_level: float,
                                       scr_rate: int,
                                       subject_id: str) -> Dict[str, Any]:
    """Generate synthetic EDA data tailored to an experimental condition.

    Baseline level, drift period, SCR amplitude and SCR count all depend
    on the condition name (``stress*``, ``task*``, or baseline/recovery).

    Returns:
        Dict with ``signal``, ``timestamps``, ``sampling_rate`` and a
        ``metadata`` sub-dict.
    """
    timestamps = np.arange(0, duration, 1/sampling_rate)

    # Condition-specific tonic baseline with slow sinusoidal drift.
    if 'stress' in condition:
        tonic = 2.0 + 0.5 * np.sin(2 * np.pi * timestamps / 80)
        if 'high' in condition:
            tonic += 0.3  # elevated tonic level under high stress
    elif 'task' in condition:
        tonic = 1.8 + 0.4 * np.sin(2 * np.pi * timestamps / 100)
        if 'hard' in condition:
            tonic += 0.2
    else:  # baseline / recovery
        tonic = 1.5 + 0.2 * np.sin(2 * np.pi * timestamps / 150)

    eda = tonic.copy()

    # Phasic SCRs at the requested rate (events per minute).
    total_peaks = int(duration * scr_rate / 60)
    onsets = np.random.uniform(15, duration - 15, total_peaks)

    # Condition-specific amplitude bounds.
    if 'stress' in condition:
        amp_lo, amp_hi = 0.1, 0.5
    elif 'task' in condition:
        amp_lo, amp_hi = 0.05, 0.3
    else:
        amp_lo, amp_hi = 0.02, 0.2

    for onset in onsets:
        add_scr_peak_simple(eda, timestamps, onset, np.random.uniform(amp_lo, amp_hi))

    # Condition-specific Gaussian measurement noise.
    eda += np.random.normal(0, noise_level, len(eda))

    return {
        'signal': eda,
        'timestamps': timestamps,
        'sampling_rate': sampling_rate,
        'metadata': {
            'subject_id': subject_id,
            'condition': condition,
            'duration': duration,
            'noise_level': noise_level,
            'scr_rate': scr_rate,
            'num_peaks': total_peaks
        }
    }

def add_scr_peak_simple(signal: np.ndarray, t: np.ndarray, 
                       peak_time: float, amplitude: float):
    """Add a single synthetic SCR (skin conductance response) to *signal* in place.

    The response has a randomized saturating-exponential rise followed
    by an exponential recovery. The per-sample Python loop of the
    original version is replaced with equivalent vectorized NumPy
    operations (same arithmetic, element-wise, so results are identical).

    Args:
        signal: EDA signal array, modified in place.
        t: Timestamp array aligned with ``signal``.
        peak_time: Onset time (seconds) of the response.
        amplitude: Peak amplitude of the response.
    """
    rise_time = np.random.uniform(1.0, 2.5)
    recovery_time = np.random.uniform(3.0, 8.0)
    
    # Samples covered by the response window.
    idx = np.where((t >= peak_time) & 
                   (t <= peak_time + rise_time + recovery_time))[0]
    if idx.size == 0:
        return  # response window lies entirely outside the recording
    
    offsets = t[idx] - peak_time
    rising = offsets <= rise_time
    
    scr = np.empty_like(offsets)
    # Rising phase: saturating exponential toward the peak amplitude.
    progress_up = offsets[rising] / rise_time
    scr[rising] = amplitude * (1 - np.exp(-3 * progress_up))
    # Recovery phase: exponential decay back toward baseline.
    progress_down = (offsets[~rising] - rise_time) / recovery_time
    scr[~rising] = amplitude * np.exp(-2 * progress_down)
    
    signal[idx] += scr

def create_progress_callback(config_name: str):
    """Build a progress-callback closure (Windows-compatible).

    The returned callable prints an in-place updating progress line;
    on Windows it blanks the console line before rewriting it.
    """
    def _on_progress(current: int, total: int, file_name: str):
        pct = current / total * 100
        message = f"[{config_name}] 进度: {pct:.1f}% ({current}/{total}) - {Path(file_name).name}"

        try:
            import sys
            import os

            if os.name == 'nt':
                # Windows consoles: clear the line, then rewrite it.
                sys.stdout.write('\r' + ' ' * 120 + '\r')
                sys.stdout.write(message)
                sys.stdout.flush()
            else:
                # POSIX terminals handle the carriage-return rewrite directly.
                print(f"\r{message}", end='')

            if current == total:
                print()  # finish the progress line with a newline
        except Exception:
            # Fall back to plain line-by-line output on any display error.
            print(f"[{config_name}] {pct:.1f}% 完成 ({current}/{total})")

    return _on_progress

def print_batch_summary(batch_results: Dict[str, Any]):
    """Print a human-readable summary of a batch-processing run."""
    stats = batch_results.get('summary', {})

    print(f"批处理摘要:")
    print(f"  总文件数: {stats.get('total_files', 0)}")
    print(f"  成功处理: {stats.get('successful_files', 0)}")
    print(f"  处理失败: {stats.get('failed_files', 0)}")
    print(f"  成功率: {stats.get('success_rate', 0):.1%}")
    print(f"  总处理时间: {stats.get('total_processing_time', 0):.2f}s")
    print(f"  平均处理时间: {stats.get('average_processing_time', 0):.2f}s/文件")

    failures = batch_results.get('errors', [])
    if failures:
        print(f"\n错误详情:")
        # Show at most the first three errors in detail.
        for entry in failures[:3]:
            print(f"  - {entry.get('file_path', 'Unknown')}: {entry.get('error', 'Unknown error')}")
        if len(failures) > 3:
            print(f"  ... 还有 {len(failures) - 3} 个错误")

def generate_configuration_comparison_report(all_results: Dict[str, Any]) -> Dict[str, Any]:
    """Build a report comparing batch runs made with different configurations.

    For every configuration the report records timing and success
    statistics, identifies the fastest / most reliable / highest-throughput
    configuration, and derives simple usage recommendations.

    Args:
        all_results: Mapping of configuration name to a dict with keys
            ``results``, ``processing_time`` and ``config``.

    Returns:
        Report dict with ``configurations``, ``performance_comparison``
        and ``recommendations`` sections.
    """
    report = {
        'timestamp': pd.Timestamp.now().isoformat(),
        'configurations': {},
        'performance_comparison': {},
        'recommendations': []
    }

    # Gather per-configuration performance statistics.
    perf = {}
    for name, entry in all_results.items():
        stats = entry['results'].get('summary', {})
        perf[name] = {
            'processing_time': entry['processing_time'],
            'success_rate': stats.get('success_rate', 0),
            'average_processing_time': stats.get('average_processing_time', 0),
            'total_files': stats.get('total_files', 0),
            'successful_files': stats.get('successful_files', 0),
            'failed_files': stats.get('failed_files', 0)
        }
        report['configurations'][name] = {
            'config': entry['config'],
            'performance': perf[name]
        }

    if perf:
        def throughput(item):
            # Successfully processed files per second of wall time.
            return item[1]['successful_files'] / item[1]['processing_time']

        fastest = min(perf.items(), key=lambda kv: kv[1]['processing_time'])
        most_reliable = max(perf.items(), key=lambda kv: kv[1]['success_rate'])
        best_throughput = max(perf.items(), key=throughput)

        report['performance_comparison'] = {
            'fastest': {
                'config': fastest[0],
                'time': fastest[1]['processing_time']
            },
            'most_reliable': {
                'config': most_reliable[0],
                'success_rate': most_reliable[1]['success_rate']
            },
            'highest_throughput': {
                'config': best_throughput[0],
                'throughput': throughput(best_throughput)
            }
        }

        # Derive simple, human-readable recommendations.
        tips = []
        if fastest[1]['success_rate'] > 0.9:
            tips.append(f"推荐使用 '{fastest[0]}' 配置以获得最快处理速度")
        if most_reliable[1]['success_rate'] == 1.0:
            tips.append(f"推荐使用 '{most_reliable[0]}' 配置以确保最高可靠性")
        tips.append(f"'{best_throughput[0]}' 配置具有最高的整体吞吐量")
        report['recommendations'] = tips

    return report

def create_custom_summary_report(batch_results: Dict[str, Any]) -> str:
    """Create a custom plain-text summary report for a batch run.

    Sections: processing statistics, result analysis (only when
    individual results exist), error analysis (only when errors exist),
    and general recommendations.

    Args:
        batch_results: Batch result dict; the ``summary``,
            ``individual_results`` and ``errors`` keys are all optional.

    Returns:
        The full report as a newline-joined string.
    """
    lines = []
    
    lines.append("EDA批处理自定义汇总报告")
    lines.append("=" * 60)
    lines.append(f"生成时间: {pd.Timestamp.now().strftime('%Y-%m-%d %H:%M:%S')}")
    lines.append("")
    
    # 1. Basic processing statistics.
    summary = batch_results.get('summary', {})
    lines.append("1. 处理统计")
    lines.append("-" * 30)
    lines.append(f"总文件数: {summary.get('total_files', 0)}")
    lines.append(f"成功处理: {summary.get('successful_files', 0)}")
    lines.append(f"处理失败: {summary.get('failed_files', 0)}")
    lines.append(f"成功率: {summary.get('success_rate', 0):.1%}")
    lines.append(f"总处理时间: {summary.get('total_processing_time', 0):.2f} 秒")
    lines.append(f"平均处理时间: {summary.get('average_processing_time', 0):.2f} 秒/文件")
    lines.append("")
    
    # Collected across successful results. Initialized here (not inside
    # the branch below) because the recommendations section reads
    # quality_scores even when there are no individual results —
    # previously this raised NameError for an empty batch.
    all_metrics = []
    quality_scores = []
    peak_counts = []
    
    # 2. Result analysis (only when individual results are present).
    individual_results = batch_results.get('individual_results', [])
    if individual_results:
        lines.append("2. 结果分析")
        lines.append("-" * 30)
        
        for result in individual_results:
            if result.get('success', False):
                metrics = result.get('result', {}).get('metrics', {})
                quality = result.get('result', {}).get('quality_assessment', {})
                peaks = result.get('result', {}).get('peaks', [])
                
                all_metrics.append(metrics)
                quality_scores.append(quality.get('overall_score', 0))
                peak_counts.append(len(peaks))
        
        if all_metrics:
            # Averages over successfully processed files.
            lines.append(f"成功处理的文件数: {len(all_metrics)}")
            lines.append(f"平均质量评分: {np.mean(quality_scores):.3f}")
            lines.append(f"平均检测峰值数: {np.mean(peak_counts):.1f}")
            
            # Tonic EDA level statistics.
            mean_eda_values = [m.get('mean_eda', 0) for m in all_metrics if 'mean_eda' in m]
            if mean_eda_values:
                lines.append(f"平均EDA水平: {np.mean(mean_eda_values):.4f} μS")
                lines.append(f"EDA水平范围: [{np.min(mean_eda_values):.4f}, {np.max(mean_eda_values):.4f}] μS")
            
            # SCR rate statistics.
            scr_rates = [m.get('scr_rate', 0) for m in all_metrics if 'scr_rate' in m]
            if scr_rates:
                lines.append(f"平均SCR频率: {np.mean(scr_rates):.2f} 个/分钟")
                lines.append(f"SCR频率范围: [{np.min(scr_rates):.2f}, {np.max(scr_rates):.2f}] 个/分钟")
        
        lines.append("")
    
    # 3. Error analysis (only when errors are present).
    errors = batch_results.get('errors', [])
    if errors:
        lines.append("3. 错误分析")
        lines.append("-" * 30)
        lines.append(f"错误总数: {len(errors)}")
        
        # Tally errors by their leading "Type:" prefix.
        error_types = {}
        for error in errors:
            error_msg = error.get('error', 'Unknown error')
            error_type = error_msg.split(':')[0] if ':' in error_msg else error_msg
            error_types[error_type] = error_types.get(error_type, 0) + 1
        
        lines.append("错误类型分布:")
        for error_type, count in error_types.items():
            lines.append(f"  {error_type}: {count} 次")
        
        lines.append("")
    
    # 4. Recommendations based on the overall statistics.
    lines.append("4. 处理建议")
    lines.append("-" * 30)
    
    success_rate = summary.get('success_rate', 0)
    if success_rate < 0.8:
        lines.append("- 成功率较低，建议检查数据质量和处理参数")
    elif success_rate < 0.95:
        lines.append("- 成功率良好，可考虑优化处理参数以提高成功率")
    else:
        lines.append("- 成功率优秀，处理配置良好")
    
    avg_time = summary.get('average_processing_time', 0)
    if avg_time > 10:
        lines.append("- 处理时间较长，可考虑优化算法或增加并行度")
    elif avg_time > 5:
        lines.append("- 处理时间适中，可根据需要调整并行配置")
    else:
        lines.append("- 处理速度良好")
    
    if quality_scores and np.mean(quality_scores) < 0.7:
        lines.append("- 数据质量偏低，建议加强预处理或数据筛选")
    
    lines.append("")
    lines.append("报告生成完成。")
    
    return "\n".join(lines)

def create_batch_statistics_plots(batch_results: Dict[str, Any], 
                                output_path: Path):
    """Render summary statistics charts for a batch run as a single PNG."""
    try:
        import matplotlib.pyplot as plt

        successes = [r for r in batch_results.get('individual_results', [])
                     if r.get('success', False)]
        if not successes:
            print("没有成功的结果可用于生成图表")
            return

        # Extract per-file statistics from the successful results.
        scores = [r.get('result', {}).get('quality_assessment', {}).get('overall_score', 0)
                  for r in successes]
        n_peaks = [len(r.get('result', {}).get('peaks', [])) for r in successes]
        durations = [r.get('processing_time', 0) for r in successes]

        fig, axes = plt.subplots(2, 2, figsize=(12, 10))
        fig.suptitle('EDA批处理统计分析', fontsize=16)

        # The three histograms share identical styling; draw them in a loop.
        hist_specs = [
            (axes[0, 0], scores, 'skyblue', '质量评分分布', '质量评分'),
            (axes[0, 1], n_peaks, 'lightgreen', 'SCR峰值数量分布', '峰值数量'),
            (axes[1, 0], durations, 'salmon', '处理时间分布', '处理时间 (秒)'),
        ]
        for ax, values, color, title, xlabel in hist_specs:
            ax.hist(values, bins=10, alpha=0.7, color=color, edgecolor='black')
            ax.set_title(title)
            ax.set_xlabel(xlabel)
            ax.set_ylabel('文件数量')
            ax.grid(True, alpha=0.3)

        # Scatter of quality score against detected peak count.
        scatter_ax = axes[1, 1]
        scatter_ax.scatter(scores, n_peaks, alpha=0.6, color='purple')
        scatter_ax.set_title('质量评分 vs 峰值数量')
        scatter_ax.set_xlabel('质量评分')
        scatter_ax.set_ylabel('峰值数量')
        scatter_ax.grid(True, alpha=0.3)

        # Linear trend line when there is more than one data point.
        if len(scores) > 1:
            coeffs = np.polyfit(scores, n_peaks, 1)
            trend = np.poly1d(coeffs)
            scatter_ax.plot(sorted(scores), trend(sorted(scores)), "r--", alpha=0.8)

        plt.tight_layout()
        plt.savefig(output_path / "batch_statistics.png", dpi=300, bbox_inches='tight')
        plt.close()

        print(f"统计图表已保存: {output_path / 'batch_statistics.png'}")

    except Exception as e:
        logger.warning(f"创建统计图表失败: {str(e)}")

# Script entry point: run every batch-processing demo in sequence.
if __name__ == "__main__":
    print("EDA处理器批处理示例")
    print("=" * 60)
    
    # 1. Simple batch-processing example.
    print("\n1. 简单批处理示例")
    simple_results = simple_batch_processing_example(
        data_dir="./sample_data",
        output_dir="./output/simple_batch"
    )
    
    # 2. Advanced batch-processing example.
    print("\n2. 高级批处理示例")
    advanced_results = advanced_batch_processing_example(
        data_dir="./advanced_data",
        output_dir="./output/advanced_batch"
    )
    
    # 3. Parallel-processing performance test.
    print("\n3. 并行处理性能测试")
    parallel_results = parallel_processing_example(
        data_dir="./parallel_data",
        max_workers=4
    )
    
    # 4. Report generation (only when the simple run succeeded).
    if simple_results.get('success', False):
        print("\n4. 批处理报告生成示例")
        report_results = batch_report_generation_example(
            simple_results,
            "./output/reports"
        )
    
    print("\n所有批处理示例运行完成！")