"""EDA批量处理工具

提供批量处理多个EDA数据文件的功能，支持：
- 多文件并行处理
- 进度监控和错误处理
- 结果汇总和统计
- 自定义处理参数

主要类和函数：
- BatchProcessor: 批量处理器类
- process_eda_batch: 批量处理函数
"""

import json
import logging
import os
import sys
import traceback
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, as_completed
from datetime import datetime
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Union

import numpy as np
import pandas as pd

from ..core.pipeline import process_eda_pipeline
from .data_io import load_eda_data, save_eda_results

logger = logging.getLogger(__name__)

class BatchProcessor:
    """Batch processor for EDA data files.

    Runs many EDA recordings through ``process_eda_pipeline`` concurrently,
    isolating per-file failures, reporting progress via a callback, and
    writing per-file results plus an aggregate summary to ``output_dir``.
    """

    def __init__(self, 
                 output_dir: str = None,
                 max_workers: int = None,
                 use_multiprocessing: bool = False,
                 save_individual_results: bool = True,
                 save_summary: bool = True):
        """Initialize the batch processor.

        Args:
            output_dir: Output directory; defaults to ``./eda_batch_results``
                under the current working directory.
            max_workers: Maximum number of parallel workers; defaults to
                ``min(32, cpu_count + 4)`` (ThreadPoolExecutor's heuristic).
            use_multiprocessing: Use processes instead of threads. Submitted
                work must then be picklable.
            save_individual_results: Write one JSON result file per input.
            save_summary: Write the aggregate batch summary files.
        """
        self.output_dir = Path(output_dir) if output_dir else Path.cwd() / "eda_batch_results"
        self.max_workers = max_workers or min(32, (os.cpu_count() or 1) + 4)
        self.use_multiprocessing = use_multiprocessing
        self.save_individual_results = save_individual_results
        self.save_summary = save_summary
        
        # Create the output directory eagerly so workers can write freely.
        self.output_dir.mkdir(parents=True, exist_ok=True)
        
        # Batch statistics; reset at the start of every process_files() call.
        self.processed_count = 0   # files processed successfully
        self.failed_count = 0      # files that failed or raised
        self.results_summary = []  # result dicts of successful files
        self.error_log = []        # error records of failed files
        
        logger.info(f"批量处理器初始化完成，输出目录: {self.output_dir}")
    
    def process_files(self, 
                     file_paths: List[Union[str, Path]],
                     processing_params: Dict[str, Any] = None,
                     progress_callback: Callable[[int, int], None] = None) -> Dict[str, Any]:
        """Process a batch of EDA files.

        Args:
            file_paths: Paths of the files to process.
            processing_params: Extra keyword arguments forwarded to
                ``process_eda_pipeline``.
            progress_callback: Called as ``callback(done, total)`` after each
                file finishes, whether it succeeded or failed.

        Returns:
            The batch report dict built by ``_generate_batch_report``.
        """
        logger.info(f"开始批量处理 {len(file_paths)} 个文件")
        
        # Reset statistics so one processor can be reused across batches.
        self.processed_count = 0
        self.failed_count = 0
        self.results_summary = []
        self.error_log = []
        
        processing_params = processing_params or {}
        
        # Threads by default; processes only when explicitly requested.
        executor_class = ProcessPoolExecutor if self.use_multiprocessing else ThreadPoolExecutor
        
        with executor_class(max_workers=self.max_workers) as executor:
            # Submit every file up front, then consume in completion order.
            future_to_file = {
                executor.submit(self._process_single_file, file_path, processing_params): file_path
                for file_path in file_paths
            }
            
            for future in as_completed(future_to_file):
                file_path = future_to_file[future]
                
                try:
                    result = future.result()
                    if result['success']:
                        self.processed_count += 1
                        self.results_summary.append(result)
                    else:
                        self.failed_count += 1
                        self.error_log.append({
                            'file': str(file_path),
                            'error': result.get('error', 'Unknown error'),
                            'timestamp': datetime.now().isoformat()
                        })
                    
                    # Report progress after each completed file.
                    if progress_callback:
                        total_processed = self.processed_count + self.failed_count
                        progress_callback(total_processed, len(file_paths))
                        
                except Exception as e:
                    # future.result() itself raised (e.g. a worker crashed);
                    # _process_single_file normally reports failure in-band.
                    self.failed_count += 1
                    error_msg = f"处理文件 {file_path} 时发生异常: {str(e)}"
                    logger.error(error_msg)
                    self.error_log.append({
                        'file': str(file_path),
                        'error': error_msg,
                        'traceback': traceback.format_exc(),
                        'timestamp': datetime.now().isoformat()
                    })
                    
                    if progress_callback:
                        total_processed = self.processed_count + self.failed_count
                        progress_callback(total_processed, len(file_paths))
        
        # Build the aggregate report once all futures have resolved.
        batch_result = self._generate_batch_report()
        
        if self.save_summary:
            self._save_batch_summary(batch_result)
        
        logger.info(f"批量处理完成: 成功 {self.processed_count} 个，失败 {self.failed_count} 个")
        return batch_result
    
    def _process_single_file(self, file_path: Union[str, Path], 
                           processing_params: Dict[str, Any]) -> Dict[str, Any]:
        """Process a single file; never raises — failures are reported
        in-band through the returned dict.

        Args:
            file_path: Path of the file to process.
            processing_params: Extra keyword arguments for the pipeline.

        Returns:
            Result dict with at least ``file_path``, ``file_name``,
            ``success`` and ``timestamp``; on success also flat summary
            metrics, on failure ``error`` and ``traceback``.
        """
        file_path = Path(file_path)
        result = {
            'file_path': str(file_path),
            'file_name': file_path.name,
            'success': False,
            'timestamp': datetime.now().isoformat()
        }
        
        try:
            logger.debug(f"开始处理文件: {file_path}")
            
            eda_data = load_eda_data(str(file_path))
            if eda_data is None:
                raise ValueError("无法加载EDA数据")
            
            # Loaders may return either a dict (signal + metadata) or the
            # raw signal; fall back to processing_params for the rate.
            if isinstance(eda_data, dict):
                eda_signal = eda_data.get('signal', eda_data.get('data'))
                sampling_rate = eda_data.get('sampling_rate', 4.0)
            else:
                eda_signal = eda_data
                sampling_rate = processing_params.get('sampling_rate', 4.0)
            
            if eda_signal is None:
                raise ValueError("EDA信号数据为空")
            
            if not isinstance(eda_signal, np.ndarray):
                eda_signal = np.array(eda_signal)
            
            processing_result = process_eda_pipeline(
                eda_signal=eda_signal,
                sampling_rate=sampling_rate,
                **processing_params
            )
            
            # Attach provenance so individual result files are self-describing.
            processing_result['file_info'] = {
                'file_path': str(file_path),
                'file_name': file_path.name,
                'file_size': file_path.stat().st_size if file_path.exists() else 0,
                'processing_timestamp': result['timestamp']
            }
            
            if self.save_individual_results:
                output_file = self.output_dir / f"{file_path.stem}_results.json"
                save_eda_results(processing_result, str(output_file))
                result['output_file'] = str(output_file)
            
            # Key metrics kept flat so they aggregate easily in the report.
            result.update({
                'success': True,
                'signal_length': len(eda_signal),
                'signal_duration': len(eda_signal) / sampling_rate,
                'sampling_rate': sampling_rate,
                'quality_score': processing_result.get('quality_assessment', {}).get('overall_score', 0.0),
                'scr_count': processing_result.get('metrics', {}).get('scr_count', 0),
                'mean_eda': processing_result.get('metrics', {}).get('mean_eda', 0.0),
                'processing_time': processing_result.get('processing_info', {}).get('total_time', 0.0)
            })
            
            logger.debug(f"文件处理成功: {file_path}")
            
        except Exception as e:
            error_msg = f"处理文件失败: {str(e)}"
            logger.error(f"{file_path}: {error_msg}")
            result.update({
                'success': False,
                'error': error_msg,
                'traceback': traceback.format_exc()
            })
        
        return result
    
    @staticmethod
    def _column_stats(series: pd.Series, include_total: bool = False) -> Dict[str, float]:
        """Summarize one numeric metric column as plain floats.

        pandas' sample std is NaN for a single row; NaN would be emitted
        literally by json.dump and break strict JSON parsers, so it is
        mapped to 0.0 here.
        """
        def _safe(value) -> float:
            value = float(value)
            return 0.0 if np.isnan(value) else value

        stats = {'mean': _safe(series.mean()), 'std': _safe(series.std())}
        if include_total:
            stats['total'] = _safe(series.sum())
        else:
            stats['min'] = _safe(series.min())
            stats['max'] = _safe(series.max())
        return stats

    def _generate_batch_report(self) -> Dict[str, Any]:
        """Build the aggregate batch report from the collected statistics."""
        total_files = self.processed_count + self.failed_count
        success_rate = (self.processed_count / total_files * 100) if total_files > 0 else 0
        
        # Aggregate metrics over the successfully processed files only.
        if self.results_summary:
            df = pd.DataFrame(self.results_summary)
            stats = {
                'signal_length': self._column_stats(df['signal_length']),
                'signal_duration': self._column_stats(df['signal_duration']),
                'quality_score': self._column_stats(df['quality_score']),
                'scr_count': self._column_stats(df['scr_count']),
                'processing_time': self._column_stats(df['processing_time'], include_total=True)
            }
        else:
            stats = {}
        
        batch_report = {
            'batch_info': {
                'total_files': total_files,
                'processed_successfully': self.processed_count,
                'failed': self.failed_count,
                'success_rate': success_rate,
                'output_directory': str(self.output_dir),
                'processing_timestamp': datetime.now().isoformat()
            },
            'statistics': stats,
            'successful_files': self.results_summary,
            'failed_files': self.error_log,
            'processing_settings': {
                'max_workers': self.max_workers,
                'use_multiprocessing': self.use_multiprocessing,
                'save_individual_results': self.save_individual_results
            }
        }
        
        return batch_report
    
    def _save_batch_summary(self, batch_result: Dict[str, Any]):
        """Write the summary JSON, per-file CSV and error log to output_dir."""
        try:
            # Full report (everything: stats, per-file results, errors).
            summary_file = self.output_dir / "batch_summary.json"
            with open(summary_file, 'w', encoding='utf-8') as f:
                json.dump(batch_result, f, indent=2, ensure_ascii=False)
            
            # Flat per-file table for spreadsheet analysis.
            if self.results_summary:
                df = pd.DataFrame(self.results_summary)
                csv_file = self.output_dir / "batch_results.csv"
                df.to_csv(csv_file, index=False, encoding='utf-8')
            
            # Separate error log for quick failure triage.
            if self.error_log:
                error_file = self.output_dir / "error_log.json"
                with open(error_file, 'w', encoding='utf-8') as f:
                    json.dump(self.error_log, f, indent=2, ensure_ascii=False)
            
            logger.info(f"批量处理汇总已保存到: {self.output_dir}")
            
        except Exception as e:
            # Best effort: a failed summary write must not fail the batch.
            logger.error(f"保存批量处理汇总失败: {str(e)}")

def process_eda_batch(file_paths: List[Union[str, Path]],
                     output_dir: str = None,
                     processing_params: Dict[str, Any] = None,
                     max_workers: int = None,
                     progress_callback: Callable[[int, int], None] = None,
                     use_multiprocessing: bool = False,
                     save_individual_results: bool = True,
                     save_summary: bool = True) -> Dict[str, Any]:
    """Convenience wrapper: batch-process EDA files in one call.

    Builds a :class:`BatchProcessor` and runs it over ``file_paths``.
    The trailing keyword arguments expose the processor's remaining
    options; their defaults match ``BatchProcessor.__init__``, so existing
    callers are unaffected.

    Args:
        file_paths: Paths of the files to process.
        output_dir: Output directory for results.
        processing_params: Extra keyword arguments for the pipeline.
        max_workers: Maximum number of parallel workers.
        progress_callback: Called as ``callback(done, total)`` per file.
        use_multiprocessing: Use processes instead of threads.
        save_individual_results: Write one JSON result file per input.
        save_summary: Write the aggregate batch summary files.

    Returns:
        The batch report dict from ``BatchProcessor.process_files``.
    """
    processor = BatchProcessor(
        output_dir=output_dir,
        max_workers=max_workers,
        use_multiprocessing=use_multiprocessing,
        save_individual_results=save_individual_results,
        save_summary=save_summary
    )

    return processor.process_files(
        file_paths=file_paths,
        processing_params=processing_params,
        progress_callback=progress_callback
    )

def find_eda_files(directory: Union[str, Path], 
                  file_extensions: List[str] = None,
                  recursive: bool = True) -> List[Path]:
    """Find EDA data files in a directory.

    Args:
        directory: Directory to search.
        file_extensions: Extensions to match, with or without a leading
            dot (default: ['.csv', '.txt', '.json', '.xlsx', '.xls']).
        recursive: Search subdirectories as well.

    Returns:
        Sorted list of unique matching file paths.
    """
    directory = Path(directory)

    if file_extensions is None:
        file_extensions = ['.csv', '.txt', '.json', '.xlsx', '.xls']

    # Normalize so both 'csv' and '.csv' work; a bare 'csv' would
    # otherwise build the pattern '*csv' only by accident of globbing —
    # be explicit about the dot.
    normalized = [ext if ext.startswith('.') else f".{ext}" for ext in file_extensions]

    # A set deduplicates paths when extension entries overlap.
    glob_method = directory.rglob if recursive else directory.glob
    found = set()
    for ext in normalized:
        found.update(glob_method(f"*{ext}"))

    return sorted(found)

class ProgressTracker:
    """Console progress reporter for batch jobs.

    Rewrites a single terminal line with progress, elapsed time, and an
    ETA extrapolated from the average per-task duration so far.
    """

    def __init__(self, total: int, update_interval: int = 1):
        """Initialize the tracker.

        Args:
            total: Total number of tasks.
            update_interval: Print every N updates. Values below 1 are
                clamped to 1; ``update()`` computes
                ``current % update_interval``, so 0 would raise
                ZeroDivisionError.
        """
        self.total = total
        self.current = 0
        self.update_interval = max(1, update_interval)
        self.start_time = datetime.now()

    def update(self, current: int, total: int = None):
        """Record progress and print at interval boundaries or completion.

        Args:
            current: Number of tasks completed so far.
            total: New total, if it changed (optional).
        """
        self.current = current
        if total is not None:
            self.total = total

        if current % self.update_interval == 0 or current == self.total:
            self._print_progress()

    def _print_progress(self):
        """Print one progress line (Windows-safe carriage-return rewrite)."""
        if self.total > 0:
            percentage = (self.current / self.total) * 100
            elapsed = datetime.now() - self.start_time

            # ETA = elapsed * remaining / done; undefined before first task.
            if self.current > 0:
                eta = elapsed * (self.total - self.current) / self.current
                eta_str = str(eta).split('.')[0]  # strip microseconds
            else:
                eta_str = "未知"

            progress_msg = f"进度: {self.current}/{self.total} ({percentage:.1f}%) 已用时: {str(elapsed).split('.')[0]} 预计剩余: {eta_str}"

            try:
                if os.name == 'nt':  # Windows console
                    # Blank the line first to avoid stale trailing characters,
                    # then rewrite it in place.
                    sys.stdout.write('\r' + ' ' * 100 + '\r')
                    sys.stdout.write(progress_msg)
                    sys.stdout.flush()
                else:
                    # POSIX terminals handle a plain carriage-return rewrite.
                    print(f"\r{progress_msg}", end="")

                if self.current == self.total:
                    print()  # move to a fresh line once finished

            except Exception:
                # Fall back to one plain line if in-place rewriting fails.
                print(f"[{self.current}/{self.total}] {percentage:.1f}% 完成")