#!/usr/bin/env python3
"""
Multi-threaded integrated beam analysis tool
Combines convex hull, Twiss parameter, and emittance analysis
Optimized for large file counts (1000+ files) with parallel processing
Note: Phase space analysis is available separately in phase_space.py
"""

import os
import time
import threading
from multiprocessing import cpu_count
from concurrent.futures import ThreadPoolExecutor, as_completed
from utils.csv_hanlder import select_and_read_files
from utils.console_hanlder import console
from convex import process_files_and_plot_area
from twiss import plot_beam_size_vs_frequency_log, plot_beam_size_vs_frequency_linear
from emmitance import plot_emittance_analysis_linear, plot_emittance_analysis_log
from convex import plot_scatter_area_vs_f, plot_scatter_area_vs_f_linear, plot_scatter_area_vs_f_log


def process_single_file(df, filename, file_index):
    """
    Run the convex-hull step for one file's DataFrame.

    Args:
        df: DataFrame with particle data; must provide 'f', 'x' and 'y' columns
        filename: display name of the file (not used here; kept for the caller's API)
        file_index: position of this file within the whole batch

    Returns:
        tuple: (file_index, frequency, area, success, error_message)
               On failure, frequency and area are 0, success is False and
               error_message holds the stringified exception.
    """
    try:
        # Frequency is assumed constant within a file; the first row's value stands in.
        frequency = df['f'].iloc[0]

        # Imported lazily so that an import problem is reported as a per-file
        # failure instead of killing the whole batch.
        from utils.convex_hanlder import calculate_convex_hull_area

        area = calculate_convex_hull_area(df['x'].values, df['y'].values)
        return (file_index, frequency, area, True, None)

    except Exception as exc:
        return (file_index, 0, 0, False, str(exc))


def process_files_parallel(dfs, filenames=None, max_workers=4, batch_size=200):
    """
    Run the convex-hull analysis over many files using a thread pool.

    Files are handled in batches to keep the number of in-flight futures
    bounded; within a batch all files are submitted at once and collected
    as they finish.

    Args:
        dfs: List of DataFrames, one per file
        filenames: Optional list of display names matching dfs
        max_workers: Maximum number of worker threads
        batch_size: Number of files submitted per batch

    Returns:
        tuple: (areas, frequencies, hull_vertices). Failed files keep 0 in
        areas/frequencies. hull_vertices is reserved for future use and is
        always a list of None here.
    """
    total_files = len(dfs)
    console.log(f"使用 {max_workers} 个线程处理 {total_files} 个文件")

    areas = [0] * total_files
    frequencies = [0] * total_files
    hull_vertices = [None] * total_files  # placeholder; never populated by this pass

    for start in range(0, total_files, batch_size):
        end = min(start + batch_size, total_files)
        chunk_names = filenames[start:end] if filenames else None

        with ThreadPoolExecutor(max_workers=max_workers) as pool:
            pending = []
            for offset, frame in enumerate(dfs[start:end]):
                global_idx = start + offset
                label = chunk_names[offset] if chunk_names else f'File {global_idx + 1}'
                pending.append(
                    pool.submit(process_single_file, frame, label, global_idx)
                )

            # Store each result as soon as its worker finishes.
            for fut in as_completed(pending):
                idx, freq, area, ok, err = fut.result()
                if ok:
                    frequencies[idx] = freq
                    areas[idx] = area
                else:
                    console.log(f"处理文件 {idx + 1} 时出错: {err}")
                    frequencies[idx] = 0
                    areas[idx] = 0

    return areas, frequencies, hull_vertices


def integrated_beam_analysis_multithread(file_type='csv', batch_size=200, max_files=None, max_workers=4):
    """
    Multi-threaded integrated beam analysis with single file selection.

    Analysis steps:
    1. Convex Hull Analysis - area vs frequency (parallel)
    2. Twiss Parameter Analysis - beam size vs frequency
    3. Emittance Analysis - 3x3 parameter grid plots
    4. Summary Analysis - statistical summary

    Args:
        file_type: 'csv', 'excel', or 'json'
        batch_size: Number of files to process in each batch
        max_files: Maximum number of files to process (None for all)
        max_workers: Maximum number of worker threads

    Returns:
        None. Results are logged via console and plots are written to result/.
    """
    # Clear previous results
    console.clear_file()
    console.log(f"批次大小: {batch_size}")
    console.log(f"最大工作线程数: {max_workers}")
    if max_files:
        console.log(f"最大文件数限制: {max_files}")

    # Select files once and reuse the DataFrames for every analysis step.
    console.log("选择文件中...")
    dfs, filenames = select_and_read_files(file_type)

    if len(dfs) == 0:
        console.log("未找到有效文件。退出分析。")
        return

    # Truncate to the requested cap, keeping dfs and filenames aligned.
    if max_files and len(dfs) > max_files:
        console.log(f"限制为前 {max_files} 个文件，共 {len(dfs)} 个")
        dfs = dfs[:max_files]
        filenames = filenames[:max_files] if filenames else None

    total_files = len(dfs)
    console.log(f"处理 {total_files} 个文件进行分析")

    # Every plot below saves into result/; create it up front so the first
    # save cannot fail on a fresh checkout.
    os.makedirs('result', exist_ok=True)

    start_time = time.time()

    try:
        # 1. Convex Hull Analysis (Parallel)
        console.log_section("步骤 1: 凸包分析 (并行)")
        areas, frequencies, hull_vertices = process_files_parallel(
            dfs, filenames, max_workers=max_workers, batch_size=batch_size
        )
        # 1a. Convex Hull Analysis (Linear Scale)
        console.log_section("步骤 1a: 凸包分析 (线性尺度)")
        plot_scatter_area_vs_f_linear(areas, frequencies, fig_name="result/area_vs_f_linear.png")

        # 1b. Convex Hull Analysis (Log Scale)
        console.log_section("步骤 1b: 凸包分析 (对数尺度)")
        plot_scatter_area_vs_f_log(areas, frequencies, fig_name="result/area_vs_f_log.png")

        # 2. Twiss Parameter Analysis (Log Scale)
        console.log_section("步骤 2: TWISS 参数分析 (对数尺度)")
        plot_beam_size_vs_frequency_log(
            dfs, filenames,
            save_path='result/beam_size_analysis_logscale.png'
        )

        # 2b. Twiss Parameter Analysis (Linear Scale)
        console.log_section("步骤 2b: TWISS 参数分析 (线性尺度)")
        plot_beam_size_vs_frequency_linear(
            dfs, filenames,
            save_path='result/beam_size_analysis_linearscale.png'
        )

        # 3. Emittance Analysis (Linear Scale)
        # NOTE: filename typo fixed ('lineascale' -> 'linearscale') to match
        # the naming used by the Twiss plots above.
        console.log_section("步骤 3: 发射度分析 (线性尺度)")
        plot_emittance_analysis_linear(
            dfs, filenames,
            save_path='result/emittance_vs_frequency_linearscale.png'
        )

        # 3b. Emittance Analysis (Log Scale)
        # NOTE: filename typo fixed ('logcale' -> 'logscale').
        console.log_section("步骤 3b: 发射度分析 (对数尺度)")
        plot_emittance_analysis_log(
            dfs, filenames,
            save_path='result/emittance_vs_frequency_logscale.png'
        )

        # 4. Summary Analysis
        console.log_section("步骤 4: 汇总分析")
        analyze_summary_results(areas, frequencies, total_files)

        # 5. Performance Summary
        elapsed_time = time.time() - start_time
        console.log_summary(total_files, total_files, 0)
        console.log(f"总分析时间: {elapsed_time:.2f} 秒")
        console.log(f"平均每个文件处理时间: {elapsed_time/total_files:.2f} 秒")
        # Guard against a (near-)zero elapsed time on tiny inputs.
        if elapsed_time > 0:
            console.log(f"处理速度: {total_files/elapsed_time:.2f} 文件/秒")

        console.log_section("分析完成")
        console.log("图表已保存到 result/ 目录")

    except Exception as e:
        # Top-level boundary: report and keep the process alive.
        console.log(f"分析过程中出错: {str(e)}")
        console.log("分析失败。请查看控制台输出以获取详细信息。")


def analyze_summary_results(areas, frequencies, total_files):
    """
    Log summary statistics for the convex-hull results.

    Args:
        areas: List of hull areas, 0 for failed files
        frequencies: List of frequencies aligned with areas
        total_files: Total number of files that were processed

    Returns:
        None. All output goes through console.log.
    """
    # Keep only files that produced a positive area (0 marks a failure).
    valid_data = [(f, a) for f, a in zip(frequencies, areas) if a > 0]

    if not valid_data:
        console.log("未找到有效数据进行汇总分析")
        return

    valid_freqs, valid_areas = zip(*valid_data)
    n = len(valid_areas)
    min_area = min(valid_areas)  # hoisted: used three times below

    # Basic statistics
    console.log(f"有效文件: {n}/{total_files}")
    console.log(f"平均面积: {sum(valid_areas)/n:.2e}")
    console.log(f"最小面积: {min_area:.2e}")
    console.log(f"最大面积: {max(valid_areas):.2e}")

    # Frequency analysis
    console.log(f"频率范围: {min(valid_freqs):.0f} - {max(valid_freqs):.0f} Hz")

    # The "optimal" frequency is the one with the smallest hull area.
    min_area_idx = valid_areas.index(min_area)
    optimal_freq = valid_freqs[min_area_idx]

    console.log(f"最优频率: {optimal_freq:.0f} Hz (面积: {min_area:.2e})")

    # Area distribution: sort once instead of once per percentile.
    sorted_areas = sorted(valid_areas)
    console.log("面积分布百分位数:")
    for pct, frac in ((25, 0.25), (50, 0.50), (75, 0.75), (90, 0.90), (95, 0.95)):
        # int(n * frac) < n for frac < 1, so the index is always in range.
        console.log(f"  {pct}%: {sorted_areas[int(n * frac)]:.2e}")


def main():
    """Entry point: configure and launch the multi-threaded analysis."""
    console.log("多线程集成束流分析工具")
    console.log("=" * 60)

    # Speed-oriented defaults: one worker thread per CPU core and
    # 200 files per batch; process every selected CSV file.
    integrated_beam_analysis_multithread(
        file_type='csv',
        batch_size=200,
        max_files=None,
        max_workers=cpu_count(),
    )


if __name__ == "__main__":
    main()
