#!/usr/bin/env python3
"""
Parallel Processing Engine for GSI/EnKF Fortran Code Commentary System

This module contains the complete parallel processing engine copied from
scripts_backup/gsi_modules/parallel_engine.py and updated for the
fortran_commentator package structure.

This system processes GSI/EnKF Fortran source code using local Ollama models
to automatically add comprehensive comments following a standardized format
compatible with Sphinx/Doxygen documentation generators.

Core Features:
- Parallel processing with configurable worker threads
- Dynamic task assignment with intelligent load balancing
- Real-time progress tracking and comprehensive statistics
- Worker performance monitoring and optimization
- Classification-aware complexity scoring for GSI code types
- Thread-safe operations with detailed logging and error handling

GSI Code Classifications Supported:
- background_grid: Grid operations and spatial interpolation
- core_analysis: Analysis algorithms and variational methods
- io_interface: I/O operations and diagnostic interfaces
- observation_processing: Observation operators and data assimilation
- utilities: Helper functions and support routines

Author: GSI Development Team
Package: fortran_commentator.utils.parallel
Original: scripts_backup/gsi_modules/parallel_engine.py
Date: 2025-08-12
"""

import logging
import threading
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
from typing import Dict, List, Optional, Callable, Any
from dataclasses import dataclass, field
from queue import Queue

logger = logging.getLogger(__name__)


@dataclass
class ProcessingTask:
    """Individual file processing task with metadata.

    Consumed by ParallelProcessingEngine, which sorts tasks by an
    estimated cost (line count x classification complexity) so the most
    expensive files are dispatched first.
    """
    file_path: str       # path to the Fortran source file to process
    classification: str  # GSI category key (e.g. "core_analysis", "utilities")
    line_count: int      # file length in lines; drives the size component of the cost score
    priority: int = 0    # explicit priority; not referenced by the visible scheduler — TODO confirm external use


@dataclass
class WorkerStats:
    """Individual worker statistics and performance metrics.

    Mutated by ParallelProcessingEngine under ``worker_stats_lock``; do
    not update instances without holding that lock.
    """
    worker_id: int                              # index in range(worker_count)
    files_processed: int = 0                    # completed task count (success or failure)
    total_processing_time: float = 0.0          # cumulative seconds spent on completed files
    average_file_time: float = 0.0              # total_processing_time / files_processed
    current_task: Optional[str] = None          # file_path of in-flight task, None when idle
    last_completed_time: Optional[float] = None  # time.time() of most recent completion


@dataclass
class ProgressSnapshot:
    """Real-time progress tracking snapshot.

    Immutable-by-convention point-in-time view produced by
    ParallelProcessingEngine.create_progress_snapshot().
    """
    timestamp: float                  # time.time() when the snapshot was taken
    completed_count: int              # tasks finished so far (success + failure)
    total_count: int                  # total tasks submitted
    active_workers: int               # workers with a task currently assigned
    average_completion_rate: float    # tasks per second since processing started
    estimated_remaining_time: float   # seconds; 0 when the rate is unknown
    current_throughput: float         # files per minute (rate * 60)


class ParallelProcessingEngine:
    """Enhanced parallel processing engine with load balancing and progress tracking.

    Tasks are pre-sorted by an estimated cost score (line count scaled by a
    classification complexity multiplier) so the most expensive files start
    first, and workers pull work dynamically via :meth:`get_next_task`, which
    naturally balances load. Shared state is guarded by two locks:
    ``progress_lock`` for task/progress bookkeeping and ``worker_stats_lock``
    for per-worker statistics.
    """

    def __init__(self, worker_count: int = 4):
        """Create an engine with ``worker_count`` worker slots.

        Args:
            worker_count: Number of parallel workers to track and, for
                :meth:`process_files_parallel`, the thread-pool size.
        """
        self.worker_count = worker_count
        self.task_queue = Queue()
        self.result_queue = Queue()
        self.progress_lock = threading.Lock()
        # RLock (not Lock): get_stats() calls get_worker_load_balance_info()
        # while already holding this lock; a plain Lock deadlocked there.
        self.worker_stats_lock = threading.RLock()

        # Progress tracking
        self.completed_tasks = 0
        self.total_tasks = 0
        self.start_time: Optional[float] = None
        self.progress_snapshots: List[ProgressSnapshot] = []
        self.worker_stats: Dict[int, WorkerStats] = {}

        # Dynamic task assignment
        self.pending_tasks: List[ProcessingTask] = []
        self.active_tasks: Dict[int, ProcessingTask] = {}  # worker_id -> task
        self.completed_files: List[str] = []
        self.failed_files: List[tuple] = []  # (file_path, error message)
        # worker_id -> wall-clock time the current task was handed out;
        # lets report_task_completion() measure the true per-file duration.
        self._task_start_times: Dict[int, float] = {}

    def initialize_workers(self, tasks: List[ProcessingTask]) -> None:
        """Initialize worker statistics and task queues.

        Also anchors ``start_time`` (if not already set externally) so the
        completion-rate / ETA figures in progress snapshots are meaningful.

        Args:
            tasks: Tasks to schedule; copied and sorted, the caller's list
                is not mutated.
        """
        self.total_tasks = len(tasks)
        self.pending_tasks = self._sort_tasks_by_load_balance(tasks.copy())

        # BUGFIX: previously nothing set start_time, so rate/ETA/throughput
        # were always 0. Respect an externally supplied value if present.
        if self.start_time is None:
            self.start_time = time.time()

        # Initialize worker stats
        with self.worker_stats_lock:
            for worker_id in range(self.worker_count):
                self.worker_stats[worker_id] = WorkerStats(worker_id=worker_id)

        logger.info(f"Initialized {self.worker_count} workers for {self.total_tasks} tasks")

    def _sort_tasks_by_load_balance(self, tasks: List[ProcessingTask]) -> List[ProcessingTask]:
        """Sort tasks for optimal load balancing based on file size and complexity.

        Highest-cost tasks come first so long-running files start early and
        do not serialize the tail of the run.

        Args:
            tasks: List to sort in place (and return).

        Returns:
            The same list, sorted by descending estimated cost.
        """
        def load_balance_score(task: ProcessingTask) -> float:
            # Base score from line count (larger files take longer).
            size_score = task.line_count / 100.0

            # Classification complexity multiplier; unknown classifications
            # fall back to the neutral 1.0.
            complexity_multiplier = {
                "core_analysis": 2.0,        # Most complex algorithms
                "background_grid": 1.8,      # Complex grid operations
                "observation_processing": 1.5, # Moderate complexity
                "io_interface": 1.2,         # I/O operations
                "utilities": 1.0             # Simpler utility functions
            }.get(task.classification, 1.0)

            return size_score * complexity_multiplier

        # Sort by load balance score (highest first for better distribution).
        tasks.sort(key=load_balance_score, reverse=True)

        # Log task distribution
        if tasks:
            avg_score = sum(load_balance_score(t) for t in tasks) / len(tasks)
            max_score = load_balance_score(tasks[0])
            min_score = load_balance_score(tasks[-1])
            logger.info(f"Load balancing - Score range: {min_score:.1f} to {max_score:.1f}, avg: {avg_score:.1f}")

        return tasks

    def get_next_task(self, worker_id: int) -> Optional[ProcessingTask]:
        """Dynamic task assignment - get next available task for worker.

        Records the hand-out timestamp so :meth:`report_task_completion`
        can compute the real per-file processing time.

        Args:
            worker_id: Identifier of the requesting worker.

        Returns:
            The next pending task, or None when no work remains.
        """
        with self.progress_lock:
            if not self.pending_tasks:
                return None

            # Get next task and assign to worker.
            task = self.pending_tasks.pop(0)
            self.active_tasks[worker_id] = task
            self._task_start_times[worker_id] = time.time()

            # Update worker stats (setdefault: tolerate workers that were
            # not pre-registered by initialize_workers).
            with self.worker_stats_lock:
                stats = self.worker_stats.setdefault(worker_id, WorkerStats(worker_id=worker_id))
                stats.current_task = task.file_path

            return task

    def report_task_completion(self, worker_id: int, task: ProcessingTask,
                             success: bool, message: str) -> None:
        """Report task completion and update statistics.

        Args:
            worker_id: Worker that finished the task.
            task: The task that completed.
            success: True if processing succeeded.
            message: Error description recorded for failed files.
        """
        current_time = time.time()

        with self.progress_lock:
            self.completed_tasks += 1

            # Remove from active tasks; also retrieve the hand-out time set
            # by get_next_task (may be absent for unassigned completions).
            self.active_tasks.pop(worker_id, None)
            task_start = self._task_start_times.pop(worker_id, None)

            # Update results
            if success:
                self.completed_files.append(task.file_path)
            else:
                self.failed_files.append((task.file_path, message))

        # Update worker stats
        with self.worker_stats_lock:
            worker_stat = self.worker_stats.setdefault(worker_id, WorkerStats(worker_id=worker_id))
            worker_stat.files_processed += 1
            worker_stat.current_task = None
            worker_stat.last_completed_time = current_time

            # BUGFIX: the old formula
            #   current_time - (start_time + total_processing_time)
            # attributed all idle/queue time since engine start to this file.
            # Use the recorded hand-out time instead; clamp against clock
            # adjustments so the accumulator never goes backwards.
            if task_start is not None:
                worker_stat.total_processing_time += max(current_time - task_start, 0.0)
                worker_stat.average_file_time = (
                    worker_stat.total_processing_time / worker_stat.files_processed
                )

    def create_progress_snapshot(self) -> ProgressSnapshot:
        """Create current progress snapshot.

        Returns:
            A point-in-time ProgressSnapshot with completion rate, ETA
            (seconds) and throughput (files per minute).
        """
        current_time = time.time()

        # Lock order (progress_lock -> worker_stats_lock) matches
        # get_next_task to avoid lock-order inversion.
        with self.progress_lock:
            with self.worker_stats_lock:
                active_workers = len([w for w in self.worker_stats.values() if w.current_task is not None])

                # Calculate completion rate (tasks/second).
                if self.start_time and current_time > self.start_time:
                    elapsed_time = current_time - self.start_time
                    completion_rate = self.completed_tasks / elapsed_time if elapsed_time > 0 else 0
                else:
                    completion_rate = 0

                # Estimate remaining time; 0 means "unknown" when no rate yet.
                remaining_tasks = self.total_tasks - self.completed_tasks
                if completion_rate > 0:
                    estimated_remaining = remaining_tasks / completion_rate
                else:
                    estimated_remaining = 0

                # Current throughput (files per minute).
                throughput = completion_rate * 60 if completion_rate > 0 else 0

                return ProgressSnapshot(
                    timestamp=current_time,
                    completed_count=self.completed_tasks,
                    total_count=self.total_tasks,
                    active_workers=active_workers,
                    average_completion_rate=completion_rate,
                    estimated_remaining_time=estimated_remaining,
                    current_throughput=throughput
                )

    def log_progress(self) -> None:
        """Log current progress information (INFO summary, DEBUG per worker)."""
        snapshot = self.create_progress_snapshot()

        progress_pct = (snapshot.completed_count / snapshot.total_count * 100) if snapshot.total_count > 0 else 0

        logger.info(f"Progress: {snapshot.completed_count}/{snapshot.total_count} ({progress_pct:.1f}%) - "
                   f"Workers: {snapshot.active_workers}/{self.worker_count} - "
                   f"Rate: {snapshot.current_throughput:.1f} files/min - "
                   f"ETA: {snapshot.estimated_remaining_time/60:.1f}m")

        # Log individual worker stats
        with self.worker_stats_lock:
            for worker_id, stats in self.worker_stats.items():
                current_task = f" [processing: {Path(stats.current_task).name}]" if stats.current_task else " [idle]"
                avg_time = f"{stats.average_file_time:.1f}s" if stats.average_file_time > 0 else "N/A"
                logger.debug(f"  Worker {worker_id}: {stats.files_processed} files, avg {avg_time}{current_task}")

    def get_worker_load_balance_info(self) -> Dict[str, Any]:
        """Get detailed load balancing information.

        Returns:
            Metrics describing how evenly files were spread across workers,
            or ``{"status": "no_files_processed"}`` before any completions.
        """
        with self.worker_stats_lock:
            total_files = sum(stats.files_processed for stats in self.worker_stats.values())

            if total_files == 0:
                return {"status": "no_files_processed"}

            # Calculate load distribution metrics.
            files_per_worker = [stats.files_processed for stats in self.worker_stats.values()]
            avg_files = sum(files_per_worker) / len(files_per_worker)
            max_files = max(files_per_worker)
            min_files = min(files_per_worker)

            # ratio of 1.0 = perfectly even; variance penalizes stragglers.
            load_balance_ratio = min_files / max_files if max_files > 0 else 1.0
            load_variance = sum((f - avg_files) ** 2 for f in files_per_worker) / len(files_per_worker)

            return {
                "total_files_processed": total_files,
                "files_per_worker": files_per_worker,
                "average_files_per_worker": avg_files,
                "load_balance_ratio": load_balance_ratio,
                "load_variance": load_variance,
                "efficiency_score": load_balance_ratio * (1 - min(load_variance / avg_files, 0.5)) if avg_files > 0 else 0
            }

    def process_files_parallel(self, files: List[str],
                              processor_func: Callable[[str], Any]) -> Dict[str, Any]:
        """Legacy compatibility method for simple parallel processing.

        Args:
            files: File paths to process.
            processor_func: Callable invoked once per path; its exceptions
                mark that file as failed rather than aborting the batch.

        Returns:
            Dict with "successful" / "failed" path lists and a per-path
            "results" map (an ``{"error": ...}`` dict on failure).
        """
        results = {
            "successful": [],
            "failed": [],
            "results": {}
        }

        if not files:
            return results

        with ThreadPoolExecutor(max_workers=self.worker_count) as executor:
            # Submit all tasks
            future_to_file = {
                executor.submit(processor_func, file_path): file_path
                for file_path in files
            }

            # Process completed tasks as they finish, in completion order.
            for future in as_completed(future_to_file):
                file_path = future_to_file[future]
                try:
                    result = future.result()
                    results["successful"].append(file_path)
                    results["results"][file_path] = result

                except Exception as e:
                    logger.error(f"Processing failed for {file_path}: {e}")
                    results["failed"].append(file_path)
                    results["results"][file_path] = {"error": str(e)}

        return results

    def get_stats(self) -> Dict[str, Any]:
        """Get current processing statistics.

        Returns:
            Task counts, a per-worker summary, and the load-balance metrics
            from :meth:`get_worker_load_balance_info`.
        """
        # Safe to call get_worker_load_balance_info() while holding the
        # lock because worker_stats_lock is reentrant (see __init__);
        # with the previous plain Lock this call deadlocked.
        with self.worker_stats_lock:
            return {
                "total_tasks": self.total_tasks,
                "completed_tasks": self.completed_tasks,
                "failed_tasks": len(self.failed_files),
                "worker_stats": {wid: {
                    "files_processed": stats.files_processed,
                    "average_file_time": stats.average_file_time,
                    "current_task": stats.current_task
                } for wid, stats in self.worker_stats.items()},
                "load_balance_info": self.get_worker_load_balance_info()
            }