"""
Hybrid PETSc-Kokkos Runner Implementation.

This module provides the HybridRunner class that executes hybrid_prototype
with proper MPI support and resource management.
"""

import os
import subprocess
import time
import logging
import psutil
import signal
from pathlib import Path
from typing import Dict, List, Optional, Any

from gcr_solver_manager.extensions.base_runner import (
    RunnerInterface, RunConfig, RunResult, RunStatus, 
    EnvironmentResult, EnvironmentStatus
)


class HybridRunner(RunnerInterface):
    """Runner for the Hybrid PETSc-Kokkos framework.

    Builds the command line for the hybrid prototype binary (optionally
    wrapped in an MPI launcher), executes it as a child process with
    timeout/cancellation handling, writes an execution log, and parses
    performance metrics out of the solver's textual output.  It also
    validates the runtime environment (MPI, PETSc, Kokkos, GPU).
    """

    def __init__(self):
        """Initialize the hybrid runner."""
        self.logger = logging.getLogger(__name__)
        # Repository-relative location of the hybrid prototype:
        # <repo>/src/prototypes/hybrid, five levels above this module.
        self._hybrid_root = Path(__file__).parent.parent.parent.parent.parent / "src" / "prototypes" / "hybrid"
        # Live child processes keyed by PID so they can be tracked and
        # cleaned up after completion or termination.
        self._active_processes: Dict[int, subprocess.Popen] = {}

    @property
    def hybrid_root(self) -> Path:
        """Get the hybrid framework root directory."""
        return self._hybrid_root

    def run_solver(self, run_config: RunConfig) -> RunResult:
        """Execute solver with given configuration.

        Validates the configuration and environment, launches the solver in
        its own process group (so a timeout can kill any MPI children too),
        waits for completion honoring ``run_config.timeout``, extracts
        performance metrics, and optionally writes a log file.

        Args:
            run_config: Fully populated run configuration.

        Returns:
            RunResult describing the outcome; ``status`` is SUCCESS only
            when the child exited with code 0.
        """
        self.logger.info(f"Starting hybrid solver: {run_config.solver_type}")
        start_time = time.time()

        # Pessimistic default: any early exit below reports FAILURE.
        result = RunResult(
            status=RunStatus.FAILURE,
            start_time=start_time
        )

        try:
            # Validate configuration
            validation_errors = self.validate_config(run_config)
            if validation_errors:
                result.error_message = f"Configuration errors: {validation_errors}"
                return result

            # Check environment
            env_result = self.check_environment()
            if not env_result.ready_to_run:
                result.error_message = f"Environment not ready: {env_result.environment_issues}"
                return result

            # Prepare environment
            if not self.prepare_environment(run_config):
                result.error_message = "Failed to prepare execution environment"
                return result

            # Build command line
            command = self.build_command_line(run_config)
            result.command_line = command

            # Set working directory
            working_dir = run_config.working_dir or self._hybrid_root
            result.working_directory = working_dir

            # Setup environment variables (run-specific values override
            # the inherited process environment).
            env = os.environ.copy()
            env.update(run_config.environment)

            # Setup GPU environment if needed
            if run_config.gpu_backend:
                env = self._setup_gpu_environment(env, run_config)

            # Resolve the log file: an explicit one wins; otherwise derive
            # a timestamped name inside the output directory.
            log_file = run_config.log_file
            if not log_file and run_config.output_dir:
                timestamp = time.strftime("%Y%m%d_%H%M%S")
                log_file = Path(run_config.output_dir) / f"hybrid_{run_config.solver_type}_{timestamp}.log"
            if log_file:
                # Create the parent directory for explicitly supplied log
                # files too, not only for the auto-generated path.
                Path(log_file).parent.mkdir(parents=True, exist_ok=True)

            # Execute solver
            self.logger.info(f"Executing: {' '.join(command)}")
            self.logger.info(f"Working directory: {working_dir}")

            stdout_target = subprocess.PIPE if run_config.capture_output else None
            stderr_target = subprocess.PIPE if run_config.capture_output else None

            # Start the child in its own session/process group (setsid) so
            # that a timeout can kill the whole MPI tree, not just mpirun.
            process = subprocess.Popen(
                command,
                cwd=working_dir,
                env=env,
                stdout=stdout_target,
                stderr=stderr_target,
                text=True,
                preexec_fn=os.setsid if hasattr(os, 'setsid') else None
            )

            result.pid = process.pid
            self._active_processes[process.pid] = process

            # Monitor execution
            try:
                stdout, stderr = process.communicate(timeout=run_config.timeout)

                result.stdout = stdout or ""
                result.stderr = stderr or ""
                result.combined_output = f"{result.stdout}\n{result.stderr}"
                result.exit_code = process.returncode

                # Write to log file if specified
                if log_file:
                    self._write_log_file(log_file, result, command, env)
                    result.log_file = log_file

                # Determine status
                if process.returncode == 0:
                    result.status = RunStatus.SUCCESS
                    # result.duration is only computed in the outer finally,
                    # so measure the elapsed time locally for the log line.
                    self.logger.info(f"Solver completed successfully in {time.time() - start_time:.2f}s")
                else:
                    result.status = RunStatus.FAILURE
                    result.error_message = f"Solver failed with exit code {process.returncode}"
                    self.logger.error(result.error_message)

                # Extract metrics from output
                result.metrics = self._extract_performance_metrics(result.stdout, result.stderr)

            except subprocess.TimeoutExpired:
                result.timeout_occurred = True
                result.status = RunStatus.TIMEOUT
                result.error_message = f"Solver timed out after {run_config.timeout} seconds"

                # Terminate process tree
                self._terminate_process_tree(process)

                # Reap the killed child (avoids leaving a zombie) and
                # salvage any partial output it produced before the timeout.
                try:
                    stdout, stderr = process.communicate(timeout=5)
                    result.stdout = stdout or ""
                    result.stderr = stderr or ""
                    result.combined_output = f"{result.stdout}\n{result.stderr}"
                except (subprocess.TimeoutExpired, OSError, ValueError):
                    pass  # Best effort only; the run already failed.

                self.logger.error(result.error_message)

            except KeyboardInterrupt:
                result.status = RunStatus.CANCELLED
                result.error_message = "Solver execution cancelled by user"
                self._terminate_process_tree(process)
                self.logger.warning(result.error_message)

            finally:
                # Cleanup process tracking (pop is a no-op if already gone).
                self._active_processes.pop(process.pid, None)

        except Exception as e:
            result.error_message = f"Execution error: {str(e)}"
            result.status = RunStatus.FAILURE
            self.logger.error(result.error_message, exc_info=True)

        finally:
            result.end_time = time.time()
            result.duration = result.end_time - start_time

            # Cleanup environment
            self.cleanup_environment(run_config)

        return result

    def check_environment(self) -> EnvironmentResult:
        """Validate runtime environment.

        Checks system resources, the MPI launcher, the PETSc/Kokkos
        environment variables, and GPU availability.

        Returns:
            EnvironmentResult; ``valid`` is False when a hard dependency
            (MPI launcher, PETSC_DIR) is missing or broken.
        """
        self.logger.info("Checking hybrid execution environment...")

        environment_issues = []
        warnings = []
        system_info = {}

        # Check system resources (informational only; failure is a warning)
        try:
            system_info["cpu_count"] = psutil.cpu_count()
            system_info["memory_gb"] = psutil.virtual_memory().total / (1024**3)
            system_info["available_memory_gb"] = psutil.virtual_memory().available / (1024**3)
        except Exception as e:
            warnings.append(f"Could not get system info: {e}")

        # Check MPI availability.  text=True is required: without it
        # stdout is bytes and .split('\n') would raise TypeError.
        try:
            mpi_probe = subprocess.run(
                ["mpirun", "--version"], capture_output=True, text=True, timeout=10
            )
            if mpi_probe.returncode == 0:
                system_info["mpi_version"] = mpi_probe.stdout.split('\n')[0]
            else:
                # mpirun exists but is broken; treat it as unavailable.
                environment_issues.append("MPI not available")
        except (FileNotFoundError, subprocess.TimeoutExpired, OSError):
            environment_issues.append("MPI not available")

        # Check PETSc environment
        petsc_dir = os.environ.get("PETSC_DIR")
        if not petsc_dir or not Path(petsc_dir).exists():
            environment_issues.append("PETSC_DIR not set or invalid")
        else:
            system_info["petsc_dir"] = petsc_dir
            system_info["petsc_arch"] = os.environ.get("PETSC_ARCH", "not set")

        # Check Kokkos (optional; absence is not an error)
        kokkos_root = os.environ.get("Kokkos_ROOT")
        if kokkos_root and Path(kokkos_root).exists():
            system_info["kokkos_root"] = kokkos_root

        # Check GPU environment (optional; absence is not an error)
        gpu_info = self._check_gpu_environment()
        system_info["gpu"] = gpu_info
        if gpu_info["cuda_available"] or gpu_info["hip_available"]:
            system_info["gpu_backend_available"] = True

        # Determine overall status
        if environment_issues:
            status = EnvironmentStatus.MISSING_DEPS
            valid = False
        else:
            status = EnvironmentStatus.VALID
            valid = True

        return EnvironmentResult(
            status=status,
            valid=valid,
            missing_dependencies=[],  # Hard failures are reported via environment_issues
            environment_issues=environment_issues,
            warnings=warnings,
            system_info=system_info
        )

    def _check_gpu_environment(self) -> Dict[str, Any]:
        """Check GPU environment and capabilities.

        Probes `nvidia-smi` (CUDA) and `rocm-smi` (HIP).  All probe
        failures are swallowed: a machine without GPUs is a valid host.

        Returns:
            Dict with keys ``cuda_available``, ``hip_available``,
            ``devices`` (list of `nvidia-smi -L` lines) and
            ``driver_version`` (currently always None; reserved).
        """
        gpu_info = {
            "cuda_available": False,
            "hip_available": False,
            "devices": [],
            "driver_version": None
        }

        # Check CUDA via nvidia-smi
        try:
            result = subprocess.run(["nvidia-smi", "--version"], capture_output=True, timeout=5)
            if result.returncode == 0:
                gpu_info["cuda_available"] = True

                # Enumerate devices ("GPU 0: ..." lines)
                device_result = subprocess.run(
                    ["nvidia-smi", "-L"], capture_output=True, text=True, timeout=10
                )
                if device_result.returncode == 0:
                    devices = []
                    for line in device_result.stdout.split('\n'):
                        if line.startswith('GPU'):
                            devices.append(line.strip())
                    gpu_info["devices"] = devices

        except (subprocess.CalledProcessError, FileNotFoundError, subprocess.TimeoutExpired):
            pass  # No NVIDIA stack present

        # Check HIP via rocm-smi
        try:
            result = subprocess.run(["rocm-smi", "--version"], capture_output=True, timeout=5)
            if result.returncode == 0:
                gpu_info["hip_available"] = True
        except (subprocess.CalledProcessError, FileNotFoundError, subprocess.TimeoutExpired):
            pass  # No ROCm stack present

        return gpu_info

    def _setup_gpu_environment(self, env: Dict[str, str], run_config: RunConfig) -> Dict[str, str]:
        """Setup GPU-specific environment variables.

        Restricts device visibility (CUDA_VISIBLE_DEVICES /
        HIP_VISIBLE_DEVICES) when ``run_config.gpu_devices`` is set, and
        selects the Kokkos execution device for the chosen backend.

        Args:
            env: Environment mapping to mutate.
            run_config: Run configuration providing backend and device list.

        Returns:
            The (mutated) environment mapping.
        """
        if run_config.gpu_backend == "cuda":
            # CUDA environment setup
            if run_config.gpu_devices:
                env["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, run_config.gpu_devices))

            # Kokkos CUDA settings
            env["KOKKOS_DEVICE"] = "Cuda"

        elif run_config.gpu_backend == "hip":
            # HIP environment setup
            if run_config.gpu_devices:
                env["HIP_VISIBLE_DEVICES"] = ",".join(map(str, run_config.gpu_devices))

            # Kokkos HIP settings
            env["KOKKOS_DEVICE"] = "HIP"

        return env

    def _terminate_process_tree(self, process: subprocess.Popen):
        """Terminate the process and everything in its process group.

        The child is started with os.setsid, so signalling its process
        group also reaches any MPI ranks it spawned.  SIGTERM first for a
        graceful shutdown, then SIGKILL to catch stragglers.
        """
        try:
            if hasattr(os, 'killpg'):
                # Resolve the group id once up front: after the leader dies
                # a second getpgid() would raise and the SIGKILL escalation
                # for surviving group members would be skipped.
                pgid = os.getpgid(process.pid)
                os.killpg(pgid, signal.SIGTERM)
                time.sleep(2)  # Give time for graceful shutdown
                os.killpg(pgid, signal.SIGKILL)
            else:
                # Fallback for systems without process groups
                process.terminate()
                time.sleep(2)
                process.kill()
        except (ProcessLookupError, OSError):
            pass  # Process (group) already terminated

    def _extract_performance_metrics(self, stdout: str, stderr: str) -> Dict[str, Any]:
        """Extract performance metrics from solver output.

        Scans combined stdout/stderr line by line for iteration/residual
        pairs, timing, convergence markers, and memory usage.  Later lines
        overwrite earlier ones, so the final values win.

        Args:
            stdout: Captured standard output.
            stderr: Captured standard error.

        Returns:
            Dict of parsed metrics (possibly empty).
        """
        metrics = {}

        # Combine output for parsing
        output = f"{stdout}\n{stderr}"
        lines = output.split('\n')

        for line in lines:
            line = line.strip()

            # Look for common performance patterns, e.g.
            # "Iteration 42 Residual 1.2e-8": token after the keyword.
            if "Iteration" in line and "Residual" in line:
                parts = line.split()
                for i, part in enumerate(parts):
                    if part.lower() == "iteration" and i + 1 < len(parts):
                        try:
                            metrics["final_iteration"] = int(parts[i + 1])
                        except ValueError:
                            pass  # Non-numeric token; skip silently
                    elif part.lower() == "residual" and i + 1 < len(parts):
                        try:
                            metrics["final_residual"] = float(parts[i + 1])
                        except ValueError:
                            pass

            # Look for timing information ("... 1.23 seconds")
            elif "Time" in line and "seconds" in line:
                parts = line.split()
                for i, part in enumerate(parts):
                    if "seconds" in part.lower() and i > 0:
                        try:
                            metrics["solver_time"] = float(parts[i - 1])
                        except ValueError:
                            pass

            # Look for convergence information
            elif "converged" in line.lower():
                metrics["converged"] = True
            elif "diverged" in line.lower():
                metrics["converged"] = False

            # Look for memory usage ("Memory ... 512 MB" / "... 2.5 GB")
            elif "Memory" in line and ("MB" in line or "GB" in line):
                parts = line.split()
                for i, part in enumerate(parts):
                    if "mb" in part.lower() and i > 0:
                        try:
                            metrics["memory_mb"] = float(parts[i - 1])
                        except ValueError:
                            pass
                    elif "gb" in part.lower() and i > 0:
                        try:
                            metrics["memory_gb"] = float(parts[i - 1])
                        except ValueError:
                            pass

        return metrics

    def _write_log_file(self, log_file: Path, result: RunResult,
                       command: List[str], env: Dict[str, str]):
        """Write execution log to file.

        Records the command, timing, status, metrics, the HPC-relevant
        environment variables, and the captured stdout/stderr.  Failures
        are logged as warnings; logging must never break the run.

        Args:
            log_file: Destination path.
            result: Populated run result to dump.
            command: The command line that was executed.
            env: The environment the child ran with.
        """
        try:
            with open(log_file, 'w') as f:
                f.write(f"Hybrid Solver Execution Log\n")
                f.write(f"{'='*50}\n\n")
                f.write(f"Command: {' '.join(command)}\n")
                f.write(f"Start Time: {time.ctime(result.start_time)}\n")
                f.write(f"Duration: {result.duration:.2f} seconds\n")
                f.write(f"Exit Code: {result.exit_code}\n")
                f.write(f"Status: {result.status.value}\n\n")

                if result.metrics:
                    f.write(f"Performance Metrics:\n")
                    for key, value in result.metrics.items():
                        f.write(f"  {key}: {value}\n")
                    f.write("\n")

                # Only dump the variables relevant to this stack; the full
                # environment would be noisy and may leak secrets.
                f.write(f"Environment Variables:\n")
                for key, value in sorted(env.items()):
                    if any(pattern in key.upper() for pattern in ['PETSC', 'KOKKOS', 'CUDA', 'HIP', 'MPI']):
                        f.write(f"  {key}={value}\n")
                f.write("\n")

                f.write(f"Standard Output:\n")
                f.write(f"{'-'*30}\n")
                f.write(result.stdout)
                f.write(f"\n{'-'*30}\n\n")

                f.write(f"Standard Error:\n")
                f.write(f"{'-'*30}\n")
                f.write(result.stderr)
                f.write(f"\n{'-'*30}\n")

        except Exception as e:
            self.logger.warning(f"Failed to write log file {log_file}: {e}")

    def build_command_line(self, run_config: RunConfig) -> List[str]:
        """Build command line for execution.

        Assembles: [mpi launcher] binary [--config ...] --solver ...
        --max-iterations ... --tolerance ... [flags] [extra args].

        Args:
            run_config: Run configuration to translate into arguments.

        Returns:
            Argument list suitable for subprocess.Popen (no shell).
        """
        cmd = []

        # Add MPI wrapper only for true multi-process runs
        if run_config.mpi_processes > 1:
            cmd.extend([run_config.mpi_command, "-np", str(run_config.mpi_processes)])
            cmd.extend(run_config.mpi_args)

        # Add the binary
        cmd.append(str(run_config.binary_path))

        # Add configuration file if specified
        if run_config.config_file:
            cmd.extend(["--config", str(run_config.config_file)])

        # Add solver-specific arguments
        cmd.extend(["--solver", run_config.solver_type])
        cmd.extend(["--max-iterations", str(run_config.max_iterations)])
        cmd.extend(["--tolerance", str(run_config.tolerance)])

        # Add debug/verbose flags
        if run_config.debug_mode:
            cmd.append("--debug")
        if run_config.verbose:
            cmd.append("--verbose")

        # Add GPU backend
        if run_config.gpu_backend:
            cmd.extend(["--gpu-backend", run_config.gpu_backend])

        # Add output directory
        if run_config.output_dir:
            cmd.extend(["--output-dir", str(run_config.output_dir)])

        # Add extra arguments (caller-provided passthrough, appended last)
        cmd.extend(run_config.extra_args)

        return cmd

    def get_supported_solvers(self) -> List[str]:
        """Get list of supported solver types."""
        return ["gcr", "ca-gcr", "gmres", "bca-gmres"]

    def supports_parallel_execution(self) -> bool:
        """Check if runner supports parallel execution (MPI)."""
        return True

    def supports_gpu_execution(self) -> bool:
        """Check if runner supports GPU execution (CUDA/HIP)."""
        return True