"""
NS-SEM Solver Runner Implementation

Handles running NS-SEM solver tests and simulations.
"""

import os
import subprocess
from pathlib import Path
from typing import Dict, List, Any, Optional
import logging
import tempfile
import xml.etree.ElementTree as ET

from gcr_solver_manager.extensions.base_runner import BaseRunner, TestResult

logger = logging.getLogger(__name__)


class NSsemRunner(BaseRunner):
    """
    Runner for NS-SEM (Navier-Stokes Spectral Element Method) solver.
    
    Handles execution of NS-SEM tests, simulations, and benchmarks
    with support for MPI, GPU backends, and various solver algorithms.
    """
    
    def __init__(self, extension_name: str):
        """Set up the NS-SEM runner and record the test cases it supports."""
        super().__init__(extension_name)
        # Identifiers of the NS-SEM test cases this runner knows how to execute.
        self.supported_tests = list(
            (
                "tensor_mat19",
                "multi_cavity_matrix",
                "petsc_integration",
                "fdm_boundary_comm",
                "multi_cavity_boundary_comm",
            )
        )
    
    def can_run(self, executable_path: str) -> bool:
        """
        Decide whether this runner is able to execute the given binary.

        Args:
            executable_path: Path to executable

        Returns:
            True if runner can handle this executable
        """
        exe = Path(executable_path)
        if not exe.exists():
            return False

        # Compare the lower-cased file name against known NS-SEM name patterns.
        lowered = exe.name.lower()
        recognized = (
            "test_tensor_mat",
            "test_multi_cavity",
            "test_petsc_integration",
            "test_fdm_boundary",
            "ns_sem",
            "test_ns_sem",
        )

        if any(pattern in lowered for pattern in recognized):
            logger.debug(f"NS-SEM runner can handle: {executable_path}")
            return True

        return False
    
    def detect_test_type(self, executable_path: str) -> str:
        """
        Classify the test type from the executable's file name.

        Args:
            executable_path: Path to executable

        Returns:
            Test type string ("unknown" when no pattern matches)
        """
        lowered = Path(executable_path).name.lower()

        # Ordered (substring, type) pairs; the first match wins.
        classifications = (
            ("tensor_mat", "tensor_matrix"),
            ("multi_cavity_matrix", "multi_cavity_matrix"),
            ("petsc_integration", "petsc_integration"),
            ("fdm_boundary", "boundary_communication"),
            ("multi_cavity_boundary", "multi_cavity_boundary"),
        )
        for marker, test_type in classifications:
            if marker in lowered:
                return test_type

        return "unknown"
    
    def prepare_environment(self, config: Dict[str, Any]) -> Dict[str, str]:
        """
        Build the environment variable mapping used for execution.

        Args:
            config: Execution configuration

        Returns:
            Environment variables dictionary
        """
        env = os.environ.copy()

        # OpenMP thread count (defaults to 4 when unspecified).
        env["OMP_NUM_THREADS"] = str(config.get("openmp_threads", 4))

        # PETSc command-line options; fall back to a CG-solver default set
        # when the caller supplies none.
        petsc_options = config.get("petsc_options", []) or [
            "-log_view",
            "-pc_type", "none",
            "-ksp_type", "cg",
            "-ksp_rtol", "1e-6",
            "-ksp_max_it", "1000",
        ]
        env["PETSC_OPTIONS"] = " ".join(petsc_options)

        # GPU device visibility, keyed by the selected backend.
        if config.get("enable_gpu", False):
            backend = config.get("gpu_backend", "cuda")
            devices = str(config.get("gpu_devices", "0"))
            if backend == "cuda":
                env["CUDA_VISIBLE_DEVICES"] = devices
            elif backend == "hip":
                env["HIP_VISIBLE_DEVICES"] = devices

        # Kokkos backend selection.
        if config.get("enable_kokkos", False):
            env["KOKKOS_BACKEND"] = config.get("kokkos_backend", "openmp")

        logger.debug(f"Environment prepared: OMP_NUM_THREADS={env.get('OMP_NUM_THREADS')}, "
                    f"PETSC_OPTIONS={env.get('PETSC_OPTIONS')}")

        return env
    
    def run_test(self, executable_path: str, config: Dict[str, Any]) -> TestResult:
        """
        Run a single NS-SEM test.

        Args:
            executable_path: Path to test executable
            config: Test configuration. Recognized keys include
                ``mpi_processes``, ``mpi_options``, ``test_args``,
                ``test_timeout`` (seconds, default 600) and
                ``working_directory``, plus the environment-related keys
                consumed by ``prepare_environment``.

        Returns:
            Test result with success/failure information
        """
        import time

        logger.info(f"Running NS-SEM test: {executable_path}")

        executable_path_obj = Path(executable_path)
        if not executable_path_obj.exists():
            return TestResult(
                success=False,
                message=f"Executable not found: {executable_path}",
                execution_time=0.0,
                outputs={},
                metrics={}
            )

        # Prepare execution environment (OMP/PETSc/GPU/Kokkos variables)
        env = self.prepare_environment(config)

        # Launch through mpirun only when more than one rank is requested
        mpi_processes = config.get("mpi_processes", 1)
        use_mpi = mpi_processes > 1

        # Build execution command
        if use_mpi:
            cmd = [
                "mpirun",
                "-np", str(mpi_processes),
                "--bind-to", "core",
            ]
            # mpirun options must come BEFORE the executable; anything placed
            # after the executable is forwarded to the test binary instead.
            # (The previous version appended them after the executable.)
            cmd.extend(config.get("mpi_options", []) or [])
            cmd.append(str(executable_path_obj))
        else:
            cmd = [str(executable_path_obj)]

        # Test-specific arguments always go to the test binary itself
        cmd.extend(config.get("test_args", []))

        logger.debug(f"Execution command: {' '.join(cmd)}")

        try:
            start_time = time.time()

            result = subprocess.run(
                cmd,
                env=env,
                capture_output=True,
                text=True,
                timeout=config.get("test_timeout", 600),  # 10 minutes default
                cwd=config.get("working_directory", executable_path_obj.parent)
            )

            execution_time = time.time() - start_time

            # A zero return code is the sole success criterion
            success = result.returncode == 0
            outputs = {
                "stdout": result.stdout,
                "stderr": result.stderr,
                "returncode": result.returncode
            }

            # Extract metrics from output
            metrics = self._extract_metrics(result.stdout, result.stderr)

            # Determine test result message
            if success:
                message = f"Test completed successfully in {execution_time:.2f}s"
                if metrics.get("iterations"):
                    message += f" ({metrics['iterations']} iterations)"
            else:
                message = f"Test failed with return code {result.returncode}"
                if result.stderr:
                    # Include first line of stderr in message
                    first_error = result.stderr.split('\n')[0]
                    message += f": {first_error}"

            logger.info(f"Test result: {message}")

            return TestResult(
                success=success,
                message=message,
                execution_time=execution_time,
                outputs=outputs,
                metrics=metrics
            )

        except subprocess.TimeoutExpired:
            logger.error(f"Test timed out: {executable_path}")
            return TestResult(
                success=False,
                message=f"Test timed out after {config.get('test_timeout', 600)} seconds",
                execution_time=config.get("test_timeout", 600),
                outputs={"error": "Test timed out"},
                metrics={}
            )
        except Exception as e:
            logger.error(f"Test execution error: {e}")
            return TestResult(
                success=False,
                message=f"Execution error: {str(e)}",
                execution_time=0.0,
                outputs={"error": str(e)},
                metrics={}
            )
    
    def run_simulation(self, executable_path: str, config_file: str, config: Dict[str, Any]) -> TestResult:
        """
        Run a NS-SEM simulation driven by a configuration file.

        Args:
            executable_path: Path to simulation executable
            config_file: Path to simulation configuration
            config: Execution configuration

        Returns:
            Test result with simulation information
        """
        import time

        logger.info(f"Running NS-SEM simulation: {executable_path} with config {config_file}")

        # The configuration file may itself carry execution parameters
        # (e.g. an MPI rank count) that serve as fallbacks.
        file_settings = self._parse_simulation_config(config_file)

        env = self.prepare_environment(config)

        # Explicit config wins over values read from the configuration file.
        ranks = config.get("mpi_processes", file_settings.get("mpi_processes", 1))

        command = ["mpirun", "-np", str(ranks)] if ranks > 1 else []
        command += [str(executable_path), "--config", config_file]
        command += config.get("simulation_args", [])

        logger.debug(f"Simulation command: {' '.join(command)}")

        timeout_s = config.get("simulation_timeout", 3600)  # 1 hour default

        try:
            started = time.time()
            proc = subprocess.run(
                command,
                env=env,
                capture_output=True,
                text=True,
                timeout=timeout_s,
                cwd=Path(executable_path).parent
            )
            elapsed = time.time() - started

            ok = proc.returncode == 0

            # Collect simulation metrics alongside the raw process output.
            metrics = self._extract_simulation_metrics(proc.stdout, proc.stderr)
            metrics["execution_time"] = elapsed

            return TestResult(
                success=ok,
                message=f"Simulation {'completed' if ok else 'failed'} in {elapsed:.2f}s",
                execution_time=elapsed,
                outputs={
                    "stdout": proc.stdout,
                    "stderr": proc.stderr,
                    "returncode": proc.returncode,
                    "config_file": config_file
                },
                metrics=metrics
            )

        except subprocess.TimeoutExpired:
            logger.error("Simulation timed out")
            return TestResult(
                success=False,
                message="Simulation timed out",
                execution_time=timeout_s,
                outputs={"error": "Simulation timed out"},
                metrics={}
            )
        except Exception as e:
            logger.error(f"Simulation execution error: {e}")
            return TestResult(
                success=False,
                message=f"Simulation error: {str(e)}",
                execution_time=0.0,
                outputs={"error": str(e)},
                metrics={}
            )
    
    def run_benchmark(self, executable_path: str, config: Dict[str, Any]) -> TestResult:
        """
        Run performance benchmarks.

        Executes the test multiple times and aggregates timing statistics
        over the iterations that succeeded.

        Args:
            executable_path: Path to benchmark executable
            config: Benchmark configuration (``benchmark_iterations``
                defaults to 3; remaining keys are passed to ``run_test``)

        Returns:
            Test result with benchmark metrics
        """
        logger.info(f"Running NS-SEM benchmark: {executable_path}")

        # Run multiple iterations for statistical significance
        iterations = config.get("benchmark_iterations", 3)
        results = []

        for i in range(iterations):
            logger.debug(f"Benchmark iteration {i+1}/{iterations}")
            result = self.run_test(executable_path, config)
            if result.success:
                results.append(result)
            else:
                logger.warning(f"Benchmark iteration {i+1} failed: {result.message}")

        if not results:
            return TestResult(
                success=False,
                message="All benchmark iterations failed",
                execution_time=0.0,
                outputs={},
                metrics={}
            )

        # Compute aggregate timing over the successful runs only
        total_time = sum(r.execution_time for r in results)
        avg_time = total_time / len(results)

        aggregated_metrics = {
            "benchmark_iterations": len(results),
            "average_execution_time": avg_time,
            "total_execution_time": total_time,
            "success_rate": len(results) / iterations
        }

        # Aggregate every numeric metric seen in ANY successful run.  The
        # previous implementation only inspected the first run's keys, so
        # metrics appearing only in later iterations were silently dropped,
        # and non-numeric values in later runs could break min/max.
        numeric_keys = set()
        for r in results:
            for key, value in r.metrics.items():
                if isinstance(value, (int, float)):
                    numeric_keys.add(key)

        for key in sorted(numeric_keys):
            values = [
                r.metrics[key] for r in results
                if key in r.metrics and isinstance(r.metrics[key], (int, float))
            ]
            if values:
                aggregated_metrics[f"avg_{key}"] = sum(values) / len(values)
                aggregated_metrics[f"min_{key}"] = min(values)
                aggregated_metrics[f"max_{key}"] = max(values)

        return TestResult(
            success=True,
            message=f"Benchmark completed: {len(results)}/{iterations} iterations successful",
            execution_time=avg_time,
            outputs={"benchmark_summary": f"{len(results)} successful iterations"},
            metrics=aggregated_metrics
        )
    
    def _extract_metrics(self, stdout: str, stderr: str) -> Dict[str, Any]:
        """
        Extract performance metrics from solver output.

        Scans the combined stdout/stderr for iteration counts, convergence
        status, residual norms and memory usage.

        Args:
            stdout: Captured standard output
            stderr: Captured standard error

        Returns:
            Dictionary of parsed metrics; empty when nothing was recognized
        """
        metrics: Dict[str, Any] = {}

        # Look for common patterns in output
        lines = (stdout + '\n' + stderr).split('\n')

        for line in lines:
            line = line.strip().lower()

            # Iteration count, e.g. "iterations: 42" or "iter: 42"
            if "iterations:" in line or "iter:" in line:
                parts = line.split()
                for i, part in enumerate(parts):
                    # startswith("iter") matches both spellings; the previous
                    # substring test for "iteration" never matched "iter:",
                    # making that trigger dead code.
                    if part.startswith("iter") and i + 1 < len(parts):
                        try:
                            metrics["iterations"] = int(parts[i + 1])
                        except ValueError:
                            continue  # keep scanning for a numeric token
                        break

            # Convergence information
            if "converged" in line:
                metrics["converged"] = True
            elif "failed to converge" in line:
                metrics["converged"] = False

            # Residual norm: take the first scientific-notation token
            if "residual norm" in line or "final residual" in line:
                for part in line.split():
                    if 'e-' in part or 'e+' in part:
                        try:
                            metrics["final_residual"] = float(part)
                        except ValueError:
                            continue  # token only resembled scientific notation
                        break

            # Memory usage, e.g. "memory usage: 512 mb" or "memory usage: 2 gb"
            if "memory usage" in line:
                parts = line.split()
                for i, part in enumerate(parts):
                    if part in ["mb", "gb", "memory"] and i > 0:
                        try:
                            amount = float(parts[i - 1])
                        except ValueError:
                            continue  # e.g. a word preceding "memory"
                        if part == "gb":
                            # Normalize to megabytes -- the key promises MB,
                            # but the old code stored GB figures unchanged.
                            amount *= 1024.0
                        metrics["memory_mb"] = amount
                        break

        return metrics
    
    def _extract_simulation_metrics(self, stdout: str, stderr: str) -> Dict[str, Any]:
        """
        Extract simulation-specific metrics from output.

        Extends the generic metrics with the time-step size and Reynolds
        number when they appear in the solver's log lines.

        Args:
            stdout: Captured standard output
            stderr: Captured standard error

        Returns:
            Dictionary of parsed metrics (superset of ``_extract_metrics``)
        """
        metrics = self._extract_metrics(stdout, stderr)

        def number_after(parts: List[str], idx: int, lookahead: int = 3) -> Optional[float]:
            # First parseable float within `lookahead` tokens after
            # parts[idx], tolerating separator tokens such as "=" or ":".
            for token in parts[idx + 1:idx + 1 + lookahead]:
                try:
                    return float(token.strip('=:,;'))
                except ValueError:
                    continue
            return None

        # Additional simulation-specific metrics
        lines = (stdout + '\n' + stderr).split('\n')

        for line in lines:
            line = line.strip().lower()

            # Time step, e.g. "dt = 0.001" or "timestep: 0.01".  The previous
            # code parsed the token immediately after "dt", so the "=" in
            # "dt = 0.001" made extraction fail every time.
            if "time step" in line or "dt =" in line:
                parts = line.split()
                for i, part in enumerate(parts):
                    if part.rstrip(':=') in ("dt", "timestep"):
                        value = number_after(parts, i)
                        if value is not None:
                            metrics["time_step"] = value
                            break

            # Reynolds number, e.g. "reynolds number: 100" or "re = 100".
            # The previous inner match required the substring "reynolds",
            # so the "re =" trigger could never extract a value.
            if "reynolds" in line or "re =" in line:
                parts = line.split()
                for i, part in enumerate(parts):
                    if "reynolds" in part or part.rstrip(':=') == "re":
                        value = number_after(parts, i)
                        if value is not None:
                            metrics["reynolds_number"] = value
                            break

        return metrics
    
    def _parse_simulation_config(self, config_file: str) -> Dict[str, Any]:
        """
        Best-effort parse of a simulation configuration file.

        Supports XML Parameter files and simple ``key=value`` text files.
        Any parsing problem is logged at debug level and whatever was
        successfully parsed so far is returned.
        """
        parsed: Dict[str, Any] = {}
        path = Path(config_file)

        if not path.exists():
            return parsed

        try:
            if path.suffix.lower() == '.xml':
                # XML configuration: each known Parameter element maps onto
                # one config entry.
                root = ET.parse(path).getroot()

                for element in root.findall(".//Parameter[@name='total_processes']"):
                    parsed["mpi_processes"] = int(element.get("value", 1))

                for element in root.findall(".//Parameter[@name='max_iterations']"):
                    parsed["max_iterations"] = int(element.get("value", 1000))

                for element in root.findall(".//Parameter[@name='convergence_tol']"):
                    parsed["tolerance"] = float(element.get("value", 1e-6))

            else:
                # Plain key=value lines; lines starting with '#' are comments.
                for raw_line in path.read_text().split('\n'):
                    if '=' not in raw_line or raw_line.strip().startswith('#'):
                        continue

                    key, _, value = raw_line.partition('=')
                    key = key.strip().lower()
                    value = value.strip()

                    if key in ("mpi_processes", "nproc"):
                        parsed["mpi_processes"] = int(value)
                    elif key in ("max_iterations", "max_iter"):
                        parsed["max_iterations"] = int(value)
                    elif key in ("tolerance", "tol"):
                        parsed["tolerance"] = float(value)

        except Exception as e:
            logger.debug(f"Error parsing config file {config_file}: {e}")

        return parsed