"""
GCR-Julia Runner Implementation.

Executes GCR-Julia solvers with various configurations and backends.
Handles job execution, environment setup, and result collection.
"""

import json
import logging
import os
import shutil
import subprocess
import tempfile
import time
from pathlib import Path
from typing import Any, Dict, List, Optional, Union

from gcr_solver_manager.extensions.base_runner import (
    BaseRunner,
    EnvironmentResult,
    ExecutionResult,
    RunConfig,
    RunResult,
)

logger = logging.getLogger(__name__)


class GCRJuliaRunner(BaseRunner):
    """Runner for GCR-Julia solver executions.

    Generates a standalone Julia script from a :class:`RunConfig`, executes it
    with ``julia`` (optionally under ``mpiexec``), and collects results and
    metrics from the JSON files the script writes.
    """

    def __init__(self, gcr_julia_path: Path):
        """Initialize the runner.

        Args:
            gcr_julia_path: Path to the GCR-Julia project (used as the Julia
                project/working directory).
        """
        self.gcr_julia_path = gcr_julia_path
        # Per-run scratch directory; created in setup_environment().
        self.temp_dir: Optional[str] = None

    def setup_environment(self, config: RunConfig) -> EnvironmentResult:
        """Set up the execution environment for a run.

        Creates a temporary directory and computes the environment variables
        (Julia project, threading, GPU device selection, MPI flags) needed by
        the solver process. Failures are reported via ``result.success`` and
        ``result.warnings`` rather than raised.
        """
        result = EnvironmentResult(
            success=True,
            environment_vars={},
            working_directory=str(self.gcr_julia_path),
            temporary_files=[],
            cleanup_commands=[],
            warnings=[]
        )

        try:
            # Create temporary directory for this run
            self.temp_dir = tempfile.mkdtemp(prefix="gcr_julia_run_")
            result.temporary_files.append(self.temp_dir)
            result.cleanup_commands.append(f"rm -rf {self.temp_dir}")

            system_config = config.config.get("system", {})
            threads = system_config.get("threads", 0)

            # Set Julia environment variables. JULIA_NUM_THREADS=0 is not a
            # valid value, so a 0 ("let Julia decide") maps to "auto" — the
            # same convention _build_julia_command uses for --threads.
            result.environment_vars.update({
                "JULIA_PROJECT": str(self.gcr_julia_path),
                "JULIA_NUM_THREADS": str(threads) if threads > 0 else "auto",
                "GCR_JULIA_TEMP_DIR": self.temp_dir,
            })

            # GPU-specific environment setup: pin the visible device so the
            # Julia process only sees the configured GPU.
            backend = system_config.get("backend", "cpu")
            if "gpu" in backend:
                gpu_device = system_config.get("gpu_device", 0)
                if "cuda" in backend:
                    result.environment_vars["CUDA_VISIBLE_DEVICES"] = str(gpu_device)
                elif "amdgpu" in backend:
                    result.environment_vars["HIP_VISIBLE_DEVICES"] = str(gpu_device)

            # MPI setup
            if "mpi" in backend:
                mpi_processes = system_config.get("mpi_processes", 1)
                result.environment_vars["OMPI_ALLOW_RUN_AS_ROOT"] = "1"  # Common in containers
                # NOTE(review): `metadata` is not passed to the EnvironmentResult
                # constructor above — confirm the type declares this attribute.
                result.metadata = {"mpi_processes": mpi_processes}

            # Performance settings
            if config.config.get("performance", {}).get("benchmark_mode", False):
                result.environment_vars["JULIA_CPU_TARGET"] = "native"
                result.environment_vars["JULIA_OPTIMIZE"] = "3"

        except Exception as e:
            logger.error(f"Environment setup failed: {e}")
            result.success = False
            result.warnings.append(f"Environment setup error: {e}")

        return result

    def run(self, config: RunConfig) -> RunResult:
        """Execute the GCR-Julia solver and return a populated RunResult.

        Workflow: set up the environment, generate the Julia script, run it
        via subprocess with a timeout, then parse results/metrics from the
        output directory. Never raises — errors are captured in the result.
        """
        run_start_time = time.time()
        script_path: Optional[Path] = None

        result = RunResult(
            success=False,
            exit_code=-1,
            execution_time=0.0,
            output="",
            error="",
            artifacts=[],
            metrics={},
            metadata={}
        )

        try:
            # Setup environment
            env_result = self.setup_environment(config)
            if not env_result.success:
                result.error = f"Environment setup failed: {env_result.warnings}"
                return result

            # Create Julia script for execution
            julia_script = self._generate_julia_script(config)
            script_path = Path(self.temp_dir) / "run_gcr.jl"
            script_path.write_text(julia_script)

            result.artifacts.append({
                "name": "julia_script",
                "path": str(script_path),
                "type": "execution_script"
            })

            # Prepare Julia execution command
            julia_cmd = self._build_julia_command(config, script_path)

            # Run with the parent environment plus the run-specific variables.
            env = os.environ.copy()
            env.update(env_result.environment_vars)

            logger.info("Executing GCR-Julia: %s...", " ".join(julia_cmd[:3]))

            process_result = subprocess.run(
                julia_cmd,
                env=env,
                cwd=self.gcr_julia_path,
                capture_output=True,
                text=True,
                timeout=config.timeout or 3600  # Default 1 hour timeout
            )

            result.exit_code = process_result.returncode
            result.output = process_result.stdout
            result.error = process_result.stderr

            if result.exit_code == 0:
                result.success = True
                # Parse results written by the Julia script, then derive metrics.
                self._parse_results(config, result)
                self._extract_metrics(config, result)
            else:
                logger.error(
                    "GCR-Julia execution failed with exit code %s", result.exit_code
                )

        except subprocess.TimeoutExpired:
            result.error = "Execution timed out"
            result.exit_code = -2
        except Exception as e:
            result.error = f"Execution error: {e}"
            logger.error(f"Run failed: {e}")

        finally:
            # Always record wall-clock time and provenance, even on failure.
            result.execution_time = time.time() - run_start_time
            result.metadata.update({
                "runner": "GCRJuliaRunner",
                "julia_project": str(self.gcr_julia_path),
                "temp_dir": self.temp_dir,
                "script_path": str(script_path) if script_path is not None else None
            })

        return result

    @staticmethod
    def _jl_bool(value: Any) -> str:
        """Render a Python truth value as a Julia boolean literal."""
        return "true" if value else "false"

    def _backend_solve_snippet(
        self, backend: str, solve_function: str, system_config: Dict[str, Any]
    ) -> str:
        """Return the Julia statements that assign `solution` for a backend.

        Every branch (including the default) assigns `solution`, so the
        generated script can never reach the result section undefined.
        """
        cpu_snippet = (
            '    println("Executing on CPU...")\n'
            f"    solution = {solve_function}(A, b; solver_params...)"
        )

        if "cuda" in backend:
            return f'''    # CUDA GPU execution with CPU fallback
    try
        using CUDA
        if CUDA.has_cuda()
            println("Executing on CUDA GPU...")
            CUDA.device!({system_config.get("gpu_device", 0)})
            A_gpu = CUDA.CuSparseMatrixCSR(A)
            b_gpu = CuArray(b)
            solution = {solve_function}(A_gpu, b_gpu; solver_params...)
        else
            println("CUDA not available, falling back to CPU...")
            solution = {solve_function}(A, b; solver_params...)
        end
    catch e
        println("GPU execution failed, falling back to CPU: $e")
        solution = {solve_function}(A, b; solver_params...)
    end'''
        if "amdgpu" in backend:
            return f'''    # AMD GPU execution with CPU fallback
    try
        using AMDGPU
        if AMDGPU.has_rocm_gpu()
            println("Executing on AMD GPU...")
            A_gpu = ROCSparseMatrixCSR(A)
            b_gpu = ROCArray(b)
            solution = {solve_function}(A_gpu, b_gpu; solver_params...)
        else
            println("ROCm not available, falling back to CPU...")
            solution = {solve_function}(A, b; solver_params...)
        end
    catch e
        println("GPU execution failed, falling back to CPU: $e")
        solution = {solve_function}(A, b; solver_params...)
    end'''
        if "petsc" in backend:
            return f'''    # PETSc MPI execution with CPU fallback
    try
        using MPI
        MPI.Init()

        # Convert to PETSc format and solve
        solution = {solve_function}(A, b; solver_params...)

        MPI.Finalize()
    catch e
        println("PETSc execution failed, falling back to CPU: $e")
        solution = {solve_function}(A, b; solver_params...)
    end'''
        # Default (includes backend == "cpu" and any unrecognized backend).
        return cpu_snippet

    def _generate_julia_script(self, config: RunConfig) -> str:
        """Generate the Julia execution script for this run.

        Notable correctness points:
        - Python booleans are rendered as Julia ``true``/``false``.
        - The configuration is embedded as an escaped JSON string parsed with
          ``JSON.parse`` (raw JSON braces are not valid Julia syntax).
        - Optional sections (profiling wrapper, history, solution dump) are
          included or omitted entirely from Python, so the generated Julia is
          always syntactically balanced.
        """
        solver_config = config.config.get("solver", {})
        system_config = config.config.get("system", {})
        perf_config = config.config.get("performance", {})
        output_config = config.config.get("output", {})
        problem_config = config.config.get("problem", {})

        # Algorithm selection
        algorithm = solver_config.get("algorithm", "gcr")
        algorithm_map = {
            "gcr": "solve_gcr",
            "enhanced-gcr": "solve_enhanced_gcr",
            "ca-gcr": "solve_ca_gcr",
            "gmres": "solve_gmres",
            "bca-gmres": "solve_bca_gmres",
        }
        solve_function = algorithm_map.get(algorithm, "solve_gcr")

        # Backend setup
        backend = system_config.get("backend", "cpu")
        precision = system_config.get("precision", "Float64")
        profiling = bool(perf_config.get("enable_profiling", False))

        # Escape the JSON payload for embedding in a Julia double-quoted
        # string literal: backslash, quote, and interpolation dollar.
        config_json = json.dumps(config.config)
        jl_config = (
            config_json.replace("\\", "\\\\").replace('"', '\\"').replace("$", "\\$")
        )

        script = '''
# GCR-Julia Solver Execution Script
# Generated automatically by GSM GCR-Julia Runner

using Pkg
Pkg.activate(".")

using GCR
using LinearAlgebra
using JSON
using Printf
'''
        if profiling:
            script += "using Profile\n"

        script += f'''
# Configuration (embedded as an escaped JSON string)
const CONFIG = JSON.parse("{jl_config}")

# Output directory setup
output_dir = "{output_config.get("output_directory", "./output")}"
mkpath(output_dir)

# Results storage
results = Dict{{String, Any}}()
results["config"] = CONFIG
results["start_time"] = time()

try
    println("Starting GCR-Julia solver execution...")
    println("Algorithm: {algorithm}")
    println("Backend: {backend}")
    println("Precision: {precision}")

    # Problem setup (using configuration or default test problem)
    problem_config = get(CONFIG, "problem", Dict())
    n = get(problem_config, "problem_size", 1000)

    println("Setting up problem of size $n...")

    # Generate test matrix based on configuration
    matrix_type = get(problem_config, "matrix_type", "sparse")
    if haskey(problem_config, "condition_number")
        A = generate_test_matrix({precision}, n;
                                 type=:random,
                                 condition_number={problem_config.get("condition_number", 1e3)})
    else
        # Default to Laplacian for testing when n is a perfect square
        grid_size = isqrt(n)
        if grid_size^2 == n
            A = generate_test_matrix({precision}, n; type=:laplacian)
        else
            A = generate_test_matrix({precision}, n; type=:random)
        end
    end

    # Right-hand side
    b = rand({precision}, n)

    println("Matrix generated: $(size(A))")
    println("Matrix type: $(typeof(A))")
    println("Condition estimate: $(cond(Matrix(A[1:min(10,n), 1:min(10,n)])))")

    # Solver parameters
    solver_params = Dict{{Symbol, Any}}()
    solver_params[:tol] = {solver_config.get("tolerance", 1e-6)}
    solver_params[:maxiter] = {solver_config.get("max_iterations", 1000)}
    solver_params[:verbose] = {self._jl_bool(solver_config.get("verbose", False))}
'''
        # Algorithm-specific parameters (decided at generation time).
        if algorithm in ("gmres", "bca-gmres"):
            script += f"    solver_params[:restart] = {solver_config.get('restart', 50)}\n"
        if algorithm == "ca-gcr":
            script += (
                '    advanced_config = get(CONFIG, "advanced", Dict())\n'
                '    solver_params[:s] = get(advanced_config, "ca_gcr_s_step", 8)\n'
            )
        if perf_config.get("save_history", False):
            script += "    solver_params[:save_history] = true\n"

        # Profiling wrapper: open and close are both emitted from Python so
        # the begin/end pairing is always balanced.
        if profiling:
            script += '''
    Profile.clear()
    Profile.init(n = 10^6, delay = 0.01)
    @profile begin
'''
        script += "\n" + self._backend_solve_snippet(backend, solve_function, system_config) + "\n"
        if profiling:
            script += "    end  # @profile block\n"

        script += f'''
    println("Solver completed successfully!")
    println("Converged in $(solution.iterations) iterations")
    println("Final residual norm: $(solution.residual_norm)")

    # Store results
    results["success"] = true
    results["iterations"] = solution.iterations
    results["residual_norm"] = solution.residual_norm
    results["converged"] = solution.converged
    results["algorithm"] = "{algorithm}"
    results["backend"] = "{backend}"
    results["precision"] = "{precision}"
    results["problem_size"] = n
    results["execution_time"] = time() - results["start_time"]

    # Additional metrics if available
    if hasfield(typeof(solution), :history) && solution.history !== nothing
        results["convergence_history"] = solution.history.residuals
    end
'''
        if output_config.get("save_solution", True):
            script += '''
    # Solution accuracy check and solution dump
    residual_check = norm(A * solution.x - b)
    results["solution_accuracy"] = residual_check
    println("Solution accuracy check: $residual_check")

    solution_file = joinpath(output_dir, "solution.json")
    solution_data = Dict(
        "x" => Array(solution.x),
        "residual_norm" => solution.residual_norm,
        "iterations" => solution.iterations
    )
    open(solution_file, "w") do f
        JSON.print(f, solution_data, 2)
    end
    results["solution_file"] = solution_file
'''
        if profiling:
            script += '''
    # Profiling results
    Profile.print(IOBuffer())  # Process profile data
    results["profiling_available"] = true
'''
        script += '''
catch e
    println("ERROR: Solver execution failed")
    println("Error details: $e")
    println("Stack trace:")
    Base.show_backtrace(stdout, catch_backtrace())

    results["success"] = false
    results["error"] = string(e)
    results["execution_time"] = time() - results["start_time"]
end

# Save results
results_file = joinpath(output_dir, "results.json")
open(results_file, "w") do f
    JSON.print(f, results, 2)
end

println("Results saved to: $results_file")

# Print summary
println("=" ^ 50)
println("GCR-Julia Execution Summary")
println("=" ^ 50)
if results["success"]
    println("✅ Status: SUCCESS")
    println("📊 Algorithm: $(results["algorithm"])")
    println("🔢 Iterations: $(results["iterations"])")
    println("📏 Residual: $(results["residual_norm"])")
    println("⏱️  Time: $(results["execution_time"]) seconds")
else
    println("❌ Status: FAILED")
    println("💥 Error: $(get(results, "error", "Unknown error"))")
end
println("=" ^ 50)
'''

        return script

    def _build_julia_command(self, config: RunConfig, script_path: Path) -> List[str]:
        """Build the Julia execution command line.

        Adds optimization flags in benchmark mode, threading flags, an
        optional heap-size hint, and wraps the command in ``mpiexec`` when an
        MPI backend with more than one process is configured.
        """
        cmd = [
            "julia",
            f"--project={self.gcr_julia_path}",
        ]

        system_config = config.config.get("system", {})
        perf_config = config.config.get("performance", {})

        # Performance optimizations
        if perf_config.get("benchmark_mode", False):
            cmd.extend(["--optimize=3", "--cpu-target=native"])

        # Threading: explicit count if positive, "auto" for 0, nothing for
        # negative values (treated as "use Julia's default").
        threads = system_config.get("threads", 0)
        if threads > 0:
            cmd.append(f"--threads={threads}")
        elif threads == 0:
            cmd.append("--threads=auto")

        # Memory limit: Julia has no hard memory cap, but a heap-size hint
        # encourages the GC to stay under the budget.
        memory_limit = perf_config.get("memory_limit_gb", 0)
        if memory_limit > 0:
            cmd.append(f"--heap-size-hint={int(memory_limit * 1024)}M")

        # Add script path
        cmd.append(str(script_path))

        # MPI wrapper if needed (must prefix the whole julia invocation)
        backend = system_config.get("backend", "cpu")
        if "mpi" in backend:
            mpi_processes = system_config.get("mpi_processes", 1)
            if mpi_processes > 1:
                cmd = ["mpiexec", "-n", str(mpi_processes)] + cmd

        return cmd

    def _parse_results(self, config: RunConfig, result: RunResult):
        """Parse the results.json the Julia script wrote, if present.

        Merges the parsed payload into ``result.metadata`` and registers the
        solution file as an artifact. Best-effort: parse failures are logged,
        not raised.
        """
        try:
            output_dir = Path(config.config.get("output", {}).get("output_directory", "./output"))
            results_file = output_dir / "results.json"

            if results_file.exists():
                with open(results_file, 'r') as f:
                    results_data = json.load(f)

                result.metadata.update(results_data)

                # Add solution file as artifact
                if "solution_file" in results_data:
                    result.artifacts.append({
                        "name": "solution",
                        "path": results_data["solution_file"],
                        "type": "solution_data"
                    })

        except Exception as e:
            logger.warning(f"Could not parse results: {e}")

    def _extract_metrics(self, config: RunConfig, result: RunResult):
        """Copy solver metrics out of metadata and derive a FLOP estimate.

        Best-effort: missing keys are simply skipped and failures are logged.
        """
        try:
            # Basic metrics lifted straight from the parsed results
            if "iterations" in result.metadata:
                result.metrics["iterations"] = result.metadata["iterations"]
            if "residual_norm" in result.metadata:
                result.metrics["final_residual"] = result.metadata["residual_norm"]
            if "solution_accuracy" in result.metadata:
                result.metrics["solution_accuracy"] = result.metadata["solution_accuracy"]
            if "execution_time" in result.metadata:
                result.metrics["solver_time"] = result.metadata["execution_time"]

            # Derived metric: rough O(n^2) per-iteration FLOP estimate.
            # Guard against a zero/neg solver time to avoid ZeroDivisionError.
            if "problem_size" in result.metadata and "execution_time" in result.metadata:
                n = result.metadata["problem_size"]
                t = result.metadata["execution_time"]
                if t > 0:
                    result.metrics["flops_estimate"] = (
                        n * n * result.metrics.get("iterations", 1)
                    ) / t

        except Exception as e:
            logger.warning(f"Could not extract metrics: {e}")

    def cleanup(self, config: RunConfig) -> bool:
        """Remove the run's temporary directory.

        Returns:
            True when cleanup succeeded (or nothing needed cleaning),
            False when removal failed.
        """
        try:
            if self.temp_dir and Path(self.temp_dir).exists():
                shutil.rmtree(self.temp_dir)
                logger.info(f"Cleaned up temporary directory: {self.temp_dir}")
                self.temp_dir = None
            return True
        except Exception as e:
            logger.error(f"Cleanup failed: {e}")
            return False

    def get_supported_features(self) -> List[str]:
        """Get list of supported features."""
        return [
            "cpu_execution",
            "gpu_cuda_execution",
            "gpu_amdgpu_execution",
            "mpi_distributed_execution",
            "mixed_precision",
            "convergence_monitoring",
            "performance_profiling",
            "custom_preconditioners",
            "multiple_algorithms",
            "result_validation"
        ]