"""
GCR-Julia Validator Implementation.

Validates configurations, dependencies, and system requirements for GCR-Julia execution.
"""

import json
import logging
import subprocess
from pathlib import Path
from typing import Dict, List, Optional, Any
import jsonschema

from gcr_solver_manager.extensions.base_validator import (
    BaseValidator,
    ValidationResult
)

logger = logging.getLogger(__name__)


class GCRJuliaValidator(BaseValidator):
    """Validator for GCR-Julia configurations and environment."""

    def __init__(self, gcr_julia_path: Path):
        """Set up a validator rooted at a GCR-Julia installation.

        Args:
            gcr_julia_path: Root directory of the GCR-Julia package
                (expected to contain a ``Project.toml``).
        """
        # Schema cache; loaded lazily on the first _get_schema() call.
        self._schema = None
        self.gcr_julia_path = gcr_julia_path
        
    def validate(self, config: "ValidationConfig") -> ValidationResult:
        """Validate GCR-Julia configuration and environment.

        Runs schema, dependency, and environment checks as enabled by the
        flags on *config*, plus an always-on consistency check, merging each
        partial report into a single aggregate ValidationResult.

        Args:
            config: Validation request carrying the solver configuration
                (``config.config``) and the ``validate_schema``,
                ``check_dependencies``, ``check_environment`` flags.

        Returns:
            Aggregated ValidationResult; ``valid`` is False when any check
            produced errors or the validation itself raised.
        """
        # NOTE(review): ValidationConfig is not imported at module level
        # (only BaseValidator and ValidationResult are), so an unquoted
        # annotation raised NameError when this module was imported. The
        # annotation is quoted as a forward reference to fix that; the
        # proper import should be added where ValidationConfig is defined.
        result = ValidationResult(
            valid=True,
            errors=[],
            warnings=[],
            suggestions=[],
            metadata={}
        )

        def _absorb(partial: Dict[str, Any]) -> None:
            # Merge one partial report dict into the aggregate result;
            # any errors mark the whole configuration invalid.
            result.errors.extend(partial.get("errors", []))
            result.warnings.extend(partial.get("warnings", []))
            result.suggestions.extend(partial.get("suggestions", []))
            result.metadata.update(partial.get("metadata", {}))
            if partial.get("errors"):
                result.valid = False

        try:
            if config.validate_schema:
                _absorb(self._validate_schema(config.config))

            if config.check_dependencies:
                _absorb(self._validate_dependencies(config.config))

            if config.check_environment:
                _absorb(self._validate_environment(config.config))

            # Consistency checks always run, regardless of the flags.
            _absorb(self._validate_consistency(config.config))

        except Exception as e:
            logger.error(f"Validation failed: {e}")
            result.valid = False
            result.errors.append(f"Validation exception: {e}")

        return result
    
    def _validate_schema(self, config: Dict[str, Any]) -> Dict[str, List[str]]:
        """Check *config* against the JSON schema, when one is available.

        Returns:
            Dict with "errors" (schema violations) and "warnings"
            (schema unavailable or unexpected validation failure).
        """
        errors: List[str] = []
        warnings: List[str] = []

        try:
            schema = self._get_schema()
            if schema:
                jsonschema.validate(config, schema)
            else:
                warnings.append("Configuration schema not available")
        except jsonschema.ValidationError as exc:
            errors.append(f"Schema validation error: {exc.message}")
        except jsonschema.SchemaError as exc:
            errors.append(f"Schema error: {exc.message}")
        except Exception as exc:
            # Defensive: schema loading/validation failed in an unforeseen way.
            warnings.append(f"Schema validation failed: {exc}")

        return {"errors": errors, "warnings": warnings}
    
    def _validate_dependencies(self, config: Dict[str, Any]) -> Dict[str, Any]:
        """Verify that Julia, the GCR-Julia package, and backend extras exist.

        Returns:
            Report dict with "errors", "warnings", and "suggestions" lists.
        """
        report: Dict[str, Any] = {"errors": [], "warnings": [], "suggestions": []}

        try:
            # Julia interpreter must be on PATH.
            julia = self._check_julia()
            if not julia["available"]:
                report["errors"].append("Julia not found. Please install Julia >=1.9.0")
                report["suggestions"].append("Visit https://julialang.org/downloads/ to install Julia")
            elif not julia["version_ok"]:
                report["warnings"].append(f"Julia version {julia['version']} detected. Version >=1.9.0 recommended for best compatibility.")

            # GCR-Julia package must exist on disk and be loadable.
            gcr = self._check_gcr_julia()
            if not gcr["available"]:
                report["errors"].append(f"GCR-Julia package not found at {self.gcr_julia_path}")
                report["suggestions"].append("Ensure GCR-Julia package is properly installed")
            elif not gcr["loadable"]:
                report["errors"].append("GCR-Julia package cannot be loaded")
                report["suggestions"].append("Check GCR-Julia dependencies with: julia --project -e 'using Pkg; Pkg.status()'")

            # Optional Julia packages required by the configured backend.
            backend_report = self._check_backend_dependencies(
                config.get("system", {}).get("backend", "cpu"))
            report["warnings"].extend(backend_report.get("warnings", []))
            report["suggestions"].extend(backend_report.get("suggestions", []))

        except Exception as e:
            report["errors"].append(f"Dependency validation failed: {e}")

        return report
    
    def _validate_environment(self, config: Dict[str, Any]) -> Dict[str, Any]:
        """Validate execution environment."""
        result = {"warnings": [], "suggestions": [], "metadata": {}}
        
        try:
            # System resources
            system_config = config.get("system", {})
            
            # Memory validation
            memory_limit = config.get("performance", {}).get("memory_limit_gb", 0)
            problem_size = config.get("problem", {}).get("problem_size", 1000)
            
            if problem_size > 0:
                # Estimate memory requirements (rough)
                estimated_memory_gb = (problem_size ** 2 * 8) / (1024**3)  # 8 bytes per double
                
                if estimated_memory_gb > 1:
                    result["warnings"].append(f"Problem size {problem_size} may require ~{estimated_memory_gb:.1f}GB memory")
                    
                if memory_limit > 0 and estimated_memory_gb > memory_limit:
                    result["warnings"].append(f"Problem may exceed memory limit of {memory_limit}GB")
                    
            # Thread validation
            threads = system_config.get("threads", 0)
            if threads > 32:
                result["warnings"].append(f"High thread count ({threads}) may not improve performance for all problems")
                result["suggestions"].append("Consider testing with different thread counts for optimal performance")
                
            # GPU validation
            backend = system_config.get("backend", "cpu")
            if "gpu" in backend:
                gpu_result = self._check_gpu_environment(backend)
                result["warnings"].extend(gpu_result.get("warnings", []))
                result["suggestions"].extend(gpu_result.get("suggestions", []))
                result["metadata"].update(gpu_result.get("metadata", {}))
                
        except Exception as e:
            result["warnings"].append(f"Environment validation failed: {e}")
            
        return result
    
    def _validate_consistency(self, config: Dict[str, Any]) -> Dict[str, Any]:
        """Validate internal configuration consistency."""
        result = {"errors": [], "warnings": [], "suggestions": []}
        
        try:
            solver_config = config.get("solver", {})
            system_config = config.get("system", {})
            advanced_config = config.get("advanced", {})
            
            # Algorithm-specific validations
            algorithm = solver_config.get("algorithm", "gcr")
            
            # GMRES-specific
            if algorithm in ["gmres", "bca-gmres"]:
                restart = solver_config.get("restart", 50)
                max_iter = solver_config.get("max_iterations", 1000)
                
                if restart >= max_iter:
                    result["warnings"].append(f"GMRES restart ({restart}) >= max_iterations ({max_iter}). Consider adjusting.")
                    
                if restart < 10:
                    result["warnings"].append(f"Small restart value ({restart}) may hurt convergence")
                    result["suggestions"].append("Consider restart values between 20-100 for GMRES")
                    
            # CA-GCR specific
            if algorithm == "ca-gcr":
                s_step = advanced_config.get("ca_gcr_s_step", 8)
                if s_step < 2 or s_step > 50:
                    result["warnings"].append(f"CA-GCR s-step ({s_step}) outside typical range [2,50]")
                    
            # Precision and backend consistency
            precision = system_config.get("precision", "Float64")
            backend = system_config.get("backend", "cpu")
            
            if precision == "Float16" and backend == "cpu":
                result["warnings"].append("Float16 precision on CPU may be slower than Float32/Float64")
                result["suggestions"].append("Consider Float32 or Float64 for CPU execution")
                
            if "gpu" in backend and precision == "Float64":
                result["suggestions"].append("Consider Float32 precision for GPU execution to improve memory bandwidth")
                
            # Tolerance and iterations consistency
            tolerance = solver_config.get("tolerance", 1e-6)
            max_iter = solver_config.get("max_iterations", 1000)
            
            if tolerance < 1e-12 and precision != "Float64":
                result["warnings"].append(f"Tight tolerance ({tolerance}) may not be achievable with {precision} precision")
                
            if tolerance > 1e-3:
                result["warnings"].append(f"Loose tolerance ({tolerance}) may indicate suboptimal problem setup")
                
            if max_iter < 10:
                result["warnings"].append(f"Very low max_iterations ({max_iter}) may prevent convergence")
                
        except Exception as e:
            result["errors"].append(f"Consistency validation failed: {e}")
            
        return result
    
    def _check_julia(self) -> Dict[str, Any]:
        """Probe the ``julia`` executable on PATH.

        Returns:
            Dict with "available" (bool), "version" (raw version string or
            None), and "version_ok" (True when Julia reports >= 1.9.0).
        """
        info: Dict[str, Any] = {"available": False, "version": None, "version_ok": False}

        try:
            probe = subprocess.run(
                ["julia", "--version"],
                capture_output=True,
                text=True,
                timeout=10
            )
        except (subprocess.TimeoutExpired, FileNotFoundError):
            return info  # Julia binary missing or unresponsive.

        if probe.returncode != 0:
            return info

        info["available"] = True
        info["version"] = probe.stdout.strip()

        # Ask Julia itself to compare VERSION, avoiding fragile string parsing.
        try:
            compare = subprocess.run([
                "julia", "-e",
                "if VERSION >= v\"1.9.0\"; println(\"OK\"); else; println(\"OLD\"); end"
            ], capture_output=True, text=True, timeout=10)
        except Exception:
            return info  # Version check failed, but Julia is available.

        if compare.returncode == 0:
            info["version_ok"] = compare.stdout.strip() == "OK"

        return info
    
    def _check_gcr_julia(self) -> Dict[str, Any]:
        """Check GCR-Julia package availability."""
        result = {"available": False, "loadable": False}
        
        try:
            # Check package structure
            if (self.gcr_julia_path.exists() and 
                (self.gcr_julia_path / "Project.toml").exists()):
                result["available"] = True
                
                # Test loading
                try:
                    load_result = subprocess.run([
                        "julia",
                        f"--project={self.gcr_julia_path}",
                        "-e",
                        "using GCR; println(\"OK\")"
                    ], capture_output=True, text=True, timeout=30)
                    
                    if load_result.returncode == 0:
                        result["loadable"] = True
                        
                except subprocess.TimeoutExpired:
                    pass  # Loading test timed out
                    
        except Exception:
            pass  # Check failed
            
        return result
    
    def _check_backend_dependencies(self, backend: str) -> Dict[str, Any]:
        """Check backend-specific dependencies."""
        result = {"warnings": [], "suggestions": []}
        
        try:
            if "cuda" in backend:
                cuda_result = self._check_cuda()
                if not cuda_result["available"]:
                    result["warnings"].append("CUDA backend requested but CUDA.jl not available")
                    result["suggestions"].append("Install CUDA.jl: julia -e 'using Pkg; Pkg.add(\"CUDA\")'")
                    
            if "amdgpu" in backend:
                amdgpu_result = self._check_amdgpu()
                if not amdgpu_result["available"]:
                    result["warnings"].append("AMDGPU backend requested but AMDGPU.jl not available")
                    result["suggestions"].append("Install AMDGPU.jl: julia -e 'using Pkg; Pkg.add(\"AMDGPU\")'")
                    
            if "mpi" in backend:
                mpi_result = self._check_mpi()
                if not mpi_result["available"]:
                    result["warnings"].append("MPI backend requested but MPI.jl not available")
                    result["suggestions"].append("Install MPI.jl: julia -e 'using Pkg; Pkg.add(\"MPI\")'")
                    
        except Exception as e:
            result["warnings"].append(f"Backend dependency check failed: {e}")
            
        return result
    
    def _probe_julia(self, expr: str) -> bool:
        # Shared helper: run a Julia availability expression and report
        # whether it printed "true". Any failure (Julia missing, timeout,
        # nonzero exit printing nothing) yields False, matching the
        # behavior of the three previously copy-pasted probes.
        try:
            proc = subprocess.run(
                ["julia", "-e", expr],
                capture_output=True, text=True, timeout=15
            )
            return proc.stdout.strip() == "true"
        except Exception:
            return False

    def _check_cuda(self) -> Dict[str, bool]:
        """Check CUDA availability via CUDA.jl."""
        return {"available": self._probe_julia(
            "try; using CUDA; println(CUDA.has_cuda()); catch; println(false); end")}

    def _check_amdgpu(self) -> Dict[str, bool]:
        """Check AMDGPU availability via AMDGPU.jl."""
        return {"available": self._probe_julia(
            "try; using AMDGPU; println(AMDGPU.has_rocm_gpu()); catch; println(false); end")}

    def _check_mpi(self) -> Dict[str, bool]:
        """Check MPI availability via MPI.jl."""
        return {"available": self._probe_julia(
            "try; using MPI; println(true); catch; println(false); end")}
    
    def _check_gpu_environment(self, backend: str) -> Dict[str, Any]:
        """Check GPU environment specifics."""
        result = {"warnings": [], "suggestions": [], "metadata": {}}
        
        try:
            if "cuda" in backend:
                # Check CUDA devices
                cuda_info = subprocess.run([
                    "julia", "-e",
                    "try; using CUDA; println(length(CUDA.devices())); catch; println(0); end"
                ], capture_output=True, text=True, timeout=10)
                
                if cuda_info.returncode == 0:
                    num_devices = int(cuda_info.stdout.strip())
                    result["metadata"]["cuda_devices"] = num_devices
                    
                    if num_devices == 0:
                        result["warnings"].append("No CUDA devices found")
                        
            elif "amdgpu" in backend:
                # Check ROCm devices
                rocm_info = subprocess.run([
                    "julia", "-e", 
                    "try; using AMDGPU; println(length(AMDGPU.devices())); catch; println(0); end"
                ], capture_output=True, text=True, timeout=10)
                
                if rocm_info.returncode == 0:
                    num_devices = int(rocm_info.stdout.strip())
                    result["metadata"]["rocm_devices"] = num_devices
                    
                    if num_devices == 0:
                        result["warnings"].append("No ROCm devices found")
                        
        except Exception as e:
            result["warnings"].append(f"GPU environment check failed: {e}")
            
        return result
    
    def _get_schema(self) -> Optional[Dict[str, Any]]:
        """Get configuration schema."""
        if self._schema is None:
            try:
                schema_path = Path(__file__).parent / "config_schema.json"
                with open(schema_path, 'r') as f:
                    self._schema = json.load(f)
            except Exception as e:
                logger.warning(f"Could not load schema: {e}")
                
        return self._schema
    
    def validate_problem_config(self, problem_config: Dict[str, Any]) -> ValidationResult:
        """Sanity-check problem-specific settings (size and conditioning).

        Args:
            problem_config: Problem section of the configuration; reads
                "problem_size" and "condition_number" when present.

        Returns:
            ValidationResult with advisory warnings/suggestions; only an
            internal failure marks the result invalid.
        """
        outcome = ValidationResult(
            valid=True,
            errors=[],
            warnings=[],
            suggestions=[],
            metadata={}
        )

        try:
            size = problem_config.get("problem_size", 0)
            if size <= 0:
                outcome.warnings.append("Problem size not specified, will use default test problem")
            elif size > 100000:
                outcome.warnings.append(f"Large problem size ({size}) may require significant resources")
                outcome.suggestions.append("Consider starting with smaller problems for testing")

            cond = problem_config.get("condition_number", 0)
            if cond > 1e12:
                outcome.warnings.append(f"High condition number ({cond:.1e}) may cause convergence issues")
                outcome.suggestions.append("Consider preconditioning for ill-conditioned problems")

        except Exception as exc:
            outcome.errors.append(f"Problem validation failed: {exc}")
            outcome.valid = False

        return outcome