#!/usr/bin/env python3
# SPDX-License-Identifier: MulanPSL-2.0+
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All rights reserved.

"""
Performance Bisect Requirements Validator

This module provides validation functions to check if input parameters
and conditions meet the requirements for performance bisect execution.
"""

from typing import Dict, Any, List, Tuple, Optional
import os


class BisectRequirementError(Exception):
    """Raised when one or more performance-bisect requirements are not met."""


class PerformanceBisectValidator:
    """Validator for performance bisect requirements and limitations.

    Stateless namespace: every method is a classmethod, and validation
    failures are reported by raising BisectRequirementError (or, for the
    feasibility check, by collecting error strings into a result dict).
    """

    # Minimum samples required per version for a meaningful range comparison
    MIN_SAMPLES_PER_VERSION = 2

    # Default (and recommended) sample count per version
    DEFAULT_SAMPLE_COUNT = 3

    # Supported job template structures (dotted paths to the commit field)
    SUPPORTED_COMMIT_FIELDS = [
        'ss.linux.commit',
        'program.makepkg.commit'
    ]

    @classmethod
    def validate_input_parameters(cls,
                                v1_commit: str,
                                v2_commit: str,
                                metric: str,
                                job_template: Dict[str, Any],
                                result_root: str,
                                sample_count: int = DEFAULT_SAMPLE_COUNT) -> None:
        """
        Validate all input parameters for performance bisect

        Args:
            v1_commit: Earlier kernel version
            v2_commit: Later kernel version
            metric: Performance metric name
            job_template: Job configuration template
            result_root: Results directory
            sample_count: Samples per version

        Raises:
            BisectRequirementError: If any requirement is not met
        """
        # Basic parameter validation
        cls._validate_basic_parameters(v1_commit, v2_commit, metric, sample_count)

        # Job template validation
        cls._validate_job_template(job_template)

        # Result directory validation (may create the directory as a side effect)
        cls._validate_result_directory(result_root)

    @classmethod
    def _validate_basic_parameters(cls, v1_commit: str, v2_commit: str,
                                 metric: str, sample_count: int) -> None:
        """Validate commits, metric name and sample count.

        Raises:
            BisectRequirementError: If any parameter is empty, the commits
                are identical, or sample_count is not a positive integer.
        """
        # Commit validation: reject None/empty/whitespace-only strings
        if not v1_commit or not v1_commit.strip():
            raise BisectRequirementError("v1_commit cannot be empty")

        if not v2_commit or not v2_commit.strip():
            raise BisectRequirementError("v2_commit cannot be empty")

        if v1_commit == v2_commit:
            raise BisectRequirementError("v1_commit and v2_commit cannot be the same")

        # Metric validation
        if not metric or not metric.strip():
            raise BisectRequirementError("metric cannot be empty")

        # Sample count validation. bool is a subclass of int, so exclude it
        # explicitly — True/False must not be accepted as a sample count.
        if (isinstance(sample_count, bool)
                or not isinstance(sample_count, int)
                or sample_count < 1):
            raise BisectRequirementError(f"sample_count must be positive integer, got {sample_count}")

    @classmethod
    def _validate_job_template(cls, job_template: Dict[str, Any]) -> None:
        """Validate the job template contains a supported commit field structure.

        Raises:
            BisectRequirementError: If job_template is not a dict or contains
                none of the structures listed in SUPPORTED_COMMIT_FIELDS.
        """
        if not isinstance(job_template, dict):
            raise BisectRequirementError("job_template must be a dictionary")

        # Check for supported commit field structures. Only the presence of
        # the nested container is checked here, not the 'commit' leaf itself.
        has_ss_linux = (
            'ss' in job_template and
            isinstance(job_template['ss'], dict) and
            'linux' in job_template['ss']
        )

        has_program_makepkg = (
            'program' in job_template and
            isinstance(job_template['program'], dict) and
            'makepkg' in job_template['program']
        )

        if not (has_ss_linux or has_program_makepkg):
            raise BisectRequirementError(
                f"job_template must contain one of: {cls.SUPPORTED_COMMIT_FIELDS}"
            )

    @classmethod
    def _validate_result_directory(cls, result_root: str) -> None:
        """Validate the result directory exists (creating it if needed) and is writable.

        Raises:
            BisectRequirementError: If result_root is empty, cannot be
                created, or is not writable.
        """
        if not result_root or not result_root.strip():
            raise BisectRequirementError("result_root cannot be empty")

        # Creating the directory up front surfaces path/permission problems early
        try:
            os.makedirs(result_root, exist_ok=True)
        except OSError as e:
            # Chain the original OSError so callers can see the root cause
            raise BisectRequirementError(f"Cannot create result_root directory: {e}") from e

        if not os.access(result_root, os.W_OK):
            raise BisectRequirementError(f"No write permission for result_root: {result_root}")

    @classmethod
    def validate_sample_requirements(cls, v1_samples: List[float], v2_samples: List[float]) -> None:
        """
        Validate collected performance samples meet bisect requirements

        Args:
            v1_samples: Performance samples from v1
            v2_samples: Performance samples from v2

        Raises:
            BisectRequirementError: If sample requirements are not met
        """
        # Check minimum sample count
        if len(v1_samples) < cls.MIN_SAMPLES_PER_VERSION:
            raise BisectRequirementError(
                f"Insufficient v1 samples: {len(v1_samples)} < {cls.MIN_SAMPLES_PER_VERSION}"
            )

        if len(v2_samples) < cls.MIN_SAMPLES_PER_VERSION:
            raise BisectRequirementError(
                f"Insufficient v2 samples: {len(v2_samples)} < {cls.MIN_SAMPLES_PER_VERSION}"
            )

        # Validate sample values
        cls._validate_sample_values(v1_samples, "v1")
        cls._validate_sample_values(v2_samples, "v2")

    @classmethod
    def _validate_sample_values(cls, samples: List[float], version: str) -> None:
        """Validate each sample is a non-negative real number.

        Args:
            samples: Sample values to check
            version: Label ("v1"/"v2") used in error messages

        Raises:
            BisectRequirementError: On the first non-numeric or negative sample.
        """
        for i, sample in enumerate(samples):
            # bool subclasses int, so reject it explicitly — True/False are
            # not meaningful performance measurements.
            if isinstance(sample, bool) or not isinstance(sample, (int, float)):
                raise BisectRequirementError(
                    f"Invalid {version} sample {i+1}: {sample} is not a number"
                )

            if sample < 0:
                raise BisectRequirementError(
                    f"Invalid {version} sample {i+1}: {sample} is negative"
                )

    @classmethod
    def validate_performance_gap(cls, v1_samples: List[float], v2_samples: List[float]) -> Tuple[bool, str]:
        """
        Validate that performance samples have a clear gap for bisecting

        Args:
            v1_samples: Performance samples from v1
            v2_samples: Performance samples from v2

        Returns:
            (has_gap, message): Boolean indicating if gap exists and explanation message
        """
        # Guard: min()/max() would raise ValueError on an empty list.
        # Report "no gap" instead of crashing.
        if not v1_samples or not v2_samples:
            return False, (
                f"❌ No clear performance gap - insufficient samples\n"
                f"   v1 samples: {len(v1_samples)}, v2 samples: {len(v2_samples)}\n"
                f"   Not suitable for bisect"
            )

        # Calculate ranges
        v1_min, v1_max = min(v1_samples), max(v1_samples)
        v2_min, v2_max = min(v2_samples), max(v2_samples)

        # A bisectable gap means the two ranges do not overlap at all
        has_gap = (v1_max < v2_min) or (v2_max < v1_min)

        if has_gap:
            # mid_point is the threshold between the two ranges; a bisect run
            # can classify a commit by which side of it the sample falls on.
            if v1_max < v2_min:
                change_type = "regression (v1 better → v2 worse)"
                mid_point = (v1_max + v2_min) / 2
            else:
                change_type = "improvement (v1 worse → v2 better)"
                mid_point = (v2_max + v1_min) / 2

            message = (
                f"✅ Clear performance gap detected: {change_type}\n"
                f"   v1 range: [{v1_min}, {v1_max}]\n"
                f"   v2 range: [{v2_min}, {v2_max}]\n"
                f"   mid_point: {mid_point}"
            )
        else:
            overlap_start = max(v1_min, v2_min)
            overlap_end = min(v1_max, v2_max)
            message = (
                f"❌ No clear performance gap - ranges overlap\n"
                f"   v1 range: [{v1_min}, {v1_max}]\n"
                f"   v2 range: [{v2_min}, {v2_max}]\n"
                f"   overlap: [{overlap_start}, {overlap_end}]\n"
                f"   Not suitable for bisect"
            )

        return has_gap, message

    @classmethod
    def check_bisect_feasibility(cls,
                                v1_commit: str,
                                v2_commit: str,
                                metric: str,
                                job_template: Dict[str, Any],
                                result_root: str,
                                sample_count: int = DEFAULT_SAMPLE_COUNT) -> Dict[str, Any]:
        """
        Comprehensive feasibility check for performance bisect

        Unlike validate_input_parameters, this never raises: each failed
        requirement is collected into the returned dict instead.

        Returns:
            dict: 'feasible' (bool), 'errors' (list), 'warnings' (list) and a
                per-requirement 'requirements_met' mapping.
        """
        result = {
            'feasible': False,
            'errors': [],
            'warnings': [],
            'requirements_met': {
                'input_parameters': False,
                'job_template': False,
                'result_directory': False,
                'sample_count': False
            }
        }

        try:
            # Input parameter validation
            cls._validate_basic_parameters(v1_commit, v2_commit, metric, sample_count)
            result['requirements_met']['input_parameters'] = True
        except BisectRequirementError as e:
            result['errors'].append(f"Input parameters: {str(e)}")

        try:
            # Job template validation
            cls._validate_job_template(job_template)
            result['requirements_met']['job_template'] = True
        except BisectRequirementError as e:
            result['errors'].append(f"Job template: {str(e)}")

        try:
            # Result directory validation (may create the directory)
            cls._validate_result_directory(result_root)
            result['requirements_met']['result_directory'] = True
        except BisectRequirementError as e:
            result['errors'].append(f"Result directory: {str(e)}")

        # Below-recommended sample counts only warn; they do not block bisect
        if sample_count < cls.DEFAULT_SAMPLE_COUNT:
            result['warnings'].append(
                f"Sample count {sample_count} is below recommended {cls.DEFAULT_SAMPLE_COUNT}"
            )

        result['requirements_met']['sample_count'] = sample_count >= cls.MIN_SAMPLES_PER_VERSION

        # Overall feasibility: every requirement met and no errors recorded
        result['feasible'] = all(result['requirements_met'].values()) and not result['errors']

        return result

    @classmethod
    def print_requirements_summary(cls) -> None:
        """Print a human-readable summary of all bisect requirements to stdout."""
        print("=" * 60)
        print("Performance Bisect Requirements Summary")
        print("=" * 60)
        print()
        print("Input Requirements:")
        print("  • v1_commit: Non-empty string (earlier version)")
        print("  • v2_commit: Non-empty string (later version)")
        print("  • metric: Non-empty string (performance metric name)")
        print(f"  • sample_count: Positive integer (≥ {cls.MIN_SAMPLES_PER_VERSION})")
        print()
        print("Job Template Requirements:")
        for field in cls.SUPPORTED_COMMIT_FIELDS:
            print(f"  • Must contain: {field}")
        print()
        print("Sample Collection Requirements:")
        print(f"  • Minimum {cls.MIN_SAMPLES_PER_VERSION} valid samples per version")
        print("  • Samples must be positive numbers")
        print("  • Performance ranges must not overlap")
        print()
        print("Performance Gap Requirements:")
        print("  • v1_max < v2_min OR v2_max < v1_min")
        print("  • Clear separation between version performance")
        print()
        print("System Requirements:")
        print("  • Write access to result directory")
        print("  • Git repository access")
        print("  • Network connectivity for job submission")
        print("=" * 60)


# Convenience functions for quick validation
def validate_bisect_input(v1_commit: str, v2_commit: str, metric: str,
                         job_template: Dict[str, Any], result_root: str,
                         sample_count: int = 3) -> None:
    """Convenience wrapper for quick input validation.

    Delegates to PerformanceBisectValidator.validate_input_parameters and
    raises BisectRequirementError if any requirement is violated.
    """
    PerformanceBisectValidator.validate_input_parameters(
        v1_commit=v1_commit,
        v2_commit=v2_commit,
        metric=metric,
        job_template=job_template,
        result_root=result_root,
        sample_count=sample_count,
    )


def check_performance_gap(v1_samples: List[float], v2_samples: List[float]) -> bool:
    """Return True when the two sample sets are cleanly separated (bisectable)."""
    gap_found, _message = PerformanceBisectValidator.validate_performance_gap(
        v1_samples, v2_samples
    )
    return gap_found


def print_gap_analysis(v1_samples: List[float], v2_samples: List[float]) -> None:
    """Print the detailed gap-analysis message for two sample sets.

    Args:
        v1_samples: Performance samples from v1
        v2_samples: Performance samples from v2
    """
    # Only the message is needed here; discard the boolean flag explicitly
    # (the original bound it to an unused local).
    _, message = PerformanceBisectValidator.validate_performance_gap(v1_samples, v2_samples)
    print(message)


if __name__ == "__main__":
    # Example usage and demonstration
    PerformanceBisectValidator.print_requirements_summary()

    print("\nExample Validation:")

    # Test valid input
    try:
        validate_bisect_input(
            v1_commit="abc123",
            v2_commit="def456",
            metric="boot_time",
            job_template={"ss": {"linux": {"commit": "placeholder"}}},
            result_root="/tmp/test"
        )
        print("✅ Input validation passed")
    except BisectRequirementError as e:
        print(f"❌ Input validation failed: {e}")

    # Test performance gap
    print("\nPerformance Gap Examples:")

    # Case 1: Clear gap (bisectable)
    v1_samples = [1.0, 1.2, 1.1]
    v2_samples = [2.1, 2.3, 2.2]
    print_gap_analysis(v1_samples, v2_samples)

    print()

    # Case 2: Overlapping ranges (not bisectable)
    v1_samples = [1.0, 2.0, 3.0]
    v2_samples = [2.5, 3.5, 4.0]
    print_gap_analysis(v1_samples, v2_samples)