#!/usr/bin/env python3
# SPDX-License-Identifier: MulanPSL-2.0+
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All rights reserved.

"""
Performance Bisect System - Complete Refactored Implementation

This module implements the complete performance bisect workflow:
1. Check if 2 kernels have bisectable performance changes
2. Submit performance bisect job with mid_point logic
3. Execute git bisect with mid_point based evaluation

Design:
- v1 (good) < v2 (bad) in commit timeline
- Submit 3 jobs each for v1 and v2
- Check for non-overlapping performance ranges
- Calculate mid_point = (v1_max + v2_min)/2 or (v2_max + v1_min)/2
- Use mid_point for bisect evaluation: sample > mid_point → GOOD, sample ≤ mid_point → BAD
"""

import os
import sys
import time
import subprocess
import json
import traceback
from typing import Dict, Any, Optional, Tuple, List
from dataclasses import dataclass

from py_bisect import GitBisect
from bisect_log_config import logger, get_logger
from bisect_config import BisectConfig
from bisect_requirements_validator import PerformanceBisectValidator, BisectRequirementError


@dataclass
class PerformanceSamples:
    """Performance samples collected from two kernel versions.

    Built by ``PerformanceBisect.check_kernel_performance_difference`` once a
    non-overlapping gap between the two sample ranges has been confirmed, and
    consumed by ``submit_performance_bisect`` to drive mid-point evaluation.
    """
    v1_samples: List[float]  # Earlier version samples (metric values, one per job)
    v2_samples: List[float]  # Later version samples (metric values, one per job)
    v1_range: Tuple[float, float]  # (min, max) of v1_samples
    v2_range: Tuple[float, float]  # (min, max) of v2_samples
    mid_point: float  # Threshold between the two ranges; bisect classifies samples against it
    has_gap: bool  # True when the two ranges do not overlap (always True for built instances)
    performance_change_type: str  # 'regression' (v1 < v2) or 'improvement' (v2 < v1)


class PerformanceBisect:
    """
    Complete Performance Bisect System

    Implements the full workflow from performance comparison to bisect
    execution:

    1. Collect metric samples for two kernel commits.
    2. Verify the sample ranges do not overlap (a bisectable change).
    3. Run ``git bisect`` classifying each tested commit against the
       mid-point between the two ranges.
    """

    def __init__(self, logger=None):
        """Initialize PerformanceBisect with optional logger.

        When no logger is supplied, a short hex session id is derived from
        the microsecond clock and a session-scoped logger is created.
        """
        if logger:
            self.logger = logger
            # Reuse the caller's session id when the logger carries one.
            self.session_id = getattr(logger, 'session_id', None)
        else:
            self.session_id = f"{int(time.time() * 1000000) % 0xFFFFFF:06x}"
            self.logger = get_logger(session_id=self.session_id)

        # GitBisect handles job submission and the low-level git operations.
        self.git_bisect = GitBisect(logger=self.logger)

        # Performance bisect state, populated as the workflow progresses.
        self.v1_commit = None
        self.v2_commit = None
        self.metric = None
        self.samples = None
        self.result_root = None

    def check_kernel_performance_difference(self,
                                          v1_commit: str,
                                          v2_commit: str,
                                          metric: str,
                                          job_template: Dict[str, Any],
                                          sample_count: int = 3) -> Optional["PerformanceSamples"]:
        """
        Step 1: Check if 2 kernels have bisectable performance changes

        Args:
            v1_commit: Earlier kernel version (good candidate)
            v2_commit: Later kernel version (bad candidate)
            metric: Performance metric name
            job_template: Base job configuration
            sample_count: Number of samples per version (default: 3)

        Returns:
            PerformanceSamples if bisectable difference found, None otherwise

        Raises:
            ValueError: If the input parameters fail validation.
        """
        # Validate input parameters
        try:
            PerformanceBisectValidator.validate_input_parameters(
                v1_commit, v2_commit, metric, job_template, sample_count
            )
        except BisectRequirementError as e:
            self.logger.error(f"Input validation failed: {str(e)}")
            # Chain the original validation error for debuggability.
            raise ValueError(f"Invalid input parameters: {str(e)}") from e

        self.logger.info("✅ Input validation passed")
        self.logger.info(f"Comparing {v1_commit[:8]} vs {v2_commit[:8]} on metric '{metric}'")

        # Store parameters for later steps of the workflow.
        self.v1_commit = v1_commit
        self.v2_commit = v2_commit
        self.metric = metric

        # Submit jobs for v1 (earlier version)
        self.logger.info(f"Submitting {sample_count} jobs for v1 ({v1_commit[:8]})")
        v1_samples = self._collect_performance_samples(v1_commit, metric, job_template, sample_count, "v1")

        # Submit jobs for v2 (later version)
        self.logger.info(f"Submitting {sample_count} jobs for v2 ({v2_commit[:8]})")
        v2_samples = self._collect_performance_samples(v2_commit, metric, job_template, sample_count, "v2")

        # Validate sample collection (enough valid samples per side).
        try:
            PerformanceBisectValidator.validate_sample_requirements(v1_samples, v2_samples)
        except BisectRequirementError as e:
            self.logger.error(f"Sample validation failed: {str(e)}")
            return None

        # Validate performance gap (ranges must not overlap).
        has_gap, gap_message = PerformanceBisectValidator.validate_performance_gap(v1_samples, v2_samples)
        self.logger.info(gap_message)

        if not has_gap:
            return None

        # Calculate ranges for mid_point calculation
        v1_min, v1_max = min(v1_samples), max(v1_samples)
        v2_min, v2_max = min(v2_samples), max(v2_samples)

        # Calculate mid_point based on gap direction.  The validator already
        # guaranteed the ranges do not overlap, so exactly one branch applies.
        if v1_max < v2_min:
            # Performance regression: v1 better (smaller) → v2 worse (larger)
            mid_point = (v1_max + v2_min) / 2
            change_type = "regression"
            self.logger.info("Performance REGRESSION detected: v1 < v2")
        else:
            # Performance improvement: v2 better (smaller) → v1 worse (larger)
            mid_point = (v2_max + v1_min) / 2
            change_type = "improvement"
            self.logger.info("Performance IMPROVEMENT detected: v2 < v1")

        self.logger.info(f"Mid-point calculated: {mid_point}")

        # Create samples object
        samples = PerformanceSamples(
            v1_samples=v1_samples,
            v2_samples=v2_samples,
            v1_range=(v1_min, v1_max),
            v2_range=(v2_min, v2_max),
            mid_point=mid_point,
            has_gap=True,
            performance_change_type=change_type
        )

        self.samples = samples
        return samples

    def submit_performance_bisect(self,
                                samples: "PerformanceSamples",
                                result_root: str) -> Optional[Dict[str, Any]]:
        """
        Step 2: Submit bisect job with mid_point logic

        Args:
            samples: Performance samples from step 1
            result_root: Directory for bisect results

        Returns:
            Bisect result dictionary or None if failed
        """
        if not samples or not samples.has_gap:
            self.logger.error("Invalid samples provided - no performance gap detected")
            return None

        self.logger.info("Starting performance bisect execution")
        self.logger.info(f"Using mid_point: {samples.mid_point}")
        self.logger.info(f"Performance change type: {samples.performance_change_type}")

        # Store result root
        self.result_root = result_root

        # Set up GitBisect instance
        self.git_bisect.set_log(result_root)

        # Create bisect task with mid_point logic
        task = {
            'good_commit': self.v1_commit,
            'bad_commit': self.v2_commit,
            'bisect_metric': self.metric,
            'bisect_result_root': result_root,
            'use_mid_point': True,
            'mid_point': samples.mid_point,
            'performance_samples': {
                'v1_samples': samples.v1_samples,
                'v2_samples': samples.v2_samples,
                'v1_range': samples.v1_range,
                'v2_range': samples.v2_range,
                'change_type': samples.performance_change_type
            }
        }

        # Execute bisect with mid_point logic
        try:
            result = self._execute_midpoint_bisect(task)
            if result:
                self.logger.info("Performance bisect completed successfully")
                self.logger.info(f"First bad commit: {result.get('first_bad_commit', 'N/A')}")
                return result
            else:
                self.logger.error("Performance bisect execution failed")
                return None

        except Exception as e:
            self.logger.exception(f"Performance bisect failed: {str(e)}")
            return None

    def complete_performance_bisect_workflow(self,
                                           v1_commit: str,
                                           v2_commit: str,
                                           metric: str,
                                           job_template: Dict[str, Any],
                                           result_root: str,
                                           sample_count: int = 3) -> Optional[Dict[str, Any]]:
        """
        Complete workflow: Check performance difference → Submit bisect

        Args:
            v1_commit: Earlier kernel version
            v2_commit: Later kernel version
            metric: Performance metric name
            job_template: Base job configuration
            result_root: Directory for results
            sample_count: Samples per version

        Returns:
            Complete bisect result, a 'no_change' status dict when no
            bisectable difference exists, or None if the bisect itself failed
        """
        self.logger.info("=== Starting Complete Performance Bisect Workflow ===")
        self.logger.info(f"Comparing {v1_commit[:8]} → {v2_commit[:8]} on metric '{metric}'")

        # Step 1: Check performance difference
        samples = self.check_kernel_performance_difference(
            v1_commit, v2_commit, metric, job_template, sample_count
        )

        if not samples:
            self.logger.info("No bisectable performance change detected")
            return {
                'status': 'no_change',
                'message': 'No bisectable performance difference found',
                'v1_commit': v1_commit,
                'v2_commit': v2_commit,
                'metric': metric
            }

        # Step 2: Execute performance bisect
        self.logger.info("=== Performance difference detected, starting bisect ===")
        result = self.submit_performance_bisect(samples, result_root)

        if result:
            # Add workflow metadata so the saved result is self-describing.
            result.update({
                'workflow_type': 'complete_performance_bisect',
                'v1_commit': v1_commit,
                'v2_commit': v2_commit,
                'metric': metric,
                'samples_used': {
                    'v1_samples': samples.v1_samples,
                    'v2_samples': samples.v2_samples,
                    'mid_point': samples.mid_point,
                    'change_type': samples.performance_change_type
                }
            })

        return result

    def _collect_performance_samples(self,
                                   commit: str,
                                   metric: str,
                                   job_template: Dict[str, Any],
                                   sample_count: int,
                                   version_label: str) -> List[float]:
        """Collect performance samples for a specific commit.

        Submits ``sample_count`` jobs and extracts the metric value from each
        job's stats.  Failed jobs or missing/invalid metric values are logged
        and skipped, so fewer than ``sample_count`` samples may be returned.
        """
        samples = []

        for i in range(sample_count):
            self.logger.info(f"Collecting {version_label} sample {i+1}/{sample_count} for commit {commit[:8]}")

            # Create job with specific commit
            job = self._create_job_with_commit(job_template, commit)

            try:
                # Submit job and wait for completion
                job_id, _ = self.git_bisect.submit_job(job)
                job_stats, job_health = self.git_bisect._poll_job_stats(job_id)

                # Extract metric value
                if job_stats and metric in job_stats:
                    try:
                        value = float(job_stats[metric])
                        samples.append(value)
                        self.logger.info(f"{version_label} sample {i+1}: {value}")
                    except (ValueError, TypeError) as e:
                        self.logger.warning(f"Invalid metric value in {version_label} sample {i+1}: {e}")
                else:
                    self.logger.warning(f"Metric '{metric}' not found in {version_label} sample {i+1}")

            except Exception as e:
                # Best-effort: one failed job should not abort the whole run.
                self.logger.error(f"Failed to collect {version_label} sample {i+1}: {str(e)}")

        self.logger.info(f"Collected {len(samples)}/{sample_count} valid {version_label} samples")
        return samples

    def _create_job_with_commit(self, job_template: Dict[str, Any], commit: str) -> Dict[str, Any]:
        """Return a deep copy of ``job_template`` pinned to ``commit``.

        Supports the two known job layouts ('ss.linux.commit' and
        'program.makepkg.commit'); raises ValueError for anything else.
        """
        import copy
        # Deep copy so repeated sample jobs never share mutable state.
        job = copy.deepcopy(job_template)

        # Set commit based on job structure
        if 'ss' in job and 'linux' in job['ss']:
            job['ss']['linux']['commit'] = commit
        elif 'program' in job and 'makepkg' in job['program']:
            job['program']['makepkg']['commit'] = commit
        else:
            raise ValueError("Unsupported job template structure")

        return job

    def _execute_midpoint_bisect(self, task: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Execute bisect using mid_point logic.

        Configures the shared GitBisect instance from ``task``, sets up the
        work directory, runs the bisect and returns the analysed result
        (None on failure).
        """
        # Set up GitBisect with mid_point
        self.git_bisect.mid_point = task['mid_point']
        self.git_bisect.metric = task['bisect_metric']
        self.git_bisect.good_commit = task['good_commit']
        self.git_bisect.bad_commit = task['bad_commit']

        # Create a minimal bad_job for GitBisect compatibility
        minimal_job = {
            'suite': 'performance_bisect',
            'ss': {'linux': {'commit': task['bad_commit']}},
            'stats': {},
            'job_health': 'success'
        }
        self.git_bisect.bad_job = minimal_job
        self.git_bisect.bad_job_id = f"perf_bisect_{int(time.time())}"
        self.git_bisect.commit_field = 'ss.linux.commit'

        # Execute bisect
        try:
            # Set up repository - use external repo if available
            if hasattr(self, 'repo_dir'):
                self.git_bisect.repo_dir = self.repo_dir

            self.git_bisect.set_work_dir()

            # Run git bisect with mid_point logic
            bisect_result = self._run_midpoint_git_bisect()

            if bisect_result:
                return self.git_bisect.analyse_result(bisect_result)
            else:
                return None

        except Exception as e:
            self.logger.exception(f"Mid-point bisect execution failed: {str(e)}")
            return None

    def _run_midpoint_git_bisect(self) -> Optional[str]:
        """Run git bisect with mid_point parameters.

        Returns the raw bisect output on success, None on failure.
        """
        self.logger.info("Starting git bisect with mid_point logic")
        self.logger.info(f"Good commit: {self.git_bisect.good_commit}")
        self.logger.info(f"Bad commit: {self.git_bisect.bad_commit}")
        self.logger.info(f"Mid-point: {self.git_bisect.mid_point}")

        try:
            # Start git bisect.  Use an argument list (no shell) so commit
            # ids and paths are passed through verbatim, consistent with the
            # 'bisect run' invocation below.
            start_command = [
                "git", "-C", self.git_bisect.work_dir, "bisect", "start",
                self.git_bisect.bad_commit, self.git_bisect.good_commit,
            ]
            start_proc = subprocess.run(
                start_command, capture_output=True, text=True, check=False
            )
            # Merge stdout and stderr; git prints "Bisecting:" progress on
            # either stream depending on version.
            start_result = (start_proc.stdout + start_proc.stderr).strip()

            if "Bisecting:" not in start_result:
                self.logger.error(f"Failed to start bisect: {start_result}")
                return None

            self.logger.info("Git bisect started successfully")

            # Run bisect with new mid_point script
            run_command = [
                "git", "-C", self.git_bisect.work_dir, "bisect", "run",
                BisectConfig.BISECT_MIDPOINT_SCRIPT,  # New script for mid_point logic
                str(self.git_bisect.bad_job_id),
                str(self.git_bisect.metric),
                str(self.git_bisect.temp_result_root),
                str(self.git_bisect.mid_point),  # Pass mid_point directly
                str(self.session_id),
            ]

            self.logger.info(f"Running bisect command: {' '.join(run_command)}")
            bisect_result = self.git_bisect.run_command_real_time(run_command)

            if bisect_result:
                self.logger.info("Git bisect completed")
                return bisect_result
            else:
                self.logger.error("Git bisect returned empty result")
                return None

        except Exception as e:
            self.logger.exception(f"Git bisect execution failed: {str(e)}")
            return None


def main():
    """
    Command line interface for performance bisect.

    Usage:
        python performance_bisect.py <v1_commit> <v2_commit> <metric> <job_template_path> <result_root>

    Exits 0 on success (including a clean 'no change' verdict), 1 on any
    failure.
    """
    if len(sys.argv) < 6:
        print("Usage: python performance_bisect.py <v1_commit> <v2_commit> <metric> <job_template_path> <result_root>")
        print("Example: python performance_bisect.py abc123 def456 'boot_time' job_template.yaml /tmp/bisect_results")
        sys.exit(1)

    v1_commit = sys.argv[1]
    v2_commit = sys.argv[2]
    metric = sys.argv[3]
    job_template_path = sys.argv[4]
    result_root = sys.argv[5]

    # Load the base job configuration (YAML).
    try:
        import yaml
        with open(job_template_path, 'r', encoding='utf-8') as f:
            job_template = yaml.safe_load(f)
    except Exception as e:
        print(f"Failed to load job template: {e}")
        sys.exit(1)

    # Create result directory
    os.makedirs(result_root, exist_ok=True)

    # Initialize performance bisect
    perf_bisect = PerformanceBisect()

    try:
        # Run complete workflow
        result = perf_bisect.complete_performance_bisect_workflow(
            v1_commit=v1_commit,
            v2_commit=v2_commit,
            metric=metric,
            job_template=job_template,
            result_root=result_root
        )

        if result:
            # Persist the result next to the other bisect artifacts.
            result_file = os.path.join(result_root, 'performance_bisect_result.json')
            with open(result_file, 'w', encoding='utf-8') as f:
                json.dump(result, f, indent=2)

            print(f"Performance bisect completed. Result saved to: {result_file}")

            if result.get('status') == 'no_change':
                print("No bisectable performance change detected.")
            else:
                print(f"First bad commit: {result.get('first_bad_commit', 'N/A')}")
            # Both outcomes above are successful terminations.
            sys.exit(0)
        else:
            print("Performance bisect failed.")
            sys.exit(1)

    except Exception as e:
        print(f"Performance bisect error: {e}")
        traceback.print_exc()
        sys.exit(1)


if __name__ == "__main__":
    main()