#!/usr/bin/env python3
# SPDX-License-Identifier: MulanPSL-2.0+
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All rights reserved.

"""
File Correlation Analysis Tool

Analyze the correlation between error_id and first_bad_commit for successful bisect tasks.
This tool queries bisect tasks with status='success' from database, groups them by git repository,
and uses py_bisect.py's _verify_errids_with_git method to verify the correlation.

Usage:
    python3 analyze_file_correlation.py [--hours 168] [--limit 1000] [--output report.json]
"""

import os
import sys
import json
import argparse
import logging
from datetime import datetime, timedelta
from collections import defaultdict

# Add bisect module path
sys.path.append(os.path.join(os.environ['LKP_SRC'], 'programs/bisect-py'))

from bisect_log_config import logger
from manticore_simple import ManticoreClient
from py_bisect import GitBisect

class FileCorrelationAnalyzer:
    """Analyze file correlation between error_id and first_bad_commit"""

    def __init__(self, db_config=None):
        """Set up the Manticore client and the GitBisect helper.

        :param db_config: Optional dict with 'host'/'port' overrides; falls
                          back to MANTICORE_HOST/MANTICORE_PORT env vars.
        """
        cfg = db_config if db_config is not None else {}
        host = cfg.get('host', os.getenv('MANTICORE_HOST', 'localhost'))
        port = int(cfg.get('port', os.getenv('MANTICORE_PORT', '9306')))
        # 9306 is the SQL default; this client talks HTTP, so switch ports.
        if port == 9306:
            port = 9308  # Use HTTP port

        self.client = ManticoreClient(host=host, port=port)
        self.bisect_db = 'bisect'
        logger.info(f"Manticore HTTP client initialized: {host}:{port}")

        # GitBisect instance used later to clone repos and verify commits
        self.gb = GitBisect()

    def query_successful_bisect_tasks(self, hours=168, limit=1000):
        """
        Fetch bisect tasks with status 'success' from the database.

        :param hours: Look-back window, applied to the updated_at field
        :param limit: Maximum number of rows to return
        :return: List of matching task dicts (empty list on any failure)
        """
        try:
            # Oldest updated_at (unix seconds) still inside the window
            unix_cutoff = int((datetime.now() - timedelta(hours=hours)).timestamp())

            # Successful tasks only, restricted to the time window
            query = {
                "bool": {
                    "must": [
                        {"equals": {"bisect_status": "success"}},
                        {"range": {"updated_at": {"gte": unix_cutoff}}}
                    ]
                }
            }

            logger.info(f"Querying successful bisect tasks (last {hours} hours)...")
            logger.debug(f"Query: {json.dumps(query, indent=2)}")

            results = self.client.search(
                index=self.bisect_db,
                query=query,
                limit=limit,
                sort=[{"updated_at": {"order": "desc"}}],  # newest first
                options={'max_matches': limit}
            )

            # The client signals failure by returning None rather than raising
            if results is None:
                logger.error("Query failed, client returned None")
                return []

            logger.info(f"Found {len(results)} successful bisect tasks")
            return results

        except Exception as err:
            logger.error(f"Query failed: {err}")
            import traceback
            traceback.print_exc()
            return []

    def group_by_git_repo(self, tasks):
        """
        Bucket tasks by their git repository URL.

        Tasks with a missing, empty or 'unknown' git_url are dropped.

        :param tasks: Iterable of bisect task dicts
        :return: Dict mapping git_url to the list of its tasks
        """
        buckets = defaultdict(list)
        for entry in tasks:
            url = entry.get('git_url', 'unknown')
            # Only keep entries that carry a usable repository URL
            if url and url != 'unknown':
                buckets[url].append(entry)

        logger.info(f"Grouped tasks into {len(buckets)} git repositories")
        for url, bucket in buckets.items():
            logger.info(f"  {url}: {len(bucket)} tasks")

        return dict(buckets)

    def extract_error_file_path(self, error_id):
        """
        Extract a source-file path from an error_id string.

        Examples:
        - "makepkg.eid.drivers/net/ethernet/yunsilicon/xsc/pci/cmd2.c:error:..."
          -> "drivers/net/ethernet/yunsilicon/xsc/pci/cmd2.c"
        - "boot.error.boot_failures" -> None (no file path)

        :param error_id: Error ID string (may be empty or None)
        :return: File path, or None when no plausible path is found
        """
        import re

        if not error_id:
            return None

        # For makepkg.eid.<path>:<error...> format: the path is the segment
        # between the prefix and the first colon.
        prefix = 'makepkg.eid.'
        if error_id.startswith(prefix):
            rest = error_id[len(prefix):]
            if ':' in rest:
                file_path, _, _ = rest.partition(':')
                # A plausible path needs a directory separator and an
                # extension. The former endswith('.c')/endswith('.h') checks
                # were dead code: both imply '.' in file_path already.
                if '/' in file_path and '.' in file_path:
                    return file_path

        # Fallback for other error formats: look for a "path/to/file.c" or
        # "path/to/file.h" pattern anywhere in the error_id.
        match = re.search(r'([a-zA-Z0-9_\-./]+\.[ch])\b', error_id)
        if match:
            candidate = match.group(1)
            # Require a directory component so bare filenames don't match
            if '/' in candidate:
                return candidate

        return None

    def analyze_repository(self, git_url, tasks, report_output=None):
        """
        Analyze tasks for a specific git repository

        Clones the repository once, then for each task extracts a file path
        from its error_id and checks (via GitBisect) whether that file was
        modified by the task's first_bad_commit. The git check is
        three-state: True / False / None (None = the check itself failed).

        :param git_url: Git repository URL
        :param tasks: List of tasks for this repository
        :param report_output: Optional file handle to write detailed report
        :return: Analysis results dictionary
        """
        logger.info(f"\n{'='*80}")
        logger.info(f"Analyzing repository: {git_url}")
        logger.info(f"Total tasks: {len(tasks)}")
        logger.info(f"{'='*80}")

        # Result skeleton returned even when cloning fails (all counters 0)
        results = {
            'git_url': git_url,
            'total_tasks': len(tasks),
            'tasks_with_file_path': 0,
            'tasks_without_file_path': 0,
            'verified_tasks': [],
            'correlation_stats': {
                'has_correlation': 0,
                'no_correlation': 0,
                'verification_failed': 0
            }
        }

        # Clone repository for verification
        # NOTE(review): persistent=True presumably keeps the clone for reuse
        # across runs — confirm against GitBisect.clone_repo.
        try:
            repo_dir = self.gb.clone_repo(git_url, persistent=True)
            if not repo_dir:
                logger.error(f"Failed to clone repository: {git_url}")
                return results

            logger.info(f"Repository cloned to: {repo_dir}")
        except Exception as e:
            logger.error(f"Failed to clone repository: {str(e)}")
            return results

        # Analyze each task
        for idx, task in enumerate(tasks, 1):
            task_id = task.get('id', 'unknown')
            error_id = task.get('error_id', '')
            first_bad_commit = task.get('first_bad_commit', '')

            logger.info(f"\n[{idx}/{len(tasks)}] Analyzing task {task_id}")
            logger.info(f"  error_id: {error_id}")
            logger.info(f"  first_bad_commit: {first_bad_commit}")

            # Extract file path from error_id
            file_path = self.extract_error_file_path(error_id)

            # Tasks without an extractable file path are recorded but not
            # verified against git.
            if not file_path:
                logger.info(f"  No file path found in error_id")
                results['tasks_without_file_path'] += 1

                verification_result = {
                    'task_id': task_id,
                    'error_id': error_id,
                    'first_bad_commit': first_bad_commit,
                    'file_path': None,
                    'has_correlation': None,
                    'verification_status': 'no_file_path'
                }
                results['verified_tasks'].append(verification_result)
                continue

            logger.info(f"  Extracted file path: {file_path}")
            results['tasks_with_file_path'] += 1

            # Verify correlation by checking if file was modified in first_bad_commit
            try:
                # Set work_dir on GitBisect instance for git operations
                self.gb.work_dir = repo_dir

                # Check if the file was modified in first_bad_commit
                # (True/False, or None when the git operation itself failed)
                modified_in_commit = self.gb._check_file_modified_in_commit(
                    first_bad_commit, file_path
                )

                if modified_in_commit is None:
                    # Git operation failed
                    logger.warning(f"  ! Failed to check if {file_path} was modified in {first_bad_commit[:12]}")
                    results['correlation_stats']['verification_failed'] += 1
                    verification_status = 'check_failed'
                    has_correlation = None
                elif modified_in_commit:
                    # File was modified in first_bad_commit
                    logger.info(f"  ✓ Correlation VERIFIED: {file_path} was modified in {first_bad_commit[:12]}")
                    results['correlation_stats']['has_correlation'] += 1
                    verification_status = 'verified'
                    has_correlation = True
                else:
                    # File was NOT modified in first_bad_commit
                    logger.info(f"  ✗ No correlation: {file_path} was NOT modified in {first_bad_commit[:12]}")
                    results['correlation_stats']['no_correlation'] += 1
                    verification_status = 'no_correlation'
                    has_correlation = False

                verification_result = {
                    'task_id': task_id,
                    'error_id': error_id,
                    'first_bad_commit': first_bad_commit,
                    'file_path': file_path,
                    'has_correlation': has_correlation,
                    'verification_status': verification_status
                }

            except Exception as e:
                # Unexpected failure: count it and keep the error text in the record
                logger.warning(f"  ! Verification failed: {str(e)}")
                results['correlation_stats']['verification_failed'] += 1

                verification_result = {
                    'task_id': task_id,
                    'error_id': error_id,
                    'first_bad_commit': first_bad_commit,
                    'file_path': file_path,
                    'has_correlation': None,
                    'verification_status': 'verification_failed',
                    'error': str(e)
                }

            results['verified_tasks'].append(verification_result)

            # Write to detailed report if provided
            if report_output:
                report_output.write(json.dumps(verification_result, indent=2) + '\n')

        # Print summary for this repository
        logger.info(f"\n{'='*80}")
        logger.info(f"Repository Analysis Summary: {git_url}")
        logger.info(f"{'='*80}")
        logger.info(f"Total tasks: {results['total_tasks']}")
        logger.info(f"Tasks with file path: {results['tasks_with_file_path']}")
        logger.info(f"Tasks without file path: {results['tasks_without_file_path']}")
        logger.info(f"\nCorrelation Statistics:")
        logger.info(f"  Has correlation: {results['correlation_stats']['has_correlation']}")
        logger.info(f"  No correlation: {results['correlation_stats']['no_correlation']}")
        logger.info(f"  Verification failed: {results['correlation_stats']['verification_failed']}")

        # Correlation rate is only meaningful when at least one task had a path
        if results['tasks_with_file_path'] > 0:
            correlation_rate = results['correlation_stats']['has_correlation'] / results['tasks_with_file_path'] * 100
            logger.info(f"\nCorrelation Rate: {correlation_rate:.1f}%")

        return results

    def generate_summary_report(self, all_results):
        """
        Aggregate per-repository analysis results into one summary dict.

        :param all_results: List of per-repository result dicts as produced
                            by analyze_repository()
        :return: Summary dict with overall totals, per-repository summaries
                 and correlation rates expressed as percentages
        """
        summary = {
            'total_repositories': len(all_results),
            'total_tasks': 0,
            'total_tasks_with_file_path': 0,
            'total_has_correlation': 0,
            'total_no_correlation': 0,
            'total_verification_failed': 0,
            'repository_summaries': []
        }

        for repo_result in all_results:
            stats = repo_result['correlation_stats']
            with_path = repo_result['tasks_with_file_path']

            # Accumulate overall totals
            summary['total_tasks'] += repo_result['total_tasks']
            summary['total_tasks_with_file_path'] += with_path
            summary['total_has_correlation'] += stats['has_correlation']
            summary['total_no_correlation'] += stats['no_correlation']
            summary['total_verification_failed'] += stats['verification_failed']

            # Per-repository roll-up; rate is 0.0 when nothing was verifiable
            rate = stats['has_correlation'] / with_path * 100 if with_path > 0 else 0.0
            summary['repository_summaries'].append({
                'git_url': repo_result['git_url'],
                'total_tasks': repo_result['total_tasks'],
                'tasks_with_file_path': with_path,
                'has_correlation': stats['has_correlation'],
                'no_correlation': stats['no_correlation'],
                'verification_failed': stats['verification_failed'],
                'correlation_rate': rate
            })

        # Calculate overall correlation rate
        total_with_path = summary['total_tasks_with_file_path']
        summary['overall_correlation_rate'] = (
            summary['total_has_correlation'] / total_with_path * 100
            if total_with_path > 0 else 0.0
        )

        return summary

    def print_summary_report(self, summary):
        """Emit the aggregated summary through the logger in readable form."""
        info = logger.info
        banner = '=' * 80

        info(f"\n{banner}")
        info("OVERALL SUMMARY REPORT")
        info(f"{banner}")
        info(f"Total Repositories Analyzed: {summary['total_repositories']}")
        info(f"Total Bisect Tasks: {summary['total_tasks']}")
        info(f"Tasks with File Path: {summary['total_tasks_with_file_path']}")
        info(f"Tasks without File Path: {summary['total_tasks'] - summary['total_tasks_with_file_path']}")
        info("\nOverall Correlation Statistics:")
        info(f"  Has Correlation: {summary['total_has_correlation']}")
        info(f"  No Correlation: {summary['total_no_correlation']}")
        info(f"  Verification Failed: {summary['total_verification_failed']}")
        info(f"\nOverall Correlation Rate: {summary['overall_correlation_rate']:.1f}%")

        info(f"\n{banner}")
        info("PER-REPOSITORY SUMMARY")
        info(f"{banner}")

        # One block per repository, mirroring the overall section above
        for repo in summary['repository_summaries']:
            info(f"\nRepository: {repo['git_url']}")
            info(f"  Tasks: {repo['total_tasks']}")
            info(f"  With File Path: {repo['tasks_with_file_path']}")
            info(f"  Has Correlation: {repo['has_correlation']}")
            info(f"  No Correlation: {repo['no_correlation']}")
            info(f"  Verification Failed: {repo['verification_failed']}")
            info(f"  Correlation Rate: {repo['correlation_rate']:.1f}%")


def main():
    parser = argparse.ArgumentParser(
        description='File Correlation Analysis Tool for Bisect Tasks',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    parser.add_argument(
        '--hours',
        type=int,
        default=168,
        help='Query tasks within the last N hours (default: 168 = 1 week)'
    )

    parser.add_argument(
        '--limit',
        type=int,
        default=1000,
        help='Maximum number of tasks to query'
    )

    parser.add_argument(
        '--output',
        help='Output JSON file for detailed results'
    )

    parser.add_argument(
        '--summary-only',
        action='store_true',
        help='Only display summary, skip detailed per-task output'
    )

    parser.add_argument(
        '--debug',
        action='store_true',
        help='Enable debug logging'
    )

    args = parser.parse_args()

    if args.debug:
        logger.setLevel(logging.DEBUG)
        logger.debug("Debug mode enabled")

    # Create analyzer
    analyzer = FileCorrelationAnalyzer()

    # Query successful bisect tasks
    tasks = analyzer.query_successful_bisect_tasks(hours=args.hours, limit=args.limit)

    if not tasks:
        logger.warning("No successful bisect tasks found")
        return 0

    # Group by git repository
    grouped_tasks = analyzer.group_by_git_repo(tasks)

    if not grouped_tasks:
        logger.warning("No tasks with valid git_url found")
        return 0

    # Analyze each repository
    all_results = []
    detailed_report = None

    if args.output:
        detailed_report = open(args.output, 'w')
        logger.info(f"Writing detailed results to: {args.output}")

    try:
        for git_url, repo_tasks in grouped_tasks.items():
            result = analyzer.analyze_repository(git_url, repo_tasks, detailed_report)
            all_results.append(result)
    finally:
        if detailed_report:
            detailed_report.close()

    # Generate and print summary
    summary = analyzer.generate_summary_report(all_results)
    analyzer.print_summary_report(summary)

    # Save summary to file
    if args.output:
        summary_file = args.output.replace('.json', '_summary.json')
        with open(summary_file, 'w') as f:
            json.dump(summary, f, indent=2)
        logger.info(f"\nSummary report saved to: {summary_file}")

    return 0


if __name__ == "__main__":
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        # Ctrl-C: report and exit with a failure code
        logger.info("\nInterrupted by user")
        sys.exit(1)
    except Exception:
        # logger.exception records the full traceback
        logger.exception("Analysis failed")
        sys.exit(1)
