import os
import pandas as pd
from collections import Counter
import subprocess
from tqdm import tqdm
from openpyxl import Workbook
from openpyxl.styles import PatternFill
import json
import multiprocessing
from functools import partial
import tempfile
import shutil
import argparse
import hashlib
import logging


# Define mappings for better readability
# Mapping from the detector model's compiler class indices to readable names.
COMPILER_MAPPING = {
    0: "Clang",
    1: "GCC",
    # Add more if needed
}

# Mapping from optimization-flag class indices to flag names.
# NOTE(review): merge_chunk_results and generate_excel_report also treat
# index 4 as "Os", which is missing here — confirm and extend if the model
# can emit it.
FLAGS_MAPPING = {
    0: "O0",
    1: "O1",
    2: "O2",
    3: "O3",
    # Add more if needed
}

# Human-readable per-file status strings used in the Excel "Summary" sheet.
FILE_STATUS_MAPPING = {
    'analyzed': 'Successfully Analyzed',
    'skipped': 'Skipped (System Library)',
    'failed': 'Analysis Failed',
}

def get_unique_file_id(file_path):
    """Build a filesystem-safe identifier for *file_path* that keeps directory info.

    Path separators and spaces become underscores; paths whose sanitized form
    exceeds 200 characters are shortened to an MD5 hash of the full path plus
    the (sanitized) basename, so the ID stays usable as a filename.
    """
    # Normalize separators and spaces to underscores, then trim edge underscores.
    sanitized = file_path
    for ch in ('/', '\\', ' '):
        sanitized = sanitized.replace(ch, '_')
    sanitized = sanitized.strip('_')

    # Too long for a filename: fall back to hash-of-path plus basename.
    if len(sanitized) > 200:
        digest = hashlib.md5(file_path.encode()).hexdigest()
        tail = os.path.basename(file_path).replace(' ', '_')
        sanitized = f"{digest}_{tail}"
    return sanitized

def find_so_files(directory, extension='.so'):
    """Recursively collect files under *directory* whose names end with *extension*.

    Args:
        directory: root directory to walk.
        extension: filename suffix to match (default '.so'); parameterized so
            the helper can be reused for other binary/file types.

    Returns:
        List of full paths in os.walk order.
    """
    return [
        os.path.join(root, name)
        for root, _, files in os.walk(directory)
        for name in files
        if name.endswith(extension)
    ]

def check_binary_integrity(binary_path) -> str:
    """Inspect an ELF binary with readelf and report integrity notes.

    Returns "OK" when nothing suspicious is found, otherwise a
    "; "-joined list of notes (missing .text section, presence/absence of
    an executable LOAD segment, corrupted section headers). Any failure to
    run readelf is reported as an error string rather than raised.
    """
    try:
        # Section headers and program headers, each via a separate readelf call.
        sections = subprocess.run(["readelf", "-S", binary_path],
                                  capture_output=True, text=True)
        segments = subprocess.run(["readelf", "-l", binary_path],
                                  capture_output=True, text=True)

        notes = []

        # Missing .text: fall back to looking for an executable LOAD segment.
        if ".text" not in sections.stdout:
            notes.append("No .text section found")
            has_exec_load = "LOAD" in segments.stdout and "R E" in segments.stdout
            if has_exec_load:
                notes.append("Found executable LOAD segment")
            else:
                notes.append("No executable segments found")

        # Marker readelf emits for damaged section header tables.
        if "^ELF" in sections.stdout:
            notes.append("Corrupted section headers")

        return "; ".join(notes) if notes else "OK"

    except Exception as e:
        return f"Error checking binary: {str(e)}"


def run_analysis_with_detector(binary_file, flags_model, temp_dir):
    """Run optimization-flag detection on one binary via optimization-detector.py.

    Args:
        binary_file: path to the binary to analyze.
        flags_model: path to the trained flags-detection model.
        temp_dir: directory where the detector writes its per-file CSV output.

    Returns:
        pandas DataFrame of chunk-level predictions, or None when the binary
        is skipped (no executable code), the detector script is missing, or
        the detector run / result read fails.
    """
    # Skip binaries that contain no executable code at all (readelf heuristic).
    integrity_check = check_binary_integrity(binary_file)
    if "No executable segments found" in integrity_check:
        logging.info(f"Skipping {binary_file}: No executable code found")
        return None

    # Unique, filesystem-safe ID so per-file CSV outputs never collide.
    file_id = get_unique_file_id(binary_file)
    temp_flags_csv = os.path.join(temp_dir, f"flags_{file_id}.csv")

    logging.debug(f"work directory: {os.getcwd()}")

    # Locate the detector script next to this file, falling back to the CWD.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    script_path = os.path.join(script_dir, "optimization-detector.py")
    logging.debug(f"script_path: {script_path}")

    if not os.path.exists(script_path):
        logging.warning(f"{script_path} not exist.")
        if os.path.exists("optimization-detector.py"):
            script_path = "optimization-detector.py"
        else:
            logging.error(f"{script_path} not exist.")
            return None

    # NOTE(review): "python" relies on PATH; sys.executable would pin the
    # current interpreter — confirm the intended environment before changing.
    flags_cmd = [
        "python", script_path, "infer",
        "-m", flags_model,
        "-o", temp_flags_csv,
        binary_file
    ]

    try:
        logging.info(f"Running flags detection on {binary_file}")
        result = subprocess.run(flags_cmd, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        if result.returncode != 0:
            logging.error(f"Error running flags detection on {binary_file}:\n STDOUT: {result.stdout}\n STDERR: {result.stderr}")
            return None

    except Exception as e:
        logging.error(f"Exception running flags detection on {binary_file}: {e}")
        # BUG FIX: previously fell through here, then logged a misleading
        # "Output file not found" message instead of reporting the failure.
        return None

    # Read the detector's CSV results back in.
    try:
        if os.path.exists(temp_flags_csv):
            flags_df = pd.read_csv(temp_flags_csv)
            return flags_df
        else:
            logging.info(f"Output file not found for {binary_file}")
            return None
    except Exception as e:
        logging.error(f"Error reading results for {binary_file}: {e}")
        return None

def process_binary(binary_file, flags_model, temp_dir, checkpoint_file):
    """ Process a single binary file and update the checkpoint.

    Returns:
        (binary_file, flags_df) tuple; flags_df is None when analysis failed
        or was skipped.

    NOTE(review): this function runs inside a multiprocessing.Pool (see
    analyze_binary_files). The read-modify-write of the checkpoint JSON
    below is not protected by any lock, so concurrent workers can lose
    updates or corrupt the file — worth confirming and fixing with a lock
    or per-worker checkpoint files.
    """
    try:
        # Run analysis; returns a DataFrame of chunk predictions or None.
        flags_df = run_analysis_with_detector(binary_file, flags_model, temp_dir)

        # Mark this file as analyzed in the shared checkpoint (unlocked; see
        # NOTE above). The file is recorded even when flags_df is None, so
        # failed files are not retried on resume.
        with open(checkpoint_file, 'r') as f:
            checkpoint = json.load(f)

        checkpoint['analyzed_files'].append(binary_file)

        with open(checkpoint_file, 'w') as f:
            json.dump(checkpoint, f, indent=2)
    
        return binary_file, flags_df
    
    except Exception as e:
        # Never let a single file abort the whole pool run.
        logging.error(f"Error processing {binary_file}: {e}")
        return binary_file, None

def merge_chunk_results(df):
    """Collapse chunk-level predictions into per-file results.

    Expects a DataFrame with 'file' and 'prediction' columns. For each file,
    reports the majority prediction with its confidence, the full prediction
    distribution, and — when predictions look like optimization-flag classes
    (0..4) — a weighted optimization score and a coarse category label.
    """
    merged = {}

    for fname, group in df.groupby('file'):
        preds = group['prediction'].tolist()
        counts = Counter(preds)
        distribution = dict(counts)

        # Majority vote plus the fraction of chunks that agree with it.
        top_label, top_count = counts.most_common(1)[0]
        n = len(preds)
        confidence = top_count / n

        opt_score = None
        opt_category = None
        # Flag-class data uses labels 0..4; anything else (e.g. compiler
        # classes) gets no score/category.
        if any(level in distribution for level in (0, 1, 2, 3, 4)):
            # Weight each level by aggressiveness; Os (4) counts like O2
            # since it is a size-focused optimization.
            weights = {0: 0, 1: 0.33, 2: 0.67, 3: 1.0, 4: 0.67}
            opt_score = sum(distribution.get(level, 0) * w
                            for level, w in weights.items()) / n

            # Bucket the score into a human-readable category.
            if opt_score < 0.2:
                opt_category = "Unoptimized (O0 dominant)"
            elif opt_score < 0.4:
                opt_category = "Low Optimization (O1 dominant)"
            elif opt_score < 0.7:
                opt_category = "Medium Optimization (O2/Os dominant)"
            else:
                opt_category = "High Optimization (O3 dominant)"

        merged[fname] = {
            'prediction': top_label,
            'confidence': confidence,
            'distribution': distribution,
            'opt_score': opt_score,
            'opt_category': opt_category,
            'total_chunks': n
        }

    return merged

def generate_excel_report(flags_results, file_metadata, all_input_files, output_file):
    """Generate a comprehensive Excel report with the analysis results.

    Writes two sheets:
      * "Summary"       -- one row per input file (failures included) with
                           status, chunk-count distribution, metadata, size
                           optimization flag and integrity notes.
      * "Visualization" -- percentage view of files that have results,
                           sorted by optimization score.

    Args:
        flags_results: per-file results from merge_chunk_results, keyed by
            the detector CSV's 'file' column.
            NOTE(review): assumed to use the same paths as all_input_files --
            confirm against the detector's output format.
        file_metadata: dict mapping path -> {'file_size', 'self_events'}.
        all_input_files: every input path; files absent from flags_results
            are reported with status "Analysis Failed" and N/A values.
        output_file: path of the .xlsx workbook to write.
    """
    wb = Workbook()

    # Summary sheet (the workbook's default first sheet)
    summary_sheet = wb.active
    summary_sheet.title = "Summary"

    # Column headers for the summary sheet
    summary_sheet['A1'] = "Binary File"
    summary_sheet['B1'] = "Status"
    summary_sheet['C1'] = "Optimization Category"
    summary_sheet['D1'] = "Optimization Score"
    summary_sheet['E1'] = "O0 Chunks"
    summary_sheet['F1'] = "O1 Chunks"
    summary_sheet['G1'] = "O2 Chunks"
    summary_sheet['H1'] = "O3 Chunks"
    summary_sheet['I1'] = "Os Chunks"
    summary_sheet['J1'] = "Total Chunks"
    summary_sheet['K1'] = "File Size (bytes)"
    summary_sheet['L1'] = "Self Events"
    summary_sheet['M1'] = "Size Optimized"
    summary_sheet['N1'] = "Notes"

    # One row per input file, in sorted path order
    row = 2
    for file in sorted(all_input_files):
        flags_result = flags_results.get(file, None)

        # File metadata; empty string when unknown
        file_size = file_metadata.get(file, {}).get('file_size', '')
        self_events = file_metadata.get(file, {}).get('self_events', '')

        if flags_result is None:
            # No analysis result for this file: emit N/A placeholders
            status = FILE_STATUS_MAPPING['failed']
            opt_category = 'N/A'
            opt_score = 'N/A'
            distribution = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0}
            total_chunks = 0
            size_optimized = 'N/A'
        else:
            status = FILE_STATUS_MAPPING['analyzed']
            opt_category = flags_result['opt_category']
            opt_score = flags_result['opt_score']
            distribution = flags_result['distribution']
            total_chunks = flags_result['total_chunks']

            # A file counts as size-optimized when >= 50% of its chunks
            # were classified as Os (label 4)
            os_chunks = distribution.get(4, 0)
            os_ratio = os_chunks / total_chunks if total_chunks > 0 else 0
            size_optimized = f"{'Yes' if os_ratio >= 0.5 else 'No'} ({os_ratio: .1%})"
        
        # readelf-based integrity notes for the Notes column.
        # NOTE(review): this re-runs readelf per file even though
        # run_analysis_with_detector already checked it -- consider caching.
        integrity_note = check_binary_integrity(file)

        # Write the row
        summary_sheet[f'A{row}'] = file
        summary_sheet[f'B{row}'] = status
        summary_sheet[f'C{row}'] = opt_category if opt_category else "N/A"
        summary_sheet[f'D{row}'] = f"{opt_score: .2%}" if isinstance(opt_score, float) else opt_score
        summary_sheet[f'E{row}'] = distribution.get(0, 0)
        summary_sheet[f'F{row}'] = distribution.get(1, 0)
        summary_sheet[f'G{row}'] = distribution.get(2, 0)
        summary_sheet[f'H{row}'] = distribution.get(3, 0)
        summary_sheet[f'I{row}'] = distribution.get(4, 0)
        summary_sheet[f'J{row}'] = total_chunks
        summary_sheet[f'K{row}'] = file_size
        summary_sheet[f'L{row}'] = self_events
        summary_sheet[f'M{row}'] = size_optimized
        summary_sheet[f'N{row}'] = integrity_note

        # Color-code size-optimized files (light green)
        if size_optimized.startswith("Yes"):
            summary_sheet[f'M{row}'].fill = PatternFill(start_color="CCFFCC", end_color="CCFFCC", fill_type="solid")

        row += 1
    # Auto-size summary sheet columns to their longest cell value
    for col in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N']:
        max_length = 0
        for cell in summary_sheet[col]:
            if cell.value:
                max_length = max(max_length, len(str(cell.value)))
        summary_sheet.column_dimensions[col].width = max_length + 2

    # Visualization sheet: percentage breakdown per analyzed file
    viz_sheet = wb.create_sheet(title="Visualization")
    
    # Column headers (includes Os percentage)
    viz_sheet['A1'] = "Binary File"
    viz_sheet['B1'] = "Optimization Score"
    viz_sheet['C1'] = "O0 %"
    viz_sheet['D1'] = "O1 %"
    viz_sheet['E1'] = "O2 %"
    viz_sheet['F1'] = "O3 %"
    viz_sheet['G1'] = "Os %"
    viz_sheet['H1'] = "Category"
    viz_sheet['I1'] = "Size Optimized"
    viz_sheet['J1'] = "Total Chunks"

    # Rows sorted by optimization score; score-less entries sort first (-1)
    row = 2
    for file in sorted(flags_results.keys(), key=lambda x: flags_results[x]['opt_score'] if flags_results[x]['opt_score'] is not None else -1):
        result = flags_results[file]
        distribution = result['distribution']
        total_chunks = result['total_chunks']

        # Per-level chunk percentages (0 when no chunks at all)
        o0_pct = distribution.get(0, 0) / total_chunks if total_chunks > 0 else 0
        o1_pct = distribution.get(1, 0) / total_chunks if total_chunks > 0 else 0
        o2_pct = distribution.get(2, 0) / total_chunks if total_chunks > 0 else 0
        o3_pct = distribution.get(3, 0) / total_chunks if total_chunks > 0 else 0
        os_pct = distribution.get(4, 0) / total_chunks if total_chunks > 0 else 0

        size_optimized = f"{'Yes' if os_pct >= 0.5 else 'No'} ({os_pct:.1%})"

        viz_sheet[f'A{row}'] = file
        viz_sheet[f'B{row}'] = f"{result['opt_score']:.2%}" if result['opt_score'] is not None else 'N/A'
        viz_sheet[f'C{row}'] = f"{o0_pct:.1%}"
        viz_sheet[f'D{row}'] = f"{o1_pct:.1%}"
        viz_sheet[f'E{row}'] = f"{o2_pct:.1%}"
        viz_sheet[f'F{row}'] = f"{o3_pct:.1%}"
        viz_sheet[f'G{row}'] = f"{os_pct:.1%}"
        viz_sheet[f'H{row}'] = result['opt_category']
        viz_sheet[f'I{row}'] = size_optimized
        viz_sheet[f'J{row}'] = total_chunks

        # Color-code size-optimized files (light green)
        if size_optimized.startswith('Yes'):
            viz_sheet[f'I{row}'].fill = PatternFill(start_color="CCFFCC", end_color="CCFFCC", fill_type="solid")

        row += 1

    # Auto-size visualization sheet columns to their longest cell value
    for col in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J']:
        max_length = 0
        for cell in viz_sheet[col]:
            if cell.value:
                max_length = max(max_length, len(str(cell.value)))
        viz_sheet.column_dimensions[col].width = max_length + 2

    # Save the workbook
    wb.save(output_file)
    logging.info(f"Report saved to {output_file}")

def analyze_json_file(json_file, flags_model, output_file, checkpoint_file, parallel, num_workers):
    """Analyze binary files listed in a JSON file of matched files.

    Each JSON entry is expected to be a dict with a 'matched_file' path and,
    optionally, 'self_events'. Bundled system libraries (libc++_shared.so,
    libmmkv.so) are skipped.

    Returns:
        The (files_with_results, files_missing_results) tuple from
        analyze_binary_files.
    """
    # Load the JSON file containing matched files
    logging.info(f"Loading matched files from {json_file}...")
    with open(json_file, 'r') as f:
        matched_files_data = json.load(f)

    # Extract the file paths and build the metadata dictionary
    binary_files = []
    file_metadata = {}
    skipped_files = 0

    for item in matched_files_data:
        if 'matched_file' in item:
            file_path = item['matched_file']

            # Skip bundled system libraries we don't want to analyze
            if "libc++_shared.so" in file_path or "libmmkv.so" in file_path:
                skipped_files += 1
                continue

            binary_files.append(file_path)

            # Get file size; None when the file is missing/unreadable
            try:
                file_size = os.path.getsize(file_path)
            except OSError:  # FileNotFoundError is a subclass of OSError
                file_size = None

            # Store metadata
            file_metadata[file_path] = {
                'file_size': file_size,
                'self_events': item.get('self_events', '')
            }

    # BUG FIX: the skipped count covers both libc++_shared.so and libmmkv.so;
    # the old message named only libc++_shared.so.
    logging.info(f"Found {len(binary_files)} binary files to analyze (skipped {skipped_files} system library files)")
    return analyze_binary_files(binary_files, file_metadata, flags_model, output_file, checkpoint_file, parallel, num_workers)


def analyze_directory(directory, flags_model, output_file, checkpoint_file, parallel, num_workers):
    """Analyze all .so files found under a directory tree.

    Bundled system libraries (libc++_shared.so, libmmkv.so) are skipped,
    mirroring analyze_json_file.

    Returns:
        The (files_with_results, files_missing_results) tuple from
        analyze_binary_files.
    """
    # Find all .so files in the directory
    logging.info(f"Finding .so files in {directory}...")
    binary_files = find_so_files(directory)

    # Filter out bundled system libraries and collect metadata
    filtered_files = []
    skipped_files = 0
    file_metadata = {}

    for file_path in binary_files:
        if "libc++_shared.so" in file_path or "libmmkv.so" in file_path:
            skipped_files += 1
            continue

        filtered_files.append(file_path)

        # Get file size; None when the file is missing/unreadable
        try:
            file_size = os.path.getsize(file_path)
        except OSError:  # FileNotFoundError is a subclass of OSError
            file_size = None

        # Store metadata (no self_events available in directory mode)
        file_metadata[file_path] = {
            'file_size': file_size,
            'self_events': ''
        }

    # BUG FIX: the skipped count covers both libc++_shared.so and libmmkv.so;
    # the old message named only libc++_shared.so.
    logging.info(f"Found {len(filtered_files)} binary files to analyze (skipped {skipped_files} system library files)")
    return analyze_binary_files(filtered_files, file_metadata, flags_model, output_file, checkpoint_file, parallel, num_workers)

def analyze_binary_files(binary_files, file_metadata, flags_model, output_file, checkpoint_file, parallel, num_workers):
    """ Common function to analyze binary files.

    Runs the flag detector over every file not yet recorded in the checkpoint,
    persists per-file CSVs under "matched_files_results", then loads ALL
    results (including ones from earlier runs) and writes the Excel report.

    Args:
        binary_files: list of binary file paths to analyze.
        file_metadata: dict mapping path -> {'file_size', 'self_events'}.
        flags_model: path to the flags-detection model file.
        output_file: path of the Excel report to write.
        checkpoint_file: JSON file tracking already-analyzed paths.
        parallel: if True (and >1 file remains), use a multiprocessing pool.
        num_workers: worker count; defaults to CPU count - 1.

    Returns:
        (files_with_results, files_missing_results), or None when the model
        file does not exist.
    """
    # Load the checkpoint (or create a fresh one) so interrupted runs resume.
    analyzed_files = []
    if os.path.exists(checkpoint_file):
        try:
            with open(checkpoint_file, 'r') as f:
                checkpoint = json.load(f)
                analyzed_files = checkpoint.get('analyzed_files', [])
                logging.info(f"Resuming analysis. {len(analyzed_files)} files already analyzed.")
        except json.JSONDecodeError:
            logging.error("Error reading checkpoint file. Creating a new one.")
            analyzed_files = []
            with open(checkpoint_file, 'w') as f:
                json.dump({'analyzed_files': []}, f)
    else:
        # Create a new checkpoint file
        with open(checkpoint_file, 'w') as f:
            json.dump({'analyzed_files': []}, f)

    # PERF FIX: set gives O(1) membership tests; the previous list scan made
    # this filter O(n*m) on large resumed runs.
    analyzed_set = set(analyzed_files)
    remaining_files = [f for f in binary_files if f not in analyzed_set]
    logging.info(f"{len(remaining_files)} files remaining to analyze")

    # Directory for persistent per-file intermediate results
    results_dir = "matched_files_results"
    os.makedirs(results_dir, exist_ok=True)

    # Process files
    if remaining_files:
        # Temporary directory for the detector's raw CSV outputs
        temp_dir = tempfile.mkdtemp()
        try:
            # Bail out early if the model is missing (nothing can be analyzed)
            if not os.path.exists(flags_model):
                logging.error(f"Flags model not found: {flags_model}")
                return

            if parallel and len(remaining_files) > 1:
                # Determine number of workers
                if num_workers is None:
                    num_workers = max(1, multiprocessing.cpu_count() - 1)

                logging.info(f"Using {num_workers} parallel workers")

                # NOTE(review): process_binary updates the shared checkpoint
                # file without locking; concurrent workers can lose updates.
                process_func = partial(process_binary, flags_model=flags_model, temp_dir=temp_dir, checkpoint_file=checkpoint_file)

                # Process files in parallel
                with multiprocessing.Pool(num_workers) as pool:
                    results = list(tqdm(pool.imap(process_func, remaining_files),
                                    total=len(remaining_files),
                                    desc="Analyzing binaries"))

                # Persist each successful result under a collision-free name
                for binary_file, flags_df in results:
                    if flags_df is not None:
                        file_id = get_unique_file_id(binary_file)
                        flags_file = os.path.join(results_dir, f"flags_{file_id}.csv")
                        flags_df.to_csv(flags_file, index=False)
            else:
                # Sequential path: analyze, persist, then checkpoint each file
                for binary_file in tqdm(remaining_files, desc="Analyzing binaries"):
                    flags_df = run_analysis_with_detector(binary_file, flags_model, temp_dir)

                    if flags_df is not None:
                        file_id = get_unique_file_id(binary_file)
                        flags_file = os.path.join(results_dir, f"flags_{file_id}.csv")
                        flags_df.to_csv(flags_file, index=False)

                        # Record progress so a crash here doesn't redo the file
                        with open(checkpoint_file, 'r') as f:
                            checkpoint = json.load(f)

                        checkpoint['analyzed_files'].append(binary_file)

                        with open(checkpoint_file, 'w') as f:
                            json.dump(checkpoint, f, indent=2)
        finally:
            # BUG FIX: always clean up the temp directory; it was previously
            # leaked when the model was missing or a step raised.
            shutil.rmtree(temp_dir)

    # Load ALL results (current + previous runs) from the intermediate files
    logging.info("Loading all analysis results...")
    flags_results = {}

    # Tally how many input files have usable results
    files_with_results = 0
    files_missing_results = 0

    for binary_file in tqdm(binary_files, desc="Loading results"):
        file_id = get_unique_file_id(binary_file)
        flags_file = os.path.join(results_dir, f"flags_{file_id}.csv")

        if os.path.exists(flags_file):
            try:
                flags_df = pd.read_csv(flags_file)

                if len(flags_df) > 0:
                    # Collapse chunk-level predictions to file-level results
                    file_flags_results = merge_chunk_results(flags_df)

                    if file_flags_results:
                        flags_results.update(file_flags_results)
                        files_with_results += 1
                    else:
                        logging.warning(f"No results extracted from {binary_file} dataframe")
                        files_missing_results += 1
                else:
                    logging.warning(f"Empty dataframe for {binary_file}")
                    files_missing_results += 1
            except Exception as e:
                logging.error(f"Error loading results for {binary_file}: {e}")
                files_missing_results += 1
        else:
            logging.warning(f"Missing flags file: {flags_file}")
            files_missing_results += 1

    logging.info(f"Files with results: {files_with_results}")
    logging.info(f"Files missing results: {files_missing_results}")

    # The report covers every input file; failures appear as N/A rows
    if binary_files:
        logging.info(f"Generating report with {len(binary_files)} files...")
        generate_excel_report(flags_results, file_metadata, binary_files, output_file)
    else:
        logging.info("No binary files were found.")

    return files_with_results, files_missing_results

def main():
    """CLI entry point: parse arguments and dispatch to JSON or directory mode."""
    # BUG FIX: configure the root logger. Without this, every logging.info /
    # logging.debug call in this module was invisible (the default root
    # level is WARNING and no handler was installed).
    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s [%(levelname)s] %(message)s")

    parser = argparse.ArgumentParser(description="Analyze binary files for optimization flags")

    # Exactly one input mode is required: --json or --dir
    input_group = parser.add_mutually_exclusive_group(required=True)
    input_group.add_argument("--json", help="JSON file containing matched binary files to analyze")
    input_group.add_argument("--dir", help="Directory containing binary files to analyze")

    parser.add_argument("--flags-model", default="models/aarch64-flag-lstm.h5", 
                        help="Path to optimization flags detection model (default: models/aarch64-flag-lstm.h5)")
    parser.add_argument("--output", "-o", default="binary_analysis_report.xlsx",
                        help="Output Excel file path(default: binary_analysis_report.xlsx)")
    parser.add_argument("--checkpoint", default="analysis_checkpoint.json",
                        help="Checkpoint file for resumable analysis (default: analysis_checkpoint.json)")
    parser.add_argument("--parallel", action="store_true", help="Enable parallel processing")
    parser.add_argument("--workers", type=int, default=None, help="Number of parallel workers (default: CPU count - 1)")

    args = parser.parse_args()

    # Determine which mode to run in
    if args.json:
        logging.info(f"Running in JSON mode with file: {args.json}")
        analyze_json_file(args.json, args.flags_model, args.output, args.checkpoint, args.parallel, args.workers)
    elif args.dir:
        logging.info(f"Running in directory mode with path: {args.dir}")
        analyze_directory(args.dir, args.flags_model, args.output, args.checkpoint, args.parallel, args.workers)

if __name__ == "__main__":
    main()