#!/usr/bin/env python3
"""
Comprehensive CUDA vs HIP Performance Analysis

Analyzes test results from output/cuda/ and output/hip/ directories to provide
detailed comparisons and answer the core research question about branching overhead.
"""

import json
import os
import re
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Optional, Tuple

import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns

@dataclass
class TestResult:
    """One benchmark measurement parsed from a single log file."""
    platform: str  # 'cuda' or 'hip'
    mode: str      # 'sequential', 'multistream', 'overlapped', 'masquerading'
    array_size: int    # parsed from the 'a' field of the filename
    task_count: int    # parsed from the 't' field of the filename
    block_size: int    # parsed from the 'b' field of the filename
    avg_time: float    # ms; reported average, or mean of runs when absent
    runs: List[float]  # individual per-run times in ms (may be empty)

def parse_log_file(filepath: Path) -> Optional[TestResult]:
    """Parse a single log file and extract performance data.

    Expected filename format: platform_mode_aARRAY_tTASKS_bBLOCK.log
    (platform is cuda, hip, or dcu; 'dcu' is normalized to 'hip' so both
    vendor spellings are analyzed together).

    Returns:
        A populated TestResult, or None when the filename does not match
        the expected format or no timing information could be extracted.
    """
    try:
        # The configuration is encoded entirely in the filename.
        parts = filepath.stem.split('_')
        if len(parts) < 5:
            return None

        platform = parts[0]  # cuda, hip, or dcu
        # Normalize platform name: treat 'dcu' as 'hip' for analysis
        if platform == 'dcu':
            platform = 'hip'
        mode = parts[1]      # sequential, multistream, overlapped, masquerading

        # Strip the single-letter prefixes ('a', 't', 'b') from the numeric fields.
        array_size = int(parts[2][1:])
        task_count = int(parts[3][1:])
        block_size = int(parts[4][1:])

        content = filepath.read_text()

        # Individual run times, e.g. "Run 3 ... 12.34 ms"
        run_matches = re.findall(r'Run \d+.*?(\d+\.?\d*)\s*ms', content, re.DOTALL)
        times = [float(t) for t in run_matches]

        # Reported average time: the first matching summary pattern wins.
        avg_time = None
        avg_patterns = [
            r'Average.*?(\d+\.?\d*)\s*ms',
            r'Sequential.*?(\d+\.?\d*)\s*ms',
            r'Multi-stream.*?(\d+\.?\d*)\s*ms',
            r'Overlapped.*?(\d+\.?\d*)\s*ms',
            r'Masquerading.*?(\d+\.?\d*)\s*ms'
        ]
        for pattern in avg_patterns:
            match = re.search(pattern, content, re.IGNORECASE)
            if match:
                avg_time = float(match.group(1))
                break

        # Fall back to the mean of the individual runs; give up when neither
        # a summary line nor any per-run timings were found.
        if avg_time is None:
            if not times:
                return None
            avg_time = sum(times) / len(times)

        return TestResult(
            platform=platform,
            mode=mode,
            array_size=array_size,
            task_count=task_count,
            block_size=block_size,
            avg_time=avg_time,
            runs=times
        )

    except Exception as e:
        # A malformed log must not abort the whole analysis run.
        print(f"Error parsing {filepath}: {e}")
        return None

def load_test_results() -> List[TestResult]:
    """Load all test results from the output directories.

    Scans output/cuda/ and output/hip/ for *.log files. Directories that do
    not exist are skipped, as are files parse_log_file rejects.
    """
    results: List[TestResult] = []

    # Both platform directories are processed identically, so iterate over
    # them instead of duplicating the scan loop per platform.
    for platform_dir in (Path("output/cuda"), Path("output/hip")):
        if not platform_dir.exists():
            continue
        for log_file in platform_dir.glob("*.log"):
            result = parse_log_file(log_file)
            if result:
                results.append(result)

    print(f"Loaded {len(results)} test results")
    return results

def calculate_metrics(results: List[TestResult]) -> Dict:
    """Calculate comprehensive performance metrics.

    Produces per-platform/per-mode timing statistics, a direct CUDA-vs-HIP
    comparison per mode, and the branching-overhead analysis. The 'by_mode'
    and 'by_config' slots are reserved (currently left empty).
    """
    all_modes = ['sequential', 'multistream', 'overlapped', 'masquerading']
    grouped = {
        'cuda': [r for r in results if r.platform == 'cuda'],
        'hip': [r for r in results if r.platform == 'hip'],
    }

    # Per-platform summary statistics, one entry per mode that has data.
    by_platform = {}
    for platform, platform_results in grouped.items():
        if not platform_results:
            continue
        mode_stats = {}
        for mode in all_modes:
            times = [r.avg_time for r in platform_results if r.mode == mode]
            if times:
                mode_stats[mode] = {
                    'avg_time': np.mean(times),
                    'std_time': np.std(times),
                    'min_time': np.min(times),
                    'max_time': np.max(times),
                    'count': len(times),
                }
        by_platform[platform] = mode_stats

    # Head-to-head comparison: only modes with results on BOTH platforms.
    comparison = {}
    for mode in all_modes:
        cuda_times = [r.avg_time for r in grouped['cuda'] if r.mode == mode]
        hip_times = [r.avg_time for r in grouped['hip'] if r.mode == mode]
        if cuda_times and hip_times:
            cuda_avg = np.mean(cuda_times)
            hip_avg = np.mean(hip_times)
            comparison[mode] = {
                'cuda_avg': cuda_avg,
                'hip_avg': hip_avg,
                # Ratios > 1 mean the named platform is faster for this mode.
                'cuda_faster_by': hip_avg / cuda_avg if cuda_avg > 0 else 0,
                'hip_faster_by': cuda_avg / hip_avg if hip_avg > 0 else 0,
            }

    return {
        'by_platform': by_platform,
        'by_mode': {},
        'by_config': {},
        'cuda_vs_hip': comparison,
        'branching_analysis': analyze_branching_overhead(results),
    }

def analyze_branching_overhead(results: List[TestResult]) -> Dict:
    """Analyze branching overhead for single-task masquerading vs multi-stream.

    For every task count that has data for BOTH modes on a platform, reports
    the mean times, the masquerading overhead percentage, and the slowdown
    ratio. The 'overhead_by_tasks' slot is reserved (currently left empty).
    """
    analysis = {}

    for platform in ('cuda', 'hip'):
        subset = [r for r in results if r.platform == platform]
        masq = [r for r in subset if r.mode == 'masquerading']
        multi = [r for r in subset if r.mode == 'multistream']

        scaling = {}
        # Task counts are taken from ALL of the platform's results, not just
        # the two modes being compared; counts lacking either mode are skipped.
        for tasks in sorted({r.task_count for r in subset}):
            masq_times = [r.avg_time for r in masq if r.task_count == tasks]
            multi_times = [r.avg_time for r in multi if r.task_count == tasks]
            if not (masq_times and multi_times):
                continue

            masq_avg = np.mean(masq_times)
            multi_avg = np.mean(multi_times)
            if multi_avg > 0:
                overhead = (masq_avg - multi_avg) / multi_avg * 100
                slowdown = masq_avg / multi_avg
            else:
                overhead = 0
                slowdown = 0

            scaling[tasks] = {
                'masquerading_avg': masq_avg,
                'multistream_avg': multi_avg,
                'overhead_percent': overhead,
                'masq_slower_by': slowdown,
            }

        analysis[platform] = {
            'task_count_scaling': scaling,
            'overhead_by_tasks': {},
        }

    return analysis

def create_visualizations(results: List[TestResult], metrics: Dict):
    """Create comprehensive visualizations.

    Renders a 3x3 grid of comparison charts from the parsed results and the
    precomputed metrics, and saves a single PNG to
    output/cuda_vs_hip_comprehensive_analysis.png.
    """

    # Set up the plotting style
    # NOTE(review): the 'seaborn-v0_8' style name exists only in newer
    # matplotlib releases (older ones use 'seaborn') — confirm the pinned
    # matplotlib version supports it.
    plt.style.use('seaborn-v0_8')
    sns.set_palette("husl")

    # Create figure with more subplots for new analyses
    fig = plt.figure(figsize=(24, 20))

    # 1. CUDA vs HIP Performance by Mode — grouped bar chart of mean times.
    ax1 = plt.subplot(3, 3, 1)
    modes = ['sequential', 'multistream', 'overlapped', 'masquerading']
    cuda_times = []
    hip_times = []

    for mode in modes:
        cuda_mode = [r for r in results if r.platform == 'cuda' and r.mode == mode]
        hip_mode = [r for r in results if r.platform == 'hip' and r.mode == mode]

        # Missing data plots as a zero-height bar rather than being skipped.
        cuda_avg = np.mean([r.avg_time for r in cuda_mode]) if cuda_mode else 0
        hip_avg = np.mean([r.avg_time for r in hip_mode]) if hip_mode else 0

        cuda_times.append(cuda_avg)
        hip_times.append(hip_avg)

    x = np.arange(len(modes))
    width = 0.35

    ax1.bar(x - width/2, cuda_times, width, label='CUDA', alpha=0.8)
    ax1.bar(x + width/2, hip_times, width, label='HIP', alpha=0.8)
    ax1.set_xlabel('Test Mode')
    ax1.set_ylabel('Average Execution Time (ms)')
    ax1.set_title('CUDA vs HIP Performance by Mode')
    ax1.set_xticks(x)
    ax1.set_xticklabels(modes, rotation=45)
    ax1.legend()
    ax1.grid(True, alpha=0.3)

    # 2. Branching Overhead Analysis — overhead % per task count, per platform.
    ax2 = plt.subplot(3, 3, 2)

    branching = metrics['branching_analysis']

    for platform in ['cuda', 'hip']:
        if platform in branching:
            task_counts = []
            overhead_pcts = []

            for task_count, data in branching[platform]['task_count_scaling'].items():
                task_counts.append(task_count)
                overhead_pcts.append(data['overhead_percent'])

            if task_counts:
                ax2.plot(task_counts, overhead_pcts, marker='o', linewidth=2, 
                        label=f'{platform.upper()} Branching Overhead', markersize=8)

    ax2.set_xlabel('Number of Sub-tasks')
    ax2.set_ylabel('Masquerading Overhead (%)')
    ax2.set_title('Branching Overhead: Masquerading vs Multi-stream')
    ax2.legend()
    ax2.grid(True, alpha=0.3)

    # 3. Speedup Comparison — sequential/multistream ratio per platform.
    ax3 = plt.subplot(3, 3, 3)

    for platform in ['cuda', 'hip']:
        platform_results = [r for r in results if r.platform == platform]
        sequential = [r for r in platform_results if r.mode == 'sequential']
        multistream = [r for r in platform_results if r.mode == 'multistream']

        if sequential and multistream:
            seq_avg = np.mean([r.avg_time for r in sequential])
            multi_avg = np.mean([r.avg_time for r in multistream])
            speedup = seq_avg / multi_avg if multi_avg > 0 else 0

            ax3.bar(platform.upper(), speedup, alpha=0.8, 
                   color='skyblue' if platform == 'cuda' else 'lightcoral')

    ax3.set_ylabel('Speedup (Sequential / Multi-stream)')
    ax3.set_title('Stream Speedup Comparison')
    ax3.grid(True, alpha=0.3)

    # 4. Array Size Scaling — log-log plot, multistream vs masquerading.
    ax4 = plt.subplot(3, 3, 4)

    for mode in ['multistream', 'masquerading']:
        for platform in ['cuda', 'hip']:
            platform_mode = [r for r in results if r.platform == platform and r.mode == mode]

            array_sizes = []
            times = []

            for size in sorted(set(r.array_size for r in platform_mode)):
                size_results = [r for r in platform_mode if r.array_size == size]
                if size_results:
                    array_sizes.append(size)
                    times.append(np.mean([r.avg_time for r in size_results]))

            if array_sizes:
                # Solid line = multistream, dashed = masquerading (same below).
                linestyle = '-' if mode == 'multistream' else '--'
                ax4.loglog(array_sizes, times, marker='o', linestyle=linestyle,
                          label=f'{platform.upper()} {mode}', linewidth=2, markersize=6)

    ax4.set_xlabel('Array Size (elements)')
    ax4.set_ylabel('Execution Time (ms)')
    ax4.set_title('Performance Scaling with Array Size')
    ax4.legend()
    ax4.grid(True, alpha=0.3)

    # 5. Task Count Scaling (Enhanced for varying sub-tasks research)
    ax5 = plt.subplot(3, 3, 5)

    for mode in ['multistream', 'masquerading']:
        for platform in ['cuda', 'hip']:
            platform_mode = [r for r in results if r.platform == platform and r.mode == mode]

            task_counts = []
            times = []

            for count in sorted(set(r.task_count for r in platform_mode)):
                count_results = [r for r in platform_mode if r.task_count == count]
                if count_results:
                    task_counts.append(count)
                    times.append(np.mean([r.avg_time for r in count_results]))

            if task_counts:
                linestyle = '-' if mode == 'multistream' else '--'
                ax5.plot(task_counts, times, marker='o', linestyle=linestyle,
                        label=f'{platform.upper()} {mode}', linewidth=2, markersize=6)

    ax5.set_xlabel('Number of Sub-tasks')
    ax5.set_ylabel('Execution Time (ms)')
    ax5.set_title('Performance Scaling with Task Count')
    ax5.legend()
    ax5.grid(True, alpha=0.3)

    # 6. Efficiency Comparison — sequential/mode ratio for the two stream modes.
    ax6 = plt.subplot(3, 3, 6)

    efficiency_data = []
    labels = []

    for platform in ['cuda', 'hip']:
        for mode in ['multistream', 'overlapped']:
            platform_mode = [r for r in results if r.platform == platform and r.mode == mode]
            sequential_mode = [r for r in results if r.platform == platform and r.mode == 'sequential']

            if platform_mode and sequential_mode:
                mode_avg = np.mean([r.avg_time for r in platform_mode])
                seq_avg = np.mean([r.avg_time for r in sequential_mode])
                efficiency = (seq_avg / mode_avg) if mode_avg > 0 else 0

                efficiency_data.append(efficiency)
                labels.append(f'{platform.upper()}\n{mode}')

    colors = ['skyblue', 'lightblue', 'lightcoral', 'mistyrose']
    ax6.bar(labels, efficiency_data, color=colors[:len(efficiency_data)], alpha=0.8)
    ax6.set_ylabel('Efficiency (Sequential / Mode)')
    ax6.set_title('Stream Efficiency Comparison')
    ax6.grid(True, alpha=0.3)
    plt.setp(ax6.get_xticklabels(), rotation=45)

    # 7. NEW: Sub-Task Count Trend Analysis
    ax7 = plt.subplot(3, 3, 7)

    # Focus on varying sub-tasks research (8, 16, 32, 64, 128, 256, 512)
    valid_task_counts = [8, 16, 32, 64, 128, 256, 512]

    for platform in ['cuda', 'hip']:
        for mode in ['multistream', 'masquerading']:
            platform_mode = [r for r in results if r.platform == platform and r.mode == mode]

            trend_tasks = []
            trend_times = []
            trend_efficiency = []

            for count in valid_task_counts:
                count_results = [r for r in platform_mode if r.task_count == count]
                if count_results:
                    avg_time = np.mean([r.avg_time for r in count_results])
                    trend_tasks.append(count)
                    trend_times.append(avg_time)

                    # Calculate efficiency as time per task
                    # (computed here but only plotted in subplot 9)
                    efficiency = avg_time / count if count > 0 else 0
                    trend_efficiency.append(efficiency)

            if trend_tasks:
                linestyle = '-' if mode == 'multistream' else '--'
                # Circle markers = CUDA, squares = HIP (same below).
                marker = 'o' if platform == 'cuda' else 's'
                ax7.semilogx(trend_tasks, trend_times, marker=marker, linestyle=linestyle,
                           label=f'{platform.upper()} {mode}', linewidth=2, markersize=6)

    ax7.set_xlabel('Number of Sub-tasks')
    ax7.set_ylabel('Execution Time (ms)')
    ax7.set_title('Sub-Task Scaling Trends (8-512 tasks)')
    ax7.legend(fontsize=8)
    ax7.grid(True, alpha=0.3)

    # 8. NEW: Thread Count Performance Analysis
    ax8 = plt.subplot(3, 3, 8)

    # Analyze performance vs thread count for different modes
    for platform in ['cuda', 'hip']:
        for mode in ['multistream', 'masquerading']:
            platform_mode = [r for r in results if r.platform == platform and r.mode == mode]

            thread_counts = []
            thread_times = []

            for threads in sorted(set(r.block_size for r in platform_mode)):
                thread_results = [r for r in platform_mode if r.block_size == threads]
                if thread_results:
                    thread_counts.append(threads)
                    thread_times.append(np.mean([r.avg_time for r in thread_results]))

            if thread_counts:
                linestyle = '-' if mode == 'multistream' else '--'
                marker = 'o' if platform == 'cuda' else 's'
                ax8.plot(thread_counts, thread_times, marker=marker, linestyle=linestyle,
                        label=f'{platform.upper()} {mode}', linewidth=2, markersize=6)

    ax8.set_xlabel('Threads per Block')
    ax8.set_ylabel('Execution Time (ms)')
    ax8.set_title('Thread Count vs Performance')
    ax8.legend(fontsize=8)
    ax8.grid(True, alpha=0.3)

    # 9. NEW: Efficiency vs Sub-Task Count
    ax9 = plt.subplot(3, 3, 9)

    for platform in ['cuda', 'hip']:
        for mode in ['multistream', 'masquerading']:
            platform_mode = [r for r in results if r.platform == platform and r.mode == mode]

            task_counts = []
            efficiencies = []

            for count in valid_task_counts:
                count_results = [r for r in platform_mode if r.task_count == count]
                if count_results:
                    avg_time = np.mean([r.avg_time for r in count_results])
                    # Efficiency: time per task (lower is better)
                    efficiency = avg_time / count if count > 0 else 0
                    task_counts.append(count)
                    efficiencies.append(efficiency)

            if task_counts:
                linestyle = '-' if mode == 'multistream' else '--'
                marker = 'o' if platform == 'cuda' else 's'
                ax9.semilogx(task_counts, efficiencies, marker=marker, linestyle=linestyle,
                           label=f'{platform.upper()} {mode}', linewidth=2, markersize=6)

    ax9.set_xlabel('Number of Sub-tasks')
    ax9.set_ylabel('Time per Task (ms/task)')
    ax9.set_title('Task Efficiency Trends')
    ax9.legend(fontsize=8)
    ax9.grid(True, alpha=0.3)

    plt.tight_layout()
    # NOTE(review): the figure is never closed; add plt.close(fig) if this
    # function is ever called repeatedly in one process.
    plt.savefig('output/cuda_vs_hip_comprehensive_analysis.png', dpi=300, bbox_inches='tight')
    print("Enhanced comprehensive analysis chart saved to: output/cuda_vs_hip_comprehensive_analysis.png")
    print("New analyses include: Sub-task scaling trends, Thread count analysis, Task efficiency")

def generate_report(results: List[TestResult], metrics: Dict) -> str:
    """Generate a comprehensive analysis report.

    Builds and returns a Markdown document (as a string) summarizing the test
    overview, per-mode CUDA-vs-HIP comparison, branching-overhead tables, and
    overall conclusions. Does not write any files itself.
    """

    report = """# CUDA vs HIP Performance Analysis Report
==========================================

## Executive Summary

This report analyzes the performance comparison between CUDA (NVIDIA) and HIP (AMD) 
implementations of parallel computing tasks, specifically addressing the core research 
question: **Does single-task masquerading with branching logic create significant 
performance bottlenecks compared to explicit multi-stream approaches?**

"""
    
    # Test overview
    cuda_count = len([r for r in results if r.platform == 'cuda'])
    hip_count = len([r for r in results if r.platform == 'hip'])
    
    report += f"""## Test Overview

- **Total test configurations**: {len(results)}
- **CUDA test results**: {cuda_count}
- **HIP test results**: {hip_count}
- **Test modes**: Sequential, Multi-stream, Overlapped, Single-task Masquerading
- **Array sizes tested**: {sorted(set(r.array_size for r in results))}
- **Task counts tested**: {sorted(set(r.task_count for r in results))}
- **Block sizes tested**: {sorted(set(r.block_size for r in results))}

"""
    
    # Performance comparison
    report += "## Platform Performance Comparison\n\n"
    
    if 'cuda_vs_hip' in metrics:
        for mode, data in metrics['cuda_vs_hip'].items():
            cuda_avg = data['cuda_avg']
            hip_avg = data['hip_avg']
            
            # Winner is the platform with the lower mean time; ties go to HIP.
            if cuda_avg < hip_avg:
                faster_platform = "CUDA"
                speedup = data['cuda_faster_by']
            else:
                faster_platform = "HIP"  
                speedup = data['hip_faster_by']
            
            report += f"### {mode.capitalize()} Mode\n"
            report += f"- **CUDA average**: {cuda_avg:.2f} ms\n"
            report += f"- **HIP average**: {hip_avg:.2f} ms\n"
            report += f"- **Winner**: {faster_platform} ({speedup:.2f}x faster)\n\n"
    
    # Branching overhead analysis (core research question)
    report += "## Branching Overhead Analysis\n\n"
    report += "**Key Research Question**: Does single-task masquerading (Approach B) create significant performance bottlenecks compared to multi-stream execution (Approach A)?\n\n"
    
    if 'branching_analysis' in metrics:
        for platform, analysis in metrics['branching_analysis'].items():
            report += f"### {platform.upper()} Platform Results\n\n"
            
            if 'task_count_scaling' in analysis:
                report += "| Task Count | Multi-stream (ms) | Masquerading (ms) | Overhead (%) | Conclusion |\n"
                report += "|------------|-------------------|-------------------|--------------|------------|\n"
                
                for task_count, data in analysis['task_count_scaling'].items():
                    multi_avg = data['multistream_avg']
                    masq_avg = data['masquerading_avg']
                    overhead = data['overhead_percent']
                    
                    # Per-row verdict thresholds: >10% significant, >5% moderate.
                    if overhead > 10:
                        conclusion = "⚠️ Significant overhead"
                    elif overhead > 5:
                        conclusion = "⚡ Moderate overhead"
                    else:
                        conclusion = "✅ Minimal overhead"
                    
                    report += f"| {task_count} | {multi_avg:.2f} | {masq_avg:.2f} | {overhead:.1f}% | {conclusion} |\n"
                
                report += "\n"
    
    # Conclusions and recommendations
    report += "## Key Findings & Conclusions\n\n"
    
    # Determine overall winner
    cuda_results = [r for r in results if r.platform == 'cuda']
    hip_results = [r for r in results if r.platform == 'hip']
    
    if cuda_results and hip_results:
        # Grand mean over every result regardless of mode or configuration.
        cuda_overall = np.mean([r.avg_time for r in cuda_results])
        hip_overall = np.mean([r.avg_time for r in hip_results])
        
        if cuda_overall < hip_overall:
            overall_winner = "CUDA"
            overall_speedup = hip_overall / cuda_overall
        else:
            overall_winner = "HIP"
            overall_speedup = cuda_overall / hip_overall
        
        report += f"### Overall Performance Winner\n"
        report += f"**{overall_winner}** demonstrates superior overall performance with {overall_speedup:.2f}x average speedup.\n\n"
    
    # Branching conclusion
    report += "### Branching Overhead Conclusion\n"
    
    # Calculate average overhead across platforms
    total_overhead = 0
    overhead_count = 0
    
    if 'branching_analysis' in metrics:
        for platform_analysis in metrics['branching_analysis'].values():
            if 'task_count_scaling' in platform_analysis:
                for data in platform_analysis['task_count_scaling'].values():
                    total_overhead += data['overhead_percent']
                    overhead_count += 1
    
    if overhead_count > 0:
        avg_overhead = total_overhead / overhead_count
        
        # Overall verdict thresholds: >15% confirmed, >5% moderate.
        if avg_overhead > 15:
            conclusion = "**CONFIRMED**: Single-task masquerading creates significant performance bottlenecks"
        elif avg_overhead > 5:
            conclusion = "**MODERATE**: Single-task masquerading shows measurable overhead"
        else:
            conclusion = "**MINIMAL**: Single-task masquerading overhead is negligible"
        
        report += f"{conclusion}\n\n"
        report += f"**Average branching overhead**: {avg_overhead:.1f}%\n\n"
    
    # Recommendations
    report += "## Recommendations\n\n"
    report += "### For CUDA Development\n"
    report += "- Leverage CUDA's mature stream scheduling for optimal performance\n"
    report += "- Multi-stream approaches provide excellent parallelization benefits\n"
    report += "- Consider CUDA as the reference platform for algorithm validation\n\n"
    
    report += "### For HIP/DCU Development\n"
    report += "- Implement explicit multi-stream patterns rather than single-task masquerading\n"
    report += "- Focus on overlapped execution patterns for memory-bound workloads\n"
    report += "- Consider workload characteristics when choosing parallelization strategy\n\n"
    
    report += "### General Guidelines\n"
    report += "- **Multi-stream approaches** generally outperform single-task masquerading\n"
    report += "- **Explicit parallelization** is preferred over branch-heavy kernels\n"
    report += "- **Platform-specific optimization** is crucial for maximum performance\n\n"
    
    report += "---\n"
    report += f"*Report generated from {len(results)} test configurations*\n"
    
    return report

def main():
    """Run the full pipeline: load logs, compute metrics, plot, and report."""
    print("CUDA vs HIP Performance Analysis")
    print("=" * 40)

    # Load results
    print("Loading test results...")
    results = load_test_results()

    if not results:
        print("No test results found in output/cuda/ or output/hip/ directories")
        return

    # Calculate metrics
    print("Calculating performance metrics...")
    metrics = calculate_metrics(results)

    # Ensure the output directory exists before any artifact is written
    # (both the visualization PNG and the reports are saved under output/).
    os.makedirs("output", exist_ok=True)

    # Create visualizations
    print("Generating visualizations...")
    create_visualizations(results, metrics)

    # Generate report
    print("Generating comprehensive report...")
    report = generate_report(results, metrics)

    # Save report
    with open('output/cuda_vs_hip_analysis_report.md', 'w') as f:
        f.write(report)

    # Save metrics as JSON; default=str stringifies numpy scalars and any
    # other non-JSON-native values in the metrics dict.
    with open('output/cuda_vs_hip_metrics.json', 'w') as f:
        json.dump(metrics, f, indent=2, default=str)

    print("\nAnalysis Complete!")
    print("=" * 40)
    print("Generated files:")
    print("  📊 output/cuda_vs_hip_comprehensive_analysis.png")
    print("  📋 output/cuda_vs_hip_analysis_report.md")
    print("  📈 output/cuda_vs_hip_metrics.json")

    # Show key findings
    print("\n🔍 Quick Summary:")
    cuda_count = len([r for r in results if r.platform == 'cuda'])
    hip_count = len([r for r in results if r.platform == 'hip'])
    print(f"  • Analyzed {cuda_count} CUDA and {hip_count} HIP test results")
    print(f"  • Tested {len(set((r.array_size, r.task_count, r.block_size) for r in results))} unique configurations")
    print("  • Evaluated all 4 test modes: sequential, multistream, overlapped, masquerading")

if __name__ == "__main__":
    main()