#!/usr/bin/env python3
"""
Dumbo vs Bolt-Dumbo Performance Comparison
Compare pure Dumbo implementation with Bolt-Dumbo results
"""

import os
import json
import csv
from datetime import datetime


def load_dumbo_results(data_dir):
    """Load Dumbo test metrics from ``<data_dir>/results/result.csv``.

    Every CSV cell is coerced to ``float`` when possible; otherwise the
    raw string is kept.  If the file contains multiple rows, later rows
    overwrite earlier ones for identical column names (the expected
    metrics file has a single data row).

    Args:
        data_dir: Directory that contains a ``results/result.csv`` file.

    Returns:
        dict mapping column name -> float or str, or ``None`` when the
        metrics file does not exist.
    """
    metrics_file = os.path.join(data_dir, 'results', 'result.csv')
    if not os.path.exists(metrics_file):
        return None

    results = {}
    # newline='' is the documented way to open files for the csv module.
    with open(metrics_file, 'r', newline='') as f:
        reader = csv.DictReader(f)
        for row in reader:
            for key, value in row.items():
                try:
                    results[key] = float(value)
                except (TypeError, ValueError):
                    # TypeError: short row -> DictReader fills missing
                    # columns with None.  ValueError: non-numeric cell.
                    # Either way, keep the raw value.
                    results[key] = value

    return results


def get_bolt_dumbo_baseline():
    """Return hard-coded Bolt-Dumbo reference figures for comparison.

    Two baselines are provided, both keyed by a node-count label such
    as ``'4_nodes'``: ``'bolt_dumbo'`` holds the numbers from our own
    local run, and ``'paper_baseline'`` holds the figures reported in
    the paper (measured on AWS EC2).
    """
    local_4_nodes = {
        'proposals_per_second': 169.5,  # From our local test
        'commits_per_second': 677.3,
        'votes_per_second': 677.7,
        'environment': 'Local 127.0.0.1',
        'features': 'Bolt + Transformer + Dumbo',
    }
    paper_4_nodes = {
        'proposals_per_second': 150,
        'commits_per_second': 600,
        'latency_ms': 50,
        'environment': 'AWS EC2 c5.2xlarge',
        'features': 'Bolt + Transformer + Dumbo',
    }
    return {
        'bolt_dumbo': {'4_nodes': local_4_nodes},
        'paper_baseline': {'4_nodes': paper_4_nodes},
    }


def analyze_dumbo_performance(dumbo_results, baseline_data):
    """Compare pure-Dumbo metrics against the Bolt-Dumbo baselines.

    Args:
        dumbo_results: Metrics dict as produced by load_dumbo_results().
        baseline_data: Baseline dict as produced by get_bolt_dumbo_baseline().

    Returns:
        Analysis dict with raw results, both baselines, the comparison
        ratios, and a qualitative assessment — or ``None`` when
        ``dumbo_results`` is empty/None.
    """
    if not dumbo_results:
        return None

    def _perf_grade(pct):
        # Throughput as a percentage of the Bolt-Dumbo local baseline.
        if pct >= 80:
            return 'excellent'
        if pct >= 60:
            return 'good'
        if pct >= 40:
            return 'fair'
        return 'poor'

    def _stability_grade(errors):
        # Absolute error count over the whole run.
        if errors == 0:
            return 'excellent'
        if errors <= 2:
            return 'good'
        if errors <= 5:
            return 'fair'
        return 'poor'

    nodes = int(dumbo_results.get('N', 4))
    duration = dumbo_results.get('duration_s', 15)
    label = f'{nodes}_nodes'

    # Baselines default to {} when this node count has no reference data.
    local_baseline = baseline_data['bolt_dumbo'].get(label, {})
    paper_baseline = baseline_data['paper_baseline'].get(label, {})

    aba = dumbo_results.get('total_aba_decisions', 0)
    smvba = dumbo_results.get('total_smvba_decisions', 0)
    errors = dumbo_results.get('total_errors', 0)
    total_decisions = aba + smvba

    # Treat each decision as one committed batch when estimating throughput.
    decisions_per_sec = total_decisions / duration if duration > 0 else 0

    baseline_rate = local_baseline.get('proposals_per_second', 0)
    vs_bolt_dumbo = (
        decisions_per_sec / baseline_rate * 100 if baseline_rate > 0 else 0
    )

    return {
        'node_count': nodes,
        'duration_seconds': duration,
        'dumbo_results': {
            'aba_decisions': aba,
            'smvba_decisions': smvba,
            'total_decisions': total_decisions,
            'decisions_per_second': decisions_per_sec,
            'total_errors': errors,
            'environment': 'Local 127.0.0.1',
        },
        'bolt_dumbo_baseline': local_baseline,
        'paper_baseline': paper_baseline,
        'comparison': {
            'decisions_vs_bolt_dumbo_percent': vs_bolt_dumbo,
            # max(1, ...) guards against division by zero on idle runs.
            'error_rate': errors / max(1, total_decisions) * 100,
        },
        'assessment': {
            'performance_status': _perf_grade(vs_bolt_dumbo),
            'stability_status': _stability_grade(errors),
        },
    }


def generate_comparison_report(analysis, output_file=None):
    """Write the analysis to a JSON file and print a console summary.

    Args:
        analysis: Dict produced by analyze_dumbo_performance(), or None.
        output_file: Destination JSON path; a timestamped name is
            generated when omitted.
    """
    if not analysis:
        print("No analysis data available")
        return

    if not output_file:
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        output_file = f"dumbo_vs_bolt_dumbo_comparison_{timestamp}.json"

    # Save detailed report
    with open(output_file, 'w') as f:
        json.dump(analysis, f, indent=2)

    # Baselines may be empty dicts when no reference data exists for this
    # node count (analyze_dumbo_performance falls back to {}), so read
    # them with .get() and a placeholder instead of direct indexing,
    # which previously raised KeyError for any node count other than 4.
    bolt = analysis['bolt_dumbo_baseline']
    paper = analysis['paper_baseline']

    # Print summary
    print("=" * 80)
    print("DUMBO vs BOLT-DUMBO PERFORMANCE COMPARISON")
    print("=" * 80)
    print(f"Node Count: {analysis['node_count']}")
    print(f"Duration: {analysis['duration_seconds']} seconds")
    print()

    print("PURE DUMBO RESULTS:")
    print(f"  ABA Decisions: {analysis['dumbo_results']['aba_decisions']}")
    print(f"  SMVBA Decisions: {analysis['dumbo_results']['smvba_decisions']}")
    print(f"  Total Decisions: {analysis['dumbo_results']['total_decisions']}")
    print(f"  Decisions/sec: {analysis['dumbo_results']['decisions_per_second']:.2f}")
    print(f"  Total Errors: {analysis['dumbo_results']['total_errors']}")
    print(f"  Environment: {analysis['dumbo_results']['environment']}")
    print()

    print("BOLT-DUMBO BASELINE (Local):")
    print(f"  Proposals/sec: {bolt.get('proposals_per_second', 'N/A')}")
    print(f"  Commits/sec: {bolt.get('commits_per_second', 'N/A')}")
    print(f"  Features: {bolt.get('features', 'N/A')}")
    print()

    print("PAPER BASELINE (AWS EC2):")
    print(f"  Proposals/sec: {paper.get('proposals_per_second', 'N/A')}")
    print(f"  Commits/sec: {paper.get('commits_per_second', 'N/A')}")
    print(f"  Latency: {paper.get('latency_ms', 'N/A')}ms")
    print()

    print("PERFORMANCE COMPARISON:")
    print(f"  Decisions vs Bolt-Dumbo: {analysis['comparison']['decisions_vs_bolt_dumbo_percent']:.1f}% ({analysis['assessment']['performance_status']})")
    print(f"  Error Rate: {analysis['comparison']['error_rate']:.2f}% ({analysis['assessment']['stability_status']})")
    print()

    # Overall assessment: both grades excellent > both at least good >
    # stable-but-slow > needs work.
    if analysis['assessment']['performance_status'] == 'excellent' and analysis['assessment']['stability_status'] == 'excellent':
        overall_status = "Excellent - Pure Dumbo performs well and is stable"
    elif analysis['assessment']['performance_status'] in ['excellent', 'good'] and analysis['assessment']['stability_status'] in ['excellent', 'good']:
        overall_status = "Good - Pure Dumbo shows competitive performance"
    elif analysis['assessment']['stability_status'] == 'excellent':
        overall_status = "Fair - Pure Dumbo is stable but performance could be improved"
    else:
        overall_status = "Poor - Pure Dumbo needs optimization"

    print("OVERALL ASSESSMENT:")
    print(f"  {overall_status}")
    print()

    # Analysis notes
    print("ANALYSIS NOTES:")
    print("  - Pure Dumbo removes Bolt fastlane and Transformer synchronization")
    print("  - This simplifies the protocol but may reduce performance")
    print("  - Pure Dumbo always uses the ACS (Asynchronous Common Subset) path")
    print("  - Error rate indicates protocol stability and correctness")
    print("  - Both implementations use the same underlying ABA and SMVBA components")
    print()

    print("TRADE-OFFS:")
    print("  Pure Dumbo:")
    print("    + Simpler implementation and debugging")
    print("    + More predictable performance")
    print("    + Easier to understand and maintain")
    print("    - Lower peak performance")
    print("    - No fastlane optimization")
    print()
    print("  Bolt-Dumbo:")
    print("    + Higher peak performance")
    print("    + Fastlane optimization")
    print("    + Transformer synchronization")
    print("    - More complex implementation")
    print("    - More potential failure points")
    print()

    print(f"Detailed report saved to: {output_file}")
    print("=" * 80)


def main():
    """CLI entry point: load Dumbo results, analyze, and emit the report.

    Returns:
        0 on success, 1 when the data directory or results are missing.
    """
    import argparse

    parser = argparse.ArgumentParser(description='Compare Dumbo performance with Bolt-Dumbo')
    parser.add_argument('data_dir', help='Directory containing Dumbo test results')
    parser.add_argument('--output', '-o', help='Output file prefix')
    args = parser.parse_args()

    # Guard clauses: bail out early on missing inputs.
    if not os.path.exists(args.data_dir):
        print(f"Error: Directory {args.data_dir} does not exist")
        return 1

    dumbo_results = load_dumbo_results(args.data_dir)
    if not dumbo_results:
        print(f"Error: No Dumbo results found in {args.data_dir}")
        return 1

    analysis = analyze_dumbo_performance(dumbo_results, get_bolt_dumbo_baseline())

    if args.output:
        prefix = args.output
    else:
        prefix = f"dumbo_comparison_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
    generate_comparison_report(analysis, f"{prefix}.json")

    return 0


# Script entry point: exit code propagates main()'s 0/1 return value.
if __name__ == '__main__':
    exit(main())

