#!/usr/bin/env python3

import os
import re
import json
import time
from datetime import datetime

def analyze_log_file(log_file):
    """Parse one EbbFlow log file and collect performance metrics.

    Args:
        log_file: Path to a server or client log file. A missing file is
            not an error; it simply yields all-empty metrics.

    Returns:
        dict with per-component delay lists (``rbc_delays``, ``aba_delays``,
        ``lt_delays``, ``proof_delays``, ``consensus_delays``), measurement
        lists (``tps_values``, ``throughput_values``), a count of proof
        generate/verify operations (``proof_operations``), and a count of
        lines mentioning an error or failure (``error_count``).
    """
    metrics = {
        'rbc_delays': [],
        'aba_delays': [],
        'lt_delays': [],
        'proof_delays': [],
        'consensus_delays': [],
        'tps_values': [],
        'proof_operations': 0,
        'throughput_values': [],
        'error_count': 0
    }

    if not os.path.exists(log_file):
        return metrics

    # Compile all patterns once, outside the per-line loop. Each pattern
    # captures a (possibly fractional) number following the keyword.
    number = r'[:\s]+(\d+(?:\.\d+)?)'
    value_patterns = [
        ('rbc_delays', re.compile(r'RBC.*delay' + number)),
        ('aba_delays', re.compile(r'ABA.*delay' + number)),
        ('lt_delays', re.compile(r'LT.*delay' + number)),
        ('proof_delays', re.compile(r'Proof.*delay' + number)),
        ('consensus_delays', re.compile(r'Consensus.*delay' + number)),
        ('tps_values', re.compile(r'TPS' + number)),
        ('throughput_values', re.compile(r'throughput' + number)),
    ]
    # BUG FIX: the original code tested `'Proof.*generated' in line`, i.e. a
    # literal substring check of the regex text itself, so proof operations
    # were essentially never counted. Match it as a real regex instead.
    proof_op_pattern = re.compile(r'Proof.*(?:generated|verified)')

    with open(log_file, 'r') as f:
        for line in f:
            # A single line may contribute to several metric lists, matching
            # the original independent-checks behavior.
            for key, pattern in value_patterns:
                match = pattern.search(line)
                if match:
                    metrics[key].append(float(match.group(1)))

            # Count proof generate/verify operations (once per line).
            if proof_op_pattern.search(line):
                metrics['proof_operations'] += 1

            # Count error/failure lines (case-insensitive; once per line).
            lowered = line.lower()
            if 'error' in lowered or 'fail' in lowered:
                metrics['error_count'] += 1

    return metrics

def calculate_statistics(values):
    """Return summary statistics for *values*.

    Args:
        values: Sequence of numbers (may be empty).

    Returns:
        dict with keys ``mean``, ``median``, ``min``, ``max``, ``count``.
        An empty input yields all-zero statistics.
    """
    if not values:
        return {'mean': 0, 'median': 0, 'min': 0, 'max': 0, 'count': 0}

    ordered = sorted(values)
    count = len(ordered)
    mid = count // 2
    if count % 2:
        median = ordered[mid]
    else:
        median = (ordered[mid - 1] + ordered[mid]) / 2

    return {
        'mean': sum(ordered) / count,
        'median': median,
        'min': ordered[0],
        'max': ordered[-1],
        'count': count,
    }

def _print_component(header, stats):
    """Print one component's delay statistics in the breakdown format."""
    print(header)
    print(f"   - Average Delay: {stats['mean']:.2f}ms")
    print(f"   - Median Delay: {stats['median']:.2f}ms")
    print(f"   - Min/Max Delay: {stats['min']:.2f}ms / {stats['max']:.2f}ms")


def analyze_performance():
    """Analyze EbbFlow protocol performance across server and client logs.

    Reads ``logs/server_0..5.log`` and ``logs/client_101..103.log`` (missing
    files contribute empty metrics), prints per-node and aggregate
    statistics, then writes the raw values and summary statistics to
    ``logs/performance_analysis.json``.
    """
    print("EbbFlow Protocol Performance Analysis")
    print("=" * 50)

    # Per-server metrics. The node counts are hard-coded to match the test
    # deployment -- TODO confirm against the launcher scripts if the
    # cluster size changes.
    server_metrics = []
    for i in range(6):  # 6 servers
        log_file = f"logs/server_{i}.log"
        metrics = analyze_log_file(log_file)
        server_metrics.append(metrics)
        print(f"\nServer {i} Analysis:")
        print(f"  RBC Delays: {calculate_statistics(metrics['rbc_delays'])}")
        print(f"  ABA Delays: {calculate_statistics(metrics['aba_delays'])}")
        print(f"  LT Delays: {calculate_statistics(metrics['lt_delays'])}")
        print(f"  Proof Delays: {calculate_statistics(metrics['proof_delays'])}")
        print(f"  Consensus Delays: {calculate_statistics(metrics['consensus_delays'])}")
        print(f"  Proof Operations: {metrics['proof_operations']}")
        print(f"  Errors: {metrics['error_count']}")

    # Per-client metrics; client IDs start at 101 by convention.
    client_metrics = []
    for i in range(101, 104):  # 3 clients
        log_file = f"logs/client_{i}.log"
        metrics = analyze_log_file(log_file)
        client_metrics.append(metrics)
        print(f"\nClient {i} Analysis:")
        print(f"  TPS: {calculate_statistics(metrics['tps_values'])}")
        print(f"  Throughput: {calculate_statistics(metrics['throughput_values'])}")
        print(f"  RBC Delays: {calculate_statistics(metrics['rbc_delays'])}")
        print(f"  ABA Delays: {calculate_statistics(metrics['aba_delays'])}")
        print(f"  LT Delays: {calculate_statistics(metrics['lt_delays'])}")
        print(f"  Proof Delays: {calculate_statistics(metrics['proof_delays'])}")
        print(f"  Consensus Delays: {calculate_statistics(metrics['consensus_delays'])}")

    print("\n" + "=" * 50)
    print("Overall Performance Summary")
    print("=" * 50)

    # Aggregate every node's metrics into flat lists / totals.
    all_rbc_delays = []
    all_aba_delays = []
    all_lt_delays = []
    all_proof_delays = []
    all_consensus_delays = []
    all_tps_values = []
    total_proof_operations = 0
    total_errors = 0

    for metrics in server_metrics + client_metrics:
        all_rbc_delays.extend(metrics['rbc_delays'])
        all_aba_delays.extend(metrics['aba_delays'])
        all_lt_delays.extend(metrics['lt_delays'])
        all_proof_delays.extend(metrics['proof_delays'])
        all_consensus_delays.extend(metrics['consensus_delays'])
        all_tps_values.extend(metrics['tps_values'])
        total_proof_operations += metrics['proof_operations']
        total_errors += metrics['error_count']

    # Compute each aggregate statistic exactly once (the original
    # recomputed them up to three times: summary, breakdown, and JSON).
    rbc_stats = calculate_statistics(all_rbc_delays)
    aba_stats = calculate_statistics(all_aba_delays)
    lt_stats = calculate_statistics(all_lt_delays)
    proof_stats = calculate_statistics(all_proof_delays)
    consensus_stats = calculate_statistics(all_consensus_delays)
    tps_stats = calculate_statistics(all_tps_values)

    print(f"RBC Delays: {rbc_stats}")
    print(f"ABA Delays: {aba_stats}")
    print(f"LT Code Delays: {lt_stats}")
    print(f"Proof Chain Delays: {proof_stats}")
    print(f"Consensus Delays: {consensus_stats}")
    print(f"Total TPS: {tps_stats}")
    print(f"Total Proof Operations: {total_proof_operations}")
    print(f"Total Errors: {total_errors}")

    print("\n" + "=" * 50)
    print("Component Performance Breakdown")
    print("=" * 50)

    _print_component("1. RBC (Reliable Broadcast):", rbc_stats)
    _print_component("\n2. ABA (Asynchronous Binary Agreement):", aba_stats)
    _print_component("\n3. LT Code (Luby Transform):", lt_stats)
    _print_component("\n4. Proof Chain:", proof_stats)
    print(f"   - Total Operations: {total_proof_operations}")
    _print_component("\n5. Overall Consensus:", consensus_stats)

    # Persist the raw numbers plus the summary statistics for later plotting.
    results = {
        'timestamp': datetime.now().isoformat(),
        'rbc_delays': all_rbc_delays,
        'aba_delays': all_aba_delays,
        'lt_delays': all_lt_delays,
        'proof_delays': all_proof_delays,
        'consensus_delays': all_consensus_delays,
        'tps_values': all_tps_values,
        'total_proof_operations': total_proof_operations,
        'total_errors': total_errors,
        'statistics': {
            'rbc': rbc_stats,
            'aba': aba_stats,
            'lt': lt_stats,
            'proof': proof_stats,
            'consensus': consensus_stats,
            'tps': tps_stats
        }
    }

    # BUG FIX: ensure the output directory exists -- the original crashed
    # with FileNotFoundError when logs/ was absent (e.g. no run yet).
    os.makedirs('logs', exist_ok=True)
    with open('logs/performance_analysis.json', 'w') as f:
        json.dump(results, f, indent=2)

    print("\nDetailed results saved to logs/performance_analysis.json")

if __name__ == "__main__":
    # Entry point: run the full log analysis when executed as a script.
    analyze_performance()






