#!/usr/bin/env python3
"""
BDT Performance Analysis Script
Analyzes logs from BDT benchmark runs and generates performance metrics
"""

import os
import re
import json
import csv
import glob
from datetime import datetime
from collections import defaultdict
import argparse


def parse_bdt_log(log_file):
    """Parse a single BDT log file and extract performance counters.

    Scans the file line by line, counting proposals, votes, commits,
    connections and errors, and collecting the ISO-8601 timestamps that
    accompany proposal/vote/commit events.

    Args:
        log_file: Path to the ``.log`` file to parse.

    Returns:
        dict with counter keys (``proposals``, ``commits``, ``votes``,
        ``errors``, ``connections``, ``total_lines``), timestamp lists
        (``proposal_times``, ``commit_times``, ``vote_times``) and, if the
        file could not be read, a ``parse_error`` message. Parsing is
        best-effort: undecodable bytes are ignored and an unreadable file
        yields zeroed counters plus ``parse_error``.
    """
    metrics = {
        'file': os.path.basename(log_file),
        'total_lines': 0,
        'proposals': 0,
        'commits': 0,
        'votes': 0,
        'errors': 0,
        'connections': 0,
        'proposal_times': [],
        'commit_times': [],
        'vote_times': []
    }

    # Compile once; the original recompiled this pattern for every
    # matching line, which is wasted work on large logs.
    timestamp_re = re.compile(r'(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+)')

    try:
        with open(log_file, 'r', errors='ignore') as f:
            for line in f:
                metrics['total_lines'] += 1

                # Classify each line by message type. The elif chain is
                # deliberate: a line counts toward at most one category,
                # checked in this priority order.
                if 'successfully broadcast a new proposal' in line:
                    metrics['proposals'] += 1
                    timestamp_match = timestamp_re.search(line)
                    if timestamp_match:
                        metrics['proposal_times'].append(timestamp_match.group(1))

                elif 'successfully vote for the block' in line:
                    metrics['votes'] += 1
                    timestamp_match = timestamp_re.search(line)
                    if timestamp_match:
                        metrics['vote_times'].append(timestamp_match.group(1))

                elif 'commit' in line.lower():
                    metrics['commits'] += 1
                    timestamp_match = timestamp_re.search(line)
                    if timestamp_match:
                        metrics['commit_times'].append(timestamp_match.group(1))

                elif 'connection has been established' in line:
                    metrics['connections'] += 1

                elif 'error' in line.lower() or 'panic' in line.lower():
                    metrics['errors'] += 1

    except OSError as e:
        # Best-effort: record the failure instead of raising so one bad
        # file does not abort the whole analysis run.
        metrics['parse_error'] = str(e)

    return metrics


def calculate_throughput(metrics, duration_seconds):
    """Aggregate per-node counters into cluster-wide throughput figures.

    Args:
        metrics: Mapping of node name -> per-node metrics dict (as produced
            by ``parse_bdt_log``); missing counters default to 0.
        duration_seconds: Measurement window length; non-positive values
            are clamped to 1 to avoid division by zero.

    Returns:
        dict with totals and per-second rates for proposals, commits
        and votes.
    """
    duration = duration_seconds if duration_seconds > 0 else 1

    totals = {'proposals': 0, 'commits': 0, 'votes': 0}
    for node_metrics in metrics.values():
        for key in totals:
            totals[key] += node_metrics.get(key, 0)

    return {
        'proposals_per_second': totals['proposals'] / duration,
        'commits_per_second': totals['commits'] / duration,
        'votes_per_second': totals['votes'] / duration,
        'total_proposals': totals['proposals'],
        'total_commits': totals['commits'],
        'total_votes': totals['votes']
    }


def analyze_latency(metrics):
    """Derive proposal-interval statistics per node from its timestamps.

    For each node with at least two recorded ``proposal_times``, computes
    the seconds elapsed between consecutive proposals and summarizes them.
    Timestamps that fail to parse are skipped rather than aborting the
    node's analysis.

    Args:
        metrics: Mapping of node name -> per-node metrics dict; only the
            ``proposal_times`` list (ISO-8601 strings) is consulted.

    Returns:
        dict of node name -> {avg/min/max proposal interval, proposal
        count}. Nodes with fewer than two parseable timestamps are
        omitted.
    """
    latency_analysis = {}

    for node, data in metrics.items():
        times = data.get('proposal_times', [])
        if len(times) <= 1:
            continue

        intervals = []
        # Walk consecutive timestamp pairs; the '+0800' strip mirrors the
        # upstream log format in case a zone suffix ever leaks through.
        for prev, curr in zip(times, times[1:]):
            try:
                t1 = datetime.fromisoformat(prev.replace('+0800', ''))
                t2 = datetime.fromisoformat(curr.replace('+0800', ''))
            except ValueError:
                # Narrowed from a bare except: only malformed timestamps
                # should be skipped, not e.g. KeyboardInterrupt.
                continue
            intervals.append((t2 - t1).total_seconds())

        if intervals:
            latency_analysis[node] = {
                'avg_proposal_interval': sum(intervals) / len(intervals),
                'min_proposal_interval': min(intervals),
                'max_proposal_interval': max(intervals),
                'proposal_count': len(times)
            }

    return latency_analysis


def generate_report(data_dir, output_file=None, duration_seconds=15):
    """Generate a comprehensive performance report from a run's logs.

    Parses every ``*.log`` file under ``<data_dir>/logs/``, computes
    throughput and latency statistics, writes the full report as JSON and
    prints a human-readable summary.

    Args:
        data_dir: Directory containing a ``logs/`` subdirectory.
        output_file: Destination JSON path; defaults to a timestamped
            ``performance_report_*.json`` inside ``data_dir``.
        duration_seconds: Length of the benchmark run used for the
            per-second rates. Defaults to 15 (the previous hard-coded
            value); pass the actual run duration for accurate rates.

    Returns:
        The report dict, or None when no log files are found.
    """
    if not output_file:
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        output_file = os.path.join(data_dir, f"performance_report_{timestamp}.json")

    # Find log files
    log_files = glob.glob(os.path.join(data_dir, "logs", "*.log"))
    if not log_files:
        print(f"No log files found in {data_dir}/logs/")
        return None

    # Parse all logs, keyed by node name (log filename minus extension).
    metrics = {}
    for log_file in log_files:
        node_name = os.path.basename(log_file).replace('.log', '')
        metrics[node_name] = parse_bdt_log(log_file)

    duration = duration_seconds
    throughput = calculate_throughput(metrics, duration)

    # Analyze latency
    latency = analyze_latency(metrics)

    # Generate report
    report = {
        'timestamp': datetime.now().isoformat(),
        'data_directory': data_dir,
        'duration_seconds': duration,
        'node_count': len(metrics),
        'throughput': throughput,
        'latency_analysis': latency,
        'per_node_metrics': metrics,
        'summary': {
            'total_log_lines': sum(m.get('total_lines', 0) for m in metrics.values()),
            'total_errors': sum(m.get('errors', 0) for m in metrics.values()),
            'avg_proposals_per_node': throughput['total_proposals'] / len(metrics) if metrics else 0,
            'avg_commits_per_node': throughput['total_commits'] / len(metrics) if metrics else 0
        }
    }

    # Save report
    with open(output_file, 'w') as f:
        json.dump(report, f, indent=2)

    # Print summary
    print("=== BDT Performance Analysis ===")
    print(f"Data Directory: {data_dir}")
    print(f"Duration: {duration} seconds")
    print(f"Nodes: {len(metrics)}")
    print(f"Total Proposals: {throughput['total_proposals']}")
    print(f"Total Commits: {throughput['total_commits']}")
    print(f"Total Votes: {throughput['total_votes']}")
    print(f"Proposals/sec: {throughput['proposals_per_second']:.2f}")
    print(f"Commits/sec: {throughput['commits_per_second']:.2f}")
    print(f"Votes/sec: {throughput['votes_per_second']:.2f}")
    print(f"Total Errors: {report['summary']['total_errors']}")
    print(f"Report saved to: {output_file}")

    return report


def main():
    """CLI entry point: parse arguments, build the report, optionally CSV.

    Returns:
        0 on success (including the no-logs-found case), 1 when the data
        directory is missing or not a directory.
    """
    parser = argparse.ArgumentParser(description='Analyze BDT performance from logs')
    parser.add_argument('data_dir', help='Directory containing logs and results')
    parser.add_argument('--output', '-o', help='Output report file')
    parser.add_argument('--csv', action='store_true', help='Also generate CSV output')

    args = parser.parse_args()

    # isdir, not exists: a plain file at this path would otherwise slip
    # through and fail later when looking for the logs/ subdirectory.
    if not os.path.isdir(args.data_dir):
        print(f"Error: Directory {args.data_dir} does not exist")
        return 1

    # Generate report
    report = generate_report(args.data_dir, args.output)

    if report and args.csv:
        if args.output:
            # Swap the extension with splitext. The previous
            # str.replace('.json', '.csv') was a silent no-op for output
            # names without a '.json' suffix, overwriting the JSON report
            # with CSV data.
            csv_file = os.path.splitext(args.output)[0] + '.csv'
        else:
            csv_file = os.path.join(args.data_dir, 'performance_metrics.csv')

        # Flat metric/value pairs, in the same order as before.
        rows = [
            ('duration_seconds', report['duration_seconds']),
            ('node_count', report['node_count']),
            ('total_proposals', report['throughput']['total_proposals']),
            ('total_commits', report['throughput']['total_commits']),
            ('total_votes', report['throughput']['total_votes']),
            ('proposals_per_second', report['throughput']['proposals_per_second']),
            ('commits_per_second', report['throughput']['commits_per_second']),
            ('votes_per_second', report['throughput']['votes_per_second']),
            ('total_errors', report['summary']['total_errors']),
        ]
        with open(csv_file, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(['metric', 'value'])
            writer.writerows(rows)

        print(f"CSV metrics saved to: {csv_file}")

    return 0


if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    raise SystemExit(main())
