#!/usr/bin/env python3
"""
HoneyBadgerBFT 日志分析工具
分析收集的性能统计数据并生成报告
"""

import json
import os
import sys
from pathlib import Path
from typing import Dict, List

def load_performance_stats(logs_dir: str) -> List[Dict]:
    """
    Load every per-node performance statistics file under a logs directory.

    Scans each immediate subdirectory of *logs_dir* for files matching
    ``node_*_performance.json``, parses them as JSON, and tags each record
    with the path it came from under the ``'source_file'`` key.

    Args:
        logs_dir: Path to the directory containing per-run log subdirectories.

    Returns:
        A list of stats dicts (possibly empty if the directory is missing,
        empty, or every file is unreadable). Unreadable/corrupt files are
        reported with a warning and skipped rather than aborting the scan.
    """
    stats_list: List[Dict] = []
    logs_path = Path(logs_dir)

    if not logs_path.exists():
        print(f"错误: 日志目录不存在: {logs_dir}")
        return stats_list

    # Walk subdirectories in sorted order so results are deterministic
    # (Path.iterdir yields entries in arbitrary filesystem order).
    for log_subdir in sorted(logs_path.iterdir()):
        if not log_subdir.is_dir():
            continue

        # Each node writes its stats as node_<id>_performance.json.
        for perf_file in sorted(log_subdir.glob("node_*_performance.json")):
            try:
                with open(perf_file, 'r') as f:
                    data = json.load(f)
            # Only the errors this block can actually raise: filesystem
            # failures and malformed JSON. Anything else should propagate.
            except (OSError, json.JSONDecodeError) as e:
                print(f"警告: 无法读取 {perf_file}: {e}")
                continue
            data['source_file'] = str(perf_file)
            stats_list.append(data)

    return stats_list

def analyze_performance(stats_list: List[Dict]):
    """
    Analyze per-node performance statistics and print a report to stdout.

    Args:
        stats_list: Stats dicts as produced by ``load_performance_stats()``.
            Recognized keys: ``'node_id'``, ``'throughput_tps'``,
            ``'total_epochs'``, ``'total_transactions'``,
            ``'avg_epoch_latency'``, ``'network_stats'``,
            ``'epoch_deliveries'``. Missing keys default to 0 / empty.

    The input list is not modified (a sorted copy is used internally).
    """
    if not stats_list:
        print("没有找到性能数据")
        return

    print("=" * 60)
    print("HoneyBadgerBFT 性能分析报告")
    print("=" * 60)
    print()

    # Sort a copy by node id; the original code sorted in place, which
    # mutated the caller's list as a side effect.
    ordered = sorted(stats_list, key=lambda x: x.get('node_id', 0))

    total_tps, total_epochs, total_txs, latencies = _print_node_stats(ordered)
    _print_overall_stats(len(ordered), total_tps, total_epochs, total_txs, latencies)
    _print_network_stats(ordered)
    _print_epoch_deliveries(ordered)

    print()
    print("=" * 60)
    print("分析完成")
    print("=" * 60)


def _print_node_stats(ordered: List[Dict]):
    """Print the per-node table; return (total_tps, max_epochs, total_txs, latencies)."""
    print("节点详细统计:")
    print("-" * 60)
    print(f"{'节点ID':<8} {'TPS':<12} {'Epochs':<8} {'Txs':<10} {'延迟(ms)':<10}")
    print("-" * 60)

    total_tps = 0
    total_epochs = 0
    total_txs = 0
    latencies = []

    for stats in ordered:
        node_id = stats.get('node_id', 'N/A')
        tps = stats.get('throughput_tps', 0)
        epochs = stats.get('total_epochs', 0)
        txs = stats.get('total_transactions', 0)
        latency = stats.get('avg_epoch_latency', 0)

        # Latency formatted to 2 decimals so the column stays aligned
        # (previously printed at full float precision).
        print(f"{node_id:<8} {tps:<12.2f} {epochs:<8} {txs:<10} {latency:<10.2f}")

        total_tps += tps
        # Epochs count consensus rounds shared by all nodes: take the max,
        # not the sum, across nodes.
        total_epochs = max(total_epochs, epochs)
        total_txs += txs
        # Zero latency means "no data"; exclude it from the average.
        if latency > 0:
            latencies.append(latency)

    print("-" * 60)
    print()
    return total_tps, total_epochs, total_txs, latencies


def _print_overall_stats(num_nodes: int, total_tps, total_epochs, total_txs, latencies):
    """Print aggregate statistics across all nodes."""
    avg_tps = total_tps / num_nodes if num_nodes > 0 else 0
    avg_latency = sum(latencies) / len(latencies) if latencies else 0

    print("整体统计:")
    print("-" * 60)
    print(f"节点数量:        {num_nodes}")
    print(f"平均 TPS:        {avg_tps:.2f}")
    print(f"总交易数:        {total_txs}")
    print(f"共识轮次:        {total_epochs}")
    print(f"平均延迟:        {avg_latency:.2f} ms")
    print("-" * 60)
    print()


def _print_network_stats(ordered: List[Dict]):
    """Print per-node message/byte counters plus a totals row."""
    print("网络通信统计:")
    print("-" * 60)
    print(f"{'节点ID':<8} {'发送消息':<12} {'接收消息':<12} {'发送(KB)':<12} {'接收(KB)':<12}")
    print("-" * 60)

    total_msgs_sent = 0
    total_msgs_recv = 0
    total_bytes_sent = 0
    total_bytes_recv = 0

    for stats in ordered:
        node_id = stats.get('node_id', 'N/A')
        net_stats = stats.get('network_stats', {})

        msgs_sent = net_stats.get('MessagesSent', 0)
        msgs_recv = net_stats.get('MessagesReceived', 0)
        # Counters are recorded in bytes; report in KB.
        bytes_sent = net_stats.get('BytesSent', 0) / 1024
        bytes_recv = net_stats.get('BytesReceived', 0) / 1024

        print(f"{node_id:<8} {msgs_sent:<12} {msgs_recv:<12} {bytes_sent:<12.2f} {bytes_recv:<12.2f}")

        total_msgs_sent += msgs_sent
        total_msgs_recv += msgs_recv
        total_bytes_sent += bytes_sent
        total_bytes_recv += bytes_recv

    print("-" * 60)
    print(f"{'总计':<8} {total_msgs_sent:<12} {total_msgs_recv:<12} {total_bytes_sent:<12.2f} {total_bytes_recv:<12.2f}")
    print("-" * 60)
    print()


def _print_epoch_deliveries(ordered: List[Dict]):
    """Print per-epoch delivered-transaction counts for the first 10 epochs."""
    print("每轮交易交付:")
    print("-" * 60)

    # Pivot: epoch -> {node_id: delivered count}.
    epoch_data: Dict = {}
    for stats in ordered:
        node_id = stats.get('node_id', 'N/A')
        deliveries = stats.get('epoch_deliveries', {})
        for epoch, count in deliveries.items():
            epoch_data.setdefault(epoch, {})[node_id] = count

    if epoch_data:
        # Epoch keys come from JSON as strings; sort numerically when possible.
        sorted_epochs = sorted(epoch_data.keys(), key=lambda x: int(x) if str(x).isdigit() else 0)
        for epoch in sorted_epochs[:10]:  # show at most the first 10 epochs
            node_counts = epoch_data[epoch]
            # Group by key type before comparing so a mix of int ids and the
            # 'N/A' fallback cannot raise TypeError during sorting.
            ordered_counts = sorted(node_counts.items(),
                                    key=lambda kv: (isinstance(kv[0], str), kv[0]))
            counts_str = ", ".join([f"N{nid}:{cnt}" for nid, cnt in ordered_counts])
            print(f"  Epoch {epoch}: {counts_str}")

def main():
    """Entry point: resolve the logs directory from argv and run the analysis."""
    # First CLI argument overrides the default "./logs" location.
    logs_dir = "./logs"
    if len(sys.argv) > 1:
        logs_dir = sys.argv[1]

    print(f"正在分析日志目录: {logs_dir}")
    print()

    # Gather every node's stats file from the chosen directory.
    stats_list = load_performance_stats(logs_dir)

    # Nothing to analyze: report the problem and exit non-zero.
    if not stats_list:
        print(f"错误: 在 {logs_dir} 中未找到性能统计文件")
        print("请确保已运行实验并收集了日志")
        sys.exit(1)

    # Produce the full report on stdout.
    analyze_performance(stats_list)

# Run the CLI only when executed directly, so the module can be imported
# (e.g. for reuse of load_performance_stats) without side effects.
if __name__ == "__main__":
    main()

