#!/bin/bash
# AWS metrics collection script - collects Turritopsis performance metrics
# from the AWS nodes listed in aws_hosts.txt (produced by aws_deploy.sh).

# Fail fast: abort on command errors, unset variables, and pipeline failures.
set -euo pipefail

# Configuration (constants)
readonly KEY_NAME="turritopsis-key"               # SSH key pair: ~/.ssh/$KEY_NAME.pem
readonly PROJECT_DIR="/home/ec2-user/turritopsis" # project root on each remote node
# Local output directory, timestamped per run (reassigned by `aggregate` mode).
COLLECT_DIR="aws_metrics_$(date +%Y%m%d_%H%M%S)"

# ANSI color codes for log output
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m' # No Color

# Print an informational message with a green [INFO] tag.
log_info() {
    printf '%b\n' "${GREEN}[INFO]${NC} $1"
}

# Print a warning message with a yellow [WARN] tag.
# Diagnostics go to stderr so they don't pollute captured stdout.
log_warn() {
    echo -e "${YELLOW}[WARN]${NC} $1" >&2
}

# Print an error message with a red [ERROR] tag.
# Errors go to stderr so they don't pollute captured stdout.
log_error() {
    echo -e "${RED}[ERROR]${NC} $1" >&2
}

# Print a debug message with a blue [DEBUG] tag.
log_debug() {
    printf '%b\n' "${BLUE}[DEBUG]${NC} $1"
}

# Ensure the node inventory produced by aws_deploy.sh is present in the
# current directory; exit with status 1 when it is missing.
check_hosts_file() {
    if [ -f "aws_hosts.txt" ]; then
        log_info "Found $(wc -l < aws_hosts.txt) nodes in aws_hosts.txt"
        return 0
    fi

    log_error "aws_hosts.txt not found. Please run aws_deploy.sh first."
    exit 1
}

# Create the output directory tree: $COLLECT_DIR with logs/, metrics/
# and proofs/ subdirectories (mkdir -p creates the parent implicitly).
create_collect_dir() {
    log_info "Creating collection directory: $COLLECT_DIR"
    local subdir
    for subdir in logs metrics proofs; do
        mkdir -p "$COLLECT_DIR/$subdir"
    done
}

# Collect application and kernel logs from every node in aws_hosts.txt
# into $COLLECT_DIR/logs/node_<id>/.
#
# BUGFIXES vs. original:
#  - ssh is invoked with -n (and scp with </dev/null) so the remote command
#    does not consume the while-loop's stdin; without this, ssh swallowed the
#    remaining lines of aws_hosts.txt and only the first node was processed.
#  - node_id is incremented with a plain assignment: ((node_id++)) returns
#    status 1 when node_id is 0, which aborts the script under `set -e`.
collect_logs() {
    log_info "Collecting log files from all nodes..."

    local node_id=0
    local node_name public_ip private_ip log_file
    while read -r node_name public_ip private_ip; do
        if [[ "$node_name" =~ ^node- ]]; then
            log_info "Collecting logs from $node_name ($public_ip)..."

            # Per-node destination directory
            mkdir -p "$COLLECT_DIR/logs/node_$node_id"

            # Fetch the main application logs; skip any that don't exist remotely.
            for log_file in "turritopsis.log" "acs_server.log" "router.log"; do
                if ssh -n -i ~/.ssh/"$KEY_NAME".pem -o StrictHostKeyChecking=no "ec2-user@$public_ip" \
                    "test -f $PROJECT_DIR/$log_file"; then
                    scp -i ~/.ssh/"$KEY_NAME".pem -o StrictHostKeyChecking=no \
                        "ec2-user@$public_ip:$PROJECT_DIR/$log_file" \
                        "$COLLECT_DIR/logs/node_$node_id/" </dev/null
                    log_debug "Collected $log_file from node $node_id"
                else
                    log_warn "Log file $log_file not found on node $node_id"
                fi
            done

            # Grab the last 100 kernel log lines for system-level diagnosis.
            ssh -n -i ~/.ssh/"$KEY_NAME".pem -o StrictHostKeyChecking=no "ec2-user@$public_ip" \
                "dmesg | tail -100" > "$COLLECT_DIR/logs/node_$node_id/dmesg.log"

            node_id=$((node_id + 1))
        fi
    done < aws_hosts.txt

    log_info "Log collection completed"
}

# Collect metrics exports and proof-chain files from every node in
# aws_hosts.txt into $COLLECT_DIR/metrics/node_<id>/ and $COLLECT_DIR/proofs/.
#
# BUGFIXES vs. original (same as collect_logs):
#  - ssh -n / scp </dev/null so remote calls don't eat the loop's stdin
#    (otherwise only the first node was ever processed).
#  - node_id incremented via assignment; ((node_id++)) trips `set -e` at 0.
collect_metrics() {
    log_info "Collecting metrics files from all nodes..."

    local node_id=0
    local node_name public_ip private_ip metrics_file
    while read -r node_name public_ip private_ip; do
        if [[ "$node_name" =~ ^node- ]]; then
            log_info "Collecting metrics from $node_name ($public_ip)..."

            # Per-node destination directory
            mkdir -p "$COLLECT_DIR/metrics/node_$node_id"

            # Fetch the metrics exports; skip any that don't exist remotely.
            for metrics_file in "turritopsis_metrics.json" "turritopsis_metrics.csv" "turritopsis_metrics_detailed.csv"; do
                if ssh -n -i ~/.ssh/"$KEY_NAME".pem -o StrictHostKeyChecking=no "ec2-user@$public_ip" \
                    "test -f $PROJECT_DIR/$metrics_file"; then
                    scp -i ~/.ssh/"$KEY_NAME".pem -o StrictHostKeyChecking=no \
                        "ec2-user@$public_ip:$PROJECT_DIR/$metrics_file" \
                        "$COLLECT_DIR/metrics/node_$node_id/" </dev/null
                    log_debug "Collected $metrics_file from node $node_id"
                else
                    log_warn "Metrics file $metrics_file not found on node $node_id"
                fi
            done

            # Proof-chain file (optional per node).
            if ssh -n -i ~/.ssh/"$KEY_NAME".pem -o StrictHostKeyChecking=no "ec2-user@$public_ip" \
                "test -f $PROJECT_DIR/turritopsis_proof.jsonl"; then
                scp -i ~/.ssh/"$KEY_NAME".pem -o StrictHostKeyChecking=no \
                    "ec2-user@$public_ip:$PROJECT_DIR/turritopsis_proof.jsonl" \
                    "$COLLECT_DIR/proofs/node_${node_id}_proof.jsonl" </dev/null
                log_debug "Collected proof chain from node $node_id"
            fi

            node_id=$((node_id + 1))
        fi
    done < aws_hosts.txt

    log_info "Metrics collection completed"
}

# Append uname/CPU/memory/disk details for every node to
# $COLLECT_DIR/system_info.txt.
#
# BUGFIXES vs. original (same as collect_logs):
#  - ssh -n so the remote command doesn't eat the loop's stdin.
#  - node_id incremented via assignment; ((node_id++)) trips `set -e` at 0.
collect_system_info() {
    log_info "Collecting system information..."

    # Report header (unquoted EOF: $(date) and $(wc ...) expand here).
    cat > "$COLLECT_DIR/system_info.txt" << EOF
# Turritopsis AWS Deployment System Information
# Collected on: $(date)
# Nodes: $(wc -l < aws_hosts.txt)

EOF

    local node_id=0
    local node_name public_ip private_ip
    while read -r node_name public_ip private_ip; do
        if [[ "$node_name" =~ ^node- ]]; then
            log_info "Collecting system info from $node_name ($public_ip)..."

            cat >> "$COLLECT_DIR/system_info.txt" << EOF
## Node $node_id ($public_ip)
EOF

            # Best-effort: a node that is unreachable only produces a warning.
            ssh -n -i ~/.ssh/"$KEY_NAME".pem -o StrictHostKeyChecking=no "ec2-user@$public_ip" \
                "uname -a; cat /proc/cpuinfo | grep 'model name' | head -1; free -h; df -h /" \
                >> "$COLLECT_DIR/system_info.txt" 2>/dev/null || log_warn "Failed to collect system info from node $node_id"

            node_id=$((node_id + 1))
        fi
    done < aws_hosts.txt

    log_info "System information collection completed"
}

# Write the Python aggregation helper into $COLLECT_DIR and execute it there.
#
# BUGFIX vs. original: the unconditional `cd "$COLLECT_DIR"` leaked into the
# rest of the script, so every later step in main() (generate_report,
# cleanup_temp_files, the aws_hosts.txt reads) resolved its relative paths
# inside the collection directory and failed. The cd now happens in a
# subshell, leaving the caller's working directory untouched.
aggregate_metrics() {
    log_info "Aggregating metrics from all nodes..."

    # Generate the aggregation script (quoted 'EOF': no shell expansion).
    cat > "$COLLECT_DIR/aggregate_metrics.py" << 'EOF'
#!/usr/bin/env python3
import json
import csv
import os
import glob
from datetime import datetime

def aggregate_json_metrics():
    """聚合JSON指标文件"""
    all_metrics = []
    
    # 收集所有节点的JSON指标
    for metrics_file in glob.glob("metrics/node_*/turritopsis_metrics.json"):
        try:
            with open(metrics_file, 'r') as f:
                metrics = json.load(f)
                metrics['node_file'] = metrics_file
                all_metrics.append(metrics)
        except Exception as e:
            print(f"Error reading {metrics_file}: {e}")
    
    # 计算聚合统计
    if all_metrics:
        total_epochs = sum(m.get('epochs', 0) for m in all_metrics)
        total_duration = sum(m.get('duration_s', 0) for m in all_metrics)
        avg_duration = total_duration / len(all_metrics) if all_metrics else 0
        
        # 聚合批次统计
        total_batches = sum(m.get('batch_stats', {}).get('total_batches', 0) for m in all_metrics)
        total_transactions = sum(m.get('batch_stats', {}).get('total_transactions', 0) for m in all_metrics)
        
        aggregated = {
            'collection_time': datetime.now().isoformat(),
            'total_nodes': len(all_metrics),
            'total_epochs': total_epochs,
            'avg_duration_s': avg_duration,
            'total_batches': total_batches,
            'total_transactions': total_transactions,
            'node_metrics': all_metrics
        }
        
        with open('aggregated_metrics.json', 'w') as f:
            json.dump(aggregated, f, indent=2)
        
        print(f"Aggregated metrics from {len(all_metrics)} nodes")
        print(f"Total epochs: {total_epochs}")
        print(f"Average duration: {avg_duration:.2f}s")
        print(f"Total transactions: {total_transactions}")

def aggregate_csv_metrics():
    """聚合CSV指标文件"""
    all_rows = []
    
    # 收集所有节点的CSV指标
    for csv_file in glob.glob("metrics/node_*/turritopsis_metrics.csv"):
        try:
            with open(csv_file, 'r') as f:
                reader = csv.DictReader(f)
                for row in reader:
                    row['node_file'] = csv_file
                    all_rows.append(row)
        except Exception as e:
            print(f"Error reading {csv_file}: {e}")
    
    # 写入聚合CSV
    if all_rows:
        with open('aggregated_metrics.csv', 'w', newline='') as f:
            fieldnames = all_rows[0].keys()
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            writer.writeheader()
            writer.writerows(all_rows)
        
        print(f"Aggregated CSV metrics from {len(all_rows)} records")

if __name__ == "__main__":
    aggregate_json_metrics()
    aggregate_csv_metrics()
EOF

    # Run inside a subshell so the cd does not persist after this function.
    (cd "$COLLECT_DIR" && python3 aggregate_metrics.py)

    log_info "Metrics aggregation completed"
}

# Generate a markdown performance report describing the collected artifacts.
# Writes $COLLECT_DIR/performance_report.md. The unquoted EOF delimiter means
# $(date), $(wc -l < aws_hosts.txt) and $COLLECT_DIR are expanded when the
# report is written, while the \` sequences keep literal backticks in the
# markdown output.
# NOTE(review): aws_hosts.txt is resolved relative to the current working
# directory -- this relies on no earlier step having changed the cwd
# (aggregate_metrics does an unconditional cd; verify call order).
generate_report() {
    log_info "Generating performance report..."
    
    cat > "$COLLECT_DIR/performance_report.md" << EOF
# Turritopsis AWS Performance Report

**Collection Time:** $(date)  
**Nodes:** $(wc -l < aws_hosts.txt)  
**Collection Directory:** $COLLECT_DIR

## Summary

This report contains performance metrics collected from Turritopsis nodes running on AWS.

## Files Collected

### Logs
- \`logs/node_*/turritopsis.log\` - Main Turritopsis execution logs
- \`logs/node_*/acs_server.log\` - ACS stub server logs  
- \`logs/node_*/router.log\` - TCP router logs
- \`logs/node_*/dmesg.log\` - System kernel logs

### Metrics
- \`metrics/node_*/turritopsis_metrics.json\` - Detailed JSON metrics
- \`metrics/node_*/turritopsis_metrics.csv\` - Summary CSV metrics
- \`metrics/node_*/turritopsis_metrics_detailed.csv\` - Per-epoch detailed metrics

### Proofs
- \`proofs/node_*_proof.jsonl\` - Proof chain records

### Aggregated Data
- \`aggregated_metrics.json\` - Aggregated metrics from all nodes
- \`aggregated_metrics.csv\` - Aggregated CSV data
- \`system_info.txt\` - System information from all nodes

## Analysis

Use the collected data to analyze:
1. **Performance Metrics**: Latency, throughput, message volume
2. **Phase Timing**: ACSS, MVBA, KeyGen, Verify phase durations
3. **Batch Statistics**: Batch sizes, transaction counts
4. **System Performance**: CPU, memory, disk usage
5. **Proof Chain**: Cryptographic proof verification

## Next Steps

1. Analyze aggregated metrics for performance trends
2. Compare with Turritopsis paper benchmarks
3. Identify bottlenecks and optimization opportunities
4. Generate visualizations for presentation

EOF
    
    log_info "Performance report generated: $COLLECT_DIR/performance_report.md"
}

# Remove the per-node launcher scripts that deployment left in the
# current directory (rm -f: missing files are not an error).
cleanup_temp_files() {
    log_info "Cleaning up temporary files..."

    local prefix
    for prefix in start_acs start_router start_turritopsis; do
        rm -f "${prefix}"_*.sh
    done

    log_info "Cleanup completed"
}

# Run the full collection pipeline end to end, in order.
main() {
    log_info "Starting metrics collection from AWS nodes..."

    local step
    for step in \
        check_hosts_file \
        create_collect_dir \
        collect_logs \
        collect_metrics \
        collect_system_info \
        aggregate_metrics \
        generate_report \
        cleanup_temp_files; do
        "$step"
    done

    log_info "Metrics collection completed!"
    log_info "Results saved in: $COLLECT_DIR"
    log_info "View report: cat $COLLECT_DIR/performance_report.md"
}

# Command dispatcher.
#
# BUGFIX vs. original: the `aggregate` branch tested $COLLECT_DIR, which is
# freshly timestamped on every invocation and therefore never names an
# existing collection (the branch was unreachable); it also pre-cd'd into the
# directory before aggregate_metrics cd'd again. It now accepts an explicit
# directory as $2, or falls back to the newest aws_metrics_* directory
# (the timestamped names sort chronologically).
case "${1:-}" in
    "collect")
        main
        ;;
    "logs")
        check_hosts_file
        create_collect_dir
        collect_logs
        log_info "Log collection completed in: $COLLECT_DIR/logs"
        ;;
    "metrics")
        check_hosts_file
        create_collect_dir
        collect_metrics
        log_info "Metrics collection completed in: $COLLECT_DIR/metrics"
        ;;
    "aggregate")
        target="${2:-}"
        if [ -z "$target" ]; then
            # Lexicographic glob order == chronological order for these names.
            for d in aws_metrics_*/; do
                [ -d "$d" ] && target="$d"
            done
        fi
        if [ -n "$target" ] && [ -d "$target" ]; then
            COLLECT_DIR="${target%/}"
            aggregate_metrics
            log_info "Metrics aggregation completed"
        else
            log_error "Collection directory not found. Run 'collect' first."
            exit 1
        fi
        ;;
    *)
        echo "Usage: $0 {collect|logs|metrics|aggregate [dir]}"
        echo "  collect         - Collect all data (logs, metrics, system info)"
        echo "  logs            - Collect only log files"
        echo "  metrics         - Collect only metrics files"
        echo "  aggregate [dir] - Aggregate collected metrics (newest aws_metrics_* by default)"
        exit 1
        ;;
esac