#!/usr/bin/env python3
"""
PSI benchmark analysis tool
Usage: ./psi_analyze.py <results_directory>

This parser:
- Extracts stress-ng metrics (cpu/memory/mixed tests) from the .txt result files.
- Extracts fio metrics (io tests) from the corresponding .fio.json files.
- Computes enabled vs disabled differences.
- Parses vmstat files for context switch rates (cs column averaged over last samples).

Output: Human-readable comparison and simple bar charts per test where both enabled/disabled exist.

NOTE:
Stress-ng parsing assumes --metrics-brief format. If the stress-ng output format
changes, adjust the parsing rules accordingly.

Fio parsing expects --output-format=json output saved as <basename>.fio.json.
"""

import os
import sys
import json
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict

# ---------------------------
# Stress-ng parsing helpers
# ---------------------------
# Map stressor names to (field_index, internal_metric_name)
# Field index may need adjustment depending on stress-ng version.
# Map stressor names to the whitespace-split field index holding the metric and
# the internal metric name it is stored under.  The index may need adjustment
# for other stress-ng versions.
STRESS_NG_METRIC_MAP = {
    'cpu': {'field': 8, 'name': 'cpu_throughput'},   # e.g. bogo ops/s
    'io': {'field': 8, 'name': 'io_throughput'},     # present if mixed test uses iomix
    'vm': {'field': 8, 'name': 'vm_throughput'},
}

def parse_stress_ng_metrics(txt_path, test_type, psi_state, data):
    """
    Extract throughput figures from a stress-ng --metrics-brief text capture.

    Every line containing 'stress-ng:' is tokenized; if any token begins with
    a known stressor name (see STRESS_NG_METRIC_MAP), the numeric field at the
    configured index is recorded as data[test_type][metric][psi_state].
    Non-numeric fields are skipped; a missing file is silently ignored.
    """
    try:
        handle = open(txt_path, 'r')
    except FileNotFoundError:
        return
    with handle:
        for raw_line in handle:
            if 'stress-ng:' not in raw_line:
                continue
            tokens = raw_line.strip().split()
            # Heuristic: the stressor name usually follows the PID/bracket
            # field, so just look for a token starting with a known name.
            for stressor_name, info in STRESS_NG_METRIC_MAP.items():
                if not any(tok.startswith(stressor_name) for tok in tokens):
                    continue
                field_idx = info['field']
                if field_idx < len(tokens):
                    candidate = tokens[field_idx].replace(',', '')
                    try:
                        data[test_type][info['name']][psi_state] = float(candidate)
                    except ValueError:
                        pass
                break  # first matching stressor wins for this line

# ---------------------------
# fio parsing helpers
# ---------------------------
def parse_fio_json(json_path, test_type, psi_state, data):
    """
    Read a fio --output-format=json result and record aggregate metrics.

    Stored under data[test_type][<metric>][psi_state]:
      io_read_bw_MBps / io_write_bw_MBps  -- summed bandwidth, KB/s -> MB/s
                                             (decimal: 1000 KB = 1 MB)
      io_read_iops / io_write_iops        -- summed IOPS across all jobs
      io_clat_mean_ms                     -- mean of per-job completion-latency
                                             means (only if any job reports one)

    A missing file is skipped silently; unparseable JSON prints a warning.
    """
    if not os.path.isfile(json_path):
        return
    try:
        with open(json_path, 'r') as fh:
            payload = json.load(fh)
    except json.JSONDecodeError:
        print(f"WARNING: Could not parse fio JSON: {json_path}")
        return

    job_list = payload.get("jobs", [])
    if not job_list:
        return

    read_bw_kb = 0.0
    write_bw_kb = 0.0
    read_iops = 0.0
    write_iops = 0.0
    clat_samples = []

    for job in job_list:
        rd = job.get("read", {})
        wr = job.get("write", {})
        read_bw_kb += rd.get("bw", 0.0)
        write_bw_kb += wr.get("bw", 0.0)
        read_iops += rd.get("iops", 0.0)
        write_iops += wr.get("iops", 0.0)

        # Prefer the read-side clat_ns block, falling back to write-side,
        # then job-level; only dict-shaped blocks with a "mean" count.
        lat_block = rd.get("clat_ns") or wr.get("clat_ns") or job.get("clat_ns") or {}
        if isinstance(lat_block, dict) and lat_block.get("mean") is not None:
            clat_samples.append(lat_block["mean"])

    collected = {
        "io_read_bw_MBps": read_bw_kb / 1000.0,    # KB/s -> MB/s (decimal)
        "io_write_bw_MBps": write_bw_kb / 1000.0,
        "io_read_iops": read_iops,
        "io_write_iops": write_iops,
    }
    if clat_samples:
        # ns -> ms
        collected["io_clat_mean_ms"] = (sum(clat_samples) / len(clat_samples)) / 1_000_000.0

    for metric_name, value in collected.items():
        data[test_type][metric_name][psi_state] = value

# ---------------------------
# vmstat parsing
# ---------------------------
def parse_vmstat(vmstat_path, test_type, psi_state, data, tail_samples=100):
    """
    Average the context-switch ('cs') column of a vmstat capture.

    The first two lines are vmstat's banner and column headers and are
    skipped; 'cs' is whitespace-split column index 11 in the sample rows.
    The mean of the final `tail_samples` readings (or all of them, if fewer)
    is stored as data[test_type]['context_switches'][psi_state].
    """
    if not os.path.isfile(vmstat_path):
        return
    samples = []
    try:
        with open(vmstat_path, 'r') as fh:
            next(fh)  # banner line ("procs ---memory--- ...")
            next(fh)  # column-name line
            for row in fh:
                fields = row.split()
                # Blank lines split to [] and short rows are skipped here.
                if len(fields) <= 11:
                    continue
                try:
                    samples.append(float(fields[11]))
                except ValueError:
                    pass
    except (FileNotFoundError, StopIteration):
        # StopIteration: file had fewer than two header lines.
        return

    if samples:
        tail = samples[-tail_samples:] if len(samples) >= tail_samples else samples
        data[test_type]['context_switches'][psi_state] = float(np.mean(tail))

# ---------------------------
# General parsing routine
# ---------------------------
def parse_results(results_dir):
    data = defaultdict(lambda: defaultdict(dict))

    for fname in os.listdir(results_dir):
        full_path = os.path.join(results_dir, fname)

        # Determine PSI state
        if '_enabled' in fname:
            psi_state = 'enabled'
        elif '_disabled' in fname:
            psi_state = 'disabled'
        else:
            # Skip files that are not part of the standardized naming
            continue

        # Base test type (prefix before first underscore)
        test_type = fname.split('_')[0]

        # Text result from stress-ng or fio wrapper
        if fname.endswith('.txt'):
            # Parse stress-ng metrics (cpu/memory/mixed; io only if still present in mixed)
            parse_stress_ng_metrics(full_path, test_type, psi_state, data)

        # vmstat sidecar
        if fname.endswith('.vmstat'):
            parse_vmstat(full_path, test_type, psi_state, data)

        # fio JSON for IO test
        if fname.endswith('.fio.json'):
            parse_fio_json(full_path, test_type, psi_state, data)

    return data

# ---------------------------
# Reporting
# ---------------------------
def print_table_for_test(test_name, metrics):
    """
    Print one enabled-vs-disabled comparison table for a single test type.

    Rows with both states show a percentage delta relative to the disabled
    value ("N/A" when the disabled value is zero); rows with only one state
    print dashes for the missing columns.
    """
    rule = "-" * 60
    print(f"\nTest Type: {test_name.upper()}")
    print(rule)
    print(f"{'Metric':<28} | {'PSI Enabled':>12} | {'PSI Disabled':>12} | {'Delta (%)':>10}")
    print(rule)
    for name, readings in metrics.items():
        en = readings.get('enabled')
        dis = readings.get('disabled')
        if en is None and dis is None:
            continue  # nothing to show for this metric
        if en is None:
            print(f"{name:<28} | {'-':>12} | {dis:>12.3f} | {'-':>10}")
        elif dis is None:
            print(f"{name:<28} | {en:>12.3f} | {'-':>12} | {'-':>10}")
        else:
            delta = "N/A" if dis == 0 else f"{(en - dis) / dis * 100.0:.1f}%"
            print(f"{name:<28} | {en:>12.3f} | {dis:>12.3f} | {delta:>10}")
    print(rule)

def generate_charts(test_name, metrics, out_dir="."):
    """
    Render a grouped bar chart comparing PSI enabled vs disabled values.

    Only metrics present for BOTH states are plotted; if none qualify, the
    function returns without creating a file.  The chart is written to
    <out_dir>/<test_name>_comparison.png, and each bar pair is annotated
    with its percentage delta (skipped when the disabled value is zero).
    """
    metric_names = []
    enabled_vals = []
    disabled_vals = []
    for m, vals in metrics.items():
        if 'enabled' in vals and 'disabled' in vals:
            metric_names.append(m)
            enabled_vals.append(vals['enabled'])
            disabled_vals.append(vals['disabled'])

    if not metric_names:
        return

    x = np.arange(len(metric_names))
    width = 0.35
    fig = plt.figure(figsize=(max(8, len(metric_names)*1.2), 5))
    try:
        plt.bar(x - width/2, enabled_vals, width, label='PSI Enabled')
        plt.bar(x + width/2, disabled_vals, width, label='PSI Disabled')
        plt.xticks(x, metric_names, rotation=30, ha='right')
        plt.ylabel('Value')
        plt.title(f'PSI Performance Comparison - {test_name.upper()}')
        plt.legend()

        # Annotate delta above the taller bar of each pair.
        for i, (en, dis) in enumerate(zip(enabled_vals, disabled_vals)):
            if dis != 0:
                diff_pct = (en - dis) / dis * 100.0
                plt.text(i, max(en, dis)*1.02, f'{diff_pct:+.1f}%', ha='center', fontsize=9)

        plt.tight_layout()
        chart_path = os.path.join(out_dir, f"{test_name}_comparison.png")
        plt.savefig(chart_path, dpi=120)
        print(f"Chart saved: {chart_path}")
    finally:
        # Bug fix: the original never closed the figure.  pyplot keeps every
        # figure alive until plt.close(), so one call per test type leaks
        # memory and eventually triggers matplotlib's "more than 20 figures
        # have been opened" warning.
        plt.close(fig)

def generate_report(data, out_dir="."):
    """
    Emit the full analysis: a banner, then one table and one chart per
    test type found in `data`.  Charts are written into `out_dir`.
    """
    print("PSI Performance Analysis Report")
    print("=" * 60)
    for test_type, metrics in data.items():
        print_table_for_test(test_type, metrics)
        generate_charts(test_type, metrics, out_dir=out_dir)
    print("\nReport generation complete.")

# ---------------------------
# Main
# ---------------------------
def main():
    """CLI entry point: validate the results-directory argument, then
    parse the results and emit the report (charts go into that directory)."""
    args = sys.argv
    if len(args) != 2:
        print(f"Usage: {args[0]} <results_directory>")
        sys.exit(1)

    target = args[1]
    if not os.path.isdir(target):
        print(f"ERROR: Directory not found: {target}")
        sys.exit(1)

    generate_report(parse_results(target), out_dir=target)

# Run the CLI entry point only when executed as a script (not on import).
if __name__ == "__main__":
    main()
