#!/usr/bin/env python3
"""
Summarize FusionBench outputs JSON files (e.g., ViT-B-32_8task/1_0.json) into a Markdown table.

Assumptions:
- File name pattern: <round>_<stage>.json where round starts at 1 and stage starts at 0.
- Each JSON contains task keys (dataset names) mapping to {"accuracy": float, "loss": float}.
- As stages progress, new tasks are added. Earlier tasks reappear with updated accuracy.

Generated table layout (Markdown):
- Rows: tasks in the order they are introduced across stages.
- Columns: per-stage accuracy for each task (left-to-right by stage introduction) and final "Avg" column.
- Column legend (written as the last table row): task order followed by "Avg".

Usage:
  python scripts/summarize_outputs_to_md.py \
    --input-dir outputs/ViT-B-32_8task \
    --output-file outputs/ViT-B-32_8task/summary.md
"""

import argparse
import json
import os
import re
import math
from collections import defaultdict, OrderedDict
from typing import Dict, List, Tuple


# Matches result files named "<round>_<stage>.json" (e.g. "1_0.json"):
# group 1 is the round index (starts at 1), group 2 the stage index (starts at 0).
FILENAME_RE = re.compile(r"^(\d+)_(\d+)\.json$")


def find_stage_files(input_dir: str) -> List[Tuple[int, int, str]]:
    """Scan *input_dir* for result files named '<round>_<stage>.json'.

    Returns:
        (round, stage, path) tuples sorted by stage first, then round.
        Round is ignored for table ordering — a single experiment series
        is assumed.
    """
    matched: List[Tuple[int, int, str]] = []
    for entry in os.listdir(input_dir):
        hit = FILENAME_RE.match(entry)
        if hit is None:
            continue
        matched.append(
            (int(hit.group(1)), int(hit.group(2)), os.path.join(input_dir, entry))
        )
    return sorted(matched, key=lambda item: (item[1], item[0]))


def load_json(path: str) -> Dict:
    """Read the UTF-8 JSON document at *path* and return the parsed object."""
    with open(path, "r", encoding="utf-8") as handle:
        raw = handle.read()
    return json.loads(raw)


def collect_tasks_and_accuracies(files: List[Tuple[int, int, str]]) -> Tuple[List[str], List[Dict[str, float]], Dict[str, int]]:
    """Collect task introduction order and per-stage accuracies.

    Args:
        files: (round, stage, path) tuples, ordered by stage ascending.

    Returns a 3-tuple (the original docstring omitted the third element):
    - tasks_in_order: tasks in the order they first appear across stages
    - stage_task_accs: one {task_name: accuracy} dict per stage
    - first_stage_for_task: task_name -> stage index at which it first appeared
    """
    tasks_in_order: List[str] = []
    stage_task_accs: List[Dict[str, float]] = []
    first_stage_for_task: Dict[str, int] = {}

    for _, stage, path in files:
        # Inline JSON read (same as load_json) keeps this block self-contained.
        with open(path, "r", encoding="utf-8") as fh:
            data = fh.read()
        payload = json.loads(data)

        # Task entries are dicts carrying an "accuracy" key; meta keys such as
        # "model_info" and "average" are excluded.
        task_accs: Dict[str, float] = {
            name: float(entry["accuracy"])
            for name, entry in payload.items()
            if name not in {"model_info", "average"}
            and isinstance(entry, dict)
            and "accuracy" in entry
        }
        stage_task_accs.append(task_accs)

        # Record first-seen order; first_stage_for_task doubles as the "seen" set.
        for name in task_accs:
            if name not in first_stage_for_task:
                first_stage_for_task[name] = stage
                tasks_in_order.append(name)

    return tasks_in_order, stage_task_accs, first_stage_for_task


def compute_task_avgs(tasks: List[str], stage_task_accs: List[Dict[str, float]]) -> Dict[str, float]:
    """Arithmetic-mean accuracy per task over the stages where it appears.

    Tasks with no recorded value map to NaN; means are rounded to 4 decimals.
    """
    samples: Dict[str, List[float]] = defaultdict(list)
    for stage_accs in stage_task_accs:
        for name, value in stage_accs.items():
            samples[name].append(value)

    result: Dict[str, float] = {}
    for name in tasks:
        vals = samples[name]
        result[name] = round(sum(vals) / len(vals), 4) if vals else float("nan")
    return result


def format_md_table(tasks: List[str], stage_task_accs: List[Dict[str, float]]) -> str:
    """Create a lower-triangular Markdown table like the provided example.

    - Columns (excluding the first task-name column) correspond to stages
      labeled by the task introduced at that stage; headers (bottom legend
      row) are the tasks-in-order plus a final "Avg" column.
    - Row i (task i) contains values for stages j <= i (lower triangle);
      cells above the diagonal are blank.
    - Avg is the mean of the filled values on that row.

    Bug fixed: the original located each row's stage by searching for a stage
    whose cumulative introduced-task count equals i+1. When a single stage
    introduced more than one task, some counts were skipped and those rows
    came out blank. We now use the stage at which the row's task first
    appeared, which is identical when stages introduce one task each.
    """
    num_tasks = len(tasks)

    # Stage index at which each task first appears.
    first_stage_for_task: Dict[str, int] = {}
    for j, accs in enumerate(stage_task_accs):
        for task in tasks:
            if task in accs and task not in first_stage_for_task:
                first_stage_for_task[task] = j

    def fmt(acc: float) -> str:
        # Non-float values (defensive) render as an empty cell, as before.
        return f"{acc:.3f}" if isinstance(acc, float) else ""

    rows: List[List[str]] = []
    for i, task in enumerate(tasks):
        row_cells: List[str] = [task]
        values_for_avg: List[float] = []
        # Row i reads accuracies from the stage where its own task was introduced.
        stage_idx = first_stage_for_task.get(task)
        for j in range(num_tasks):
            acc = None
            # Only fill cells up to the diagonal (lower triangle): j <= i.
            if j <= i and stage_idx is not None:
                acc = stage_task_accs[stage_idx].get(tasks[j])
            if acc is None:
                row_cells.append("")
            else:
                row_cells.append(fmt(acc))
                values_for_avg.append(float(acc))
        # Trailing Avg column: mean of the available values on this row.
        if values_for_avg:
            row_cells.append(fmt(sum(values_for_avg) / len(values_for_avg)))
        else:
            row_cells.append("")
        rows.append(row_cells)

    # Legend row at the bottom: blank corner cell, task names, then "Avg".
    footer = [""] + tasks + ["Avg"]

    # Total columns: 1 (task name) + num_tasks (stages) + 1 (Avg).
    col_count = 1 + num_tasks + 1

    # Blank aesthetic header row, matching the example layout.
    header_line = " | ".join([""] * (col_count + 2))

    sep_cells = ["-------------"] + ["-------"] * num_tasks + ["------"]
    sep_line = " | ".join([""] + sep_cells + [""])

    body_lines = [" | ".join([""] + row + [""]) for row in rows]
    footer_line = " | ".join([""] + footer + [""])

    return "\n".join([header_line, sep_line] + body_lines + [footer_line])


def compute_bwt(tasks: List[str], stage_task_accs: List[Dict[str, float]]) -> float:
    """Backward Transfer (BWT) for the continual-learning run.

    BWT = mean over tasks introduced before the final stage of
    (accuracy at the final stage - accuracy at the stage of introduction).
    Tasks introduced at the last stage are skipped (no later stages exist).
    Returns NaN when no stages were given or no task qualifies; the result
    is rounded to 4 decimals.
    """
    if not stage_task_accs:
        return float("nan")

    last_idx = len(stage_task_accs) - 1
    final_accs = stage_task_accs[-1]

    # Stage index at which each task first shows up.
    intro: Dict[str, int] = {}
    for idx, accs in enumerate(stage_task_accs):
        for name in tasks:
            if name in accs and name not in intro:
                intro[name] = idx

    diffs: List[float] = []
    for name in tasks:
        stage = intro.get(name)
        if stage is None or stage == last_idx:
            continue  # never seen, or introduced too late to measure transfer
        start = stage_task_accs[stage].get(name)
        end = final_accs.get(name)
        if start is None or end is None:
            continue
        diffs.append(float(end) - float(start))

    return round(sum(diffs) / len(diffs), 4) if diffs else float("nan")


def main() -> None:
    """CLI entry point: summarize stage JSONs to a Markdown table plus BWT."""
    parser = argparse.ArgumentParser(description="Summarize outputs JSON to Markdown table")
    parser.add_argument("--input-dir", required=True, help="Directory containing <round>_<stage>.json files")
    parser.add_argument("--output-file", required=True, help="Path to write summary markdown")
    args = parser.parse_args()

    files = find_stage_files(args.input_dir)
    if not files:
        raise SystemExit(f"No files matched in {args.input_dir}. Expect pattern '<round>_<stage>.json'.")

    tasks, stage_task_accs, _ = collect_tasks_and_accuracies(files)
    md = format_md_table(tasks, stage_task_accs)
    bwt = compute_bwt(tasks, stage_task_accs)

    # BUG FIX: os.makedirs("") raises FileNotFoundError when the output path
    # has no directory component (e.g. --output-file summary.md).
    out_dir = os.path.dirname(args.output_file)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)

    # Single open instead of write-then-append: table first, BWT metric last.
    with open(args.output_file, "w", encoding="utf-8") as f:
        f.write(md + "\n")
        if math.isnan(bwt):
            f.write("BWT: N/A\n")
        else:
            f.write(f"BWT: {bwt:.4f}\n")

    print(f"Wrote summary to {args.output_file}")


# Run the CLI only when this file is executed directly (not on import).
if __name__ == "__main__":
    main()