import csv
import time
import sys
import os
import pickle
import math
from typing import Any, Dict, List

# Ensure project root in path so sibling pr*_ packages resolve when run as a script
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# Scenario framework additions
LOG_CSV = "overhead_log.csv"  # per-step timing/size log; truncated and rewritten on every run
# Column order for every row appended by log_row() below.
CSV_HEADER = ["scenario","step","compute_time_s","comm_bytes","payload_type","note"]

# Utility size function
# numpy is optional here: when unavailable, payload_size() falls back to
# pickle length / sys.getsizeof instead of ndarray.nbytes.
try:
    import numpy as np
except ImportError:
    np = None

def payload_size(obj: Any) -> int:
    """Best-effort byte size of *obj* as it would travel over the wire.

    ndarrays report their raw buffer size (nbytes); everything else is
    measured by pickled length, falling back to the shallow
    sys.getsizeof when the object cannot be pickled.
    """
    if np is not None and isinstance(obj, np.ndarray):
        return int(obj.nbytes)
    try:
        serialized = pickle.dumps(obj)
    except Exception:
        # Unpicklable object: shallow size is the only estimate left.
        return sys.getsizeof(obj)
    return len(serialized)

# Start a fresh log file containing only the header row (clobbers any old log).
with open(LOG_CSV, 'w', newline='') as log_file:
    csv.writer(log_file).writerow(CSV_HEADER)

# Basic log helper
def log_row(scenario: str, step: str, t0: float, t1: float, payload: Any, payload_type: str, note: str=""):
    """Append one overhead record to LOG_CSV.

    The record holds the scenario/step labels, elapsed seconds between the
    two perf_counter readings t0 and t1 (6 decimal places), the payload's
    byte size via payload_size(), its type label, and a free-form note.
    """
    record = [
        scenario,
        step,
        f"{t1 - t0:.6f}",
        payload_size(payload),
        payload_type,
        note,
    ]
    with open(LOG_CSV, 'a', newline='') as log_file:
        csv.writer(log_file).writerow(record)

# Placeholder markers for next steps (scenarios will be implemented in following edits)

from pr1_arbiter.arbiter import Arbiter
from pr2_he.he import HE
from pr3_pca.pca import PCACompressor
from pr4_guest.guest import Guest
import random

# Arbiter setup yields the public key used by he.encrypt() in run_scenario.
arbiter = Arbiter()
pk = arbiter.setup()
he = HE()

# Synthetic data for two hosts: 50 samples x 16 features of standard-normal noise each.
import numpy as np
HOSTS = ["host1","host2"]
HOST_DATA = {
    "host1": np.random.randn(50, 16),
    "host2": np.random.randn(50, 16)
}
# NOTE(review): GUEST is constructed but never referenced in this chunk — confirm it is used elsewhere.
GUEST = Guest()


def simulate_forward(host_matrix):  # removed explicit numpy type hints
    """Placeholder host-side forward pass: column-wise mean of the data matrix."""
    return np.mean(host_matrix, axis=0)


def run_scenario(name: str, use_he: bool, use_pca: bool, pca_k: int|None, backup: bool):
    """Run one end-to-end federated round and log per-step overhead to LOG_CSV.

    Steps logged: optional per-host PCA fit (startup cost), per-host forward,
    optional PCA transform, optional HE encryption, simulated send, guest
    collection (optionally restricted to one host when backup is on),
    optional decryption, and a final mean aggregation.

    Args:
        name: scenario label written into every CSV row.
        use_he: encrypt per-host vectors with the placeholder HE scheme.
        use_pca: fit and apply per-host PCA compression before sending.
        pca_k: number of PCA components (ignored when use_pca is False).
        backup: when True, simulate host dropout by collecting from only the
            first host in HOSTS.
    """
    pca_models = {}
    transformed_vectors = {}

    # PCA fit (if enabled) — one-time startup cost, logged under 'pca_fit'
    # so later summaries can split startup from runtime work.
    if use_pca:
        for h, mat in HOST_DATA.items():
            t0 = time.perf_counter()
            pca = PCACompressor(n_components=pca_k)
            pca.fit_pca(mat)
            t1 = time.perf_counter()
            pca_models[h] = pca
            log_row(name, "pca_fit", t0, t1, mat, "matrix", f"host={h};k={pca_k}")

    # Forward + optional PCA transform
    for h, mat in HOST_DATA.items():
        t0 = time.perf_counter()
        vec = simulate_forward(mat)
        t1 = time.perf_counter()
        log_row(name, "forward", t0, t1, vec, "vector", f"host={h};dim={vec.shape[0]}")

        if use_pca:
            t2 = time.perf_counter()
            vec_t = pca_models[h].transform(vec)
            t3 = time.perf_counter()
            log_row(name, "pca_transform", t2, t3, vec_t, "pca_vector", f"host={h};k={pca_k}")
            transformed_vectors[h] = vec_t
        else:
            transformed_vectors[h] = vec

    # Encrypt + send simulation
    encrypted_payloads = {}
    for h, vec in transformed_vectors.items():
        send_vec = vec
        if use_he:
            t4 = time.perf_counter()
            # tolist() already returns a plain Python list; the previous
            # list(...) wrapper was a redundant copy.
            enc = he.encrypt(pk, vec.tolist())  # using placeholder HE impl
            t5 = time.perf_counter()
            log_row(name, "encrypt", t4, t5, enc, "cipher_list", f"host={h}")
            send_vec = enc
        # Communication size measurement (simulate send)
        t6 = time.perf_counter()
        _ = payload_size(send_vec)  # no-op, just to mirror timing structure
        t7 = time.perf_counter()
        log_row(name, "send", t6, t7, send_vec, "cipher_list" if use_he else "vector", f"host={h}")
        encrypted_payloads[h] = send_vec

    # Guest collection (with backup logic simulation)
    # For simplicity, reuse existing guest.collect_intermediates for timing baseline-like
    # but we override number of hosts actually used if backup.
    selected_hosts = HOSTS if not backup else HOSTS[:1]
    t8 = time.perf_counter()
    collected = {h: encrypted_payloads[h] for h in selected_hosts}
    t9 = time.perf_counter()
    log_row(name, "collect", t8, t9, collected, "dict", f"hosts_used={len(selected_hosts)};backup={backup}")

    # Decrypt & aggregate
    agg_inputs = []
    for h, payload in collected.items():
        if use_he:
            t10 = time.perf_counter()
            dec = he.decrypt("sk_placeholder", payload)  # placeholder decrypt
            t11 = time.perf_counter()
            log_row(name, "decrypt", t10, t11, dec, "vector", f"host={h}")
            agg_inputs.append(dec)
        else:
            agg_inputs.append(payload)

    # Aggregate (mean)
    t12 = time.perf_counter()
    # For placeholder, convert all lists to float arrays
    agg_matrix = np.vstack([np.array(v, dtype=float) for v in agg_inputs])
    agg = agg_matrix.mean(axis=0)
    t13 = time.perf_counter()
    log_row(name, "aggregate", t12, t13, agg, "vector", f"inputs={len(agg_inputs)}")


# Run three baseline scenarios:
#   plain          — no encryption, no compression (cost baseline)
#   he_no_pca      — HE encryption only
#   he_pca_backup  — HE + PCA to 8 components, with simulated host dropout
run_scenario("plain", use_he=False, use_pca=False, pca_k=None, backup=False)
run_scenario("he_no_pca", use_he=True, use_pca=False, pca_k=None, backup=False)
run_scenario("he_pca_backup", use_he=True, use_pca=True, pca_k=8, backup=True)

print(f"Multi-scenario overhead logged to {LOG_CSV}")

# Enhanced summary generation with startup vs runtime split
SUMMARY_CSV = "overhead_round.csv"
from collections import defaultdict

# Per-scenario accumulators: overall totals and the startup-only portion
# (currently just 'pca_fit' rows), so runtime cost can be derived by subtraction.
agg_time_total, agg_time_startup = defaultdict(float), defaultdict(float)
agg_comm_total, agg_comm_startup = defaultdict(int), defaultdict(int)
count_steps_total, count_steps_startup = defaultdict(int), defaultdict(int)

import csv as _csv
# Re-read the per-step log and accumulate per-scenario totals, separating
# one-time startup work ('pca_fit') from repeated runtime work.
with open(LOG_CSV, 'r') as f:
    reader = _csv.reader(f)
    header = next(reader, None)  # skip the header row
    for row in reader:
        if len(row) < 6:
            continue  # malformed / short row
        # Slice to 6 fields: the old full-row unpack raised an uncaught
        # ValueError whenever a row carried extra trailing columns.
        scenario, step, compute_time_s, comm_bytes, payload_type, note = row[:6]
        try:
            ct = float(compute_time_s)
            cb = int(comm_bytes)
        except ValueError:
            continue  # non-numeric row (e.g. a stray header) — ignore
        agg_time_total[scenario] += ct
        agg_comm_total[scenario] += cb
        count_steps_total[scenario] += 1
        if step == 'pca_fit':
            agg_time_startup[scenario] += ct
            agg_comm_startup[scenario] += cb
            count_steps_startup[scenario] += 1

plain_time = agg_time_total.get('plain', None)
plain_comm = agg_comm_total.get('plain', None)

# Plain-scenario runtime baselines. These were previously recomputed on every
# loop iteration although they never change — hoisted out of the loop.
plain_runtime_t = (agg_time_total['plain'] - agg_time_startup.get('plain', 0.0)) if plain_time is not None else None
plain_runtime_c = (agg_comm_total['plain'] - agg_comm_startup.get('plain', 0)) if plain_comm is not None else None

# Emit one row per scenario splitting startup vs runtime cost, with
# relative-to-plain runtime ratios and the communication saving percentage.
with open(SUMMARY_CSV, 'w', newline='') as f:
    w = _csv.writer(f)
    w.writerow([
        "scenario",
        "startup_compute_time_s",
        "startup_comm_bytes",
        "startup_steps",
        "runtime_compute_time_s",
        "runtime_comm_bytes",
        "runtime_steps",
        "total_compute_time_s",
        "total_comm_bytes",
        "runtime_avg_compute_per_step_s",
        "runtime_avg_comm_per_step",
        "rel_runtime_compute_vs_plain",
        "rel_runtime_comm_vs_plain",
        "comm_runtime_saving_vs_plain_pct"
    ])
    for sc in sorted(agg_time_total.keys()):
        startup_t = agg_time_startup[sc]
        startup_c = agg_comm_startup[sc]
        startup_steps = count_steps_startup[sc]
        total_t = agg_time_total[sc]
        total_c = agg_comm_total[sc]
        total_steps = count_steps_total[sc]
        # Runtime = everything that is not startup (pca_fit) work.
        runtime_t = total_t - startup_t
        runtime_c = total_c - startup_c
        runtime_steps = total_steps - startup_steps
        avg_rt_t = runtime_t / runtime_steps if runtime_steps else 0.0
        avg_rt_c = runtime_c / runtime_steps if runtime_steps else 0.0
        # Ratios default to 1.0 for the plain scenario itself or a missing/zero baseline.
        rel_rt_t = (runtime_t / plain_runtime_t) if plain_runtime_t and sc != 'plain' else 1.0
        rel_rt_c = (runtime_c / plain_runtime_c) if plain_runtime_c and sc != 'plain' else 1.0
        # rel_rt_c is always a float here; the old "is not None" check was dead code.
        comm_save_rt_pct = (1 - rel_rt_c) * 100 if sc != 'plain' else 0.0
        w.writerow([
            sc,
            f"{startup_t:.6f}",
            startup_c,
            startup_steps,
            f"{runtime_t:.6f}",
            runtime_c,
            runtime_steps,
            f"{total_t:.6f}",
            total_c,
            f"{avg_rt_t:.6f}",
            f"{avg_rt_c:.2f}",
            f"{rel_rt_t:.3f}",
            f"{rel_rt_c:.3f}",
            f"{comm_save_rt_pct:.2f}"
        ])

print(f"Round summary written to {SUMMARY_CSV}")

# Generate Markdown summary from SUMMARY_CSV (compact columns)
MD_OUT = "overhead_round.md"
try:
    import csv as _c
    with open(SUMMARY_CSV, 'r') as fr:
        rd = _c.reader(fr)
        header = next(rd, None)
        rows = [r for r in rd if r]
    # Previously an empty summary file left header = None and crashed with an
    # opaque TypeError below; fail with a clear message instead (still caught
    # by the best-effort handler at the bottom).
    if header is None:
        raise ValueError(f"{SUMMARY_CSV} is empty (no header row)")
    # Map long headers to short names
    mapping = {
        'scenario':'scenario',
        'startup_compute_time_s':'start_comp',
        'startup_comm_bytes':'start_comm',
        'startup_steps':'start_steps',
        'runtime_compute_time_s':'run_comp',
        'runtime_comm_bytes':'run_comm',
        'runtime_steps':'run_steps',
        'total_compute_time_s':'total_comp',
        'total_comm_bytes':'total_comm',
        'runtime_avg_compute_per_step_s':'avg_comp',
        'runtime_avg_comm_per_step':'avg_comm',
        'rel_runtime_compute_vs_plain':'rel_comp',
        'rel_runtime_comm_vs_plain':'rel_comm',
        'comm_runtime_saving_vs_plain_pct':'comm_save%'
    }
    short_header = [mapping.get(h,h) for h in header]

    def esc(cell: str) -> str:
        # Escape pipes so cell text cannot break the Markdown table layout.
        return cell.replace('|','\\|').strip()

    # Markdown table
    md_lines = []
    md_lines.append("| " + " | ".join(short_header) + " |")
    md_lines.append("|" + "|".join([" --- " for _ in short_header]) + "|")
    for r in rows:
        md_lines.append("| " + " | ".join(esc(c) for c in r) + " |")

    # Fixed-width plain table: column width = widest cell in that column.
    all_lines = [short_header] + rows
    col_widths = [max(len(str(row[i])) for row in all_lines) for i in range(len(short_header))]
    def fmt_row(r):
        return " ".join(str(r[i]).ljust(col_widths[i]) for i in range(len(r)))
    plain_table = [fmt_row(short_header)]
    plain_table.append(" ".join('-'*w for w in col_widths))
    for r in rows:
        plain_table.append(fmt_row(r))

    with open(MD_OUT, 'w') as fw:
        fw.write("# Overhead Round Summary\n\n")
        fw.write("Generated from `overhead_round.csv`\n\n")
        fw.write("## Markdown Table\n\n")
        fw.write("\n".join(md_lines) + "\n\n")
        fw.write("## Fixed Width Table\n\n")
        fw.write("````\n")
        fw.write("\n".join(plain_table) + "\n")
        fw.write("````\n")
    print(f"Markdown summary written to {MD_OUT}")
except Exception as e:
    # Best-effort: the markdown report is optional, so report and continue.
    print(f"Failed to generate markdown summary: {e}")