"""End-to-end experiment runner for FedComm simulation.

Adds optional acceleration / measurement features:
 - --eval-interval: evaluate every N rounds (last round always)
 - --compress-fp16: send model deltas in float16 to reduce size
Generates two CSVs:
 - metrics.csv: per-client per-round metrics
 - metrics_eval.csv: evaluation rounds only (round, loss, acc)
"""
import argparse
import time
import pickle
import random
import pandas as pd
import torch
import numpy as np
from . import dataset as ds_mod
from . import model as model_mod
from .server import FedServer
from .client import Client
from .protocols.auth import ECDSAAuth, AuthScheme


def run_experiment(args):
    """Run the full federated simulation and write two metrics CSVs.

    Per round: (optionally sub-sample clients) -> local training ->
    optional fp16 compression -> serialize + sign/verify the delta ->
    server aggregation -> conditional global evaluation.

    Outputs:
        args.out_csv           per-client per-round rows
        <out_csv stem>_eval.csv  evaluation rounds only (round, loss, acc)

    Raises:
        ValueError: if args.auth_backend is not a supported scheme.
    """
    seed_everything(args.seed)
    loaders, test_loader = ds_mod.get_client_dataloaders(
        args.num_clients,
        batch_size=args.batch_size,
        non_iid=args.non_iid,
        alpha=args.alpha,
        synthetic=args.synthetic,
        synth_train_per_client=args.synth_train_per_client,
        synth_test_size=args.synth_test_size,
    )

    # Auth backend selection (extendable later)
    if args.auth_backend == 'ecdsa':
        backend: AuthScheme = ECDSAAuth()
    else:
        raise ValueError(f"Unsupported auth backend: {args.auth_backend}")

    server = FedServer(auth_scheme=backend)
    clients = []
    for cid in range(args.num_clients):
        c = Client(str(cid), loaders[cid], auth_scheme=backend)
        server.register_client(str(cid), c.get_public_key())
        clients.append(c)

    # Guard: --eval-interval 0 (or negative) would raise ZeroDivisionError
    # in the modulo below; clamp to 1 ("evaluate every round").
    eval_every = max(1, args.eval_interval)

    metrics_rows = []
    eval_rows = []

    for rnd in range(1, args.rounds + 1):
        round_start = time.perf_counter()
        if args.max_clients_per_round and args.max_clients_per_round < args.num_clients:
            selected = random.sample(range(args.num_clients), args.max_clients_per_round)
        else:
            selected = range(args.num_clients)
        round_sign_times = []
        round_verify_times = []
        round_bytes = 0
        for cid in selected:
            c = clients[cid]
            delta, samples = c.train_local(epochs=args.local_epochs, lr=args.lr)
            # Optional FP16 compression BEFORE serialization (halves payload size).
            if args.compress_fp16:
                delta_to_send = {k: v.detach().cpu().half() for k, v in delta.items()}
                compress_mode = 'fp16'
            else:
                delta_to_send = {k: v.detach().cpu() for k, v in delta.items()}
                compress_mode = 'none'

            # NOTE: pickle is used only to simulate the wire format inside this
            # trusted simulation; never unpickle data from untrusted peers.
            message = pickle.dumps(delta_to_send, protocol=pickle.HIGHEST_PROTOCOL)
            t0 = time.perf_counter()
            # Inner timing reported by the backend is unused here; the outer
            # wall-clock measurement includes call overhead on purpose.
            sig, _inner_sign_time = backend.sign(c.sk, message)
            sign_time = time.perf_counter() - t0
            valid, verify_time = backend.verify(c.pk, message, sig)
            round_sign_times.append(sign_time)
            round_verify_times.append(verify_time)
            # Decompress (restore to float32) before giving to server
            if compress_mode == 'fp16':
                recv_delta = {k: v.float() for k, v in pickle.loads(message).items()}
            else:
                recv_delta = pickle.loads(message)
            ok = server.receive_update(str(cid), recv_delta, sig, {'samples': samples}, raw_message=message)
            round_bytes += len(message) + len(sig)
            metrics_rows.append({
                'round': rnd,
                'client_id': cid,
                'accepted': ok,
                'samples': samples,
                'sig_size': len(sig),
                'msg_size': len(message),
                'sign_time': sign_time,
                'verify_time': verify_time,
                'compress': compress_mode
            })
        server.aggregate()
        # Conditional evaluation: every `eval_every` rounds, plus the final round.
        do_eval = (rnd % eval_every == 0) or (rnd == args.rounds)
        if do_eval:
            loss, acc = evaluate_global(server, test_loader, device='cpu')
            eval_rows.append({'round': rnd, 'loss': loss, 'acc': acc})
        else:
            loss, acc = float('nan'), float('nan')
        round_time = time.perf_counter() - round_start
        print(
            f"Round {rnd}: acc={acc if not np.isnan(acc) else 'skip'} "
            f"loss={loss if not np.isnan(loss) else 'skip'} "
            f"bytes={round_bytes} sign_avg={np.mean(round_sign_times):.4f}s "
            f"verify_avg={np.mean(round_verify_times):.4f}s time={round_time:.2f}s")

    df = pd.DataFrame(metrics_rows)
    df.to_csv(args.out_csv, index=False)
    print("Metrics saved to", args.out_csv)

    # Derive the eval CSV name by swapping the suffix only when it is actually
    # the file extension; a bare str.replace would corrupt names where '.csv'
    # appears mid-path (e.g. 'run.csv.bak') or silently no-op without it.
    if args.out_csv.endswith('.csv'):
        eval_csv = args.out_csv[:-len('.csv')] + '_eval.csv'
    else:
        eval_csv = args.out_csv + '_eval.csv'
    pd.DataFrame(eval_rows).to_csv(eval_csv, index=False)
    print("Eval metrics saved to", eval_csv)


def evaluate_global(server: FedServer, test_loader, device='cpu'):
    """Return (mean cross-entropy loss, accuracy) of the server's global model
    over all batches in *test_loader*, evaluated on *device* without gradients.
    """
    net = server.global_model.to(device)
    net.eval()
    loss_fn = torch.nn.CrossEntropyLoss()
    loss_sum = 0.0
    correct = 0
    seen = 0
    with torch.no_grad():
        for xb, yb in test_loader:
            xb, yb = xb.to(device), yb.to(device)
            logits = net(xb)
            batch = xb.size(0)
            # Weight per-batch mean loss by batch size so the final average
            # is exact even when the last batch is smaller.
            loss_sum += loss_fn(logits, yb).item() * batch
            correct += (logits.argmax(1) == yb).sum().item()
            seen += batch
    return loss_sum / seen, correct / seen


def parse_args(argv=None):
    """Parse command-line options for the experiment runner.

    Args:
        argv: Optional list of argument strings. Defaults to None, in which
            case argparse reads sys.argv[1:] — identical to the previous
            behavior. Passing an explicit list makes the parser usable from
            tests and other scripts without mutating sys.argv.

    Returns:
        argparse.Namespace with all experiment settings.
    """
    p = argparse.ArgumentParser()
    p.add_argument('--num-clients', type=int, default=2)
    p.add_argument('--rounds', type=int, default=2)
    p.add_argument('--local-epochs', type=int, default=1)
    p.add_argument('--batch-size', type=int, default=32)
    p.add_argument('--lr', type=float, default=0.01)
    p.add_argument('--non-iid', action='store_true')
    p.add_argument('--alpha', type=float, default=0.5)
    p.add_argument('--auth-backend', type=str, default='ecdsa')
    p.add_argument('--synthetic', action='store_true', help='Use synthetic random tensors instead of CIFAR10 (fast)')
    p.add_argument('--seed', type=int, default=42)
    p.add_argument('--out-csv', type=str, default='metrics.csv')
    p.add_argument('--synth-train-per-client', type=int, default=256)
    p.add_argument('--synth-test-size', type=int, default=512)
    p.add_argument('--max-clients-per-round', type=int, default=0, help='If >0, randomly sample this many clients each round')
    p.add_argument('--eval-interval', type=int, default=1, help='Evaluate every N rounds (last round always)')
    p.add_argument('--compress-fp16', action='store_true', help='Send deltas serialized in float16 to reduce size')
    return p.parse_args(argv)


def seed_everything(seed: int = 42):
    """Seed every RNG in play (random, numpy, torch, CUDA) for reproducibility."""
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed):
        seed_fn(seed)
    if torch.cuda.is_available():  # pragma: no cover
        torch.cuda.manual_seed_all(seed)
    # Trade cuDNN autotuning speed for deterministic kernel selection.
    torch.backends.cudnn.deterministic = True  # type: ignore
    torch.backends.cudnn.benchmark = False  # type: ignore

if __name__ == '__main__':
    # Script entry point: parse CLI options and launch the simulation.
    run_experiment(parse_args())