# Implement a simulation of Hierarchical Federated Learning (HFL) in a multi-hop cluster-based VANET.
# Requirements:
# 1. Define Vehicle, ClusterHead, and Server classes.
# 2. Each Vehicle has local data, speed, and a local model.
# 3. Vehicles train locally and send updates to their ClusterHead.
# 4. ClusterHead aggregates member updates and sends to the Server.
# 5. Server aggregates updates from all ClusterHeads and returns a global model.
# 6. Implement cluster formation based on relative speed + cosine similarity of model updates.
# 7. Implement cluster-head switching if current CH becomes unstable (e.g., high mobility).
# 8. Implement communication cost tracking (number of messages exchanged).
# 9. Run multiple rounds of HFL and record test accuracy, loss, and communication cost.

# TODO: Define Vehicle, ClusterHead, and Server classes with basic attributes and methods.
from __future__ import annotations
from typing import List, Dict, Any, Tuple
import csv
import numpy as np
from collections import defaultdict

class Vehicle:
    """An FL client riding in the VANET.

    Attributes:
        vid: Unique vehicle id.
        speed: Current scalar speed value.
        data: Tuple (X, y) holding the local dataset as numpy arrays.
        model: Current parameter vector (numpy array).
        prev_model: Snapshot of the model before the last local_train call.
    """
    def __init__(self, vid: int, speed: float, data: Tuple[np.ndarray, np.ndarray], model_dim: int):
        self.vid = vid
        self.speed = speed
        self.data = data  # (X, y)
        # Start from the origin so every client shares the same initial model.
        self.model = np.zeros(model_dim, dtype=float)
        self.prev_model = self.model.copy()

    def local_train(self, epochs: int = 1, lr: float = 0.05) -> Tuple[np.ndarray, float]:
        """Run full-batch logistic-regression gradient descent locally.

        Returns:
            (delta, comp_cost) where delta is the model change produced by
            this call and comp_cost approximates 2 * N * D mult-adds per
            epoch (forward + backward, lower-order terms ignored).
        """
        features, labels = self.data
        if len(features) == 0:
            # Nothing to learn from: zero update, zero cost.
            return np.zeros_like(self.model), 0.0
        n_samples, dim = features.shape
        self.prev_model = self.model.copy()
        flop_estimate = 0.0
        for _ in range(epochs):
            activations = 1 / (1 + np.exp(-(features @ self.model)))   # sigmoid forward
            gradient = features.T @ (activations - labels) / n_samples  # BCE gradient
            self.model = self.model - lr * gradient
            flop_estimate += 2 * n_samples * dim  # coarse per-epoch estimate
        return self.model - self.prev_model, flop_estimate

    def similarity_with_prev(self) -> float:
        """Cosine similarity between the pre- and post-training model vectors."""
        numerator = float(np.dot(self.prev_model, self.model))
        denom = np.linalg.norm(self.prev_model) * np.linalg.norm(self.model) + 1e-12
        return numerator / denom if denom > 0 else 0.0

class ClusterHead(Vehicle):
    """A vehicle promoted to cluster head; it averages its members' updates."""

    def aggregate(self, member_updates: List[np.ndarray]) -> np.ndarray:
        """Return the mean of the member deltas and fold it into this head's model.

        An empty update list yields an all-zero delta and leaves the model alone.
        """
        if not member_updates:
            return np.zeros_like(self.model)
        mean_delta = np.mean(member_updates, axis=0)
        # The head's model acts as the cluster's reference model.
        self.model = self.model + mean_delta
        return mean_delta

class Server:
    """Central server that owns and updates the global model."""

    def __init__(self, model_dim: int):
        # Global model starts at the origin, matching the clients' initialization.
        self.global_model = np.zeros(model_dim, dtype=float)

    def aggregate(self, cluster_updates: List[np.ndarray]) -> np.ndarray:
        """Fold the mean of the cluster-level deltas into the global model.

        Returns the (possibly unchanged, when no updates arrived) global model.
        """
        if cluster_updates:
            self.global_model = self.global_model + np.mean(cluster_updates, axis=0)
        return self.global_model

def sigmoid(z: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-z))

def predict_proba(model: np.ndarray, X: np.ndarray) -> np.ndarray:
    """Class-1 probabilities of each row of X under a logistic model."""
    logits = X @ model
    return sigmoid(logits)

def predict_label(model: np.ndarray, X: np.ndarray, threshold: float = 0.5) -> np.ndarray:
    """Hard 0/1 predictions obtained by thresholding the class probabilities."""
    probs = predict_proba(model, X)
    return (probs >= threshold).astype(int)

def logistic_loss(model: np.ndarray, X: np.ndarray, y: np.ndarray) -> float:
    """Mean logistic (cross-entropy) loss for 0/1 labels; 0.0 on empty X.

    Bug fix: the previous sign transform `-y * (2*(y-0.5))` collapses to 0 for
    every y == 0 sample, so negatives always contributed a constant log(2)
    irrespective of the model. The correct mapping is s = 2y - 1 in {-1, +1},
    loss = mean(log(1 + exp(-s * logits))), computed stably via logaddexp.
    """
    if len(X) == 0:
        return 0.0
    signs = 2.0 * y - 1.0          # {0,1} -> {-1,+1}
    margins = signs * (X @ model)
    # log(1 + exp(-m)) == logaddexp(0, -m), stable for large |m|
    return float(np.mean(np.logaddexp(0.0, -margins)))

def accuracy(model: np.ndarray, X: np.ndarray, y: np.ndarray) -> float:
    """Fraction of samples whose thresholded prediction equals y (0.0 on empty X)."""
    if not len(X):
        return 0.0
    return float(np.mean(predict_label(model, X) == y))

def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    """Cosine of the angle between a and b; the epsilon makes zero vectors map to 0.0."""
    norm_product = np.linalg.norm(a) * np.linalg.norm(b) + 1e-12
    return float(np.dot(a, b) / norm_product)

def form_clusters(vehicles: List[Vehicle], max_cluster_size: int = 5) -> Tuple[Dict[int, List[Vehicle]], Dict[int, ClusterHead]]:
    """Form clusters and select cluster heads.

    Strategy:
      1. Compute pairwise normalized speed differences and cosine similarity
         of the current model vectors.
      2. Affinity score = w1 * (1 - normalized_speed_diff) + w2 * similarity.
      3. Greedy grouping: seed with the highest mean affinity among the
         unassigned nodes, then add its highest-affinity peers up to
         max_cluster_size.
      4. Head = member maximizing alpha * mean_sim_to_cluster
         - beta * |speed| / max_speed.

    Bug fix: the member lists are now built AFTER a plain Vehicle is promoted
    to ClusterHead, so `clusters[cid]` holds the same live objects as the
    shared `vehicles` list (previously it kept the stale pre-promotion
    Vehicle, diverging from `vehicles` and `heads`).

    Returns:
      clusters: cluster_id -> member Vehicles (head included).
      heads: cluster_id -> ClusterHead object (same object as in `vehicles`).
    """
    if not vehicles:
        return {}, {}
    speeds = np.array([v.speed for v in vehicles])
    max_speed = max(float(np.max(np.abs(speeds))), 1e-9)
    models = np.array([v.model for v in vehicles])
    n = len(vehicles)
    # Symmetric cosine-similarity matrix of the current model vectors.
    sim_mat = np.zeros((n, n))
    for i in range(n):
        for j in range(i, n):
            s = cosine_similarity(models[i], models[j])
            sim_mat[i, j] = sim_mat[j, i] = s
    # Pairwise speed difference, normalized by the fleet's max |speed|.
    speed_diff = np.abs(speeds.reshape(-1, 1) - speeds.reshape(1, -1)) / max_speed
    w1, w2 = 0.5, 0.5
    affinity = w1 * (1 - speed_diff) + w2 * sim_mat  # higher is better
    unassigned = set(range(n))
    clusters: Dict[int, List[Vehicle]] = {}
    heads: Dict[int, ClusterHead] = {}
    cid = 0
    while unassigned:
        # Seed = unassigned node with the highest mean affinity to the rest.
        seed = max(unassigned, key=lambda i: float(np.mean(affinity[i, list(unassigned)])))
        candidates = sorted(unassigned - {seed}, key=lambda j: affinity[seed, j], reverse=True)
        group = [seed] + candidates[:max_cluster_size - 1]
        unassigned -= set(group)
        # Head score: high similarity to the cluster, low absolute speed.
        alpha, beta = 0.7, 0.3
        def head_score(idx: int) -> float:
            others = [j for j in group if j != idx]
            mean_sim = float(np.mean([sim_mat[idx, j] for j in others])) if others else 1.0
            return alpha * mean_sim - beta * abs(vehicles[idx].speed) / max_speed
        head_idx = max(group, key=head_score)
        head_vehicle = vehicles[head_idx]
        if not isinstance(head_vehicle, ClusterHead):
            promoted = ClusterHead(head_vehicle.vid, head_vehicle.speed, head_vehicle.data, len(head_vehicle.model))
            promoted.model = head_vehicle.model.copy()
            promoted.prev_model = head_vehicle.prev_model.copy()
            vehicles[head_idx] = promoted  # replace in the shared fleet list
            head_vehicle = promoted
        # Build the member list only now, so it references the promoted object.
        clusters[cid] = [vehicles[i] for i in group]
        heads[cid] = head_vehicle  # type: ignore[assignment]
        cid += 1
    return clusters, heads

comm_cost = 0  # global communication counter (cumulative message units)

def record_comm(count: int = 1):
    """Add `count` message units to the global communication counter."""
    global comm_cost
    comm_cost = comm_cost + count

def simulate_round(vehicles: List[Vehicle], server: Server, max_cluster_size: int = 5, local_epochs: int = 1, lr: float = 0.05) -> Dict[str, Any]:
    """Run one full HFL round: cluster, train locally, aggregate, broadcast.

    Bug fix: the cluster head's own local update is now included in the
    cluster-level average. Previously the head trained (and its computation
    was costed) but its delta was silently discarded, and a singleton
    cluster contributed an all-zero update to the server.

    Returns:
        Dict with 'num_clusters', 'comm_cost_cumulative' (global counter
        snapshot), 'comm_cost_round' (placeholder, caller derives the diff)
        and 'comp_cost_round' (approximate mult-add count this round).
    """
    global comm_cost
    round_metrics: Dict[str, Any] = {}
    # 1. Re-form clusters every round to reflect the dynamic topology.
    clusters, heads = form_clusters(vehicles, max_cluster_size=max_cluster_size)
    # 2. Local training on every vehicle (heads included).
    vehicle_updates: Dict[int, np.ndarray] = {}
    round_comp_cost = 0.0
    for v in vehicles:
        delta, comp = v.local_train(epochs=local_epochs, lr=lr)
        vehicle_updates[v.vid] = delta
        round_comp_cost += comp
    # 3. Aggregation at each cluster head.
    cluster_updates = []
    cluster_agg_cost = 0.0
    for cid, members in clusters.items():
        head = heads[cid]
        # The head's own delta joins the average but costs no radio message.
        member_updates = [vehicle_updates[head.vid]]
        for m in members:
            if m.vid != head.vid:
                member_updates.append(vehicle_updates[m.vid])
                record_comm(1)  # CM -> CH message
        agg = head.aggregate(member_updates)
        cluster_updates.append(agg)
        record_comm(1)  # CH -> Server message
        update_dim = len(member_updates[0])
        cluster_agg_cost += update_dim * len(member_updates)  # mean-aggregation ops (approx)
    # 4. Global aggregation at the server.
    global_model = server.aggregate(cluster_updates)
    if cluster_updates:
        # Server-side mean aggregation cost.
        round_comp_cost += len(cluster_updates[0]) * len(cluster_updates)
    round_comp_cost += cluster_agg_cost
    # 5. Broadcast the global model to all vehicles (one message each).
    for v in vehicles:
        v.model = global_model.copy()
        record_comm(1)
    round_metrics['num_clusters'] = len(clusters)
    round_metrics['comm_cost_cumulative'] = comm_cost
    round_metrics['comm_cost_round'] = 0  # placeholder; caller computes the per-round diff
    round_metrics['comp_cost_round'] = round_comp_cost
    return round_metrics

# The sections below implement the remaining requirements: communication-cost
# statistics, the multi-round simulation driver with head switching, result
# recording to CSV, and plotting / strategy comparison.

import matplotlib.pyplot as plt

def plot_results(metrics: Dict[str, List[float]], title: str = 'HFL Convergence'):
    """Plot accuracy, loss and cumulative communication cost over rounds.

    Bug fix: run_simulation emits the key 'comm_cost_cumulative', not
    'comm_cost', so indexing metrics['comm_cost'] raised KeyError on real
    metric dicts. We now prefer 'comm_cost_cumulative' and fall back to the
    legacy 'comm_cost' key.
    """
    comm_series = metrics.get('comm_cost_cumulative', metrics.get('comm_cost', []))
    rounds = range(1, len(metrics['accuracy']) + 1)
    fig, axs = plt.subplots(1, 3, figsize=(15, 4))
    axs[0].plot(rounds, metrics['accuracy'], marker='o')
    axs[0].set_title('Accuracy')
    axs[0].set_xlabel('Round')
    axs[0].set_ylabel('Acc')

    axs[1].plot(rounds, metrics['loss'], marker='x', color='orange')
    axs[1].set_title('Loss')
    axs[1].set_xlabel('Round')
    axs[1].set_ylabel('Loss')

    axs[2].plot(rounds, comm_series, marker='s', color='green')
    axs[2].set_title('Comm Cost (cumulative)')
    axs[2].set_xlabel('Round')
    axs[2].set_ylabel('Messages')

    fig.suptitle(title)
    plt.tight_layout()
    plt.show()

# Main execution lives in the __main__ guard at the bottom of the file.
def try_plot(metrics: Dict[str, List[float]]) -> None:
    """Best-effort wrapper around plot_results for headless environments.

    Bug fix: this try/except previously sat indented at module level (a
    leftover from a relocated main block), which is a SyntaxError and also
    referenced an undefined `metrics` name. Wrapping it in a function keeps
    the original behavior available to callers.
    """
    try:
        plot_results(metrics)
    except Exception as e:
        # Plotting is non-essential; report and continue (e.g. no display backend).
        print('Plot failed (maybe headless env):', e)

_speed_history: Dict[int, List[float]] = defaultdict(list)

def reset_global_state():
    """Reset global mutable state so that multiple strategy runs are independent."""
    global comm_cost, _speed_history
    comm_cost = 0
    _speed_history = defaultdict(list)

def stability_metric(vehicle: Vehicle, window: int = 3) -> float:
    """Lower value means less stable (higher variance in recent speeds)."""
    hist = _speed_history[vehicle.vid]
    if len(hist) < 2:
        return 0.0
    recent = hist[-window:]
    return float(np.var(recent))

def update_speed_history(vehicles: List[Vehicle]):
    for v in vehicles:
        _speed_history[v.vid].append(v.speed)
        if len(_speed_history[v.vid]) > 20:
            _speed_history[v.vid].pop(0)

def maybe_switch_heads(clusters: Dict[int, List[Vehicle]], heads: Dict[int, ClusterHead], var_threshold: float = 0.5, vehicles: List[Vehicle] | None = None):
    """Replace any cluster head whose recent speed variance exceeds the threshold.

    The replacement is the member with the lowest speed variance (ties broken
    by the highest model self-similarity), and it inherits the old head's
    model parameters.

    Bug fix: a promoted head used to be swapped only into the local cluster
    member list, never into the shared fleet list, so the promotion was lost
    to later code iterating over all vehicles. Pass the fleet via the new
    optional, backward-compatible `vehicles` argument to complete the swap.

    Args:
        clusters: cluster_id -> member Vehicles.
        heads: cluster_id -> current ClusterHead; mutated in place.
        var_threshold: speed-variance level above which the head is replaced.
        vehicles: optional shared fleet list; promoted heads also replace
            their old object there when provided.
    """
    for cid, members in clusters.items():
        current_head = heads[cid]
        var = stability_metric(current_head)
        if var <= var_threshold:
            continue  # current head is stable enough
        candidates = [m for m in members if m.vid != current_head.vid]
        if not candidates:
            continue  # singleton cluster: nobody to promote
        def candidate_score(v: Vehicle):
            # Prefer low speed variance, then high model self-similarity.
            return (stability_metric(v), -v.similarity_with_prev())
        new_head = min(candidates, key=candidate_score)
        if not isinstance(new_head, ClusterHead):
            promoted = ClusterHead(new_head.vid, new_head.speed, new_head.data, len(new_head.model))
            promoted.model = current_head.model.copy()  # transfer the cluster model
            promoted.prev_model = current_head.prev_model.copy()
            members[members.index(new_head)] = promoted
            if vehicles is not None:
                # Complete the promotion in the shared fleet list as well.
                for i, v in enumerate(vehicles):
                    if v.vid == new_head.vid:
                        vehicles[i] = promoted
                        break
            heads[cid] = promoted
        else:
            # Already a ClusterHead: just hand over the model parameters.
            new_head.model = current_head.model.copy()
            heads[cid] = new_head

# Multi-round simulation driver and result recording.

def evaluate_global(vehicles: List[Vehicle]) -> Tuple[float, float]:
    """Evaluate the shared model on the union of all local datasets.

    The pooled local data serves as a proxy test set; the first vehicle's
    model is used since every vehicle holds an identical copy after the
    broadcast step. Returns (accuracy, binary cross-entropy loss), or
    (0.0, 0.0) when there is no data at all.
    """
    feature_blocks = [v.data[0] for v in vehicles if len(v.data[0])]
    label_blocks = [v.data[1] for v in vehicles if len(v.data[0])]
    if not feature_blocks:
        return 0.0, 0.0
    X_all = np.vstack(feature_blocks)
    y_all = np.concatenate(label_blocks)
    model = vehicles[0].model
    acc = accuracy(model, X_all, y_all)
    probs = predict_proba(model, X_all)
    # Binary cross-entropy with a small epsilon guarding log(0).
    eps = 1e-9
    bce = -np.mean(y_all * np.log(probs + eps) + (1 - y_all) * np.log(1 - probs + eps))
    return acc, float(bce)

def run_simulation(num_rounds: int, vehicles: List[Vehicle], server: Server, max_cluster_size: int = 5, local_epochs: int = 1, lr: float = 0.05, head_var_threshold: float = 0.5, head_switch: bool = True) -> Dict[str, List[float]]:
    """Run `num_rounds` of hierarchical FL and collect per-round metrics.

    NOTE(review): simulate_round re-forms clusters at the start of every
    round, so the heads promoted by maybe_switch_heads here are not carried
    into the next round's clustering — the switch mainly exercises the
    switching logic. Confirm whether persistence is intended.

    Returns a dict of per-round lists: accuracy, loss, round/cumulative
    communication cost, round/cumulative computation cost, cluster count.
    """
    global comm_cost
    metric_keys = ('accuracy', 'loss', 'comm_cost_round', 'comm_cost_cumulative',
                   'comp_cost_round', 'comp_cost_cumulative', 'num_clusters')
    history: Dict[str, List[float]] = {key: [] for key in metric_keys}
    total_comp = 0.0
    last_comm_total = comm_cost
    for _ in range(num_rounds):
        update_speed_history(vehicles)
        info = simulate_round(vehicles, server, max_cluster_size=max_cluster_size, local_epochs=local_epochs, lr=lr)
        if head_switch:
            # Optional post-round head switching on freshly formed clusters.
            clusters, heads = form_clusters(vehicles, max_cluster_size=max_cluster_size)
            maybe_switch_heads(clusters, heads, var_threshold=head_var_threshold)
        acc, loss = evaluate_global(vehicles)
        history['accuracy'].append(acc)
        history['loss'].append(loss)
        comm_total = info['comm_cost_cumulative']
        history['comm_cost_round'].append(comm_total - last_comm_total)
        history['comm_cost_cumulative'].append(comm_total)
        last_comm_total = comm_total
        history['num_clusters'].append(info['num_clusters'])
        comp_this_round = info.get('comp_cost_round', 0.0)
        total_comp += comp_this_round
        history['comp_cost_round'].append(comp_this_round)
        history['comp_cost_cumulative'].append(total_comp)
    return history

def write_metrics_csv(path: str, metrics: Dict[str, List[float]], strategy: str = 'default'):
    """Write per-round metrics to `path` in wide CSV format with a strategy column.

    One row per round; columns are round, strategy, then the metric keys in
    their dict order. An existing file is overwritten.
    """
    num_rounds = len(next(iter(metrics.values()))) if metrics else 0
    header = ['round', 'strategy'] + list(metrics.keys())
    with open(path, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(header)
        for idx in range(num_rounds):
            writer.writerow([idx + 1, strategy] + [metrics[key][idx] for key in metrics])

def append_metrics_csv(path: str, metrics: Dict[str, List[float]], strategy: str):
    """Append one row per round to a shared CSV, emitting the header when needed.

    Bug fix: the header was previously skipped whenever the file merely
    existed — but compare_strategies truncates the file to zero bytes before
    appending, so the aggregated CSV ended up with no header row at all. The
    header is now written whenever the stream is empty; opening in append
    mode positions the stream at the end, so f.tell() == 0 means new/empty.
    """
    num_rounds = len(next(iter(metrics.values()))) if metrics else 0
    fieldnames = ['round', 'strategy'] + list(metrics.keys())
    with open(path, 'a', newline='') as f:
        writer = csv.writer(f)
        if f.tell() == 0:  # file is new or empty -> header first
            writer.writerow(fieldnames)
        for r in range(num_rounds):
            writer.writerow([r + 1, strategy] + [metrics[k][r] for k in metrics])

def compare_strategies(strategies: Dict[str, Dict[str, Any]], num_rounds: int, base_vehicles: List[Vehicle], model_dim: int) -> str:
    """Run every named strategy on a fresh copy of the fleet and log all metrics.

    Each strategy dict supplies keyword overrides for run_simulation; global
    state is reset between runs so results stay independent. Returns the path
    of the aggregated CSV file.
    """
    csv_path = 'metrics_strategies.csv'
    try:
        open(csv_path, 'w').close()  # truncate any previous results
    except Exception:
        pass
    for name, params in strategies.items():
        reset_global_state()
        # Rebuild the fleet from the base vehicles (fresh models, copied data).
        fresh_fleet: List[Vehicle] = [
            Vehicle(v.vid, v.speed, (v.data[0].copy(), v.data[1].copy()), model_dim)
            for v in base_vehicles
        ]
        server = Server(model_dim)
        results = run_simulation(
            num_rounds=num_rounds,
            vehicles=fresh_fleet,
            server=server,
            max_cluster_size=params.get('max_cluster_size', 5),
            local_epochs=params.get('local_epochs', 1),
            lr=params.get('lr', 0.05),
            head_var_threshold=params.get('head_var_threshold', 0.5),
            head_switch=params.get('head_switch', True),
        )
        append_metrics_csv(csv_path, results, strategy=name)
    return csv_path

if __name__ == '__main__':
    np.random.seed(42)
    num_vehicles = 20
    model_dim = 15

    # Build the base fleet with synthetic linearly-separable local datasets.
    base_vehicles: List[Vehicle] = []
    for vid in range(num_vehicles):
        sample_count = np.random.randint(30, 60)
        features = np.random.randn(sample_count, model_dim)
        true_weights = np.random.randn(model_dim)
        class_probs = sigmoid(features @ true_weights)
        labels = (class_probs > 0.5).astype(int)
        velocity = float(np.random.uniform(5, 30))
        base_vehicles.append(Vehicle(vid, velocity, (features, labels), model_dim))

    # Single run with default hyper-parameters on a deep copy of the fleet.
    reset_global_state()
    fleet_copy = [
        Vehicle(v.vid, v.speed, (v.data[0].copy(), v.data[1].copy()), model_dim)
        for v in base_vehicles
    ]
    server_single = Server(model_dim)
    metrics_default = run_simulation(num_rounds=10, vehicles=fleet_copy, server=server_single, max_cluster_size=5, local_epochs=1, lr=0.05)
    write_metrics_csv('metrics.csv', metrics_default, strategy='default')
    print('Single strategy metrics written to metrics.csv')
    print('Final Acc (default):', metrics_default['accuracy'][-1])

    # Compare several clustering / training strategies on the same base fleet.
    strategies = {
        'default': {},
        'larger_clusters': {'max_cluster_size': 8},
        'no_head_switch': {'head_switch': False},
        'more_local_epochs': {'local_epochs': 2},
    }
    csv_multi = compare_strategies(strategies, num_rounds=10, base_vehicles=base_vehicles, model_dim=model_dim)
    print(f'Multi-strategy metrics written to {csv_multi}')
