import torch
from tqdm import tqdm

from utils.metrics import compute_IoU

# Full 32-class nuScenes taxonomy, kept for reference only; superseded by
# the merged 16-class (+ "noise") evaluation label set defined below.
# CLASSES_NUSCENES = [
#     "noise",
#     "animal",
#     "human.pedestrian.adult",
#     "human.pedestrian.child",
#     "human.pedestrian.construction_worker",
#     "human.pedestrian.personal_mobility",
#     "human.pedestrian.police_officer",
#     "human.pedestrian.stroller",
#     "human.pedestrian.wheelchair",
#     "movable_object.barrier",
#     "movable_object.debris",
#     "movable_object.pushable_pullable",
#     "movable_object.trafficcone",
#     "static_object.bicycle_rack",
#     "vehicle.bicycle",
#     "vehicle.bus.bendy",
#     "vehicle.bus.rigid",
#     "vehicle.car",
#     "vehicle.construction",
#     "vehicle.emergency.ambulance",
#     "vehicle.emergency.police",
#     "vehicle.motorcycle",
#     "vehicle.trailer",
#     "vehicle.truck",
#     "flat.driveable_surface",
#     "flat.other",
#     "flat.sidewalk",
#     "flat.terrain",
#     "static.manmade",
#     "static.other",
#     "static.vegetation",
#     "vehicle.ego",
# ]

# Merged nuScenes evaluation classes: index 0 is the "noise"/ignore class,
# indexes 1-16 are the standard 16 classes used for lidar-seg benchmarks.
# Index order must match the label ids produced by the dataloader masks.
CLASSES_NUSCENES = [
    "noise",
    "barrier",
    "bicycle",
    "bus",
    "car",
    "construction_vehicle",
    "motorcycle",
    "pedestrian",
    "traffic_cone",
    "trailer",
    "truck",
    "driveable_surface",
    "other_flat",
    "sidewalk",
    "terrain",
    "manmade",
    "vegetation",
]


def evaluate(model, classifier, dataloader, config):
    """
    Evaluate a downstream segmentation head and print IoU metrics.

    Runs `model` (feature extractor) followed by `classifier` (per-pixel
    head) over every batch of `dataloader`, accumulates the flattened
    predictions and ground-truth masks on CPU, then computes per-class IoU,
    mIoU and fwIoU via `compute_IoU`. The per-class breakdown is printed
    only for the nuImages dataset; mIoU and fwIoU are always printed.

    Args:
        model: backbone producing (B, C, H, W) feature maps.
        classifier: head mapping permuted (B, H, W, C) features to
            per-pixel class scores.
        dataloader: yields dicts with keys "ims" (images) and "msks"
            (integer label masks).
        config: dict with keys "classes" (number of classes),
            "ignore_indexes" (label ids excluded from the metrics) and
            "dataset" (dataset name, e.g. "nuimages").

    Returns:
        The mean IoU over the evaluated classes.
    """
    model.eval()
    # Bug fix: the head must also be switched to inference mode, otherwise
    # any dropout/batch-norm layers it contains keep training behavior.
    classifier.eval()
    with torch.no_grad():
        full_predictions = []
        ground_truth = []
        for batch in tqdm(dataloader):
            # NOTE(review): device is hard-coded to GPU 0 — confirm this
            # matches the training/launch setup before multi-GPU use.
            img_feats = model(batch["ims"].to(0)).permute(0, 2, 3, 1)
            img_probs = classifier(img_feats)

            torch.cuda.empty_cache()

            # Flatten (B, H, W) logits/labels to 1-D tensors and move them
            # to CPU so the whole epoch fits in host memory.
            full_predictions.append(
                img_probs.flatten(start_dim=0, end_dim=2).argmax(1).cpu()
            )
            ground_truth.append(torch.flatten(batch["msks"]).cpu())

        m_IoU, fw_IoU, per_class_IoU = compute_IoU(
            torch.cat(full_predictions),
            torch.cat(ground_truth),
            config["classes"],
            ignore_indexes=config["ignore_indexes"],
        )
        # Class names actually evaluated (ignored label ids filtered out).
        classes = [
            name
            for i, name in enumerate(CLASSES_NUSCENES)
            if i not in config["ignore_indexes"]
        ]
        # Bug fix: the "Per class IoU:" header used to be printed
        # unconditionally, leaving a dangling header when the per-class
        # values below were skipped for non-nuImages datasets.
        if config["dataset"].lower() == "nuimages":
            print("Per class IoU:")
            print(
                *[
                    f"{name:40} - {iou:.3f}"
                    for name, iou in zip(classes, per_class_IoU.numpy())
                ],
                sep="\n",
            )
        print()
        print(f"mIoU: {m_IoU}")
        print(f"fwIoU: {fw_IoU}")

    return m_IoU
