import os

import cv2
import MinkowskiEngine as ME
import numpy as np
import pytorch_lightning as pl
import torch
import torch.optim as optim

from distil.criterion import InfoNCELoss, LovaszLoss
from utils.metrics import compute_IoU_from_cmatrix, confusion_matrix


class LightningDistil(pl.LightningModule):
    """Cross-modal distillation trainer: 2D image network -> sparse 3D point network.

    A sparse point backbone (``model_points``) followed by a projection /
    domain-adaptation head (``model_da``) is trained against per-pixel outputs
    of an image network (``model_images``), using point<->pixel pairings
    provided in each batch. The set of active loss terms is configured via
    ``config["losses"]``: each entry is the NAME of a ``loss_*`` method on this
    class, resolved with ``getattr`` in ``training_step``/``validation_step``,
    which is why every loss method shares the same four-argument signature
    even when some arguments go unused.

    NOTE(review): the sub-module architectures and the exact batch layout are
    defined elsewhere in the project; comments below that depend on them are
    marked as assumptions.
    """

    def __init__(
        self,
        model_points,
        model_images,
        model_classifier,
        model_ori_classifier,
        model_da,
        config,
    ):
        """Store sub-modules and hyper-parameters, build supervised criteria.

        Args:
            model_points: sparse (MinkowskiEngine) backbone for point clouds.
            model_images: 2D network producing a per-pixel feature/logit map
                (NCHW — established by the ``permute(0, 2, 3, 1)`` calls below).
            model_classifier: head mapping point features to class scores.
            model_ori_classifier: head applied to paired image features in the
                KL loss (presumably the original/frozen image classifier —
                TODO confirm).
            model_da: projection head on the point backbone output; optimised
                with its own learning rate ``lr_da``.
            config: dict of hyper-parameters; keys read here: "losses",
                "batch_size", "num_epochs", "ignore_index", "ignore_indexes".
        """
        super().__init__()
        self.model_points = model_points
        self.model_images = model_images
        self.model_classifier = model_classifier
        self.model_ori_classifier = model_ori_classifier
        self.model_da = model_da
        self._config = config
        # Names of loss_* methods to apply (dispatched via getattr).
        self.losses = config["losses"]
        # # number of positive matches in the InfoNCE loss
        # self.num_matches = config["num_matches"]
        self.batch_size = config["batch_size"]
        self.num_epochs = config["num_epochs"]
        # self.superpixel_size = config["superpixel_size"]
        # self.criterion = NCELoss(temperature=config["NCE_temperature"])
        # # self.working_dir = os.path.join(
        # #     config["working_dir"], config["datetime"])
        # # if os.environ.get("LOCAL_RANK", 0) == 0:
        # #     os.makedirs(self.working_dir, exist_ok=True)

        # Single label value ignored by the losses, and the list of class
        # indexes excluded from IoU aggregation in validation_epoch_end.
        self.ignore_index = config["ignore_index"]
        self.ignore_indexes = config["ignore_indexes"]

        # NOTE(review): self.device is typically still "cpu" at construction
        # time (Lightning moves the module later) — confirm LovaszLoss copes
        # with the module being moved to GPU afterwards.
        self.LovaszLoss = LovaszLoss(
            ignore_index=config["ignore_index"],
            device=self.device,
        )
        self.CrossEntropyLoss = torch.nn.CrossEntropyLoss(
            ignore_index=config["ignore_index"],
        )

    def configure_optimizers(self):
        """Build the SGD optimizer (+ cosine-annealing scheduler).

        Two parameter groups: ``model_da`` runs at ``lr_da`` (falling back to
        ``lr`` when unset), everything else at ``lr``. When
        ``train_classifier`` is set, the point classifier's parameters are
        trained alongside the point backbone; otherwise only the backbone.

        Returns:
            ([optimizer], [scheduler]) per the Lightning convention.
        """
        if self._config.get("lr_da", None) is None:
            self._config["lr_da"] = self._config["lr"]
        if not self._config["train_classifier"]:
            optimizer = optim.SGD(
                [
                    {
                        # NOTE: iter(...) is redundant (SGD accepts any
                        # iterable of parameters) but harmless.
                        "params": iter(self.model_da.parameters()),
                        "lr": self._config["lr_da"],
                    },
                    {
                        # No per-group lr: inherits the top-level lr below.
                        "params": iter(self.model_points.parameters()),
                    },
                ],
                lr=self._config["lr"],
                momentum=self._config["sgd_momentum"],
                dampening=self._config["sgd_dampening"],
                weight_decay=self._config["weight_decay"],
            )
        else:
            optimizer = optim.SGD(
                [
                    {
                        "params": iter(self.model_da.parameters()),
                        "lr": self._config["lr_da"],
                    },
                    {
                        # Backbone + classifier trained jointly in one group.
                        "params": list(self.model_points.parameters())
                        + list(self.model_classifier.parameters()),
                    },
                ],
                lr=self._config["lr"],
                momentum=self._config["sgd_momentum"],
                dampening=self._config["sgd_dampening"],
                weight_decay=self._config["weight_decay"],
            )
        # optimizer = optim.SGD(
        #     [{"params": list(self.model_points.parameters()), }],
        #     lr=self._config["lr"],
        #     momentum=self._config["sgd_momentum"],
        #     dampening=self._config["sgd_dampening"],
        #     weight_decay=self._config["weight_decay"],
        # )
        # Anneal lr to ~0 over the full training run (T_max = num_epochs,
        # so the scheduler is presumably stepped once per epoch — confirm).
        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, self.num_epochs)
        # scheduler = optim.lr_scheduler.OneCycleLR(
        #     optimizer, max_lr=self._config["lr"], epochs=self.num_epochs, steps_per_epoch=1, pct_start=.05, div_factor=25, final_div_factor=1e4,)
        return [optimizer], [scheduler]

    def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx):
        """Zero gradients by setting them to None (saves memory vs. zeroing).

        NOTE(review): the ``optimizer_idx`` argument matches the pre-2.0
        Lightning hook signature — confirm against the pinned PL version.
        """
        optimizer.zero_grad(set_to_none=True)

    def training_step(self, batch, batch_idx):
        """One distillation step: forward both modalities, sum active losses."""
        torch.cuda.empty_cache()
        # "sinput_C": coords_batch, "sinput_F": feats_batch
        sparse_input = ME.SparseTensor(batch["sinput_F"], batch["sinput_C"])
        pnt_feats = self.model_points(sparse_input)
        pnt_feats = self.model_da(pnt_feats).F  # .F: get features
        # pnt_feats = self.model_points(sparse_input).F
        img_feats = self.model_images(batch["input_I"])

        # Despite the name, these are raw class scores (logits): fed directly
        # to CrossEntropyLoss / log_softmax downstream.
        pnt_probs = self.model_classifier(pnt_feats)

        # tmp_pred = img_feats.permute(0, 2, 3, 1)[0].argmax(2).cpu()
        # tmp_pred = np.array(tmp_pred, dtype=np.uint8)*15
        # tmp_pred = cv2.applyColorMap(tmp_pred, cv2.COLORMAP_RAINBOW)
        # cv2.imwrite("pred.png", tmp_pred)

        # Free the sparse input early to reduce peak GPU memory before the
        # (potentially memory-hungry) loss computations.
        del batch["sinput_F"]
        del batch["sinput_C"]
        del sparse_input
        # each loss is applied independently on each GPU
        losses = [
            getattr(self, loss)(batch, pnt_feats, img_feats, pnt_probs)
            for loss in self.losses
        ]
        loss = torch.sum(torch.stack(losses))

        torch.cuda.empty_cache()
        self.log(
            "train_loss",
            loss,
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            logger=True,
            batch_size=self.batch_size,
        )
        return loss

    def loss_ce_hard_label(self, batch, pnt_feats, img_feats, pnt_probs):
        """Supervised cross-entropy on point logits vs. hard point labels.

        Unused arguments are kept for the uniform getattr-dispatch signature.
        """
        loss = self.CrossEntropyLoss(pnt_probs, batch["labels"])
        self.log(
            "ce_loss",
            loss,
            on_epoch=True,
            prog_bar=True,
            logger=True,
            sync_dist=True,
            batch_size=self.batch_size,
        )
        return loss

    def loss_lovasz_hard_label(self, batch, pnt_feats, img_feats, pnt_probs):
        """Supervised Lovász loss on point logits vs. hard point labels."""
        loss = self.LovaszLoss(pnt_probs, batch["labels"])
        self.log(
            "lovasz_loss",
            loss,
            on_epoch=True,
            prog_bar=True,
            logger=True,
            sync_dist=True,
            batch_size=self.batch_size,
        )
        return loss

    def loss_MSE_soft_label(self, batch, pnt_feats, img_feats, pnt_probs):
        """MSE between L2-normalised paired point and pixel features.

        ``pairing_points`` indexes rows of ``pnt_feats``; each row of
        ``pairing_images`` indexes into the NCHW image feature map
        (presumably (batch, row, col) — TODO confirm against the dataloader).
        Returns the loss scaled by 10 (empirical weighting).
        """
        pairing_points = batch["pairing_points"]
        pairing_images = batch["pairing_images"]
        # idx = np.random.choice(
        #     pairing_points.shape[0], self.num_matches, replace=False)
        # k = pnt_feats[pairing_points[idx]]
        # m = tuple(pairing_images[idx].T.long())
        k = pnt_feats[pairing_points]
        # Unpack pairing columns into per-dimension index tensors for
        # advanced indexing on the permuted (N, H, W, C) feature map.
        m = tuple(pairing_images.T.long())
        q = img_feats.permute(0, 2, 3, 1)[m]

        k = torch.nn.functional.normalize(k, p=2, dim=1)
        q = torch.nn.functional.normalize(q, p=2, dim=1)

        loss = torch.nn.functional.mse_loss(k, q)
        self.log(
            "mse_loss",
            loss,
            on_epoch=True,
            prog_bar=True,
            logger=True,
            sync_dist=True,
            batch_size=self.batch_size,
        )
        return loss * 10
        # return loss

    def loss_KL_soft_label(self, batch, pnt_feats, img_feats, pnt_probs):
        """KL divergence from image soft labels to point predictions.

        Point logits at paired locations become log-probabilities (the
        ``input`` of F.kl_div); paired image features pass through
        ``model_ori_classifier`` and softmax to form the ``target``
        distribution — this matches F.kl_div's (log-input, prob-target)
        contract, with "batchmean" normalisation.
        """
        pairing_points = batch["pairing_points"]
        pairing_images = batch["pairing_images"]
        # idx = np.random.choice(
        #     pairing_points.shape[0], self.num_matches, replace=False)
        # k = pnt_feats[pairing_points[idx]]
        # m = tuple(pairing_images[idx].T.long())
        k = pnt_probs[pairing_points]
        m = tuple(pairing_images.T.long())
        q = img_feats.permute(0, 2, 3, 1)[m]
        q = self.model_ori_classifier(q)

        k = torch.nn.functional.log_softmax(k, dim=1)
        q = torch.nn.functional.softmax(q, dim=1)

        loss = torch.nn.functional.kl_div(k, q, reduction="batchmean")
        self.log(
            "kl_loss",
            loss,
            on_epoch=True,
            prog_bar=True,
            logger=True,
            sync_dist=True,
            batch_size=self.batch_size,
        )
        return loss

    def loss_moment_soft_label(self, batch, pnt_feats, img_feats, pnt_probs):
        """Match class-conditional feature statistics across modalities.

        For each of 11 channels (slice ``1:12`` — presumably the 11 real
        classes, skipping an ignore/background channel 0 — TODO confirm),
        pixels whose image softmax score exceeds a per-class threshold are
        selected; per-class mean and variance of the (L2-normalised) point
        and image features are computed over those selections, and the loss
        is the MSE between the pairwise-cosine-similarity structures of the
        two modalities' class statistics. Returns the mean of the mean-term
        and variance-term, scaled by 5 (empirical weighting).
        """
        pairing_points = batch["pairing_points"]
        pairing_images = batch["pairing_images"]
        # idx = np.random.choice(
        #     pairing_points.shape[0], self.num_matches, replace=False)
        # pnt = pnt_feats[pairing_points[idx]]
        # m = tuple(pairing_images[idx].T.long())
        pnt_1st = pnt_feats[pairing_points]
        m = tuple(pairing_images.T.long())
        img_1st = img_feats.permute(0, 2, 3, 1)[m]

        # Keep only the 11 class channels of interest.
        pnt_1st = pnt_1st[:, 1:12]
        img_1st = img_1st[:, 1:12]

        # Unnormalised image scores are kept for the confidence mask below.
        pnt_ori = pnt_1st.clone()
        img_ori = img_1st.clone()

        pnt_1st = pnt_1st / torch.norm(pnt_1st, p=2, dim=1, keepdim=True)
        img_1st = img_1st / torch.norm(img_1st, p=2, dim=1, keepdim=True)

        # Per-class confidence thresholds for selecting reliable pixels
        # (presumably tuned offline per class — TODO confirm provenance).
        thresholds = [
            0.9545,
            0.8703,
            0.7039,
            0.9643,
            0.7264,
            0.8611,
            0.9227,
            0.9153,
            0.4325,
            0.8092,
            0.9970,
        ]

        # one_hot_mask[c, i] == 1 iff paired sample i is confidently class c
        # according to the image branch.
        one_hot_mask = torch.zeros((11, img_1st.shape[0]), device=self.device)
        for clsi in range(11):
            msk_i = (
                torch.nn.functional.softmax(img_ori, dim=1)[:, clsi] > thresholds[clsi]
            )
            one_hot_mask[clsi, msk_i] = 1.0

        pnt_2nd = torch.square(pnt_1st)
        # pnt_3rd = torch.pow(pnt_1st, exponent=3)

        img_2nd = torch.square(img_1st)
        # img_3rd = torch.pow(img_1st, exponent=3)

        # Guards against division by zero for classes with no selected pixels.
        epsilon = 1e-8

        # Masked per-class means: (11, D) = (11, N) @ (N, D) / counts.
        pnt_mean = (one_hot_mask @ pnt_1st) / (
            torch.sum(one_hot_mask, 1)[:, None] + epsilon
        )
        pnt_2nd_mom_ori = (one_hot_mask @ pnt_2nd) / (
            torch.sum(one_hot_mask, 1)[:, None] + epsilon
        )
        # pnt_3rd_mom_ori = (one_hot_mask @ pnt_3rd) / \
        #     (torch.sum(one_hot_mask, 1)[:, None] + epsilon)

        # Keep only classes that selected at least one sample.
        mask = torch.where(torch.sum(one_hot_mask, 1) > 0)

        pnt_mean = pnt_mean[mask]
        pnt_2nd_mom_ori = pnt_2nd_mom_ori[mask]
        # pnt_3rd_mom_ori = pnt_3rd_mom_ori[mask]

        # Var = E[x^2] - (E[x])^2.
        pnt_var = pnt_2nd_mom_ori - torch.square(pnt_mean)
        # pnt_skew = (pnt_3rd_mom_ori -
        #             3*pnt_mean*pnt_var -
        #             torch.pow(pnt_mean, exponent=3))/torch.pow(pnt_var, 1.5)
        img_mean = (one_hot_mask @ img_1st) / (
            torch.sum(one_hot_mask, 1)[:, None] + epsilon
        )
        img_2nd_mom_ori = (one_hot_mask @ img_2nd) / (
            torch.sum(one_hot_mask, 1)[:, None] + epsilon
        )
        # img_3rd_mom_ori = (one_hot_mask @ img_3rd) / \
        #     (torch.sum(one_hot_mask, 1)[:, None] + epsilon)

        img_mean = img_mean[mask]
        img_2nd_mom_ori = img_2nd_mom_ori[mask]
        # img_3rd_mom_ori = img_3rd_mom_ori[mask]

        img_var = img_2nd_mom_ori - torch.square(img_mean)
        # img_skew = (img_3rd_mom_ori -
        #             3*img_mean*img_var -
        #             torch.pow(img_mean, exponent=3))/torch.pow(img_var, 1.5)

        # All unordered pairs of surviving classes: the loss compares the
        # similarity STRUCTURE between class statistics, not the statistics
        # themselves.
        indexes = torch.combinations(torch.arange(pnt_mean.shape[0]), 2)

        cos_sim_pnt_mean = torch.nn.functional.cosine_similarity(
            pnt_mean[indexes[:, 0]], pnt_mean[indexes[:, 1]], dim=1
        )
        cos_sim_pnt_var = torch.nn.functional.cosine_similarity(
            pnt_var[indexes[:, 0]], pnt_var[indexes[:, 1]], dim=1
        )
        # cos_sim_pnt_skew = torch.nn.functional.cosine_similarity(
        #     pnt_skew[indexes[:, 0]], pnt_skew[indexes[:, 1]], dim=1)

        cos_sim_img_mean = torch.nn.functional.cosine_similarity(
            img_mean[indexes[:, 0]], img_mean[indexes[:, 1]], dim=1
        )
        cos_sim_img_var = torch.nn.functional.cosine_similarity(
            img_var[indexes[:, 0]], img_var[indexes[:, 1]], dim=1
        )
        # cos_sim_img_skew = torch.nn.functional.cosine_similarity(
        #     img_skew[indexes[:, 0]], img_skew[indexes[:, 1]], dim=1)

        l_mean = torch.nn.functional.mse_loss(cos_sim_pnt_mean, cos_sim_img_mean)
        l_var = torch.nn.functional.mse_loss(cos_sim_pnt_var, cos_sim_img_var)
        # l_skew = torch.nn.functional.mse_loss(cos_sim_pnt_skew,
        #                                       cos_sim_img_skew)

        self.log(
            "mom_mean",
            l_mean,
            on_epoch=True,
            prog_bar=True,
            logger=True,
            sync_dist=True,
            batch_size=self.batch_size,
        )
        self.log(
            "mom_var",
            l_var,
            on_epoch=True,
            prog_bar=True,
            logger=True,
            sync_dist=True,
            batch_size=self.batch_size,
        )
        # self.log(
        #     "mom_skew", l_skew, on_epoch=True, prog_bar=True, logger=True, sync_dist=True, batch_size=self.batch_size
        # )

        loss = torch.mean(torch.stack((l_mean, l_var)))
        self.log(
            "mom_loss",
            loss,
            on_epoch=True,
            prog_bar=True,
            logger=True,
            sync_dist=True,
            batch_size=self.batch_size,
        )

        # return loss*(1-self.current_epoch/self.num_epochs)*5
        return loss * 5

    def validation_step(self, batch, batch_idx):
        """Validate: supervised loss + confusion matrix, or distillation loss.

        When the batch has labels, computes the Lovász loss and a confusion
        matrix (mapping per-voxel predictions back to full-resolution points
        via ``inverse_indexes`` with a running per-scan offset). Otherwise
        re-applies the configured distillation losses.

        Returns:
            ``(loss, c_matrix)`` when labels were available, else ``loss``
            alone — the try/except NameError below is how the two cases are
            distinguished. NOTE(review): the inconsistent return type is
            intentional and consumed by ``validation_epoch_end``.
        """
        torch.cuda.empty_cache()
        sparse_input = ME.SparseTensor(batch["sinput_F"], batch["sinput_C"])
        pnt_feats = self.model_points(sparse_input)
        pnt_feats = self.model_da(pnt_feats).F  # .F: get features
        # pnt_feats = self.model_points(sparse_input).F
        img_feats = self.model_images(batch["input_I"])

        pnt_probs = self.model_classifier(pnt_feats)

        if batch["labels"] is not None:
            loss = self.loss_lovasz_hard_label(batch, pnt_feats, img_feats, pnt_probs)

            # NOTE(review): this softmax is redundant for the argmax below
            # (softmax is monotonic) — left as-is.
            pnt_probs = pnt_probs.softmax(1)
            preds = []
            offset = 0
            pnt_probs = pnt_probs.argmax(1)
            for i, lb in enumerate(batch["len_batch"]):
                # inverse_indexes are per-scan, so shift them by the number
                # of voxels consumed by previous scans in the batch.
                preds.append(pnt_probs[batch["inverse_indexes"][i] + offset])
                offset += lb
            preds = torch.cat(preds, dim=0)
            c_matrix = confusion_matrix(
                preds, batch["evaluation_labels"], self._config["classes"]
            )

        else:
            losses = [
                getattr(self, loss)(batch, pnt_feats, img_feats, pnt_probs)
                for loss in self.losses
            ]
            loss = torch.sum(torch.stack(losses))

        self.log(
            "val_loss",
            loss,
            on_epoch=True,
            prog_bar=True,
            logger=True,
            sync_dist=True,
            batch_size=self.batch_size,
        )

        # c_matrix only exists on the labelled branch; NameError signals the
        # unlabelled case.
        try:
            return loss, c_matrix
        except NameError:
            return loss

    def validation_epoch_end(self, outputs):
        """Aggregate confusion matrices and log IoU metrics.

        Only runs the IoU computation when ``validation_step`` returned
        (loss, c_matrix) pairs. Matrices are summed over batches, then over
        ranks via ``all_gather``, so logged metrics are global.

        NOTE(review): this hook name is the pre-2.0 Lightning API — confirm
        against the pinned PL version.
        """
        if len(outputs[0]) == 2:
            c_matrix = sum([o[1] for o in outputs])

            # Reduce across distributed ranks.
            c_matrix = torch.sum(self.all_gather(c_matrix), 0)

            m_IoU, fw_IoU, per_class_IoU = compute_IoU_from_cmatrix(
                c_matrix, self.ignore_indexes
            )

            self.log(
                "m_IoU",
                m_IoU,
                prog_bar=True,
                logger=True,
                sync_dist=True,
                rank_zero_only=True,
            )
            self.log(
                "fw_IoU",
                fw_IoU,
                prog_bar=True,
                logger=True,
                sync_dist=True,
                rank_zero_only=True,
            )
            for i in range(len(per_class_IoU)):
                self.log(
                    f"IoU_{i}",
                    per_class_IoU[i],
                    prog_bar=False,
                    logger=True,
                    sync_dist=True,
                    rank_zero_only=True,
                )