import os

import pytorch_lightning as pl
import torch
import torch.optim as optim
from MinkowskiEngine import SparseTensor
from pytorch_lightning.utilities import rank_zero_only

from downstream.criterion import DownstreamLoss
from utils.metrics import compute_IoU_from_cmatrix, confusion_matrix


class LightningDownstream(pl.LightningModule):
    """Fine-tune a sparse-convolutional point-cloud backbone together with a
    per-point classification head for semantic segmentation.

    Parameters
    ----------
    model_points:
        Sparse backbone; called on a MinkowskiEngine ``SparseTensor`` and
        expected to return a sparse tensor whose ``.F`` holds per-point
        features.
    model_classifier:
        Head mapping per-point features to per-class scores.
    config:
        Dict providing ``batch_size``, ``ignore_index``, ``ignore_indexes``,
        ``classes``, ``model_n_out``, ``loss``, ``lr``, ``weight_decay``,
        ``sgd_momentum``, ``sgd_dampening``, ``num_epochs`` and
        ``freeze_layers``; optionally ``lr_head``, ``weight_decay_head``,
        ``optimizer`` and ``scheduler``.
    """

    def __init__(self, model_points, model_classifier, config):
        super().__init__()
        self.model_points = model_points
        self.model_classifier = model_classifier
        self.batch_size = config["batch_size"]
        self._config = config
        # Class index excluded from the loss (and zeroed out at evaluation).
        self.ignore_index = config["ignore_index"]
        # Class indexes excluded from the IoU computation.
        self.ignore_indexes = config["ignore_indexes"]
        self.n_classes = config["classes"]
        self.n_out = config["model_n_out"]
        if config["loss"].lower() == "lovasz":
            # NOTE(review): self.device is still "cpu" at construction time
            # (Lightning moves the module to the accelerator later) — confirm
            # DownstreamLoss tolerates the subsequent device transfer.
            self.criterion = DownstreamLoss(
                ignore_index=config["ignore_index"],
                device=self.device,
            )
        else:
            self.criterion = torch.nn.CrossEntropyLoss(
                ignore_index=config["ignore_index"],
            )

    def configure_optimizers(self):
        """Build the optimizer and the epoch-based LR scheduler.

        When ``lr_head`` is set, SGD is used with two parameter groups so the
        classification head trains with its own learning rate / weight decay
        while the trunk inherits the top-level values. Otherwise a single
        AdamW or SGD optimizer covers both sub-models. The scheduler is
        StepLR (one decay at 90% of training) or cosine annealing over
        ``num_epochs``.
        """
        if self._config.get("lr_head", None) is not None:
            print("Use different learning rates between the head and trunk.")
            weight_decay = self._config["weight_decay"]
            # Head-specific weight decay falls back to the trunk value.
            weight_decay_head = (
                self._config["weight_decay_head"]
                if self._config["weight_decay_head"] is not None
                else weight_decay
            )
            optimizer = optim.SGD(
                [
                    {
                        "params": self.model_classifier.parameters(),
                        "lr": self._config["lr_head"],
                        "weight_decay": weight_decay_head,
                    },
                    # Trunk group: inherits the top-level lr / weight_decay.
                    {"params": self.model_points.parameters()},
                ],
                lr=self._config["lr"],
                momentum=self._config["sgd_momentum"],
                dampening=self._config["sgd_dampening"],
                weight_decay=self._config["weight_decay"],
            )
        elif self._config.get("optimizer") == "adam":
            print("Optimizer: AdamW")
            optimizer = optim.AdamW(
                list(self.model_points.parameters())
                + list(self.model_classifier.parameters()),
                lr=self._config["lr"],
                weight_decay=self._config["weight_decay"],
            )
        else:
            print("Optimizer: SGD")
            optimizer = optim.SGD(
                list(self.model_points.parameters())
                + list(self.model_classifier.parameters()),
                lr=self._config["lr"],
                momentum=self._config["sgd_momentum"],
                dampening=self._config["sgd_dampening"],
                weight_decay=self._config["weight_decay"],
            )

        if self._config.get("scheduler") == "steplr":
            print("Scheduler: StepLR")
            scheduler = torch.optim.lr_scheduler.StepLR(
                optimizer,
                int(0.9 * self._config["num_epochs"]),
            )
        else:
            print("Scheduler: Cosine")
            scheduler = optim.lr_scheduler.CosineAnnealingLR(
                optimizer, self._config["num_epochs"]
            )
        return [optimizer], [scheduler]

    def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx):
        # set_to_none=True is a modest speed-up over zeroing gradient tensors.
        optimizer.zero_grad(set_to_none=True)

    def training_step(self, batch, batch_idx):
        """One training step: sparse forward pass, per-point classification,
        loss on the voxelized labels. Returns the loss for backprop."""
        if self._config["freeze_layers"]:
            # Keep the backbone in eval mode (e.g. frozen batch-norm running
            # stats) when only the head is being trained.
            self.model_points.eval()
        else:
            self.model_points.train()
        sparse_input = SparseTensor(batch["sinput_F"], batch["sinput_C"])
        pnt_feats = self.model_points(sparse_input).F
        pnt_probs = self.model_classifier(pnt_feats)

        loss = self.criterion(pnt_probs, batch["labels"])
        # empty the cache to reduce the memory requirement: ME is known to
        # slowly fill the cache otherwise
        torch.cuda.empty_cache()
        self.log(
            "train_loss",
            loss,
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            logger=True,
            sync_dist=True,
            batch_size=self.batch_size,
        )
        return loss

    def validation_step(self, batch, batch_idx):
        """Compute the validation loss on voxelized labels and a confusion
        matrix on the full-resolution labels (per-voxel predictions are
        mapped back to original points via ``inverse_indexes``).

        Returns ``(loss, c_matrix)``; the matrices are accumulated in
        ``validation_epoch_end``.
        """
        sparse_input = SparseTensor(batch["sinput_F"], batch["sinput_C"])
        pnt_feats = self.model_points(sparse_input).F
        pnt_probs = self.model_classifier(pnt_feats)

        loss = self.criterion(pnt_probs, batch["labels"])
        self.log(
            "val_loss",
            loss,
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            logger=True,
            sync_dist=True,
            batch_size=self.batch_size,
        )

        # Ensure we ignore the index 0
        # (probably not necessary after some training)
        pnt_probs = pnt_probs.softmax(1)
        if self.ignore_index is not None:
            pnt_probs[:, self.ignore_index] = 0.0
        preds = []
        labels = []
        offset = 0
        pnt_probs = pnt_probs.argmax(1)
        # Walk the batch: each scene's per-voxel predictions are expanded to
        # its original points; offset shifts inverse_indexes into the
        # concatenated per-batch prediction tensor.
        for i, lb in enumerate(batch["len_batch"]):
            preds.append(pnt_probs[batch["inverse_indexes"][i] + offset])
            labels.append(batch["evaluation_labels"][i])
            offset += lb
        preds = torch.cat(preds, dim=0)
        labels = torch.cat(labels, dim=0)
        c_matrix = confusion_matrix(preds, labels, self.n_classes)
        return loss, c_matrix

    def validation_epoch_end(self, outputs):
        """Aggregate confusion matrices across batches and ranks, then log
        mean IoU, frequency-weighted IoU and per-class IoU."""
        c_matrix = sum(o[1] for o in outputs)

        # Reduce across distributed ranks before computing the metrics.
        c_matrix = torch.sum(self.all_gather(c_matrix), 0)

        m_IoU, fw_IoU, per_class_IoU = compute_IoU_from_cmatrix(
            c_matrix, self.ignore_indexes
        )

        self.log(
            "m_IoU",
            m_IoU,
            prog_bar=True,
            logger=True,
            sync_dist=True,
            rank_zero_only=True,
        )
        self.log(
            "fw_IoU",
            fw_IoU,
            prog_bar=True,
            logger=True,
            sync_dist=True,
            rank_zero_only=True,
        )
        for i, class_iou in enumerate(per_class_IoU):
            self.log(
                f"IoU_{i}",
                class_iou,
                prog_bar=False,
                logger=True,
                sync_dist=True,
                rank_zero_only=True,
            )
