import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from utils import instantiate_from_config
from models.segment.segment_utils import metric_mPA, metric_mIOU


class XuUNet(pl.LightningModule):
    """LightningModule wrapping a UNet (built via ``unet_config``) for
    semantic segmentation.

    Batches are dicts with keys ``"raw"`` (input image) and ``"gt"``
    (target mask); both are converted to channels-first float tensors by
    :meth:`get_input`. :meth:`forward` returns per-class softmax
    probabilities.

    Args:
        ckpt_path: optional checkpoint path to restore weights from.
        ignore_keys: state_dict key prefixes to drop when restoring.
        monitor: metric name for checkpoint callbacks (optional).
        unet_config: config consumed by ``instantiate_from_config``.
        scheduler_config: kwargs for ``ExponentialLR``; no scheduler if None.
        automatic_optimization: if False, ``training_step`` drives the
            optimizer (and the scheduler, via ``on_train_batch_end``)
            manually.
    """

    def __init__(
        self,
        *,
        ckpt_path,
        ignore_keys,
        monitor,
        unet_config,
        scheduler_config=None,
        automatic_optimization=True,
    ):
        super().__init__()
        self.save_hyperparameters()
        self.automatic_optimization = automatic_optimization
        self.scheduler_config = scheduler_config
        self.model = instantiate_from_config(unet_config)
        if monitor is not None:
            self.monitor = monitor
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)

    def init_from_ckpt(self, path, ignore_keys=None):
        """Restore weights from ``path``, dropping every state_dict key
        whose name starts with a prefix in ``ignore_keys``.

        Args:
            path: checkpoint file containing a ``"state_dict"`` entry.
            ignore_keys: iterable of key prefixes to delete, or None
                (fixes the original mutable-default ``list()`` and the
                TypeError when None was forwarded from ``__init__``).
        """
        ignore_keys = ignore_keys or []
        sd = torch.load(path, map_location="cpu")["state_dict"]
        for k in list(sd.keys()):
            if any(k.startswith(ik) for ik in ignore_keys):
                print("Deleting key {} from state_dict.".format(k))
                del sd[k]
        # strict=False: tolerate keys that were filtered out above or
        # that do not match this module.
        self.load_state_dict(sd, strict=False)
        print(f"Restored from {path}")

    def forward(self, raw):
        """Run the UNet and return per-class probabilities (softmax over
        the channel dimension)."""
        out = self.model(raw)
        return F.softmax(out, dim=1)

    def training_step(self, batch, batch_idx):
        """One training step; returns the loss so Lightning can backprop
        it when automatic optimization is enabled."""
        x, gt = self.get_input(batch, "raw", "gt")
        y_hat = self(x)
        loss = self.loss_fn(y_hat, gt)
        self.log("train/loss", loss)
        # BUGFIX: the original unconditionally called
        # zero_grad()/loss.backward()/opt.step() even with
        # automatic_optimization=True (the default), double-driving the
        # optimizer (and raising on recent Lightning). Drive it manually
        # only when manual optimization was requested.
        if not self.automatic_optimization:
            opt = self.optimizers()
            opt.zero_grad()
            # manual_backward (not loss.backward) so Lightning applies
            # precision/accelerator scaling correctly.
            self.manual_backward(loss)
            opt.step()
        return loss

    def on_train_batch_end(self, outputs, batch, batch_idx, dataloader_idx):
        """Per-batch LR decay when optimizing manually.

        Under automatic optimization Lightning steps the scheduler
        returned by :meth:`configure_optimizers` itself; stepping here as
        well would decay the LR twice per epoch/batch.
        """
        if self.scheduler_config is not None and not self.automatic_optimization:
            self.lr_schedulers().step()

    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        """Log validation loss and pixel accuracy."""
        x, gt = self.get_input(batch, "raw", "gt")
        y_hat = self(x)
        loss = self.loss_fn(y_hat, gt)
        # Pixel accuracy of the argmax prediction.
        # NOTE(review): get_input returns a (B, 1, H, W) float mask for a
        # 3-D "gt" while argmax is (B, H, W); confirm these broadcast as
        # intended.
        acc = (y_hat.argmax(dim=1) == gt).float().mean()
        self.log("val/loss", loss)
        self.log("val/acc", acc)

    @torch.no_grad()
    def test_step(self, batch, batch_idx):
        """Compute and log segmentation metrics on the test set.

        NOTE(review): assumes a binary problem with foreground = class 1
        — confirm against the dataset's label convention.
        """
        x, gt = self.get_input(batch, "raw", "gt")
        y_hat = self(x)
        pred = y_hat.argmax(dim=1)
        # BUGFIX: the original computed TP == FP and TN == FN (each pair
        # used the identical expression), which made PA identically 0.5
        # and IOU/FWIOU meaningless. Build the real binary confusion
        # matrix instead.
        pos = gt == 1
        neg = gt == 0
        TP = ((pred == 1) & pos).float().sum()
        TN = ((pred == 0) & neg).float().sum()
        FP = ((pred == 1) & neg).float().sum()
        FN = ((pred == 0) & pos).float().sum()

        PA = (TP + TN) / (TP + TN + FP + FN)
        IOU = TP / (TP + FP + FN)
        FWIOU = PA * IOU
        mPA = metric_mPA(y_hat, gt)
        mIOU = metric_mIOU(y_hat, gt)
        self.log_dict({"PA": PA, "IOU": IOU, "FWIOU": FWIOU, "mPA": mPA, "mIOU": mIOU})

    def loss_fn(self, y_hat, y):
        """Dice + BCE loss.

        NOTE(review): ``y_hat`` is the softmax output of :meth:`forward`,
        yet it is passed to ``binary_cross_entropy_with_logits``, i.e. it
        receives a second squashing (sigmoid). Kept as-is to preserve
        training behavior — confirm whether raw logits were intended.
        """
        # Epsilon guards the dice term against 0/0 on an empty batch.
        eps = 1e-8
        dice_loss = 1 - (y_hat * y).sum() / (y_hat.sum() + y.sum() + eps)
        bce_loss = F.binary_cross_entropy_with_logits(y_hat, y)
        return dice_loss + bce_loss

    def get_input(self, batch, *keys):
        """Fetch tensors from ``batch`` as contiguous channels-first
        floats.

        3-D tensors (B, H, W) gain a trailing channel and are permuted to
        (B, 1, H, W); 4-D tensors (B, H, W, C) are permuted to
        (B, C, H, W); anything else is only cast. Missing keys are
        silently skipped, so the returned list may be shorter than
        ``keys``.
        """
        output = []
        for k in keys:
            if k not in batch:
                continue
            x = batch[k]
            if x.dim() == 3:
                x = x[..., None]
            if x.dim() == 4:
                x = x.permute(0, 3, 1, 2)
            output.append(x.to(memory_format=torch.contiguous_format).float())
        return output

    def configure_optimizers(self):
        """AdamW over the UNet parameters, optionally paired with an
        ``ExponentialLR`` scheduler.

        NOTE(review): ``self.learning_rate`` is never set in ``__init__``;
        it is presumably assigned externally (training script or
        Lightning's LR finder) before this hook runs — confirm.
        """
        lr = self.learning_rate
        opt = torch.optim.AdamW(self.model.parameters(), lr=lr)
        if self.scheduler_config is None:
            return opt
        scheduler = torch.optim.lr_scheduler.ExponentialLR(
            opt, **self.scheduler_config
        )
        print(f"Setting up {scheduler.__class__} scheduler...")
        return [opt], [scheduler]

    @torch.no_grad()
    def log_images(self, batch, split, only_inputs=False, **kwargs):
        """Build a dict of images for the image logger, rescaled from
        [0, 1] to [-1, 1].

        NOTE(review): ``self.to_rgb`` is not defined in this class —
        confirm it is provided by a mixin/subclass before relying on the
        ``x.shape[1] > 3`` branch.
        """
        # TODO fix this function
        log = dict()
        if split == "train" or split == "val":
            x, gt = self.get_input(batch, "raw", "gt")
            x = x.to(self.device)
            if not only_inputs:
                yhat = self(x)
                if x.shape[1] > 3:
                    # colorize with random projection
                    x = self.to_rgb(x)
                log["pred"] = yhat[:, 0].unsqueeze(1) * 2 - 1
            log["gt"] = gt.unsqueeze(1) * 2 - 1
            log["raw"] = x
        elif split == "test":
            x, gt = self.get_input(batch, "raw", "gt")
            yhat = self(x)
            log["test"] = yhat[:, 0].unsqueeze(1) * 2 - 1
        return log
