import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import instantiate_from_config
from torchvision.ops import RoIAlign, nms, generalized_box_iou_loss
from torchvision.utils import draw_bounding_boxes


class EmamiCNN15C2F1(nn.Module):
    """Three-conv / one-FC binary classifier.

    Sized for 3-channel 32x32 inputs: the first conv (kernel 4, stride 2,
    no padding) reduces 32 -> 15, and the two "same" convs keep 15x15,
    matching the 20 * 15 * 15 flatten below.  The head emits softmax
    probabilities over 2 classes.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        # Feature extractor kept as a single Sequential in the original
        # module order so parameter names (model.0.weight, ...) remain
        # checkpoint-compatible.
        backbone = [
            nn.Conv2d(3, 10, 4, stride=2),
            nn.Conv2d(10, 20, 5, padding=2),
            nn.ReLU(),
            nn.Conv2d(20, 20, 5, padding=2),
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(20 * 15 * 15, 500),
            nn.ReLU(),
        ]
        self.model = nn.Sequential(*backbone)
        # NOTE(review): this softmax output is fed to F.cross_entropy in the
        # training loop elsewhere in this file, which applies softmax twice —
        # verify that is intended.
        self.head = nn.Sequential(nn.Linear(500, 2), nn.Softmax(dim=1))

    def forward(self, x):
        return self.head(self.model(x))


class EmamiCNN32C2F1(nn.Module):
    """LeNet-style two-conv / one-FC binary classifier.

    Sized for 3-channel 32x32 inputs: two conv+pool stages halve the
    spatial size twice (32 -> 16 -> 8), matching the 16 * 8 * 8 flatten
    below.  The head emits softmax probabilities over 2 classes.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()

        def conv_stage(c_in, c_out):
            # "same" 5x5 conv, ReLU, then 2x spatial downsampling.
            return [
                nn.Conv2d(c_in, c_out, 5, padding=2),
                nn.ReLU(),
                nn.MaxPool2d(2, stride=2),
            ]

        # Same module order as the original Sequential, so parameter names
        # (model.0.weight, ...) remain checkpoint-compatible.
        self.model = nn.Sequential(
            *conv_stage(3, 6),
            *conv_stage(6, 16),
            nn.Flatten(),
            nn.Linear(16 * 8 * 8, 768),
            nn.ReLU(),
        )
        self.head = nn.Sequential(nn.Linear(768, 2), nn.Softmax(dim=1))

    def forward(self, x):
        return self.head(self.model(x))


class EmamiCNN32C2F3(nn.Module):
    """LeNet-style two-conv / three-FC binary classifier.

    Sized for 3-channel 32x32 inputs: two conv+pool stages halve the
    spatial size twice (32 -> 16 -> 8), feeding a 1024 -> 120 -> 84 -> 10
    MLP (each layer ReLU-activated).  The head emits softmax probabilities
    over 2 classes.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        # Same module order as the original Sequential, so parameter names
        # (model.0.weight, ...) remain checkpoint-compatible.
        layers = [
            nn.Conv2d(3, 6, 5, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Conv2d(6, 16, 5, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Flatten(),
        ]
        for f_in, f_out in ((16 * 8 * 8, 120), (120, 84), (84, 10)):
            layers.append(nn.Linear(f_in, f_out))
            layers.append(nn.ReLU())
        self.model = nn.Sequential(*layers)
        self.head = nn.Sequential(nn.Linear(10, 2), nn.Softmax(dim=1))

    def forward(self, x):
        features = self.model(x)
        return self.head(features)


class Emami(pl.LightningModule):
    """Two-stage crater classifier: an external detector proposes boxes,
    RoIAlign crops them, and a small CNN classifies each crop as
    crater (class 0) / non-crater (class 1).

    Args:
        ckpt_path: optional checkpoint to (partially) restore from.
        ignore_keys: state_dict key prefixes to drop when restoring.
        monitor: metric name for checkpoint/LR callbacks, or None.
        detect_head_config: instantiate_from_config spec for the detector.
        cls_head_config: instantiate_from_config spec for the classifier;
            its "params" are also forwarded to RoIAlign, so it must carry
            RoIAlign's ``output_size`` — TODO confirm against the configs.
        scheduler_config: kwargs for ExponentialLR, or None for no scheduler.
        automatic_optimization: let Lightning drive backward/step (default)
            or do it manually in training_step.
    """

    def __init__(
        self,
        *,
        ckpt_path,
        ignore_keys,
        monitor,
        detect_head_config,
        cls_head_config,
        scheduler_config=None,
        automatic_optimization=True,
    ):
        super().__init__()
        self.save_hyperparameters()
        self.automatic_optimization = automatic_optimization
        self.scheduler_config = scheduler_config
        self.model = instantiate_from_config(cls_head_config)
        self.detector = instantiate_from_config(detect_head_config)
        # spatial_scale=1: boxes are given in input-image coordinates.
        self.roi_align = RoIAlign(
            **cls_head_config["params"], spatial_scale=1, sampling_ratio=-1
        )
        if monitor is not None:
            self.monitor = monitor
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)

    def init_from_ckpt(self, path, ignore_keys=()):
        """Restore weights from ``path`` (non-strict), dropping any key
        whose name starts with a prefix in ``ignore_keys``."""
        sd = torch.load(path, map_location="cpu")["state_dict"]
        for k in list(sd.keys()):
            if any(k.startswith(ik) for ik in ignore_keys):
                print("Deleting key {} from state_dict.".format(k))
                del sd[k]
        self.load_state_dict(sd, strict=False)
        print(f"Restored from {path}")

    def forward(self, raw):
        """Classify pre-cropped patches with the classifier head only."""
        return self.model(raw)

    def training_step(self, batch, batch_idx):
        x, cls = self.get_input(batch, "raw", "cls")
        y_hat = self(x)
        # NOTE(review): the classifier heads in this file end in Softmax,
        # while cross_entropy expects raw logits — softmax is applied
        # twice; confirm this is intended.
        loss = F.cross_entropy(y_hat, cls.long())
        self.log("train/loss", loss)
        if self.automatic_optimization:
            # Lightning performs zero_grad/backward/step itself; the
            # original stepped manually here unconditionally, which
            # double-applies gradients in automatic mode.
            return loss
        opt = self.optimizers()
        opt.zero_grad()
        # manual_backward (not loss.backward) so precision/AMP plugins work.
        self.manual_backward(loss)
        opt.step()
        return loss

    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        """Log validation cross-entropy and accuracy."""
        x, cls = self.get_input(batch, "raw", "cls")
        y_hat = self(x)
        loss = F.cross_entropy(y_hat, cls.long())
        acc = (y_hat.argmax(dim=1) == cls).float().mean()
        self.log("val/loss", loss)
        self.log("val/acc", acc)

    @torch.no_grad()
    def test_step(self, batch, batch_idx):
        # TODO: detection evaluation (e.g. generalized_box_iou_loss against
        # the "gt" boxes) is not implemented yet.
        x, cls = self.get_input(batch, "raw", "gt")

    @torch.no_grad()
    def predict(self, x):
        """Run detector + classifier on a batch and yield each image with
        its surviving crater boxes drawn on it, mapped back to [-1, 1]."""
        bboxes = list(self.detector(x))
        try:
            roi = self.roi_align(x, bboxes)
            y_hat = self(roi)
            bboxes = self.refine_bboxes(bboxes, y_hat)
            for img, bbox in zip(x, bboxes):
                # [-1, 1] float -> uint8 for torchvision's drawing utility.
                img = ((img + 1) * 127.5).to(dtype=torch.uint8)
                drawn = draw_bounding_boxes(img, bbox, colors=(240, 10, 157))
                yield drawn.float() / 127.5 - 1
        except Exception as e:
            # Best-effort: empty detections make roi_align/nms raise for the
            # whole batch.  The original silently swallowed every error
            # (bare except: pass); at least report what was skipped.
            print(f"predict: skipped remaining images ({e})")

    def refine_bboxes(self, bboxes, yhat):
        """Keep only boxes classified as crater (class 0), NMS-suppress
        overlaps (IoU > 0.1), and yield one filtered tensor per image.

        ``yhat`` holds one probability row per box, concatenated over all
        images in the same order as ``bboxes``.
        """
        offset = 0
        for bbox in bboxes:
            n = bbox.shape[0]
            y = yhat[offset : offset + n]
            # BUG FIX: the original never advanced the offset, so every
            # image was scored with the first image's prediction slice.
            offset += n
            keep = y.argmax(dim=1) == 0
            b = bbox[keep]
            ind = nms(b, y[keep, 0], 0.1)
            yield b[ind]

    def union(self, box1, box2):
        """Return the smallest xyxy box enclosing both ``box1`` and ``box2``."""
        stacked = torch.stack([box1, box2])
        lo = stacked.min(dim=0).values
        hi = stacked.max(dim=0).values
        return torch.cat([lo[:2], hi[2:]])

    def get_input(self, batch, *keys):
        """Fetch ``keys`` from ``batch`` as contiguous float NCHW tensors.

        Keys missing from the batch are silently skipped, so callers that
        tuple-unpack the result rely on every requested key being present.
        """
        output = []
        for k in keys:
            if k not in batch:
                continue
            x = batch[k]
            if x.dim() == 3:
                # presumably (N, H, W) grayscale: add channel axis -> NCHW
                # — TODO confirm against the dataloader.
                x = x[..., None].permute(0, 3, 1, 2)
            elif x.dim() == 4:
                # NHWC -> NCHW
                x = x.permute(0, 3, 1, 2)
            output.append(x.to(memory_format=torch.contiguous_format).float())
        return output

    def configure_optimizers(self):
        # ``learning_rate`` is expected to be set externally (e.g. by the
        # training script) before fit() is called.
        lr = self.learning_rate
        opt = torch.optim.AdamW(
            self.model.parameters(),
            lr=lr,
            betas=(0.5, 0.9),
        )
        if self.scheduler_config is None:
            return opt
        scheduler = torch.optim.lr_scheduler.ExponentialLR(
            opt, **self.scheduler_config
        )
        print(f"Setting up {scheduler.__class__} scheduler...")
        return [opt], [scheduler]

    @torch.no_grad()
    def log_images(self, batch, split, only_inputs=False, **kwargs):
        """Collect example images, keyed by category, for image logging.

        TODO fix this function (note carried over from the original).
        """
        log = dict()
        if split in ("train", "val"):
            x, cls = self.get_input(batch, "raw", "cls")
            x = x.to(self.device)
            if not only_inputs:
                yhat = self(x)
                if x.shape[1] > 3:
                    # colorize with random projection
                    x = self.to_rgb(x)
                log["pred/crater"] = x[yhat.argmax(dim=1) == 0]
                log["pred/non_crater"] = x[yhat.argmax(dim=1) == 1]
            log["gt/crater"] = x[cls == 0]
            log["gt/non_crater"] = x[cls == 1]
        elif split == "test":
            x, _ = self.get_input(batch, "raw", "gt")
            # Renamed loop variable: the original shadowed `x` inside the
            # loop while still iterating self.predict(x).
            annotated = list(self.predict(x))
            if annotated:
                log["overall"] = torch.stack(annotated)
        return log
