from torchvision.models.detection.mask_rcnn import FasterRCNN
import torch
from torchvision.models.detection.backbone_utils import _resnet_fpn_extractor
import pytorch_lightning as pl
from torch import nn
from ..misc.utils import instantiate_from_config
import numpy as np
from torchvision.utils import make_grid, draw_bounding_boxes
from torchvision.ops import nms
from ..misc.ema import LitEma
from collections import OrderedDict
import warnings


def collate_fn(batch):
    """Collate dataset samples into stacked image arrays plus per-sample targets.

    Each sample is a dict whose ``"trainA"``/``"trainB"`` entries are image
    arrays; every other key is treated as detection-target data and kept in a
    per-sample dict under ``"targets"``.
    """
    images_a, images_b, targets = [], [], []
    for sample in batch:
        target = {}
        for key, value in sample.items():
            if key == "trainA":
                images_a.append(value)
            elif key == "trainB":
                images_b.append(value)
            else:
                target[key] = value
        targets.append(target)
    return {
        "trainA": np.stack(images_a),
        "trainB": np.stack(images_b),
        "targets": targets,
    }


def KLGaussLoss(
    mu1: torch.Tensor, logvar1: torch.Tensor, mu2: torch.Tensor, logvar2: torch.Tensor
):
    """Return the log of KL(N(mu1, var1) || N(mu2, var2)) for diagonal Gaussians.

    ``logvar1``/``logvar2`` are log-variances. The three KL terms are the
    Mahalanobis distance of the means, the log-determinant difference, and the
    trace of the covariance ratio.

    NOTE(review): the final ``.log()`` is nonstandard for a KL loss and yields
    ``-inf``/NaN when the divergence is <= 0 (identical distributions) —
    presumably intended to compress the loss scale; confirm.
    """
    var2 = logvar2.exp()
    mahalanobis = ((mu1 - mu2).pow(2) / var2).sum()
    logdet_diff = (logvar1 - logvar2).sum()
    trace_term = (logvar1.exp() / var2).sum()
    kl = 0.5 * (mahalanobis - logdet_diff + trace_term - mu1.numel())
    return kl.log()


class Projector(nn.Module):
    """1x1-convolution projection head producing a per-pixel channel distribution.

    Maps an NCHW feature map from ``in_channels`` to ``out_channels`` channels
    through two 1x1 convolutions and normalizes along the channel axis with
    softmax, so each spatial location holds a probability vector.
    """

    def __init__(self, in_channels, out_channels, hidden_channels=256):
        super(Projector, self).__init__()
        self.projector = nn.Sequential(
            nn.Conv2d(in_channels, hidden_channels, 1),
            nn.ReLU(),
            nn.Conv2d(hidden_channels, out_channels, 1),
            nn.Softmax(dim=1),  # normalize over the channel dimension
        )

    def forward(self, x):
        # BUGFIX: the previous implementation flattened x to (B, H*W, C)
        # (channels-last, 3-D) before the Conv2d stack, which requires NCHW
        # input — the module could never run. 1x1 convolutions already act
        # per spatial location, so the feature map is consumed directly.
        return self.projector(x)


class SiameseRCNN(pl.LightningModule):
    """Siamese Faster R-CNN for cross-domain detection.

    A main Faster R-CNN branch (``self.model``) is trained with full detection
    losses on the labelled domain (``trainA``). In addition, ``contrast``
    computes a KL divergence between the Gaussian statistics of the pooled
    backbone features of the labelled and unlabelled domains
    (``trainA`` vs ``trainB``), pushing the feature extractor to produce
    similar feature distributions on both datasets.
    """

    def __init__(
        self,
        backbone_config,
        scheduler_config,
        num_classes,
        learning_rate,
        momentum=0.999,
        ignore_keys=[],
        ckpt_path=None,
        image_key="image",
        monitor=None,
        use_ema=False,
        detect_learn_interval=100,
        automatic_optimization=True,
        **kwargs,
    ):
        """Build the detector and (optionally) restore from a checkpoint.

        Args:
            backbone_config: config consumed by ``instantiate_from_config`` to
                build the ResNet backbone (then wrapped with an FPN).
            scheduler_config: kwargs for ``torch.optim.lr_scheduler.ExponentialLR``
                applied to the backbone optimizer; ``None`` disables it.
            num_classes: number of detection classes passed to ``FasterRCNN``.
            learning_rate: two learning rates — ``learning_rate[0]`` for the
                full detector, ``learning_rate[1]`` for the backbone-only
                optimizer (see ``configure_optimizers``).
            momentum: EMA momentum intended for the siamese backbone update;
                currently unused (the update hook is commented out below).
            ignore_keys: state-dict key prefixes to drop when restoring.
            ckpt_path: optional checkpoint to restore from.
            image_key: hyperparameter recorded for input selection.
            monitor: metric name for checkpointing callbacks, if any.
            use_ema: keep an EMA copy of ``self.model`` weights.
            detect_learn_interval: every N-th batch trains the detector;
                other batches train only the KL alignment. Negative values
                mean "always train the detector".
            automatic_optimization: forwarded to Lightning.
                NOTE(review): ``training_step`` calls ``.backward()`` and
                ``optimizer.step()`` manually, which requires this to be
                ``False`` on recent Lightning versions — confirm the default.
        """
        super(SiameseRCNN, self).__init__()
        # backbone = resnet50(progress=True)
        backbone = instantiate_from_config(backbone_config)
        backbone = _resnet_fpn_extractor(backbone, 5, norm_layer=nn.BatchNorm2d)
        # Main detection branch; keeps all parameters.
        self.model = FasterRCNN(backbone, num_classes)
        # Follower branch: only the backbone is kept, intended to carry the
        # style transfer between the two input datasets.
        # NOTE(review): this wraps the SAME ``backbone`` object as
        # ``self.model``, so ``self.sia_backbone`` aliases the main branch's
        # backbone instead of being an independent copy — confirm intended.
        self.sia_backbone = FasterRCNN(backbone, num_classes).backbone
        # The follower branch takes no gradient updates; its parameters are
        # meant to be momentum-updated, so no autograd graph is needed.
        self.sia_backbone.eval()
        self.save_hyperparameters()
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
            print(f"Restored from {ckpt_path}")
        self.learning_rate = learning_rate
        if monitor is not None:
            self.monitor = monitor
        self.image_key = image_key
        self.scheduler_config = scheduler_config
        self.automatic_optimization = automatic_optimization
        self.use_ema = use_ema
        self.momentum = momentum
        self.detect_learn_interval = detect_learn_interval
        if self.use_ema:
            self.model_ema = LitEma(self.model)
            print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")

    def init_from_ckpt(self, path, ignore_keys=list()):
        """Load a Lightning checkpoint, dropping keys with prefixes in ``ignore_keys``."""
        sd = torch.load(path, map_location="cpu")["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        self.load_state_dict(sd, strict=False)
        print(f"Restored from {path}")

    def post_feature(self, images, targets, features, original_image_sizes=None):
        """Run RPN + ROI heads on precomputed backbone features.

        Args:
            images: the transformed ``ImageList`` (output of ``model.transform``).
            targets: per-image target dicts, or ``None`` at inference.
            features: backbone feature dict (or a single tensor).
            original_image_sizes: pre-transform (h, w) sizes used to rescale
                detections back to input coordinates. BUGFIX: this used to be
                an undefined name (guaranteed ``NameError``); it is now a
                parameter that falls back to the transformed sizes, making the
                postprocess rescale a no-op when omitted.

        Returns:
            ``(losses, detections)`` when scripting, otherwise whatever
            ``model.eager_outputs`` selects for the current train/eval mode.
        """
        if original_image_sizes is None:
            original_image_sizes = images.image_sizes
        if isinstance(features, torch.Tensor):
            features = OrderedDict([("0", features)])
        proposals, proposal_losses = self.model.rpn(images, features, targets)
        detections, detector_losses = self.model.roi_heads(
            features, proposals, images.image_sizes, targets
        )
        detections = self.model.transform.postprocess(detections, images.image_sizes, original_image_sizes)  # type: ignore[operator]

        losses = {}
        losses.update(detector_losses)
        losses.update(proposal_losses)

        if torch.jit.is_scripting():
            if not self.model._has_warned:
                warnings.warn(
                    "RCNN always returns a (Losses, Detections) tuple in scripting"
                )
                self.model._has_warned = True
            return losses, detections
        else:
            return self.model.eager_outputs(losses, detections)

    def get_input(self, batch, keys):
        """Convert HWC image batches to contiguous float NCHW tensors in place.

        Returns the batch values in insertion order: ``[trainA, trainB, targets]``
        (see ``collate_fn``). ``keys`` is currently unused.
        """
        for k, v in batch.items():
            if "train" in k:
                batch[k] = (
                    v.permute(0, 3, 1, 2)
                    .to(memory_format=torch.contiguous_format)
                    .float()
                )
        return list(batch.values())

    def contrast(self, trainA, trainB, targets, stage="train"):
        """Compute the KL feature-alignment loss between the two domains.

        Both domains are passed through the main branch's transform + backbone;
        the per-batch mean and variance of the ``"pool"`` pyramid level are
        treated as diagonal Gaussians and compared with ``KLGaussLoss``.
        """
        assert self.model.training
        # Detection branch forward up to the backbone for both domains.
        A, targets = self.model.transform(trainA, targets)
        features_A = self.model.backbone(A.tensors)
        B, targets = self.model.transform(trainB, targets)
        features_B = self.model.backbone(B.tensors)
        kl_loss_dict = dict()
        # Treat features as elliptical Gaussians; measure their distribution
        # gap with KL divergence and optimize it so the extractor produces
        # similar feature distributions on both datasets.
        # In practice the FPN pyramid is produced by downsampling, so a single
        # level is sufficient.
        # for k, feat_A, feat_B in zip(
        #     features_A.keys(), features_A.values(), features_B.values()
        # ):
        #     # Reduce each pyramid level to mean/variance statistics.
        #     feat_B = feat_B.detach().clone().requires_grad_(True)
        #     mu1 = feat_A.mean(dim=0)
        #     var1 = feat_A.var(dim=0)
        #     mu2 = feat_B.mean(dim=0)
        #     var2 = feat_B.var(dim=0)
        #     kl_loss_dict[f"{stage}/kl_loss/{k}"] = l * KLGaussLoss(
        #         mu1, var1.log(), mu2, var2.log()
        #     )
        # Experiments showed the downsampled "pool" level transfers much
        # better than level "0", which barely detects any craters.
        # features_B["pool"] = features_B["pool"].detach().clone().requires_grad_(True)
        mu1 = features_A["pool"].mean(dim=0)
        var1 = features_A["pool"].var(dim=0)
        mu2 = features_B["pool"].mean(dim=0)
        var2 = features_B["pool"].var(dim=0)
        kl_loss_dict[f"{stage}/kl_loss"] = KLGaussLoss(mu1, var1.log(), mu2, var2.log())
        return kl_loss_dict

    def forward(self, trainA, targets, stage="train"):
        """Run the main detector; return stage-prefixed losses in train mode,
        raw detections in eval mode."""
        if self.model.training:
            loss_dict = self.model(trainA, targets)
            return {f"{stage}/{k}": v for k, v in loss_dict.items()}
        else:
            return self.model(trainA, targets)

    def shared_step(self, batch, batch_idx, stage="train"):
        """Shared train/val step.

        Validation computes detection + KL losses together. Training alternates:
        every ``detect_learn_interval``-th batch trains the detector, the rest
        train only the KL alignment (negative interval => always detector).
        """
        trainA, trainB, targets = self.get_input(batch, self.image_key)
        if stage == "val":
            loss_dict = self(trainA, targets, stage=stage)
            kl_loss_dict = self.contrast(trainA, trainB, targets, stage)
            loss_dict.update(**kl_loss_dict)
            return loss_dict
        else:
            if (
                self.detect_learn_interval < 0
                or batch_idx % self.detect_learn_interval == 0
            ):
                return self(trainA, targets, stage=stage)
            else:
                kl_loss_dict = self.contrast(trainA, trainB, targets, stage)
                return kl_loss_dict

    def on_train_start(self):
        """Copy main-branch weights into the siamese backbone.

        NOTE(review): ``self.model.parameters()`` (full detector) and
        ``self.sia_backbone.parameters()`` (backbone only) have different
        lengths and orderings; ``zip`` truncates silently, so corresponding
        parameters are unlikely to line up — and the two backbones already
        alias each other (see ``__init__``). Confirm this hook is needed.
        """
        for m, sia in zip(self.model.parameters(), self.sia_backbone.parameters()):
            sia.data = m.data.detach().clone()

    def training_step(self, batch, batch_idx):
        """Manually optimize: detector optimizer on detection batches,
        backbone optimizer on KL-alignment batches."""
        loss_dict = self.shared_step(batch, batch_idx, stage="train")
        losses = sum(loss_dict.values())
        self.log_dict(loss_dict, on_step=True, on_epoch=False, sync_dist=True)
        opt_model, opt_sia = self.optimizers()
        if (
            self.detect_learn_interval < 0
            or batch_idx % self.detect_learn_interval == 0
        ):
            opt_model.zero_grad()
            losses.backward()
            opt_model.step()
        else:
            opt_sia.zero_grad()
            losses.backward()
            opt_sia.step()
        self.log(
            "train/loss",
            losses,
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            sync_dist=True,
        )
        return losses

    # def on_train_batch_end(self, losses, batch, batch_idx):
    #     # if self.use_ema:
    #     #     self.model_ema(self.model)
    #     # if batch_idx % self.detect_learn_interval == 0:
    #     for m, sia in zip(self.model.parameters(), self.sia_backbone.parameters()):
    #         sia.data.mul_(self.momentum).add_(m.data, alpha=1 - self.momentum)

    def on_train_epoch_end(self):
        """Step the (per-epoch) LR scheduler and log the current rate."""
        if self.scheduler_config is not None:
            self.lr_schedulers().step()
            self.log(
                "lr",
                self.lr_schedulers().get_last_lr()[0],
                on_epoch=True,
                sync_dist=True,
            )

    def on_validation_start(self) -> None:
        # Keep the detector in train mode so it returns loss dicts (not
        # detections) during validation; gradients are disabled separately.
        self.model.train()

    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        """Log per-epoch validation detection + KL losses."""
        loss_dict = self.shared_step(batch, batch_idx, stage="val")
        losses = sum(loss_dict.values())
        self.log_dict(loss_dict, on_step=False, on_epoch=True, sync_dist=True)
        self.log(
            "val/loss",
            losses,
            on_step=False,
            on_epoch=True,
            sync_dist=True,
        )

    @torch.no_grad()
    def test_step(self, batch, batch_idx):
        """Render prediction grids for the test set."""
        return self.log_images(batch, prefix="test")

    @torch.no_grad()
    def predict_step(self, batch, batch_idx):
        """Run inference on ``batch["image"]`` and draw NMS-filtered boxes.

        Images are assumed to be HWC in [-1, 1] (rescaled to uint8 via
        ``(x + 1) * 127.5`` — TODO confirm against the data pipeline).
        """
        assert not self.model.training
        image = (
            batch.pop("image")
            .permute(0, 3, 1, 2)
            .to(memory_format=torch.contiguous_format)
            .float()
        )
        log = {}
        log["predict/input"] = ((image + 1) * 127.5).to(torch.uint8)
        prediction = self.model(image)
        box = []
        for img, pred in zip(((image + 1) * 127.5).to(torch.uint8), prediction):
            ind = nms(pred["boxes"], pred["scores"], 0.05)
            box.append(
                draw_bounding_boxes(
                    img, boxes=pred["boxes"][ind], colors="blue", width=1
                )
            )
        log["predict/detect"] = box
        return log, (batch, prediction)

    @torch.no_grad()
    def log_images(self, batch, prefix="val", **kwargs):
        """Build image grids: labelled-domain predictions vs targets, and
        unlabelled-domain predictions. Restores train mode afterwards."""
        log = {}
        flag = False
        if self.model.training:
            self.model.eval()
            flag = True
        trainA, trainB, targets = self.get_input(batch, self.image_key)
        # Detect on the labelled domain and draw predictions with labels.
        prediction = self.model(trainA)
        log[f"{prefix}/label/input"] = make_grid((trainA + 1) * 127.5).to(torch.uint8)
        box = []
        target_box = []
        for img, pred, target in zip(
            ((trainA + 1) * 127.5).to(torch.uint8), prediction, targets
        ):
            ind = nms(pred["boxes"], pred["scores"], 0.05)
            box.append(
                draw_bounding_boxes(
                    img, boxes=pred["boxes"][ind], colors="blue", width=1
                ).to(device=self.device)
            )
            target_box.append(
                draw_bounding_boxes(
                    img, boxes=target["boxes"], colors="white", width=1
                ).to(device=self.device)
            )
        log[f"{prefix}/label/prediction"] = make_grid(torch.stack(box))
        log[f"{prefix}/label/target"] = make_grid(torch.stack(target_box))
        # Detect on the unlabelled domain and draw label-free predictions.
        prediction = self.model(trainB)
        log[f"{prefix}/unlabel/input"] = make_grid((trainB + 1) * 127.5).to(torch.uint8)
        box = []
        for img, pred in zip(((trainB + 1) * 127.5).to(torch.uint8), prediction):
            ind = nms(pred["boxes"], pred["scores"], 0.05)
            box.append(
                draw_bounding_boxes(
                    img, boxes=pred["boxes"][ind], colors="red", width=1
                ).to(device=self.device)
            )
        log[f"{prefix}/unlabel/prediction"] = make_grid(torch.stack(box))
        if flag:
            self.model.train()
        return log

    def configure_optimizers(self):
        """Two optimizers: full detector (lr[0]) and backbone-only (lr[1]),
        plus an optional exponential LR schedule on the backbone optimizer.

        NOTE(review): ``opt_sia`` optimizes ``self.model.backbone`` (which the
        siamese backbone aliases), not an independent parameter set.
        """
        opt_model = torch.optim.AdamW(self.model.parameters(), lr=self.learning_rate[0])
        opt_sia = torch.optim.AdamW(
            self.model.backbone.parameters(), lr=self.learning_rate[1]
        )
        if self.scheduler_config is not None:
            scheduler = torch.optim.lr_scheduler.ExponentialLR(
                opt_sia, **self.scheduler_config
            )
            print(f"Setting up {scheduler.__class__} scheduler...")
            return [opt_model, opt_sia], [scheduler]
        return [opt_model, opt_sia]
