import os

import requests
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
import torchvision.transforms as T
from torchvision.models import get_weight
from torchvision.models.resnet import model_urls

import model.modules.dino.vision_transformer as dino_vit
from model.modules.resnet_encoder import resnet_encoders

_MEAN_PIXEL_IMAGENET = [0.485, 0.456, 0.406]
_STD_PIXEL_IMAGENET = [0.229, 0.224, 0.225]


def adapt_weights(architecture):
    """Download (if necessary) and load self-supervised pretraining weights,
    remapping checkpoint keys to a plain ResNet encoder state_dict.

    Args:
        architecture: name of the pretraining method ("moco_v2", "swav",
            "dino", ...), "imagenet", or None.

    Returns:
        A state_dict ready for ``encoder.load_state_dict``, or None when
        ``architecture`` is "imagenet"/None (the caller loads torchvision
        weights instead).
    """
    if architecture == "imagenet" or architecture is None:
        return None

    weights_url = {
        "moco_v2": "https://dl.fbaipublicfiles.com/moco/moco_checkpoints/moco_v2_800ep/moco_v2_800ep_pretrain.pth.tar",
        "moco_v1": "https://dl.fbaipublicfiles.com/moco/moco_checkpoints/moco_v1_200ep/moco_v1_200ep_pretrain.pth.tar",
        "swav": "https://dl.fbaipublicfiles.com/deepcluster/swav_800ep_pretrain.pth.tar",
        "deepcluster_v2": "https://dl.fbaipublicfiles.com/deepcluster/deepclusterv2_800ep_pretrain.pth.tar",
        "dino": "https://dl.fbaipublicfiles.com/dino/dino_resnet50_pretrain/dino_resnet50_pretrain.pth",
    }

    weights_path = f"weights/{architecture}.pt"
    if not os.path.exists(weights_path):
        # Architectures handled below but without a listed URL (e.g. "obow",
        # "pixpro") must be provided manually; fail with a clear message
        # instead of a bare KeyError.
        if architecture not in weights_url:
            raise ValueError(
                f"No download URL known for '{architecture}'; "
                f"place the checkpoint at {weights_path} manually."
            )
        os.makedirs("weights", exist_ok=True)
        r = requests.get(weights_url[architecture], allow_redirects=True, timeout=300)
        # Without this check a 404/error page would be cached as the checkpoint.
        r.raise_for_status()
        with open(weights_path, "wb") as f:
            f.write(r.content)

    # map_location="cpu" so GPU-saved checkpoints load on CPU-only machines.
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints from trusted sources.
    weights = torch.load(weights_path, map_location="cpu")

    if architecture == "obow":
        return weights["network"]

    if architecture == "pixpro":
        # Keep only the encoder branch, stripping the DDP "module." prefix.
        return {
            k.replace("module.encoder.", ""): v
            for k, v in weights["model"].items()
            if k.startswith("module.encoder.")
        }

    if architecture in ("moco_v1", "moco_v2", "moco_coco"):
        # Query encoder only, minus its projection/classification fc head.
        return {
            k.replace("module.encoder_q.", ""): v
            for k, v in weights["state_dict"].items()
            if k.startswith("module.encoder_q.")
            and not k.startswith("module.encoder_q.fc")
        }

    if architecture in ("swav", "deepcluster_v2"):
        # Drop the prototypes/projection head ("module.pro...").
        return {
            k.replace("module.", ""): v
            for k, v in weights.items()
            if k.startswith("module.") and not k.startswith("module.pro")
        }

    if architecture == "dino":
        return weights

    return None


class Preprocessing:
    """Callable that applies the standard ImageNet channel-wise
    normalization to an image tensor.
    """

    def __init__(self):
        # Built once and reused for every call.
        self.preprocessing_img = T.Normalize(
            mean=_MEAN_PIXEL_IMAGENET,
            std=_STD_PIXEL_IMAGENET,
        )

    def __call__(self, image):
        """Normalize ``image`` with the ImageNet mean/std."""
        return self.preprocessing_img(image)


class SegNetEncoder(nn.Module):
    """SegNet encoder: the first 13 conv layers of VGG16-BN, grouped into
    five stages.

    Each stage is followed by a 2x2 max-pool whose argmax indices are kept
    so the decoder can unpool values back to their original positions.
    Pooling is done functionally in ``forward`` (the indices must be
    captured there), so only the conv stages are registered as modules.
    """

    def __init__(self):
        super(SegNetEncoder, self).__init__()
        self.stage_1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
        )

        self.stage_2 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
        )

        self.stage_3 = nn.Sequential(
            nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(),
        )

        self.stage_4 = nn.Sequential(
            nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(),
        )

        self.stage_5 = nn.Sequential(
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(),
        )

    def forward(self, x):
        """Run the five stages.

        Returns:
            (features, pool_indices): final feature map (H and W reduced
            32x) and the list of max-pool indices, ordered stage 1..5.
        """
        pool_indices = []
        stages = (self.stage_1, self.stage_2, self.stage_3, self.stage_4, self.stage_5)
        for stage in stages:
            x = stage(x)
            # F.max_pool2d with return_indices avoids re-instantiating an
            # nn.MaxPool2d module on every forward pass (as the original did).
            x, indices = F.max_pool2d(
                x, kernel_size=2, stride=2, return_indices=True
            )
            pool_indices.append(indices)

        return x, pool_indices


# SegNet network: encoder-decoder architecture
class SegNetDecoder(nn.Module):
    """SegNet decoder: mirrors the encoder, unpooling with the recorded
    max-pool indices before each conv stage (stage 5 features first).

    Args:
        out_channels: number of channels in the final output map.
    """

    def __init__(self, out_channels):
        super(SegNetDecoder, self).__init__()
        self.out_channels = out_channels

        # Upsampling stages, applied deepest-first: 1 -> 2 -> 3 -> 4 -> 5.
        self.upsample_1 = nn.Sequential(
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(),
        )

        self.upsample_2 = nn.Sequential(
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(),
        )

        self.upsample_3 = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
        )

        self.upsample_4 = nn.Sequential(
            nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
        )

        self.upsample_5 = nn.Sequential(
            nn.Conv2d(64, self.out_channels, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(self.out_channels),
            nn.ReLU(),
        )

    def forward(self, x, pool_indices):
        """Decode features back to input resolution.

        Args:
            x: encoder output.
            pool_indices: the encoder's max-pool indices, ordered stage 1..5
                (consumed here in reverse, deepest first).
        """
        stages = (
            self.upsample_1,
            self.upsample_2,
            self.upsample_3,
            self.upsample_4,
            self.upsample_5,
        )
        for stage, indices in zip(stages, reversed(pool_indices)):
            # F.max_unpool2d restores each value to the argmax position
            # recorded by the matching encoder pool, without creating a new
            # nn.MaxUnpool2d module on every call (as the original did).
            x = F.max_unpool2d(x, indices, kernel_size=2, stride=2)
            x = stage(x)

        return x


class SegNet(nn.Module):
    """SegNet segmentation network: a VGG16-BN-initialized encoder plus an
    index-unpooling decoder.

    Args:
        config: dict providing "normalize_features" and "model_n_out".
        preprocessing: optional callable applied to the input tensor.
    """

    def __init__(self, config, preprocessing=None):
        super(SegNet, self).__init__()
        self.config = config
        self.preprocessing = preprocessing
        self.normalize_feature = config["normalize_features"]

        # Build the encoder and initialize it from ImageNet VGG16-BN weights.
        self.encoder = SegNetEncoder()
        vgg_weights = get_weight("VGG16_BN_Weights.DEFAULT").get_state_dict(True)
        # Keep only the convolutional features (the fully-connected
        # classifier head has no counterpart in the encoder) and drop
        # "num_batches_tracked" buffers on BOTH sides so the positional
        # pairing below stays aligned.  The original code filtered them
        # only from the encoder names, shifting every VGG tensor after the
        # first BatchNorm by one position.
        vgg_tensors = [
            v
            for k, v in vgg_weights.items()
            if not k.startswith("classifier") and "num_batches_tracked" not in k
        ]
        encoder_names = [
            k
            for k in self.encoder.state_dict()
            if "num_batches_tracked" not in k
        ]
        # Pair tensors by position: both networks enumerate
        # conv.weight / conv.bias / bn.{weight,bias,running_mean,running_var}
        # in the same order.
        remapped = dict(zip(encoder_names, vgg_tensors))
        # strict=False: the skipped num_batches_tracked buffers keep their
        # freshly-initialized values.
        self.encoder.load_state_dict(remapped, strict=False)

        self.decoder = SegNetDecoder(config["model_n_out"])

    def forward(self, x):
        """Return the decoded (optionally L2-normalized) feature map."""
        if self.preprocessing:
            x = self.preprocessing(x)

        x, pool_indices = self.encoder(x)
        x = self.decoder(x, pool_indices)

        if self.normalize_feature:
            x = F.normalize(x, p=2, dim=1)

        return x


class ResNetFCNDecoder(nn.Module):
    """FCN-style decoder fusing four ResNet stage outputs into one map.

    Each stage gets an identical head (7x7 bottleneck conv -> transposed
    conv x2 -> bilinear x2, i.e. 4x total upsampling); the four heads'
    outputs are summed, with the deepest stage weighted 3x.

    Args:
        out_channels: number of channels of the fused output map.
    """

    def __init__(self, out_channels):
        super(ResNetFCNDecoder, self).__init__()
        self.out_channels = out_channels

        # One head per ResNet stage; only the input width differs.
        # (The original duplicated this Sequential four times verbatim.)
        self.deconv_layer1 = self._make_head(256)
        self.deconv_layer2 = self._make_head(512)
        self.deconv_layer3 = self._make_head(1024)
        self.deconv_layer4 = self._make_head(2048)

    def _make_head(self, in_channels):
        """Build one 4x-upsampling decoder head for a stage with
        ``in_channels`` input channels."""
        return nn.Sequential(
            nn.Conv2d(in_channels, 64, kernel_size=7, stride=1, padding=3, bias=False),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(
                64,
                self.out_channels,
                kernel_size=3,
                stride=2,
                padding=1,
                dilation=1,
                output_padding=1,
            ),
            nn.ReLU(inplace=True),
            nn.UpsamplingBilinear2d(scale_factor=2),
        )

    def forward(self, layer1, layer2, layer3, layer4):
        """Fuse the four per-stage heads.

        All inputs must share the same spatial size (the dilated encoder
        keeps stages 2-4 at stage-1 resolution); layer4 is weighted 3x.
        """
        out = (
            self.deconv_layer1(layer1)
            + self.deconv_layer2(layer2)
            + self.deconv_layer3(layer3)
            + self.deconv_layer4(layer4) * 3
        )
        return out


class ResNetFCN(nn.Module):
    """Dilated ResNet-50 encoder + FCN decoder producing per-pixel features.

    Args:
        config: dict providing "image_weights", "model_n_out" and
            "normalize_features".
        preprocessing: optional callable applied to the input tensor.
    """

    def __init__(self, config, preprocessing=None):
        super(ResNetFCN, self).__init__()
        Encoder = resnet_encoders["resnet50"]["encoder"]  # ResNetEncoder class
        # Copy before modifying: resnet_encoders is a module-level registry
        # shared with the other extractors; the original update() leaked the
        # dilation setting into every later use of the resnet50 params.
        params = dict(resnet_encoders["resnet50"]["params"])  # "block" & "layers"
        params["replace_stride_with_dilation"] = [True, True, True]
        self.encoder = Encoder(**params)

        if config.get("image_weights") == "imagenet":
            # Official torchvision ImageNet weights.
            self.encoder.load_state_dict(model_zoo.load_url(model_urls["resnet50"]))

        # Self-supervised checkpoints (moco/swav/dino/...), when requested.
        weights = adapt_weights(architecture=config.get("image_weights"))
        if weights is not None:
            self.encoder.load_state_dict(weights)

        self.decoder = ResNetFCNDecoder(config["model_n_out"])

        self.preprocessing = preprocessing
        self.normalize_feature = config["normalize_features"]

    def forward(self, x):
        """Return the fused (optionally L2-normalized) feature map."""
        if self.preprocessing:
            x = self.preprocessing(x)

        # Stages 0-1 are stem layers; stages 2-5 are the four residual
        # stages fed to the decoder.
        stages = self.encoder.get_stages()
        features = []
        for i in range(6):
            x = stages[i](x)
            features.append(x)

        x = self.decoder(features[2], features[3], features[4], features[5])

        if self.normalize_feature:
            x = F.normalize(x, p=2, dim=1)
        return x


class Classifier(nn.Module):
    """Two-layer MLP head mapping feature vectors to class logits.

    Args:
        config: dict providing "model_n_out" (input width) and "classes".
    """

    def __init__(self, config):
        super(Classifier, self).__init__()
        in_features = config["model_n_out"]
        n_classes = config["classes"]
        hidden = 128

        # Public attribute names preserved for callers/checkpoints
        # (including the original "hiden_size" spelling).
        self.model_n_out = in_features
        self.num_classes = n_classes
        self.hiden_size = hidden

        self.classifier = nn.Sequential(
            nn.Linear(in_features, hidden),
            nn.ReLU(inplace=True),
            nn.Linear(hidden, n_classes),
        )

    def forward(self, x):
        """Return raw logits of shape (..., num_classes)."""
        return self.classifier(x)


class DilationFeatureExtractor(nn.Module):
    """Dilated ResNet-50 feature extractor: frozen backbone with
    stride-to-dilation replacement, plus a 1x1 conv + 4x bilinear decoder.

    Args:
        config: dict providing "images_encoder" (must be "resnet50"),
            "image_weights", "model_n_out" and "normalize_features".
        preprocessing: optional callable applied to the input tensor.
    """

    def __init__(self, config, preprocessing=None):
        super(DilationFeatureExtractor, self).__init__()
        assert (
            config["images_encoder"] == "resnet50"
        ), "DilationFeatureExtractor is only available for resnet50"
        Encoder = resnet_encoders["resnet50"]["encoder"]  # ResNetEncoder class
        # Copy before modifying: resnet_encoders is a module-level registry
        # shared with the other extractors; the original update() leaked the
        # dilation setting into every later use of the resnet50 params.
        params = dict(resnet_encoders["resnet50"]["params"])  # "block" & "layers"
        params["replace_stride_with_dilation"] = [True, True, True]
        self.encoder = Encoder(**params)

        if config["image_weights"] == "imagenet":
            # Official torchvision ImageNet weights.
            self.encoder.load_state_dict(model_zoo.load_url(model_urls["resnet50"]))

        # Self-supervised checkpoints (moco/swav/dino/...), when requested.
        weights = adapt_weights(architecture=config["image_weights"])
        if weights is not None:
            self.encoder.load_state_dict(weights)

        # Freeze the image backbone; only the decoder trains.
        for param in self.encoder.parameters():
            param.requires_grad = False

        in1 = 2048  # ResNet-50 final stage channel count

        self.decoder = nn.Sequential(
            nn.Conv2d(in1, config["model_n_out"], 1),
            nn.Upsample(scale_factor=4, mode="bilinear", align_corners=True),
        )
        self.preprocessing = preprocessing
        self.normalize_feature = config["normalize_features"]

    def forward(self, x):
        """Return the (optionally L2-normalized) per-pixel feature map."""
        if self.preprocessing:
            x = self.preprocessing(x)
        x = self.decoder(self.encoder(x))
        if self.normalize_feature:
            x = F.normalize(x, p=2, dim=1)
        return x


class PPKTFeatureExtractor(nn.Module):
    """PPKT baseline: frozen ResNet backbone plus a 1x1 conv + 32x bilinear
    upsampling decoder.

    Args:
        config: dict providing "images_encoder" ("resnet18" or "resnet50"),
            "image_weights", "model_n_out" and "normalize_features".
        preprocessing: optional callable applied to the input tensor.
    """

    def __init__(self, config, preprocessing=None):
        super(PPKTFeatureExtractor, self).__init__()
        Encoder = resnet_encoders[config["images_encoder"]]["encoder"]
        params = resnet_encoders[config["images_encoder"]]["params"]
        self.encoder = Encoder(**params)

        if config["image_weights"] == "imagenet":
            # Official torchvision ImageNet weights.
            self.encoder.load_state_dict(
                model_zoo.load_url(model_urls[config["images_encoder"]])
            )

        if config["image_weights"] not in (None, "imagenet"):
            # Fixed key: the original read config["images_weights"], raising
            # a KeyError instead of the intended assertion message.
            assert (
                config["images_encoder"] == "resnet50"
            ), "{} weights are only available for resnet50".format(
                config["image_weights"]
            )
            weights = adapt_weights(architecture=config["image_weights"])
            if weights is not None:
                self.encoder.load_state_dict(weights)

        # Freeze the image backbone; only the decoder trains.
        for param in self.encoder.parameters():
            param.requires_grad = False

        if config["images_encoder"] == "resnet18":
            in1 = 512
        elif config["images_encoder"] == "resnet50":
            in1 = 2048
        else:
            # The original fell through here, leaving in1 undefined and
            # raising a confusing NameError below.
            raise ValueError(
                "Unsupported images_encoder: {}".format(config["images_encoder"])
            )

        self.decoder = nn.Sequential(
            nn.Conv2d(in1, config["model_n_out"], 1),
            nn.Upsample(scale_factor=32, mode="bilinear", align_corners=True),
        )
        self.preprocessing = preprocessing
        self.normalize_feature = config["normalize_features"]

    def forward(self, x):
        """Return the (optionally L2-normalized) per-pixel feature map."""
        if self.preprocessing:
            x = self.preprocessing(x)
        x = self.decoder(self.encoder(x))
        if self.normalize_feature:
            x = F.normalize(x, p=2, dim=1)
        return x


class DinoVitFeatureExtractor(nn.Module):
    """DINO Vision Transformer feature extractor: frozen ViT backbone plus
    a 1x1 conv + patch-size bilinear upsampling decoder.

    Args:
        config: dict providing "images_encoder" (one of the dino_models
            keys), "model_n_out" and "normalize_features".
        preprocessing: optional callable applied to the input tensor.
    """

    def __init__(self, config, preprocessing=None):
        super(DinoVitFeatureExtractor, self).__init__()
        # model name -> (backbone, patch size, embedding dim)
        dino_models = {
            "vit_small_p16": ("vit_small", 16, 384),
            "vit_small_p8": ("vit_small", 8, 384),
            "vit_base_p16": ("vit_base", 16, 768),
            "vit_base_p8": ("vit_base", 8, 768),
        }
        # Fixed message: the original copy-pasted "DilationFeatureExtractor".
        assert (
            config["images_encoder"] in dino_models.keys()
        ), f"DinoVitFeatureExtractor is only available for {list(dino_models)}"

        model_name, patch_size, embed_dim = dino_models[config["images_encoder"]]

        print("Use Vision Transformer pretrained with DINO as the image encoder")
        print(f"==> model_name: {model_name}")
        print(f"==> patch_size: {patch_size}")
        print(f"==> embed_dim: {embed_dim}")

        self.patch_size = patch_size
        self.embed_dim = embed_dim

        # num_classes=0 drops the classification head; pretrained DINO
        # weights are then loaded in place.
        self.encoder = dino_vit.__dict__[model_name](
            patch_size=patch_size, num_classes=0
        )
        dino_vit.load_pretrained_weights(self.encoder, "", None, model_name, patch_size)

        # Freeze the image backbone; only the decoder trains.
        for param in self.encoder.parameters():
            param.requires_grad = False

        self.decoder = nn.Sequential(
            nn.Conv2d(embed_dim, config["model_n_out"], 1),
            nn.Upsample(scale_factor=patch_size, mode="bilinear", align_corners=True),
        )
        self.preprocessing = preprocessing
        self.normalize_feature = config["normalize_features"]

    def forward(self, x):
        """Return the (optionally L2-normalized) per-pixel feature map.

        The input height and width must be multiples of the patch size.
        """
        if self.preprocessing:
            x = self.preprocessing(x)
        batch_size, _, height, width = x.size()
        assert (height % self.patch_size) == 0
        assert (width % self.patch_size) == 0
        f_height = height // self.patch_size
        f_width = width // self.patch_size

        x = self.encoder(x, all=True)
        # Expected shape: [batch_size x (1 + f_height * f_width) x embed_dim]
        # (one CLS token followed by the patch tokens).
        assert x.size(1) == (1 + f_height * f_width)
        # Drop the CLS token and reshape the patch tokens into a 2D
        # feature map of shape [batch, embed_dim, f_height, f_width].
        x = (
            x[:, 1:, :]
            .contiguous()
            .transpose(1, 2)
            .contiguous()
            .view(batch_size, self.embed_dim, f_height, f_width)
        )

        x = self.decoder(x)
        if self.normalize_feature:
            x = F.normalize(x, p=2, dim=1)
        return x
