import math
from collections import OrderedDict

import torch
import torch.nn as nn

from compressai.entropy_models import GaussianConditional
from compressai.layers import GDN
from compressai.models.hit_utils import ABCM
from compressai.models.priors import CompressionModel

from .utils import conv, deconv, update_registered_buffers


# Public API of this module.
__all__ = [
    "ScaleHyperpriorHIT",
    "ScaleHyperpriorHyperHIT"
]


# From Balle's tensorflow compression examples
SCALES_MIN = 0.11
SCALES_MAX = 256
SCALES_LEVELS = 64


def get_scale_table(min=SCALES_MIN, max=SCALES_MAX, levels=SCALES_LEVELS):
    """Return ``levels`` scale values spaced log-uniformly on [min, max].

    Used to populate the ``GaussianConditional`` scale table.

    NOTE: the parameter names shadow the ``min``/``max`` builtins; they are
    kept as-is for backward compatibility with keyword callers.
    """
    log_lo = math.log(min)
    log_hi = math.log(max)
    return torch.linspace(log_lo, log_hi, levels).exp()


class ScaleHyperpriorHIT(CompressionModel):
    r"""Scale Hyperprior model from J. Balle, D. Minnen, S. Singh, S.J. Hwang,
    N. Johnston: `"Variational Image Compression with a Scale Hyperprior"
    <https://arxiv.org/abs/1802.01436>`_ Int. Conf. on Learning Representations
    (ICLR), 2018.

    This variant inserts an ``ABCM`` module after every conv/deconv (except the
    last) of the main analysis (``g_a``) and synthesis (``g_s``) transforms.
    ``KEY_TABLE`` lets a checkpoint trained with the vanilla (ABCM-free)
    layout be loaded into this model.

    Args:
        N (int): Number of channels
        M (int): Number of channels in the expansion layers (last layer of the
            encoder and last layer of the hyperprior decoder)
        gamma (float): stored on the instance as ``self.gamma``; presumably a
            loss/regularization weight consumed by external training code --
            TODO confirm (not used anywhere in this class)
    """

    def __init__(self, N, M, gamma=0.01, **kwargs):
        super().__init__(entropy_bottleneck_channels=N, **kwargs)
        self.gamma = gamma

        # Analysis transform: image (3 ch) -> latent y (M ch).
        # ABCM follows each conv except the final expansion conv.
        self.g_a = nn.Sequential(
            conv(3, N),
            ABCM(N),
            GDN(N),
            conv(N, N),
            ABCM(N),
            GDN(N),
            conv(N, N),
            ABCM(N),
            GDN(N),
            conv(N, M),
        )

        # Synthesis transform: quantized latent y_hat (M ch) -> image (3 ch).
        self.g_s = nn.Sequential(
            deconv(M, N),
            ABCM(N),
            GDN(N, inverse=True),
            deconv(N, N),
            ABCM(N),
            GDN(N, inverse=True),
            deconv(N, N),
            ABCM(N),
            GDN(N, inverse=True),
            deconv(N, 3),
        )

        # Handles to every ABCM module, in order, for external code (e.g.
        # pruning/regularization).  Indices must stay in sync with the
        # Sequential layouts above.  NOTE(review): a plain list (not
        # nn.ModuleList) -- these modules are already registered via
        # g_a/g_s, so this only aliases them.
        self.abcms = [self.g_a[1], self.g_a[4], self.g_a[7],
                      self.g_s[1], self.g_s[4], self.g_s[7]]

        # Hyper-analysis: |y| -> hyper-latent z.
        self.h_a = nn.Sequential(
            conv(M, N, stride=1, kernel_size=3),
            nn.ReLU(inplace=True),
            conv(N, N),
            nn.ReLU(inplace=True),
            conv(N, N),
        )

        # Hyper-synthesis: z_hat -> per-element scales for the Gaussian
        # conditional over y.
        self.h_s = nn.Sequential(
            deconv(N, N),
            nn.ReLU(inplace=True),
            deconv(N, N),
            nn.ReLU(inplace=True),
            conv(N, M, stride=1, kernel_size=3),
            nn.ReLU(inplace=True),
        )

        # Scale table is populated later via update() / load_state_dict().
        self.gaussian_conditional = GaussianConditional(None)
        self.N = int(N)
        self.M = int(M)

    @property
    def downsampling_factor(self) -> int:
        # 2^(4+2) = 64: presumably 4 stride-2 stages in g_a plus 2 in h_a
        # (assumes conv/deconv default to stride 2 -- TODO confirm in .utils).
        return 2 ** (4 + 2)

    def forward(self, x):
        """Run the full autoencoder for training/evaluation.

        Returns a dict with the reconstruction ``x_hat`` and the
        ``likelihoods`` of the latents ``y`` and ``z``.
        """
        y = self.g_a(x)
        # The scale hyperprior models |y|; the sign is not sent to h_a.
        z = self.h_a(torch.abs(y))

        z_hat, z_likelihoods = self.entropy_bottleneck(z)
        scales_hat = self.h_s(z_hat)
        y_hat, y_likelihoods = self.gaussian_conditional(y, scales_hat)
        x_hat = self.g_s(y_hat)

        return {
            "x_hat": x_hat,
            "likelihoods": {"y": y_likelihoods, "z": z_likelihoods},
        }

    # Maps parameter keys from a vanilla ScaleHyperprior checkpoint (no ABCM
    # modules, so GDN sits at Sequential index 1, 3, 5, ...) to the keys of
    # this model, where each inserted ABCM shifts later indices by one.
    # h_a/h_s are identical in both layouts and need no remapping.
    KEY_TABLE = {
        'g_a.0.weight': 'g_a.0.weight',
        'g_a.0.bias': 'g_a.0.bias',
        'g_a.1.beta': 'g_a.2.beta',
        'g_a.1.gamma': 'g_a.2.gamma',
        'g_a.1.beta_reparam.pedestal': 'g_a.2.beta_reparam.pedestal',
        'g_a.1.beta_reparam.lower_bound.bound': 'g_a.2.beta_reparam.lower_bound.bound',
        'g_a.1.gamma_reparam.pedestal': 'g_a.2.gamma_reparam.pedestal',
        'g_a.1.gamma_reparam.lower_bound.bound': 'g_a.2.gamma_reparam.lower_bound.bound',
        'g_a.2.weight': 'g_a.3.weight',
        'g_a.2.bias': 'g_a.3.bias',
        'g_a.3.beta': 'g_a.5.beta',
        'g_a.3.gamma': 'g_a.5.gamma',
        'g_a.3.beta_reparam.pedestal': 'g_a.5.beta_reparam.pedestal',
        'g_a.3.beta_reparam.lower_bound.bound': 'g_a.5.beta_reparam.lower_bound.bound',
        'g_a.3.gamma_reparam.pedestal': 'g_a.5.gamma_reparam.pedestal',
        'g_a.3.gamma_reparam.lower_bound.bound': 'g_a.5.gamma_reparam.lower_bound.bound',
        'g_a.4.weight': 'g_a.6.weight',
        'g_a.4.bias': 'g_a.6.bias',
        'g_a.5.beta': 'g_a.8.beta',
        'g_a.5.gamma': 'g_a.8.gamma',
        'g_a.5.beta_reparam.pedestal': 'g_a.8.beta_reparam.pedestal',
        'g_a.5.beta_reparam.lower_bound.bound': 'g_a.8.beta_reparam.lower_bound.bound',
        'g_a.5.gamma_reparam.pedestal': 'g_a.8.gamma_reparam.pedestal',
        'g_a.5.gamma_reparam.lower_bound.bound': 'g_a.8.gamma_reparam.lower_bound.bound',
        'g_a.6.weight': 'g_a.9.weight',
        'g_a.6.bias': 'g_a.9.bias',
        'g_s.0.weight': 'g_s.0.weight',
        'g_s.0.bias': 'g_s.0.bias',
        'g_s.1.beta': 'g_s.2.beta',
        'g_s.1.gamma': 'g_s.2.gamma',
        'g_s.1.beta_reparam.pedestal': 'g_s.2.beta_reparam.pedestal',
        'g_s.1.beta_reparam.lower_bound.bound': 'g_s.2.beta_reparam.lower_bound.bound',
        'g_s.1.gamma_reparam.pedestal': 'g_s.2.gamma_reparam.pedestal',
        'g_s.1.gamma_reparam.lower_bound.bound': 'g_s.2.gamma_reparam.lower_bound.bound',
        'g_s.2.weight': 'g_s.3.weight',
        'g_s.2.bias': 'g_s.3.bias',
        'g_s.3.beta': 'g_s.5.beta',
        'g_s.3.gamma': 'g_s.5.gamma',
        'g_s.3.beta_reparam.pedestal': 'g_s.5.beta_reparam.pedestal',
        'g_s.3.beta_reparam.lower_bound.bound': 'g_s.5.beta_reparam.lower_bound.bound',
        'g_s.3.gamma_reparam.pedestal': 'g_s.5.gamma_reparam.pedestal',
        'g_s.3.gamma_reparam.lower_bound.bound': 'g_s.5.gamma_reparam.lower_bound.bound',
        'g_s.4.weight': 'g_s.6.weight',
        'g_s.4.bias': 'g_s.6.bias',
        'g_s.5.beta': 'g_s.8.beta',
        'g_s.5.gamma': 'g_s.8.gamma',
        'g_s.5.beta_reparam.pedestal': 'g_s.8.beta_reparam.pedestal',
        'g_s.5.beta_reparam.lower_bound.bound': 'g_s.8.beta_reparam.lower_bound.bound',
        'g_s.5.gamma_reparam.pedestal': 'g_s.8.gamma_reparam.pedestal',
        'g_s.5.gamma_reparam.lower_bound.bound': 'g_s.8.gamma_reparam.lower_bound.bound',
        'g_s.6.weight': 'g_s.9.weight',
        'g_s.6.bias': 'g_s.9.bias',
    }

    # Associates each ABCM mask with the parameters it gates: the preceding
    # conv's weight/bias, the following GDN's beta/gamma, and the next conv's
    # (input-side) weight.  Presumably consumed by external pruning logic --
    # TODO confirm; indices must stay in sync with the Sequential layouts.
    mask_weight_pairs = OrderedDict(
        [('g_a.1.mask', ['g_a.0.weight', 'g_a.0.bias', 'g_a.2.beta', 'g_a.2.gamma', 'g_a.3.weight']),
        ('g_a.4.mask', ['g_a.3.weight', 'g_a.3.bias', 'g_a.5.beta', 'g_a.5.gamma', 'g_a.6.weight']),
        ('g_a.7.mask', ['g_a.6.weight', 'g_a.6.bias', 'g_a.8.beta', 'g_a.8.gamma', 'g_a.9.weight']),
        ('g_s.1.mask', ['g_s.0.weight', 'g_s.0.bias', 'g_s.2.beta', 'g_s.2.gamma', 'g_s.3.weight']),
        ('g_s.4.mask', ['g_s.3.weight', 'g_s.3.bias', 'g_s.5.beta', 'g_s.5.gamma', 'g_s.6.weight']),
        ('g_s.7.mask', ['g_s.6.weight', 'g_s.6.bias', 'g_s.8.beta', 'g_s.8.gamma', 'g_s.9.weight'])])

    def load_state_dict(self, state_dict, ckpt=False):
        """Load weights, optionally remapping keys from the vanilla layout.

        Args:
            state_dict: checkpoint to load.
            ckpt (bool): if True, ``state_dict`` already uses this model's
                layout and is loaded directly.  If False, keys listed in
                ``KEY_TABLE`` are remapped from the vanilla (ABCM-free)
                layout; all other parameters (h_a, h_s, ABCM, entropy
                bottleneck) keep their current in-model values.
        """
        # Resize the Gaussian conditional's registered buffers in place so
        # their shapes match the incoming checkpoint.
        update_registered_buffers(
            self.gaussian_conditional,
            "gaussian_conditional",
            ["_quantized_cdf", "_offset", "_cdf_length", "scale_table"],
            state_dict,
        )
        if ckpt:
            super().load_state_dict(state_dict)
            return
        # Start from the current weights and overwrite only the remapped keys.
        ori_state_dict = self.state_dict()
        for k, v in self.KEY_TABLE.items():
            if k in state_dict:
                ori_state_dict[v] = state_dict[k]
        super().load_state_dict(ori_state_dict)

    @classmethod
    def from_state_dict(cls, state_dict, gamma=0.01):
        """Return a new model instance from `state_dict`."""
        # N/M are read using the ORIGINAL (ABCM-free) key layout, where
        # g_a.0 is the first conv and g_a.6 the final expansion conv.
        N = state_dict["g_a.0.weight"].size(0)
        M = state_dict["g_a.6.weight"].size(0)
        net = cls(N, M, gamma)
        net.load_state_dict(state_dict)
        return net

    def update(self, scale_table=None, force=False):
        """Update the CDF tables used for actual entropy coding.

        Returns True if any table was (re)computed.
        """
        if scale_table is None:
            scale_table = get_scale_table()
        updated = self.gaussian_conditional.update_scale_table(scale_table, force=force)
        updated |= super().update(force=force)
        return updated

    def compress(self, x):
        """Compress an image batch to entropy-coded byte strings."""
        y = self.g_a(x)
        z = self.h_a(torch.abs(y))

        z_strings = self.entropy_bottleneck.compress(z)
        # Decode z back so the encoder uses the exact z_hat the decoder will see.
        z_hat = self.entropy_bottleneck.decompress(z_strings, z.size()[-2:])

        scales_hat = self.h_s(z_hat)
        indexes = self.gaussian_conditional.build_indexes(scales_hat)
        y_strings = self.gaussian_conditional.compress(y, indexes)
        return {"strings": [y_strings, z_strings], "shape": z.size()[-2:]}

    def decompress(self, strings, shape):
        """Reconstruct an image batch from `compress()` output.

        Args:
            strings: ``[y_strings, z_strings]`` as produced by ``compress``.
            shape: spatial size (H, W) of the hyper-latent z.
        """
        assert isinstance(strings, list) and len(strings) == 2
        z_hat = self.entropy_bottleneck.decompress(strings[1], shape)
        scales_hat = self.h_s(z_hat)
        indexes = self.gaussian_conditional.build_indexes(scales_hat)
        y_hat = self.gaussian_conditional.decompress(strings[0], indexes, z_hat.dtype)
        # Clamp to the valid [0, 1] image range (in place).
        x_hat = self.g_s(y_hat).clamp_(0, 1)
        return {"x_hat": x_hat}


class ScaleHyperpriorHyperHIT(CompressionModel):
    r"""Scale Hyperprior model from J. Balle, D. Minnen, S. Singh, S.J. Hwang,
    N. Johnston: `"Variational Image Compression with a Scale Hyperprior"
    <https://arxiv.org/abs/1802.01436>`_ Int. Conf. on Learning Representations
    (ICLR), 2018.

    Unlike :class:`ScaleHyperpriorHIT`, this variant keeps the main
    transforms (``g_a``/``g_s``) vanilla and inserts ``ABCM`` modules into
    the HYPER transforms (``h_a``/``h_s``) instead; ``KEY_TABLE``
    correspondingly remaps only ``h_a``/``h_s`` keys.

    Args:
        N (int): Number of channels
        M (int): Number of channels in the expansion layers (last layer of the
            encoder and last layer of the hyperprior decoder)
        gamma (float): stored on the instance as ``self.gamma``; presumably a
            loss/regularization weight consumed by external training code --
            TODO confirm (not used anywhere in this class)
    """

    def __init__(self, N, M, gamma=0.01, **kwargs):
        super().__init__(entropy_bottleneck_channels=N, **kwargs)
        self.gamma = gamma

        # Analysis transform: image (3 ch) -> latent y (M ch). Vanilla layout.
        self.g_a = nn.Sequential(
            conv(3, N),
            GDN(N),
            conv(N, N),
            GDN(N),
            conv(N, N),
            GDN(N),
            conv(N, M),
        )

        # Synthesis transform: quantized latent y_hat -> image. Vanilla layout.
        self.g_s = nn.Sequential(
            deconv(M, N),
            GDN(N, inverse=True),
            deconv(N, N),
            GDN(N, inverse=True),
            deconv(N, N),
            GDN(N, inverse=True),
            deconv(N, 3),
        )

        # Hyper-analysis: |y| -> hyper-latent z, with ABCM after every conv
        # (including the last).
        self.h_a = nn.Sequential(
            conv(M, N, stride=1, kernel_size=3),
            ABCM(N),
            nn.ReLU(inplace=True),
            conv(N, N),
            ABCM(N),
            nn.ReLU(inplace=True),
            conv(N, N),
            ABCM(N)
        )

        # Hyper-synthesis: z_hat -> per-element scales for the Gaussian
        # conditional over y, with ABCM after each deconv.
        self.h_s = nn.Sequential(
            deconv(N, N),
            ABCM(N),
            nn.ReLU(inplace=True),
            deconv(N, N),
            ABCM(N),
            nn.ReLU(inplace=True),
            conv(N, M, stride=1, kernel_size=3),
            nn.ReLU(inplace=True),
        )

        # Handles to every ABCM module, in order, for external code (e.g.
        # pruning).  NOTE(review): a plain list (not nn.ModuleList); the
        # modules are already registered via h_a/h_s, so this only aliases
        # them.  Indices must stay in sync with the layouts above.
        self.abcms = [self.h_a[1], self.h_a[4], self.h_a[7], self.h_s[1], self.h_s[4]]

        # Scale table is populated later via update() / load_state_dict().
        self.gaussian_conditional = GaussianConditional(None)
        self.N = int(N)
        self.M = int(M)

    @property
    def downsampling_factor(self) -> int:
        # 2^(4+2) = 64: presumably 4 stride-2 stages in g_a plus 2 in h_a
        # (assumes conv/deconv default to stride 2 -- TODO confirm in .utils).
        return 2 ** (4 + 2)

    def forward(self, x):
        """Run the full autoencoder for training/evaluation.

        Returns a dict with the reconstruction ``x_hat`` and the
        ``likelihoods`` of the latents ``y`` and ``z``.
        """
        y = self.g_a(x)
        # The scale hyperprior models |y|; the sign is not sent to h_a.
        z = self.h_a(torch.abs(y))

        z_hat, z_likelihoods = self.entropy_bottleneck(z)
        scales_hat = self.h_s(z_hat)
        y_hat, y_likelihoods = self.gaussian_conditional(y, scales_hat)
        x_hat = self.g_s(y_hat)

        return {
            "x_hat": x_hat,
            "likelihoods": {"y": y_likelihoods, "z": z_likelihoods},
        }

    # Maps h_a/h_s conv keys from a vanilla ScaleHyperprior checkpoint (no
    # ABCM modules) to this model's layout, where each inserted ABCM shifts
    # later Sequential indices by one.  g_a/g_s are identical in both
    # layouts and need no remapping.
    KEY_TABLE = {
        'h_a.0.weight': 'h_a.0.weight',
        'h_a.0.bias': 'h_a.0.bias',
        'h_a.2.weight': 'h_a.3.weight',
        'h_a.2.bias': 'h_a.3.bias',
        'h_a.4.weight': 'h_a.6.weight',
        'h_a.4.bias': 'h_a.6.bias',
        'h_s.0.weight': 'h_s.0.weight',
        'h_s.0.bias': 'h_s.0.bias',
        'h_s.2.weight': 'h_s.3.weight',
        'h_s.2.bias': 'h_s.3.bias',
        'h_s.4.weight': 'h_s.6.weight',
        'h_s.4.bias': 'h_s.6.bias',
    }

    # Associates each ABCM mask with the parameters it gates (previous conv's
    # weight/bias and the next conv's input-side weight).  NOTE(review):
    # 'h_a.7.mask' crosses over into 'h_s.0.weight' -- h_a's output reaches
    # h_s through the entropy bottleneck, so this looks intentional, but
    # verify against the pruning code that consumes these pairs.
    mask_weight_pairs = OrderedDict(
        [('h_a.1.mask', ['h_a.0.weight', 'h_a.0.bias', 'h_a.3.weight']),
        ('h_a.4.mask', ['h_a.3.weight', 'h_a.3.bias', 'h_a.6.weight']),
        ('h_a.7.mask', ['h_a.6.weight', 'h_a.6.bias', 'h_s.0.weight']),
        ('h_s.1.mask', ['h_s.0.weight', 'h_s.0.bias', 'h_s.3.weight']),
        ('h_s.4.mask', ['h_s.3.weight', 'h_s.3.bias', 'h_s.6.weight']),])

    def load_state_dict(self, state_dict, ckpt=False):
        """Load weights, optionally remapping keys from the vanilla layout.

        Args:
            state_dict: checkpoint to load.
            ckpt (bool): if True, ``state_dict`` already uses this model's
                layout and is loaded directly.  If False, keys listed in
                ``KEY_TABLE`` are remapped from the vanilla (ABCM-free)
                layout; all other parameters keep their current in-model
                values.
        """
        # Resize the Gaussian conditional's registered buffers in place so
        # their shapes match the incoming checkpoint.
        update_registered_buffers(
            self.gaussian_conditional,
            "gaussian_conditional",
            ["_quantized_cdf", "_offset", "_cdf_length", "scale_table"],
            state_dict,
        )
        if ckpt:
            super().load_state_dict(state_dict)
            return
        # Start from the current weights and overwrite only the remapped keys.
        ori_state_dict = self.state_dict()
        for k, v in self.KEY_TABLE.items():
            if k in state_dict:
                ori_state_dict[v] = state_dict[k]
        super().load_state_dict(ori_state_dict)

    @classmethod
    def from_state_dict(cls, state_dict, gamma=0.01):
        """Return a new model instance from `state_dict`."""
        # g_a is unmodified in this variant, so N/M can be read directly.
        N = state_dict["g_a.0.weight"].size(0)
        M = state_dict["g_a.6.weight"].size(0)
        net = cls(N, M, gamma)
        net.load_state_dict(state_dict)
        return net

    def update(self, scale_table=None, force=False):
        """Update the CDF tables used for actual entropy coding.

        Returns True if any table was (re)computed.
        """
        if scale_table is None:
            scale_table = get_scale_table()
        updated = self.gaussian_conditional.update_scale_table(scale_table, force=force)
        updated |= super().update(force=force)
        return updated

    def compress(self, x):
        """Compress an image batch to entropy-coded byte strings."""
        y = self.g_a(x)
        z = self.h_a(torch.abs(y))

        z_strings = self.entropy_bottleneck.compress(z)
        # Decode z back so the encoder uses the exact z_hat the decoder will see.
        z_hat = self.entropy_bottleneck.decompress(z_strings, z.size()[-2:])

        scales_hat = self.h_s(z_hat)
        indexes = self.gaussian_conditional.build_indexes(scales_hat)
        y_strings = self.gaussian_conditional.compress(y, indexes)
        return {"strings": [y_strings, z_strings], "shape": z.size()[-2:]}

    def decompress(self, strings, shape):
        """Reconstruct an image batch from `compress()` output.

        Args:
            strings: ``[y_strings, z_strings]`` as produced by ``compress``.
            shape: spatial size (H, W) of the hyper-latent z.
        """
        assert isinstance(strings, list) and len(strings) == 2
        z_hat = self.entropy_bottleneck.decompress(strings[1], shape)
        scales_hat = self.h_s(z_hat)
        indexes = self.gaussian_conditional.build_indexes(scales_hat)
        y_hat = self.gaussian_conditional.decompress(strings[0], indexes, z_hat.dtype)
        # Clamp to the valid [0, 1] image range (in place).
        x_hat = self.g_s(y_hat).clamp_(0, 1)
        return {"x_hat": x_hat}

