import math
import torch
import torch.nn as nn
from compressai.entropy_models import GaussianConditional
from .utils import conv, deconv, update_registered_buffers
from compressai.models import CompressionModel


# From Balle's tensorflow compression examples
SCALES_MIN = 0.11
SCALES_MAX = 256
SCALES_LEVELS = 64


def get_scale_table(min=SCALES_MIN, max=SCALES_MAX, levels=SCALES_LEVELS):
    return torch.exp(torch.linspace(math.log(min), math.log(max), levels))


class C_model(CompressionModel):
    """Mean-and-scale hyperprior compression model.

    Pipeline: the analysis transform ``g_a`` maps an image to a latent ``y``;
    the hyper-analysis ``h_a`` summarizes ``y`` into side information ``z``
    (coded by the inherited entropy bottleneck); ``h_s`` followed by
    ``entropy_parameters`` predicts per-element Gaussian ``(scale, mean)``
    pairs used to code ``y`` with :class:`GaussianConditional`.

    Here, we set M=N.

    Args:
        N (int): channel width of the internal conv layers.
        M (int): channel width of the latent ``y``.
    """

    def __init__(self, N=192, M=192, **kwargs):
        super().__init__(entropy_bottleneck_channels=N, **kwargs)

        # Image -> latent y: four stride-2 convolutions (16x spatial reduction).
        # NOTE: keep the Sequential layout fixed — state-dict keys such as
        # "g_a.6.weight" (used by from_state_dict) depend on these indices.
        self.g_a = nn.Sequential(
            conv(3, N, kernel_size=5, stride=2),
            nn.ReLU(),
            conv(N, N, kernel_size=5, stride=2),
            nn.ReLU(),
            conv(N, N, kernel_size=5, stride=2),
            nn.ReLU(),
            conv(N, M, kernel_size=5, stride=2, bias=False),
        )

        # Latent y_hat -> reconstructed image: mirror of g_a with deconvs.
        self.g_s = nn.Sequential(
            deconv(M, N, kernel_size=5, stride=2),
            nn.ReLU(),
            deconv(N, N, kernel_size=5, stride=2),
            nn.ReLU(),
            deconv(N, N, kernel_size=5, stride=2),
            nn.ReLU(),
            deconv(N, 3, kernel_size=5, stride=2),
        )

        # Latent y -> hyper-latent z: two more stride-2 stages (4x reduction).
        self.h_a = nn.Sequential(
            conv(M, N, stride=1, kernel_size=3),
            nn.LeakyReLU(inplace=True),
            conv(N, N, stride=2, kernel_size=5),
            nn.LeakyReLU(inplace=True),
            conv(N, N, stride=2, kernel_size=5),
        )

        # Quantized hyper-latent z_hat -> entropy parameters (2*M channels).
        self.h_s = nn.Sequential(
            deconv(N, M, stride=2, kernel_size=5),
            nn.LeakyReLU(inplace=True),
            deconv(M, M * 3 // 2, stride=2, kernel_size=5),
            nn.LeakyReLU(inplace=True),
            conv(M * 3 // 2, M * 2, stride=1, kernel_size=3),
        )

        # 1x1 conv head refining h_s output into M*6//3 == 2*M channels
        # (one scale and one mean per latent channel).
        self.entropy_parameters = nn.Sequential(
            nn.Conv2d(M * 2, 640, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(640, 512, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(512, M * 6 // 3, 1, bias=False),
        )

        # Scale table is filled in later via update()/load_state_dict().
        self.gaussian_conditional = GaussianConditional(None)
        self.N = int(N)
        self.M = int(M)

    @property
    def downsampling_factor(self) -> int:
        # 4 stride-2 stages in g_a plus 2 in h_a -> 2**6 == 64.
        return 2 ** (4 + 2)

    def forward(self, x):
        """Run analysis/synthesis and return reconstruction + likelihoods.

        Args:
            x: input image batch.

        Returns:
            dict with ``"x_hat"`` (reconstruction) and ``"likelihoods"``
            (``{"y": ..., "z": ...}``) for rate estimation.
        """
        y = self.g_a(x)
        z = self.h_a(y)
        z_hat, z_likelihoods = self.entropy_bottleneck(z)
        params = self.h_s(z_hat)

        gaussian_params = self.entropy_parameters(params)
        # First half of channels are scales, second half means.
        scales_hat, means_hat = gaussian_params.chunk(2, 1)
        y_hat, y_likelihoods = self.gaussian_conditional(y, scales_hat, means=means_hat)
        x_hat = self.g_s(y_hat)

        return {
            "x_hat": x_hat,
            "likelihoods": {"y": y_likelihoods, "z": z_likelihoods},
        }

    def load_state_dict(self, state_dict):
        """Load weights, resizing the gaussian-conditional CDF buffers first."""
        update_registered_buffers(
            self.gaussian_conditional,
            "gaussian_conditional",
            ["_quantized_cdf", "_offset", "_cdf_length", "scale_table"],
            state_dict,
        )
        # Fix: propagate the (missing_keys, unexpected_keys) result instead of
        # silently dropping it, matching the nn.Module contract.
        return super().load_state_dict(state_dict)

    @classmethod
    def from_state_dict(cls, state_dict):
        """Return a new model instance from `state_dict`."""
        # Infer N/M from the first and last g_a conv output channels.
        N = state_dict["g_a.0.weight"].size(0)
        M = state_dict["g_a.6.weight"].size(0)
        net = cls(N, M)
        net.load_state_dict(state_dict)
        return net

    def update(self, scale_table=None, force=False):
        """Refresh the coder CDF tables; returns True if anything changed."""
        if scale_table is None:
            scale_table = get_scale_table()
        updated = self.gaussian_conditional.update_scale_table(scale_table, force=force)
        updated |= super().update(force=force)
        return updated


class C_model_effi(CompressionModel):
    """Efficient variant of the mean-and-scale hyperprior model.

    Same pipeline as ``C_model`` but with a halved-width hyper path:
    ``h_a``/``h_s`` (and the entropy bottleneck) run at ``N//2``/``M//2``
    channels, reducing side-information cost.

    Here, we set M=N.
    Note: h_a and h_s have different param with g_a and g_s.

    Args:
        N (int): channel width of the internal conv layers.
        M (int): channel width of the latent ``y``.
    """

    def __init__(self, N=192, M=192, **kwargs):
        # Hyper-latent z has only N//2 channels in this variant.
        super().__init__(entropy_bottleneck_channels=N//2, **kwargs)

        # Image -> latent y: four stride-2 convolutions (16x spatial reduction).
        # NOTE: keep the Sequential layout fixed — state-dict keys such as
        # "g_a.6.weight" (used by from_state_dict) depend on these indices.
        self.g_a = nn.Sequential(
            conv(3, N, kernel_size=5, stride=2),
            nn.ReLU(),
            conv(N, N, kernel_size=5, stride=2),
            nn.ReLU(),
            conv(N, N, kernel_size=5, stride=2),
            nn.ReLU(),
            conv(N, M, kernel_size=5, stride=2, bias=False),
        )

        # Latent y_hat -> reconstructed image: mirror of g_a with deconvs.
        self.g_s = nn.Sequential(
            deconv(M, N, kernel_size=5, stride=2),
            nn.ReLU(),
            deconv(N, N, kernel_size=5, stride=2),
            nn.ReLU(),
            deconv(N, N, kernel_size=5, stride=2),
            nn.ReLU(),
            deconv(N, 3, kernel_size=5, stride=2),
        )

        # Half-width hyper-analysis: y -> z at N//2 channels.
        self.h_a = nn.Sequential(
            conv(M, N//2, stride=1, kernel_size=3),
            nn.LeakyReLU(inplace=True),
            conv(N//2, N//2, stride=2, kernel_size=5),
            nn.LeakyReLU(inplace=True),
            conv(N//2, N//2, stride=2, kernel_size=5),
        )

        # Half-width hyper-synthesis, widening back to M*2 at the output.
        self.h_s = nn.Sequential(
            deconv(N//2, M//2, stride=2, kernel_size=5),
            nn.LeakyReLU(inplace=True),
            deconv(M//2, M * 3 // 2//2, stride=2, kernel_size=5),
            nn.LeakyReLU(inplace=True),
            conv(M * 3 // 2//2, M * 2, stride=1, kernel_size=3),
        )

        # 1x1 conv head refining h_s output into M*6//3 == 2*M channels
        # (one scale and one mean per latent channel).
        self.entropy_parameters = nn.Sequential(
            nn.Conv2d(M * 2, 640, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(640, 512, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(512, M * 6 // 3, 1, bias=False),
        )

        # Scale table is filled in later via update()/load_state_dict().
        self.gaussian_conditional = GaussianConditional(None)
        self.N = int(N)
        self.M = int(M)

    @property
    def downsampling_factor(self) -> int:
        # 4 stride-2 stages in g_a plus 2 in h_a -> 2**6 == 64.
        return 2 ** (4 + 2)

    def forward(self, x):
        """Run analysis/synthesis and return reconstruction + likelihoods.

        Args:
            x: input image batch.

        Returns:
            dict with ``"x_hat"`` (reconstruction) and ``"likelihoods"``
            (``{"y": ..., "z": ...}``) for rate estimation.
        """
        y = self.g_a(x)
        z = self.h_a(y)
        z_hat, z_likelihoods = self.entropy_bottleneck(z)
        params = self.h_s(z_hat)

        gaussian_params = self.entropy_parameters(params)
        # First half of channels are scales, second half means.
        scales_hat, means_hat = gaussian_params.chunk(2, 1)
        y_hat, y_likelihoods = self.gaussian_conditional(y, scales_hat, means=means_hat)
        x_hat = self.g_s(y_hat)

        return {
            "x_hat": x_hat,
            "likelihoods": {"y": y_likelihoods, "z": z_likelihoods},
        }

    def load_state_dict(self, state_dict):
        """Load weights, resizing the gaussian-conditional CDF buffers first."""
        update_registered_buffers(
            self.gaussian_conditional,
            "gaussian_conditional",
            ["_quantized_cdf", "_offset", "_cdf_length", "scale_table"],
            state_dict,
        )
        # Fix: propagate the (missing_keys, unexpected_keys) result instead of
        # silently dropping it, matching the nn.Module contract.
        return super().load_state_dict(state_dict)

    @classmethod
    def from_state_dict(cls, state_dict, infer=False):
        """Return a new model instance from `state_dict`.

        When ``infer`` is False, entries whose shapes do not match this
        architecture (e.g. when warm-starting from a full-width checkpoint)
        are replaced by the freshly initialized weights and reported.
        """
        N = state_dict["g_a.0.weight"].size(0)
        M = state_dict["g_a.6.weight"].size(0)
        net = cls(N, M)

        if not infer:
            # Fix: work on a shallow copy so the caller's dict is not mutated
            # when mismatched entries are dropped.
            state_dict = dict(state_dict)
            ori_state_dict = net.state_dict()
            cnt = 0
            for k in state_dict:
                if state_dict[k].shape != ori_state_dict[k].shape:
                    print('dropped', k)
                    cnt +=1
                    state_dict[k] = ori_state_dict[k]
                else:
                    print('loaded', k)
            print(f'dropped: {cnt}/{len(state_dict)}')

        net.load_state_dict(state_dict)
        return net

    def update(self, scale_table=None, force=False):
        """Refresh the coder CDF tables; returns True if anything changed."""
        if scale_table is None:
            scale_table = get_scale_table()
        updated = self.gaussian_conditional.update_scale_table(scale_table, force=force)
        updated |= super().update(force=force)
        return updated