# Copyright 2020 InterDigital Communications, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
import torch.nn as nn
import torch.nn.functional as F

from compressai.layers import (
    AttentionBlock,
    ResidualBlock,
    ResidualBlockUpsample,
    ResidualBlockWithStride,
    MaskedConv2d,
    conv3x3,
    subpel_conv3x3,
)

from .priors import JointAutoregressiveHierarchicalPriors, CompressionModel
from compressai.entropy_models import GaussianConditional, GaussianMixtureConditional
from .utils import conv, deconv, update_registered_buffers


class Cheng2020Anchor(JointAutoregressiveHierarchicalPriors):
    """Anchor model variant from `"Learned Image Compression with
    Discretized Gaussian Mixture Likelihoods and Attention Modules"
    <https://arxiv.org/abs/2001.01568>`_, by Zhengxue Cheng, Heming Sun, Masaru
    Takeuchi, Jiro Katto.

    Uses residual blocks with small convolutions (3x3 and 1x1), and sub-pixel
    convolutions for up-sampling.

    Args:
        N (int): Number of channels
    """

    def __init__(self, N=192, **kwargs):
        # Latent width M equals transform width N in this model.
        super().__init__(N=N, M=N, **kwargs)

        # Analysis transform g_a: four 2x down-sampling stages.
        encoder = [ResidualBlockWithStride(3, N, stride=2)]
        for _ in range(2):
            encoder.append(ResidualBlock(N, N))
            encoder.append(ResidualBlockWithStride(N, N, stride=2))
        encoder.append(ResidualBlock(N, N))
        encoder.append(conv3x3(N, N, stride=2))
        self.g_a = nn.Sequential(*encoder)

        # Hyper-analysis h_a: 3x3 convs with LeakyReLU after all but the last.
        hyper_enc = []
        for layer in (
            conv3x3(N, N),
            conv3x3(N, N),
            conv3x3(N, N, stride=2),
            conv3x3(N, N),
        ):
            hyper_enc.extend([layer, nn.LeakyReLU(inplace=True)])
        hyper_enc.append(conv3x3(N, N, stride=2))
        self.h_a = nn.Sequential(*hyper_enc)

        # Hyper-synthesis h_s: widens N -> 3N/2 -> 2N while up-sampling twice.
        mid = N * 3 // 2
        hyper_dec = []
        for layer in (
            conv3x3(N, N),
            subpel_conv3x3(N, N, 2),
            conv3x3(N, mid),
            subpel_conv3x3(mid, mid, 2),
        ):
            hyper_dec.extend([layer, nn.LeakyReLU(inplace=True)])
        hyper_dec.append(conv3x3(mid, N * 2))
        self.h_s = nn.Sequential(*hyper_dec)

        # Synthesis transform g_s: mirrors g_a with sub-pixel up-sampling.
        decoder = []
        for _ in range(3):
            decoder.append(ResidualBlock(N, N))
            decoder.append(ResidualBlockUpsample(N, N, 2))
        decoder.append(ResidualBlock(N, N))
        decoder.append(subpel_conv3x3(N, 3, 2))
        self.g_s = nn.Sequential(*decoder)

    @classmethod
    def from_state_dict(cls, state_dict):
        """Return a new model instance from `state_dict`."""
        # N is recoverable from the output width of the first analysis conv.
        num_channels = state_dict["g_a.0.conv1.weight"].size(0)
        model = cls(num_channels)
        model.load_state_dict(state_dict)
        return model


class Cheng2020Attention(Cheng2020Anchor):
    """Self-attention model variant from `"Learned Image Compression with
    Discretized Gaussian Mixture Likelihoods and Attention Modules"
    <https://arxiv.org/abs/2001.01568>`_, by Zhengxue Cheng, Heming Sun, Masaru
    Takeuchi, Jiro Katto.

    Uses self-attention, residual blocks with small convolutions (3x3 and 1x1),
    and sub-pixel convolutions for up-sampling.

    Args:
        N (int): Number of channels
    """

    def __init__(self, N=192, **kwargs):
        super().__init__(N=N, **kwargs)

        # Anchor encoder backbone with an attention block inserted after the
        # second down-sampling stage and another one on the final latent.
        encoder = [
            ResidualBlockWithStride(3, N, stride=2),
            ResidualBlock(N, N),
            ResidualBlockWithStride(N, N, stride=2),
        ]
        encoder.append(AttentionBlock(N))
        encoder += [
            ResidualBlock(N, N),
            ResidualBlockWithStride(N, N, stride=2),
            ResidualBlock(N, N),
            conv3x3(N, N, stride=2),
        ]
        encoder.append(AttentionBlock(N))
        self.g_a = nn.Sequential(*encoder)

        # Decoder mirrors the encoder: attention on the incoming latent and
        # again after the second up-sampling stage.
        decoder = [AttentionBlock(N)]
        decoder += [
            ResidualBlock(N, N),
            ResidualBlockUpsample(N, N, 2),
            ResidualBlock(N, N),
            ResidualBlockUpsample(N, N, 2),
        ]
        decoder.append(AttentionBlock(N))
        decoder += [
            ResidualBlock(N, N),
            ResidualBlockUpsample(N, N, 2),
            ResidualBlock(N, N),
            subpel_conv3x3(N, 3, 2),
        ]
        self.g_s = nn.Sequential(*decoder)


class cheng2020_lossless(CompressionModel):
    """Cheng2020-style attention model extended with a lossless branch.

    ``g_s`` outputs 6 channels that :meth:`forward` splits into per-pixel
    scales and means (3 each) for a second ``GaussianConditional`` that models
    the quantized input image ``x`` itself, in addition to the usual
    hyperprior + autoregressive coding of the latent ``y``.

    Args:
        N (int): number of channels
        K (int): number of mean slices averaged by :meth:`L2_norm`
    """

    def __init__(self, N, K=1, **kwargs):
        super().__init__(entropy_bottleneck_channels=N, **kwargs)
        self.K = K
        # Latent width M equals N in this model.
        M = N
        # Analysis transform (image -> latent), with attention blocks.
        self.g_a = nn.Sequential(
            ResidualBlockWithStride(3, N, stride=2),
            ResidualBlock(N, N),
            ResidualBlockWithStride(N, N, stride=2),
            AttentionBlock(N),
            ResidualBlock(N, N),
            ResidualBlockWithStride(N, N, stride=2),
            ResidualBlock(N, N),
            conv3x3(N, N, stride=2),
            AttentionBlock(N),
        )

        # Synthesis transform: 6 output channels = 3 scales + 3 means for the
        # Gaussian model of the input image (see forward()).
        self.g_s = nn.Sequential(
            AttentionBlock(N),
            ResidualBlock(N, N),
            ResidualBlockUpsample(N, N, 2),
            ResidualBlock(N, N),
            ResidualBlockUpsample(N, N, 2),
            AttentionBlock(N),
            ResidualBlock(N, N),
            ResidualBlockUpsample(N, N, 2),
            ResidualBlock(N, N),
            subpel_conv3x3(N, 6, 2),
        )

        # Hyper-analysis (latent y -> hyper-latent z).
        self.h_a = nn.Sequential(
            conv3x3(N, N),
            nn.LeakyReLU(inplace=True),
            conv3x3(N, N),
            nn.LeakyReLU(inplace=True),
            conv3x3(N, N, stride=2),
            nn.LeakyReLU(inplace=True),
            conv3x3(N, N),
            nn.LeakyReLU(inplace=True),
            conv3x3(N, N, stride=2),
        )

        # Hyper-synthesis (z_hat -> 2N-channel hyperprior features).
        self.h_s = nn.Sequential(
            conv3x3(N, N),
            nn.LeakyReLU(inplace=True),
            subpel_conv3x3(N, N, 2),
            nn.LeakyReLU(inplace=True),
            conv3x3(N, N * 3 // 2),
            nn.LeakyReLU(inplace=True),
            subpel_conv3x3(N * 3 // 2, N * 3 // 2, 2),
            nn.LeakyReLU(inplace=True),
            conv3x3(N * 3 // 2, N * 2),
        )
        # Fuses hyperprior features (2M) with masked-conv context (2M) into
        # 2M channels: scales and means for the latent.
        self.entropy_parameters = nn.Sequential(
            nn.Conv2d(M * 12 // 3, M * 10 // 3, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(M * 10 // 3, M * 8 // 3, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(M * 8 // 3, M * 6 // 3, 1),
        )

        # 5x5 masked convolution: causal spatial context over y_hat.
        self.context_prediction = MaskedConv2d(
            M, 2 * M, kernel_size=5, padding=2, stride=1
        )

        # Entropy model for the latent y.
        self.gaussian_conditional = GaussianConditional(None)
        self.N = int(N)
        # Second conditional: entropy model for the quantized input image.
        self.gaussian_conditional_lossless = GaussianConditional(None)
        self.mse = nn.MSELoss()

    def L2_norm(self, mean, target):
        """Average MSE between each of the K channel-slices of `mean` and `target`.

        `mean` is expected to hold K * C channels where C = target's channels.
        """
        C = target.size()[1]
        for i in range(self.K):
            # print(i)
            # print("mean: ", mean[:,i*C:(i+1)*C,:,:].size())
            # print("target: ", target.size())
            if i == 0:
                loss = self.mse(mean[:,i*C:(i+1)*C,:,:], target)
            else:
                loss += self.mse(mean[:,i*C:(i+1)*C,:,:], target)
        return loss / self.K

    def forward(self, x):
        """Return quantized image, all likelihoods, and the auxiliary L2 loss."""
        y = self.g_a(x)
        z = self.h_a(y)
        z_hat, z_likelihoods = self.entropy_bottleneck(z)
        params = self.h_s(z_hat)

        # Additive-noise proxy while training, hard rounding at eval time.
        y_hat = self.gaussian_conditional.quantize(
            y, "noise" if self.training else "dequantize"
        )
        ctx_params = self.context_prediction(y_hat)
        gaussian_params = self.entropy_parameters(
            torch.cat((params, ctx_params), dim=1)
        )
        scales_hat, means_hat = gaussian_params.chunk(2, 1)
        _, y_likelihoods = self.gaussian_conditional(y, scales_hat, means=means_hat)
        # g_s predicts the Gaussian parameters for the image itself.
        gaussian_params_2 = self.g_s(y_hat)

        scales_hat_2, means_hat_2 = gaussian_params_2.chunk(2, 1)
        # weight_2 = nn.functional.softmax(weight_2,dim=1)
        # The "reconstruction" is the quantized input (lossless-style pipeline).
        x_hat = self.gaussian_conditional.quantize(
            x, "noise" if self.training else "dequantize"
        )
        _, x_hat_likelihoods = self.gaussian_conditional_lossless(x,  scales_hat_2, means=means_hat_2)

        # Auxiliary loss pulling the predicted means toward their targets.
        L2_loss = self.L2_norm(means_hat, y_hat) + self.L2_norm(means_hat_2, x_hat)
        return {
            "x_hat": x_hat,
            "likelihoods": {"y": y_likelihoods, "z": z_likelihoods, "x_hat": x_hat_likelihoods},
            "L2_loss": L2_loss
        }


class Cheng2020_GMM_jinming(cheng2020_lossless):
    """GMM variant of :class:`cheng2020_lossless`.

    Models the latent ``y`` with a K-component Gaussian mixture
    (``GaussianMixtureConditional``); ``g_s`` is redefined as a plain
    3-channel image decoder and :meth:`forward` drops the lossless branch.
    """

    def __init__(self, N, K=3, **kwargs):
        super().__init__(N, K=K, **kwargs)
        # Replace the parent's single-Gaussian conditional with a K-mixture.
        self.gaussian_conditional = GaussianMixtureConditional(K=K)
        self.K = K

        # Plain image decoder (overrides the parent's 6-channel g_s).
        self.g_s = nn.Sequential(
            AttentionBlock(N),
            ResidualBlock(N, N),
            ResidualBlockUpsample(N, N, 2),
            ResidualBlock(N, N),
            ResidualBlockUpsample(N, N, 2),
            AttentionBlock(N),
            ResidualBlock(N, N),
            ResidualBlockUpsample(N, N, 2),
            ResidualBlock(N, N),
            subpel_conv3x3(N, 3, 2),
            # subpel_conv3x3(N, 3*3*K, 2),
        )

        # Output: K scales, K means and K mixture weights per latent channel.
        self.entropy_parameters = nn.Sequential(
            nn.Conv2d(N * 12 // 3, N * 10 // 3, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(N * 10 // 3, N * 8 // 3, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(N * 8 // 3, N * 3 * K, 1),
        )


    def forward(self, x):
        """Return decoded image and latent/hyper-latent likelihoods."""
        y = self.g_a(x)
        z = self.h_a(y)
        z_hat, z_likelihoods = self.entropy_bottleneck(z)
        params = self.h_s(z_hat)

        # Additive-noise proxy while training, hard rounding at eval time.
        y_hat = self.gaussian_conditional.quantize(
            y, "noise" if self.training else "dequantize"
        )
        ctx_params = self.context_prediction(y_hat)
        gaussian_params = self.entropy_parameters(
            torch.cat((params, ctx_params), dim=1)
        )
        scales_hat, means_hat, weight = gaussian_params.chunk(3, 1)
        # (B, K*C, H, W) -> (B, K, C, H, W) so softmax normalizes the mixture
        # weights across the K components, then flatten back.
        weight = torch.reshape(weight,(weight.size(0), self.K, weight.size(1)//self.K, weight.size(2), weight.size(3)))
        # print(weight.size())
        weight = nn.functional.softmax(weight,dim=1)
        weight = torch.reshape(weight,(weight.size(0), weight.size(1)*weight.size(2), weight.size(3), weight.size(4)))
        # print(weight.size())
        _, y_likelihoods = self.gaussian_conditional(y, scales_hat, means=means_hat, weights=weight)
        x_hat = self.g_s(y_hat)

        return {
            "x_hat": x_hat,
            "likelihoods": {"y": y_likelihoods, "z": z_likelihoods},
        }

    @classmethod
    def from_state_dict(cls, state_dict, infer=False):
        """Return a new model instance from `state_dict`.

        With ``infer=False``, checkpoint entries whose name or shape do not
        match the freshly built model are dropped instead of raising.
        """
        N = state_dict["g_a.0.conv1.weight"].size(0)
        net = cls(N)

        if not infer:
            ori_state_dict = net.state_dict()
            cnt = 0
            for k in ori_state_dict:
                if k in state_dict and state_dict[k].shape == ori_state_dict[k].shape:
                    ori_state_dict[k] = state_dict[k]
                    print('loaded', k)
                else:
                    print('dropped', k)
                    cnt += 1
            print(f'dropped: {cnt}/{len(state_dict)}')
            state_dict = ori_state_dict
        net.load_state_dict(state_dict)
        return net

class Cheng2020_GMM_jinmingRR(CompressionModel):
    """Cheng2020 attention model with a K-component Gaussian-mixture
    conditional and configurable hyper-network channel widths.

    Args:
        N (int): base channel count for ``g_a`` / ``g_s`` / context model
        deps (list[int] or None): channel widths — ``deps[0:5]`` are the
            outputs of the five ``h_a`` convs, ``deps[5:10]`` those of the
            five ``h_s`` layers; ``None`` selects the standard widths.
        K (int): number of Gaussian mixture components
    """

    def __init__(self, N, deps=None, K=3, **kwargs):
        if deps is None:
            # Standard widths: h_a stays at N; h_s widens N -> 3N/2 -> 2N.
            deps = [N for _ in range(7)] + [N*3//2, N*3//2, N*2]
        self.deps = deps
        print(deps)
        super().__init__(entropy_bottleneck_channels=deps[4], **kwargs)

        self.K = K

        # Analysis transform (image -> latent), with attention blocks.
        self.g_a = nn.Sequential(
            ResidualBlockWithStride(3, N, stride=2),
            ResidualBlock(N, N),
            ResidualBlockWithStride(N, N, stride=2),
            AttentionBlock(N),
            ResidualBlock(N, N),
            ResidualBlockWithStride(N, N, stride=2),
            ResidualBlock(N, N),
            conv3x3(N, N, stride=2),
            AttentionBlock(N),
        )

        # Synthesis transform (latent -> 3-channel image).
        self.g_s = nn.Sequential(
            AttentionBlock(N),
            ResidualBlock(N, N),
            ResidualBlockUpsample(N, N, 2),
            ResidualBlock(N, N),
            ResidualBlockUpsample(N, N, 2),
            AttentionBlock(N),
            ResidualBlock(N, N),
            ResidualBlockUpsample(N, N, 2),
            ResidualBlock(N, N),
            subpel_conv3x3(N, 3, 2),
        )

        # Hyper-analysis with per-layer widths taken from deps[0:5].
        self.h_a = nn.Sequential(
            conv3x3(N, deps[0]),
            nn.LeakyReLU(inplace=True),
            conv3x3(deps[0], deps[1]),
            nn.LeakyReLU(inplace=True),
            conv3x3(deps[1], deps[2], stride=2),
            nn.LeakyReLU(inplace=True),
            conv3x3(deps[2], deps[3]),
            nn.LeakyReLU(inplace=True),
            conv3x3(deps[3], deps[4], stride=2),
        )

        # Hyper-synthesis with per-layer widths taken from deps[5:10].
        self.h_s = nn.Sequential(
            conv3x3(deps[4], deps[5]),
            nn.LeakyReLU(inplace=True),
            subpel_conv3x3(deps[5], deps[6], 2),
            nn.LeakyReLU(inplace=True),
            conv3x3(deps[6], deps[7]),
            nn.LeakyReLU(inplace=True),
            subpel_conv3x3(deps[7], deps[8], 2),
            nn.LeakyReLU(inplace=True),
            conv3x3(deps[8], deps[9]),
        )

        # Input is the h_s output (deps[9] channels) concatenated with the
        # context features (2N).  The original hard-coded N*12//3 (== 4N),
        # which only matches the default deps[9] == 2N; derive it from
        # deps[9] so custom widths work too (identical for default deps).
        self.entropy_parameters = nn.Sequential(
            nn.Conv2d(deps[9] + 2 * N, N * 10 // 3, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(N * 10 // 3, N * 8 // 3, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(N * 8 // 3, N * 3 * K, 1),
        )

        # 5x5 masked convolution: causal spatial context over y_hat.
        self.context_prediction = MaskedConv2d(
            N, 2 * N, kernel_size=5, padding=2, stride=1
        )

        self.gaussian_conditional = GaussianMixtureConditional(K=K)
        self.N = int(N)

    def forward(self, x):
        """Return decoded image and latent/hyper-latent likelihoods."""
        y = self.g_a(x)
        z = self.h_a(y)
        z_hat, z_likelihoods = self.entropy_bottleneck(z)
        params = self.h_s(z_hat)

        # Additive-noise proxy while training, hard rounding at eval time.
        y_hat = self.gaussian_conditional.quantize(
            y, "noise" if self.training else "dequantize"
        )
        ctx_params = self.context_prediction(y_hat)
        gaussian_params = self.entropy_parameters(
            torch.cat((params, ctx_params), dim=1)
        )
        scales_hat, means_hat, weight = gaussian_params.chunk(3, 1)
        # (B, K*C, H, W) -> (B, K, C, H, W) so softmax normalizes the mixture
        # weights across the K components, then flatten back.
        weight = torch.reshape(weight, (weight.size(0), self.K, weight.size(1) // self.K, weight.size(2), weight.size(3)))
        weight = nn.functional.softmax(weight, dim=1)
        weight = torch.reshape(weight, (weight.size(0), weight.size(1) * weight.size(2), weight.size(3), weight.size(4)))
        _, y_likelihoods = self.gaussian_conditional(y, scales_hat, means=means_hat, weights=weight)
        x_hat = self.g_s(y_hat)

        return {
            "x_hat": x_hat,
            "likelihoods": {"y": y_likelihoods, "z": z_likelihoods},
        }

    def load_state_dict(self, state_dict):
        # Resize the conditional's registered CDF buffers to match the
        # checkpoint before delegating to the strict load.
        update_registered_buffers(
            self.gaussian_conditional,
            "gaussian_conditional",
            ["_quantized_cdf", "_offset", "_cdf_length", "scale_table"],
            state_dict,
        )
        super().load_state_dict(state_dict)

    @classmethod
    def from_state_dict(cls, state_dict, deps, infer=False):
        """Return a new model instance from `state_dict`.

        With ``infer=False``, checkpoint entries whose name or shape do not
        match the freshly built model are dropped instead of raising.
        """
        N = state_dict["g_a.0.conv1.weight"].size(0)
        net = cls(N, deps)

        if not infer:
            ori_state_dict = net.state_dict()
            cnt = 0
            for k in ori_state_dict:
                if k in state_dict and state_dict[k].shape == ori_state_dict[k].shape:
                    ori_state_dict[k] = state_dict[k]
                    print('loaded', k)
                else:
                    print('dropped', k)
                    cnt += 1
            print(f'dropped: {cnt}/{len(state_dict)}')
            state_dict = ori_state_dict
        net.load_state_dict(state_dict)
        return net


class Cheng2020_GMM_jinming_CM(CompressionModel):
    """Cheng2020 attention model with a K-component Gaussian-mixture
    conditional, configurable channel widths, and actual entropy-coding
    support (:meth:`compress` / :meth:`decompress`).

    Args:
        N (int): base channel count for ``g_a`` / ``g_s`` / context model
        deps (list[int] or None): channel widths — ``deps[0:5]`` for the
            ``h_a`` convs, ``deps[5:10]`` for the ``h_s`` layers and
            ``deps[10:13]`` for the entropy-parameters 1x1 convs; ``None``
            selects the standard widths.
        K (int): number of Gaussian mixture components
    """

    def __init__(self, N, deps=None, K=3, **kwargs):
        # `deps` defaults to None (consistent with Cheng2020_GMM_jinmingRR)
        # so the standard widths no longer require passing None explicitly.
        if deps is None:
            deps = [N for _ in range(7)] + [N*3//2, N*3//2, N*2, N*10//3, N*8//3, N*3*K]
        super().__init__(entropy_bottleneck_channels=deps[4], **kwargs)
        print(deps)
        self.deps = deps

        self.K = K

        # Analysis transform (image -> latent), with attention blocks.
        self.g_a = nn.Sequential(
            ResidualBlockWithStride(3, N, stride=2),
            ResidualBlock(N, N),
            ResidualBlockWithStride(N, N, stride=2),
            AttentionBlock(N),
            ResidualBlock(N, N),
            ResidualBlockWithStride(N, N, stride=2),
            ResidualBlock(N, N),
            conv3x3(N, N, stride=2),
            AttentionBlock(N),
        )

        # Synthesis transform (latent -> 3-channel image).
        self.g_s = nn.Sequential(
            AttentionBlock(N),
            ResidualBlock(N, N),
            ResidualBlockUpsample(N, N, 2),
            ResidualBlock(N, N),
            ResidualBlockUpsample(N, N, 2),
            AttentionBlock(N),
            ResidualBlock(N, N),
            ResidualBlockUpsample(N, N, 2),
            ResidualBlock(N, N),
            subpel_conv3x3(N, 3, 2),
            # subpel_conv3x3(N, 3*3*K, 2),
        )

        # Hyper-analysis with per-layer widths taken from deps[0:5].
        self.h_a = nn.Sequential(
            conv3x3(N, deps[0]),
            nn.LeakyReLU(inplace=True),
            conv3x3(deps[0], deps[1]),
            nn.LeakyReLU(inplace=True),
            conv3x3(deps[1], deps[2], stride=2),
            nn.LeakyReLU(inplace=True),
            conv3x3(deps[2], deps[3]),
            nn.LeakyReLU(inplace=True),
            conv3x3(deps[3], deps[4], stride=2),
        )

        # Hyper-synthesis with per-layer widths taken from deps[5:10].
        self.h_s = nn.Sequential(
            conv3x3(deps[4], deps[5]),
            nn.LeakyReLU(inplace=True),
            subpel_conv3x3(deps[5], deps[6], 2),
            nn.LeakyReLU(inplace=True),
            conv3x3(deps[6], deps[7]),
            nn.LeakyReLU(inplace=True),
            subpel_conv3x3(deps[7], deps[8], 2),
            nn.LeakyReLU(inplace=True),
            conv3x3(deps[8], deps[9]),
        )

        # Fuses the h_s output (deps[9] channels) with the context features
        # (2N) into K scales, K means and K mixture weights per channel.
        self.entropy_parameters = nn.Sequential(
            nn.Conv2d(N * 2 + deps[9], deps[10], 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(deps[10], deps[11], 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(deps[11], deps[12], 1),
        )

        # 5x5 masked convolution: causal spatial context over y_hat.
        self.context_prediction = MaskedConv2d(
            N, 2 * N, kernel_size=5, padding=2, stride=1
        )

        self.gaussian_conditional = GaussianMixtureConditional(K=K)
        self.N = int(N)

    def forward(self, x):
        """Return decoded image and latent/hyper-latent likelihoods."""
        y = self.g_a(x)
        z = self.h_a(y)
        z_hat, z_likelihoods = self.entropy_bottleneck(z)
        params = self.h_s(z_hat)

        # Additive-noise proxy while training, hard rounding at eval time.
        y_hat = self.gaussian_conditional.quantize(
            y, "noise" if self.training else "dequantize"
        )
        ctx_params = self.context_prediction(y_hat)
        gaussian_params = self.entropy_parameters(
            torch.cat((params, ctx_params), dim=1)
        )
        scales_hat, means_hat, weight = gaussian_params.chunk(3, 1)
        # (B, K*C, H, W) -> (B, K, C, H, W) so softmax normalizes the mixture
        # weights across the K components, then flatten back.
        weight = torch.reshape(weight, (weight.size(0), self.K, weight.size(1) // self.K, weight.size(2), weight.size(3)))
        weight = nn.functional.softmax(weight, dim=1)
        weight = torch.reshape(weight, (weight.size(0), weight.size(1) * weight.size(2), weight.size(3), weight.size(4)))
        _, y_likelihoods = self.gaussian_conditional(y, scales_hat, means=means_hat, weights=weight)
        x_hat = self.g_s(y_hat)

        return {
            "x_hat": x_hat,
            "likelihoods": {"y": y_likelihoods, "z": z_likelihoods},
        }

    def load_state_dict(self, state_dict):
        # Resize the conditional's registered CDF buffers to match the
        # checkpoint before delegating to the strict load.
        update_registered_buffers(
            self.gaussian_conditional,
            "gaussian_conditional",
            ["_quantized_cdf", "_offset", "_cdf_length", "scale_table"],
            state_dict,
        )
        super().load_state_dict(state_dict)

    @classmethod
    def from_state_dict(cls, state_dict, deps, infer=False):
        """Return a new model instance from `state_dict`.

        With ``infer=False``, checkpoint entries whose name or shape do not
        match the freshly built model are dropped instead of raising.
        """
        N = state_dict["g_a.0.conv1.weight"].size(0)
        net = cls(N, deps)

        if not infer:
            ori_state_dict = net.state_dict()
            cnt = 0
            for k in ori_state_dict:
                if k in state_dict and state_dict[k].shape == ori_state_dict[k].shape:
                    ori_state_dict[k] = state_dict[k]
                    print('loaded', k)
                else:
                    print('dropped', k)
                    cnt += 1
            print(f'dropped: {cnt}/{len(state_dict)}')
            state_dict = ori_state_dict
        net.load_state_dict(state_dict)
        return net

    def compress(self, x):
        """Entropy-code an image batch; returns bitstrings and the z shape."""
        y = self.g_a(x)
        z = self.h_a(y)

        z_strings = self.entropy_bottleneck.compress(z)
        # Round-trip z so the encoder conditions on exactly the decoder's z_hat.
        z_hat = self.entropy_bottleneck.decompress(z_strings, z.size()[-2:])

        params = self.h_s(z_hat)

        padding = self.context_prediction.padding[0]
        # Pad so the 5x5 masked conv can be evaluated at every position.
        y_hat = F.pad(y, (padding, padding, padding, padding))

        y_strings = []
        for i in range(y.shape[0]):
            string = self._compress_ar(
                y_hat[i:i+1],
                params[i : i + 1],
                y.shape[2],
                y.shape[3])
            y_strings.append(string)
        # Kept as attributes for external inspection/debugging.
        self.y_hat = y_hat
        self.z_hat = z_hat

        # TODO: fix y_strings
        return {"strings": [y_strings, z_strings], "shape": z.size()[-2:]}

    @torch.no_grad()
    def _get_minmax(self, y):
        """Smallest symmetric integer bound covering y's value range (>= 1)."""
        minmax = torch.max(torch.abs(y.max()), torch.abs(y.min())).int().item()
        if minmax < 1:
            minmax = 1

        return minmax

    def _compress_ar(self, y_hat, params, height, width):
        """Autoregressively encode one padded latent sample into a bitstring."""
        kernel_size = self.context_prediction.kernel_size[0]
        padding = self.context_prediction.padding[0]

        # One scale/mean/weight per mixture component and latent channel.
        # (The original hard-coded the factor 3, which only matched K == 3.)
        # Allocate on y_hat's device so the slice assignments below also work
        # when the model runs on GPU.
        param_shape = [y_hat.shape[0], y_hat.shape[1] * self.K, y_hat.shape[2], y_hat.shape[3]]
        scales = torch.zeros(param_shape, device=y_hat.device)
        means = torch.zeros(param_shape, device=y_hat.device)
        weights = torch.zeros(param_shape, device=y_hat.device)

        # Warning, this is slow...
        # TODO: profile the calls to the bindings...
        # minmax = self._get_minmax(y_hat)
        minmax = 256  # fixed symbol range; the dynamic estimate is bypassed
        for h in range(height):
            for w in range(width):
                # Evaluate the 5x5 masked conv on a crop centered at (h, w).
                y_crop = y_hat[:, :, h : h + kernel_size, w : w + kernel_size]
                ctx_p = self.context_prediction(y_crop)[:, :, padding:padding+1, padding:padding+1]

                # 1x1 conv for the entropy parameters prediction network, so
                # we only keep the elements in the "center".
                p = params[:, :, h : h + 1, w : w + 1]
                gaussian_params = self.entropy_parameters(torch.cat((p, ctx_p), dim=1))
                scales_hat, means_hat, weight = gaussian_params.chunk(3, 1)
                # Normalize the K mixture weights with a softmax (see forward).
                weight = torch.reshape(weight, (weight.size(0), self.K, weight.size(1) // self.K, weight.size(2), weight.size(3)))
                weight = nn.functional.softmax(weight, dim=1)
                weight = torch.reshape(weight, (weight.size(0), weight.size(1) * weight.size(2), weight.size(3), weight.size(4)))
                scales[:, :, h + padding, w + padding] = scales_hat[:, :, 0, 0]
                means[:, :, h + padding, w + padding] = means_hat[:, :, 0, 0]
                weights[:, :, h + padding, w + padding] = weight[:, :, 0, 0]

                # Quantize in place so later context windows see decoded values.
                y_hat[:, :, h + padding, w + padding] = torch.round(y_crop[:, :, padding, padding])

        # Negative padding crops the borders back off.
        y_hat = F.pad(y_hat, (-padding, -padding, -padding, -padding))
        scales = F.pad(scales, (-padding, -padding, -padding, -padding))
        means = F.pad(means, (-padding, -padding, -padding, -padding))
        weights = F.pad(weights, (-padding, -padding, -padding, -padding))

        y_string = self.gaussian_conditional.compress(y_hat, scales, means, weights, minmax)

        return y_string

    def decompress(self, strings, shape):
        """Decode `[y_strings, z_strings]` back into an image batch."""
        assert isinstance(strings, list) and len(strings) == 2
        z_hat = self.entropy_bottleneck.decompress(strings[1], shape)
        # y is 4x larger than z per spatial dim (two sub-pixel stages in h_s).
        s = 4
        params = self.h_s(z_hat)

        padding = self.context_prediction.padding[0]
        y_height = z_hat.shape[2]*s
        y_width = z_hat.shape[3]*s

        y_hat = torch.zeros(
            (z_hat.size(0), self.N, y_height + 2 * padding, y_width + 2 * padding),
            device=z_hat.device,
        )
        for i, y_string in enumerate(strings[0]):
            self._decompress_ar(
                y_string,
                y_hat[i : i + 1],
                params[i : i + 1],
                y_height,
                y_width)
        y_hat = F.pad(y_hat, (-padding, -padding, -padding, -padding))
        x_hat = self.g_s(y_hat).clamp_(0, 1)
        return {"x_hat": x_hat}

    def _decompress_ar(self, y_string, y_hat, params, height, width):
        """Autoregressively decode one latent into `y_hat` (modified in place)."""
        minmax = 256  # must match the value used in _compress_ar
        kernel_size = self.context_prediction.kernel_size[0]
        padding = self.context_prediction.padding[0]

        # cdf = self.gaussian_conditional.quantized_cdf.tolist()
        # cdf_lengths = self.gaussian_conditional.cdf_length.tolist()
        # offsets = self.gaussian_conditional.offset.tolist()

        # decoder = RansDecoder()
        # decoder.set_stream(y_string)
        self.gaussian_conditional.set_stream(y_string)

        # Warning: this is slow due to the auto-regressive nature of the
        # decoding... See more recent publication where they use an
        # auto-regressive module on chunks of channels for faster decoding...
        for h in range(height):
            for w in range(width):
                # only perform the 5x5 convolution on a cropped tensor
                # centered in (h, w)
                y_crop = y_hat[:, :, h : h + kernel_size, w : w + kernel_size]
                ctx_p = self.context_prediction(y_crop)[:, :, padding:padding+1, padding:padding+1]
                # 1x1 conv for the entropy parameters prediction network, so
                # we only keep the elements in the "center"
                p = params[:, :, h : h + 1, w : w + 1]
                gaussian_params = self.entropy_parameters(torch.cat((p, ctx_p), dim=1))
                scales_hat, means_hat, weight = gaussian_params.chunk(3, 1)
                # Normalize the K mixture weights with a softmax (see forward).
                weight = torch.reshape(weight, (weight.size(0), self.K, weight.size(1) // self.K, weight.size(2), weight.size(3)))
                weight = nn.functional.softmax(weight, dim=1)
                weight = torch.reshape(weight, (weight.size(0), weight.size(1) * weight.size(2), weight.size(3), weight.size(4)))
                y_crop = self.gaussian_conditional.decompress(scales_hat, means_hat, weight, [y_crop.shape[0], y_crop.shape[1], 1, 1], minmax)

                hp = h + padding
                wp = w + padding
                y_hat[:, :, hp:hp+1, wp:wp+1] = y_crop