# Copyright 2020 InterDigital Communications, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
import torch.nn as nn

from compressai.layers import (
    AttentionBlock,
    ResidualBlock,
    ResidualBlockUpsample,
    ResidualBlockWithStride,
    MaskedConv2d,
    conv3x3,
    subpel_conv3x3,
)

from .priors import CompressionModel, JointAutoregressiveHierarchicalPriors
from .waseda import Cheng2020Anchor, cheng2020_lossless
from compressai.entropy_models import GaussianConditional, GaussianMixtureConditional, EntropyBottleneck


class Cheng2020AnchorEffi(Cheng2020Anchor):
    """Anchor model variant from `"Learned Image Compression with
    Discretized Gaussian Mixture Likelihoods and Attention Modules"
    <https://arxiv.org/abs/2001.01568>`_, by Zhengxue Cheng, Heming Sun, Masaru
    Takeuchi, Jiro Katto.

    Efficiency-oriented variant: the hyper-networks ``h_a``/``h_s`` and the
    entropy bottleneck run on a reduced channel count ``N2 = N * 3 // 4``
    instead of ``N``, shrinking the hyperprior branch while the main
    autoencoder (inherited from :class:`Cheng2020Anchor`) is unchanged.

    Args:
        N (int): Number of channels
    """

    def __init__(self, N=192, **kwargs):
        # Reduced channel count used throughout the hyperprior branch.
        N2 = N * 3 // 4
        super().__init__(N=N, **kwargs)
        # Replace the bottleneck created by the parent with one sized for N2,
        # since h_a below now emits N2-channel latents.
        self.entropy_bottleneck = EntropyBottleneck(N2)

        self.h_a = nn.Sequential(
            conv3x3(N, N2),
            nn.LeakyReLU(inplace=True),
            conv3x3(N2, N2),
            nn.LeakyReLU(inplace=True),
            conv3x3(N2, N2, stride=2),
            nn.LeakyReLU(inplace=True),
            conv3x3(N2, N2),
            nn.LeakyReLU(inplace=True),
            conv3x3(N2, N2, stride=2),
        )

        # h_s must still produce N * 2 channels (scales and means for the
        # N-channel latent y), so only the intermediate widths shrink.
        self.h_s = nn.Sequential(
            conv3x3(N2, N2),
            nn.LeakyReLU(inplace=True),
            subpel_conv3x3(N2, N2, 2),
            nn.LeakyReLU(inplace=True),
            conv3x3(N2, N2 * 3 // 2),
            nn.LeakyReLU(inplace=True),
            subpel_conv3x3(N2 * 3 // 2, N2 * 3 // 2, 2),
            nn.LeakyReLU(inplace=True),
            conv3x3(N2 * 3 // 2, N * 2),
        )

    @classmethod
    def from_state_dict(cls, state_dict, infer=False):
        """Return a new model instance from `state_dict`.

        Args:
            state_dict (dict): checkpoint to load.
            infer (bool): if True, load `state_dict` strictly; if False,
                copy only entries whose name and shape match this model and
                keep this model's fresh initialization for the rest.
        """
        # The output channel count of the first g_a conv determines N.
        N = state_dict["g_a.0.conv1.weight"].size(0)
        net = cls(N)
        if not infer:
            ori_state_dict = net.state_dict()
            cnt = 0
            for k in ori_state_dict:
                if k in state_dict and state_dict[k].shape == ori_state_dict[k].shape:
                    ori_state_dict[k] = state_dict[k]
                    print('loaded', k)
                else:
                    print('dropped', k)
                    cnt += 1
            # cnt counts this model's keys, so report against that total
            # (len(state_dict) would be the checkpoint's key count, which
            # can differ and makes the ratio misleading).
            print(f'dropped: {cnt}/{len(ori_state_dict)}')
            state_dict = ori_state_dict
        net.load_state_dict(state_dict)
        return net


class Cheng2020Attention(Cheng2020Anchor):
    """Self-attention model variant from `"Learned Image Compression with
    Discretized Gaussian Mixture Likelihoods and Attention Modules"
    <https://arxiv.org/abs/2001.01568>`_, by Zhengxue Cheng, Heming Sun, Masaru
    Takeuchi, Jiro Katto.

    Extends the anchor with self-attention blocks on top of the residual
    blocks with small convolutions (3x3 and 1x1) and sub-pixel up-sampling.

    Args:
        N (int): Number of channels
    """

    def __init__(self, N=192, **kwargs):
        super().__init__(N=N, **kwargs)

        # Analysis transform: three strided residual stages plus a final
        # strided conv, with attention after the second and last stage.
        encoder_layers = [
            ResidualBlockWithStride(3, N, stride=2),
            ResidualBlock(N, N),
            ResidualBlockWithStride(N, N, stride=2),
            AttentionBlock(N),
            ResidualBlock(N, N),
            ResidualBlockWithStride(N, N, stride=2),
            ResidualBlock(N, N),
            conv3x3(N, N, stride=2),
            AttentionBlock(N),
        ]
        self.g_a = nn.Sequential(*encoder_layers)

        # Synthesis transform: mirror of g_a, ending in a sub-pixel conv
        # back to 3 image channels.
        decoder_layers = [
            AttentionBlock(N),
            ResidualBlock(N, N),
            ResidualBlockUpsample(N, N, 2),
            ResidualBlock(N, N),
            ResidualBlockUpsample(N, N, 2),
            AttentionBlock(N),
            ResidualBlock(N, N),
            ResidualBlockUpsample(N, N, 2),
            ResidualBlock(N, N),
            subpel_conv3x3(N, 3, 2),
        ]
        self.g_s = nn.Sequential(*decoder_layers)


class Cheng2020_GMM_jinming_Effi(CompressionModel):
    """Cheng2020-style autoregressive model with a Gaussian *mixture*
    conditional entropy model (K components per latent channel) and a
    slimmed hyperprior branch running on ``N2 = N // 4`` channels.

    Args:
        N (int): Number of channels of the main autoencoder
        K (int): Number of Gaussian mixture components
    """

    def __init__(self, N, K=3, **kwargs):
        # Reduced channel count for the hyperprior branch.
        N2 = N // 4
        super().__init__(entropy_bottleneck_channels=N2, **kwargs)

        self.K = K

        self.g_a = nn.Sequential(
            ResidualBlockWithStride(3, N, stride=2),
            ResidualBlock(N, N),
            ResidualBlockWithStride(N, N, stride=2),
            AttentionBlock(N),
            ResidualBlock(N, N),
            ResidualBlockWithStride(N, N, stride=2),
            ResidualBlock(N, N),
            conv3x3(N, N, stride=2),
            AttentionBlock(N),
        )

        self.g_s = nn.Sequential(
            AttentionBlock(N),
            ResidualBlock(N, N),
            ResidualBlockUpsample(N, N, 2),
            ResidualBlock(N, N),
            ResidualBlockUpsample(N, N, 2),
            AttentionBlock(N),
            ResidualBlock(N, N),
            ResidualBlockUpsample(N, N, 2),
            ResidualBlock(N, N),
            subpel_conv3x3(N, 3, 2),
            # subpel_conv3x3(N, 3*3*K, 2),
        )

        self.h_a = nn.Sequential(
            conv3x3(N, N2),
            nn.LeakyReLU(inplace=True),
            conv3x3(N2, N2),
            nn.LeakyReLU(inplace=True),
            conv3x3(N2, N2, stride=2),
            nn.LeakyReLU(inplace=True),
            conv3x3(N2, N2),
            nn.LeakyReLU(inplace=True),
            conv3x3(N2, N2, stride=2),
        )

        # h_s still emits N * 2 channels (hyperprior parameters for the
        # N-channel latent); only the intermediate widths are reduced.
        self.h_s = nn.Sequential(
            conv3x3(N2, N2),
            nn.LeakyReLU(inplace=True),
            subpel_conv3x3(N2, N2, 2),
            nn.LeakyReLU(inplace=True),
            conv3x3(N2, N2 * 3 // 2),
            nn.LeakyReLU(inplace=True),
            subpel_conv3x3(N2 * 3 // 2, N2 * 3 // 2, 2),
            nn.LeakyReLU(inplace=True),
            conv3x3(N2 * 3 // 2, N * 2),
        )

        # Input: concat of h_s output (2N) and context prediction (2N).
        # Output: N * 3 * K channels = K (scale, mean, weight) triplets
        # per latent channel.
        self.entropy_parameters = nn.Sequential(
            nn.Conv2d(N * 12 // 3, N * 10 // 3, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(N * 10 // 3, N * 8 // 3, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(N * 8 // 3, N * 3 * K, 1),
        )

        self.context_prediction = MaskedConv2d(
            N, 2 * N, kernel_size=5, padding=2, stride=1
        )

        self.gaussian_conditional = GaussianMixtureConditional(K=K)
        self.N = int(N)

    def forward(self, x):
        """Run analysis, entropy modeling and synthesis; return the
        reconstruction and the y/z likelihoods for rate estimation."""
        y = self.g_a(x)
        z = self.h_a(y)
        z_hat, z_likelihoods = self.entropy_bottleneck(z)
        params = self.h_s(z_hat)

        # Additive-noise proxy during training, hard rounding at eval time.
        y_hat = self.gaussian_conditional.quantize(
            y, "noise" if self.training else "dequantize"
        )
        ctx_params = self.context_prediction(y_hat)
        gaussian_params = self.entropy_parameters(
            torch.cat((params, ctx_params), dim=1)
        )
        # Each chunk has N * K channels: K mixture components per channel.
        scales_hat, means_hat, weight = gaussian_params.chunk(3, 1)
        # Normalize mixture weights across the K components: expose the
        # component axis, softmax over it, then flatten back to (B, K*C, H, W).
        weight = torch.reshape(weight, (weight.size(0), self.K, weight.size(1) // self.K, weight.size(2), weight.size(3)))
        weight = nn.functional.softmax(weight, dim=1)
        weight = torch.reshape(weight, (weight.size(0), weight.size(1) * weight.size(2), weight.size(3), weight.size(4)))
        _, y_likelihoods = self.gaussian_conditional(y, scales_hat, means=means_hat, weights=weight)
        x_hat = self.g_s(y_hat)

        return {
            "x_hat": x_hat,
            "likelihoods": {"y": y_likelihoods, "z": z_likelihoods},
        }

    @classmethod
    def from_state_dict(cls, state_dict, infer=False):
        """Return a new model instance from `state_dict`.

        Args:
            state_dict (dict): checkpoint to load.
            infer (bool): if True, load `state_dict` strictly; if False,
                copy only entries whose name and shape match this model and
                keep this model's fresh initialization for the rest.
        """
        # The output channel count of the first g_a conv determines N.
        N = state_dict["g_a.0.conv1.weight"].size(0)
        net = cls(N)

        if not infer:
            ori_state_dict = net.state_dict()
            cnt = 0
            for k in ori_state_dict:
                if k in state_dict and state_dict[k].shape == ori_state_dict[k].shape:
                    ori_state_dict[k] = state_dict[k]
                    print('loaded', k)
                else:
                    print('dropped', k)
                    cnt += 1
            # cnt counts this model's keys, so report against that total
            # (len(state_dict) would be the checkpoint's key count, which
            # can differ and makes the ratio misleading).
            print(f'dropped: {cnt}/{len(ori_state_dict)}')
            state_dict = ori_state_dict
        net.load_state_dict(state_dict)
        return net