# Copyright 2020 InterDigital Communications, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
import torch.nn as nn

from compressai.layers import MaskedConv2d
from .priors_gd import GateDecorator

from .priors import CompressionModel
from compressai.entropy_models import GaussianConditional, GaussianMixtureConditional
from compressai.layers.gdn import GDN
from .utils import update_registered_buffers


def conv3x3(in_ch: int, out_ch: int, stride: int = 1) -> nn.Module:
    """Return a 3x3 convolution that preserves spatial size (padding=1)."""
    return nn.Conv2d(
        in_ch,
        out_ch,
        kernel_size=3,
        stride=stride,
        padding=1,
    )


def subpel_conv3x3(in_ch: int, out_ch: int, r: int = 1) -> nn.Sequential:
    """3x3 convolution followed by a pixel shuffle: upsamples by factor ``r``."""
    # The conv emits r*r copies of each output channel; PixelShuffle folds
    # them into an r-times larger spatial grid.
    conv = nn.Conv2d(in_ch, out_ch * r * r, kernel_size=3, padding=1)
    return nn.Sequential(conv, nn.PixelShuffle(r))


def conv1x1(in_ch: int, out_ch: int, stride: int = 1) -> nn.Module:
    """Pointwise (1x1) convolution, optionally strided."""
    return nn.Conv2d(in_ch, out_ch, stride=stride, kernel_size=1)


class ResidualBlockWithStrideGD(nn.Module):
    """Residual block whose first convolution downsamples by ``stride``.

    Every convolution output passes through a :class:`GateDecorator` so
    channels can be pruned; the gate groups are exposed via ``gds``.

    Args:
        in_ch (int): number of input channels
        out_ch (int): number of output channels
        stride (int): stride value (default: 2)
    """

    def __init__(self, in_ch: int, out_ch: int, stride: int = 2):
        super().__init__()
        self.conv1 = conv3x3(in_ch, out_ch, stride=stride)
        self.gd1 = GateDecorator(out_ch)
        self.leaky_relu = nn.LeakyReLU(inplace=True)
        self.conv2 = conv3x3(out_ch, out_ch)
        self.gd2 = GateDecorator(out_ch)
        self.gdn = GDN(out_ch)
        self.skip = conv1x1(in_ch, out_ch, stride=stride)
        # Gate groups consumed by the pruning driver; gd2 also gates the
        # skip branch, so both residual terms share a single mask.
        self.gds = [[self.gd1], [self.gd2]]
        self.in_ch = in_ch
        self.out_ch = out_ch

    def forward(self, x):
        # Cache the spatial size and the number of live (non-zero) input
        # channels at pixel (0, 0); both are read later by flops().
        self.sz = x.shape[2:]
        self.in_ch = torch.sum(x[0, :, 0, 0] != 0.)

        out = self.conv1(x)
        out = self.leaky_relu(self.gd1(out))
        out = self.gdn(self.gd2(self.conv2(out)))

        shortcut = self.gd2(self.skip(x))
        return out + shortcut

    def flops(self):
        """Approximate multiply count, weighting each conv by its surviving
        (mask > 0) channels and the live input channels seen in forward()."""
        h, w = self.sz
        c1 = torch.sum(self.gd1.mask > 0)
        c2 = torch.sum(self.gd2.mask > 0)
        total = 3 * 3 * self.in_ch * c1 * h * w   # conv1
        total += 3 * 3 * c1 * c2 * h * w          # conv2
        total += 1 * 1 * self.in_ch * c2 * h * w  # skip (1x1)
        return total

class ResidualBlockGD(nn.Module):
    """Residual block of two 3x3 convolutions, each gated for pruning.

    Args:
        in_ch (int): number of input channels
        out_ch (int): number of output channels
    """

    def __init__(self, in_ch: int, out_ch: int):
        super().__init__()
        self.conv1 = conv3x3(in_ch, out_ch)
        self.gd1 = GateDecorator(out_ch)
        self.leaky_relu = nn.LeakyReLU(inplace=True)
        self.conv2 = conv3x3(out_ch, out_ch)
        self.gd2 = GateDecorator(out_ch)
        self.skip = conv1x1(in_ch, out_ch)
        # gd2 gates both the main and the skip branch: one shared mask.
        self.gds = [[self.gd1], [self.gd2]]
        self.in_ch = in_ch
        self.out_ch = out_ch

    def forward(self, x):
        # Cache spatial size and live input-channel count for flops().
        self.sz = x.shape[2:]
        self.in_ch = torch.sum(x[0, :, 0, 0] != 0.)

        out = self.leaky_relu(self.gd1(self.conv1(x)))
        out = self.leaky_relu(self.gd2(self.conv2(out)))

        shortcut = self.gd2(self.skip(x))
        return out + shortcut

    def flops(self):
        """Approximate multiply count given the current pruning masks."""
        h, w = self.sz
        c1 = torch.sum(self.gd1.mask > 0)
        c2 = torch.sum(self.gd2.mask > 0)
        total = 3 * 3 * self.in_ch * c1 * h * w   # conv1
        total += 3 * 3 * c1 * c2 * h * w          # conv2
        total += 1 * 1 * self.in_ch * c2 * h * w  # skip (1x1)
        return total


class AttentionBlockGD(nn.Module):
    """Self attention block with GateDecorator pruning gates.

    Simplified variant from `"Learned Image Compression with
    Discretized Gaussian Mixture Likelihoods and Attention Modules"
    <https://arxiv.org/abs/2001.01568>`_, by Zhengxue Cheng, Heming Sun, Masaru
    Takeuchi, Jiro Katto.

    Output is ``conv_a(x) * sigmoid(conv_b(x)) + x``.

    Args:
        N (int): Number of channels
    """

    def __init__(self, N: int):
        super().__init__()

        class ResidualUnit(nn.Module):
            """Simple residual unit: 1x1 -> 3x3 -> 1x1 bottleneck with an
            identity shortcut; each conv is followed by a GateDecorator."""

            def __init__(self):
                super().__init__()
                # NOTE: the GateDecorator positions (indices 1, 4, 7 in this
                # Sequential) are hard-coded in flops() and in the gds table
                # below -- keep the layout in sync if it ever changes.
                self.conv = nn.Sequential(
                    conv1x1(N, N // 2),
                    GateDecorator(N // 2),
                    nn.ReLU(inplace=True),
                    conv3x3(N // 2, N // 2),
                    GateDecorator(N // 2),
                    nn.ReLU(inplace=True),
                    conv1x1(N // 2, N),
                    GateDecorator(N),
                )
                self.relu = nn.ReLU(inplace=True)

            def forward(self, x):
                # Cache spatial size and the live (non-zero) input channel
                # count at pixel (0, 0); both are consumed by flops().
                self.sz = x.shape[2:]
                self.in_ch = torch.sum(x[0, :, 0, 0] != 0.)
                identity = x
                out = self.conv(x)
                out += identity
                out = self.relu(out)
                return out

            def flops(self):
                # conv[1], conv[4], conv[7] are the GateDecorators after the
                # 1x1, 3x3 and final 1x1 convolutions respectively.
                flops = 1*1*self.in_ch*torch.sum(self.conv[1].mask>0.)*self.sz[0]*self.sz[1]
                flops += 3*3*torch.sum(self.conv[1].mask>0.)*torch.sum(self.conv[4].mask>0.)*self.sz[0]*self.sz[1]
                flops += 1*1*torch.sum(self.conv[4].mask>0.)*torch.sum(self.conv[7].mask>0.)*self.sz[0]*self.sz[1]
                return flops

        a_units = []
        a_units.append(ResidualUnit())
        a_units.append(ResidualUnit())
        a_units.append(ResidualUnit())

        self.conv_a = nn.Sequential(*a_units)

        b_units = []
        b_units.append(ResidualUnit())
        b_units.append(ResidualUnit())
        b_units.append(ResidualUnit())
        b_units.append(conv1x1(N, N))
        b_units.append(GateDecorator(N))
        self.conv_b = nn.Sequential(*b_units)
        # Gate groups for the pruning driver. The first group couples every
        # unit's output gate (conv[-1]) with the final 1x1 conv's gate: those
        # channels all flow into residual sums and the a*sigmoid(b) product,
        # so presumably their masks must agree -- confirm against the pruning
        # logic. The remaining groups are the independent bottleneck gates.
        self.gds = [
            [a_units[0].conv[-1], a_units[1].conv[-1], a_units[2].conv[-1], b_units[0].conv[-1], b_units[1].conv[-1], b_units[2].conv[-1], b_units[4]],
            [a_units[0].conv[1]],
            [a_units[0].conv[4]],
            [a_units[1].conv[1]],
            [a_units[1].conv[4]],
            [a_units[2].conv[1]],
            [a_units[2].conv[4]],
            [b_units[0].conv[1]],
            [b_units[0].conv[4]],
            [b_units[1].conv[1]],
            [b_units[1].conv[4]],
            [b_units[2].conv[1]],
            [b_units[2].conv[4]],
            ]

    def forward(self, x):
        # Cache spatial size / live input channels for flops().
        self.sz = x.shape[2:]
        self.in_ch = torch.sum(x[0, :, 0, 0]!=0.)
        identity = x
        a = self.conv_a(x)
        b = self.conv_b(x)
        out = a * torch.sigmoid(b)
        out += identity
        return out

    def flops(self):
        """Approximate multiply count: sum of all residual units plus the
        final 1x1 conv of the b branch (conv_b[3], gated by conv_b[4])."""
        flops = self.conv_a[0].flops()
        flops += self.conv_a[1].flops()
        flops += self.conv_a[2].flops()
        flops += self.conv_b[0].flops()
        flops += self.conv_b[1].flops()
        flops += self.conv_b[2].flops()
        flops += 1*1*self.in_ch*torch.sum(self.conv_b[4].mask>0.)*self.sz[0]*self.sz[1]
        return flops


class ResidualBlockUpsampleGD(nn.Module):
    """Residual block that upsamples by ``upsample`` with sub-pixel convs.

    Args:
        in_ch (int): number of input channels
        out_ch (int): number of output channels
        upsample (int): upsampling factor (default: 2)
    """

    def __init__(self, in_ch: int, out_ch: int, upsample: int = 2):
        super().__init__()
        self.subpel_conv = subpel_conv3x3(in_ch, out_ch, upsample)
        self.subpel_gd = GateDecorator(out_ch)
        self.leaky_relu = nn.LeakyReLU(inplace=True)
        self.conv = conv3x3(out_ch, out_ch)
        self.gd = GateDecorator(out_ch)
        self.igdn = GDN(out_ch, inverse=True)
        self.upsample = subpel_conv3x3(in_ch, out_ch, upsample)
        # gd gates both the main branch and the upsampled skip branch.
        self.gds = [[self.subpel_gd], [self.gd]]
        self.up_ch = upsample
        self.in_ch = in_ch

    def forward(self, x):
        # Cache spatial size and live input-channel count for flops().
        self.sz = x.shape[2:]
        self.in_ch = torch.sum(x[0, :, 0, 0] != 0.)

        out = self.subpel_conv(x)
        out = self.leaky_relu(self.subpel_gd(out))
        out = self.igdn(self.gd(self.conv(out)))

        shortcut = self.gd(self.upsample(x))
        return out + shortcut

    def flops(self):
        """Approximate multiply count given the current pruning masks."""
        h, w = self.sz
        r = self.up_ch
        c_sub = torch.sum(self.subpel_gd.mask > 0.)
        c_out = torch.sum(self.gd.mask > 0.)
        # The sub-pixel conv emits r*r channel copies before PixelShuffle.
        total = 3 * 3 * self.in_ch * r * r * c_sub * h * w
        # conv runs at the upsampled resolution (h*r, w*r).
        total += 3 * 3 * c_sub * c_out * (h * r) * (w * r)
        # Skip branch: the second sub-pixel conv.
        total += 3 * 3 * self.in_ch * r * r * c_out * h * w
        return total


class Cheng2020_GMM_GD(CompressionModel):
    """Cheng2020-style model (attention + Gaussian mixture entropy model)
    with GateDecorator channel-pruning gates after (almost) every conv.

    ``self.gds`` collects the gates as a list of groups; gates placed in the
    same group act on channels that must be pruned consistently (e.g. both
    ends of a residual/skip connection) -- presumably the pruning driver
    treats each group as one unit; confirm against that code.

    Args:
        N (int): base channel count
        K (int): number of Gaussian mixture components (default: 3)
        sparse_lambda (float): sparsity weight read by the pruning loss
    """

    def __init__(self, N, K=3, sparse_lambda=0.001, **kwargs):
        super().__init__(N, **kwargs)
        self.K = K
        self.N = N
        self.sparse_lambda = sparse_lambda
        self.gds = []
        # Latent spatial size; set on the first forward(), required by flops().
        self.y_sz = None

        # ---- analysis transform g_a ----
        g_a_list = []
        g_a_list.append(ResidualBlockWithStrideGD(3, N, stride=2))
        self.gds += g_a_list[-1].gds
        g_a_list.append(ResidualBlockGD(N, N))
        self.gds += g_a_list[-1].gds
        g_a_list.append(ResidualBlockWithStrideGD(N, N, stride=2))
        self.gds += g_a_list[-1].gds
        g_a_list.append(AttentionBlockGD(N))
        # The attention block's first gate group shares channels with the
        # previous block's output gates, so merge it into the last group.
        self.gds[-1] += g_a_list[-1].gds[0]
        self.gds += g_a_list[-1].gds[1:]
        g_a_list.append(ResidualBlockGD(N, N))
        self.gds += g_a_list[-1].gds
        g_a_list.append(ResidualBlockWithStrideGD(N, N, stride=2))
        self.gds += g_a_list[-1].gds
        g_a_list.append(ResidualBlockGD(N, N))
        self.gds += g_a_list[-1].gds
        g_a_list.append(conv3x3(N, N, stride=2))
        g_a_list.append(GateDecorator(N))
        g_a_list.append(AttentionBlockGD(N))
        self.gds += g_a_list[-1].gds[1:]
        # Group the last conv's gate with the final attention block's output
        # gates: together they decide which latent channels survive.
        self.gds.append(g_a_list[-1].gds[0] + [g_a_list[-2]])
        self.g_a = nn.Sequential(*g_a_list)

        # NOTE(review): [m]*3 repeats the SAME GateDecorator instance, so the
        # scale/mean/weight branches in forward() share one latent mask.
        # Presumably intentional (a pruned latent channel must be dropped
        # consistently in all three, and flops() also assumes one mask) --
        # confirm before "fixing".
        latentGDs = [GateDecorator(N)]*3
        self.gds[-1] += latentGDs
        self.latentGDs = nn.Sequential(*latentGDs)

        # ---- synthesis transform g_s ----
        g_s_list = []
        g_s_list.append(AttentionBlockGD(N))
        self.gds[-1] += g_s_list[-1].gds[0]
        self.gds += g_s_list[-1].gds[1:]
        g_s_list.append(ResidualBlockGD(N, N))
        self.gds += g_s_list[-1].gds
        g_s_list.append(ResidualBlockUpsampleGD(N, N, 2))
        self.gds += g_s_list[-1].gds
        g_s_list.append(ResidualBlockGD(N, N))
        self.gds += g_s_list[-1].gds
        g_s_list.append(ResidualBlockUpsampleGD(N, N, 2))
        self.gds += g_s_list[-1].gds
        g_s_list.append(AttentionBlockGD(N))
        self.gds[-1] += g_s_list[-1].gds[0]
        self.gds += g_s_list[-1].gds[1:]
        g_s_list.append(ResidualBlockGD(N, N))
        self.gds += g_s_list[-1].gds
        g_s_list.append(ResidualBlockUpsampleGD(N, N, 2))
        self.gds += g_s_list[-1].gds
        g_s_list.append(ResidualBlockGD(N, N))
        self.gds += g_s_list[-1].gds
        g_s_list.append(subpel_conv3x3(N, 3, 2))
        self.g_s = nn.Sequential(*g_s_list)

        # ---- hyper-analysis h_a (gates at Sequential indices 1,4,7,10,13) ----
        self.h_a = nn.Sequential(
            conv3x3(N, N),
            GateDecorator(N),
            nn.LeakyReLU(inplace=True),
            conv3x3(N, N),
            GateDecorator(N),
            nn.LeakyReLU(inplace=True),
            conv3x3(N, N, stride=2),
            GateDecorator(N),
            nn.LeakyReLU(inplace=True),
            conv3x3(N, N),
            GateDecorator(N),
            nn.LeakyReLU(inplace=True),
            conv3x3(N, N, stride=2),
            GateDecorator(N),
        )
        self.gds += [
            [self.h_a[1]],
            [self.h_a[4]],
            [self.h_a[7]],
            [self.h_a[10]],
            [self.h_a[13]],
        ]

        # ---- hyper-synthesis h_s (same gate-index layout as h_a) ----
        self.h_s = nn.Sequential(
            conv3x3(N, N),
            GateDecorator(N),
            nn.LeakyReLU(inplace=True),
            subpel_conv3x3(N, N, 2),
            GateDecorator(N),
            nn.LeakyReLU(inplace=True),
            conv3x3(N, N * 3 // 2),
            GateDecorator(N * 3 // 2),
            nn.LeakyReLU(inplace=True),
            subpel_conv3x3(N * 3 // 2, N * 3 // 2, 2),
            GateDecorator(N * 3 // 2),
            nn.LeakyReLU(inplace=True),
            conv3x3(N * 3 // 2, N * 2),
            GateDecorator(N * 2),
        )
        self.gds += [
            [self.h_s[1]],
            [self.h_s[4]],
            [self.h_s[7]],
            [self.h_s[10]],
            [self.h_s[13]],
        ]

        # ---- entropy parameter network (1x1 convs) ----
        self.entropy_parameters = nn.Sequential(
            nn.Conv2d(N * 12 // 3, N * 10 // 3, 1),
            GateDecorator(N * 10 // 3),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(N * 10 // 3, N * 8 // 3, 1),
            GateDecorator(N * 8 // 3),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(N * 8 // 3, N * 3 * K, 1),
        )
        self.gds += [
            [self.entropy_parameters[1]],
            [self.entropy_parameters[4]],
        ]

        # ---- autoregressive context model over the quantized latent ----
        self.context_prediction = MaskedConv2d(
            N, 2 * N, kernel_size=5, padding=2, stride=1
        )
        self.context_gd = GateDecorator(2 * N)
        self.gds += [[self.context_gd]]

        self.gaussian_conditional = GaussianMixtureConditional(K=K)

    def forward(self, x):
        """Encode/decode ``x``; return ``x_hat`` and the y/z likelihoods."""
        y = self.g_a(x)
        self.y_sz = y.shape[2:]
        z = self.h_a(y)
        z_hat, z_likelihoods = self.entropy_bottleneck(z)
        params = self.h_s(z_hat)

        y_hat = self.gaussian_conditional.quantize(
            y, "noise" if self.training else "dequantize"
        )
        ctx_params = self.context_prediction(y_hat)

        ctx_params = self.context_gd(ctx_params)

        gaussian_params = self.entropy_parameters(
            torch.cat((params, ctx_params), dim=1)
        )
        scales_hat, means_hat, weight = gaussian_params.chunk(3, 1)

        # Prune the mean / scale / mixture-weight channels with the latent
        # gates (the three latentGDs alias one gate -- see __init__); each
        # chunk holds K groups of N channels, gated group by group.
        for k in range(self.K):
            scales_hat[:, (k*self.N):((k+1)*self.N)] = self.latentGDs[0](scales_hat[:, (k*self.N):((k+1)*self.N)])
            means_hat[:, (k*self.N):((k+1)*self.N)] = self.latentGDs[1](means_hat[:, (k*self.N):((k+1)*self.N)])
            weight[:, (k*self.N):((k+1)*self.N)] = self.latentGDs[2](weight[:, (k*self.N):((k+1)*self.N)])

        # Softmax across the K mixture components (per channel/position).
        weight = torch.reshape(weight,(weight.size(0), self.K, weight.size(1)//self.K, weight.size(2), weight.size(3)))
        weight = nn.functional.softmax(weight,dim=1)
        weight = torch.reshape(weight,(weight.size(0), weight.size(1)*weight.size(2), weight.size(3), weight.size(4)))
        _, y_likelihoods = self.gaussian_conditional(y, scales_hat, means=means_hat, weights=weight)
        x_hat = self.g_s(y_hat)

        return {
            "x_hat": x_hat,
            "likelihoods": {"y": y_likelihoods, "z": z_likelihoods},
        }

    def load_state_dict(self, state_dict, ckpt=False):
        """Load weights.

        With ``ckpt=True`` the dict already matches this gated layout and is
        loaded directly. Otherwise ``state_dict`` is assumed to come from the
        gate-free model: keys present under the same name are copied, and
        KEY_TABLE remaps the keys whose Sequential indices shifted when the
        GateDecorator layers were inserted.
        """
        update_registered_buffers(
            self.gaussian_conditional,
            "gaussian_conditional",
            ["_quantized_cdf", "_offset", "_cdf_length", "scale_table"],
            state_dict,
        )
        if ckpt:
            super().load_state_dict(state_dict)
            return
        ori_state_dict = self.state_dict()
        for k in ori_state_dict:
            if k in state_dict:
                ori_state_dict[k] = state_dict[k]
        for k, v in self.KEY_TABLE.items():
            ori_state_dict[v] = state_dict[k]
        super().load_state_dict(ori_state_dict)

    @classmethod
    def from_state_dict(cls, state_dict, infer=False, K=3, sparse_lambda=0.001):
        """Return a new model instance from `state_dict`.

        N is inferred from the first conv's output channels. NOTE(review):
        the ``infer`` argument is currently unused.
        """
        N = state_dict["g_a.0.conv1.weight"].size(0)
        net = cls(N, K, sparse_lambda)
        net.load_state_dict(state_dict)
        return net

    def flops(self):
        """Approximate multiply count of the pruned network.

        Uses the per-block flops() caches and the gate masks; valid only
        after at least one forward() has populated the cached sizes.
        """
        if self.y_sz is None:
            print('please call flops() after at least one forward!')
            raise Exception()
        # g_a blocks 0..6, then the strided conv g_a[7] (gated by g_a[8])
        # evaluated at the final attention block's input size, then g_a[9].
        flops = self.g_a[0].flops()
        for i in range(1, 7):
            flops += self.g_a[i].flops()
        flops += 3*3*torch.sum(self.g_a[6].gds[-1][-1].mask>0.)*torch.sum(self.g_a[8].mask>0.)*self.g_a[9].sz[0]*self.g_a[9].sz[1]
        flops += self.g_a[9].flops()

        # g_s blocks 0..8, then the final (ungated) sub-pixel conv g_s[9].
        for i in range(9):
            flops += self.g_s[i].flops()
        flops += 3*3*torch.sum(self.g_s[8].gds[-1][-1].mask>0.)*self.g_s[9][0].out_channels*self.g_s[-2].sz[0]*self.g_s[-2].sz[1]

        # h_a convs; spatial size halves at each stride-2 stage.
        flops += 3*3*torch.sum(self.latentGDs[0].mask>0.)*torch.sum(self.h_a[1].mask>0.)*self.y_sz[0]*self.y_sz[1]
        flops += 3*3*torch.sum(self.h_a[1].mask>0.)*torch.sum(self.h_a[4].mask>0.)*self.y_sz[0]*self.y_sz[1]
        flops += 3*3*torch.sum(self.h_a[4].mask>0.)*torch.sum(self.h_a[7].mask>0.)*self.y_sz[0]//2*self.y_sz[1]//2
        flops += 3*3*torch.sum(self.h_a[7].mask>0.)*torch.sum(self.h_a[10].mask>0.)*self.y_sz[0]//4*self.y_sz[1]//4

        # h_s convs; spatial size doubles at each sub-pixel stage.
        flops += 3*3*torch.sum(self.h_a[10].mask>0.)*torch.sum(self.h_s[1].mask>0.)*self.y_sz[0]//4*self.y_sz[1]//4
        flops += 3*3*torch.sum(self.h_s[1].mask>0.)*torch.sum(self.h_s[4].mask>0.)*self.y_sz[0]//2*self.y_sz[1]//2
        flops += 3*3*torch.sum(self.h_s[4].mask>0.)*torch.sum(self.h_s[7].mask>0.)*self.y_sz[0]//2*self.y_sz[1]//2
        flops += 3*3*torch.sum(self.h_s[7].mask>0.)*torch.sum(self.h_s[10].mask>0.)*self.y_sz[0]*self.y_sz[1]
        flops += 3*3*torch.sum(self.h_s[10].mask>0.)*torch.sum(self.h_s[13].mask>0.)*self.y_sz[0]*self.y_sz[1]

        # entropy_parameters 1x1 convs (input: h_s output + gated context).
        flops += 1*1*(torch.sum(self.context_gd.mask>0.)+torch.sum(self.h_s[-1].mask>0.))*torch.sum(self.entropy_parameters[1].mask>0.)*self.y_sz[0]*self.y_sz[1]
        flops += 1*1*(torch.sum(self.entropy_parameters[1].mask>0.))*torch.sum(self.entropy_parameters[4].mask>0.)*self.y_sz[0]*self.y_sz[1]
        flops += 1*1*(torch.sum(self.entropy_parameters[4].mask>0.))* torch.sum(self.latentGDs[0].mask>0.)*3 *self.y_sz[0]*self.y_sz[1]

        # MaskedConv2d context model: 13 visible taps of the 5x5 kernel
        # (presumably from the causal mask -- confirm against MaskedConv2d).
        flops += 13*torch.sum(self.latentGDs[0].mask>0.)*torch.sum(self.context_gd.mask>0.)*self.y_sz[0]*self.y_sz[1]
        return flops

    # Maps parameter keys of the original gate-free Cheng2020 checkpoint to
    # their locations in this gated module hierarchy (inserted GateDecorator
    # layers shift the nn.Sequential indices). Consumed by load_state_dict().
    KEY_TABLE = {
        'g_a.3.conv_a.0.conv.2.weight': 'g_a.3.conv_a.0.conv.3.weight',
        'g_a.3.conv_a.0.conv.2.bias': 'g_a.3.conv_a.0.conv.3.bias',
        'g_a.3.conv_a.0.conv.4.weight': 'g_a.3.conv_a.0.conv.6.weight',
        'g_a.3.conv_a.0.conv.4.bias': 'g_a.3.conv_a.0.conv.6.bias',
        'g_a.3.conv_a.1.conv.2.weight': 'g_a.3.conv_a.1.conv.3.weight',
        'g_a.3.conv_a.1.conv.2.bias': 'g_a.3.conv_a.1.conv.3.bias',
        'g_a.3.conv_a.1.conv.4.weight': 'g_a.3.conv_a.1.conv.6.weight',
        'g_a.3.conv_a.1.conv.4.bias': 'g_a.3.conv_a.1.conv.6.bias',
        'g_a.3.conv_a.2.conv.2.weight': 'g_a.3.conv_a.2.conv.3.weight',
        'g_a.3.conv_a.2.conv.2.bias': 'g_a.3.conv_a.2.conv.3.bias',
        'g_a.3.conv_a.2.conv.4.weight': 'g_a.3.conv_a.2.conv.6.weight',
        'g_a.3.conv_a.2.conv.4.bias': 'g_a.3.conv_a.2.conv.6.bias',
        'g_a.3.conv_b.0.conv.2.weight': 'g_a.3.conv_b.0.conv.3.weight',
        'g_a.3.conv_b.0.conv.2.bias': 'g_a.3.conv_b.0.conv.3.bias',
        'g_a.3.conv_b.0.conv.4.weight': 'g_a.3.conv_b.0.conv.6.weight',
        'g_a.3.conv_b.0.conv.4.bias': 'g_a.3.conv_b.0.conv.6.bias',
        'g_a.3.conv_b.1.conv.2.weight': 'g_a.3.conv_b.1.conv.3.weight',
        'g_a.3.conv_b.1.conv.2.bias': 'g_a.3.conv_b.1.conv.3.bias',
        'g_a.3.conv_b.1.conv.4.weight': 'g_a.3.conv_b.1.conv.6.weight',
        'g_a.3.conv_b.1.conv.4.bias': 'g_a.3.conv_b.1.conv.6.bias',
        'g_a.3.conv_b.2.conv.2.weight': 'g_a.3.conv_b.2.conv.3.weight',
        'g_a.3.conv_b.2.conv.2.bias': 'g_a.3.conv_b.2.conv.3.bias',
        'g_a.3.conv_b.2.conv.4.weight': 'g_a.3.conv_b.2.conv.6.weight',
        'g_a.3.conv_b.2.conv.4.bias': 'g_a.3.conv_b.2.conv.6.bias',
        'g_a.8.conv_a.0.conv.0.weight': 'g_a.9.conv_a.0.conv.0.weight',
        'g_a.8.conv_a.0.conv.0.bias': 'g_a.9.conv_a.0.conv.0.bias',
        'g_a.8.conv_a.0.conv.2.weight': 'g_a.9.conv_a.0.conv.3.weight',
        'g_a.8.conv_a.0.conv.2.bias': 'g_a.9.conv_a.0.conv.3.bias',
        'g_a.8.conv_a.0.conv.4.weight': 'g_a.9.conv_a.0.conv.6.weight',
        'g_a.8.conv_a.0.conv.4.bias': 'g_a.9.conv_a.0.conv.6.bias',
        'g_a.8.conv_a.1.conv.0.weight': 'g_a.9.conv_a.1.conv.0.weight',
        'g_a.8.conv_a.1.conv.0.bias': 'g_a.9.conv_a.1.conv.0.bias',
        'g_a.8.conv_a.1.conv.2.weight': 'g_a.9.conv_a.1.conv.3.weight',
        'g_a.8.conv_a.1.conv.2.bias': 'g_a.9.conv_a.1.conv.3.bias',
        'g_a.8.conv_a.1.conv.4.weight': 'g_a.9.conv_a.1.conv.6.weight',
        'g_a.8.conv_a.1.conv.4.bias': 'g_a.9.conv_a.1.conv.6.bias',
        'g_a.8.conv_a.2.conv.0.weight': 'g_a.9.conv_a.2.conv.0.weight',
        'g_a.8.conv_a.2.conv.0.bias': 'g_a.9.conv_a.2.conv.0.bias',
        'g_a.8.conv_a.2.conv.2.weight': 'g_a.9.conv_a.2.conv.3.weight',
        'g_a.8.conv_a.2.conv.2.bias': 'g_a.9.conv_a.2.conv.3.bias',
        'g_a.8.conv_a.2.conv.4.weight': 'g_a.9.conv_a.2.conv.6.weight',
        'g_a.8.conv_a.2.conv.4.bias': 'g_a.9.conv_a.2.conv.6.bias',
        'g_a.8.conv_b.0.conv.0.weight': 'g_a.9.conv_b.0.conv.0.weight',
        'g_a.8.conv_b.0.conv.0.bias': 'g_a.9.conv_b.0.conv.0.bias',
        'g_a.8.conv_b.0.conv.2.weight': 'g_a.9.conv_b.0.conv.3.weight',
        'g_a.8.conv_b.0.conv.2.bias': 'g_a.9.conv_b.0.conv.3.bias',
        'g_a.8.conv_b.0.conv.4.weight': 'g_a.9.conv_b.0.conv.6.weight',
        'g_a.8.conv_b.0.conv.4.bias': 'g_a.9.conv_b.0.conv.6.bias',
        'g_a.8.conv_b.1.conv.0.weight': 'g_a.9.conv_b.1.conv.0.weight',
        'g_a.8.conv_b.1.conv.0.bias': 'g_a.9.conv_b.1.conv.0.bias',
        'g_a.8.conv_b.1.conv.2.weight': 'g_a.9.conv_b.1.conv.3.weight',
        'g_a.8.conv_b.1.conv.2.bias': 'g_a.9.conv_b.1.conv.3.bias',
        'g_a.8.conv_b.1.conv.4.weight': 'g_a.9.conv_b.1.conv.6.weight',
        'g_a.8.conv_b.1.conv.4.bias': 'g_a.9.conv_b.1.conv.6.bias',
        'g_a.8.conv_b.2.conv.0.weight': 'g_a.9.conv_b.2.conv.0.weight',
        'g_a.8.conv_b.2.conv.0.bias': 'g_a.9.conv_b.2.conv.0.bias',
        'g_a.8.conv_b.2.conv.2.weight': 'g_a.9.conv_b.2.conv.3.weight',
        'g_a.8.conv_b.2.conv.2.bias': 'g_a.9.conv_b.2.conv.3.bias',
        'g_a.8.conv_b.2.conv.4.weight': 'g_a.9.conv_b.2.conv.6.weight',
        'g_a.8.conv_b.2.conv.4.bias': 'g_a.9.conv_b.2.conv.6.bias',
        'g_a.8.conv_b.3.weight': 'g_a.9.conv_b.3.weight',
        'g_a.8.conv_b.3.bias': 'g_a.9.conv_b.3.bias',
        'g_s.0.conv_a.0.conv.2.weight': 'g_s.0.conv_a.0.conv.3.weight',
        'g_s.0.conv_a.0.conv.2.bias': 'g_s.0.conv_a.0.conv.3.bias',
        'g_s.0.conv_a.0.conv.4.weight': 'g_s.0.conv_a.0.conv.6.weight',
        'g_s.0.conv_a.0.conv.4.bias': 'g_s.0.conv_a.0.conv.6.bias',
        'g_s.0.conv_a.1.conv.2.weight': 'g_s.0.conv_a.1.conv.3.weight',
        'g_s.0.conv_a.1.conv.2.bias': 'g_s.0.conv_a.1.conv.3.bias',
        'g_s.0.conv_a.1.conv.4.weight': 'g_s.0.conv_a.1.conv.6.weight',
        'g_s.0.conv_a.1.conv.4.bias': 'g_s.0.conv_a.1.conv.6.bias',
        'g_s.0.conv_a.2.conv.2.weight': 'g_s.0.conv_a.2.conv.3.weight',
        'g_s.0.conv_a.2.conv.2.bias': 'g_s.0.conv_a.2.conv.3.bias',
        'g_s.0.conv_a.2.conv.4.weight': 'g_s.0.conv_a.2.conv.6.weight',
        'g_s.0.conv_a.2.conv.4.bias': 'g_s.0.conv_a.2.conv.6.bias',
        'g_s.0.conv_b.0.conv.2.weight': 'g_s.0.conv_b.0.conv.3.weight',
        'g_s.0.conv_b.0.conv.2.bias': 'g_s.0.conv_b.0.conv.3.bias',
        'g_s.0.conv_b.0.conv.4.weight': 'g_s.0.conv_b.0.conv.6.weight',
        'g_s.0.conv_b.0.conv.4.bias': 'g_s.0.conv_b.0.conv.6.bias',
        'g_s.0.conv_b.1.conv.2.weight': 'g_s.0.conv_b.1.conv.3.weight',
        'g_s.0.conv_b.1.conv.2.bias': 'g_s.0.conv_b.1.conv.3.bias',
        'g_s.0.conv_b.1.conv.4.weight': 'g_s.0.conv_b.1.conv.6.weight',
        'g_s.0.conv_b.1.conv.4.bias': 'g_s.0.conv_b.1.conv.6.bias',
        'g_s.0.conv_b.2.conv.2.weight': 'g_s.0.conv_b.2.conv.3.weight',
        'g_s.0.conv_b.2.conv.2.bias': 'g_s.0.conv_b.2.conv.3.bias',
        'g_s.0.conv_b.2.conv.4.weight': 'g_s.0.conv_b.2.conv.6.weight',
        'g_s.0.conv_b.2.conv.4.bias': 'g_s.0.conv_b.2.conv.6.bias',
        'g_s.5.conv_a.0.conv.2.weight': 'g_s.5.conv_a.0.conv.3.weight',
        'g_s.5.conv_a.0.conv.2.bias': 'g_s.5.conv_a.0.conv.3.bias',
        'g_s.5.conv_a.0.conv.4.weight': 'g_s.5.conv_a.0.conv.6.weight',
        'g_s.5.conv_a.0.conv.4.bias': 'g_s.5.conv_a.0.conv.6.bias',
        'g_s.5.conv_a.1.conv.2.weight': 'g_s.5.conv_a.1.conv.3.weight',
        'g_s.5.conv_a.1.conv.2.bias': 'g_s.5.conv_a.1.conv.3.bias',
        'g_s.5.conv_a.1.conv.4.weight': 'g_s.5.conv_a.1.conv.6.weight',
        'g_s.5.conv_a.1.conv.4.bias': 'g_s.5.conv_a.1.conv.6.bias',
        'g_s.5.conv_a.2.conv.2.weight': 'g_s.5.conv_a.2.conv.3.weight',
        'g_s.5.conv_a.2.conv.2.bias': 'g_s.5.conv_a.2.conv.3.bias',
        'g_s.5.conv_a.2.conv.4.weight': 'g_s.5.conv_a.2.conv.6.weight',
        'g_s.5.conv_a.2.conv.4.bias': 'g_s.5.conv_a.2.conv.6.bias',
        'g_s.5.conv_b.0.conv.2.weight': 'g_s.5.conv_b.0.conv.3.weight',
        'g_s.5.conv_b.0.conv.2.bias': 'g_s.5.conv_b.0.conv.3.bias',
        'g_s.5.conv_b.0.conv.4.weight': 'g_s.5.conv_b.0.conv.6.weight',
        'g_s.5.conv_b.0.conv.4.bias': 'g_s.5.conv_b.0.conv.6.bias',
        'g_s.5.conv_b.1.conv.2.weight': 'g_s.5.conv_b.1.conv.3.weight',
        'g_s.5.conv_b.1.conv.2.bias': 'g_s.5.conv_b.1.conv.3.bias',
        'g_s.5.conv_b.1.conv.4.weight': 'g_s.5.conv_b.1.conv.6.weight',
        'g_s.5.conv_b.1.conv.4.bias': 'g_s.5.conv_b.1.conv.6.bias',
        'g_s.5.conv_b.2.conv.2.weight': 'g_s.5.conv_b.2.conv.3.weight',
        'g_s.5.conv_b.2.conv.2.bias': 'g_s.5.conv_b.2.conv.3.bias',
        'g_s.5.conv_b.2.conv.4.weight': 'g_s.5.conv_b.2.conv.6.weight',
        'g_s.5.conv_b.2.conv.4.bias': 'g_s.5.conv_b.2.conv.6.bias',
        'h_a.2.weight': 'h_a.3.weight',
        'h_a.2.bias': 'h_a.3.bias',
        'h_a.4.weight': 'h_a.9.weight',
        'h_a.4.bias': 'h_a.9.bias',
        'h_a.8.weight': 'h_a.12.weight',
        'h_a.8.bias': 'h_a.12.bias',
        'h_s.2.0.weight': 'h_s.3.0.weight',
        'h_s.2.0.bias': 'h_s.3.0.bias',
        'h_s.4.weight': 'h_s.6.weight',
        'h_s.4.bias': 'h_s.6.bias',
        'h_s.6.0.weight': 'h_s.9.0.weight',
        'h_s.6.0.bias': 'h_s.9.0.bias',
        'h_s.8.weight': 'h_s.12.weight',
        'h_s.8.bias': 'h_s.12.bias',
        'entropy_parameters.2.weight': 'entropy_parameters.3.weight',
        'entropy_parameters.2.bias': 'entropy_parameters.3.bias',
        'entropy_parameters.4.weight': 'entropy_parameters.6.weight',
        'entropy_parameters.4.bias': 'entropy_parameters.6.bias',
    }