# coding: utf-8

import torch
from WeConvene.models.tcm_wave_residual_two_entropy_modified_y_downsample_8 import *
from utils import Channel
from random import choice
import warnings
warnings.filterwarnings("ignore")


class WeConveneWithChannel(TCM_residual_wave_two_entropy_modified_y_downsample_8):
    """WeConvene learned image codec with an (optional) simulated noisy channel.

    Extends the base wavelet/residual TCM model by passing selected latent
    tensors through ``utils.Channel`` at a configurable SNR (``chan_param``):
    the dequantized hyper-latent ``z_hat`` and the reconstructed wavelet
    sub-band latents.  The exact noise model lives in ``utils.Channel`` and is
    not visible in this file.
    """

    def __init__(self,
                 args,
                 **model_args,
                 ):
        # Forward any extra keyword args to the base model; otherwise rely on
        # the base class defaults.
        if model_args:
            super().__init__(**model_args)
        else:
            super().__init__()
        # Channel simulator (e.g. AWGN/fading — see utils.Channel; not shown here).
        self.channel = Channel(args)
        # ``multiple_snr`` may be a comma-separated string such as "1,2,3,4,5",
        # in which case it is parsed into a list of ints; otherwise it is kept
        # as-is (NOTE(review): forward() calls random.choice() on it, which
        # requires a sequence — confirm non-string configs are lists).
        if isinstance(args.multiple_snr, str):  # 1,2,3,4,5
            self.multiple_snr = [int(i) for i in args.multiple_snr.split(",")]
        else:
            self.multiple_snr = args.multiple_snr
        # Whether latents are actually sent through the noisy channel.
        self.pass_channel = args.pass_channel


    def forward(self, x, given_SNR=None):
        """Training/eval forward pass with STE quantization and optional channel noise.

        Args:
            x: input image batch; presumably (B, 3, H, W) — see the smoke test
               at the bottom of the file using (2, 3, 256, 256).
            given_SNR: if None, an SNR is sampled uniformly from
               ``self.multiple_snr``; otherwise the given value is used.

        Returns:
            dict with ``x_hat`` (reconstruction), ``likelihoods`` for the
            real/imag (low/high-frequency) latents and the hyper-latent ``z``,
            and ``para`` with the real-branch Gaussian parameters.
            NOTE(review): ``para`` omits the imag-branch means/scales even
            though they are computed — confirm this is intentional.
        """
        if given_SNR is None:
            SNR = choice(self.multiple_snr)
            chan_param = SNR
        else:
            chan_param = given_SNR

        y = self.g_a(x)
        y_shape = y.shape[2:]

        # Wavelet transform of the analysis latent; channels are split below.
        y_output = self.dwt(y)

        # low_frequency and high_frequency
        # NOTE(review): the 320-channel split point is hard-coded — presumably
        # the LL band occupies the first 320 channels; verify against dwt().
        low_freq = y_output[:, :320, :, :]      # y_L
        high_freq = y_output[:, 320:, :, :]     # y_H

        y_input = torch.cat([low_freq, high_freq], dim=1)       # -> ha

        z = self.h_a(y_input)

        # ======================================== #

        _, z_likelihoods = self.entropy_bottleneck(z)
        # Straight-through-estimator rounding around the bottleneck medians
        # (compressai convention), so gradients flow through quantization.
        z_offset = self.entropy_bottleneck._get_medians()
        z_tmp = z - z_offset
        z_hat = ste_round(z_tmp) + z_offset
        # NOTE: z_hat add noise
        if self.pass_channel:
            z_hat = self.feature_pass_channel(z_hat, chan_param)

        latent_scales = self.h_scale_s(z_hat)
        latent_means = self.h_mean_s(z_hat)

        # "real" branch = low-frequency sub-band, coded first (unconditioned).
        y_real_slices = low_freq.chunk(self.num_slices, 1)
        y_real_hat_slices = []
        y_real_likelihood = []
        mu_real_list = []
        scale_real_list = []

        # "imag" branch = high-frequency sub-bands, coded conditioned on the
        # reconstructed low-frequency latent (see second loop below).
        y_imag_slices = high_freq.chunk(self.num_slices, 1)
        y_imag_hat_slices = []
        y_imag_likelihood = []
        mu_imag_list = []
        scale_imag_list = []

        # Autoregressive slice loop: each slice's Gaussian params are predicted
        # from the hyper-prior plus previously decoded slices.
        for slice_index, y_slice in enumerate(y_real_slices):
            support_slices = (y_real_hat_slices if self.max_support_slices < 0 else y_real_hat_slices[:self.max_support_slices])
            mean_support = torch.cat([latent_means] + support_slices, dim=1)
            mean_support = self.atten_mean_real[slice_index](mean_support)
            mu = self.cc_mean_transforms_real[slice_index](mean_support)
            # NOTE(review): this slice is a no-op; scale below is cropped to
            # y_shape — presumably mu was meant to be cropped the same way.
            mu = mu[:, :, :, :]
            mu_real_list.append(mu)
            scale_support = torch.cat([latent_scales] + support_slices, dim=1)
            scale_support = self.atten_scale_real[slice_index](scale_support)
            scale = self.cc_scale_transforms_real[slice_index](scale_support)
            scale = scale[:, :, :y_shape[0], :y_shape[1]]
            scale_real_list.append(scale)
            _, y_slice_likelihood = self.gaussian_conditional_real(y_slice, scale, mu)
            y_real_likelihood.append(y_slice_likelihood)
            # STE quantization around the predicted mean.
            y_hat_slice = ste_round(y_slice - mu) + mu
            # if self.training:
            #     lrp_support = torch.cat([mean_support + torch.randn(mean_support.size()).cuda().mul(scale_support), y_hat_slice], dim=1)
            # else:
            # Latent residual prediction (LRP), bounded to (-0.5, 0.5) by tanh.
            lrp_support = torch.cat([mean_support, y_hat_slice], dim=1)
            lrp = self.lrp_transforms_real[slice_index](lrp_support)
            lrp = 0.5 * torch.tanh(lrp)
            y_hat_slice += lrp

            y_real_hat_slices.append(y_hat_slice)

        y_real_hat = torch.cat(y_real_hat_slices, dim=1)
        # NOTE: add noise
        # NOTE(review): compress() instead injects noise into y_input before
        # h_a and never noises y_real_hat — confirm the intended asymmetry.
        if self.pass_channel:
            y_real_hat = self.feature_pass_channel(y_real_hat, chan_param)

        means_real = torch.cat(mu_real_list, dim=1)
        scales_real = torch.cat(scale_real_list, dim=1)
        y_real_likelihoods = torch.cat(y_real_likelihood, dim=1)

        # Same slice loop for the high-frequency branch, additionally
        # conditioned on the (possibly noisy) reconstructed low-frequency latent.
        for slice_index, y_slice in enumerate(y_imag_slices):
            support_slices = (y_imag_hat_slices if self.max_support_slices < 0 else y_imag_hat_slices[:self.max_support_slices])
            mean_support = torch.cat([latent_means]+ [y_real_hat] + support_slices, dim=1)
            mean_support = self.atten_mean_imag[slice_index](mean_support)
            mu = self.cc_mean_transforms_imag[slice_index](mean_support)
            # NOTE(review): no-op slice, same as the real branch above.
            mu = mu[:, :, :, :]
            mu_imag_list.append(mu)
            scale_support = torch.cat([latent_scales] + [y_real_hat]+ support_slices, dim=1)
            scale_support = self.atten_scale_imag[slice_index](scale_support)
            scale = self.cc_scale_transforms_imag[slice_index](scale_support)
            scale = scale[:, :, :y_shape[0], :y_shape[1]]
            scale_imag_list.append(scale)
            _, y_slice_likelihood = self.gaussian_conditional_imag(y_slice, scale, mu)
            y_imag_likelihood.append(y_slice_likelihood)
            y_hat_slice = ste_round(y_slice - mu) + mu
            # if self.training:
            #     lrp_support = torch.cat([mean_support + torch.randn(mean_support.size()).cuda().mul(scale_support), y_hat_slice], dim=1)
            # else:
            lrp_support = torch.cat([y_real_hat] +[mean_support, y_hat_slice], dim=1)
            lrp = self.lrp_transforms_imag[slice_index](lrp_support)
            lrp = 0.5 * torch.tanh(lrp)
            y_hat_slice += lrp

            y_imag_hat_slices.append(y_hat_slice)

        y_imag_hat = torch.cat(y_imag_hat_slices, dim=1)
        # NOTE: add noise
        if self.pass_channel:
            y_imag_hat = self.feature_pass_channel(y_imag_hat, chan_param)

        means_imag = torch.cat(mu_imag_list, dim=1)
        scales_imag = torch.cat(scale_imag_list, dim=1)
        y_imag_likelihoods = torch.cat(y_imag_likelihood, dim=1)

        #y_hat = torch.cat((y_real_hat, y_imag_hat), 1)
        # Reassemble the sub-bands in the same channel order dwt() produced.
        dwt_processed = torch.cat([y_real_hat, y_imag_hat], dim=1)

        # IDWT inverse wavelet transform
        y_hat = self.idwt(dwt_processed)
        x_hat = self.g_s(y_hat)

        return {
            "x_hat": x_hat,
            "likelihoods": {"y_real": y_real_likelihoods,"y_imag": y_imag_likelihoods, "z": z_likelihoods},
            "para":{"means_real": means_real, "scales_real":scales_real, "y":y}
        }

    def compress(self, x, given_SNR=None):
        """Actual entropy coding: rANS-encode the latents into byte strings.

        Mirrors forward() but uses hard quantization and BufferedRansEncoder
        instead of STE rounding.  Returns ``{"strings": [real, imag, z],
        "shape": z_spatial_shape}`` in the compressai convention.
        """
        if given_SNR is None:
            # Unlike forward(), this path tolerates a scalar multiple_snr.
            SNR = choice(self.multiple_snr) if isinstance(self.multiple_snr, list) else self.multiple_snr
            chan_param = SNR
        else:
            chan_param = given_SNR
        #x = torch.fft.fft2(x, dim=(-2, -1))

        y = self.g_a(x)
        y_shape = y.shape[2:]

        y_output = self.dwt(y)

        low_freq = y_output[:, :320, :, :]
        high_freq = y_output[:, 320:, :, :]

        y_input = torch.cat([low_freq, high_freq], dim=1)
        #z = self.h_a(y_input)
        # NOTE: add noise
        # NOTE(review): here the channel noise is applied BEFORE h_a, and the
        # symbols are still derived from the clean low/high_freq below —
        # different from forward(); confirm this matches the system design.
        if self.pass_channel:
            y_input = self.feature_pass_channel(y_input, chan_param)

        z = self.h_a(y_input)
        # ============================================ #

        z_strings = self.entropy_bottleneck.compress(z)
        z_hat = self.entropy_bottleneck.decompress(z_strings, z.size()[-2:])
        # NOTE: z_hat add noise
        if self.pass_channel:
            z_hat = self.feature_pass_channel(z_hat, chan_param)

        #_, z_likelihoods = self.entropy_bottleneck(z)
        latent_scales = self.h_scale_s(z_hat)
        latent_means = self.h_mean_s(z_hat)

        y_real_slices = low_freq.chunk(self.num_slices, 1)
        y_real_hat_slices = []
        y_real_likelihood = []
        y_mu_real_list = []
        y_scale_real_list = []

        y_imag_slices = high_freq.chunk(self.num_slices, 1)
        y_imag_hat_slices = []
        y_imag_likelihood = []
        y_mu_imag_list = []
        y_scale_imag_list = []

        # Frozen CDF tables for the rANS coder (compressai GaussianConditional).
        cdf_real = self.gaussian_conditional_real.quantized_cdf.tolist()
        cdf_real_lengths = self.gaussian_conditional_real.cdf_length.reshape(-1).int().tolist()
        offsets_real = self.gaussian_conditional_real.offset.reshape(-1).int().tolist()

        encoder_real = BufferedRansEncoder()
        symbols_real_list = []
        indexes_real_list = []
        y_real_strings = []

        for slice_index, y_slice in enumerate(y_real_slices):
            support_slices = (y_real_hat_slices if self.max_support_slices < 0 else y_real_hat_slices[:self.max_support_slices])
            mean_support = torch.cat([latent_means] + support_slices, dim=1)
            mean_support = self.atten_mean_real[slice_index](mean_support)
            mu = self.cc_mean_transforms_real[slice_index](mean_support)
            # NOTE(review): no-op slice (forward() crops scale to y_shape but
            # here neither mu nor scale is cropped) — verify shapes agree.
            mu = mu[:, :, :, :]

            scale_support = torch.cat([latent_scales] + support_slices, dim=1)
            scale_support = self.atten_scale_real[slice_index](scale_support)
            scale = self.cc_scale_transforms_real[slice_index](scale_support)
            scale = scale[:, :, :, :]

            index = self.gaussian_conditional_real.build_indexes(scale)
            # Hard quantization to integer symbols around the predicted mean.
            y_q_slice = self.gaussian_conditional_real.quantize(y_slice, "symbols", mu)
            y_hat_slice = y_q_slice + mu

            symbols_real_list.extend(y_q_slice.reshape(-1).tolist())
            indexes_real_list.extend(index.reshape(-1).tolist())

            # LRP refinement, identical to forward(), so the encoder-side
            # reconstruction matches what the decoder will compute.
            lrp_support = torch.cat([mean_support, y_hat_slice], dim=1)
            lrp = self.lrp_transforms_real[slice_index](lrp_support)
            lrp = 0.5 * torch.tanh(lrp)
            y_hat_slice += lrp

            y_real_hat_slices.append(y_hat_slice)
            y_scale_real_list.append(scale)
            y_mu_real_list.append(mu)

        encoder_real.encode_with_indexes(symbols_real_list, indexes_real_list, cdf_real, cdf_real_lengths, offsets_real)
        y_real_string = encoder_real.flush()
        y_real_strings.append(y_real_string)

        cdf_imag = self.gaussian_conditional_imag.quantized_cdf.tolist()
        cdf_imag_lengths = self.gaussian_conditional_imag.cdf_length.reshape(-1).int().tolist()
        offsets_imag = self.gaussian_conditional_imag.offset.reshape(-1).int().tolist()

        y_real_hat = torch.cat(y_real_hat_slices, dim=1)
        encoder_imag = BufferedRansEncoder()
        symbols_imag_list = []
        indexes_imag_list = []
        y_imag_strings = []

        # High-frequency branch, conditioned on the reconstructed low-frequency
        # latent exactly as in forward().
        for slice_index, y_slice in enumerate(y_imag_slices):
            support_slices = (y_imag_hat_slices if self.max_support_slices < 0 else y_imag_hat_slices[:self.max_support_slices])
            mean_support = torch.cat([latent_means] + [y_real_hat]+ support_slices, dim=1)
            mean_support = self.atten_mean_imag[slice_index](mean_support)
            mu = self.cc_mean_transforms_imag[slice_index](mean_support)
            mu = mu[:, :, :, :]

            scale_support = torch.cat([latent_scales] + [y_real_hat]+ support_slices, dim=1)
            scale_support = self.atten_scale_imag[slice_index](scale_support)
            scale = self.cc_scale_transforms_imag[slice_index](scale_support)
            scale = scale[:, :, :, :]

            index = self.gaussian_conditional_imag.build_indexes(scale)
            y_q_slice = self.gaussian_conditional_imag.quantize(y_slice, "symbols", mu)
            y_hat_slice = y_q_slice + mu

            symbols_imag_list.extend(y_q_slice.reshape(-1).tolist())
            indexes_imag_list.extend(index.reshape(-1).tolist())

            lrp_support = torch.cat([y_real_hat]+ [mean_support, y_hat_slice], dim=1)
            lrp = self.lrp_transforms_imag[slice_index](lrp_support)
            lrp = 0.5 * torch.tanh(lrp)
            y_hat_slice += lrp

            y_imag_hat_slices.append(y_hat_slice)
            y_scale_imag_list.append(scale)
            y_mu_imag_list.append(mu)

        encoder_imag.encode_with_indexes(symbols_imag_list, indexes_imag_list, cdf_imag, cdf_imag_lengths, offsets_imag)
        y_imag_string = encoder_imag.flush()
        y_imag_strings.append(y_imag_string)


        return {"strings": [y_real_strings, y_imag_strings, z_strings], "shape": z.size()[-2:]}

    def feature_pass_channel(self, feature, chan_param, avg_pwr=False):
        """Send ``feature`` through the simulated channel at SNR ``chan_param``.

        Thin wrapper around ``self.channel.forward``; the noise model and the
        meaning of ``avg_pwr`` are defined in utils.Channel (not visible here).
        """
        noisy_feature = self.channel.forward(feature, chan_param, avg_pwr)
        return noisy_feature


if __name__ == '__main__':
    # Smoke test: build the model and run one forward pass on random data.
    # NOTE(review): __init__ dereferences args.multiple_snr / args.pass_channel,
    # so passing None here raises AttributeError — supply a real args namespace
    # (e.g. from the training script's argument parser) to actually run this.
    model = WeConveneWithChannel(None)
    print(model)
    data = torch.randn((2, 3, 256, 256))
    output = model(data)
    # Fix: print the model output dict, not the input tensor that was already
    # generated above (the original printed `data` after computing `output`).
    print(output)

