"""
Pytorch Classes and methods for DSP algorithms, in real-imaginary-split way.
@author: Pinjing He (hepinjing93@gmail.com).
"""
import torch
import torch.nn as nn
import torch.fft as tfft
import numpy as np
import math
from random import shuffle

from scipy.stats import norm

import util
from scipy.special import erf


def GSOP(sig):
    """Gram-Schmidt orthogonalization procedure (GSOP) for I/Q imbalance.

    Orthogonalizes the Q rail against the I rail and normalizes both
    rails to unit RMS power over the last (sample) axis.

    Parameters
    ----------
    sig : torch.Tensor
        Real/imag-split signal, shape [*, samples, 2].

    Returns
    -------
    torch.Tensor
        Orthogonalized signal with the same shape as ``sig``.
    """
    i = sig[..., 0]
    q = sig[..., 1]
    rms = lambda x: torch.sqrt(torch.mean(x ** 2, dim=-1, keepdim=True))
    ret_i = i / rms(i)
    # Gram-Schmidt step: remove the I-correlated component from Q.
    qtemp = q - torch.mean(i * q, dim=-1, keepdim=True) * i / rms(i) ** 2
    # Bug fix: normalize the orthogonalized component qtemp, not the raw q;
    # dividing the raw q by rms(qtemp) left the I/Q correlation in place.
    ret_q = qtemp / rms(qtemp)
    ret = torch.stack([ret_i, ret_q], dim=-1)
    return ret


def recover_sig_by_phi(sig, phi):
    """Undo the I/Q mismatch model ``sig_m = g1*sig + g2*conj(sig)``.

    With ``g1 = (1 + e^{j*phi})/2`` and ``g2 = (1 - e^{j*phi})/2`` the
    original signal is recovered exactly via
    ``(conj(sig_m/g1) - sig_m/g2) / (conj(g2)/conj(g1) - g1/g2)``.
    When ``phi`` makes ``g2`` exactly zero there is no mismatch and the
    input is returned unchanged.
    """
    rot = np.exp(1j * phi)
    g1 = (1 + rot) / 2
    g2 = (1 - rot) / 2

    if g2 == 0:
        return sig

    scale = 1 / (g2.conj() / g1.conj() - g1 / g2)
    return scale * ((sig / g1).conj() - sig / g2)


def adaptive_MZM_bias_compensate(sig,
                                 constellations=util.CONST_16QAM,
                                 **opt_kwargs):
    """Compensate MZM-bias-induced I/Q mismatch via Bayesian optimization.

    Sweeps the mismatch angle ``phi`` with a Bayesian optimizer and keeps
    the value that maximizes the decision-directed SNR of the recovered
    signal (plus the per-axis variance terms of the input).

    Parameters
    ----------
    sig : np.ndarray
        Linearly equalized input signal, 1 sample/symbol, shape [*, samples].
    constellations : np.ndarray, optional
        Constellation points; their order does not matter.
    **opt_kwargs
        Optional 'phi_bound' and 'verbose' overrides for the optimizer.

    Returns
    -------
    np.ndarray
        The compensated signal.
    """
    # Bayesian optimization lib: https://github.com/fmfn/BayesianOptimization
    from bayes_opt import BayesianOptimization

    def objective(phi):
        # Decision-directed SNR of the candidate recovery, plus the
        # (phi-independent) I/Q variance terms of the raw input.
        recovered = recover_sig_by_phi(sig, phi)
        snr = np.mean(util.snr_dd(recovered, constellations))
        variance_terms = (np.mean(np.var(sig.real, axis=-1)) +
                          np.mean(np.var(sig.imag, axis=-1)))
        return snr + variance_terms

    bounds = opt_kwargs.get('phi_bound',
                            {'phi': (-np.pi / 2 * 0.6, np.pi / 2 * 0.6)})
    optimizer = BayesianOptimization(
        f=objective,
        pbounds=bounds,
        verbose=opt_kwargs.get('verbose', 0),
    )

    optimizer.maximize(init_points=5, n_iter=20, acq='ei', xi=0.05)
    best_phi = optimizer.max['params']['phi']
    return recover_sig_by_phi(sig, best_phi)


def adaptive_MZM_bias_compensate_prbs(sig,
                                      prbs,
                                      constellations=util.CONST_16QAM,
                                      **opt_kwargs):
    """Compensate MZM-bias-induced I/Q mismatch against a known PRBS.

    Sweeps the mismatch angle ``phi`` with a Bayesian optimizer; the
    figure of merit is the Q factor derived from the BER measured
    against the reference PRBS pattern.

    Parameters
    ----------
    sig : np.ndarray
        Linearly equalized input signal, 1 sample/symbol, shape [*, samples].
    prbs : np.ndarray
        Reference PRBS pattern.
    constellations : np.ndarray, optional
        Constellation points; their order does not matter.
    **opt_kwargs
        Optional 'phi_bound', 'verbose' and 'bit_order' overrides.

    Returns
    -------
    np.ndarray
        The compensated signal.
    """
    # Bayesian optimization lib: https://github.com/fmfn/BayesianOptimization
    from bayes_opt import BayesianOptimization

    bit_order = opt_kwargs.get('bit_order', 'msb')

    def objective(phi):
        candidate = recover_sig_by_phi(sig, phi)
        pr_result = util.pr_ber(candidate,
                                prbs,
                                constellations,
                                order=bit_order)
        # pr_result[1] holds the per-case BER; fold to a scalar Q factor.
        return util.ber2q(np.mean(pr_result[1]))

    bounds = opt_kwargs.get('phi_bound',
                            {'phi': (-np.pi / 2 * 0.6, np.pi / 2 * 0.6)})
    optimizer = BayesianOptimization(
        f=objective,
        pbounds=bounds,
        verbose=opt_kwargs.get('verbose', 2),
    )

    optimizer.maximize(init_points=5, n_iter=20, acq='ei', xi=0.05)
    best_phi = optimizer.max['params']['phi']
    return recover_sig_by_phi(sig, best_phi)


def frame_sync(sig, ruler_size, find_peak=True):
    """Coarse frame synchronization via a delayed auto-correlation metric.

    Each window of ``ruler_size`` samples is correlated with the window
    that follows it; a repeated training block produces a peak in the
    metric magnitude at the block's start index.

    Parameters
    ----------
    sig : torch.Tensor or np.ndarray
        Input signal.  A torch tensor (or a real ndarray whose last axis
        has size 2) is interpreted as real/imag-split and converted to a
        complex ndarray first.
    ruler_size : int
        Length of the repeated training block, in samples.
    find_peak : bool, optional
        If True, use scipy peak detection and return the earliest peak;
        otherwise return the global argmax of the metric magnitude.

    Returns
    -------
    (np.ndarray, int)
        The complex correlation metric and the detected start index.
    """
    if isinstance(sig, torch.Tensor):
        sig = torch.view_as_complex(sig)
        sig = sig.data.cpu().numpy()
    elif isinstance(sig, np.ndarray):
        # Bug fix: np.iscomplex() returns an element-wise boolean array, so
        # `not np.iscomplex(sig)` raised "truth value is ambiguous" for any
        # input with more than one element; np.iscomplexobj() tests the
        # dtype of the whole array instead.
        if sig.shape[-1] == 2 and not np.iscomplexobj(sig):
            sig = sig[..., 0] + 1j * sig[..., 1]

    metric = np.zeros_like(sig) + 1j * np.zeros_like(sig)

    sig_len = sig.shape[-1]
    for indx in range(sig_len - ruler_size * 2):
        metric[..., indx] = np.sum(
            sig[..., indx:indx + ruler_size].conj() *
            sig[..., indx + ruler_size:indx + ruler_size * 2],
            axis=-1,
            keepdims=True)
    if not find_peak:
        peak_indx = np.argmax(np.abs(metric), axis=-1)
        return metric, peak_indx.item()
    else:
        metric_abs = np.abs(metric)
        from scipy.signal import find_peaks

        # Only accept peaks above half of the metric's dynamic range.
        height = (np.max(metric_abs) - np.min(metric_abs)) / 2

        peak_detect_result = find_peaks(metric_abs.squeeze(),
                                        height=height,
                                        distance=2 * ruler_size)[0]

        peak_indx = np.min(peak_detect_result)

        return metric, peak_indx


def frame_sync_exact(sig,
                     coarse_indx,
                     prbs,
                     frame_len,
                     kwargs_ber={},
                     search_width=10):
    """Refine a coarse frame-sync index by minimizing BER around it.

    Every start offset in ``[coarse_indx - search_width,
    coarse_indx + search_width)`` is tried and the one with the lowest
    BER against the reference PRBS is returned.  A warning is emitted
    when even the best BER exceeds 0.3, i.e. no plausible alignment was
    found in the search window.
    """
    assert (sig.ndim == 1)
    assert (isinstance(sig, np.ndarray))

    candidates = range(coarse_indx - search_width,
                       coarse_indx + search_width)
    ber_per_offset = []
    for start in candidates:
        segment = sig[..., start:start + frame_len]
        # sig_cut = sig_cut[np.newaxis, ...]
        segment = segment.reshape(*prbs.shape[0:-1], -1)
        segment, ber, _ = util.pr_ber(segment, prbs, **kwargs_ber)
        ber_per_offset.append(ber.squeeze())

    ber_results = np.stack(ber_per_offset, axis=0)
    best_ber = np.min(ber_results, axis=0)
    best_offset = np.argmin(ber_results, axis=0)
    if best_ber > 0.3:
        from warnings import warn
        warn('searched from [{}] to [{}], no matched result.'.format(
            coarse_indx - search_width, coarse_indx + search_width))

    return best_offset + coarse_indx - search_width


def torch_fftshift(x, dims=-1):
    """Shift the zero-frequency component to the center of the spectrum.

    Equivalent to ``np.fft.fftshift`` along ``dims``.

    Bug fix: the previous implementation rolled by ``-(n // 2)``, which
    matches fftshift only for even lengths; for odd lengths it computed
    ifftshift instead.  Rolling by ``+ (n // 2)`` is correct for both.
    """
    sNum = x.shape[dims]
    ret = torch.roll(x, sNum // 2, dims=dims)
    return ret


def decimate(sig, q, time=0, est_best_time=False, complex_out=False):
    """Downsample the signal after an ideal (brick-wall) anti-aliasing filter.

    The spectrum is low-pass filtered by zeroing all FFT bins above the
    post-decimation Nyquist frequency, then every ``q``-th sample is kept.

    Parameters
    ----------
    sig : torch.Tensor
        Input signal with shape [*, samples]; either complex-valued, or
        real-valued with a trailing real/imag axis of size 2.
    q : int
        The downsampling factor.
    time : int, optional
        Sampling-phase offset used when ``est_best_time`` is False.
    est_best_time : bool, optional
        If True, run SymbolTimingEstLayer to align the sampling phase
        before decimating.
    complex_out : bool, optional
        If True return a complex tensor, otherwise real/imag-split.

    Returns
    -------
    torch.Tensor
        Downsampled signal, shape [*, floor(samples/q)] (complex) or
        [*, floor(samples/q), 2] (real/imag-split).
    """
    if isinstance(sig, torch.Tensor):
        if not torch.is_complex(sig):
            if sig.shape[-1] != 2:
                raise ValueError(
                    'The last dimension of sig should be 2 when sig is real-valued.'
                )
            else:
                sig = torch.view_as_complex(sig)

    elif isinstance(sig, np.ndarray):
        raise NotImplementedError

    if est_best_time:
        ste = SymbolTimingEstLayer()

    # Brick-wall low-pass: zero every bin above the new Nyquist rate.
    sig_f = torch.fft.fft(sig, dim=-1)
    ceil_div = lambda y, x: y // x + (y % x != 0)
    zero_start_indx = ceil_div(sig.shape[-1], 2 * q)
    zero_end_indx = sig.shape[-1] - zero_start_indx
    sig_f[..., zero_start_indx:zero_end_indx +
                               1] = sig_f[..., zero_start_indx:zero_end_indx + 1] * 0
    sig = torch.fft.ifft(sig_f, dim=-1)

    if est_best_time:
        sig = torch.view_as_real(sig)
        sig = ste(sig)
        sig = torch.view_as_complex(sig)
        sig = sig[..., 0::q]
    else:
        sig = sig[..., time::q]

    # Bug fix: removed an unreachable trailing `return ret` that referenced
    # an undefined name.
    if complex_out:
        return sig
    else:
        return torch.stack([sig.real, sig.imag], dim=-1)


def format_rt(x):
    """Lift a real tensor into real/imag-split form as the real part.

    Appends a trailing axis of size 2 whose imaginary slot is zero.
    """
    if not isinstance(x, torch.Tensor):
        raise TypeError('The input type is supposed to be torch.Tensor.')

    zeros = torch.zeros_like(x, device=x.device)
    return torch.stack([x, zeros], dim=-1)


def format_it(x):
    """Lift a real tensor into real/imag-split form as the imaginary part.

    Appends a trailing axis of size 2 whose real slot is zero.
    """
    if not isinstance(x, torch.Tensor):
        raise TypeError('The input type is supposed to be torch.Tensor.')

    zeros = torch.zeros_like(x, device=x.device)
    return torch.stack([zeros, x], dim=-1)


def cmul(x, y):
    """Complex multiplication of two real/imag-split tensors.

    Both inputs carry the real and imaginary parts on their last axis
    (size 2).  Uses the Karatsuba trick: three real multiplications
    instead of four.
    """
    if not (isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor)):
        raise TypeError('The input type is supposed to be torch.Tensor.')

    if x.shape[-1] != 2 or y.shape[-1] != 2:
        raise ValueError(
            'The last dimension of the input is supposed to be 2, i.e., the real part and the imaginary part.'
        )

    if x.device != y.device:
        raise Exception(
            'The two input tensors are supposed to be in the same device.')

    if x.ndim != y.ndim:
        raise Exception(
            'The two input tensors are supposed to have the same number of dimensions.'
        )

    xr, xi = x[..., 0], x[..., 1]
    yr, yi = y[..., 0], y[..., 1]
    t1 = yr * (xr + xi)
    t2 = xr * (yi - yr)
    t3 = xi * (yr + yi)
    return torch.stack([t1 - t3, t1 + t2], dim=-1)


def cinv(x):
    """Complex reciprocal 1/x of a real/imag-split tensor.

    Uses 1/(a+bi) = (a - bi) / (a^2 + b^2).
    """
    if not isinstance(x, torch.Tensor):
        raise TypeError('The input type is supposed to be torch.Tensor.')
    if x.shape[-1] != 2:
        raise ValueError(
            'The last dimension of the input is supposed to be 2, i.e., the real part and the imaginary part.'
        )

    squared_mag = x[..., 0] ** 2 + x[..., 1] ** 2
    return torch.stack([x[..., 0] / squared_mag, -x[..., 1] / squared_mag],
                       dim=-1)


def cdiv(x, y):
    """Complex division x / y for real/imag-split tensors.

    Implemented as x * (1/y) via ``cmul`` and ``cinv``; both inputs
    carry the real and imaginary parts on their last axis (size 2).
    """
    if not (isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor)):
        raise TypeError('The input type is supposed to be torch.Tensor.')
    if x.shape[-1] != 2 or y.shape[-1] != 2:
        raise ValueError(
            'The last dimension of the input is supposed to be 2, i.e., the real part and the imaginary part.'
        )
    if x.device != y.device:
        raise Exception(
            'The two input tensors are supposed to be in the same device.')
    if x.ndim != y.ndim:
        raise Exception(
            'The two input tensors are supposed to have the same number of dimensions.'
        )

    return cmul(x, cinv(y))


def conj(x):
    """Complex conjugate of a real/imag-split tensor (negates the imag part)."""
    if x.shape[-1] != 2:
        raise ValueError(
            'The last dimension of the input is supposed to be 2, i.e., the real part and the imaginary part.'
        )
    real, imag = x[..., 0], x[..., 1]
    return torch.stack([real, -imag], dim=-1)


def riSplitFFT(x, n=None):
    """FFT of a real/imag-split tensor, returned in the same split form.

    The trailing axis of size 2 is viewed as a complex number, the FFT
    is taken along the (now) last axis, and the result is split back.
    """
    if not isinstance(x, torch.Tensor):
        raise TypeError('The input type is supposed to be torch.Tensor.')
    if x.shape[-1] != 2:
        raise ValueError(
            'The last dimension of the input is supposed to be 2, i.e., the real part and the imaginary part.'
        )

    spectrum = tfft.fft(torch.view_as_complex(x), n=n)
    return torch.view_as_real(spectrum)


def riSplitIFFT(x, n=None):
    """Inverse FFT of a real/imag-split tensor, returned in split form.

    The trailing axis of size 2 is viewed as a complex number, the IFFT
    is taken along the (now) last axis, and the result is split back.
    """
    if not isinstance(x, torch.Tensor):
        raise TypeError('The input type is supposed to be torch.Tensor.')
    if x.shape[-1] != 2:
        raise ValueError(
            'The last dimension of the input is supposed to be 2, i.e., the real part and the imaginary part.'
        )

    time_domain = tfft.ifft(torch.view_as_complex(x), n=n)
    return torch.view_as_real(time_domain)


def pick_pol(sig,
             prbs,
             sample_factor=2,
             constellations=np.array([
                 -3 + 1j * 3, -3 + 1j * 1, -3 + 1j * -3, -3 + 1j * -1,
                 -1 + 1j * 3, -1 + 1j * 1, -1 + 1j * -3, -1 + 1j * -1,
                 3 + 1j * 3, 3 + 1j * 1, 3 + 1j * -3, 3 + 1j * -1, 1 + 1j * 3,
                 1 + 1j * 1, 1 + 1j * -3, 1 + 1j * -1
             ]) / np.sqrt(10),
             mod_order=4):
    """Resolve the polarization-ordering ambiguity of a dual-pol signal.

    Measures the PRBS-referenced BER with the polarization axis (dim -2)
    as-is and flipped, and returns whichever ordering of the ORIGINAL
    (non-downsampled) signal yields the lower mean BER.

    Bug fix: the default constellation was built as
    ``np.array([...] / np.sqrt(10))``, which divides a Python list by a
    float and raises TypeError when the module is imported; the division
    must be applied to the constructed array instead.

    Parameters
    ----------
    sig : np.ndarray
        Input signal, shape [..., pol, samples], possibly oversampled.
    prbs : np.ndarray
        Reference PRBS pattern.
    sample_factor : int, optional
        Samples per symbol; the BER test uses the center sample.
    constellations : np.ndarray, optional
        Constellation points (defaults to unit-power 16QAM).
    mod_order : int, optional
        Bits per symbol.

    Returns
    -------
    np.ndarray
        ``sig`` with the polarization order that minimizes BER.
    """
    sig_ori = sig
    if sample_factor > 1:
        # Pick the center sample of each symbol for the BER measurement.
        sig = sig[..., round(sample_factor // 2)::sample_factor]

    dummy, ber, _ = util.pr_ber(sig,
                                prbs,
                                constellations=constellations,
                                mod_order=mod_order)
    dummy, ber_flip, _ = util.pr_ber(np.flip(sig, axis=(-2,)),
                                     prbs,
                                     constellations=constellations,
                                     mod_order=mod_order)

    ber = np.mean(ber)
    ber_flip = np.mean(ber_flip)

    if ber < ber_flip:
        return sig_ori

    else:
        return np.flip(sig_ori, axis=(-2,))


def sym_decide(symbols, constellations):
    """[for torch]
    Make hard decisions on the input symbols: each symbol is replaced by
    the nearest ideal constellation point (after power normalization).

    NO GRADIENT PASS THROUGH THIS FUNCTION!

    Parameters
    ----------
    symbols : torch tensor
        Real/imag-split input symbols, shape [*, samples, 2].
    constellations : torch tensor or np.ndarray
        Ideal constellation points (complex or real/imag-split).

    Returns
    -------
    torch tensor
        Real/imag-split symbol decisions.
    """
    if isinstance(constellations, np.ndarray):
        constellations = torch.from_numpy(constellations)
    const = constellations.to(symbols.device)

    with torch.no_grad():
        if not torch.is_complex(const):
            const = torch.view_as_complex(const)

        # Normalize power so distances are measured on a unit-power grid.
        syms = torch.view_as_complex(norm_power(symbols))

        # Broadcast: [*, samples, 1] against [1, ..., n_points].
        flat_const = const.reshape(*([1] * syms.ndim), -1)
        nearest = torch.argmin(torch.abs(syms.unsqueeze(-1) - flat_const),
                               dim=-1)

        decided = flat_const.squeeze()[nearest]
        decided = torch.view_as_real(decided)

    return decided


class BlockSelector():
    """Iterate over (possibly overlapping) index blocks of a signal.

    A signal of length ``sig_size`` is covered by ``ceil(sig_size/remain)``
    steps of ``remain`` samples; each step is widened symmetrically to
    ``block_size`` indices, wrapping around the signal edges.
    """

    def __init__(self, sig_size, block_size, remain):
        if remain > block_size:
            raise Exception('block_size should greater than remain.')
        if block_size > sig_size:
            raise Exception('sig_size should greater than block_size.')
        self.sig_size = sig_size
        self.block_size = block_size
        self.remain = remain
        # Number of steps needed to cover the whole signal.
        self.block_num = math.ceil(self.sig_size / remain)
        # Samples that spill past the signal end on the final block.
        self.tail_size = self.block_num * remain - sig_size
        # Wrap indices around the signal edges.
        self.wrapper = lambda x, N: x % N
        # Overlap added on each side of the retained region.
        self.p_overlap = math.ceil((self.block_size - self.remain) / 2)

    def __len__(self):
        return self.block_num

    def __getitem__(self, indx):
        start = indx * self.remain - self.p_overlap
        positions = np.arange(start, start + self.block_size)
        return self.wrapper(positions, self.sig_size)


def RRC(alpha, sample_factor, span):
    """Calculate root-raised-cosine filter taps in the frequency domain.

    Parameters
    ----------
    alpha : scalar
        Roll-off factor.
    sample_factor : scalar
        The number of samples per symbol.
    span : scalar
        Number of correlated symbols.

    Returns
    -------
    numpy array
        The filter taps in the frequency domain.

    Raises
    ------
    Exception
        When the tap number is not even.
    """
    if (span * sample_factor) % 2 != 0:
        raise Exception("The number of filter taps should be even.")

    # Root-raised-cosine is the square root of the raised-cosine spectrum.
    return np.sqrt(RC(alpha, sample_factor, span))


def RC(alpha, sample_factor, span):
    """Calculate raised-cosine filter taps in the frequency domain.

    Builds the positive-frequency half of the spectrum (flat passband,
    sinusoidal roll-off) and mirrors it onto the negative-frequency bins.

    Returns a numpy array of shape (span * sample_factor, 1).
    """
    if (span * sample_factor) % 2 != 0:
        raise Exception("The number of filter taps should be even.")
    N = span * sample_factor
    freq = np.arange(N) / N * sample_factor

    taps = np.zeros((N,))
    passband = freq <= (1 - alpha) / 2
    taps[passband] = 1
    rolloff = np.logical_and(freq > (1 - alpha) / 2, freq < (1 + alpha) / 2)
    taps[rolloff] = 0.5 * (1 - np.sin(np.pi / alpha * (freq[rolloff] - 0.5)))
    # Mirror the positive-frequency half onto the negative-frequency bins.
    taps[N // 2 + 1:] = taps[N // 2 - 1:0:-1]

    return taps[..., np.newaxis]


def Rect(sample_factor, span):
    """Rectangular (boxcar) taps of width ``sample_factor`` centered in
    an even-length window of ``span * sample_factor`` samples, scaled to
    unit DC gain."""
    if (span * sample_factor) % 2 != 0:
        raise Exception("The number of filter taps should be even.")
    N = span * sample_factor
    taps = np.zeros((N,))
    lo = (N - sample_factor) // 2
    hi = (N + sample_factor) // 2
    taps[lo:hi] = 1 / sample_factor
    return taps


def norm_power(x):
    """Normalize a real/imag-split tensor to unit average power.

    The power of each complex sample (real^2 + imag^2) is averaged over
    the block axis (dim -2) and the signal is divided by its square root.
    """
    if not isinstance(x, torch.Tensor):
        raise TypeError('The input type is supposed to be torch.Tensor.')
    if x.shape[-1] != 2:
        raise ValueError(
            'The last dimension of the input is supposed to be 2, i.e., the real part and the imaginary part.'
        )

    sample_power = torch.sum(x ** 2, dim=-1, keepdim=True)
    mean_power = torch.mean(sample_power, dim=-2, keepdim=True)
    return x / torch.sqrt(mean_power)


class FIRLayer(nn.Module):
    """Frequency-domain FIR filter with learnable complex taps.

    Taps are stored real/imag-split (trailing axis of size 2) with shape
    [case_num, tap, 2].  ``forward`` filters a block by multiplying the
    FFT of the input with the FFT of the taps (i.e., circular
    convolution), optionally mixing the two polarizations through a
    second tap set (``inter_pol``).  ``fit`` adapts the taps block by
    block with a torch optimizer; ``time_vary_infer`` is a standalone
    sample-by-sample adaptive equalizer.
    """

    def __init__(self,
                 tap,
                 case_num=2,
                 power_norm=False,
                 centor_one=True,
                 inter_pol=False):
        """Create the filter.

        Parameters
        ----------
        tap : int
            Number of filter taps.
        case_num : int, optional
            Number of independent filter cases (e.g. polarizations).
        power_norm : bool, optional
            If True, normalize the input power in ``forward``.
        centor_one : bool, optional
            If True, initialize each filter as a centered unit impulse.
        inter_pol : bool, optional
            If True, add a second tap set applied to the flipped
            (cross-polarization) input.
        """
        super(FIRLayer, self).__init__()
        # -----------------initialize linear kernel---------------
        h = torch.zeros((tap, case_num), dtype=torch.complex128)
        if centor_one == True:
            # Unit impulse at the center tap -> identity filter at init.
            h[tap // 2, :] = 1
        h = torch.view_as_real(h)
        self.h = nn.Parameter(h.transpose(0, 1), requires_grad=True)

        self.inter_pol = inter_pol

        if inter_pol:
            # Cross-polarization taps start at zero (no mixing).
            h_inter_pol = torch.zeros((tap, case_num), dtype=torch.complex128)
            h_inter_pol = torch.view_as_real(h_inter_pol)
            self.h_inter_pol = nn.Parameter(h_inter_pol.transpose(0, 1),
                                            requires_grad=True)
        self.tap = tap
        self.power_norm = power_norm
        self.case_num = case_num

    def forward(self, x):
        """Filter one signal block.

        Parameters
        ----------
        x : torch.Tensor
            Input of shape [batch, pol, block_size, 2] (real/imag-split).

        Returns
        -------
        torch.Tensor
            Filtered block of the same shape, rolled by -(tap // 2) to
            compensate the group delay of the centered taps.
        """
        batchSize, pol, block_size, ri = x.shape
        # assert (pol == self.case_num)
        assert (ri == 2)
        if self.power_norm == True:
            with torch.no_grad():
                x = norm_power(x)

        # Multiply spectra: circular convolution of x with the taps;
        # with inter_pol the polarization-flipped input is filtered by
        # the cross taps and added in.
        if self.inter_pol:
            x = cmul(riSplitFFT(x),
                     riSplitFFT(self.h, n=block_size)[np.newaxis, ...]) + cmul(
                x.flip(dims=(1,)),
                riSplitFFT(self.h_inter_pol, n=block_size)[np.newaxis,
                ...])
        else:
            x = cmul(riSplitFFT(x),
                     riSplitFFT(self.h, n=block_size)[np.newaxis, ...])

        x = riSplitIFFT(x)
        tap = self.h.shape[-2]
        self.tap = tap
        x = torch.roll(x, -(tap // 2), dims=-2)
        return x

    def fit(self,
            sig,
            err_mode='DDM',
            power_norm=True,
            lr=1e-3,
            constellations=None,
            sig_label=None,
            block_size=128,
            remain=1,
            iter_num=2,
            shuffle_each_iter=True,
            optimizer=None,
            dd_with_case_indx=None):
        """Adapt the taps on a signal and return the equalized signal.

        Blocks of ``block_size`` samples (of which ``remain`` are kept
        per step) are filtered, compared against a reference, and the
        squared error is backpropagated into the taps.

        Parameters
        ----------
        sig : torch.Tensor
            Input of shape [1, pol, samples, 2].
        err_mode : str, optional
            'DDM' (decision-directed, needs ``constellations``) or
            'DAM' (data-aided, needs ``sig_label``).
        power_norm : bool, optional
            Normalize the input power before adaptation.
        lr : float, optional
            Learning rate for the default Adam optimizer.
        constellations : torch.Tensor or np.ndarray, optional
            Ideal constellation points used for decisions (DDM).
        sig_label : torch.Tensor, optional
            Known transmitted signal (DAM).
        block_size, remain : int, optional
            Block length and retained samples per adaptation step.
        iter_num : int, optional
            Number of passes over the signal.
        shuffle_each_iter : bool, optional
            Visit blocks in random order on each pass.
        optimizer : torch.optim.Optimizer, optional
            Custom optimizer; Adam is created when omitted.
        dd_with_case_indx : optional
            If given, only these case indices contribute to decisions
            and to the error.

        Returns
        -------
        torch.Tensor
            The equalized signal, same shape as ``sig``.
        """
        if sig.ndim != 4 or sig.shape[0] != 1 or sig.shape[-1] != 2:
            raise Exception(
                'sig should have the shape of [1, pol, samples, 2]')
        if err_mode == 'DDM':
            if constellations is None:
                raise Exception(
                    'When the err_mode is DDM, constellations should be assigned.'
                )
            if isinstance(constellations, np.ndarray):
                constellations = torch.from_numpy(constellations)
                if torch.is_complex(constellations):
                    constellations = torch.view_as_real(constellations)
            constellations = constellations.to(sig.device)
        elif err_mode == 'DAM':
            if sig_label is None:
                raise Exception(
                    'When the err_mode is DAM, sig_label should be assigned.')
        else:
            raise Exception('Invalid err_mode.')

        if optimizer is not None:
            if not isinstance(optimizer, torch.optim.Optimizer):
                raise Exception(
                    'optimizer is supposed to be an instance of torch.optim.Optimzer.'
                )
        else:
            optimizer = torch.optim.Adam(self.parameters(), lr=lr)

        def error_function(y, yl):
            # y shape : [B, pol, 2]
            res = y - yl
            err = torch.mean(torch.sum(res ** 2, dim=-1), dim=(0, 1, 2))
            return err

        if power_norm == True:
            with torch.no_grad():
                sig = norm_power(sig)
        sig = sig.detach()
        ret = sig.clone()
        bs = BlockSelector(sig_size=sig.shape[-2],
                           block_size=block_size,
                           remain=remain)

        if err_mode == 'DAM':
            sig_dec = sig_label

        for iter in range(iter_num):
            if err_mode == 'DDM':
                # ret_np = torch.view_as_complex(ret).cpu().data.numpy()
                # sig_dec = torch.from_numpy(sym_decide(
                #     ret_np, constellations)).to(sig.device)
                # sig_dec = torch.view_as_real(sig_dec)

                # Decisions are refreshed once per pass from the current
                # equalized output.
                sig_dec = sym_decide(ret, constellations)
                if dd_with_case_indx is not None:
                    sig_dec = sig_dec[..., np.newaxis, dd_with_case_indx, :, :]
            block_sample_list = list(range(len(bs)))
            if shuffle_each_iter:
                shuffle(block_sample_list)

            for block_indx in block_sample_list:
                indexes = bs[block_indx]
                ''' sig_temp shape: [1, cases, taps, 2] '''
                sig_temp = sig[..., indexes, :]

                y = self(sig_temp)
                yl = sig_dec[..., indexes, :]

                if dd_with_case_indx is not None:
                    y = y[..., np.newaxis, dd_with_case_indx, :, :]
                    yl = yl[..., np.newaxis, dd_with_case_indx, :, :]

                err = error_function(y, yl)

                optimizer.zero_grad()
                err.backward()
                optimizer.step()

                # Keep only the non-overlapping center of the filtered
                # block; the last block drops the wrapped-around tail.
                if block_indx != len(bs) - 1:
                    ret[..., block_indx * remain:(block_indx + 1) *
                                                 remain, :] = y[...,
                                                              bs.p_overlap:remain + bs.p_overlap, :]
                else:
                    ret[..., block_indx * remain:(block_indx + 1) * remain -
                                                 bs.tail_size, :] = y[..., bs.p_overlap:remain +
                                                                                        bs.p_overlap - bs.tail_size, :]
        return ret

    @staticmethod
    def time_vary_infer(sig,
                        iter_num=2,
                        err_mode='Godard',
                        lr=1e-3,
                        tap=25,
                        constellations=None,
                        updata_step=1):
        """Sample-by-sample adaptive equalization with sliding FIR taps.

        Direct taps ``w`` and cross-polarization taps ``wi`` (applied to
        the polarization-flipped window) are updated with an LMS-style
        rule using either the constant-modulus 'Godard' error or the
        decision-directed 'DDM' error.

        Parameters
        ----------
        sig : torch.Tensor
            Input of shape [1, pol, samples, 2] (real/imag-split).
        iter_num : int, optional
            Number of passes over the signal.
        err_mode : str, optional
            'Godard' (blind, constant modulus) or 'DDM' (needs
            ``constellations``).
        lr : float, optional
            Step size of the tap update.
        tap : int, optional
            Number of filter taps (sliding-window length).
        constellations : torch.Tensor or np.ndarray, optional
            Ideal constellation points (DDM only).
        updata_step : int, optional
            Stride (in samples) between successive windows.

        Returns
        -------
        torch.Tensor
            The equalized signal, real/imag-split.
        """
        if sig.ndim != 4 or sig.shape[0] != 1 or sig.shape[-1] != 2:
            raise Exception('sig should have the shape of [1, pol, samples]')

        if err_mode == 'DDM' and constellations is None:
            raise Exception(
                'When the err_mode is DDM, constellations should be assigned.')

        if constellations is not None:
            if isinstance(constellations, np.ndarray):
                constellations = torch.from_numpy(constellations).to(
                    sig.device)
                if torch.is_complex(constellations):
                    constellations = torch.view_as_real(constellations)
            constellations = constellations.to(sig.device)
        sig = torch.view_as_complex(sig)

        def error_function(y, err_mode, yl=None):
            if err_mode == 'Godard':
                # Constant-modulus error: distance of |y|^2 from 1.
                res = torch.conj(y) * y - 1
                # err = torch.sum(torch.squeeze(res * res.conj()))/2
                err = y.conj() * res
            elif err_mode == 'DDM':
                if yl == None:
                    raise Exception('Decision result should be assigned')
                res = y - yl
                err = torch.sum(res * res.conj(),
                                dim=(-1, -2)) / y.shape[-1] / 2
            else:
                raise ValueError('Invalid error mode.')
            return err

        w = torch.zeros([1, 2, tap], device=sig.device, dtype=sig.dtype)
        wi = torch.zeros([1, 2, tap], device=sig.device, dtype=sig.dtype)

        if err_mode == 'Godard':
            # Center-spike initialization for the blind mode.
            w[..., round(tap // 2)] = 1

        with torch.no_grad():
            # Normalize to unit average power before adaptation.
            sig_size = sig.shape[-1]
            sig = sig / \
                  torch.sqrt(torch.sum(torch.abs(sig) ** 2,
                                       dim=-1, keepdim=True) / sig_size)
            ret = sig.clone()
            bs = BlockSelector(sig_size=sig_size,
                               block_size=tap,
                               remain=updata_step)
            for iter in range(iter_num):
                if err_mode == 'DDM':
                    # ret_np = ret.cpu().data.numpy()
                    # sig_dec = torch.from_numpy(
                    #     sym_decide(ret_np, constellations)).to(sig.device)
                    sig_dec = sym_decide(ret, constellations)
                elif err_mode == 'Godard':
                    # NOTE(review): ret_np is computed but never used in
                    # this branch.
                    ret_np = ret.cpu().data.numpy()
                else:
                    raise NotImplementedError

                for block_indx in range(len(bs)):
                    indexes = bs[block_indx]
                    ''' sig_temp shape: [1, pol, tap] '''
                    sig_temp = sig[..., indexes]
                    y = torch.sum(w.conj() * sig_temp +
                                  wi.conj() * torch.flip(sig_temp, dims=(1,)),
                                  dim=-1,
                                  keepdim=True)

                    # Taps are only updated on every other window.
                    if block_indx % 2 == 1:
                        if err_mode == 'Godard':
                            err = error_function(y, err_mode)
                        else:
                            yl = sig_dec[..., indexes]
                            err = error_function(y, err_mode, yl)

                        w = w - lr * err * sig_temp
                        wi = wi - lr * err * torch.flip(sig_temp, dims=(1,))

                    ret[..., block_indx:(block_indx + 1)] = y

        ret = torch.view_as_real(ret)
        return ret


class MatchedFilterLayer(FIRLayer):
    """Matched filter (root-raised-cosine) implemented as a FIRLayer.

    The frequency-domain RRC response is converted to centered
    time-domain taps and copied into every filter case.
    """

    def __init__(self,
                 alpha,
                 span,
                 sample_factor,
                 case_num,
                 power_norm=True,
                 shape='rrc'):
        tap_count = span * sample_factor
        super(MatchedFilterLayer, self).__init__(tap=tap_count,
                                                 case_num=case_num,
                                                 power_norm=power_norm)
        assert (tap_count % 2 == 0)
        self.sf = sample_factor
        self.shape = shape
        if shape != 'rrc':
            raise NotImplementedError

        freq_taps = RRC(alpha=alpha, sample_factor=sample_factor, span=span)
        freq_taps = torch.from_numpy(freq_taps.conj()).reshape([1, tap_count])

        # Lift the real spectrum to real/imag-split form, transform to the
        # time domain and center the impulse response.
        time_taps = riSplitIFFT(format_rt(freq_taps))
        time_taps = time_taps.roll(tap_count // 2, dims=-2)
        for case in range(case_num):
            self.h.data[case, ...] = time_taps


def gaussianLPF(BW, sample_factor=2, span=10):
    """Generate Gaussian low-pass filter taps in the frequency domain.

    Parameters
    ----------
    BW : float
        3 dB bandwidth of the LPF, normalized by the symbol rate.  For
        example, at 30 GBaud a 30 GHz bandwidth corresponds to BW = 1
        and a 15 GHz bandwidth to BW = 0.5.
    sample_factor : int, optional
        The number of samples per symbol, by default 2.
    span : int, optional
        The number of correlated symbols, by default 10.

    Returns
    -------
    np.ndarray
        Taps of length ``sample_factor * span``, fftshifted so the DC
        bin comes first.
    """
    N = sample_factor * span
    assert N % 2 == 0
    # Convert the 3 dB bandwidth to the Gaussian standard deviation.
    sigma = BW / np.sqrt(2 * np.log(2))
    freq = (np.arange(N) - N // 2) / sample_factor
    taps = np.exp(-freq ** 2 / (2 * sigma ** 2))
    return np.fft.fftshift(taps, axes=-1)


def riexp(x):
    """Complex exponential exp(x) of a real/imag-split tensor.

    Computes exp(Re x) * (cos(Im x), sin(Im x)), stacked on the last axis.
    """
    if not isinstance(x, torch.Tensor):
        raise TypeError('The input type is supposed to be torch.Tensor.')
    if x.shape[-1] != 2:
        raise ValueError(
            'The last dimension of the input is supposed to be 2, i.e., the real part and the imaginary part.'
        )
    magnitude = torch.exp(x[..., 0, np.newaxis])
    phase = x[..., 1]
    return magnitude * torch.stack([torch.cos(phase), torch.sin(phase)],
                                   dim=-1)


class GaussianLPFLayer(FIRLayer):
    """Fixed Gaussian low-pass filter implemented as a FIRLayer.

    The frequency-domain Gaussian response is converted to centered
    time-domain taps and copied into every filter case.
    """

    def __init__(self, BW, span, sample_factor, case_num, power_norm=False):
        tap_count = span * sample_factor
        super(GaussianLPFLayer, self).__init__(tap=tap_count,
                                               case_num=case_num,
                                               power_norm=power_norm)
        assert (tap_count % 2 == 0)
        self.sf = sample_factor

        freq_taps = gaussianLPF(BW=BW, sample_factor=sample_factor, span=span)
        freq_taps = torch.from_numpy(freq_taps).reshape([1, tap_count])

        # Lift the real spectrum to real/imag-split form, transform to the
        # time domain and center the impulse response.
        time_taps = riSplitIFFT(format_rt(freq_taps))
        time_taps = time_taps.roll(tap_count // 2, dims=-2)
        for case in range(case_num):
            self.h.data[case, ...] = time_taps


class EDCLayer(FIRLayer):
    """Electronic chromatic-dispersion compensation (EDC) FIR layer.

    Initializes the FIR taps with one of several closed-form CD-compensation
    designs:

    * ``'FSM'``   -- frequency-sampled inverse CD transfer function.
    * ``'TDS'``   -- truncated time-domain impulse response.
    * ``'LS'``    -- least-squares design; Eghbali et al., JLT 32(8), 2014 [1].
    * ``'LS_CO'`` -- least-squares design with an out-of-band energy
                     constraint; Sheikh et al., JLT 34(22), 2016 [2].

    Parameters
    ----------
    DL : float
        Accumulated dispersion (dispersion parameter times fiber length).
    c : float
        Speed of light [m/s].
    wave_length : float
        Carrier wavelength [m].
    tap : int
        Number of FIR taps.
    sample_factor : int
        Samples per symbol.
    symbol_rate : float
        Symbol rate [Baud].
    power_norm : bool
        Forwarded to FIRLayer.
    case_num : int
        Number of parallel filter cases; every case gets the same taps.
    init_method : str
        One of ``'FSM'``, ``'TDS'``, ``'LS'``, ``'LS_CO'``.
    rolloff : float
        Pulse roll-off bounding the passband in the 'LS_CO' design.
    lmbd_for_LS_CO : float or None
        Trade-off weight for 'LS_CO'; solved numerically when ``None``.
    eps_o_max_db : float
        Target out-of-band energy [dB] used when solving for ``lmbd``.

    Raises
    ------
    ValueError
        If ``init_method`` is not one of the supported strings.
    """

    def __init__(self,
                 DL=17e-6 * 100e3,
                 c=3e8,
                 wave_length=1552.52e-9,
                 tap=2048,
                 sample_factor=2,
                 symbol_rate=28e9,
                 power_norm=False,
                 case_num=1,
                 init_method='LS_CO',
                 rolloff=0.1,
                 lmbd_for_LS_CO=5.12589879e-05,
                 eps_o_max_db=-0.1):
        super(EDCLayer, self).__init__(tap=tap,
                                       power_norm=power_norm,
                                       case_num=case_num)
        # -----------------initialize linear kernel---------------
        self.sample_factor = sample_factor
        Tsym = 1 / symbol_rate
        Ts = Tsym / sample_factor  # sampling period
        self.lmbd_for_LS_CO = lmbd_for_LS_CO

        if init_method == 'FSM':
            # Sample the inverse CD frequency response and IFFT it.
            digital_smpl_point = torch.arange(-(tap // 2), tap // 2 + tap % 2)
            omega = (2 * np.pi / Ts / tap) * digital_smpl_point
            var = 1j / 2 * -DL * wave_length ** 2 / (2 * np.pi * c) * omega ** 2

            H = torch.exp(var)
            H = torch.roll(H, tap // 2)  # undo the centered frequency grid
            h_temp = tfft.ifft(H)
            h_temp = torch.roll(h_temp, tap // 2)  # center the impulse response
            h_temp = torch.view_as_real(h_temp)
        elif init_method == 'TDS':
            # Truncated time-domain impulse response of the inverse CD filter.
            nbound = tap // 2
            n = np.arange(-nbound, nbound + tap % 2)
            h_temp = np.sqrt(1j * c * Ts ** 2 /
                             (-DL * wave_length ** 2)) * np.exp(
                                 -1j * np.pi * c * Ts ** 2 * (n ** 2) /
                                 -DL / wave_length ** 2)
            h_temp = torch.from_numpy(h_temp)
            h_temp = torch.view_as_real(h_temp)
        elif init_method == 'LS':
            # [1] A. Eghbali, H. Johansson, O. Gustafsson, and S. J. Savory,
            # "Optimal least-squares FIR digital filters for compensation of
            # chromatic dispersion in digital coherent optical receivers,"
            # J. Light. Technol., vol. 32, no. 8, pp. 1449-1456, 2014.
            nbound = tap // 2
            n = np.arange(-nbound, nbound + tap % 2)
            K = DL * wave_length ** 2 / (4 * np.pi * c * Ts ** 2)
            # Full-band design; the band-limited variant would instead use
            # Omega = +/- pi * (1 + rolloff) / sample_factor.
            Omega1 = -np.pi
            Omega2 = np.pi
            Q = np.zeros([tap, tap]) + 1j * np.zeros([tap, tap])
            for m in range(tap):
                for nprime in range(tap):
                    if nprime == m:
                        Q[nprime, m] = (Omega2 - Omega1) / (2 * np.pi)
                    else:
                        Q[nprime, m] = (np.exp(-1j * (m - nprime) * Omega1) -
                                        np.exp(-1j * (m - nprime) * Omega2)
                                        ) / (1j * 2 * np.pi * (m - nprime))

            rot = np.exp(1j * 3 * np.pi / 4)
            Dn = np.exp(-1j * (n ** 2 / (4 * K) + 3 * np.pi / 4)) / (
                4 * np.sqrt(np.pi * K)) * (
                    erf(rot * (2 * K * np.pi - n) / (2 * np.sqrt(K))) +
                    erf(rot * (2 * K * np.pi + n) / (2 * np.sqrt(K))))
            # The original added a regularization term multiplied by zero;
            # inverting Q directly is numerically identical.
            Qinv = np.linalg.inv(Q)
            h_temp = np.matmul(Qinv, Dn)
            if DL > 0:
                # Positive DL: conjugate flips the phase sign of the design.
                h_temp = h_temp.conj()
            h_temp = torch.from_numpy(h_temp)
            h_temp = torch.view_as_real(h_temp)
        elif init_method == 'LS_CO':
            # [2] A. Sheikh, C. Fougstedt, A. G. I. Amat, P. Johannisson,
            # P. Larsson-Edefors, and M. Karlsson, "Dispersion Compensation
            # FIR Filter with Improved Robustness to Coefficient Quantization
            # Errors," J. Light. Technol., vol. 34, no. 22, pp. 5110-5117, 2016.

            # TODO: Rewrite using symmetry of m and n
            nbound = tap // 2
            m = np.arange(-nbound, nbound + tap % 2)
            DL_sigh = -1 if DL < 0 else 1
            DL = np.abs(DL)
            M = DL * wave_length ** 2 / (4 * np.pi * c * Ts ** 2)
            Omega1 = -np.pi * (1 + rolloff) / sample_factor
            Omega2 = np.pi * (1 + rolloff) / sample_factor
            self.Omega1 = Omega1
            self.Omega2 = Omega2

            def cal_h(lmbd):
                # LS_CO taps for trade-off weight `lmbd` (closed forms of [2]).
                Q = np.zeros([tap, tap]) + 1j * np.zeros([tap, tap])
                for mprime in range(tap):
                    for nprime in range(tap):
                        if nprime == mprime:
                            Q[nprime, mprime] = (
                                2 * np.pi * (lmbd + 1) + (lmbd + 1) * Omega1 -
                                (lmbd + 1) * Omega2) / (2 * np.pi + Omega1 -
                                                        Omega2)
                        else:
                            d = mprime - nprime
                            e_diff = (np.exp(1j * d * Omega1) -
                                      np.exp(1j * d * Omega2))
                            Q[nprime, mprime] = (
                                lmbd * e_diff /
                                (1j * d * (2 * np.pi + Omega1 - Omega2)) +
                                e_diff / (1j * d * (Omega1 - Omega2)))
                rot = np.exp(-1j * np.pi / 4)
                vm = np.exp(-1j * (m ** 2 / (4 * M) + 3 * np.pi / 4)) / 2 / (
                    Omega2 - Omega1) * np.sqrt(np.pi / M) * (
                        erf(rot * (2 * M * Omega1 + m) / 2 / np.sqrt(M)) -
                        erf(rot * (2 * M * Omega2 + m) / 2 / np.sqrt(M)))
                Qinv = np.linalg.inv(Q)
                h = np.matmul(Qinv, vm)
                return h

            if lmbd_for_LS_CO is None:
                print('Solving lmbd...')
                lmbd_for_LS_CO = self.cal_lmbd_for_LS_CO(eps_o_max_db, cal_h)
                print(
                    'lambda got, which is {lmbd:}'.format(lmbd=lmbd_for_LS_CO))

            h_temp = cal_h(lmbd_for_LS_CO)

            if DL_sigh == 1:
                h_temp = h_temp.conj()
            h_temp = torch.from_numpy(h_temp)
            h_temp = torch.view_as_real(h_temp)
            self.lmbd_for_LS_CO = lmbd_for_LS_CO
        else:
            raise ValueError('Invalid init method.')

        # Every case starts from the same analytic taps.
        for indx in range(case_num):
            self.h.data[indx, ...] = h_temp

    def cal_lmbd_for_LS_CO(self, eps_o_max_db, cal_h_func):
        """Numerically solve for the LS_CO trade-off weight ``lmbd``.

        Minimizes the mismatch between the filter's relative out-of-band
        energy and the target ``eps_o_max_db``.

        Parameters
        ----------
        eps_o_max_db : float
            Target out-of-band energy in dB.
        cal_h_func : callable
            ``lmbd -> taps`` closure built in ``__init__``.

        Returns
        -------
        numpy.ndarray
            Optimizer solution (1-element array, as before).
        """
        eps_o_max = 10 ** (eps_o_max_db / 10)
        import scipy.optimize as opt

        def obj_func(lmbd):
            h = cal_h_func(lmbd)
            ret = np.abs(eps_o_max - (2 * np.pi * np.sum(np.abs(h) ** 2) +
                                      self.Omega1 - self.Omega2) /
                         (2 * np.pi + self.Omega1 - self.Omega2))
            return ret

        # BFGS is an unconstrained method; the LinearConstraint previously
        # built here was never passed to the optimizer, so it was removed.
        opt_result = opt.minimize(obj_func,
                                  0,
                                  method='BFGS',
                                  options={'disp': False})
        return opt_result['x']


class CSGD(torch.optim.Optimizer):
    """Plain SGD optimizer with optional L2 weight decay.

    Update rule per parameter: ``p <- p - lr * (p.grad + weight_decay * p)``.

    Parameters
    ----------
    parameters : iterable
        Parameters (or param-group dicts) to optimize.
    lr : float
        Learning rate.
    weight_decay : float, optional
        L2 penalty coefficient, by default 0.
    """

    def __init__(self, parameters, lr, weight_decay=0):
        defaults = dict(lr=lr, weight_decay=weight_decay)
        super(CSGD, self).__init__(parameters, defaults)
        # Kept for backward compatibility with existing callers; step() now
        # reads the per-group values from self.param_groups instead.
        self.param = parameters
        self.lr = lr
        self.weight_decay = weight_decay

    def step(self, closure=None):
        """Perform a single optimization step.

        Parameters without a gradient are skipped (previously this raised a
        TypeError on ``grad is None``), and per-group ``lr``/``weight_decay``
        are honored instead of the constructor-wide copies.

        Parameters
        ----------
        closure : callable, optional
            Re-evaluates the model and returns the loss (standard
            ``Optimizer.step`` contract).
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        with torch.no_grad():
            for group in self.param_groups:
                lr = group['lr']
                weight_decay = group['weight_decay']
                for param in group['params']:
                    if param.grad is None:
                        continue  # param not touched in this backward pass
                    param -= lr * (param.grad + weight_decay * param)
        return loss


def movAvr(x, win_size):
    """Circular moving average along the last dimension, via FFT.

    Convolves ``x`` (circularly) with a length-``win_size`` box-car of unit
    sum, then rolls the result left by ``win_size // 2`` so the average is
    centered on each sample.

    Parameters
    ----------
    x : torch.Tensor
        Input of shape [..., pol, samples]; may be complex.
    win_size : int
        Averaging window length.

    Returns
    -------
    torch.Tensor
        Complex tensor of the same shape as ``x``.
    """
    n_pol, n_smpl = x.shape[-2], x.shape[-1]
    kernel = torch.ones((n_pol, win_size)) / win_size
    spectrum = tfft.fft(x)
    kernel = kernel.to(spectrum.device)
    filtered = tfft.ifft(spectrum * tfft.fft(kernel, n=n_smpl))
    # Compensate the kernel's group delay to center the window.
    return torch.roll(filtered, -(win_size // 2), dims=-1)


class MovAvrLayer(FIRLayer):
    """FIR layer initialized as a length-N moving-average (box-car) filter.

    Taps are real, all equal to 1/N; the imaginary part is zero.
    """

    def __init__(self, N, requires_grad=False, case_num=1):
        super(MovAvrLayer, self).__init__(tap=N,
                                          case_num=case_num,
                                          power_norm=False)
        real_part = torch.ones((1, N)) / N
        imag_part = torch.zeros_like(real_part)
        box_taps = torch.stack([real_part, imag_part], dim=-1)

        # NOTE(review): the whole tensor is re-assigned on every pass (no
        # per-case indexing), so each case ends with identical taps —
        # preserved as-is; confirm against FIRLayer's expected `h` shape.
        for _ in range(case_num):
            self.h.data = box_taps
        self.h.requires_grad_(requires_grad)


class WeightAvrLayer(FIRLayer):
    """FIR layer whose taps form a Gaussian-shaped weighted-average window.

    The real part of the taps samples a normal pdf on the integer grid
    [-N, N]; the imaginary part is zero.
    """

    def __init__(self, N, std_dev=1, requires_grad=False, mean=0, case_num=1):
        # NOTE(review): the layer is declared with tap=N, but 2*N+1 tap
        # values are built below — confirm against FIRLayer's expected
        # shape for `h`.
        super(WeightAvrLayer, self).__init__(tap=N,
                                             case_num=case_num,
                                             power_norm=False)
        x = np.linspace(-N, N, 2 * N + 1)
        re = norm.pdf(x, mean, std_dev)
        # Shape the filter taps as a Gaussian; the standard deviation is
        # initially set to 1.
        re = torch.from_numpy(re[np.newaxis, :])
        im = torch.zeros_like(re)
        h = torch.stack([re, im], dim=-1)

        # NOTE(review): each pass assigns the full tensor (no per-case
        # indexing), so every case ends up with the same taps.
        for indx in range(case_num):
            self.h.data = h
        self.h.requires_grad_(requires_grad)


def unwrap(phi, dim=-1):
    """Torch analogue of ``np.unwrap``: remove 2*pi jumps along ``dim``.

    Bug fix: the original honored ``dim`` only in the final ``cumsum`` while
    the difference/padding/adjustment steps were hard-coded to the last axis,
    so any ``dim != -1`` silently produced wrong results. The target axis is
    now moved to the end, processed, and moved back, so all axes agree.

    Parameters
    ----------
    phi : torch.Tensor
        Wrapped phase values (radians).
    dim : int, optional
        Axis along which to unwrap, by default -1.

    Returns
    -------
    torch.Tensor
        Unwrapped phase, same shape as ``phi``.
    """
    x = phi.movedim(dim, -1)
    # First difference, padded with a leading zero so shapes match.
    dphi = nn.functional.pad(x[..., 1:] - x[..., :-1], (1, 0))
    # Wrap differences into (-pi, pi]; keep +pi for positive jumps at the
    # boundary (same convention as np.unwrap).
    dphi_m = ((dphi + np.pi) % (2 * np.pi)) - np.pi
    dphi_m[(dphi_m == -np.pi) & (dphi > 0)] = np.pi
    phi_adj = dphi_m - dphi
    # Differences below the pi discontinuity need no correction.
    phi_adj[dphi.abs() < np.pi] = 0
    return (x + phi_adj.cumsum(-1)).movedim(-1, dim)


class PhaseRecLayer(nn.Module):
    """Viterbi&Viterbi-style (4th-power) carrier-phase recovery layer.

    The input is raised to the 4th power to strip the modulation, smoothed
    with a circular moving average of length ``win_size``, and the unwrapped
    angle divided by 4 is used as the phase estimate, which is then removed
    from the signal.
    """

    def __init__(self, win_size, rotate_fac=None):
        # win_size: moving-average window length for the phase estimate.
        # rotate_fac: optional fixed per-channel rotation applied after
        # de-rotation; must be a 1-D tensor (reshaped to [1, C, 1]).
        super(PhaseRecLayer, self).__init__()
        self.win_size = win_size
        self.rotate_fac = rotate_fac
        if rotate_fac is not None:
            assert (isinstance(rotate_fac, torch.Tensor))
            assert (rotate_fac.ndim == 1)
            self.rotate_fac = self.rotate_fac.reshape(1, -1, 1)

    def forward(self, sig):
        """Recover carrier phase.

        sig: real/imag-split tensor [..., 2]; returns the same format.
        """
        sig = torch.view_as_complex(sig)
        sig_4th_power = sig * sig * sig * sig

        # Avoid gradient backpropagation through v-v
        sig_4th_power = sig_4th_power.detach()

        # NOTE(review): the exp(1j*pi) factor rotates the 4th-power signal by
        # pi before averaging — presumably to move its mean angle away from
        # the +/-pi wrap point; confirm against the transmitter convention.
        sig_4th_power = movAvr(sig_4th_power * np.exp(1j * np.pi),
                               self.win_size)

        # Unwrapped 4th-power angle divided by 4 gives the phase estimate.
        phi = unwrap(sig_4th_power.angle()) / 4

        if self.rotate_fac is not None:
            rotate_fac = self.rotate_fac.to(sig.device)
            # De-rotate by phi (multiply by exp(-1j*phi)), then apply the
            # fixed rotation factor.
            ret = sig * (torch.cos(phi) - 1j * torch.sin(phi)) * rotate_fac
            return torch.view_as_real(ret)
        else:
            ret = sig * (torch.cos(phi) - 1j * torch.sin(phi))
            return torch.view_as_real(ret)


def torch_cmplx_exp(phi):
    """Complex exponential of a complex tensor, built from real trig ops.

    Computes exp(phi) = exp(Re(phi)) * (cos(Im(phi)) + 1j * sin(Im(phi))).

    Parameters
    ----------
    phi : torch.Tensor
        Complex-dtype tensor.

    Returns
    -------
    torch.Tensor
        Complex tensor of the same shape, equal to exp(phi).
    """
    magnitude = torch.exp(phi.real)
    return magnitude * (torch.cos(phi.imag) + 1j * torch.sin(phi.imag))


class FrequencyOffsetEqualizeLayer(nn.Module):
    """Coarse frequency-offset estimation and compensation.

    Raises the signal to the 4th power, locates the spectral peak of its
    magnitude spectrum, takes a quarter of the corresponding normalized
    frequency as the offset estimate, and de-rotates the input accordingly.
    """

    def __init__(self):
        super(FrequencyOffsetEqualizeLayer, self).__init__()

    def forward(self, sig):
        """sig: real/imag-split tensor [..., 2]; returns the same format."""
        sig = torch.view_as_complex(sig)
        # 4th power removes the modulation; no gradient through the estimate.
        quad = (sig * sig * sig * sig).detach()
        spectrum = torch_fftshift(torch.abs(torch.fft.fft(quad)), dims=-1)
        n = spectrum.shape[-1]
        bins = torch.arange(n).to(spectrum.device)
        # Normalized frequency axis in [-1/2, 1/2).
        norm_freq = bins / n - 1 / 2
        peak = torch.argmax(spectrum, -1, keepdim=True)
        offset = norm_freq[peak] / 4  # undo the 4th-power frequency scaling
        compensated = sig * torch_cmplx_exp(-1j * 2 * np.pi * offset * bins)
        return torch.view_as_real(compensated)


class SymbolTimingEstLayer(nn.Module):
    """Non-data-aided symbol-timing estimation and correction.

    Estimates a fractional timing offset from spectral-line terms of the
    oversampled signal, then applies the shift as a linear phase ramp in the
    frequency domain.

    Parameters
    ----------
    smplFac : int, optional
        Samples per symbol of the input, by default 2.
    first_sample : bool, optional
        If truthy, roll the output one sample left after correction,
        by default True.
    """

    def __init__(self, smplFac=2, first_sample=True):
        super(SymbolTimingEstLayer, self).__init__()
        self.sf = smplFac
        self.first_sample = first_sample

    def forward(self, sig):
        """sig: real/imag-split tensor [..., samples, 2]; returns same format."""
        smplNum = sig.shape[-2]
        k = torch.arange(smplNum).to(sig.device)
        k = k + 1
        # Centered angular-frequency grid for the correction filter.
        omega = 2 * np.pi / smplNum * torch.arange(
            -(smplNum // 2), smplNum // 2 + smplNum % 2).to(sig.device)
        sig = sig[..., 0] + sig[..., 1] * 1j
        # Timing metric: angle of the summed alternating-sign power terms
        # (spectral component at the symbol rate) mapped to a fractional delay.
        tau = 1 / 2 / np.pi * torch.angle(
            torch.sum(sig * sig.conj() * torch.exp(-1j * k * np.pi) +
                      torch.real(sig * torch.conj(torch.roll(sig, 1))) *
                      torch.exp(-1j * (k - 0.5) * np.pi)))
        # Apply the estimated delay as a linear phase in the frequency domain.
        ret = torch.fft.ifft(
            torch.fft.fft(sig) *
            torch_fftshift(torch.exp(-1j * omega * tau * self.sf), dims=-1))

        if self.first_sample:  # idiom fix: was `== True`
            ret = ret.roll(-1, dims=-1)
        ret = torch.view_as_real(ret)
        return ret


class ExpNLLayer(nn.Module):
    """Learnable exponential nonlinear phase-rotation layer.

    Rotates each real/imag-split sample by a phase proportional to the
    instantaneous total power, with a single trainable proportionality
    factor.
    """

    def __init__(self, init_factor):
        # init_factor: starting value of the trainable phase factor.
        super(ExpNLLayer, self).__init__()
        self.fac = nn.Parameter(torch.tensor((init_factor,)),
                                requires_grad=True)

    def forward(self, x):
        ''' x shape: [B, pol, block, 2] '''
        # Total power across polarizations and the real/imag axis.
        total_power = torch.sum(x ** 2, dim=[1, -1], keepdim=True)
        phase = self.fac * total_power
        rotator = torch.cat([torch.cos(phase), torch.sin(phase)], dim=-1)
        return cmul(rotator, x)

    @staticmethod
    def cal_init_factor(gm, eps, alp_dB, power_dB, step_len):
        """Analytic initial factor: (8/9) * gm * eps * L_eff * P.

        gm: nonlinear coefficient; eps: scaling factor; alp_dB: attenuation
        in dB over `step_len`; power_dB: launch power in dBm; step_len:
        step length. Returns the initial phase factor.
        """
        power = 10 ** (power_dB / 10 - 3)  # dBm -> W
        alp = -np.log(np.power(10, -alp_dB / 10))
        Leff = (1 - np.exp(-alp * step_len)) / alp  # effective length
        return 8 / 9 * gm * eps * Leff * power


class CRExpNLLayer(nn.Module):
    """Exponential nonlinear layer with a trainable correction ratio.

    Same power-dependent phase rotation as ExpNLLayer, but the analytic
    factor stays fixed while a separate correction ratio ``cr`` is learned.
    """

    def __init__(self, init_factor, init_cr):
        super(CRExpNLLayer, self).__init__()
        # NOTE(review): `fac` is a plain tensor (not a registered buffer),
        # so forward() moves it to the input device on every call —
        # preserved as-is to keep the state_dict unchanged.
        self.fac = torch.tensor((init_factor,))
        cr = torch.Tensor([init_cr])[..., np.newaxis]
        self.cr = nn.Parameter(cr, requires_grad=True)

    def forward(self, x):
        """x shape: [B, pol, block, 2]; returns the same shape."""
        self.fac = self.fac.to(x.device)
        total_power = torch.sum(x ** 2, dim=[1, -1], keepdim=True)
        phase = self.cr * self.fac * total_power
        rotator = torch.cat([torch.cos(phase), torch.sin(phase)], dim=-1)
        return cmul(rotator, x)

    @staticmethod
    def cal_init_factor(gm, alp_dB, power_dB, step_len):
        """Analytic initial factor: (8/9) * gm * L_eff * P."""
        power = 10 ** (power_dB / 10 - 3)  # dBm -> W
        alp = -np.log(np.power(10, -alp_dB / 10))
        Leff = (1 - np.exp(-alp * step_len)) / alp  # effective length
        return 8 / 9 * gm * Leff * power


class PerturbativeBlockLayer(nn.Module):
    """First-order perturbation-based intra-channel nonlinearity compensator.

    Builds triplets a_m * a_n * conj(a_{m+n}) over a sliding time window and
    combines them with learnable complex weights. Tensors use the
    real/imag-split convention: the last dimension of size 2 holds
    [real, imag].

    Fixes in this revision:
    * ``np.int`` / ``np.complex`` (removed in NumPy >= 1.24) replaced by the
      builtins, so index/coefficient construction no longer crashes.
    * ``group_weight`` previously started the tail group at
      ``level_num * level_step`` instead of ``(level_num - 1) * level_step``,
      silently dropping one chunk of indices.
    """

    def __init__(self,
                 time_win_size,
                 weight_case_num=1,
                 init_weight=None,
                 grouping_triplet=True):
        """
        Parameters
        ----------
        time_win_size : int
            Odd window length (symbols) over which triplets are formed.
        weight_case_num : int, optional
            Number of independent weight cases, by default 1.
        init_weight : torch.Tensor or np.ndarray, optional
            Initial weights; either complex 1-D or real [*, 2]. Zeros when
            None.
        grouping_triplet : bool, optional
            If True, symmetry-related triplets are pre-summed into two
            groups per (m, n) case, by default True.

        Raises
        ------
        ValueError
            If ``time_win_size`` is even.
        TypeError
            If ``init_weight`` is neither a tensor nor an ndarray.
        """
        super(PerturbativeBlockLayer, self).__init__()

        if time_win_size % 2 != 1:
            raise ValueError('time_win_size is supposed to be odd.')

        self.__grouping_triplet = grouping_triplet

        if not grouping_triplet:
            triplet_pick_indxes = self.pick_indxes(time_win_size)
        else:
            triplet_pick_indxes = self.pick_indxes_in_group(time_win_size)

        self.time_win_size = time_win_size
        self.triplet_pick_indxes = triplet_pick_indxes
        # With grouping, each picked (m, n) case yields two summed groups
        # (I and IC), doubling the number of effective weights.
        self.triplet_number = self.triplet_pick_indxes.shape[-2] * \
            (1 + int(self.__grouping_triplet))
        self.weight_case_num = weight_case_num
        if init_weight is None:
            weight = torch.zeros(
                [1, weight_case_num, 1, self.triplet_number, 2])
            self.w = nn.Parameter(weight, requires_grad=True)
        else:
            init_weight = init_weight.squeeze()
            if isinstance(init_weight, torch.Tensor):
                if torch.is_complex(init_weight):
                    assert init_weight.ndim == 1
                    init_weight_is_complex = True
                else:
                    assert (init_weight.ndim == 2
                            and init_weight.shape[-1] == 2)
                    init_weight_is_complex = False
            elif isinstance(init_weight, np.ndarray):
                if np.iscomplexobj(init_weight):
                    assert init_weight.ndim == 1
                    init_weight_is_complex = True
                else:
                    assert (init_weight.ndim == 2
                            and init_weight.shape[-1] == 2)
                    init_weight_is_complex = False
                init_weight = torch.from_numpy(init_weight)
            else:
                raise TypeError
            if init_weight_is_complex:
                init_weight = torch.view_as_real(init_weight)
            init_weight = init_weight.reshape([1, 1, 1, -1, 2])
            self.w = nn.Parameter(init_weight, requires_grad=True)

    def forward(self, x):
        """Apply the perturbative correction over a whole block.

        Parameters
        ----------
        x : torch.Tensor
            Shape [B, C, T, 2].

        Returns
        -------
        torch.Tensor
            Shape [B, C, T, 2].
        """
        # shape of x: [B, C, T, triplet number, 2]
        x = self.__gen_triplet_block(x)

        # shape of w: [1, C or 1, 1, triplet number, 2]
        ret = torch.sum(cmul(self.w, x), dim=-2, keepdim=False)
        return ret

    def center_forward(self, x):
        """Apply the correction for the window's center symbol only.

        Parameters
        ----------
        x : torch.Tensor
            Shape [B, C, T, 2] with odd T.

        Returns
        -------
        torch.Tensor
            Shape [B, C, 1, 2].
        """
        # shape of x: [B, C, 1, triplet number, 2]
        x = self.__gen_triplet_for_center(x)

        # shape of w: [1, C or 1, 1, triplet number, 2]
        ret = torch.sum(cmul(self.w, x), dim=-2, keepdim=False)
        return ret

    def __gen_triplet_block(self, x):
        """Build per-sample triplets with circular windowing (T: block size)."""
        B, C, T, RI = x.shape
        triplet_indexes = self.triplet_pick_indxes

        half_win = self.time_win_size // 2
        indx = np.arange(T)
        # Per-sample window indices, wrapped circularly at the block edges.
        indces = np.linspace(indx - half_win,
                             indx + half_win,
                             num=self.time_win_size,
                             axis=-1,
                             dtype=int)
        indces = indces % T

        # now the shape of x: [B, C, T, self.time_win_size, 2]
        x = x[..., indces, :]

        am = x[..., triplet_indexes[..., 0], :]
        an = x[..., triplet_indexes[..., 1], :]
        ampn = x[..., triplet_indexes[..., 2], :]

        # P = sum over channels of a_n * conj(a_{m+n}).
        P = cmul(an, conj(ampn))
        P = torch.sum(P, dim=1, keepdim=True)
        x = cmul(P, am)

        if self.__grouping_triplet:
            # Pre-sum the 4 I-orderings and the 4 IC-orderings separately.
            G1 = torch.sum(x[..., 0:4, :, :], dim=-3, keepdim=False)
            G2 = torch.sum(x[..., 4:, :, :], dim=-3, keepdim=False)
            x = torch.cat([G1, G2], dim=-2)
        return x

    def __gen_triplet_for_center(self, x):
        """Build triplets for the window's center symbol (T: window size)."""
        B, C, T, RI = x.shape
        assert T % 2 == 1
        triplet_indexes = self.triplet_pick_indxes

        # The input already spans exactly one window, so the triplet indices
        # address it directly (no circular re-windowing needed).
        am = x[..., triplet_indexes[..., 0], :]
        an = x[..., triplet_indexes[..., 1], :]
        ampn = x[..., triplet_indexes[..., 2], :]

        P = cmul(an, conj(ampn))
        P = torch.sum(P, dim=1, keepdim=True)
        x = cmul(P, am)

        if self.__grouping_triplet:
            G1 = torch.sum(x[..., 0:4, :, :], dim=-3, keepdim=False)
            G2 = torch.sum(x[..., 4:, :, :], dim=-3, keepdim=False)
            x = torch.cat([G1, G2], dim=-2)
        x = x.unsqueeze(-3)
        return x

    @staticmethod
    def pick_indxes(L):
        """Pick (m, n, m+n) index triplets with |m*n| < floor(L/2).

        L: time window size (odd). Returns an array [cases, 3] of window
        indices (the +M shift maps symbol offsets to 0-based positions).
        """
        M = L // 2
        m = np.arange(-M, M + 1)[..., np.newaxis, np.newaxis]
        m = np.repeat(m, m.shape[0], axis=1)
        m = np.concatenate([m, np.transpose(m, [1, 0, 2])], axis=2)
        m_pick = m[np.abs(m[..., 0] * m[..., 1]) < np.abs(np.floor(L / 2))]
        ret = np.concatenate(
            [m_pick, np.sum(m_pick, axis=1, keepdims=True)], axis=1)
        return ret + M

    @staticmethod
    def pick_indxes_in_group(L):
        """Pick triplets restricted to 0 <= m <= n, expanded by symmetry.

        Returns an array [8, cases, 3]: the eight symmetry-related orderings
        [I0..I3, IC0..IC3] of each picked (m, n), each with the m+n index
        appended, shifted into window coordinates by +M.
        """
        M = L // 2
        m = np.arange(-M, M + 1)[..., np.newaxis, np.newaxis]
        m = np.repeat(m, m.shape[0], axis=1)
        m = np.concatenate([m, np.transpose(m, [1, 0, 2])], axis=2)
        """ Ignored IXPM and SPM: pick the indexes that are not equal to 0. """
        pick_condition = np.logical_and(
            np.abs(m[..., 0] * m[..., 1]) < np.abs(np.floor(L / 2)),
            m[..., 0] >= 0)
        pick_condition = np.logical_and(pick_condition, m[..., 1] >= 0)
        pick_condition = np.logical_and(pick_condition, m[..., 0] <= m[..., 1])
        m_pick = m[pick_condition]

        mm, nn = m_pick[:, 0], m_pick[:, 1]
        # Eight orderings: I0..I3 = (m,n), (n,m), (-m,-n), (-n,-m);
        # IC0..IC3 = (-m,n), (n,-m), (m,-n), (-n,m).
        orderings = [(mm, nn), (nn, mm), (-mm, -nn), (-nn, -mm),
                     (-mm, nn), (nn, -mm), (mm, -nn), (-nn, mm)]
        ret = np.stack([np.stack(pair, axis=-1) for pair in orderings],
                       axis=0)
        ret = np.concatenate(
            [ret, (ret[..., 0] + ret[..., 1])[..., np.newaxis]], axis=-1)
        """ shape of ret: [[I0, I1, I2, I3, IC0, IC1, IC2, IC3], mCases, 3 for m and n and m+n]"""
        ret = ret.astype(int)
        return ret + M

    @staticmethod
    def calculate_pert_matrix(**kwargs):
        """Compute the first-order perturbation coefficient matrix C(m, n).

        Integrates the dispersed-pulse triplet overlap over time and the
        link's power profile, exploiting coefficient symmetries [1] to fill
        the full (2*maxmn+1)^2 matrix from the 0 <= m <= n octant.
        All link parameters are keyword arguments with fiber-typical
        defaults.
        """
        D = kwargs.get('D', 17e-6)
        Fs = kwargs.get('Fs', 28e9)
        gm = kwargs.get('gm', 1.3e-3)
        sf = kwargs.get('sf', 8)
        wave_length = kwargs.get('wave_length', 1552.52e-9)
        span_length = kwargs.get('span_length', 80e3)
        span_number = kwargs.get('span_number', 6)
        pre_cd = kwargs.get('pre_cd', 0)
        alpha_dB = kwargs.get('alpha_dB', 0.2)
        g_dB = kwargs.get('g_dB', 0.2 * 80)
        int_redundency = kwargs.get('temporal_redundency', 0.8)
        maxmn = kwargs.get('maxmn', 40)
        running_mode = kwargs.get('running_mode', 'time_consuming_mode')
        int_constraint = kwargs.get('int_constraint', maxmn ** 2 + 1)
        pulse_shape = kwargs.get('pulse_shape', 'RRC')
        rolloff = kwargs.get('rolloff', 0.01)
        c = 3e8

        # Distance grid: 10 nodes per span, offset by each span's start.
        zgrid_per_span = np.linspace(0, span_length - 1, 10).reshape([1, -1])
        spans = np.arange(span_number).reshape([-1, 1])
        zgrid = (spans * span_length + zgrid_per_span).reshape([-1])

        # Power profile along the link (attenuation + lumped span gain).
        atten = np.log(10 ** (-(alpha_dB / 10))) / (-1000)
        G = np.log(10 ** (g_dB / 10))
        pz = np.exp(-atten * zgrid +
                    np.floor(zgrid / span_length) * G).reshape([-1, 1])

        T_sym = 1 / Fs
        z_max = np.max(zgrid)
        DL_max = abs(D) * z_max * (1 - pre_cd)
        # CD-induced pulse spreading decides how many symbols to simulate.
        if pulse_shape == 'RRC':
            delta_tau = DL_max / T_sym * wave_length ** 2 / c
        else:
            delta_tau = DL_max / T_sym * wave_length ** 2 / c * 2
        symbol_num_for_cal = round(delta_tau / T_sym * (1 + int_redundency))
        symbol_num_for_cal = maxmn + symbol_num_for_cal
        int_sample_number = sf * symbol_num_for_cal

        temporal_lower_bound = np.round(-symbol_num_for_cal / 2)
        temporal_upper_bound = -temporal_lower_bound

        if pulse_shape == 'RRC':
            H_pulse_ori = RRC(alpha=rolloff,
                              sample_factor=sf,
                              span=symbol_num_for_cal)
        elif pulse_shape == 'gaussian':
            H_pulse_ori = gaussianLPF(BW=1,
                                      sample_factor=sf,
                                      span=symbol_num_for_cal)
        elif pulse_shape == 'Rect':
            h_pulse_ori = Rect(sample_factor=sf, span=symbol_num_for_cal)
            H_pulse_ori = np.fft.fft(h_pulse_ori)
        else:
            raise ValueError('Invalid pulse_shape.')

        H_pulse_ori = np.fft.fftshift(H_pulse_ori.reshape([1, -1]), axes=-1)
        T_s = T_sym / sf
        DL_grid = (D * zgrid - D * z_max * pre_cd).reshape([-1, 1])
        sample_time = np.linspace(temporal_lower_bound, temporal_upper_bound,
                                  int_sample_number).reshape([1, -1])
        omega = (2 * np.pi / T_s / int_sample_number) * (
            np.arange(int_sample_number) - np.ceil(int_sample_number / 2))
        omega = omega.reshape([1, -1])
        # Dispersed pulse spectrum at every distance node.
        H = np.exp(1j / 2 * DL_grid * wave_length ** 2 / (2 * np.pi * c) *
                   omega ** 2)
        H_pulse = H * H_pulse_ori
        time_shift_factor = lambda x: np.exp(-1j * omega * x / Fs)
        # `np.complex` was removed from NumPy; the builtin is equivalent.
        ret = np.zeros([2 * maxmn + 1, 2 * maxmn + 1], dtype=complex)

        def calculate_coefficients(m, n):
            # Triplet overlap integral over time and distance for one (m, n).
            H_pulse_m = time_shift_factor(m) * H_pulse
            H_pulse_n = time_shift_factor(n) * H_pulse
            H_pulse_mpn = time_shift_factor(m + n) * H_pulse

            to_time = lambda spec: np.fft.fftshift(
                np.fft.fft(np.fft.fftshift(spec, axes=1), axis=1), axes=1)
            h = to_time(H_pulse)
            hn = to_time(H_pulse_n)
            hm = to_time(H_pulse_m)
            hmpn = to_time(H_pulse_mpn)
            int_nodes = 1j * gm * 8 / 9 * pz * np.conj(h) * hn * hm * np.conj(
                hmpn)
            return np.trapz(np.trapz(int_nodes, x=sample_time * T_sym, axis=1),
                            zgrid,
                            axis=0)

        if running_mode == 'time_consuming_mode':
            ''' Using symmetrical properties
                Ref:
                [1] Malekiha M, Tselniker I, Plant D V. Efficient nonlinear equalizer for 
                intra-channel nonlinearity compensation for next generation agile and dynamically
                reconfigurable optical networks[J]. Optics Express, 2016, 24(4): 4097.
            '''
            for m in range(0, maxmn + 1):
                for n in range(m, maxmn + 1):
                    if m * n <= int_constraint:
                        coef = calculate_coefficients(m, n)
                        ret[m + maxmn, n + maxmn] = coef
                        ret[n + maxmn, m + maxmn] = coef
                        ret[-m + maxmn, -n + maxmn] = coef
                        ret[-n + maxmn, -m + maxmn] = coef
                        ret[-m + maxmn, n + maxmn] = -np.conj(coef)
                        ret[n + maxmn, -m + maxmn] = -np.conj(coef)
                        ret[m + maxmn, -n + maxmn] = -np.conj(coef)
                        ret[-n + maxmn, m + maxmn] = -np.conj(coef)

        # TODO: finish space_consuming_mode
        elif running_mode == 'space_consuming_mode':
            raise NotImplementedError

        else:
            raise Exception('Invalid running mode')
        return ret

    @staticmethod
    def cal_init_perturbative_matrix(perturbative_matrix_path='',
                                     perturbative_matrix_save_path=None,
                                     **kwargs):
        """Load the perturbation matrix from a .mat file or compute it.

        Computes via ``calculate_pert_matrix(**kwargs)`` when the path does
        not exist; optionally saves the result to
        ``perturbative_matrix_save_path``.
        """
        import os
        if not os.path.exists(perturbative_matrix_path):
            perturbative_matrix = PerturbativeBlockLayer.calculate_pert_matrix(
                **kwargs)
        else:
            from scipy.io import loadmat
            data_dict = loadmat(perturbative_matrix_path)
            perturbative_matrix = data_dict['perturbative_matrix']
        if perturbative_matrix_save_path is not None:
            from scipy.io import savemat
            savemat(perturbative_matrix_save_path,
                    {'perturbative_matrix': perturbative_matrix})
        return perturbative_matrix

    @staticmethod
    def pert_matrix_to_vec(pert_matrix, maxmn):
        """Flatten a perturbation matrix into the picked-triplet order."""
        L = maxmn * 2 + 1
        pickIndxes = PerturbativeBlockLayer.pick_indxes(L)
        perturbative_vector = pert_matrix[pickIndxes[:, 0], pickIndxes[:, 1]]
        return perturbative_vector

    def group_weight(self, level_num) -> list:
        """Split weight indices into `level_num` magnitude-sorted groups.

        Returns a list of index arrays, smallest-magnitude group first.
        """
        weight_group_indexes = []
        with torch.no_grad():
            w = self.w.data.squeeze()
            absw = torch.abs(w)
            _, indices = torch.sort(absw, dim=-2)
            indices = indices.data.cpu().numpy()
            # NOTE(review): this only holds for 1-D squeezed weights while
            # the slicing below is 2-D — confirm the intended weight layout.
            assert absw.ndim == 1
            num_ele = w.shape[-2]
            ''' round up '''
            ceil_div = lambda y, x: y // x + (y % x != 0)
            level_step = ceil_div(num_ele, level_num)
            for l in range(level_num - 1):
                weight_group_indexes.append(
                    indices[l * level_step:(l + 1) * level_step, :])

            # BUG FIX: the tail group previously started at
            # level_num * level_step, dropping the elements in
            # [(level_num - 1) * level_step, level_num * level_step).
            weight_group_indexes.append(
                indices[(level_num - 1) * level_step:, :])
        return weight_group_indexes

    def quantize_module(self,
                        bit_width,
                        learn_quant_parameters=False,
                        group_weights_level_number=0):
        """Build a quantized copy of this layer with the current weights."""
        if group_weights_level_number != 0:
            weight_group_indexes = self.group_weight(
                group_weights_level_number)
        else:
            weight_group_indexes = None

        module_quant = QuantPerturbativeBlockLayer(
            self.time_win_size,
            bit_width,
            weight_group_indexes,
            self.weight_case_num,
            self.w.data,
            self.__grouping_triplet,
            learn_quant_parameters=learn_quant_parameters)
        return module_quant

    def mixed_precision_quantize(self,
                                 split_ratios,
                                 quant_parameters,
                                 learn_quant_parameters=False,
                                 quantize_triplet=False,
                                 quantize_triplet_bit_width=8):
        """Build a mixed-precision quantized copy, split by magnitude ratio."""
        weight_group_indexes = self.group_weight_via_ratios(split_ratios)
        module_quant = MixedPrecisionPerturbativeBlockLayer(
            self.time_win_size,
            weight_group_indexes=weight_group_indexes,
            weight_case_num=self.weight_case_num,
            init_weight=self.w.data,
            grouping_triplet=self.__grouping_triplet,
            learn_quant_parameters=learn_quant_parameters,
            quantize_triplet=quantize_triplet,
            quantize_triplet_bit_width=quantize_triplet_bit_width,
            quant_parameters=quant_parameters)
        return module_quant

    def group_weight_via_ratios(self, split_ratios) -> list:
        """Split weights into below/above-threshold boolean masks.

        The threshold is ``split_ratios[0]`` times the per-column maximum
        weight magnitude. Returns [mask_below, mask_above].
        """
        weight_group_indexes = []
        with torch.no_grad():
            w = self.w.data.squeeze()
            absw = torch.abs(w)

            # Only the first ratio is used; one or two entries accepted
            # (the original treated both cases identically).
            if len(split_ratios) not in (1, 2):
                raise ValueError
            ratio = split_ratios[0]

            max_absw, _ = torch.max(absw, dim=-2, keepdim=True)
            threshold = max_absw * np.array(ratio)

            below = (absw - threshold < 0).data.cpu().numpy()
            weight_group_indexes.append(below)

            above = (absw - threshold >= 0).data.cpu().numpy()
            weight_group_indexes.append(above)
        return weight_group_indexes

    def binarize_module(self, group_weights_level_number=0):
        """Build a binarized copy of this layer with the current weights."""
        if group_weights_level_number != 0:
            weight_group_indexes = self.group_weight(
                group_weights_level_number)
        else:
            weight_group_indexes = None

        module_bin = BinaryPerturbativeBlockLayer(
            self.time_win_size,
            weight_group_indexes,
            self.weight_case_num,
            self.w.data,
            self.__grouping_triplet,
        )
        return module_bin


class MixedPrecisionPerturbativeBlockLayer(PerturbativeBlockLayer):
    """Perturbative block layer with mixed-precision (per-group bit width)
    uniformly quantized triplet weights.

    The weights are partitioned by boolean masks in ``weight_group_indexes``;
    the real and imaginary halves of each group get their own
    (scale, lower_bound) pair, computed once at construction and optionally
    trained when ``learn_quant_parameters`` is True.
    """

    # Defaults for ``quant_parameters``.  Kept at class level and copied per
    # instance so a mutable default dict is never shared between call sites.
    _DEFAULT_QUANT_PARAMETERS = {
        'quantize_approach': 'minmaxabs',
        'power_of_two': False,
        'bit_widths': None,
        'additive_power_of_two': False,
        'additive_power_of_two_parameters': None
    }

    def __init__(
            self,
            time_win_size: int,
            weight_group_indexes: list = None,
            weight_case_num: int = 1,
            init_weight: np.ndarray = None,
            grouping_triplet: bool = True,
            learn_quant_parameters: bool = False,
            quantize_triplet: bool = True,
            quantize_triplet_bit_width: int = 8,
            quant_parameters: dict = None):
        super(MixedPrecisionPerturbativeBlockLayer,
              self).__init__(time_win_size, weight_case_num, init_weight,
                             grouping_triplet)

        # BUGFIX: the original signature used a mutable dict literal as the
        # default argument; a None sentinel with a per-call copy avoids the
        # shared-mutable-default pitfall while keeping the same defaults.
        if quant_parameters is None:
            quant_parameters = dict(self._DEFAULT_QUANT_PARAMETERS)

        self.quantize_triplet = quantize_triplet
        # Attribute keeps the historical spelling ("quantie") because
        # external code may already reference it by this name.
        self.quantie_triplet_bit_width = quantize_triplet_bit_width

        if weight_group_indexes is None:
            # Default: a single group covering every weight (real + imag).
            weight_group_indexes = [
                np.ones([self.triplet_pick_indxes.shape[-2] * 2]) == 1
            ]
        # Shape of an element in weight_group_indexes: [*, 2]
        # (column 0 masks the real parts, column 1 the imaginary parts).
        self.weight_group_indexes = weight_group_indexes

        self.bit_widths = quant_parameters['bit_widths']
        self.learn_quant_parameters = learn_quant_parameters
        self.quantize_approach = quant_parameters['quantize_approach']
        self.power_of_two = quant_parameters['power_of_two']
        self.additive_power_of_two = quant_parameters['additive_power_of_two']
        self.additive_power_of_two_parameters = quant_parameters.get(
            'additive_power_of_two_parameters')
        if self.power_of_two is True:
            raise NotImplementedError

        self.cal_quant_parameters()

    def cal_quant_parameters(self):
        """Compute per-group (scale, lower_bound) pairs for the real and
        imaginary weight halves and store them in ``self.quant_parameters``.

        Entries are stored pairwise per group: index ``2*count`` holds the
        real-part parameters, ``2*count + 1`` the imaginary-part parameters;
        each entry stacks (scale, lower_bound) along dim 0.
        """
        with torch.no_grad():
            self.quant_parameters = nn.ParameterList([])
            for count, group_indexes in enumerate(self.weight_group_indexes):
                quantize_approach = self.quantize_approach

                w_picked = self.pick_element_by_group_indexes(
                    self.w.data, group_indexes)

                if w_picked[0].nelement() != 0 and w_picked[1].nelement() != 0:
                    s_r, a_r = cal_uniform_quantization_parameters(
                        w_picked[0],
                        self.bit_widths[count],
                        axis=-1,
                        approach=quantize_approach)
                    s_i, a_i = cal_uniform_quantization_parameters(
                        w_picked[1],
                        self.bit_widths[count],
                        axis=-1,
                        approach=quantize_approach)
                else:
                    # Empty group: zero placeholders keep indexing stable.
                    s_r, a_r, s_i, a_i = (torch.Tensor([0]), torch.Tensor([0]),
                                          torch.Tensor([0]), torch.Tensor([0]))

                param = nn.Parameter(torch.stack((s_r, a_r), dim=0),
                                     requires_grad=self.learn_quant_parameters)
                self.quant_parameters.append(param)

                param = nn.Parameter(torch.stack((s_i, a_i), dim=0),
                                     requires_grad=self.learn_quant_parameters)
                self.quant_parameters.append(param)

    def quant_weight(self):
        """Return a mixed-precision fake-quantized copy of ``self.w``.

        Each group's real/imaginary halves are quantized with that group's
        stored (scale, lower_bound) pair and bit width, then scattered back
        into caches the shape of the original weights.
        """
        w_fp = self.w.data
        w_real_cache = torch.zeros_like(w_fp[..., 0])
        w_imag_cache = torch.zeros_like(w_fp[..., 1])
        for count, group_indexes in enumerate(self.weight_group_indexes):
            w_real_picked, w_imag_picked = self.pick_element_by_group_indexes(
                w_fp, group_indexes)
            # Parameters are stored pairwise: [2*count] real, [2*count+1] imag.
            s_r = self.quant_parameters[count * 2][0, ...]
            a_r = self.quant_parameters[count * 2][1, ...]

            s_i = self.quant_parameters[count * 2 + 1][0, ...]
            a_i = self.quant_parameters[count * 2 + 1][1, ...]

            w_real_cache[..., group_indexes[..., 0]] = uniform_quant(
                (w_real_picked, s_r, a_r),
                bit_width=self.bit_widths[count],
                axis=-1,
                approach=self.quantize_approach)

            w_imag_cache[..., group_indexes[..., 1]] = uniform_quant(
                (w_imag_picked, s_i, a_i),
                bit_width=self.bit_widths[count],
                axis=-1,
                approach=self.quantize_approach)

        w_quant = torch.stack([w_real_cache, w_imag_cache], dim=-1)
        return w_quant

    def forward(self, x):
        """Apply the quantized perturbative filter.

        Parameters
        ----------
        x : torch.Tensor
            shape [B, C, T, 2]

        Returns
        -------
        torch.Tensor
            shape [B, C, T, 2]
        """
        # shape of x after triplet expansion: [B, C, T, triplet number, 2]
        x = self._PerturbativeBlockLayer__gen_triplet_block(x)

        if self.quantize_triplet:
            # Fake-quantize the triplet activations along the triplet axis.
            sx, ax = cal_uniform_quantization_parameters(
                x, self.quantie_triplet_bit_width, axis=-2, approach='maxabs')
            x = uniform_quant((x, sx, ax),
                              bit_width=self.quantie_triplet_bit_width,
                              axis=-2,
                              approach='maxabs')

        # shape of self.w: [1, C or 1, 1, triplet number, 2]
        w_quantized = self.quant_weight()
        ret = torch.sum(cmul(w_quantized, x), dim=-2, keepdim=False)
        return ret

    def examine_weights(self):
        """Return (quantized weights, full-precision weights) as squeezed
        numpy arrays, for inspection/plotting."""
        w_quantized = self.quant_weight()

        return w_quantized.squeeze().data.cpu().numpy(), self.w.squeeze(
        ).data.cpu().numpy()

    def pick_element_by_group_indexes(self, tensor, group_index):
        """Select the real (column 0) and imaginary (column 1) weights that
        belong to one group; returns a (real, imag) tuple of tensors."""
        ret = (tensor[..., group_index[..., 0], 0],
               tensor[..., group_index[..., 1], 1])
        return ret

    def count_group_elements(self):
        """Count how many weights fall in group 0 and group 1 (summed over
        the mask axis).  Assumes exactly two groups were configured."""
        single_bit_numbers = np.sum(self.weight_group_indexes[0],
                                    axis=-2,
                                    keepdims=False)
        multiple_bit_numbers = np.sum(self.weight_group_indexes[1],
                                      axis=-2,
                                      keepdims=False)
        return single_bit_numbers, multiple_bit_numbers


class QuantPerturbativeBlockLayer(PerturbativeBlockLayer):
    """Perturbative block layer with uniformly fake-quantized weights
    (one bit width for all groups, per-group scale/offset pairs)."""

    def __init__(self,
                 time_win_size,
                 bit_width,
                 weight_group_indexes=None,
                 weight_case_num=1,
                 init_weight=None,
                 grouping_triplet=True,
                 learn_quant_parameters=False,
                 quantize_approach='minmaxabs'):
        super(QuantPerturbativeBlockLayer,
              self).__init__(time_win_size, weight_case_num, init_weight,
                             grouping_triplet)

        if weight_group_indexes is None:
            # Default: a single group covering every weight.
            weight_group_indexes = [
                np.ones([self.triplet_pick_indxes.shape[-2] * 2]) == 1
            ]
        self.weight_group_indexes = weight_group_indexes

        self.bit_width = bit_width
        self.quantize_approach = quantize_approach

        self.learn_quant_parameters = learn_quant_parameters
        s_list = []
        a_list = []
        with torch.no_grad():
            for group_indexes in self.weight_group_indexes:
                w_picked = self.w.data[..., group_indexes, :]
                # NOTE(review): `cal_quantization_parameters` is defined
                # elsewhere in this module; assumed to return
                # (scale, lower_bound) like
                # cal_uniform_quantization_parameters -- confirm.
                s, a = cal_quantization_parameters(w_picked,
                                                   bit_width,
                                                   axis=-2)
                s_list.append(s)
                a_list.append(a)

        s = torch.stack(s_list, dim=0)
        a = torch.stack(a_list, dim=0)
        self.s = nn.Parameter(s, requires_grad=learn_quant_parameters)
        self.a = nn.Parameter(a, requires_grad=learn_quant_parameters)

    def forward(self, x):
        """Apply the quantized perturbative filter.

        Parameters
        ----------
        x : torch.Tensor
            shape [B, C, T, 2]

        Returns
        -------
        torch.Tensor
            shape [B, C, T, 2]
        """
        weight_group_indexes = self.weight_group_indexes
        # shape of x after triplet expansion: [B, C, T, triplet number, 2]
        x = self._PerturbativeBlockLayer__gen_triplet_block(x)
        B, C, T, tn, _ = x.shape

        # shape of self.w: [1, C or 1, 1, triplet number, 2]
        ret = torch.zeros([self.s.shape[0], B, C, T, 2]).to(x.device)
        for indx in range(self.s.shape[0]):
            w_fp = self.w[..., weight_group_indexes[indx], :]
            # BUGFIX: the original called quantize() with only the scale (so
            # both parameters were silently recomputed from the data) and
            # then called dequantize() with three arguments, which raises
            # TypeError and also drops the weight signs.  uniform_quant()
            # passes the stored (s, a) pair through and restores the signs
            # on dequantization, matching the mixed-precision layer.
            w = uniform_quant((w_fp, self.s[indx, ...], self.a[indx, ...]),
                              bit_width=self.bit_width,
                              axis=-2,
                              approach=self.quantize_approach)
            ret[indx, ...] = torch.sum(cmul(
                w, x[..., weight_group_indexes[indx], :]),
                                       dim=-2,
                                       keepdim=False)
        ret = torch.sum(ret, dim=0, keepdim=False)
        return ret


class BinActive(torch.autograd.Function):
    '''
    Sign-binarization with a straight-through estimator: forward emits
    sign(x); backward passes the gradient through only where -1 < x < 1.
    '''

    @staticmethod
    def forward(ctx, input):
        ctx.save_for_backward(input)
        return input.sign()

    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_tensors
        # Straight-through estimator: zero the gradient outside the open
        # interval (-1, 1), pass it through unchanged inside.
        pass_through = input.gt(-1) & input.lt(1)
        return grad_output * pass_through


class BinaryPerturbativeBlockLayer(PerturbativeBlockLayer):
    """Perturbative block layer with sign-binarized weights: within each
    group the weights are replaced by sign(w) scaled by the group's mean
    magnitude along the triplet axis (XNOR-Net-style scaling)."""

    def __init__(self,
                 time_win_size,
                 weight_group_indexes=None,
                 weight_case_num=1,
                 init_weight=None,
                 grouping_triplet=True):
        super(BinaryPerturbativeBlockLayer,
              self).__init__(time_win_size, weight_case_num, init_weight,
                             grouping_triplet)

        # Straight-through binarizer, kept as a public attribute even though
        # forward() currently binarizes inline with w.sign().
        self.binarize = lambda x: BinActive.apply(x)
        if weight_group_indexes is None:
            # Default: a single group covering every weight.
            weight_group_indexes = [
                np.ones([self.triplet_pick_indxes.shape[-2] * 2]) == 1
            ]

        self.weight_group_indexes = weight_group_indexes

    def forward(self, x):
        """Apply the binarized perturbative filter.

        Parameters
        ----------
        x : torch.Tensor
            shape [B, C, T, 2]

        Returns
        -------
        torch.Tensor
            shape [B, C, T, 2]
        """

        weight_group_indexes = self.weight_group_indexes
        # shape of x after triplet expansion: [B, C, T, triplet number, 2]
        x = self._PerturbativeBlockLayer__gen_triplet_block(x)
        B, C, T, tn, _ = x.shape

        # shape of self.w: [1, C or 1, 1, triplet number, 2]
        weight_group_num = len(weight_group_indexes)
        ret = torch.zeros([weight_group_num, B, C, T, 2]).to(x.device)
        for indx in range(weight_group_num):
            w = self.w[..., weight_group_indexes[indx], :]
            # Per-group scaling factor: mean |w| along the triplet axis.
            alpha = torch.mean(w.abs(), dim=-2)
            w = w.sign() * alpha
            # (A no-op self-assignment of ret[indx] was removed here.)
            ret[indx, ...] = torch.sum(cmul(
                w, x[..., weight_group_indexes[indx], :]),
                                       dim=-2,
                                       keepdim=False)
        ret = torch.sum(ret, dim=0, keepdim=False)

        return ret


def quantize(tensor_fp,
             bit_width,
             s=None,
             lower_bound=None,
             axis=-2,
             approach='minmaxabs'):
    """Map |tensor_fp| onto integer quantization levels.

    Parameters
    ----------
    tensor_fp : torch.Tensor
        Full-precision values; only magnitudes are quantized, so the caller
        must keep torch.sign(tensor_fp) to restore signs at dequantization
        (see uniform_quant / dequantize).
    bit_width : int
        Target bit width, used when the parameters must be derived.
    s, lower_bound : torch.Tensor, optional
        Scale and offset.  If either is None, BOTH are recomputed from the
        data via cal_uniform_quantization_parameters.
    axis : int
        Reduction axis for parameter derivation.
    approach : str
        Passed through to cal_uniform_quantization_parameters.

    Returns
    -------
    ((s, lower_bound), tensor_int)
        The parameters actually used and the rounded integer levels
        (no clipping is applied).
    """
    if s is None or lower_bound is None:
        s, lower_bound = cal_uniform_quantization_parameters(
            tensor_fp, bit_width, axis, approach)
    tensor_int = s * (tensor_fp.abs() - lower_bound)
    tensor_int = tensor_int.round()
    return (s, lower_bound), tensor_int


def dequantize(tensor_int, tensor_signs, s, a):
    """Invert quantize(): magnitude = tensor_int / s + a, then reapply
    the signs saved before quantization."""
    magnitude = tensor_int / s + a
    return magnitude * tensor_signs


def uniform_quant(value_parameter, bit_width, axis=-2, approach='maxabs'):
    """Fake-quantize value_parameter[0] with the given (scale, offset).

    value_parameter is a sequence (values, scale, lower_bound); the values
    are quantized to integer levels and immediately dequantized, with their
    signs preserved.
    """
    values = value_parameter[0]
    scale = value_parameter[1]
    offset = value_parameter[2]
    signs = torch.sign(values)
    _, q_int = quantize(values, bit_width, scale, offset,
                        axis=axis, approach=approach)
    return dequantize(q_int, signs, scale, offset)


def cal_uniform_quantization_parameters(w,
                                        bit_width,
                                        axis=-2,
                                        approach='minmaxabs'):
    """Return (scale, lower_bound) for uniform quantization along `axis`.

    approach:
      'maxabs'    -- range [0, max|w|]
      'minmax'    -- range [min w, max w]
      'minmaxabs' -- range [min|w|, max|w|]

    scale = (2**bit_width - 1) / range, with zero ranges clamped to 1e-8 to
    avoid division by zero.  (The three original branches duplicated the
    interval/scale computation; it is factored out here, behavior unchanged.)
    """
    if approach == 'maxabs':
        upper_bound = torch.max(w.abs(), dim=axis, keepdim=True)[0]
        lower_bound = torch.zeros_like(upper_bound)
    elif approach == 'minmax':
        upper_bound = torch.max(w, dim=axis, keepdim=True)[0]
        lower_bound = torch.min(w, dim=axis, keepdim=True)[0]
    elif approach == 'minmaxabs':
        upper_bound = torch.max(w.abs(), dim=axis, keepdim=True)[0]
        lower_bound = torch.min(w.abs(), dim=axis, keepdim=True)[0]
    else:
        raise ValueError('Invalid approach.')
    interval = upper_bound - lower_bound
    interval[interval == 0] = 1e-8  # constant slice: avoid division by zero
    s = (2 ** bit_width - 1) / interval
    return s, lower_bound


def cal_power_of_two_quantization_parameters(w,
                                             bit_width,
                                             axis=-2,
                                             approach='minmaxabs'):
    """Return (scale, lower_bound) for power-of-two quantization along `axis`.

    'maxabs'    : scale = max|w|, lower_bound = 0
    'minmaxabs' : scale = max|w| - min|w|, lower_bound = min|w|

    `bit_width` is accepted for signature symmetry with
    cal_uniform_quantization_parameters but is not used here.

    BUGFIX: a zero scale (all-equal magnitudes, or an all-zero slice) made
    power_of_two_int_reprensent divide by zero and produce NaNs; zero scales
    are now clamped to 1e-8, matching the uniform variant.
    """
    if approach == 'maxabs':
        upper_bound = torch.max(w.abs(), dim=axis, keepdim=True)[0]
        lower_bound = torch.zeros_like(upper_bound)
        interval = upper_bound
    elif approach == 'minmaxabs':
        upper_bound = torch.max(w.abs(), dim=axis, keepdim=True)[0]
        lower_bound = torch.min(w.abs(), dim=axis, keepdim=True)[0]
        interval = upper_bound - lower_bound
    else:
        raise ValueError('Invalid approach.')
    s = interval.clone()
    s[s == 0] = 1e-8  # guard against zero scales (see BUGFIX note above)
    return s, lower_bound


def power_of_two_int_reprensent(tensor_fp,
                                bit_width,
                                s=None,
                                offset=None,
                                axis=-2,
                                approach='maxabs'):
    '''
    Encode magnitudes as power-of-two exponents:
    code = round(-log2((|x| - offset) / s)), clipped above at 2**bit_width - 1.

    Returns (codes, signs); signs must be kept by the caller for
    power_of_two_dequantize.  (Function name keeps its historical spelling.)

    Ref:
    @inproceedings{
        Li2020Additive,
        title={Additive Powers-of-Two Quantization: An Efficient
         Non-uniform Discretization for Neural Networks},
        author={Yuhang Li and Xin Dong and Wei Wang},
        booktitle={International Conference on Learning Representations},
        year={2020},
        url={https://openreview.net/forum?id=BkgXT24tDS}
    }
    Equation 3
    '''
    if s is None:
        s, offset = cal_power_of_two_quantization_parameters(
            tensor_fp, bit_width=bit_width, axis=axis, approach=approach)
    normalized = (torch.abs(tensor_fp) - offset) / s
    codes = torch.round(-torch.log2(normalized))
    codes = torch.clip(codes, max=2 ** bit_width - 1)
    return codes, torch.sign(tensor_fp)


def power_of_two_dequantize(tensor_int, tensor_signs, s, offset, bit_width):
    """Decode power-of-two codes: fp = (2**-code * s + offset) * sign.

    NaN codes leave their 2**-code term at 0, so they decode to
    offset * sign.  `bit_width` is unused but kept for interface symmetry
    with power_of_two_int_reprensent.
    """
    magnitudes = torch.zeros_like(tensor_int)
    valid = torch.logical_not(torch.isnan(tensor_int))
    magnitudes[valid] = 2 ** -tensor_int[valid]
    return (magnitudes * s + offset) * tensor_signs


def power_of_two_quant(value_parameter, bit_width, axis=-2, approach='maxabs'):
    """Fake-quantize value_parameter[0] to power-of-two levels and back.

    value_parameter is a sequence (values, scale, offset).
    """
    values = value_parameter[0]
    scale = value_parameter[1]
    offset = value_parameter[2]

    codes, signs = power_of_two_int_reprensent(values,
                                               bit_width=bit_width,
                                               s=scale,
                                               offset=offset,
                                               axis=axis,
                                               approach=approach)
    return power_of_two_dequantize(codes, signs, scale, offset, bit_width)


def gen_APoT_basis(b, k):
    """Generate the Additive-Powers-of-Two bases for total bit width `b`
    split into groups of `k` bits.

    Returns an (n, 2**k) array with n = b // k; each row is one basis whose
    first element is 0 and whose remaining elements are powers of two
    2**-(row + col * n).
    """
    assert b % k == 0
    n = b // k
    row = np.arange(n).reshape((n, 1))
    col = np.arange(2 ** k - 1)
    basis = np.zeros((n, 2 ** k))
    # Column 0 stays zero; the rest are the decreasing powers of two.
    basis[:, 1:] = 2.0 ** -(row + col * n)
    return basis


def cal_levels_for_APoT(b, k):
    """Enumerate all 2**b Additive-Powers-of-Two levels, normalized so the
    largest level is 1; returned as float32.

    BUGFIX: the original used the `np.int` / `np.uint` aliases, which were
    deprecated in NumPy 1.20 and removed in 1.24, so `.astype(np.int)`
    raises AttributeError on modern NumPy.  Explicit fixed-width dtypes are
    used instead (same values).
    """
    p = gen_APoT_basis(b, k)
    n = p.shape[0]
    power_k_of_2 = p.shape[1]
    k = int(math.log2(power_k_of_2))
    b = n * k
    i = np.arange(n)
    power_base = 2 ** np.arange(k - 1, -1, -1).reshape(1, -1)

    levels = np.zeros((2 ** b,))
    # To map indx to levels: let ib be the binary representation of indx.
    # ib has b bits, divided into n groups of k bits, one group per APoT
    # basis.  Each group selects one element of its basis; the mapped level
    # is the sum of the selected elements.
    for indx in range(2 ** b):
        indx_bin = list('{:0{}b}'.format(indx, b))
        indx_bin = np.asarray(indx_bin, dtype=np.uint64)
        indx_bin = indx_bin.reshape((n, k))
        indx_dec = np.sum(indx_bin * power_base, axis=1).astype(np.int64)
        p_picked = p[i, indx_dec]
        levels[indx] = np.sum(p_picked)

    return (levels / np.max(levels)).astype(np.single)


def additive_power_of_two_quant(w,
                                quant_parameters=None,
                                levels=None,
                                axis=-2,
                                approach='maxabs'):
    """Quantize `w` (in magnitude) to the nearest Additive-Powers-of-Two
    level, preserving signs.

    Parameters
    ----------
    w : torch.Tensor
        Values to quantize.
    quant_parameters : sequence (b, k), optional
        Used to generate the level table when `levels` is None.
    levels : np.ndarray or torch.Tensor, optional
        Normalized (max == 1) level table; takes precedence over
        `quant_parameters`.
    axis : int
        Axis along which the normalization bounds are computed.
    approach : {'maxabs', 'minmaxabs'}
        'maxabs' normalizes |w| by its max; 'minmaxabs' maps
        [min|w|, max|w|] onto [0, 1].

    Returns
    -------
    torch.Tensor
        `w` snapped to the nearest level (rescaled back to the original
        range), with the original signs.
    """
    if levels is None:
        b = quant_parameters[0]
        k = quant_parameters[1]
        levels = cal_levels_for_APoT(b, k)

    if isinstance(levels, np.ndarray):
        levels = torch.from_numpy(levels).to(w.device)

    if approach == 'maxabs':
        upper_bound = torch.max(w.abs(), dim=axis, keepdim=True)[0]
        interval = upper_bound
        interval[interval == 0] = 1e-8  # avoid division by zero
        w_abs_norm = torch.abs(w) / interval
        lower_bound = torch.zeros_like(upper_bound)
    elif approach == 'minmaxabs':
        upper_bound = torch.max(w.abs(), dim=axis, keepdim=True)[0]
        lower_bound = torch.min(w.abs(), dim=axis, keepdim=True)[0]
        interval = upper_bound - lower_bound
        interval[interval == 0] = 1e-8
        w_abs_norm = (torch.abs(w) - lower_bound) / interval
    else:
        raise ValueError('Invalid approach.')
    # Nearest-level search: broadcast the normalized magnitudes against the
    # level table along a new trailing axis and pick the closest entry.
    levels = levels.reshape(*[1] * w_abs_norm.ndim, -1)
    w_abs_norm = w_abs_norm[..., np.newaxis]
    res = torch.abs(levels - w_abs_norm)
    min_indx = torch.argmin(res, dim=-1)
    w_quant = torch.sign(
        w) * levels.squeeze()[min_indx] * interval + lower_bound
    return w_quant


class AvrNLLayer(nn.Module):
    """Nonlinear compensation layer: rotates the signal by a phase
    proportional to a moving-average of its total power, with a single
    trainable scaling factor."""

    def __init__(self, init_factor, ntap):
        super(AvrNLLayer, self).__init__()
        self.fac = nn.Parameter(torch.tensor((init_factor,)),
                                requires_grad=True)
        self.lpf = MovAvrLayer(N=ntap)
        self.ntap = ntap
        # The averaging filter stays fixed; only `fac` is trained.
        for p in self.lpf.parameters():
            p.requires_grad_(False)

    def forward(self, x):
        ''' x shape: [B, pol, block, 2] '''
        B, pol, block, _ = x.shape
        # Total instantaneous power over both polarizations and I/Q.
        total_power = torch.sum(x ** 2, dim=[1, -1], keepdim=True)
        nl_phase = self.fac * total_power

        # Smooth the phase estimate with the fixed moving-average filter.
        nl_phase = format_rt(nl_phase[..., 0])
        nl_phase = self.lpf(nl_phase)

        # Build 1 and j in the real/imag-split representation.
        j_unit = torch.zeros_like(nl_phase)
        j_unit[..., 1] = 1
        one_unit = torch.zeros_like(nl_phase)
        one_unit[..., 0] = 1

        # First-order rotation (1 + j*phase) applied to the signal.
        rotation = one_unit + cmul(j_unit, nl_phase)
        return cmul(rotation, x)

    @staticmethod
    def cal_init_factor(gm, eps, alp_dB, power_dB, step_len):
        """Initial nonlinear factor from fiber parameters:
        8/9 * gamma * eps * Leff * power (power_dB in dBm)."""
        power = 10 ** (power_dB / 10 - 3)
        alp = -np.log(np.power(10, -alp_dB / 10))
        Leff = (1 - np.exp(-alp * step_len)) / alp
        return 8 / 9 * gm * eps * Leff * power


def module_test():
    """End-to-end smoke test of the DSP chain on a stored .mat capture.

    Pipeline: matched filter -> downsample -> EDC -> CMA (time-varying FIR)
    -> polarization pick -> symbol timing -> phase recovery -> LMS (DDM
    errors), then BER/Q printout and constellation plots drawn in child
    processes.
    """
    import scipy.io as scio
    import os
    import util
    import matplotlib.pyplot as plt
    from multiprocessing import Process

    BASE_DIR = os.path.dirname(__file__)

    # tstDataPath = os.path.join(
    #     BASE_DIR, 'testData/BPS_4_seed_1_dBm_4_Loops_20_singleChannel_singlePol')
    tstDataPath = os.path.join(
        BASE_DIR,
        'testData/BPS_4_seed_1_dBm_0_Loops_20_singleChannel_dualPol_hasNL.mat')

    # Load the simulated capture and its metadata.
    data = scio.loadmat(tstDataPath)
    symbol_rate = data['symbol_rate'][0, 0]
    bitsPerSymbol = data['BitsPerSymbol'][0, 0]
    spanNum = data['Loops_N'][0, 0]
    rolloff = data['RollOff'][0, 0]
    lp = data['dBm_N'][0, 0]
    hasNL = data['hasNL'][0, 0]
    hasPMD = data['hasPMD'][0, 0]
    prbsx = data['prbsx']
    prbsy = data['prbsy']
    # use_cuda = False
    use_cuda = True

    # Gray code
    constellations = np.array([
                                  -3 + 1j * 3, -3 + 1j * 1, -3 + 1j * -3, -3 + 1j * -1, -1 + 1j * 3,
                                  -1 + 1j * 1, -1 + 1j * -3, -1 + 1j * -1, 3 + 1j * 3, 3 + 1j * 1, 3 +
                                  1j * -3, 3 + 1j * -1, 1 + 1j * 3, 1 + 1j * 1, 1 + 1j * -3, 1 + 1j * -1
                              ] / np.sqrt(10))

    constellations = torch.from_numpy(constellations)
    # Accumulated dispersion over the whole link
    # (presumably D = 17 ps/nm/km times total length L -- TODO confirm units).
    spanLen = 80e3
    L = spanLen * spanNum
    DL = 17e-6 * L
    # Stack both polarizations into one [1, 2, T] complex tensor.
    sigx = torch.from_numpy(data['signalx'])
    sigy = torch.from_numpy(data['signaly'])
    sigx = sigx.reshape(1, 1, -1)
    sigy = sigy.reshape(1, 1, -1)
    sig = torch.cat([sigx, sigy], dim=1)

    # Back-to-back reference signal (labels), same layout.
    sigxb2b = torch.from_numpy(data['signalx_label'])
    sigyb2b = torch.from_numpy(data['signaly_label'])
    sigxb2b = sigxb2b.reshape(1, 1, -1)
    sigyb2b = sigyb2b.reshape(1, 1, -1)
    sigb2b = torch.cat([sigxb2b, sigyb2b], dim=1)

    # Switch to the real/imag-split representation used by the layers.
    sig = torch.view_as_real(sig)
    sigb2b = torch.view_as_real(sigb2b)

    # DSP building blocks.
    mf = MatchedFilterLayer(alpha=rolloff,
                            span=100,
                            sample_factor=16,
                            case_num=2)
    edc = EDCLayer(symbol_rate=symbol_rate,
                   DL=DL,
                   case_num=2,
                   init_method='FSM')
    pr = PhaseRecLayer(1024)
    lms = FIRLayer(tap=32,
                   case_num=2,
                   power_norm=True,
                   centor_one=True,
                   inter_pol=True)
    ste = SymbolTimingEstLayer(smplFac=2)

    if torch.cuda.is_available() and use_cuda:
        sig = sig.cuda()
        sigb2b = sigb2b.cuda()
        lms = lms.cuda()
        edc = edc.cuda()
        mf = mf.cuda()
        pr = pr.cuda()
    choosen_device = sig.device
    ''' DSP procedure for the signal through the link '''

    prbs = np.concatenate([prbsx, prbsy], axis=0)
    with torch.no_grad():
        sig = mf(sig)
        # Keep every 8th sample (16x oversampled -> 2 samples/symbol).
        sig = sig[..., 0::8, :]
        sig = edc(sig)
        ''' CMA '''
        # Constant-modulus equalization runs on CPU.
        sig = sig.cpu()
        sig = FIRLayer.time_vary_infer(
            sig,
            err_mode='Godard',
            iter_num=2,
            lr=5e-4,
            tap=13,
        )

        # Match recovered polarizations to the transmitted PRBS sequences.
        sig_np = pick_pol(
            torch.view_as_complex(sig).data.cpu().numpy().squeeze(),
            prbs,
            sample_factor=2)
        sig = torch.from_numpy(sig_np.copy()).to(choosen_device)
        sig = torch.view_as_real(sig)
        sig = sig[np.newaxis, ...]
        sig = ste(sig)

        # Phase recovery on the on-time samples (1 sample/symbol).
        sig = pr(sig[..., 0::2, :])
        # sig = sig[..., 1::2, :]

    # Decision-directed LMS equalization, then inference.
    lms.fit(sig,
            err_mode='DDM',
            constellations=constellations,
            iter_num=20,
            block_size=4028,
            remain=2048)
    sig = lms(sig)

    sig = torch.view_as_complex(sig)

    sig = sig.cpu().data.numpy().squeeze()

    # BER / Q-factor evaluation against the PRBS reference.
    sig, ber, _ = util.pr_ber(sig, prbs, constellations.cpu().data.numpy())

    ber = np.mean(ber)
    print(ber)
    Q = util.ber2q(ber)

    print(Q)

    # Draw constellation plots in child processes so they don't block.
    p = Process(target=util.colorfulPlot,
                args=(sig[0, :], prbsx),
                kwargs={'show': True})
    p.start()

    p2 = Process(target=util.colorfulPlot,
                 args=(sig[1, :], prbsy),
                 kwargs={'show': True})
    p2.start()
    ''' end '''


class DBP(nn.Module):
    """Digital Back-Propagation (DBP) module.

    Builds `step` alternating pairs of (linear EDC layer, nonlinear
    compensation layer) that undo chromatic dispersion and Kerr
    nonlinearity accumulated over `trans_length` of fiber, followed by an
    optional frozen EDC stage when `pre_cd != 0`.

    Parameters mirror the physical link: dispersion parameter `D`, light
    speed `c`, carrier `wave_length`, per-step EDC tap count, oversampling
    `sample_factor`, `symbol_rate`, nonlinear coefficient `gm`, number of
    back-propagation steps `step`, total fiber length `trans_length`,
    launch power `lp` (dB), attenuation `alpha` (dB), initial nonlinear
    compensation factor `init_comp_fac`, pre-compensated CD fraction
    `pre_cd`, per-layer LS regularization seed `lmbd_each_layer`, and the
    EDC weight `edc_init_method`.
    """

    def __init__(self,
                 D=17e-6,
                 c=3e8,
                 wave_length=1552.52e-9,
                 tap_per_edc=256,
                 sample_factor=2,
                 symbol_rate=28e9,
                 power_norm=False,
                 case_num=2,
                 gm=1.3e-3,
                 step=30,
                 trans_length=2400e3,
                 lp=0,
                 alpha=0.2e-3,
                 init_comp_fac=0.1,
                 pre_cd=0,
                 lmbd_each_layer=5.12589879e-05,
                 edc_init_method='FSM'):
        super().__init__()
        # Dispersion and length handled by each individual step.
        dl_per_step = D * trans_length / step
        step_len = trans_length / step
        # Initial scaling for every nonlinear layer, derived from the
        # per-step launch conditions.
        nl_init_fac = CRExpNLLayer.cal_init_factor(gm=gm,
                                                   alp_dB=alpha,
                                                   power_dB=lp,
                                                   step_len=step_len)
        self.step = step
        self.edc_layers = nn.ModuleList()
        self.nl_layers = nn.ModuleList()
        for _ in range(step):
            edc = EDCLayer(symbol_rate=symbol_rate,
                           DL=dl_per_step,
                           case_num=case_num,
                           c=c,
                           wave_length=wave_length,
                           tap=tap_per_edc,
                           sample_factor=sample_factor,
                           power_norm=power_norm,
                           init_method=edc_init_method,
                           lmbd_for_LS_CO=lmbd_each_layer)
            # Chain the regularization value: each layer seeds the next
            # with whatever it actually resolved to.
            lmbd_each_layer = edc.lmbd_for_LS_CO
            self.edc_layers.append(edc)
            self.nl_layers.append(CRExpNLLayer(nl_init_fac, init_comp_fac))

        self.tail_edc = pre_cd != 0
        if self.tail_edc:
            # Undo transmitter-side CD pre-compensation with a fixed
            # (non-trainable) EDC stage of opposite sign.
            self.edc = EDCLayer(symbol_rate=symbol_rate,
                                DL=-D * trans_length * pre_cd,
                                case_num=case_num,
                                c=c,
                                wave_length=wave_length,
                                tap=2048,
                                sample_factor=sample_factor,
                                power_norm=power_norm,
                                init_method='FSM')
            for param in self.edc.parameters():
                param.requires_grad_(False)

    def forward(self, x):
        """Apply all EDC/nonlinear step pairs, then the optional tail EDC."""
        for i in range(self.step):
            x = self.nl_layers[i](self.edc_layers[i](x))
        if self.tail_edc:
            x = self.edc(x)
        return x

    @property
    def device(self):
        # Device of the first registered parameter (all live together).
        return next(self.parameters()).device


if __name__ == '__main__':
    # Ad-hoc smoke-test area. Everything below BASE_DIR is commented-out
    # experiment code kept for reference: symbol decision check, module
    # test, moving-average layer, complex mul/div helpers, Gaussian LPF
    # tap inspection, and EDC frequency-response plotting.
    import os
    import util  # NOTE(review): re-import; `util` is already imported at module level.

    # Directory containing this source file. Currently unused by the
    # active (uncommented) code — only the commented experiments below
    # would reference it for loading/saving data.
    BASE_DIR = os.path.dirname(__file__)

    # ''' test decide symbol torch version '''
    # prbs_len = 8192
    # prbs = np.random.randint(low=0, high=2, size=(2, prbs_len))
    # mod_order = 4
    # constellations = np.array([
    #     -3 + 1j * 3, -3 + 1j * 1, -3 + 1j * -3, -3 + 1j * -1, -1 + 1j * 3,
    #     -1 + 1j * 1, -1 + 1j * -3, -1 + 1j * -1, 3 + 1j * 3, 3 + 1j * 1, 3 +
    #     1j * -3, 3 + 1j * -1, 1 + 1j * 3, 1 + 1j * 1, 1 + 1j * -3, 1 + 1j * -1
    # ] / np.sqrt(10))

    # const_torch = torch.from_numpy(constellations)
    # const_torch = torch.view_as_real(const_torch)

    # sym = util.map(prbs, constellations, mod_order)

    # sym = torch.from_numpy(sym)

    # sym = torch.view_as_real(sym)
    # sym = sym_decide(sym, const_torch)
    # sym = torch.view_as_complex(sym).squeeze()

    # sym = sym.cpu().data.numpy()

    # prbs_demap = util.demap(sym, constellations)

    # ber = util.cal_ber(prbs, prbs_demap)
    # print(ber)

    # module_test()

    # a = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=torch.float)
    # a = format_rt(a)
    # mv = MovAvrLayer(3)
    # a = a.reshape(1, 1, -1, 2)
    # print(a)
    # with torch.no_grad():
    #     b = mv(a)
    # print(b)

    # a = torch.tensor([1+1j])
    # b = torch.tensor([1-1j])

    # a = torch.view_as_real(a)
    # b = torch.view_as_real(b)

    # c = cmul(a, b)

    # d = cdiv(a, b)

    # e = torch.tensor([1, 2, 3])
    # print(c)
    # print(d)
    # print(format_rt(e))

    # BW=0.3
    # sample_factor=10
    # span=3
    # import matplotlib.pyplot as plt
    # tap_fd = gaussianLPF(BW=BW, sample_factor=sample_factor, span=span)
    # # print(tap_fd)
    # # plt.plot(tap_fd)
    # # plt.show()
    # # plt.close()

    # glpf = GaussianLPFLayer(BW=BW, sample_factor=sample_factor, span=span, case_num=2)

    # h_check = glpf.h.data.cpu().numpy()
    # plt.plot(h_check[1,:,0])
    # plt.show()
    # plt.close()

    # import matplotlib.pyplot as plt

    # edc = EDCLayer(DL=17e-6*3200e3, tap=60, symbol_rate=32e9)

    # h = edc.h.data.numpy()

    # h = h[..., 0]+ 1j*h[..., 1]

    # plt.plot(np.abs(np.fft.fft(h[0,:])))
    # plt.savefig('/home/hepinjing/OCAI/Weighted-LearnedDBP/temp_results/h.png')
