"""
Pytorch Classes and methods for DSP algorithms.
Modified on Dec. 21, 2020.
@author: Pinjing He (hepinjing93@gmail.com).

"""
import torch
import torch.nn as nn
import torch.fft as tfft
import numpy as np
import math
from CVPytorch import CAdam
from random import shuffle
from scipy.signal import firwin
import util

def decimate(x, q, n=None, axis=-1, zero_phase=True):
    """Downsample the signal after applying an anti-aliasing filter.
    By default, a 30 point FIR filter with Hamming window is used.

    Parameters
    ----------
    x : array_like
        The signal to be downsampled.
    q : int
        The downsampling (decimation) factor.
    n : int, optional
        The order of the FIR filter, by default 30.
    axis : int, optional
        The axis along which to decimate, by default -1.
    zero_phase : bool, optional
        Use forward-backward filtering (``filtfilt``, no group delay)
        instead of causal ``lfilter``, by default True.

    Returns
    -------
    numpy array
        The signal with the length along ``axis`` reduced by ``q``.

    Raises
    ------
    ValueError
        When ``q`` is not a positive integer.
    """
    # Local import keeps the module-level dependency list unchanged.
    from scipy.signal import filtfilt, lfilter

    if q != int(q) or q < 1:
        raise ValueError('q should be a positive integer.')
    q = int(q)
    if n is None:
        n = 30  # default filter order, see docstring

    x = np.asarray(x)
    # Low-pass anti-aliasing filter with cutoff at the new Nyquist rate.
    taps = firwin(n + 1, 1.0 / q, window='hamming')
    if zero_phase:
        # Forward-backward filtering cancels the filter's phase response.
        y = filtfilt(taps, 1.0, x, axis=axis)
    else:
        y = lfilter(taps, 1.0, x, axis=axis)

    # Keep every q-th sample along the requested axis.
    picker = [slice(None)] * y.ndim
    picker[axis] = slice(None, None, q)
    return y[tuple(picker)]

def pick_pol(sig, prbs, sample_factor=2):
    """Resolve the polarization ambiguity of ``sig`` against ``prbs``.

    The BER is measured for both the original polarization order and the
    order with the polarization axis flipped; whichever gives the lower
    mean BER is returned at the original sampling rate.
    """
    # Work on a symbol-rate view for the BER probe; keep the input intact.
    if sample_factor > 1:
        probe = sig[..., round(sample_factor // 2)::sample_factor]
    else:
        probe = sig

    # 16-QAM Gray-coded constellation, normalized to unit average power.
    const = np.array(
        [-3 + 1j * 3, -3 + 1j * 1, -3 + 1j * -3, -3 + 1j * -1,
         -1 + 1j * 3, -1 + 1j * 1, -1 + 1j * -3, -1 + 1j * -1,
         3 + 1j * 3, 3 + 1j * 1, 3 + 1j * -3, 3 + 1j * -1,
         1 + 1j * 3, 1 + 1j * 1, 1 + 1j * -3, 1 + 1j * -1]) / np.sqrt(10)

    _, ber_keep, _ = util.pr_ber(probe, prbs, constellations=const)
    _, ber_swap, _ = util.pr_ber(
        np.flip(probe, axis=(-2,)), prbs, constellations=const)

    if np.mean(ber_keep) < np.mean(ber_swap):
        return sig
    return np.flip(sig, axis=(-2,))

def sym_decide(symbols, constellations):
    """Make hard decisions on the input symbols: map each symbol to the
    nearest ideal constellation point (after power normalization).

    Parameters
    ----------
    symbols : numpy array
        Input symbols array; symbols run along the last axis.
    constellations : numpy array
        Constellations of the ideal symbols.

    Returns
    -------
    numpy array
        The nearest ideal symbol for every input symbol.
    """
    # Broadcast the constellation across all leading axes of the input.
    ideal = constellations.reshape(*[1] * symbols.ndim, -1)

    # Normalize the input to unit average power before the distance test.
    rms = np.sqrt(np.mean(symbols * symbols.conj(), axis=-1, keepdims=True))
    normed = symbols / rms

    # Index of the closest constellation point for each symbol.
    nearest = np.argmin(
        np.abs(normed[..., np.newaxis] - ideal), axis=-1).astype(np.uint8)

    return np.take(ideal, nearest)

class BlockSelector():
    """
    Data block selector class.

    Splits a signal of ``sig_size`` samples into overlapping blocks of
    ``block_size`` samples that advance by ``remain`` samples each step,
    using circular (wrap-around) indexing at the signal edges.
    """
    def __init__(self, sig_size, block_size, remain):
        if remain > block_size:
            raise Exception('block_size should greater than remain.')
        if block_size > sig_size:
            raise Exception('sig_size should greater than block_size.')
        self.sig_size = sig_size
        self.block_size = block_size
        self.remain = remain
        # Number of steps needed to cover the whole signal.
        self.block_num = math.ceil(sig_size / remain)
        # Samples by which the last block overshoots the signal end.
        self.tail_size = self.block_num * remain - sig_size
        # Symmetric overlap added on each side of the ``remain`` payload.
        self.p_overlap = math.ceil((block_size - remain) / 2)
        self.wrapper = lambda x, N: x % N

    def __len__(self):
        return self.block_num

    def __getitem__(self, indx):
        start = indx * self.remain - self.p_overlap
        window = np.arange(start, start + self.block_size)
        # Circular wrap keeps every index inside the signal.
        return self.wrapper(window, self.sig_size)

def RRC(alpha, sample_factor, span):
    """ Calculate the square root raised cosine filter taps in the
    frequency domain.

    Parameters
    ----------
    alpha : scalar
        Roll-off factor.
    sample_factor : scalar
        The number of samples per symbol.
    span : scalar
        Correlated symbols.

    Returns
    -------
    numpy array
        The filter taps in frequency domain.

    Raises
    ------
    Exception
        When the tap number is not even.
    """
    # Validate here as well so the error surfaces at the public entry point.
    if span * sample_factor % 2 != 0:
        raise Exception("The number of filter taps should be even.")

    # The RRC response is the square root of the RC response.
    return np.sqrt(RC(alpha, sample_factor, span))

def RC(alpha, sample_factor, span):
    """Calculate the raised cosine filter taps in the frequency domain.

    Parameters
    ----------
    alpha : scalar
        Roll-off factor in [0, 1]. ``alpha == 0`` yields the brick-wall
        (ideal low-pass) response.
    sample_factor : scalar
        The number of samples per symbol.
    span : scalar
        Correlated symbols.

    Returns
    -------
    numpy array
        The filter taps in the frequency domain, shape
        ``(span*sample_factor, 1)``.

    Raises
    ------
    Exception
        When the tap number is not even.
    """
    # check input legality
    if not span*sample_factor % 2 == 0:
        raise Exception("The number of filter taps should be even.")
    N = span*sample_factor
    k = np.arange(N)

    # Frequency domain implementation. k/N*sample_factor is the
    # frequency normalized to the symbol rate.
    ret = np.zeros((N,))
    # Flat passband below the roll-off region.
    k_pick = k/N*sample_factor <= (1-alpha)/2
    ret[k_pick] = 1
    # Raised-cosine transition band. Skipped for alpha == 0: the band is
    # empty there and np.pi/alpha would raise ZeroDivisionError.
    if alpha > 0:
        mask = np.logical_and(k/N*sample_factor > (1-alpha)/2,
                              k/N*sample_factor < (1+alpha)/2)
        ret[mask] = 0.5*(1-np.sin(np.pi/alpha*(k[mask]/N*sample_factor-0.5)))
    # Mirror the positive-frequency half to get a symmetric real response.
    ret[N//2+1:] = ret[N//2-1:0:-1]

    # ret=ret/np.sqrt(np.mean(ret*ret))
    return ret[..., np.newaxis]

class FIRLayer(nn.Module):
    """Complex FIR filter layer applied via frequency-domain (circular)
    convolution.

    Holds two learnable tap sets per channel ("case"): ``h`` filters each
    polarization with itself and ``h_inter_pol`` mixes in the flipped
    polarization, forming a butterfly/MIMO equalizer structure.

    Parameters
    ----------
    tap : int
        Number of filter taps; must be even.
    case_num : int, optional
        Number of channels/polarizations, by default 2.
    power_norm : bool, optional
        Normalize each input block to unit average power in ``forward``,
        by default True.
    centor_one : bool, optional
        Initialize ``h`` as a centered unit impulse (identity filter),
        by default True.
    """
    def __init__(self, tap, case_num=2, power_norm=True, centor_one=True):
        super(FIRLayer, self).__init__()
        # -----------------initialize linear kernel---------------
        assert(tap % 2 == 0)

        h = torch.zeros((tap, case_num), dtype=torch.complex128)
        if centor_one == True:
            # Unit impulse at the center tap -> identity filtering.
            h[tap//2, :] = 1
        self.h = nn.Parameter(h.transpose(0, 1), requires_grad=True)

        # Cross-polarization taps start at zero (no mixing initially).
        h_inter_pol = torch.zeros((tap, case_num), dtype=torch.complex128)
        self.h_inter_pol = nn.Parameter(
            h_inter_pol.transpose(0, 1), requires_grad=True)
        self.tap = tap
        self.power_norm = power_norm
        self.case_num = case_num

    def forward(self, x):
        """Filter ``x`` of shape [batch, pol, block_size] with the layer
        taps by circular convolution in the frequency domain."""
        batchSize, pol, block_size = x.shape
        assert(pol == self.case_num)
        if self.power_norm == True:
            # Normalize each block to unit average power.
            x = x / torch.sqrt(torch.sum(torch.abs(x)**2,
                                         dim=-1, keepdim=True)/block_size)
        # flip(dims=(1,)) swaps the polarizations for the cross terms.
        x = tfft.fft(x)
        x = x*tfft.fft(self.h, n=block_size)+x.flip(
            dims=(1,))*tfft.fft(self.h_inter_pol, n=block_size)
        x = tfft.ifft(x)
        # Undo the group delay of the centered taps.
        x = torch.roll(x, -self.tap//2, dims=-1)
        return x

    def fit(self, sig, err_mode='DDM', power_norm=True, lr=1e-3, constellations=None, sig_label=None, block_size=128, remain=1, iter_num=2, shuffle_each_iter=True, optimizer=None):
        """Adapt the filter taps block-by-block on ``sig``.

        Parameters
        ----------
        sig : torch.Tensor
            Input signal of shape [1, pol, samples].
        err_mode : str, optional
            'DDM' (decision-directed; needs ``constellations``) or 'DAM'
            (data-aided; needs ``sig_label``), by default 'DDM'.
        power_norm : bool, optional
            Normalize ``sig`` to unit average power first, by default True.
        lr : float, optional
            Learning rate for the default CAdam optimizer, by default 1e-3.
        constellations : array-like, optional
            Ideal constellation points used for DDM decisions.
        sig_label : torch.Tensor, optional
            Reference signal used for DAM.
        block_size : int, optional
            Samples per training block, by default 128.
        remain : int, optional
            Non-overlapping payload per block (block advance), by default 1.
        iter_num : int, optional
            Number of passes over the signal, by default 2.
        shuffle_each_iter : bool, optional
            Randomize the block order every pass, by default True.
        optimizer : torch.optim.Optimizer, optional
            Custom optimizer; a CAdam instance is created when None.

        Returns
        -------
        torch.Tensor
            The equalized signal, same shape as ``sig``.

        Raises
        ------
        Exception
            On invalid shapes, modes or missing references.
        """
        if sig.ndim != 3 or sig.shape[0] != 1:
            raise Exception('sig should have the shape of [1, pol, samples]')

        if err_mode == 'DDM':
            if constellations is None:
                raise Exception('When the err_mode is DDM, constellations should be assigned.')
            if isinstance(constellations, torch.Tensor):
                # sym_decide works on numpy arrays.
                constellations = constellations.cpu().data.numpy()

        elif err_mode == 'DAM':
            if sig_label is None:
                raise Exception('When the err_mode is DAM, sig_label should be assigned.')
        else:
            raise Exception('Invalid err_mode.')

        if optimizer is not None:
            if not isinstance(optimizer, torch.optim.Optimizer):
                raise Exception('optimizer is supposed to be an instance of torch.optim.Optimzer.')
        else:
            optimizer = CAdam(self.parameters(), lr=lr)

        def error_function(y, yl):
            # Mean squared error per block (complex residual power).
            res = y-yl
            err = torch.sum(res * res.conj(), dim=(-1, -2)) / y.shape[-1] / 2
            # err = torch.mean(res * res.conj(), dim=(-1, -2))
            return err

        if power_norm == True:
            with torch.no_grad():
                sig = sig / \
                    torch.sqrt(torch.sum(torch.abs(sig)**2,
                                        dim=-1, keepdim=True)/sig.shape[-1])
        sig = sig.detach()
        # ret accumulates the equalized output; decisions for the next
        # pass are taken from it.
        ret = sig.clone()
        bs = BlockSelector(sig_size=sig.shape[-1],
                block_size=block_size, remain=remain)

        if err_mode == 'DAM':
            sig_dec = sig_label

        for iter in range(iter_num):
            if err_mode == 'DDM':
                # Refresh hard decisions from the latest equalized signal.
                ret_np = ret.cpu().data.numpy()
                sig_dec = torch.from_numpy(sym_decide(
                    ret_np, constellations)).to(sig.device)

            block_sample_list = list(range(len(bs)))
            if shuffle_each_iter:
                shuffle(block_sample_list)

            for block_indx in block_sample_list:
                indexes = bs[block_indx]

                ''' sig_temp shape: [1, pol, tap] '''
                sig_temp = sig[..., indexes]

                y = self(sig_temp)
                yl = sig_dec[..., indexes]
                err = error_function(y, yl)

                optimizer.zero_grad()
                err.backward(torch.ones_like(err))
                optimizer.step()

                # Write back only the central ``remain`` samples of the
                # block output (overlap regions are discarded); the last
                # block is shortened by tail_size where it wrapped around.
                if block_indx != len(bs)-1:
                    ret[..., block_indx*remain:(
                        block_indx+1)*remain] = y[..., bs.p_overlap:remain+bs.p_overlap]
                else:
                    ret[..., block_indx*remain:(
                        block_indx+1)*remain-bs.tail_size] = y[..., bs.p_overlap:remain+bs.p_overlap-bs.tail_size]
        return ret

    @staticmethod
    def time_vary_infer(sig, iter_num=2, err_mode='Godard', lr=1e-3, tap=25, constellations=None):
        """Sample-by-sample adaptive (time-varying) equalization with a
        manually updated butterfly filter (CMA/'Godard' or decision
        directed 'DDM' error), without autograd.

        Parameters
        ----------
        sig : torch.Tensor
            Input signal of shape [1, pol, samples].
        iter_num : int, optional
            Number of passes over the signal, by default 2.
        err_mode : str, optional
            'Godard' (CMA) or 'DDM', by default 'Godard'.
        lr : float, optional
            LMS step size, by default 1e-3.
        tap : int, optional
            Filter length, by default 25.
        constellations : array-like, optional
            Required when ``err_mode`` is 'DDM'.

        Returns
        -------
        torch.Tensor
            The equalized signal, same shape as ``sig``.
        """
        if sig.ndim != 3 or sig.shape[0] != 1:
            raise Exception('sig should have the shape of [1, pol, samples]')

        if err_mode == 'DDM' and constellations is None:
            raise Exception('When the err_mode is DDM, constellations should be assigned.')

        if isinstance(constellations, torch.Tensor):
            constellations = constellations.cpu().data.numpy()

        def error_function(y, err_mode, yl=None):
            if err_mode == 'Godard':
                # CMA error: deviation of |y|^2 from the unit modulus.
                res = torch.conj(y)*y-1
                # err = torch.sum(torch.squeeze(res * res.conj()))/2
                err = y.conj() * res
            elif err_mode == 'DDM':
                # NOTE(review): 'yl is None' would be safer than '== None'
                # for tensors; left unchanged here.
                if yl == None:
                    raise Exception('Decision result should be assigned')
                res = y-yl
                err = torch.sum(res * res.conj(), dim=(-1, -2)) / y.shape[-1] / 2
            else:
                raise ValueError('Invalid error mode.')
            return err

        # w: direct taps, wi: cross-polarization taps.
        w = torch.zeros([1, 2, tap], device=sig.device, dtype=sig.dtype)
        wi = torch.zeros([1, 2, tap], device=sig.device, dtype=sig.dtype)

        if err_mode == 'Godard':
            # Center-spike initialization for CMA.
            w[..., round(tap//2)] = 1

        with torch.no_grad():
            sig_size = sig.shape[-1]
            # Normalize the whole signal to unit average power.
            sig = sig / \
                torch.sqrt(torch.sum(torch.abs(sig)**2,
                                    dim=-1, keepdim=True)/sig_size)
            ret = sig.clone()
            bs = BlockSelector(sig_size=sig_size,
                            block_size=tap, remain=1)
            for iter in range(iter_num):
                if err_mode == 'DDM':
                    # Refresh decisions from the latest equalized signal.
                    ret_np = ret.cpu().data.numpy()
                    sig_dec = torch.from_numpy(sym_decide(
                        ret_np, constellations)).to(sig.device)
                elif err_mode == 'Godard':
                    ret_np = ret.cpu().data.numpy()
                else:
                    raise NotImplementedError

                for block_indx in range(len(bs)):
                    indexes = bs[block_indx]

                    ''' sig_temp shape: [1, pol, tap] '''
                    sig_temp = sig[..., indexes]
                    y = torch.sum(w.conj() * sig_temp + wi.conj() * torch.flip(sig_temp, dims=(1,)), dim=-1, keepdim=True)

                    # Update the taps on every other sample only —
                    # presumably one update per symbol at 2 samples per
                    # symbol; TODO confirm.
                    if block_indx % 2 == 1:
                        if err_mode == 'Godard':
                            err = error_function(y, err_mode)
                        else:
                            yl = sig_dec[..., indexes]
                            err = error_function(y, err_mode, yl)

                        # Stochastic gradient step on both tap sets.
                        w = w - lr * err * sig_temp
                        wi = wi - lr * err * torch.flip(sig_temp, dims=(1,))

                    ret[..., block_indx:(block_indx+1)] = y
        return ret

class MatchedFilterLayer(FIRLayer):
    """FIR layer pre-loaded with matched-filter (RRC) taps.

    The taps are computed in the frequency domain, conjugated for
    matched filtering, transformed to the time domain and copied into
    every channel of the inherited FIR kernel.
    """

    def __init__(self, alpha, span, sample_factor, case_num, power_norm=True, shape='rrc'):
        tap = span * sample_factor
        super(MatchedFilterLayer, self).__init__(
            tap=tap, case_num=case_num, power_norm=power_norm)
        assert(tap % 2 == 0)
        self.sf = sample_factor
        self.shape = shape

        if self.shape != 'rrc':
            raise NotImplementedError
        # Frequency-domain RRC response, conjugated for matched filtering.
        freq_taps = RRC(alpha=alpha, sample_factor=sample_factor, span=span)
        freq_taps = torch.from_numpy(freq_taps.conj()).reshape([1, tap])

        # Back to the time domain, centered inside the tap window.
        time_taps = tfft.ifft(freq_taps, dim=-1).roll(tap // 2)
        for channel in range(case_num):
            self.h.data[channel, :] = time_taps

class EDCLayer(FIRLayer):
    """FIR layer pre-loaded with electronic dispersion compensation taps.

    The chromatic-dispersion compensating frequency response is sampled
    on an angular-frequency grid, transformed to the time domain and
    written into every channel of the inherited FIR kernel.
    """

    def __init__(self, DL=17e-6*100e3, c=3e8, waveLength=1552.52e-9, tap=2048, sampleFactor=2, symbolRate=28e9, power_norm=False, case_num=1):
        super(EDCLayer, self).__init__(
            tap=tap, power_norm=power_norm, case_num=case_num)
        # -----------------initialize linear kernel---------------
        assert(tap % 2 == 0)
        self.sampleFactor = sampleFactor

        # Sampling interval: one symbol period split over sampleFactor samples.
        Ts = (1/symbolRate)/sampleFactor
        # Angular-frequency grid centered around zero.
        omega = (2*np.pi/Ts/tap)*torch.arange(-tap/2, tap/2)
        # Quadratic phase of the dispersion-compensating transfer function.
        phase = 1j/2*-DL*waveLength**2/(2*np.pi*c)*omega**2

        # fftshift the response, go to the time domain, re-center the taps.
        H = torch.roll(torch.exp(phase), tap//2)
        kernel = torch.roll(tfft.ifft(H), tap//2)

        for channel in range(case_num):
            self.h.data[channel, :] = kernel

class CSGD(torch.optim.Optimizer):
    """Plain (complex-capable) SGD with optional weight decay.

    Parameters
    ----------
    parameters : iterable
        Parameters to optimize, or dicts defining parameter groups.
    lr : float
        Learning rate.
    weight_decay : float, optional
        L2 penalty coefficient, by default 0.
    """

    def __init__(self, parameters, lr, weight_decay=0):
        defaults = dict(lr=lr, weight_decay=weight_decay)
        super(CSGD, self).__init__(parameters, defaults)
        # Kept for backward compatibility with code that reads these
        # attributes; self.param_groups is the authoritative source.
        self.param = parameters
        self.lr = lr
        self.weight_decay = weight_decay

    def step(self, closure=None):
        """Perform a single optimization step.

        Parameters
        ----------
        closure : callable, optional
            Re-evaluates the model and returns the loss (torch.optim
            convention), by default None.

        Returns
        -------
        The loss returned by ``closure``, or None.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        with torch.no_grad():
            for group in self.param_groups:
                # Use the per-group hyperparameters so parameter groups
                # with their own lr/weight_decay behave correctly.
                lr = group['lr']
                weight_decay = group['weight_decay']
                for param in group['params']:
                    # Parameters that did not take part in the backward
                    # pass have no gradient and must be skipped.
                    if param.grad is None:
                        continue
                    param -= lr * (param.grad + weight_decay * param)
        return loss

def movAvr(x, win_size):
    """Moving average along the last axis via frequency-domain
    (circular) convolution with a length-``win_size`` boxcar window.

    The output is rolled so the average is centered on each sample.
    """
    pol, block_size = x.shape[-2], x.shape[-1]
    # Boxcar kernel, one row per polarization, moved to the input device.
    kernel = torch.ones((pol, win_size)) / win_size
    kernel = kernel.to(x.device)

    # Circular convolution: multiply in the frequency domain.
    spectrum = tfft.fft(x) * tfft.fft(kernel, n=block_size)
    smoothed = tfft.ifft(spectrum)

    # Compensate the kernel delay so the window is centered.
    return torch.roll(smoothed, -(int(win_size) // 2), dims=-1)

class PhaseRecLayer(nn.Module):
    """Viterbi-Viterbi style carrier phase recovery.

    The input is raised to the 4th power to strip the 4-fold symmetric
    modulation phase, the resulting phase is averaged over ``win_size``
    samples, unwrapped, divided by 4 and removed from the signal.

    Parameters
    ----------
    win_size : int
        Moving-average window length for the phase estimate.
    rotate_fac : torch.Tensor, optional
        1-D per-polarization constant rotation applied after phase
        removal, by default None.
    """
    def __init__(self, win_size, rotate_fac=None):
        super(PhaseRecLayer, self).__init__()
        self.win_size = win_size
        self.rotate_fac = rotate_fac
        if rotate_fac is not None:
            assert(isinstance(rotate_fac, torch.Tensor))
            assert(rotate_fac.ndim == 1)
            # Reshape to [1, pol, 1] so it broadcasts over batch/samples.
            self.rotate_fac = self.rotate_fac.reshape(1,-1,1)
    def forward(self, sig):
        """Estimate and remove the common phase of ``sig``; assumes sig
        is [batch, pol, samples] complex — TODO confirm."""
        # 4th power removes the 4-fold symmetric modulation phase.
        sig_4th_power = sig*sig*sig*sig

        # Avoid gradient backpropagation through v-v
        sig_4th_power = sig_4th_power.detach()

        # np.exp(1j*np.pi) == -1 (up to rounding): rotates the 4th-power
        # phase by pi before averaging — presumably to center the
        # constellation's phase away from the +-pi wrap; confirm.
        sig_4th_power = movAvr(sig_4th_power*np.exp(1j*np.pi), self.win_size)

        def unwrap(phi, dim=-1):
            # torch counterpart of np.unwrap along the last axis.
            def diff(x, dim=-1, same_size=False):
                if same_size:
                    return nn.functional.pad(x[..., 1:]-x[..., :-1], (1, 0))
                else:
                    return x[..., 1:]-x[..., :-1]
            dphi = diff(phi, same_size=True)
            # Wrap the phase increments into (-pi, pi].
            dphi_m = ((dphi+np.pi) % (2 * np.pi)) - np.pi
            dphi_m[(dphi_m == -np.pi) & (dphi > 0)] = np.pi
            # Correction applied wherever a jump larger than pi occurred.
            phi_adj = dphi_m-dphi
            phi_adj[dphi.abs() < np.pi] = 0
            return phi + phi_adj.cumsum(dim)

        # Divide by 4 to undo the 4th-power phase multiplication.
        phi = unwrap(sig_4th_power.angle())/4

        if self.rotate_fac is not None:
            rotate_fac = self.rotate_fac.to(sig.device)
            # cos - j*sin == exp(-j*phi): de-rotate by the estimated phase.
            return sig*(torch.cos(phi)-1j*torch.sin(phi)) * rotate_fac
        else:
            return sig*(torch.cos(phi)-1j*torch.sin(phi))

class ExpNLLayer(nn.Module):
    """Trainable nonlinear phase-rotation layer.

    Rotates the field by ``exp(j * fac * P)`` where ``P`` is the
    instantaneous power summed over the polarization axis and ``fac``
    is a learnable scalar.
    """

    def __init__(self, init_factor):
        super(ExpNLLayer, self).__init__()
        self.fac = nn.Parameter(
            torch.tensor((init_factor,)), requires_grad=True)

    def forward(self, x):
        # Power-weighted phase, summed over the polarization axis.
        phase = torch.sum(self.fac * x * x.conj(), dim=1, keepdim=True)
        # cos + j*sin == exp(j*phase): apply the nonlinear rotation.
        return (torch.cos(phase) + 1j * torch.sin(phase)) * x

    @staticmethod
    def cal_init_factor(gm, eps, alp_dB, power_dB, step_len):
        """Initial nonlinear factor estimated from fiber parameters."""
        launch_power = 10**(power_dB/10-3)
        attenuation = - np.log(np.power(10, -alp_dB/10))
        eff_len = (1-np.exp(- attenuation * step_len)) / attenuation
        return 8 / 9 * gm * eps * eff_len * launch_power

class NoExpNLLayer(nn.Module):
    """Linearized (first-order) trainable nonlinear layer.

    Instead of a full exp(j*phase) rotation it outputs only the
    perturbation term ``1j * phase * x``.
    """

    def __init__(self, init_factor):
        super(NoExpNLLayer, self).__init__()
        self.fac = nn.Parameter(
            torch.tensor((init_factor,)), requires_grad=True)

    def forward(self, x):
        # Power-weighted phase, summed over the polarization axis.
        phase = torch.sum(self.fac * x * x.conj(), dim=1, keepdim=True)
        # x = (torch.cos(NL_pow) + 1j * torch.sin(NL_pow)) * x
        # First-order (no-exponential) nonlinear term.
        return 1j * phase * x

    @staticmethod
    def cal_init_factor(gm, eps, alp_dB, power_dB, step_len):
        """Initial nonlinear factor estimated from fiber parameters."""
        launch_power = 10**(power_dB/10-3)
        attenuation = - np.log(np.power(10, -alp_dB/10))
        eff_len = (1-np.exp(- attenuation * step_len)) / attenuation
        return 8 / 9 * gm * eps * eff_len * launch_power


if __name__ == '__main__':
    # Offline receiver-DSP demo: load simulated MATLAB transmission data,
    # run the receiver chain (EDC -> downsampling, with optional CMA /
    # phase recovery / LMS steps left commented out), then measure BER/Q
    # and plot both polarizations.
    import scipy.io as scio
    import os
    import util
    import matplotlib.pyplot as plt
    from multiprocessing import Process

    BASE_DIR = os.path.dirname(__file__)

    tstDataPath = os.path.join(
        BASE_DIR, 'testData/BPS_4_seed_1_dBm_4_Loops_20_singleChannel_singlePol')
    # tstDataPath = os.path.join(
    #     BASE_DIR, 'testData/BPS_4_seed_1_dBm_0_Loops_20_singleChannel_dualPol_hasNL.mat')

    # Simulation parameters stored alongside the waveforms in the .mat file.
    data = scio.loadmat(tstDataPath)
    symbolRate = data['SymbolRate'][0, 0]
    bitsPerSymbol = data['BitsPerSymbol'][0, 0]
    spanNum = data['Loops_N'][0, 0]
    rolloff = data['RollOff'][0, 0]
    lp = data['dBm_N'][0, 0]
    hasNL = data['hasNL'][0, 0]
    hasPMD = data['hasPMD'][0, 0]
    prbsx = data['prbsx']
    prbsy = data['prbsy']
    use_cuda = True

    # Gray code
    constellations = np.array([-3+1j*3, -3+1j*1, -3+1j*-3, -3+1j*-1, -1+1j*3, -1+1j*1, -1+1j*-
                               3, -1+1j*-1, 3+1j*3, 3+1j*1, 3+1j*-3, 3+1j*-1, 1+1j*3, 1+1j*1, 1+1j*-3, 1+1j*-1]/np.sqrt(10))

    constellations = torch.from_numpy(constellations)
    # Accumulated dispersion over the whole link: D * (span length * spans).
    spanLen = 80e3
    L = spanLen*spanNum
    DL = 17e-6 * L
    # Received signal, stacked to [batch=1, pol=2, samples].
    sigx = torch.from_numpy(data['signalx'])
    sigy = torch.from_numpy(data['signaly'])
    sigx = sigx.reshape(1, 1, -1)
    sigy = sigy.reshape(1, 1, -1)
    sig = torch.cat([sigx, sigy], dim=1)

    # Back-to-back reference signal, same layout.
    sigxb2b = torch.from_numpy(data['signalx_label'])
    sigyb2b = torch.from_numpy(data['signaly_label'])
    sigxb2b = sigxb2b.reshape(1, 1, -1)
    sigyb2b = sigyb2b.reshape(1, 1, -1)
    sigb2b = torch.cat([sigxb2b, sigyb2b], dim=1)

    # Build the DSP layers: matched filter, dispersion compensation,
    # phase recovery and a trainable LMS equalizer.
    mf = MatchedFilterLayer(alpha=rolloff, span=100,
                            sample_factor=16, case_num=2)
    edc = EDCLayer(symbolRate=symbolRate, DL=DL, case_num=2)
    pr = PhaseRecLayer(1024)
    lms = FIRLayer(tap=32, case_num=2, power_norm=True, centor_one=True)

    if torch.cuda.is_available() and use_cuda:
        sig = sig.cuda()
        sigb2b = sigb2b.cuda()
        lms = lms.cuda()
        edc = edc.cuda()
        mf = mf.cuda()
        pr = pr.cuda()
    choosen_device = sig.device

    ''' DSP procedure for the signal through the link '''

    prbs = np.concatenate([prbsx, prbsy], axis=0)
    with torch.no_grad():
        # sig = mf(sig)
        # Decimate by 8 (presumably 16 samples/symbol -> 2; confirm
        # against the transmitter settings), then compensate dispersion.
        sig = sig[..., 0::8]
        sig = edc(sig)
        ''' CMA '''
        # sig = sig.cpu()
        # sig = FIRLayer.time_vary_infer(
        #     sig,
        #     err_mode='Godard',
        #     iter_num=2,
        #     lr=5e-4,
        #     tap=13,
        #     )

        # sig_np = pick_pol(sig.data.cpu().numpy().squeeze(), prbs, sample_factor=2)
        # sig = torch.from_numpy(sig_np.copy()).to(choosen_device)

        # sig = sig[np.newaxis, ...]

        # sig = pr(sig[..., 1::2])
        # Down to one sample per symbol.
        sig = sig[..., 1::2]

    # lms.fit(sig, err_mode='DDM', constellations=constellations, iter_num=20, block_size=4028, remain=2048)
    # sig = lms(sig)

    sig = sig.cpu().data.numpy().squeeze()

    # BER against the PRBS reference, then convert to a Q-factor.
    sig, ber, _ = util.pr_ber(sig, prbs, constellations.cpu().data.numpy())

    ber = np.mean(ber)
    print(ber)
    Q = util.ber2q(ber)

    print(Q)

    # Constellation plots for both polarizations in separate processes.
    p = Process(target=util.colorfulPlot, args=(
        sig[0, :], prbsx), kwargs={'show': True})
    p.start()

    p2 = Process(target=util.colorfulPlot, args=(
        sig[1, :], prbsy), kwargs={'show': True})
    p2.start()
    ''' end '''

