import util
from util import BlockSelector
import CR_DSPPytorch
import torch
import numpy as np
from torch.utils.data import Dataset
from multiprocessing import cpu_count
import os
import scipy.io as scio
from CR_DSPPytorch import PerturbativeBlockLayer
from bayes_opt import BayesianOptimization
from bayes_opt.util import UtilityFunction
from inspect import currentframe, getframeinfo
import matplotlib.pyplot as plt

# Absolute directory containing this file; anchors all relative data/cache/output paths below.
BASE_DIR = os.path.dirname(__file__)


def pick_indxes_in_group(L):
    """Build the grouped triplet index table for a window of length L.

    Returns an int array of shape
    [8 symmetry variants (I0..I3, IC0..IC3), mCases, 3], where the last
    axis holds (m, n, m + n), all shifted by M = floor(L / 2) so every
    entry is a valid non-negative window index.
    """
    M = int(np.floor(L / 2))
    axis_vals = np.arange(-M, M + 1)
    mm, nn = np.meshgrid(axis_vals, axis_vals, indexing="ij")
    grid = np.stack([mm, nn], axis=-1)

    # Ignored IXPM and SPM: pick the indexes that are not equal to 0,
    # keeping one representative (0 <= m <= n) per symmetry class.
    keep = np.abs(grid[..., 0] * grid[..., 1]) < np.abs(np.floor(L / 2))
    keep &= grid[..., 0] >= 0
    keep &= grid[..., 1] >= 0
    keep &= grid[..., 0] <= grid[..., 1]
    pairs = grid[keep]
    pm, pn = pairs[:, 0], pairs[:, 1]

    # The 8 sign/order variants of every representative (m, n) pair.
    variants = (
        (pm, pn),    # I0:  m, n
        (pn, pm),    # I1:  n, m
        (-pm, -pn),  # I2: -m, -n
        (-pn, -pm),  # I3: -n, -m
        (-pm, pn),   # IC0: -m, n
        (pn, -pm),   # IC1: n, -m
        (pm, -pn),   # IC2: m, -n
        (-pn, pm),   # IC3: -n, m
    )
    table = np.stack(
        [np.stack([a, b, a + b], axis=-1) for a, b in variants], axis=0)
    # Shift from symbol offsets to window indexes.
    return table.astype(int) + M


def pick_indxes(L):
    """Build the ungrouped triplet index table for a window of length L.

    Returns an int array of shape [mCases, 3] holding (m, n, m + n) for
    every offset pair with |m * n| < floor(L / 2), shifted by
    M = floor(L / 2) so all entries are valid non-negative window indexes.
    """
    M = int(np.floor(L / 2))
    axis_vals = np.arange(-M, M + 1)
    mm, nn = np.meshgrid(axis_vals, axis_vals, indexing="ij")
    pairs = np.stack([mm, nn], axis=-1)
    kept = pairs[np.abs(pairs[..., 0] * pairs[..., 1]) < np.abs(np.floor(L / 2))]
    # Append the m + n column, then shift offsets to window indexes.
    return np.concatenate([kept, kept.sum(axis=1, keepdims=True)], axis=1) + M


class NPDataset(Dataset):
    """Dataset of perturbation triplets built from a received signal.

    pol is for two polarizations.
    shape of array: [pol, samples]
    shape of label: [pol, symbols]

    Each item is the triplet product set for a window of ``win_size``
    symbols centered on the requested index (wrapping circularly at the
    sequence edges), plus the label symbol and the original symbol.
    """

    def __init__(self, array, label, win_size, grouping,
                 **make_decision_kwargs):
        """
        Parameters
        ----------
        array : complex ndarray, [pol, samples] or [samples]
            Received signal.
        label : complex ndarray or None
            Reference symbols; when None, hard decisions on ``array``
            (via util.decide) are used instead.
        win_size : int
            Length of the symbol window used to form triplets.
        grouping : bool
            Whether to use the grouped (symmetry-folded) triplet table.
        **make_decision_kwargs
            Optional ``constellations`` and ``mod_order`` for the hard
            decision fallback.
        """
        # NOTE(review): this truncates to single precision before the
        # complex128 upcast below — presumably deliberate quantization of
        # the input; confirm, otherwise cast to complex128 directly.
        self.array = np.csingle(array)

        self.win_size = win_size
        # Look up the default lazily so util.CONST_16QAM is only touched
        # when the caller did not supply constellations.
        if "constellations" in make_decision_kwargs:
            self.constellations = make_decision_kwargs["constellations"]
        else:
            self.constellations = util.CONST_16QAM
        self.mod_order = make_decision_kwargs.get("mod_order", 4)
        if label is not None:
            self.label = label
        else:
            # No reference available: fall back to hard decisions on the
            # (uncorrected) input signal.
            self.label = util.decide(array,
                                     self.constellations,
                                     mod_order=self.mod_order)

        # Normalize 1-D inputs to the [pol, samples] layout.
        if self.label.ndim == 1:
            self.label = self.label.reshape(1, -1)
        if self.array.ndim == 1:
            self.array = self.array.reshape(1, -1)

        self.label = self.label.astype(np.complex128)
        self.array = self.array.astype(np.complex128)

        self.grouping = grouping
        # The triplet index table depends only on win_size and grouping,
        # so compute it once here instead of on every formTriplet call.
        if grouping:
            self._triplet_indexes = pick_indxes_in_group(win_size)
        else:
            self._triplet_indexes = pick_indxes(win_size)

    def __len__(self):
        # One item per symbol (per window center position).
        return self.array.shape[-1]

    def __getitem__(self, indx):
        def view_as_real(x):
            # complex -> [..., 2] with real/imag stacked on the last axis.
            if x.ndim == 0:
                x = x[np.newaxis]
            return np.stack([x.real, x.imag], axis=-1)

        L = self.win_size
        M = int(np.floor(L / 2))
        if isinstance(indx, np.ndarray):
            # Batched access: indx is a 1-D array of center positions.
            assert indx.ndim == 1

            # Window indexes for every center symbol, wrapped circularly
            # at the sequence boundary.  (dtype=np.int was removed in
            # NumPy 1.24; the builtin int is the supported equivalent.)
            indces = np.linspace(indx - M,
                                 indx + M,
                                 num=self.win_size,
                                 axis=-1,
                                 dtype=int)
            indces = indces % self.array.shape[-1]
            # `pol` (not `indx`) avoids shadowing the outer index array.
            ret = np.stack([
                self.array[pol, ...][indces]
                for pol in range(self.array.shape[0])
            ],
                axis=1)

            ori_sym = np.stack(
                [self.array[pol, ...][indx]
                 for pol in range(self.array.shape[0])],
                axis=0)
            ret = self.formTriplet(ret)
            # Shapes of returns (ret, label, ori_sym):
            #   ret:     [len(indx), 2 pols, triplet number, 2 (re/im)]
            #   label:   [2 pols, len(indx), 2 (re/im)]
            #   ori_sym: [2 pols, 2 (re/im)]
            return (
                view_as_real(ret),
                view_as_real(self.label[..., indx]),
                view_as_real(ori_sym),
            )
        else:
            # Single-item access; np.take with mode="wrap" handles windows
            # that run off either end of the sequence.
            ret = np.take(
                self.array,
                indices=np.arange(-M + indx, indx + M + 1),
                axis=-1,
                mode="wrap",
            )
            ori_sym = self.array[:, indx]
            ret = self.formTriplet(ret)
            # Shapes of returns (ret, label, ori_sym):
            #   ret:     [triplet number, 2 (re/im)]
            #   label:   [2 pols, 2 (re/im)]
            #   ori_sym: [2 pols, 2 (re/im)]
            return (
                view_as_real(ret),
                view_as_real(self.label[..., indx]),
                view_as_real(ori_sym),
            )

    def formTriplet(self, sig):
        """Compute triplet products A[m] * sum_pol(A[n] * conj(A[m+n])).

        shape of sig: [Batches, 2 pols, samples] or [2 pols, samples]
        """
        tripletIndexes = self._triplet_indexes

        sigm = sig[..., tripletIndexes[..., 0]]
        sign = sig[..., tripletIndexes[..., 1]]
        sigmpn = sig[..., tripletIndexes[..., 2]]
        P = sign * sigmpn.conj()
        # Sum over the polarization axis; its position shifts by one when
        # the grouped table adds a leading symmetry-variant axis.
        P = np.sum(P, axis=-2 - int(self.grouping), keepdims=True)
        ret = sigm * P

        if self.grouping:
            # Fold the 8 symmetry variants into two groups (I*, IC*) and
            # concatenate them along the triplet axis.
            G1 = np.sum(ret[..., 0:4, :], axis=-2, keepdims=False)
            G2 = np.sum(ret[..., 4:, :], axis=-2, keepdims=False)
            ret = np.concatenate([G1, G2], axis=-1)
        return ret


def gen_data_loader(**kwargs):
    """Wrap the input signal in an NPDataset and return a torch DataLoader.

    Recognized kwargs: sig, prbs (optional), batch_size, win_size,
    constellations, shuffle, mod_order, bit_order, grouping_triplet_flag,
    make_decision_kwargs.
    """
    constellations = kwargs.get("constellations", util.CONST_16QAM)
    mod_order = kwargs.get('mod_order', 4)

    prbs = kwargs.get('prbs', None)
    if prbs is None:
        label = None
    else:
        # Map the reference bit sequence onto symbols so the dataset has
        # ground-truth labels instead of hard decisions.
        label = util.map(prbs,
                         constellations=constellations,
                         mod_order=mod_order,
                         order=kwargs.get('bit_order', 'msb'))

    dataset = NPDataset(
        array=kwargs['sig'],
        label=label,
        win_size=kwargs.get("win_size"),
        grouping=kwargs.get("grouping_triplet_flag", True),
        constellations=constellations,
        mod_order=mod_order,
        make_decision_kwargs=kwargs.get("make_decision_kwargs", {
            'constellations': util.CONST_16QAM,
            'mod_order': 4
        }))

    return torch.utils.data.DataLoader(dataset,
                                       kwargs.get("batch_size"),
                                       shuffle=kwargs.get("shuffle", False))


def simulation(lp_list):
    """Sweep launch powers and compare ERP-PB-NLC, PR-PB-NLC and no-NLC.

    For each launch power ``lp`` in *lp_list* (inserted verbatim into the
    dataset filename — presumably dBm), the received signal and reference
    PRBS are loaded from a .mat test set, first-order perturbation terms
    are computed block-by-block with a PerturbativeBlockLayer, the
    compensation parameters (xi, phi, eta) are jointly tuned by Bayesian
    optimization of the Q factor, and the final Q factors of the three
    techniques are printed and collected.
    """
    frame = currentframe()
    # The function's own name labels the perturbative-matrix cache directory.
    experiment_name = getframeinfo(frame).function
    Q_cache = []     # ERP-PB-NLC Q factor per launch power
    Q_pr_cache = []  # PR-PB-NLC Q factor per launch power
    Q_le_cache = []  # unequalized baseline Q factor per launch power
    for lp in lp_list:
        test_set_path = os.path.join(BASE_DIR, f'data/dataset/tstSet_dBm_{lp}_nn_sf_1.mat')
        data = scio.loadmat(test_set_path)
        sig = data['dataset']
        sig = np.transpose(sig, axes=[1, 0])  # -> [pol, samples]
        prbs = np.stack([data['prbsx'].reshape(-1, ), data['prbsy'].reshape(-1, )], axis=0)
        maxmn = 92           # max symbol offset |m|, |n| of the perturbation window
        symbol_num = 131072  # number of symbols evaluated
        mod_order = 4        # bits per symbol (16QAM)

        # Link/system parameters for the analytic perturbative-matrix calculation.
        cal_kwargs = {
            'Fs': 32e9,
            'sf': 20,
            'span_length': 80e3,
            'span_number': 40,
            'pre_cd': 0.5,
            'rolloff': 0.1,
            'maxmn': maxmn,
            'int_constraint': maxmn,
        }

        '''calculate effective length for ERP'''
        alp = 0.2e-3  # fiber loss — presumably dB per meter (i.e. 0.2 dB/km); confirm
        gm = 1.3e-3   # nonlinear coefficient gamma — presumably 1/(W*m); confirm
        power = 10 ** (lp / 10 - 3)  # launch power: dBm -> Watts
        span_num = 40
        span_len = 80e3  # meters
        alpha = -np.log(np.power(10, -alp / 10))  # dB loss -> linear attenuation
        # NOTE(review): Leff uses the total link length (span_len * span_num)
        # rather than a single span — confirm this matches the ERP reference.
        Leff = (1 - np.exp(- alpha * span_len * span_num)) / alpha

        perturbative_matrix_dir = os.path.join(BASE_DIR, f'perturbative_matrixes/{experiment_name}')
        if not os.path.exists(perturbative_matrix_dir):
            os.makedirs(perturbative_matrix_dir)

        perturbative_matrix_path = os.path.join(perturbative_matrix_dir, f'win_size_{2 * maxmn + 1}.mat')

        # Load/save paths point at the same file — presumably the matrix is
        # loaded from cache when present and computed+saved otherwise; confirm
        # against PerturbativeBlockLayer.cal_init_perturbative_matrix.
        prtMtrx = PerturbativeBlockLayer.cal_init_perturbative_matrix(
            perturbative_matrix_path=perturbative_matrix_path,
            perturbative_matrix_save_path=perturbative_matrix_path,
            **cal_kwargs)

        prtVec = PerturbativeBlockLayer.pert_matrix_to_vec(prtMtrx, maxmn)  # np.complex (2085,)
        '''calculate perturbation terms coefficient end, next calculate triplets'''

        pbl = PerturbativeBlockLayer(2 * maxmn + 1,
                                     init_weight=prtVec,
                                     grouping_triplet=False)  # initial coefficients
        sig = sig[..., 0:symbol_num]
        sig_ori = sig.copy()  # keep the pre-normalization signal for final metrics
        prbs = prbs[..., 0:symbol_num * mod_order]

        sig = torch.from_numpy(sig)
        sig = torch.view_as_real(sig)
        sig = CR_DSPPytorch.norm_power(sig)
        sig = sig[np.newaxis, ...]  # add a batch axis

        sig_len = sig.shape[-2]
        perturbative_terms = torch.zeros([2, sig_len, 2])

        # Process the sequence in overlapping blocks; the extra maxmn + 2
        # samples presumably cover the perturbation window overhang — confirm
        # against BlockSelector.
        step = 512
        block_size = step + maxmn + 2
        bs = BlockSelector(sample_num=sig_len, block_size=block_size, step=step)

        with torch.no_grad():
            for indx in range(len(bs)):
                sig_block = sig[..., bs[indx], :]
                # Keep only the central `step` outputs of each block.
                # NOTE(review): if sig_len is not a multiple of step, the last
                # slice is shorter than the RHS — presumably BlockSelector
                # guarantees this cannot happen; confirm.
                perturbative_terms[:, indx * step:(indx + 1) * step, :] = pbl(sig_block)[...,
                                                                          bs.pre_overhead:bs.pre_overhead + step, :]

        # Back to complex numpy arrays for the metric/optimization code.
        perturbative_terms = perturbative_terms.data.cpu().numpy()
        perturbative_terms = perturbative_terms[..., 0] + 1j * perturbative_terms[..., 1]
        sig = sig.squeeze()
        sig = sig[..., 0] + 1j * sig[..., 1]
        sig = sig.data.cpu().numpy()

        def est_signal_performance(xi, phi, eta):
            # Objective for the Bayesian optimizer: Q factor of the signal
            # after ERP-style compensation with candidate (xi, phi, eta).
            energy_divergence = 1j * 8 / 9 * gm * sig * Leff * power * 3 / 2 * eta
            eqsig = sig + np.exp(1j * phi) * (xi * perturbative_terms - energy_divergence)
            eqsig = eqsig.squeeze()
            ber = util.pr_ber(eqsig, prbs, constellations=util.CONST_16QAM, mod_order=mod_order)[1]
            Q = np.mean(util.ber2q(ber))
            return Q

        optimizer = BayesianOptimization(
            f=est_signal_performance,
            pbounds={'xi': (0, 1e-3), 'phi': (0, 2 * np.pi), 'eta': (10, 50)},
            verbose=1
        )
        utility = UtilityFunction(kind='ei', xi=0.1)
        optimizer.maximize(init_points=20, n_iter=300, acquisition_function=utility)
        xi_opt = optimizer.max['params']['xi']
        phi_opt = optimizer.max['params']['phi']
        eta_opt = optimizer.max['params']['eta']
        print(f'optimal xi: {xi_opt}, optimal phi:{phi_opt} optimal eta:{eta_opt}')

        '''ERP-PB-NLC techniques'''
        # NOTE(review): the energy term is computed from the normalized `sig`
        # but applied to the unnormalized `sig_ori` — confirm this is intended.
        energy_diver = 1j * 8 / 9 * gm * sig * Leff * power * 3 / 2 * eta_opt
        eqsig = sig_ori + (xi_opt * perturbative_terms - energy_diver) * np.exp(1j * phi_opt)
        ber_eq = util.pr_ber(eqsig, prbs, constellations=util.CONST_16QAM, mod_order=mod_order)[1]
        Q_eq = np.mean(util.ber2q(ber_eq))
        print(f' With ERP, the Q factor is {Q_eq}')
        Q_cache.append(Q_eq)

        '''PR-PB-NLC techniques'''
        # Same xi/phi but without the ERP energy-divergence term.
        eqsig_pr = sig_ori + xi_opt * perturbative_terms * np.exp(1j * phi_opt)
        ber_eq_pr = util.pr_ber(eqsig_pr, prbs, constellations=util.CONST_16QAM, mod_order=mod_order)[1]
        Q_eq_pr = np.mean(util.ber2q(ber_eq_pr))
        print(f' PR-PB-NLC, the Q factor is {Q_eq_pr}')
        Q_pr_cache.append(Q_eq_pr)

        '''EDC techniques'''
        # Baseline: no nonlinearity compensation applied at all.
        ber_le = util.pr_ber(sig_ori, prbs, constellations=util.CONST_16QAM, mod_order=mod_order)[1]
        Q_le = np.mean(util.ber2q(ber_le))
        print(f'Without equalization, the Q factor is {Q_le}')
        Q_le_cache.append(Q_le)

    # Dump the per-power Q factors for each technique.
    print('ERP-PB-NLC:')
    for Q in Q_cache:
        print(Q)
    print('PR-PB-NLC:')
    for q in Q_pr_cache:
        print(q)
    print('LE:')
    for q in Q_le_cache:
        print(q)


# def simulation(lp_list):
#     '''reference: Kumar, O. S. Sunish, Abdelkerim Amari, Octavia A. Dobre and Ramachandran Venkatesan.
#     "Enhanced Regular Perturbation-Based Nonlinearity Compensation Technique for Optical Transmission Systems".
#      *IEEE Photonics Journal* 11, no. 4 (Aug. 2019): 1-12. https://doi.org/10.1109/JPHOT.2019.2923568.'''
#
#     frame = currentframe()
#     experiment_name = getframeinfo(frame).function
#     Q_cache = []
#     Q_pr_cache = []
#     Q_le_cache = []
#     for lp in lp_list:
#         test_set_path = os.path.join(BASE_DIR, f'data/dataset/tstSet_dBm_{lp}_nn_sf_1.mat')
#         data = scio.loadmat(test_set_path)
#         sig = data['dataset']
#         sig = np.transpose(sig, axes=[1, 0])
#         prbs = np.stack([data['prbsx'].reshape(-1, ), data['prbsy'].reshape(-1, )], axis=0)
#         maxmn = 92
#         symbol_num = 131072
#         mod_order = 4
#
#         cal_kwargs = {
#             'Fs': 32e9,
#             'sf': 20,
#             'span_length': 80e3,
#             'span_number': 40,
#             'pre_cd': 0.5,
#             'rolloff': 0.1,
#             'maxmn': maxmn,
#             'int_constraint': maxmn,
#         }
#
#         '''calculate effective length for ERP'''
#         alp = 0.2e-3
#         gm = 1.3e-3
#         power = 10 ** (lp / 10 - 3)
#         span_num = 40
#         span_len = 80e3
#         alpha = -np.log(np.power(10, -alp / 10))
#         Leff = (1 - np.exp(- alpha * span_len * span_num)) / alpha
#
#         perturbative_matrix_dir = os.path.join(BASE_DIR, f'perturbative_matrixes/{experiment_name}')
#         if not os.path.exists(perturbative_matrix_dir):
#             os.makedirs(perturbative_matrix_dir)
#
#         perturbative_matrix_path = os.path.join(perturbative_matrix_dir, f'win_size_{2 * maxmn + 1}.mat')
#
#         prtMtrx = PerturbativeBlockLayer.cal_init_perturbative_matrix(
#             perturbative_matrix_path=perturbative_matrix_path,
#             perturbative_matrix_save_path=perturbative_matrix_path,
#             **cal_kwargs)
#
#         prtVec = PerturbativeBlockLayer.pert_matrix_to_vec(prtMtrx, maxmn)  # np.complex (2085,)
#         '''calculate perturbation terms coefficient end, next calculate triplets'''
#
#         pbl = PerturbativeBlockLayer(2 * maxmn + 1,
#                                      init_weight=prtVec,
#                                      grouping_triplet=False)  # initial coefficients
#         sig = sig[..., 0:symbol_num]
#         sig_ori = sig.copy()
#         prbs = prbs[..., 0:symbol_num * mod_order]
#
#         sig = torch.from_numpy(sig)
#         sig = torch.view_as_real(sig)
#         sig = CR_DSPPytorch.norm_power(sig)
#         sig = sig[np.newaxis, ...]
#
#         sig_len = sig.shape[-2]
#         perturbative_terms = torch.zeros([2, sig_len, 2])
#
#         step = 512
#         block_size = step + maxmn + 2
#         bs = BlockSelector(sample_num=sig_len, block_size=block_size, step=step)
#
#         with torch.no_grad():
#             for indx in range(len(bs)):
#                 sig_block = sig[..., bs[indx], :]
#                 perturbative_terms[:, indx * step:(indx + 1) * step, :] = pbl(sig_block)[...,
#                                                                           bs.pre_overhead:bs.pre_overhead + step, :]
#
#         perturbative_terms = perturbative_terms.data.cpu().numpy()
#         perturbative_terms = perturbative_terms[..., 0] + 1j * perturbative_terms[..., 1]
#         sig = sig.squeeze()
#         sig = sig[..., 0] + 1j * sig[..., 1]
#         sig = sig.data.cpu().numpy()
#
#         def est_signal_performance(xi, phi):
#             eqsig = sig + np.exp(1j * phi) * xi * perturbative_terms
#             eqsig = eqsig.squeeze()
#             ber = util.pr_ber(eqsig, prbs, constellations=util.CONST_16QAM, mod_order=mod_order)[1]
#             Q = np.mean(util.ber2q(ber))
#             return Q
#
#         optimizer = BayesianOptimization(
#             f=est_signal_performance,
#             pbounds={'xi': (0, 1e-3), 'phi': (0, 2 * np.pi)},
#             verbose=1
#         )
#         utility = UtilityFunction(kind='ei', xi=0.1)
#         optimizer.maximize(init_points=10, n_iter=100, acquisition_function=utility)
#         xi_opt = optimizer.max['params']['xi']
#         phi_opt = optimizer.max['params']['phi']
#         print(f'optimal xi: {xi_opt}, optimal phi:{phi_opt}')
#
#         '''PR-PB-NLC techniques'''
#         eqsig_pr = sig_ori + xi_opt * perturbative_terms * np.exp(1j * phi_opt)
#         ber_eq_pr = util.pr_ber(eqsig_pr, prbs, constellations=util.CONST_16QAM, mod_order=mod_order)[1]
#         Q_eq_pr = np.mean(util.ber2q(ber_eq_pr))
#         print(f' PR-PB-NLC, the Q factor is {Q_eq_pr}')
#         Q_pr_cache.append(Q_eq_pr)
#
#         def est_signal_performance_erp(eta):
#             energy_divergence = 1j * 8 / 9 * gm * sig * Leff * power * 3 / 2 * eta
#             eqsig = sig + np.exp(1j * phi_opt) * (xi_opt * perturbative_terms - energy_divergence)
#             eqsig = eqsig.squeeze()
#             ber = util.pr_ber(eqsig, prbs, constellations=util.CONST_16QAM, mod_order=mod_order)[1]
#             Q = np.mean(util.ber2q(ber))
#             return Q
#
#         '''ERP-PB-NLC techniques'''
#         optimizer_erp = BayesianOptimization(
#             f=est_signal_performance_erp,
#             pbounds={'eta': (10, 20)},
#             verbose=1,
#         )
#         utility_erp = UtilityFunction(kind='ei')
#         optimizer_erp.maximize(init_points=10, n_iter=300, acquisition_function=utility_erp)
#         eta_opt = optimizer_erp.max['params']['eta']
#         print(f'optimal eta: {eta_opt}')
#
#         energy_diver = 1j * 8 / 9 * gm * sig * Leff * power * 3 / 2 * eta_opt
#         eqsig = sig_ori + (xi_opt * perturbative_terms - energy_diver) * np.exp(1j * phi_opt)
#         ber_eq = util.pr_ber(eqsig, prbs, constellations=util.CONST_16QAM, mod_order=mod_order)[1]
#         Q_eq = np.mean(util.ber2q(ber_eq))
#         print(f' With ERP, the Q factor is {Q_eq}')
#         Q_cache.append(Q_eq)
#
#         '''EDC techniques'''
#         ber_le = util.pr_ber(sig_ori, prbs, constellations=util.CONST_16QAM, mod_order=mod_order)[1]
#         Q_le = np.mean(util.ber2q(ber_le))
#         print(f'Without equalization, the Q factor is {Q_le}')
#         Q_le_cache.append(Q_le)
#
#     print('ERP-PB-NLC:')
#     for Q in Q_cache:
#         print(Q)
#     print('PR-PB-NLC:')
#     for q in Q_pr_cache:
#         print(q)
#     print('LE:')
#     for q in Q_le_cache:
#         print(q)


def plot_perturbative_matrix():
    """Render the cached simulation perturbative matrix as a dB heat map.

    Loads the matrix saved under ``perturbative_matrixes/simulation``,
    normalizes magnitudes by the center coefficient (m = n = 0 tap at
    index [maxmn, maxmn]), converts to dB, and writes the image next to
    the .mat file.
    """
    # Uses the module-level `plt` import; the previous function-local
    # `import matplotlib.pyplot as plt` was redundant and shadowed it.
    maxmn = 92
    perturbative_matrix_path = os.path.join(
        BASE_DIR,
        f'perturbative_matrixes/simulation/win_size_{2 * maxmn + 1}.mat')

    data_load = scio.loadmat(perturbative_matrix_path)
    matrix = data_load['perturbative_matrix']

    matrix_abs = np.abs(matrix)
    # NOTE(review): the *2 presumably converts util.db's 10*log10 scale to
    # an amplitude (20*log10) scale — confirm against util.db.
    matrix_abs_db = util.db(matrix_abs / matrix_abs[maxmn, maxmn]) * 2

    plt.imshow(matrix_abs_db)
    plt.clim(-35, 0)
    plt.savefig(os.path.join(BASE_DIR, 'perturbative_matrixes/simulation/pert_matrix.jpg'))
    plt.close()


if __name__ == '__main__':
    # Launch-power sweep: -2 .. 4 inclusive, passed straight into the
    # dataset filenames by simulation() — presumably dBm.
    lp_list = np.arange(-2, 5)
    simulation(lp_list)

    # plot_perturbative_matrix()
