import math
import os
import re
from inspect import currentframe, getframeinfo
from itertools import chain

import torch.nn as nn
from bayes_opt import BayesianOptimization, UtilityFunction

import CR_DSPPytorch as risdsp
from CR_DSPPytorch import SymbolTimingEstLayer, FIRLayer, PhaseRecLayer
import scipy.io as scio
import numpy as np
import torch
import warnings
from torch.utils.data import DataLoader
import util

# Escalate EVERY warning (e.g. numpy numerical warnings) into an exception so
# silent numerical problems abort the run instead of corrupting results.
warnings.filterwarnings("error")
BASE_DIR = os.path.dirname(__file__)  # directory containing this script

# Default 16-QAM constellation used by the equalizer / BER stages below.
constellations = util.CONST_16QAM


class NPDataset(torch.utils.data.Dataset):
    """Dataset pairing a complex oversampled signal with complex symbol labels.

    The signal is assumed to run at 2 samples per symbol along its last axis;
    item ``i`` yields a window of ``block_size`` samples centered on symbol
    ``i`` (wrapping around the ends) plus the matching label symbol. Complex
    values are exposed as real arrays with a trailing (real, imag) axis.
    """

    def __init__(self, array, label, block_size):
        # Keep both arrays in single-precision complex.
        self.array = np.csingle(array)
        self.label = np.csingle(label)
        self.block_size = block_size
        # Number of parallel cases; assumes the second-to-last axis holds them.
        self.case_num = self.array.shape[-2]

    def __len__(self):
        # One item per label symbol (last axis of the label array).
        return self.label.shape[-1]

    @staticmethod
    def _split_complex(values):
        # Complex array -> real array with a trailing (real, imag) axis.
        return np.stack((values.real, values.imag), axis=-1)

    def __getitem__(self, indx):
        half = int(np.floor(self.block_size / 2))
        if isinstance(indx, np.ndarray):
            # Vectorized access: gather raw samples (no labels, no windowing)
            # and prepend a batch axis.
            return self._split_complex(self.array[np.newaxis, ..., indx])
        # Window of block_size samples centered at 2*indx; 'wrap' makes the
        # negative / overflowing indices circular.
        window = np.take(self.array,
                         indices=np.arange(2 * indx - half, 2 * indx + half),
                         axis=-1,
                         mode='wrap')
        return self._split_complex(window), self._split_complex(self.label[..., indx])


def gen_dataloader(data_path,
                   block_size,
                   batch_size,
                   constellations,
                   symbol_num=0,
                   shuffle=False,
                   count_from_end=False):
    """Load a .mat recording and wrap it into an NPDataset + DataLoader.

    Args:
        data_path: path of a .mat file containing 'prbs' (bits) and 'sig'
            (signal at 2 samples/symbol).
        block_size: window length handed to NPDataset.
        batch_size: DataLoader batch size.
        constellations: constellation table passed to util.map.
        symbol_num: if non-zero, truncate to this many symbols.
        shuffle: forwarded to the DataLoader.
        count_from_end: take the LAST symbol_num symbols instead of the first.

    Returns:
        (dataloader, dataset, prbs) triple.
    """
    bits_per_symbol = 4  # 16-QAM: log2(16) bits per symbol
    mat = scio.loadmat(data_path)
    prbs, sig = mat['prbs'], mat['sig']

    if symbol_num != 0:
        # 2 samples per symbol in sig, bits_per_symbol bits per symbol in prbs.
        if count_from_end:
            sig = sig[..., -2 * symbol_num:]
            prbs = prbs[..., -bits_per_symbol * symbol_num:]
        else:
            sig = sig[..., :2 * symbol_num]
            prbs = prbs[..., :bits_per_symbol * symbol_num]

    label = util.map(prbs, constellations, bits_per_symbol)
    dataset = NPDataset(sig, label, block_size)
    loader = torch.utils.data.DataLoader(dataset,
                                         batch_size,
                                         shuffle=shuffle)
    return loader, dataset, prbs


class AVrNLLayer(nn.Module):
    """Average-power nonlinear compensation layer with a trainable factor.

    The instantaneous power (summed over polarizations) is scaled by a
    learnable factor, smoothed by a fixed moving-average filter, and applied
    as a complex rotation (1 + j*phi) to the input — presumably a first-order
    SPM phase correction (TODO confirm against risdsp.cmul semantics).
    """

    def __init__(self, init_factor, ntap):
        super(AVrNLLayer, self).__init__()
        self.fac = nn.Parameter(torch.tensor((init_factor,)), requires_grad=True)
        self.lpf = risdsp.MovAvrLayer(N=ntap)
        self.ntap = ntap
        # The smoothing filter stays fixed; only `fac` is trainable.
        for param in self.lpf.parameters():
            param.requires_grad_(False)

    def forward(self, x):
        '''x shape: [B, pol, block, 2] (trailing axis is real/imag).'''
        # Unpacking doubles as a 4-D shape check.
        num_batch, num_pol, num_block, _ = x.shape
        inst_power = torch.sum(x ** 2, dim=[1, -1], keepdim=True)
        phase = self.fac * inst_power

        phase = risdsp.format_rt(phase[..., 0])
        phase = self.lpf(phase)

        # Build constant tensors representing j and 1 in the (re, im) layout.
        imag_unit = torch.zeros_like(phase, device=phase.device)
        imag_unit[..., 1] = 1
        real_unit = torch.zeros_like(phase, device=phase.device)
        real_unit[..., 0] = 1

        # (1 + j*phase) applied by complex multiplication.
        rotator = real_unit + risdsp.cmul(imag_unit, phase)
        return risdsp.cmul(rotator, x)

    @staticmethod
    def cal_init_factor(gm, eps, alp_dB, power_dB, step_len):
        """Initial nonlinear factor for one DBP step.

        Args:
            gm: fiber nonlinear coefficient (gamma).
            eps: compensation ratio (fraction of nonlinearity to undo).
            alp_dB: attenuation in dB per unit length (must be non-zero).
            power_dB: launch power in dBm.
            step_len: length of one DBP step.
        """
        launch_power_w = 10 ** (power_dB / 10 - 3)  # dBm -> Watt
        attenuation = -np.log(np.power(10, -alp_dB / 10))  # dB -> Neper
        effective_len = (1 - np.exp(-attenuation * step_len)) / attenuation
        return 8 / 9 * gm * eps * effective_len * launch_power_w


class ADBP(nn.Module):
    """Adaptive digital back-propagation model.

    The link is split into ``step`` segments; each segment applies a linear
    dispersion-compensation layer (EDC) followed by an average-power nonlinear
    layer. An optional frozen post-EDC stage undoes a ``pre_cd`` fraction of
    the total dispersion that was pre-compensated at the transmitter.
    """

    def __init__(self, D=17e-6, c=3e8, wave_length=1552.52e-9, tap_per_edc=256, sample_factor=2, symbol_rate=28e9,
                 power_norm=False, case_num=2, gm=1.3e-3, step=30, trans_length=2400e3, lp=0,
                 alpha=0.2e-3, init_comp_fac=0.1, nltap=3, pre_cd=0.5, edc_init_method='LS_CO',
                 lmbd_each_layer=5.12589879e-05):
        super(ADBP, self).__init__()
        dl_per_step = D * trans_length / step
        len_per_step = trans_length / step
        self.edc_layers = nn.ModuleList([])
        self.nl_layers = nn.ModuleList([])
        self.step = step

        for _ in range(step):
            edc = risdsp.EDCLayer(symbol_rate=symbol_rate, DL=dl_per_step, case_num=case_num, c=c,
                                  wave_length=wave_length, tap=tap_per_edc, sample_factor=sample_factor,
                                  power_norm=power_norm,
                                  init_method=edc_init_method, lmbd_for_LS_CO=lmbd_each_layer)
            nl_init = AVrNLLayer.cal_init_factor(gm=gm, eps=init_comp_fac, alp_dB=alpha, power_dB=lp,
                                                 step_len=len_per_step)
            self.edc_layers.append(edc)
            self.nl_layers.append(AVrNLLayer(nl_init, nltap))
            # NOTE: deliberately updated INSIDE the loop — every layer after the
            # first reuses the lambda that layer 0 actually settled on.
            lmbd_each_layer = self.edc_layers[0].lmbd_for_LS_CO

        self.tail_edc = pre_cd != 0
        if self.tail_edc:
            # Frozen post-compensation stage for the pre-dispersed fraction.
            self.postedc = risdsp.EDCLayer(symbol_rate=symbol_rate, DL=-D * trans_length * pre_cd, case_num=case_num,
                                           c=c,
                                           wave_length=wave_length, tap=2048, sample_factor=sample_factor,
                                           power_norm=power_norm, init_method='FSM')
            for param in self.postedc.parameters():
                param.requires_grad_(False)

    def forward(self, x):
        """Run all (EDC, NL) segment pairs, then the optional tail EDC."""
        for edc, nl in zip(self.edc_layers, self.nl_layers):
            x = nl(edc(x))
        if self.tail_edc:
            x = self.postedc(x)
        return x

    def edc_parameters(self):
        """Iterator over the parameters of every EDC layer."""
        return chain.from_iterable(layer.parameters() for layer in self.edc_layers)

    def nl_parameters(self):
        """Iterator over the parameters of every nonlinear layer."""
        return chain.from_iterable(layer.parameters() for layer in self.nl_layers)


class Experiment():
    """Bundles a dataset + model configuration into a Bayesian-opt objective.

    ``objective_function`` builds an ADBP model from the searched
    hyper-parameters, evaluates its BER on the held dataset, and returns the
    resulting Q factor (higher is better).
    """

    def __init__(self, **kwargs):
        self.model_kwargs = kwargs['model_kwargs']  # base kwargs for ADBP(...)
        self.dataset = kwargs['dataset']            # NPDataset under test
        self.prbs = kwargs['prbs']                  # reference bit sequence
        self.use_cuda = kwargs['use_cuda']
        self.bit_order = kwargs.get('bit_order', 'msb')
        self.proper_lmbd = None  # lambda chosen by the first EDC layer, cached

    def objective_function(self, **kwargs):
        """Objective for BayesianOptimization.

        kwargs:
            nltap: moving-average tap count (float from the optimizer,
                rounded up to an int).
            cr: nonlinear compensation ratio (init_comp_fac).

        Returns:
            Q factor in dB, or -50 when the BER is unusable (e.g. nan).
        """
        nltap = kwargs['nltap']
        cr = kwargs['cr']
        model_kwargs = self.model_kwargs
        model_kwargs['nltap'] = math.ceil(nltap)
        model_kwargs['init_comp_fac'] = cr
        mdl = ADBP(**model_kwargs)
        # Cache layer 0's lambda so subsequent evaluations reuse it.
        self.proper_lmbd = mdl.edc_layers[0].lmbd_for_LS_CO
        self.model_kwargs['lmbd_each_layer'] = self.proper_lmbd
        sig, ber = self.eval_ber(mdl)
        try:
            Q = util.ber2q(np.mean(ber))
        except Exception:  # was a bare except; keep Ctrl-C/SystemExit working
            print(f'Cannot calculate Q. Ber: {ber}')
            Q = -50

        return Q

    def eval_ber(self, model):
        """Run the full receiver chain on the whole dataset and measure BER.

        Pipeline: model (DBP) -> downsample to 1 sample/symbol -> phase
        recovery -> decision-directed LMS equalizer -> BER against the PRBS.

        Returns:
            (equalized complex signal as ndarray, ber).
        """
        with torch.no_grad():
            dataset = self.dataset
            prbs = self.prbs
            use_cuda = self.use_cuda
            if not isinstance(dataset, NPDataset):
                raise ValueError(
                    'dataset is supposed to be an instance of NPDataset'
                )
            model = model.eval()
            lms = FIRLayer(tap=25,
                           case_num=1,
                           power_norm=True,
                           centor_one=True)
            pr = PhaseRecLayer(301)

            if use_cuda and torch.cuda.is_available():
                model = model.cuda()

            chosen_device = next(model.parameters()).device
            # 2 samples per symbol -> the full recording spans 2 * len(dataset).
            block_size = len(dataset) * 2
            indxes = np.arange(block_size)
            sig = dataset[indxes]
            sig = torch.from_numpy(sig).to(chosen_device)

            sig = model(sig)
            # Direct nan test (was torch.sum(isnan) over every dim + truthiness).
            if sig.isnan().any():
                print('nan detected')

            del model  # release model memory before the equalizer stages

            # CMA: PMD equalization originally belonged here, but it has been
            # removed, so it is not considered.
            lms = lms.to(chosen_device)
            pr = pr.to(chosen_device)
            # Downsample to one sample per symbol, then carrier phase recovery.
            sig = pr(sig[..., 1::2, :])

        # Decision-directed LMS training (needs gradients -> outside no_grad).
        lms.fit(sig,
                err_mode='DDM',
                constellations=constellations,
                iter_num=10,
                block_size=4028,
                remain=2048,
                lr=1e-4)
        with torch.no_grad():
            sig = lms(sig)
            sig = torch.view_as_complex(sig)
            sig = sig.cpu().data.numpy().squeeze()
            try:
                sig, ber, _ = util.pr_ber(sig, prbs, constellations)
            except Exception:  # was a bare except; keep Ctrl-C working
                print(sig)
                exit()

        return sig, ber


def vary_lp(lp_range=(1,)):
    """Bayesian search of (nltap, cr) per launch power at a fixed step count.

    For every launch power in ``lp_range``, the matching experimental test set
    is loaded, the ADBP objective is optimized over the nonlinear tap count
    and compensation ratio, and the best Q factor is collected.

    Args:
        lp_range: iterable of launch powers (dBm) to sweep. The default is a
            tuple to avoid the mutable-default-argument pitfall.
    """
    lp_range = [*lp_range]
    # Result sub-directory is named after this function.
    frame = currentframe()
    experiment_name = getframeinfo(frame).function
    resultPath = os.path.join(BASE_DIR, 'result_exp/AVR-DBP')
    AVRDBPsearchPath = os.path.join(resultPath, f'{experiment_name}')
    for needed_dir in (resultPath, AVRDBPsearchPath):
        if not os.path.exists(needed_dir):
            os.makedirs(needed_dir)
            print(f'Created result dir at \"{needed_dir}\"')

    '''Behavior control'''
    batch_size = 1
    tstsetLen = 110000  # number of test symbols taken from each recording
    block_size = 2048
    assert (block_size % 2 == 0)
    constellations = util.CONST_16QAM
    # Search bounds keyed by DBP step count (hand-tuned per configuration).
    nltap_bound_dict = {
        'step_2': (3, 20),
        'step_3': (3, 20),
        'step_9': (2, 20),
        'step_14': (3, 20),
        'step_18': (2, 7),
    }

    cr_bound_dict = {
        'step_2': (-0.1, 2),
        'step_3': (-0.1, 2),
        'step_9': (0.1, 1),
        'step_14': (-0.1, 1.6),
        'step_18': (0.1, 0.5),
    }
    '''Best Q factor for each launch power'''
    Qcache = np.zeros([len(lp_range)])

    for lpIndx, lp in enumerate(lp_range):
        save_dict = {}
        testSetPath = os.path.join(
            BASE_DIR, f'data/experiment/16QAM20GBaud1800kmHe/tstSet_lp_{lp}.mat'
        )
        '''Form dataloaders'''
        data = scio.loadmat(testSetPath)
        spanLen = data['spanLen'][0, 0]
        spanNum = data['spanNum'][0, 0]
        L = spanLen * spanNum  # total transmission length
        symbolRate = 20e9
        test_dataloader, tstSet, prbsTst = gen_dataloader(testSetPath,
                                                          block_size=block_size,
                                                          symbol_num=tstsetLen,
                                                          batch_size=batch_size,
                                                          constellations=constellations,
                                                          shuffle=False)
        '''Form dataloaders end'''

        step = 9  # adjust this to trade compensation accuracy vs complexity
        model_kwargs = {
            'D': -17e-6,
            'tap_per_edc': 256,
            'sample_factor': 2,
            'symbol_rate': symbolRate,
            'power_norm': False,
            'case_num': 1,
            'gm': -1.3e-3,
            'step': step,
            'trans_length': L,
            'lp': lp,
            'alpha': 0.2e-3,
            'pre_cd': 0,
            'edc_init_method': 'FSM',
        }
        nltap_pbounds = nltap_bound_dict[f'step_{step}']
        cr_pbounds = cr_bound_dict[f'step_{step}']

        pbounds = {'nltap': nltap_pbounds, 'cr': cr_pbounds}

        experiment = Experiment(model_kwargs=model_kwargs,
                                dataset=tstSet,
                                prbs=prbsTst,
                                use_cuda=True)
        optimizer = BayesianOptimization(
            f=experiment.objective_function,
            pbounds=pbounds,
            verbose=2
        )
        utility = UtilityFunction(kind='ei', xi=0.05)
        optimizer.maximize(init_points=5, n_iter=10, acquisition_function=utility)
        print(optimizer.max)
        Qcache[lpIndx] = optimizer.max['target']
        save_dict['nltap'] = optimizer.max['params']['nltap']
        save_dict['init_comp_fac'] = optimizer.max['params']['cr']
        # NOTE: per-power saving deliberately disabled for this sweep.
        # scio.savemat(os.path.join(AVRDBPsearchPath, f'search_result_lp_{lp}_step_{step}.mat'), save_dict)
    print(Qcache)


def vary_step(lp=(1,)):
    """Bayesian search of (nltap, cr) over several DBP step counts and powers.

    For every (step count, launch power) pair the ADBP objective is optimized
    and the best Q factor recorded; per-configuration optima are saved as .mat
    files, plus one summary file at the end.

    Args:
        lp: iterable of launch powers (dBm). The default is a tuple to avoid
            the mutable-default-argument pitfall.
    """
    lp_range = [*lp]
    # Result sub-directory is named after this function.
    frame = currentframe()
    experiment_name = getframeinfo(frame).function
    resultPath = os.path.join(BASE_DIR, 'result_exp/AVR-DBP')
    AVRDBPsearchPath = os.path.join(resultPath, f'{experiment_name}')
    for needed_dir in (resultPath, AVRDBPsearchPath):
        if not os.path.exists(needed_dir):
            os.makedirs(needed_dir)
            print(f'Created result dir at \"{needed_dir}\"')

    '''Behavior control'''
    batch_size = 1
    tstsetLen = 110000  # number of test symbols taken from each recording
    block_size = 2048
    assert (block_size % 2 == 0)
    constellations = util.CONST_16QAM
    '''Best Q factor per (step count, launch power)'''
    step_list = [3, 9, 14, 18]
    Qcache = np.zeros([len(step_list), len(lp_range)])

    # Search bounds keyed by DBP step count (hand-tuned per configuration).
    nltap_bound_dict = {
        'step_2': (3, 20),
        'step_3': (3, 20),
        'step_9': (3, 20),
        'step_14': (3, 20),
        'step_18': (2, 7),
    }

    cr_bound_dict = {
        'step_2': (0.1, 2),
        'step_3': (0.1, 2),
        'step_9': (0.1, 1.5),
        'step_14': (0.1, 1.6),
        'step_18': (0.1, 0.5),
    }
    save_result_flag = True
    for step_indx, step in enumerate(step_list):
        # NOTE: the loop variable `lp` shadows the parameter; after the loops
        # it holds the LAST launch power, which the summary filename reuses.
        for lpIndx, lp in enumerate(lp_range):
            save_dict = {}
            testSetPath = os.path.join(
                BASE_DIR,
                f'data/experiment/16QAM20GBaud1800kmHe/tstSet_lp_{lp}.mat'
            )
            '''Form dataloaders'''
            data = scio.loadmat(testSetPath)
            spanLen = data['spanLen'][0, 0]
            spanNum = data['spanNum'][0, 0]
            L = spanLen * spanNum  # total transmission length
            symbolRate = 20e9
            test_dataloader, tstSet, prbsTst = gen_dataloader(testSetPath,
                                                              block_size=block_size,
                                                              symbol_num=tstsetLen,
                                                              batch_size=batch_size,
                                                              constellations=constellations,
                                                              shuffle=False)
            '''Form dataloaders end'''
            '''Total tap count needed to fully equalize the dispersion'''
            total_taps_limit = util.cal_tap_num(DL=17e-6 * L,
                                                ref_freq=193.1e12,
                                                sample_freq=2 * symbolRate)
            least_taps_per_layer = round(total_taps_limit // step)
            print(f'At least {least_taps_per_layer} taps per layer')

            # Option: fix the TOTAL tap budget while sweeping step counts:
            #   init_taps_per_layer = int(math.ceil(256 * 9 / step))
            init_taps_per_layer = 256
            print(f'There are {init_taps_per_layer} taps per layer')

            model_kwargs = {
                'D': -17e-6,
                'tap_per_edc': init_taps_per_layer,
                'sample_factor': 2,
                'symbol_rate': symbolRate,
                'power_norm': False,
                'case_num': 1,
                'gm': -1.3e-3,
                'step': step,
                'trans_length': L,
                'lp': lp,
                'alpha': 0.2e-3,
                'pre_cd': 0,
                'edc_init_method': 'FSM',
                'lmbd_each_layer': 5.12589879e-05,
            }
            nltap_pbounds = nltap_bound_dict[f'step_{step}']
            cr_pbounds = cr_bound_dict[f'step_{step}']

            pbounds = {'nltap': nltap_pbounds, 'cr': cr_pbounds}

            experiment = Experiment(model_kwargs=model_kwargs,
                                    dataset=tstSet,
                                    prbs=prbsTst,
                                    use_cuda=True)
            optimizer = BayesianOptimization(
                f=experiment.objective_function,
                pbounds=pbounds,
                verbose=2
            )
            utility = UtilityFunction(kind='ei', xi=0.05)
            optimizer.maximize(init_points=5, n_iter=10, acquisition_function=utility)
            print(optimizer.max)
            Qcache[step_indx, lpIndx] = optimizer.max['target']
            save_dict['nltap'] = optimizer.max['params']['nltap']
            save_dict['init_comp_fac'] = optimizer.max['params']['cr']
            scio.savemat(os.path.join(AVRDBPsearchPath, f'search_result_lp_{lp}_step_{step}.mat'), save_dict)
    if save_result_flag:
        scio.savemat(os.path.join(resultPath, f'Q_results_in_lp_{lp}_vary_steps.mat'),
                     {'steps': np.array(step_list), 'Q': Qcache})
    print(Qcache)

def vary_tap(lp=(1,)):
    """Bayesian search of (nltap, cr) over per-layer EDC tap counts and powers.

    Fixes the step count at 9 and sweeps the number of taps per EDC layer,
    optimizing the nonlinear hyper-parameters for each configuration. Only
    prints the resulting Q-factor matrix; nothing is saved to disk.

    Args:
        lp: iterable of launch powers (dBm). The default is a tuple to avoid
            the mutable-default-argument pitfall.
    """
    lp_range = [*lp]
    resultPath = os.path.join(BASE_DIR, 'result_exp/AVR-DBP')

    if not os.path.exists(resultPath):
        os.makedirs(resultPath)
        print(f'Created result dir at \"{resultPath}\"')

    '''Behavior control'''
    batch_size = 1
    tstsetLen = 110000  # number of test symbols taken from each recording
    block_size = 2048
    assert (block_size % 2 == 0)
    constellations = util.CONST_16QAM
    '''Best Q factor per (taps-per-layer, launch power)'''
    edc_layer_taps_list = [84, 128, 192, 256]
    step = 9
    Qcache = np.zeros([len(edc_layer_taps_list), len(lp_range)])

    # Search bounds keyed by DBP step count (hand-tuned per configuration).
    nltap_bound_dict = {
        'step_2': (3, 20),
        'step_3': (3, 20),
        'step_9': (3, 20),
        'step_14': (3, 20),
        'step_18': (2, 7),
    }

    cr_bound_dict = {
        'step_2': (0.1, 2),
        'step_3': (0.1, 2),
        'step_9': (0.1, 1.5),
        'step_14': (0.1, 1.6),
        'step_18': (0.1, 0.5),
    }
    for elt_indx, edc_layer_taps in enumerate(edc_layer_taps_list):
        for lpIndx, lp in enumerate(lp_range):
            testSetPath = os.path.join(
                BASE_DIR,
                f'data/experiment/16QAM20GBaud1800kmHe/tstSet_lp_{lp}.mat'
            )
            '''Form dataloaders'''
            data = scio.loadmat(testSetPath)
            spanLen = data['spanLen'][0, 0]
            spanNum = data['spanNum'][0, 0]
            L = spanLen * spanNum  # total transmission length
            symbolRate = 20e9
            test_dataloader, tstSet, prbsTst = gen_dataloader(testSetPath,
                                                              block_size=block_size,
                                                              symbol_num=tstsetLen,
                                                              batch_size=batch_size,
                                                              constellations=constellations,
                                                              shuffle=False)
            '''Form dataloaders end'''
            '''Total tap count needed to fully equalize the dispersion'''
            total_taps_limit = util.cal_tap_num(DL=17e-6 * L,
                                                ref_freq=193.1e12,
                                                sample_freq=2 * symbolRate)
            least_taps_per_layer = round(total_taps_limit // step)
            print(f'At least {least_taps_per_layer} taps per layer')
            print(f'There are {edc_layer_taps} taps per layer')

            model_kwargs = {
                'D': -17e-6,
                'tap_per_edc': edc_layer_taps,
                'sample_factor': 2,
                'symbol_rate': symbolRate,
                'power_norm': False,
                'case_num': 1,
                'gm': -1.3e-3,
                'step': step,
                'trans_length': L,
                'lp': lp,
                'alpha': 0.2e-3,
                'pre_cd': 0,
                'edc_init_method': 'FSM',
                'lmbd_each_layer': 5.12589879e-05,
            }
            nltap_pbounds = nltap_bound_dict[f'step_{step}']
            cr_pbounds = cr_bound_dict[f'step_{step}']

            pbounds = {'nltap': nltap_pbounds, 'cr': cr_pbounds}

            experiment = Experiment(model_kwargs=model_kwargs,
                                    dataset=tstSet,
                                    prbs=prbsTst,
                                    use_cuda=True)
            optimizer = BayesianOptimization(
                f=experiment.objective_function,
                pbounds=pbounds,
                verbose=2
            )
            # xi=0 (pure exploitation) here, unlike the other sweeps.
            utility = UtilityFunction(kind='ei', xi=0)
            optimizer.maximize(init_points=5, n_iter=10, acquisition_function=utility)
            print(optimizer.max)
            Qcache[elt_indx, lpIndx] = optimizer.max['target']
    print(Qcache)


def form_csv_experiment_result(result_path=None, result_name='Q_results_in_lp_1_vary_steps'):
    """Convert a saved step-sweep result (.mat with 'steps' and 'Q') to CSV.

    Args:
        result_path: directory holding the .mat file and receiving the .csv;
            defaults to the original hard-coded result directory.
        result_name: file stem (without extension) to read and write.
    """
    import csv
    if result_path is None:
        result_path = os.path.join(BASE_DIR, 'result_exp/AVR-DBP')

    result_dict = scio.loadmat(os.path.join(result_path, f'{result_name}.mat'))
    Q = result_dict['Q'].squeeze()
    step = result_dict['steps'].squeeze()
    # newline='' is required by the csv module; plain 'w' yields blank rows
    # between records on Windows.
    with open(os.path.join(result_path, f'{result_name}.csv'), 'w', newline='') as f:
        csv_writer = csv.writer(f)
        csv_writer.writerow(['step', 'Q factor'])
        for result_indx in range(step.shape[0]):
            csv_writer.writerow([step[result_indx], Q[result_indx]])

    print('transform has been done!')


if __name__ == '__main__':
    # Entry point: run exactly one experiment at a time (uncomment as needed).
    vary_lp(lp_range=[1])
    # vary_step(lp=[1])
    # vary_tap(lp=[1])
    # form_csv_experiment_result()
