import math
import warnings
from inspect import currentframe, getframeinfo
from typing import Any

import torch
import torch.nn as nn
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.loggers import CSVLogger, TensorBoardLogger
from torch.utils.data import DataLoader, random_split, Dataset
import scipy.io as scio
import numpy as np
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import os

os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
import CR_DSPPytorch as risdsp
import util

BASE_DIR = os.path.dirname(__file__)


class NPDataset(torch.utils.data.Dataset):
    """Dataset of complex-valued signal windows served as stacked (real, imag) arrays.

    `array` is assumed to hold 2 samples per symbol on its last axis (TODO
    confirm against the caller); `label` holds one complex symbol per index.
    Integer indexing returns a wrapped window of `block_size` samples centred
    on the symbol, paired with its label; ndarray indexing returns the raw
    (batched) samples at those positions with no labels.
    """

    def __init__(self, array, label, block_size):
        # Cast to complex64 up front so every __getitem__ is cheap.
        self.array = np.asarray(array).astype(np.csingle)
        self.label = np.asarray(label).astype(np.csingle)
        self.block_size = block_size
        # Number of parallel cases (e.g. polarizations) on the second-to-last axis.
        self.case_num = self.array.shape[-2]

    @staticmethod
    def _split_complex(values):
        # Complex ndarray -> real ndarray with a trailing (real, imag) axis.
        return np.stack((values.real, values.imag), axis=-1)

    def __len__(self):
        # One item per label symbol, not per raw sample.
        return self.label.shape[-1]

    def __getitem__(self, indx):
        half = int(np.floor(self.block_size / 2))
        if isinstance(indx, np.ndarray):
            # Bulk access path: pick raw samples, prepend a batch axis, no labels.
            return self._split_complex(self.array[np.newaxis, ..., indx])
        # 2 samples/symbol: the window [2*indx - half, 2*indx + half) wraps
        # around the record edges via np.take(mode='wrap').
        window = np.take(self.array,
                         indices=np.arange(2 * indx - half, 2 * indx + half),
                         axis=-1, mode='wrap')
        return self._split_complex(window), self._split_complex(self.label[..., indx])


def gen_dataloader(data_path, block_size, batch_size, symbol_num=0, constellations=util.CONST_16QAM, shuffle=False,
                   count_from_end=False):
    """Load a .mat recording and wrap it into (DataLoader, NPDataset, prbs).

    The file is expected to contain 'sig' (2 samples/symbol on the last axis)
    and 'prbs' (4 bits/symbol, matching 16QAM) — TODO confirm for other
    constellations, since modOrder is fixed at 4 here.
    When symbol_num > 0, only that many symbols are kept, taken from the
    start or (count_from_end=True) from the end of the record.
    """
    modOrder = 4  # bits per symbol; hard-coded for 16QAM
    mat = scio.loadmat(data_path)
    prbs = mat['prbs']
    sig = mat['sig']

    if symbol_num != 0:
        # Signal is sliced in samples (2/symbol), prbs in bits (modOrder/symbol).
        if count_from_end:
            sig = sig[..., -symbol_num * 2:]
            prbs = prbs[..., -symbol_num * modOrder:]
        else:
            sig = sig[..., 0:symbol_num * 2]
            prbs = prbs[..., 0:symbol_num * modOrder]

    # Map each polarization's bit stream to constellation symbols and stack
    # them into a (2, N) label array.
    per_pol = [util.map(prbs[pol, ...], constellations).reshape(1, -1) for pol in range(2)]
    label = np.concatenate(per_pol, axis=0)
    dataset = NPDataset(sig, label, block_size)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size, shuffle=shuffle)
    return dataloader, dataset, prbs


def eval_ber(model, dataset, prbs, constellations=util.CONST_16QAM, use_cuda=True):
    if not isinstance(dataset, NPDataset):
        raise ValueError('dataset is supposed to be an instance of NPDataset.')
    model = model.eval()
    lms = risdsp.FIRLayer(tap=32, case_num=2, power_norm=True, centor_one=True)
    pr = risdsp.PhaseRecLayer(1024)

    if use_cuda and torch.cuda.is_available():
        model = model.cuda()

    chosen_device = model.device
    block_size = len(dataset) * 2
    indxes = np.arange(block_size)
    sig = dataset[indxes]
    sig = torch.from_numpy(sig).to(chosen_device)
    sig = model(sig)

    del model

    '''CMA，原本应有均衡PMD的，但由于已经移除，所以这里不做考虑'''
    lms = lms.to(chosen_device)
    pr = pr.to(chosen_device)
    sig = pr(sig[..., 1::2, :])

    lms.fit(sig,
            err_mode='DDM',
            constellations=constellations,
            iter_num=10,
            block_size=4028,
            remain=2048,
            lr=1e-4)
    with torch.no_grad():
        sig = lms(sig)
        sig = torch.view_as_complex(sig)
        sig = sig.cpu().data.numpy().squeeze()

        sig, ber, _ = util.pr_ber(sig, prbs, constellations)
    return sig, ber


class WeightedExpNLLayer(nn.Module):
    """Weighted nonlinear phase-rotation layer.

    Applies exp(j * fac * |x|^2) to the signal, where the instantaneous power
    is low-pass filtered by a Gaussian filter and `fac` is a trainable scalar.
    """

    def __init__(self, init_factor, init_BW, sample_factor=2, case_num=1, span=10):
        super(WeightedExpNLLayer, self).__init__()
        # Trainable nonlinear compensation factor, initialised from the
        # analytic value (see cal_init_factor).
        self.fac = nn.Parameter(torch.tensor((init_factor,)), requires_grad=True)
        self.lpf = risdsp.GaussianLPFLayer(init_BW, span=span, case_num=case_num, sample_factor=sample_factor)

    def forward(self, x):
        """x: real view of a complex signal, shape [B, pol, block, 2]."""
        _batch, _pol, _block, _ = x.shape  # unpack doubles as a rank-4 check
        # Instantaneous power summed over polarizations and (re, im) parts.
        inst_power = torch.sum(x ** 2, dim=[1, -1], keepdim=True)
        phase = self.fac * inst_power

        phase = risdsp.format_rt(phase[..., 0])
        phase = self.lpf(phase)  # smooth the nonlinear phase estimate

        # Pure-imaginary unit j in (real, imag) layout, so riexp(j*phase)
        # is the rotation operator exp(j*phase).
        j_unit = torch.zeros_like(phase, device=phase.device)
        j_unit[..., 1] = 1
        rotator = risdsp.riexp(risdsp.cmul(j_unit, phase))
        return risdsp.cmul(rotator, x)

    @staticmethod
    def cal_init_factor(gm, eps, alp_dB, power_dB, step_len):
        """Analytic init for `fac`: 8/9 * gamma * eps * Leff * launch power.

        alp_dB is the fibre loss in dB per unit length; power_dB is the
        launch power in dBm; step_len is the DBP step length in metres.
        """
        launch_power_w = 10 ** (power_dB / 10 - 3)  # dBm -> W
        alpha_lin = - np.log(np.power(10, -alp_dB / 10))
        effective_len = (1 - np.exp(-alpha_lin * step_len)) / alpha_lin
        return 8 / 9 * gm * eps * effective_len * launch_power_w


class ParallelNDLayer(nn.Module):
    """Parallelized nonlinearity + dispersion step.

    Instead of applying exp(j*phi) and dispersion sequentially, this layer
    pushes both x and j*phi*x through the same EDC layer and sums the
    results — presumably a first-order (perturbative) linearization of the
    exponential rotation; TODO confirm against the accompanying method.

    Note: the `span` constructor argument is accepted for interface
    compatibility but is deliberately recomputed from `init_BW` below.
    """

    def __init__(self, init_factor, init_BW, sample_factor=2, case_num=2, span=10, DL=17e-6 * 100e3, c=3e8,
                 waveLength=1552.52e-9, tap=2048,
                 symbolRate=28e9, power_norm=False, init_method='LS_CO', rolloff=0.01, lmbd_for_LS_CO=None,
                 eps_o_max_db=0.1):
        super(ParallelNDLayer, self).__init__()
        # Trainable nonlinear scaling factor.
        fac = torch.tensor((init_factor,))
        self.fac = nn.Parameter(fac, requires_grad=True)
        # Derive the Gaussian LPF span from the FWHM implied by init_BW
        # (2*sqrt(2*ln 2)/BW); this intentionally overrides the `span` arg.
        ntap = 2 * np.sqrt(2 * np.log(2)) * 1 / init_BW
        span = round(ntap / sample_factor)

        self.lpf = risdsp.GaussianLPFLayer(init_BW, span=span, case_num=1, sample_factor=sample_factor)
        self.nltap = self.lpf.h.shape[-2]
        self.edc = risdsp.EDCLayer(DL=DL, case_num=case_num, c=c, wave_length=waveLength, tap=tap,
                                   sample_factor=sample_factor,
                                   symbol_rate=symbolRate, power_norm=power_norm, init_method=init_method,
                                   rolloff=rolloff,
                                   lmbd_for_LS_CO=lmbd_for_LS_CO, eps_o_max_db=eps_o_max_db)

    def forward(self, x):
        """x: real view of a complex signal, shape [B, pol, block, 2]."""
        B, pol, block, _ = x.shape  # unpack doubles as a rank-4 sanity check
        # Instantaneous power summed over polarizations and (re, im) parts.
        xp = torch.sum(x ** 2, dim=[1, -1], keepdim=True)
        NL_pow = self.fac * xp

        NL_pow = risdsp.format_rt(NL_pow[..., 0])
        NL_pow = self.lpf(NL_pow)  # smooth the nonlinear phase estimate

        # Pure-imaginary unit j in (real, imag) layout.
        complex_unit = torch.zeros_like(NL_pow, device=NL_pow.device)
        complex_unit[..., 1] = 1

        # (Removed an unused all-real "real_unit" tensor that was dead code.)
        # EDC applied to the nonlinear term and the signal in parallel.
        NL_operator = risdsp.cmul(complex_unit, NL_pow)
        xnl = self.edc(risdsp.cmul(NL_operator, x))
        xcd = self.edc(x)
        return xnl + xcd


class LWDBP(pl.LightningModule):
    """Learned weighted digital back-propagation (LW-DBP).

    A symmetric split-step-like chain: a half-step EDC head, (step - 1)
    alternating nonlinear/EDC interior layers, a tail nonlinear layer, a
    half-step EDC, and an optional frozen pre-CD compensation EDC. Trained
    end-to-end with PyTorch Lightning against known transmit symbols.
    """

    def __init__(self, D=17e-6, c=3e8, wave_length=1552.52e-9, tap_per_edc=256, sample_factor=2, symbol_rate=28e9,
                 power_norm=False, case_num=2, gm=1.3e-3, step=30, trans_length=2400e3, lp=0, alpha=0.2e-3,
                 init_comp_fac=(0.1,), init_BW=(0.5,), pr_win=1024, pre_cd=0.5, lr=1e-3, optimizer='Adam',
                 weight_decay=0.02, edc_init_method='LS_CO', lmbd_each_layer=5.12589879e-05):
        """Build the layer chain.

        Note: `init_BW` and `init_comp_fac` are passed as 1-element sequences
        and unwrapped below; `lr` is expected to be a 2-tuple
        (EDC learning rate, NL learning rate) when optimizer='Adam'.
        """
        super(LWDBP, self).__init__()
        # Per-step accumulated dispersion (D*L/step) and step length.
        DLperStep = D * trans_length / step
        stepLen = trans_length / step
        self.edc_layers = nn.ModuleList([])
        self.nl_layers = nn.ModuleList([])
        self.step = step
        self.weight_decay = weight_decay

        # Unwrap the 1-element sequences into scalars.
        init_BW = init_BW[0]
        init_comp_fac = init_comp_fac[0]

        # Head and end EDC layers each carry half a step of dispersion
        # (symmetric split-step arrangement — TODO confirm).
        self.head_edc_layer = risdsp.EDCLayer(symbol_rate=symbol_rate, DL=DLperStep / 2, case_num=case_num,
                                              c=c, wave_length=wave_length, tap=tap_per_edc,
                                              sample_factor=sample_factor,
                                              power_norm=power_norm, init_method=edc_init_method,
                                              lmbd_for_LS_CO=lmbd_each_layer)

        self.end_edc = risdsp.EDCLayer(symbol_rate=symbol_rate, DL=DLperStep / 2, case_num=case_num,
                                       c=c, wave_length=wave_length, tap=tap_per_edc, sample_factor=sample_factor,
                                       power_norm=power_norm, init_method=edc_init_method,
                                       lmbd_for_LS_CO=lmbd_each_layer)

        # Interior steps: nonlinear layer followed by a full-step EDC layer.
        for indx in range(step - 1):
            NLstepInitFac = WeightedExpNLLayer.cal_init_factor(gm=gm, eps=init_comp_fac, power_dB=lp, alp_dB=alpha,
                                                               step_len=stepLen)
            nl_layers = [WeightedExpNLLayer(NLstepInitFac, init_BW, sample_factor=sample_factor)]
            edc_layers = [risdsp.EDCLayer(symbol_rate=symbol_rate, DL=DLperStep, case_num=case_num,
                                          c=c, wave_length=wave_length, tap=tap_per_edc, sample_factor=sample_factor,
                                          power_norm=power_norm, init_method=edc_init_method,
                                          lmbd_for_LS_CO=lmbd_each_layer)]
            self.nl_layers.extend(nl_layers)
            self.edc_layers.extend(edc_layers)

            # Re-use the lambda chosen by this layer's LS_CO init for the next one,
            # so only the first layer searches for it.
            lmbd_each_layer = edc_layers[0].lmbd_for_LS_CO

        # Final nonlinear layer paired with self.end_edc above.
        NLstepInitFac = WeightedExpNLLayer.cal_init_factor(gm=gm, eps=init_comp_fac, alp_dB=alpha, power_dB=lp,
                                                           step_len=stepLen)
        self.tail_nl_layer = WeightedExpNLLayer(NLstepInitFac, init_BW=init_BW, sample_factor=sample_factor)

        # Optional frozen EDC compensating the fraction `pre_cd` of the total
        # dispersion that was pre-compensated at the transmitter side
        # (negative DL; parameters are not trained).
        if pre_cd == 0:
            self.tail_edc = False
        else:
            self.tail_edc = True
            self.edc = risdsp.EDCLayer(symbol_rate=symbol_rate, DL=-D * trans_length * pre_cd, case_num=case_num, c=c,
                                       wave_length=wave_length, tap=2048, sample_factor=sample_factor,
                                       power_norm=power_norm, init_method='FSM')
            for p in self.edc.parameters():
                p.requires_grad_(False)

        self.pr = risdsp.PhaseRecLayer(pr_win)
        self.lr = lr
        self.optimizer = optimizer

        # Encode the optimizer choice as an int so it can live in the
        # (numeric) hyper-parameter dict below.
        if self.optimizer == 'Adam':
            hp_opt = 0
        elif self.optimizer == 'SGD':
            hp_opt = 1
        else:
            raise ValueError('Invalid optimizer')

        # Kept for external logging; note this is not passed through
        # Lightning's save_hyperparameters().
        self.hyper_parameters = {
            'D': D,
            'c': c,
            'wave_length': wave_length,
            'tap_per_edc': tap_per_edc,
            'sample_factor': sample_factor,
            'symbol_rate': symbol_rate,
            'power_norm': power_norm,
            'case_num': case_num,
            'gm': gm,
            'step': step,
            'trans_length': trans_length,
            'lp': lp,
            'alpha': alpha,
            'init_comp_fac': init_comp_fac,
            'init_BW': init_BW,
            'pr_win': pr_win,
            'pre_cd': pre_cd,
            'lr': lr,
            'weight_decay': weight_decay,
            'optimizer': hp_opt,
            'edc_init_method': edc_init_method,
        }

    def forward(self, x):
        """Run the DBP chain; x is a real view [B, pol, block, 2].

        In training mode the output is additionally downsampled to
        1 sample/symbol and phase-recovered; in eval mode callers do that
        themselves (see validation_step / eval_ber).
        """
        x = self.head_edc_layer(x)
        for indx in range(self.step - 1):
            x = self.nl_layers[indx](x)
            x = self.edc_layers[indx](x)

        x = self.tail_nl_layer(x)
        x = self.end_edc(x)

        if self.tail_edc:
            x = self.edc(x)

        if self.training:
            # 2 samples/symbol -> 1 (odd samples), then phase recovery.
            x = x[..., 1::2, :]
            x = self.pr(x)

        return x

    def edc_parameters(self):
        """All trainable parameters of the linear (EDC) layers, as one list."""
        edc_parameters_list = []
        edc_parameters_list += list(self.head_edc_layer.parameters())
        for indx in range(self.step - 1):
            edc_parameters_list += list(self.edc_layers[indx].parameters())
        edc_parameters_list += list(self.end_edc.parameters())
        return edc_parameters_list

    def nl_parameters(self):
        """All trainable parameters of the nonlinear layers (fac + LPF taps)."""
        nl_parameters_list = []
        for indx in range(self.step - 1):
            nl_parameters_list += [self.nl_layers[indx].fac]
            nl_parameters_list += list(self.nl_layers[indx].lpf.parameters())
        nl_parameters_list += [self.tail_nl_layer.fac]
        nl_parameters_list += list(self.tail_nl_layer.lpf.parameters())
        return nl_parameters_list

    def configure_optimizers(self):
        """Adam with separate learning rates for EDC and NL parameter groups,
        plus a val_loss-driven ReduceLROnPlateau scheduler."""
        if self.optimizer == 'Adam':
            optimizer = optim.Adam(
                [
                    {'params': self.edc_parameters(), 'lr': self.lr[0]},
                    {'params': self.nl_parameters(), 'lr': self.lr[1]}
                ], weight_decay=self.weight_decay
            )
            scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=2,
                                                             verbose=False)
            return (
                {
                    'optimizer': optimizer,
                    'lr_scheduler': {
                        'scheduler': scheduler,
                        'monitor': 'val_loss',
                    }
                }
            )
        elif self.optimizer == 'SGD':
            # NOTE(review): self.lr is a tuple in the Adam path; passing it
            # directly to SGD (and weight_decay * 0) looks unfinished — confirm
            # before using the SGD branch.
            return optim.SGD(self.parameters(), lr=self.lr, weight_decay=self.weight_decay * 0)
        else:
            raise ValueError('Invalid optimizer')

    def err_function(self, x, y):
        """MSE between output and labels, minimized over the 4-fold phase
        ambiguity (0, pi/2, pi, 3pi/2) left by blind phase recovery."""
        batchsize = x.shape[0]
        if x.ndim == 4:
            # Block input: score only the centre symbol of each block.
            x = x[..., round(x.shape[-2] // 2), :]
        rotate_fac = np.exp(1j * np.array([0, np.pi / 2, np.pi, np.pi / 2 * 3]))
        rotate_fac = rotate_fac.reshape(*[1] * (x.ndim - 1), -1)
        rotate_fac = torch.from_numpy(rotate_fac).to(x.device)
        rotate_fac.requires_grad_(False)
        rotate_fac = torch.view_as_real(rotate_fac)

        # Broadcast each sample against the 4 candidate rotations.
        x = x[..., np.newaxis, :]
        y = y[..., np.newaxis, :]

        x = risdsp.cmul(x, rotate_fac)
        res = y - x
        err = torch.sum(res ** 2, dim=-1)   # squared distance per rotation
        err = torch.min(err, dim=-1)[0]     # best rotation per sample
        err = torch.mean(err, dim=[0, 1])   # mean over batch and polarization
        return err

    def training_step(self, batch, batch_indx):
        x, y = batch
        output = self(x)
        err = self.err_function(output, y)
        # Scale the loss for readability in the logs / scheduler thresholds.
        err = err * 1000
        self.log('train_loss', err, on_step=False, on_epoch=True, prog_bar=True, logger=True)
        return {'loss': err}

    def validation_step(self, batch, batch_indx):
        x, y = batch
        output = self(x)
        # In eval mode forward() skips downsampling/phase recovery, so apply
        # them here (gradient-free) to mirror the training path.
        with torch.no_grad():
            output = output[..., 1::2, :]
            output = self.pr(output)
        err = self.err_function(output, y)
        err = err * 1000
        self.log('val_loss', err, on_step=False, on_epoch=True, prog_bar=True, logger=True)
        return {'val_loss': err}

    def test_step(self, batch, batch_indx):
        # NOTE(review): stub — the forward output is discarded and nothing is
        # logged; real evaluation happens in eval_ber().
        x, y = batch
        output = self(x)
        pass


def vary_lp(lp_range=[1]):
    """Train and evaluate one LWDBP model per launch power in `lp_range`.

    For each lp: builds train/val/test dataloaders from the simulated 16QAM
    32GBaud data, seeds init_BW/init_comp_fac from a previous LPF-DBP grid
    search when available, trains with Lightning (early stopping + best-val
    checkpointing), then reports the test Q factor via eval_ber.
    Results/checkpoints/logs go under result/LWDBP/vary_lp.
    """
    lp_range = [*lp_range]
    # Experiment name = this function's name, used for result directories.
    frame = currentframe()
    experiment_name = getframeinfo(frame).function
    resultDir = os.path.join(BASE_DIR, 'result/LWDBP')
    if not os.path.exists(resultDir):
        os.makedirs(resultDir)
        print(f'Create result dir at \"{resultDir}\"')
    resultPath = os.path.join(resultDir, '{}'.format(experiment_name))
    if not os.path.exists(resultPath):
        os.mkdir(resultPath)
        print(f'Create result path at \"{resultPath}\"')
    checkpointSavePath = os.path.join(resultPath, 'chkpts')
    # Location of earlier LPF-DBP parameter-search results (optional).
    WDBPsearchPath = os.path.join(BASE_DIR, f'result/LPF-DBP/{experiment_name}')

    if not os.path.exists(checkpointSavePath):
        os.mkdir(checkpointSavePath)
        print(f'Create result path at \"{checkpointSavePath}\"')

    '''Behavior control'''
    max_epochs = 10
    batch_size = 64
    splitAmount = 65536
    # 7:3 train/validation split of splitAmount symbols.
    tr_val_ratio = 7
    trsetLen = round(splitAmount * tr_val_ratio / (tr_val_ratio + 3))
    valsetLen = round(splitAmount * 3 / (tr_val_ratio + 3))
    tstsetLen = 131072
    # (EDC lr, NL lr) — consumed by LWDBP.configure_optimizers.
    lr = (5e-6, 5e-7)
    patience = 6
    log_interval = 20

    block_size = 2048
    assert (block_size % 2 == 0)
    constellations = util.CONST_16QAM
    resume_from_checkpoints = False
    print(f'Resume from checkpoint: {resume_from_checkpoints}')

    for lpIndx, lp in enumerate(lp_range):
        trSetPath = os.path.join(
            BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/PMD_eliminated_by_CMA/trSet_lp_{}.mat'.format(lp)
        )
        # NOTE: validation deliberately reads the training file; the val split
        # is taken from its tail via count_from_end=True below.
        valSetPath = os.path.join(
            BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/PMD_eliminated_by_CMA/trSet_lp_{}.mat'.format(lp)
        )
        tstSetPath = os.path.join(
            BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/PMD_eliminated_by_CMA/tstSet_lp_{}.mat'.format(lp)
        )
        '''Form dataloaders'''
        # Transmission length = span length * number of spans (from the file).
        spanLen = 80e3
        data = scio.loadmat(trSetPath)
        spanNum = data['spanNum'][0, 0]
        L = spanLen * spanNum
        symbolRate = 32e9

        train_dataloader, _, _ = gen_dataloader(trSetPath,
                                                block_size=block_size,
                                                symbol_num=trsetLen,
                                                batch_size=batch_size,
                                                constellations=constellations,
                                                shuffle=True)
        val_dataloader, valSet, prbsVal = gen_dataloader(valSetPath,
                                                         block_size=block_size,
                                                         symbol_num=valsetLen,
                                                         batch_size=batch_size,
                                                         constellations=constellations,
                                                         shuffle=False,
                                                         count_from_end=True)
        test_dataloader, tstSet, prbsTst = gen_dataloader(tstSetPath,
                                                          block_size=block_size,
                                                          symbol_num=tstsetLen,
                                                          batch_size=batch_size,
                                                          constellations=constellations,
                                                          shuffle=False)
        '''Form dataloader end'''
        # Fixed total tap budget shared equally across the DBP steps.
        step = 20
        total_taps = 2560
        least_taps_per_layer = int(math.ceil(total_taps / step))

        # Seed init values from the earlier grid search when available.
        if os.path.exists(WDBPsearchPath):
            dat = scio.loadmat(os.path.join(WDBPsearchPath, f'search_result_lp_{lp}_step_{step}.mat'))
            init_BW = [dat['init_BW'][0][0]]
            init_comp_fac = [dat['init_comp_fac'][0][0]]
            print('Previous search result read.')
        else:
            init_BW = [0.9]
            init_comp_fac = [0.9]
            print('Init BW to 0.9 and cr 0.9.')

        model_kwargs = {
            'D': 17e-6,
            'tap_per_edc': least_taps_per_layer,
            'sample_factor': 2,
            'symbol_rate': symbolRate,
            'power_norm': False,
            'case_num': 2,
            'gm': 1.3e-3,
            'step': step,
            'trans_length': L,
            'lp': lp,
            'alpha': 0.2e-3,
            'init_comp_fac': init_comp_fac,
            'pr_win': 1024,
            'pre_cd': 0.5,
            'lr': lr,
            'init_BW': init_BW,
            'optimizer': 'Adam',
            'weight_decay': 0,
            'edc_init_method': 'LS_CO',
            'lmbd_each_layer': 5.12589879e-05,
        }

        mdl = LWDBP(**model_kwargs)

        '''Specify callbacks'''
        early_stop_callback = EarlyStopping(monitor='val_loss', patience=patience, strict=False,
                                            verbose=True, mode='min', min_delta=1e-2)
        # Keep only the best-val_loss weights, named by lp/step/taps.
        check_point_callback = ModelCheckpoint(
            monitor='val_loss',
            verbose=True,
            save_last=None,
            save_top_k=1,
            save_weights_only=True,
            mode='min',
            dirpath=checkpointSavePath,
            filename=f'LWDBP_lp_{lp}_step_{step}_tapsPerLinearLayer_{least_taps_per_layer}',
        )
        '''Specify callbacks end'''
        logger = TensorBoardLogger(save_dir=resultPath, name='log', version=f'lp_{lp}')
        trainer_kwargs = {
            'accelerator': 'gpu',
            'callbacks': [early_stop_callback, check_point_callback],
            'max_epochs': max_epochs,
            'check_val_every_n_epoch': 1,
            'num_sanity_val_steps': 0,
            'log_every_n_steps': log_interval,
            'val_check_interval': 200,
            'logger': logger,
        }

        trainer = pl.Trainer(**trainer_kwargs)
        if resume_from_checkpoints:
            ckpt_path = os.path.join(checkpointSavePath, 'model.ckpt')
        else:
            ckpt_path = None
        trainer.fit(mdl, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader, ckpt_path=ckpt_path)

        # Full receiver-chain evaluation on the test set, reported as Q factor.
        sig, ber = eval_ber(mdl, tstSet, prbsTst, constellations=util.CONST_16QAM, use_cuda=False)
        Q = util.ber2q(np.mean(ber))
        print(f'lp {lp}, Q factor {Q}')


def eval_vary_lp(lp_range=[1]):
    """Evaluate previously trained vary_lp checkpoints (no training).

    Mirrors vary_lp's data pipeline and model construction, but loads the
    saved checkpoint for each launch power and reports the test Q factor.
    """
    lp_range = [*lp_range]
    # Experiment name = this function's name (computed but the result path
    # below is hard-wired to 'vary_lp', the training run's directory).
    frame = currentframe()
    experiment_name = getframeinfo(frame).function
    resultDir = os.path.join(BASE_DIR, 'result/LWDBP')
    if not os.path.exists(resultDir):
        os.makedirs(resultDir)
        print(f'Create result dir at \"{resultDir}\"')
    resultPath = os.path.join(resultDir, 'vary_lp')
    if not os.path.exists(resultPath):
        raise ValueError('Here is lack of vary_lp path')
    checkpointSavePath = os.path.join(resultPath, 'chkpts')
    WDBPsearchPath = os.path.join(BASE_DIR, f'result/LPF-DBP/vary_lp')

    if not os.path.exists(checkpointSavePath):
        os.mkdir(checkpointSavePath)
        print(f'Create result path at \"{checkpointSavePath}\"')

    '''Behavior control'''
    batch_size = 32
    trsetLen = 8192
    valsetLen = 4096
    tstsetLen = 131072
    lr = (5e-6, 5e-7)

    block_size = 2048
    assert (block_size % 2 == 0)
    constellations = util.CONST_16QAM
    resume_from_checkpoints = False
    print(f'Resume from checkpoint: {resume_from_checkpoints}')

    for lpIndx, lp in enumerate(lp_range):
        trSetPath = os.path.join(
            BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/PMD_eliminated_by_CMA/trSet_lp_{}.mat'.format(lp)
        )
        valSetPath = os.path.join(
            BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/PMD_eliminated_by_CMA/trSet_lp_{}.mat'.format(lp)
        )
        tstSetPath = os.path.join(
            BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/PMD_eliminated_by_CMA/tstSet_lp_{}.mat'.format(lp)
        )
        '''Form dataloaders'''
        spanLen = 80e3
        data = scio.loadmat(trSetPath)
        spanNum = data['spanNum'][0, 0]
        L = spanLen * spanNum
        symbolRate = 32e9

        # NOTE(review): train/val loaders are built but never used in this
        # evaluation-only path; only the test set feeds eval_ber below.
        train_dataloader, _, _ = gen_dataloader(trSetPath,
                                                block_size=block_size,
                                                symbol_num=trsetLen,
                                                batch_size=batch_size,
                                                constellations=constellations,
                                                shuffle=True)
        val_dataloader, valSet, prbsVal = gen_dataloader(valSetPath,
                                                         block_size=block_size,
                                                         symbol_num=valsetLen,
                                                         batch_size=batch_size,
                                                         constellations=constellations,
                                                         shuffle=False,
                                                         count_from_end=True)
        test_dataloader, tstSet, prbsTst = gen_dataloader(tstSetPath,
                                                          block_size=block_size,
                                                          symbol_num=tstsetLen,
                                                          batch_size=batch_size,
                                                          constellations=constellations,
                                                          shuffle=False)
        '''Form dataloader end'''
        # Must match the trained configuration so the checkpoint loads.
        step = 20
        total_taps = 2560
        least_taps_per_layer = int(math.ceil(total_taps / step))

        if os.path.exists(WDBPsearchPath):
            dat = scio.loadmat(os.path.join(WDBPsearchPath, f'search_result_lp_{lp}_step_{step}.mat'))
            init_BW = [dat['init_BW'][0][0]]
            init_comp_fac = [dat['init_comp_fac'][0][0]]
            print('Previous search result read.')
        else:
            init_BW = [0.9]
            init_comp_fac = [0.9]
            print('Init BW to 0.9 and cr 0.9.')

        model_kwargs = {
            'D': 17e-6,
            'tap_per_edc': least_taps_per_layer,
            'sample_factor': 2,
            'symbol_rate': symbolRate,
            'power_norm': False,
            'case_num': 2,
            'gm': 1.3e-3,
            'step': step,
            'trans_length': L,
            'lp': lp,
            'alpha': 0.2e-3,
            'init_comp_fac': init_comp_fac,
            'pr_win': 1024,
            'pre_cd': 0.5,
            'lr': lr,
            'init_BW': init_BW,
            'optimizer': 'Adam',
            'weight_decay': 0,
            'edc_init_method': 'LS_CO',
            'lmbd_each_layer': 5.12589879e-05,
        }

        # Restore the checkpoint written by vary_lp's ModelCheckpoint.
        mdl = LWDBP.load_from_checkpoint(os.path.join(
            checkpointSavePath, f'LWDBP_lp_{lp}_step_{step}_tapsPerLinearLayer_{least_taps_per_layer}.ckpt'
        ), **model_kwargs)

        sig, ber = eval_ber(mdl, tstSet, prbsTst, constellations=util.CONST_16QAM, use_cuda=False)
        Q = util.ber2q(np.mean(ber))
        print(f'lp {lp}, Q factor {Q}')


def vary_step(step_range=[20]):
    """Train and evaluate one LWDBP model per step count in `step_range`,
    at a fixed launch power (lp = 2 dBm).

    Same pipeline as vary_lp, but the swept variable is the number of DBP
    steps; the total tap budget stays fixed, so taps per layer shrink as
    steps grow. Results/checkpoints/logs go under result/LWDBP/vary_step.
    """
    step_range = [*step_range]
    lp = 2
    # Experiment name = this function's name, used for result directories.
    frame = currentframe()
    experiment_name = getframeinfo(frame).function
    resultDir = os.path.join(BASE_DIR, 'result/LWDBP')
    if not os.path.exists(resultDir):
        os.makedirs(resultDir)
        print(f'Create result dir at \"{resultDir}\"')
    resultPath = os.path.join(resultDir, '{}'.format(experiment_name))
    if not os.path.exists(resultPath):
        os.mkdir(resultPath)
        print(f'Create result path at \"{resultPath}\"')
    checkpointSavePath = os.path.join(resultPath, 'chkpts')
    WDBPsearchPath = os.path.join(BASE_DIR, f'result/LPF-DBP/{experiment_name}')

    if not os.path.exists(checkpointSavePath):
        os.mkdir(checkpointSavePath)
        print(f'Create result path at \"{checkpointSavePath}\"')

    '''Behavior control'''
    max_epochs = 10
    batch_size = 64
    splitAmount = 65536
    # 7:3 train/validation split of splitAmount symbols.
    tr_val_ratio = 7
    trsetLen = round(splitAmount * tr_val_ratio / (tr_val_ratio + 3))
    valsetLen = round(splitAmount * 3 / (tr_val_ratio + 3))
    tstsetLen = 131072
    # (EDC lr, NL lr) — consumed by LWDBP.configure_optimizers.
    lr = (5e-6, 5e-7)
    patience = 6
    log_interval = 20

    block_size = 2048
    assert (block_size % 2 == 0)
    constellations = util.CONST_16QAM
    resume_from_checkpoints = False
    print(f'Resume from checkpoint: {resume_from_checkpoints}')

    for stIndx, step in enumerate(step_range):
        trSetPath = os.path.join(
            BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/PMD_eliminated_by_CMA/trSet_lp_{}.mat'.format(lp)
        )
        # NOTE: validation deliberately reads the training file; the val split
        # is taken from its tail via count_from_end=True below.
        valSetPath = os.path.join(
            BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/PMD_eliminated_by_CMA/trSet_lp_{}.mat'.format(lp)
        )
        tstSetPath = os.path.join(
            BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/PMD_eliminated_by_CMA/tstSet_lp_{}.mat'.format(lp)
        )
        '''Form dataloaders'''
        spanLen = 80e3
        data = scio.loadmat(trSetPath)
        spanNum = data['spanNum'][0, 0]
        L = spanLen * spanNum
        symbolRate = 32e9

        train_dataloader, _, _ = gen_dataloader(trSetPath,
                                                block_size=block_size,
                                                symbol_num=trsetLen,
                                                batch_size=batch_size,
                                                constellations=constellations,
                                                shuffle=True)
        val_dataloader, valSet, prbsVal = gen_dataloader(valSetPath,
                                                         block_size=block_size,
                                                         symbol_num=valsetLen,
                                                         batch_size=batch_size,
                                                         constellations=constellations,
                                                         shuffle=False,
                                                         count_from_end=True)
        test_dataloader, tstSet, prbsTst = gen_dataloader(tstSetPath,
                                                          block_size=block_size,
                                                          symbol_num=tstsetLen,
                                                          batch_size=batch_size,
                                                          constellations=constellations,
                                                          shuffle=False)
        '''Form dataloader end'''
        # Fixed total tap budget divided across the swept step count.
        total_taps = 2560
        least_taps_per_layer = int(math.ceil(total_taps / step))

        # Seed init values from the earlier grid search when available.
        if os.path.exists(WDBPsearchPath):
            dat = scio.loadmat(os.path.join(WDBPsearchPath, f'search_result_lp_{lp}_step_{step}.mat'))
            init_BW = [dat['init_BW'][0][0]]
            init_comp_fac = [dat['init_comp_fac'][0][0]]
            print('Previous search result read.')
        else:
            init_BW = [0.9]
            init_comp_fac = [0.9]
            print('Init BW to 0.9 and cr 0.9.')

        model_kwargs = {
            'D': 17e-6,
            'tap_per_edc': least_taps_per_layer,
            'sample_factor': 2,
            'symbol_rate': symbolRate,
            'power_norm': False,
            'case_num': 2,
            'gm': 1.3e-3,
            'step': step,
            'trans_length': L,
            'lp': lp,
            'alpha': 0.2e-3,
            'init_comp_fac': init_comp_fac,
            'pr_win': 1024,
            'pre_cd': 0.5,
            'lr': lr,
            'init_BW': init_BW,
            'optimizer': 'Adam',
            'weight_decay': 0,
            'edc_init_method': 'LS_CO',
            'lmbd_each_layer': 5.12589879e-05,
        }

        mdl = LWDBP(**model_kwargs)

        '''Specify callbacks'''
        early_stop_callback = EarlyStopping(monitor='val_loss', patience=patience, strict=False,
                                            verbose=True, mode='min', min_delta=1e-2)
        # Keep only the best-val_loss weights, named by lp/step/taps.
        check_point_callback = ModelCheckpoint(
            monitor='val_loss',
            verbose=True,
            save_last=None,
            save_top_k=1,
            save_weights_only=True,
            mode='min',
            dirpath=checkpointSavePath,
            filename=f'LWDBP_lp_{lp}_step_{step}_tapsPerLinearLayer_{least_taps_per_layer}',
        )
        '''Specify callbacks end'''
        logger = TensorBoardLogger(save_dir=resultPath, name='log', version=f'step_{step}')
        trainer_kwargs = {
            'accelerator': 'gpu',
            'callbacks': [early_stop_callback, check_point_callback],
            'max_epochs': max_epochs,
            'check_val_every_n_epoch': 1,
            'num_sanity_val_steps': 0,
            'log_every_n_steps': log_interval,
            'val_check_interval': 200,
            'logger': logger,
        }

        trainer = pl.Trainer(**trainer_kwargs)
        if resume_from_checkpoints:
            ckpt_path = os.path.join(checkpointSavePath, 'model.ckpt')
        else:
            ckpt_path = None
        trainer.fit(mdl, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader, ckpt_path=ckpt_path)

        # Full receiver-chain evaluation on the test set, reported as Q factor.
        sig, ber = eval_ber(mdl, tstSet, prbsTst, constellations=util.CONST_16QAM, use_cuda=False)
        Q = util.ber2q(np.mean(ber))
        print(f'lp {lp}, step {step}, Q factor {Q}')
        # Release the trained model before the next (larger) configuration.
        del mdl


def eval_vary_step(step_range=(20,)):
    """Evaluate the LWDBP checkpoints produced by the ``vary_step`` sweep.

    For every DBP step count in *step_range*, reconstructs the model
    configuration used during training, loads the matching checkpoint from
    ``result/LWDBP/vary_step/chkpts`` and prints the Q factor measured on
    the full test set.

    Args:
        step_range: iterable of DBP step counts to evaluate; a checkpoint
            trained with the same step count must already exist.
    """
    step_range = [*step_range]  # defensive copy; also accepts any iterable
    lp = 2  # launch power tag used in dataset / checkpoint file names
    resultDir = os.path.join(BASE_DIR, 'result/LWDBP')
    if not os.path.exists(resultDir):
        os.makedirs(resultDir)
        print(f'Create result dir at \"{resultDir}\"')
    resultPath = os.path.join(resultDir, 'vary_step')
    if not os.path.exists(resultPath):
        os.mkdir(resultPath)
        print(f'Create result path at \"{resultPath}\"')
    checkpointSavePath = os.path.join(resultPath, 'chkpts')
    # Filter parameters found by the earlier LPF-DBP grid search; reused as
    # model initialisation when present (must match what training used).
    WDBPsearchPath = os.path.join(BASE_DIR, 'result/LPF-DBP/vary_step')

    if not os.path.exists(checkpointSavePath):
        os.mkdir(checkpointSavePath)
        print(f'Create result path at \"{checkpointSavePath}\"')

    '''Behavior control'''
    batch_size = 32
    tstsetLen = 131072
    lr = (5e-6, 5e-7)  # unused at eval time but required by the model ctor

    block_size = 2048
    assert (block_size % 2 == 0)
    constellations = util.CONST_16QAM

    for stIndx, step in enumerate(step_range):
        # The training-set file is still loaded below, but only for its span
        # count; the unused train/val dataloaders of the training routine
        # are intentionally not built here.
        trSetPath = os.path.join(
            BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/PMD_eliminated_by_CMA/trSet_lp_{}.mat'.format(lp)
        )
        tstSetPath = os.path.join(
            BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/PMD_eliminated_by_CMA/tstSet_lp_{}.mat'.format(lp)
        )
        '''Form dataloaders'''
        spanLen = 80e3  # metres per span
        data = scio.loadmat(trSetPath)
        spanNum = data['spanNum'][0, 0]
        L = spanLen * spanNum  # total transmission distance
        symbolRate = 32e9

        test_dataloader, tstSet, prbsTst = gen_dataloader(tstSetPath,
                                                          block_size=block_size,
                                                          symbol_num=tstsetLen,
                                                          batch_size=batch_size,
                                                          constellations=constellations,
                                                          shuffle=False)
        '''Form dataloader end'''
        total_taps = 2560  # fixed tap budget; must match the training sweep
        least_taps_per_layer = int(math.ceil(total_taps / step))

        if os.path.exists(WDBPsearchPath):
            dat = scio.loadmat(os.path.join(WDBPsearchPath, f'search_result_lp_{lp}_step_{step}.mat'))
            init_BW = [dat['init_BW'][0][0]]
            init_comp_fac = [dat['init_comp_fac'][0][0]]
            print('Previous search result read.')
        else:
            init_BW = [0.9]
            init_comp_fac = [0.9]
            print('Init BW to 0.9 and cr 0.9.')

        # Must mirror the kwargs used during training so the checkpoint
        # weights load into an identically-shaped model.
        model_kwargs = {
            'D': 17e-6,
            'tap_per_edc': least_taps_per_layer,
            'sample_factor': 2,
            'symbol_rate': symbolRate,
            'power_norm': False,
            'case_num': 2,
            'gm': 1.3e-3,
            'step': step,
            'trans_length': L,
            'lp': lp,
            'alpha': 0.2e-3,
            'init_comp_fac': init_comp_fac,
            'pr_win': 1024,
            'pre_cd': 0.5,
            'lr': lr,
            'init_BW': init_BW,
            'optimizer': 'Adam',
            'weight_decay': 0,
            'edc_init_method': 'LS_CO',
            'lmbd_each_layer': 5.12589879e-05,
        }

        mdl = LWDBP.load_from_checkpoint(os.path.join(
            checkpointSavePath, f'LWDBP_lp_{lp}_step_{step}_tapsPerLinearLayer_{least_taps_per_layer}.ckpt'
        ), **model_kwargs)

        sig, ber = eval_ber(mdl, tstSet, prbsTst, constellations=util.CONST_16QAM, use_cuda=False)
        Q = util.ber2q(np.mean(ber))
        print(f'lp {lp}, step {step}, Q factor {Q}')
        del mdl  # release model memory before the next step configuration


def vary_tap(step_range=(20,)):
    """Train LWDBP models over a sweep of taps-per-linear-layer values.

    For each DBP step count in *step_range*, derives the minimum number of
    taps per linear layer from the total accumulated dispersion, sweeps a
    small range of tap counts upward from that minimum, trains one model per
    tap count and prints its test-set Q factor. Checkpoints are written to
    ``result/LWDBP/vary_tap/chkpts`` and TensorBoard logs to
    ``result/LWDBP/vary_tap/log``.

    Args:
        step_range: iterable of DBP step counts to sweep.
    """
    step_range = [*step_range]  # defensive copy; also accepts any iterable
    lp = 2  # launch power tag used in the dataset file names
    # Name the experiment after this function so the result directory always
    # matches the code that produced it.
    frame = currentframe()
    experiment_name = getframeinfo(frame).function
    resultDir = os.path.join(BASE_DIR, 'result/LWDBP')
    if not os.path.exists(resultDir):
        os.makedirs(resultDir)
        print(f'Create result dir at \"{resultDir}\"')
    resultPath = os.path.join(resultDir, '{}'.format(experiment_name))
    if not os.path.exists(resultPath):
        os.mkdir(resultPath)
        print(f'Create result path at \"{resultPath}\"')
    checkpointSavePath = os.path.join(resultPath, 'chkpts')
    # Filter parameters found by the earlier LPF-DBP grid search; reused as
    # model initialisation when present.
    WDBPsearchPath = os.path.join(BASE_DIR, f'result/LPF-DBP/vary_step')

    if not os.path.exists(checkpointSavePath):
        os.mkdir(checkpointSavePath)
        print(f'Create result path at \"{checkpointSavePath}\"')

    '''Behavior control'''
    max_epochs = 10
    batch_size = 64
    splitAmount = 65536  # total symbols split between train and validation
    tr_val_ratio = 7  # train:validation split is tr_val_ratio : 3
    trsetLen = round(splitAmount * tr_val_ratio / (tr_val_ratio + 3))
    valsetLen = round(splitAmount * 3 / (tr_val_ratio + 3))
    tstsetLen = 131072
    lr = (5e-6, 5e-7)
    patience = 6  # early-stopping patience, in validation checks
    log_interval = 20

    block_size = 2048
    assert (block_size % 2 == 0)
    constellations = util.CONST_16QAM
    resume_from_checkpoints = False
    print(f'Resume from checkpoint: {resume_from_checkpoints}')

    for stIndx, step in enumerate(step_range):
        trSetPath = os.path.join(
            BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/PMD_eliminated_by_CMA/trSet_lp_{}.mat'.format(lp)
        )
        # Validation symbols are taken from the end of the training file
        # (count_from_end=True below), hence the same .mat file is reused.
        valSetPath = os.path.join(
            BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/PMD_eliminated_by_CMA/trSet_lp_{}.mat'.format(lp)
        )
        tstSetPath = os.path.join(
            BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/PMD_eliminated_by_CMA/tstSet_lp_{}.mat'.format(lp)
        )
        '''Form dataloaders'''
        spanLen = 80e3  # metres per span
        data = scio.loadmat(trSetPath)
        spanNum = data['spanNum'][0, 0]
        L = spanLen * spanNum  # total transmission distance
        symbolRate = 32e9

        train_dataloader, _, _ = gen_dataloader(trSetPath,
                                                block_size=block_size,
                                                symbol_num=trsetLen,
                                                batch_size=batch_size,
                                                constellations=constellations,
                                                shuffle=True)
        val_dataloader, valSet, prbsVal = gen_dataloader(valSetPath,
                                                         block_size=block_size,
                                                         symbol_num=valsetLen,
                                                         batch_size=batch_size,
                                                         constellations=constellations,
                                                         shuffle=False,
                                                         count_from_end=True)
        test_dataloader, tstSet, prbsTst = gen_dataloader(tstSetPath,
                                                          block_size=block_size,
                                                          symbol_num=tstsetLen,
                                                          batch_size=batch_size,
                                                          constellations=constellations,
                                                          shuffle=False)
        '''Form dataloader end'''
        # Minimum taps needed to cover the total chromatic dispersion.
        total_taps_limit = util.cal_tap_num(DL=17e-6 * L,
                                            ref_freq=193.1e12,
                                            sample_freq=2 * symbolRate)
        least_taps_per_layer = int(math.ceil(total_taps_limit / step))
        print(f'At least {least_taps_per_layer} taps per linear layer')
        # Sweep 3 tap counts upward from the minimum, spaced by round(80/step).
        taps_per_layer_range = least_taps_per_layer + np.arange(0, 3 * (round(80 / step)), round(80 / step))

        if os.path.exists(WDBPsearchPath):
            dat = scio.loadmat(os.path.join(WDBPsearchPath, f'search_result_lp_{lp}_step_{step}.mat'))
            init_BW = [dat['init_BW'][0][0]]
            init_comp_fac = [dat['init_comp_fac'][0][0]]
            print('Previous search result read.')
        else:
            init_BW = [0.9]
            init_comp_fac = [0.9]
            print('Init BW to 0.9 and cr 0.9.')
        for tasIndx, taps_per_layer in enumerate(taps_per_layer_range):
            print(f'There are {taps_per_layer} taps per linear layer!')
            model_kwargs = {
                'D': 17e-6,
                'tap_per_edc': taps_per_layer,
                'sample_factor': 2,
                'symbol_rate': symbolRate,
                'power_norm': False,
                'case_num': 2,
                'gm': 1.3e-3,
                'step': step,
                'trans_length': L,
                'lp': lp,
                'alpha': 0.2e-3,
                'init_comp_fac': init_comp_fac,
                'pr_win': 1024,
                'pre_cd': 0.5,
                'lr': lr,
                'init_BW': init_BW,
                'optimizer': 'Adam',
                'weight_decay': 0,
                'edc_init_method': 'LS_CO',
                'lmbd_each_layer': 5.12589879e-05,
            }

            mdl = LWDBP(**model_kwargs)

            '''Specify callbacks'''
            early_stop_callback = EarlyStopping(monitor='val_loss', patience=patience, strict=False,
                                                verbose=True, mode='min', min_delta=1e-2)
            check_point_callback = ModelCheckpoint(
                monitor='val_loss',
                verbose=True,
                save_last=None,
                save_top_k=1,
                save_weights_only=True,
                mode='min',
                dirpath=checkpointSavePath,
                filename=f'LWDBP_lp_{lp}_step_{step}_tapsPerLinearLayer_{taps_per_layer}',
            )
            '''Specify callbacks end'''
            logger = TensorBoardLogger(save_dir=resultPath, name='log', version=f'step_{step}_taps_{taps_per_layer}')
            trainer_kwargs = {
                'accelerator': 'gpu',
                'callbacks': [early_stop_callback, check_point_callback],
                'max_epochs': max_epochs,
                'check_val_every_n_epoch': 1,
                'num_sanity_val_steps': 0,
                'log_every_n_steps': log_interval,
                'val_check_interval': 200,
                'logger': logger,
            }

            trainer = pl.Trainer(**trainer_kwargs)
            if resume_from_checkpoints:
                ckpt_path = os.path.join(checkpointSavePath, 'model.ckpt')
            else:
                ckpt_path = None
            trainer.fit(mdl, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader, ckpt_path=ckpt_path)

            sig, ber = eval_ber(mdl, tstSet, prbsTst, constellations=util.CONST_16QAM, use_cuda=False)
            Q = util.ber2q(np.mean(ber))
            print(f'lp {lp}, step {step}, taps {taps_per_layer}, Q factor {Q}')
            # Free the trained model before the next tap configuration,
            # consistent with the other sweep routines in this file.
            del mdl


def eval_vary_tap(step_range=(20,)):
    """Evaluate the LWDBP checkpoints produced by the ``vary_tap`` sweep.

    For every DBP step count in *step_range*, regenerates the same
    taps-per-layer sweep used during training, loads each matching
    checkpoint from ``result/LWDBP/vary_tap/chkpts`` and prints its Q factor
    on the full test set.

    Args:
        step_range: iterable of DBP step counts whose tap sweeps should be
            evaluated; the corresponding checkpoints must already exist.
    """
    step_range = [*step_range]  # defensive copy; also accepts any iterable
    lp = 2  # launch power tag used in dataset / checkpoint file names
    resultDir = os.path.join(BASE_DIR, 'result/LWDBP')
    if not os.path.exists(resultDir):
        os.makedirs(resultDir)
        print(f'Create result dir at \"{resultDir}\"')
    resultPath = os.path.join(resultDir, 'vary_tap')
    if not os.path.exists(resultPath):
        os.mkdir(resultPath)
        print(f'Create result path at \"{resultPath}\"')
    checkpointSavePath = os.path.join(resultPath, 'chkpts')
    # Filter parameters found by the earlier LPF-DBP grid search; must match
    # what training used so checkpoints load into an identical model.
    WDBPsearchPath = os.path.join(BASE_DIR, f'result/LPF-DBP/vary_step')

    if not os.path.exists(checkpointSavePath):
        os.mkdir(checkpointSavePath)
        print(f'Create result path at \"{checkpointSavePath}\"')

    '''Behavior control'''

    batch_size = 32
    tstsetLen = 131072
    lr = (5e-6, 5e-7)  # unused at eval time but required by the model ctor

    block_size = 2048
    assert (block_size % 2 == 0)
    constellations = util.CONST_16QAM

    for stIndx, step in enumerate(step_range):
        # The training-set file is still loaded below, but only for its span
        # count; the unused train/val dataloaders of the training routine
        # are intentionally not built here.
        trSetPath = os.path.join(
            BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/PMD_eliminated_by_CMA/trSet_lp_{}.mat'.format(lp)
        )
        tstSetPath = os.path.join(
            BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/PMD_eliminated_by_CMA/tstSet_lp_{}.mat'.format(lp)
        )
        '''Form dataloaders'''
        spanLen = 80e3  # metres per span
        data = scio.loadmat(trSetPath)
        spanNum = data['spanNum'][0, 0]
        L = spanLen * spanNum  # total transmission distance
        symbolRate = 32e9

        test_dataloader, tstSet, prbsTst = gen_dataloader(tstSetPath,
                                                          block_size=block_size,
                                                          symbol_num=tstsetLen,
                                                          batch_size=batch_size,
                                                          constellations=constellations,
                                                          shuffle=False)
        '''Form dataloader end'''
        # Recreate the exact taps-per-layer sweep that vary_tap trained.
        total_taps_limit = util.cal_tap_num(DL=17e-6 * L,
                                            ref_freq=193.1e12,
                                            sample_freq=2 * symbolRate)
        least_taps_per_layer = int(math.ceil(total_taps_limit / step))
        print(f'At least {least_taps_per_layer} taps per linear layer')
        taps_per_layer_range = least_taps_per_layer + np.arange(0, 3 * (round(80 / step)), round(80 / step))

        if os.path.exists(WDBPsearchPath):
            dat = scio.loadmat(os.path.join(WDBPsearchPath, f'search_result_lp_{lp}_step_{step}.mat'))
            init_BW = [dat['init_BW'][0][0]]
            init_comp_fac = [dat['init_comp_fac'][0][0]]
            print('Previous search result read.')
        else:
            init_BW = [0.9]
            init_comp_fac = [0.9]
            print('Init BW to 0.9 and cr 0.9.')
        for tasIndx, taps_per_layer in enumerate(taps_per_layer_range):
            print(f'There are {taps_per_layer} taps per linear layer!')
            # Must mirror the kwargs used during training so the checkpoint
            # weights load into an identically-shaped model.
            model_kwargs = {
                'D': 17e-6,
                'tap_per_edc': taps_per_layer,
                'sample_factor': 2,
                'symbol_rate': symbolRate,
                'power_norm': False,
                'case_num': 2,
                'gm': 1.3e-3,
                'step': step,
                'trans_length': L,
                'lp': lp,
                'alpha': 0.2e-3,
                'init_comp_fac': init_comp_fac,
                'pr_win': 1024,
                'pre_cd': 0.5,
                'lr': lr,
                'init_BW': init_BW,
                'optimizer': 'Adam',
                'weight_decay': 0,
                'edc_init_method': 'LS_CO',
                'lmbd_each_layer': 5.12589879e-05,
            }

            mdl = LWDBP.load_from_checkpoint(os.path.join(
                checkpointSavePath, f'LWDBP_lp_{lp}_step_{step}_tapsPerLinearLayer_{taps_per_layer}.ckpt'
            ), **model_kwargs)

            sig, ber = eval_ber(mdl, tstSet, prbsTst, constellations=util.CONST_16QAM, use_cuda=False)
            Q = util.ber2q(np.mean(ber))
            print(f'lp {lp}, step {step}, taps {taps_per_layer}, Q factor {Q}')
            del mdl  # release model memory before the next configuration


if __name__ == '__main__':
    # Silence all warnings (e.g. library deprecation spam during training).
    # NOTE(review): this hides every warning, including potentially useful
    # ones — consider narrowing the filter.
    warnings.filterwarnings('ignore')
    # Each training sweep is immediately followed by its evaluation pass;
    # the eval_* functions load the checkpoints the preceding call wrote,
    # so the ordering of these calls matters.
    vary_lp(lp_range=[2])
    eval_vary_lp(lp_range=[2])
    vary_step(step_range=[20])
    eval_vary_step(step_range=[20])
    vary_tap(step_range=[20])
    eval_vary_tap(step_range=[20])