import math
import os
import warnings
from inspect import currentframe, getframeinfo

import numpy as np
import scipy.io as scio
import torch
import torch.nn as nn
from bayes_opt import BayesianOptimization, UtilityFunction

from DSPPytorch import *

BASE_DIR = os.path.dirname(__file__)


class DBP(nn.Module):
    '''Digital back-propagation (DBP) network.

    DBP has three variants: the Wiener model, the Hammerstein model and the
    Wiener-Hammerstein model; the only difference among them is the order of
    the linear and nonlinear stages.  See Napoli et al., "Reduced Complexity
    Digital Back-Propagation Methods for Optical Communication Systems".
    The model below is the Wiener model: each step is a chromatic-dispersion
    compensation (EDC) layer followed by a nonlinear phase (ExpNL) layer.
    '''

    def __init__(self, D=17e-6, c=3e8, waveLength=1552.52e-9, tap_per_edc=32, sampleFactor=2,
                 symbolRate=28e9, power_norm=False, case_num=2, gm=1.3e-3, step=30,
                 trans_length=2400e3, lp=0, alpha=0.2e-3, init_comp_fac=0.1):
        '''Build ``step`` EDC + ExpNL layer pairs.

        D: dispersion parameter; c: light speed; waveLength: carrier
        wavelength; tap_per_edc: FIR taps per EDC layer; sampleFactor:
        samples per symbol; gm: nonlinear coefficient; step: number of DBP
        steps; trans_length: total fibre length; lp: launch power (dBm);
        alpha: fibre attenuation; init_comp_fac: initial NL compensation
        ratio.
        '''
        super(DBP, self).__init__()
        # Dispersion*length and fibre length handled by each step.
        DLperStep = D * trans_length / step
        stepLen = trans_length / step

        # Initial scale factor for every nonlinear phase-rotation layer.
        NLstepInitFac = ExpNLLayer.cal_init_factor(gm=gm, eps=init_comp_fac, alp_dB=alpha,
                                                   power_dB=lp, step_len=stepLen)
        self.dbp_layers = nn.ModuleList()
        for _ in range(step):
            self.dbp_layers.extend([
                EDCLayer(symbolRate=symbolRate, DL=DLperStep, case_num=case_num, c=c,
                         waveLength=waveLength, tap=tap_per_edc, sampleFactor=sampleFactor,
                         power_norm=power_norm),
                ExpNLLayer(NLstepInitFac),
            ])

    def forward(self, x):
        '''Apply every EDC/ExpNL layer to ``x`` in sequence.'''
        for layer in self.dbp_layers:
            x = layer(x)
        return x


def DBP_convention(lp_range=(1,)):
    '''Conventional (fixed compensation ratio) DBP sweep over launch powers.

    For each launch power in ``lp_range``, load the simulated 16QAM data set,
    run full-step digital back-propagation followed by the receiver DSP chain
    (residual EDC, adaptive TDE, phase recovery, DD-LMS), compute BER / Q /
    Q^2 and save the results as a .mat file under ``result/DBP_convention``.

    The default is an immutable tuple (the old mutable-list default was
    copied immediately, but a tuple avoids the pitfall entirely).
    '''
    # Name the result directory after this very function.
    frame = currentframe()
    experiment_name = getframeinfo(frame).function
    lp_range = [*lp_range]
    Qcache = np.zeros([len(lp_range)])
    BERcache = np.zeros_like(Qcache)
    Q2cache = np.zeros_like(Qcache)
    pick_sym_num = 131072
    modOrder = 4
    result_save_dir = os.path.join(BASE_DIR, 'result/{}'.format(experiment_name))
    if not os.path.exists(result_save_dir):
        os.makedirs(result_save_dir)
    result_save_file_path = os.path.join(result_save_dir, 'convention_DBP_performance_against_lp.mat')
    save_dict = {}
    save_results_flag = True
    for lpIndx, lp in enumerate(lp_range):
        dataPath = os.path.join(
            BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/BPS_4_seed_2_dBm_{lp:d}_Loops_8.mat'.format(lp=lp)
        )
        # Load the captured signals and the link parameters from the file.
        data = scio.loadmat(dataPath)
        symbolRate = 32e9
        spanNum = data['Loops_N'][0, 0] * 5
        # NOTE(review): the requested lp is replaced by the launch power
        # recorded inside the data file — confirm the two always coincide.
        lp = data['dBm_N'][0, 0]
        prbsx = data['prbsx']
        prbsy = data['prbsy']
        spanLen = 80e3
        L = spanLen * spanNum
        D = 17e-6
        DL_pre_com = - D * L / 2  # cancels the 50% pre-compensated dispersion

        sigx = torch.from_numpy(data['signalx'])
        sigy = torch.from_numpy(data['signaly'])
        sigx = sigx.reshape(1, 1, -1)
        sigy = sigy.reshape(1, 1, -1)
        sig = torch.cat([sigx, sigy], dim=1)

        constellations = torch.from_numpy(util.CONST_16QAM)

        # Instantiate the back-end DSP stages.
        edc = EDCLayer(symbolRate=symbolRate, DL=DL_pre_com, case_num=2)  # removes the surplus 50% dispersion compensation
        pr = PhaseRecLayer(1024)
        lms = FIRLayer(tap=32, case_num=2, power_norm=True, centor_one=True)

        '''DSP procedure for the signal through the link'''
        sig = sig[..., 1::4]  # resampling down to 2 samples/symbol
        sig = sig[..., 0:pick_sym_num * 2]
        prbsx = prbsx[..., 0:pick_sym_num * modOrder]
        prbsy = prbsy[..., 0:pick_sym_num * modOrder]
        prbs = np.concatenate([prbsx, prbsy], axis=0)

        # xi is not tuned here; the default init_comp_fac is used.
        dbp_para_dict = {
            'D': D,
            'tap_per_edc': 256,
            'sampleFactor': 2,
            'symbolRate': symbolRate,
            'power_norm': True,
            'step': spanNum,  # controls the accuracy of the DBP algorithm
            'trans_length': L,
            'gm': -1.3e-3,
            'lp': lp,
        }
        if torch.cuda.is_available():
            sig = sig.cuda()
            lms = lms.cuda()
            edc = edc.cuda()

        with torch.no_grad():
            dbp = DBP(**dbp_para_dict)
            dbp = dbp.to(sig.device)
            sig = dbp(sig)
            del dbp  # release the (potentially large) DBP network immediately

            sig = edc(sig)  # cancel the surplus dispersion compensation

            # Back-end adaptive TDE equalises PMD, PDL, etc.
            sig = sig.cpu()
            sig = FIRLayer.time_vary_infer(sig, err_mode='Godard', iter_num=2, lr=5e-4, tap=13)
            if torch.cuda.is_available():
                sig = sig.cuda()  # move back onto the GPU
            # Down-sample to 1 sample/symbol and recover the carrier phase.
            sig = pr(sig[..., 1::2])

        # DD-LMS equalisation.
        lms.fit(sig, err_mode='DDM', constellations=constellations, iter_num=4, block_size=4028,
                remain=2048, lr=5e-4)
        sig = lms(sig)

        # Compare against the PRBS to obtain the bit-error rate.
        sig = sig.cpu().data.numpy().squeeze()
        sig, ber, _ = util.pr_ber(sig, prbs, constellations.cpu().data.numpy())

        ber = np.mean(ber)
        Q = util.ber2q(ber)
        Q2 = util.ber2Q2(ber)
        Qcache[lpIndx] = Q
        BERcache[lpIndx] = ber
        Q2cache[lpIndx] = Q2

        print(f'lp {lp} done, Q factor: {Q}')
    save_dict['lp'] = np.array(lp_range)
    save_dict['ber'] = BERcache
    save_dict['Q'] = Qcache
    save_dict['Q2'] = Q2cache
    if save_results_flag:
        scio.savemat(result_save_file_path, save_dict)
        print('\n Save results at \"{p:s}\"'.format(p=result_save_file_path))


def DBP_BO_exp(lp_range=(1,)):
    '''Bayesian-optimisation DBP sweep over launch powers (experimental data).

    For each launch power, the nonlinear compensation ratio ``cr`` of the DBP
    is tuned with Bayesian optimisation (or reloaded from a previous run when
    ``load_previous_optimal_cr`` is True) and the resulting Q factor is
    recorded.

    Fixes vs. the original version:
      * ``least_taps_per_layer`` now uses true division inside ``math.ceil``
        (ceiling of an already-floored ``//`` quotient never rounded up);
      * the learned compensation ratios are actually written to
        ``optimal_cr_file_path`` — the save call had been commented out while
        the completion message still claimed the file was saved, and the
        ``load_previous_optimal_cr`` path depends on that file existing;
      * immutable tuple default instead of a mutable list.
    '''
    # Name the result directory after this very function.
    frame = currentframe()
    experiment_name = getframeinfo(frame).function
    lp_range = [*lp_range]
    Qcache = np.zeros([len(lp_range)])
    optimal_cr_results = np.zeros([len(lp_range)])
    pick_sym_num = 110000
    modOrder = 4
    step = 9
    result_save_dir = os.path.join(BASE_DIR, 'result_exp/{}'.format(experiment_name))
    if not os.path.exists(result_save_dir):
        os.makedirs(result_save_dir)
    result_save_file_path = os.path.join(result_save_dir, f'BO_DBP_performance_against_lp_in_step_{step}.mat')
    optimal_cr_file_path = os.path.join(result_save_dir, f'optimal_cr_learned_by_BO_step_in_step_{step}.mat')

    load_previous_optimal_cr = False

    if load_previous_optimal_cr:
        loaded_cr_data = scio.loadmat(optimal_cr_file_path)
        optimal_xi_loaded = list(loaded_cr_data['optimal_cr'].squeeze())
        print(f'Previous NL compensation ratios read from {optimal_cr_file_path}')

    save_dict = {}
    save_results_flag = False
    for lpIndx, lp in enumerate(lp_range):
        dataPath = os.path.join(
            BASE_DIR, f'data/experiment/16QAM20GBaud1800kmHe/tstSet_lp_{lp}.mat'
        )
        # Load the experimental test set and link parameters from the file.
        data = scio.loadmat(dataPath)
        symbolRate = data['symbolRate'][0, 0]
        spanNum = data['spanNum'][0, 0]
        prbs = data['prbs']
        spanLen = 100e3
        L = spanLen * spanNum
        D = -17e-6

        sig = torch.from_numpy(data['sig'])
        sig = sig.reshape(1, 1, -1)

        constellations = torch.from_numpy(util.CONST_16QAM)

        # DSP procedure for the signal through the link.
        sig = sig[..., 0:pick_sym_num * 2]
        prbs = prbs[..., 0:pick_sym_num * modOrder]

        # Lower bound on taps per EDC layer so that each layer covers its
        # share of the accumulated dispersion.  True division + ceil: round
        # UP (the old ``// step`` inside ceil silently rounded down).
        total_taps_limit = util.cal_tap_num(DL=17e-6 * L,
                                            ref_freq=193.1e12,
                                            sample_freq=2 * symbolRate)
        least_taps_per_layer = int(math.ceil(total_taps_limit / step))
        print(f'At least {least_taps_per_layer} taps per layer')

        dbp_para_dict = {
            'D': D,
            'tap_per_edc': 256,
            'case_num': 1,
            'sampleFactor': 2,
            'symbolRate': symbolRate,
            'power_norm': True,
            'step': step,  # controls the accuracy of the DBP algorithm
            'trans_length': L,
            'gm': -1.3e-3,
            'lp': lp,
        }
        sig_ori = sig

        def objective_function(cr):
            '''Q factor achieved with NL compensation ratio ``cr`` (BO target).'''
            dbp_para_dict['init_comp_fac'] = cr
            sig = sig_ori
            pr = PhaseRecLayer(1024)
            lms = FIRLayer(tap=32, case_num=1, power_norm=True, centor_one=True)
            if torch.cuda.is_available():
                sig = sig.cuda()
                lms = lms.cuda()

            with torch.no_grad():
                dbp = DBP(**dbp_para_dict)
                dbp = dbp.to(sig.device)
                sig = dbp(sig)
                del dbp  # release the DBP network immediately

                # Down-sample to 1 sample/symbol and recover the carrier
                # phase.  (The adaptive TDE stage is deliberately skipped for
                # this experimental data.)
                sig = pr(sig[..., 1::2])

            # DD-LMS equalisation.
            lms.fit(sig, err_mode='DDM', constellations=constellations, iter_num=10, block_size=4028,
                    remain=2048, lr=1e-4)
            sig = lms(sig)

            # Compare against the PRBS to obtain BER, then Q.
            sig = sig.cpu().data.numpy().squeeze()
            sig, ber, _ = util.pr_ber(sig, prbs, constellations.cpu().data.numpy())

            ber = np.mean(ber)
            return util.ber2q(ber)

        if not load_previous_optimal_cr:
            pbounds = {'cr': (-1, 1)}
            optimizer = BayesianOptimization(
                f=objective_function,
                pbounds=pbounds,
                verbose=2
            )
            utility = UtilityFunction(kind='ei', xi=0.1)
            optimizer.maximize(init_points=5, n_iter=10, acquisition_function=utility)
            print(optimizer.max)
            optimal_cr = optimizer.max['params']['cr']
            optimal_cr_results[lpIndx] = optimal_cr
            Qcache[lpIndx] = optimizer.max['target']
        else:
            print('Use previous learned cr')
            cr = optimal_xi_loaded[lpIndx]
            Q = objective_function(cr)
            Qcache[lpIndx] = Q
            print(f'lp: {lp}, loaded cr {cr}, Q factor: {Q}')
    save_dict['lp'] = np.array(lp_range)
    save_dict['Q'] = Qcache
    if save_results_flag:
        scio.savemat(result_save_file_path, save_dict)
        print('\n Save results at \"{p:s}\"'.format(p=result_save_file_path))
    if not load_previous_optimal_cr:
        # Persist the learned ratios so load_previous_optimal_cr can reuse them.
        scio.savemat(optimal_cr_file_path, {'optimal_cr': optimal_cr_results, 'lp': np.array(lp_range)})
        print(f'Save optimal NL compensation ratios at \"{optimal_cr_file_path}\"')


def DBP_BO_in_vary_steps(step_range=(9,)):
    '''Search the optimal NL compensation ratio for several DBP step counts.

    At a fixed launch power (1 dBm) the compensation ratio ``cr`` is tuned by
    Bayesian optimisation for every step count in ``step_range``.  The best
    cr is saved per step, plus a summary Q-vs-step .mat file.

    The default is an immutable tuple instead of the original mutable list.
    '''
    step_range = [*step_range]
    lp = 1
    Qcache = np.zeros([len(step_range)])
    pick_sym_num = 110000
    modOrder = 4
    result_save_dir = os.path.join(BASE_DIR, 'result_exp/DBP_BO_exp')
    if not os.path.exists(result_save_dir):
        os.makedirs(result_save_dir)
    result_save_path = os.path.join(result_save_dir, 'search_vary_steps')

    if not os.path.exists(result_save_path):
        os.mkdir(result_save_path)
        print(f'Create result save path at \"{result_save_path}\"')

    save_results_flag = True
    for stIndx, step in enumerate(step_range):
        print(f'Search at step {step}')
        save_dict = {}
        dataPath = os.path.join(
            BASE_DIR, f'data/experiment/16QAM20GBaud1800kmHe/tstSet_lp_{lp}.mat'
        )
        # Load the experimental test set and link parameters from the file.
        data = scio.loadmat(dataPath)
        symbolRate = data['symbolRate'][0, 0]
        spanNum = data['spanNum'][0, 0]
        prbs = data['prbs']
        spanLen = 100e3
        L = spanLen * spanNum
        D = -17e-6

        sig = torch.from_numpy(data['sig'])
        sig = sig.reshape(1, 1, -1)

        constellations = torch.from_numpy(util.CONST_16QAM)

        # DSP procedure for the signal through the link.
        sig = sig[..., 0:pick_sym_num * 2]
        prbs = prbs[..., 0:pick_sym_num * modOrder]

        dbp_para_dict = {
            'D': D,
            'tap_per_edc': 256,
            'case_num': 1,
            'sampleFactor': 2,
            'symbolRate': symbolRate,
            'power_norm': True,
            'step': step,  # controls the accuracy of the DBP algorithm
            'trans_length': L,
            'gm': -1.3e-3,
            'lp': lp,
        }
        sig_ori = sig

        def objective_function(cr):
            '''Q factor achieved with NL compensation ratio ``cr`` (BO target).'''
            dbp_para_dict['init_comp_fac'] = cr
            sig = sig_ori
            pr = PhaseRecLayer(1024)
            lms = FIRLayer(tap=32, case_num=1, power_norm=True, centor_one=True)
            if torch.cuda.is_available():
                sig = sig.cuda()
                lms = lms.cuda()

            with torch.no_grad():
                dbp = DBP(**dbp_para_dict)
                dbp = dbp.to(sig.device)
                sig = dbp(sig)
                del dbp  # release the DBP network immediately

                # Down-sample to 1 sample/symbol and recover the carrier
                # phase.  (The adaptive TDE stage is deliberately skipped for
                # this experimental data.)
                sig = pr(sig[..., 1::2])

            # DD-LMS equalisation.
            lms.fit(sig, err_mode='DDM', constellations=constellations, iter_num=10, block_size=4028,
                    remain=2048, lr=1e-4)
            sig = lms(sig)

            # Compare against the PRBS to obtain BER, then Q.
            sig = sig.cpu().data.numpy().squeeze()
            sig, ber, _ = util.pr_ber(sig, prbs, constellations.cpu().data.numpy())

            ber = np.mean(ber)
            return util.ber2q(ber)

        pbounds = {'cr': (-1, 1.5)}
        optimizer = BayesianOptimization(
            f=objective_function,
            pbounds=pbounds,
            verbose=1
        )
        utility = UtilityFunction(kind='ei', xi=0.1)
        optimizer.maximize(init_points=5, n_iter=10, acquisition_function=utility)
        print(optimizer.max)
        optimal_cr = optimizer.max['params']['cr']
        Qcache[stIndx] = optimizer.max['target']
        save_dict['step'] = step
        save_dict['optimal_cr'] = optimal_cr
        scio.savemat(os.path.join(result_save_path, f'search_optimal_cr_lp_{lp}_step_{step}.mat'), save_dict)
    if save_results_flag:
        scio.savemat(os.path.join(result_save_dir, 'BO_DBP_perf_against_step.mat'),
                     {'step': np.array(step_range), 'Q': Qcache})
        print('\n Save results at \"{p:s}\"'.format(p=result_save_dir))


def form_csv_experiment_result():
    '''Convert a saved .mat result (lp vs. Q factor) into a .csv table.

    Fix: the CSV file is now opened with ``newline=''`` as the ``csv`` module
    requires — without it the writer emits extra blank rows on Windows.
    '''
    import csv
    result_path = os.path.join(BASE_DIR, 'result_exp/DBP_BO_exp')  # result directory
    result_name = 'BO_DBP_performance_against_lp_in_step_18'  # file stem (no extension)

    result_dict = scio.loadmat(os.path.join(result_path, f'{result_name}.mat'))
    Q = result_dict['Q'].squeeze()
    lp = result_dict['lp'].squeeze()
    with open(os.path.join(result_path, f'{result_name}.csv'), 'w', newline='') as f:
        csv_writer = csv.writer(f)
        csv_writer.writerow(['lp', 'Q factor'])
        # One row per launch power.
        for result_indx in range(lp.shape[0]):
            csv_writer.writerow([lp[result_indx], Q[result_indx]])

        print('transform has been done!')


if __name__ == '__main__':
    warnings.filterwarnings("ignore")
    # DBP_convention(lp_range=[-2, -1, 0, 1, 2, 3, 4])
    DBP_BO_exp(lp_range=[1])
    # DBP_BO_in_vary_steps(step_range=[3,9,14,18])

    '''将存储结果转化为csv文件'''
    # form_csv_experiment_result()
