import math
import tkinter
# import matplotlib
# matplotlib.use('TkAgg')
import os
import warnings

os.environ["OMP_NUM_THREADS"] = "8"
from inspect import currentframe, getframeinfo
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import scipy.io as scio
from CR_DSPPytorch import EDCLayer, PhaseRecLayer, FIRLayer, format_rt, cmul, PerturbativeBlockLayer, norm_power
import util
from bayes_opt import BayesianOptimization
from bayes_opt.util import UtilityFunction

os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
BASE_DIR = os.path.dirname(__file__)


def Fuzzy_cluster_assisted_TDC_weight_opt(lp_range=None, tap_list=None, cluster=8, save_result_flag=False):
    """Sweep launch power and tap count for fuzzy-cluster-compressed TDC.

    For every (lp, tap) pair: load the simulated 16QAM/32GBaud/3200km data set,
    initialise a time-domain EDC filter, replace its taps with the fuzzy-cluster
    approximation (blend of the two nearest K-means centroids), run the DSP
    chain (EDC -> phase recovery -> decision-directed LMS) and record BER and Q.

    Args:
        lp_range: launch powers in dBm to sweep (default [1]).
        tap_list: EDC tap counts to sweep (default [2048]).
        cluster: number of K-means centroids used to compress the taps.
        save_result_flag: if True, write a CSV summary into the result folder.
    """
    # Avoid mutable default arguments.
    lp_range = [1] if lp_range is None else lp_range
    tap_list = [2048] if tap_list is None else tap_list
    pick_syms_num = 131072   # number of symbols taken from each record
    modOrder = 4             # bits per symbol (16QAM)
    Qcache = np.zeros([len(lp_range), len(tap_list)])
    BERcache = np.zeros_like(Qcache)
    result_save_dir = os.path.join(BASE_DIR, 'result/Fuzzy_cluster_assisted_TDC_weight_opt')
    os.makedirs(result_save_dir, exist_ok=True)
    chosen_device = 'cuda' if torch.cuda.is_available() else 'cpu'

    for lpIndx, lp in enumerate(lp_range):
        for tapIndx, tap in enumerate(tap_list):
            # load data
            dataPath = os.path.join(BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/PMD_eliminated_by_CMA'
                                              f'/trSet_lp_{lp}.mat')
            data = scio.loadmat(dataPath)
            symbolRate = data['symbolRate'][0, 0]
            spanNum = data['spanNum'][0, 0]
            spanLen = data['spanLen'][0, 0]
            sig = data['sig']
            prbs = data['prbs']
            L = spanNum * spanLen
            D = 17e-6        # dispersion coefficient
            DL = D * L / 2   # accumulated dispersion to compensate

            sig = torch.from_numpy(sig[np.newaxis, ...])
            constellations = torch.from_numpy(util.CONST_16QAM)

            '''DSP procedure'''
            edc = EDCLayer(symbol_rate=symbolRate, DL=DL, case_num=2, tap=tap, sample_factor=2,
                           power_norm=True, init_method='TDS', time_domain=True)
            # Compress the EDC taps with fuzzy clustering.
            edc.h.data = Fuzzy_cluster_filter_taps(edc.h.data, clusters=cluster)

            pr = PhaseRecLayer(1024)
            lms = FIRLayer(tap=32, case_num=2, power_norm=True, centor_one=True)

            sig = sig.to(chosen_device)
            lms = lms.to(chosen_device)
            edc = edc.to(chosen_device)

            # Keep 2 samples/symbol for the EDC and the matching PRBS bits.
            sig = sig[..., 0:pick_syms_num * 2]
            prbs = prbs[..., 0:pick_syms_num * modOrder]

            sig = edc(torch.view_as_real(sig))
            sig = pr(sig[..., 1::2, :])  # downsample to 1 sample/symbol before phase recovery
            lms.fit(sig, err_mode='DDM', constellations=constellations, iter_num=4, block_size=4028,
                    remain=2048, lr=5e-4)
            sig = lms(sig)

            sig = torch.view_as_complex(sig)
            sig = sig.cpu().data.numpy().squeeze()
            sig, ber, _ = util.pr_ber(sig, prbs, constellations.cpu().data.numpy())

            ber = np.mean(ber)
            BERcache[lpIndx, tapIndx] = ber
            Qcache[lpIndx, tapIndx] = util.ber2q(ber)
            print(f'lp: {lp}, tap:{tap}, BER: {ber}, Q: {util.ber2q(ber)}')

    if save_result_flag:
        with open(os.path.join(result_save_dir, 'TDE_vary_tap.csv'), 'w') as f:
            f.write('lp, tap, BER, Q\n')
            for lpIndx, lp in enumerate(lp_range):
                for tapIndx, tap in enumerate(tap_list):
                    f.write(f'{lp}, {tap}, {BERcache[lpIndx, tapIndx]}, {Qcache[lpIndx, tapIndx]}\n')
        print(f'result saved at \"{result_save_dir}\"')


def Fuzzy_cluster_assisted_NLC_weight_opt(lp=2, tap_list=None, cluster_list=None, save_result_flag=False):
    """Sweep tap and cluster counts for fuzzy-cluster TDC followed by NLC.

    Same linear DSP chain as Fuzzy_cluster_assisted_TDC_weight_opt
    (EDC with fuzzy-clustered taps -> phase recovery -> decision-directed LMS),
    but the equalised signal is additionally evaluated through the
    perturbation-based nonlinearity-compensation module to measure how tap
    clustering affects end-to-end performance.

    Args:
        lp: launch power in dBm of the data set to load.
        tap_list: EDC tap counts to sweep (default [2048]).
        cluster_list: centroid counts to sweep (default [8]).
        save_result_flag: if True, write a CSV summary into the result folder.
    """
    # Avoid mutable default arguments.
    tap_list = [2048] if tap_list is None else tap_list
    cluster_list = [8] if cluster_list is None else cluster_list
    pick_syms_num = 131072   # number of symbols taken from the record
    modOrder = 4             # bits per symbol (16QAM)
    Qcache = np.zeros([len(tap_list), len(cluster_list)])
    BERcache = np.zeros_like(Qcache)
    result_save_dir = os.path.join(BASE_DIR, 'result/Fuzzy_cluster_assisted_NLC_weight_opt')
    os.makedirs(result_save_dir, exist_ok=True)
    chosen_device = 'cuda' if torch.cuda.is_available() else 'cpu'

    for tapIndx, tap in enumerate(tap_list):
        for clusterIndx, cluster in enumerate(cluster_list):
            # load data
            dataPath = os.path.join(BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/PMD_eliminated_by_CMA'
                                              f'/trSet_lp_{lp}.mat')
            data = scio.loadmat(dataPath)
            symbolRate = data['symbolRate'][0, 0]
            spanNum = data['spanNum'][0, 0]
            spanLen = data['spanLen'][0, 0]
            sig = data['sig']
            prbs = data['prbs']
            L = spanNum * spanLen
            D = 17e-6        # dispersion coefficient
            DL = D * L / 2   # accumulated dispersion to compensate

            sig = torch.from_numpy(sig[np.newaxis, ...])
            constellations = torch.from_numpy(util.CONST_16QAM)

            '''DSP procedure'''
            edc = EDCLayer(symbol_rate=symbolRate, DL=DL, case_num=2, tap=tap, sample_factor=2,
                           power_norm=True, init_method='TDS', time_domain=True)
            # Compress the EDC taps with fuzzy clustering.
            edc.h.data = Fuzzy_cluster_filter_taps(edc.h.data, clusters=cluster)

            pr = PhaseRecLayer(1024)
            lms = FIRLayer(tap=32, case_num=2, power_norm=True, centor_one=True)

            sig = sig.to(chosen_device)
            lms = lms.to(chosen_device)
            edc = edc.to(chosen_device)

            # Keep 2 samples/symbol for the EDC and the matching PRBS bits.
            sig = sig[..., 0:pick_syms_num * 2]
            prbs = prbs[..., 0:pick_syms_num * modOrder]

            sig = edc(torch.view_as_real(sig))
            sig = pr(sig[..., 1::2, :])  # downsample to 1 sample/symbol before phase recovery
            lms.fit(sig, err_mode='DDM', constellations=constellations, iter_num=4, block_size=4028,
                    remain=2048, lr=5e-4)
            sig = lms(sig)

            sig = torch.view_as_complex(sig)
            sig = sig.cpu().data.numpy().squeeze()
            # Evaluate through the perturbation-based NLC module.
            ber, Q = simulation_ERP_NLC_module(sig, prbs, lp, symbolRate, spanLen, spanNum)

            BERcache[tapIndx, clusterIndx] = ber
            Qcache[tapIndx, clusterIndx] = Q
            print(f'lp: {lp}, tap:{tap}, BER: {ber}, Q: {util.ber2q(ber)}')

    if save_result_flag:
        with open(os.path.join(result_save_dir, 'NLC_weight_opt.csv'), 'w') as f:
            f.write('tap,cluster,BER,Q\n')
            for tapIndx, tap in enumerate(tap_list):
                for clusterIndx, cluster in enumerate(cluster_list):
                    f.write(f'{tap}, {cluster}, {BERcache[tapIndx, clusterIndx]}, {Qcache[tapIndx, clusterIndx]}\n')
        print(f'result saved at \"{result_save_dir}\"')


def Fuzzy_cluster_assisted_TDC_weight_fixed(lp_range=None, tap_list=None, cluster=8, save_result_flag=False):
    """Find, per (lp, tap) pair, the best fixed fuzzy-cluster blending weight.

    For every launch-power / tap-count combination, a Bayesian optimisation
    searches the blending weight xi in [0, 1] that maximises Q after the
    linear DSP chain (EDC with simplified fuzzy-cluster taps -> phase
    recovery -> decision-directed LMS).

    Args:
        lp_range: launch powers in dBm to sweep (default [1]).
        tap_list: EDC tap counts to sweep (default [2048]).
        cluster: kept for interface compatibility; the inner search uses
            8 clusters as in the original implementation.
        save_result_flag: if True, write a CSV summary into the result folder.
    """
    # Avoid mutable default arguments.
    lp_range = [1] if lp_range is None else lp_range
    tap_list = [2048] if tap_list is None else tap_list
    pick_syms_num = 131072   # number of symbols taken from each record
    modOrder = 4             # bits per symbol (16QAM)
    Qcache = np.zeros([len(lp_range), len(tap_list)])
    Weightcache = np.zeros_like(Qcache)
    result_save_dir = os.path.join(BASE_DIR, 'result/Fuzzy_cluster_assisted_TDC_weight_fixed')
    os.makedirs(result_save_dir, exist_ok=True)
    chosen_device = 'cuda' if torch.cuda.is_available() else 'cpu'

    for lpIndx, lp in enumerate(lp_range):
        for tapIndx, tap in enumerate(tap_list):
            # load data
            dataPath = os.path.join(BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/PMD_eliminated_by_CMA'
                                              f'/trSet_lp_{lp}.mat')
            data = scio.loadmat(dataPath)
            symbolRate = data['symbolRate'][0, 0]
            spanNum = data['spanNum'][0, 0]
            spanLen = data['spanLen'][0, 0]
            sig = data['sig']
            prbs = data['prbs']

            L = spanNum * spanLen
            D = 17e-6        # dispersion coefficient
            DL = D * L / 2   # accumulated dispersion to compensate

            sig = torch.from_numpy(sig[np.newaxis, ...])

            constellations = torch.from_numpy(util.CONST_16QAM)

            '''DSP procedure'''
            # Pristine copies: the objective below is evaluated many times.
            prbs_copy = prbs.copy()
            sig_clone = sig.clone()

            def est_best_weight(xi):
                """Objective for the Bayesian search: Q factor for a given xi."""
                edc = EDCLayer(symbol_rate=symbolRate, DL=DL, case_num=2, tap=tap, sample_factor=2,
                               power_norm=True, init_method='TDS', time_domain=True)
                edc.h.data = Simplified_fuzzy_cluster_filter_taps(edc.h.data, xi, clusters=8)

                pr = PhaseRecLayer(1024)
                lms = FIRLayer(tap=32, case_num=2, power_norm=True, centor_one=True)

                sig = sig_clone.to(chosen_device)
                lms = lms.to(chosen_device)
                edc = edc.to(chosen_device)

                sig = sig[..., 0:pick_syms_num * 2]
                prbs = prbs_copy[..., 0:pick_syms_num * modOrder]

                sig = edc(torch.view_as_real(sig))
                sig = pr(sig[..., 1::2, :])  # downsample to 1 sample/symbol
                lms.fit(sig, err_mode='DDM', constellations=constellations, iter_num=4, block_size=4028,
                        remain=2048, lr=5e-4)
                sig = lms(sig)

                sig = torch.view_as_complex(sig)
                sig = sig.cpu().data.numpy().squeeze()
                sig, ber, _ = util.pr_ber(sig, prbs, constellations.cpu().data.numpy())
                return util.ber2q(np.mean(ber))

            optimizer = BayesianOptimization(
                f=est_best_weight,
                pbounds={'xi': (0, 1)},
                verbose=1
            )
            utility = UtilityFunction(kind='ei', xi=0.5)
            optimizer.maximize(init_points=20, n_iter=100, acquisition_function=utility)
            xi_opt = optimizer.max['params']['xi']
            Q = optimizer.max['target']
            # Bug fix: record the optimum so the CSV below is not all zeros
            # (the caches were previously allocated but never written).
            Qcache[lpIndx, tapIndx] = Q
            Weightcache[lpIndx, tapIndx] = xi_opt
            print(f'optimal xi: {xi_opt}, best Q: {Q}')

    if save_result_flag:
        with open(os.path.join(result_save_dir, 'TDE_vary_tap.csv'), 'w') as f:
            f.write('lp, tap, xi_opt, Q\n')
            for lpIndx, lp in enumerate(lp_range):
                for tapIndx, tap in enumerate(tap_list):
                    f.write(f'{lp}, {tap}, {Weightcache[lpIndx, tapIndx]}, {Qcache[lpIndx, tapIndx]}\n')
        print(f'result saved at \"{result_save_dir}\"')


def Fuzzy_cluster_assisted_NLC_weight_fixed(lp=2, tap_list=None, cluster_list=None, save_result_flag=False):
    """Optimise the fixed fuzzy-cluster weight, then evaluate with NLC.

    For every (tap, cluster) pair a Bayesian optimisation first searches the
    blending weight xi in [0, 1] that maximises the linear-equalisation Q
    (EDC with simplified fuzzy-cluster taps -> phase recovery -> LMS). The
    best xi is then applied once more and the equalised signal is evaluated
    through the perturbation-based NLC module.

    Args:
        lp: launch power in dBm of the data set to load.
        tap_list: EDC tap counts to sweep (default [2048]).
        cluster_list: centroid counts to sweep (default [8]).
        save_result_flag: if True, write a CSV summary into the result folder.
    """
    # Avoid mutable default arguments.
    tap_list = [2048] if tap_list is None else tap_list
    cluster_list = [8] if cluster_list is None else cluster_list
    pick_syms_num = 131072   # number of symbols taken from the record
    modOrder = 4             # bits per symbol (16QAM)
    QLecache = np.zeros([len(tap_list), len(cluster_list)])  # Q after linear equalisation
    Qcache = np.zeros([len(tap_list), len(cluster_list)])    # Q after NLC
    Weightcache = np.zeros_like(Qcache)                      # optimal xi per point
    result_save_dir = os.path.join(BASE_DIR, 'result/Fuzzy_cluster_assisted_NLC_weight_fixed')
    os.makedirs(result_save_dir, exist_ok=True)
    chosen_device = 'cuda' if torch.cuda.is_available() else 'cpu'

    for tapIndx, tap in enumerate(tap_list):
        for clusterIndx, cluster in enumerate(cluster_list):
            # load data
            dataPath = os.path.join(BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/PMD_eliminated_by_CMA'
                                              f'/trSet_lp_{lp}.mat')
            data = scio.loadmat(dataPath)
            symbolRate = data['symbolRate'][0, 0]
            spanNum = data['spanNum'][0, 0]
            spanLen = data['spanLen'][0, 0]
            sig = data['sig']
            prbs = data['prbs']

            L = spanNum * spanLen
            D = 17e-6        # dispersion coefficient
            DL = D * L / 2   # accumulated dispersion to compensate

            sig = torch.from_numpy(sig[np.newaxis, ...])

            constellations = torch.from_numpy(util.CONST_16QAM)

            # Pristine copies: the objective below is evaluated many times.
            prbs_copy = prbs.copy()
            sig_clone = sig.clone()

            def est_best_weight(xi):
                """Objective for the Bayesian search: Q after linear DSP for a given xi."""
                edc = EDCLayer(symbol_rate=symbolRate, DL=DL, case_num=2, tap=tap, sample_factor=2,
                               power_norm=True, init_method='TDS', time_domain=True)
                edc.h.data = Simplified_fuzzy_cluster_filter_taps(edc.h.data, xi, clusters=cluster)

                pr = PhaseRecLayer(1024)
                lms = FIRLayer(tap=32, case_num=2, power_norm=True, centor_one=True)

                sig = sig_clone.to(chosen_device)
                lms = lms.to(chosen_device)
                edc = edc.to(chosen_device)

                sig = sig[..., 0:pick_syms_num * 2]
                prbs = prbs_copy[..., 0:pick_syms_num * modOrder]

                sig = edc(torch.view_as_real(sig))
                sig = pr(sig[..., 1::2, :])  # downsample to 1 sample/symbol
                lms.fit(sig, err_mode='DDM', constellations=constellations, iter_num=4, block_size=4028,
                        remain=2048, lr=5e-4)
                sig = lms(sig)

                sig = torch.view_as_complex(sig)
                sig = sig.cpu().data.numpy().squeeze()
                sig, ber, _ = util.pr_ber(sig, prbs, constellations.cpu().data.numpy())
                return util.ber2q(np.mean(ber))

            optimizer = BayesianOptimization(
                f=est_best_weight,
                pbounds={'xi': (0, 1)},
                verbose=1
            )
            utility = UtilityFunction(kind='ei', xi=0.5)
            optimizer.maximize(init_points=20, n_iter=100, acquisition_function=utility)
            xi_opt = optimizer.max['params']['xi']
            Q_le = optimizer.max['target']

            '''DSP procedure with the optimal weight, followed by NLC evaluation'''
            cdc = EDCLayer(symbol_rate=symbolRate, DL=DL, case_num=2, tap=tap, sample_factor=2,
                           power_norm=True, init_method='TDS', time_domain=True)
            cdc.h.data = Simplified_fuzzy_cluster_filter_taps(cdc.h.data, xi_opt, clusters=cluster)
            pr = PhaseRecLayer(1024)
            lms = FIRLayer(tap=32, case_num=2, power_norm=True, centor_one=True)
            sig = sig.to(chosen_device)
            lms = lms.to(chosen_device)
            cdc = cdc.to(chosen_device)
            sig = sig[..., 0:pick_syms_num * 2]
            prbs = prbs[..., 0:pick_syms_num * modOrder]

            sig = cdc(torch.view_as_real(sig))
            sig = pr(sig[..., 1::2, :])  # downsample to 1 sample/symbol
            lms.fit(sig, err_mode='DDM', constellations=constellations, iter_num=4, block_size=4028,
                    remain=2048, lr=5e-4)
            sig = lms(sig)

            sig = torch.view_as_complex(sig)
            sig = sig.cpu().data.numpy().squeeze()
            ber, Q = simulation_ERP_NLC_module(sig, prbs, lp, symbolRate, spanLen, spanNum)
            QLecache[tapIndx, clusterIndx] = Q_le
            Qcache[tapIndx, clusterIndx] = Q
            Weightcache[tapIndx, clusterIndx] = round(xi_opt, 4)
            print(f'lp: {lp}, tap: {tap}, optimal xi: {xi_opt}, Q_le: {Q_le}, Q: {Q}')

    if save_result_flag:
        with open(os.path.join(result_save_dir, 'NLC_weight_fixed.csv'), 'w') as f:
            f.write('tap,cluster,weight,Q_le,Q\n')
            for tapIndx, tap in enumerate(tap_list):
                for clusterIndx, cluster in enumerate(cluster_list):
                    f.write(f'{tap}, {cluster}, {Weightcache[tapIndx, clusterIndx]}, {QLecache[tapIndx, clusterIndx]},'
                            f'{Qcache[tapIndx, clusterIndx]}\n')
        print(f'result saved at \"{result_save_dir}\"')


def cluster_coeffs_of_TDC(lp_range=None, tap=2048, cluster_list=None, init_method='LS_CO', save_result_flag=False,
                          clusters_flag=False,
                          quantization_flag=False):
    """Evaluate hard K-means clustering of TDC filter coefficients.

    For every (lp, cluster) pair: build the time-domain EDC filter, optionally
    snap its taps to K-means centroids in the complex plane and/or quantize
    them to 4 bits, run the DSP chain (EDC -> phase recovery -> LMS), and
    record BER and Q.

    Args:
        lp_range: launch powers in dBm to sweep (default [1]).
        tap: EDC tap count.
        cluster_list: centroid counts to sweep (default [4]).
        init_method: EDC tap initialisation method passed to EDCLayer.
        save_result_flag: if True, write a CSV summary into the result folder.
        clusters_flag: if True, replace the taps with their cluster centroids.
        quantization_flag: if True, uniformly quantize the taps to 4 bits.
    """
    # Avoid mutable default arguments.
    lp_range = [1] if lp_range is None else lp_range
    cluster_list = [4] if cluster_list is None else cluster_list
    # Name the result folder after this function.
    frame = currentframe()
    experiment_name = getframeinfo(frame).function
    pick_syms_num = 131072   # number of symbols taken from each record
    modOrder = 4             # bits per symbol (16QAM)
    Qcache = np.zeros([len(lp_range), len(cluster_list)])
    BERcache = np.zeros_like(Qcache)
    result_save_dir = os.path.join(BASE_DIR, f'result/{experiment_name}')
    os.makedirs(result_save_dir, exist_ok=True)
    chosen_device = 'cuda' if torch.cuda.is_available() else 'cpu'

    for lpIndx, lp in enumerate(lp_range):
        for clusterIndx, cluster in enumerate(cluster_list):
            print(f'lp={lp}, tap={tap} cluster={cluster}')
            # load data
            dataPath = os.path.join(BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/PMD_eliminated_by_CMA'
                                              f'/trSet_lp_{lp}.mat')
            data = scio.loadmat(dataPath)
            symbolRate = data['symbolRate'][0, 0]
            spanNum = data['spanNum'][0, 0]
            spanLen = data['spanLen'][0, 0]
            sig = data['sig']
            prbs = data['prbs']
            L = spanNum * spanLen
            D = 17e-6        # dispersion coefficient
            DL = D * L / 2   # accumulated dispersion to compensate

            sig = torch.from_numpy(sig[np.newaxis, ...])
            constellations = torch.from_numpy(util.CONST_16QAM)

            '''Upper bound on the total taps needed for dispersion equalisation'''
            total_taps_limit = util.cal_tap_num_odd(DL=DL,
                                                    sample_freq=2 * symbolRate)
            print(f'taps upper bound: {total_taps_limit}')

            '''DSP procedure'''
            edc = EDCLayer(symbol_rate=symbolRate, DL=DL, case_num=2, tap=tap, sample_factor=2,
                           power_norm=True, init_method=init_method, time_domain=True)
            if clusters_flag:
                # Hard clustering in the complex plane (taps snapped to centroids).
                edc.h.data = modified_filter_taps_by_cluster_complex(edc.h.data, clusters=cluster)
            if quantization_flag:
                edc.h.data = quantization_filter_taps(edc.h.data, bit_width=4)
            pr = PhaseRecLayer(1024)
            lms = FIRLayer(tap=32, case_num=2, power_norm=True, centor_one=True)

            sig = sig.to(chosen_device)
            lms = lms.to(chosen_device)
            edc = edc.to(chosen_device)

            sig = sig[..., 0:pick_syms_num * 2]
            prbs = prbs[..., 0:pick_syms_num * modOrder]

            sig = edc(torch.view_as_real(sig))
            sig = pr(sig[..., 1::2, :])  # downsample to 1 sample/symbol
            lms.fit(sig, err_mode='DDM', constellations=constellations, iter_num=4, block_size=4028,
                    remain=2048, lr=5e-4)
            sig = lms(sig)

            sig = torch.view_as_complex(sig)
            sig = sig.cpu().data.numpy().squeeze()
            sig, ber, _ = util.pr_ber(sig, prbs, constellations.cpu().data.numpy())

            ber = np.mean(ber)
            BERcache[lpIndx, clusterIndx] = ber
            Qcache[lpIndx, clusterIndx] = util.ber2q(ber)
            print(f'lp: {lp}, tap:{tap}, cluster: {cluster}, BER: {ber}, Q: {util.ber2q(ber)}')

    if save_result_flag:
        with open(os.path.join(result_save_dir, f'Complex_TDC_init_{init_method}_vary_clusters_12_15.csv'), 'w') as f:
            f.write('lp, cluster, tap, BER, Q\n')
            for lpIndx, lp in enumerate(lp_range):
                for clusterIndx, cluster in enumerate(cluster_list):
                    f.write(f'{lp}, {cluster}, {tap}, {BERcache[lpIndx, clusterIndx]}, {Qcache[lpIndx, clusterIndx]}\n')
        print(f'result saved at \"{result_save_dir}\"')


def quantize_coeffs_of_TDC(lp=1, tap=2048, init_method='LS_CO', save_result_flag=False,
                           clusters_flag=False, quantization_flag=False):
    """Sweep the quantization bit width of TDC filter coefficients.

    For bit widths 2..8: build the time-domain EDC filter, optionally cluster
    its taps (14 K-means centroids per real/imag component) and/or quantize
    them to the current bit width, run the DSP chain (EDC -> phase recovery ->
    LMS), and record BER and Q.

    Args:
        lp: launch power in dBm of the data set to load.
        tap: EDC tap count.
        init_method: EDC tap initialisation method passed to EDCLayer.
        save_result_flag: if True, write a CSV summary into the result folder.
        clusters_flag: if True, snap taps to per-component K-means centroids first.
        quantization_flag: if True, uniformly quantize taps to the current bit width.
    """
    # Name the result folder after this function.
    frame = currentframe()
    experiment_name = getframeinfo(frame).function
    pick_syms_num = 131072   # number of symbols taken from the record
    modOrder = 4             # bits per symbol (16QAM)
    result_save_dir = os.path.join(BASE_DIR, f'result/{experiment_name}')
    os.makedirs(result_save_dir, exist_ok=True)
    chosen_device = 'cuda' if torch.cuda.is_available() else 'cpu'
    bit_width_list = [2, 3, 4, 5, 6, 7, 8]
    BERcache = np.zeros([len(bit_width_list)])
    Qcache = np.zeros_like(BERcache)
    for bitIndx, bit_width in enumerate(bit_width_list):
        print(f'bit width: {bit_width}')
        # load data
        dataPath = os.path.join(BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/PMD_eliminated_by_CMA'
                                          f'/trSet_lp_{lp}.mat')
        data = scio.loadmat(dataPath)
        symbolRate = data['symbolRate'][0, 0]
        spanNum = data['spanNum'][0, 0]
        spanLen = data['spanLen'][0, 0]
        sig = data['sig']
        prbs = data['prbs']
        L = spanNum * spanLen
        D = 17e-6        # dispersion coefficient
        DL = D * L / 2   # accumulated dispersion to compensate

        sig = torch.from_numpy(sig[np.newaxis, ...])
        constellations = torch.from_numpy(util.CONST_16QAM)

        '''Upper bound on the total taps needed for dispersion equalisation'''
        total_taps_limit = util.cal_tap_num_odd(DL=DL,
                                                sample_freq=2 * symbolRate)
        print(f'taps upper bound: {total_taps_limit}')

        '''DSP procedure'''
        edc = EDCLayer(symbol_rate=symbolRate, DL=DL, case_num=2, tap=tap, sample_factor=2,
                       power_norm=True, init_method=init_method, time_domain=True)
        if clusters_flag:
            # Snap real and imaginary parts to 14 K-means centroids each.
            edc.h.data = modified_filter_taps_by_cluster_real(edc.h.data, clusters=14)
        if quantization_flag:
            edc.h.data = quantization_filter_taps(edc.h.data, bit_width=bit_width)
        pr = PhaseRecLayer(1024)
        lms = FIRLayer(tap=32, case_num=2, power_norm=True, centor_one=True)

        sig = sig.to(chosen_device)
        lms = lms.to(chosen_device)
        edc = edc.to(chosen_device)

        sig = sig[..., 0:pick_syms_num * 2]
        prbs = prbs[..., 0:pick_syms_num * modOrder]

        sig = edc(torch.view_as_real(sig))
        sig = pr(sig[..., 1::2, :])  # downsample to 1 sample/symbol
        lms.fit(sig, err_mode='DDM', constellations=constellations, iter_num=4, block_size=4028,
                remain=2048, lr=5e-4)
        sig = lms(sig)

        sig = torch.view_as_complex(sig)
        sig = sig.cpu().data.numpy().squeeze()
        sig, ber, _ = util.pr_ber(sig, prbs, constellations.cpu().data.numpy())

        ber = np.mean(ber)
        BERcache[bitIndx] = ber
        Qcache[bitIndx] = util.ber2q(ber)
        print(f'lp: {lp}, tap:{tap}, BER: {ber}, bit_width: {bit_width} Q: {util.ber2q(ber)}')

    if save_result_flag:
        with open(os.path.join(result_save_dir, f'Quantize_coeffs_of_TDC_init_{init_method}.csv'), 'w') as f:
            f.write('bit_width, lp, tap, BER, Q\n')
            for bitIndx, bit_width in enumerate(bit_width_list):
                f.write(f'{bit_width}, {lp}, {tap}, {BERcache[bitIndx]}, {Qcache[bitIndx]}\n')
        print(f'result saved at \"{result_save_dir}\"')


def filter_tap_moduli(h, show=False, saveFig=False, savePath=None, splitRi=False):
    '''
    Plot the filter-tap moduli, or the real/imaginary parts separately.

    h: filter taps: [2, tap_num, re/im], torch tensor
    show: display the figure interactively.
    saveFig: save the figure to savePath.
    savePath: output image path; required when saveFig is True.
    splitRi: if True, draw the real and imaginary parts on two stacked axes
             instead of the modulus.
    '''
    if saveFig and savePath is None:
        # ValueError subclasses Exception, so existing callers still catch it.
        raise ValueError('savePath is supposed to be assigned when saveFig is True.')
    h = h.cpu().data.numpy().astype(np.float32)
    if not splitRi:
        # Modulus of each complex tap; plot the first polarisation only.
        h_moduli = np.sqrt(h[..., 0] ** 2 + h[..., 1] ** 2)
        h_moduli = h_moduli[0, :]
        plt.plot(h_moduli, 'o-', markersize=4.5)
        plt.xlabel('Position')
        plt.ylabel('Moduli')
        plt.grid(True)
    else:
        # Real and imaginary parts of the first polarisation, stacked vertically.
        h_real = h[0, ..., 0]
        h_imag = h[0, ..., 1]
        fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 6))
        ax1.plot(h_real, 'o-', markersize=4.5, label='Real Part')
        ax2.plot(h_imag, 'o-', markersize=4.5, label='Imaginary Part')
        plt.tight_layout()

    if saveFig:
        plt.savefig(savePath, dpi=300)
    if show:
        plt.show()
    plt.close()


def heat_map_of_filter_taps(h, show=False, saveFig=False, savePath=None):
    '''
    Scatter the filter taps in the complex plane, coloured by phase.

    h: filter taps: [2, tap_num, re/im], torch tensor
    show: display the figure interactively.
    saveFig: save the figure to savePath.
    savePath: output image path; required when saveFig is True.
    '''
    if saveFig and savePath is None:
        # ValueError subclasses Exception, so existing callers still catch it.
        raise ValueError('savePath is supposed to be assigned when saveFig is True.')
    h = h.cpu().data.numpy().astype(np.float32)
    h = h[0, ...]  # first polarisation only
    h_real = h[..., 0]
    h_imag = h[..., 1]
    h_phase = np.arctan2(h_imag, h_real)
    # Colour each tap by its phase.
    plt.scatter(h_real.flatten(), h_imag.flatten(), c=h_phase.flatten(), cmap='viridis', s=100)
    plt.title('Heatmap of Filter Taps in the Complex Plane (phase)')
    plt.xlabel('Filter Coefficients (Real)')
    plt.ylabel('Filter Coefficients (Imaginary)')
    plt.grid(False)
    if saveFig:
        plt.savefig(savePath, dpi=300)
    if show:
        plt.show()
    plt.close()


def modified_filter_taps_by_cluster_complex(h_ori, clusters=8):
    '''
    Replace every complex filter tap with its K-means centroid.

    h_ori: filter taps: [2, tap_num, re/im], torch tensor
    clusters: number of K-means clusters in the complex plane
    return: modified taps as a float32 torch tensor with the original shape
    '''
    from sklearn.cluster import KMeans
    taps = h_ori.cpu().data.numpy().astype(np.float32)
    # Treat each tap as a 2-D point (re, im).
    points = taps.reshape(-1, 2)
    model = KMeans(n_clusters=clusters, random_state=42, n_init=10).fit(points)
    # Snap every point to the centre of its assigned cluster.
    snapped = model.cluster_centers_[model.labels_]
    return torch.tensor(snapped.reshape(taps.shape), dtype=torch.float32)


def Fuzzy_cluster_filter_taps(h_ori, clusters=8):
    '''
    Fuzzy-cluster compression of complex filter taps.

    Each tap is replaced by an inverse-distance weighted blend of its two
    nearest K-means centroids in the complex plane.

    h_ori: filter taps: [2, tap_num, re/im], torch tensor
    clusters: number of K-means centroids
    return: modified filter taps, float32 torch tensor, same shape as h_ori
    '''
    from sklearn.cluster import KMeans
    h_ori = h_ori.cpu().data.numpy().astype(np.float32)
    # Fit K-means on the taps viewed as 2-D points (re, im).
    h = h_ori.reshape(-1, 2)
    kmeans = KMeans(n_clusters=clusters, random_state=42, n_init=10)
    kmeans.fit(h)
    centroids = kmeans.cluster_centers_
    # Tap->centroid distances: h_ori [2, len, 2] -> [2, len, 1, 2] vs centroids [1, 1, K, 2].
    distances = np.linalg.norm(h_ori[..., np.newaxis, :] - centroids[np.newaxis, np.newaxis, ...], axis=3)
    # Indices and distances of the two nearest centroids per tap (vectorised,
    # replacing the original O(2*tap_num) Python double loop).
    closest_centers = np.argsort(distances, axis=2)[:, :, :2]
    two_dists = np.take_along_axis(distances, closest_centers, axis=2)
    dist1, dist2 = two_dists[..., 0], two_dists[..., 1]
    # Normalised inverse-distance weights: (1/d1)/(1/d1 + 1/d2) == d2/(d1 + d2).
    # Bug fix: if a tap coincides with a centroid (d1 == 0) the original code
    # divided by zero and produced NaN taps; give the nearest centroid full weight.
    total = dist1 + dist2
    safe_total = np.where(total > 0, total, 1.0)
    weight1 = np.where(dist1 == 0, 1.0, dist2 / safe_total)
    nearest = centroids[closest_centers[..., 0]]
    second = centroids[closest_centers[..., 1]]
    updated_h = weight1[..., np.newaxis] * nearest + (1.0 - weight1)[..., np.newaxis] * second
    return torch.tensor(updated_h.astype(h_ori.dtype), dtype=torch.float32)


def Simplified_fuzzy_cluster_filter_taps(h_ori, weight1, clusters=8):
    '''
    Blend each filter tap between its two nearest K-means centroids using a
    fixed weight.

    h_ori: filter taps: [2, tap_num, re/im], torch tensor
    weight1: weight given to the nearest centroid; the second-nearest gets 1 - weight1
    clusters: number of K-means centroids
    return: modified filter taps, float32 torch tensor, same shape as h_ori
    '''
    from sklearn.cluster import KMeans
    taps = h_ori.cpu().data.numpy().astype(np.float32)
    # Cluster the taps viewed as 2-D points (re, im) in the complex plane.
    flat = taps.reshape(-1, 2)
    model = KMeans(n_clusters=clusters, random_state=42, n_init=10)
    model.fit(flat)
    centers = model.cluster_centers_
    # Per-tap distance to every centroid: [2, tap_num, clusters].
    dists = np.linalg.norm(taps[..., np.newaxis, :] - centers[np.newaxis, np.newaxis, ...], axis=3)
    # Indices of the two closest centroids for every tap.
    nearest_two = np.argsort(dists, axis=2)[:, :, :2]
    first = centers[nearest_two[..., 0]]
    second = centers[nearest_two[..., 1]]
    blended = weight1 * first + (1 - weight1) * second
    return torch.tensor(blended.astype(taps.dtype), dtype=torch.float32)


def modified_filter_taps_by_cluster_real(h_ori, clusters=8):
    '''
    Cluster the real and imaginary parts of the filter taps independently and
    snap each component to its K-means centroid.

    h_ori: filter taps: [2, tap_num, re/im], torch tensor
    clusters: number of K-means clusters per component
    return: modified taps as a float32 torch tensor with the original shape
    '''
    from sklearn.cluster import KMeans

    def snap_component(values):
        # Quantise a 1-D component (as column vector) to its cluster centres.
        model = KMeans(n_clusters=clusters, random_state=42, n_init=10)
        model.fit(values)
        return model.cluster_centers_[model.labels_]

    taps = h_ori.cpu().data.numpy().astype(np.float64)
    flat = taps.reshape(-1, 2)
    new_real = snap_component(flat[..., 0][..., np.newaxis])
    new_imag = snap_component(flat[..., 1][..., np.newaxis])
    merged = np.hstack([new_real, new_imag]).reshape(taps.shape)
    return torch.tensor(merged, dtype=torch.float32)


def quantization_filter_taps(h, bit_width=8):
    '''
    Uniformly quantize the real and imaginary parts of the filter taps.

    h: filter taps: [2, tap_num, re/im], torch tensor
    bit_width: bit width of quantization
    return: quantized filter taps with the same shape as h
    '''
    from CR_DSPPytorch import cal_uniform_quantization_parameters, uniform_quant
    real, imag = h[..., 0], h[..., 1]
    # Quantization scale/offset derived per component with the max-abs rule.
    scale_r, offset_r = cal_uniform_quantization_parameters(
        real, bit_width=bit_width, axis=-1, approach='maxabs')
    scale_i, offset_i = cal_uniform_quantization_parameters(
        imag, bit_width=bit_width, axis=-1, approach='maxabs')
    quant_r = uniform_quant((real, scale_r, offset_r), bit_width=bit_width, axis=-2, approach='maxabs')
    quant_i = uniform_quant((imag, scale_i, offset_i), bit_width=bit_width, axis=-2, approach='maxabs')
    quantized = torch.stack([quant_r, quant_i], dim=-1)
    assert quantized.shape == h.shape
    return quantized


def simulation_ERP_NLC_module(sig, prbs, lp, symbol_rate, span_length, span_number):
    '''
    Equalize fiber nonlinearity with an ERP-assisted perturbation-based
    NLC module and report BER / Q factor of the equalized signal.

    The perturbation triplet terms are computed block-wise with
    PerturbativeBlockLayer; the three scalar knobs (perturbation weight xi,
    phase rotation phi, energy-divergence scale eta) are then tuned by
    Bayesian optimization to maximize the Q factor.

    sig: received symbols, [2, 131072], complex np.ndarray (dual polarization)
    prbs: transmitted bit sequence, [2, 131072 * 4], np.ndarray
    lp: launch power in dBm (converted to watts below)
    symbol_rate: symbol rate in Baud
    span_length, span_number: link geometry forwarded to the perturbative-
        matrix calculation
    return: (mean BER, Q factor) of the equalized signal
    '''
    from bayes_opt import BayesianOptimization
    from bayes_opt.util import UtilityFunction
    maxmn = 92  # perturbation window half-size; full window is 2*maxmn+1 symbols
    symbol_num = sig.shape[-1]
    mod_order = 4  # bits per symbol (16QAM)

    # Parameters for building / loading the first-order perturbation matrix.
    cal_kwargs = {
        'Fs': symbol_rate,
        'sf': 20,
        'span_length': span_length,
        'span_number': span_number,
        'pre_cd': 0.5,
        'rolloff': 0.1,
        'maxmn': maxmn,
        'int_constraint': maxmn,
    }

    '''calculate effective length for ERP'''
    alp = 0.2e-3  # fiber attenuation -- presumably 0.2 dB/km expressed per meter; confirm units
    gm = 1.3e-3  # nonlinear coefficient gamma -- presumably 1/(W*m); TODO confirm
    power = 10 ** (lp / 10 - 3)  # launch power: dBm -> W
    # NOTE(review): span_num / span_len are hard-coded here instead of using
    # the span_number / span_length arguments -- confirm this is intentional.
    span_num = 40
    span_len = 80e3
    alpha = -np.log(np.power(10, -alp / 10))  # linear attenuation coefficient (per meter)
    # Effective length computed over the whole link length span_len * span_num.
    Leff = (1 - np.exp(- alpha * span_len * span_num)) / alpha

    perturbative_matrix_dir = os.path.join(BASE_DIR, f'perturbative_matrixes_sim')
    if not os.path.exists(perturbative_matrix_dir):
        os.makedirs(perturbative_matrix_dir)

    # Cache file for the perturbative matrix of this window size.
    perturbative_matrix_path = os.path.join(perturbative_matrix_dir, f'win_size_{2 * maxmn + 1}.mat')

    prtMtrx = PerturbativeBlockLayer.cal_init_perturbative_matrix(
        perturbative_matrix_path=perturbative_matrix_path,
        perturbative_matrix_save_path=perturbative_matrix_path,
        **cal_kwargs)

    prtVec = PerturbativeBlockLayer.pert_matrix_to_vec(prtMtrx, maxmn)

    pbl = PerturbativeBlockLayer(2 * maxmn + 1,
                                 init_weight=prtVec,
                                 grouping_triplet=False)  # initial coefficients
    sig = sig[..., 0:symbol_num]
    sig_ori = sig.copy()  # keep the unnormalized signal for the final equalization
    prbs = prbs[..., 0:symbol_num * mod_order]

    # Convert to a real-valued torch tensor and normalize the signal power.
    sig = torch.from_numpy(sig)
    sig = torch.view_as_real(sig)
    sig = norm_power(sig)
    sig = sig[np.newaxis, ...]

    sig_len = sig.shape[-2]
    perturbative_terms = torch.zeros([2, sig_len, 2])

    # Run the perturbative layer block-wise to bound memory usage; each block
    # carries overhead samples around the central `step` samples.
    step = 512
    block_size = step + maxmn + 2
    bs = util.BlockSelector(sample_num=sig_len, block_size=block_size, step=step)

    with torch.no_grad():
        for indx in range(len(bs)):
            sig_block = sig[..., bs[indx], :]
            perturbative_terms[:, indx * step:(indx + 1) * step, :] = pbl(sig_block)[...,
                                                                      bs.pre_overhead:bs.pre_overhead + step, :]

    # Back to complex numpy arrays for the scalar optimization below.
    perturbative_terms = perturbative_terms.data.cpu().numpy()
    perturbative_terms = perturbative_terms[..., 0] + 1j * perturbative_terms[..., 1]
    sig = sig.squeeze()
    sig = sig[..., 0] + 1j * sig[..., 1]
    sig = sig.data.cpu().numpy()

    def est_signal_performance(xi, phi, eta):
        # Bayesian-search objective: Q factor of the signal equalized with
        # perturbation weight xi, phase rotation phi and energy-divergence
        # scale eta. Higher is better (optimizer maximizes).
        energy_divergence = 1j * 8 / 9 * gm * sig * Leff * power * 3 / 2 * eta
        eqsig = sig + np.exp(1j * phi) * (xi * perturbative_terms - energy_divergence)
        eqsig = eqsig.squeeze()
        ber = util.pr_ber(eqsig, prbs, constellations=util.CONST_16QAM, mod_order=mod_order)[1]
        Q = np.mean(util.ber2q(ber))
        return Q

    optimizer = BayesianOptimization(
        f=est_signal_performance,
        pbounds={'xi': (0, 1e-3), 'phi': (0, 2 * np.pi), 'eta': (10, 50)},
        verbose=1
    )
    utility = UtilityFunction(kind='ei', xi=0.1)  # expected-improvement acquisition
    optimizer.maximize(init_points=20, n_iter=300, acquisition_function=utility)
    xi_opt = optimizer.max['params']['xi']
    phi_opt = optimizer.max['params']['phi']
    eta_opt = optimizer.max['params']['eta']
    print(f'optimal xi: {xi_opt}, optimal phi:{phi_opt} optimal eta:{eta_opt}')

    '''ERP-PB-NLC techniques'''
    # NOTE(review): the final equalization is applied to the unnormalized
    # sig_ori, while the optimizer's objective used the normalized sig (and
    # energy_diver below is still computed from the normalized sig) --
    # confirm this mismatch is intentional.
    energy_diver = 1j * 8 / 9 * gm * sig * Leff * power * 3 / 2 * eta_opt
    eqsig = sig_ori + (xi_opt * perturbative_terms - energy_diver) * np.exp(1j * phi_opt)
    ber_eq = util.pr_ber(eqsig, prbs, constellations=util.CONST_16QAM, mod_order=mod_order)[1]
    Q_eq = np.mean(util.ber2q(ber_eq))
    print(f' With ERP, the Q factor is {Q_eq}')
    return np.mean(ber_eq), Q_eq

def change_file(filepath):
    '''
    Normalize and re-sort the NLC weight-fixed result CSV.

    Reads 'NLC_weight_fixed.csv' from *filepath*, strips surrounding
    whitespace from the column names, sorts the rows by cluster then tap,
    and writes the result to 'NLC_weight_fixed_version2.csv' in the same
    directory.
    '''
    import pandas as pd

    table = pd.read_csv(os.path.join(filepath, 'NLC_weight_fixed.csv'))
    # Column headers may carry stray spaces from how the CSV was written.
    table.columns = table.columns.str.strip()

    reordered = table.sort_values(by=['cluster', 'tap']).reset_index(drop=True)
    reordered.to_csv(os.path.join(filepath, 'NLC_weight_fixed_version2.csv'), index=False)
    print(f'result saved at \"{filepath}\"')



if __name__ == '__main__':
    # Script entry point. Earlier experiment drivers are kept commented out
    # for reference; only the CSV post-processing (change_file) runs now.
    warnings.filterwarnings("ignore", category=UserWarning)
    # lp_range = list(np.arange(-2, 4, 1))
    tap_list = [447, 507, 557, 607, 657, 707]  # NOTE(review): only referenced by the commented-out calls below
    # Fuzzy_cluster_assisted_TDC_weight_opt(lp_range=[2], tap_list=[597], cluster=8, save_result_flag=False)
    # (translation of the note below) Introduce NLC; the weight between the
    # coefficients and the clusters is given by the normalized inverse of the distance.
    '''引入NLC，并且系数与簇之间的权重由距离的归一化倒数给出'''
    # cluster_list = [4, 5, 6, 7, 8, 9, 10]
    # Fuzzy_cluster_assisted_NLC_weight_opt(lp=2, tap_list=tap_list, cluster_list=cluster_list, save_result_flag=True)
    # Fuzzy_cluster_assisted_TDC_weight_fixed(lp_range=[2], tap_list=[597], cluster=8, save_result_flag=False)
    # (translation of the note below) Introduce NLC; the weight between the
    # coefficients and the clusters is fixed to xi, with xi found by Bayesian search.
    '''引入NLC，并且系数与簇之间的权重固定为xi,xi由贝叶斯搜索得到'''
    cluster_list = [18, 16, 14, 12, 10, 8, 6]  # NOTE(review): unused by the active code path
    # Fuzzy_cluster_assisted_NLC_weight_fixed(lp=2, tap_list=tap_list, cluster_list=cluster_list, save_result_flag=True)
    # cluster_coeffs_of_TDC(lp_range=[1], tap=897, cluster_list=[8, 16, 32, 48, 64, 80], init_method=init_method,
    #                       save_result_flag=True, clusters_flag=True, quantization_flag=False)
    # cluster_coeffs_of_clusterd_TDC(lp_range=lp_range, tap=597, bit_width_list=[2, 3, 4],
    #                                init_method=init_method,
    #                                save_result_flag=True, clusters_flag=True, quantization_flag=True)
    # quantize_coeffs_of_TDC(lp=0, tap=597, init_method=init_method,
    #                        save_result_flag=True, clusters_flag=False, quantization_flag=True)
    filepath = os.path.join(BASE_DIR, 'result/Fuzzy_cluster_assisted_NLC_weight_fixed')
    change_file(filepath)
