import csv
import os

os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
from inspect import currentframe, getframeinfo
from ProcedureControls import train_model, eval_model
import util
import matplotlib.pyplot as plt
import scipy.io as scio

BASE_DIR = os.path.dirname(__file__)


def plot_constellation_hist(sig, **behaviour_dict):
    '''Save a 2-D constellation histogram of *sig* (used as on_eval_end_callback).

    sig: complex-valued signal array; its real/imag parts are binned into a
        100x100 2-D histogram over [-1.2, 1.2] x [-1.2, 1.2].
    behaviour_dict: must contain 'global_config' with keys 'experiment_name'
        and 'lp' (launch power used in the output file names).

    Writes both a .jpg of the histogram figure and a .mat file holding the
    raw bin counts (key 'h') into result/<experiment_name>/.
    '''
    global_config = behaviour_dict['global_config']
    experiment_name = global_config['experiment_name']
    lp = global_config['lp']
    result_dir = os.path.join(BASE_DIR, f'result/{experiment_name}')
    # Robustness fix: create the result directory if missing, otherwise
    # savefig/savemat raise FileNotFoundError on a fresh checkout.
    os.makedirs(result_dir, exist_ok=True)

    # hist2d returns (counts, xedges, yedges, image); keep only the counts.
    h = plt.hist2d(sig.real, sig.imag, bins=100, range=[[-1.2, 1.2], [-1.2, 1.2]])[0]
    plt.savefig(os.path.join(result_dir, f'model_const_lp_{lp}_{experiment_name}.jpg'))
    plt.close()
    hist_name = os.path.join(result_dir, f'model_const_lp_{lp}_{experiment_name}.mat')
    scio.savemat(hist_name, {'h': h})


def TNN_simulation_lp_vary_DR_UL(train_flag):
    '''Real-valued TNN simulation, decision-directed (unsupervised) training.

    When train_flag is True, trains one model per polarization at the launch
    power fixed in config_global; then evaluates over lp_list and prints the
    per-lp Q factor averaged across the two polarizations.
    '''
    # The experiment name is this function's own name (frame introspection);
    # it also names the result directory used by the constellation callback.
    frame = currentframe()
    experiment_name = getframeinfo(frame).function
    print(f'experiment name: {experiment_name}')

    # NOTE: each config_* helper returns locals() — every local variable name
    # becomes a key of the returned kwargs dict; `del args` removes the
    # positional-args tuple from that dict. Renaming any local changes the
    # dict's keys, so names here are part of the interface.
    def config_trainer(*args):
        accelerator = 'gpu'
        max_epochs = 18 * args[0]  # args[0] is train_flag: 0 epochs when eval-only
        check_val_every_n_epoch = max_epochs + 1  # > max_epochs, i.e. validation never runs
        num_sanity_val_steps = 0
        del args
        return locals()

    def config_model():
        hidden_sizes = [2, 10]  # presumably hidden-layer widths — confirm in model code
        use_batch_normalization_flag = False
        return locals()

    def config_plm(*args):
        model_kwargs = args[0]  # dict produced by config_model()
        lr = 1e-3
        time_win_size = 185
        which_pol = args[1]  # 0 = x polarization, 1 = y polarization
        optimizer = 'Adam'
        group_triplet = True
        err_function = 'MSE'
        weight_decay = 0
        del args
        return locals()

    def config_global(*args):
        batch_size = 128
        lp = 3  # training launch power (dBm)
        save_checkpoint = True
        comment = f'TNN_sim_{args[1]}'  # adjust between CVTNN/TNN variants
        constellations = util.CONST_16QAM
        dataset_dir = os.path.join(BASE_DIR, 'data/dataset')
        fit_data_amount = 131072
        variable_names_in_mat = args[0]  # variable names to load from the .mat dataset
        decision_directed_flag = True  # True = unsupervised (decision-directed); False = supervised
        eval_block_size = 2048
        complex_value_model = False  # adjust between CVTNN (True) / TNN (False)
        read_mat_with_transpose = True
        del args
        return locals()

    def single_case():
        '''Build configs, optionally train, then evaluate both polarizations
        for each lp in lp_list; returns (Q_list, lp_list, train_lp).'''
        # --- x polarization configuration ---
        trainer_kwargs = config_trainer(train_flag)
        global_config_x = config_global(('dataset', 'prbsx', 'prbsy'), "x")
        global_config_x['experiment_name'] = experiment_name
        model_config = config_model()
        which_pol = 0
        plm_kwargs_x = config_plm(model_config, which_pol)
        train_behavior_dict_x = {}
        train_behavior_dict_x['trainer_kwargs'] = trainer_kwargs
        train_behavior_dict_x['global_config'] = global_config_x
        train_behavior_dict_x['plm_kwargs'] = plm_kwargs_x
        train_lp = global_config_x['lp']

        # --- y polarization configuration ---
        trainer_kwargs = config_trainer(train_flag)
        global_config_y = config_global(('dataset', 'prbsx', 'prbsy'), "y")
        global_config_y['experiment_name'] = experiment_name
        model_config = config_model()
        which_pol = 1
        plm_kwargs_y = config_plm(model_config, which_pol)
        train_behavior_dict_y = {}
        train_behavior_dict_y['trainer_kwargs'] = trainer_kwargs
        train_behavior_dict_y['global_config'] = global_config_y
        train_behavior_dict_y['plm_kwargs'] = plm_kwargs_y
        train_lp = global_config_y['lp']

        # Models stay None when not training; eval_model presumably loads a
        # saved checkpoint in that case — TODO confirm.
        if train_flag:
            model_x = train_model(**train_behavior_dict_x)
            model_y = train_model(**train_behavior_dict_y)
        else:
            model_x = None
            model_y = None

        # lp_list = range(-2, 5)
        lp_list = [2]
        global_config_x['train_lp'] = train_lp
        global_config_y['train_lp'] = train_lp

        use_BO = False
        eval_le = False
        Q_list = []
        for lp in lp_list:
            global_config_x['lp'] = lp
            global_config_x['use_BO'] = use_BO
            # NOTE(review): only the x polarization registers the constellation
            # callback — confirm that skipping y here is intentional.
            global_config_x['on_eval_end_callback'] = plot_constellation_hist  # plot a constellation histogram
            eval_behavior_dict_x = {}
            eval_behavior_dict_x['trainer_kwargs'] = trainer_kwargs
            eval_behavior_dict_x['global_config'] = global_config_x
            eval_behavior_dict_x['plm_kwargs'] = plm_kwargs_x
            eval_behavior_dict_x['eval_le'] = eval_le
            result_x = eval_model(**eval_behavior_dict_x, model=model_x)

            global_config_y['lp'] = lp
            global_config_y['use_BO'] = use_BO
            eval_behavior_dict_y = {}
            eval_behavior_dict_y['trainer_kwargs'] = trainer_kwargs
            eval_behavior_dict_y['global_config'] = global_config_y
            eval_behavior_dict_y['plm_kwargs'] = plm_kwargs_y
            eval_behavior_dict_y['eval_le'] = eval_le
            result_y = eval_model(**eval_behavior_dict_y, model=model_y)

            # Q factor averaged over the two polarizations.
            Q = (result_x[0] + result_y[0]) / 2
            Q_list.append(Q)

            print(f'lp: {lp}, Q: {Q}')
        return Q_list, lp_list, train_lp

    Q_list, lp_list, train_lp = single_case()
    # if train_flag:
    #     with open(os.path.join(BASE_DIR, f'result/{experiment_name}/Q_results_vary_lp_trained_by_{train_lp}_dBm.csv'), 'w') as f:
    #         csv_writer = csv.writer(f)
    #         csv_writer.writerow(['lp', 'Q'])
    #         for indx, lp in enumerate(lp_list):
    #             csv_writer.writerow([lp, Q_list[indx]])
    #         print(f'save results at \"{experiment_name}\"')

    # Dump Q values one per line for easy copy-paste.
    for Q in Q_list:
        print(Q)


def CVTNN_simulation_lp_vary_DR_UL(train_flag):
    '''Complex-valued TNN (CVTNN) simulation, decision-directed (unsupervised).

    When train_flag is True, trains one model per polarization at the launch
    power fixed in config_global, evaluates over lp_list, and writes the
    per-lp Q factors (averaged over both polarizations) to a CSV file.
    '''
    # The experiment name is this function's own name (frame introspection).
    frame = currentframe()
    experiment_name = getframeinfo(frame).function
    print(f'experiment name: {experiment_name}')

    # NOTE: each config_* helper returns locals() — local variable names become
    # the kwargs keys; `del args` removes the args tuple from that dict.
    def config_trainer(*args):
        accelerator = 'gpu'
        max_epochs = 18 * args[0]  # args[0] is train_flag: 0 epochs when eval-only
        check_val_every_n_epoch = max_epochs + 1  # > max_epochs, validation never runs
        num_sanity_val_steps = 0
        del args
        return locals()

    def config_model():
        hidden_sizes = [1, 5]  # presumably hidden-layer widths — confirm in model code
        use_batch_normalization_flag = False
        return locals()

    def config_plm(*args):
        model_kwargs = args[0]  # dict produced by config_model()
        lr = 1e-3
        time_win_size = 185
        which_pol = args[1]  # 0 = x polarization, 1 = y polarization
        optimizer = 'Adam'
        group_triplet = True
        err_function = 'MSE'
        weight_decay = 0
        del args
        return locals()

    def config_global(*args):
        batch_size = 128
        lp = 2  # training launch power (dBm)
        save_checkpoint = True
        comment = f'CVTNN_sim_{args[1]}'  # adjust between CVTNN/TNN variants
        constellations = util.CONST_16QAM
        dataset_dir = os.path.join(BASE_DIR, 'data/dataset')
        fit_data_amount = 131072
        variable_names_in_mat = args[0]  # variable names to load from the .mat dataset
        decision_directed_flag = True  # True = unsupervised (decision-directed); False = supervised
        eval_block_size = 2048
        complex_value_model = True  # adjust between CVTNN (True) / TNN (False)
        read_mat_with_transpose = True
        del args
        return locals()

    def single_case():
        '''Build configs, optionally train, then evaluate both polarizations
        for each lp in lp_list; returns (Q_list, lp_list, train_lp).'''
        # --- x polarization configuration ---
        trainer_kwargs = config_trainer(train_flag)
        global_config_x = config_global(('dataset', 'prbsx', 'prbsy'), "x")
        global_config_x['experiment_name'] = experiment_name
        model_config = config_model()
        which_pol = 0
        plm_kwargs_x = config_plm(model_config, which_pol)
        train_behavior_dict_x = {}
        train_behavior_dict_x['trainer_kwargs'] = trainer_kwargs
        train_behavior_dict_x['global_config'] = global_config_x
        train_behavior_dict_x['plm_kwargs'] = plm_kwargs_x
        train_lp = global_config_x['lp']

        # --- y polarization configuration ---
        trainer_kwargs = config_trainer(train_flag)
        global_config_y = config_global(('dataset', 'prbsx', 'prbsy'), "y")
        global_config_y['experiment_name'] = experiment_name
        model_config = config_model()
        which_pol = 1
        plm_kwargs_y = config_plm(model_config, which_pol)
        train_behavior_dict_y = {}
        train_behavior_dict_y['trainer_kwargs'] = trainer_kwargs
        train_behavior_dict_y['global_config'] = global_config_y
        train_behavior_dict_y['plm_kwargs'] = plm_kwargs_y
        train_lp = global_config_y['lp']

        # Models stay None when not training; eval_model presumably loads a
        # saved checkpoint in that case — TODO confirm.
        if train_flag:
            model_x = train_model(**train_behavior_dict_x)
            model_y = train_model(**train_behavior_dict_y)
        else:
            model_x = None
            model_y = None

        # Evaluate across a sweep of launch powers.
        lp_list = range(-2, 5)
        global_config_x['train_lp'] = train_lp
        global_config_y['train_lp'] = train_lp

        use_BO = False
        eval_le = False
        Q_list = []
        for lp in lp_list:
            global_config_x['lp'] = lp
            global_config_x['use_BO'] = use_BO
            eval_behavior_dict_x = {}
            eval_behavior_dict_x['trainer_kwargs'] = trainer_kwargs
            eval_behavior_dict_x['global_config'] = global_config_x
            eval_behavior_dict_x['plm_kwargs'] = plm_kwargs_x
            eval_behavior_dict_x['eval_le'] = eval_le
            result_x = eval_model(**eval_behavior_dict_x, model=model_x)

            global_config_y['lp'] = lp
            global_config_y['use_BO'] = use_BO
            eval_behavior_dict_y = {}
            eval_behavior_dict_y['trainer_kwargs'] = trainer_kwargs
            eval_behavior_dict_y['global_config'] = global_config_y
            eval_behavior_dict_y['plm_kwargs'] = plm_kwargs_y
            eval_behavior_dict_y['eval_le'] = eval_le
            result_y = eval_model(**eval_behavior_dict_y, model=model_y)

            # Q factor averaged over the two polarizations.
            Q = (result_x[0] + result_y[0]) / 2
            Q_list.append(Q)

            print(f'lp: {lp}, Q: {Q}')
        return Q_list, lp_list, train_lp

    Q_list, lp_list, train_lp = single_case()
    # Persist results only when this run actually trained the models.
    if train_flag:
        with open(os.path.join(BASE_DIR, f'result/{experiment_name}/Q_results_vary_lp_trained_by_{train_lp}_dBm.csv'),
                  'w') as f:
            csv_writer = csv.writer(f)
            csv_writer.writerow(['lp', 'Q'])
            for indx, lp in enumerate(lp_list):
                csv_writer.writerow([lp, Q_list[indx]])
            print(f'save results at \"{experiment_name}\"')

    # Dump Q values one per line for easy copy-paste.
    for Q in Q_list:
        print(Q)


def TNN_simulation_lp_vary_data_aided_DR(train_flag):
    '''Real-valued TNN simulation, data-aided (supervised) training.

    When train_flag is True, trains one model per polarization at the launch
    power fixed in config_global; evaluates over lp_list with use_BO enabled
    and reports the per-lp Q factor averaged across both polarizations.
    '''
    # The experiment name is this function's own name (frame introspection).
    frame = currentframe()
    experiment_name = getframeinfo(frame).function
    print(f'experiment name: {experiment_name}')

    # NOTE: each config_* helper returns locals() — local variable names become
    # the kwargs keys; `del args` removes the args tuple from that dict.
    def config_trainer(*args):
        accelerator = 'gpu'
        max_epochs = 18 * args[0]  # args[0] is train_flag: 0 epochs when eval-only
        check_val_every_n_epoch = max_epochs + 1  # > max_epochs, validation never runs
        num_sanity_val_steps = 0
        del args
        return locals()

    def config_model():
        hidden_sizes = [2, 10]  # presumably hidden-layer widths — confirm in model code
        use_batch_normalization_flag = False
        return locals()

    def config_plm(*args):
        model_kwargs = args[0]  # dict produced by config_model()
        lr = 1e-3
        time_win_size = 185
        which_pol = args[1]  # 0 = x polarization, 1 = y polarization
        optimizer = 'Adam'
        group_triplet = True
        err_function = 'MSE'
        weight_decay = 0
        del args
        return locals()

    def config_global(*args):
        batch_size = 128
        lp = 3  # training launch power (dBm)
        save_checkpoint = True
        comment = f'TNN_sim_{args[1]}'  # adjust between CVTNN/TNN variants
        constellations = util.CONST_16QAM
        dataset_dir = os.path.join(BASE_DIR, 'data/dataset')
        fit_data_amount = 131072
        variable_names_in_mat = args[0]  # variable names to load from the .mat dataset
        decision_directed_flag = False  # True = unsupervised (decision-directed); False = supervised
        eval_block_size = 2048
        complex_value_model = False  # adjust between CVTNN (True) / TNN (False)
        read_mat_with_transpose = True
        del args
        return locals()

    def single_case():
        '''Build configs, optionally train, then evaluate both polarizations
        for each lp in lp_list; returns (Q_list, lp_list, train_lp).'''
        # --- x polarization configuration ---
        trainer_kwargs = config_trainer(train_flag)
        global_config_x = config_global(('dataset', 'prbsx', 'prbsy'), "x")
        global_config_x['experiment_name'] = experiment_name
        model_config = config_model()
        which_pol = 0
        plm_kwargs_x = config_plm(model_config, which_pol)
        train_behavior_dict_x = {}
        train_behavior_dict_x['trainer_kwargs'] = trainer_kwargs
        train_behavior_dict_x['global_config'] = global_config_x
        train_behavior_dict_x['plm_kwargs'] = plm_kwargs_x
        train_lp = global_config_x['lp']

        # --- y polarization configuration ---
        trainer_kwargs = config_trainer(train_flag)
        global_config_y = config_global(('dataset', 'prbsx', 'prbsy'), "y")
        global_config_y['experiment_name'] = experiment_name
        model_config = config_model()
        which_pol = 1
        plm_kwargs_y = config_plm(model_config, which_pol)
        train_behavior_dict_y = {}
        train_behavior_dict_y['trainer_kwargs'] = trainer_kwargs
        train_behavior_dict_y['global_config'] = global_config_y
        train_behavior_dict_y['plm_kwargs'] = plm_kwargs_y
        train_lp = global_config_y['lp']

        # Models stay None when not training; eval_model presumably loads a
        # saved checkpoint in that case — TODO confirm.
        if train_flag:
            model_x = train_model(**train_behavior_dict_x)
            model_y = train_model(**train_behavior_dict_y)
        else:
            model_x = None
            model_y = None

        # Evaluate across a sweep of launch powers.
        lp_list = range(-2, 5)
        global_config_x['train_lp'] = train_lp
        global_config_y['train_lp'] = train_lp

        # use_BO is True only in this experiment variant.
        use_BO = True
        eval_le = False
        Q_list = []
        for lp in lp_list:
            global_config_x['lp'] = lp
            global_config_x['use_BO'] = use_BO
            eval_behavior_dict_x = {}
            eval_behavior_dict_x['trainer_kwargs'] = trainer_kwargs
            eval_behavior_dict_x['global_config'] = global_config_x
            eval_behavior_dict_x['plm_kwargs'] = plm_kwargs_x
            eval_behavior_dict_x['eval_le'] = eval_le
            result_x = eval_model(**eval_behavior_dict_x, model=model_x)

            global_config_y['lp'] = lp
            global_config_y['use_BO'] = use_BO
            eval_behavior_dict_y = {}
            eval_behavior_dict_y['trainer_kwargs'] = trainer_kwargs
            eval_behavior_dict_y['global_config'] = global_config_y
            eval_behavior_dict_y['plm_kwargs'] = plm_kwargs_y
            eval_behavior_dict_y['eval_le'] = eval_le
            result_y = eval_model(**eval_behavior_dict_y, model=model_y)

            # Q factor averaged over the two polarizations.
            Q = (result_x[0] + result_y[0]) / 2
            Q_list.append(Q)

            print(f'lp: {lp}, Q: {Q}')
        return Q_list, lp_list, train_lp

    Q_list, lp_list, train_lp = single_case()
    # NOTE(review): sibling experiments write this CSV when train_flag is True,
    # but here it is written only on eval-only runs — confirm the inversion
    # (`if not train_flag`) is intentional.
    if not train_flag:
        with open(os.path.join(BASE_DIR, f'result/{experiment_name}/Q_results_vary_lp_trained_by_{train_lp}_dBm.csv'),
                  'w') as f:
            csv_writer = csv.writer(f)
            csv_writer.writerow(['lp', 'Q'])
            for indx, lp in enumerate(lp_list):
                csv_writer.writerow([lp, Q_list[indx]])
            print(f'save results at \"{experiment_name}\"')

    # Dump Q values one per line for easy copy-paste.
    for Q in Q_list:
        print(Q)


def TNN_simulation_lp_vary_data_aided(train_flag):
    '''Real-valued TNN simulation, data-aided (supervised) training.

    When train_flag is True, trains one model per polarization at the launch
    power fixed in config_global; evaluates at the single launch power in
    lp_list and prints the Q factor averaged across both polarizations.
    '''
    # The experiment name is this function's own name (frame introspection).
    frame = currentframe()
    experiment_name = getframeinfo(frame).function
    print(f'experiment name: {experiment_name}')

    # NOTE: each config_* helper returns locals() — local variable names become
    # the kwargs keys; `del args` removes the args tuple from that dict.
    def config_trainer(*args):
        accelerator = 'gpu'
        max_epochs = 18 * args[0]  # args[0] is train_flag: 0 epochs when eval-only
        check_val_every_n_epoch = max_epochs + 1  # > max_epochs, validation never runs
        num_sanity_val_steps = 0
        del args
        return locals()

    def config_model():
        hidden_sizes = [2, 10]  # presumably hidden-layer widths — confirm in model code
        use_batch_normalization_flag = False
        return locals()

    def config_plm(*args):
        model_kwargs = args[0]  # dict produced by config_model()
        lr = 1e-3
        time_win_size = 185
        which_pol = args[1]  # 0 = x polarization, 1 = y polarization
        optimizer = 'Adam'
        group_triplet = False
        err_function = 'MSE'
        weight_decay = 0
        del args
        return locals()

    def config_global(*args):
        batch_size = 128
        lp = 2  # training launch power (dBm)
        save_checkpoint = False
        comment = f'TNN_sim_{args[1]}'  # adjust between CVTNN/TNN variants
        constellations = util.CONST_16QAM
        dataset_dir = os.path.join(BASE_DIR, 'data/dataset')
        fit_data_amount = 65536
        variable_names_in_mat = args[0]  # variable names to load from the .mat dataset
        decision_directed_flag = False  # True = unsupervised (decision-directed); False = supervised
        print_q_factor_flag = False
        eval_block_size = 2048
        complex_value_model = False  # adjust between CVTNN (True) / TNN (False)
        read_mat_with_transpose = True
        del args
        return locals()

    def single_case():
        '''Build configs, optionally train, then evaluate both polarizations
        for each lp in lp_list; returns (Q_list, lp_list, train_lp).'''
        # --- x polarization configuration ---
        trainer_kwargs = config_trainer(train_flag)
        global_config_x = config_global(('dataset', 'prbsx', 'prbsy'), "x")
        global_config_x['experiment_name'] = experiment_name
        model_config = config_model()
        which_pol = 0
        plm_kwargs_x = config_plm(model_config, which_pol)
        train_behavior_dict_x = {}
        train_behavior_dict_x['trainer_kwargs'] = trainer_kwargs
        train_behavior_dict_x['global_config'] = global_config_x
        train_behavior_dict_x['plm_kwargs'] = plm_kwargs_x
        train_lp = global_config_x['lp']

        # --- y polarization configuration ---
        trainer_kwargs = config_trainer(train_flag)
        global_config_y = config_global(('dataset', 'prbsx', 'prbsy'), "y")
        global_config_y['experiment_name'] = experiment_name
        model_config = config_model()
        which_pol = 1
        plm_kwargs_y = config_plm(model_config, which_pol)
        train_behavior_dict_y = {}
        train_behavior_dict_y['trainer_kwargs'] = trainer_kwargs
        train_behavior_dict_y['global_config'] = global_config_y
        train_behavior_dict_y['plm_kwargs'] = plm_kwargs_y
        train_lp = global_config_y['lp']

        # Models stay None when not training; eval_model presumably loads a
        # saved checkpoint in that case — TODO confirm.
        if train_flag:
            model_x = train_model(**train_behavior_dict_x)
            model_y = train_model(**train_behavior_dict_y)
        else:
            model_x = None
            model_y = None

        # lp_list = range(-2, 5)
        lp_list = [2]
        global_config_x['train_lp'] = train_lp
        global_config_y['train_lp'] = train_lp

        use_BO = False
        eval_le = False
        Q_list = []
        for lp in lp_list:
            global_config_x['lp'] = lp
            global_config_x['use_BO'] = use_BO
            eval_behavior_dict_x = {}
            eval_behavior_dict_x['trainer_kwargs'] = trainer_kwargs
            eval_behavior_dict_x['global_config'] = global_config_x
            eval_behavior_dict_x['plm_kwargs'] = plm_kwargs_x
            eval_behavior_dict_x['eval_le'] = eval_le
            result_x = eval_model(**eval_behavior_dict_x, model=model_x)

            global_config_y['lp'] = lp
            global_config_y['use_BO'] = use_BO
            eval_behavior_dict_y = {}
            eval_behavior_dict_y['trainer_kwargs'] = trainer_kwargs
            eval_behavior_dict_y['global_config'] = global_config_y
            eval_behavior_dict_y['plm_kwargs'] = plm_kwargs_y
            eval_behavior_dict_y['eval_le'] = eval_le
            result_y = eval_model(**eval_behavior_dict_y, model=model_y)

            # Q factor averaged over the two polarizations.
            Q = (result_x[0] + result_y[0]) / 2
            Q_list.append(Q)

            print(f'lp: {lp}, Q: {Q}')
        return Q_list, lp_list, train_lp

    Q_list, lp_list, train_lp = single_case()
    # if not train_flag:
    #     with open(os.path.join(BASE_DIR, f'result/{experiment_name}/Q_results_vary_lp_trained_by_{train_lp}_dBm.csv'), 'w') as f:
    #         csv_writer = csv.writer(f)
    #         csv_writer.writerow(['lp', 'Q'])
    #         for indx, lp in enumerate(lp_list):
    #             csv_writer.writerow([lp, Q_list[indx]])
    #         print(f'save results at \"{experiment_name}\"')

    # Dump Q values one per line for easy copy-paste.
    for Q in Q_list:
        print(Q)


def CVTNN_simulation_lp_vary_data_aided(train_flag):
    '''Complex-valued TNN (CVTNN) simulation, data-aided (supervised) training.

    When train_flag is True, trains one model per polarization at the launch
    power fixed in config_global; evaluates over lp_list and prints the
    per-lp Q factor averaged across both polarizations.
    '''
    # The experiment name is this function's own name (frame introspection).
    frame = currentframe()
    experiment_name = getframeinfo(frame).function
    print(f'experiment name: {experiment_name}')

    # NOTE: each config_* helper returns locals() — local variable names become
    # the kwargs keys; `del args` removes the args tuple from that dict.
    def config_trainer(*args):
        accelerator = 'gpu'
        max_epochs = 18 * args[0]  # args[0] is train_flag: 0 epochs when eval-only
        check_val_every_n_epoch = max_epochs + 1  # > max_epochs, validation never runs
        num_sanity_val_steps = 0
        del args
        return locals()

    def config_model():
        hidden_sizes = [1, 5]  # presumably hidden-layer widths — confirm in model code
        use_batch_normalization_flag = False
        return locals()

    def config_plm(*args):
        model_kwargs = args[0]  # dict produced by config_model()
        lr = 1e-3
        time_win_size = 185
        which_pol = args[1]  # 0 = x polarization, 1 = y polarization
        optimizer = 'Adam'
        group_triplet = False
        err_function = 'MSE'
        weight_decay = 0
        del args
        return locals()

    def config_global(*args):
        batch_size = 128
        lp = 2  # training launch power (dBm)
        save_checkpoint = False
        comment = f'CVTNN_sim_{args[1]}'  # adjust between CVTNN/TNN variants
        constellations = util.CONST_16QAM
        dataset_dir = os.path.join(BASE_DIR, 'data/dataset')
        fit_data_amount = 65536
        variable_names_in_mat = args[0]  # variable names to load from the .mat dataset
        decision_directed_flag = False  # True = unsupervised (decision-directed); False = supervised
        eval_block_size = 2048
        complex_value_model = True  # adjust between CVTNN (True) / TNN (False)
        read_mat_with_transpose = True
        del args
        return locals()

    def single_case():
        '''Build configs, optionally train, then evaluate both polarizations
        for each lp in lp_list; returns (Q_list, lp_list, train_lp).'''
        # --- x polarization configuration ---
        trainer_kwargs = config_trainer(train_flag)
        global_config_x = config_global(('dataset', 'prbsx', 'prbsy'), "x")
        global_config_x['experiment_name'] = experiment_name
        model_config = config_model()
        which_pol = 0
        plm_kwargs_x = config_plm(model_config, which_pol)
        train_behavior_dict_x = {}
        train_behavior_dict_x['trainer_kwargs'] = trainer_kwargs
        train_behavior_dict_x['global_config'] = global_config_x
        train_behavior_dict_x['plm_kwargs'] = plm_kwargs_x
        train_lp = global_config_x['lp']

        # --- y polarization configuration ---
        trainer_kwargs = config_trainer(train_flag)
        global_config_y = config_global(('dataset', 'prbsx', 'prbsy'), "y")
        global_config_y['experiment_name'] = experiment_name
        model_config = config_model()
        which_pol = 1
        plm_kwargs_y = config_plm(model_config, which_pol)
        train_behavior_dict_y = {}
        train_behavior_dict_y['trainer_kwargs'] = trainer_kwargs
        train_behavior_dict_y['global_config'] = global_config_y
        train_behavior_dict_y['plm_kwargs'] = plm_kwargs_y
        train_lp = global_config_y['lp']

        # Models stay None when not training; eval_model presumably loads a
        # saved checkpoint in that case — TODO confirm.
        if train_flag:
            model_x = train_model(**train_behavior_dict_x)
            model_y = train_model(**train_behavior_dict_y)
        else:
            model_x = None
            model_y = None

        # Evaluate across a sweep of launch powers.
        lp_list = range(-2, 5)
        global_config_x['train_lp'] = train_lp
        global_config_y['train_lp'] = train_lp

        use_BO = False
        eval_le = False
        Q_list = []
        for lp in lp_list:
            global_config_x['lp'] = lp
            global_config_x['use_BO'] = use_BO
            eval_behavior_dict_x = {}
            eval_behavior_dict_x['trainer_kwargs'] = trainer_kwargs
            eval_behavior_dict_x['global_config'] = global_config_x
            eval_behavior_dict_x['plm_kwargs'] = plm_kwargs_x
            eval_behavior_dict_x['eval_le'] = eval_le
            result_x = eval_model(**eval_behavior_dict_x, model=model_x)

            global_config_y['lp'] = lp
            global_config_y['use_BO'] = use_BO
            eval_behavior_dict_y = {}
            eval_behavior_dict_y['trainer_kwargs'] = trainer_kwargs
            eval_behavior_dict_y['global_config'] = global_config_y
            eval_behavior_dict_y['plm_kwargs'] = plm_kwargs_y
            eval_behavior_dict_y['eval_le'] = eval_le
            result_y = eval_model(**eval_behavior_dict_y, model=model_y)

            # Q factor averaged over the two polarizations.
            Q = (result_x[0] + result_y[0]) / 2
            Q_list.append(Q)

            print(f'lp: {lp}, Q: {Q}')
        return Q_list, lp_list, train_lp

    Q_list, lp_list, train_lp = single_case()
    # if train_flag:
    #     with open(os.path.join(BASE_DIR, f'result/{experiment_name}/Q_results_vary_lp_trained_by_{train_lp}_dBm.csv'),
    #               'w') as f:
    #         csv_writer = csv.writer(f)
    #         csv_writer.writerow(['lp', 'Q'])
    #         for indx, lp in enumerate(lp_list):
    #             csv_writer.writerow([lp, Q_list[indx]])
    #         print(f'save results at \"{experiment_name}\"')

    # Dump Q values one per line for easy copy-paste.
    for Q in Q_list:
        print(Q)


def CVTNN_simulation_lp_vary_data_aided_DR(train_flag):
    '''Complex-valued TNN (CVTNN) simulation, data-aided (supervised) training.

    When train_flag is True, trains one model per polarization at the launch
    power fixed in config_global, evaluates over lp_list, and writes the
    per-lp Q factors (averaged over both polarizations) to a CSV file.
    '''
    # The experiment name is this function's own name (frame introspection).
    frame = currentframe()
    experiment_name = getframeinfo(frame).function
    print(f'experiment name: {experiment_name}')

    # NOTE: each config_* helper returns locals() — local variable names become
    # the kwargs keys; `del args` removes the args tuple from that dict.
    def config_trainer(*args):
        accelerator = 'gpu'
        max_epochs = 10 * args[0]  # args[0] is train_flag: 0 epochs when eval-only
        check_val_every_n_epoch = max_epochs + 1  # > max_epochs, validation never runs
        num_sanity_val_steps = 0
        del args
        return locals()

    def config_model():
        hidden_sizes = [1, 5]  # presumably hidden-layer widths — confirm in model code
        use_batch_normalization_flag = False
        return locals()

    def config_plm(*args):
        model_kwargs = args[0]  # dict produced by config_model()
        lr = 1e-3
        time_win_size = 185
        which_pol = args[1]  # 0 = x polarization, 1 = y polarization
        optimizer = 'Adam'
        group_triplet = True
        err_function = 'MSE'
        weight_decay = 0
        del args
        return locals()

    def config_global(*args):
        batch_size = 128
        lp = 2  # training launch power (dBm)
        save_checkpoint = True
        comment = f'CVTNN_sim_{args[1]}'  # adjust between CVTNN/TNN variants
        constellations = util.CONST_16QAM
        dataset_dir = os.path.join(BASE_DIR, 'data/dataset')
        fit_data_amount = 131072
        variable_names_in_mat = args[0]  # variable names to load from the .mat dataset
        decision_directed_flag = False  # True = unsupervised (decision-directed); False = supervised
        eval_block_size = 2048
        complex_value_model = True  # adjust between CVTNN (True) / TNN (False)
        read_mat_with_transpose = True
        del args
        return locals()

    def single_case():
        '''Build configs, optionally train, then evaluate both polarizations
        for each lp in lp_list; returns (Q_list, lp_list, train_lp).'''
        # --- x polarization configuration ---
        trainer_kwargs = config_trainer(train_flag)
        global_config_x = config_global(('dataset', 'prbsx', 'prbsy'), "x")
        global_config_x['experiment_name'] = experiment_name
        model_config = config_model()
        which_pol = 0
        plm_kwargs_x = config_plm(model_config, which_pol)
        train_behavior_dict_x = {}
        train_behavior_dict_x['trainer_kwargs'] = trainer_kwargs
        train_behavior_dict_x['global_config'] = global_config_x
        train_behavior_dict_x['plm_kwargs'] = plm_kwargs_x
        train_lp = global_config_x['lp']

        # --- y polarization configuration ---
        trainer_kwargs = config_trainer(train_flag)
        global_config_y = config_global(('dataset', 'prbsx', 'prbsy'), "y")
        global_config_y['experiment_name'] = experiment_name
        model_config = config_model()
        which_pol = 1
        plm_kwargs_y = config_plm(model_config, which_pol)
        train_behavior_dict_y = {}
        train_behavior_dict_y['trainer_kwargs'] = trainer_kwargs
        train_behavior_dict_y['global_config'] = global_config_y
        train_behavior_dict_y['plm_kwargs'] = plm_kwargs_y
        train_lp = global_config_y['lp']

        # Models stay None when not training; eval_model presumably loads a
        # saved checkpoint in that case — TODO confirm.
        if train_flag:
            model_x = train_model(**train_behavior_dict_x)
            model_y = train_model(**train_behavior_dict_y)
        else:
            model_x = None
            model_y = None

        # Evaluate across a sweep of launch powers.
        lp_list = range(-2, 5)
        global_config_x['train_lp'] = train_lp
        global_config_y['train_lp'] = train_lp

        use_BO = False
        eval_le = False
        Q_list = []
        for lp in lp_list:
            global_config_x['lp'] = lp
            global_config_x['use_BO'] = use_BO
            eval_behavior_dict_x = {}
            eval_behavior_dict_x['trainer_kwargs'] = trainer_kwargs
            eval_behavior_dict_x['global_config'] = global_config_x
            eval_behavior_dict_x['plm_kwargs'] = plm_kwargs_x
            eval_behavior_dict_x['eval_le'] = eval_le
            result_x = eval_model(**eval_behavior_dict_x, model=model_x)

            global_config_y['lp'] = lp
            global_config_y['use_BO'] = use_BO
            eval_behavior_dict_y = {}
            eval_behavior_dict_y['trainer_kwargs'] = trainer_kwargs
            eval_behavior_dict_y['global_config'] = global_config_y
            eval_behavior_dict_y['plm_kwargs'] = plm_kwargs_y
            eval_behavior_dict_y['eval_le'] = eval_le
            result_y = eval_model(**eval_behavior_dict_y, model=model_y)

            # Q factor averaged over the two polarizations.
            Q = (result_x[0] + result_y[0]) / 2
            Q_list.append(Q)

            print(f'lp: {lp}, Q: {Q}')
        return Q_list, lp_list, train_lp

    Q_list, lp_list, train_lp = single_case()
    # Persist results only when this run actually trained the models.
    if train_flag:
        with open(os.path.join(BASE_DIR, f'result/{experiment_name}/Q_results_vary_lp_trained_by_{train_lp}_dBm.csv'),
                  'w') as f:
            csv_writer = csv.writer(f)
            csv_writer.writerow(['lp', 'Q'])
            for indx, lp in enumerate(lp_list):
                csv_writer.writerow([lp, Q_list[indx]])
            print(f'save results at \"{experiment_name}\"')

    # Dump Q values one per line for easy copy-paste.
    for Q in Q_list:
        print(Q)


def TNN_simulation_win_size_vary_data_aided(train_flag):
    '''Real-valued TNN simulation, data-aided (supervised), sweeping the
    input time-window size instead of launch power.

    For each window size, trains (when train_flag is True) one model per
    polarization and evaluates at the training launch power; writes the
    per-window Q factors (averaged over both polarizations) to a CSV file.
    '''
    # The experiment name is this function's own name (frame introspection).
    frame = currentframe()
    experiment_name = getframeinfo(frame).function
    print(f'experiment name: {experiment_name}')

    # NOTE: each config_* helper returns locals() — local variable names become
    # the kwargs keys; `del args` removes the args tuple from that dict.
    def config_trainer(*args):
        accelerator = 'gpu'
        max_epochs = 15 * args[0]  # args[0] is train_flag: 0 epochs when eval-only
        check_val_every_n_epoch = max_epochs + 1  # > max_epochs, validation never runs
        num_sanity_val_steps = 0
        del args
        return locals()

    def config_model():
        hidden_sizes = [2, 10]  # presumably hidden-layer widths — confirm in model code
        use_batch_normalization_flag = False
        return locals()

    def config_plm(*args):
        model_kwargs = args[0]  # dict produced by config_model()
        lr = 1e-3
        time_win_size = args[2]  # swept window size, passed per case
        which_pol = args[1]  # 0 = x polarization, 1 = y polarization
        optimizer = 'Adam'
        group_triplet = False
        err_function = 'MSE'
        weight_decay = 0
        del args
        return locals()

    def config_global(*args):
        batch_size = 128
        lp = 2  # training launch power (dBm)
        save_checkpoint = False
        comment = f'TNN_sim_{args[1]}_win_size_{args[2]}'  # adjust between CVTNN/TNN variants
        constellations = util.CONST_16QAM
        dataset_dir = os.path.join(BASE_DIR, 'data/dataset')
        fit_data_amount = 131072
        variable_names_in_mat = args[0]  # variable names to load from the .mat dataset
        decision_directed_flag = False  # True = unsupervised (decision-directed); False = supervised
        print_q_factor_flag = False
        eval_block_size = 2048
        complex_value_model = False  # adjust between CVTNN (True) / TNN (False)
        read_mat_with_transpose = True
        del args
        return locals()

    def single_case():
        '''For each window size, build configs, optionally train, and evaluate
        both polarizations at the training launch power; returns
        (q_list, time_win_size_list, train_lp).'''
        q_list = []
        time_win_size_list = [185, 165, 145, 125, 105, 85, 65]
        for time_win in time_win_size_list:
            # --- x polarization configuration ---
            trainer_kwargs = config_trainer(train_flag)
            global_config_x = config_global(('dataset', 'prbsx', 'prbsy'), "x", time_win)
            global_config_x['experiment_name'] = experiment_name
            model_config = config_model()
            which_pol = 0
            plm_kwargs_x = config_plm(model_config, which_pol, time_win)
            train_behavior_dict_x = {}
            train_behavior_dict_x['trainer_kwargs'] = trainer_kwargs
            train_behavior_dict_x['global_config'] = global_config_x
            train_behavior_dict_x['plm_kwargs'] = plm_kwargs_x
            train_lp = global_config_x['lp']

            # --- y polarization configuration ---
            trainer_kwargs = config_trainer(train_flag)
            global_config_y = config_global(('dataset', 'prbsx', 'prbsy'), "y", time_win)
            global_config_y['experiment_name'] = experiment_name
            model_config = config_model()
            which_pol = 1
            plm_kwargs_y = config_plm(model_config, which_pol, time_win)
            train_behavior_dict_y = {}
            train_behavior_dict_y['trainer_kwargs'] = trainer_kwargs
            train_behavior_dict_y['global_config'] = global_config_y
            train_behavior_dict_y['plm_kwargs'] = plm_kwargs_y
            train_lp = global_config_y['lp']

            # Models stay None when not training; eval_model presumably loads
            # a saved checkpoint in that case — TODO confirm.
            if train_flag:
                model_x = train_model(**train_behavior_dict_x)
                model_y = train_model(**train_behavior_dict_y)
            else:
                model_x = None
                model_y = None

            global_config_x['train_lp'] = train_lp
            global_config_y['train_lp'] = train_lp

            use_BO = False
            eval_le = False

            # Evaluation is done at the training launch power only.
            global_config_x['lp'] = train_lp
            global_config_x['use_BO'] = use_BO
            eval_behavior_dict_x = {}
            eval_behavior_dict_x['trainer_kwargs'] = trainer_kwargs
            eval_behavior_dict_x['global_config'] = global_config_x
            eval_behavior_dict_x['plm_kwargs'] = plm_kwargs_x
            eval_behavior_dict_x['eval_le'] = eval_le
            result_x = eval_model(**eval_behavior_dict_x, model=model_x)

            global_config_y['lp'] = train_lp
            global_config_y['use_BO'] = use_BO
            eval_behavior_dict_y = {}
            eval_behavior_dict_y['trainer_kwargs'] = trainer_kwargs
            eval_behavior_dict_y['global_config'] = global_config_y
            eval_behavior_dict_y['plm_kwargs'] = plm_kwargs_y
            eval_behavior_dict_y['eval_le'] = eval_le
            result_y = eval_model(**eval_behavior_dict_y, model=model_y)

            # Q factor averaged over the two polarizations.
            Q = (result_x[0] + result_y[0]) / 2
            q_list.append(Q)

            print(f'time_win_size: {time_win}, Q: {Q}')
        return q_list, time_win_size_list, train_lp

    Q_list, time_win_size_list, train_lp = single_case()
    # Persist results only when this run actually trained the models.
    if train_flag:
        with open(os.path.join(BASE_DIR,
                               f'result/{experiment_name}/Q_results_vary_win_size_trained_by_{train_lp}_dBm.csv'),
                  'w') as f:
            csv_writer = csv.writer(f)
            csv_writer.writerow(['win_size', 'Q'])
            for indx, time_win in enumerate(time_win_size_list):
                csv_writer.writerow([time_win, Q_list[indx]])
            print(f'save results at \"{experiment_name}\"')

    # Dump Q values one per line for easy copy-paste.
    for Q in Q_list:
        print(Q)


def TNN_simulation_input_Q_vary_data_aided_DR(train_flag):
    """Test robustness of the data-aided (supervised) TNN equalizer to noise.

    Sweeps the input SNR of AWGN-contaminated datasets at a fixed launch
    power (2 dBm), trains/evaluates one model per polarization per SNR point,
    and writes the per-SNR Q-factor improvement (Q minus the `le` baseline)
    to a CSV under ``result/<experiment_name>/``.

    Args:
        train_flag: when truthy, train a fresh model per SNR point (it also
            scales ``max_epochs``); when falsy, pass ``model=None`` to
            ``eval_model`` (presumably restoring from checkpoint — TODO confirm).
    """
    frame = currentframe()
    # the experiment is named after this very function
    experiment_name = getframeinfo(frame).function
    experiment_data_time = '20240103'
    print(f'experiment name: {experiment_name}_{experiment_data_time}')

    def config_trainer(*args):
        """Trainer kwargs; args = (train_flag,). Returns locals() as a dict."""
        accelerator = 'gpu'
        max_epochs = 10 * args[0]  # collapses to 0 epochs when train_flag is falsy
        check_val_every_n_epoch = max_epochs + 1  # effectively disables validation
        num_sanity_val_steps = 0
        del args  # keep the returned locals() restricted to config keys
        return locals()

    def config_model():
        """Network-architecture kwargs. Returns locals() as a dict."""
        hidden_sizes = [2, 10]
        use_batch_normalization_flag = False
        return locals()

    def config_plm(*args):
        """Lightning-module kwargs; args = (model_kwargs, which_pol)."""
        model_kwargs = args[0]
        lr = 1e-3
        time_win_size = 185
        which_pol = args[1]
        optimizer = 'Adam'
        group_triplet = True
        err_function = 'MSE'
        weight_decay = 0
        del args  # keep the returned locals() restricted to config keys
        return locals()

    def config_global(*args):
        """Experiment-wide kwargs; args = (variable_names_in_mat, comment_tag)."""
        batch_size = 128
        lp = 2  # launch power (dBm), fixed for this sweep
        save_checkpoint = True
        comment = f'TNN_sim_{args[1]}'  # adjust for CVTNN/TNN
        constellations = util.CONST_16QAM
        dataset_dir = os.path.join(BASE_DIR, 'data/dataset')
        fit_data_amount = 131072
        variable_names_in_mat = args[0]
        decision_directed_flag = False  # False => supervised (data-aided) learning
        eval_block_size = 2048
        complex_value_model = False  # adjust for CVTNN/TNN
        read_mat_with_transpose = True
        del args  # keep the returned locals() restricted to config keys
        return locals()

    def single_case():
        input_SNR_list = [5, 7, 9, 11, 13, 15, 17, 19]
        Q_list = []
        le_list = []
        for input_SNR in input_SNR_list:

            def build_pol(which_pol, pol_tag):
                """Build (trainer_kwargs, global_config, plm_kwargs) for one polarization."""
                trainer_kwargs = config_trainer(train_flag)
                global_config = config_global(('dataset', 'prbsx', 'prbsy'),
                                              f"{pol_tag}_input_snr_{input_SNR}")
                global_config['experiment_name'] = experiment_name
                # train and validate on the same AWGN-contaminated set
                global_config['training_set_path'] = os.path.join(
                    global_config['dataset_dir'], 'SNRVary',
                    f'trset_add_AWGN_by_SNR_{input_SNR}.mat')
                global_config['validation_set_path'] = global_config['training_set_path']
                plm_kwargs = config_plm(config_model(), which_pol)
                return trainer_kwargs, global_config, plm_kwargs

            trainer_x, global_config_x, plm_kwargs_x = build_pol(0, 'x')
            trainer_y, global_config_y, plm_kwargs_y = build_pol(1, 'y')
            train_lp = global_config_y['lp']  # identical for both polarizations

            if train_flag:
                model_x = train_model(trainer_kwargs=trainer_x,
                                      global_config=global_config_x,
                                      plm_kwargs=plm_kwargs_x)
                model_y = train_model(trainer_kwargs=trainer_y,
                                      global_config=global_config_y,
                                      plm_kwargs=plm_kwargs_y)
            else:
                model_x = None
                model_y = None

            def eval_pol(trainer_kwargs, global_config, plm_kwargs, model):
                """Evaluate one polarization on its own training set (eval_le enabled)."""
                global_config['train_lp'] = train_lp
                global_config['lp'] = train_lp
                global_config['use_BO'] = False
                global_config['eval_le'] = True
                global_config['test_set_path'] = global_config['training_set_path']
                return eval_model(trainer_kwargs=trainer_kwargs,
                                  global_config=global_config,
                                  plm_kwargs=plm_kwargs,
                                  model=model)

            result_x = eval_pol(trainer_x, global_config_x, plm_kwargs_x, model_x)
            result_y = eval_pol(trainer_y, global_config_y, plm_kwargs_y, model_y)

            # average both polarizations; result[0] is the Q factor, result[-1]
            # presumably the linear-equalizer baseline — verify against eval_model
            Q = (result_x[0] + result_y[0]) / 2
            Q_list.append(Q)
            le = (result_x[-1] + result_y[-1]) / 2
            le_list.append(le)

            print(f'Input Q : {input_SNR}, Q: {Q}')
        return Q_list, le_list, input_SNR_list

    Q_list, le_list, input_SNR_list = single_case()
    save_result_in_CSV = True
    if save_result_in_CSV:
        csv_path = os.path.join(
            BASE_DIR, f'result/{experiment_name}/Q_improvement_vs_input_Q_at_2_dBm.csv')
        # newline='' is required by the csv module; without it Windows inserts blank rows
        with open(csv_path, 'w', newline='') as f:
            csv_writer = csv.writer(f)
            csv_writer.writerow(['SNR', 'Q factor improvement'])
            for SNR, Q, le in zip(input_SNR_list, Q_list, le_list):
                csv_writer.writerow([SNR, Q - le])
            print(f'save results at \"{experiment_name}\"')

    for Q in Q_list:
        print(Q)


if __name__ == '__main__':
    # Experiment selection menu: uncomment exactly the run(s) you want.
    train_flag = True

    # Unsupervised learning, sweep over launch power, CVTNN/TNN-DR
    # TNN_simulation_lp_vary_DR_UL(train_flag)
    # CVTNN_simulation_lp_vary_DR_UL(train_flag)

    # Supervised (data-aided) learning, sweep over launch power, CVTNN/TNN
    # TNN_simulation_lp_vary_data_aided_DR(train_flag)
    # TNN_simulation_lp_vary_data_aided(train_flag)
    # CVTNN_simulation_lp_vary_data_aided(train_flag)
    # CVTNN_simulation_lp_vary_data_aided_DR(train_flag)

    # Sweep over the number of cross-correlated symbols
    # TNN_simulation_win_size_vary_data_aided(train_flag)

    # Contaminate the data with Gaussian noise at various SNRs to test
    # the algorithm's robustness to noise
    TNN_simulation_input_Q_vary_data_aided_DR(train_flag)