import math
import os
import numpy as np
import torch
import yaml
import scipy.io as scio
from pytorch_lightning.loggers import CSVLogger, TensorBoardLogger
import pytorch_lightning as pl
import models
from models import CVTNN_PLM, TNN_PLM
import util
from pytorch_lightning.callbacks import Callback, EarlyStopping

# Absolute directory containing this file; used to anchor result/ paths.
BASE_DIR = os.path.dirname(__file__)


def read_arrays_from_file(file_path, keys, transpose=True):
    """Load selected variables from a MATLAB ``.mat`` file.

    Args:
        file_path: Path to the ``.mat`` file to load.
        keys: Iterable of variable names to extract.
        transpose: If True (default), return each array transposed
            (datasets here are saved with samples along the first
            MATLAB dimension, so callers usually want the transpose).

    Returns:
        dict mapping each requested key to its (optionally transposed)
        numpy array.

    Raises:
        KeyError: if a requested key is absent from the file.
    """
    # BUG FIX: the original passed os.path.join(file_path, ) — a
    # single-argument join that is a no-op; load the path directly.
    data = scio.loadmat(file_path)
    ret = {}
    for key in keys:
        arr = data[key]
        ret[key] = arr.T if transpose else arr
    return ret


def gen_dataloader(**kwargs):
    """Build a DataLoader over an NPDataset of received signal windows.

    Recognised kwargs: sig, prbs, batch_size, win_size, constellations,
    shuffle, mod_order, bit_order, grouping_triplet_flag,
    form_triplet_from_dec.  When ``prbs`` is given it is mapped to
    constellation symbols and used as the dataset label; otherwise the
    dataset is unlabeled (decision-directed training).
    """
    signal = kwargs['sig']
    prbs_bits = kwargs.get('prbs', None)
    const = kwargs.get('constellations', util.CONST_16QAM)
    order = kwargs.get('mod_order', 4)

    # Map the PRBS bit stream to constellation symbols when labels exist.
    label = None
    if prbs_bits is not None:
        label = util.map(prbs_bits,
                         constellations=const,
                         mod_order=order,
                         order=kwargs.get('bit_order', 'msb'))

    dataset = models.NPDataset(
        array=signal,
        label=label,
        win_size=kwargs.get('win_size'),
        grouping=kwargs.get('grouping_triplet_flag', True),
        constellations=const,
        mod_order=order,
        form_triplet_from_dec=kwargs.get('form_triplet_from_dec', False),
    )
    return torch.utils.data.DataLoader(dataset,
                                       kwargs.get('batch_size'),
                                       shuffle=kwargs.get('shuffle', False))


class DecisionDirectedDataloaderUpdatedCallback(Callback):
    """Decision-directed label refresh for PLM training.

    After every training epoch (and, optionally, once before training
    starts) this callback equalizes the training signal with the current
    model, makes hard symbol decisions on the equalized output, and
    replaces the training dataset's label for the polarization being
    trained (``pl_module.which_pol``).  When the true PRBS is supplied,
    the Q factor of the labels before and after the update is printed
    for monitoring.
    """

    def __init__(self,
                 grouping,
                 constellations=util.CONST_16QAM,
                 shuffle=True,
                 eval_block_size=2048,
                 step=2048 - 185,
                 prbs=None,
                 eval_use_data_loader=False,
                 init_label_with_model=False):
        # grouping / shuffle are retained for the (currently commented-out)
        # dataset + dataloader rebuild inside update_label.
        super(DecisionDirectedDataloaderUpdatedCallback, self).__init__()
        self.grouping = grouping
        self.eval_block_size = eval_block_size
        self.step = step
        self.shuffle = shuffle
        self.prbs = prbs  # true PRBS bits; only used for Q-factor logging
        self.eval_use_data_loader = eval_use_data_loader
        self.init_label_with_model = init_label_with_model
        self.constellations = constellations
        # Bits per symbol implied by the constellation size (16QAM -> 4).
        self.mod_order = int(math.log2(len(constellations)))

    def update_label(self, trainer, pl_module, *args):
        """Equalize the training signal with the current model and overwrite
        the dataset label of ``pl_module.which_pol`` with hard decisions."""
        # Overlap (overhead) between consecutive evaluation blocks.
        win_size = self.eval_block_size - self.step
        # lp == lp0 makes the power scaling xi = 1 inside eval_sig, i.e.
        # the model output is used unscaled for label generation.
        sigeq = eval_sig(
            trainer.train_dataloader.dataset,
            pl_module,
            lp=2,
            lp0=2,
            eval_block_size=self.eval_block_size,
            step=self.step,
            eval_use_data_loader=self.eval_use_data_loader
        )
        # Hard decisions on the equalized signal become the new labels.
        newLabel = util.decide(sigeq,
                               constellations=self.constellations,
                               mod_order=self.mod_order).squeeze()
        # inputs / batch_size (and win_size above) feed the commented-out
        # dataset rebuild below; currently unused otherwise.
        inputs = trainer.train_dataloader.dataset.array
        batch_size = trainer.train_dataloader.batch_size
        if pl_module.which_pol == 0:
            # Training the X polarization: refresh labelx, keep labely.
            labelx = newLabel
            if self.prbs is not None:
                _, ber_before, _ = util.pr_ber(
                    trainer.train_dataloader.dataset.label[np.newaxis, 0, ...],
                    self.prbs[np.newaxis, pl_module.which_pol, :],
                    constellations=self.constellations,
                    mod_order=self.mod_order
                )
                print(f'Q factor of label before update: {util.ber2q(ber_before)}')

            # Dual-polarization label: carry the Y labels over unchanged.
            if trainer.train_dataloader.dataset.label.shape[-2] == 2:
                labely = trainer.train_dataloader.dataset.label[1, ...]
        elif pl_module.which_pol == 1:
            # Training the Y polarization: refresh labely, keep labelx.
            labely = newLabel
            if self.prbs is not None:
                _, ber_before, _ = util.pr_ber(
                    trainer.train_dataloader.dataset.label[np.newaxis, 1, ...],
                    self.prbs[np.newaxis, pl_module.which_pol, :],
                    constellations=self.constellations,
                    mod_order=self.mod_order
                )
                print(f'Q factor of label before update: {util.ber2q(ber_before)}')
            labelx = trainer.train_dataloader.dataset.label[0, ...]
        else:
            raise ValueError('Invalid which_pol')
        # Retained for the commented-out NPDataset rebuild below.
        make_decision = False

        # Re-assemble a [2, N] label when the dataset is dual-pol,
        # otherwise keep the single-pol label as-is.
        if trainer.train_dataloader.dataset.label.shape[-2] == 2:
            label = np.stack([labelx.squeeze(), labely.squeeze()], axis=0)
        else:
            label = labelx

        del trainer.train_dataloader.dataset.label

        # new_dataset = models.NPDataset(
        #     array=inputs,
        #     label=label.astype(np.complex128),
        #     grouping=self.grouping,
        #     win_size=win_size,
        #     make_decision=make_decision,
        # )

        # Swap the new decision-directed labels into the live dataset
        # in place (cheaper than rebuilding the dataloader above).
        trainer.train_dataloader.dataset.label = label

        # trainer.train_dataloader = torch.utils.data.DataLoader(
        #     new_dataset,
        #     batch_size,
        #     shuffle=self.shuffle,
        # )
        print('Label updated')
        if self.prbs is not None:
            # Log the Q factor of the freshly written labels.
            if pl_module.which_pol == 0:
                dummy, ber_after, _ = util.pr_ber(
                    trainer.train_dataloader.dataset.label[np.newaxis, 0, ...],
                    self.prbs[np.newaxis, pl_module.which_pol, :],
                    constellations=self.constellations,
                    mod_order=self.mod_order
                )
            else:
                dummy, ber_after, _ = util.pr_ber(
                    trainer.train_dataloader.dataset.label[np.newaxis, 1, ...],
                    self.prbs[np.newaxis, pl_module.which_pol, :],
                    constellations=self.constellations,
                    mod_order=self.mod_order
                )
            print(f'Q factor of label after update : {util.ber2q(ber_after)}')

    def on_train_epoch_end(self, trainer, pl_module):
        # Refresh decision-directed labels after every epoch.
        self.update_label(trainer, pl_module)

    def on_train_start(self, trainer, pl_module):
        # Optionally seed labels from a pre-trained model before epoch 0.
        if self.init_label_with_model:
            print('Initialize labels via models')
            self.update_label(trainer, pl_module)

class logQFactorCallback(Callback):
    """Print the post-equalization Q factor of the training signal after
    every epoch, using the true PRBS as the BER reference."""

    def __init__(self, grouping, constellations, prbs, eval_block_size=2048, step=2048 - 185,
                 eval_use_data_loader=False):
        # BUG FIX: the original called super(logQFactorCallback).__init__(),
        # which only initializes the unbound super proxy object and never
        # runs Callback.__init__; use the zero-argument form instead.
        super().__init__()
        self.grouping = grouping
        self.eval_block_size = eval_block_size
        self.step = step
        self.prbs = prbs  # true transmitted PRBS used as BER reference
        self.constellations = constellations
        self.eval_use_data_loader = eval_use_data_loader
        # Derive bits/symbol from the constellation instead of the
        # previously hard-coded 4, consistent with the rest of the file
        # (identical for the default 16QAM alphabet).
        self.mod_order = int(math.log2(len(constellations)))

    def on_train_epoch_end(self, trainer, pl_module):
        '''sig shape: [2, sample number]'''
        # Equalize with lp == lp0 (scaling factor 1) and flatten to [1, N].
        sigeq = eval_sig(trainer.train_dataloader.dataset,
                         pl_module,
                         lp=2,
                         lp0=2,
                         eval_block_size=self.eval_block_size,
                         step=self.step,
                         eval_use_data_loader=self.eval_use_data_loader).reshape(1, -1)
        ber = util.pr_ber(sigeq,
                          self.prbs[np.newaxis, pl_module.which_pol, :],
                          constellations=self.constellations,
                          mod_order=self.mod_order)[1]
        print(f'After equalization, the Q factor is {util.ber2q(ber)}')




def eval_sig(dataset,
             model,
             lp=2,
             lp0=2,
             eval_block_size=2048,
             step=2048 - 185,
             eval_use_data_loader=False,
             block_num=None):
    """Equalize the received signal with a trained perturbation model.

    The model predicts a nonlinear perturbation for its polarization
    (``model.which_pol``); the prediction is scaled by the launch-power
    ratio P/P0 (``lp`` vs. ``lp0``, both in dBm) and added to the raw
    signal stored in ``dataset.array``.

    Args:
        dataset: NPDataset holding the raw signal in ``dataset.array``.
        model: trained TNN_PLM or CVTNN_PLM Lightning module.
        lp: launch power (dBm) of the signal being evaluated.
        lp0: launch power (dBm) the model was trained at.
        eval_block_size: number of samples fed to the model per block.
        step: useful (non-overlapping) samples produced per block.
        eval_use_data_loader: iterate via a DataLoader instead of
            util.BlockSelector's overlapping blocks.
        block_num: optional cap on the number of blocks to evaluate.

    Returns:
        numpy complex array of shape [1, sample number] (always 2-D).

    Raises:
        TypeError: if ``model`` is neither TNN_PLM nor CVTNN_PLM.
    """
    model = model.eval()
    if isinstance(model, models.TNN_PLM):
        which_plm = 'TNN'
    elif isinstance(model, models.CVTNN_PLM):
        which_plm = 'CVTNN'
    else:
        # BUG FIX: which_plm previously stayed unbound for other model
        # types and surfaced later as a confusing NameError; fail fast.
        raise TypeError('model must be a TNN_PLM or CVTNN_PLM instance')
    chosen_device = next(model.parameters()).device

    if not eval_use_data_loader:
        bs = util.BlockSelector(sample_num=len(dataset),
                                block_size=eval_block_size,
                                step=step)
        if block_num is None:
            actual_block_num = len(bs)
        else:
            actual_block_num = block_num

        # Interleaved real/imag output buffer: [samples, 2].
        output_cache = torch.zeros([actual_block_num * eval_block_size, 2],
                                   device=chosen_device)

        with torch.no_grad():
            for block_indx in range(actual_block_num):
                inputTensor = dataset[bs[block_indx]][0]
                inputTensor = torch.from_numpy(inputTensor).to(chosen_device)
                if which_plm == 'CVTNN':
                    # Keep only the central `step` samples of each block,
                    # discarding the pre-overhead (overlap) region.
                    output_cache[block_indx * bs.step: (block_indx + 1) *
                                                       bs.step, :] = model(
                        inputTensor[:, model.which_pol, ...]
                    )[bs.pre_overhead:bs.pre_overhead + bs.step, ...].squeeze()
                elif which_plm == 'TNN':
                    inputTensor = model.formInputTensor(inputTensor)
                    output_cache[block_indx * bs.step: (block_indx + 1) *
                                                       bs.step, :] = model(
                        inputTensor[:, model.which_pol, ...]
                    )[bs.pre_overhead:bs.pre_overhead + bs.step, ...].squeeze()

        output = (torch.view_as_complex(output_cache[0:bs.sample_num, ...]).data.cpu().numpy())

    else:
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 eval_block_size,
                                                 shuffle=False)
        if block_num is None:
            actual_block_num = len(dataloader)
        else:
            actual_block_num = block_num

        output_cache = torch.zeros([
            actual_block_num * eval_block_size, 2
        ], device=chosen_device)
        pointer = 0
        dataloader_iter = iter(dataloader)
        # Inference only — disable autograd here too, matching the
        # BlockSelector branch (the original tracked gradients needlessly).
        with torch.no_grad():
            for batch_idx in range(actual_block_num):
                batch = next(dataloader_iter)
                inputTensor = batch[0][:, model.which_pol, ...].to(chosen_device)

                if which_plm == 'TNN':
                    inputTensor = model.formInputTensor(inputTensor)

                out = model(inputTensor).squeeze()
                out_eval_block_size = out.shape[0]  # last batch may be short

                output_cache[pointer:pointer + out_eval_block_size, :] = out
                pointer = pointer + out_eval_block_size
        output = torch.view_as_complex(output_cache).data.cpu().numpy()

    del output_cache

    # Scale the predicted perturbation by the ratio between evaluation
    # and training launch powers, then add it back to the raw signal.
    P = util.dBm2w(lp)
    P0 = util.dBm2w(lp0)
    xi = P / P0
    eqsig = dataset.array[model.which_pol, 0:len(output)] + xi * output
    model = model.train()
    if eqsig.ndim == 1:
        eqsig = eqsig.reshape(1, -1)
    return eqsig


def train_model(**behavior_dict):
    """Train a (CV)TNN perturbation-learning model.

    Args:
        behavior_dict: must contain the sub-dicts 'global_config'
            (experiment/dataset/training options), 'plm_kwargs'
            (constructor kwargs for CVTNN_PLM / TNN_PLM) and
            'trainer_kwargs' (forwarded to ``pl.Trainer``).

    Returns:
        The trained Lightning module.
    """
    '''argument parse'''
    global_config = behavior_dict.get('global_config')
    plm_kwargs = behavior_dict.get('plm_kwargs')
    trainer_kwargs = behavior_dict.get('trainer_kwargs')

    experiment_name = global_config['experiment_name']
    batch_size = global_config.get('batch_size', 16)
    lp = global_config.get('lp')  # launch power (dBm) of the training data
    save_checkpoint = global_config.get('save_checkpoint', True)
    comment = global_config.get('comment', '')
    constellations = global_config.get('constellations', util.CONST_16QAM)
    mod_order = int(math.log2(len(constellations)))  # bits per symbol
    variable_names_in_mat = global_config['variable_names_in_mat']
    dataset_dir = global_config['dataset_dir']
    training_set_path = global_config.get('training_set_path', None)
    validation_set_path = global_config.get('validation_set_path', None)
    fit_data_amount = global_config.get('fit_data_amount', 32768)
    tr_val_ratio = global_config.get('tr_val_ratio', 9)
    bit_order = global_config.get('bit_order', 'msb')
    complex_value_model = global_config.get('complex_value_model', True)
    decision_directed_flag = global_config.get('decision_directed_flag', True)
    print_q_factor_flag = global_config.get('print_q_factor_flag', False)
    eval_block_size = global_config.get('eval_block_size', 2048)
    read_mat_with_transpose = global_config.get('read_mat_with_transpose', True)

    group_triplet = plm_kwargs.get('group_triplet', True)
    win_size = plm_kwargs.get('time_win_size', 185)

    model = global_config.get('model', None)
    '''argument parse end'''
    # A pre-supplied model lets us bootstrap decision-directed labels
    # from it before the first training epoch.
    init_label_with_model = model is not None

    # Train/validation split sizes from the total fit amount.
    split_amount = fit_data_amount
    train_set_length = round(split_amount * tr_val_ratio / (tr_val_ratio + 1))
    validation_set_length = round(split_amount * 1 / (tr_val_ratio + 1))

    result_dir = os.path.join(BASE_DIR, f'result/{experiment_name}')

    if not os.path.exists(result_dir):
        os.makedirs(result_dir)

    # The model object is not YAML-serializable; drop it before dumping
    # the training configuration for reproducibility.
    behavior_dict['global_config']['model'] = None
    with open(os.path.join(result_dir, 'training_behavior.yaml'), 'w') as f:
        yaml.dump(behavior_dict, f)

    if training_set_path is None:
        training_set_path = os.path.join(dataset_dir, f'trSet_dBm_{lp}_nn_sf_1.mat')
    tr_read_result = read_arrays_from_file(training_set_path, variable_names_in_mat, read_mat_with_transpose)
    tr_sig = tr_read_result[variable_names_in_mat[0]]
    if len(variable_names_in_mat) == 3:
        # Dual polarization: stack X/Y PRBS into shape [2, bit number].
        tr_prbs = np.stack([
            tr_read_result[variable_names_in_mat[1]].squeeze(),
            tr_read_result[variable_names_in_mat[2]].squeeze()
        ], axis=0)
    elif len(variable_names_in_mat) == 2:
        tr_prbs = tr_read_result[variable_names_in_mat[1]]
    else:
        raise Exception('Invalid variable_names_in_mat.')

    if train_set_length != 0:
        # Leading train_set_length symbols (mod_order PRBS bits each).
        tr_sig = tr_sig[..., 0:train_set_length]
        tr_prbs = tr_prbs[..., 0:train_set_length * mod_order]

    tr_dataloader = gen_dataloader(
        sig=tr_sig,
        # Decision-directed training starts without ground-truth labels.
        prbs=tr_prbs if not decision_directed_flag else None,
        batch_size=batch_size,
        win_size=win_size,
        constellations=constellations,
        mod_order=mod_order,
        bit_order=bit_order,
        grouping_triplet_flag=group_triplet,
    )

    if validation_set_path is None:
        # NOTE(review): this reuses the TRAINING file (trSet_...) and
        # slices its tail as validation data — presumably intentional
        # (one file split into train/val); confirm against the datasets.
        validation_set_path = os.path.join(dataset_dir,
                                           f'trSet_dBm_{lp}_nn_sf_1.mat')
    val_read_result = read_arrays_from_file(validation_set_path, variable_names_in_mat, read_mat_with_transpose)
    val_sig = val_read_result[variable_names_in_mat[0]]
    if len(variable_names_in_mat) == 3:
        val_prbs = np.stack([
            val_read_result[variable_names_in_mat[1]].squeeze(),
            val_read_result[variable_names_in_mat[2]].squeeze()
        ], axis=0)
    elif len(variable_names_in_mat) == 2:
        # BUG FIX: previously read from tr_read_result (copy/paste),
        # silently validating against the training PRBS.
        val_prbs = val_read_result[variable_names_in_mat[1]]
    else:
        raise Exception('Invalid variable_names_in_mat.')

    # BUG FIX: the original guard compared validation_set_path (a string)
    # to 0 and was therefore always true; the intended check mirrors the
    # train_set_length guard above.
    if validation_set_length != 0:
        # Trailing validation_set_length symbols and matching PRBS bits.
        val_sig = val_sig[..., -validation_set_length:]
        val_prbs = val_prbs[..., -validation_set_length * mod_order:]

    val_dataloader = gen_dataloader(
        sig=val_sig,
        prbs=val_prbs,
        batch_size=batch_size,
        win_size=win_size,
        constellations=constellations,
        mod_order=mod_order,
        bit_order=bit_order,
        grouping_triplet_flag=group_triplet,
    )

    if model is None:
        if complex_value_model:
            model = CVTNN_PLM(**plm_kwargs)
        else:
            model = TNN_PLM(**plm_kwargs)

    call_back_list = []
    if decision_directed_flag:
        # Refresh training labels from model decisions after each epoch.
        label_update_callback = DecisionDirectedDataloaderUpdatedCallback(
            grouping=group_triplet,
            constellations=constellations,
            eval_block_size=eval_block_size,
            step=eval_block_size - win_size,
            prbs=tr_prbs,
            eval_use_data_loader=False,
            init_label_with_model=init_label_with_model
        )
        call_back_list.append(label_update_callback)
    if print_q_factor_flag:
        # Print the training-signal Q factor after every epoch.
        log_q_factor_callback = logQFactorCallback(
            grouping=group_triplet,
            constellations=constellations,
            eval_block_size=eval_block_size,
            step=eval_block_size - win_size,
            prbs=tr_prbs,
            eval_use_data_loader=False
        )
        call_back_list.append(log_q_factor_callback)

    logger = TensorBoardLogger(save_dir=result_dir, name='log', version=comment)
    trainer = pl.Trainer(callbacks=call_back_list,
                         logger=logger,
                         **trainer_kwargs)
    trainer.fit(model,
                train_dataloaders=tr_dataloader,
                val_dataloaders=val_dataloader)

    if save_checkpoint:
        trainer.save_checkpoint(filepath=os.path.join(
            result_dir, f'{comment}_trained_by_{lp}_dBm.ckpt'
        ))
    return model


def load_model(result_dir, comment, train_lp, complex_valued, **plm_kwargs):
    """Restore a trained PLM from its checkpoint file.

    Args:
        result_dir: Directory containing the checkpoint.
        comment: Experiment comment used in the checkpoint file name.
        train_lp: Launch power (dBm) the model was trained at, part of
            the checkpoint file name.
        complex_valued: If True load a CVTNN_PLM, otherwise a TNN_PLM.
        **plm_kwargs: Forwarded to ``load_from_checkpoint``.

    Returns:
        The restored Lightning module.
    """
    ckpt_path = os.path.join(
        result_dir, f'{comment}_trained_by_{train_lp}_dBm.ckpt'
    )
    model_cls = CVTNN_PLM if complex_valued else TNN_PLM
    return model_cls.load_from_checkpoint(ckpt_path, **plm_kwargs)


def eval_model(**behavior_dict):
    """Evaluate a trained (CV)TNN model on a test set.

    Args:
        behavior_dict: must contain 'global_config' and 'plm_kwargs'
            sub-dicts (see argument parse below); may also carry a
            pre-loaded 'model' to skip checkpoint loading.

    Returns:
        (Q, SNR, Q_le): Q factor and effective SNR after equalization,
        and the pre-equalization (linear) Q factor, or None when
        ``eval_le`` is False.
    """
    '''argument parse'''
    global_config = behavior_dict.get('global_config')
    plm_kwargs = behavior_dict.get('plm_kwargs')

    experiment_name = global_config['experiment_name']
    batch_size = global_config.get('batch_size', 16)
    lp = global_config.get('lp')  # evaluation launch power (dBm)
    comment = global_config.get('comment', '')
    constellations = global_config.get('constellations', util.CONST_16QAM)
    mod_order = int(math.log2(len(constellations)))  # bits per symbol
    variable_names_in_mat = global_config['variable_names_in_mat']
    dataset_dir = global_config['dataset_dir']
    test_set_path = global_config.get('test_set_path', None)
    bit_order = global_config.get('bit_order', 'msb')
    complex_value_model = global_config.get('complex_value_model', True)
    eval_use_gpu = global_config.get('eval_use_gpu', True)
    eval_le = global_config.get('eval_le', False)
    train_lp = global_config['train_lp']  # launch power used in training
    eval_block_size = global_config['eval_block_size']
    read_mat_with_transpose = global_config.get('read_mat_with_transpose', True)
    use_BO = global_config.get('use_BO', False)
    on_eval_end_callback = global_config.get('on_eval_end_callback', None)
    form_triplet_from_dec = global_config.get('form_triplet_from_dec', True)

    group_triplet = plm_kwargs.get('group_triplet', True)
    win_size = plm_kwargs.get('time_win_size', 185)

    model = behavior_dict.get('model', None)
    '''argument parse end'''

    result_dir = os.path.join(BASE_DIR, f'result/{experiment_name}')

    if test_set_path is None:
        test_set_path = os.path.join(dataset_dir, f'tstSet_dBm_{lp}_nn_sf_1.mat')
    read_result = read_arrays_from_file(test_set_path, variable_names_in_mat, read_mat_with_transpose)
    tst_sig = read_result[variable_names_in_mat[0]]
    if len(variable_names_in_mat) == 3:
        # Dual polarization: stack X/Y PRBS into shape [2, bit number].
        tst_prbs = np.stack([
            read_result[variable_names_in_mat[1]].squeeze(),
            read_result[variable_names_in_mat[2]].squeeze()
        ], axis=0)
    elif len(variable_names_in_mat) == 2:
        tst_prbs = read_result[variable_names_in_mat[1]]
    else:
        raise Exception('Invalid variable_names_in_mat.')

    tst_dataloader = gen_dataloader(
        sig=tst_sig,
        prbs=tst_prbs,
        batch_size=batch_size,
        win_size=win_size,
        constellations=constellations,
        mod_order=mod_order,
        bit_order=bit_order,
        grouping_triplet_flag=group_triplet,
        form_triplet_from_dec=form_triplet_from_dec
    )

    if model is None:
        model = load_model(result_dir, comment, train_lp, complex_valued=complex_value_model, **plm_kwargs)

    if eval_use_gpu:
        model = model.eval().cuda()
    else:
        model = model.eval()

    tstSet = tst_dataloader.dataset
    if not use_BO:
        eqsig = eval_sig(tstSet,
                         model,
                         lp=lp,
                         lp0=train_lp,
                         eval_block_size=eval_block_size,
                         step=eval_block_size - win_size,
                         eval_use_data_loader=False)
        eqsig = eqsig.squeeze()
    else:
        from bayes_opt import BayesianOptimization, UtilityFunction

        def cal_snr(lp):
            # BO objective: Q factor of the signal equalized with the
            # candidate power-scaling value lp.
            eqsig = eval_sig(tstSet,
                             model,
                             lp=lp,
                             lp0=train_lp,
                             eval_block_size=eval_block_size,
                             step=eval_block_size - win_size,
                             eval_use_data_loader=False)
            eqsig = eqsig.squeeze()
            ber = util.pr_ber(eqsig,
                              prbs=tst_prbs[np.newaxis, model.which_pol, ...],
                              constellations=constellations,
                              mod_order=mod_order)[1]
            Q = util.ber2q(ber.squeeze())
            # Guard against BER == 0 (infinite Q) breaking the optimizer.
            if not np.isfinite(Q).any():
                Q = -50
            return Q

        optimizer = BayesianOptimization(
            f=cal_snr,
            pbounds={'lp': (-5.9, 8.1)},
            verbose=0
        )
        utility = UtilityFunction(kind='ei', xi=0.01)
        optimizer.maximize(init_points=5, n_iter=10, acquisition_function=utility)
        lp_opt = optimizer.max['params']['lp']
        # BUG FIX: the final evaluation used to pass eval_block_size -
        # win_size as eval_block_size and omitted step entirely, running
        # with a different block geometry than the one optimized over.
        eqsig = eval_sig(tstSet,
                         model,
                         lp=lp_opt,
                         lp0=train_lp,
                         eval_block_size=eval_block_size,
                         step=eval_block_size - win_size,
                         eval_use_data_loader=False)
        print(f'opt lp: {lp_opt}')

    tst_prbs = tst_prbs.squeeze()
    # Normalize both arrays to 2-D [pol, samples] for pr_ber.
    if eqsig.ndim == 1:
        eqsig = eqsig.reshape(1, -1)
    if tst_prbs.ndim == 1:
        tst_prbs = tst_prbs.reshape(1, -1)
    eqsig, ber, _ = util.pr_ber(eqsig,
                                tst_prbs[np.newaxis, model.which_pol, ...],
                                constellations=constellations,
                                mod_order=mod_order)

    if on_eval_end_callback is not None:
        on_eval_end_callback(eqsig, **behavior_dict)  # e.g. plot a constellation diagram

    ber = np.mean(ber)

    if eval_le:
        # Baseline: BER/Q of the raw (pre-equalization) signal.
        _, ber_le, _ = util.pr_ber(tstSet.array,
                                   tst_prbs,
                                   constellations=constellations,
                                   mod_order=mod_order)
        ber_le_result = np.mean(ber_le)
        Q_le = util.ber2q(ber_le_result)
    else:
        Q_le = None

    Q = util.ber2q(ber)
    SNR = util.effective_snr(eqsig,
                             tst_prbs[np.newaxis, model.which_pol, ...],
                             constellations=constellations,
                             mod_order=mod_order)
    return Q, SNR, Q_le


if __name__ == "__main__":
    pass
