import warnings
from typing import Any
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.utilities.types import OptimizerLRScheduler, STEP_OUTPUT
from CR_DSPPytorch import PhaseRecLayer, cmul, FIRLayer, EDCLayer
import os
import scipy.io as scio

os.environ["OMP_NUM_THREADS"] = "8"
import util
from sklearn.cluster import KMeans
from pytorch_lightning.callbacks import Callback
from scipy.special import erf
from ERPNLC_TDC import simulation_ERP_NLC_module
from TDCE import modified_filter_taps_by_cluster_complex

BASE_DIR = os.path.dirname(__file__)

'''
This script updates the filter coefficients via gradient descent; compared
with the conventional initialization method, it needs fewer taps under a
floating-point representation of the coefficients.
'''


def eval_ber(model, dataset, prbs, constellations=util.CONST_16QAM, use_cuda=True):
    """Run *model* over the full dataset, post-process with phase recovery and
    a decision-directed LMS FIR stage, then measure the BER against *prbs*.

    Returns the recovered (numpy) symbol sequence and the bit-error rate.
    """
    if not isinstance(dataset, util.NPDataset):
        raise ValueError('dataset is supposed to be an instance of NPDataset.')

    model = model.eval()
    if use_cuda and torch.cuda.is_available():
        model = model.cuda()
    device = model.device

    # Two samples per symbol: fetch the whole dataset in a single slice.
    raw = dataset[np.arange(len(dataset) * 2)]
    y = model(torch.from_numpy(raw).to(device))
    del model  # release the equalizer before the follow-up DSP stages

    # CMA/PMD equalization was removed upstream, so only phase recovery and a
    # decision-directed LMS refinement remain in this chain.
    pr = PhaseRecLayer(1024).to(device)
    lms = FIRLayer(tap=32, case_num=2, power_norm=True, centor_one=True).to(device)

    y = pr(y[..., 1::2, :])  # decimate to one sample per symbol
    lms.fit(y,
            err_mode='DDM',
            constellations=constellations,
            iter_num=10,
            block_size=4028,
            remain=2048,
            lr=1e-4)

    with torch.no_grad():
        y = torch.view_as_complex(lms(y))
        y = y.cpu().data.numpy().squeeze()
        y, ber, _ = util.pr_ber(y, prbs, constellations)
    return y, ber


def eval_ber_with_NLC(model, dataset, prbs,
                      lp, symbolRate, spanLen, spanNum,
                      constellations=util.CONST_16QAM, use_cuda=True):
    """Run *model* over the dataset, post-process with phase recovery plus a
    decision-directed LMS stage, then feed the result through the ERP
    nonlinearity-compensation simulation module.

    Returns the recovered (numpy) signal and the Q factor reported by the
    NLC module.
    """
    if not isinstance(dataset, util.NPDataset):
        raise ValueError('dataset is supposed to be an instance of NPDataset.')

    model = model.eval()
    if use_cuda and torch.cuda.is_available():
        model = model.cuda()
    device = model.device

    # Two samples per symbol: fetch everything in one slice.
    raw = dataset[np.arange(len(dataset) * 2)]
    y = model(torch.from_numpy(raw).to(device))
    del model  # free the equalizer before the DSP stages below

    # CMA/PMD equalization was removed upstream, so only phase recovery and a
    # decision-directed LMS refinement remain in this chain.
    pr = PhaseRecLayer(1024).to(device)
    lms = FIRLayer(tap=32, case_num=2, power_norm=True, centor_one=True).to(device)

    y = pr(y[..., 1::2, :])  # decimate to one sample per symbol
    lms.fit(y,
            err_mode='DDM',
            constellations=constellations,
            iter_num=10,
            block_size=4028,
            remain=2048,
            lr=1e-4)

    with torch.no_grad():
        y = torch.view_as_complex(lms(y))
        y = y.cpu().data.numpy().squeeze()
        ber, Q = simulation_ERP_NLC_module(y, prbs, lp, symbolRate, spanLen, spanNum)
    return y, Q


def eval_sig(model, dataset, prbs, constellations=util.CONST_16QAM, use_cuda=True):
    """Like ``eval_ber`` but keeps *model* alive and restores it to training
    mode afterwards, so it can be called from a training callback.

    Returns the recovered (numpy) symbol sequence and the bit-error rate.
    """
    if not isinstance(dataset, util.NPDataset):
        raise ValueError('dataset is supposed to be an instance of NPDataset.')

    model = model.eval()
    if use_cuda and torch.cuda.is_available():
        model = model.cuda()
    device = model.device

    # Two samples per symbol: fetch the whole dataset at once.
    raw = dataset[np.arange(len(dataset) * 2)]
    y = model(torch.from_numpy(raw).to(device))

    # CMA/PMD equalization was removed upstream, so only phase recovery and a
    # decision-directed LMS refinement remain in this chain.
    pr = PhaseRecLayer(1024).to(device)
    lms = FIRLayer(tap=32, case_num=2, power_norm=True, centor_one=True).to(device)

    y = pr(y[..., 1::2, :])  # decimate to one sample per symbol
    lms.fit(y,
            err_mode='DDM',
            constellations=constellations,
            iter_num=10,
            block_size=4028,
            remain=2048,
            lr=1e-4)

    with torch.no_grad():
        y = torch.view_as_complex(lms(y))
        y = y.cpu().data.numpy().squeeze()
        y, ber, _ = util.pr_ber(y, prbs, constellations)

    model = model.train()  # hand the model back in training mode
    return y, ber


class LogQFactorCallback(Callback):
    """Lightning callback that evaluates the model on a held-out dataset
    after every training epoch and prints the resulting Q factor."""

    def __init__(self, dataset, prbs):
        super().__init__()
        # Held-out dataset and its reference PRBS for BER measurement.
        self.dataset = dataset
        self.prbs = prbs

    def on_train_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        # Full DSP evaluation (phase recovery + DD-LMS) on the CPU.
        _, ber = eval_sig(pl_module, self.dataset, self.prbs, use_cuda=False)
        Q = util.ber2q(np.mean(ber))
        print(f'Q factor: {Q}')


class GDAssistedLinearEqualizer(nn.Module):
    """Complex FIR chromatic-dispersion equalizer whose taps are learned by
    gradient descent.

    The analytic time-domain CD-compensation taps are computed in
    ``__init__`` but the trainable kernel is zero-initialized, so gradient
    descent learns the coefficients from scratch.
    NOTE(review): if warm-starting from the analytic taps was intended,
    ``new_h`` should be initialized from ``h`` instead — confirm with the
    author.
    """

    def __init__(self,
                 symbol_rate=32e9,
                 DL=17e-6 * 100e3,
                 c=3e8,
                 wave_length=1552.52e-9,
                 sample_factor=2,
                 case_num=2,
                 rolloff=0.1,
                 tap=2048,
                 ):
        """
        Args:
            symbol_rate: baud rate of the signal.
            DL: accumulated dispersion (dispersion parameter times length).
            c: speed of light.
            wave_length: carrier wavelength.
            sample_factor: samples per symbol of the input.
            case_num: number of independent FIR kernels (e.g. polarizations).
            rolloff: unused here; kept for interface compatibility.
            tap: FIR kernel length.
        """
        super(GDAssistedLinearEqualizer, self).__init__()
        self.sample_factor = sample_factor
        self.case_num = case_num
        Ts = 1 / symbol_rate / sample_factor  # sampling period
        '''TDS'''
        # Analytic time-domain CD-compensation taps (reference design).
        n = np.arange(-(tap // 2), (tap // 2) + tap % 2)
        h_temp = np.sqrt(1j * c * Ts ** 2 / (-DL * wave_length ** 2)) * np.exp(
            -1j * np.pi * c * Ts ** 2 * (n ** 2) / (-DL * wave_length ** 2))
        '''Initialize Linear Kernel'''
        h = np.zeros((case_num, tap), dtype=np.complex128)
        for indx in range(case_num):
            h[indx, ...] = h_temp
        # Split the complex taps into a real-valued array: (case, tap, 2).
        h = np.stack([np.real(h), np.imag(h)], axis=-1).reshape(case_num, tap, 2)
        h = torch.from_numpy(h)
        # Trainable kernel starts from zeros (the analytic taps above are
        # discarded; see the class-level review note).
        new_h = torch.zeros_like(h, requires_grad=True, dtype=torch.float64)
        self.h = nn.Parameter(new_h, requires_grad=True)
        self.pr = PhaseRecLayer(1024)
        self.tap = tap

    def forward(self, x):
        """Apply the complex FIR kernel to ``x``.

        Args:
            x: real-valued tensor of shape (batch, pol, seq_len, 2) holding
               interleaved real/imag parts.

        Returns:
            Equalized tensor of the same layout; in training mode the phase
            recovery layer is additionally applied to the decimated output.
        """
        batch_size, pol, seq_len, ri = x.shape
        # BUGFIX: the original did `self.h = self.h.to(x.device)`. When the
        # devices differ, Parameter.to() returns a plain Tensor and assigning
        # it onto a registered parameter raises TypeError in
        # nn.Module.__setattr__. Use a local copy instead; gradients still
        # flow back to self.h through the differentiable .to().
        h = self.h.to(x.device)
        x_r = x[..., 0].float()
        x_i = x[..., 1].float()
        h_r = h[..., 0].unsqueeze(1).float()
        h_i = h[..., 1].unsqueeze(1).float()

        # Complex convolution via four real grouped conv1d calls
        # (one group per polarization/case).
        y_r_r = F.conv1d(x_r, h_r, groups=pol, padding=self.tap // 2)
        y_r_i = F.conv1d(x_r, h_i, groups=pol, padding=self.tap // 2)
        y_i_r = F.conv1d(x_i, h_r, groups=pol, padding=self.tap // 2)
        y_i_i = F.conv1d(x_i, h_i, groups=pol, padding=self.tap // 2)

        # (a+jb)(c+jd) = (ac - bd) + j(ad + bc)
        y_r = y_r_r - y_i_i
        y_i = y_r_i + y_i_r

        y = torch.stack([y_r, y_i], dim=-1)  # shape[batch_size, pol, seq_len, 2]
        if self.training:
            # Only during training: decimate to 1 sps and run phase recovery.
            y = self.pr(y[..., 1::2, :])
        return y


class GDAssistedLinearEqualizerPLM(pl.LightningModule):
    """LightningModule wrapper around :class:`GDAssistedLinearEqualizer`.

    Parses ``model_kwargs`` / ``lr`` / ``optimizer`` from the keyword
    arguments, builds the underlying equalizer, and trains it with a
    rotation-invariant squared-error loss.
    """

    def __init__(self, **plm_kwargs):
        super(GDAssistedLinearEqualizerPLM, self).__init__()
        '''argument parsing'''
        # model_kwargs is forwarded verbatim to GDAssistedLinearEqualizer.
        model_kwargs = plm_kwargs.get('model_kwargs')
        lr = plm_kwargs.get('lr', 1e-3)
        optimizer = plm_kwargs.get('optimizer', 'Adam')
        '''argument parsing end'''

        # Copy so the caller's dict is never mutated downstream.
        actual_model_kwargs = model_kwargs.copy()
        self.model = GDAssistedLinearEqualizer(**actual_model_kwargs)
        self.lr = lr
        self.optimizer = optimizer  # optimizer name: 'Adam', 'SGD' or 'ASGD'

    def forward(self, x):
        # Delegate directly to the wrapped equalizer.
        return self.model(x)

    def configure_optimizers(self) -> OptimizerLRScheduler:
        """Build the optimizer selected by ``self.optimizer``.

        Adam additionally gets a ReduceLROnPlateau scheduler monitoring the
        logged ``train_loss``; SGD/ASGD run without a scheduler.
        """
        if self.optimizer == 'Adam':
            optimizer = torch.optim.Adam(self.parameters(),
                                         lr=self.lr)
            return {'optimizer': optimizer,
                    'lr_scheduler': {
                        'scheduler': torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                                                mode='min',
                                                                                factor=0.5,
                                                                                patience=4,
                                                                                min_lr=1e-5,
                                                                                verbose=True),
                        'monitor': 'train_loss'}
                    }
        elif self.optimizer == 'SGD':
            return torch.optim.SGD(self.parameters(),
                                   lr=self.lr)
        elif self.optimizer == 'ASGD':
            return torch.optim.ASGD(self.parameters(),
                                    lr=self.lr)
        else:
            raise ValueError('Invalid optimizer')

    def err_function(self, x, y):
        """Rotation-invariant squared error between output *x* and target *y*.

        The squared error is taken against the best of the four 90-degree
        rotations of x (0, pi/2, pi, 3*pi/2), so the loss is insensitive to
        the quadrant phase ambiguity left by phase recovery.
        """
        batchsize = x.shape[0]
        # If x still carries a sequence axis, score only the center symbol.
        if x.ndim == 4:
            x = x[..., round(x.shape[-2] // 2), :]
        # Four candidate phase rotations as complex factors.
        rotate_fac = np.exp(1j * np.array([0, np.pi / 2, np.pi, np.pi / 2 * 3]))
        rotate_fac = rotate_fac.reshape(*[1] * (x.ndim - 1), -1)
        rotate_fac = torch.from_numpy(rotate_fac).to(x.device)
        rotate_fac.requires_grad_(False)
        # Convert to the interleaved real/imag layout used by cmul.
        rotate_fac = torch.view_as_real(rotate_fac)

        # Broadcast a rotation axis onto both tensors.
        x = x[..., np.newaxis, :]
        y = y[..., np.newaxis, :]

        x = cmul(x, rotate_fac)
        res = y - x
        err = torch.sum(res ** 2, dim=-1)   # |residual|^2 per rotation
        err = torch.min(err, dim=-1)[0]     # best rotation per symbol
        err = torch.mean(err, dim=[0, 1])   # average over batch and pol
        return err

    def training_step(self, batch, batch_idx):
        x, y = batch
        x.requires_grad_(True)
        output = self(x)
        err = self.err_function(output, y)
        self.log('train_loss', err, on_step=True, on_epoch=True, prog_bar=True)
        return {'loss': err}

    def validation_step(self, batch, batch_idx):
        x, y = batch
        output = self(x)
        # Phase recovery is applied here explicitly because the model only
        # runs it internally in training mode (see model.forward).
        with torch.no_grad():
            output = self.model.pr(output[..., 1::2, :])
        err = self.err_function(output, y)
        return {'val_loss': err}

    def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_closure):
        # Plain pass-through: run the closure (forward + backward) and step.
        optimizer.step(closure=optimizer_closure)


def job_TDC_GD(lp=2, tap_list=None, eval_flag=False):
    """Sweep FIR tap counts: train (or load and evaluate) the GD-assisted
    time-domain CD equalizer and report the resulting Q factor.

    Args:
        lp: launch power in dBm; selects the dataset and checkpoint names.
        tap_list: FIR tap counts to sweep (defaults to [897]).
        eval_flag: when True, load saved checkpoints and evaluate instead of
            training.
    """
    # BUGFIX: mutable default argument ([897]) replaced by a None sentinel so
    # the list is not shared across calls.
    if tap_list is None:
        tap_list = [897]
    resultDir = os.path.join(BASE_DIR, 'result/TDCGradDescent')
    os.makedirs(resultDir, exist_ok=True)
    resultPath = os.path.join(resultDir, 'Vary_tap')
    os.makedirs(resultPath, exist_ok=True)
    checkpointPath = os.path.join(resultPath, 'chkpts')
    os.makedirs(checkpointPath, exist_ok=True)
    QLEcache = np.zeros([len(tap_list), 1])  # Q of the linear equalizer alone
    Qcache = np.zeros_like(QLEcache)         # Q after NLC (currently unused)
    save_result = False
    '''Behavioral Parameters'''
    max_epochs = 12
    batch_size = 64
    splitAmount = 65536
    tr_val_ratio = 7
    trsetLen = round(splitAmount * tr_val_ratio / (tr_val_ratio + 3))
    valsetLen = round(splitAmount * 3 / (tr_val_ratio + 3))
    tstsetLen = 131072
    block_size = 2048
    assert (block_size % 2 == 0)
    constellations = util.CONST_16QAM
    save_checkpoint = True

    for tapIndx, tap in enumerate(tap_list):
        '''Load Data'''
        dataPath = os.path.join(BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/PMD_eliminated_by_CMA'
                                          f'/trSet_lp_{lp}.mat')
        '''Form Dataloaders'''
        spanLen = 80e3
        spanNum = 40
        D = 17e-6
        L = spanLen * spanNum
        symbolRate = 32e9
        # Half the accumulated dispersion (compensated per side).
        DL = D * L / 2

        train_dataloader, _, _ = util.gen_dataloader(dataPath,
                                                     block_size=block_size,
                                                     symbol_num=trsetLen,
                                                     batch_size=batch_size,
                                                     constellations=constellations,
                                                     shuffle=True)
        val_dataloader, valSet, prbsVal = util.gen_dataloader(dataPath,
                                                              block_size=block_size,
                                                              symbol_num=valsetLen,
                                                              batch_size=batch_size,
                                                              constellations=constellations,
                                                              shuffle=False,
                                                              count_from_end=True)
        test_dataloader, tstSet, prbsTst = util.gen_dataloader(dataPath,
                                                               block_size=block_size,
                                                               symbol_num=tstsetLen,
                                                               batch_size=batch_size,
                                                               constellations=constellations,
                                                               shuffle=False)
        '''Form dataloader end'''
        model_kwargs = {
            'DL': DL,
            'sample_factor': 2,
            'case_num': 2,
            'symbol_rate': symbolRate,
            'tap': tap,
        }
        plm_kwargs = {
            'model_kwargs': model_kwargs,
            'lr': 1e-4,
            'optimizer': 'Adam',
        }
        if not eval_flag:
            mdl = GDAssistedLinearEqualizerPLM(**plm_kwargs)
            # Log Q on the test set after every epoch.
            log_q_factor_callback = LogQFactorCallback(dataset=tstSet, prbs=prbsTst)
            trainer_kwargs = {
                'callbacks': [log_q_factor_callback],
                'max_epochs': max_epochs,
                'accelerator': 'gpu',
                # Effectively disable validation during the run.
                'check_val_every_n_epoch': max_epochs + 1,
            }
            trainer = pl.Trainer(**trainer_kwargs)
            trainer.fit(mdl, train_dataloader, val_dataloader)
            if save_checkpoint:
                trainer.save_checkpoint(filepath=os.path.join(
                    checkpointPath,
                    f'sim_trained_by_{lp}dBm_{tap}taps.ckpt'
                ))
            sig, ber = eval_ber(mdl, tstSet, prbsTst, use_cuda=False)
            # BUGFIX: the print below reads Q_le; this branch previously only
            # defined Q, raising NameError after training.
            Q_le = util.ber2q(np.mean(ber))
            QLEcache[tapIndx, 0] = Q_le
        else:
            mdl = GDAssistedLinearEqualizerPLM.load_from_checkpoint(
                os.path.join(checkpointPath, f'sim_trained_by_{lp}dBm_{tap}taps.ckpt'),
                **plm_kwargs
            )
            _, ber_le = eval_ber(mdl, tstSet, prbsTst, use_cuda=False)
            Q_le = util.ber2q(np.mean(ber_le))
            QLEcache[tapIndx, 0] = Q_le
        print(f'lp {lp}: Q factor: {Q_le}')
    if save_result:
        with open(os.path.join(resultPath, f'LTDE_lp_{lp}_vary_tap.csv'), 'w') as f:
            f.write('tap,Q,Q_LE\n')
            for tapIndx, tap in enumerate(tap_list):
                f.write(f'{tap},{Qcache[tapIndx, 0]},{QLEcache[tapIndx, 0]}\n')


if __name__ == '__main__':
    # Silence the noisy Lightning/PyTorch user warnings for this run.
    warnings.filterwarnings("ignore", category=UserWarning)
    # Evaluate the pre-trained 427-tap equalizer at 2 dBm launch power.
    job_TDC_GD(tap_list=[427], lp=2, eval_flag=True)
