from typing import Any
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.utilities.types import OptimizerLRScheduler, STEP_OUTPUT
from CR_DSPPytorch import PhaseRecLayer, cmul, FIRLayer, EDCLayer
import os

os.environ["OMP_NUM_THREADS"] = "8"
import util
from sklearn.cluster import KMeans
from pytorch_lightning.callbacks import Callback
from scipy.special import erf
import scipy.io as scio

BASE_DIR = os.path.dirname(__file__)

'''
This module clusters the coefficients of the coherent-system equalizer filter.
We train an N*N square matrix that updates the cluster centroids. Although no
performance gain has been observed so far, for short-reach links -- where the
filter coefficients overlap more heavily -- an improvement can reasonably be
expected, so this code still has practical value.
'''

def eval_ber(model, dataset, prbs, constellations=util.CONST_16QAM, use_cuda=True):
    """Run the trained equalizer over *dataset*, post-process the output
    (phase recovery + short adaptive FIR) and return the recovered signal
    together with its bit-error rate.

    Unlike :func:`eval_sig`, the model is deleted after the forward pass and
    is NOT switched back into training mode, so use this for final scoring.

    Returns:
        (signal, ber) tuple as produced by ``util.pr_ber``.
    """
    if not isinstance(dataset, util.NPDataset):
        raise ValueError('dataset is supposed to be an instance of NPDataset.')
    model = model.eval()
    print(f'Updated centroids: {model.model.centroids.data}')
    lms = FIRLayer(tap=32, case_num=2, power_norm=True, centor_one=True)
    pr = PhaseRecLayer(1024)

    if use_cuda and torch.cuda.is_available():
        model = model.cuda()
    device = model.device

    # One forward pass over the whole dataset (2 samples per symbol).
    sig = torch.from_numpy(dataset[np.arange(len(dataset) * 2)]).to(device)
    sig = model(sig)

    del model  # free the (possibly GPU-resident) model before post-processing

    # CMA stage: PMD equalization was already removed upstream, so only phase
    # recovery plus a short adaptive FIR remain here.
    lms = lms.to(device)
    pr = pr.to(device)
    sig = pr(sig[..., 1::2, :])  # downsample to symbol rate, then recover phase

    lms.fit(sig,
            err_mode='DDM',
            constellations=constellations,
            iter_num=10,
            block_size=4028,
            remain=2048,
            lr=1e-4)
    with torch.no_grad():
        sig = torch.view_as_complex(lms(sig)).cpu().data.numpy().squeeze()
        sig, ber, _ = util.pr_ber(sig, prbs, constellations)
    return sig, ber


def eval_sig(model, dataset, prbs, constellations=util.CONST_16QAM, use_cuda=True):
    """Evaluate the model mid-training: same pipeline as :func:`eval_ber`,
    but the model is kept alive and restored to training mode before
    returning, so this is safe to call from an epoch-end callback.

    Returns:
        (signal, ber) tuple as produced by ``util.pr_ber``.
    """
    if not isinstance(dataset, util.NPDataset):
        raise ValueError('dataset is supposed to be an instance of NPDataset.')
    model = model.eval()
    lms = FIRLayer(tap=32, case_num=2, power_norm=True, centor_one=True)
    pr = PhaseRecLayer(1024)

    if use_cuda and torch.cuda.is_available():
        model = model.cuda()
    device = model.device

    # One forward pass over the whole dataset (2 samples per symbol).
    sig = torch.from_numpy(dataset[np.arange(len(dataset) * 2)]).to(device)
    sig = model(sig)

    # CMA stage: PMD equalization was already removed upstream, so only phase
    # recovery plus a short adaptive FIR remain here.
    lms = lms.to(device)
    pr = pr.to(device)
    sig = pr(sig[..., 1::2, :])  # downsample to symbol rate, then recover phase

    lms.fit(sig,
            err_mode='DDM',
            constellations=constellations,
            iter_num=10,
            block_size=4028,
            remain=2048,
            lr=1e-4)
    with torch.no_grad():
        sig = torch.view_as_complex(lms(sig)).cpu().data.numpy().squeeze()
        sig, ber, _ = util.pr_ber(sig, prbs, constellations)
    model = model.train()  # hand the model back in training mode
    return sig, ber


class LogQFactorCallback(Callback):
    """At the end of every training epoch, score the model on a held-out
    dataset and print the resulting Q-factor."""

    def __init__(self, dataset, prbs):
        super().__init__()
        self.dataset = dataset  # held-out NPDataset used for scoring
        self.prbs = prbs        # reference PRBS for BER counting

    def on_train_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        _, ber = eval_sig(pl_module, self.dataset, self.prbs, use_cuda=False)
        print(f'Q factor: {util.ber2q(np.mean(ber))}')


class GDAssistedLinearEqualizer(nn.Module):
    """Linear (chromatic-dispersion) equalizer whose FIR taps are tied to a
    small set of cluster centroids.

    The raw tap vector ``h`` -- one complex tap per (case, delay) position,
    stored as (re, im) pairs in a ``(case_num * tap, 2)`` array -- is clustered
    into ``clusters`` centroids.  The only trainable parameter is a
    ``clusters x clusters`` mixing matrix (``fuzzy_matrix``): the effective
    centroids are ``fuzzy_matrix @ centroids`` and every tap is snapped to its
    nearest effective centroid before the complex convolution.

    Args:
        symbol_rate: baud rate [Hz]; used by the 'TDS' analytic initializer.
        DL: accumulated dispersion D*L used by the 'TDS' initializer.
        c: speed of light [m/s].
        wave_length: carrier wavelength [m].
        sample_factor: samples per symbol.
        case_num: number of parallel filters (presumably polarizations).
        rolloff: unused; kept for interface compatibility.
        tap: FIR length.
        clusters: number of centroids the taps are quantized to.
        init_method: 'TDS' (analytic CD-compensating filter) or 'GD'
            (load 'filter_h' from the .mat file at ``h_path``).
        initial_centroids: 'kmeans' | 'linear' | 'random' centroid seeding.
        h_path: path to the .mat file; required when init_method == 'GD'.
    """

    def __init__(self,
                 symbol_rate=32e9,
                 DL=17e-6 * 100e3,
                 c=3e8,
                 wave_length=1552.52e-9,
                 sample_factor=2,
                 case_num=2,
                 rolloff=0.1,
                 tap=2048,
                 clusters=8,
                 init_method='TDS',
                 initial_centroids='kmeans',
                 h_path=None
                 ):
        super(GDAssistedLinearEqualizer, self).__init__()
        self.sample_factor = sample_factor
        self.case_num = case_num
        self.clusters = clusters
        Ts = 1 / symbol_rate / sample_factor  # sampling period
        if init_method == 'TDS':
            # Analytic time-domain CD-compensating filter (TDS).
            n = np.arange(-(tap // 2), (tap // 2) + tap % 2)
            h_temp = np.sqrt(1j * c * Ts ** 2 / (-DL * wave_length ** 2)) * np.exp(
                -1j * np.pi * c * Ts ** 2 * (n ** 2) / (-DL * wave_length ** 2))
            h = np.zeros((case_num, tap), dtype=np.complex128)
            for indx in range(case_num):
                h[indx, ...] = h_temp
            # Flatten to a (case_num * tap, 2) real array of (re, im) pairs.
            h = np.stack([np.real(h), np.imag(h)], axis=-1).reshape(-1, 2)
        elif init_method == 'GD':
            if h_path is None:
                raise ValueError('h_path is supposed to be provided.')
            h = scio.loadmat(h_path)['filter_h'].reshape(-1, 2)
        else:
            raise ValueError('Invalid init_method')
        '''Initialize Linear Kernel'''
        self.h = torch.from_numpy(h)
        centroids, labels = self._seed_centroids(self.h, clusters, initial_centroids)
        self.centroids = centroids
        self.register_buffer('init_centroids', centroids)
        # BUGFIX: identity initialization keeps the initial effective
        # centroids (fuzzy_matrix @ centroids) equal to the seeded ones.  The
        # previous zero initialization collapsed every tap to 0 on the first
        # forward pass, discarding the TDS/kmeans initialization entirely.
        self.fuzzy_matrix = nn.Parameter(torch.eye(clusters, dtype=torch.float64),
                                         requires_grad=True)
        self.labels = labels
        self.pr = PhaseRecLayer(1024)
        self.tap = tap

    @staticmethod
    def _seed_centroids(h, clusters, method):
        """Seed (centroids, labels) from the (N, 2) float64 tap tensor *h*."""
        if method == 'kmeans':
            kmeans = KMeans(n_clusters=clusters, random_state=42)
            kmeans.fit(h.numpy())
            centroids = torch.from_numpy(kmeans.cluster_centers_)
            labels = torch.from_numpy(kmeans.labels_)
        elif method == 'linear':
            # Centroids evenly spaced between the rows with the smallest and
            # largest (re + im) sum.
            min_row = h[torch.argmin(h.sum(dim=-1))]
            max_row = h[torch.argmax(h.sum(dim=-1))]
            centroids = torch.stack([torch.linspace(min_row[i].item(),
                                                    max_row[i].item(),
                                                    clusters)
                                     for i in range(h.shape[-1])], dim=-1).double()
            labels = torch.argmin(torch.cdist(h, centroids), dim=-1)
        elif method == 'random':
            # Pick `clusters` distinct taps at random as initial centroids.
            indices = torch.randperm(h.shape[0])[:clusters]
            centroids = h[indices]
            labels = torch.argmin(torch.cdist(h, centroids), dim=-1)
        else:
            raise ValueError('Invalid initial_centroids')
        return centroids, labels

    def forward(self, x):
        """Complex FIR filtering of ``x`` (shape [batch, pol, seq_len, 2]).

        During training the output is additionally downsampled to symbol rate
        and phase-recovered so the loss can compare it against symbols.
        """
        batch_size, pol, seq_len, ri = x.shape
        # Effective centroids after the learned mixing.
        centroids = self.fuzzy_matrix @ self.centroids.to(x.device)
        self.labels = self._update_labels(centroids)
        # Snap every tap to its nearest effective centroid.
        h = centroids[self.labels].reshape(self.case_num, self.tap, 2).to(x.device)
        x_r = x[..., 0].float()
        x_i = x[..., 1].float()
        h_r = h[..., 0].unsqueeze(1).float()
        h_i = h[..., 1].unsqueeze(1).float()

        # Complex convolution expressed as four real grouped conv1d calls.
        y_r_r = F.conv1d(x_r, h_r, groups=pol, padding=self.tap // 2)
        y_r_i = F.conv1d(x_r, h_i, groups=pol, padding=self.tap // 2)
        y_i_r = F.conv1d(x_i, h_r, groups=pol, padding=self.tap // 2)
        y_i_i = F.conv1d(x_i, h_i, groups=pol, padding=self.tap // 2)

        y_r = y_r_r - y_i_i
        y_i = y_r_i + y_i_r

        y = torch.stack([y_r, y_i], dim=-1)  # shape[batch_size, pol, seq_len, 2]
        if self.training:
            y = self.pr(y[..., 1::2, :])
        return y

    def _update_labels(self, centroids=None):
        """Return, for every tap in ``self.h``, the index of its nearest centroid."""
        device = centroids.device
        distance = torch.cdist(self.h.to(device), centroids)
        labels = torch.argmin(distance, dim=-1)
        return labels


class GDAssistedLinearEqualizerPLM(pl.LightningModule):
    """LightningModule wrapper around :class:`GDAssistedLinearEqualizer`.

    Expected kwargs:
        model_kwargs (dict): forwarded to ``GDAssistedLinearEqualizer``.
        lr (float): learning rate, default 1e-3.
        optimizer (str): 'Adam' (with ReduceLROnPlateau), 'SGD' or 'ASGD'.
    """

    def __init__(self, **plm_kwargs):
        super(GDAssistedLinearEqualizerPLM, self).__init__()
        # Persist constructor kwargs into the checkpoint so it can be
        # re-loaded without re-supplying them.
        self.save_hyperparameters()
        '''argument parsing'''
        model_kwargs = plm_kwargs.get('model_kwargs')
        lr = plm_kwargs.get('lr', 1e-3)
        optimizer = plm_kwargs.get('optimizer', 'Adam')
        '''argument parsing end'''

        actual_model_kwargs = model_kwargs.copy()
        self.model = GDAssistedLinearEqualizer(**actual_model_kwargs)
        self.lr = lr
        self.optimizer = optimizer

    def forward(self, x):
        return self.model(x)

    def configure_optimizers(self) -> OptimizerLRScheduler:
        """Build the optimizer; Adam additionally gets a plateau LR scheduler
        monitoring 'train_loss'."""
        if self.optimizer == 'Adam':
            optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
            scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                                   mode='min',
                                                                   factor=0.5,
                                                                   patience=2,
                                                                   min_lr=1e-5,
                                                                   verbose=True)
            return {'optimizer': optimizer,
                    'lr_scheduler': {'scheduler': scheduler,
                                     'monitor': 'train_loss'}}
        elif self.optimizer == 'SGD':
            return torch.optim.SGD(self.parameters(), lr=self.lr)
        elif self.optimizer == 'ASGD':
            return torch.optim.ASGD(self.parameters(), lr=self.lr)
        else:
            raise ValueError('Invalid optimizer')

    def err_function(self, x, y):
        """Rotation-invariant squared error between equalized symbols *x* and
        reference symbols *y* (last dim = [re, im]).

        The error is the minimum over the four pi/2 phase rotations of x,
        making the loss blind to the QAM quadrant ambiguity left over by
        phase recovery.
        """
        if x.ndim == 4:
            # Score only the center symbol of each block.
            x = x[..., x.shape[-2] // 2, :]
        rotate_fac = np.exp(1j * np.array([0, np.pi / 2, np.pi, np.pi / 2 * 3]))
        rotate_fac = rotate_fac.reshape(*[1] * (x.ndim - 1), -1)
        rotate_fac = torch.from_numpy(rotate_fac).to(x.device)
        rotate_fac.requires_grad_(False)
        rotate_fac = torch.view_as_real(rotate_fac)

        x = x[..., np.newaxis, :]
        y = y[..., np.newaxis, :]

        x = cmul(x, rotate_fac)           # apply all four rotations at once
        res = y - x
        err = torch.sum(res ** 2, dim=-1)   # |y - rot(x)|^2
        err = torch.min(err, dim=-1)[0]     # best rotation per symbol
        err = torch.mean(err, dim=[0, 1])   # average over batch and pol
        return err

    def training_step(self, batch, batch_idx):
        x, y = batch
        output = self(x)
        err = self.err_function(output, y)
        self.log('train_loss', err, on_step=True, on_epoch=True, prog_bar=True)
        return {'loss': err}

    def validation_step(self, batch, batch_idx):
        x, y = batch
        output = self(x)
        with torch.no_grad():
            # Training-mode forward already applies phase recovery; in eval
            # mode it must be applied here before scoring.
            output = self.model.pr(output[..., 1::2, :])
        err = self.err_function(output, y)
        # BUGFIX: log the value -- the returned dict alone is ignored by
        # recent Lightning versions, so val_loss was never recorded.
        self.log('val_loss', err, on_epoch=True, prog_bar=True)
        return {'val_loss': err}

    def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_closure):
        # Plain pass-through; kept as an explicit hook for future gradient
        # manipulation experiments.
        optimizer.step(closure=optimizer_closure)


def job_TDC_GD(eval_flag=False):
    """Train (or evaluate) the GD-assisted clustered linear equalizer on the
    16QAM / 32 GBaud / 3200 km simulation dataset.

    Args:
        eval_flag: when True, skip training and instead evaluate the
            checkpoint saved by a previous training run.
    """
    resultDir = os.path.join(BASE_DIR, 'result/TDCGradDescent')
    os.makedirs(resultDir, exist_ok=True)
    resultPath = os.path.join(resultDir, 'job_TDC_GD')
    os.makedirs(resultPath, exist_ok=True)
    # Directory holding pre-trained filter .mat files (only used when
    # init_method == 'GD'); note it is not created here.
    hpath = os.path.join(resultDir, 'Fuzzy_cluster')
    checkpointPath = os.path.join(resultDir, 'chkpts')
    os.makedirs(checkpointPath, exist_ok=True)

    max_epochs = 20
    batch_size = 64
    splitAmount = 65536  # total symbols split between train and validation
    tr_val_ratio = 7     # 7:3 train/validation split
    trsetLen = round(splitAmount * tr_val_ratio / (tr_val_ratio + 3))
    valsetLen = round(splitAmount * 3 / (tr_val_ratio + 3))
    tstsetLen = 131072
    block_size = 2048
    assert (block_size % 2 == 0)
    constellations = util.CONST_16QAM
    save_checkpoint = True

    '''Load Data'''
    lp = 2     # launch power [dBm] selecting which dataset file to load
    tap = 557  # equalizer FIR length
    dataPath = os.path.join(BASE_DIR, 'data/simulation/16QAM32Gbaud3200kmHe/PMD_eliminated_by_CMA'
                                      f'/trSet_lp_{lp}.mat')
    '''Form Dataloaders'''
    spanLen = 80e3  # span length [m]
    spanNum = 40
    D = 17e-6       # dispersion coefficient
    L = spanLen * spanNum
    symbolRate = 32e9
    # Half of the accumulated dispersion D*L; presumably the other half is
    # compensated elsewhere in the chain -- TODO confirm.
    DL = D * L / 2

    train_dataloader, _, _ = util.gen_dataloader(dataPath,
                                                 block_size=block_size,
                                                 symbol_num=trsetLen,
                                                 batch_size=batch_size,
                                                 constellations=constellations,
                                                 shuffle=True)
    # Validation set is taken from the end of the file to avoid overlapping
    # the (shuffled) training symbols.
    val_dataloader, valSet, prbsVal = util.gen_dataloader(dataPath,
                                                          block_size=block_size,
                                                          symbol_num=valsetLen,
                                                          batch_size=batch_size,
                                                          constellations=constellations,
                                                          shuffle=False,
                                                          count_from_end=True)
    test_dataloader, tstSet, prbsTst = util.gen_dataloader(dataPath,
                                                           block_size=block_size,
                                                           symbol_num=tstsetLen,
                                                           batch_size=batch_size,
                                                           constellations=constellations,
                                                           shuffle=False)
    '''Form dataloader end'''
    model_kwargs = {
        'DL': DL,
        'sample_factor': 2,
        'case_num': 2,
        'symbol_rate': symbolRate,
        'tap': tap,
        'clusters': 16,
        'init_method': 'TDS',
        'initial_centroids': 'kmeans',
        # Only read when init_method == 'GD'.
        'h_path': os.path.join(hpath, f'filter_h_{lp}dBm_{tap}taps.mat')
    }
    plm_kwargs = {
        'model_kwargs': model_kwargs,
        'lr': 3e-3,
        'optimizer': 'Adam',
    }
    if not eval_flag:
        mdl = GDAssistedLinearEqualizerPLM(**plm_kwargs)
        '''Specify callbacks and logger'''
        # early_stop_callback = EarlyStopping(monitor='loss',
        #                                     min_delta=1e-4,
        #                                     patience=5,
        #                                     verbose=True,
        #                                     mode='min')
        log_q_factor_callback = LogQFactorCallback(dataset=tstSet, prbs=prbsTst)
        logger = TensorBoardLogger(save_dir=resultPath, name='log', version='Adam')
        trainer_kwargs = {
            'callbacks': [log_q_factor_callback],
            'logger': logger,
            'max_epochs': max_epochs,
            'accelerator': 'gpu',
            # max_epochs + 1 effectively disables validation during training;
            # the Q-factor callback is used for monitoring instead.
            'check_val_every_n_epoch': max_epochs + 1,
        }
        trainer = pl.Trainer(**trainer_kwargs)
        trainer.fit(mdl, train_dataloader, val_dataloader)
        if save_checkpoint:
            trainer.save_checkpoint(filepath=os.path.join(
                checkpointPath,
                f'sim_trained_by_{lp}dBm_{tap}taps.ckpt'
            ))

        sig, ber = eval_ber(mdl, tstSet, prbsTst, use_cuda=False)
        print(f'lp {lp}: Q factor: {util.ber2q(np.mean(ber))}')
    else:
        mdl = GDAssistedLinearEqualizerPLM.load_from_checkpoint(
            os.path.join(checkpointPath, f'sim_trained_by_{lp}dBm_{tap}taps.ckpt'),
            **plm_kwargs
        )
        sig, ber = eval_ber(mdl, tstSet, prbsTst, use_cuda=False)
        print(f'lp {lp}: Q factor: {util.ber2q(np.mean(ber))}')


if __name__ == '__main__':
    # Train from scratch; pass eval_flag=True to evaluate a saved checkpoint.
    job_TDC_GD(eval_flag=False)
