import math

import numpy as np
import torch
import torch.nn as nn
from pytorch_lightning.utilities.types import OptimizerLRScheduler
from torch import optim
from torch.utils.data import DataLoader, random_split, Dataset
import os
import pytorch_lightning as pl
import util
from CR_CVPytorchModules import CplxBatchNorm1d, CLinear, CSinh, CDropout, CTanh, CReLu


def pick_indxes_in_group(L):
    """Build the grouped (symmetry-folded) triplet index tables for a window of length L.

    Candidate (m, n) pairs over [-M, M]^2 (M = floor(L/2)) are filtered to the
    fundamental region |m*n| < floor(L/2), m >= 0, n >= 0, m <= n (IXPM/SPM
    terms with m*n too large are dropped).  The eight symmetry variants of each
    surviving pair are laid out explicitly, the m+n index is appended, and the
    whole table is shifted by +M so every entry is a valid window index.

    Returns an int array of shape [8, mCases, 3]:
        axis 0 -> [I0, I1, I2, I3, IC0, IC1, IC2, IC3]
        axis 2 -> (m, n, m+n), each offset by +M.
    """
    M = int(np.floor(L / 2))
    axis_vals = np.arange(-M, M + 1)
    # grid_m[i, j] = axis_vals[i], grid_n[i, j] = axis_vals[j] (row-major,
    # matching the original repeat/transpose construction).
    grid_m, grid_n = np.meshgrid(axis_vals, axis_vals, indexing='ij')

    keep = np.abs(grid_m * grid_n) < np.abs(np.floor(L / 2))
    keep &= (grid_m >= 0) & (grid_n >= 0) & (grid_m <= grid_n)

    mm = grid_m[keep]
    nn = grid_n[keep]

    # Eight index variants of each fundamental (m, n) pair.
    variants = [
        (mm, nn),      # I0:  m,  n
        (nn, mm),      # I1:  n,  m
        (-mm, -nn),    # I2: -m, -n
        (-nn, -mm),    # I3: -n, -m
        (-mm, nn),     # IC0: -m,  n
        (nn, -mm),     # IC1:  n, -m
        (mm, -nn),     # IC2:  m, -n
        (-nn, mm),     # IC3: -n,  m
    ]
    pairs = np.stack([np.stack(v, axis=-1) for v in variants], axis=0)

    # Append m+n as the third column, then shift into [0, L) index space.
    table = np.concatenate([pairs, pairs.sum(axis=-1, keepdims=True)], axis=-1)
    return table.astype(int) + M


def pick_indxes(L):
    """Build the ungrouped triplet index table for a window of length L.

    Keeps every (m, n) pair over [-M, M]^2 (M = floor(L/2)) satisfying
    |m*n| < floor(L/2), appends m+n, and shifts by +M into valid window
    indices.  Returns an int array of shape [cases, 3] with columns
    (m, n, m+n), each offset by +M.
    """
    M = int(np.floor(L / 2))
    axis_vals = np.arange(-M, M + 1)
    # Row-major (m, n) grid, same ordering as the repeat/transpose original.
    grid_m, grid_n = np.meshgrid(axis_vals, axis_vals, indexing='ij')
    keep = np.abs(grid_m * grid_n) < np.abs(np.floor(L / 2))
    pairs = np.stack([grid_m[keep], grid_n[keep]], axis=-1)
    return np.concatenate([pairs, pairs.sum(axis=1, keepdims=True)], axis=1) + M


class NPDataset(Dataset):
    '''Sliding-window dataset that turns a complex received signal into
    perturbation triplets (plus labels) for triplet-NN training.'''

    def __init__(self, array, label, win_size, grouping, **make_decision_kwargs):
        '''
        pol is for two polarizations.
        shape of array: [pol, samples].
        shape of label: [pol, symbols]; if None, hard decisions on `array`
        are used as labels instead.
        win_size: sliding-window length L; window indices span [-M, M],
        M = floor(L/2).
        grouping: if True, use the grouped (symmetry-folded) triplet index
        tables from pick_indxes_in_group; otherwise pick_indxes.
        Recognized make_decision_kwargs: 'constellations', 'mod_order',
        'form_triplet_from_dec'.
        '''
        # Cast to single-precision complex (np.csingle: two 32-bit floats
        # per element, one for the real and one for the imaginary part).
        self.array = np.csingle(array)

        self.win_size = win_size
        L = self.win_size  # NOTE(review): L is unused in __init__
        self.constellations = make_decision_kwargs.get('constellations', util.CONST_16QAM)
        self.mod_order = make_decision_kwargs.get('mod_order', 4)
        self.form_triplet_from_dec = make_decision_kwargs.get('form_triplet_from_dec', False)
        if label is not None:
            self.label = label
        else:
            # No labels supplied: fall back to hard decisions on the signal.
            self.label = util.decide(array,
                                     self.constellations,
                                     mod_order=self.mod_order)

        if self.form_triplet_from_dec:
            # Triplets will be formed from decided symbols rather than raw samples.
            self.sig_dec = util.decide(array, self.constellations, mod_order=self.mod_order)

        # Normalize 1-D inputs to the canonical [pol, samples] layout.
        if self.label.ndim == 1:
            self.label = self.label.reshape(1, -1)

        if self.array.ndim == 1:
            self.array = self.array.reshape(1, -1)

        self.label = self.label.astype(np.complex128)
        self.array = self.array.astype(np.complex128)

        self.grouping = grouping

    def __len__(self):
        # One dataset item per sample position.
        return self.array.shape[-1]

    def __getitem__(self, indx):

        # Split a complex array into stacked (real, imag) along a new last axis.
        def view_as_real(x):
            if x.ndim == 0:
                x = x[np.newaxis]
            return np.stack([x.real, x.imag], axis=-1)

        L = self.win_size
        M = int(np.floor(L / 2))
        if isinstance(indx, np.ndarray):
            # Batched access: indx is a 1-D array of center positions.
            assert indx.ndim == 1

            def wrapper(x, N):
                return x % N

            # Window indices [indx-M, indx+M] for every requested center.
            indces = np.linspace(indx - M, indx + M, num=self.win_size, axis=-1, dtype=int)
            # Wrap out-of-range indices around the signal (circular indexing):
            # e.g. if indx equals the data length, indx+M picks samples 0:M.
            '''让超出索引的部分可以在信号内循环，比如indx=131072为数据的长度，那么indx+M会取0:M位的数据'''
            indces = wrapper(indces, self.array.shape[-1])

            if not self.form_triplet_from_dec:
                ret = np.stack([
                    self.array[indx, ...][indces] for indx in range(self.array.shape[0])
                ], axis=1)
            else:
                ret = np.stack([
                    self.sig_dec[indx, ...][indces] for indx in range(self.array.shape[0])
                ], axis=1)

            # Received symbols at the window centers, one row per polarization.
            ori_sym = np.stack(
                [self.array[x, ...][indx] for x in range(self.array.shape[0])], axis=0
            )
            ret = self.formTriplet(ret)
            '''
                shape of returns (ret, label, ori_sym):
                ret: [len(indx), 2 pols, triplet number, 2 for real and imaginary parts]
                label: [2 pols, len(indx), 2 for real and imaginary parts]
                ori_sym: [2 pols, 2 for real and imaginary parts]
            '''
            return (
                view_as_real(ret),
                view_as_real(self.label[..., indx]),
                view_as_real(ori_sym),
            )
        else:
            # Scalar access: np.take with mode='wrap' gives the same circular
            # windowing as the batched branch above.
            if not self.form_triplet_from_dec:
                ret = np.take(
                    self.array,
                    indices=np.arange(-M + indx, indx + M + 1),
                    axis=-1,
                    mode='wrap',
                )
            else:
                ret = np.take(
                    self.sig_dec,
                    indices=np.arange(-M + indx, indx + M + 1),
                    axis=-1,
                    mode='wrap'
                )
            ori_sym = self.array[:, indx]
            ret = self.formTriplet(ret)
            '''
                shape of returns (ret, label, ori_sym):
                ret: [triplet number, 2 for real and imaginary parts]
                label: [2 pols, 2 for real and imaginary parts]
                ori_sym: [2 pols, 2 for real and imaginary parts] 
            '''
            return (
                view_as_real(ret),
                view_as_real(self.label[..., indx]),
                view_as_real(ori_sym),
            )

    def formTriplet(self, sig):
        '''
        shape of sig: [Batches, 2 pols, samples] or [2 pols, samples]

        Computes perturbation triplets sig[m] * sum_pol(sig[n] * conj(sig[m+n]))
        from the precomputed index tables; the inner sum runs over the
        polarization axis.  With grouping enabled, the 8 symmetry variants are
        folded into two sums (G1 over I0..I3, G2 over IC0..IC3) concatenated
        along the last (triplet) axis.
        '''
        if self.grouping:
            tripletIndxes = pick_indxes_in_group(self.win_size)
        else:
            tripletIndxes = pick_indxes(self.win_size)

        sigm = sig[..., tripletIndxes[..., 0]]  # samples at index m
        sign = sig[..., tripletIndxes[..., 1]]  # samples at index n
        sigmpn = sig[..., tripletIndxes[..., 2]]  # samples at index m + n

        P = sign * sigmpn.conj()
        # Sum over the polarization axis (one extra leading axis exists when
        # grouping, hence the -2 - int(grouping) offset).
        P = np.sum(P, axis=-2 - int(self.grouping), keepdims=True)
        ret = sigm * P

        if self.grouping:
            G1 = np.sum(ret[..., 0:4, :], axis=-2, keepdims=False)
            G2 = np.sum(ret[..., 4:, :], axis=-2, keepdims=False)
            ret = np.concatenate([G1, G2], axis=-1)
        return ret


class TNN(nn.Module):
    """Real-valued fully connected triplet network.

    Input: flattened triplet features of width 2 * tripletNum (real and
    imaginary parts interleaved by formInputTensor).  Output: 2 values
    (real/imag of the predicted perturbation).

    Args:
        hidden_sizes: list of hidden-layer widths; layer i maps
            hidden_sizes[i] -> hidden_sizes[i + 1], the last hidden layer
            keeps its width.
        tripletNum: number of triplet features.
        use_batch_normalization_flag: insert BatchNorm1d after every linear
            layer when True.
    """

    def __init__(self, hidden_sizes, tripletNum, use_batch_normalization_flag=False):
        super(TNN, self).__init__()
        hidden_layers_num = len(hidden_sizes)
        self.hidden_layers_num = hidden_layers_num
        self.hidden_layers = nn.ModuleList([])
        self.acts = nn.ModuleList([])
        if use_batch_normalization_flag:
            self.BNs = nn.ModuleList([])
            self.first_BN = nn.BatchNorm1d(num_features=hidden_sizes[0])
            print('use BN')
        self.triplet_num = tripletNum
        # list(...) + [...] keeps this correct for tuples as well as lists.
        hidden_out_sizes = list(hidden_sizes[1:]) + [hidden_sizes[-1]]
        # Factor of 2: real and imaginary parts are flattened together
        # by formInputTensor before reaching the network.
        self.input_layer = nn.Linear(in_features=self.triplet_num * 2,
                                     out_features=hidden_sizes[0])
        self.first_act = nn.ReLU()
        for indx in range(hidden_layers_num):
            self.hidden_layers.append(nn.Linear(
                in_features=hidden_sizes[indx],
                out_features=hidden_out_sizes[indx],
            ))
            # BUG FIX: the original assigned `self.acts = nn.ReLU()` here,
            # clobbering the ModuleList created above.  Keep one activation
            # per hidden layer in the list instead (same math: ReLU is
            # stateless, so the forward pass is unchanged).
            self.acts.append(nn.ReLU())
            if use_batch_normalization_flag:
                self.BNs.append(nn.BatchNorm1d(num_features=hidden_out_sizes[indx]))
        self.out_layer = nn.Linear(in_features=hidden_out_sizes[-1],
                                   out_features=2)
        self.useBN = use_batch_normalization_flag

    def forward(self, x):
        """x: [batch, 2 * triplet_num] -> [batch, 2]."""
        x = self.input_layer(x)
        if self.useBN:
            x = self.first_BN(x)
        x = self.first_act(x)
        for indx in range(self.hidden_layers_num):
            x = self.hidden_layers[indx](x)
            if self.useBN:
                x = self.BNs[indx](x)
            x = self.acts[indx](x)
        x = self.out_layer(x)
        return x


class TNN_PLM(pl.LightningModule):
    """LightningModule wrapping the real-valued triplet network (TNN).

    The network predicts the nonlinear perturbation of one polarization
    (`which_pol`); the training target is `label - received symbol`.

    Recognized plm_kwargs: model_kwargs, lr, which_pol, weight_decay,
    group_triplet, optimizer ('Adam' | 'SGD' | 'ASGD'), err_function
    ('MSE' | 'Cauchy' | 'Huber'), time_win_size.
    """

    def __init__(self, **plm_kwargs):
        super(TNN_PLM, self).__init__()
        '''argument parsing'''
        model_kwargs = plm_kwargs.get('model_kwargs')
        lr = plm_kwargs.get('lr', 1e-3)
        which_pol = plm_kwargs.get('which_pol', 0)
        weight_decay = plm_kwargs.get('weight_decay', 0)
        group_triplet = plm_kwargs.get('group_triplet', True)
        optimizer = plm_kwargs.get('optimizer', 'Adam')
        err_function = plm_kwargs.get('err_function', "MSE")
        '''argument parsing end'''

        self.group_triplet = group_triplet
        # The triplet count fixes the input width of the network.
        if group_triplet:
            tripletNum = pick_indxes_in_group(plm_kwargs['time_win_size']).shape[1] * 2
        else:
            tripletNum = pick_indxes(plm_kwargs['time_win_size']).shape[0]

        if err_function == 'MSE':
            self.calculate_loss = self.calculate_mse_loss
        elif err_function == 'Cauchy':
            self.calculate_loss = self.calculate_cauchy_loss
        elif err_function == 'Huber':
            self.huber_loss = nn.SmoothL1Loss(beta=2 / math.sqrt(5))
            self.calculate_loss = self.calculate_huber_loss
        else:
            raise ValueError('Invalid err_function')

        actual_model_kwargs = model_kwargs.copy()
        actual_model_kwargs['tripletNum'] = tripletNum

        self.model = TNN(**actual_model_kwargs)
        self.lr = lr
        self.optimizer = optimizer
        self.weight_decay = weight_decay
        self.which_pol = which_pol

    def forward(self, x):
        x = self.model(x.float())
        return x

    def configure_optimizers(self) -> OptimizerLRScheduler:
        if self.optimizer == 'Adam':
            return optim.Adam(self.parameters(), lr=self.lr, weight_decay=self.weight_decay)
        elif self.optimizer == 'SGD':
            return optim.SGD(self.parameters(), lr=self.lr, weight_decay=self.weight_decay)
        elif self.optimizer == 'ASGD':
            # BUG FIX: was `self.optimzer` (AttributeError for any optimizer
            # other than Adam/SGD) and returned optim.SGD for 'ASGD'.
            return optim.ASGD(self.parameters(), lr=self.lr, weight_decay=self.weight_decay)
        else:
            raise ValueError('Invalid optimizer.')

    def formInputTensor(self, x):
        """Flatten the trailing [triplet, real/imag] axes into one feature axis.

        Column-first ordering (transpose before reshape); row-first was
        observed to converge much more slowly.
        """
        shape = x.shape
        ri = shape[-1]
        ndim = shape[-2]
        '''column first'''
        x = x.transpose(-2, -1)
        '''row first with no transpose'''
        '''
            row first leads to very slow convergence property, I do not know why.
        '''
        x = x.reshape((*shape[0:-2], ndim * ri))  # note the dimension change here
        return x

    def err_function(self, x, y):
        err = self.calculate_loss(x, y)
        return err

    def calculate_mse_loss(self, x, y):
        # Squared error of the selected polarization, summed over the
        # real/imag axis, averaged over the batch.  NOTE(review): x.squeeze()
        # assumes the model output broadcasts against y's pol slice.
        res = y[:, self.which_pol, :] - x.squeeze()
        err = torch.mean(torch.sum(res ** 2, dim=-1), dim=0)
        return err

    def calculate_cauchy_loss(self, x, y, c=2 / np.sqrt(5)):
        res = y[:, self.which_pol, :] - x.squeeze()
        # BUG FIX: torch.log takes no `dim` argument (the original raised a
        # TypeError).  Take log element-wise, then sum over real/imag and
        # average over the batch, mirroring the MSE loss structure.
        err = torch.mean(torch.sum(torch.log(1 + (res / c) ** 2), dim=-1), dim=0)
        return err

    def calculate_huber_loss(self, x, y):
        y0 = y[:, self.which_pol, :]
        x0 = x.squeeze()
        err = self.huber_loss(x0, y0)
        return err

    def training_step(self, batch, batch_indx):
        x = batch[0][:, self.which_pol, ...]
        x = self.formInputTensor(x)
        y = batch[1]
        ori_sym = batch[2]
        output = self(x)
        # Target is the perturbation: label minus received symbol.
        err = self.err_function(output, y - ori_sym)
        err = err * 1000  # scale loss for readability/logging
        self.log('train_loss', err, prog_bar=True, on_epoch=True, on_step=False, logger=True)
        return {'loss': err}

    def validation_step(self, batch, batch_idx):
        x = batch[0][:, self.which_pol, ...]
        x = self.formInputTensor(x)
        y = batch[1]
        ori_sym = batch[2]
        output = self(x)
        err = self.err_function(output, y - ori_sym)
        err = err * 1000
        self.log('val_loss', err, prog_bar=True, on_epoch=True, on_step=False, logger=True)
        return {'val_loss': err}

    def test_step(self, batch, batch_idx):
        x = batch[0][:, self.which_pol, ...]
        x = self.formInputTensor(x)
        y = batch[1]
        ori_sym = batch[2]
        output = self(x)
        err = self.err_function(output, y - ori_sym)

    def quantize(self, bit_width, approach='maxabs'):
        """Uniformly quantize then dequantize every model weight in place
        (quantization-aware evaluation at the given bit width)."""
        from CR_DSPPytorch import cal_uniform_quantization_parameters, quantize, dequantize
        with torch.no_grad():
            state_dict = self.model.state_dict()
            new_state_dict = {}
            for k, p in state_dict.items():
                w = p.data
                s, lower_bound = cal_uniform_quantization_parameters(
                    w.flatten(),
                    axis=-1,
                    bit_width=bit_width,
                    approach=approach
                )
                w_int = quantize(w,
                                 bit_width=bit_width,
                                 s=s,
                                 axis=-1,
                                 lower_bound=lower_bound,
                                 approach=approach)[1]
                w_quant = dequantize(w_int, torch.sign(w), s=s, a=lower_bound)
                new_state_dict[k] = w_quant
            self.model.load_state_dict(new_state_dict)


class CVTNN(nn.Module):
    """Complex-valued fully connected triplet network.

    Same topology as TNN but built from complex-valued layers (CLinear,
    CReLu, CplxBatchNorm1d); input width is tripletNum complex features and
    the output is a single complex value.
    """

    def __init__(self, hidden_sizes, tripletNum, use_batch_normalization_flag=False):
        super(CVTNN, self).__init__()
        hidden_layers_num = len(hidden_sizes)
        self.hidden_layers_num = hidden_layers_num
        self.hidden_layers = nn.ModuleList([])
        self.acts = nn.ModuleList([])
        if use_batch_normalization_flag:
            self.BNs = nn.ModuleList([])
            self.first_BN = CplxBatchNorm1d(num_features=hidden_sizes[0])
            print('use CBN')
        self.triplet_num = tripletNum
        # list(...) + [...] keeps this correct for tuples as well as lists.
        hidden_out_sizes = list(hidden_sizes[1:]) + [hidden_sizes[-1]]
        self.input_layer = CLinear(in_features=self.triplet_num,
                                   out_features=hidden_sizes[0])
        self.first_act = CReLu(0.5)
        for indx in range(hidden_layers_num):
            self.hidden_layers.append(CLinear(
                in_features=hidden_sizes[indx],
                out_features=hidden_out_sizes[indx],
            ))
            # BUG FIX: the original assigned `self.acts = CReLu(0.5)` here,
            # clobbering the ModuleList created above.  Keep one activation
            # per hidden layer in the list instead.
            self.acts.append(CReLu(0.5))
            if use_batch_normalization_flag:
                self.BNs.append(CplxBatchNorm1d(num_features=hidden_out_sizes[indx]))
        self.out_layer = CLinear(in_features=hidden_out_sizes[-1],
                                 out_features=1)
        self.useBN = use_batch_normalization_flag

    def forward(self, x):
        x = self.input_layer(x)
        if self.useBN:
            x = self.first_BN(x)
        x = self.first_act(x)
        for indx in range(self.hidden_layers_num):
            x = self.hidden_layers[indx](x)
            if self.useBN:
                x = self.BNs[indx](x)
            x = self.acts[indx](x)
        x = self.out_layer(x)
        return x


class CVTNN_PLM(pl.LightningModule):
    """LightningModule wrapping the complex-valued triplet network (CVTNN).

    The network predicts the nonlinear perturbation of one polarization; the
    loss compares (output + received symbol) against the label.

    Recognized plm_kwargs: model_kwargs, lr, which_pol, weight_decay,
    group_triplet, optimizer ('Adam' | 'SGD' | 'ASGD'), err_function
    ('MSE' | 'Cauchy' | 'Huber' | 'Goard'), time_win_size.
    """

    def __init__(self, **plm_kwargs):
        super(CVTNN_PLM, self).__init__()
        '''argument parsing'''
        model_kwargs = plm_kwargs.get('model_kwargs')
        lr = plm_kwargs.get('lr', 1e-3)
        which_pol = plm_kwargs.get('which_pol', 0)
        weight_decay = plm_kwargs.get('weight_decay', 0)
        group_triplet = plm_kwargs.get('group_triplet', True)
        optimizer = plm_kwargs.get('optimizer', 'Adam')
        err_function = plm_kwargs.get('err_function', "MSE")
        '''argument parsing end'''

        self.group_triplet = group_triplet
        # The triplet count fixes the input width of the network.
        if group_triplet:
            tripletNum = pick_indxes_in_group(plm_kwargs['time_win_size']).shape[1] * 2
        else:
            tripletNum = pick_indxes(plm_kwargs['time_win_size']).shape[0]

        if err_function == 'MSE':
            self.calculate_loss = self.calculate_mse_loss
        elif err_function == 'Cauchy':
            self.calculate_loss = self.calculate_cauchy_loss
        elif err_function == 'Huber':
            self.huber_loss = nn.SmoothL1Loss(beta=2 / math.sqrt(5))
            self.calculate_loss = self.calculate_huber_loss
        elif err_function == 'Goard':
            self.calculate_loss = self.calculate_goard_loss
        else:
            raise ValueError('Invalid err_function')

        actual_model_kwargs = model_kwargs.copy()
        actual_model_kwargs['tripletNum'] = tripletNum

        self.model = CVTNN(**actual_model_kwargs)
        self.lr = lr
        self.optimizer = optimizer
        self.weight_decay = weight_decay
        self.which_pol = which_pol

    def forward(self, x):
        x = self.model(x)
        return x

    def configure_optimizers(self) -> OptimizerLRScheduler:
        if self.optimizer == 'Adam':
            return optim.Adam(self.parameters(), lr=self.lr, weight_decay=self.weight_decay)
        elif self.optimizer == 'SGD':
            return optim.SGD(self.parameters(), lr=self.lr, weight_decay=self.weight_decay)
        elif self.optimizer == 'ASGD':
            # BUG FIX: was `self.optimzer` (AttributeError for any optimizer
            # other than Adam/SGD) and returned optim.SGD for 'ASGD'.
            return optim.ASGD(self.parameters(), lr=self.lr, weight_decay=self.weight_decay)
        else:
            raise ValueError('Invalid optimizer.')

    def err_function(self, x, y):
        err = self.calculate_loss(x, y)
        return err

    def calculate_mse_loss(self, x, y):
        # Squared error of the selected polarization, summed over the
        # real/imag axis, averaged over the batch.  NOTE(review): x.squeeze()
        # assumes the model output broadcasts against y's pol slice.
        res = y[:, self.which_pol, :] - x.squeeze()
        err = torch.mean(torch.sum(res ** 2, dim=-1), dim=0)
        return err

    def calculate_cauchy_loss(self, x, y, c=2 / np.sqrt(5)):
        res = y[:, self.which_pol, :] - x.squeeze()
        # BUG FIX: torch.log takes no `dim` argument (the original raised a
        # TypeError).  Take log element-wise, then sum over real/imag and
        # average over the batch, mirroring the MSE loss structure.
        err = torch.mean(torch.sum(torch.log(1 + (res / c) ** 2), dim=-1), dim=0)
        return err

    def calculate_huber_loss(self, x, y):
        y0 = y[:, self.which_pol, :]
        x0 = x.squeeze()
        err = self.huber_loss(x0, y0)
        return err

    def calculate_goard_loss(self, x, y):
        # Modulus-based loss: penalizes deviation of |x|^2 from 1.32
        # (presumably the mean constellation power — TODO confirm).
        res = torch.sum(x ** 2, dim=-1, keepdim=True)
        res = res - 1.32
        err = torch.mean(res ** 2, dim=0)
        return err

    def training_step(self, batch, batch_indx):
        x = batch[0][:, self.which_pol, :, :]
        y = batch[1]
        ori_sym = batch[2]
        output = self(x)
        # Compare (predicted perturbation + received symbol) with the label.
        err = self.err_function(output + ori_sym[..., np.newaxis, self.which_pol, :], y)
        err = err * 1000  # scale loss for readability/logging
        self.log('train_loss', err, prog_bar=True, on_epoch=True, on_step=False, logger=True)
        return {'loss': err}

    def validation_step(self, batch, batch_idx):
        x = batch[0][:, self.which_pol, :, :]
        y = batch[1]
        ori_sym = batch[2]
        output = self(x)
        err = self.err_function(output + ori_sym[..., np.newaxis, self.which_pol, :], y)
        err = err * 1000
        self.log('val_loss', err, prog_bar=True, on_epoch=True, on_step=False, logger=True)
        return {'val_loss': err}

    def test_step(self, batch, batch_idx):
        x = batch[0][:, self.which_pol, :, :]
        y = batch[1]
        ori_sym = batch[2]
        output = self(x)
        err = self.err_function(output + ori_sym[..., np.newaxis, self.which_pol, :], y)

    def quantize(self, bit_width, approach='maxabs'):
        """Uniformly quantize then dequantize every model weight in place.

        Parameters whose last dimension is 2 are treated as complex weights
        (real/imag stacked) and quantized per part with independent scales.
        """
        from CR_DSPPytorch import cal_uniform_quantization_parameters, quantize, dequantize
        with torch.no_grad():
            state_dict = self.model.state_dict()
            new_state_dict = {}
            for k, p in state_dict.items():
                if p.shape[-1] == 2:
                    w_r = p.data[..., 0]
                    w_i = p.data[..., 1]
                    s_r, lower_bound_r = cal_uniform_quantization_parameters(
                        w_r.flatten(),
                        axis=-1,
                        bit_width=bit_width,
                        approach=approach
                    )
                    s_i, lower_bound_i = cal_uniform_quantization_parameters(
                        w_i.flatten(),
                        axis=-1,
                        bit_width=bit_width,
                        approach=approach
                    )
                    # BUG FIX: the original passed the undefined name `s`
                    # here (NameError); use the per-part scales s_r / s_i.
                    w_r_int = quantize(w_r,
                                       bit_width=bit_width,
                                       s=s_r,
                                       axis=-1,
                                       lower_bound=lower_bound_r,
                                       approach=approach)[1]
                    w_i_int = quantize(w_i,
                                       bit_width=bit_width,
                                       s=s_i,
                                       axis=-1,
                                       lower_bound=lower_bound_i,
                                       approach=approach)[1]
                    w_r_quant = dequantize(w_r_int, torch.sign(w_r), s=s_r, a=lower_bound_r)
                    w_i_quant = dequantize(w_i_int, torch.sign(w_i), s=s_i, a=lower_bound_i)
                    w_quant = torch.stack([w_r_quant, w_i_quant], dim=-1)
                else:
                    w = p.data
                    s, lower_bound = cal_uniform_quantization_parameters(
                        w.flatten(),
                        axis=-1,
                        bit_width=bit_width,
                        approach=approach
                    )
                    w_int = quantize(w,
                                     bit_width=bit_width,
                                     s=s,
                                     axis=-1,
                                     lower_bound=lower_bound,
                                     approach=approach)[1]
                    w_quant = dequantize(w_int, torch.sign(w), s=s, a=lower_bound)
                new_state_dict[k] = w_quant
            self.model.load_state_dict(new_state_dict)
