from multiprocessing import Pool

import numpy as np
import scipy
import scipy.special  # "import scipy" alone does not guarantee scipy.special is loaded
import torch
import torch.nn as nn
from torch._C import device

import model.model_LISv2_mutilayer_emb as model_LISv2_mutilayer_emb


class Criterion(nn.Module):
    """Top-level training criterion: delegates everything to an ELISLoss.

    ``self.Loss`` (CrossEntropyLoss) is instantiated for parity with other
    criterion classes but is not used by ``forward``.
    """

    def __init__(
        self,
        data=None,
        DisE=None,
        DisG=None,
        device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
        perplexity=10,
        r_e=1.0,
        r_p=1.0,
        enlarge=2.0,
        v=0.001,
        PE=None,
        PG=None,
        rhoE=None,
        rhoG=None,
    ):
        super().__init__()

        self.Loss = nn.CrossEntropyLoss()
        # All graph/Euclidean similarity bookkeeping lives in ELISLoss;
        # note v feeds vs (schedule start) while ve is pinned to 100.0.
        self.elisLoss = ELISLoss(
            DisE=DisE,
            DisG=DisG,
            data=data,
            vs=v,
            ve=100.0,
            perplexity=perplexity,
            r_e=r_e,
            r_p=r_p,
            enlarge=enlarge,
            PE=PE,
            PG=PG,
            rhoE=rhoE,
            rhoG=rhoG,
        )

    def forward(self, inputs, outputs, epoch, index):
        """Compute the ELIS loss for one batch; returned as a one-element list."""
        elis = self.elisLoss.CalLoss(outputs, inputs, epoch, inputDataIndex=index)
        return [elis]


class ELISLoss(nn.Module):
    """ELIS loss: cross entropy between precomputed input-space similarity
    matrices (graph and Euclidean) and the latent-space similarities, with a
    continuation schedule on the Student-t degree-of-freedom parameter v.
    """

    def __init__(
        self,
        data=None,
        vs=0.001,
        ve=100,
        perplexity=10,
        etapow=2,
        device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
        verbos=0,
        DisE=None,
        DisG=None,
        r_e=1.0,
        r_p=1.0,
        enlarge=2,
        PE=None,
        PG=None,
        rhoE=None,
        rhoG=None,
    ):
        """Build the loss and (optionally) precompute the input-space P matrices.

        Args:
            data (tensor, optional): full training set; flattened and used only
                for the per-sample sigma search. When None the whole
                precomputation branch (and the sigma search) is skipped.
            vs (float, optional): start of the continuation schedule for the
                latent v. Defaults to 0.001.
            ve (int, optional): end of the continuation schedule. Defaults to 100.
            perplexity (int, optional): Q in the paper — target neighbour count
                for the sigma search. Defaults to 10.
            etapow (int, optional): power of the eta. Defaults to 2.
            device: device on which the cached P / rho tensors live.
            verbos (int, optional): 1 prints progress information. Defaults to 0.
            DisE (ndarray, optional): pairwise Euclidean distance matrix.
            DisG (ndarray, optional): pairwise graph distance matrix; +inf marks
                pairs with no graph connection.
            r_e, r_p (float, optional): weights of the Euclidean / graph terms.
            enlarge (float, optional): +inf entries of DisG are replaced by
                (per-row max) * enlarge before the sigma search. Defaults to 2.
            PE, PG, rhoE, rhoG (ndarray, optional): precomputed similarity
                matrices and rho vectors; when PE is given all four are used
                as-is and the sigma search is skipped.
        """
        nn.Module.__init__(self)
        with torch.no_grad():
            if verbos == 1:
                print("start init ELIS Loss")
            self.device = device
            self.Pdist = nn.PairwiseDistance(p=2, eps=1e-12)
            self.perplexity = perplexity
            self.epoch = 0
            # Fixed v pair used for the input-space P (index 0) and the initial
            # latent v (index 1); the latent v is replaced per-epoch by
            # _ChangeVList.
            self.vList = [100, 0.001]
            self.gammaList = self._CalGammaF(self.vList)
            self.etapow = etapow
            self.r_e = r_e
            self.r_p = r_p
            self.enlarge = enlarge
            if verbos == 1:
                print("start calculate pairwise distance and sigma, rho list")

            if data is not None:
                # True where the pair is graph-connected (finite DisG entry).
                self.ingraphmask = torch.tensor(~np.isposinf(DisG)).bool()

                if PE is None:
                    # Replace +inf graph distances with an enlarged per-row
                    # maximum so the sigma search only sees finite values.
                    # NOTE: this mutates the caller's DisG array in place.
                    copyDisG = np.copy(DisG)
                    infcopyDisG = np.isposinf(copyDisG)
                    copyDisG[infcopyDisG] = -100
                    maxDisG = copyDisG.max(axis=0)

                    for i in range(DisG.shape[0]):
                        DisG[i][infcopyDisG[i]] = maxDisG[i] * self.enlarge

                    # BUGFIX: the original printed the literal string
                    # 'sigma search {} DisG' (format placeholder never filled).
                    print("sigma search DisG")

                    self.rhoG, self.sigmaListlayerG = self.InitSigmaSearchEverySample(
                        self.gammaList,
                        self.vList,
                        data.reshape(data.shape[0], -1),
                        torch.tensor(DisG),
                    )
                    self.rhoG = self.rhoG.to(device)

                    self.PG = self._CalPt(
                        dist=torch.tensor(DisG).to(device),
                        rho=self.rhoG,
                        sigma_array=self.sigmaListlayerG[0],
                        gamma=self.gammaList[0],
                        v=self.vList[0],
                    ).float()

                    self.rhoE, self.sigmaListlayerE = self.InitSigmaSearchEverySample(
                        self.gammaList,
                        self.vList,
                        data.reshape(data.shape[0], -1),
                        torch.tensor(DisE),
                    )
                    self.rhoE = self.rhoE.to(device)

                    self.PE = self._CalPt(
                        dist=torch.tensor(DisE).to(device),
                        rho=self.rhoE,
                        sigma_array=self.sigmaListlayerE[0],
                        gamma=self.gammaList[0],
                        v=self.vList[0],
                    ).float()
                    # BUGFIX: same broken format string as above for DisE.
                    print("sigma search DisE")
                else:
                    # Everything precomputed: just move onto the device.
                    self.PE = torch.tensor(PE, device=self.device).float()
                    self.PG = torch.tensor(PG, device=self.device).float()
                    self.rhoE = torch.tensor(rhoE, device=self.device).float()
                    self.rhoG = torch.tensor(rhoG, device=self.device).float()

            if verbos == 1:
                print("start calculate continuation step")
            # Continuation schedule for the latent v: 1000 epochs held at vs,
            # 2000 epochs log-spaced from vs to ve, then 17001 epochs at ve.
            s = np.log10(vs)
            e = np.log10(ve)
            self.vListForEpoch = np.concatenate(
                [
                    np.zeros((1000,)) + 10 ** s,
                    np.logspace(s, e, 2000),
                    np.zeros((17001,)) + 10 ** e,
                ]
            )

            print("init LISV2_MLP mocol model, gaama is: {}".format(self.gammaList))
            torch.cuda.empty_cache()

    def _CalGammaF(self, vList):
        """Return the Student-t pdf normalisation constant for each v in vList."""
        out = []
        for v in vList:
            a = scipy.special.gamma((v + 1) / 2)
            b = np.sqrt(v * np.pi) * scipy.special.gamma(v / 2)
            out.append(a / b)
        return out

    def _CalPt(self, dist, rho, sigma_array, gamma, v=100, split=1):
        """Turn a squared-distance matrix into a symmetric similarity matrix.

        Args:
            dist: (n, n) squared distances.
            rho: per-row shift tensor, or a non-tensor placeholder (latent
                space), in which case `dist` is used as-is.
            sigma_array: per-row scale (only used when rho is a tensor).
            gamma, v: Student-t similarity parameters.
            split (int, optional): number of row chunks to process sequentially
                (memory saving). Defaults to 1.

        Returns:
            (n, n) tensor P = Pij + Pij^T - Pij * Pij^T (probabilistic OR).
        """
        if torch.is_tensor(rho):
            # Input space: shift by rho and scale by sigma, per row.
            dist_rho = (dist - rho.reshape(-1, 1)) / sigma_array.reshape(-1, 1)
        else:
            # Latent space: raw distances. NOTE: this aliases `dist`, so the
            # clamp below mutates the caller's tensor — harmless here because
            # callers pass freshly computed, already non-negative distances.
            dist_rho = dist
        dist_rho[dist_rho < 0] = 0
        sample_index_list = torch.linspace(0, dist.shape[0], int(split) + 1)
        for i in range(split):
            dist_rho_c = dist_rho[
                int(sample_index_list[i]) : int(sample_index_list[i + 1])
            ]
            Pij_c = torch.pow(
                gamma
                * torch.pow((1 + dist_rho_c / v), -1 * (v + 1) / 2)
                * torch.sqrt(torch.tensor(2 * 3.14)),
                2,
            )
            if i == 0:
                Pij = Pij_c
            else:
                Pij = torch.cat([Pij, Pij_c], dim=0)
        P = Pij + Pij.t() - torch.mul(Pij, Pij.t())

        return P

    def _Distance_squared(self, x, y, min_val=1e-12):
        """Pairwise squared Euclidean distances between rows of x and y.

        Inputs are flattened to 2-D; the result is clamped to `min_val` so
        downstream logs/divisions never see exact zeros.
        """
        x = x.reshape(x.shape[0], -1)
        y = y.reshape(y.shape[0], -1)

        m, n = x.size(0), y.size(0)
        xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
        yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
        dist = xx + yy
        # dist = ||x||^2 + ||y||^2 - 2 * x @ y^T
        # BUGFIX: addmm_(beta, alpha, mat1, mat2) positional form is deprecated
        # and removed in current PyTorch; use the keyword-argument form.
        dist.addmm_(x, y.t(), beta=1, alpha=-2)
        d = dist.clamp(min=min_val)
        return d

    def _CE(self, P, Q):
        """Element-wise binary cross entropy between two similarity matrices."""
        EPS = 1e-12  # guards log(0)
        losssum1 = P * torch.log(Q + EPS)
        losssum2 = (1 - P) * torch.log(1 - Q + EPS)
        losssum = -1 * (losssum1 + losssum2)
        return losssum

    def InitSigmaSearchEverySample(self, gammaList, vList, data, dist, num_layer=2):
        """Compute per-sample rho and sigma for the input-space similarities.

        rho is each sample's distance to its nearest non-self neighbour; sigma
        is found per sample by a parallel binary search (PoolRunner). Layers
        beyond the first get a constant sigma of 1.

        Returns:
            (rho, sigmaListlayer): rho on CPU, sigma tensors on self.device.
        """
        distC = torch.clone(dist)
        # Mask out (near-)zero self-distances before taking the row minimum.
        distC[distC.le(1e-11)] = 1e16
        rho, _ = torch.min(distC, dim=1)

        print("start pool search")
        sigmaListlayer = [0] * num_layer

        r = PoolRunner(
            dist.shape[0],
            self.perplexity,
            dist.detach().cpu().numpy(),
            rho.detach().cpu().numpy(),
            gammaList[0],
            vList[0],
            pow=2,
        )
        sigmaListlayer[0] = torch.tensor(r.Getout()).to(self.device)

        for i in range(1, num_layer):
            sigmaListlayer[i] = torch.zeros(data.shape[0], device=self.device) + 1

        return rho, sigmaListlayer

    def _ChangeVList(self, epoch):
        """Refresh the latent v (and gamma) for this epoch from the schedule."""
        self.vCurent = self.vListForEpoch[epoch]
        newVList = [100]
        # Keep the fixed input-space v at index 0; append the scheduled latent v.
        for i in range(2 - 1):
            newVList.append(self.vCurent)
        self.vList = newVList
        self.gammaList = self._CalGammaF(newVList)

    def CalLoss(self, latentList, dataInput, epoch=0, inputDataIndex=None):
        """Return the weighted ELIS loss for one batch.

        Args:
            latentList: latent embeddings for the batch.
            dataInput: corresponding input samples (only its batch size is used
                by the current implementation).
            epoch (int, optional): training epoch; indexes the v schedule.
            inputDataIndex: dataset indices of the batch rows, used to slice
                the cached PG / PE matrices.
        """

        def ITEM(
            Psave,
            data1,
            data2,
            latent1,
            latent2,
            mask=None,
            data_eye_val=None,
            scale=1.0,
        ):
            # data1 / data2 / mask / data_eye_val are currently unused; kept
            # for interface stability with earlier revisions.
            distlatent = self._Distance_squared(latent1, latent2)

            loss = (
                self._CE(
                    P=Psave[inputDataIndex][:, inputDataIndex],
                    Q=self._CalPt(
                        dist=distlatent,
                        rho=0,
                        sigma_array=1,
                        gamma=self.gammaList[-1],
                        v=self.vList[-1],
                    ),
                ).mean()
                * scale
            )
            return loss

        self._ChangeVList(epoch)
        # Graph-distance term (weight r_p).
        loss1 = ITEM(
            Psave=self.PG,
            data1=dataInput,
            data2=dataInput,
            latent1=latentList,
            latent2=latentList,
            mask=self.ingraphmask,
            data_eye_val=None,
            scale=self.r_p,
        )
        # Euclidean-distance term (weight r_e).
        loss2 = ITEM(
            Psave=self.PE,
            data1=dataInput,
            data2=dataInput,
            latent1=latentList,
            latent2=latentList,
            mask=torch.ones((dataInput.shape[0], dataInput.shape[0])).bool(),
            data_eye_val=None,
            scale=self.r_e,
        )
        return (loss1 + loss2) / (self.r_p + self.r_e)


class PoolRunner(object):
    """Run the per-sample sigma binary search in a multiprocessing pool.

    Each row of `dist` gets an independent `sigma_binary_search`; the results
    are collected into `self.sigma_array` (numpy array of length n).
    """

    def __init__(self, n, N_NEIGHBOR, dist, rho, gamma, v, pow=2, processes=30):
        """Launch the searches and block until all sigmas are computed.

        Args:
            n (int): number of samples (rows of dist to process).
            N_NEIGHBOR (int): target perplexity / neighbour count.
            dist (ndarray): (n, n) pairwise distance matrix.
            rho (ndarray): (n,) per-sample nearest-neighbour distance.
            gamma, v: Student-t similarity parameters forwarded to `func`.
            pow (int, optional): exponent used inside `func`. Defaults to 2.
            processes (int, optional): pool size; previously hard-coded to 30.
        """
        print(n)
        with Pool(processes=processes) as pool:
            results = [
                pool.apply_async(
                    sigma_binary_search,
                    (N_NEIGHBOR, dist[row], rho[row], gamma, v, pow),
                )
                for row in range(n)
            ]
            print("start calculate sigma")
            pool.close()
            pool.join()
            # All workers finished (join above), so get() returns immediately.
            self.sigma_array = np.array([r.get() for r in results])
        print("\nMean sigma = " + str(np.mean(self.sigma_array)))
        print("finish calculate sigma")

    def Getout(self,):
        """Return the sigma array computed in __init__."""
        return self.sigma_array


def sigma_binary_search(fixed_k, dist_row_line, rho_line, gamma, v, pow=2):
    """Solve k_of_sigma(sigma) = fixed_k for sigma by bisection.

    Bisects on [0, 100] for at most 20 iterations, stopping early once
    |func(sigma) - fixed_k| <= 1e-4, and returns the last midpoint tried.
    """
    lower, upper = 0, 100
    approx_sigma = (lower + upper) / 2
    for _ in range(20):
        approx_sigma = (lower + upper) / 2
        k_value = func(approx_sigma, dist_row_line, rho_line, gamma, v, pow=pow)
        # func is monotone in sigma, so shrink toward the target from one side.
        if k_value < fixed_k:
            lower = approx_sigma
        else:
            upper = approx_sigma
        if np.abs(fixed_k - k_value) <= 1e-4:
            break
    return approx_sigma


def func(sigma, dist_row_line, rho_line, gamma, v, pow=2):
    """Smooth neighbour count 2**sum(p_ij) of one distance row at a given sigma."""
    scaled = (dist_row_line - rho_line) / sigma
    # Distances below rho contribute the maximal similarity (exponent 0).
    scaled = np.where(scaled < 0, 0, scaled)
    tails = np.power(1 + scaled / v, -1 * (v + 1) / 2)
    p = np.power(gamma * tails * np.sqrt(2 * 3.14), pow)
    return np.power(2, p.sum())
