import scipy
import scipy.sparse.linalg as sla
# ^^^ we NEED to import scipy before torch, or it crashes :(
# (observed on Ubuntu 20.04 w/ torch 1.6.0 and scipy 1.5.2 installed via conda)

import os.path
import sys
import random
from multiprocessing import Pool

import numpy as np
import scipy.spatial
import torch
from torch.distributions.categorical import Categorical
import sklearn.neighbors

import robust_laplacian
import potpourri3d as pp3d

import diffusion_net.utils as utils
from .utils import toNP

# Wave kernel signature
def WKS(evals, evecs, energy_list, sigma, scaled=False):
    """
    Returns the Wave Kernel Signature for some energy values.

    All inputs are batched (leading batch dimension B). The first eigenpair
    is dropped internally, since the lowest Laplace-Beltrami eigenvalue is
    ~0 and its logarithm is undefined.

    Parameters
    ------------------------
    evals       : (B,K) tensor of eigenvalues of the Laplace-Beltrami operator
    evecs       : (B,N,K) tensor with the K corresponding eigenvectors
    energy_list : (B,num_E) energy values e to use
    sigma       : (B,) tensor of [positive] standard deviations to use
    scaled      : (bool) whether to normalize each energy level

    Output
    ------------------------
    WKS : (B,N,num_E) tensor where each column is the WKS for a given e
    """
    assert torch.all(sigma > 0), f"Sigma should be positive ! Given value : {sigma}"

    # Drop the first eigenpair: the lowest LBO eigenvalue is ~0 and log(0)
    # would blow up the Gaussian below.
    evals = evals[:, 1:]  # (B,K-1)
    evecs = evecs[:, :, 1:]  # (B,V,K-1)

    # Gaussian in log-eigenvalue space: exp(-(e - log|lambda|)^2 / (2 sigma^2))
    coefs = torch.exp(
        -(energy_list.unsqueeze(-1) - torch.log(torch.abs(evals.unsqueeze(-2)))) ** 2
        / (2 * sigma.unsqueeze(-1).unsqueeze(-1) ** 2)
    )  # (B,num_E,K-1)

    # Weight the squared eigenvector entries by the energy coefficients.
    weighted_evecs = (evecs * evecs).unsqueeze(1) * coefs.unsqueeze(-2)  # (B,num_E,N,K-1)

    natural_WKS = torch.sum(weighted_evecs, dim=-1)  # (B,num_E,N)
    natural_WKS = natural_WKS.permute([0, 2, 1])  # (B,N,num_E)

    if scaled:
        # Normalize each energy level by the total coefficient mass.
        inv_scaling = torch.sum(coefs, dim=-1)  # (B,num_E)
        return (inv_scaling.unsqueeze(1) ** -1) * natural_WKS
    else:
        return natural_WKS

def auto_WKS(evals, evecs, num_E, landmarks=None, scaled=True):
    """
    Compute WKS with an automatic choice of scale and energy.

    Parameters
    ------------------------
    evals       : (K,) or (B,K) tensor of K eigenvalues
    evecs       : (N,K) or (B,N,K) tensor with K eigenvectors
    num_E       : (int) number of energy values e to use
    landmarks   : (p,) if not None, indices of landmarks to compute
    scaled      : (bool) whether to normalize each energy level

    Output
    ------------------------
    WKS or lm_WKS : (N,num_E) or (N,p*num_E) tensor where each column is the
                    WKS for a given e, possibly for some landmarks
    """
    # Promote unbatched inputs to a batch of one so the rest runs batched.
    expand_batch = len(evals.shape) == 1
    if expand_batch:
        evals = evals.unsqueeze(0)
        evecs = evecs.unsqueeze(0)

    device = evals.device
    # Energy bounds in log-eigenvalue space (skip the ~0 first eigenvalue).
    log_e_min = torch.log(evals[:, 1]).to(device)   # (B,)
    log_e_max = torch.log(evals[:, -1]).to(device)  # (B,)
    sigma = 7 * (log_e_max - log_e_min) / num_E

    # Shrink the interval so the Gaussians stay inside the spectrum.
    log_e_min = log_e_min + 2 * sigma
    log_e_max = log_e_max - 2 * sigma

    # One linspace of energies per batch element.
    energy_list = torch.stack(
        [torch.linspace(lo, hi, num_E) for lo, hi in zip(log_e_min, log_e_max)],
        dim=0,
    ).to(device)

    if landmarks is not None:
        # NOTE(review): the unbatched squeeze below is not applied on this
        # path — presumably lm_WKS handles it; confirm against its definition.
        return lm_WKS(evals, evecs, landmarks, energy_list, sigma, scaled=scaled)

    out = WKS(evals, evecs, energy_list, sigma, scaled=scaled)
    return out.squeeze(0) if expand_batch else out


def mesh_WKS(mesh, num_E, landmarks=None, k=None):
    """
    Compute the scaled WKS of a mesh from its precomputed spectrum.

    Parameters
    ------------------------
    mesh      : object exposing `eigenvalues` ((K,) array) and
                `eigenvectors` ((N,K) array) attributes
    num_E     : (int) number of energy values to use
    landmarks : (p,) optional indices of landmarks to compute
    k         : (int) optional number of eigenpairs to use; defaults to all

    Output
    ------------------------
    (N,num_E) WKS array (or the landmark variant when landmarks is given)
    """
    assert mesh.eigenvalues is not None, "Eigenvalues should be processed"

    if k is None:
        k = len(mesh.eigenvalues)
    else:
        # BUG FIX: original code asserted len(eigenvalues >= k), i.e. the
        # length of an element-wise boolean mask, which is always truthy —
        # the guard could never fire.
        assert len(mesh.eigenvalues) >= k, \
            f"At least {k} eigenvalues should be computed, not {len(mesh.eigenvalues)}"

    return auto_WKS(mesh.eigenvalues[:k], mesh.eigenvectors[:, :k], num_E, landmarks=landmarks, scaled=True)
