
import dis
from turtle import distance
import torch
import torch.nn.functional as F
import numpy as np
import math
from Models.utils import *

class LabelPropagation(torch.nn.Module):
    """Module wrapper around :func:`label_propagation`.

    Stores the propagation hyper-parameters once so that ``forward`` only
    needs the data-dependent inputs.
    """

    def __init__(self, alpha=0.2, rbf_scale=1, norm_prop=True, apply_log=True, balanced=False):
        super().__init__()
        # Propagation hyper-parameters; see label_propagation for their meaning.
        self.alpha = alpha
        self.rbf_scale = rbf_scale
        self.norm_prop = norm_prop
        self.apply_log = apply_log
        self.balanced = balanced

    def forward(self, x, labels, nclasses, weights=None, propagator=None):
        """Apply label propagation to a set of embeddings.

        Arguments:
            x {Tensor} -- Input embeddings
            labels {Tensor} -- Labels in [0, nclasses]; the highest value
                (nclasses) marks unlabeled samples.
            nclasses {int} -- Total number of classes

        Keyword Arguments:
            weights {Tensor} -- A pre-computed affinity matrix (default: {None})
            propagator {Tensor} -- A pre-computed propagator (default: {None})

        Returns:
            Tensor -- Propagated (log-)scores, shape (n, nclasses)
        """
        return label_propagation(
            x,
            labels,
            nclasses,
            self.alpha,
            self.rbf_scale,
            self.norm_prop,
            self.apply_log,
            weights=weights,
            propagator=propagator,
            balanced=self.balanced,
        )

def get_emd_distance_for_tensor(X, Y, model, gamma=None, knn=None):
    """Build an RBF affinity matrix from pairwise EMD distances (tensor version).

    Distances are computed block-wise (batches of 10 samples) through
    ``model.compute_emd_distance`` to bound solver-call sizes, then turned
    into weights with an RBF kernel, following scikit-learn's rbf-kernel
    label-propagation preprocessing.

    Args:
        X (Tensor): (n_x, n_features) samples indexing the rows of the result.
        Y (Tensor): (n_y, n_features) samples indexing the columns of the result.
        model: object exposing ``compute_emd_distance(a, b, solver=...)``;
            expected to return a (len(b), len(a)) distance block — TODO confirm
            against the EMD model implementation.
        gamma (float, optional): RBF bandwidth; defaults to ``1 / n_features``.
        knn (int, optional): if given (and < n_x), sparsify the distance graph
            to the k nearest neighbours (symmetrized) before the RBF transform.

    Returns:
        Tensor: (n_x, n_y) affinity matrix with a zero diagonal.
    """
    # Fix: the original allocated torch.randn(len(X), len(X)) — any cell not
    # overwritten below (when len(Y) != len(X)) kept random garbage, and the
    # CPU allocation broke when the model returned CUDA tensors.
    distance_matrix = torch.zeros(len(X), len(Y), device=X.device)
    bs = 10  # block size: the EMD solver is invoked on at most bs x bs pairs
    for i in range(math.ceil(len(X) / bs)):
        for j in range(math.ceil(len(Y) / bs)):
            rows = slice(i * bs, min(len(X), (i + 1) * bs))
            cols = slice(j * bs, min(len(Y), (j + 1) * bs))
            distance_matrix[rows, cols] = model.compute_emd_distance(
                Y[cols], X[rows], solver='opencv')
    if gamma is None:
        gamma = 1.0 / X.shape[1]
    # Normalize distances by the std of the non-degenerate entries
    # (mirrors scikit-learn's rbf-kernel preprocessing).
    mask = distance_matrix > 1e-5
    distance_matrix = distance_matrix / distance_matrix[mask].std()
    # Optionally sparsify to a symmetric knn graph; remember which entries
    # were zeroed so they stay zero after the exp transform.
    use_knn = knn is not None and knn < len(distance_matrix)
    if use_knn:
        distance_matrix, zero_mask = graph_dense_to_knn(distance_matrix, knn)
        distance_matrix = 1 / 2 * (distance_matrix + distance_matrix.T)
    weights = torch.exp(-gamma * distance_matrix)
    if use_knn:
        weights[zero_mask] = 0
    # Zero the diagonal so a sample carries no self-affinity (rectangular
    # eye handles the len(X) != len(Y) case).
    diag = torch.eye(weights.size(0), weights.size(1),
                     dtype=torch.bool, device=weights.device)
    weights = weights * (~diag).float()
    return weights

def get_similarity_matrix(x, rbf_scale):
    """Dense RBF affinity matrix over a batch of embeddings.

    Pairwise squared euclidean distances (scaled by sqrt(dim)) are normalized
    by the std of their non-zero entries, mapped through exp(-d * rbf_scale),
    and the diagonal is zeroed so samples carry no self-affinity.
    """
    n, dim = x.size()
    # Pairwise squared distances via broadcasting, scaled by sqrt(dim).
    delta = x.unsqueeze(1) - x.unsqueeze(0)
    sq_dist = delta.pow(2).sum(-1) / np.sqrt(dim)
    # Normalize by the spread of the strictly non-zero distances
    # (the zero diagonal would otherwise bias the std).
    nonzero = sq_dist != 0
    sq_dist = sq_dist / sq_dist[nonzero].std()
    affinity = torch.exp(-sq_dist * rbf_scale)
    # Zero the diagonal.
    self_mask = torch.eye(n, dtype=torch.bool, device=affinity.device)
    return affinity * (~self_mask).float()

def label_propagation(x, labels, nclasses, alpha, rbf_scale, norm_prop, apply_log, weights=None, propagator=None, balanced=False, epsilon=1e-6):
    """Propagate labels through a similarity graph (Zhou et al., 2004).

    Args:
        x (Tensor): (n, d) embeddings; only used when neither ``weights``
            nor ``propagator`` is supplied.
        labels (Tensor): (n,) integer labels in [0, nclasses]; the value
            ``nclasses`` marks unlabeled samples.
        nclasses (int): number of real classes.
        alpha (float): smoothing factor of the propagator.
        rbf_scale (float): RBF bandwidth for the similarity matrix.
        norm_prop (bool): L1-normalize the propagator rows.
        apply_log (bool): return log-scores instead of raw scores.
        weights (Tensor, optional): pre-computed (n, n) affinity matrix.
        propagator (Tensor, optional): pre-computed (n, n) propagator;
            takes precedence over ``weights``.
        balanced (bool): normalize one-hot labels per class so each class
            column sums to 1.
        epsilon (float): numerical floor inside the log.

    Returns:
        Tensor: (n, nclasses) propagated (log-)scores.
    """
    # One-hot with an extra column for the "unlabeled" sentinel, then drop it:
    # unlabeled rows become all-zero and contribute nothing to propagation.
    labels = F.one_hot(labels.type(torch.long), nclasses + 1)
    labels = labels[:, :nclasses].float()  # the max label is unlabeled
    if balanced:
        # Per-class normalization so frequent classes do not dominate.
        labels = labels / labels.sum(0, keepdim=True)
    if propagator is None:
        # Fix: `weights == None` was an elementwise comparison when weights
        # is a tensor; identity with None is the correct test.
        if weights is None:
            weights = get_similarity_matrix(x, rbf_scale)
        propagator = global_consistency(
            weights, alpha=alpha, norm_prop=norm_prop)
    y_pred = torch.mm(propagator, labels)
    if apply_log:
        y_pred = torch.log(y_pred + epsilon)

    return y_pred


def global_consistency(weights, alpha=1, norm_prop=False):
    """Propagator from D. Zhou et al., "Learning with local and global
    consistency" (same construction as in the TPN paper but without its bug).

    Args:
        weights: Tensor of shape (n, n). Expected to be exp(-d^2/s^2), where
            d is the euclidean distance and s the scale parameter.
        alpha: scalar smoothing factor.
        norm_prop: if True, L1-normalize each row of the propagator.

    Returns:
        Tensor of shape (n, n): inverse(I - alpha * S), where S is the
        symmetrically normalized affinity matrix.
    """
    n = weights.shape[1]
    eye = torch.eye(n, dtype=weights.dtype, device=weights.device)
    # Symmetric normalization S = D^{-1/2} W D^{-1/2}; the 1e-4 floor keeps
    # isolated nodes (zero degree) from producing infinities.
    inv_sqrt_degree = 1. / torch.sqrt(1e-4 + torch.sum(weights, dim=-1))
    normalized = weights * inv_sqrt_degree[None, :] * inv_sqrt_degree[:, None]
    propagator = torch.inverse((eye - alpha * normalized)[None, ...])[0]
    if norm_prop:
        propagator = F.normalize(propagator, p=1, dim=-1)
    return propagator
