import torch
import random
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms

import cub_models.vgg as vgg
import cub_models.resnet as resnet

from scipy import sparse
from sklearn.metrics.pairwise import cosine_similarity
#from pykeops.torch import LazyTensor

##############################################
# Center Loss for Attention Regularization
##############################################
class CenterLoss(nn.Module):
    """Sum-of-squares distance between features and their class centers,
    averaged over the batch (attention regularization term)."""

    def __init__(self):
        super(CenterLoss, self).__init__()
        self.l2_loss = nn.MSELoss(reduction='sum')

    def forward(self, outputs, targets):
        """Return the summed squared error divided by the batch size."""
        batch_size = outputs.size(0)
        return self.l2_loss(outputs, targets) / batch_size


##################################
# Metric
##################################
class Metric(object):
    """Marker base class for all running-metric trackers below."""


class AverageMeter(Metric):
    """Keeps a running (weighted) average of a scalar metric."""

    def __init__(self, name='loss'):
        self.name = name
        self.reset()

    def reset(self):
        """Forget everything accumulated so far."""
        self.scores = 0.
        self.total_num = 0.

    def __call__(self, batch_score, sample_num=1):
        """Add a batch total and its sample count; return the updated mean."""
        self.scores = self.scores + batch_score
        self.total_num = self.total_num + sample_num
        return self.scores / self.total_num


class TopKAccuracyMetric(Metric):
    """Running top-k accuracy (in percent) over all batches seen since reset()."""

    def __init__(self, topk=(1,)):
        self.name = 'topk_accuracy'
        self.topk = topk
        self.maxk = max(topk)
        self.reset()

    def reset(self):
        """Zero the per-k hit counters and the sample count."""
        self.corrects = np.zeros(len(self.topk))
        self.num_samples = 0.

    def __call__(self, output, target):
        """Accumulate hits from one batch; return accuracy@k (%) for every k."""
        self.num_samples += target.size(0)
        # Top-maxk class indices; transpose so each column is one sample.
        _, predictions = output.topk(self.maxk, 1, True, True)
        predictions = predictions.t()
        hits = predictions.eq(target.view(1, -1).expand_as(predictions))

        for idx, k in enumerate(self.topk):
            hits_at_k = hits[:k].reshape(-1).float().sum(0)
            self.corrects[idx] += hits_at_k.item()

        return self.corrects * 100. / self.num_samples


##################################
# Callback
##################################
class Callback(object):
    """Minimal training-callback interface; subclasses override the hooks."""

    def __init__(self):
        pass

    def on_epoch_begin(self):
        """Hook run at the start of each epoch; no-op by default."""
        pass

    def on_epoch_end(self, *args):
        """Hook run at the end of each epoch; no-op by default."""
        pass


class ModelCheckpoint(Callback):
    """Saves model weights (and optional feature center) whenever the
    monitored metric improves."""

    def __init__(self, savepath, monitor='val_topk_accuracy', mode='max'):
        self.savepath = savepath
        self.monitor = monitor
        self.mode = mode
        self.reset()
        super(ModelCheckpoint, self).__init__()

    def reset(self):
        """Reset the best score to the worst possible value for this mode."""
        self.best_score = float('-inf') if self.mode == 'max' else float('inf')

    def set_best_score(self, score):
        """Seed the best score, unwrapping a one-element numpy array if needed."""
        self.best_score = score[0] if isinstance(score, np.ndarray) else score

    def on_epoch_begin(self):
        pass

    def on_epoch_end(self, logs, net, **kwargs):
        """Persist a checkpoint when logs[self.monitor] beats the best score."""
        score = logs[self.monitor]
        if isinstance(score, np.ndarray):
            score = score[0]

        improved = (self.mode == 'max' and score > self.best_score) or \
            (self.mode == 'min' and score < self.best_score)
        if not improved:
            return
        self.best_score = score

        # Unwrap DataParallel so the saved keys have no 'module.' prefix.
        model = net.module if isinstance(net, torch.nn.DataParallel) else net
        state_dict = model.state_dict()
        # Always save CPU tensors so the checkpoint loads on any device.
        for key in state_dict.keys():
            state_dict[key] = state_dict[key].cpu()

        payload = {'logs': logs, 'state_dict': state_dict}
        if 'feature_center' in kwargs:
            payload['feature_center'] = kwargs['feature_center'].cpu()
        torch.save(payload, self.savepath)


##################################
# augment function
##################################
def batch_augment(images, attention_map, mode='crop', theta=0.5, padding_ratio=0.1):
    """Attention-guided batch augmentation (crop or drop).

    Args:
        images: (B, C, H, W) input batch.
        attention_map: (B, 1, h, w) per-sample attention maps; upsampled
            to (H, W) here before thresholding.
        mode: 'crop' zooms into the high-attention region of each image;
            'drop' zeroes that region out.
        theta: threshold factor applied to each sample's attention max;
            a (low, high) tuple is sampled uniformly per sample.
        padding_ratio: extra margin (fraction of H/W) around the crop box.

    Returns:
        A (B, C, H, W) tensor of augmented images.

    Raises:
        ValueError: if mode is neither 'crop' nor 'drop'.
    """
    batches, _, imgH, imgW = images.size()

    if mode == 'crop':
        crop_images = []
        for batch_index in range(batches):
            atten_map = attention_map[batch_index:batch_index + 1]
            if isinstance(theta, tuple):
                theta_c = random.uniform(*theta) * atten_map.max()
            else:
                theta_c = theta * atten_map.max()

            # F.upsample_bilinear is deprecated; interpolate with
            # align_corners=True is its documented equivalent.
            upsampled = F.interpolate(atten_map, size=(imgH, imgW), mode='bilinear', align_corners=True)
            crop_mask = upsampled >= theta_c
            nonzero_indices = torch.nonzero(crop_mask[0, 0, ...])
            if nonzero_indices.numel() == 0:
                # Degenerate mask (nothing above threshold): fall back to the
                # full image instead of crashing on min() of an empty tensor.
                height_min, height_max, width_min, width_max = 0, imgH, 0, imgW
            else:
                height_min = max(int(nonzero_indices[:, 0].min().item() - padding_ratio * imgH), 0)
                height_max = min(int(nonzero_indices[:, 0].max().item() + padding_ratio * imgH), imgH)
                width_min = max(int(nonzero_indices[:, 1].min().item() - padding_ratio * imgW), 0)
                width_max = min(int(nonzero_indices[:, 1].max().item() + padding_ratio * imgW), imgW)

            crop_images.append(
                F.interpolate(images[batch_index:batch_index + 1, :, height_min:height_max, width_min:width_max],
                              size=(imgH, imgW), mode='bilinear', align_corners=True))
        crop_images = torch.cat(crop_images, dim=0)
        return crop_images

    elif mode == 'drop':
        drop_masks = []
        for batch_index in range(batches):
            atten_map = attention_map[batch_index:batch_index + 1]
            if isinstance(theta, tuple):
                theta_d = random.uniform(*theta) * atten_map.max()
            else:
                theta_d = theta * atten_map.max()

            # Keep pixels whose upsampled attention is BELOW the threshold.
            drop_masks.append(
                F.interpolate(atten_map, size=(imgH, imgW), mode='bilinear', align_corners=True) < theta_d)
        drop_masks = torch.cat(drop_masks, dim=0)
        drop_images = images * drop_masks.float()
        return drop_images

    else:
        raise ValueError('Expected mode in [\'crop\', \'drop\'], but received unsupported augmentation method %s' % mode)


##################################
# transform in dataset
##################################
def get_transform(resize, phase='train'):
    """Build the torchvision preprocessing pipeline for one phase.

    `resize` is an (H, W) pair. Images are first resized so the requested
    crop covers 87.5% of each side, then cropped (randomly for training,
    centrally otherwise) and normalized with ImageNet statistics. Training
    additionally applies horizontal flips and color jitter.
    """
    enlarged = (int(resize[0] / 0.875), int(resize[1] / 0.875))
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    if phase == 'train':
        steps = [
            transforms.Resize(size=enlarged),
            transforms.RandomCrop(resize),
            transforms.RandomHorizontalFlip(0.5),
            transforms.ColorJitter(brightness=0.126, saturation=0.5),
            transforms.ToTensor(),
            normalize,
        ]
    else:
        steps = [
            transforms.Resize(size=enlarged),
            transforms.CenterCrop(resize),
            transforms.ToTensor(),
            normalize,
        ]
    return transforms.Compose(steps)

class KDLoss(nn.Module):
    """Temperature-scaled KL-divergence loss for knowledge distillation."""

    def __init__(self, temp_factor):
        super(KDLoss, self).__init__()
        self.temp_factor = temp_factor

    def forward(self, input, target):
        """KL(softmax(target/T) || softmax(input/T)) * T^2, averaged over the batch."""
        T = self.temp_factor
        student_log_probs = F.log_softmax(input / T, dim=-1)
        teacher_probs = F.softmax(target / T, dim=-1)
        kl = F.kl_div(student_log_probs, teacher_probs, reduction='sum')
        # T^2 rescales gradients back to the unscaled-logit magnitude.
        return kl * (T ** 2) / input.size(0)

def normalize_map(s_map):
    """Min-max normalize each (W, H) map to [0, 1].

    Input is (B, M, W, H); the result is flattened to (B, M, W*H).
    """
    B, M, W, H = s_map.size()
    flat = s_map.view(B, M, -1)
    per_map_min = flat.min(dim=-1, keepdim=True)[0].expand(-1, -1, W * H)
    per_map_max = flat.max(dim=-1, keepdim=True)[0].expand(-1, -1, W * H)

    # 1e-7 guards against division by zero on constant maps.
    return (flat - per_map_min) / ((per_map_max - per_map_min) * 1.0 + 1e-7)


def self_distill_loss(embed_s, embed_resnet):
    """KD loss between the cosine-similarity matrices of two embedding sets.

    `embed_s` is detached, so gradients only flow through `embed_resnet`.
    """
    teacher = torch.nn.functional.normalize(embed_s.detach(), dim=1)
    student = torch.nn.functional.normalize(embed_resnet, dim=1)
    # Pairwise cosine similarities within the batch (N x N).
    sim_teacher = teacher.mm(teacher.t())
    sim_student = student.mm(student.t())

    # Temperature 1 reduces KDLoss to a plain batch-averaged KL divergence.
    return KDLoss(1)(sim_student, sim_teacher)

class SmoothCrossEntropy(nn.Module):
    """Label-smoothed cross entropy returning a per-sample loss vector."""

    def __init__(self, epsilon: float = 0.):
        super(SmoothCrossEntropy, self).__init__()
        self.epsilon = float(epsilon)

    def forward(self, logits: torch.Tensor, labels: torch.LongTensor) -> torch.Tensor:
        """Return KL(smoothed one-hot || softmax(logits)) summed per sample, shape (N,)."""
        num_classes = logits.shape[1]
        # Off-target classes share epsilon; the true class keeps 1 - epsilon.
        smoothed = torch.full_like(logits, self.epsilon / (num_classes - 1))
        smoothed.scatter_(1, labels.unsqueeze(1), 1 - self.epsilon)
        log_probs = torch.log_softmax(logits, 1)
        return F.kl_div(log_probs, smoothed, reduction='none').sum(1)

def cal_corr_score(t1, t2):
    """Mean of (1 - correlation) between matching rows of t1 and t2.

    Both inputs are (N, S) feature vectors; a singleton channel dim is
    inserted so the batched matmul yields one covariance per sample.

    NOTE(review): the covariance divides by S while torch.std is unbiased
    (divides by S-1), so identical inputs score (S-1)/S rather than 1 —
    confirm this scaling is intentional.
    """
    t1 = t1.unsqueeze(1)
    t2 = t2.unsqueeze(1)
    _, _, S = t1.shape
    mu1 = t1.mean(dim=-1, keepdim=True)
    mu2 = t2.mean(dim=-1, keepdim=True)

    # cov(x, y) = E[xy] - E[x]E[y]
    cov = torch.bmm(t1, t2.permute(0, 2, 1)) / S - mu1 * mu2

    denom = t1.std(dim=-1, keepdim=True) * t2.std(dim=-1, keepdim=True) + 1e-9
    return (1.0 - cov / denom).mean()

def cal_corr_score_raw(t1, t2):
    """Raw correlation tensor between matching rows of t1 and t2.

    Inputs are (N, S); the result keeps the inserted channel dims and has
    shape (N, 1, 1). Same biased-cov / unbiased-std scaling as
    cal_corr_score, but with a larger 1e-6 stabilizer.
    """
    t1 = t1.unsqueeze(1)
    t2 = t2.unsqueeze(1)
    _, _, S = t1.shape
    mu1 = t1.mean(dim=-1, keepdim=True)
    mu2 = t2.mean(dim=-1, keepdim=True)

    # cov(x, y) = E[xy] - E[x]E[y]
    cov = torch.bmm(t1, t2.permute(0, 2, 1)) / S - mu1 * mu2

    denom = t1.std(dim=-1, keepdim=True) * t2.std(dim=-1, keepdim=True) + 1e-6
    return cov / denom

def MI_approximation(t1, t2):
    """Correlation-based mutual-information proxy, squashed to (0, 1).

    Uses the Gaussian identity MI = -0.5 * log(1 - rho^2) on the per-sample
    correlations from cal_corr_score_raw, then maps through 1 - sigmoid(.)
    so higher correlation yields a smaller value. Inputs are (N, S) tensors.

    Raises:
        ValueError: if the correlation contains NaN (degenerate inputs).
    """
    rho = cal_corr_score_raw(t1, t2)
    # Replaces the old debug print/exit(): fail loudly with an exception
    # instead of killing the whole process.
    if torch.isnan(rho).any():
        raise ValueError('MI_approximation: NaN correlation encountered in inputs')
    # Clamp keeps log() finite if |rho| reaches 1 due to numerics.
    MI = - 0.5 * torch.log(torch.clamp(1 - rho * rho, min=1e-7))
    MI = 1 - torch.sigmoid(MI)
    return MI.mean()

class ConsistencyEvaluation(object):
    """Measures how consistently each attention filter selects similar features.

    Features pooled under each of the `num_M` attention maps are accumulated
    into `self.matrix`; `calculate()` then reports, per filter, the average
    pairwise cosine similarity among the top-k strongest samples.
    """
    def __init__(self, n_sample, feature_num, eval_model, num_M=32):
        self.model = eval_model  # frozen feature extractor with pretrained weights
        self.index = 0           # write cursor into the sample axis of self.matrix
        self.num_M = num_M

        self.matrix = np.zeros((num_M, n_sample, feature_num))
        self.pool = nn.AdaptiveAvgPool2d(1)

    def __call__(self, x, M):
        """Pool features of batch `x` under every attention map in `M` and store them."""
        with torch.no_grad():
            feature_volumns = self.model.features(x)
        B, C, H, W = feature_volumns.size()
        # Post-process attention maps the same way visualization does:
        # l2-normalize, rescale by the per-map max, then sqrt to soften peaks.
        maps = M.view((B, self.num_M, -1))
        maps = F.normalize(maps, dim=-1)
        maps = torch.sqrt(torch.abs(maps / (maps.max(dim=-1)[0] + 1e-5).view(B, self.num_M, 1)))
        M = maps.view((B, self.num_M, M.size(-2), M.size(-1)))

        for filt in range(self.num_M):
            pooled = self.pool(feature_volumns * M[:, filt:filt + 1, ...]).view(B, -1)
            self.matrix[filt, self.index:(self.index + B), :] = pooled.detach().cpu().numpy()
        self.index += B

    def calculate(self, topk=1000):
        """Return per-filter consistency scores over the stored feature matrix."""
        consistency = np.zeros((self.num_M))
        for filt in range(self.num_M):
            print('Calculating No. %d Filter' % filt)
            features = self.matrix[filt, :, :]
            # Keep only the top-k samples with the largest feature mass.
            strongest = np.sum(features, axis=-1).argsort()[::-1][:topk]
            subset = sparse.csr_matrix(features[strongest, :])
            similarities = cosine_similarity(subset)
            # Remove the diagonal (self-similarity) before averaging the pairs.
            consistency[filt] = (similarities.sum() - topk) / (2 * topk * (topk - 1))
        return consistency



##############################################
# KMeans for contextual cls prediction
##############################################
class KMeans(nn.Module):
    """Mini-batch KMeans used to derive soft contextual class targets.

    forward() assigns each sample to its nearest centroid and returns a
    label-smoothed one-hot distribution over the K centroids; samples are
    buffered so update() can re-fit the centroids later.
    """
    def __init__(self, n_center=32, n_feat=2048, epsilon=0.1):
        super(KMeans, self).__init__()

        self.c = torch.rand(n_center, n_feat)  # (K, D) centroids, kept on CPU
        self.K = n_center
        self.D = n_feat
        self.epsilon = epsilon  # label-smoothing factor for the soft targets

        self.sample = []  # buffered (N, D) CPU feature batches for the next update()

    def forward(self, x):
        """Return (N, K) smoothed soft assignments for feature batch x of shape (N, D)."""
        N, _ = x.shape
        x = x.view(N, 1, self.D)  # (N, 1, D) samples
        # x.device also works for CPU tensors, unlike the old x.get_device()
        # (which returns -1 on CPU and breaks .to()).
        c = self.c.view(1, self.K, self.D).to(x.device)  # (1, K, D) centroids

        D_ij = ((x - c) ** 2).sum(-1)  # (N, K) squared distances
        cl = D_ij.argmin(dim=1).view(-1)  # nearest centroid per sample

        # Soft cls prediction: 1 - epsilon on the assigned cluster,
        # epsilon spread uniformly over the others.
        target_probs = torch.full((N, self.K), self.epsilon / (self.K - 1)).to(x.device)
        target_probs.scatter_(1, cl.unsqueeze(1), 1 - self.epsilon)

        # Buffer features for the next centroid update. squeeze(1) (not
        # squeeze()) keeps the batch dim even when N == 1, so torch.cat in
        # update() always sees (N, D) tensors.
        self.sample.append(x.detach().cpu().squeeze(1))
        return target_probs

    def update(self, Niter=50, alpha=0.2, initial=False):
        """Re-fit centroids on the buffered samples with Lloyd's algorithm.

        Adapted from
        https://www.kernel-operations.io/keops/_auto_tutorials/kmeans/plot_kmeans_torch.html

        Args:
            Niter: number of Lloyd iterations.
            alpha: step size for moving the stored centroids toward the fit.
            initial: if True, seed with k-means++ instead of the current centroids.
        """
        x = torch.cat(self.sample, dim=0)
        N, _ = x.shape  # number of buffered samples

        if initial:
            c = self.plus_plus(x, self.K)
        else:
            c = self.c.clone()

        x_i = x.view(N, 1, self.D)  # (N, 1, D) samples
        c_j = c.view(1, self.K, self.D)  # (1, K, D) centroids
        # K-means loop:
        # - x  is the (N, D) point cloud,
        # - cl is the (N,) vector of class labels
        # - c  is the (K, D) cloud of cluster centroids
        for _ in range(Niter):
            # E step: assign points to the closest cluster.
            D_ij = ((x_i - c_j) ** 2).sum(-1)  # (N, K) squared distances
            cl = D_ij.argmin(dim=1).long().view(-1)

            # M step: recompute each centroid as the mean of its points.
            c.zero_()
            c.scatter_add_(0, cl[:, None].repeat(1, self.D), x)
            Ncl = torch.bincount(cl, minlength=self.K).type_as(c).view(self.K, 1)
            # clamp avoids 0/0 -> NaN for clusters that received no points
            # (they simply stay at the origin for this iteration).
            c /= Ncl.clamp(min=1)

            c_j = c.view(1, self.K, self.D)

        # Move the stored centroids a step of size alpha TOWARD the new fit.
        # The previous `self.c += alpha * (self.c - c)` had the sign flipped
        # and pushed the centroids away from the freshly fitted ones.
        self.c += alpha * (c - self.c)

        # Clear the sample buffer for the next round.
        self.sample = []

    def plus_plus(self, ds, k):
        """Create cluster centroids using the k-means++ algorithm.

        Inspiration from:
        https://stackoverflow.com/questions/5466323/how-could-one-implement-the-k-means-algorithm

        Args:
            ds: (N, D) tensor of candidate points.
            k: desired number of centroids.

        Returns:
            (k, D) tensor of initial centroids.
        """
        centroids = [ds[0, :]]

        for _ in range(1, k):
            # Squared distance from every point to its nearest chosen centroid.
            dist_sq = torch.tensor([min([torch.dot(c - x, c - x) for c in centroids]) for x in ds])
            probs = dist_sq / dist_sq.sum()
            cumulative_probs = torch.cumsum(probs, dim=0)
            r = torch.rand(1)

            # Inverse-CDF sampling: pick the first index whose cumulative
            # probability exceeds r.
            for j, p in enumerate(cumulative_probs):
                if r < p:
                    i = j
                    break

            centroids.append(ds[i])

        return torch.stack(centroids)

