# Modified by Yangshuo from nwpu
import warnings

import torch
import torch.nn as nn
import torch.nn.functional as F


from ..builder import LOSSES
from .utils import get_class_weight, weight_reduce_loss


@LOSSES.register_module()
class PixelContrastCrossEntropyLoss(nn.Module):
    """Pixel-wise cross-image contrastive loss.

    The PixelContrastCrossEntropyLoss implementation based on PyTorch.
    The original article refers to
    Wenguan Wang, Tianfei Zhou, et al. "Exploring Cross-Image Pixel Contrast
    for Semantic Segmentation" (https://arxiv.org/abs/2101.11939).

    Args:
        temperature (float, optional): Controls the numerical similarity
            of features. Default: 0.1.
        base_temperature (float, optional): Controls the numerical range
            of the contrast loss. Default: 0.07.
        max_samples (int, optional): Max number of sampled anchors over the
            whole batch. Default: 1024.
        max_views (int, optional): Max number of sampled pixels per class.
            Default: 100.
        ignore_index (int, optional): Specifies a target value that is
            ignored and does not contribute to the input gradient.
            Default: 255.
        class_weight (optional): Weight of each class. Currently unused by
            this loss; kept for API consistency with the other losses.
            Default: None.
        loss_weight (float, optional): Weight of this loss item.
            Default: 1.0.
        loss_name (str, optional): Name of this loss item; must keep the
            'loss_' prefix to be included in the backward graph.
            Default: 'loss_contrast'.
    """

    def __init__(self,
                 temperature=0.1,
                 base_temperature=0.07,
                 max_samples=1024,
                 max_views=100,
                 ignore_index=255,
                 class_weight=None,
                 loss_weight=1.0,
                 loss_name='loss_contrast'):
        super().__init__()
        self.temperature = temperature
        self.base_temperature = base_temperature
        self.ignore_index = ignore_index
        self.max_samples = max_samples
        self.max_views = max_views

        self.class_weight = class_weight
        self.loss_weight = loss_weight
        self._loss_name = loss_name

    def _hard_anchor_sampling(self, X, y_hat, y):
        """Sample hard and easy anchor pixels for every usable class.

        Args:
            X (Tensor): Reshaped feats, shape [N, H * W, feat_dim].
            y_hat (Tensor): Reshaped label, shape [N, H * W].
            y (Tensor): Reshaped prediction, shape [N, H * W].

        Returns:
            tuple: ``(X_, y_)`` with shapes
                ``[total_classes, n_view, feat_dim]`` and
                ``[total_classes]``, or ``(None, None)`` when no class has
                enough labelled pixels to sample from.
        """
        batch_size, feat_dim = X.shape[0], X.shape[-1]

        # Per image, keep only the classes that are present (excluding
        # ignore_index) with strictly more than `max_views` labelled pixels,
        # so that every kept class can supply a full set of samples.
        classes = []
        total_classes = 0
        for i in range(batch_size):
            current_y = y_hat[i]
            current_classes = torch.unique(current_y)
            current_classes = [x for x in current_classes if x != self.ignore_index]
            current_classes = [x for x in current_classes if (current_y == x).nonzero().shape[0] > self.max_views]

            classes.append(current_classes)
            total_classes += len(current_classes)

        if total_classes == 0:
            return None, None

        # Spread the anchor budget evenly across all sampled classes,
        # capped at max_views samples per class.
        n_view = self.max_samples // total_classes
        n_view = min(n_view, self.max_views)

        # Allocate on the same device as the features so the loss also
        # works on CPU (the original hard-coded .cuda()).
        X_ = torch.zeros((total_classes, n_view, feat_dim),
                         dtype=torch.float, device=X.device)
        y_ = torch.zeros(total_classes, dtype=torch.float, device=X.device)

        X_ptr = 0
        for i in range(batch_size):
            current_y_hat = y_hat[i]
            current_y = y[i]
            current_classes = classes[i]

            for cls_id in current_classes:
                # Hard anchors: labelled cls_id but predicted as another
                # class. Easy anchors: labelled and predicted cls_id.
                hard_indices = ((current_y_hat == cls_id) & (current_y != cls_id)).nonzero()
                easy_indices = ((current_y_hat == cls_id) & (current_y == cls_id)).nonzero()

                num_hard = hard_indices.shape[0]
                num_easy = easy_indices.shape[0]

                # Prefer an even hard/easy split; otherwise take all of the
                # scarce kind and fill the rest from the abundant one.
                if num_hard >= n_view / 2 and num_easy >= n_view / 2:
                    num_hard_keep = n_view // 2
                    num_easy_keep = n_view - num_hard_keep
                elif num_hard >= n_view / 2:
                    num_easy_keep = num_easy
                    num_hard_keep = n_view - num_easy_keep
                elif num_easy >= n_view / 2:
                    num_hard_keep = num_hard
                    num_easy_keep = n_view - num_hard_keep
                else:
                    # Unreachable: the class filter above guarantees more
                    # than max_views >= n_view pixels for cls_id.
                    num_hard_keep = num_hard
                    num_easy_keep = num_easy
                    warnings.warn('this shoud be never touched! {} {} {}'.format(num_hard, num_easy, n_view))

                perm = torch.randperm(num_hard)
                hard_indices = hard_indices[perm[:num_hard_keep]]
                perm = torch.randperm(num_easy)
                easy_indices = easy_indices[perm[:num_easy_keep]]
                indices = torch.cat((hard_indices, easy_indices), dim=0)

                X_[X_ptr, :, :] = X[i, indices, :].squeeze(1)
                y_[X_ptr] = cls_id
                X_ptr += 1

        # BUG FIX: the original returned inside the batch loop, so only the
        # first image's classes were ever filled and the rows of X_ reserved
        # for the remaining images stayed all-zero.
        return X_, y_

    def _contrastive(self, feats_, labels_):
        """Compute the supervised contrastive loss over sampled anchors.

        Args:
            feats_ (Tensor): Sampled pixels, shape
                [total_classes, n_view, feat_dim], where total_classes is
                batch_size * (classes sampled per image).
            labels_ (Tensor): Class id per anchor group, shape
                [total_classes].

        Returns:
            Tensor: Scalar loss.
        """
        anchor_num, n_view = feats_.shape[0], feats_.shape[1]

        # mask[i, j] == 1 iff anchor groups i and j carry the same class.
        labels_ = labels_.reshape((-1, 1))
        mask = torch.eq(labels_, torch.transpose(labels_, 0, 1)).float().to(feats_.device)

        # Flatten the n_view axis so every sampled pixel is an anchor:
        # [total_classes * n_view, feat_dim].
        contrast_count = n_view
        contrast_feature = torch.cat(torch.unbind(feats_, dim=1), dim=0)

        anchor_feature = contrast_feature
        anchor_count = contrast_count

        # Temperature-scaled pairwise similarities.
        anchor_dot_contrast = torch.div(
            torch.matmul(anchor_feature, torch.transpose(contrast_feature, 0, 1)),
            self.temperature)

        # Subtract the per-row max for numerical stability of the exp below.
        logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
        logits = anchor_dot_contrast - logits_max.detach()

        mask = mask.repeat(anchor_count, contrast_count)
        neg_mask = 1 - mask

        # Zero the diagonal so an anchor is never its own positive.
        logits_mask = torch.ones_like(mask).scatter_(
            1,
            torch.arange(anchor_num * anchor_count, device=feats_.device).view(-1, 1),
            0)
        mask = mask * logits_mask

        neg_logits = torch.exp(logits) * neg_mask
        neg_logits = neg_logits.sum(1, keepdim=True)

        exp_logits = torch.exp(logits)

        # log p(positive) with all negatives in the denominator.
        log_prob = logits - torch.log(exp_logits + neg_logits)

        mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)
        loss = - (self.temperature / self.base_temperature) * mean_log_prob_pos
        loss = loss.mean()

        return loss

    def contrast_criterion(self, feats, labels=None, predicts=None):
        """Prepare feats/labels/predictions and compute the contrast loss.

        Args:
            feats (Tensor): Embedding map, shape [N, feat_dim, h, w].
            labels (Tensor): Ground-truth map, shape [N, H, W].
            predicts (Tensor): Predicted class map, shape [N, H, W].

        Returns:
            Tensor: Scalar loss.
        """
        # Downsample labels to the embedding resolution with nearest
        # neighbour so class ids are preserved exactly.
        labels = labels.unsqueeze(1).float().clone()
        labels = F.interpolate(labels, (feats.shape[2], feats.shape[3]), mode='nearest')
        labels = labels.squeeze(1).long()
        assert labels.shape[-1] == feats.shape[-1], '{} {}'.format(labels.shape, feats.shape)

        batch_size = feats.shape[0]
        labels = labels.contiguous().view(batch_size, -1)
        predict = predicts.contiguous().view(batch_size, -1)
        # [N, feat_dim, h, w] -> [N, h * w, feat_dim]
        feats = feats.permute(0, 2, 3, 1)
        feats = feats.contiguous().view(feats.shape[0], -1, feats.shape[-1])

        feats_, labels_ = self._hard_anchor_sampling(feats, labels, predict)
        # BUG FIX: sampling returns (None, None) when no class has enough
        # pixels; return a graph-connected zero loss instead of crashing
        # inside _contrastive.
        if feats_ is None:
            return feats.sum() * 0.
        loss = self._contrastive(feats_, labels_)
        return loss

    def forward(self,
                pred,
                target,
                weight=None,
                reducetion_override=None,
                ignore_index=255,
                **kwargs):
        """Forward function.

        Args:
            pred (dict): Must contain 'seg' (segmentation logits,
                shape [N, C, H, W]) and 'embed' (pixel embeddings,
                shape [N, feat_dim, h, w]).
            target (torch.Tensor): The ground truth, shape [N, H, W].
            weight (torch.Tensor, optional): Unused; kept for API
                consistency with other losses. Defaults to None.
            reducetion_override (str, optional): Unused (note the
                historical typo kept for interface compatibility).
                Defaults to None.
            ignore_index (int, optional): Unused here; the index set at
                construction time is applied instead. Default: 255.

        Returns:
            torch.Tensor: The calculated loss.
        """
        assert "seg" in pred, "The input of PixelContrastCrossEntropyLoss should include 'seg' output, but not found."
        assert "embed" in pred, "The input of PixelContrastCrossEntropyLoss should include 'embed' output, but not found."
        predict = pred['seg']
        embedding = pred['embed']
        # Hard/easy anchor mining needs the discrete class prediction.
        predict = torch.argmax(predict, dim=1)
        loss = self.loss_weight * self.contrast_criterion(embedding, target, predict)
        return loss

    @property
    def loss_name(self):
        """Loss Name.

        This function must be implemented and will return the name of this
        loss function. This name will be used to combine different loss items
        by simple sum operation. In addition, if you want this loss item to be
        included into the backward graph, `loss_` must be the prefix of the
        name.

        Returns:
            str: The name of this loss item.
        """
        return self._loss_name
            
 
         
                        
                     
                    
                
            
            