import torch
import torch.nn as nn
import torch.nn.functional as F

from typing import Optional

from vehicle_reid_pytorch.loss.triplet_loss import normalize, euclidean_dist, hard_example_mining
from ..tools.math_tools import clck_dist


class ParsingIdLoss(nn.Module):
    """Per-location focal loss for parsing-based ID predictions.

    Softmaxes a (B, NUM, N) logit tensor over the class dim, gathers the
    probability of the ground-truth class at each of the N locations, and
    computes the focal loss ``-(1 - p) ** gamma * log(p)`` scaled by a
    per-location weight.

    Args:
        num_classes: number of classes NUM expected on dim 1 of the input.
        gamma: focal-loss focusing parameter. Effectively required — a
            ValueError is raised when omitted (it stays keyword-optional
            only to preserve the historical signature).
        weight_noramlize: when True, L1-normalize ``weight`` along dim 1
            before applying it. (Parameter name keeps the original
            misspelling for backward compatibility with existing callers.)
        reduction: 'none', 'mean' or 'sum', as in standard torch losses.

    Raises:
        ValueError: if ``reduction`` is not one of the allowed strings, or
            ``gamma`` is missing.
    """

    def __init__(self, num_classes: int,
                 gamma: Optional[float] = None,
                 weight_noramlize: bool = False,
                 reduction: str = 'mean') -> None:
        if reduction not in ('none', 'mean', 'sum'):
            raise ValueError(f"expected argument 'reduction' must be "
                             f"'none', 'mean' or 'sum', got {reduction}")
        if gamma is None:
            raise ValueError("missing essential argument: 'gamma'")

        super().__init__()
        self.num_classes: int = num_classes
        self.gamma: float = gamma
        self.weight_noramlize: bool = weight_noramlize
        self.reduction: str = reduction
        self._softmax = nn.Softmax(dim=1)
        # Kept for interface parity; forward() computes the cross-entropy
        # term by hand, so this submodule is currently unused.
        self._crossentropyloss = nn.CrossEntropyLoss(reduction='none')

    def forward(self, y: torch.Tensor, labels: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
        """Compute the weighted focal loss.

        Args:
            y: local prediction logits of shape (B, NUM, N).
            labels: ground-truth class ids (int64) of shape (B,) or (B, 1).
            weight: per-location weights of shape (B,) or (B, N); the last
                dim must match y's last dim N.

        Returns:
            A scalar tensor under 'mean'/'sum' reduction, or a (B*N, 1)
            tensor under 'none'.

        Raises:
            TypeError: if the three tensors live on different devices.
            ValueError: if any shape constraint above is violated.
        """
        if not (y.device == labels.device == weight.device):
            raise TypeError(f"all three input tensor must be on the same device, "
                            f"got y on {y.device}, labels on {labels.device} "
                            f"and weight on {weight.device}")
        if y.dim() != 3:
            raise ValueError(f"expected input tensor 'y' is a 3d tensor(BxNUMxN), "
                             f"got a {y.dim()}d tensor instead")
        if labels.dim() == 1:
            labels = labels.unsqueeze(dim=1)  # (B,) -> (B, 1)
        if not (labels.dim() == 2 and labels.size(1) == 1):
            # BUG FIX: the original reported len(y.size()) here, i.e. the
            # rank of the wrong tensor; report labels' own rank instead.
            raise ValueError(f"expected input tensor 'labels' is a 1d tensor(B) or a 2d tensor(Bx1), "
                             f"got a {labels.dim()}d tensor instead")
        if weight.dim() == 1:
            weight = weight.unsqueeze(dim=1)  # (B,) -> (B, 1)
        if weight.dim() != 2:
            raise ValueError(f"expected input tensor 'weight' is a 1d tensor(B) or a 2d tensor(BxN), "
                             f"got a {weight.dim()}d tensor instead")
        if not (y.size(0) == labels.size(0) == weight.size(0)):
            raise ValueError(f"all three input tensor must have same batch size(dim0), "
                             f"got {y.size(0)} for y, {labels.size(0)} for labels "
                             f"and {weight.size(0)} for weight")
        if y.size(-1) != weight.size(-1):
            raise ValueError(f"expected input tensor 'y' and 'weight' "
                             f"have same size along the last dim, "
                             f"got {y.size(-1)} for y and {weight.size(-1)} for weight")
        if y.size(1) != self.num_classes:
            raise ValueError(f"expected input tensor 'y' have {self.num_classes} channels, "
                             f"got {y.size(1)} instead")

        # Class probabilities per location: (B, NUM, N) -> (B, N, NUM) -> (B*N, NUM).
        pred: torch.Tensor = self._softmax(y).transpose(1, 2)
        pred = pred.contiguous().view(-1, self.num_classes)
        pred = pred.clamp(min=1e-30, max=1.)  # avoid -inf/nan in the log below
        labels = labels.repeat(1, y.size(2))  # broadcast each label to all N locations: (B, N)
        if self.weight_noramlize:
            weight = F.normalize(weight, dim=1, p=1)  # L1-normalize across locations
        weight = weight.view(-1, 1)  # (B*N, 1)

        # Focal loss with per-sample weight: -(1 - p)^gamma * log(p) * w.
        log_pred: torch.Tensor = torch.log(pred).gather(1, labels.view(-1, 1))
        pred = pred.gather(1, labels.view(-1, 1))
        result: torch.Tensor = -torch.mul((1 - pred) ** self.gamma, log_pred)
        result = result * weight

        if self.reduction == 'mean':
            return result.mean()
        if self.reduction == 'sum':
            return result.sum()
        return result


