import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
import numpy as np
from scipy.ndimage import distance_transform_edt as distance_edt
from scipy.spatial.distance import directed_hausdorff
from skimage import segmentation as skimage_seg
from skimage.morphology import skeletonize_3d
from typing import List, Any, Callable, Iterable, Set, Tuple, TypeVar, Union, cast


def compute_sdf(img_gt, out_shape):
    """Compute the normalized signed distance function (SDF) of one-hot masks.

    The SDF is negative inside the object, positive outside and zero on the
    boundary; each side is min-max normalized so values lie in [-1, 1].
    The background channel (c == 0) is skipped and left as zeros.

    Args:
        img_gt: one-hot ground truth, shape (batch, channel, *spatial).
        out_shape: output shape, (batch, channel, *spatial).

    Returns:
        np.ndarray of shape ``out_shape`` with the normalized SDF per channel.
    """
    img_gt = img_gt.astype(np.uint8)
    gt_sdf = np.zeros(out_shape)
    for b in range(out_shape[0]):
        for c in range(1, out_shape[1]):  # channel 0 is background; skip it
            # Bug fix: np.bool was removed in NumPy 1.24 — use the builtin.
            positive_mask = img_gt[b][c].astype(bool)
            if positive_mask.any():
                negative_mask = ~positive_mask
                posdis = distance_edt(positive_mask)
                negdis = distance_edt(negative_mask)
                surface = skimage_seg.find_boundaries(positive_mask, mode='inner').astype(np.uint8)
                # Guard the denominators: a mask covering the entire crop makes
                # max == min, which previously yielded NaNs from 0/0.
                pos_range = np.max(posdis) - np.min(posdis)
                neg_range = np.max(negdis) - np.min(negdis)
                pos_range = pos_range if pos_range > 0 else 1.0
                neg_range = neg_range if neg_range > 0 else 1.0
                sdf = (negdis - np.min(negdis)) / neg_range - (posdis - np.min(posdis)) / pos_range
                sdf[surface == 1] = 0  # the boundary itself sits at exactly 0
                gt_sdf[b][c] = sdf
    return gt_sdf


def compute_skeleton_sdf(img_gt, out_shape, spacings):
    """Compute a normalized signed distance function to the object *skeleton*.

    Like :func:`compute_sdf`, but distances are measured to the 3-D skeleton
    of each foreground mask (using per-batch voxel ``spacings``) instead of to
    the mask itself.  The background channel (c == 0) is skipped.

    Args:
        img_gt: one-hot ground truth, shape (batch, channel, *spatial).
        out_shape: output shape, (batch, channel, *spatial).
        spacings: per-batch voxel spacings passed to the distance transform.

    Returns:
        np.ndarray of shape ``out_shape`` with the normalized skeleton SDF.
    """
    img_gt = img_gt.astype(np.uint8)
    gt_sdf = np.zeros(out_shape)
    for b in range(out_shape[0]):
        for c in range(1, out_shape[1]):  # channel 0 is background; skip it
            positive_mask = img_gt[b][c]
            # Bug fix: np.bool was removed in NumPy 1.24 — use the builtin.
            positive_skeleton = skeletonize_3d(positive_mask).astype(bool)
            if positive_skeleton.any():
                negative_skeleton = ~positive_skeleton
                posdis = distance_edt(positive_skeleton, sampling=spacings[b])
                negdis = distance_edt(negative_skeleton, sampling=spacings[b])
                surface = skimage_seg.find_boundaries(positive_mask, mode='inner').astype(np.uint8)
                # Guard the denominators against max == min (degenerate masks),
                # which previously produced NaNs from 0/0.
                pos_range = np.max(posdis) - np.min(posdis)
                neg_range = np.max(negdis) - np.min(negdis)
                pos_range = pos_range if pos_range > 0 else 1.0
                neg_range = neg_range if neg_range > 0 else 1.0
                sdf = (negdis - np.min(negdis)) / neg_range - (posdis - np.min(posdis)) / pos_range
                sdf[surface == 1] = 0  # zero out the mask boundary
                gt_sdf[b][c] = sdf
    return gt_sdf


softmax_helper = lambda x: F.softmax(x, dim=1)


def compute_sdf_1(segmentation):
    """Compute the normalized signed distance function of a segmentation.

    Accepts either (batch, *spatial3d) — a channel axis is inserted — or
    (batch, channel, *spatial).  For multi-channel input the background
    channel (c == 0) is skipped; single-channel input is processed as-is.

    Args:
        segmentation: binary segmentation array.

    Returns:
        np.ndarray of the same (channel-expanded) shape with per-channel
        normalized SDFs: negative inside, positive outside, zero on the
        boundary and for empty masks.
    """
    segmentation = segmentation.astype(np.uint8)
    if len(segmentation.shape) == 4:
        segmentation = np.expand_dims(segmentation, 1)
    normalized_sdf = np.zeros(segmentation.shape)
    # Single-channel input has no separate background channel to skip.
    dis_id = 0 if segmentation.shape[1] == 1 else 1
    for b in range(segmentation.shape[0]):
        for c in range(dis_id, segmentation.shape[1]):
            # Bug fix: cast to bool before inverting.  `~` on uint8 is a
            # bitwise NOT (0 -> 255, 1 -> 254), so the old "negmask" was
            # all-nonzero and the negative distance map was meaningless.
            posmask = segmentation[b][c].astype(bool)
            if not posmask.any():
                # Empty mask: leave zeros (the old code divided 0 by 0 here).
                continue
            negmask = ~posmask
            posdis = distance_edt(posmask)
            negdis = distance_edt(negmask)
            boundary = skimage_seg.find_boundaries(posmask, mode='inner').astype(np.uint8)
            # Guard denominators against max == min (mask fills the crop).
            pos_range = np.max(posdis) - np.min(posdis)
            neg_range = np.max(negdis) - np.min(negdis)
            pos_range = pos_range if pos_range > 0 else 1.0
            neg_range = neg_range if neg_range > 0 else 1.0
            sdf = (negdis - np.min(negdis)) / neg_range - (posdis - np.min(posdis)) / pos_range
            sdf[boundary == 1] = 0  # the boundary itself sits at exactly 0
            normalized_sdf[b][c] = sdf
    return normalized_sdf


def sum_tensor(inp, axes, keepdim=False):
    """Sum a tensor over several axes, one axis at a time.

    Args:
        inp: input torch tensor.
        axes: iterable of axis indices to reduce over (duplicates ignored).
        keepdim: if True, keep each reduced axis with size 1.

    Returns:
        The reduced tensor.
    """
    unique_axes = np.unique(axes).astype(int)
    result = inp
    if keepdim:
        for axis in unique_axes:
            result = result.sum(int(axis), keepdim=True)
    else:
        # Collapse from the highest axis down so earlier indices stay valid.
        for axis in sorted(unique_axes, reverse=True):
            result = result.sum(int(axis))
    return result


class SurfaceLoss(nn.Module):
    """Boundary (surface) loss: mean of softmax probabilities weighted by the
    normalized signed distance function of the ground truth."""

    def __init__(self):
        super(SurfaceLoss, self).__init__()

    def forward(self, pred, gt):
        """Compute the surface loss.

        Args:
            pred: raw network logits, shape (batch, channel, *spatial).
            gt: label map (batch, *spatial) or one-hot (batch, channel, *spatial).

        Returns:
            Scalar tensor — mean of ``pred * sdf(gt)`` over all elements.
        """
        pred = softmax_helper(pred)
        with torch.no_grad():
            # Insert a channel axis when gt arrives as a plain label map.
            if len(pred.shape) != len(gt.shape):
                gt = gt.view((gt.shape[0], 1, *gt.shape[1:]))
            if all(i == j for i, j in zip(pred.shape, gt.shape)):
                gt_onehot = gt  # already one-hot
            else:
                gt = gt.long()
                gt_onehot = torch.zeros(pred.shape, device=pred.device)
                gt_onehot.scatter_(1, gt, 1)
            gt_sdf = compute_sdf(gt_onehot.cpu().numpy(), pred.shape)
        # Bug fix: cast unconditionally.  Previously the float64->float32 cast
        # only ran when devices differed, so a CPU `pred` hit a dtype-mismatch
        # error in the multiply.
        phi = torch.from_numpy(gt_sdf).to(pred.device, dtype=torch.float32)
        # Plain elementwise product — identical to the old "bcxy,bcxy->bcxy"
        # einsum but works for any spatial rank (2-D or 3-D).
        surface_loss = (pred * phi).mean()
        return surface_loss


class SurfaceSkeletonLoss(nn.Module):
    """Sum of a boundary (surface) loss and a skeleton-distance loss, both
    computed from signed distance maps of the ground truth."""

    def __init__(self):
        super(SurfaceSkeletonLoss, self).__init__()

    def forward(self, pred, gt, spacings):
        """Compute surface loss + skeleton loss over the foreground channels.

        Args:
            pred: raw network logits, shape (batch, channel, x, y, z).
            gt: label map (batch, x, y, z) or one-hot with pred's shape.
            spacings: per-batch voxel spacings for the skeleton distance map.

        Returns:
            Scalar tensor: surface_loss + skeleton_loss.
        """
        pred = softmax_helper(pred)
        with torch.no_grad():
            # Insert a channel axis when gt arrives as a plain label map.
            if len(pred.shape) != len(gt.shape):
                gt = gt.view((gt.shape[0], 1, *gt.shape[1:]))
            if all(i == j for i, j in zip(pred.shape, gt.shape)):
                gt_onehot = gt  # already one-hot
            else:
                gt = gt.long()
                gt_onehot = torch.zeros(pred.shape, device=pred.device)
                gt_onehot.scatter_(1, gt, 1)
            # Bug fix: compute_sdf takes (img_gt, out_shape) — the old call
            # passed `spacings` as a third argument and raised a TypeError.
            gt_sdf = compute_sdf(gt_onehot.cpu().numpy(), pred.shape)
            # Bug fix: the skeleton term must use compute_skeleton_sdf; the
            # old code computed the surface SDF twice.
            gt_skeleton_sdf = compute_skeleton_sdf(gt_onehot.cpu().numpy(), pred.shape, spacings)
        # Bug fix: cast unconditionally — previously the float32 cast was
        # skipped when pred was already on the same device as phi.
        phi = torch.from_numpy(gt_sdf).to(pred.device, dtype=torch.float32)
        phi_skeleton = torch.from_numpy(gt_skeleton_sdf).to(pred.device, dtype=torch.float32)
        # Elementwise products over the foreground channels (identical to the
        # old "bcxyz,bcxyz->bcxyz" einsums).
        surface_loss = (pred[:, 1:] * phi[:, 1:]).mean()
        skeleton_loss = (pred[:, 1:] * phi_skeleton[:, 1:]).mean()
        return surface_loss + skeleton_loss


class RegressionLoss(nn.Module):
    """Heatmap regression loss: mean absolute error plus mean squared error."""

    def __init__(self):
        super(RegressionLoss, self).__init__()

    def forward(self, pred_heat, gt_heat):
        """Return MAE(pred, gt) + MSE(pred, gt), each averaged over all elements."""
        # F.l1_loss with the default 'mean' reduction equals
        # torch.norm(diff, p=1) / numel, i.e. the mean absolute error.
        l1_term = F.l1_loss(pred_heat, gt_heat)
        l2_term = F.mse_loss(pred_heat, gt_heat)
        return l1_term + l2_term
