from abc import abstractmethod
from glob import glob
from multiprocessing import Pool
import os
from typing import List, Optional, Tuple, Union
import warnings
import cv2
import torch
from torch.utils.data import Dataset
import torchvision.transforms.functional as TF
from PIL import Image, ImageDraw
import numpy as np
from scipy.ndimage import distance_transform_edt
import xml.etree.ElementTree as ET
from scipy.ndimage.measurements import find_objects
from skimage.morphology import binary_erosion
from scipy.ndimage import binary_dilation
import scipy.io as sio
from stardist import star_dist
import cv2
from tqdm import tqdm

def label2bd(inst_map: np.ndarray, bboxs: np.ndarray = None, padding: int = 2, inner: bool = True) -> np.ndarray:
    """Convert an instance label map into a 3-class boundary map.

    Returns a ``uint8`` map with the same spatial shape as ``inst_map`` where
    0 = background, 1 = nucleus interior and 2 = nucleus boundary (any pixel
    hit more than once, i.e. foreground + edge, or overlapping edges).

    Args:
        inst_map: 2-D integer instance map; 0 is background.
        bboxs: optional per-instance boxes ``[x_min, x_max, y_min, y_max]``.
            When given, instance ids are assumed to be ``1..len(bboxs)``
            in box order.
        padding: safety margin added around the map so grown boxes never
            leave the array.
        inner: use the inner boundary (``mask ^ erosion``); otherwise the
            outer boundary (``mask ^ dilation``).
    """
    if bboxs is None:
        # Collect the actual instance ids (skip 0 = background) together with
        # their bounding boxes. BUG FIX: ids from np.unique need not be
        # contiguous, so keep them paired with their boxes instead of
        # assuming 1..N later.
        ids = np.unique(inst_map)[1:]
        bboxs = []
        for nu_idx in ids:
            xs, ys = np.where(inst_map == nu_idx)
            bboxs.append([xs.min(), xs.max(), ys.min(), ys.max()])
    else:
        # Caller-provided boxes: ids are assumed to be 1..N in box order.
        ids = np.arange(1, len(bboxs) + 1)

    padding2 = padding * 2
    # Pad so a box grown by `padding` on each side stays inside the array.
    inst_map = np.pad(inst_map, ((padding, padding), (padding, padding)), 'constant', constant_values=0)
    # Start from the foreground mask: 1 inside any nucleus, 0 elsewhere.
    probabilitys = inst_map.astype(bool).astype(np.uint8)
    for nu_idx, (x_min, x_max, y_min, y_max) in zip(ids, bboxs):
        # Crop a region around this instance (box + margin, padded coords).
        nu_mask = inst_map[x_min:x_max + padding2, y_min:y_max + padding2] == nu_idx
        # BUG FIX: the original ternary parsed as
        #   (nu_mask ^ binary_erosion(nu_mask)) if inner else binary_dilation(nu_mask)
        # so the `inner=False` branch yielded the whole dilated mask instead
        # of the outer edge ring intended by the (commented-out) original.
        edge = nu_mask ^ (binary_erosion(nu_mask) if inner else binary_dilation(nu_mask))
        xs, ys = np.where(edge)
        probabilitys[xs + x_min, ys + y_min] += 1
    # Anything hit twice (foreground + its edge, or two touching edges) -> 2.
    probabilitys = np.where(probabilitys > 1, 2, probabilitys)
    return probabilitys[padding:-padding, padding:-padding]  # drop the padding

def edt_prob(lbl_img, with_center: bool = True):
    """Perform EDT on each labeled object and normalize per object.

    Args:
        lbl_img: integer label image; 0 is background.
        with_center: additionally build a map with 1 on each object's
            outer contour (via cv2.findContours) and 0 elsewhere.

    Returns:
        (prob, center_dist): ``prob`` is the per-object-normalized distance
        transform (float32, values in [0, 1)); ``center_dist`` is the
        contour map, or None when ``with_center`` is False.
    """

    def grow(sl, interior):
        # Expand each slice by one pixel on every side that has room.
        return tuple(
            slice(s.start - int(w[0]), s.stop + int(w[1])) for s, w in zip(sl, interior)
        )

    def shrink(interior):
        # Inverse of grow(): crop the grown region back to the object box.
        return tuple(slice(int(w[0]), (-1 if w[1] else None)) for w in interior)

    constant_img = lbl_img.min() == lbl_img.max() and lbl_img.flat[0] > 0
    if constant_img:
        # Surround an all-foreground image with background so the EDT is defined.
        lbl_img = np.pad(lbl_img, ((1, 1),) * lbl_img.ndim, mode="constant")
        warnings.warn(
            "EDT of constant label image is ill-defined. (Assuming background around it.)"
        )

    objects = find_objects(lbl_img)
    prob = np.zeros(lbl_img.shape, np.float32)
    center_dist = np.zeros(lbl_img.shape, np.float32)
    for i, sl in enumerate(objects, 1):
        if sl is None:  # label id i absent from the image
            continue
        interior = [(s.start > 0, s.stop < sz) for s, sz in zip(sl, lbl_img.shape)]
        shrink_slice = shrink(interior)
        grown_mask = lbl_img[grow(sl, interior)] == i
        mask = grown_mask[shrink_slice]
        edt = distance_transform_edt(grown_mask)[shrink_slice][mask]
        prob[sl][mask] = edt / (np.max(edt) + 1e-10)  # normalize per object
        if with_center:
            contours, _ = cv2.findContours(grown_mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            # BUG FIX: the original drew contours onto center_dist[sl][mask],
            # which is a 1-D fancy-indexed *copy* — cv2 cannot draw a 2-D
            # contour on it. Draw on a 2-D canvas of the grown region, then
            # copy the masked pixels back.
            canvas = np.zeros(grown_mask.shape, dtype=np.float32)
            cv2.drawContours(canvas, contours, -1, 1, 1)
            center_dist[sl][mask] = canvas[shrink_slice][mask]
    if constant_img:
        # BUG FIX: un-pad both outputs (center_dist previously kept the
        # padded shape, mismatching prob).
        unpad = (slice(1, -1),) * lbl_img.ndim
        prob = prob[unpad].copy()
        center_dist = center_dist[unpad].copy()
    return prob, (center_dist if with_center else None)


def gen_dist_label(label_path_or_label: Union[str, torch.Tensor],
                    dist_path: Optional[str]=None, with_pe: bool=True) -> Optional[Tuple[torch.Tensor, torch.Tensor]]:
    """Compute the normalized EDT probability map (and optionally the
    contour / positional-encoding map) for an instance label.

    Args:
        label_path_or_label: path to a torch-saved label tensor, or the
            label tensor itself.
        dist_path: when given, the result is saved to disk (float16) and
            None is returned instead of tensors.
        with_pe: also compute the second map produced by ``edt_prob``.

    Returns:
        ``(prob, center)`` float32 tensors when ``dist_path`` is None
        (``center`` is None when ``with_pe`` is False); otherwise None.
    """
    label = (
        torch.load(label_path_or_label).long().cpu().numpy()
        if isinstance(label_path_or_label, str)
        else label_path_or_label.long().cpu().numpy()
    )
    prob, center = edt_prob(label, with_center=with_pe)
    if dist_path is not None:
        torch.save(torch.from_numpy(prob).to(torch.float16), dist_path)
        if with_pe:
            # BUG FIX: the PE map was previously saved to the *same*
            # dist_path, overwriting the prob map just written. Save it to a
            # sibling "<stem>_pe<ext>" file instead.
            root, ext = os.path.splitext(dist_path)
            torch.save(torch.from_numpy(center).to(torch.float16), root + "_pe" + ext)
        return None
    prob_tensor = torch.from_numpy(prob).float()
    return prob_tensor, (torch.from_numpy(center).float() if with_pe else None)


class BaseDataset(Dataset):
    """Common scaffolding for the nuclei-segmentation datasets below.

    Subclasses provide the four ``load_*`` hooks. NOTE(review): the base
    ``__init__`` calls ``load_image_paths()`` before assigning
    ``self.base_dir``, so subclasses must set ``base_dir`` (and anything
    else the hooks read) *before* delegating here — all subclasses in this
    file do so.
    """

    def __init__(
        self,
        base_dir: str,
        mode: Union[str, int] = "train",
        with_cls: bool = False,
        transform=None,
        load_from_memory: bool = True,
        re_gen: bool = False,
        save_dist_label: bool = True,
    ):
        super().__init__()
        self.mode = mode
        self.transform = transform
        # Path discovery is delegated to the subclass hooks.
        self.image_paths = self.load_image_paths()
        self.label_paths = self.load_label_paths()
        self.dist_paths = self.load_dist_paths()
        self.len = len(self.image_paths)
        self.with_cls = with_cls
        self.load_from_memory = load_from_memory
        self.base_dir = base_dir
        # (Re)generate the on-disk distance labels if requested or missing.
        missing = not all(os.path.exists(p) for p in self.dist_paths)
        if save_dist_label and mode == 'train' and (missing or re_gen):
            self.gen_dist()

        # Bind the loading strategy once instead of branching per item.
        if self.load_from_memory:
            self.load_data = self.load_data_from_memory
        else:
            self.load_data = self.load_data_from_disk

    def __getitem__(self, index):
        image, *labels = self.load_data(index)
        if self.transform is not None:
            image, *labels = self.transform(image, *labels)
        return (image, *labels)

    def __len__(self):
        return self.len

    def load_data_from_memory(self, index: int) -> torch.Tensor:
        # Populate the cache lazily on first access.
        if not hasattr(self, "cache"):
            print('Load data into memory...')
            self.cache = []
            for i in tqdm(range(self.len)):
                self.cache.append(self.load_data_from_disk(i))
            print('Load data into memory finished')
        return self.cache[index]

    def gen_dist(self) -> None:
        """Precompute and save an EDT map next to every label file."""
        os.makedirs(os.path.join(self.base_dir, "train", "Dists"), exist_ok=True)
        for label_path in self.label_paths:
            gen_dist_label(label_path, label_path.replace("Labels", "Dists"))

    @abstractmethod
    def load_image_paths(self) -> List[str]:
        raise NotImplementedError

    @abstractmethod
    def load_label_paths(self) -> List[str]:
        raise NotImplementedError

    @abstractmethod
    def load_dist_paths(self) -> List[str]:
        raise NotImplementedError

    @abstractmethod
    def load_data_from_disk(self, index: int) -> torch.Tensor:
        raise NotImplementedError


class MoNuSeg(BaseDataset):
    """MoNuSeg dataset: .tif tissue images with Aperio-style XML polygon
    annotations. On first use the XMLs are rasterized into instance-label
    tensors saved under ``<base_dir>/<split>/Labels``.
    """

    def __init__(
        self,
        base_dir: str,
        re_gen: bool = False,
        mode: Union[str, int] = "train",
        transform=None,
        load_from_memory: bool = True,
        n_rays: int = 4,
        with_pe: bool = True,
        **kwargs,
    ):
        assert os.path.exists(base_dir), f"Path {base_dir} does not exist"
        # base_dir must be set before BaseDataset.__init__ runs the path hooks.
        self.base_dir = base_dir
        self.n_rays = n_rays
        self.with_pe = with_pe
        if not os.path.exists(os.path.join(base_dir, "train", "Labels")):
            self.first_load()
        BaseDataset.__init__(
            self,
            base_dir=base_dir, 
            mode=mode, 
            with_cls=False, 
            transform=transform, 
            load_from_memory=load_from_memory, 
            re_gen=re_gen
        )

    def first_load(self):
        """Rasterize every XML annotation (train and test) to a .pt label."""
        print("First load, this may take a while...")
        os.makedirs(os.path.join(self.base_dir, "train", "Labels"), exist_ok=True)       
        os.makedirs(os.path.join(self.base_dir, "test", "Labels"), exist_ok=True)
        train_ann_paths = glob(
            os.path.join(self.base_dir, "train", "Annotations/*.xml")
        )
        train_label_paths = [
            path.replace("Annotations", "Labels").replace("xml", "pt")
            for path in train_ann_paths
        ]
        test_ann_paths = glob(os.path.join(self.base_dir, "test", "Annotations/*.xml"))
        test_label_paths = [
            path.replace("Annotations", "Labels").replace("xml", "pt")
            for path in test_ann_paths
        ]
        # BUG FIX: guard against cpu_count() <= 2 (Pool(0) raises), and use a
        # context manager so the pool is released even if a worker fails.
        with Pool(max(1, (os.cpu_count() or 1) - 2)) as p:
            p.starmap(self._ann_2_label, iterable=zip(train_ann_paths, train_label_paths))
            p.starmap(self._ann_2_label, iterable=zip(test_ann_paths, test_label_paths))

    @staticmethod
    def _ann_2_label(ann_path: str, label_path: str) -> None:
        """Rasterize one XML annotation into an int16 instance map on a
        hard-coded 1000x1000 canvas and save it as a torch tensor.

        BUG FIX: this method was defined without ``self``; calling it via
        ``self._ann_2_label`` in starmap bound the instance as ``ann_path``
        and passed three arguments to a two-parameter function. Declared as
        a staticmethod so starmap delivers exactly (ann_path, label_path).
        """
        tree = ET.parse(ann_path)
        root = tree.getroot().find("Annotation").find("Regions")
        width, height = 1000, 1000
        instances = root.findall("Region")
        labels = np.zeros((height, width), dtype=np.int16)

        def get_polygon(region):
            # Rasterize one region's vertex list into a binary mask.
            label = Image.new("L", (width, height))
            draw = ImageDraw.Draw(label)
            vertices = region.find("Vertices")
            xy = [
                (float(vertex.get("X")), float(vertex.get("Y"))) for vertex in vertices
            ]
            draw.polygon(xy, fill=1, outline=1)
            return np.array(label)

        for idx, instance in enumerate(instances):
            inst = get_polygon(instance) != 0
            labels[inst] = idx + 1  # later polygons overwrite earlier on overlap
        torch.save(torch.from_numpy(labels), label_path)

    def load_image_paths(self):
        return glob(os.path.join(self.base_dir, self.mode, "Tissue Images", "*.tif"))

    def load_label_paths(self):
        return [
            path.replace("Tissue Images", "Labels").replace("tif", "pt")
            for path in self.image_paths
        ]

    def load_dist_paths(self):
        return [
            path.replace("Tissue Images", "Dists").replace("tif", "pt")
            for path in self.image_paths
        ]

    def load_data_from_disk(self, index: int):
        """Return (image, fg_mask, prob, star_distances[, center_dist]) in
        train mode, otherwise (image, instance_label)."""
        image = TF.to_tensor(Image.open(self.image_paths[index]))
        label = torch.load(self.label_paths[index])
        if self.mode == 'train':
            distances = torch.from_numpy(
                np.transpose(star_dist(label.cpu().numpy(), self.n_rays), (2, 0, 1))
            )
            temp = gen_dist_label(label, with_pe=self.with_pe)  # (prob, center_dist)
            return (
                image,
                label.bool().long(),
                temp[0],
                distances,
                *((temp[1],) if self.with_pe else ()),
            )
        return image, label.long()

class CoNSeP(BaseDataset):
    """CoNSeP dataset: .png images with MATLAB ``inst_map`` label files."""

    def __init__(
        self,
        base_dir: str,
        re_gen: bool = False,
        mode: Union[str, int] = "train",
        transform=None,
        load_from_memory: bool = True,
        n_rays: int = 4,
        with_pe: bool = True,
        **kwargs,
    ):
        assert os.path.exists(base_dir), f"Path {base_dir} does not exist"
        # base_dir must exist before BaseDataset.__init__ runs the path hooks.
        self.base_dir = base_dir
        self.n_rays = n_rays
        self.with_pe = with_pe
        BaseDataset.__init__(
            self,
            base_dir=base_dir,
            mode=mode,
            with_cls=False,
            transform=transform,
            load_from_memory=load_from_memory,
            re_gen=re_gen,
            save_dist_label=True,
        )

    def _ann_2_label(self, ann_path):
        # Instance maps live in MATLAB files under the "inst_map" key.
        return sio.loadmat(ann_path)["inst_map"].astype("int32")

    def load_data_from_disk(self, index: int):
        """Return (image, fg_mask, prob, star_distances[, center_dist]) in
        train mode, otherwise (image, instance_label)."""
        rgb = Image.open(self.image_paths[index]).convert("RGB")
        image = TF.to_tensor(rgb)
        label = torch.from_numpy(self._ann_2_label(self.label_paths[index]))
        if self.mode != 'train':
            return image, label.long()
        star = star_dist(label.cpu().numpy(), self.n_rays)
        distances = torch.from_numpy(np.transpose(star, (2, 0, 1)))
        prob, center = gen_dist_label(label, with_pe=self.with_pe)
        extras = (center,) if self.with_pe else ()
        return (image, label.bool().long(), prob, distances, *extras)

    def load_image_paths(self):
        return glob(os.path.join(self.base_dir, self.mode, "Images", "*.png"))

    def load_label_paths(self):
        return [
            p.replace("Images", "Labels").replace("png", "mat")
            for p in self.image_paths
        ]

    def load_dist_paths(self):
        return [
            p.replace("Images", "Dists").replace("png", "pt")
            for p in self.image_paths
        ]

    def gen_dist(self) -> None:
        """Precompute EDT maps; overrides the base because labels are .mat."""
        os.makedirs(os.path.join(self.base_dir, "train", "Dists"), exist_ok=True)
        for label_path in self.label_paths:
            target = label_path.replace("Labels", "Dists").replace('mat', 'pt')
            inst = torch.from_numpy(self._ann_2_label(label_path))
            gen_dist_label(inst, target)


class CPM17(CoNSeP):
    """CPM17 dataset: identical layout and label format to CoNSeP, so it
    reuses that class entirely and only narrows the constructor."""

    def __init__(
        self,
        base_dir: str,
        re_gen: bool = False,
        mode: Union[str, int] = "train",
        transform=None,
        load_from_memory: bool = True,
        n_rays: int = 4,
        **kwargs,
    ):
        assert os.path.exists(base_dir), f"Path {base_dir} does not exist"
        self.base_dir = base_dir
        # Delegate everything to CoNSeP (which in turn calls BaseDataset).
        CoNSeP.__init__(
            self,
            base_dir=base_dir,
            mode=mode,
            transform=transform,
            load_from_memory=load_from_memory,
            re_gen=re_gen,
            n_rays=n_rays,
        )


class Lizard(BaseDataset):
    """Lizard dataset: samples are selected per fold via the third column
    of ``Lizard_Labels/info.csv``; labels are MATLAB files carrying both an
    instance map and per-instance classes.
    """

    def __init__(
        self,
        base_dir: str,
        mode: str = "train",
        with_cls: bool = False,
        transform=None,
        load_from_memory: bool = True,
        re_gen: bool = False,
        n_rays: int = 4,
        with_pe: bool = False,
        **kwargs,
    ):
        self.fold_id = int(kwargs["fold_id"])  # required keyword argument
        self.base_dir = base_dir
        self.n_rays = n_rays
        self.with_pe = with_pe
        self._read_info()
        BaseDataset.__init__(
            self,
            base_dir,
            mode,
            with_cls,
            transform,
            load_from_memory,
            False,  # re_gen: dist maps are computed on the fly
            False,  # save_dist_label: never cached on disk for Lizard
        )

    def _read_info(self):
        """Collect the sample names whose fold column matches ``fold_id``."""
        self.file_names = []
        with open(os.path.join(self.base_dir, "Lizard_Labels", "info.csv"), "r") as f:
            f.readline()  # skip the CSV header
            lines = f.readlines()
            for line in lines:
                line = line.strip().split(",")
                if int(line[2]) == self.fold_id:
                    self.file_names.append(line[0])

    def load_image_paths(self) -> List[str]:
        return [os.path.join(self.base_dir, "Lizard_Images", name + '.png') for name in self.file_names]

    def load_label_paths(self) -> List[str]:
        return [path.replace("Lizard_Images", "Lizard_Labels/Labels").replace("png", "mat") for path in self.image_paths]

    def load_dist_paths(self) -> List[str]:
        # Dist maps are generated on the fly, never cached on disk.
        return []

    def _get_label(self, label_path: str) -> Tuple[torch.Tensor, torch.Tensor]:
        """Load (instance_map, class_map) from a MATLAB label file."""
        label = sio.loadmat(label_path)
        inst_map = label["inst_map"]
        inst_map = torch.from_numpy(inst_map).long()
        classes = np.squeeze(label["class"]).tolist()
        # Prepend class 0 (background) so instance ids index directly.
        classes = [0] + classes
        classes = torch.tensor(classes).long()
        clss_map = classes[inst_map]
        return inst_map, clss_map

    def load_data_from_disk(self, index: int):
        """Return (image, fg_mask, prob, star_distances[, center_dist]) in
        train mode; otherwise (image, inst_label[, cls_label])."""
        image = TF.to_tensor(Image.open(self.image_paths[index]))
        inst_label, cls_label = self._get_label(self.label_paths[index])
        if self.mode == 'train':
            distances = torch.from_numpy(np.transpose(star_dist(inst_label.cpu().numpy(), self.n_rays), (2, 0, 1)))
            # CONSISTENCY FIX: honour self.with_pe like the other datasets —
            # previously the center map was always computed and then dropped.
            temp = gen_dist_label(inst_label, with_pe=self.with_pe)
            return (
                image,
                inst_label.bool().long(),
                temp[0],
                distances,
                *((temp[1],) if self.with_pe else ()),
            )
        if self.with_cls:
            return image, inst_label.long(), cls_label
        return image, inst_label.long()


class Pannuke(BaseDataset):
    """PanNuke dataset: pre-extracted .pt image/mask tensors stored per fold
    under ``images/fold<k>`` and ``masks/fold<k>``.
    """

    def __init__(
        self,
        base_dir: str,
        mode: str = "train",
        with_cls: bool = False,
        transform=None,
        load_from_memory: bool = True,
        re_gen: bool = False,
        n_rays: int = 4,
        with_pe: bool = False,
        **kwargs,
    ):
        self.fold_id = int(kwargs["fold_id"])  # required keyword argument
        self.base_dir = base_dir
        self.n_rays = n_rays
        self.with_pe = with_pe
        BaseDataset.__init__(
            self,
            base_dir,
            mode,
            with_cls,
            transform,
            load_from_memory,
            False,  # re_gen: dist maps are computed on the fly
            False,  # save_dist_label: never cached on disk for PanNuke
        )

    def load_image_paths(self) -> List[str]:
        image_path = os.path.join(self.base_dir, "images", f"fold{self.fold_id}")
        # BUG FIX: glob order is filesystem-dependent, and label paths are
        # globbed independently below — sort both so index i of images lines
        # up with index i of masks.
        return sorted(glob(os.path.join(image_path, "*.pt")))

    def load_label_paths(self) -> List[str]:
        label_path = os.path.join(self.base_dir, "masks", f"fold{self.fold_id}")
        return sorted(glob(os.path.join(label_path, "*.pt")))

    def load_dist_paths(self) -> List[str]:
        # Dist maps are generated on the fly, never cached on disk.
        return []

    def load_data_from_disk(self, index: int):
        """Return (image, fg_mask, prob, star_distances[, center_dist]) in
        train mode, otherwise (image, label)."""
        image = torch.load(self.image_paths[index]).float()
        label = torch.load(self.label_paths[index]).long()
        if self.mode == 'train':
            distances = torch.from_numpy(np.transpose(star_dist(label.cpu().numpy(), self.n_rays), (2, 0, 1)))
            # CONSISTENCY FIX: honour self.with_pe like the other datasets —
            # previously the center map was always computed and then dropped.
            temp = gen_dist_label(label, with_pe=self.with_pe)
            return (
                image,
                label.bool().long(),
                temp[0],
                distances,
                *((temp[1],) if self.with_pe else ()),
            )
        return image, label
