"""
Original work Copyright 2021 Manan Lalit, Max Planck Institute of Molecular Cell Biology and Genetics  (MIT License https://github.com/juglab/EmbedSeg/blob/main/LICENSE)
Modified work Copyright 2022 Katharina Löffler, Karlsruhe Institute of Technology (MIT License)
Modifications: process image pairs; shift augmentation; process multiple data sets
"""
import glob
import os
import random
from pathlib import Path

import numpy as np
import tifffile
import cv2
from torch.utils.data import Dataset


class TwoDimensionalDataset(Dataset):
    """
    Dataset of 2D image crop pairs from successive time points (t, t-1),
    together with their instance masks, cell-center images and flow fields.

    With probability ``translation_prob`` a sample is instead generated
    synthetically by cropping one full frame twice with a random relative
    shift (see get_translated_sample), which simulates frame-to-frame motion.
    """

    def __init__(
        self,
        crop_dir="./",
        data_dir="./",
        data_subsets=[],  # NOTE(review): mutable default; only iterated here, but prefer None
        center="center-medoid",
        type="train",  # NOTE(review): shadows the builtin `type`; only used for printing
        bg_id=0,
        size=None,
        transform=None,
        translation_prob=0.5,
        max_rel_translation=0.1,
        crop_size=None,
    ):
        """Collect the file lists, build the pair index and store settings.

        Args:
            crop_dir (string): path to the directory containing the cropped data
            data_dir (string): path to the directory containing the non cropped data
            data_subsets (list): list of sub_directories to use
            center (string): indicating the type of cell centers to train on
            type (string): data set type ("train" or "val")
            bg_id (int): value of background pixels in the segmentation masks
            size (int, optional):  if int yield only N samples of the dataset per (train/val) epoch;
                                    if None: use the full dataset  per (train/val) epoch
            transform (Callable): transformations to apply to each sample
            translation_prob (float): probability of a sample to be translated in a random direction
            max_rel_translation (float): maximum translation relative to the crop size (e.g. if max_rel_translation=0.1
             and the crop size is 256 images can be shifted by a maximum of 25 pixels to each other)
            crop_size (tuple or int, optional): size of the image crops (height, width) or single value for square crops
        """

        print(
            "2-D `{}` dataloader created! Accessing data from {}/{}/".format(
                type, crop_dir, data_subsets
            )
        )
        self.data_dir = data_dir
        self.center = center
        # Gather the per-subset file lists (images, masks, center images, flow
        # fields) and concatenate them across all requested subsets.
        image_list = []
        instance_list = []
        center_image_list = []
        flow_image_list = []
        for sub_set in data_subsets:
            img_list = glob.glob(
                os.path.join(crop_dir, "{}/".format(sub_set), "images/*.tif")
            )
            img_list.sort()
            image_list.extend(img_list)
            print(
                "Number of images in `{}` directory is {}".format(
                    sub_set, len(img_list)
                )
            )

            inst_list = glob.glob(
                os.path.join(crop_dir, "{}/".format(sub_set), "masks/*.tif")
            )
            print(
                "Number of instances in `{}` directory is {}".format(
                    sub_set, len(inst_list)
                )
            )
            inst_list.sort()
            instance_list.extend(inst_list)

            center_img_list = glob.glob(
                os.path.join(crop_dir, "{}/".format(sub_set), center + "/center*.tif")
            )
            print(
                "Number of center images in `{}` directory is {}".format(
                    sub_set, len(center_img_list)
                )
            )
            center_img_list.sort()
            center_image_list.extend(center_img_list)

            # NOTE(review): the flow list is not sorted, unlike the others;
            # pairing below looks flow files up by value (.index), so order
            # does not matter for correctness.
            flow_img_list = glob.glob(
                os.path.join(
                    crop_dir, "{}/".format(sub_set), center + "-flow" + "/*.tif"
                )
            )
            flow_image_list.extend(flow_img_list)

        print("*************************")

        self.image_list = image_list
        self.instance_list = instance_list
        self.center_image_list = center_image_list
        self.flow_image_list = flow_image_list
        self.bg_id = bg_id
        self.size = size
        self.pair_index = self.get_image_pairs()
        self.n_pairs = len(self.pair_index)
        self.transform = transform
        self.crop_size = crop_size
        self.p_translation = translation_prob
        self.max_offset = max_rel_translation

        # NOTE(review): debug prints with non-English text; consider removing
        # or gating behind a verbosity flag.
        print(f"image_list样例: {self.image_list[:10]}")
        print(f"pair_index样例: {self.pair_index[:10]}")
        print(f"总配对数: {self.n_pairs}")

    def get_image_pairs(self):
        """
        Extract all pairs of image crops of successive time points t, t-1.

        Returns:
            list of (index_curr, index_prev, flow_index) tuples, where
            index_curr/index_prev index self.image_list (frames t+1 and t)
            and flow_index indexes self.flow_image_list. Pairs whose next
            frame or flow file is missing are skipped.
        """
        pairs = []
        
        # Normalize every path once so the .index() lookups below are
        # insensitive to separator / "./" differences between glob results
        # and the paths we construct here.
        image_list_norm = [os.path.normpath(p) for p in self.image_list]
        flow_image_list_norm = [os.path.normpath(p) for p in self.flow_image_list]
        
        for i, path_img_file in enumerate(self.image_list):
            path_img, name_img = os.path.split(path_img_file)
            # NOTE(review): split(".") assumes exactly one dot in the file
            # name; a name like "t000_000.ome.tif" would raise ValueError.
            name_img, ending = name_img.split(".")
            # Crop names are "<time>_<patch_id>", with an optional "t" prefix
            # on the time part (e.g. "t000_000" or "000_000").
            time, patch_id = name_img.split("_")
            if time.startswith('t'):
                time_int = int(time[1:])
                time_prefix = "t"
            else:
                time_int = int(time)
                time_prefix = ""
            # Name of the same patch at the next time point, preserving the
            # zero-padding width of the original time string.
            name_next_img = (
                "_".join([time_prefix + str(time_int + 1).zfill(len(time.lstrip('t'))), patch_id]) + "." + ending
            )
            # Flow file naming convention: "center<t+1>_center<t>_<patch>.tif",
            # e.g. center001_center000_000.tif.
            # NOTE(review): zfill(3) hard-codes 3-digit time indices here,
            # while the image name above keeps the original width — confirm
            # both conventions match the data on disk.
            flow_img_name = (
                "_".join([f"center{str(time_int + 1).zfill(3)}", f"center{str(time_int).zfill(3)}", patch_id])
                + "."
                + ending
            )
            path_flow_img = os.path.normpath(os.path.join(
                os.path.dirname(path_img),
                "-".join([self.center, "flow"]),
                flow_img_name,
            ))
            next_img_path = os.path.normpath(os.path.join(path_img, name_next_img))
            
            try:
                next_img_index = image_list_norm.index(next_img_path)
                flow_index = flow_image_list_norm.index(path_flow_img)
                pairs.append((next_img_index, i, flow_index))
            except ValueError:
                # Pair is incomplete (last frame, or missing flow file): skip.
                continue
        
        return pairs

    def __len__(self):
        # When self.size is set, the epoch length is fixed to that many
        # (randomly re-drawn, see __getitem__) samples.
        return len(self.pair_index) if self.size is None else self.size

    def convert_yx_to_cyx(self, im, key):
        """Add a leading channel axis so arrays become CYX.

        Gray-scale images (YX) and all non-image arrays get a singleton
        channel axis; 3D arrays with key "image" are assumed to already be
        multi-channel (CYX) and are passed through unchanged.
        """
        if im.ndim == 2 and key == "image":  # gray-scale image
            im = im[np.newaxis, ...]  # CYX
        elif im.ndim == 3 and key == "image":  # multi-channel image image
            pass
        else:
            im = im[np.newaxis, ...]
        return im

    def get_sample(self, index):
        """
        Get a data sample - containing the raw image pair (t, t-1) and the label images (t, t-1)
        Args:
            index (int): index of the selected image pair

        Returns: dict of data sample

        """
        index_curr, index_prev, flow_index = self.pair_index[index]
        sample = {}

        # load image pair
        image_curr = tifffile.imread(self.image_list[index_curr])  # YX or CYX
        image_prev = tifffile.imread(self.image_list[index_prev])  # YX or CYX
        
        # Resolve the target crop size: default to 256x256 when crop_size is
        # None, expand a single int to a square, validate tuples.
        if self.crop_size is None:
            crop_size = (256, 256)
        elif isinstance(self.crop_size, int):
            crop_size = (self.crop_size, self.crop_size)
        else:
            if len(self.crop_size) != 2:
                raise AssertionError(f"{self.crop_size} is not 2D")
            crop_size = self.crop_size
            
        # Force a uniform spatial size so all tensors in the sample match.
        # cv2.resize takes dsize as (width, height), hence the swapped order.
        if image_curr.shape[0] != crop_size[0] or image_curr.shape[1] != crop_size[1]:
            image_curr = cv2.resize(image_curr, (crop_size[1], crop_size[0]), interpolation=cv2.INTER_LINEAR)
        if image_prev.shape[0] != crop_size[0] or image_prev.shape[1] != crop_size[1]:
            image_prev = cv2.resize(image_prev, (crop_size[1], crop_size[0]), interpolation=cv2.INTER_LINEAR)
        
        image_curr = self.convert_yx_to_cyx(image_curr, key="image")
        image_prev = self.convert_yx_to_cyx(image_prev, key="image")
        sample["image_curr"] = image_curr  # CYX
        sample["image_prev"] = image_prev  # CYX
        sample["im_name_curr"] = self.image_list[index_curr]
        sample["im_name_prev"] = self.image_list[index_prev]
        if len(self.instance_list) != 0:
            instance_curr = tifffile.imread(
                self.instance_list[index_curr]
            )  # YX or DYX (one-hot!)
            instance_prev = tifffile.imread(
                self.instance_list[index_prev]
            )  # YX or DYX (one-hot!)
            
            # Apply the same size normalization to the instance masks;
            # nearest-neighbour interpolation keeps label values intact.
            if instance_curr.shape[0] != crop_size[0] or instance_curr.shape[1] != crop_size[1]:
                instance_curr = cv2.resize(instance_curr.astype(np.float32), (crop_size[1], crop_size[0]), interpolation=cv2.INTER_NEAREST).astype(instance_curr.dtype)
            if instance_prev.shape[0] != crop_size[0] or instance_prev.shape[1] != crop_size[1]:
                instance_prev = cv2.resize(instance_prev.astype(np.float32), (crop_size[1], crop_size[0]), interpolation=cv2.INTER_NEAREST).astype(instance_prev.dtype)
            
            instance_curr, label_curr = self.decode_instance(instance_curr, self.bg_id)
            instance_prev, label_prev = self.decode_instance(instance_prev, self.bg_id)
            instance_curr = self.convert_yx_to_cyx(
                instance_curr, key="instance"
            )  # CYX or CDYX
            instance_prev = self.convert_yx_to_cyx(
                instance_prev, key="instance"
            )  # CYX or CDYX
            label_curr = self.convert_yx_to_cyx(label_curr, key="label")  # CYX
            label_prev = self.convert_yx_to_cyx(label_prev, key="label")  # CYX
            sample["instance_curr"] = instance_curr
            sample["instance_prev"] = instance_prev
            sample["label_curr"] = label_curr
            sample["label_prev"] = label_prev
        if len(self.center_image_list) != 0:
            center_image_curr = tifffile.imread(
                self.center_image_list[index_curr]
            ).astype(np.float32)  # cast to float32 to avoid bool-dtype conflicts
            center_image_prev = tifffile.imread(
                self.center_image_list[index_prev]
            ).astype(np.float32)  # cast to float32 to avoid bool-dtype conflicts
            
            # Apply the same size normalization to the center images.
            if center_image_curr.shape[0] != crop_size[0] or center_image_curr.shape[1] != crop_size[1]:
                center_image_curr = cv2.resize(center_image_curr.astype(np.float32), (crop_size[1], crop_size[0]), interpolation=cv2.INTER_NEAREST).astype(center_image_curr.dtype)
            if center_image_prev.shape[0] != crop_size[0] or center_image_prev.shape[1] != crop_size[1]:
                center_image_prev = cv2.resize(center_image_prev.astype(np.float32), (crop_size[1], crop_size[0]), interpolation=cv2.INTER_NEAREST).astype(center_image_prev.dtype)
            
            center_image_curr = self.convert_yx_to_cyx(
                center_image_curr, key="center_image"
            )  # CYX
            center_image_prev = self.convert_yx_to_cyx(
                center_image_prev, key="center_image"
            )  # CYX
            sample["center_image_curr"] = center_image_curr
            sample["center_image_prev"] = center_image_prev
            
            # Apply the same size normalization to the flow field.
            flow = tifffile.imread(self.flow_image_list[flow_index])
            if flow.shape[1] != crop_size[0] or flow.shape[2] != crop_size[1]:
                # flow is a 3D array (C, Y, X); resize channel by channel.
                resized_flow = np.zeros((flow.shape[0], crop_size[0], crop_size[1]), dtype=flow.dtype)
                for i in range(flow.shape[0]):
                    resized_flow[i] = cv2.resize(flow[i].astype(np.float32), (crop_size[1], crop_size[0]), interpolation=cv2.INTER_LINEAR)
                flow = resized_flow
            sample["flow"] = flow
        return sample

    def get_translated_sample(self, index):
        """
        Simulate a shift between successive frames by shifting an image crop in a random direction.
        Args:
            index (int): index of the selected image pair, however only the first image will be used and translated

        Returns:
            dict with the same keys as get_sample(); "flow" holds the
            synthetic offset field (-translation at shifted center pixels).
        """
        # get first image
        img_id_curr, _, _ = self.pair_index[index]
        img_file = self.image_list[img_id_curr]
        # Do not mutate self.crop_size here; a local copy is resolved below.
        # sample position
        img_crop_name = os.path.basename(img_file)
        
        # Map the crop file name (e.g. "t000_000.tif" or "000_000.tif") back
        # to the full-frame file name in data_dir.
        time_part, patch_id = img_crop_name.split("_")
        if time_part.startswith('t'):
            time_int = int(time_part[1:])  # strip the leading 't'
            full_img_name = f"{time_int:03d}.{img_crop_name.split('.')[-1]}"  # e.g. "000.tif"
        else:
            full_img_name = time_part + "." + img_crop_name.split(".")[-1]
            
        # Reconstruct the subset sub-directories between the data-set root
        # and the "images" folder from the crop path.
        data_set = Path(self.data_dir).name
        path_parts = list(Path(img_file).parts)
        sub_dirs = path_parts[
            path_parts.index(data_set) + 1 : path_parts.index("images")
        ]
        
        # Check that the full-frame files exist before reading them.
        image_path = os.path.join(self.data_dir, *sub_dirs, "images", full_img_name)
        # Center images carry a "center" prefix in their file name.
        center_file_name = f"center{full_img_name}"
        center_path = os.path.join(self.data_dir, *sub_dirs, self.center, center_file_name)
        mask_path = os.path.join(self.data_dir, *sub_dirs, "masks", full_img_name)
        
        if not os.path.exists(image_path):
            raise FileNotFoundError(f"图像文件不存在: {image_path}")
        if not os.path.exists(center_path):
            raise FileNotFoundError(f"中心图像文件不存在: {center_path}")
        if not os.path.exists(mask_path):
            raise FileNotFoundError(f"掩码文件不存在: {mask_path}")
            
        image = tifffile.imread(image_path)
        center_image = tifffile.imread(center_path)
        instance_image = tifffile.imread(mask_path)
        
        # calculate a shift and crop the image to the crop size
        # NOTE(review): assumes a single-channel 2D image — a CYX stack would
        # raise on this unpacking; confirm against the data on disk.
        height, width = image.shape
        # Resolve the target crop size (default 256x256 when crop_size is None).
        if self.crop_size is None:
            crop_size = (256, 256)
        elif isinstance(self.crop_size, int):
            crop_size = (self.crop_size, self.crop_size)
        else:
            if len(self.crop_size) != 2:
                raise AssertionError(f"{self.crop_size} is not 2D")
            crop_size = self.crop_size
            
        # Force a uniform spatial size so crops and targets match.
        # NOTE(review): resizing the full frame to crop_size makes the random
        # crop below cover the whole image (top_left is forced to (0, 0)).
        if height != crop_size[0] or width != crop_size[1]:
            image = cv2.resize(image, (crop_size[1], crop_size[0]), interpolation=cv2.INTER_LINEAR)
            center_image = cv2.resize(center_image.astype(np.float32), (crop_size[1], crop_size[0]), interpolation=cv2.INTER_NEAREST).astype(center_image.dtype)
            instance_image = cv2.resize(instance_image.astype(np.float32), (crop_size[1], crop_size[0]), interpolation=cv2.INTER_NEAREST).astype(instance_image.dtype)
            height, width = crop_size[0], crop_size[1]
        
        actual_crop_size = crop_size
        
        # Pick a random top-left corner with enough room for the crop.
        if height < actual_crop_size[0] or width < actual_crop_size[1]:
            # Image smaller than the crop: use the whole image.
            top_left = (0, 0)
        else:
            top_left = (
                int(np.random.randint(0, height - actual_crop_size[0] + 1)),
                int(np.random.randint(0, width - actual_crop_size[1] + 1)),
            )
        # calculation of the min max bounds of the offset
        max_offset0 = int(self.max_offset * actual_crop_size[0])
        max_offset1 = int(self.max_offset * actual_crop_size[1])
        
        # Compute a translation range that keeps the shifted crop in bounds.
        if height <= actual_crop_size[0] or width <= actual_crop_size[1]:
            # Image not larger than the crop: no room to translate.
            translation = (0, 0)
        else:
            # Clamp the positive range by the remaining space to the right /
            # bottom of the crop, and by the configured maximum offset.
            max_translation_y = min(height - actual_crop_size[0] - top_left[0], max_offset0)
            max_translation_x = min(width - actual_crop_size[1] - top_left[1], max_offset1)
            
            if max_translation_y <= 0 or max_translation_x <= 0:
                translation = (0, 0)
            else:
                translation = (
                    int(np.random.randint(-min(top_left[0], max_offset0), max_translation_y + 1)),
                    int(np.random.randint(-min(top_left[1], max_offset1), max_translation_x + 1)),
                )
        top_left_translated = (
            top_left[0] + translation[0],
            top_left[1] + translation[1],
        )
        image_curr = self.get_image_crop(image, top_left_translated, actual_crop_size)
        center_image_curr = self.get_image_crop(center_image, top_left_translated, actual_crop_size)
        instance_curr = self.get_image_crop(instance_image, top_left_translated, actual_crop_size)

        image_prev = self.get_image_crop(image, top_left, actual_crop_size)
        center_image_prev = self.get_image_crop(center_image, top_left, actual_crop_size)
        instance_prev = self.get_image_crop(instance_image, top_left, actual_crop_size)

        # Synthetic flow target: -translation at every pixel that belongs to
        # a (shifted) center, zero elsewhere.
        offset = np.zeros((len(actual_crop_size), *actual_crop_size))
        offset[:, center_image_curr != 0] = -np.array(translation).reshape(-1, 1)

        sample = dict()
        sample["image_curr"] = self.convert_yx_to_cyx(image_curr, key="image")  # CYX
        sample["image_prev"] = self.convert_yx_to_cyx(image_prev, key="image")  # CYX
        sample["im_name_curr"] = full_img_name
        sample["im_name_prev"] = full_img_name
        if len(self.instance_list) != 0:
            instance_curr, label_curr = self.decode_instance(instance_curr, self.bg_id)
            instance_prev, label_prev = self.decode_instance(instance_prev, self.bg_id)
            instance_curr = self.convert_yx_to_cyx(
                instance_curr, key="instance"
            )  # CYX or CDYX
            instance_prev = self.convert_yx_to_cyx(
                instance_prev, key="instance"
            )  # CYX or CDYX
            label_curr = self.convert_yx_to_cyx(label_curr, key="label")  # CYX
            label_prev = self.convert_yx_to_cyx(label_prev, key="label")  # CYX
            sample["instance_curr"] = instance_curr
            sample["instance_prev"] = instance_prev
            sample["label_curr"] = label_curr
            sample["label_prev"] = label_prev
        if len(self.center_image_list) != 0:
            center_image_curr = self.convert_yx_to_cyx(
                center_image_curr, key="center_image"
            )  # CYX
            center_image_prev = self.convert_yx_to_cyx(
                center_image_prev, key="center_image"
            )  # CYX
            sample["center_image_curr"] = center_image_curr
            sample["center_image_prev"] = center_image_prev
            sample["flow"] = offset
        return sample

    def get_image_crop(self, image, top_left, crop_size=None):
        """Cut a crop_size window out of a 2D image at top_left (y, x).

        The window is clamped to the image bounds; if the clamped crop is
        smaller than crop_size it is resized (bilinear) back up to crop_size.
        """
        if crop_size is None:
            if self.crop_size is None:
                # Fall back to the default 256x256 crop size.
                crop_size = (256, 256)
            elif isinstance(self.crop_size, int):
                crop_size = (self.crop_size, self.crop_size)
            else:
                crop_size = self.crop_size
        
        # Clamp the crop window to the image bounds.
        height, width = image.shape
        end_y = min(top_left[0] + crop_size[0], height)
        end_x = min(top_left[1] + crop_size[1], width)
        start_y = min(top_left[0], height)
        start_x = min(top_left[1], width)
        
        cropped = image[start_y:end_y, start_x:end_x]
        
        # Force the crop to the requested size (e.g. when the window was
        # clipped at an image border).
        # NOTE(review): bilinear resize here also runs on label/center images
        # passed in from get_translated_sample — confirm this never triggers
        # for those inputs, or label values may be interpolated.
        if cropped.shape[0] != crop_size[0] or cropped.shape[1] != crop_size[1]:
            result = cv2.resize(cropped, (crop_size[1], crop_size[0]), interpolation=cv2.INTER_LINEAR)
            return result
        
        return cropped

    def __getitem__(self, index):
        # When self.size caps the epoch length, draw a random pair instead of
        # using the incoming index; with probability p_translation generate a
        # synthetically translated sample.
        if self.n_pairs == 1:
            index = 0
        else:
            index = (
                index
                if self.size is None
                else random.randint(0, len(self.pair_index) - 1)
            )
        if np.random.rand() <= self.p_translation:
            sample = self.get_translated_sample(index)
        else:
            sample = self.get_sample(index)
        
        # Check that all arrays in the sample share one spatial size.
        self._validate_sample_sizes(sample)
        
        # transform
        if self.transform is not None:
            return self.transform(sample)
        else:
            return sample
    
    def _validate_sample_sizes(self, sample):
        """Verify (and if needed force) a consistent spatial size across all
        arrays in the sample; prints a warning and resizes on mismatch."""
        if self.crop_size is None:
            expected_size = (256, 256)
        elif isinstance(self.crop_size, int):
            expected_size = (self.crop_size, self.crop_size)
        else:
            expected_size = self.crop_size
        
        # Check the raw images (CYX).
        for key in ['image_curr', 'image_prev']:
            if key in sample:
                img = sample[key]
                if img.ndim == 3:  # CYX
                    if img.shape[1] != expected_size[0] or img.shape[2] != expected_size[1]:
                        print(f"警告: {key} 尺寸不匹配: {img.shape} vs {expected_size}")
                        # Force the expected size (bilinear for raw images).
                        if img.ndim == 3:
                            sample[key] = cv2.resize(img.transpose(1, 2, 0), (expected_size[1], expected_size[0]), interpolation=cv2.INTER_LINEAR).transpose(2, 0, 1)
        
        # Check the label-like arrays (nearest-neighbour to keep labels).
        for key in ['instance_curr', 'instance_prev', 'label_curr', 'label_prev', 'center_image_curr', 'center_image_prev']:
            if key in sample:
                tensor = sample[key]
                if tensor.ndim == 3:  # CYX
                    if tensor.shape[1] != expected_size[0] or tensor.shape[2] != expected_size[1]:
                        print(f"警告: {key} 尺寸不匹配: {tensor.shape} vs {expected_size}")
                        # Force the expected size.
                        if tensor.ndim == 3:
                            sample[key] = cv2.resize(tensor.transpose(1, 2, 0), (expected_size[1], expected_size[0]), interpolation=cv2.INTER_NEAREST).transpose(2, 0, 1)
        
        # Check the flow field (C, Y, X), resized channel by channel.
        if 'flow' in sample:
            flow = sample['flow']
            if flow.ndim == 3:  # CYX
                if flow.shape[1] != expected_size[0] or flow.shape[2] != expected_size[1]:
                    print(f"警告: flow 尺寸不匹配: {flow.shape} vs {expected_size}")
                    # Force the expected size.
                    resized_flow = np.zeros((flow.shape[0], expected_size[0], expected_size[1]), dtype=flow.dtype)
                    for i in range(flow.shape[0]):
                        resized_flow[i] = cv2.resize(flow[i].astype(np.float32), (expected_size[1], expected_size[0]), interpolation=cv2.INTER_LINEAR)
                    sample['flow'] = resized_flow

    @classmethod
    def decode_instance(cls, pic, bg_id=None):
        """Split a 2D label image into an instance map and a binary class map.

        Args:
            pic: 2D integer label image.
            bg_id (int, optional): background label; only pixels with a value
                strictly greater than bg_id are treated as foreground.

        Returns:
            (instance_map, class_map): int16 instance ids (0 = background)
            and a uint8 {0, 1} foreground mask.
            NOTE(review): labels above 32767 would overflow int16 — confirm
            the label range of the data.
        """
        pic = np.array(pic, copy=False, dtype=np.uint16)
        instance_map = np.zeros((pic.shape[0], pic.shape[1]), dtype=np.int16)
        class_map = np.zeros((pic.shape[0], pic.shape[1]), dtype=np.uint8)

        if bg_id is not None:
            mask = pic > bg_id
            if mask.sum() > 0:
                instance_map[mask] = pic[mask]
                class_map[mask] = 1

        return instance_map, class_map


def get_dataset(name, dataset_opts):
    """Factory: build the dataset registered under ``name``.

    Args:
        name (string): dataset identifier (currently only "2d").
        dataset_opts (dict): keyword arguments forwarded to the dataset class.

    Returns:
        an instantiated dataset.

    Raises:
        RuntimeError: if ``name`` is not a known dataset identifier.
    """
    # Guard clause: reject unknown identifiers up front.
    if name != "2d":
        raise RuntimeError("Dataset {} not available".format(name))
    return TwoDimensionalDataset(**dataset_opts)