# -*- coding: utf-8 -*-

'''
Quick implementation: 3D stacks are treated independently
(no connectivity between any of the stacked 3D samples).
'''

import json
from pathlib2 import Path
import math

import cv2
import numpy as np
import torch
from albumentations import (
    PadIfNeeded,
    Compose,
    Resize
)
from torch.utils import data
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler

from dataset.transform import to_numpy

class IRCAD(data.Dataset):
    """IRCAD CT segmentation dataset built from per-slice ``.npy`` files.

    Consecutive axial slices of each case are grouped into stacks of
    ``stack_num`` slices; every dataset item is one stack.  ``__getitem__``
    returns a dict with:

    * ``image``: float32 array of shape ``(1, stack, H, W)``
    * ``label``: int64 array of shape ``(stack, H, W)`` — empty for test
      cases, which ship without segmentation files
    * ``index``: the flat sample index

    Classes are ``background``/``kidney``/``tumor``; ``spec_classes`` can
    merge/remap them (e.g. ``[0, 1, 1]`` folds tumor into kidney).
    """

    def __init__(self, root, stack_num=1, spec_classes=None, img_size=(512, 512),
                 train_case_ids_file='train.txt', valid_case_ids_file='val.txt', test_case_ids_file='test.txt',
                 use_roi=False, roi_file=None, roi_error_range=0,
                 train_transform=None, valid_transform=None, test_transform=None):
        """
        Args:
            root: dataset root holding ``case_XXXXX`` directories and the
                train/valid/test case-id text files (one integer id per line).
            stack_num: number of consecutive slices per sample.
            spec_classes: optional class remapping; must have one entry per
                original class (see :meth:`get_classes_name`).
            img_size: (H, W) every slice is padded/resized to.
            use_roi: if True, restrict each case's slice range to the kidney
                ROI (± ``roi_error_range`` slices) read from JSON ``roi_file``.
            roi_file: ROI JSON path relative to ``root`` (required if use_roi).
            roi_error_range: slack added on both ends of the ROI z-range.
            train_transform / valid_transform / test_transform: optional
                callables applied to the sample dict of the matching subset.
        """
        self._root = Path(root)
        self._stack_num = stack_num
        if spec_classes is None:
            self._spec_classes = [0, 1, 2]
        else:
            assert len(self.get_classes_name(spec=False)) == len(spec_classes)
            self._spec_classes = spec_classes

        self._img_size = img_size

        self._use_roi = use_roi
        if use_roi:
            self._rois = None
            _roi_file = self._root / roi_file
            assert _roi_file.exists()
            with open(_roi_file, 'r') as f:
                self._rois = json.load(f)
            self._roi_error_range = roi_error_range

        self._train_transform = train_transform
        self._valid_transform = valid_transform
        self._test_transform = test_transform

        self._get_data(train_case_ids_file, valid_case_ids_file, test_case_ids_file)
        self._split_subset()

        self._num_classes = len(self.get_classes_name())
        # Probe one sample to discover the channel dimension of 'image'.
        self._img_channels = self.__getitem__(0)['image'].shape[0]

    def get_classes_name(self, spec=True):
        """Return class names.

        With ``spec=False`` the full original set is returned as an
        ``np.ndarray``; with ``spec=True`` the (possibly merged) names
        selected by ``self._spec_classes`` are returned as a de-duplicated
        ``list`` in first-appearance order.
        """
        classes_name = np.array(['background', 'kidney', 'tumor'])

        if not spec:
            return classes_name

        spec_classes_name = []
        for name in classes_name[self._spec_classes]:
            if name not in spec_classes_name:
                spec_classes_name.append(name)
        return spec_classes_name

    def _get_data(self, train_case_ids_file, valid_case_ids_file, test_case_ids_file):
        """Read the case-id lists, collect all stacks, and build the flat
        index bookkeeping for the train/valid/test splits."""

        def read_txt(file):
            # One integer case id per line.  Use a context manager (the
            # original leaked the file handle) and skip blank lines so a
            # trailing newline does not raise ValueError.
            with open(file, 'r') as f:
                return [int(line) for line in f if line.strip()]

        train_case_ids_file = self._root / train_case_ids_file
        valid_case_ids_file = self._root / valid_case_ids_file
        test_case_ids_file = self._root / test_case_ids_file
        self._train_case = read_txt(train_case_ids_file)
        self._valid_case = read_txt(valid_case_ids_file)
        self._test_case = read_txt(test_case_ids_file)
        self._case_id = self._train_case + self._valid_case + self._test_case

        train_imgs, train_labels, train_case_slice_num = self._read_npy(self._root, self._train_case, self._stack_num, is_test=False)
        valid_imgs, valid_labels, valid_case_slice_num = self._read_npy(self._root, self._valid_case, self._stack_num, is_test=False)
        test_imgs, test_labels, test_case_slice_num = self._read_npy(self._root, self._test_case, self._stack_num, is_test=True)

        self._imgs = train_imgs + valid_imgs + test_imgs
        self._labels = train_labels + valid_labels + test_labels

        # Flat sample indices, partitioned contiguously as train|valid|test.
        self._indices = list(range(len(self._imgs)))
        self._train_indices = self._indices[:len(train_imgs)]
        self._valid_indices = self._indices[len(train_imgs):len(train_imgs) + len(valid_imgs)]
        self._test_indices = self._indices[
                             len(train_imgs) + len(valid_imgs): len(train_imgs) + len(valid_imgs) + len(test_imgs)]

        # Cumulative per-case stack boundaries, both globally and per split.
        # Entry i..i+1 delimits the flat indices belonging to case i.
        idx = 0
        self._case_slice_indices = [0]
        self._train_case_slice_indices = [0]
        for num in train_case_slice_num:
            idx += num
            self._case_slice_indices.append(idx)
            self._train_case_slice_indices.append(idx)

        self._valid_case_slice_indices = [self._train_case_slice_indices[-1]]
        for num in valid_case_slice_num:
            idx += num
            self._case_slice_indices.append(idx)
            self._valid_case_slice_indices.append(idx)

        self._test_case_slice_indices = [self._valid_case_slice_indices[-1]]
        for num in test_case_slice_num:
            idx += num
            self._case_slice_indices.append(idx)
            self._test_case_slice_indices.append(idx)

    def _read_npy(self, root, cases, stack_num, is_test=False):
        """Collect slice file paths for ``cases``, grouped into stacks.

        Returns ``(imgs, labels, case_slice_num)`` where ``imgs``/``labels``
        are lists of per-stack path lists and ``case_slice_num`` holds the
        number of stacks produced per case.  Test cases have no
        segmentation files, so their label entries are empty lists.
        """
        case_slice_num = []
        imgs = []
        labels = []

        for case in cases:
            case_root = root / f'case_{case:05d}'
            imaging_dir = case_root / 'imaging'
            assert imaging_dir.exists()
            case_imgs = sorted(imaging_dir.glob('*.npy'))

            min_z = 0
            max_z = len(case_imgs)
            if self._use_roi:
                # Keep only slices inside the (slack-padded) kidney ROI.
                roi = self._rois[f'case_{case:05d}']['kidney']
                min_z = max(min_z, roi['min_z'] - self._roi_error_range)
                max_z = min(max_z, roi['max_z'] + self._roi_error_range)

            case_imgs = case_imgs[min_z: max_z]
            case_labels = []

            if not is_test:
                segmentation_dir = case_root / 'segmentation'
                assert segmentation_dir.exists()
                case_labels = sorted(segmentation_dir.glob('*.npy'))
                case_labels = case_labels[min_z: max_z]
                assert len(case_imgs) == len(case_labels)

            temp_case_images = []
            temp_labels = []
            # BUG FIX: the original ranged over len(case_labels), which is 0
            # when is_test=True, silently dropping every test image.  Stack
            # over the images instead.  The last stack re-uses earlier
            # slices so each stack has stack_num slices; begin_index is
            # clamped at 0 to avoid negative-slice wraparound on cases
            # shorter than stack_num.
            for i in range(math.ceil(len(case_imgs) / self._stack_num)):
                end_index = min((i + 1) * self._stack_num, len(case_imgs))
                begin_index = max(0, min(i * self._stack_num, end_index - self._stack_num))
                temp_case_images.append(case_imgs[begin_index:end_index])
                temp_labels.append(case_labels[begin_index:end_index])

            imgs.extend(temp_case_images)
            labels.extend(temp_labels)
            case_slice_num.append(len(temp_case_images))

        return imgs, labels, case_slice_num

    def _split_subset(self):
        """Expose the three splits as torch Subset views over self."""
        self._train_dataset = data.Subset(self, self._train_indices)
        self._valid_dataset = data.Subset(self, self._valid_indices)
        self._test_dataset = data.Subset(self, self._test_indices)

    def _resize(self, data):
        """Pad the slice to a square, then resize to ``self._img_size``,
        keeping the label mask aligned with the image."""
        data = to_numpy(data)
        img, label = data['image'], data['label']

        side = max(img.shape[0], img.shape[1])

        aug = Compose([
            PadIfNeeded(min_height=side, min_width=side,
                        border_mode=cv2.BORDER_CONSTANT, p=1),
            Resize(height=self._img_size[0], width=self._img_size[1], p=1)
        ])

        if label is not None:
            augmented = aug(image=img, mask=label)
            img, label = augmented['image'], augmented['mask']
        else:
            # Test samples carry no mask; transform the image alone.
            img = aug(image=img)['image']

        data['image'] = img
        data['label'] = label
        return data

    def _default_transform(self, data):
        """Resize if needed, cast image to float32 / label to int64, and
        apply the ``spec_classes`` remapping to the label."""
        if (data['image'].shape[0], data['image'].shape[1]) != self._img_size:
            data = self._resize(data)

        image, label = data['image'], data['label']

        image = image.astype(np.float32)
        data['image'] = image

        if label is not None:
            label = label.astype(np.int64)

            if self._spec_classes != [0, 1, 2]:
                # Precompute the per-class masks first so remapping one class
                # cannot corrupt the lookup of the next.
                idx = list(range(len(self.get_classes_name(spec=False))))
                masks = [np.where(label == i) for i in idx]
                spec_class_idx = []
                for i in self._spec_classes:
                    if i not in spec_class_idx:
                        spec_class_idx.append(i)

                for mask, spec_class in zip(masks, self._spec_classes):
                    label[mask] = spec_class_idx.index(spec_class)

            data['label'] = label

        else:
            # Keep the key present for downstream collate code.
            data['label'] = torch.Tensor()

        return data

    def __len__(self):
        return len(self._imgs)

    def __getitem__(self, idx):
        """Load, transform, and stack every slice of sample ``idx``."""
        image = []
        label = []

        for i, img_path in enumerate(self._imgs[idx]):
            image_data = np.load(str(img_path))
            # Test cases ship without segmentations: feed None through the
            # transform and collect no label (the original crashed here).
            label_paths = self._labels[idx]
            label_data = np.load(str(label_paths[i])) if i < len(label_paths) else None
            data = self._default_transform({'image': image_data, 'label': label_data})
            image.append(data['image'])
            if label_data is not None:
                label.append(data['label'])

        image = np.array(image)
        label = np.array(label)
        # Prepend a channel axis: (1, stack, H, W).
        image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
        image = image.astype(np.float32)
        label = label.astype(np.int64)

        data = {'image': image, 'label': label, 'index': idx}

        if idx in self._train_indices and self._train_transform is not None:
            data = self._train_transform(data)
        elif idx in self._valid_indices and self._valid_transform is not None:
            data = self._valid_transform(data)
        elif idx in self._test_indices and self._test_transform is not None:
            data = self._test_transform(data)

        return data

    def img_idx_to_case_idx(self, idx):
        """Map a flat sample index to the index of its owning case
        (0 if ``idx`` falls outside every recorded range)."""
        case_idx = 0
        for i in range(len(self._case_slice_indices) - 1):
            if self._case_slice_indices[i] <= idx < self._case_slice_indices[i + 1]:
                case_idx = i
                break
        return case_idx

    def case_idx_to_case_id(self, case_idx, type='all'):
        """Map a case index within split ``type`` back to its case id.

        Raises:
            ValueError: if ``type`` is not one of 'all'/'train'/'valid'/'test'
                (the original silently returned None).
        """
        if type == 'all':
            return self._case_id[case_idx]
        elif type == 'train':
            return self._train_case[case_idx]
        elif type == 'valid':
            return self._valid_case[case_idx]
        elif type == 'test':
            return self._test_case[case_idx]
        raise ValueError(f'unknown split type: {type!r}')

    @property
    def train_dataset(self):
        return self._train_dataset

    @property
    def valid_dataset(self):
        return self._valid_dataset

    @property
    def test_dataset(self):
        return self._test_dataset

    @property
    def train_case_slice_indices(self):
        return self._train_case_slice_indices

    @property
    def valid_case_slice_indices(self):
        return self._valid_case_slice_indices

    @property
    def test_case_slice_indices(self):
        return self._test_case_slice_indices

    @property
    def train_case(self):
        return self._train_case

    @property
    def valid_case(self):
        return self._valid_case

    @property
    def test_case(self):
        return self._test_case

    @property
    def img_channels(self):
        return self._img_channels

    @property
    def num_classes(self):
        return self._num_classes


if __name__ == '__main__':
    # Smoke test: build the dataset, then walk the validation split and
    # report, per batch, the owning case plus the tensor shapes.
    ircad = IRCAD(root="/datasets/DongbeiDaxue/chengkunv2", stack_num=16,
                  spec_classes=[0, 1, 2], img_size=(256, 256),
                  use_roi=False, roi_file=None, roi_error_range=5,
                  train_transform=None, valid_transform=None)

    print(ircad.img_channels)

    valid_subset = ircad.valid_dataset

    # NOTE: the sampler is constructed but never handed to the loader.
    sampler = SequentialSampler(valid_subset)
    loader = DataLoader(valid_subset, batch_size=1)

    for batch in loader:
        sample_index = batch["index"]
        owning_case = ircad.img_idx_to_case_idx(sample_index)
        owning_id = ircad.case_idx_to_case_id(owning_case)

        print(owning_case, owning_id, batch["image"].shape, batch["label"].shape)
        print()