# -*- coding: utf-8 -*-

'''
Data-loading utilities designed specifically for H-DenseUNet.
'''

import json
from pathlib2 import Path

import cv2
import numpy as np
import torch
from albumentations import (
    PadIfNeeded,
    Compose,
    Resize
)
from torch.utils import data

from dataset.transform import to_numpy
import random


class DatasetHdenseunet(data.Dataset):
    """Slice-stacking dataset for H-DenseUNet on pre-exported ``.npy`` volumes.

    The dataset root is expected to contain one directory per case
    (``case_XXXXX/imaging/*.npy`` and, for train/valid cases,
    ``case_XXXXX/segmentation/*.npy``) plus three text files listing the
    integer case ids of the train / valid / test splits.

    Indexing is per *case*: ``__getitem__(idx)`` picks a random window of
    ``stack_num`` consecutive slices from case ``idx`` and stacks them along
    the channel dimension.
    """

    def __init__(self, root, stack_num=1, spec_classes=None, img_size=(512, 512),
                 train_case_ids_file='train.txt', valid_case_ids_file='val.txt', test_case_ids_file='test.txt',
                 train_transform=None, valid_transform=None, test_transform=None):
        """
        :param root: dataset root directory.
        :param stack_num: number of consecutive slices stacked per sample.
        :param spec_classes: label classes of interest; defaults to [0, 1, 2].
        :param img_size: (height, width) target used by :meth:`_resize`.
        :param train_case_ids_file: file (relative to root) listing train case ids.
        :param valid_case_ids_file: file (relative to root) listing valid case ids.
        :param test_case_ids_file: file (relative to root) listing test case ids.
        :param train_transform: optional callable applied to train samples.
        :param valid_transform: optional callable applied to valid samples.
        :param test_transform: optional callable applied to test samples.
        """
        self._root = Path(root)
        self._stack_num = stack_num
        # Default to the three-class setup.
        self._spec_classes = [0, 1, 2] if spec_classes is None else spec_classes

        self._img_size = img_size

        self._train_transform = train_transform
        self._valid_transform = valid_transform
        self._test_transform = test_transform

        self._get_data(train_case_ids_file, valid_case_ids_file, test_case_ids_file)
        self._split_subset()

    def _get_data(self, train_case_ids_file, valid_case_ids_file, test_case_ids_file):
        """Read the split files and collect every case's slice paths."""

        def read_txt(file):
            # One integer case id per line; blank lines are ignored.
            ids = []
            with open(file, 'r') as f:
                for line in f:
                    line = line.strip()
                    if line:
                        ids.append(int(line))
            return ids

        self._train_case = read_txt(self._root / train_case_ids_file)
        self._valid_case = read_txt(self._root / valid_case_ids_file)
        self._test_case = read_txt(self._root / test_case_ids_file)
        self._case_id = self._train_case + self._valid_case + self._test_case

        train_imgs, train_labels, train_case_slice_num = self._read_npy(self._root, self._train_case, is_test=False)
        valid_imgs, valid_labels, valid_case_slice_num = self._read_npy(self._root, self._valid_case, is_test=False)
        test_imgs, test_labels, test_case_slice_num = self._read_npy(self._root, self._test_case, is_test=True)

        self._imgs = train_imgs + valid_imgs + test_imgs
        self._labels = train_labels + valid_labels + test_labels
        self._cases = self._train_case + self._valid_case + self._test_case

        # Dataset indices are per *case*, so all three splits must be sliced
        # with case counts.  (The previous code sliced the test split with
        # image counts, which yielded an empty/incorrect test index list.)
        n_train = len(self._train_case)
        n_valid = len(self._valid_case)
        n_test = len(self._test_case)
        self._indices = list(range(len(self._cases)))
        self._train_indices = self._indices[:n_train]
        self._valid_indices = self._indices[n_train:n_train + n_valid]
        self._test_indices = self._indices[n_train + n_valid:n_train + n_valid + n_test]

        # Map case index -> [first, last] position in the flat slice lists.
        self._case_id_to_img_idx = {}
        begin = 0
        all_slice_nums = train_case_slice_num + valid_case_slice_num + test_case_slice_num
        for idx, num in enumerate(all_slice_nums):
            self._case_id_to_img_idx[idx] = [begin, begin + num - 1]
            begin += num

    def _read_npy(self, root, cases, is_test=False):
        """Collect per-slice ``.npy`` paths for the given cases.

        :param root: dataset root directory (a ``Path``).
        :param cases: iterable of integer case ids.
        :param is_test: test cases carry no segmentation, so labels are skipped.
        :return: (image_paths, label_paths, slices_per_case); ``label_paths``
                 is empty when ``is_test`` is True.
        """
        imgs = []
        labels = []
        case_slice_num = []

        for case in cases:
            case_root = root / f'case_{case:05d}'
            imaging_dir = case_root / 'imaging'
            assert imaging_dir.exists(), imaging_dir
            # Sorted so that slice order follows the file-name order.
            case_imgs = sorted(imaging_dir.glob('*.npy'))
            imgs += case_imgs

            if not is_test:
                segmentation_dir = case_root / 'segmentation'
                assert segmentation_dir.exists()
                case_labels = sorted(segmentation_dir.glob('*.npy'))
                labels += case_labels
                # Every image slice must have a matching segmentation slice.
                assert len(imgs) == len(labels)

            case_slice_num.append(len(case_imgs))

        return imgs, labels, case_slice_num

    def _split_subset(self):
        """Expose the three splits as ``torch.utils.data.Subset`` views."""
        self._train_dataset = data.Subset(self, self._train_indices)
        self._valid_dataset = data.Subset(self, self._valid_indices)
        self._test_dataset = data.Subset(self, self._test_indices)

    def _resize(self, data):
        """Pad the sample to a square, then resize to ``self._img_size``."""
        data = to_numpy(data)
        img, label = data['image'], data['label']

        # Pad to the larger of the two spatial dims so the resize keeps
        # the aspect ratio of the content.
        num = max(img.shape[0], img.shape[1])

        aug = Compose([
            PadIfNeeded(min_height=num, min_width=num,
                        border_mode=cv2.BORDER_CONSTANT, p=1),
            Resize(height=self._img_size[0], width=self._img_size[1], p=1)
        ])

        data = aug(image=img, mask=label)
        img, label = data['image'], data['mask']

        data['image'] = img
        data['label'] = label
        return data

    def case_idx_to_case_id(self, case_idx, type='all'):
        """Translate a split-local case index into the global case id.

        :param case_idx: position within the chosen split.
        :param type: one of 'all', 'train', 'valid', 'test'.
        :raises ValueError: on an unknown split name (previously this
            silently returned ``None``).
        """
        if type == 'all':
            return self._case_id[case_idx]
        elif type == 'train':
            return self._train_case[case_idx]
        elif type == 'valid':
            return self._valid_case[case_idx]
        elif type == 'test':
            return self._test_case[case_idx]
        raise ValueError(f'unknown split type: {type!r}')

    def get_stack_img(self, idx):
        """Load a random window of ``stack_num`` consecutive slices of case ``idx``.

        :return: dict with 'image' (H, W, stack_num) float32 array,
                 'label' (H, W, stack_num) array and 'index'.

        NOTE(review): test cases contribute no entries to ``self._labels``,
        so requesting a test index here will mis-index the label list — a
        pre-existing limitation of the flat layout.
        """
        begin, end = self._case_id_to_img_idx[idx]
        # '+ 1' lets the window end exactly on the last slice; it also makes
        # cases with exactly ``stack_num`` slices usable (the previous bound
        # made random.randint raise ValueError for them).
        begin_index = random.randint(begin, end - self._stack_num + 1)
        imgs = []
        labels = []
        for i in range(begin_index, begin_index + self._stack_num):
            imgs.append(np.load(str(self._imgs[i])))
            labels.append(np.load(str(self._labels[i])))

        # Stack along a trailing channel axis; __getitem__ moves it to front.
        img = np.stack(imgs, axis=2).astype('float32')
        label = np.stack(labels, axis=2)

        return {'image': img, 'label': label, 'index': idx}

    def __getitem__(self, idx):
        """Return one slice stack as (C, H, W) tensors, transform applied per split."""
        data = self.get_stack_img(idx)

        if idx in self._train_indices and self._train_transform is not None:
            data = self._train_transform(data)
        elif idx in self._valid_indices and self._valid_transform is not None:
            data = self._valid_transform(data)
        elif idx in self._test_indices and self._test_transform is not None:
            data = self._test_transform(data)

        image, label = data['image'], data['label']

        # (H, W, C) -> (C, H, W) for PyTorch.
        image = image.astype(np.float32)
        image = torch.from_numpy(image.transpose((2, 0, 1)))
        data['image'] = image

        label = torch.from_numpy(label.transpose((2, 0, 1)))
        data['label'] = label

        return data

    def __len__(self):
        # One sample per case, not per slice.
        return len(self._cases)

    @property
    def spec_classes(self):
        return self._spec_classes

    @property
    def train_dataset(self):
        return self._train_dataset

    @property
    def valid_dataset(self):
        return self._valid_dataset

    @property
    def test_dataset(self):
        return self._test_dataset

    @property
    def train_case(self):
        return self._train_case

    @property
    def valid_case(self):
        return self._valid_case

    @property
    def test_case(self):
        return self._test_case


def testkits19(root="/datasets/3Dircadb/chengkun_only_liver"):
    """Visual smoke test: iterate the train split and display each slice.

    :param root: dataset root directory (previously hard-coded; now a
        defaulted parameter so the check can run against any export).
    """
    from dataset.transform import CropTransform
    from torch.utils.data import DataLoader, RandomSampler
    import matplotlib.pyplot as plt

    transform = CropTransform(output_size=(224, 224), roi_error_range=15, use_roi=False)
    dataset = DatasetHdenseunet(root=root, stack_num=5, spec_classes=[0, 1, 2],
                                img_size=(512, 512),
                                train_transform=transform, valid_transform=transform)
    transform.eval()
    sampler = RandomSampler(dataset.train_dataset)
    train_loader = DataLoader(dataset.train_dataset, batch_size=1, sampler=sampler,
                              num_workers=1, pin_memory=True)
    for index, data in enumerate(train_loader):
        image = data["image"].squeeze().numpy()
        label = data["label"].squeeze().numpy()
        # Show each slice of the stack next to its segmentation mask.
        for i in range(image.shape[0]):
            plt.subplot(1, 2, 1)
            plt.imshow(image[i], cmap="gray")
            plt.subplot(1, 2, 2)
            plt.imshow(label[i], cmap="gray")
            plt.show()



# Manual visual smoke test; only runs when executed as a script.
if __name__ == '__main__':
    testkits19()