# -*- coding: utf-8 -*-

'''
Instance-level data reading for the KiTS dataset (one sample per slice,
with per-tumor-instance loss weights).
'''

import json
from pathlib2 import Path

import cv2
import numpy as np
import torch
from albumentations import (
    PadIfNeeded,
    Compose,
    Resize
)
from torch.utils import data

from dataset.transform import to_numpy
import matplotlib.pyplot as plt
import math

class KiTSInstance(data.Dataset):
    """Instance-level slice dataset for KiTS-style kidney-tumor data.

    Each item is a single axial slice (optionally stacked with neighbouring
    slices along the channel axis) loaded from per-case ``.npy`` files under
    ``root``.  Cases are partitioned into train/valid/test splits via
    case-id list files, and every labelled sample carries a per-pixel loss
    weight derived from per-tumor-instance pixel counts read from a sidecar
    analysis file.
    """

    def __init__(self, root, stack_num=1, img_size=(512, 512),
                 train_case_ids_file='train.txt', valid_case_ids_file='val.txt', test_case_ids_file='test.txt',
                 train_transform=None, valid_transform=None, test_transform=None):
        """
        Args:
            root: dataset root containing ``case_XXXXX`` directories and the
                three case-id list files.
            stack_num: controls how many neighbouring slices are stacked per
                sample (effective stack depth is ``2 * (stack_num // 2) + 1``).
            img_size: (height, width) every sample is padded/resized to.
            train_case_ids_file / valid_case_ids_file / test_case_ids_file:
                text files under ``root`` with one integer case id per line.
            train_transform / valid_transform / test_transform: optional
                callables applied to the raw sample dict, per split.
        """
        self._root = Path(root)
        self._stack_num = stack_num

        self._img_size = img_size

        self._train_transform = train_transform
        self._valid_transform = valid_transform
        self._test_transform = test_transform

        self._get_data(train_case_ids_file, valid_case_ids_file, test_case_ids_file)
        self._split_subset()

        # Per-case tumor-instance pixel counts: {case_id: {tumor_idx: size}}.
        # Each line of the sidecar file is "<case_id>\t<idx>:<size>,<idx>:<size>,...".
        # NOTE(review): hard-coded relative path — this only works when the
        # process is started from the expected working directory.
        self._instance_cal = {}
        with open("../Dongbeidaxue/only_tumor_analysis", "r") as file:
            for line in file:
                # NOTE(review): local name `data` shadows the module-level
                # `torch.utils.data` import (harmless here, but confusing).
                data = line.strip().split("\t")
                self._instance_cal[int(data[0])] = {}
                for item in data[1].split(","):
                    tumor_idx = int(item.split(":")[0])
                    tumor_size = int(item.split(":")[1])
                    self._instance_cal[int(data[0])][tumor_idx] = tumor_size

        # Probe one full sample to learn the channel count; this runs the
        # entire load/transform pipeline once for index 0.
        self._img_channels = self.__getitem__(0)['image'].shape[0]




    def _get_data(self, train_case_ids_file, valid_case_ids_file, test_case_ids_file):
        """Read the case-id lists, collect per-slice file paths, and build
        the flat slice-index bookkeeping for the three splits."""
        def read_txt(file):
            # One integer case id per line.
            d = []
            f = open(file, 'r')
            for line in f:
                d.append(int(line))
            return d

        train_case_ids_file = self._root / train_case_ids_file
        valid_case_ids_file = self._root / valid_case_ids_file
        test_case_ids_file = self._root / test_case_ids_file
        self._train_case = read_txt(train_case_ids_file)
        self._valid_case = read_txt(valid_case_ids_file)
        self._test_case = read_txt(test_case_ids_file)
        self._case_id = self._train_case + self._valid_case + self._test_case

        train_imgs, train_labels, train_case_slice_num = self._read_npy(self._root, self._train_case, is_test=False)
        valid_imgs, valid_labels, valid_case_slice_num = self._read_npy(self._root, self._valid_case, is_test=False)
        test_imgs, test_labels, test_case_slice_num = self._read_npy(self._root, self._test_case, is_test=True)

        # Global flat ordering: all train slices, then valid, then test.
        self._imgs = train_imgs + valid_imgs + test_imgs
        self._labels = train_labels + valid_labels + test_labels

        self._indices = list(range(len(self._imgs)))
        self._train_indices = self._indices[:len(train_imgs)]
        self._valid_indices = self._indices[len(train_imgs):len(train_imgs) + len(valid_imgs)]
        self._test_indices = self._indices[
                             len(train_imgs) + len(valid_imgs): len(train_imgs) + len(valid_imgs) + len(test_imgs)]

        # Cumulative slice boundaries: case i occupies the half-open range
        # [_case_slice_indices[i], _case_slice_indices[i + 1]).
        idx = 0
        self._case_slice_indices = [0]
        self._train_case_slice_indices = [0]
        for num in train_case_slice_num:
            idx += num
            self._case_slice_indices.append(idx)
            self._train_case_slice_indices.append(idx)

        self._valid_case_slice_indices = [self._train_case_slice_indices[-1]]
        for num in valid_case_slice_num:
            idx += num
            self._case_slice_indices.append(idx)
            self._valid_case_slice_indices.append(idx)

        self._test_case_slice_indices = [self._valid_case_slice_indices[-1]]
        for num in test_case_slice_num:
            idx += num
            self._case_slice_indices.append(idx)
            self._test_case_slice_indices.append(idx)

    def _read_npy(self, root, cases, is_test=False):
        """Collect per-slice ``.npy`` paths for the given cases.

        Returns (image paths, label paths, per-case slice counts).  The
        label list stays empty when ``is_test`` is True.
        """
        imgs = []
        labels = []
        case_slice_num = []

        for case in cases:
            case_root = root / f'case_{case:05d}'
            imaging_dir = case_root / 'imaging'
            assert imaging_dir.exists(), imaging_dir
            case_imgs = sorted(list(imaging_dir.glob('*.npy')))

            min_z = 0
            max_z = len(case_imgs)

            # Currently a no-op slice; kept as a hook for restricting the
            # per-case z-range.
            case_imgs = case_imgs[min_z: max_z]
            imgs += case_imgs

            if not is_test:
                segmentation_dir = case_root / 'segmentation'
                assert segmentation_dir.exists()
                case_labels = sorted(list(segmentation_dir.glob('*.npy')))
                case_labels = case_labels[min_z: max_z]
                labels += case_labels
                assert len(imgs) == len(labels)

            case_slice_num.append(len(case_imgs))

        return imgs, labels, case_slice_num

    def _split_subset(self):
        # torch Subsets view this dataset through the per-split index lists.
        self._train_dataset = data.Subset(self, self._train_indices)
        self._valid_dataset = data.Subset(self, self._valid_indices)
        self._test_dataset = data.Subset(self, self._test_indices)



    def idx_to_name(self, idx):
        """Return '<case_dir>/<slice_stem>' for slice `idx` (drops the
        '.npy' suffix via the `[:-4]` slice)."""
        path = self._imgs[idx]
        name = Path(path.parts[-3]) / Path(path.parts[-1][:-4])
        return name

    def _default_transform(self, data, case_id):
        """Convert a sample dict to tensors and attach per-pixel loss weights.

        Weights: background -> 1, kidney (label 1) -> 2, each tumor instance
        (label >= 2) -> 4 * log(e + 97076 / instance_size), i.e. smaller
        tumors receive larger weights.  Afterwards all tumor instance ids
        are collapsed into the single tumor class 2.
        """
        index = data["index"]
        if (data['image'].shape[0], data['image'].shape[1]) != self._img_size:
            data = self._resize(data)

        image, label = data['image'], data['label']

        # HWC float32 numpy -> CHW torch tensor.
        image = image.astype(np.float32)
        image = image.transpose((2, 0, 1))
        image = torch.from_numpy(image)
        data['image'] = image

        if label is not None:
            label = label.astype(np.int64)
            label = torch.from_numpy(label)
            data['label'] = label

        else:
            data['label'] = torch.Tensor()
        data["index"] = index

        # NOTE(review): for test-split samples the local `label` is still
        # None here, so `label.shape` would raise AttributeError — the
        # weighting below only works for labelled (train/valid) samples.
        weight = torch.ones(label.shape)
        weight[label == 1] = 2
        for i in torch.unique(label).numpy():
            if i < 2:
                continue
            # 97076 is presumably a reference tumor pixel count used to
            # normalize instance sizes — TODO confirm its origin.
            w =  4 * math.log(math.e + 97076 / self._instance_cal[case_id][i])
            #w = 4 * (1 + math.log(5602914 / self._instance_cal[case_id][i]))
            #if w == 0:
            #    w = 1
            #if w > 100:
            #    w = 100
            weight[label == i] = w
        data["weight"] = weight

        # Collapse every tumor instance id into the single class 2.
        data["label"][label >= 2] = 2

        return data

    @staticmethod
    def normalize(vol):
        """Clip HU values to [-512, 512] and min-max scale to [0, 1]."""
        hu_max = 512
        hu_min = -512
        vol = np.clip(vol, hu_min, hu_max)

        mxval = np.max(vol)
        mnval = np.min(vol)
        # max(..., 1e-3) guards against division by zero on constant volumes.
        volume_norm = (vol - mnval) / max(mxval - mnval, 1e-3)

        return volume_norm

    def _resize(self, data):
        """Pad the slice to a square, then resize to the target image size."""
        data = to_numpy(data)
        img, label = data['image'], data['label']

        num = max(img.shape[0], img.shape[1])

        aug = Compose([
            PadIfNeeded(min_height=num, min_width=num,
                        border_mode=cv2.BORDER_CONSTANT, p=1),
            Resize(height=self._img_size[0], width=self._img_size[1], p=1)
        ])

        data = aug(image=img, mask=label)
        img, label = data['image'], data['mask']

        data['image'] = img
        data['label'] = label
        return data

    def img_idx_to_case_idx(self, idx):
        """Map a flat slice index to its case position by linearly scanning
        the cumulative slice boundaries (returns 0 for out-of-range idx)."""
        case_idx = 0
        for i in range(len(self._case_slice_indices) - 1):
            if self._case_slice_indices[i] <= idx < self._case_slice_indices[i + 1]:
                case_idx = i
                break
        return case_idx

    def case_idx_to_case_id(self, case_idx, type='all'):
        """Translate a case position into the actual case id within the
        chosen split ('all', 'train', 'valid' or 'test').  Implicitly
        returns None for any other `type` value."""
        if type == 'all':
            return self._case_id[case_idx]
        elif type == 'train':
            return self._train_case[case_idx]
        elif type == 'valid':
            return self._valid_case[case_idx]
        elif type == 'test':
            return self._test_case[case_idx]

    def get_stack_img(self, idx):
        """Load slice `idx` plus its neighbours as an H x W x C float32
        stack; neighbour indices are clamped to the owning case's slice
        range.  Test-split samples get `label=None`."""
        case_idx = self.img_idx_to_case_idx(idx)
        imgs = []
        for i in range(idx - self._stack_num // 2, idx + self._stack_num // 2 + 1):
            # Clamp so the stack never crosses a case boundary (edge slices
            # are repeated instead).
            if i < self._case_slice_indices[case_idx]:
                i = self._case_slice_indices[case_idx]
            elif i >= self._case_slice_indices[case_idx + 1]:
                i = self._case_slice_indices[case_idx + 1] - 1
            img_path = self._imgs[i]
            img = np.load(str(img_path))
            imgs.append(img)
        img = np.stack(imgs, axis=2)

        # NOTE(review): `in` on a list is O(n) per lookup.
        if idx in self._test_indices:
            label = None
        else:
            label_path = self._labels[idx]
            label = np.load(str(label_path))

        img = img.astype("float32")

        roi =  {}
        data = {'image': img, 'label': label, 'index': idx, 'roi': roi}

        return data



    def __getitem__(self, idx):
        """Return the sample dict (image/label/index/roi/weight tensors) for
        flat slice index `idx`, applying the split-specific transform
        before the default tensor conversion."""
        data = self.get_stack_img(idx)

        if idx in self._train_indices and self._train_transform is not None:
            data = self._train_transform(data)
        elif idx in self._valid_indices and self._valid_transform is not None:
            data = self._valid_transform(data)
        elif idx in self._test_indices and self._test_transform is not None:
            data = self._test_transform(data)



        # Resolve the case id so instance weights can be looked up.
        case_id = self.img_idx_to_case_idx(idx)
        case_id = self.case_idx_to_case_id(case_id, type="all")


        data = self._default_transform(data, case_id)

        return data

    def __len__(self):
        # Total number of slices across all splits.
        return len(self._imgs)

    @property
    def img_channels(self):
        # Channel count of one sample image (probed in __init__).
        return self._img_channels



    @property
    def train_dataset(self):
        return self._train_dataset

    @property
    def valid_dataset(self):
        return self._valid_dataset

    @property
    def test_dataset(self):
        return self._test_dataset

    @property
    def train_case_slice_indices(self):
        return self._train_case_slice_indices

    @property
    def valid_case_slice_indices(self):
        return self._valid_case_slice_indices

    @property
    def test_case_slice_indices(self):
        return self._test_case_slice_indices

    @property
    def train_case(self):
        return self._train_case

    @property
    def valid_case(self):
        return self._valid_case

    @property
    def test_case(self):
        return self._test_case



def testkits19():
    """Visual smoke test: iterate the validation split in random order and
    show image / label / weight triplets side by side."""
    from dataset.transform import MedicalTransform
    from torch.utils.data import DataLoader, RandomSampler, SequentialSampler

    aug = MedicalTransform(output_size=(512, 512), roi_error_range=15, use_roi=False)
    ds = KiTSInstance(root="/datasets/DongbeiDaxue/chengkun_remove_only_tumor", stack_num=0, img_size=(512, 512),
                      train_transform=aug, valid_transform=None)
    loader = DataLoader(ds._valid_dataset, batch_size=1,
                        sampler=RandomSampler(ds._valid_indices),
                        num_workers=1, pin_memory=True)

    for step, batch in enumerate(loader):
        img_t = batch["image"]
        lbl_t = batch["label"]
        wgt_t = batch["weight"]
        print(img_t.shape, lbl_t.shape, wgt_t.shape)

        img = img_t.squeeze().numpy()
        lbl = lbl_t.squeeze().numpy()
        wgt = wgt_t.squeeze().numpy()
        print(np.unique(lbl))
        print(np.unique(wgt))

        # One panel per array, left to right: image, label, weight.
        for pos, arr in ((1, img), (2, lbl), (3, wgt)):
            plt.subplot(1, 3, pos)
            plt.imshow(arr, cmap="gray")
        plt.show()



if __name__ == '__main__':
    # Manual smoke test; requires the KiTS .npy dataset on disk and a
    # matplotlib display backend.
    testkits19()