# coding: utf-8

'''
Train on the per-class data produced by the clustering step.
'''

import json
from pathlib2 import Path

import cv2
import numpy as np
import torch
from albumentations import (
    PadIfNeeded,
    Compose,
    Resize
)
from torch.utils import data

from dataset.transform import to_numpy

import matplotlib.pyplot as plt
from skimage import measure


def garbor_filter(image):
    """Apply a bank of 6 oriented Gabor filters to ``image`` and return the
    histogram-equalized mean response.

    Args:
        image: 2-D array with values assumed in [0, 1] (it is scaled by 255
            and cast to uint8 before filtering) — TODO confirm range at callers.

    Returns:
        uint8 2-D array: the per-pixel mean of the 6 filter responses,
        equalized with ``cv2.equalizeHist``.
    """
    image = (image * 255).astype(np.uint8)

    # Build one 2x2 Gabor kernel per orientation k*pi/6, k = 0..5.
    kernels = []
    for theta in (k * np.pi / 6 for k in range(6)):
        kern = cv2.getGaborKernel((2, 2), sigma=1.0, theta=theta,
                                  lambd=np.pi / 2.0, gamma=0.5, psi=0,
                                  ktype=cv2.CV_32F)
        kern /= 1.5 * kern.sum()
        kernels.append(kern)

    # BUG FIX: the original looped `for kern in filters3[i]`, which iterates
    # the ROWS of each 2-D kernel and convolves with 1-D row slices instead
    # of the full Gabor kernel. Apply each complete kernel exactly once and
    # average the responses.
    result = np.zeros(image.shape, dtype=np.float64)
    for kern in kernels:
        result += cv2.filter2D(image, cv2.CV_8UC1, kern)
    result = (result / len(kernels)).astype(np.uint8)

    return cv2.equalizeHist(result)


class KiTS19GaborCluster(data.Dataset):
    """Slice-level dataset whose segmentation labels are refined at load time
    with a Gabor-filter texture clustering step (see ``garbor_filter``).

    ``root`` is expected to contain one ``case_XXXXX`` directory per case with
    ``imaging`` (and, for train/valid, ``segmentation``) sub-directories of
    per-slice ``.npy`` files, plus the train/valid/test case-id text files.
    """

    def __init__(self, root, stack_num=1, img_size=(512, 512),
                 train_case_ids_file='train.txt', valid_case_ids_file='val.txt', test_case_ids_file='test.txt',
                 use_roi=False, roi_file=None, roi_error_range=0,
                 train_transform=None, valid_transform=None, test_transform=None):
        """
        Args:
            root: dataset root directory.
            stack_num: number of adjacent slices stacked as image channels.
            img_size: (height, width) every sample is resized to.
            train_case_ids_file / valid_case_ids_file / test_case_ids_file:
                text files under ``root`` with one integer case id per line.
            use_roi: if True, restrict slices to the kidney ROI from roi_file.
            roi_file: JSON file under ``root`` mapping case names to ROIs.
            roi_error_range: slack (in slices) added around the ROI z-range.
            train_transform / valid_transform / test_transform: optional
                callables applied to the sample dict of the matching split.
        """
        self._root = Path(root)
        self._stack_num = stack_num

        self._img_size = img_size

        self._use_roi = use_roi
        # Set unconditionally: the original only assigned this inside the
        # `if use_roi` branch, so the `roi_error_range` property raised
        # AttributeError whenever use_roi was False.
        self._roi_error_range = roi_error_range
        if use_roi:
            _roi_file = self._root / roi_file
            assert _roi_file.exists()
            with open(_roi_file, 'r') as f:
                self._rois = json.load(f)

        self._train_transform = train_transform
        self._valid_transform = valid_transform
        self._test_transform = test_transform

        self._get_data(train_case_ids_file, valid_case_ids_file, test_case_ids_file)
        self._split_subset()

        # Classes 0..5; class 5 is added by the Gabor refinement below.
        self._num_classes = 6
        # Probe one sample to discover the channel count (== stack_num).
        self._img_channels = self.__getitem__(0)['image'].shape[0]

    def _get_data(self, train_case_ids_file, valid_case_ids_file, test_case_ids_file):
        """Read the split files, collect per-slice paths and build the
        flat index ranges / per-case slice offsets for each split."""

        def read_txt(file):
            # The original never closed the handle and crashed on blank
            # lines; use a context manager and skip empty lines.
            with open(file, 'r') as f:
                return [int(line) for line in f if line.strip()]

        train_case_ids_file = self._root / train_case_ids_file
        valid_case_ids_file = self._root / valid_case_ids_file
        test_case_ids_file = self._root / test_case_ids_file
        self._train_case = read_txt(train_case_ids_file)
        self._valid_case = read_txt(valid_case_ids_file)
        self._test_case = read_txt(test_case_ids_file)
        self._case_id = self._train_case + self._valid_case + self._test_case

        train_imgs, train_labels, train_case_slice_num = self._read_npy(self._root, self._train_case, is_test=False)
        valid_imgs, valid_labels, valid_case_slice_num = self._read_npy(self._root, self._valid_case, is_test=False)
        test_imgs, test_labels, test_case_slice_num = self._read_npy(self._root, self._test_case, is_test=True)

        self._imgs = train_imgs + valid_imgs + test_imgs
        self._labels = train_labels + valid_labels + test_labels

        # Contiguous flat indices: [train | valid | test].
        self._indices = list(range(len(self._imgs)))
        n_train, n_valid, n_test = len(train_imgs), len(valid_imgs), len(test_imgs)
        self._train_indices = self._indices[:n_train]
        self._valid_indices = self._indices[n_train:n_train + n_valid]
        self._test_indices = self._indices[n_train + n_valid:n_train + n_valid + n_test]

        # Frozensets for O(1) split-membership tests in __getitem__ /
        # get_stack_img (the original did O(n) list scans per sample).
        self._train_index_set = frozenset(self._train_indices)
        self._valid_index_set = frozenset(self._valid_indices)
        self._test_index_set = frozenset(self._test_indices)

        # Cumulative slice offsets: _case_slice_indices[i] is the flat index
        # of the first slice of case i; the per-split lists share the same
        # global offsets so flat indices map directly to cases.
        idx = 0
        self._case_slice_indices = [0]
        self._train_case_slice_indices = [0]
        for num in train_case_slice_num:
            idx += num
            self._case_slice_indices.append(idx)
            self._train_case_slice_indices.append(idx)

        self._valid_case_slice_indices = [self._train_case_slice_indices[-1]]
        for num in valid_case_slice_num:
            idx += num
            self._case_slice_indices.append(idx)
            self._valid_case_slice_indices.append(idx)

        self._test_case_slice_indices = [self._valid_case_slice_indices[-1]]
        for num in test_case_slice_num:
            idx += num
            self._case_slice_indices.append(idx)
            self._test_case_slice_indices.append(idx)

    def _read_npy(self, root, cases, is_test=False):
        """Collect sorted slice paths for ``cases``.

        Returns (image_paths, label_paths, per_case_slice_counts); label
        paths stay empty when ``is_test`` is True.
        """
        imgs = []
        labels = []
        case_slice_num = []

        for case in cases:
            case_root = root / f'case_{case:05d}'
            imaging_dir = case_root / 'imaging'
            assert imaging_dir.exists(), imaging_dir
            case_imgs = sorted(imaging_dir.glob('*.npy'))

            # Optionally crop the z-range to the kidney ROI (plus slack).
            min_z = 0
            max_z = len(case_imgs)
            if self._use_roi:
                roi = self._rois[f'case_{case:05d}']['kidney']
                min_z = max(min_z, roi['min_z'] - self._roi_error_range)
                max_z = min(max_z, roi['max_z'] + self._roi_error_range)

            case_imgs = case_imgs[min_z: max_z]
            imgs += case_imgs

            if not is_test:
                segmentation_dir = case_root / 'segmentation'
                assert segmentation_dir.exists()
                case_labels = sorted(segmentation_dir.glob('*.npy'))
                case_labels = case_labels[min_z: max_z]
                labels += case_labels
                assert len(imgs) == len(labels)

            case_slice_num.append(len(case_imgs))

        return imgs, labels, case_slice_num

    def _split_subset(self):
        """Wrap the three index ranges as torch Subsets of this dataset."""
        self._train_dataset = data.Subset(self, self._train_indices)
        self._valid_dataset = data.Subset(self, self._valid_indices)
        self._test_dataset = data.Subset(self, self._test_indices)

    def idx_to_name(self, idx):
        """Return ``case_XXXXX/<slice-stem>`` for the slice at flat ``idx``."""
        path = self._imgs[idx]
        # parts[-3] is the case directory; stem drops the '.npy' suffix.
        return Path(path.parts[-3]) / Path(path.parts[-1][:-4])

    def _default_transform(self, data):
        """Resize (if needed) and convert a sample dict to torch tensors."""
        index = data["index"]
        if (data['image'].shape[0], data['image'].shape[1]) != self._img_size:
            data = self._resize(data)

        image, label = data['image'], data['label']

        # HWC float32 -> CHW tensor.
        image = torch.from_numpy(image.astype(np.float32).transpose((2, 0, 1)))
        data['image'] = image

        if label is not None:
            data['label'] = torch.from_numpy(label.astype(np.int64))
        else:
            # Test slices carry an empty tensor instead of a label map.
            data['label'] = torch.Tensor()
        data["index"] = index
        return data

    @staticmethod
    def normalize(vol):
        """Clip HU values to [-512, 512] and min-max normalize to [0, 1]."""
        hu_max = 512
        hu_min = -512
        vol = np.clip(vol, hu_min, hu_max)

        mxval = np.max(vol)
        mnval = np.min(vol)
        # The 1e-3 floor guards against division by zero on constant volumes.
        return (vol - mnval) / max(mxval - mnval, 1e-3)

    def _resize(self, data):
        """Pad the sample to a square, then resize to ``self._img_size``."""
        data = to_numpy(data)
        img, label = data['image'], data['label']

        side = max(img.shape[0], img.shape[1])

        aug = Compose([
            PadIfNeeded(min_height=side, min_width=side,
                        border_mode=cv2.BORDER_CONSTANT, p=1),
            Resize(height=self._img_size[0], width=self._img_size[1], p=1)
        ])

        data = aug(image=img, mask=label)
        # albumentations returns 'mask'; keep this dataset's 'label' key.
        data['label'] = data['mask']
        return data

    def img_idx_to_case_idx(self, idx):
        """Map a flat slice index to the index of its owning case."""
        case_idx = 0
        for i in range(len(self._case_slice_indices) - 1):
            if self._case_slice_indices[i] <= idx < self._case_slice_indices[i + 1]:
                case_idx = i
                break
        return case_idx

    def case_idx_to_case_id(self, case_idx, type='all'):
        """Translate a case index (within the given split) to its case id."""
        if type == 'all':
            return self._case_id[case_idx]
        elif type == 'train':
            return self._train_case[case_idx]
        elif type == 'valid':
            return self._valid_case[case_idx]
        elif type == 'test':
            return self._test_case[case_idx]

    def get_stack_img(self, idx):
        """Load ``stack_num`` adjacent slices centred on ``idx`` plus the
        centre slice's label, refined with the Gabor clustering step.

        Returns a dict with 'image' (H, W, stack_num), 'label' (ndarray, or
        None for test slices), 'index' and 'roi'.
        """
        case_idx = self.img_idx_to_case_idx(idx)
        imgs = []
        for i in range(idx - self._stack_num // 2, idx + self._stack_num // 2 + 1):
            # Clamp neighbours to the current case so stacks never cross cases.
            i = max(i, self._case_slice_indices[case_idx])
            i = min(i, self._case_slice_indices[case_idx + 1] - 1)
            imgs.append(np.load(str(self._imgs[i])))

        if idx in self._test_index_set:
            label = None
        else:
            label = np.load(str(self._labels[idx]))

        # Gabor refinement: mark strong texture-cluster regions inside the
        # organ mask as class 5 while preserving annotated classes 2..4.
        # Skip test slices — the original unconditionally called label.sum()
        # and crashed with AttributeError when label was None.
        if label is not None and label.sum() > 0:
            organ_mask = np.zeros(label.shape)
            organ_mask[label > 0] = 1
            tumor = np.zeros(organ_mask.shape)
            tumor[label == 2] = 2
            tumor[label == 3] = 3
            tumor[label == 4] = 4

            # Inverted Gabor response of the centre slice, masked to the organ.
            single_image = imgs[self._stack_num // 2] * organ_mask
            gabor = garbor_filter(single_image)
            gabor = ((255 - gabor) * organ_mask).astype(np.uint8)
            binary = cv2.medianBlur(gabor, ksize=5)
            binary[binary <= 200] = 0
            binary[binary > 200] = 1
            region_labels, _ = measure.label(binary, return_num=True)

            # Thin band along the organ boundary: components lying mostly on
            # this band are treated as edge artefacts and skipped below.
            organ_u8 = (organ_mask * 255).astype(np.uint8)
            kernel = np.ones((15, 15), np.uint8)
            erosion = cv2.erode(organ_u8, kernel, iterations=1)
            erosion = organ_u8 - erosion
            erosion[erosion > 0] = 1

            for region in np.unique(region_labels):
                # Skip background and tiny components (< 100 px).
                if region == 0 or (region_labels == region).sum() < 100:
                    continue
                component = np.zeros(region_labels.shape)
                component[region_labels == region] = 1

                size = float((component == 1).sum())
                edge_ratio = float((erosion[component == 1] == 1).sum()) / size
                if edge_ratio >= 0.6:
                    continue  # mostly boundary artefact
                # Fraction of the component NOT overlapping annotated tumor.
                fp = 1 - float((tumor[component == 1] > 0).sum()) / size
                if fp >= 0.5:
                    label[region_labels == region] = 5

            # Restore annotated tumor classes that class 5 may have overwritten.
            for cls in range(2, int(np.max(tumor)) + 1):
                label[tumor == cls] = cls

        img = np.stack(imgs, axis=2).astype("float32")

        roi = self.get_roi(case_idx, type='all')['kidney'] if self._use_roi else {}
        return {'image': img, 'label': label, 'index': idx, 'roi': roi}

    def get_roi(self, case_idx, type='all'):
        """Return the stored ROI dict for the given case (requires use_roi)."""
        case_id = self.case_idx_to_case_id(case_idx, type)
        return self._rois[f'case_{case_id:05d}']

    def __getitem__(self, idx):
        data = self.get_stack_img(idx)

        # Apply the split-specific transform, if any.
        if idx in self._train_index_set and self._train_transform is not None:
            data = self._train_transform(data)
        elif idx in self._valid_index_set and self._valid_transform is not None:
            data = self._valid_transform(data)
        elif idx in self._test_index_set and self._test_transform is not None:
            data = self._test_transform(data)

        return self._default_transform(data)

    def __len__(self):
        return len(self._imgs)

    @property
    def img_channels(self):
        return self._img_channels

    @property
    def num_classes(self):
        return self._num_classes

    @property
    def spec_classes(self):
        # NOTE(review): self._spec_classes is never assigned anywhere in this
        # file, so this property raises AttributeError unless a subclass or
        # external code sets it — confirm intended usage.
        return self._spec_classes

    @property
    def roi_error_range(self):
        return self._roi_error_range

    @property
    def train_dataset(self):
        return self._train_dataset

    @property
    def valid_dataset(self):
        return self._valid_dataset

    @property
    def test_dataset(self):
        return self._test_dataset

    @property
    def train_case_slice_indices(self):
        return self._train_case_slice_indices

    @property
    def valid_case_slice_indices(self):
        return self._valid_case_slice_indices

    @property
    def test_case_slice_indices(self):
        return self._test_case_slice_indices

    @property
    def train_case(self):
        return self._train_case

    @property
    def valid_case(self):
        return self._valid_case

    @property
    def test_case(self):
        return self._test_case


def testkits19():
    """Interactive smoke test: iterate the training split sequentially and
    display slices whose refined label contains the Gabor cluster class 5.

    Requires the dataset at the hard-coded root path and a display; meant
    for manual inspection only.
    """
    from dataset.transform import MedicalTransform
    from torch.utils.data import DataLoader, SequentialSampler
    import matplotlib.pyplot as plt

    transform = MedicalTransform(output_size=(512, 512), roi_error_range=15, use_roi=False)
    dataset = KiTS19GaborCluster(root="/datasets/DongbeiDaxue/chengkun_liver_cluster3", stack_num=3,
                                 img_size=(512, 512),
                                 use_roi=False, train_transform=transform, valid_transform=None)
    transform.train()
    # Use the public property instead of reaching into _num_classes.
    print(dataset.img_channels, dataset.num_classes)
    sampler = SequentialSampler(dataset.train_dataset)

    train_loader = DataLoader(dataset.train_dataset, batch_size=1, sampler=sampler,
                              num_workers=1, pin_memory=True)

    for index, data in enumerate(train_loader):
        print(index)
        image = data["image"].squeeze().numpy()
        label = data["label"].squeeze().numpy()
        # Only visualize slices that received the Gabor-derived class 5.
        if 5 not in np.unique(label):
            continue

        # 2x2 grid: the three stacked slices plus the refined label map.
        plt.subplot(2, 2, 1)
        plt.imshow(image[0], cmap="gray")
        plt.subplot(2, 2, 2)
        plt.imshow(image[1], cmap="gray")
        plt.subplot(2, 2, 3)
        plt.imshow(image[2], cmap="gray")
        plt.subplot(2, 2, 4)
        plt.imshow(label, cmap="gray")
        plt.show()


if __name__ == '__main__':
    # Manual smoke test; needs the dataset on disk and a display.
    testkits19()