import os
import random
import cv2 as cv

import matplotlib.image as mpimg
import matplotlib.pyplot as plt

import numpy as np
import torch
from scipy import ndimage
from scipy.ndimage.interpolation import zoom
from torch.utils.data import Dataset
from torchvision import transforms


def random_rot_flip(image, label):
    """Apply one shared random 90-degree rotation plus one shared flip.

    `image` carries its spatial dimensions on axes 1 and 2 (channel-first),
    while `label` is spatial-only, so the same transform is applied with the
    axis indices shifted by one for the image.
    Returns the transformed ``(image, label)`` pair as contiguous copies.
    """
    quarter_turns = np.random.randint(0, 4)
    image = np.rot90(image, quarter_turns, axes=(1, 2))
    label = np.rot90(label, quarter_turns)
    flip_axis = np.random.randint(0, 2)
    # .copy() materialises the flipped views so downstream torch.from_numpy
    # does not see negative strides.
    return (
        np.flip(image, axis=flip_axis + 1).copy(),
        np.flip(label, axis=flip_axis).copy(),
    )


def random_rotate(image, label):
    """Rotate image and label together by one random angle in [-20, 20).

    The image is rotated in its (1, 2) spatial plane; the label in its own
    (0, 1) plane.  ``order=0`` (nearest neighbour) and ``reshape=False`` keep
    label values discrete and both shapes unchanged.
    """
    degrees = np.random.randint(-20, 20)
    rotated_image = ndimage.rotate(image, degrees, axes=(1, 2), order=0, reshape=False)
    rotated_label = ndimage.rotate(label, degrees, order=0, reshape=False)
    return rotated_image, rotated_label


class RandomGenerator(object):
    """Callable transform: random augmentation, resize, then numpy -> torch.

    Expects ``sample = {'image': (C, H, W) array, 'label': (H, W) array}``.
    With probability ~0.5 applies a random rot90+flip; otherwise a second
    coin flip may apply a small random rotation.  Both arrays are then
    resized to ``output_size`` and returned as tensors (label as int64).
    """

    def __init__(self, output_size):
        # Target (height, width) of the returned sample.
        self.output_size = output_size

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        # Two sequential draws from `random`, matching the original
        # sampling order exactly: the rotation branch is only reachable
        # (and the second draw only consumed) when the first draw fails.
        if random.random() > 0.5:
            image, label = random_rot_flip(image, label)
        elif random.random() > 0.5:
            image, label = random_rotate(image, label)
        _, cur_h, cur_w = image.shape
        out_h, out_w = self.output_size[0], self.output_size[1]
        if cur_h != out_h or cur_w != out_w:
            # Cubic interpolation for the image; nearest neighbour for the
            # label so class ids are never blended.
            image = zoom(image, (1, out_h / cur_h, out_w / cur_w), order=3)
            label = zoom(label, (out_h / cur_h, out_w / cur_w), order=0)
        return {
            'image': torch.from_numpy(image),
            'label': torch.from_numpy(label).long(),
        }


def listdir_image1(path):
    """Map each image file under ``path`` to its corresponding label file.

    Expects the directory layout::

        path/image/<person>/<stem>-<idx>.jpg
        path/label/<person>_z/<idx>.png

    where ``<idx>`` in the label filename is the integer parsed from the
    text after the last ``'-'`` of the image filename (leading zeros are
    stripped by the ``int()`` round-trip, e.g. ``scan-07.jpg`` -> ``7.png``).

    Returns:
        dict: {image_path: label_path} for every file found under
        ``path/image``.  The label paths are constructed, not checked for
        existence.
    """
    image_dir = os.path.join(path, 'image')
    label_dir = os.path.join(path, 'label')
    pairs = {}  # renamed from `dict` to avoid shadowing the builtin
    for people in os.listdir(image_dir):
        person_dir = os.path.join(image_dir, people)
        for img in os.listdir(person_dir):
            image_path = os.path.join(person_dir, img)
            # Slice index: text after the last '-' in the .jpg-stripped name.
            slice_idx = img.replace('.jpg', '').strip().split('-')[-1]
            label_path = os.path.join(label_dir, people + '_z', str(int(slice_idx)) + '.png')
            pairs[image_path] = label_path
    return pairs


class Synapse_dataset(Dataset):
    """Segmentation dataset pairing RGB image slices with binary mask labels.

    At construction, keeps only samples whose label image contains the
    value 255 (i.e. a non-empty mask).  ``__getitem__`` crops a 224x224
    window centred on the mask: for ``split == "train"`` around the mask
    contour's centroid, otherwise around the mask's bounding-box centre.
    NOTE(review): the clipping bounds below hard-code 512, so source
    images appear to be assumed 512x512 -- confirm against the data.
    """

    def __init__(self, base_dir, split, transform=None):
        # transform: optional callable (e.g. RandomGenerator) applied only
        # on the train path of __getitem__; the test path bypasses it.
        self.transform = transform  # using transform in torch!
        self.split = split
        self.image_list = []
        self.label_list = []
        self.image_list1 = []
        self.label_list1 = []
        # {image_path: label_path} discovered under base_dir/split.
        self.dict = listdir_image1(os.path.join(base_dir, self.split))
        for k, v in self.dict.items():
            a = os.path.join(k)
            b = os.path.join(v)
            self.image_list.append(a)
            self.label_list.append(b)
        print(len(self.image_list), len(self.label_list), self.image_list[0], self.label_list[0])

        # Filter: keep only slices whose label image actually contains
        # foreground (any pixel equal to 255).  Reads every label file once.
        for i in range(len(self.label_list)):

            if 255 in cv.imread(os.path.join(self.label_list[i])):
                self.image_list1.append(self.image_list[i])
                self.label_list1.append(self.label_list[i])

            else:
                continue

        print(self.image_list1[0])
        print(len(self.image_list1), len(self.label_list1))
        print(self.label_list1[0])

        self.data_dir = base_dir

    def __len__(self):
        # Number of slices that survived the non-empty-mask filter.
        return len(self.image_list1)

    def __getitem__(self, idx):
        if self.split == "train":

            train_slice_name = self.image_list1[idx]
            label_slice_name = self.label_list1[idx]

            train_data_path = os.path.join(train_slice_name)
            label_data_path = os.path.join(label_slice_name)

            # Binarise the first channel of the label image to {0, 1}.
            label_data = cv.imread(label_data_path)
            label_data = label_data[:, :, 0]
            label_data[label_data > 0] = 1
            # NOTE(review): unpacking two return values implies the
            # OpenCV 4.x findContours API (contours, hierarchy) --
            # confirm the installed cv2 version.
            contours, cnt = cv.findContours(label_data.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
            M = cv.moments(contours[0])  # moments of the first contour, returned as a dict
            # Approximate mask centroid: the contour's first moments divided
            # by the mask's pixel count (~ contour area m00).
            # u = column (x), v = row (y).
            u = int(M["m10"] / label_data.sum())
            v = int(M["m01"] / label_data.sum())
            # 224x224 canvases for the cropped label (a) and image (b);
            # regions outside the crop window stay zero-padded.
            a = np.zeros([224, 224])
            b = np.zeros([3, 224, 224])
            # Crop window [u-112, u+112] x [v-112, v+112], clipped to the
            # assumed 512x512 image bounds (so the crop may be < 224 wide
            # near an edge and lands in the canvas's top-left corner).
            if u - 112 >= 0:
                h1 = u - 112
            else:
                h1 = 0
            if v - 112 >= 0:
                w1 = v - 112
            else:
                w1 = 0
            if u + 112 <= 512:
                h2 = u + 112
            else:
                h2 = 512
            if v + 112 <= 512:
                w2 = v + 112
            else:
                w2 = 512
            h1 = int(h1)
            h2 = int(h2)
            w1 = int(w1)
            w2 = int(w2)
            # Rows are indexed by the v-derived w bounds, columns by the
            # u-derived h bounds.
            a[0:w2 - w1, 0:h2 - h1] = label_data[w1:w2, h1:h2]

            # Image is read via matplotlib and moved to channel-first
            # (C, H, W) before the same crop is applied.
            train_data = mpimg.imread(train_data_path)
            train_data = train_data.transpose([2, 0, 1])
            b[:, 0:w2 - w1, 0:h2 - h1] = train_data[:, w1:w2, h1:h2]
            image, label = b, a

        else:

            test_slice_name = self.image_list1[idx]
            label_slice_name = self.label_list1[idx]

            test_data_path = os.path.join(test_slice_name)
            label_data_path = os.path.join(label_slice_name)

            # Binarise the first channel of the label image to {0, 1}.
            label_data = cv.imread(label_data_path)
            label_data = label_data[:, :, 0]
            label_data[label_data > 0] = 1
            # Bounding box of the foreground pixels; the crop is centred
            # on the box centre instead of the contour centroid used for
            # training.
            coor = np.nonzero(label_data)
            hmin = min(coor[0])
            hmax = max(coor[0])
            wmin = min(coor[1])
            wmax = max(coor[1])

            h = hmin + (hmax - hmin) // 2
            w = wmin + (wmax - wmin) // 2

            new_label = np.zeros([224, 224])
            new_image = np.zeros([3, 224, 224])

            # Same 224x224 window, clipped to the assumed 512x512 bounds.
            w1 = w - 112 if w - 112 > 0 else 0
            h1 = h - 112 if h - 112 > 0 else 0
            w2 = w + 112 if w + 112 < 512 else 512
            h2 = h + 112 if h + 112 < 512 else 512

            new_label[0:h2 - h1, 0:w2 - w1] = label_data[h1:h2, w1:w2]
            test_data = mpimg.imread(test_data_path)
            test_data = test_data.transpose([2, 0, 1])
            new_image[:, 0:h2 - h1, 0:w2 - w1] = test_data[:, h1:h2, w1:w2]
            image, label = new_image, new_label

            # Safety resize: new_image/new_label are allocated at 224x224
            # above, so this branch is effectively never taken; kept as a
            # guard.  Cubic for the image, nearest for the mask.
            z, x, y = image.shape
            if x != 224 or y != 224:
                image = zoom(image, (1, 224 / x, 224 / y), order=3)
                label = zoom(label, (224 / x, 224 / y), order=0)

            image = torch.from_numpy(image.astype(np.float32))
            label = torch.from_numpy(label.astype(np.float32))

            # NOTE(review): non-train samples skip self.transform and carry
            # no 'case_name' key, unlike the train path below.
            sample = {'image': image, 'label': label.long()}
            return sample

        # Train path: arrays are handed to the transform (which is expected
        # to produce tensors), and the source image path is attached.
        sample = {'image': image, 'label': label}
        if self.transform:
            sample = self.transform(sample)
        sample['case_name'] = self.image_list1[idx]
        return sample

