import os
import random
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset
from torchvision import transforms, datasets
from torchvision.transforms import InterpolationMode
import torchvision
import matplotlib.pyplot as plt
import cv2
import matplotlib as mpl
import time
num_cl = 5
#mpl.use('TkAgg')
import skimage
from sklearn.model_selection import train_test_split
import torchio as tio
import time


def Trans(degree, ww, lw):
    """Build the training augmentation pipeline for image (intensity) slices.

    Resize to ``lw`` (bicubic), random rotation in [-degree, degree],
    random vertical flip (p=0.1) and horizontal flip (p=0.2),
    center-crop to ``ww``, then convert to a tensor.
    """
    steps = [
        transforms.Resize(lw, interpolation=InterpolationMode.BICUBIC),
        transforms.RandomRotation((-degree, degree),
                                  interpolation=InterpolationMode.BICUBIC),
        transforms.RandomVerticalFlip(p=0.1),
        transforms.RandomHorizontalFlip(p=0.2),
        transforms.CenterCrop(ww),
        transforms.ToTensor(),
    ]
    return transforms.Compose(steps)

def TransR(degree, ww, lw):
    """Build the training augmentation pipeline for mask slices.

    Same geometry as :func:`Trans` but with NEAREST interpolation so mask
    labels are not blended.
    """
    steps = [
        transforms.Resize(lw, interpolation=InterpolationMode.NEAREST),
        transforms.RandomRotation((-degree, degree),
                                  interpolation=InterpolationMode.NEAREST),
        transforms.RandomVerticalFlip(p=0.1),
        transforms.RandomHorizontalFlip(p=0.2),
        transforms.CenterCrop(ww),
        transforms.ToTensor(),
    ]
    return transforms.Compose(steps)

def vali_trans(ww, lw):
    """Build the validation pipeline for image slices: resize (bicubic),
    center-crop, to-tensor — no random augmentation."""
    steps = [
        transforms.Resize(lw, interpolation=InterpolationMode.BICUBIC),
        transforms.CenterCrop(ww),
        transforms.ToTensor(),
    ]
    return transforms.Compose(steps)

def vali_transR(ww, lw):
    """Build the validation pipeline for mask slices: resize (nearest),
    center-crop, to-tensor — no random augmentation."""
    steps = [
        transforms.Resize(lw, interpolation=InterpolationMode.NEAREST),
        transforms.CenterCrop(ww),
        transforms.ToTensor(),
    ]
    return transforms.Compose(steps)



class My_Dataset(Dataset):
    """Custom dataset of multi-phase liver-tumor volumes.

    Each entry of ``data_list`` is a directory holding three ``.npz`` files —
    ``a.npz``, ``p.npz``, ``v.npz`` (presumably arterial / portal / venous
    contrast phases — TODO confirm) — each containing ``image`` and ``mask``
    arrays of shape (slices, H, W).  ``__getitem__`` augments every phase with
    the SAME random 2-D transform, crops the mask bounding box, resizes it to
    ``size`` and returns a ``(3, *size)`` float tensor plus an integer label
    derived from keywords in the directory path.
    """

    def __init__(self, data_list, size, degree, lw, ww, transform):
        # data_list: sample directories, one tumor per directory
        # size:      target (layers, width, height) of the cropped tumor volume
        # degree:    max rotation angle for training augmentation
        # lw:        resize target fed to the 2-D transforms
        # ww:        center-crop size fed to the 2-D transforms
        # transform: True -> random training augmentation, False -> validation
        self.data_list = data_list
        self.size = size
        self.lw = lw
        self.ww = ww  # fixed stray space in the original "self. ww"
        self.degree = degree
        self.transform = transform

    def __len__(self):
        return len(self.data_list)

    def get_label(self, tmp_subfolder):
        """Map a sample path to its class index by keyword search.

        Keywords are tested in a fixed order (dict preserves insertion
        order), reproducing the original if/elif chain exactly: e.g. a path
        containing both 'HCC' and another keyword still resolves to 3.

        Raises:
            ValueError: if no known keyword occurs in the path.  (The
            original silently returned None, which later crashed cryptically
            inside ``collate_fn``.)
        """
        label_dic = {'HCC': 3, 'CYST': 0, 'FNH': 1, 'HA': 2,
                     'ICC': 4, 'META': 5, 'Hemangioma': 2, 'nodule': 0}
        for keyword, label in label_dic.items():
            if keyword in tmp_subfolder:
                return label
        raise ValueError(f'get_label: no known class keyword in path {tmp_subfolder!r}')

    def getdata(self, data):
        """Crop the mask bounding box from ``image`` and resize to ``self.size``.

        No augmentation.  Returns a (layer, width, height) float array
        normalized to [-1, 1] (assumes image intensities are in [0, 1] —
        TODO confirm against the .npz producer).
        """
        image = data['image']
        mask = data['mask']
        layer, width, height = self.size
        # tight bounding box of all labeled voxels
        z, x, y = np.where(mask > 0)
        tumor = image[z.min():z.max() + 1,
                      x.min():x.max() + 1,
                      y.min():y.max() + 1]
        transformed_tumor = skimage.transform.resize(
            tumor, output_shape=(layer, width, height),
            anti_aliasing=None, order=1)
        # map [0, 1] -> [-1, 1]
        return (transformed_tumor - 0.5) / 0.5

    def getdata_trans(self, data, seed, transform):
        """Augment a volume slice-by-slice, then crop and resize the tumor.

        The torch RNG is re-seeded with ``seed`` before EVERY 2-D transform
        call, so all slices of the image — and the matching mask slices —
        receive identical random rotations/flips, and all three phases of one
        sample (which share the seed) are augmented identically.

        Returns a (layer, width, height) float array; values stay in
        [0, 255] (unlike :meth:`getdata` there is no [-1, 1] normalization —
        NOTE(review): confirm this asymmetry is intentional).
        """
        image = data['image']
        mask = data['mask']
        layer, width, height = self.size

        if transform:
            transformI = Trans(self.degree, self.ww, self.lw)
            transformR = TransR(self.degree, self.ww, self.lw)
        else:
            transformI = vali_trans(self.ww, self.lw)
            transformR = vali_transR(self.ww, self.lw)

        # binarize the mask into uint8 {0, 255} images for PIL
        mask = ((mask > 0.1) * 255).astype(np.uint8)

        # slices that actually contain foreground voxels
        index = np.where(np.sum(np.sum(mask, -1), -1) > 0)[0]
        if len(index) == 0:
            # empty mask: the bounding-box crop below will fail on np.min
            # (original printed a gibberish marker and the builtin `id`)
            print('getdata_trans: mask contains no foreground voxels')
        if len(index) < 4:
            # tiny lesion: replace the mask with a padded bounding box so the
            # crop survives rotation/crop augmentation
            z_, x_, y_ = np.where(mask > 0)
            start_z = max(0, min(z_) - 2)
            end_z = min(max(z_) + 2, mask.shape[0])
            start_x = max(0, min(x_) - 5)
            end_x = min(max(x_) + 5, mask.shape[1])
            start_y = max(0, min(y_) - 5)
            end_y = min(max(y_) + 5, mask.shape[2])
            new_mask = np.zeros(mask.shape, dtype=np.uint8)
            new_mask[start_z:end_z, start_x:end_x, start_y:end_y] = 255
            mask = new_mask

        # transform every slice; collect then concatenate once instead of
        # repeated torch.cat (the original grew the tensor per iteration)
        data_slices, mask_slices = [], []
        for i in range(image.shape[0]):
            torch.manual_seed(seed)
            data_slices.append(transformI(Image.fromarray(image[i, :, :])))
            torch.manual_seed(seed)
            mask_slices.append(transformR(Image.fromarray(mask[i, :, :])))
        transformed_data = torch.cat(data_slices, dim=0)
        transformed_mask = torch.cat(mask_slices, dim=0)

        # ToTensor scaled to [0, 1]; restore a [0, 255] range
        transformed_data = transformed_data.numpy() * 255
        transformed_mask = transformed_mask.numpy()

        # crop the augmented tumor bounding box and resize to the target shape
        z, x, y = np.where(transformed_mask > 0)
        tumor = transformed_data[z.min():z.max() + 1,
                                 x.min():x.max() + 1,
                                 y.min():y.max() + 1]
        transformed_tumor = skimage.transform.resize(
            tumor, output_shape=(layer, width, height),
            anti_aliasing=None, order=1)
        return transformed_tumor

    def __getitem__(self, index):
        """Load one sample: returns ((3, layer, width, height) tensor, int label)."""
        sample_dir = self.data_list[index]

        # one .npz per contrast phase
        data_a = np.load(os.path.join(sample_dir, 'a.npz'))
        data_p = np.load(os.path.join(sample_dir, 'p.npz'))
        data_v = np.load(os.path.join(sample_dir, 'v.npz'))

        # one shared seed so all three phases get identical augmentation
        seed = np.random.randint(0, 2 ** 16)
        tumors = [
            torch.tensor(
                self.getdata_trans(data=phase, seed=seed,
                                   transform=self.transform).astype(np.float32))
            for phase in (data_a, data_p, data_v)
        ]
        tumor_set = torch.stack(tumors)  # (3, layer, width, height)
        label = self.get_label(sample_dir)
        return tumor_set, label

    @staticmethod
    def collate_fn(batch):
        """Stack per-sample (tensor, label) pairs into batch tensors.

        Stacks the (already float32) tensors directly instead of round-
        tripping through a numpy array as the original did.
        """
        images = torch.stack([sample[0] for sample in batch])
        labels = torch.as_tensor([sample[1] for sample in batch])
        return images, labels



