import os
import random
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset
from torchvision import transforms, datasets
from torchvision.transforms import InterpolationMode
import torchvision
import matplotlib.pyplot as plt
import cv2
import matplotlib as mpl
import time
num_cl = 6
#mpl.use('TkAgg')
import skimage
from sklearn.model_selection import train_test_split
import torchio as tio


def numpy_to_pil(array):
    """Convert an array of floats scaled to [0, 1] into an 8-bit PIL image."""
    scaled = np.asarray(array) * 255
    return Image.fromarray(scaled.astype(np.uint8))

def Trans(degree, ww, lw):
    """Build the training-time image transform.

    Pipeline: bicubic resize to ``lw``, random rotation within ±``degree``,
    mild color jitter, center crop to ``ww``, then conversion to a [0, 1]
    tensor.
    """
    bicubic = InterpolationMode.BICUBIC
    pipeline = [
        transforms.Resize(lw, interpolation=bicubic),
        transforms.RandomRotation((-degree, degree), interpolation=bicubic),
        transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),
        transforms.CenterCrop(ww),
        transforms.ToTensor(),
    ]
    return transforms.Compose(pipeline)


def TransR(degree, ww, lw):
    """Build the training-time mask transform (nearest-neighbour resampling).

    Pipeline: nearest resize to ``lw``, center crop to ``ww``, to-tensor.
    ``degree`` is accepted for signature symmetry with ``Trans`` but unused.

    NOTE(review): unlike ``Trans``, no rotation is applied here, so a
    randomly rotated image may be paired with an un-rotated mask during
    training — confirm this is intended.
    """
    nearest = InterpolationMode.NEAREST
    pipeline = [
        transforms.Resize(lw, interpolation=nearest),
        transforms.CenterCrop(ww),
        transforms.ToTensor(),
    ]
    return transforms.Compose(pipeline)


def vali_trans(ww, lw):
    """Deterministic validation transform for images: bicubic resize to
    ``lw``, center crop to ``ww``, to-tensor (no random augmentation)."""
    pipeline = [
        transforms.Resize(lw, interpolation=InterpolationMode.BICUBIC),
        transforms.CenterCrop(ww),
        transforms.ToTensor(),
    ]
    return transforms.Compose(pipeline)


def vali_transR(ww, lw):
    """Deterministic validation transform for masks: nearest resize to
    ``lw``, center crop to ``ww``, to-tensor (no random augmentation)."""
    pipeline = [
        transforms.Resize(lw, interpolation=InterpolationMode.NEAREST),
        transforms.CenterCrop(ww),
        transforms.ToTensor(),
    ]
    return transforms.Compose(pipeline)


class My_Dataset(Dataset):
    """Three-phase (arterial / portal / venous) CT liver-tumor dataset.

    Each entry of ``data_list`` is a case directory holding ``a.npz``,
    ``p.npz`` and ``v.npz``, each with an ``image`` volume and a ``mask``
    volume.  ``__getitem__`` augments each phase slice-by-slice, crops the
    tumor bounding box, resizes it to (32, 64, 64), and returns a stacked
    (3, 32, 64, 64) float tensor plus an integer class label derived from
    the directory name.
    """

    # Substring -> class index.  Order matters: the first match wins.
    # 'Hemangioma' shares a class with 'HA', 'nodule' with 'CYST'.
    _LABELS = (
        ('HCC', 3), ('CYST', 0), ('FNH', 1), ('HA', 2),
        ('ICC', 4), ('META', 5), ('Hemangioma', 2), ('nodule', 0),
    )

    def __init__(self, data_list, size, ww, lw, degree, transform=True):
        self.data_list = data_list   # case directories
        self.transform = transform   # True -> training-time augmentation
        self.size = size             # (layers, width, height) target volume
        self.ww = ww                 # kept for interface compatibility
        self.lw = lw                 # resize edge fed to the 2D transforms
        self.degree = degree         # max augmentation rotation, degrees

    def __len__(self):
        return len(self.data_list)

    def get_label(self, tmp_subfolder):
        """Return the class index encoded in a case directory path.

        Raises:
            ValueError: if no known lesion keyword occurs in the path,
                instead of silently returning None and failing later.
        """
        for keyword, label in self._LABELS:
            if keyword in tmp_subfolder:
                return label
        raise ValueError(f'unknown lesion type in path: {tmp_subfolder!r}')

    def mean_std(self, data):
        """Normalize each z-slice in place with ImageNet RGB statistics.

        Slice ``i`` uses channel ``i % 3`` of the standard (mean, std)
        triples; the (modified) array is also returned for chaining.
        """
        mean_ = [0.485, 0.456, 0.406]
        std_ = [0.229, 0.224, 0.225]
        for i in range(data.shape[0]):
            c = i % 3
            data[i, :, :] = (data[i, :, :] - mean_[c]) / std_[c]
        return data

    # ---- internal helpers -------------------------------------------------

    @staticmethod
    def _load_phase(case_dir, fname):
        """Load one phase .npz; return (image volume, binary uint8 mask*255)."""
        arr = np.load(os.path.join(case_dir, fname))
        image = arr['image']
        mask = ((arr['mask'] > 0.1) * 255).astype(np.uint8)
        return image, mask

    @staticmethod
    def _occupied_slices(mask):
        """Indices of z-slices that contain any foreground voxel."""
        return np.where(np.sum(np.sum(mask, -1), -1) > 0)[0]

    @staticmethod
    def _expand_small_mask(mask, z_pad, xy_pad, fill):
        """Replace a tiny mask with its padded bounding box.

        Used when the tumor spans fewer than 4 slices, so downstream
        crops have a usable minimum extent.
        """
        z_, x_, y_ = np.where(mask > 0)
        z0 = max(0, z_.min() - z_pad)
        z1 = min(z_.max() + z_pad, mask.shape[0])
        x0 = max(0, x_.min() - xy_pad)
        x1 = min(x_.max() + xy_pad, mask.shape[1])
        y0 = max(0, y_.min() - xy_pad)
        y1 = min(y_.max() + xy_pad, mask.shape[2])
        grown = np.zeros(mask.shape, dtype=np.uint8)
        grown[z0:z1, x0:x1, y0:y1] = fill
        return grown

    @staticmethod
    def _transform_volume(image, mask, transformI, transformR, seed):
        """Apply the paired 2D transforms slice by slice.

        Re-seeding torch before every call makes the image and mask of each
        slice draw identical random transform parameters.
        """
        data_slices, mask_slices = [], []
        for i in range(image.shape[0]):
            torch.manual_seed(seed)
            data_slices.append(transformI(Image.fromarray(image[i, :, :])))
            torch.manual_seed(seed)
            mask_slices.append(transformR(Image.fromarray(mask[i, :, :])))
        return torch.cat(data_slices, dim=0), torch.cat(mask_slices, dim=0)

    def _process_phase(self, case_dir, fname, transformI, transformR, seed):
        """Full pipeline for one phase.

        Load, optionally grow a tiny mask, augment slice-wise, resize to
        ``self.size``, normalize, re-grow a tiny post-transform mask, crop
        the tumor bounding box and resize it to (32, 64, 64).
        """
        lyers, width, height = self.size
        image, mask = self._load_phase(case_dir, fname)

        occupied = self._occupied_slices(mask)
        if len(occupied) == 0:
            # Pathological case: no foreground at all; flag the offending dir
            # (the bounding-box crop below will fail on such data).
            print('empty tumor mask:', case_dir)
        elif len(occupied) < 4:
            mask = self._expand_small_mask(mask, z_pad=2, xy_pad=5, fill=255)

        data_t, mask_t = self._transform_volume(image, mask,
                                                transformI, transformR, seed)

        # Back to numpy: data to [0, 255] floats, mask re-binarized.
        data_np = data_t.numpy() * 255
        mask_np = ((mask_t.numpy() > 0.1) * 255).astype(np.uint8)

        data_np = skimage.transform.resize(data_np, (lyers, width, height),
                                           anti_aliasing=False, order=1)
        mask_np = skimage.transform.resize(mask_np, (lyers, width, height),
                                           anti_aliasing=False, order=0)

        # Quantize to uint8 then rescale to [0, 1] before normalization;
        # cast to float32 (torch default) rather than float64.
        data_np = self.mean_std(data_np.astype(np.uint8) / 255.)
        data_np = data_np.astype(np.float32)

        mask_np = (mask_np > 0.1).astype(np.uint8)
        occupied = self._occupied_slices(mask_np)
        if 0 < len(occupied) < 4:
            mask_np = self._expand_small_mask(mask_np, z_pad=2, xy_pad=2, fill=1)

        z_, x_, y_ = np.where(mask_np > 0)
        tumor = data_np[z_.min():z_.max() + 1,
                        x_.min():x_.max() + 1,
                        y_.min():y_.max() + 1]
        tumor = skimage.transform.resize(tumor, output_shape=(32, 64, 64),
                                         anti_aliasing=None, order=1)
        return torch.tensor(tumor)

    # -----------------------------------------------------------------------

    def __getitem__(self, index):
        """Return (tumor tensor of shape (3, 32, 64, 64), int class label)."""
        case_dir = self.data_list[index]
        width = self.size[1]
        if self.transform:
            transformI = Trans(self.degree, width, self.lw)
            transformR = TransR(self.degree, width, self.lw)
        else:
            # Validation must be deterministic: no random augmentation.
            transformI = vali_trans(ww=width, lw=self.lw)
            transformR = vali_transR(ww=width, lw=self.lw)

        # One seed per item so every slice of all three phases shares the
        # same random transform parameters.
        seed = np.random.randint(0, 2 ** 16)

        phases = [self._process_phase(case_dir, fname,
                                      transformI, transformR, seed)
                  for fname in ('a.npz', 'p.npz', 'v.npz')]
        return torch.stack(phases), self.get_label(case_dir)

    @staticmethod
    def collate_fn(batch):
        """Stack per-sample tensors and labels into a batch.

        ``__getitem__`` already returns torch tensors, so stack them
        directly instead of re-wrapping each with ``torch.tensor`` (which
        copies and emits a UserWarning).
        """
        images, labels = tuple(zip(*batch))
        images = torch.stack(images, dim=0)
        labels = torch.as_tensor(labels)
        return images, labels



