import os
import random
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset
from torchvision import transforms, datasets
from torchvision.transforms import InterpolationMode
import cc3d
import torchvision
import matplotlib.pyplot as plt
import cv2
import matplotlib as mpl
import time
num_cl = 6  # number of lesion classes (labels 0-5 in My_Dataset.get_label); unused in this chunk — presumably consumed by the training script
#mpl.use('TkAgg')
import skimage
from sklearn.model_selection import train_test_split
import torchio as tio


def numpy_to_pil(array):
    """Convert a float array (values expected in [0, 1]) to an 8-bit PIL image."""
    scaled = np.uint8(array * 255)
    return Image.fromarray(scaled)

def Trans(degree, ww, lw):
    """Build the training-time IMAGE pipeline.

    Resize to ``lw`` (bicubic), randomly rotate within ±``degree`` (bicubic),
    jitter brightness/contrast/saturation, center-crop to ``ww`` and convert
    to a tensor.
    """
    return transforms.Compose([
        transforms.Resize(lw, interpolation=InterpolationMode.BICUBIC),
        transforms.RandomRotation((-degree, degree),
                                  interpolation=InterpolationMode.BICUBIC),
        transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),
        transforms.CenterCrop(ww),
        transforms.ToTensor(),
    ])


def TransR(degree, ww, lw):
    """Build the training-time MASK pipeline (nearest-neighbour resampling).

    Geometrically mirrors :func:`Trans`: resize to ``lw``, random rotation
    within ±``degree``, center-crop to ``ww``, to tensor.

    Fix: the previous revision also applied ColorJitter to the mask.
    Photometric jitter is meaningless for a label mask, and its contrast
    adjustment can lift background zeros toward the downstream 0.1
    threshold.  Removing it does not break the image/mask pairing: the
    caller re-seeds torch before every transform call, and ColorJitter
    draws its random factors *after* RandomRotation, so the sampled
    rotation angle stays identical to the one drawn by Trans.
    """
    return torchvision.transforms.Compose([
        torchvision.transforms.Resize(lw, interpolation=torchvision.transforms.InterpolationMode.NEAREST),
        torchvision.transforms.RandomRotation((-degree, degree),
                                              interpolation=torchvision.transforms.InterpolationMode.NEAREST),
        torchvision.transforms.CenterCrop(ww),
        torchvision.transforms.ToTensor(),
    ])


def vali_trans(ww, lw):
    """Deterministic validation IMAGE pipeline: bicubic resize, center crop, to tensor."""
    steps = [
        transforms.Resize(lw, interpolation=InterpolationMode.BICUBIC),
        transforms.CenterCrop(ww),
        transforms.ToTensor(),
    ]
    return transforms.Compose(steps)


def vali_transR(ww, lw):
    """Deterministic validation MASK pipeline: nearest resize, center crop, to tensor."""
    steps = [
        transforms.Resize(lw, interpolation=InterpolationMode.NEAREST),
        transforms.CenterCrop(ww),
        transforms.ToTensor(),
    ]
    return transforms.Compose(steps)


class My_Dataset(Dataset):
    """Three-phase (arterial / portal / venous) liver-lesion dataset.

    Each element of ``data_list`` is a folder holding three npz archives
    (``a.npz``, ``p.npz``, ``v.npz``), each with an ``image`` and a ``mask``
    array of shape (slices, H, W).  ``__getitem__`` applies the same random
    geometric augmentation to all three phases (shared torch seed), crops the
    tumour bounding box from each, resizes every crop to (32, 64, 64) and
    stacks them into a single (3, 32, 64, 64) tensor.

    Fixes vs. the previous revision:
      * leftover timing/debug prints removed from ``__getitem__``;
      * ``get_label`` raises ``ValueError`` for an unrecognised path instead
        of silently returning ``None`` (which crashed later in collate);
      * an all-empty mask raises a descriptive ``ValueError`` instead of
        printing a garbage marker and then crashing inside ``min()``.
    """

    # Substring -> class id.  Order matters: matching is a plain `in` test on
    # the folder path, so e.g. 'HCC' must be tried before 'HA'.
    _LABELS = (
        ('HCC', 3), ('CYST', 0), ('FNH', 1), ('HA', 2),
        ('ICC', 4), ('META', 5), ('Hemangioma', 2), ('nodule', 0),
    )

    def __init__(self, data_list, size, ww, lw, degree, transform=True):
        # data_list: sample folders; size: (slices, width, height) of the
        # resampled volume; ww / lw: crop size / resize edge fed to the 2-D
        # transforms; degree: max rotation angle; transform: enables the
        # random train-time augmentation (False -> deterministic pipeline).
        self.data_list = data_list
        self.transform = transform
        self.size = size
        self.ww = ww
        self.lw = lw
        self.degree = degree

    def __len__(self):
        return len(self.data_list)

    def get_label(self, tmp_subfolder):
        """Infer the class id (0-5) from the sample's folder path.

        Raises:
            ValueError: if no known class keyword occurs in the path.
        """
        for keyword, cls in self._LABELS:
            if keyword in tmp_subfolder:
                return cls
        raise ValueError(f'cannot infer class label from path: {tmp_subfolder!r}')

    def mean_std(self, data):
        """Normalize ``data`` in place with ImageNet per-channel statistics.

        Slices along axis 0 are treated as repeating R, G, B triples
        (slice i uses channel i % 3).  Returns the same (mutated) array.
        """
        mean_ = [0.485, 0.456, 0.406]
        std_ = [0.229, 0.224, 0.225]
        for i in range(data.shape[0]):
            c = i % 3
            data[i, :, :] = (data[i, :, :] - mean_[c]) / std_[c]
        return data

    def _pad_mask(self, mask, z_pad, xy_pad, fill):
        """Replace a tiny mask with its padded bounding box, filled with ``fill``."""
        z_, x_, y_ = np.where(mask > 0)
        start_z = max(0, min(z_) - z_pad)
        end_z = min(max(z_) + z_pad, mask.shape[0])
        start_x = max(0, min(x_) - xy_pad)
        end_x = min(max(x_) + xy_pad, mask.shape[1])
        start_y = max(0, min(y_) - xy_pad)
        end_y = min(max(y_) + xy_pad, mask.shape[2])
        new_mask = np.zeros(mask.shape, dtype=np.uint8)
        new_mask[start_z:end_z, start_x:end_x, start_y:end_y] = fill
        return new_mask

    def getdata(self, data, id, seed):
        """Augment one phase and return ``(volume, mask)`` numpy arrays.

        Args:
            data: npz archive with 'image' and 'mask' arrays, shape (slices, H, W).
            id: sample identifier, used only in error messages.
            seed: torch seed re-applied before every per-slice transform so the
                image and the mask receive the identical random rotation.

        Raises:
            ValueError: if the mask contains no foreground voxels.
        """
        lw = self.lw
        degree = self.degree
        lyers, width, height = self.size[0], self.size[1], self.size[2]
        if self.transform:
            transformI = Trans(degree, width, lw)
            transformR = TransR(degree, width, lw)
        else:
            transformI = vali_trans(ww=width, lw=lw)
            transformR = vali_transR(ww=width, lw=lw)

        image = data['image']
        mask = data['mask']
        mask = ((mask > 0.1) * 255).astype(np.uint8)

        occupied = np.where(mask.sum(axis=(-1, -2)) > 0)[0]
        if len(occupied) == 0:
            raise ValueError(f'empty tumour mask in sample: {id}')
        if len(occupied) < 4:
            # Too few occupied slices to survive resampling reliably — fall
            # back to a generously padded bounding box.
            mask = self._pad_mask(mask, z_pad=2, xy_pad=5, fill=255)

        # Transform slice by slice; torch is re-seeded before *every* call so
        # transformI and transformR draw the same random rotation angle.
        data_slices, mask_slices = [], []
        for i in range(image.shape[0]):
            torch.manual_seed(seed)
            data_slices.append(transformI(Image.fromarray(image[i, :, :])))
            torch.manual_seed(seed)
            mask_slices.append(transformR(Image.fromarray(mask[i, :, :])))
        transformed_data = torch.cat(data_slices, dim=0)
        transformed_mask = torch.cat(mask_slices, dim=0)

        transformed_data = transformed_data.numpy() * 255
        transformed_mask = ((transformed_mask.numpy() > 0.1) * 255).astype(np.uint8)

        # Resample to the target volume; linear for the image, nearest
        # (order=0) for the mask so label values are not interpolated.
        transformed_data = skimage.transform.resize(
            transformed_data, (lyers, width, height), anti_aliasing=False, order=1)
        transformed_mask = skimage.transform.resize(
            transformed_mask, (lyers, width, height), anti_aliasing=False, order=0)

        transformed_data = transformed_data.astype(np.uint8) / 255.
        transformed_data = self.mean_std(transformed_data)

        transformed_mask = (transformed_mask > 0.1).astype(np.uint8)
        occupied = np.where(transformed_mask.sum(axis=(-1, -2)) > 0)[0]
        if 0 < len(occupied) < 4:
            # Augmentation/resampling shrank the lesion to almost nothing;
            # pad its bounding box (foreground value is 1 here, not 255).
            transformed_mask = self._pad_mask(transformed_mask, z_pad=2, xy_pad=2, fill=1)

        return transformed_data, transformed_mask

    def _crop_tumor(self, volume, mask):
        """Crop the mask's bounding box from ``volume`` and resize it to (32, 64, 64)."""
        z_, x_, y_ = np.where(mask > 0)
        tumor = volume[np.min(z_):np.max(z_) + 1,
                       np.min(x_):np.max(x_) + 1,
                       np.min(y_):np.max(y_) + 1]
        tumor = skimage.transform.resize(
            tumor, output_shape=(32, 64, 64), anti_aliasing=None, order=1)
        return torch.tensor(tumor)

    def __getitem__(self, index):
        """Return ``(tumor, label)`` where tumor is a (3, 32, 64, 64) tensor."""
        folder = self.data_list[index]
        # One seed shared by all three phases -> identical geometric augmentation.
        seed = np.random.randint(0, 2 ** 16)
        phases = []
        for name in ('a.npz', 'p.npz', 'v.npz'):
            archive = np.load(os.path.join(folder, name))
            volume, mask = self.getdata(archive, folder, seed)
            phases.append(self._crop_tumor(volume, mask))
        transformed_tumor = torch.stack(phases)
        label = self.get_label(folder)
        return transformed_tumor, label

    @staticmethod
    def collate_fn(batch):
        """Stack a list of (tumor, label) samples into batch tensors."""
        # as_tensor avoids the warning/copy that torch.tensor() does on tensors
        images = torch.stack([torch.as_tensor(s[0]) for s in batch], dim=0)
        labels = torch.as_tensor([s[1] for s in batch])
        return images, labels



