##load data
from torch.utils.data import Dataset
import numpy as np
import os
import torch
import yaml
import random
import configparser
import albumentations as A
import random





def load_npz(path):
    """Load one slice from an .npz file.

    The file is expected to contain two unnamed arrays saved via
    ``np.savez(path, img, gt)``: ``arr_0`` is the image, ``arr_1`` the
    ground-truth segmentation mask.

    Returns:
        (img, gt): two numpy arrays.
    """
    # Open the archive once (the original parsed the file twice) and make
    # sure the underlying file handle is closed.
    with np.load(path) as data:
        return data['arr_0'], data['arr_1']
def set_random(seed_id=1234):
    """Seed every RNG used by this module for reproducible runs.

    Args:
        seed_id: the seed applied to python's ``random``, numpy and torch.
    """
    random.seed(seed_id)  # __getitem__ samples indices via random.randint
    np.random.seed(seed_id)
    torch.manual_seed(seed_id)  # for cpu
    torch.cuda.manual_seed_all(seed_id)  # for GPU
    torch.backends.cudnn.deterministic = True
    # benchmark=True lets cuDNN auto-tune (non-deterministic) algorithms,
    # which defeats deterministic=True above — it must be False here.
    torch.backends.cudnn.benchmark = False

def norm_1__1(array, l, m):
    """Linearly rescale *array* from the range [m, l] into [-1, 1].

    Args:
        array: values to rescale (scalar or numpy array).
        l: maximum of the original value range.
        m: minimum of the original value range.
    """
    unit = (array - m) / (l - m)  # map [m, l] -> [0, 1]
    return unit * 2 - 1           # map [0, 1] -> [-1, 1]

class UnpairedDataset(Dataset):
    """Unpaired two-domain dataset (e.g. MR-CT).

    Each item yields one slice from domain A (source) and one
    independently-drawn slice from domain B (target), plus their
    segmentation masks. ``A_path`` / ``B_path`` are text files listing one
    .npz slice path per line.
    """

    def __init__(self, A_path, B_path):
        # A_path is the source domain, B_path the target domain.
        self.A_path = A_path
        self.B_path = B_path
        with open(A_path, 'r') as f:
            self.listA = f.read().splitlines()
        with open(B_path, 'r') as f:
            self.listB = f.read().splitlines()

        self.Asize = len(self.listA)
        self.Bsize = len(self.listB)
        # One epoch walks the larger list; the smaller one is sampled randomly.
        self.dataset_size = max(self.Asize, self.Bsize)
        # SIFA uses rotation, scaling and affine transforms; A.OneOf applies
        # at most one of these per call, each with its own probability.
        self.transform = A.OneOf(
            [
                A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, p=0.5, border_mode=0),
                A.RandomSizedCrop(p=0.5, min_max_height=(230, 250), height=256, width=256),  # scaling, used in the SIFA paper
                A.Affine(p=0.5),   # affine transform, used in the SIFA paper
                A.Rotate(limit=15, p=0.5),  # rotation, used in the SIFA paper
            ])

    def augment(self, source_image, source_gt, target_image):
        """Augment the source image together with its mask, and the target
        image independently (fresh random draw)."""
        transformed_source = self.transform(image=source_image, mask=source_gt)
        transformed_target = self.transform(image=target_image)
        return (transformed_source['image'],
                transformed_source['mask'],
                transformed_target['image'])

    def mynormal(self, A, B, f_name):
        """Squash arrays A and B into [-1, 1] when they exceed that range.

        Each array that has a value outside [-1, 1] is divided by its own
        largest absolute value; arrays already inside the range are returned
        unchanged.

        Args:
            A: source-domain image array.
            B: target-domain image array.
            f_name: unused; kept for interface compatibility (an earlier
                version selected per-dataset min/max constants from it —
                LITS: 3.6954126 / -1.2178384036947165,
                ATLAS: 20.8222842255827 / -1.1186605967010617).
        """
        if A.max() > 1 or A.min() < -1:
            A = A / max(abs(A.max()), abs(A.min()))
        if B.max() > 1 or B.min() < -1:
            B = B / max(abs(B.max()), abs(B.min()))
        return A, B

    def __getitem__(self, index):
        # index walks the larger domain; the other domain is sampled
        # uniformly at random, so the pairing is unpaired/shuffled.
        if self.Asize == self.dataset_size:
            a_path = self.listA[index]
            b_path = self.listB[random.randint(0, self.Bsize - 1)]
        else:
            # NOTE: the original passed self.listA[index] to mynormal here,
            # which raises IndexError once index >= Asize; use the path that
            # was actually sampled instead.
            b_path = self.listB[index]
            a_path = self.listA[random.randint(0, self.Asize - 1)]
        a_img, a_gt = load_npz(a_path)
        b_img, b_gt = load_npz(b_path)

        # Augment first, then rescale: augmentation can change pixel values.
        a_img, a_gt, b_img = self.augment(a_img, a_gt, b_img)
        a_img, b_img = self.mynormal(a_img, b_img, a_path)
        # Convert to (1, H, W) float tensors.
        a_img = torch.from_numpy(a_img.copy()).unsqueeze(0).float()
        a_gt = torch.from_numpy(a_gt.astype(np.float32).copy()).unsqueeze(0).float()
        b_img = torch.from_numpy(b_img.copy()).unsqueeze(0).float()
        b_gt = torch.from_numpy(b_gt.astype(np.float32).copy()).unsqueeze(0).float()
        return a_img, a_gt, b_img, b_gt

    def __len__(self):
        return self.dataset_size


class SingleDataset(Dataset):
    """Single-domain (test-time) dataset yielding (image, mask) tensors.

    ``test_path`` is a text file listing one .npz slice path per line.
    NOTE(review): unlike UnpairedDataset, no [-1, 1] rescaling is applied
    here — the earlier per-dataset normalisation (LITS max/min
    3.6954126 / -1.2178384036947165, ATLAS 20.8222842255827 /
    -1.1186605967010617) is intentionally disabled; slices are returned
    with their stored intensities.
    """

    def __init__(self, test_path):
        with open(test_path, 'r') as f:
            self.test = f.read().splitlines()

    def __getitem__(self, index):
        img, gt = load_npz(self.test[index])
        gt = gt.astype(np.float32)
        # (H, W) -> (1, H, W) float tensors
        img = torch.from_numpy(img.copy()).unsqueeze(0).float()
        gt = torch.from_numpy(gt.copy()).unsqueeze(0).float()
        return img, gt

    def __len__(self):
        return len(self.test)

# def myHorizontalFlip(source_image,source_gt,target_image,p=0.5):
#     if random.random()<p:
#         source_image2=A.HorizontalFlip().apply(source_image)
#         source_gt2=A.HorizontalFlip().apply(source_gt)
#         target_image2=A.HorizontalFlip().apply(target_image)
#         return source_image2,source_gt2,target_image2,True
#     return source_image,source_gt,target_image,False
# def myVerticalFlip(source_image,source_gt,target_image,p=0.5):
#     if random.random()<p:
#         source_image2=A.VerticalFlip().apply(source_image)
#         source_gt2=A.VerticalFlip().apply(source_gt)
#         target_image2=A.VerticalFlip().apply(target_image)
#         return source_image2,source_gt2,target_image2,True
#     return source_image,source_gt,target_image,False

if __name__ == '__main__':
    # Smoke test: iterate the unpaired dataset once and print each sample's
    # image value range (after mynormal these should stay within [-1, 1]).
    # NOTE(review): hard-coded absolute paths — only runs on the author's machine.
    ds=UnpairedDataset('/home/liukai/projects/SIFA/dataset/LiverAndTumor/source.txt',
                       '/home/liukai/projects/SIFA/dataset/LiverAndTumor/target_train.txt')
    for s, s_gt, t, t_gt in ds:
        print(s.max(),s.min(),t.max(),t.min())