from skorch.core import *
from sklearn.model_selection import train_test_split
import albumentations as albu
import warnings
import json
import SimpleITK as sitk
from prepare_dataset import patch_loader, get_tensor, image2tensor, mask2tensor
np.set_printoptions(threshold=np.inf)  # print full arrays when debugging (no "..." truncation)
# scipy/ndimage emits "output shape of zoom" warnings during resizing; silence them
warnings.filterwarnings('ignore', '.*output shape of zoom.*')
# Channel statistics of the 60-sample training set, rescaled from the
# [0, 255] pixel range to [0, 1]: [mean/255, std/255].
segment_stats = [15.05582917/255, 25.47439385/255]
#   60-sample mean: 15.05582917   variance: 692.2445526   std dev: 25.47439385


def denormalize_image(img: TensorImage, mean, std, max_pixel_value=255.0):
    """Invert per-channel normalization and return a uint8 numpy image.

    Undoes ``(pixel/max_pixel_value - mean) / std``: the tensor is moved to
    CPU, scaled by ``std`` and shifted by ``mean`` (broadcast over the
    trailing H and W axes), then rescaled to the original pixel range.

    Args:
        img: normalized image tensor; assumed channel-first (C, H, W) so the
            ``[..., None, None]`` broadcast aligns — TODO confirm with callers.
        mean: per-channel mean(s) used during normalization.
        std: per-channel std(s) used during normalization.
        max_pixel_value: full-scale pixel value, 255.0 for 8-bit images.

    Returns:
        ``np.uint8`` array in ``[0, 255]``.
    """
    mean, std = tensor(mean), tensor(std)
    img = img.cpu().float() * std[..., None, None] + mean[..., None, None]
    img *= max_pixel_value
    # Clip before the uint8 cast: out-of-range values would otherwise wrap
    # around (e.g. 256 -> 0, -1 -> 255), producing salt-and-pepper artifacts.
    return np.clip(img.numpy(), 0, max_pixel_value).astype(np.uint8)


def create_segment_augmenter(height=256, width=256):
    """Build an albumentations pipeline for paired image/mask augmentation.

    The pipeline resizes to (height, width), then randomly applies a
    horizontal flip, one of brightness-contrast or gamma jitter, and a small
    shift/scale/rotate.

    Args:
        height: output image height in pixels.
        width: output image width in pixels.

    Returns:
        ``albu.Compose`` that accepts ``image``/``mask`` plus an extra
        ``image1``/``mask1`` pair transformed with the same random parameters.
    """
    # NOTE: the original wrapped an albu.Compose inside a second albu.Compose;
    # Compose expects a *list* of transforms, so we build a single Compose
    # carrying additional_targets directly.
    transformations = [
        albu.Resize(height=height, width=width, p=1.0),
        albu.HorizontalFlip(p=0.5),
        albu.OneOf([albu.RandomBrightnessContrast(brightness_limit=.1, p=0.5),
                    albu.RandomGamma(gamma_limit=(20, 40), p=0.5)]),
        albu.ShiftScaleRotate(rotate_limit=5, p=0.5),
    ]
    # Map the secondary pair onto the primary roles so both pairs receive
    # identical random transforms.
    target = {'image1': 'image', 'mask1': 'mask'}
    return albu.Compose(transformations, p=1.0, additional_targets=target)


def load_json_label(json_file):
    """Convert a JSON label file into a one-hot label.

    NOTE(review): unfinished stub — ``json_file`` is never read, and
    ``one_hot()`` (from the ``skorch.core`` star import) is called with no
    arguments, which will most likely raise a TypeError at runtime.
    TODO: parse ``json_file`` and pass the class index/count to ``one_hot``.
    """

    hot_label = one_hot()
    return hot_label


class NpEncoder(json.JSONEncoder):
    """JSON encoder that understands numpy scalar and array types.

    numpy integers/floats become native Python numbers and ``np.ndarray``
    becomes a nested list; anything else falls through to the base encoder
    (which raises TypeError for unsupported objects).
    """

    # Dispatch table: first matching numpy type wins.
    _CONVERTERS = (
        (np.integer, int),
        (np.floating, float),
        (np.ndarray, lambda arr: arr.tolist()),
    )

    def default(self, obj):
        for np_type, convert in self._CONVERTERS:
            if isinstance(obj, np_type):
                return convert(obj)
        return super().default(obj)


class SegmentDataset(Dataset):
    """Dataset yielding (image_tensor, mask_tensor) pairs for segmentation.

    Parameters:
        data_list: sequence of sample identifiers/paths accepted by
            ``get_tensor`` (presumably ``.npy`` paths — see
            ``create_segment_datasets``).
        augmentation_func: optional factory (e.g. ``create_segment_augmenter``)
            invoked as ``augmentation_func(*target_size)``.
        stats: optional normalization statistics; stored on the instance but
            not applied in ``__getitem__`` — NOTE(review): currently unused.
        target_size: (height, width) forwarded to ``augmentation_func``.
        file: unused; kept for backward compatibility with existing callers.
    """

    def __init__(self, data_list, augmentation_func=None, stats: Collection[Tensor]=None, target_size: Sizes=None, file=''):
        super().__init__()
        self.image_items = data_list
        self.target_size = target_size
        # Always assign so later `self.stats` access cannot raise
        # AttributeError (originally only set when `stats` was truthy).
        self.stats = stats
        self.augmentation = None
        # Guard target_size: `augmentation_func(*None)` would raise TypeError.
        if augmentation_func and target_size:
            self.augmentation = augmentation_func(*self.target_size)
        # NOTE(review): self.augmentation is built but never applied in
        # __getitem__ — confirm whether augmentation happens elsewhere.

    def __len__(self):
        """Return the number of samples."""
        return len(self.image_items)

    def transform_tensor(self, image, mask):
        """Convert an image/mask pair to tensors via the project helpers."""
        image = image2tensor(image)
        mask = mask2tensor(mask)
        return image, mask

    def __getitem__(self, idx):
        """Load sample ``idx`` and return its (image, mask) tensor pair."""
        sample_idx = self.image_items[idx]
        image_patch, mask = get_tensor(sample_idx)
        return self.transform_tensor(image_patch, mask)


def create_segment_datasets(data_path: PathList, augmentation_func=None, stats: Collection[Tensor]=None, target_size: Sizes=None):
    """Split the ``.npy`` files under ``data_path/image`` into train/validation datasets.

    Performs a shuffled 75/25 split and wraps each side in a
    ``SegmentDataset`` configured with the same augmentation/stats/size.

    Returns:
        (train_dataset, validation_dataset) tuple of ``SegmentDataset``.
    """
    image_files = sorted(Path(data_path).joinpath('image').glob('*.npy'))
    train_files, valid_files = train_test_split(image_files, test_size=0.25, shuffle=True)
    # Both datasets share the exact same configuration.
    common_kwargs = dict(augmentation_func=augmentation_func, stats=stats, target_size=target_size)
    return SegmentDataset(train_files, **common_kwargs), SegmentDataset(valid_files, **common_kwargs)


if __name__ == '__main__':
    # Placeholder entry point: no driver logic yet, only progress banners.
    print('==========start===========')

    print('==========done==========')

