import os

import albumentations as albu
import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset

import edge_utils

# Module-level color-jitter pipeline: a mild random per-channel RGB shift
# (limit 10 per channel, applied with probability 0.3). The stronger
# brightness/contrast and hue/saturation jitters were tried and disabled.
# NOTE(review): not referenced elsewhere in this file — presumably imported
# and applied by the training pipeline; confirm against callers.
color_aug = albu.Compose([
    albu.RGBShift(10, 10, 10, p=0.3),
    # albu.RandomBrightnessContrast(0.2, 0.2, always_apply=False, p=0.5),
    # albu.HueSaturationValue(20, 20, 20, always_apply=False, p=0.3)
])


def load(split_path):
    """Read a split file and return its entries as a list of strings.

    Each line has its newline removed and every backslash replaced with a
    forward slash (normalizes Windows-style paths).

    Args:
        split_path: path to a text file with one entry per line.

    Returns:
        list[str]: cleaned lines, in file order (blank lines become "").
    """
    # Iterate the file lazily instead of materializing it with readlines().
    with open(split_path, 'r') as f:
        return [line.replace("\n", "").replace("\\", "/") for line in f]


def rescale(image):
    def rescale_band(tmp):
        def get_top(tmp, p=0.02):
            tmp = tmp.flatten()
            vout = []
            n = int(len(tmp) * p)
            indices = np.argpartition(tmp, -n)[-n:]
            indices = indices[np.argsort(-tmp[indices])]
            vout.append(tmp[indices][-1])
            indices = np.argpartition(tmp, n)[:n]
            indices = indices[np.argsort(-tmp[indices])]
            vout.append(tmp[indices][-1])
            return vout

        vout = get_top(tmp)
        tmp[tmp < vout[1]] = vout[1]
        tmp[tmp > vout[0]] = vout[0]
        tmp = tmp.astype('float32')
        tmp = tmp - vout[1]
        tmp = tmp / (vout[0] - vout[1] + 1E-8)
        return (tmp * 255).astype('uint8')

    image = np.dstack((rescale_band(image[:, :, 0]), rescale_band(image[:, :, 1]), rescale_band(image[:, :, 2])))
    return image


def histo_match(img, ref):
    """Match the per-channel histogram of `img` to that of `ref`.

    For every channel, each gray level j of `img` is remapped to the `ref`
    gray level whose cumulative frequency is closest to that of j (classic
    CDF-based histogram matching; first match wins on ties, as before).

    Fixes over the previous version:
      * histograms are computed with an explicit range=(0, 256) so that bin
        j corresponds to gray level j (previously the bins spanned the data
        min/max, breaking the `level == bin` assumption);
      * CDFs are normalized, so `img` and `ref` may have different sizes;
      * the 256x256 Python loop is replaced by a vectorized lookup table.

    Args:
        img: (H, W, C) uint8 image to remap — assumes values in [0, 255].
        ref: (H', W', C) uint8 reference image — TODO confirm dtype.

    Returns:
        Array of the same shape/dtype as `img` with matched histograms.
    """
    out = np.zeros_like(img)
    for c in range(img.shape[2]):
        hist_img, _ = np.histogram(img[:, :, c], 256, range=(0, 256))
        hist_ref, _ = np.histogram(ref[:, :, c], 256, range=(0, 256))
        # Normalized CDFs make differently-sized images comparable.
        cdf_img = np.cumsum(hist_img) / img[:, :, c].size
        cdf_ref = np.cumsum(hist_ref) / ref[:, :, c].size
        # lut[j] = first ref level whose CDF is nearest to cdf_img[j]
        # (np.argmin returns the first minimum, matching list.index(min(...))).
        lut = np.argmin(np.abs(cdf_img[:, None] - cdf_ref[None, :]), axis=1)
        out[:, :, c] = lut.astype(img.dtype)[img[:, :, c]]
    return out


class JIDataset(Dataset):
    """Binary-segmentation dataset yielding (image, [mask, edge]) samples.

    Expects ``<root>/<split>/image/*`` and ``<root>/<split>/label/*`` with
    identically-named files; labels are binarized (any non-zero -> 1) and a
    binary edge map is derived from the mask via ``edge_utils``.
    """

    # Split sizes noted by the original author:
    #   train: 4736 = 64 * 74
    #   val:   1036 = 4 * 259
    #   test:  2416 = 16 * 151

    def __init__(self, split="", preprocessing=None, augmentation=None, onehot=True,
                 root="/home/hjr/zze/data/JI"):
        """Index all image/label file pairs for one split.

        Args:
            split: one of "train", "val", "test".
            preprocessing: optional albumentations-style transform applied
                after one-hot encoding.
            augmentation: optional albumentations-style transform applied
                jointly to image and mask.
            onehot: if True, expand the mask to a (H, W, 1) float array.
            root: base data directory; defaults to the previously
                hard-coded path so existing callers are unaffected.

        Raises:
            ValueError: if ``split`` is not a known split name (replaces the
                old ``assert``, which disappears under ``python -O``).
        """
        if split not in ("train", "val", "test"):
            raise ValueError(f"unknown split {split!r}; expected 'train', 'val' or 'test'")
        image_dir = os.path.join(root, split, "image")
        label_dir = os.path.join(root, split, "label")
        # sorted() gives a deterministic sample order across filesystems.
        names = sorted(os.listdir(image_dir))
        self.images_fps = [os.path.join(image_dir, n) for n in names]
        self.masks_fps = [os.path.join(label_dir, n) for n in names]
        self.onehot = onehot
        self.augmentation = augmentation
        self.preprocessing = preprocessing
        self.num_classes = 2

    def __getitem__(self, i):
        """Load and return sample ``i`` as ``(image, [mask, edge])``.

        ``edge`` is a float torch tensor of 2-pixel-wide binary edges
        computed from the (possibly augmented) mask; ``mask`` is either the
        raw uint8 mask or, with ``onehot=True``, a (H, W, 1) float array.
        """
        image = np.array(Image.open(self.images_fps[i]))
        mask = np.array(Image.open(self.masks_fps[i]))
        mask = (mask != 0).astype('uint8')  # binarize: any non-zero label -> foreground

        if self.augmentation:
            sample = self.augmentation(image=image, mask=mask)
            image, mask = sample['image'], sample['mask']

        # Edge map must be derived from the augmented mask, before one-hot.
        edge = edge_utils.mask_to_onehot(mask, self.num_classes)
        edge = edge_utils.onehot_to_binary_edges(edge, 2, self.num_classes)
        edge = torch.from_numpy(edge).float()

        if self.onehot:
            # Single foreground channel -> (H, W, 1) float array.
            mask = np.stack([(mask == 1)], axis=-1).astype('float')

        if self.preprocessing:
            sample = self.preprocessing(image=image, mask=mask)
            image, mask = sample['image'], sample['mask']

        return image, [mask, edge]

    def __len__(self):
        """Number of indexed image/label pairs."""
        return len(self.images_fps)
