import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from joblib import Parallel, delayed
from numpy import seterr
from scipy.ndimage import find_objects
from skimage import segmentation, color
from skimage.feature import local_binary_pattern
from torch.distributions import Distribution

# Promote all numpy floating-point error conditions (divide-by-zero, overflow,
# invalid operation, underflow) to raised exceptions instead of silent warnings.
seterr(all='raise')


def _calculate_color_sim(ri, rj):
    """
        Calculate color similarity using histogram intersection
    """
    return sum([min(a, b) for a, b in zip(ri["color_hist"], rj["color_hist"])])


def _calculate_texture_sim(ri, rj):
    """
        Calculate texture similarity using histogram intersection
    """
    return sum([min(a, b) for a, b in zip(ri["texture_hist"], rj["texture_hist"])])


def _calculate_size_sim(ri, rj, imsize):
    """
        Size similarity boosts joint between small regions, which prevents
        a single region from engulfing other blobs one by one.

        size (ri, rj) = 1 − [size(ri) + size(rj)] / size(image)
    """
    return 1.0 - (ri['size'] + rj['size']) / imsize


def _calculate_fill_sim(ri, rj, imsize):
    """
        Fill similarity measures how well ri and rj fit into each other.
        BBij is the bounding box around ri and rj.

        fill(ri, rj) = 1 − [size(BBij) − size(ri) − size(ri)] / size(image)
    """

    bbsize = (max(ri['box'][2], rj['box'][2]) - min(ri['box'][0], rj['box'][0])) * (
            max(ri['box'][3], rj['box'][3]) - min(ri['box'][1], rj['box'][1]))

    return 1.0 - (bbsize - ri['size'] - rj['size']) / imsize


def calculate_color_hist(mask, img):
    """
        Colour histogram of the masked region of `img`.

        Each channel contributes BINS bins, so the output has
        BINS * n_channels entries (channel count varies with the colour
        space in use). The concatenated histogram is L1-normalized
        across all channels.
    """
    BINS = 25
    if img.ndim == 2:
        img = img[:, :, np.newaxis]  # promote grey-scale to a 1-channel image

    per_channel = []
    for ch in range(img.shape[2]):
        pixels = img[:, :, ch][mask]
        per_channel.append(np.histogram(pixels, BINS)[0])

    hist = np.concatenate(per_channel)
    # L1 normalize over the full concatenated histogram
    return hist / np.sum(hist)


def generate_lbp_image(img):
    """Per-channel local binary pattern image (P=8 neighbours, radius 1).

    Grey-scale input is promoted to a single-channel image; the output
    has the same (h, w, c) shape as the (promoted) input.
    """
    if img.ndim == 2:
        img = img[:, :, np.newaxis]

    lbp_img = np.zeros(img.shape)
    for ch in range(img.shape[2]):
        lbp_img[:, :, ch] = local_binary_pattern(img[:, :, ch], 8, 1)

    return lbp_img


def calculate_texture_hist(mask, lbp_img):
    """
        Texture histogram of the masked region of a precomputed LBP image.

        Uses LBP responses for now (following AlpacaDB's implementation);
        Gaussian derivatives as in the original paper may replace this in
        a future version. Output has BINS * n_channels entries and is
        L1-normalized.
    """
    BINS = 10

    per_channel = []
    for ch in range(lbp_img.shape[2]):
        responses = lbp_img[:, :, ch][mask]
        per_channel.append(np.histogram(responses, BINS)[0])

    hist = np.concatenate(per_channel)
    # L1 normalize over the full concatenated histogram
    return hist / np.sum(hist)


def calculate_sim(ri, rj, imsize, sim_strategy):
    """
        Combined similarity between regions ri and rj.

        `sim_strategy` is a string of flags selecting which measures are
        summed: C (color), T (texture), S (size), F (fill).
    """
    measures = (
        ('C', lambda: _calculate_color_sim(ri, rj)),
        ('T', lambda: _calculate_texture_sim(ri, rj)),
        ('S', lambda: _calculate_size_sim(ri, rj, imsize)),
        ('F', lambda: _calculate_fill_sim(ri, rj, imsize)),
    )
    return sum(fn() for flag, fn in measures if flag in sim_strategy)


'''
Superpixel processing
'''


def simple_superpixel(batch_image: np.ndarray, superpixel_fn: callable) -> np.ndarray:
    """Apply a superpixel function to every image of a batch in parallel.

    Args:
        batch_image (np.ndarray): batch of images, shape [b, h, w, c].
        superpixel_fn (callable): maps one [h, w, c] image to its
            superpixel rendering.

    Returns:
        np.ndarray: stacked per-image results, shape [b, h, w, c].
    """
    n_jobs = batch_image.shape[0]  # one worker per image in the batch
    tasks = (delayed(superpixel_fn)(frame) for frame in batch_image)
    return np.array(Parallel(n_jobs=n_jobs)(tasks))


'''
Blur / smoothing (guided filtering)
'''


class GuidedFilter(nn.Module):
    """Guided filtering: edge-preserving smoothing of `y` guided by `x`."""

    def box_filter(self, x: torch.Tensor, r):
        """Mean filter with a (2r+1)x(2r+1) box kernel, per channel, 'same' padding."""
        channels = x.shape[1]
        k = 2 * r + 1
        # depthwise kernel [c, 1, k, k]; every tap weighted 1/k^2
        kernel = torch.full((channels, 1, k, k), 1 / (k ** 2),
                            dtype=torch.float32, device=x.device)
        return F.conv2d(x, kernel, padding=r, groups=channels)

    def forward(self, x: torch.Tensor, y: torch.Tensor, r, eps=1e-2):
        _, _, h, w = x.shape
        # normalizer: box response of an all-ones image, corrects border counts
        ones = torch.ones((1, 1, h, w), dtype=x.dtype, device=x.device)
        N = self.box_filter(ones, r)

        mean_x = self.box_filter(x, r) / N
        mean_y = self.box_filter(y, r) / N
        cov_xy = self.box_filter(x * y, r) / N - mean_x * mean_y
        var_x = self.box_filter(x * x, r) / N - mean_x * mean_x

        # per-pixel linear model: y ~ A * x + bias, regularized by eps
        A = cov_xy / (var_x + eps)
        bias = mean_y - A * mean_x

        mean_A = self.box_filter(A, r) / N
        mean_bias = self.box_filter(bias, r) / N

        return mean_A * x + mean_bias


def clip_by_tensor(t, t_min, t_max):
    """
    Element-wise clip of `t` into the range [t_min, t_max].

    :param t: input tensor
    :param t_min: lower bound (tensor, broadcastable against t)
    :param t_max: upper bound (tensor, broadcastable against t)
    :return: clipped float tensor
    """
    t = t.float()
    t_min = t_min.float()
    t_max = t_max.float()

    # torch.max/torch.min accept tensor bounds, replacing the original
    # hand-rolled boolean-mask arithmetic with the idiomatic clamp form.
    return torch.min(torch.max(t, t_min), t_max)


'''
Color shift
'''


class ColorShift(nn.Module):
    """Random grayscale projection with jittered RGB mixing weights.

    Each forward pass samples one set of RGB weights (centred near the
    standard luma coefficients 0.299/0.587/0.114) and applies the
    sum-normalized weighted combination to the input image(s).
    """

    def __init__(self, mode='uniform', device=None):
        super().__init__()
        self.dist: Distribution = None
        self.mode = mode

        if self.mode == 'normal':
            self.dist = torch.distributions.Normal(
                torch.tensor((0.299, 0.587, 0.114), device=device),
                torch.tensor((0.1, 0.1, 0.1), device=device))
        elif self.mode == 'uniform':
            self.dist = torch.distributions.Uniform(
                torch.tensor((0.199, 0.487, 0.014), device=device),
                torch.tensor((0.399, 0.687, 0.214), device=device))

    def forward(self, photo, cartoon=None):
        # one weight sample per call, shared by both images when present
        rgb = self.dist.sample()
        weights = rgb[None, :, None, None]
        norm = rgb.sum()
        if cartoon is None:
            return photo * weights / norm
        return photo * weights / norm, cartoon * weights / norm


'''
Keeps the row- and column-wise variation of the image smooth,
avoiding abrupt discontinuities (total-variation regularization).
'''


class VariationLoss(nn.Module):
    """Total-variation style smoothness loss with a configurable step size."""

    def __init__(self, k_size: int) -> None:
        super().__init__()
        self.k_size = k_size  # pixel offset used for the finite differences

    def forward(self, image: torch.Tensor):
        _, c, h, w = image.shape
        k = self.k_size
        # mean squared finite differences along height and width
        diff_h = image[:, :, k:, :] - image[:, :, :-k, :]
        diff_w = image[:, :, :, k:] - image[:, :, :, :-k]
        # NOTE: the per-element means are additionally divided by c*h*w,
        # matching the original scaling of this loss.
        return (torch.mean(diff_h ** 2) + torch.mean(diff_w ** 2)) / (c * h * w)


class LSGanLoss(nn.Module):
    """Least-squares GAN loss with targets a=0 (fake) and b=c=1 (real/generator)."""

    def __init__(self) -> None:
        super().__init__()

    def _d_loss(self, real_logit, fake_logit):
        # discriminator: 1/2 * [(real - 1)^2 + (fake - 0)^2]
        real_term = torch.mean((real_logit - 1) ** 2)
        fake_term = torch.mean(fake_logit ** 2)
        return 0.5 * (real_term + fake_term)

    def _g_loss(self, fake_logit):
        # generator: push fake logits towards the "real" target: (fake - 1)^2
        return torch.mean((fake_logit - 1) ** 2)

    def forward(self, real_logit, fake_logit):
        return self._d_loss(real_logit, fake_logit), self._g_loss(fake_logit)


def slic(image, seg_num=200):
    """SLIC superpixels with each segment flat-filled by its average colour."""
    labels = segmentation.slic(image, n_segments=seg_num, sigma=1,
                               compactness=10, convert2lab=True,
                               start_label=0)
    return color.label2rgb(labels, image, kind='avg', bg_label=-1)


def adaptive_slic(image, seg_num=200):
    """SLIC superpixels recoloured with the adaptive mean/median mix rule.

    The trailing `** 1.2` applies a gamma-style adjustment to the result.
    """
    labels = segmentation.slic(image, n_segments=seg_num, sigma=1,
                               compactness=10, convert2lab=True,
                               start_label=0)
    recolored = adaptive_label2rgb(labels, image, kind='mix')
    return recolored ** 1.2


def adaptive_label2rgb(label_field, image, kind='mix', bg_label=-1, bg_color=(0, 0, 0)):
    """Flat-fill every labelled region of `image` with a representative colour.

    Args:
        label_field: integer label map with the same spatial shape as `image`.
        image: source image the representative colours are computed from.
        kind: 'avg' (mean), 'median', or 'mix' (std-dependent blend of both).
        bg_label: label value treated as background.
        bg_color: colour assigned to background pixels.

    Returns:
        Array shaped like `image` with each region filled by one colour.

    Raises:
        ValueError: if `kind` is not one of 'avg', 'median', 'mix'.
    """
    out = np.zeros_like(image)
    labels = np.unique(label_field)
    if (labels == bg_label).any():
        labels = labels[labels != bg_label]
        out[(label_field == bg_label).nonzero()] = bg_color

    for label in labels:
        mask = (label_field == label).nonzero()
        region = image[mask]
        # `fill` replaces the original local name `color`, which shadowed
        # the skimage `color` module imported at file level.
        if kind == 'avg':
            fill = region.mean(axis=0)
        elif kind == 'median':
            fill = np.median(region, axis=0)
        elif kind == 'mix':
            std = np.std(region)
            if std < 20:
                fill = region.mean(axis=0)
            elif std < 40:
                # mid-contrast region: blend mean and median
                fill = 0.5 * region.mean(axis=0) + 0.5 * np.median(region, axis=0)
            else:
                # High-contrast region: use the robust median.
                # Fixes two bugs in the original: ndarray has no `.median()`
                # method (AttributeError whenever std > 40), and std == 20 /
                # std == 40 fell through every branch, leaving the colour None.
                fill = np.median(region, axis=0)
        else:
            raise ValueError("unknown kind: {}".format(kind))
        out[mask] = fill
    return out


def switch_color_space(img, target):
    """
        Convert an RGB image to the given target colour space.

        I: intensity (grey scale); Lab; rgI: the rg channels of normalized
        RGB plus intensity; HSV; H: the hue channel of HSV; rgb: normalized
        RGB.

    Raises:
        ValueError: if `target` is not one of the supported spaces.
    """
    if target == 'HSV':
        return color.rgb2hsv(img)

    elif target == 'Lab':
        return color.rgb2lab(img)

    elif target == 'I':
        return color.rgb2grey(img)

    elif target == 'rgb':
        # NOTE(review): axis=0 sums over image rows; per-pixel channel
        # normalization would use axis=2 — kept as-is to preserve current
        # behavior, but worth confirming against the intended "normalized RGB".
        img = img / np.sum(img, axis=0)
        return img

    elif target == 'rgI':
        img = img / np.sum(img, axis=0)
        img[:, :, 2] = color.rgb2grey(img)
        return img

    elif target == 'H':
        return color.rgb2hsv(img)[:, :, 0]

    else:
        # the original `raise "..."` raised a plain string, which is itself
        # a TypeError in Python 3; raise a proper exception instead
        raise ValueError("{} is not supported.".format(target))


class HierarchicalGrouping(object):
    """Selective-search style hierarchical merging of an over-segmentation.

    Starting from an initial label map, repeatedly merges the most similar
    neighbouring region pair (colour/texture/size/fill similarity) until
    few enough candidate pairs remain.
    """

    def __init__(self, img, img_seg, sim_strategy):
        """
        Args:
            img: image the region features are computed on.
            img_seg: integer label map of the initial segmentation.
            sim_strategy: similarity flag string, subset of 'CTSF'.
        """
        self.img = img
        self.sim_strategy = sim_strategy
        self.img_seg = img_seg.copy()  # mutated during merging; keep caller's array intact
        self.labels = np.unique(self.img_seg).tolist()

    def build_regions(self):
        """Compute size, bounding box and colour/texture histograms per region."""
        self.regions = {}
        lbp_img = generate_lbp_image(self.img)
        for label in self.labels:
            mask = self.img_seg == label
            # BUG FIX: the original computed (self.img_seg == 1).sum(), i.e.
            # the size of label 1 for every region; count this region's pixels.
            size = mask.sum()
            region_slice = find_objects(mask)[0]
            # box as (x_min, y_min, x_max, y_max); find_objects returns (row, col) slices
            box = tuple([region_slice[i].start for i in (1, 0)] +
                        [region_slice[i].stop for i in (1, 0)])

            self.regions[label] = {
                'size': size,
                'box': box,
                'color_hist': calculate_color_hist(mask, self.img),
                'texture_hist': calculate_texture_hist(mask, lbp_img),
            }

    def build_region_pairs(self):
        """Initialize the similarity dict `s` over all neighbouring region pairs."""
        self.s = {}
        for i in self.labels:
            for j in self._find_neighbors(i):
                if i < j:
                    self.s[(i, j)] = calculate_sim(self.regions[i],
                                                   self.regions[j],
                                                   self.img.size,
                                                   self.sim_strategy)

    def _find_neighbors(self, label):
        """
        Parameters
        ----------
            label : int
                label of the region
        Returns
        -------
            neighbors : list
                labels found on the outer boundary of the region
                (may include `label` itself)
        """
        boundary = segmentation.find_boundaries(self.img_seg == label,
                                                mode='outer')
        return np.unique(self.img_seg[boundary]).tolist()

    def get_highest_similarity(self):
        """Return the (i, j) key of the currently most similar region pair."""
        # stable sort keeps the original tie-breaking (last of equal maxima)
        return sorted(self.s.items(), key=lambda kv: kv[1])[-1][0]

    def merge_region(self, i, j):
        """Merge regions i and j into a new region under a fresh label."""
        # generate a unique label and record it
        new_label = max(self.labels) + 1
        self.labels.append(new_label)

        ri, rj = self.regions[i], self.regions[j]

        new_size = ri['size'] + rj['size']
        new_box = (min(ri['box'][0], rj['box'][0]),
                   min(ri['box'][1], rj['box'][1]),
                   max(ri['box'][2], rj['box'][2]),
                   max(ri['box'][3], rj['box'][3]))
        self.regions[new_label] = {
            'box': new_box,
            'size': new_size,
            # histograms merge as size-weighted averages
            'color_hist':
                (ri['color_hist'] * ri['size']
                 + rj['color_hist'] * rj['size']) / new_size,
            'texture_hist':
                (ri['texture_hist'] * ri['size']
                 + rj['texture_hist'] * rj['size']) / new_size,
        }

        # relabel the merged pixels in the segmentation mask
        self.img_seg[self.img_seg == i] = new_label
        self.img_seg[self.img_seg == j] = new_label

    def remove_similarities(self, i, j):
        """Drop every similarity entry touching i or j, and their labels."""
        stale = [key for key in self.s if (i in key) or (j in key)]
        for key in stale:
            del self.s[key]

        self.labels.remove(i)
        self.labels.remove(j)

    def calculate_similarity_for_new_region(self):
        """Compute similarities between the newest region and its neighbours."""
        i = max(self.labels)
        for j in self._find_neighbors(i):
            # i is the largest label, so store the pair as (j, i)
            self.s[(j, i)] = calculate_sim(self.regions[i],
                                           self.regions[j],
                                           self.img.size,
                                           self.sim_strategy)

    def is_empty(self):
        """True when no candidate pairs remain."""
        return not self.s

    def num_regions(self):
        """Number of candidate region *pairs* currently tracked."""
        return len(self.s)


def sscolor(image, seg_num=200, power=1.2,
            color_space='Lab', k=10, sim_strategy='CTSF'):
    """Selective-search based colour simplification of an image.

    Over-segments with Felzenszwalb, hierarchically merges regions by
    similarity until at most `seg_num` candidate pairs remain, then
    flat-fills the regions and applies a gamma-style `power` adjustment.
    """
    initial_seg = segmentation.felzenszwalb(image, scale=k, sigma=0.8, min_size=100)
    feature_img = adaptive_label2rgb(initial_seg, image, kind='mix')
    feature_img = switch_color_space(feature_img, color_space)

    grouping = HierarchicalGrouping(feature_img, initial_seg, sim_strategy)
    grouping.build_regions()
    grouping.build_region_pairs()

    # hierarchical merging loop
    while grouping.num_regions() > seg_num:
        i, j = grouping.get_highest_similarity()
        grouping.merge_region(i, j)
        grouping.remove_similarities(i, j)
        grouping.calculate_similarity_for_new_region()

    # recolouring can produce values below -1, which would break the
    # arithmetic below, so clamp into [-1, 1] first
    result = adaptive_label2rgb(grouping.img_seg, image, kind='mix')
    result = np.clip(result, -1, 1)

    result = (result + 1) / 2   # map to [0, 1]
    result = result ** power    # gamma-style adjustment
    result = result / (np.max(result) + 1e-5)
    return result * 2 - 1       # back to [-1, 1]


from typing import Union


def denormalize(im: Union[np.ndarray, torch.Tensor], mean=0.5, std=0.5):
    """Invert a `(im - mean) / std` normalization: returns `im * std + mean`."""
    return mean + std * im


'''
Gradient penalty, used to aggregate the discriminator loss:
loss_D = lossr + lossf + 0.2 * gp
'''
from torch import autograd


def gradient_penalty(D, xr, xf):
    """
    WGAN-GP gradient penalty on random interpolations between real and fake.

    :param D: discriminator; maps a batch to per-sample logits
    :param xr: real batch, shape [b, ...]
    :param xf: fake batch, same shape as xr
    :return: scalar penalty tensor, mean of (||grad||_2 - 1)^2
    """
    batchsz = xr.shape[0]
    # FIX: sample the interpolation coefficient on the inputs' device instead
    # of the hard-coded .cuda(), so CPU tensors work too
    t = torch.rand(batchsz, 1, 1, 1, device=xr.device, dtype=xr.dtype)
    # [b, 1, 1, 1] => broadcast to the full input shape
    t = t.expand_as(xr)
    # interpolation; detach cuts the fake path out of the generator's graph
    mid = t * xr + (1 - t) * xf.detach()
    mid.requires_grad_()

    pred = D(mid)
    grads = torch.autograd.grad(outputs=pred, inputs=mid,
                                grad_outputs=torch.ones_like(pred),
                                create_graph=True, retain_graph=True,
                                only_inputs=True)[0]

    # NOTE(review): the norm is taken over dim=1 only (channels); the usual
    # WGAN-GP flattens per sample — kept as-is to preserve behavior.
    gp = torch.pow(grads.norm(2, dim=1) - 1, 2).mean()
    return gp
