from functools import partial

import numpy as np
import skimage
import torch_scatter
from matplotlib import pyplot as plt

import silearn
import silearn.graph
import torch

from model.encoding_tree import Partitioning
from optimizer.enc.partitioning.propagation_others import OperatorPropagation


class SegmentorCore:
    """Graph-based image segmentor.

    Pipeline: pre-process the image, build a sparse spatial-kNN pixel graph
    with exponential-kernel edge weights, then run an encoding-tree
    partitioning optimizer (optionally with iterative refinement hooks)
    until roughly ``target_size`` segments remain.
    """

    def __init__(self):
        # Hook lists; SegmentorBuilder appends/prepends behaviour here.
        self.pre_proc_methods = [(lambda img: torch.tensor(img))]
        self.init_optim_methods = []
        self.iteration_methods = []
        # k: neighbours per pixel in the spatial kNN graph; d: window size.
        self.k, self.d = 49, 7
        self.t = 1              # temperature of the edge-weight kernel
        self.p = 0.5            # merge proportion (non-scaling mode only)
        self.iterative = False  # run iteration hooks between merge rounds
        self.target_size_scaling = True
        self.obj = "SE_M"       # optimizer objective name

    def pixel_distance(self, a, b, dx, dy):
        """Euclidean feature distance between pixels ``a`` and ``b``,
        scaled by the length of the spatial offset ``(dx, dy)``."""
        return ((a - b) ** 2).sum(dim = -1) ** 0.5 * (dx**2 + dy**2) ** 0.5

    def construct_graph(self, img):
        """Build a GraphSparse over all pixels with kernelized kNN weights."""
        w, es, et = silearn.spatial_knn_graph(img, self.k, self.d)
        # Smaller distance -> larger weight, normalized by the mean distance.
        w = torch.exp2(- w.double() / w.mean() / self.t)
        # Fix: torch.clip is not in-place — the original call discarded its
        # result, so the lower bound on the weights was never applied and
        # zero weights could reach later torch.log2 calls.
        w = torch.clip(w, min=1e-256)
        imgH = img.shape[0]
        imgW = img.shape[1]
        return silearn.graph.GraphSparse(torch.cat([es.unsqueeze(1), et.unsqueeze(1)], dim = 1), w, n_vertices = imgH * imgW)

    def pre_process(self, img):
        """Apply the configured pre-processing chain to the raw image."""
        for x in self.pre_proc_methods:
            img = x(img)
        return img

    def iteration(self, e, w, trans_idx):
        """Optimizer callback after a merge round: pools node features by
        the merge mapping ``trans_idx`` and lets the iteration hooks
        reweight the edges. Returns the (possibly updated) ``(e, w)``."""
        self.cur_num_nodes = trans_idx.shape[0]
        if not self.iterative:
            return e, w
        self.node_fea = torch_scatter.scatter_sum(self.node_fea, trans_idx, dim = 0)
        for x in self.iteration_methods:
            e, w = x(e, w, self.node_fea)
        return e, w

    def extract_hyper_fea(self, img):
        """Store per-pixel features with an extra all-ones column, so the
        last column accumulates pixel counts when features are summed."""
        fea = img.reshape(-1, img.shape[-1])
        self.node_fea = torch.cat([fea, torch.ones_like(fea[:, :1])], dim = -1)

    def get_adj_cover(self, e_idx, img_w):
        """Edge mask restricting merges; None means no restriction.
        Overridden by SegmentorBuilder.constrain_neighbor_*connect."""
        return None

    def get_optim(self, g, partition_buffer):
        """Create the propagation optimizer and apply the init hooks."""
        optim = OperatorPropagation(partition_buffer, objective=self.obj)
        for x in self.init_optim_methods:
            optim = x(optim)
        return optim

    def run(self, img, target_size = 100):
        """Segment ``img`` into roughly ``target_size`` segments.

        Returns the per-pixel segment labels reshaped to (H*W, 1).
        """
        img_w = img.shape[1]
        img = self.pre_process(img)
        g = self.construct_graph(img)
        self.extract_hyper_fea(img)
        partitioning = Partitioning(g, None)
        self.optim = self.get_optim(g, partitioning)
        self.cur_num_nodes = g.num_vertices
        if self.target_size_scaling:
            # Geometric schedule: shrink towards target_size in ~20% steps.
            # NOTE(review): g.n_vertices here vs g.num_vertices above —
            # presumably aliases on GraphSparse; confirm against silearn.
            num_vertices = int(target_size * (g.n_vertices / target_size) ** 0.5)
            if self.iterative:
                self.optim.on_operated = self.iteration
            adj_cover = self.get_adj_cover(g._edges, img_w)
            self.optim.perform(p=1.0, m_scale=0, min_com=num_vertices, adj_cover=adj_cover, re_compute=False)

            while(num_vertices > target_size):
                adj_cover = None  # stored inside optim after 1st iteration
                self.optim.perform(p=1.0, m_scale=0, min_com=num_vertices, adj_cover=adj_cover, re_compute=False)
                num_vertices = int(num_vertices * (0.8))
            self.optim.perform(p = 1.0, m_scale=0, min_com = target_size, adj_cover=None, re_compute=False)
        else:
            if self.iterative:
                self.optim.on_merged = self.iteration
            self.optim.perform(p = self.p, m_scale=0, min_com = target_size, adj_cover=self.get_adj_cover(g._edges, img_w))

        # Drop references so large tensors can be freed between runs.
        self.node_fea = None
        self.optim = None
        return partitioning.node_id.reshape(-1, 1)




class SegmentorBuilder:
    """Fluent configurator for SegmentorCore.

    Each method mutates the wrapped ``self.segmentor`` and returns ``self``
    so configuration calls can be chained.
    """

    def __init__(self, k = 80, d = 9, t = 1., p = 0.5, objective = "SE_M", scaling_tgt_size = True):
        self.e0_inserted = False
        self.segmentor = SegmentorCore()
        self.segmentor.k, self.segmentor.t, self.segmentor.d = k, t, d
        self.segmentor.p = p
        self.segmentor.obj = objective
        # Fix: the original hard-coded True here, silently ignoring the
        # scaling_tgt_size argument (default True keeps old behaviour).
        self.segmentor.target_size_scaling = scaling_tgt_size

    def set_gpu(self, device_name = "cuda"):
        """Move the (already tensorized) image to ``device_name``."""
        self.segmentor.pre_proc_methods.append(lambda img: img.to(device_name))
        return self

    def convertLAB(self):
        """Prepend a 0-255 RGB -> CIELAB conversion to pre-processing."""
        self.segmentor.pre_proc_methods.insert(0, lambda img: skimage.color.rgb2lab(img / 255).astype(np.float64))
        return self

    def normalize_hyper_fea(self):
        """Divide the image by its per-channel std before extracting the
        hyper features (wraps the segmentor's extract_hyper_fea)."""
        old_hyper_fea = self.segmentor.extract_hyper_fea
        def hyper_fea_1(img, seg):
            std = torch.std(img, dim=(0, 1), keepdim=True)
            img = img / std
            old_hyper_fea(img)
        self.segmentor.extract_hyper_fea = partial(hyper_fea_1, seg = self.segmentor)
        return self

    def constrain_neighbor_4connect(self):
        """Restrict merges to 4-connected pixel pairs (row-major ids, so an
        absolute id difference of 1 or of the image width)."""
        def f(idx, width):
            delta = torch.abs(idx[:, 0] - idx[:, 1])
            return (delta == 1) + (delta == width) > 0
        self.segmentor.get_adj_cover = lambda idx, w: f(idx, w)
        return self

    def constrain_neighbor_8connect(self):
        """Restrict merges to 8-connected pixel pairs (adds the two
        diagonal offsets, width-1 and width+1, to the 4-connect mask)."""
        def f(idx, width):
            delta = torch.abs(idx[:, 0] - idx[:, 1])
            return (delta == 1) + (delta == width) + (delta == width + 1) + (delta == width-1) > 0
        self.segmentor.get_adj_cover = lambda idx, w: f(idx, w)
        return self

    def iterative_mean_shift(self, ratio = 0.05):
        """After each merge round, blend the (log) edge weights with the
        distance between super-pixel mean features, mean-shift style."""
        self.segmentor.iterative = True
        ratio = ratio * self.segmentor.p
        def refine(e, w, fea):
            flag = e[:, 0] != e[:, 1]             # ignore self-loops
            fea_mean = fea[:, :-1] / fea[:, -1:]  # summed features / counts
            sq = (fea_mean[e[flag, 0]] - fea_mean[e[flag, 1]]) ** 2
            sq = sq.sum(dim = -1) ** .5
            wd = torch.log2(w)
            wt = torch.std(wd)
            nt = torch.std(sq)
            # Mix the std-matched feature distance into the log-weights.
            w[flag] = torch.exp2((-sq * (wt * ratio / nt) + wd[flag] * (1 - ratio)))
            return e, w
        self.segmentor.iteration_methods.append(refine)
        return self

    def get_full_graph(self, N, dev=None):
        """Return edge vectors (es, et, w) of the complete graph on N
        nodes, with w zero-initialized.

        :param N: number of nodes.
        :param dev: torch device; defaults to cuda when available.
        """
        if dev is None:
            dev = "cuda" if torch.cuda.is_available() else "cpu"
        es = torch.arange(0, N, dtype=torch.int64, device=dev).repeat(N)
        et = torch.arange(0, N, dtype=torch.int64, device=dev).repeat_interleave(N)
        w = torch.zeros(N * N, device=dev)
        return es, et, w

    def fuse_full_graph(self, threshold = 500, ratio = 1.0):
        """Once fewer than ``threshold`` super-pixels remain, densify the
        graph to a complete one and add feature-similarity edge weights."""
        self.segmentor.iterative = True
        def insert_e0(optim: OperatorPropagation):
            # Track, per edge, how many original graph edges it aggregates.
            optim.edge_descriptors.append(torch.ones_like(optim.enc.graph._p))
            return optim

        def refine(e, w, fea):
            n_vertices = fea.shape[0]
            if n_vertices < threshold:
                fea_mean = fea[:, :-1] / fea[:, -1:]
                dev = fea.device if hasattr(fea, "device") else None
                es, et, w1 = silearn.full_coo_graph(n_vertices, dev, silearn.get_dat_backend(fea))
                e1 = torch.stack([es, et], dim = 1)
                if self.segmentor.optim.adj_cover is not None:
                    # Pad the cover mask so the new dense edges pass through.
                    self.segmentor.optim.adj_cover = torch.nn.functional.pad(self.segmentor.optim.adj_cover, (0, e1.shape[0] - e.shape[0]), value=0)
                e = torch.concat([e, e1], dim = 0)
                w = torch.concat([w, w1], dim=0)
                num = self.segmentor.optim.edge_descriptors[0]
                num = torch.concat([num, torch.zeros_like(w1)])

                e, w, num = silearn.sumup_duplicates(e, w, num)
                self.segmentor.optim.edge_descriptors = []

                # max_num: number of pixel pairs each super-pixel edge spans.
                max_num = fea[e[:, 0], -1] * fea[e[:, 1], -1]

                # NOTE(review): es/et index the pre-dedup complete graph
                # while w was deduplicated above — presumably
                # sumup_duplicates keeps the full-graph ordering/size here;
                # verify against silearn before relying on this path.
                sq = (fea_mean[es] - fea_mean[et]) ** 2
                sq = sq.sum(dim=-1) ** .5
                sq += torch.log2(torch.clip(w / w.sum(), min=2 ** -100)) * 0.1

                w2 = torch.exp2(- sq / (sq.mean() * self.segmentor.t / ratio))

                # Weight unseen pixel pairs by the similarity kernel.
                w = w + w2 * (max_num - num)

                self.segmentor.iterative = False  # densify only once
            return e, w
        self.segmentor.iteration_methods.append(refine)
        if not self.e0_inserted:
            self.segmentor.init_optim_methods.append(insert_e0)
        self.e0_inserted = True
        return self




if __name__ == '__main__':
    import PIL.Image
    import os

    # Pin the visible GPU before any CUDA context is created.
    os.environ['CUDA_VISIBLE_DEVICES'] = "1"
    img = PIL.Image.open("img1.png")
    img = np.array(img)[:, :, :3]  # keep RGB only (drops alpha if present)
    img = torch.tensor(img).double() / 255

    # SegBuilder = SegmentorBuilder().set_gpu().convertLAB()\
    #     .normalize_hyper_fea().iterative_mean_shift().constrain_neighbor_8connect()
    SegBuilder = SegmentorBuilder(d=9,t=1., objective="SE_M").convertLAB().set_gpu().constrain_neighbor_8connect()\
         .normalize_hyper_fea().iterative_mean_shift(ratio=0.01)#.fuse_full_graph()
    # First run is a warm-up (CUDA init etc.); only the second run is timed.
    seg = SegBuilder.segmentor.run(img, 100)
    import time
    t = time.time()
    seg = SegBuilder.segmentor.run(img,100)
    print(time.time() - t)
    print(seg.max() + 1)  # number of segments (labels are 0-based)

    imgH = img.shape[0]
    imgW = img.shape[1]
    img = img.cuda()

    torch.random.manual_seed(321)
    # imgc = torch.rand((seg.max() + 1, 3)).cuda()
    # Colour each segment with its mean RGB value.
    imgc = torch_scatter.scatter_mean(img.reshape(-1, 3), seg.cuda().reshape(-1), dim = 0)
    imgc2 = torch.rand((seg.max() + 1, 3)).cuda()
    cntc = silearn.scatter_cnt(seg.reshape(-1))  # pixel count per segment

    print(cntc)
    imgs = imgc[seg].reshape(imgH, imgW, 3)
    # imgs2 = img.permute(1, 2, 0).reshape(imgH, imgW, 3)
    # imgs = (img + imgs * 2) / 3

    # Detect segment boundaries: horizontal/vertical difference kernels on
    # the label map give non-zero responses where labels change.
    imgcl = seg.reshape(1, 1, imgH, imgW).float()
    kernel1 = torch.Tensor([-1, 1, 0]).reshape(1, 1, 1, 3).cuda()
    kernel2 = torch.Tensor([-1, 1, 0]).reshape(1, 1, 3, 1).cuda()
    import torch.nn.functional as F

    # NOTE(review): torch.max(x, 1) reduces over dim 1 (size 1 here) and
    # returns a (values, indices) pair — presumably an element-wise cap at
    # 1 was intended; the clamp below bounds the result anyway. Confirm.
    imgcl = torch.max(
        torch.abs(F.conv2d(imgcl, kernel1, padding=(0, 1))) + torch.abs(F.conv2d(imgcl, kernel2, padding=(1, 0))), 1)

    # Tint boundary pixels red (raise R, lower G/B), then clamp to [0, 1].
    imgs[:, :, 0] += imgcl[0][0]
    imgs[:, :, 1] -= imgcl[0][0]
    imgs[:, :, 2] -= imgcl[0][0]

    imgs = imgs.clamp(max=1, min=0)
    imgs = np.array(imgs.cpu())
    plt.imsave("output.png", imgs)
    plt.axis("off")
    plt.imshow(imgs)
    plt.show()