import os
from itertools import chain
from typing import Tuple, List, NamedTuple

import cv2
import numpy as np
from scipy.ndimage.filters import gaussian_filter
from skimage.filters import threshold_otsu
from PIL import Image
from sklearn.cluster import KMeans
from scipy.ndimage import label
from numba import njit
from torch.utils.data import Dataset
from concurrent.futures import ProcessPoolExecutor
from skimage.draw import disk
import torchvision.transforms as T
import torchvision.transforms.functional as TF
from matplotlib import pyplot as plt
import random

from tqdm import tqdm

def isimg(f: str) -> bool:
    """Return True if *f* has a common raster-image extension."""
    return f.split(".")[-1].lower() in ["png", "jpg", "jpeg"]


def isnpz(f: str) -> bool:
    """Return True if *f* has a .npz extension."""
    return f.split(".")[-1].lower() in ["npz"]


def listdir(p: str) -> List[str]:
    """List entries of directory *p* as forward-slash joined paths."""
    return [os.path.join(p, f).replace('\\', '/') for f in os.listdir(p)]


def same_artist(a: str, b: str) -> bool:
    """True when two paths share the same parent-directory (artist) name."""
    return a.split("/")[-2] == b.split("/")[-2]

import torch


class Rangei(NamedTuple):
    """Integer interval endpoints (a, b); unpacked elsewhere as np.random.randint(*self)."""
    a: int
    b: int


class Rangef(NamedTuple):
    """Float interval endpoints (a, b)."""
    a: float
    b: float


class xDog:
    """Extended difference-of-Gaussians (xDoG) line extraction.

    Callable on an RGB image; returns a binary (0/1) map where 1 marks line
    pixels after Otsu thresholding and inversion.
    """

    def __init__(self,
                 γ: float = 0.95,
                 ϕ: float = 1e9,
                 ϵ: float = -1e1,
                 k: float = 4.5,
                 σ: float = 0.3, ) -> None:
        # xDoG parameters: γ scales the wider Gaussian, ϕ controls tanh
        # sharpness, ϵ is the soft threshold, k the sigma ratio, σ the
        # base blur radius.
        self.γ = γ
        self.ϕ = ϕ
        self.ϵ = ϵ
        self.k = k
        self.σ = σ

    def __call__(self, img) -> np.ndarray:
        # Channel mean as grayscale; cast first so uint8 inputs do not
        # wrap around during the sum.
        x = (img[..., 0].astype(np.float64) + img[..., 1] + img[..., 2]) / 3

        gaussian_a = gaussian_filter(x, self.σ)
        gaussian_b = gaussian_filter(x, self.σ * self.k)

        dog = gaussian_a - self.γ * gaussian_b

        # BUGFIX(readability): the original read self.ε / self.φ with glyph
        # variants different from __init__'s ϵ / ϕ; that only worked because
        # Python NFKC-normalizes identifiers. Use the same code points.
        inf = dog < self.ϵ
        xdog = inf * 1 + ~inf * (1 - np.tanh(self.ϕ * dog))

        # Normalize to [0, 1], binarize with Otsu, then invert so lines are 1.
        xdog -= xdog.min()
        xdog /= xdog.max()
        xdog = xdog >= threshold_otsu(xdog)
        xdog = 1 - xdog

        return xdog


# Overlay: blend a line-art layer over an image wherever the mask is set.
def compose(
        img: np.ndarray, lineart: np.ndarray, mask: np.ndarray, black: float = 1.0,
) -> np.ndarray:
    """Composite *lineart* onto *img* where *mask* is 1.

    Line pixels with value 0 are lifted to ``1 - black``, so *black*
    controls how dark drawn lines appear (1.0 keeps them fully black).
    Operates on a copy — the original mutated the caller's *lineart*.
    """
    lineart = lineart.copy()
    lineart[lineart == 0] = 1 - black
    res = img * (1 - mask)[..., None] + (lineart * mask)[..., None]
    return res


def topo_compute_normal(dist: np.ndarray) -> np.ndarray:
    """Build a pseudo normal map from a distance/height field.

    Forward differences along x and y become the slope channels, a constant
    0.75 fills the "up" channel, and each pixel's vector is normalized to
    unit length. Returns an (H, W, 3) float map.
    """
    grad_x = cv2.filter2D(dist, cv2.CV_32F, np.array([[-1, +1]]))
    grad_y = cv2.filter2D(dist, cv2.CV_32F, np.array([[-1], [+1]]))
    up = np.zeros_like(grad_x + grad_y, dtype=np.float32) + 0.75
    normals = np.stack([up, grad_y, grad_x], axis=2)
    magnitude = np.sum(normals ** 2.0, axis=2, keepdims=True) ** 0.5
    return normals / magnitude


def get_regions(skeleton: np.ndarray) -> List[Tuple[np.ndarray, np.ndarray]]:
    """Segment a skeleton image into filled regions.

    Builds a pseudo normal map from the skeleton, binarizes the skeleton
    into watershed seed markers, floods with cv2.watershed, reassigns the
    boundary label via thinning(), and returns per-region pixel coordinates
    as (xs, ys) array pairs.
    NOTE(review): assumes *skeleton* is a 2-D array scaled to 0..255 (the
    caller passes model output * 255) — confirm for other call sites.
    """
    marker = skeleton.copy()

    # Normal map rescaled from [-1, 1] to [0, 255] for cv2.watershed input.
    normal = topo_compute_normal(marker) * 127.5 + 127.5
    normal = normal.clip(0, 255).astype(np.uint8)

    # Binarize: values > 100 become 255, everything else 0.
    marker[marker > 100] = 255
    marker[marker < 255] = 0

    # Connected components of the binary marker become watershed seeds.
    labels, _ = label(marker / 255)
    labels = labels.astype(np.int32)

    # watershed marks boundaries with -1; +1 shifts them to the 0 "line" id
    # that thinning() then flood-fills with neighbouring region labels.
    water = cv2.watershed(normal, labels)
    water = thinning(water + 1)

    return find_all(water)


def thinning(fill_map: np.ndarray, max_iter: int = 100) -> np.ndarray:
    """Erase the line label (0) by flooding it with neighbouring region ids.

    Each pass finds pixels still labelled 0, computes the one-pixel border
    between the line area and everything else, and assigns each border pixel
    the label of its first non-line 8-neighbour. Stops when no line pixels
    remain or after *max_iter* passes.
    """
    line_id = 0
    h, w = fill_map.shape[:2]
    result = fill_map.copy()

    for _ in range(max_iter):
        line_points = np.where(result == line_id)
        if not len(line_points[0]) > 0:
            break  # no line pixels left

        # Mask that is 0 on line pixels and 255 elsewhere; dilating with a
        # 3x3 cross and subtracting leaves exactly the one-pixel border.
        line_mask = np.full((h, w), 255, np.uint8)
        line_mask[line_points] = 0
        line_border_mask = cv2.morphologyEx(
            line_mask,
            cv2.MORPH_DILATE,
            cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3)),
            anchor=(-1, -1),
            iterations=1
        ) - line_mask
        line_border_points = np.where(line_border_mask == 255)

        result_tmp = result.copy()
        for i in range(len(line_border_points[0])):
            x, y = line_border_points[1][i], line_border_points[0][i]

            # Scan the 8-neighbourhood for the first non-line label.
            # BUGFIX: bounds checks were `x - 1 > 0` / `y - 1 > 0`, which
            # wrongly skipped valid neighbours in row 0 and column 0.
            if x - 1 >= 0 and result[y, x - 1] != line_id:
                result_tmp[y, x] = result[y, x - 1]
                continue
            if x - 1 >= 0 and y - 1 >= 0 and result[y - 1, x - 1] != line_id:
                result_tmp[y, x] = result[y - 1, x - 1]
                continue
            if y - 1 >= 0 and result[y - 1, x] != line_id:
                result_tmp[y, x] = result[y - 1, x]
                continue
            if y - 1 >= 0 and x + 1 < w and result[y - 1, x + 1] != line_id:
                result_tmp[y, x] = result[y - 1, x + 1]
                continue
            if x + 1 < w and result[y, x + 1] != line_id:
                result_tmp[y, x] = result[y, x + 1]
                continue
            if x + 1 < w and y + 1 < h and result[y + 1, x + 1] != line_id:
                result_tmp[y, x] = result[y + 1, x + 1]
                continue
            if y + 1 < h and result[y + 1, x] != line_id:
                result_tmp[y, x] = result[y + 1, x]
                continue
            if y + 1 < h and x - 1 >= 0 and result[y + 1, x - 1] != line_id:
                result_tmp[y, x] = result[y + 1, x - 1]
                continue

        result = result_tmp.copy()

    return result


def find_all(labeled: np.ndarray) -> List[Tuple[np.ndarray, np.ndarray]]:
    """Collect pixel coordinates for every positive label in *labeled*.

    Returns one (xs, ys) coordinate-array pair per label 1..max(labeled);
    labels <= 0 are ignored. Returns an empty list when no labels exist.
    (The original annotated the return as np.ndarray, but it has always
    returned a list.)
    """
    hist_size = int(np.max(labeled))
    if hist_size == 0:
        return []

    # First pass counts pixels per label so exactly-sized buffers can be
    # preallocated for the numba-compiled tracing pass.
    all_counts = [0 for _ in range(hist_size)]
    count_all(labeled, all_counts)

    xs = [np.zeros(shape=(item,), dtype=np.uint32) for item in all_counts]
    ys = [np.zeros(shape=(item,), dtype=np.uint32) for item in all_counts]
    cs = [0 for item in all_counts]
    trace_all(labeled, xs, ys, cs)

    return list(zip(xs, ys))


@njit
def count_all(labeled: np.ndarray, all_counts: List[int]) -> None:
    """Accumulate into *all_counts* the pixel count of each positive label
    (label value v is tallied at index v - 1)."""
    rows, cols = labeled.shape
    for r in range(rows):
        for c in range(cols):
            idx = labeled[r, c] - 1
            if idx > -1:
                all_counts[idx] = all_counts[idx] + 1
    return


@njit
def trace_all(
        labeled: np.ndarray,
        xs: List[np.ndarray],
        ys: List[np.ndarray],
        cs: List[int],
) -> None:
    """Write the (row, col) coordinates of every positively-labelled pixel
    into the preallocated xs/ys buffers, using cs as per-label cursors."""
    rows, cols = labeled.shape
    for r in range(rows):
        for c in range(cols):
            lbl = labeled[r, c] - 1
            if lbl > -1:
                cursor = cs[lbl]
                xs[lbl][cursor] = r
                ys[lbl][cursor] = c
                cs[lbl] = cursor + 1
    return


# Save the preprocessed mask and colormap.
def save_preprocessed(
        path: str, colors: np.ndarray, masks: np.ndarray,
) -> None:
    """Persist *colors* and *masks* to *path* as an .npz archive under the
    keys "colors" and "masks" (read back by load_preprocessed)."""
    np.savez(path, colors=colors, masks=masks)


def load_preprocessed(path: str) -> Tuple[np.ndarray, np.ndarray]:
    """Load the (colors, masks) arrays written by save_preprocessed.

    Uses a context manager so the underlying zip file handle is closed —
    the original left the NpzFile open, leaking the descriptor.
    """
    with np.load(path) as loader:
        # Arrays are materialized by __getitem__ before the archive closes.
        return loader["colors"], loader["masks"]


# Preprocess a single image: segment it and cache colors/masks as .npz.
def preprocess(model: str, f: str, dest: str) -> int:
    """Run the segmentor on image file *f* and cache the result under *dest*.

    Output goes to dest/<artist>/<stem>.npz, where <artist> is the image's
    parent-directory name. Returns 1 in all cases (including when the target
    already exists) so callers can count progress uniformly.
    """
    *_, artist, img_name = f.split("/")
    folder = os.path.join(dest, artist)
    os.makedirs(folder, exist_ok=True)

    # BUGFIX: the stem was taken with split('.')[-2], which truncates file
    # names containing extra dots ("a.b.png" -> "b"); splitext keeps them.
    stem, _ = os.path.splitext(img_name)
    path = os.path.join(folder, f"{stem}.npz")
    if os.path.isfile(path):
        return 1  # already preprocessed

    segmentor = Segmentor(model)

    # (Renamed from `img`, which previously shadowed the path component.)
    image = Image.open(f).convert("RGB")
    size = image.size

    # Segment at a fixed 256x256 working resolution, then resize the
    # results back to the original image size.
    image = np.array(image.resize((256, 256))) / 255
    colors, masks = segmentor(image)
    colors = cv2.resize((colors * 255).astype(np.uint8), dsize=size)
    masks = cv2.resize((masks * 255).astype(np.uint8), dsize=size)

    # Move the model off the GPU before the next image.
    segmentor.cpu()
    del segmentor

    save_preprocessed(path, colors, masks)
    return 1


def chromaTransform(img: np.ndarray, beta=.5):
    """Map an RGB image to (scaled brightness, r-chromaticity, g-chromaticity).

    Channel 0 is beta * channel-sum / 3 (mean intensity scaled by *beta*);
    channels 1 and 2 are R and G divided by the epsilon-stabilized channel
    sum. Integer inputs are promoted to float — the original computed in the
    input dtype, so uint8 images overflowed in R + G + B and truncated the
    chromaticity ratios to 0.
    """
    # Promote integer inputs to float64; keep float inputs at their dtype.
    img_f = img.astype(np.float64) if img.dtype.kind != 'f' else img.copy()
    red = img_f[..., 0]
    green = img_f[..., 1]
    blue = img_f[..., 2]
    total = red + green + blue + 1e-6  # epsilon avoids division by zero
    result = np.zeros_like(img_f)
    result[..., 0] = beta * total / 3.0
    result[..., 1] = red / total
    result[..., 2] = green / total
    return result

class Segmentor:
    """TorchScript skeletonizer wrapper.

    Turns an RGB image in [0, 1] into a K-means-quantized color map plus
    one binary mask per color cluster.
    """

    def __init__(self, path):
        # Load the scripted model straight onto the GPU in eval mode
        # (assumes CUDA is available at construction time).
        self.model = torch.jit.load(path).cuda().eval()

    def cuda(self):
        """Move the model to GPU; returns self for chaining."""
        self.model = self.model.cuda()
        return self

    def cpu(self):
        """Move the model to CPU (frees GPU memory); returns self."""
        self.model = self.model.cpu()
        return self

    def __call__(self, img: np.ndarray, max_colors: int = 10):
        """Return (colors, masks).

        colors is (H, W, 3) with each segmented region flat-filled by its
        median color, then quantized to *max_colors* K-means clusters;
        masks is (H, W, max_colors) with a one-hot mask per cluster.
        """
        with torch.no_grad():
            # TorchScript modules don't reliably expose .parameters(), so
            # the device is inferred from availability instead of the model.
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            inp = torch.Tensor(img).permute((2, 0, 1)).unsqueeze(0).to(device)
            skeleton = self.model(inp)
            skeleton = skeleton.squeeze(0)[0].cpu().numpy()
            # Each region is a pair of coordinate arrays; drop empty ones.
            regions = list(filter(len, get_regions(skeleton * 255.0)))
            colors = np.zeros((*img.shape[:2], 3))
            for region in regions:
                y = np.clip(region[0], 0, img.shape[0] - 1)
                x = np.clip(region[1], 0, img.shape[1] - 1)
                if len(img[y, x]):
                    # Flat-fill the region with its per-channel median color.
                    colors[y, x] = [np.median(img[y, x][..., i]) for i in range(3)]
        H, W, C = colors.shape
        colors = colors.reshape((H * W, C))
        kmeans = KMeans(n_clusters=max_colors)
        cluster_ids = kmeans.fit_predict(colors)
        colors = kmeans.cluster_centers_[cluster_ids].reshape((H, W, C))
        cluster_ids = cluster_ids.reshape((H, W))
        masks = np.zeros((*img.shape[:2], kmeans.n_clusters))
        # One binary mask per cluster. (Loop variable renamed from `label`,
        # which shadowed the scipy.ndimage.label import.)
        for cluster in range(kmeans.n_clusters):
            masks[cluster_ids == cluster, cluster] = 1
        return colors, masks

    # root: image root directory; dest: where preprocessed files are saved.
    @classmethod
    def preprocess(cls, model: str, root: str, dest: str) -> None:
        """Sequentially preprocess every image under root/<artist>/."""
        files = chain(*(filter(isimg, listdir(f)) for f in listdir(root)))
        files = list(files)

        pbar = tqdm(iterable=files, desc='preprocess:')
        for f in pbar:
            preprocess(model, f, dest)

    @classmethod
    def sample(
            cls, img: np.ndarray, masks: np.ndarray, n_color: float,
    ) -> np.ndarray:
        """Return the pixelwise union of a random subset of masks.

        *n_color* is the fraction of mask channels to draw (with
        replacement); values outside (0, 1) select everything, returning an
        all-ones mask.
        """
        if n_color <= 0 or n_color >= 1:
            return np.ones(img.shape[:2], dtype=np.float32)

        # BUGFIX: draw at least one mask — small fractions used to round n
        # down to 0, making np.max reduce an empty axis and raise.
        n = max(1, int(masks.shape[-1] * n_color))
        idxs = np.random.choice(masks.shape[-1], size=(n,))
        return np.max(masks[:, :, idxs], axis=-1)


def generate_input(composition: np.ndarray, mask: np.ndarray) -> np.ndarray:
    """Stack the 3-channel composition and its mask into one (H, W, 4)
    float array: channels 0-2 are the composition, channel 3 the mask."""
    height, width = composition.shape[:2]
    stacked = np.zeros((height, width, 4))
    stacked[:, :, 0:3] = composition
    stacked[:, :, 3] = mask
    return stacked


def generate_hint(colors: np.ndarray,
                  masks: np.ndarray,
                  radius_range: Rangei = Rangei(5, 8), scribble_num=Rangei(2, 8)):
    """Simulate user color-hint scribbles.

    For each mask channel, picks a random number of pixel positions inside
    the region (count drawn from *scribble_num*) and stamps disks of the
    region's color onto a hint canvas via drawPoint.

    NOTE(review): the canvas has 3 channels, yet drawPoint writes channel -1
    and then overwrites [..., :3]; a 4-channel (RGB + presence) canvas looks
    intended — confirm against consumers before changing the shape.
    """
    hints = np.zeros((*colors.shape[:2], 3), dtype=np.float32)
    # Iterate mask channels directly (replaces cv2.split; same values).
    # The unused `minnum` local from the original is removed.
    for ch in range(masks.shape[-1]):
        pos = list(zip(*np.nonzero(masks[..., ch])))
        wanted = np.random.randint(*scribble_num)
        # If the region has fewer pixels than requested, use them all.
        simulated_strokes = random.sample(pos, min(wanted, len(pos)))
        hints = drawPoint(hints, colors, simulated_strokes, radius_range)
    return hints

def drawPoint(hints, colors, endpoint, radius_range):
    """Stamp filled disks of region color onto the hint canvas.

    One radius is drawn per call and shared by every point in *endpoint*;
    each point is a (row, col) tuple and the disk is clipped to the image
    bounds. Returns the mutated *hints* array.
    NOTE(review): channel -1 is set to 1 and then [..., :3] is overwritten;
    with the 3-channel canvas generate_hint builds, the two writes overlap —
    looks like a 4-channel (RGB + presence) canvas was intended. Confirm.
    """
    radius = np.random.randint(*radius_range)
    for point in endpoint:
        rr,cc = disk(point,radius,shape=colors.shape[:2])
        hints[rr, cc, -1] = 1.0
        hints[rr, cc, :3] = colors[point]
    return hints


class Sample(NamedTuple):
    """One training example as assembled by this preprocessing pipeline."""
    y: torch.Tensor  # (3, H,   W  ) Target Illustration
    x: torch.Tensor  # (4, H,   W  ) Composition Input
    h: torch.Tensor  # (4, H/4, W/4) Color Hints
    c: torch.Tensor  # (3, H,   W  ) Segmented Color Map


def read_npy_file(item):
    """Load the cached "colors"/"masks" arrays referenced by *item*.

    *item* is expected to be a tensor-like object whose .numpy() yields a
    path to an .npz archive (despite the function's "npy" name — it reads
    the keys written by save_preprocessed). The archive is opened with a
    context manager so its file handle is closed; the original leaked the
    open NpzFile.
    """
    path = item.numpy()
    with np.load(path) as data:
        return data["colors"].astype(int), data["masks"].astype(int)



def preprocess_all(data_path, desc_path, model_path):
    """Preprocess every image under *data_path* into *desc_path* using the
    TorchScript model at *model_path*."""
    Segmentor.preprocess(model_path, data_path, desc_path)


if __name__ == '__main__':
    # Hard-coded Windows paths to the training images and the cache target;
    # adjust per machine.
    data_path = r'H:\zhw\spade_ssam\data\train\images'
    desc_path = r'H:\zhw\spade_ssam\data\train\preprocess'
    # Path to the TorchScript skeletonizer checkpoint.
    path = '../resource/skeletonizer.ts'
    preprocess_all(data_path, desc_path, model_path=path)
