import cv2
from PIL import Image

import numpy as np

import torchvision
import shutil
import torch
import math
import os

from segment_anything import SamAutomaticMaskGenerator, sam_model_registry

"""
有两个供调用的方法：

1、analysis(image_str, mode='max_pooling', root_data=True, pooling_size=5, max_color=65535, debug=True, points=None, vip=False)
参数很多，但是需要传的参数只有三个：
image_str: 被分析的图片路径。
    在不采用图片分割的情况下，默认生成的配色会和原图在同一路径，文件名为"xxx_palette.png"，其中xxx为原图文件名，例如，传入"C:/aa/Fig. 1.jpg"，生成的配色表的路径为"C:/aa/Fig. 1_palette.png"；
mode: 分析模式，不改变该值，采用默认的'max_pooling'，速度快，准确率略低，否则无论输入其他什么参数，都会使用平均汇聚。
vip: 该值设为True时，将直接导出该图片的配色，不会使用图片分割模块。
不返回任何有效值。

2、get_csv_from_palette(palette, pixel_size=16)
palette：配色文件的路径。例如，传入"C:/aa/Fig. 1_palette.png"，会生成一个"C:/aa/Fig. 1_palette.csv"。
csv文件中的具体内容为：
-------------------
R G B "#RGB"
R G B "#RGB"
...
-------------------
一共有数行，每行代表着配色表中的各个颜色，例如，配色表中有5个颜色，即配色表图片大小为16×80，那么csv文件会有5行。
每一行都有四个值，前三个为integer，分别指代RGB的值，第四个值为文本，代表着该色彩的16进制表达。
将返回csv的绝对路径。
"""

# Model configuration: checkpoint path for the Segment Anything model.
# NOTE(review): hard-coded local path and "cuda" device — confirm they match
# the deployment environment.
sam_checkpoint = r"E:\models\sam_vit_h_4b8939.pth"  # raw string avoids invalid escape sequences in the Windows path
model_type = "vit_h"
device = "cuda"
sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
sam.to(device=device)
# Automatic mask generator used by segment(); dense point sampling settings.
mask_generator = SamAutomaticMaskGenerator(
    model=sam,
    points_per_side=64,
    points_per_batch=128,
    pred_iou_thresh=0.7,
    stability_score_thresh=0.92,
    stability_score_offset=0.7,
    crop_n_layers=1,
    box_nms_thresh=0.7,
)


def near_closest_divisors(num: int):
    """Return [side, side, side * side] for the smallest square grid able to
    hold num pixels, i.e. side = ceil(sqrt(num)).

    Used by save() to pad a flat pixel run into a near-square image; the third
    element is the padded total so the caller can compute the filler amount.

    Fix over the original: `isqrt(num) + 1` over-shot perfect squares (e.g.
    num=16 produced a 5x5 grid with 9 filler pixels instead of an exact 4x4).
    """
    side = math.isqrt(num)
    if side * side < num:  # round up only when num is not a perfect square
        side += 1
    return [side, side, side * side]


def closest_divisors(num: int):
    """Return the factor pair [a, b] of num (a <= b, a * b == num) whose
    difference is smallest: a is the largest divisor not exceeding sqrt(num).

    Replaces the original O(num) upward scan with an O(sqrt(num)) downward
    scan, which matters for large masks whose pixel count is prime-ish.
    """
    for d in range(math.isqrt(num), 0, -1):
        if num % d == 0:
            return [d, num // d]
    # num < 1 (e.g. 0): matches the original's degenerate [0, 1] result.
    return [num, 1]


def save(_image, _mask, _suffix, to):
    """Extract the pixels of _image selected by _mask, reshape them into a
    roughly square image, and write it to "<to>/<_suffix>.png".

    _image: (3, H, W) uint8 image tensor; _mask: (H, W) bool tensor.
    """
    # int() so the divisor helpers receive a plain scalar, not a 0-dim tensor.
    num = int(_mask.count_nonzero())
    size = closest_divisors(num)
    new = _image[:, _mask].reshape(3, size[0], size[1])
    aspect = size[0] / size[1]
    if aspect < 0.5 or aspect > 2:
        # Factor pair is too elongated: pad with black pixels up to the
        # nearest square instead.
        new = new.reshape(3, num)
        size = near_closest_divisors(num)
        extra = size[2] - num
        # Match the image dtype; torch.cat raises on mixed dtypes (the
        # original's float32 zeros could not be concatenated with uint8).
        extra_tensor = torch.zeros(size=(3, extra), dtype=new.dtype)
        new = torch.cat((new, extra_tensor), dim=-1).reshape(3, size[0], size[1])
    torchvision.io.write_png(new.type(torch.uint8), to + "/%d.png" % _suffix)


def save_raw(_image, _mask, _suffix):
    """Overlay _mask on _image in magenta and write it to raw/<_suffix>.png."""
    overlay = torchvision.utils.draw_segmentation_masks(
        image=_image, masks=_mask, alpha=1, colors="#FF00FF"
    )
    torchvision.io.write_png(overlay, "raw/%d.png" % _suffix)


def segment(image_str, to, raw=False):
    """Run SAM over the image at image_str, write each mask's pixels as a
    separate PNG under `to` (and optionally a magenta overlay under raw/),
    printing progress, and return the list of SAM mask dicts.
    """
    bgr = cv2.imread(image_str)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    masks = mask_generator.generate(rgb)
    image = torchvision.io.decode_image(image_str)
    total = len(masks)
    for index, mask_info in enumerate(masks):
        mask_tensor = torch.tensor(mask_info['segmentation'])
        if raw:
            save_raw(image, mask_tensor, index)
        save(image, mask_tensor, index, to)
        print("%d / %d" % (index + 1, total))
    return masks


def get_loss(r1, g1, b1, r2, g2, b2):
    """Euclidean distance between two RGB colors, truncated to an int."""
    squared = (r1 - r2) ** 2 + (g1 - g2) ** 2 + (b1 - b2) ** 2
    return int(math.sqrt(squared))


def is_gray(r, g, b):
    """A color counts as gray when it lies within distance 32 of the neutral
    gray obtained by copying any one of its own channels to all three."""
    return any(get_loss(r, g, b, c, c, c) <= 32 for c in (r, g, b))


def clean_data(_points, _dense, _ave):
    """Zero out every histogram cell whose pooled density is below _ave,
    keeping the rest of _points unchanged."""
    keep = _dense >= _ave
    return torch.where(keep, _points, torch.zeros_like(_points))


def get_rgb(int_color):
    """Split a 24-bit packed integer into its (r, g, b) channel values."""
    blue = int_color & 0xff
    green = (int_color >> 8) & 0xff
    red = (int_color >> 16) & 0xff
    return red, green, blue


class Palette:
    """Accumulates colors, merging each new color into an existing entry when
    their Euclidean RGB distance is below the threshold; otherwise a new
    entry is appended."""

    def __init__(self, threshold=32):
        self.list = []    # distinct representative colors, as (r, g, b) tuples
        self.count = []   # accumulated pixel count per entry in self.list
        self.loss_threshold = threshold

    def absorb_int(self, int_color: int, amount: int):
        """Absorb a 24-bit packed color with the given pixel count."""
        r, g, b = get_rgb(int_color)
        self.absorb_rgb(r, g, b, amount)

    def absorb_rgb(self, r, g, b, amount: int):
        """Merge (r, g, b) into the first sufficiently close entry, or append
        it as a new representative color."""
        for index, (cr, cg, cb) in enumerate(self.list):
            if get_loss(r, g, b, cr, cg, cb) < self.loss_threshold:
                self.count[index] += amount
                return
        self.list.append((r, g, b))
        self.count.append(amount)

    def save_palette(self, dir_name, file_name, max_color, pixel_size=16):
        """Write the first min(max_color, len(self.list)) colors as a strip of
        pixel_size x pixel_size squares to <dir_name>/<file_name>_palette.png."""
        color_amount = min(max_color, len(self.list))
        out = torch.zeros(size=(3, pixel_size, pixel_size * color_amount))
        for index, color in enumerate(self.list[:color_amount]):
            left = index * pixel_size
            for channel in range(3):
                out[channel, :, left: left + pixel_size] = color[channel]
        torchvision.io.write_png(out.type(torch.uint8), dir_name + "/" + file_name + "_palette.png")

    def to_cache(self):
        """Append one "r,g,b,count" line per entry to cache/cache.csv."""
        with open("cache/cache.csv", "a") as file:
            for color, amount in zip(self.list, self.count):
                file.write("%d,%d,%d,%d\n" % (color[0], color[1], color[2], amount))


def can_export(dense, last_dense, first_ave):
    """Decide whether the palette can be exported directly: the cleaning loop
    must have retained at least 30% of the initial density and the initial
    average density must be at least 32."""
    retained = last_dense / dense
    if retained < 0.3 or first_ave < 32:
        return False
    return True


def distance(a, b, point):
    """Perpendicular distance from `point` to the infinite line through the
    numpy vectors a and b (via projection of point onto the line)."""
    direction = a - b
    t = np.dot(point - b, direction) / np.dot(direction, direction)
    foot = b + t * direction
    return np.linalg.norm(foot - point)


def get_regress_loss(colors):
    """Average perpendicular distance of `colors` (a list of (r, g, b) tuples)
    to the line through the two colors extreme along the widest channel.

    A small result means the colors lie near one line in RGB space, i.e. the
    palette looks like a single gradient (rejected by analysis()).

    Fix over the original: when all colors are identical the line is
    degenerate and distance() divided by zero (NaN result); that case now
    returns 0.
    """
    if len(colors) < 2:
        return 0
    channels = list(zip(*colors))  # ([r...], [g...], [b...])
    spreads = [max(channel) - min(channel) for channel in channels]
    if max(spreads) == 0:
        return 0  # all colors identical: degenerate line, zero loss
    # argsort(...)[-1] keeps the original np.argsort(a)[2] tie-breaking
    # (highest channel index wins on ties).
    target = channels[int(np.argsort(spreads)[-1])]
    order = np.argsort(target)
    max_color = np.array(colors[order[-1]])
    min_color = np.array(colors[order[0]])
    total = 0
    for color in colors:
        total += distance(max_color, min_color, np.array(color))
    return total / len(colors)


def palette_from_points(points):
    """Build a Palette from a 256^3-cell color-count histogram tensor.

    Flat indices encode packed 24-bit colors; colors are visited in descending
    count order so the most frequent color becomes the representative of each
    merged palette entry.

    Fixes over the original: the boolean mask was wrapped in a Python list
    (deprecated tensor-indexing form), and raw 0-dim tensors were stored as
    palette counts; counts are now plain ints.
    """
    flat_points = points.reshape(256 * 256 * 256)
    counts, int_colors = torch.sort(flat_points, dim=-1, descending=True)
    positive = counts > 0  # bare boolean tensor, not [tensor]
    int_colors = int_colors[positive].tolist()
    counts = counts[positive].tolist()
    palette = Palette()
    for int_color, amount in zip(int_colors, counts):
        palette.absorb_int(int_color, int(amount))
    return palette


class Mask:
    """A 2D boolean mask together with the coordinates of its top-left corner
    in the original image; used while recursively splitting a segmentation
    mask into rectangular child regions."""

    def __init__(self, bool_tensor_2d, dim=1, x=0, y=0):
        # concrete_dim is the axis collapsed by torch.any() during splitting;
        # the split scan runs along the other axis.
        self.content = bool_tensor_2d
        self.concrete_dim = dim
        self.top = (x, y)

    def get_box(self):
        """Return (top, left, bottom, right) in image coordinates."""
        rows = self.content.shape[0]
        cols = self.content.shape[1]
        return self.top[0], self.top[1], self.top[0] + rows, self.top[1] + cols


def get_len(mask: Mask):
    """Length of the mask along the scan axis (the axis opposite to
    concrete_dim), i.e. the axis segment_child_mask iterates over."""
    scan_axis = 1 - mask.concrete_dim
    return mask.content.size(dim=scan_axis)


def get_slice(mask, start, to):
    """Cut [start, to) out of `mask` along its scan axis, flipping
    concrete_dim so the next split pass scans the perpendicular axis, and
    shifting the top-left coordinate accordingly."""
    if mask.concrete_dim == 0:
        piece = mask.content[:, start: to]
        return Mask(piece, dim=1, x=mask.top[0], y=mask.top[1] + start)
    piece = mask.content[start: to, :]
    return Mask(piece, dim=0, x=mask.top[0] + start, y=mask.top[1])


def segment_child_mask(mask: Mask):
    """Split `mask` into contiguous runs along its scan axis.

    Collapses the mask with torch.any along concrete_dim, then emits one
    slice per maximal run of rows/columns containing at least one True.

    Fix over the original: a run that opens at the very last index (a single
    trailing True row/column) was dropped because the loop ended before the
    run could be closed; runs are now closed after the loop.
    """
    out = []
    start = 0
    in_concrete = False
    has_true = torch.any(mask.content, dim=mask.concrete_dim)
    for i in range(has_true.shape[0]):
        if has_true[i]:
            if not in_concrete:
                start = i
                in_concrete = True
        elif in_concrete:
            out.append(get_slice(mask, start, i))
            in_concrete = False
    if in_concrete:
        # Close a run that extends to the end of the axis.
        out.append(get_slice(mask, start, has_true.shape[0]))
    return out


def segment_mask(mask: Mask):
    """Recursively split `mask` into child masks.

    Depth-first: the root's pieces are always re-queued; afterwards a mask
    that splits into exactly one piece is final (appended to the result),
    one that splits into several is re-queued for further splitting, and one
    that splits into none is discarded.
    """
    pending = [mask]
    result = []
    first = True
    while pending:
        pieces = segment_child_mask(pending.pop())
        if first:
            first = False
            pending.extend(pieces)
        elif len(pieces) == 1:
            result.append(pieces[0])
        elif pieces:
            pending.extend(pieces)
    return result


def get_all_child_masks(mask):
    """Split a global 2D bool tensor into rectangular child regions.

    Returns (global_tensors, child_masks): for each child, a full-size bool
    tensor with the child's bounding box filled True, plus the child Mask
    itself. Note the global tensor marks the whole bounding box, not the
    exact segmented pixels.
    """
    root = Mask(mask)
    children = segment_mask(root)
    global_mask_tensor = []
    for child in children:
        top, left, bottom, right = child.get_box()
        full = torch.zeros(size=root.content.shape, dtype=torch.bool)
        full[top: bottom, left: right] = True
        global_mask_tensor.append(full)
    return global_mask_tensor, children


def ensure_empty_dir(dir_str):
    """Delete dir_str (and all its contents) if it exists, then recreate it
    empty.

    Uses os.makedirs so missing intermediate directories are created too,
    which the original os.mkdir could not do.
    """
    if os.path.exists(dir_str):
        shutil.rmtree(dir_str)
    os.makedirs(dir_str)


def get_csv_from_palette(palette, pixel_size=16):
    """Read a palette strip image and write "<palette base>.csv" with one line
    per color: "R,G,B,#RRGGBB". Returns the absolute path of the csv file.

    Fixes over the original:
    - hex components below 0x10 were written without zero padding, producing
      invalid strings such as "#A00" instead of "#0A0000";
    - the sampling stride was hard-coded to 16 instead of using pixel_size.
    """
    path = os.path.abspath(palette)
    csv_path = os.path.splitext(path)[0] + ".csv"
    if os.path.exists(csv_path):
        os.remove(csv_path)
    image = cv2.imread(path)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    color_amount = image.shape[1] // pixel_size
    with open(csv_path, "a") as file:
        for i in range(color_amount):
            # Sample the top-left pixel of each pixel_size-wide color square.
            r = int(image[0][pixel_size * i][0])
            g = int(image[0][pixel_size * i][1])
            b = int(image[0][pixel_size * i][2])
            file.write("%d,%d,%d,#%02X%02X%02X\n" % (r, g, b, r, g, b))
    return csv_path


def analysis(image_str, mode='max_pooling', root_data=True, pooling_size=5, max_color=65535, debug=True, points=None,
             vip=False):
    """Analyze the dominant colors of the image at image_str.

    For a simple image (or when vip=True) this writes a palette image named
    "<basename>_palette.png" next to the source image. For a complex image it
    segments the image with SAM, analyzes each segment recursively, and writes
    per-segment crops and palettes under a directory named after the image.

    Parameters:
        image_str: path of the image to analyze.
        mode: 'max_pooling' uses 3D max pooling over the color histogram
            (faster, slightly less accurate); any other value falls back to
            average pooling.
        root_data: True for the top-level call; recursive calls on segment
            crops pass False and return True/False instead of exporting.
        pooling_size: kernel size of the 3D pooling over the RGB histogram.
        max_color: maximum number of colors kept in the exported palette.
        debug: print per-iteration statistics of the cleaning loop.
        points: optional precomputed 256x256x256 color histogram; built from
            the image's non-gray pixels when None.
        vip: when True, export the palette directly without segmentation.

    Returns None at the top level; recursive (root_data=False) calls return
    True when the segment's palette was cached and False when the segment was
    rejected as a gradient.
    """
    padding_size = pooling_size // 2
    image = Image.open(image_str)
    if points is None:
        # Histogram of non-gray pixel colors: points[r][g][b] = occurrence count.
        points = np.zeros((256, 256, 256))
        for pixel_value in np.array(list(image.getdata())):
            # NOTE(review): assumes each pixel has at least 3 channels
            # (RGB/RGBA); a single-channel image would break the indexing —
            # confirm input formats.
            if is_gray(pixel_value[0], pixel_value[1], pixel_value[2]):
                continue
            points[pixel_value[0]][pixel_value[1]][pixel_value[2]] += 1
    # Shape (N=1, C=1, D=256, H=256, W=256) for 3D pooling; requires CUDA.
    points = torch.tensor(points).reshape(1, 1, 256, 256, 256).detach().cuda()
    effective_pixels, first_dense, last_dense, first_ave, ave, last_ave = 0, 0, 0, 0, 0, 0
    # Iteratively pool the histogram and drop cells below the average density
    # until the average density stops increasing.
    while True:
        padded = torch.nn.functional.pad(points, (
            padding_size, padding_size, padding_size, padding_size, padding_size, padding_size), mode='replicate')
        if mode == 'max_pooling':
            dense = torch.nn.functional.max_pool3d(padded, kernel_size=pooling_size, stride=1)
        else:
            dense = torch.nn.functional.avg_pool3d(padded, kernel_size=pooling_size, stride=1)
        count_nonzero = torch.count_nonzero(dense)
        if count_nonzero < 1:
            print("No available color")
            return
        last_dense = torch.sum(dense)
        ave = last_dense / count_nonzero
        if effective_pixels == 0:
            effective_pixels = count_nonzero
        if first_ave == 0:
            first_ave = ave
        if first_dense == 0:
            # NOTE(review): torch.sum of the scalar last_dense is a no-op;
            # this just records the first iteration's total density.
            first_dense = torch.sum(last_dense)
        if debug:
            print(torch.sum(dense))
            print(torch.count_nonzero(dense))
            print(ave)
            print("----------------------------------------------------------------------------")
        if ave <= last_ave:
            break
        last_ave = ave
        points = clean_data(points, dense, ave)
    if not debug:
        # NOTE(review): the summary prints only when debug is False — this
        # condition looks inverted; confirm whether it is intentional.
        print("ave: %f, first dense: %d, last dense: %d" % (first_ave.item(), first_dense.item(), last_dense.item()))
    # For common image
    if root_data:
        # For simple image, just output its palette
        if vip or can_export(first_dense, last_dense, first_ave):
            palette = palette_from_points(points)
            palette.save_palette(os.path.dirname(os.path.abspath(image_str)),
                                 os.path.splitext(os.path.basename(image_str))[0], max_color=max_color)
            return
        # For complex image, use sam2
        ensure_empty_dir("cache")
        masks = segment(os.path.abspath(image_str), "cache", False)
        total = len(masks)
        mask = torch.zeros(size=masks[0]['segmentation'].shape).bool()
        for i, file in enumerate(os.listdir("cache")):
            print("----------------------------------------------------------------------------")
            print("%d/%d:" % (i + 1, total))
            # Recursive call: keep only masks whose palette is not a gradient.
            if not analysis("cache/%d.png" % i, root_data=False, debug=False):
                continue
            # NOTE(review): masks[i]['segmentation'] is a numpy bool array
            # being OR-ed with a torch tensor — confirm this interop is
            # supported by the installed torch version.
            mask = mask | masks[i]['segmentation']
        mask = mask.bool()
        global_child_mask_tensors, child_masks = get_all_child_masks(mask)
        # NOTE(review): assumes a 4-character extension (e.g. ".png"/".jpg").
        main_dir = image_str[:-4]
        ensure_empty_dir(main_dir)
        segment_dir = os.path.join(main_dir, "segment")
        palette_dir = os.path.join(main_dir, "palette")
        os.mkdir(segment_dir)
        os.mkdir(palette_dir)
        raw_image_str = os.path.join(main_dir, "raw.png")
        shutil.copy(image_str, raw_image_str)
        # Snapshot of the merged mask overlaid on the original image.
        torchvision.io.write_png(
            torchvision.utils.draw_segmentation_masks(image=torchvision.io.decode_image(image_str), masks=mask, alpha=1,
                                                      colors="#FF00FF"), os.path.join(main_dir, "_raw.png"))
        for i, global_child_mask_tensor in enumerate(global_child_mask_tensors):
            size = child_masks[i].content.shape
            # Skip regions too small to matter for palette extraction.
            if size[0] < 16 or size[1] < 16:
                continue
            # Progressively overlay each accepted child region onto raw.png.
            torchvision.io.write_png(
                torchvision.utils.draw_segmentation_masks(image=torchvision.io.decode_image(raw_image_str),
                                                          masks=global_child_mask_tensor, alpha=1, colors="#FF00FF"),
                raw_image_str)
            box = child_masks[i].get_box()
            segment_image_path = os.path.join(segment_dir, "%d.png" % i)
            # Mask boxes are (row, col, row, col); PIL crop wants (left, upper, right, lower).
            image.crop((box[1], box[0], box[3], box[2])).save(segment_image_path)
            analysis(segment_image_path, vip=True)
            palette = segment_image_path[:-4] + "_palette.png"
            if os.path.exists(palette):
                shutil.move(segment_image_path[:-4] + "_palette.png", os.path.join(palette_dir, "%d.png" % i))
        return
    # For segmented image
    palette = palette_from_points(points)
    # Check output is gradient color
    if get_regress_loss(palette.list) > 8:
        print("Ignore, regress: %f" % get_regress_loss(palette.list))
        return False
    palette.to_cache()
    print("Available mask, regress: %f" % get_regress_loss(palette.list))
    return True
