# ==========================================================================
# Eses Image Effect Levels
# ==========================================================================
#
# Description:
# The 'Eses Image Effect Levels' node provides a versatile, image editor style
# levels adjustment tool directly within the ComfyUI interface. It allows for
# precise, interactive control over the tonal range of images and masks,
# complete with a live histogram for visual feedback.
#
# Key Features:
#
# - Interactive Controls:
#   - An interactive preview of the level adjustments in the node.
#   - Features sliders for input Black, Mid, and White points, as well as
#     sliders for the final Output Black and White points.
#   - A live histogram visualizes the tonal distribution of the selected
#     channel.
#
# - Multi-Channel Adjustments:
#   - Apply level adjustments to the combined RGB channels for general
#     corrections.
#   - Isolate adjustments to the individual Red, Green, or Blue channels.
#   - Apply a separate level adjustment directly to an input mask.
#
# - State Serialization:
#   - All level adjustments are saved with the workflow and restored on
#     reload.
#   - The node's state persists after refreshing the browser page.
#
# - Live Preview:
#   - The node displays a preview of the connected image with the level
#     adjustments applied in real-time as you move the sliders.
#
# - Quality of Life Features:
#   - "Set Auto Levels" button to automatically calculate optimal
#      input levels.
#   - A simple file-based preset system for saving and loading adjustment
#     presets.
#   - "Reset" buttons to revert the current channel or all channels to
#     default values.
#   - Adjustable auto levels sensitivity for fine-tuned automatic adjustments.
#
# Version: 1.3.1
#
# License: See LICENSE.txt
#
# ==========================================================================


import torch
import numpy as np
from PIL import Image
from server import PromptServer # type: ignore
from io import BytesIO
import base64
import json
import os
import re
from aiohttp import web


# +++ Preset Handling Start +++

# A dedicated folder for Levels presets
# to keep them separate from Curves presets
PRESETS_DIR = os.path.join(os.path.dirname(__file__), "presets_levels")

# Ensure presets directory exists
if not os.path.exists(PRESETS_DIR):
    os.makedirs(PRESETS_DIR)

def load_presets():
    """
    Loads presets from individual .json files in the presets_levels directory.
    The filename (without .json) is used as the preset name.

    Returns:
        dict: Mapping of preset name -> parsed preset data. Files that
        cannot be read or parsed are skipped with a logged error.
    """
    presets = {}
    if not os.path.isdir(PRESETS_DIR):
        return presets

    for filename in os.listdir(PRESETS_DIR):
        if not filename.endswith(".json"):
            continue
        preset_name = os.path.splitext(filename)[0]
        file_path = os.path.join(PRESETS_DIR, filename)
        try:
            with open(file_path, "r", encoding="utf-8") as f:
                presets[preset_name] = json.load(f)
        except Exception as e:
            # Name the offending file (the old message printed a
            # literal "(unknown)" placeholder, which was useless).
            print(f"[EsesImageEffectLevels] Error loading preset file '{filename}': {e}")

    return presets

# +++ Preset Handling End +++



class EsesImageEffectLevels:
    """
    Image-editor style levels adjustment node with a live preview.

    Input levels (black point, mid/gamma, white point) and output levels
    (output black/white) can be applied to the combined RGB image, to the
    individual R/G/B channels, and to an optional mask. All settings travel
    in a single JSON string (``all_levels_json``) so the frontend widget
    state is serialized with the workflow.
    """

    # Neutral settings for one channel: full input range, gamma 1.0,
    # full 0-255 output range. Input points are normalized 0..1 floats,
    # output points are 0..255 ints.
    DEFAULT_LEVELS = { "black_point": 0.0, "white_point": 1.0, "mid_point": 1.0, "output_black": 0, "output_white": 255 }
    DEFAULT_ALL_LEVELS = { "rgb": DEFAULT_LEVELS.copy(), "r": DEFAULT_LEVELS.copy(), "g": DEFAULT_LEVELS.copy(), "b": DEFAULT_LEVELS.copy(), "mask": DEFAULT_LEVELS.copy() }
    DEFAULT_ALL_LEVELS_JSON = json.dumps(DEFAULT_ALL_LEVELS)

    @classmethod
    def IS_CHANGED(cls, **kwargs):
        # NaN never compares equal to itself, so ComfyUI treats the node
        # as changed and re-runs it every execution (keeps the preview live).
        return float("NaN")

    @classmethod
    def INPUT_TYPES(cls):
        # Load presets for the dropdown
        preset_names = ["None"] + sorted(list(load_presets().keys()))

        return {
            "required": {
                "preset": (preset_names,),
                "channel": (["RGB", "Red", "Green", "Blue", "Mask"],),
                "all_levels_json": ("STRING", {"default": cls.DEFAULT_ALL_LEVELS_JSON, "multiline": True}),
                "auto_levels": ("BOOLEAN", {"default": False}),
                "auto_color": ("BOOLEAN", {"default": False}),
                "auto_sensitivity": ("FLOAT", {"default": 0.002, "min": 0.0001, "max": 0.05, "step": 0.0001, "round": 0.0001}),
            },
            "optional": {"image": ("IMAGE",), "mask": ("MASK",)},
            "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"},
        }

    RETURN_TYPES = ("IMAGE", "MASK", "IMAGE", "MASK")
    RETURN_NAMES = ("adjusted_image", "adjusted_mask", "image", "mask")
    FUNCTION = "execute"
    CATEGORY = "Eses Nodes/Image Adjustments"



    # Helpers ---------

    @staticmethod
    def _apply_levels(pil_img, levels_dict):
        """
        Apply input levels (black point, white point, gamma) to a PIL image.

        Args:
            pil_img: PIL image ('RGB' or single-band).
            levels_dict: Dict with 'black_point', 'white_point' (0..1 floats)
                and 'mid_point' (gamma, > 0). Missing keys fall back to
                neutral defaults.

        Returns:
            The adjusted image, or the original image unchanged when the
            settings are neutral or an error occurs.
        """
        try:
            black_point = levels_dict.get('black_point', 0.0)
            white_point = levels_dict.get('white_point', 1.0)
            mid_point = levels_dict.get('mid_point', 1.0)

            # Neutral settings: skip the lookup-table work entirely.
            if black_point == 0.0 and white_point == 1.0 and mid_point == 1.0:
                return pil_img

            # Guard against a zero/negative input range.
            if white_point <= black_point:
                white_point = black_point + 1e-6

            # Guard against division by zero in the gamma exponent
            # (previously this raised and was swallowed by the except).
            if mid_point <= 0.0:
                mid_point = 1e-6

            gamma_inv = 1.0 / mid_point
            input_range = white_point - black_point

            # Build a 256-entry LUT: normalize, remap into the input range,
            # clamp to 0..1, then apply gamma.
            lookup_table = [0] * 256
            for i in range(256):
                val_norm = i / 255.0
                val_remap = max(0.0, min(1.0, (val_norm - black_point) / input_range))
                val_gamma = val_remap ** gamma_inv
                lookup_table[i] = int(val_gamma * 255)

            # Image.point() expects one 256-entry table per band.
            return pil_img.point(lookup_table * 3 if pil_img.mode == 'RGB' else lookup_table)

        except Exception as e:
            print(f"Error applying levels: {e}")
            return pil_img


    @staticmethod
    def _apply_output_levels(pil_img, out_black, out_white):
        """
        Compress the tonal range of a PIL image into [out_black, out_white].

        Returns the original image unchanged when the range is the full
        0..255 (identity).
        """
        if out_black == 0 and out_white == 255:
            return pil_img

        # Clamp values to be safe
        out_black = max(0, min(255, out_black))
        out_white = max(0, min(255, out_white))

        # Keep at least a 1-level output range.
        if out_white <= out_black:
            out_white = out_black + 1

        output_range = out_white - out_black
        lookup_table = [int(out_black + (i / 255.0 * output_range)) for i in range(256)]

        return pil_img.point(lookup_table * 3 if pil_img.mode == 'RGB' else lookup_table)


    @classmethod
    def process_image(cls, pil_image, levels_dict):
        """
        Apply the full adjustment chain to an RGB PIL image:
        global RGB input+output levels, then per-channel (R, G, B)
        input+output levels.

        Args:
            pil_image: PIL image; expected to be 3-band RGB (split() below
                unpacks exactly three channels).
            levels_dict: Dict with 'rgb', 'r', 'g', 'b' level sub-dicts.
        """
        # Apply global RGB input levels, then global RGB output levels.
        img_after_input = cls._apply_levels(pil_image.copy(), levels_dict["rgb"])
        img_after_rgb_output = cls._apply_output_levels(img_after_input, levels_dict["rgb"].get("output_black", 0), levels_dict["rgb"].get("output_white", 255))

        r_chan, g_chan, b_chan = img_after_rgb_output.split()

        # Adjust each channel with its own input and output levels.
        r_adj_in = cls._apply_levels(r_chan, levels_dict["r"])
        r_adj_out = cls._apply_output_levels(r_adj_in, levels_dict["r"].get("output_black", 0), levels_dict["r"].get("output_white", 255))

        g_adj_in = cls._apply_levels(g_chan, levels_dict["g"])
        g_adj_out = cls._apply_output_levels(g_adj_in, levels_dict["g"].get("output_black", 0), levels_dict["g"].get("output_white", 255))

        b_adj_in = cls._apply_levels(b_chan, levels_dict["b"])
        b_adj_out = cls._apply_output_levels(b_adj_in, levels_dict["b"].get("output_black", 0), levels_dict["b"].get("output_white", 255))

        return Image.merge("RGB", (r_adj_out, g_adj_out, b_adj_out))


    @staticmethod
    def _calculate_auto_levels_for_channel(pil_channel_image, cutoff_multiplier=0.002):
        """
        Compute auto input levels for a single channel by clipping
        `cutoff_multiplier` of the pixel population from each end of
        the histogram.

        Args:
            pil_channel_image: Single-band PIL image (needs histogram(),
                width, height).
            cutoff_multiplier: Fraction of pixels to ignore at each end;
                higher values produce a more aggressive stretch.

        Returns:
            A levels dict (same keys as DEFAULT_LEVELS) with gamma and
            output levels left neutral.
        """
        hist = pil_channel_image.histogram()
        total_pixels = pil_channel_image.width * pil_channel_image.height

        cutoff_pixels = total_pixels * cutoff_multiplier

        # Scan up from the dark end until the cutoff population is passed.
        black_level, pixel_sum = 0, 0
        for i in range(256):
            pixel_sum += hist[i]
            if pixel_sum > cutoff_pixels:
                black_level = i
                break

        # Scan down from the bright end the same way.
        white_level, pixel_sum = 255, 0
        for i in range(255, -1, -1):
            pixel_sum += hist[i]
            if pixel_sum > cutoff_pixels:
                white_level = i
                break

        # Keep the points ordered even for degenerate histograms.
        if white_level <= black_level:
            white_level = black_level + 1

        return {
            'black_point': black_level / 255.0,
            'white_point': white_level / 255.0,
            'mid_point': 1.0,
            'output_black': 0,
            'output_white': 255
        }


    @staticmethod
    def _encode_preview(pil_img):
        """Downscale a PIL image to fit 512x512 and return it as a base64 PNG string.

        Note: thumbnail() mutates its argument, so callers must pass a
        throwaway image.
        """
        pil_img.thumbnail((512, 512), Image.LANCZOS)
        buffered = BytesIO()
        pil_img.save(buffered, format="PNG")
        return base64.b64encode(buffered.getvalue()).decode("utf-8")


    # Main function --------------

    def execute(self, preset, channel, auto_levels, auto_color, auto_sensitivity, all_levels_json, image=None, mask=None, prompt=None, extra_pnginfo=None, unique_id=None):
        """
        Apply the configured levels to the image and/or mask and push a
        preview (plus any auto-computed levels) to the frontend widget.

        Returns:
            (adjusted_image, adjusted_mask, original_image, original_mask);
            adjusted outputs pass through unchanged (possibly None) when the
            corresponding input is not connected.
        """
        all_levels = json.loads(all_levels_json)

        # --- Automatic level calculation (first batch image only) -------
        if image is not None and (auto_levels or auto_color):
            # Image tensors arrive as 0..1 floats; presumably [B, H, W, C]
            # per ComfyUI convention — scaled/clipped to uint8 for PIL.
            pil_image_rgb = Image.fromarray(np.clip(255. * image[0].cpu().numpy(), 0, 255).astype(np.uint8)).convert('RGB')

            if auto_levels:
                # Auto levels operate on the luminance channel.
                all_levels['rgb'] = self._calculate_auto_levels_for_channel(pil_image_rgb.convert('L'), auto_sensitivity)

                # Reset ALL levels (input and output) of the other channels
                # so stale per-channel edits do not skew the auto result.
                for key in ('r', 'g', 'b', 'mask'):
                    all_levels[key] = self.DEFAULT_LEVELS.copy()

            if auto_color:
                # Auto color stretches each RGB channel independently.
                r_chan, g_chan, b_chan = pil_image_rgb.split()
                all_levels['r'] = self._calculate_auto_levels_for_channel(r_chan, auto_sensitivity)
                all_levels['g'] = self._calculate_auto_levels_for_channel(g_chan, auto_sensitivity)
                all_levels['b'] = self._calculate_auto_levels_for_channel(b_chan, auto_sensitivity)

                # Reset the RGB composite and mask channels so only the
                # per-channel corrections are active.
                for key in ('rgb', 'mask'):
                    all_levels[key] = self.DEFAULT_LEVELS.copy()


        # --- Apply levels to the image batch ---------------------------
        adjusted_image_tensor = image
        if image is not None:
            img_batch = []
            for i in image:
                pil_in = Image.fromarray(np.clip(255. * i.cpu().numpy(), 0, 255).astype(np.uint8))
                pil_out = self.process_image(pil_in, all_levels)
                img_batch.append(torch.from_numpy(np.array(pil_out).astype(np.float32) / 255.0))
            adjusted_image_tensor = torch.stack(img_batch)

        # --- Apply levels to the mask batch ----------------------------
        adjusted_mask_tensor = mask
        if mask is not None:
            mask_levels = all_levels["mask"]
            out_black = mask_levels.get("output_black", 0)
            out_white = mask_levels.get("output_white", 255)

            mask_batch_tensors = []
            for m in mask:
                # Clip before the uint8 cast (matches the image path);
                # mask values outside 0..1 would otherwise wrap around.
                m_pil = Image.fromarray(np.clip(255. * m.cpu().numpy(), 0, 255).astype(np.uint8), 'L')
                m_pil = self._apply_levels(m_pil, mask_levels)
                m_pil = self._apply_output_levels(m_pil, out_black, out_white)
                mask_batch_tensors.append(torch.from_numpy(np.array(m_pil).astype(np.float32) / 255.0))
            adjusted_mask_tensor = torch.stack(mask_batch_tensors)


        # --- Send a downscaled preview to the frontend widget ----------
        if unique_id:
            img_base64, mask_base64 = None, None

            # Only prepare preview data if an image
            # or mask is actually connected.
            if image is not None:
                img_base64 = self._encode_preview(Image.fromarray(np.clip(255. * image[0].cpu().numpy(), 0, 255).astype(np.uint8)))

            if mask is not None:
                mask_base64 = self._encode_preview(Image.fromarray(np.clip(255. * mask[0].cpu().numpy(), 0, 255).astype(np.uint8), 'L'))

            if image is not None or mask is not None:
                PromptServer.instance.send_sync("eses.levels_preview_base", {
                    "node_id": unique_id,
                    "image_data": img_base64,
                    "mask_data": mask_base64,
                    # Send updated_levels so the UI sliders reflect the
                    # auto_levels / auto_color calculation.
                    "updated_levels": all_levels if auto_levels or auto_color else None
                })

        return (adjusted_image_tensor, adjusted_mask_tensor, image, mask)



# Routes ----------------------

@PromptServer.instance.routes.post("/eses_levels/save_preset")
async def eses_levels_save_preset(request):
    """
    API endpoint to save a new preset to a file.
    """
    try:
        payload = await request.json()
        name = payload.get("preset_name")
        levels = payload.get("preset_data")

        if not name or not levels:
            return web.json_response({"success": False, "message": "Preset name or data is missing."}, status=400)

        # Sanitize the name so it is safe to use as a filename and cannot
        # escape the presets directory (path traversal): trim whitespace,
        # turn spaces into underscores, then keep only word characters,
        # underscores, and hyphens.
        safe_name = re.sub(r'[^\w\-_]', '', name.strip().replace(" ", "_"))

        if not safe_name:
            return web.json_response({"success": False, "message": "Invalid preset name after sanitization."}, status=400)

        target_path = os.path.join(PRESETS_DIR, f"{safe_name}.json")
        with open(target_path, 'w', encoding='utf-8') as f:
            json.dump(levels, f, indent=4)

        # Respond with the refreshed preset list so the client
        # can update its dropdown immediately.
        return web.json_response({"success": True, "message": "Preset saved successfully.", "presets": load_presets()})

    except Exception as e:
        print(f"[EsesImageEffectLevels] Error saving preset: {e}")
        return web.json_response({"success": False, "message": str(e)}, status=500)


@PromptServer.instance.routes.get("/eses_levels/get_presets")
async def eses_levels_get_presets(request):
    """
    API endpoint to fetch the available level presets.
    """
    return web.json_response(load_presets())


@PromptServer.instance.routes.post("/eses_levels/get_histogram")
async def eses_levels_get_histogram(request):
    """
    API endpoint that returns a 256-bin histogram of the requested channel,
    normalized so the tallest bin equals 1.0.

    Expects JSON with 'base_data_b64' (base64 PNG) and optional
    'channel_mode' ("RGB", "Red", "Green", "Blue", or "Mask").
    """
    data = await request.json()
    base_data_b64 = data.get("base_data_b64")
    channel_mode = data.get("channel_mode", "RGB")

    if not base_data_b64:
        return web.json_response({"error": "Missing image data"}, status=400)
    try:
        img_bytes = base64.b64decode(base_data_b64)
        base_img = Image.open(BytesIO(img_bytes)).convert("RGB")

        if channel_mode == "Mask":
            # Masks are single-channel: decode the original bytes
            # straight to grayscale.
            target_channel = Image.open(BytesIO(img_bytes)).convert('L')
        elif channel_mode == "Red":
            target_channel = base_img.split()[0]
        elif channel_mode == "Green":
            target_channel = base_img.split()[1]
        elif channel_mode == "Blue":
            target_channel = base_img.split()[2]
        else:
            # "RGB" (and any unrecognized mode) uses the luminance histogram.
            target_channel = base_img.convert('L')

        hist = target_channel.histogram()[:256]

        # Compute the peak once (it was evaluated twice before) and
        # guard against an all-zero histogram.
        peak = max(hist)
        if peak <= 0:
            peak = 1

        return web.json_response({"histogram": [h / peak for h in hist]})

    except Exception as e:
        return web.json_response({"error": str(e)}, status=500)


@PromptServer.instance.routes.post("/eses_levels/apply")
async def eses_levels_apply_endpoint(request):
    """
    API endpoint that applies a full set of level adjustments to a
    base64-encoded image (or mask) and returns the result as base64 PNG.
    """
    payload = await request.json()
    base_data_b64 = payload.get("base_data_b64")
    all_levels_json = payload.get("all_levels_json")
    base_data_type = payload.get("base_data_type", "image")

    if not base_data_b64 or not all_levels_json:
        return web.json_response({"error": "Missing data"}, status=400)
    try:
        base_img = Image.open(BytesIO(base64.b64decode(base_data_b64)))
        all_levels = json.loads(all_levels_json)

        if base_data_type == 'mask':
            # Masks get only the 'mask' channel's input + output levels.
            mask_levels = all_levels["mask"]
            adjusted = EsesImageEffectLevels._apply_levels(base_img.convert("L"), mask_levels)
            adjusted = EsesImageEffectLevels._apply_output_levels(adjusted, mask_levels.get("output_black", 0), mask_levels.get("output_white", 255))
        else:
            # Images get the full RGB + per-channel chain.
            adjusted = EsesImageEffectLevels.process_image(base_img.convert("RGB"), all_levels)

        out_buffer = BytesIO()
        adjusted.save(out_buffer, format="PNG")

        return web.json_response({"adjusted_image_data": base64.b64encode(out_buffer.getvalue()).decode("utf-8")})

    except Exception as e:
        return web.json_response({"error": str(e)}, status=500)