"""
Kraken Resolution Helper (Exact)
MIT License © 2025 The Kraken (@KrakenUnbound)
"""

import math, re
from typing import Optional, Tuple
import numpy as np, torch
from PIL import Image as PILImage

def _parse_wxH(s: str) -> Optional[Tuple[int, int]]:
    if not s: return None
    s = str(s).strip().lower()
    m = re.match(r"^\s*(\d+)\s*[xX]\s*(\d+)\s*$", s)
    if not m: return None
    w, h = int(m.group(1)), int(m.group(2))
    if w <= 0 or h <= 0: return None
    return w, h

def _tensor_to_pil(img) -> PILImage.Image:
    """Convert a torch tensor or numpy array to an RGB PIL image.

    Accepts (H, W), (H, W, C) or batched (B, H, W, C) data with float values
    in [0, 1].  Batched input uses the first item only; grayscale is
    replicated to 3 channels; an alpha channel is dropped.

    Raises:
        TypeError: if `img` is neither a torch.Tensor nor a numpy.ndarray.
    """
    if isinstance(img, torch.Tensor):
        t = img[0] if img.ndim == 4 else img
        t = t.clamp(0.0, 1.0).detach().cpu()
        # Bug fix: a bare (H, W) grayscale tensor used to fall through and
        # crash in fromarray(); mirror the numpy branch and add a channel dim.
        if t.ndim == 2: t = t.unsqueeze(-1)
        if t.shape[-1] == 1: t = t.repeat(1, 1, 3)
        if t.shape[-1] == 4: t = t[..., :3]
        # +0.5 before the uint8 cast rounds half-up instead of truncating.
        arr = (t.numpy() * 255.0 + 0.5).astype(np.uint8)
        return PILImage.fromarray(arr, mode="RGB")
    if isinstance(img, np.ndarray):
        a = img[0] if img.ndim == 4 else img
        a = np.clip(a, 0.0, 1.0).astype(np.float32)
        if a.ndim == 2: a = np.stack([a, a, a], axis=-1)
        if a.shape[-1] == 1: a = np.repeat(a, 3, axis=-1)
        if a.shape[-1] == 4: a = a[..., :3]
        arr = (a * 255.0 + 0.5).astype(np.uint8)
        return PILImage.fromarray(arr, mode="RGB")
    raise TypeError("Unsupported image type for _tensor_to_pil")

def _pil_to_tensor(img: PILImage.Image) -> torch.Tensor:
    """Convert a PIL image to a batched float tensor of shape (1, H, W, 3).

    Pixel values are scaled to [0, 1]; grayscale is expanded to 3 channels
    and any alpha channel is discarded.
    """
    data = np.array(img).astype(np.float32) / 255.0
    if data.ndim == 2:
        data = np.stack([data, data, data], axis=-1)
    if data.shape[-1] == 4:
        data = data[..., :3]
    return torch.from_numpy(data).unsqueeze(0)

def _cover_scale(wi,hi,wt,ht): return max(wt/wi, ht/hi)
def _contain_scale(wi,hi,wt,ht): return min(wt/wi, ht/hi)
def _round_up(x,step): import math; return math.ceil(x/step)*step
def _ceil_to_multiple(x,m): import math; return x if m<=1 else int(math.ceil(x/m)*m)

class KrakenResolutionHelper:
    """ComfyUI node that resizes an IMAGE to an exact target resolution.

    Modes:
      * "stretch"         — resize straight to target, ignoring aspect ratio.
      * "keep proportion" — contain-fit, centered on a black target canvas.
      * "pad"             — contain-fit, placed by anchor on a black canvas.
      * "fill / crop"     — cover-fit (plus a safety margin), then an
                            anchor-positioned crop to exactly the target.

    Returns the resized IMAGE tensor and a "W x H" size string.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
                "mode": (["fill / crop","pad","keep proportion","stretch"], {"default": "keep proportion"}),
                "interpolation": (["lanczos","bicubic","bilinear","nearest"], {"default": "lanczos"}),
                "allow_upscale": ("BOOLEAN", {"default": False}),
                "preset_in": ("STRING", {"default": ""}),
            },
            "optional": {
                "target_resolution_in": ("STRING", {"default": ""}),
                "width": ("INT", {"default": 1024, "min": 1, "max": 8192, "step": 1}),
                "height": ("INT", {"default": 1024, "min": 1, "max": 8192, "step": 1}),
                "anchor_x": (["left","center","right"], {"default": "center"}),
                "anchor_y": (["top","center","bottom"], {"default": "center"}),
                "crop_margin_px": ("INT", {"default": 32, "min": 0, "max": 512, "step": 1}),
                "scale_step": ("FLOAT", {"default": 0.01, "min": 0.001, "max": 0.25, "step": 0.001}),
                "multiple_of": ("INT", {"default": 0, "min": 0, "max": 128, "step": 1}),
            },
        }
    RETURN_TYPES = ("IMAGE","STRING")
    RETURN_NAMES = ("image","size")
    FUNCTION = "apply"
    CATEGORY = "kraken/resize"

    def _goal(self, preset_in, target_resolution_in, width, height):
        """Resolve the target (W, H): explicit strings win over width/height.

        `target_resolution_in` takes priority over `preset_in`; if neither
        parses, fall back to the numeric width/height inputs (clamped to >= 1).
        """
        r = _parse_wxH(target_resolution_in) or _parse_wxH(preset_in)
        return r if r else (max(1, int(width)), max(1, int(height)))

    def _anchor_offsets(self, big_w, big_h, small_w, small_h, ax, ay):
        """Top-left offset that places a small_w x small_h box inside a
        big_w x big_h box according to the anchor keywords; clamped to >= 0."""
        off_x = 0 if ax == "left" else (big_w - small_w if ax == "right" else (big_w - small_w) // 2)
        off_y = 0 if ay == "top" else (big_h - small_h if ay == "bottom" else (big_h - small_h) // 2)
        return max(0, off_x), max(0, off_y)

    def _fit_into(self, pil, Wi, Hi, Wt, Ht, interp, allow_upscale, multiple_of, ax, ay):
        """Contain-fit `pil` and paste it onto a black Wt x Ht canvas.

        Shared implementation for "keep proportion" and "pad" (the two modes
        differed only in the anchors they used).
        """
        s = _contain_scale(Wi, Hi, Wt, Ht)
        if s > 1.0 and not allow_upscale:
            s = 1.0
        new_w, new_h = max(1, int(Wi * s)), max(1, int(Hi * s))
        if multiple_of > 1:
            new_w = _ceil_to_multiple(new_w, multiple_of)
            new_h = _ceil_to_multiple(new_h, multiple_of)
        if not allow_upscale:
            # Rounding up to a multiple may exceed the target; clamp back.
            new_w, new_h = min(new_w, Wt), min(new_h, Ht)
        scaled = pil.resize((new_w, new_h), interp)
        canvas = PILImage.new("RGB", (Wt, Ht), (0, 0, 0))
        off_x, off_y = self._anchor_offsets(Wt, Ht, new_w, new_h, ax, ay)
        canvas.paste(scaled, (off_x, off_y))
        return canvas

    def apply(self, image, mode="keep proportion", interpolation="lanczos", allow_upscale=False,
              preset_in="", target_resolution_in="", width=1024, height=1024,
              anchor_x="center", anchor_y="center", crop_margin_px=32, scale_step=0.01, multiple_of=0):
        """Resize `image` per `mode` and return (tensor, "W x H" string)."""
        Wt, Ht = self._goal(preset_in, target_resolution_in, width, height)
        # Source dimensions: batched tensors are (B, H, W, C); anything else
        # is assumed to be H-first (H, W, ...).
        if isinstance(image, torch.Tensor) and image.ndim == 4:
            _, Hi, Wi, _ = image.shape
        else:
            Hi, Wi = image.shape[0], image.shape[1]

        pil = _tensor_to_pil(image)
        interp = {"lanczos": PILImage.LANCZOS, "bicubic": PILImage.BICUBIC,
                  "bilinear": PILImage.BILINEAR, "nearest": PILImage.NEAREST}.get(interpolation, PILImage.LANCZOS)

        if mode == "stretch":
            result = pil.resize((Wt, Ht), interp)
        elif mode == "keep proportion":
            # NOTE: historically this mode always centers, ignoring the
            # anchor inputs; preserved for backward compatibility.
            result = self._fit_into(pil, Wi, Hi, Wt, Ht, interp, allow_upscale,
                                    multiple_of, "center", "center")
        elif mode == "pad":
            result = self._fit_into(pil, Wi, Hi, Wt, Ht, interp, allow_upscale,
                                    multiple_of, anchor_x, anchor_y)
        else:  # fill / crop
            margin = max(0, int(crop_margin_px))
            # Cover the target plus a margin on each side, quantized upward
            # to the configured scale step.
            s_req = _cover_scale(Wi, Hi, Wt + 2 * margin, Ht + 2 * margin)
            s = _round_up(s_req, max(0.001, float(scale_step)))
            if s > 1.0 and not allow_upscale:
                s = 1.0
            # max(...) guarantees the scaled image is at least target-sized so
            # the crop below is always valid.  (Removed unused max_w/max_h
            # locals from the original.)
            new_w = max(Wt, int(Wi * s))
            new_h = max(Ht, int(Hi * s))
            if multiple_of > 1:
                new_w = _ceil_to_multiple(new_w, multiple_of)
                new_h = _ceil_to_multiple(new_h, multiple_of)
            scaled = pil.resize((new_w, new_h), interp)
            left, top = self._anchor_offsets(new_w, new_h, Wt, Ht, anchor_x, anchor_y)
            result = scaled.crop((left, top, left + Wt, top + Ht))

        out = _pil_to_tensor(result)
        return out, f"{result.width} x {result.height}"

# ComfyUI registration tables: map the node's internal key to its class and
# to the display name shown in the node picker.
NODE_CLASS_MAPPINGS = {"KrakenResolutionHelper": KrakenResolutionHelper}
NODE_DISPLAY_NAME_MAPPINGS = {"KrakenResolutionHelper": "🦑 Kraken Resolution Helper (Exact)"}
