import torch
import numpy as np
from PIL import Image, ImageFilter
import cv2
from typing import Tuple, Optional


def extract_average_skin_color(reference_image: np.ndarray, skin_mask: np.ndarray) -> np.ndarray:
    """
    Extract a single average skin color from the masked region of the reference image.

    Args:
        reference_image: reference image, shape (H, W, 3), RGB.
        skin_mask: skin mask, shape (H, W), values 0-255; pixels > 128 are "skin".

    Returns:
        Mean skin color as a float32 array of shape (3,), RGB order. Falls back
        to a neutral skin tone when the mask selects no pixels.
    """
    # Boolean-index the image with the thresholded mask to pull out skin pixels.
    selected = reference_image[skin_mask > 128]

    # Empty mask -> neutral default so downstream fusion still has a color.
    if selected.size == 0:
        return np.array([220, 180, 160], dtype=np.float32)

    return selected.mean(axis=0).astype(np.float32)

def apply_grayscale_based_skin_color(
    white_model: np.ndarray, 
    white_model_mask: np.ndarray,
    average_skin_color: np.ndarray
) -> np.ndarray:
    """
    Tint the white model with the reference skin color, scaled by luminance.

    Inside the mask each pixel becomes ``average_skin_color * grayscale``, so
    pure-white areas take the reference color unchanged and shaded areas get a
    proportionally darker version of it. Pixels outside the mask keep their
    original color.

    Args:
        white_model: white-model image (H, W, 3), RGB.
        white_model_mask: mask (H, W), values 0-255; > 128 marks the fusion region.
        average_skin_color: mean skin color (3,), RGB.

    Returns:
        Fused image (H, W, 3), uint8.
    """
    # Accept array-likes (e.g. PIL images, lists) as well as ndarrays.
    white_model = np.array(white_model)
    white_model_mask = np.array(white_model_mask)
    average_skin_color = np.array(average_skin_color, dtype=np.float32)

    fusion_region = white_model_mask > 128

    # Nothing to fuse: return the white model untouched.
    if not fusion_region.any():
        return white_model

    # Normalised luminance of the white model: 0.0 (black) .. 1.0 (white).
    white_gray = cv2.cvtColor(white_model, cv2.COLOR_RGB2GRAY).astype(np.float32) / 255.0

    # Broadcast (H, W, 1) luminance against the (3,) color to tint all three
    # channels in one vectorized pass (replaces the original per-channel loop,
    # which ran a full-image np.where three times).
    tinted = white_gray[:, :, np.newaxis] * average_skin_color

    # Keep the original pixel values outside the fusion region.
    result = np.where(fusion_region[:, :, np.newaxis], tinted, white_model.astype(np.float32))

    return np.clip(result, 0, 255).astype(np.uint8)


def enhance_skin_texture(
    fused_image: np.ndarray,
    original_skin: np.ndarray,
    mask: np.ndarray,
    texture_strength: float = 0.2
) -> np.ndarray:
    """
    Re-apply high-frequency skin texture from the reference onto the fused image.

    Args:
        fused_image: fused image (H, W, 3), uint8.
        original_skin: original skin-color image (H, W, 3), used as the texture source.
        mask: fusion-region mask (H, W); multiplied in directly, so values are
              expected in 0-1 (the pipeline passes a float 0/1 mask).
        texture_strength: texture-enhancement strength, 0-1.

    Returns:
        Texture-enhanced image (H, W, 3), uint8.
    """
    if np.sum(mask) == 0:
        return fused_image

    # High-frequency detail of the reference skin: Laplacian of its grayscale.
    skin_gray = cv2.cvtColor(original_skin, cv2.COLOR_RGB2GRAY)
    skin_laplacian = cv2.Laplacian(skin_gray, cv2.CV_64F)

    # Effective gain is texture_strength * 0.1 (the extra 0.1 keeps the detail
    # subtle), restricted to the masked region. Computed once instead of being
    # recomputed identically for each of the three channels, and without the
    # redundant mask[:, :, newaxis] that was only ever indexed back to 2-D.
    detail = skin_laplacian * texture_strength * mask * 0.1

    result = fused_image.copy().astype(np.float32)
    # In-place add broadcasts the single-channel detail map across all RGB channels.
    result += detail[:, :, np.newaxis]

    return np.clip(result, 0, 255).astype(np.uint8)

def skin_color_fusion_pipeline(
    reference_image: np.ndarray,
    reference_skin_mask: np.ndarray,
    white_model: np.ndarray,
    white_model_mask: np.ndarray,
    texture_strength: float = 0.2
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Full skin-color fusion pipeline: a simple fusion driven by grayscale values.

    Args:
        reference_image: reference image (H, W, 3).
        reference_skin_mask: skin mask of the reference image (H, W).
        white_model: white-model image (H, W, 3).
        white_model_mask: white-model mask (H, W).
        texture_strength: texture-enhancement strength, 0-1 (0 disables it).

    Returns:
        Tuple of (fused image, fusion-region mask as uint8 0/255).
    """
    # The white model defines the working resolution; resize the reference
    # image and its mask to match when they differ.
    height, width = white_model.shape[:2]
    if reference_image.shape[:2] != (height, width):
        reference_image = cv2.resize(reference_image, (width, height))
        reference_skin_mask = cv2.resize(reference_skin_mask, (width, height))

    # Step 1: the single mean skin color from the reference.
    mean_skin = extract_average_skin_color(reference_image, reference_skin_mask)

    # Step 2: overlap of both masks — the region used for texture enhancement
    # and reported back to the caller.
    overlap = np.logical_and(white_model_mask > 128, reference_skin_mask > 128).astype(np.float32)

    # Step 3: grayscale-driven tinting over the whole white-model mask.
    fused = apply_grayscale_based_skin_color(white_model, white_model_mask, mean_skin)

    # Step 4 (optional): restore high-frequency skin texture.
    if texture_strength > 0:
        fused = enhance_skin_texture(fused, reference_image, overlap, texture_strength)

    # Step 5: 0/1 float mask -> 0/255 uint8 for output.
    return fused, (overlap * 255).astype(np.uint8)

# ComfyUI node class
class SkinColorFusionNode:
    """
    ComfyUI node: skin-color fusion (simple grayscale-based blend).

    Normalises incoming IMAGE/MASK tensors to numpy arrays, runs
    skin_color_fusion_pipeline, and converts the results back to tensors.
    """
    
    @staticmethod
    def _ensure_chw(tensor):
        """
        Normalise *tensor* to shape [1, 3, H, W] (modelled on brightness_correction.py).

        Handles [1, H, W, 3], [H, W, 3], [1, H, W] and [H, W] inputs; anything
        else (including an already-[1, 3, H, W] tensor) passes through the
        branches unchanged. Returns a float tensor on the input's device.
        """
        arr = tensor.cpu().numpy()
        # [1, H, W, 3] -> [1, 3, H, W]
        if arr.ndim == 4 and arr.shape[-1] == 3:
            arr = arr.transpose(0, 3, 1, 2)
        # [H, W, 3] -> [1, 3, H, W]
        elif arr.ndim == 3 and arr.shape[-1] == 3:
            arr = arr.transpose(2, 0, 1)[None, ...]
        # [1, H, W] -> [1, 3, H, W] (replicate the single channel to RGB)
        elif arr.ndim == 3 and arr.shape[0] == 1:
            arr = np.repeat(arr, 3, axis=0)[None, ...]
        # [H, W] -> [1, 3, H, W]
        elif arr.ndim == 2:
            arr = np.stack([arr, arr, arr], axis=0)[None, ...]
        return torch.from_numpy(arr).to(tensor.device).float()

    @staticmethod
    def _to_single_mask(mask_tensor, threshold=0.5):
        """
        Convert an arbitrary image/tensor into a binary [H, W] mask
        (modelled on brightness_correction.py).

        Supports [1,3,H,W], [1,H,W,3], [1,H,W], [H,W], [1,1,H,W], [1,H,W,1]
        and similar layouts; multi-channel inputs are averaged to one channel.
        Returns a boolean numpy array squeezed down to [H, W].
        NOTE(review): a [1,H,W,1] input matches none of the branches and only
        works via the final squeeze — confirm if that layout actually occurs.
        """
        arr = mask_tensor.detach().cpu().float().numpy()
        # [1, H, W, 3] -> [1, 3, H, W]
        if arr.ndim == 4 and arr.shape[-1] == 3:
            arr = arr.transpose(0, 3, 1, 2)
        # [1, 3, H, W]: average channels down to one
        if arr.ndim == 4 and arr.shape[1] == 3:
            arr = arr.mean(axis=1, keepdims=True)  # [1, 1, H, W]
        elif arr.ndim == 4 and arr.shape[1] == 1:
            pass  # already [1, 1, H, W]
        elif arr.ndim == 3 and arr.shape[0] == 1:
            arr = arr  # [1, H, W] — keep as-is
        elif arr.ndim == 3 and arr.shape[2] == 1:
            arr = arr.transpose(2, 0, 1)  # [H, W, 1] -> [1, H, W]
        elif arr.ndim == 3 and arr.shape[2] == 3:
            arr = arr.transpose(2, 0, 1).mean(axis=0, keepdims=True)  # [H, W, 3] -> [1, H, W]
        elif arr.ndim == 2:
            arr = arr[None, ...]  # [H, W] -> [1, H, W]
        # Normalise to 0..1 — values above 1.1 are assumed to be 0-255 encoded.
        if arr.max() > 1.1:
            arr = arr / 255.0
        mask = (arr > threshold)
        # Squeeze leading singleton dims down to [H, W].
        mask = mask.squeeze()
        return mask
    
    @classmethod
    def INPUT_TYPES(cls):
        # Standard ComfyUI node-interface declaration.
        return {
            "required": {
                "reference_image": ("IMAGE",),
                "reference_skin_mask": ("MASK",),
                "white_model": ("IMAGE",),
                "white_model_mask": ("MASK",),
            },
            "optional": {
                "texture_strength": ("FLOAT", {
                    "default": 0.2,
                    "min": 0.0,
                    "max": 1.0,
                    "step": 0.01
                }),
            }
        }
    
    RETURN_TYPES = ("IMAGE", "MASK")
    RETURN_NAMES = ("fused_image", "fusion_mask")
    FUNCTION = "skin_color_fusion"
    CATEGORY = "image/color"
    
    def skin_color_fusion(
        self,
        reference_image,
        reference_skin_mask,
        white_model,
        white_model_mask,
        texture_strength=0.2
    ):
        """
        Node entry point: fuse the reference skin color onto the white model.

        Converts the incoming tensors to uint8 numpy arrays, runs
        skin_color_fusion_pipeline, and returns (fused IMAGE, fusion MASK)
        tensors matching the input layout.
        """
        # Debug dumps of input shapes/dtypes (messages intentionally untouched).
        print(f"[调试] reference_image shape: {getattr(reference_image, 'shape', None)}, dtype: {getattr(reference_image, 'dtype', None)}")
        print(f"[调试] reference_skin_mask shape: {getattr(reference_skin_mask, 'shape', None)}, dtype: {getattr(reference_skin_mask, 'dtype', None)}")
        print(f"[调试] white_model shape: {getattr(white_model, 'shape', None)}, dtype: {getattr(white_model, 'dtype', None)}")
        print(f"[调试] white_model_mask shape: {getattr(white_model_mask, 'shape', None)}, dtype: {getattr(white_model_mask, 'dtype', None)}")

        # Normalise every input to [1, 3, H, W].
        ref_img = self._ensure_chw(reference_image.clone())
        white_img = self._ensure_chw(white_model.clone())
        ref_mask_img = self._ensure_chw(reference_skin_mask.clone())
        white_mask_img = self._ensure_chw(white_model_mask.clone())

        # Convert to numpy for processing; assumes tensor values are 0-1 floats
        # (standard ComfyUI IMAGE convention) — TODO confirm for all callers.
        ref_img_np = ref_img[0].cpu().numpy().transpose(1, 2, 0) * 255  # [3, H, W] -> [H, W, 3] -> 0-255
        white_img_np = white_img[0].cpu().numpy().transpose(1, 2, 0) * 255
        ref_mask_np = self._to_single_mask(ref_mask_img) * 255  # already [H, W]; bool -> 0/255
        white_mask_np = self._to_single_mask(white_mask_img) * 255
        
        # Ensure the uint8 dtype the pipeline expects.
        ref_img_np = ref_img_np.astype(np.uint8)
        white_img_np = white_img_np.astype(np.uint8)
        ref_mask_np = ref_mask_np.astype(np.uint8)
        white_mask_np = white_mask_np.astype(np.uint8)
        
        # Run the skin-color fusion.
        fused_image, fusion_mask = skin_color_fusion_pipeline(
            ref_img_np, ref_mask_np, white_img_np, white_mask_np,
            texture_strength
        )
        
        # Back to tensors: image as [1, 3, H, W], mask as [1, H, W], both 0-1.
        fused_tensor = torch.from_numpy(fused_image.astype(np.float32) / 255.0).permute(2, 0, 1).unsqueeze(0)  # [H, W, 3] -> [1, 3, H, W]
        mask_tensor = torch.from_numpy(fusion_mask.astype(np.float32) / 255.0).unsqueeze(0)  # [H, W] -> [1, H, W]
        
        # If the input image was channels-last ([1, H, W, 3], the usual ComfyUI
        # layout), mirror that layout in the output.
        if len(reference_image.shape) == 4 and reference_image.shape[-1] == 3:
            fused_tensor = fused_tensor.permute(0, 2, 3, 1)  # [1, 3, H, W] -> [1, H, W, 3]
        
        fused_tensor = fused_tensor.clamp(0, 1).to(torch.float32)
        mask_tensor = mask_tensor.clamp(0, 1).to(torch.float32)
        
        return (fused_tensor, mask_tensor)
