import torch
import numpy as np
from comfy.k_diffusion.sampling import trange, to_d, BrownianTreeNoiseSampler
import comfy.model_patcher
import comfy.samplers
from comfy.k_diffusion import sampling
from comfy import model_sampling
import comfy.model_management
from math import pi
import math
from functools import partial

# Numerical floor used throughout this module for clamping and safe division.
EPSILON = 1e-4

# ===============================================================================
# DPM++ 2M SDE HELPER FUNCTIONS
# ===============================================================================

def sigma_to_half_log_snr(sigma, model_sampling_obj):
    """Convert sigma to half-logSNR log(alpha_t / sigma_t).

    For flow/CONST models alpha_t = 1 - sigma_t, so the half-logSNR equals the
    negated logit of sigma; otherwise alpha_t = 1 and it is simply -log(sigma).
    """
    is_flow = isinstance(model_sampling_obj, model_sampling.CONST)
    if is_flow:
        return -sigma.logit()
    return -sigma.log()

def offset_first_sigma_for_snr(sigmas, model_sampling_obj, percent_offset=1e-4):
    """Adjust the first sigma to avoid invalid logSNR.

    Flow/CONST models have alpha_t = 1 - sigma_t, so sigma == 1 would make the
    logSNR undefined; nudge the first sigma just below 1 via percent_to_sigma.
    The input schedule is cloned before modification.
    """
    if len(sigmas) <= 1:
        return sigmas
    is_flow = isinstance(model_sampling_obj, model_sampling.CONST)
    if is_flow and sigmas[0] >= 1:
        sigmas = sigmas.clone()
        sigmas[0] = model_sampling_obj.percent_to_sigma(percent_offset)
    return sigmas

class SoulBrownianTreeNoiseSampler:
    """Soul-enhanced Brownian tree noise sampler with adaptive characteristics.

    Wraps comfy's BatchedBrownianTree when available, falling back to plain
    Gaussian noise otherwise.  When a denoised prediction is supplied to
    __call__, the base noise is blended with a multi-component,
    luminance-weighted "soul" term generated by _generate_soul_noise.
    """
    
    def __init__(self, x, sigma_min, sigma_max, seed=None, transform=lambda x: x, cpu=False,
                 soul_strength=0.1, shadow_boost=1.4, highlight_boost=1.3, frequency_mix=0.7):
        # transform maps sigmas into the Brownian tree's time domain
        # (identity by default, matching k-diffusion's noise sampler API).
        self.transform = transform
        # Blend factor for the soul component; 0 disables enhancement entirely.
        self.soul_strength = soul_strength
        self.shadow_boost = shadow_boost
        self.highlight_boost = highlight_boost
        self.frequency_mix = frequency_mix
        self.seed = seed
        
        # Initialize base Brownian tree
        try:
            from comfy.k_diffusion.sampling import BatchedBrownianTree
            t0, t1 = self.transform(torch.as_tensor(sigma_min)), self.transform(torch.as_tensor(sigma_max))
            self.tree = BatchedBrownianTree(x, t0, t1, seed, cpu=cpu)
            self.has_tree = True
        except ImportError:
            # Fallback if BatchedBrownianTree not available: remember shape and
            # device so __call__ can emit plain torch.randn noise instead.
            self.has_tree = False
            self.device = x.device
            self.shape = x.shape
            if seed is not None:
                # NOTE(review): seeds the *global* RNG, which affects any other
                # torch.randn call in the process — confirm this is intended.
                torch.manual_seed(seed)
    
    def __call__(self, sigma, sigma_next, current_denoised=None):
        """Generate soul-enhanced noise for the current step.

        Args:
            sigma, sigma_next: current and next noise levels of the step.
            current_denoised: optional model prediction used to shape the soul
                component; when None, only base noise is returned.
        """
        if self.has_tree:
            t0, t1 = self.transform(torch.as_tensor(sigma)), self.transform(torch.as_tensor(sigma_next))
            # Normalize the Brownian increment by sqrt(|dt|) to unit variance.
            base_noise = self.tree(t0, t1) / (t1 - t0).abs().sqrt()
        else:
            # Fallback noise generation
            base_noise = torch.randn(self.shape, device=self.device)
        
        # Add soul characteristics if enabled and denoised prediction available
        if self.soul_strength > 0 and current_denoised is not None:
            try:
                # Generate soul-enhanced noise
                soul_noise = self._generate_soul_noise(current_denoised, sigma)
                
                # Blend base Brownian noise with soul characteristics; the base
                # is only partially attenuated by the soul strength.
                enhanced_noise = base_noise * (1 - self.soul_strength * 0.5) + soul_noise * self.soul_strength
                return enhanced_noise
            except Exception as e:
                print(f"Warning: Soul noise enhancement failed, using base noise: {e}")
        
        return base_noise
    
    def _generate_soul_noise(self, denoised, sigma):
        """Generate soul-based noise characteristics.

        Mixes three Gaussian components by frequency_mix and scales the result
        by a luminance-region gain (shadow/highlight boosts) derived from the
        denoised prediction.  Note: sigma is accepted but currently unused.
        """
        # Simple multi-frequency noise for DPM++ integration
        shape = denoised.shape
        device = denoised.device
        
        # Generate different frequency components
        high_freq = torch.randn(shape, device=device) * 0.1
        mid_freq = torch.randn(shape, device=device) * 0.05
        low_freq = torch.randn(shape, device=device) * 0.02
        
        # Mix frequencies
        combined = (high_freq * self.frequency_mix + 
                   mid_freq * (1 - self.frequency_mix) * 0.7 + 
                   low_freq * (1 - self.frequency_mix) * 0.3)
        
        # Apply luminance-based scaling
        try:
            luminance = calculate_soul_luminance(denoised)
            shadow_mask = (luminance < 0.15).float()
            highlight_mask = (luminance > 0.85).float()
            midtone_mask = 1.0 - shadow_mask - highlight_mask
            
            scaling = (shadow_mask * self.shadow_boost + 
                      highlight_mask * self.highlight_boost + 
                      midtone_mask * 1.0)
            
            # Broadcast scaling to match noise shape
            while scaling.dim() < combined.dim():
                scaling = scaling.unsqueeze(-1)
            scaling = scaling.expand_as(combined)
            
            combined = combined * scaling
        except Exception:
            pass  # Use unscaled noise if luminance calculation fails
        
        return combined

# ===============================================================================
# DISTANCE SAMPLER CORE FUNCTIONS (from DistanceSampler)
# Based on the excellent work from:
# - DistanceSampler: https://github.com/Extraltodeus/DistanceSampler
# - Original implementation by Extraltodeus
# - Provides advanced distance-based prediction resampling for improved sampling quality
# ===============================================================================

@torch.no_grad()
def matrix_batch_slerp(t, tn, w):
    """Spherical linear interpolation for tensor batches.

    Args:
        t: stacked candidate predictions, shape (N, ...).
        tn: the same predictions normalized to unit matrix norm.
        w: per-candidate weights (expected to sum to 1 along dim 0).

    Returns:
        A single blended prediction with a leading dim of 1.
    """
    # Pairwise inner products between all normalized candidates, clamped so
    # acos below stays finite for (near-)parallel pairs.
    dots = torch.mul(tn.unsqueeze(0), tn.unsqueeze(1)).sum(dim=[-1,-2], keepdim=True).clamp(min=-1.0 + EPSILON, max=1.0 - EPSILON)
    # Drop the diagonal (self-pairs) from the N x N pair matrix.
    mask = ~torch.eye(tn.shape[0], dtype=torch.bool, device=tn.device)
    A, B, *rest = dots.shape
    rest_1s = (1,) * len(rest)
    dots = dots[mask].reshape(A, B - 1, *rest)
    # Angles between candidate pairs drive the slerp weighting.
    omegas = dots.acos()
    sin_omega = omegas.sin()
    # Slerp term sin(w * omega) / sin(omega), with each candidate's weight
    # spread evenly over its B - 1 off-diagonal partners.
    res = t.unsqueeze(1).repeat(1, B - 1, *rest_1s) * torch.sin(w.div(B - 1).unsqueeze(1).repeat(1, B - 1, *rest_1s) * omegas) / sin_omega
    # Collapse candidate and pair dimensions back to a single prediction.
    res = res.sum(dim=[0, 1]).unsqueeze(0)
    return res

@torch.no_grad()
def soul_distance_weights(t, use_softmax=False, use_slerp=False, uncond=None):
    """Calculate distance-based weights for soul sampling.

    Weights each candidate prediction (stacked along dim 0) by how close it is
    to the other candidates — outliers get less weight — then blends the stack
    into one prediction, either via slerp or a norm-preserving weighted sum.

    Args:
        t: stacked candidate predictions; dim 0 indexes candidates.
        use_softmax: use a softmax over distances instead of the squared,
            max-normalized weighting.
        use_slerp: blend with matrix_batch_slerp instead of a weighted sum.
        uncond: optional unconditional prediction; when given, distance from
            it is added so candidates far from uncond are favored.

    Returns:
        The blended prediction reshaped to the original per-candidate shape.

    Raises:
        ValueError: if t has fewer than 3 dimensions.
    """
    orig_shape = t.shape[1:]
    # Collapse the input to a (N, rows, cols) layout that matrix_norm accepts.
    if t.shape[1] == 1 and t.ndim == 4:
        t = t.squeeze(1)
    elif t.ndim < 3:
        raise ValueError("Can't handle input with dimensions < 3")
    else:
        t = t.reshape(t.shape[0], -1, *t.shape[-2 if t.ndim > 3 else -1:])
        if t.ndim == 3:
            t = t.unsqueeze(-1)
        if uncond is not None:
            uncond = uncond.reshape(1, *t.shape[1:])
    
    norm = torch.linalg.matrix_norm(t, keepdim=True)
    n = t.shape[0]
    # Work on unit-norm candidates so distances compare directions only.
    tn = t.div(norm)

    # Elementwise sum of L1 distances from each candidate to all others,
    # inverted (max minus value) so closer-to-the-pack scores higher.
    distances = (tn.unsqueeze(0) - tn.unsqueeze(1)).abs().sum(dim=0)
    distances = distances.max(dim=0, keepdim=True).values - distances

    if uncond is not None:
        # Favor candidates that differ from the unconditional prediction.
        uncond = uncond.div(torch.linalg.matrix_norm(uncond, keepdim=True))
        distances += tn.sub(uncond).abs()

    if use_softmax:
        distances = distances.mul(n).softmax(dim=0)
    else:
        # Squaring sharpens the weighting; renormalize to sum to 1 over dim 0.
        distances = distances.div(distances.max(dim=0).values).pow(2)
        distances = distances / distances.sum(dim=0)

    if use_slerp:
        res = matrix_batch_slerp(t, tn, distances)
    else:
        res = (t * distances).sum(dim=0).unsqueeze(0)
        # Rescale so the result's norm matches the weighted average of the
        # candidates' norms (a plain weighted sum shrinks the norm).
        res = res.div(torch.linalg.matrix_norm(res, keepdim=True)).mul(norm.mul(distances).sum(dim=0).unsqueeze(0))
    
    return res if res.shape == orig_shape else res.reshape(orig_shape)

def get_soul_ancestral_step(sigma, sigma_next, eta=1.0, is_rf=False):
    """Get ancestral step parameters for soul sampling.

    Returns a (sigma_down, sigma_up, x_coeff) triple.  Non-flow models defer
    to comfy's get_ancestral_step with a unit x coefficient; flow (RF) models
    derive the downstep and injected-noise level from alpha = 1 - sigma.
    """
    if sigma_next == 0 or eta == 0:
        # Nothing to renoise: step straight to sigma_next.
        return sigma_next, sigma_next * 0.0, 1.0
    if not is_rf:
        sigma_down, sigma_up = sampling.get_ancestral_step(sigma, sigma_next, eta=eta)
        return sigma_down, sigma_up, 1.0
    # Flow model handling: shrink the step toward sigma_next by eta.
    ratio = 1.0 + eta * (sigma_next / sigma - 1.0)
    sigma_down = sigma_next * ratio
    alpha_ip1 = 1.0 - sigma_next
    alpha_down = 1.0 - sigma_down
    x_coeff = alpha_ip1 / alpha_down
    # Noise level that restores the target variance after the rescale.
    sigma_up = (sigma_next**2 - (sigma_down * x_coeff)**2)**0.5
    return sigma_down, sigma_up, x_coeff

def soul_internal_step(x, d, dt, sigma, sigma_next, sigma_up, x_coeff, noise_sampler):
    """Internal step function for soul sampling.

    Takes one Euler step x + d * dt; when ancestral noise is requested,
    rescales the result by x_coeff and injects sigma_up-scaled noise drawn
    from noise_sampler(sigma, sigma_next).
    """
    stepped = x + d * dt
    wants_noise = noise_sampler is not None and sigma_up != 0
    if not wants_noise:
        return stepped
    noise = noise_sampler(sigma, sigma_next).mul_(sigma_up)
    if x_coeff != 1:
        stepped *= x_coeff
    return stepped.add_(noise)

def fix_soul_step_range(steps, start, end):
    """Fix step range for soul sampling.

    Negative indices count from the end (Python-style), both bounds are
    clamped to [0, steps - 1], and the pair is swapped if given in reverse.
    """
    bounds = []
    for value in (start, end):
        if value < 0:
            value += steps
        bounds.append(max(0, min(steps - 1, value)))
    lo, hi = bounds
    if lo > hi:
        lo, hi = hi, lo
    return lo, hi

# ===============================================================================
# CORE NOISE GENERATION FUNCTIONS
# ===============================================================================

def generate_soul_noise(shape, device, scales, amplitudes, offset=0):
    """
    Generate multi-octave structured noise for SoulSampler.
    Uses trigonometric functions for robust, GPU-optimized noise generation.

    Args:
        shape: (batch, channels, height, width) of the noise to produce.
        device: torch device for the result.
        scales: per-octave coordinate scales (zipped with amplitudes).
        amplitudes: per-octave amplitudes.
        offset: phase offset, typically varied per sampling step.

    Returns:
        Tensor of the requested shape, squashed to (-1, 1) via tanh; falls
        back to 0.1-scaled Gaussian noise for oversized shapes or on error.
    """
    try:
        batch_size, channels, height, width = shape

        # Fallback to simple random noise if shape is too large
        if height * width > 512 * 512:
            return torch.randn(batch_size, channels, height, width, device=device) * 0.1

        noise = torch.zeros(batch_size, channels, height, width, device=device)

        for scale, amplitude in zip(scales, amplitudes):
            # Scaled pixel coordinates for this octave.
            y_coords = torch.arange(height, device=device, dtype=torch.float32).view(-1, 1) * scale + offset
            x_coords = torch.arange(width, device=device, dtype=torch.float32).view(1, -1) * scale + offset

            # Create grid
            y_grid = y_coords.repeat(1, width)
            x_grid = x_coords.repeat(height, 1)

            # The trig pattern is identical for every (batch, channel) slice,
            # so compute the (H, W) map once and broadcast instead of looping
            # over batch_size * channels as the previous implementation did.
            noise_val = (
                torch.sin(x_grid * 2.0 + offset) * torch.cos(y_grid * 2.0 + offset) * 0.5 +
                torch.sin(x_grid * 4.0 + offset * 1.5) * torch.cos(y_grid * 4.0 + offset * 1.5) * 0.3 +
                torch.sin(x_grid * 8.0 + offset * 2.0) * torch.cos(y_grid * 8.0 + offset * 2.0) * 0.2
            )

            # Add organic pseudo-randomness from incommensurate frequencies.
            noise_val = noise_val + torch.sin(x_grid * 13.7 + y_grid * 17.3 + offset * 23.1) * 0.1

            # (H, W) broadcasts across (B, C, H, W).
            noise += noise_val * amplitude

        # Normalize to reasonable range
        noise = torch.tanh(noise)

        return noise

    except Exception as e:
        # Fallback to simple random noise if generation fails
        print(f"Warning: Soul noise generation failed, using random noise: {e}")
        batch_size, channels, height, width = shape
        return torch.randn(batch_size, channels, height, width, device=device) * 0.1

def calculate_soul_luminance(latent):
    """
    Calculate luminance from latent representation for soul-aware processing.

    Returns a single-channel map normalized to [0, 1]; degenerate inputs
    (NaN/Inf or near-constant) and any failure produce a flat 0.5 map.
    """
    try:
        # Rec.601-style weighting when at least three channels exist,
        # otherwise a plain channel average.
        if latent.shape[1] >= 3:
            r, g, b = latent[:, 0:1], latent[:, 1:2], latent[:, 2:3]
            luminance = 0.299 * r + 0.587 * g + 0.114 * b
        else:
            luminance = latent.mean(dim=1, keepdim=True)

        min_val, max_val = luminance.min(), luminance.max()

        # Non-finite or near-constant maps fall back to mid-gray.
        has_bad_values = torch.isnan(luminance).any() or torch.isinf(luminance).any()
        if has_bad_values:
            return torch.ones_like(luminance) * 0.5
        if abs(max_val - min_val) < EPSILON:
            return torch.ones_like(luminance) * 0.5

        return (luminance - min_val) / (max_val - min_val + EPSILON)

    except Exception as e:
        print(f"Warning: Soul luminance calculation failed, using default: {e}")
        return torch.ones_like(latent[:, 0:1]) * 0.5

def apply_soul_luminance_scaling(noise, latent, shadow_boost=1.4, highlight_boost=1.3):
    """
    Apply luminance-aware scaling to noise for enhanced soul detail.

    Shadows (luminance < 0.15) and highlights (> 0.85) get their own boost
    factors while midtones keep unit gain; any failure or non-finite gain
    returns the noise unscaled.
    """
    try:
        luminance = calculate_soul_luminance(latent)

        # Region masks partition the image into shadows/highlights/midtones.
        shadows = (luminance < 0.15).float()
        highlights = (luminance > 0.85).float()
        midtones = 1.0 - shadows - highlights

        gain = shadows * shadow_boost + highlights * highlight_boost + midtones * 1.0

        # Broadcast the gain map up to the noise tensor's rank.
        while gain.dim() < noise.dim():
            gain = gain.unsqueeze(-1)
        gain = gain.expand_as(noise)

        if torch.isnan(gain).any() or torch.isinf(gain).any():
            return noise

        return noise * gain

    except Exception as e:
        print(f"Warning: Soul luminance scaling failed, using unscaled noise: {e}")
        return noise

# ===============================================================================
# SOUL SAMPLER IMPLEMENTATIONS
# ===============================================================================

def soul_adaptive_noise_wrap(
    base_noise_strength=0.1,
    shadow_boost=1.4,
    highlight_boost=1.3,
    frequency_mix=0.7,
    adaptive_timestep=True
):
    """
    SoulSampler with adaptive noise injection for enhanced texture and detail.

    Returns a k-diffusion-style sampler (an Euler loop) that adds a
    multi-band, luminance-weighted noise term to the denoising direction at
    each step.

    Args:
        base_noise_strength: overall injection strength; 0 disables it.
        shadow_boost / highlight_boost: gain for dark / bright regions.
        frequency_mix: blend between the high and lower frequency bands.
        adaptive_timestep: scale injection down as sigma decreases.
    """
    @torch.no_grad()
    def sample_soul_adaptive_noise(model, x, sigmas, extra_args=None, callback=None, disable=None):
        """Euler sampler with per-step soul noise injection."""
        try:
            extra_args = {} if extra_args is None else extra_args
            # NOTE(review): seed and is_rf are computed but never used below.
            seed = extra_args.get("seed", None)
            
            is_rf = isinstance(model.inner_model.inner_model.model_sampling, model_sampling.CONST)
            steps = len(sigmas) - 1
            
            # Define soul frequency bands for texture enhancement
            high_freq_scales = [0.02, 0.03, 0.05]  # Fine detail
            high_freq_amplitudes = [0.08, 0.10, 0.12]
            
            mid_freq_scales = [0.1, 0.15, 0.2]  # Medium texture
            mid_freq_amplitudes = [0.05, 0.065, 0.08]
            
            low_freq_scales = [0.4, 0.6, 0.8]  # Organic variation
            low_freq_amplitudes = [0.02, 0.03, 0.04]
            
            # Per-sample sigma broadcast vector expected by comfy model wrappers.
            s_in = x.new_ones([x.shape[0]])
            
            for i in trange(steps, disable=disable):
                try:
                    # Check for interrupt at the beginning of each sampling step
                    comfy.model_management.throw_exception_if_processing_interrupted()
                    
                    sigma, sigma_next = sigmas[i], sigmas[i + 1]
                    
                    # Get model prediction
                    denoised = model(x, sigma * s_in, **extra_args)
                    
                    # Check for interrupt after model call
                    comfy.model_management.throw_exception_if_processing_interrupted()
                    
                    # Euler derivative toward the denoised prediction.
                    d = to_d(x, sigma, denoised)
                    
                    if callback is not None:
                        callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma, 'denoised': denoised})
                    
                    # Calculate soul timestep scaling: ~0.8 at the first step
                    # (sigma == sigmas[0]) fading toward 0.3 as sigma -> 0.
                    if adaptive_timestep and steps > 1:
                        timestep_scale = (sigma / sigmas[0]) * 0.5 + 0.3
                    else:
                        timestep_scale = 1.0
                    
                    # Generate and apply soul noise
                    if base_noise_strength > 0:
                        try:
                            # Generate multi-octave soul noise, one band per
                            # frequency range with step-dependent phase offsets.
                            high_noise = generate_soul_noise(
                                x.shape, x.device, 
                                high_freq_scales, high_freq_amplitudes,
                                offset=i * 0.1
                            )
                            
                            mid_noise = generate_soul_noise(
                                x.shape, x.device,
                                mid_freq_scales, mid_freq_amplitudes,
                                offset=i * 0.05
                            )
                            
                            low_noise = generate_soul_noise(
                                x.shape, x.device,
                                low_freq_scales, low_freq_amplitudes,
                                offset=i * 0.02
                            )
                            
                            # Mix frequency bands for soul texture
                            combined_noise = (high_noise * frequency_mix + 
                                            mid_noise * (1 - frequency_mix) * 0.7 + 
                                            low_noise * (1 - frequency_mix) * 0.3)
                            
                            # Apply soul luminance scaling
                            combined_noise = apply_soul_luminance_scaling(
                                combined_noise, denoised, shadow_boost, highlight_boost
                            )
                            
                            # Scale by timestep and base strength
                            final_noise_strength = base_noise_strength * timestep_scale
                            noise_injection = combined_noise * final_noise_strength
                            
                            # Validate and inject soul noise into the derivative;
                            # skipped entirely if the injection is non-finite.
                            if not (torch.isnan(noise_injection).any() or torch.isinf(noise_injection).any()):
                                d = d + noise_injection
                        
                        except Exception as e:
                            print(f"Warning: Soul noise generation failed at step {i}: {e}")
                    
                    # Apply Euler step
                    dt = sigma_next - sigma
                    x = x + d * dt
                    
                    # Validate soul state
                    if torch.isnan(x).any() or torch.isinf(x).any():
                        print(f"Warning: Invalid soul values detected at step {i}")
                
                except comfy.model_management.InterruptProcessingException:
                    # Re-raise interrupt exceptions to properly stop sampling
                    raise
                except Exception as e:
                    print(f"Error in soul sampling step {i}: {e}")
                    # Continue with basic step
                    # NOTE(review): if the failure happened before d / sigma_next
                    # were assigned (e.g. in the model call), this fallback
                    # raises NameError and aborts to the outer handler.
                    dt = sigma_next - sigma
                    x = x + d * dt
            
            return x
        
        except comfy.model_management.InterruptProcessingException:
            # Re-raise interrupt exceptions to properly stop sampling
            raise
        except Exception as e:
            print(f"Critical error in soul adaptive noise sampling: {e}")
            # Fallback to basic Euler sampling
            return sampling.sample_euler(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable)
    
    return sample_soul_adaptive_noise

def soul_dpmpp_2m_sde_wrap(
    base_noise_strength=0.1,
    shadow_boost=1.4,
    highlight_boost=1.3,
    frequency_mix=0.7,
    adaptive_timestep=True,
    eta=1.0,
    s_noise=1.0,
    solver_type='midpoint'
):
    """
    SoulSampler DPM++ 2M SDE: Modern sampling with adaptive soul noise integration.

    Returns a DPM-Solver++(2M) SDE sampler whose injected SDE noise comes from
    a soul-enhanced Brownian tree sampler and whose model predictions are
    optionally perturbed via apply_soul_prediction_enhancement.

    Args:
        base_noise_strength: soul blend strength; 0 falls back to the plain
            BrownianTreeNoiseSampler (or a caller-supplied noise_sampler).
        shadow_boost / highlight_boost / frequency_mix: forwarded to the soul
            noise machinery.
        adaptive_timestep: enable sigma-dependent prediction enhancement.
        eta / s_noise: standard SDE ancestral-noise parameters.
        solver_type: 'midpoint' or 'heun' multistep correction.
    """
    @torch.no_grad()
    def sample_soul_dpmpp_2m_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=eta, s_noise=s_noise, noise_sampler=None, solver_type=solver_type):
        """DPM-Solver++(2M) SDE loop with soul-enhanced noise injection."""
        try:
            if len(sigmas) <= 1:
                return x

            if solver_type not in {'heun', 'midpoint'}:
                raise ValueError('solver_type must be \'heun\' or \'midpoint\'')

            extra_args = {} if extra_args is None else extra_args
            seed = extra_args.get("seed", None)
            sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
            
            # Create soul-enhanced noise sampler
            if base_noise_strength > 0:
                soul_noise_sampler = SoulBrownianTreeNoiseSampler(
                    x, sigma_min, sigma_max, seed=seed, cpu=True,
                    soul_strength=base_noise_strength, shadow_boost=shadow_boost,
                    highlight_boost=highlight_boost, frequency_mix=frequency_mix
                )
            else:
                # Use standard noise sampler if soul enhancement disabled
                if noise_sampler is None:
                    soul_noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True)
                else:
                    soul_noise_sampler = noise_sampler
            
            s_in = x.new_ones([x.shape[0]])

            # Get model sampling for SNR calculations (matching official implementation)
            # NOTE(review): this access path differs from the other samplers in
            # this file (model.inner_model.inner_model.model_sampling) — confirm
            # both resolve to the same object for the wrapped model.
            model_sampling_obj = model.inner_model.model_patcher.get_model_object('model_sampling')
            lambda_fn = partial(sigma_to_half_log_snr, model_sampling_obj=model_sampling_obj)
            sigmas = offset_first_sigma_for_snr(sigmas, model_sampling_obj)

            # h / h_last hold the current and previous half-logSNR step sizes
            # needed by the 2M multistep correction.
            old_denoised = None
            h, h_last = None, None

            for i in trange(len(sigmas) - 1, disable=disable):
                try:
                    # Check for interrupt at the beginning of each sampling step
                    comfy.model_management.throw_exception_if_processing_interrupted()
                    
                    # Model prediction
                    denoised = model(x, sigmas[i] * s_in, **extra_args)
                    
                    # Check for interrupt after model call
                    comfy.model_management.throw_exception_if_processing_interrupted()
                    
                    # Apply soul enhancement to model prediction if enabled
                    if base_noise_strength > 0 and adaptive_timestep:
                        try:
                            # Enhancement fades with sigma, mirroring the Euler
                            # variant's timestep scaling.
                            timestep_scale = (sigmas[i] / sigmas[0]) * 0.5 + 0.3 if len(sigmas) > 1 else 1.0
                            soul_enhancement = apply_soul_prediction_enhancement(
                                denoised, sigmas[i], base_noise_strength * timestep_scale * 0.3,
                                shadow_boost, highlight_boost, frequency_mix, i
                            )
                            denoised = denoised + soul_enhancement
                        except Exception as e:
                            print(f"Warning: Soul prediction enhancement failed at step {i}: {e}")
                    
                    if callback is not None:
                        callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
                    
                    if sigmas[i + 1] == 0:
                        # Final denoising step
                        x = denoised
                    else:
                        # DPM-Solver++(2M) SDE step in half-logSNR (lambda) space.
                        lambda_s, lambda_t = lambda_fn(sigmas[i]), lambda_fn(sigmas[i + 1])
                        h = lambda_t - lambda_s
                        h_eta = h * (eta + 1)

                        # alpha_t recovered from sigma_t and its half-logSNR.
                        alpha_t = sigmas[i + 1] * lambda_t.exp()

                        x = sigmas[i + 1] / sigmas[i] * (-h * eta).exp() * x + alpha_t * (-h_eta).expm1().neg() * denoised

                        # Multistep correction with previous prediction
                        if old_denoised is not None:
                            r = h_last / h
                            if solver_type == 'heun':
                                x = x + alpha_t * ((-h_eta).expm1().neg() / (-h_eta) + 1) * (1 / r) * (denoised - old_denoised)
                            elif solver_type == 'midpoint':
                                x = x + 0.5 * alpha_t * (-h_eta).expm1().neg() * (1 / r) * (denoised - old_denoised)

                        # SDE noise injection with soul enhancement
                        if eta > 0 and s_noise > 0:
                            try:
                                # Duck-typed check: only the soul sampler accepts
                                # the current_denoised keyword.
                                if hasattr(soul_noise_sampler, '_generate_soul_noise'):
                                    # Soul-enhanced noise sampler
                                    noise = soul_noise_sampler(sigmas[i], sigmas[i + 1], current_denoised=denoised)
                                else:
                                    # Standard noise sampler
                                    noise = soul_noise_sampler(sigmas[i], sigmas[i + 1])
                                
                                x = x + noise * sigmas[i + 1] * (-2 * h * eta).expm1().neg().sqrt() * s_noise
                            except Exception as e:
                                print(f"Warning: Soul noise injection failed at step {i}: {e}")
                                # Fallback to basic noise
                                noise = torch.randn_like(x)
                                x = x + noise * sigmas[i + 1] * (-2 * h * eta).expm1().neg().sqrt() * s_noise

                    old_denoised = denoised
                    h_last = h
                
                except comfy.model_management.InterruptProcessingException:
                    # Re-raise interrupt exceptions to properly stop sampling
                    raise
                except Exception as e:
                    print(f"Error in soul DPM++ 2M SDE step {i}: {e}")
                    # Fallback to basic step
                    # NOTE(review): raises NameError if denoised was never
                    # assigned (e.g. a first-step model failure).
                    x = x + (denoised - x) * 0.1  # Simple fallback step

            return x
        
        except comfy.model_management.InterruptProcessingException:
            # Re-raise interrupt exceptions to properly stop sampling
            raise
        except Exception as e:
            print(f"Critical error in soul DPM++ 2M SDE sampling: {e}")
            # Fallback to basic sampling
            return sampling.sample_euler(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable)
    
    return sample_soul_dpmpp_2m_sde

def apply_soul_prediction_enhancement(denoised, sigma, enhancement_strength, shadow_boost, highlight_boost, frequency_mix, step):
    """
    Apply subtle soul enhancement to model predictions in DPM++ sampling.

    Builds a small luminance-weighted random field to add to the prediction;
    returns a zero tensor whenever the computation fails or produces
    non-finite values.  sigma and step are accepted for API symmetry but are
    not used in the computation.
    """
    try:
        # Two light random components mixed by frequency_mix.
        fine = torch.randn(denoised.shape, device=denoised.device) * 0.02
        coarse = torch.randn(denoised.shape, device=denoised.device) * 0.01
        enhancement = fine * frequency_mix + coarse * (1 - frequency_mix)

        # Luminance-region gain, halved because this perturbs the prediction
        # itself rather than the injected sampler noise.
        luminance = calculate_soul_luminance(denoised)
        shadows = (luminance < 0.15).float()
        highlights = (luminance > 0.85).float()
        midtones = 1.0 - shadows - highlights
        gain = (shadows * shadow_boost + highlights * highlight_boost + midtones * 1.0) * 0.5

        # Broadcast the gain map up to the enhancement tensor's rank.
        while gain.dim() < enhancement.dim():
            gain = gain.unsqueeze(-1)
        gain = gain.expand_as(enhancement)

        result = enhancement * gain * enhancement_strength

        if torch.isnan(result).any() or torch.isinf(result).any():
            return torch.zeros_like(denoised)
        return result

    except Exception as e:
        print(f"Warning: Soul prediction enhancement failed: {e}")
        return torch.zeros_like(denoised)

def soul_hybrid_wrap(
    # Distance sampler parameters
    resample=3,
    resample_end=-1,
    cfgpp=False,
    use_softmax=False,
    use_slerp=False,
    use_negative=False,
    distance_first=0,
    distance_last=-1,
    eta_first=0,
    eta_last=-1,
    distance_eta_first=0,
    distance_eta_last=-1,
    eta=0.0,
    s_noise=1.0,
    distance_step_eta=0.0,
    distance_step_s_noise=1.0,
    # Soul noise parameters
    base_noise_strength=0.1,
    shadow_boost=1.4,
    highlight_boost=1.3,
    frequency_mix=0.7,
    adaptive_timestep=True,
    # Hybrid control
    noise_during_resample=True,
    noise_final_step=True,
):
    """
    SoulSampler Hybrid: Combines distance-based resampling with adaptive soul noise.

    Returns a k-diffusion style sampler callable (for comfy.samplers.KSAMPLER)
    that, per step, optionally refines the model derivative with
    distance-weighted resampling (``resample`` > 0), adds ancestral noise
    (``eta`` / ``distance_step_eta``), and injects luminance-scaled
    multi-octave "soul" noise. The ``*_first``/``*_last`` pairs select the
    step windows in which each feature is active (negative "last" means
    through the final step).
    """
    @torch.no_grad()
    def sample_soul_hybrid(model, x, sigmas, eta=eta, s_noise=s_noise, noise_sampler=None, distance_step_noise_sampler=None, extra_args=None, callback=None, disable=None):
        # NOTE(review): these nonlocal ranges are rewritten by fix_soul_step_range
        # on every invocation of the sampler; assumed idempotent — confirm.
        nonlocal distance_first, distance_last, eta_first, eta_last, distance_eta_first, distance_eta_last
        
        try:
            extra_args = {} if extra_args is None else extra_args
            seed = extra_args.get("seed", None)
            
            # Noise samplers are only instantiated when their eta is non-zero.
            dstep_noise_sampler = None if distance_step_eta == 0 else distance_step_noise_sampler or noise_sampler or sampling.default_noise_sampler(x, seed=seed)
            noise_sampler = None if eta == 0 else noise_sampler or sampling.default_noise_sampler(x, seed=seed)
            # Rectified-flow (CONST) models use a different ancestral-step formula.
            is_rf = isinstance(model.inner_model.inner_model.model_sampling, model_sampling.CONST)
            uncond = None
            steps = len(sigmas) - 1
            
            # Normalize negative / out-of-range step windows to [0, steps].
            distance_first, distance_last = fix_soul_step_range(steps, distance_first, distance_last)
            eta_first, eta_last = fix_soul_step_range(steps, eta_first, eta_last)
            distance_eta_first, distance_eta_last = fix_soul_step_range(steps, distance_eta_first, distance_eta_last)
            
            # CFG++ setup: capture the unconditional prediction via a post-CFG hook.
            if cfgpp or use_negative:
                uncond = None
                def post_cfg_function(args):
                    nonlocal uncond
                    uncond = args["uncond_denoised"]
                    return args["denoised"]
                model_options = extra_args.get("model_options", {}).copy()
                extra_args["model_options"] = comfy.model_patcher.set_model_options_post_cfg_function(model_options, post_cfg_function)
            
            # Define soul frequency bands (blur scales and amplitudes per octave).
            high_freq_scales = [0.02, 0.03, 0.05]
            high_freq_amplitudes = [0.08, 0.10, 0.12]
            mid_freq_scales = [0.1, 0.15, 0.2]
            mid_freq_amplitudes = [0.05, 0.065, 0.08]
            low_freq_scales = [0.4, 0.6, 0.8]
            low_freq_amplitudes = [0.02, 0.03, 0.04]
            
            s_min, s_max = sigmas[sigmas > 0].min(), sigmas.max()
            # Normalized progress of a sigma within [s_min, s_max], clamped to [0, 1].
            progression = lambda s, y=0.5: max(0, min(1, ((s - s_min) / (s_max - s_min)) ** y))
            
            if resample == -1:
                # Auto mode: cap resampling at 10 or half the schedule length.
                current_resample = min(10, sigmas.shape[0] // 2)
            else:
                current_resample = resample
            
            s_in = x.new_ones([x.shape[0]])
            d = None  # last computed derivative; consulted by the per-step fallback
            
            for i in trange(steps, disable=disable):
                try:
                    # Check for interrupt at the beginning of each sampling step
                    comfy.model_management.throw_exception_if_processing_interrupted()
                    
                    use_distance = distance_first <= i <= distance_last
                    use_eta = eta_first <= i <= eta_last
                    use_distance_eta = distance_eta_first <= i <= distance_eta_last
                    sigma, sigma_next = sigmas[i:i + 2]
                    
                    # Get ancestral step parameters
                    sigma_down, sigma_up, x_coeff = get_soul_ancestral_step(sigma, sigma_next, eta=eta if use_eta else 0.0, is_rf=is_rf)
                    sigma_up *= s_noise
                    dstep_sigma_down, dstep_sigma_up, dstep_x_coeff = get_soul_ancestral_step(sigma, sigma_next, eta=distance_step_eta if use_distance_eta else 0.0, is_rf=is_rf)
                    dstep_sigma_up *= distance_step_s_noise
                    
                    # Calculate resample steps (interpolated towards resample_end if set)
                    res_mul = progression(sigma)
                    if resample_end >= 0:
                        resample_steps = max(min(current_resample, resample_end), min(max(current_resample, resample_end), int(current_resample * res_mul + resample_end * (1 - res_mul))))
                    else:
                        resample_steps = current_resample
                    
                    # Get model prediction
                    denoised = model(x, sigma * s_in, **extra_args)
                    
                    # Check for interrupt after model call
                    comfy.model_management.throw_exception_if_processing_interrupted()
                    
                    # Apply CFG++. Guard against uncond never being populated by the
                    # hook (torch.any(None) would raise TypeError).
                    if cfgpp and uncond is not None and torch.any(uncond):
                        d = to_d(x - denoised + uncond, sigma, denoised)
                    else:
                        d = to_d(x, sigma, denoised)
                    
                    # Apply soul noise to initial prediction if enabled
                    if base_noise_strength > 0 and (not use_distance or noise_final_step):
                        d = apply_soul_noise_to_derivative(d, denoised, i, sigma, sigmas[0], base_noise_strength, shadow_boost, highlight_boost, frequency_mix, adaptive_timestep, steps, x.shape, x.device, high_freq_scales, high_freq_amplitudes, mid_freq_scales, mid_freq_amplitudes, low_freq_scales, low_freq_amplitudes)
                    
                    if callback is not None:
                        callback({'x': x, 'i': i, 'sigma': sigmas, 'sigma_hat': sigma, 'denoised': denoised})
                    
                    dt = sigma_down - sigma
                    dstep_dt = dstep_sigma_down - sigma
                    
                    # Use distance sampling if enabled
                    if sigma_next != 0 and resample_steps > 0 and use_distance:
                        # Distance-based resampling with soul enhancement
                        x_n = [d]
                        for re_step in trange(resample_steps, initial=1, disable=disable or resample_steps < 2, leave=False, desc="    Soul+Distance"):
                            # Check for interrupt during resampling steps
                            comfy.model_management.throw_exception_if_processing_interrupted()
                            
                            x_new = soul_internal_step(x, d, dstep_dt, sigma, sigma_next, dstep_sigma_up, dstep_x_coeff, dstep_noise_sampler)
                            new_denoised = model(x_new, sigma_next * s_in, **extra_args)
                            
                            # Check for interrupt after model call in resampling
                            comfy.model_management.throw_exception_if_processing_interrupted()
                            
                            if cfgpp and uncond is not None and torch.any(uncond):
                                new_d = to_d(x_new - new_denoised + uncond, sigma_next, new_denoised)
                            else:
                                new_d = to_d(x_new, sigma_next * s_in, new_denoised)
                            
                            # Apply soul noise during resampling if enabled (reduced strength)
                            if base_noise_strength > 0 and noise_during_resample:
                                new_d = apply_soul_noise_to_derivative(new_d, new_denoised, i, sigma_next, sigmas[0], base_noise_strength * 0.7, shadow_boost, highlight_boost, frequency_mix, adaptive_timestep, steps, x.shape, x.device, high_freq_scales, high_freq_amplitudes, mid_freq_scales, mid_freq_amplitudes, low_freq_scales, low_freq_amplitudes)
                            
                            x_n.append(new_d)
                            
                            if re_step == 0:
                                # First refinement: simple Heun-style average.
                                d = (new_d + d) / 2
                                continue
                            
                            # Soul distance-based weighting over all candidates so far
                            u = uncond if (use_negative and uncond is not None and torch.any(uncond)) else None
                            d = soul_distance_weights(torch.stack(x_n), use_softmax=use_softmax, use_slerp=use_slerp, uncond=u)
                            x_n.append(d)
                        
                        x = soul_internal_step(x, d, dt, sigma, sigma_next, sigma_up, x_coeff, noise_sampler)
                    else:
                        # Standard Euler step
                        x = soul_internal_step(x, d, dt, sigma, sigma_next, sigma_up, x_coeff, noise_sampler)
                
                except comfy.model_management.InterruptProcessingException:
                    # Re-raise interrupt exceptions to properly stop sampling
                    raise
                except Exception as e:
                    print(f"Error in soul hybrid sampling step {i}: {e}")
                    # Fallback to a basic Euler step. Recompute the sigma pair from
                    # the schedule (the failure may predate their assignment) and
                    # skip entirely if no derivative has been computed yet.
                    if d is not None:
                        x = x + d * (sigmas[i + 1] - sigmas[i])
            
            return x
        
        except comfy.model_management.InterruptProcessingException:
            # Re-raise interrupt exceptions to properly stop sampling
            raise
        except Exception as e:
            print(f"Critical error in soul hybrid sampling: {e}")
            # Fallback to basic sampling
            return sampling.sample_euler(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable)
    
    return sample_soul_hybrid

def apply_soul_noise_to_derivative(d, denoised, step, sigma, max_sigma, base_noise_strength, shadow_boost, highlight_boost, frequency_mix, adaptive_timestep, total_steps, shape, device, high_freq_scales, high_freq_amplitudes, mid_freq_scales, mid_freq_amplitudes, low_freq_scales, low_freq_amplitudes):
    """
    Add luminance-scaled, multi-octave soul noise to a derivative tensor.

    Three noise bands (high/mid/low) are generated, blended according to
    ``frequency_mix``, scaled per-pixel via the luminance of ``denoised``,
    and attenuated by timestep (when ``adaptive_timestep``) and
    ``base_noise_strength``. Returns ``d`` plus the injection, or ``d``
    unchanged when the injection is non-finite or any error occurs.
    """
    try:
        # Sigma-proportional attenuation keeps noise subdued late in sampling.
        t_scale = (sigma / max_sigma) * 0.5 + 0.3 if (adaptive_timestep and total_steps > 1) else 1.0

        # Per-octave noise, each band offset differently per step for variation.
        band_hi = generate_soul_noise(shape, device, high_freq_scales, high_freq_amplitudes, offset=step * 0.1)
        band_mid = generate_soul_noise(shape, device, mid_freq_scales, mid_freq_amplitudes, offset=step * 0.05)
        band_lo = generate_soul_noise(shape, device, low_freq_scales, low_freq_amplitudes, offset=step * 0.02)

        # Blend: frequency_mix favors the high band; the remainder is split
        # 70/30 between mid and low bands.
        residual = 1 - frequency_mix
        mixed = band_hi * frequency_mix + band_mid * residual * 0.7 + band_lo * residual * 0.3

        # Shadow/highlight-aware per-pixel gain.
        mixed = apply_soul_luminance_scaling(mixed, denoised, shadow_boost, highlight_boost)

        injection = mixed * (base_noise_strength * t_scale)

        # Refuse to inject non-finite values.
        if torch.isnan(injection).any() or torch.isinf(injection).any():
            return d
        return d + injection

    except Exception as e:
        print(f"Warning: Soul noise application failed: {e}")
        return d

# ===============================================================================
# SOUL SAMPLER NODE CLASSES
# ===============================================================================

class SoulSamplerBase:
    """
    Shared ComfyUI node scaffolding for SoulSampler variants.

    Subclasses may set ``_SOUL_OPTIONS`` to a tuple of parameter names to
    expose only a subset of ``_SOUL_PARAMS``; leaving it ``None`` exposes
    every parameter.
    """
    # When None, INPUT_TYPES exposes every entry of _SOUL_PARAMS.
    _SOUL_OPTIONS = None
    # Widget specs keyed by parameter name: (widget type, config dict).
    _SOUL_PARAMS = {
        "base_noise_strength": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 0.3, "step": 0.01,
                                          "tooltip": "Base strength of soul noise injection. Higher values add more organic texture but may introduce artifacts."}),
        "shadow_boost": ("FLOAT", {"default": 1.4, "min": 1.0, "max": 2.0, "step": 0.1,
                                   "tooltip": "Multiplier for noise in shadow regions (<15% luminance). Enhances depth in dark areas."}),
        "highlight_boost": ("FLOAT", {"default": 1.3, "min": 1.0, "max": 2.0, "step": 0.1,
                                      "tooltip": "Multiplier for noise in highlight regions (>85% luminance). Adds texture to bright areas."}),
        "frequency_mix": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.1,
                                    "tooltip": "Balance between high-frequency (1.0) and low-frequency (0.0) soul noise. Higher values emphasize fine details."}),
        "adaptive_timestep": ("BOOLEAN", {"default": True,
                                          "tooltip": "Scale soul noise strength based on diffusion timestep. Reduces noise towards generation end."}),
    }

    @classmethod
    def INPUT_TYPES(cls):
        """Build the ComfyUI input spec, honoring the optional subset filter."""
        if cls._SOUL_OPTIONS is None:
            return {"required": dict(cls._SOUL_PARAMS)}
        return {"required": {name: cls._SOUL_PARAMS[name] for name in cls._SOUL_OPTIONS}}

    RETURN_TYPES = ("SAMPLER",)
    CATEGORY = "42lux"
    FUNCTION = "get_sampler"

    def get_sampler(self, **options):
        """Wrap the soul adaptive-noise sampler function into a KSAMPLER."""
        return (comfy.samplers.KSAMPLER(soul_adaptive_noise_wrap(**options)),)

class SoulSamplerDPMBase:
    """
    Shared ComfyUI node scaffolding for SoulSampler DPM++ 2M SDE variants.

    Subclasses may set ``_DPM_OPTIONS`` to a tuple of parameter names to
    expose only a subset of ``_DPM_PARAMS``; leaving it ``None`` exposes
    every parameter.
    """
    # When None, INPUT_TYPES exposes every entry of _DPM_PARAMS.
    _DPM_OPTIONS = None
    # Widget specs keyed by parameter name: soul-noise knobs first, then the
    # DPM++ 2M SDE solver controls.
    _DPM_PARAMS = {
        "base_noise_strength": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 0.3, "step": 0.01,
                                          "tooltip": "Base strength of soul noise injection in DPM++ sampling. Adds organic texture and prevents over-smoothing."}),
        "shadow_boost": ("FLOAT", {"default": 1.4, "min": 1.0, "max": 2.0, "step": 0.1,
                                   "tooltip": "Soul noise multiplier for shadow regions. Enhances detail in dark areas."}),
        "highlight_boost": ("FLOAT", {"default": 1.3, "min": 1.0, "max": 2.0, "step": 0.1,
                                      "tooltip": "Soul noise multiplier for highlight regions. Adds texture to bright areas."}),
        "frequency_mix": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.1,
                                    "tooltip": "Mix between high-frequency and low-frequency soul noise. Higher values emphasize fine details."}),
        "adaptive_timestep": ("BOOLEAN", {"default": True,
                                          "tooltip": "Scale soul noise strength based on DPM++ timestep for natural progression."}),
        "eta": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 2.0, "step": 0.01,
                          "tooltip": "Stochasticity parameter. 0.0 = deterministic ODE, 1.0 = full SDE. Higher values add more randomness."}),
        "s_noise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 2.0, "step": 0.01,
                              "tooltip": "Noise scaling factor for DPM++ SDE steps. Generally should be left at 1.0."}),
        "solver_type": (["midpoint", "heun"], {"default": "midpoint",
                                               "tooltip": "DPM++ solver type. Midpoint is faster, Heun is more accurate but slower."}),
    }

    @classmethod
    def INPUT_TYPES(cls):
        """Build the ComfyUI input spec, honoring the optional subset filter."""
        if cls._DPM_OPTIONS is None:
            return {"required": dict(cls._DPM_PARAMS)}
        return {"required": {name: cls._DPM_PARAMS[name] for name in cls._DPM_OPTIONS}}

    RETURN_TYPES = ("SAMPLER",)
    CATEGORY = "42lux"
    FUNCTION = "get_sampler"

    def get_sampler(self, **options):
        """Wrap the soul DPM++ 2M SDE sampler function into a KSAMPLER."""
        return (comfy.samplers.KSAMPLER(soul_dpmpp_2m_sde_wrap(**options)),)

# ===============================================================================
# SOUL SAMPLER NODE IMPLEMENTATIONS
# ===============================================================================

class SoulSampler(SoulSamplerBase):
    """
    SoulSampler node with a curated parameter set.

    Exposes only the core noise-shaping knobs (strength, shadow/highlight
    boosts, frequency mix); adaptive timestep scaling stays at its default.
    """
    _SOUL_OPTIONS = ("base_noise_strength", "shadow_boost", "highlight_boost", "frequency_mix")

class SoulSamplerAdvanced(SoulSamplerBase):
    """
    SoulSampler node exposing every soul-noise parameter.

    Inherits the base behavior unchanged; with ``_SOUL_OPTIONS`` left as
    ``None`` the full parameter set is shown for fine-grained control.
    """

class SoulSamplerDPM(SoulSamplerDPMBase):
    """
    DPM++ 2M SDE SoulSampler node with a curated parameter set.

    Exposes the core soul-noise knobs plus eta and solver type; s_noise and
    adaptive timestep scaling stay at their defaults.
    """
    _DPM_OPTIONS = ("base_noise_strength", "shadow_boost", "highlight_boost", "frequency_mix", "eta", "solver_type")

class SoulSamplerDPMAdvanced(SoulSamplerDPMBase):
    """
    DPM++ 2M SDE SoulSampler node exposing every parameter.

    Inherits the base behavior unchanged; with ``_DPM_OPTIONS`` left as
    ``None`` all DPM++ and soul-noise parameters are shown.
    """

class SoulSamplerHybridBase:
    """
    Shared ComfyUI node scaffolding for SoulSampler Hybrid variants
    (distance-based resampling combined with soul noise injection).

    Subclasses may set ``_HYBRID_OPTIONS`` to a tuple of parameter names to
    expose only a subset of ``_HYBRID_PARAMS``; leaving it ``None`` exposes
    every parameter.
    """
    # When None, INPUT_TYPES exposes every entry of _HYBRID_PARAMS.
    _HYBRID_OPTIONS = None
    # Widget specs keyed by parameter name: distance-sampler controls, then
    # soul-noise knobs, then hybrid toggles.
    _HYBRID_PARAMS = {
        "resample": ("INT", {"default": 3, "min": -1, "max": 32, "step": 1,
                             "tooltip": "Number of distance resampling steps. 0=Euler, 1=Heun, 2+=Distance method. Higher values improve quality but increase generation time."}),
        "resample_end": ("INT", {"default": -1, "min": -1, "max": 32, "step": 1,
                                 "tooltip": "Resample steps at the end of generation. -1 means constant throughout."}),
        "cfgpp": ("BOOLEAN", {"default": True,
                              "tooltip": "Use CFG++ sampling for improved guidance. Set CFG scale to lower values (3-7) when enabled."}),
        "use_softmax": ("BOOLEAN", {"default": False,
                                    "tooltip": "Use softmax for distance weighting instead of min/max normalization. May provide smoother results."}),
        "use_slerp": ("BOOLEAN", {"default": False,
                                  "tooltip": "Use spherical linear interpolation for prediction blending. Can improve coherence with some models."}),
        "use_negative": ("BOOLEAN", {"default": False,
                                     "tooltip": "Use negative prediction for distance scoring. Often reduces artifacts and improves quality."}),
        "eta": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 32.0, "step": 0.01,
                          "tooltip": "Ancestral sampling strength for main steps. 0.0 = deterministic, higher values add randomness."}),
        "s_noise": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01,
                              "tooltip": "Scale factor for ancestral noise. Generally should be left at 1.0."}),
        "base_noise_strength": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 1.0, "step": 0.01,
                                          "tooltip": "Base strength of soul noise injection. Adds organic texture and prevents over-smoothing."}),
        "shadow_boost": ("FLOAT", {"default": 1.4, "min": 1.0, "max": 3.0, "step": 0.1,
                                   "tooltip": "Soul noise multiplier for shadow regions. Enhances detail in dark areas."}),
        "highlight_boost": ("FLOAT", {"default": 1.3, "min": 1.0, "max": 3.0, "step": 0.1,
                                      "tooltip": "Soul noise multiplier for highlight regions. Adds texture to bright areas."}),
        "frequency_mix": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.1,
                                    "tooltip": "Mix between high-frequency and low-frequency soul noise. Higher values emphasize fine details."}),
        "adaptive_timestep": ("BOOLEAN", {"default": True,
                                          "tooltip": "Scale soul noise strength based on diffusion timestep for natural progression."}),
        "noise_during_resample": ("BOOLEAN", {"default": True,
                                              "tooltip": "Apply soul noise during distance resampling steps. Usually improves texture quality."}),
        "noise_final_step": ("BOOLEAN", {"default": True,
                                         "tooltip": "Apply soul noise to final prediction before distance sampling. Enhances overall texture."}),
    }

    @classmethod
    def INPUT_TYPES(cls):
        """Build the ComfyUI input spec, honoring the optional subset filter."""
        if cls._HYBRID_OPTIONS is None:
            return {"required": dict(cls._HYBRID_PARAMS)}
        return {"required": {name: cls._HYBRID_PARAMS[name] for name in cls._HYBRID_OPTIONS}}

    RETURN_TYPES = ("SAMPLER",)
    CATEGORY = "42lux"
    FUNCTION = "get_sampler"

    def get_sampler(self, **options):
        """Wrap the soul hybrid sampler function into a KSAMPLER."""
        return (comfy.samplers.KSAMPLER(soul_hybrid_wrap(**options)),)

class SoulSamplerHybrid(SoulSamplerHybridBase):
    """
    Hybrid SoulSampler node with a curated parameter set.

    Exposes the main distance-sampling controls (resample counts, CFG++)
    plus the core soul-noise knobs; everything else keeps its default.
    """
    _HYBRID_OPTIONS = ("resample", "resample_end", "cfgpp", "base_noise_strength", "shadow_boost", "highlight_boost", "frequency_mix")

class SoulSamplerHybridAdvanced(SoulSamplerHybridBase):
    """
    Hybrid SoulSampler node exposing every parameter.

    Inherits the base behavior unchanged; with ``_HYBRID_OPTIONS`` left as
    ``None`` all distance-sampling and soul-noise parameters are shown.
    """
