"""
Utility functions for multi-Gaussian fitting of lateral dose profiles.
横向 Dose 分布拟合, 使用 多高斯分布

The module includes:
- 多高斯模型 Multi-Gaussian model
- 优化的目标函数 Objective functions
- 多高斯模型参数估计 和 约束函数 Parameters and constraints
- Bridge class, for 模型参数Tuple 与 结构体命名字段 互相转换
- 工具函数: 归一化, 统计量等
"""

from dataclasses import dataclass
from enum import Enum, auto
from typing import Any, Dict, Optional, Tuple

import numpy as np
from icecream import ic
from scipy.optimize import OptimizeResult, minimize

##0000000000000000000000000000000000000000000000000000000000000000000000000000000 Constants
## Prefixes: D -> Default; LM -> Lambda (penalty strength); RLXD -> Relaxed
DFACTOR_SYMMETRY = 2.0  # Cylinder detectors have no Phi binning and R runs over bins 1..N, so only the symmetric half of each Gaussian is sampled; this factor restores the full weight
DEPSILON = 1e-12  # Small value to prevent division by zero
DLM_SIGMA_SMOOTH_ = 1e3  # Lambda for sigma smoothness penalty (NOTE(review): trailing underscore looks accidental; kept for compatibility)
DLM_NORM = 1e3  # Lambda for normalization-factor penalty
DMIN_SIGMA = 1e-3  # Minimum allowed sigma value
DMIN_WEIGHT = 1e-8  # Minimum allowed weight
DMIN_NORM_FACTOR = 0.5  # Minimum normalization factor
DMAX_NORM_FACTOR = 1.5  # Maximum normalization factor
DSTEP_RANGE = 0.2  # Relative parameter step range for adjacent-layer fitting
DRLXD_STEP_RANGE = 5.0  # Relaxed step range for the post-Bragg region
DSMOOTH_WINDOW = 15  # Window size for smoothing in mid-tail detection
DMIN_WIDTH_CLIP = 2.0  # Minimum width for mid-tail detection
DMAX_WIDTH_CLIP = 40.0  # Maximum width for mid-tail detection
DMIN_LAM_M_CLIP = 6.0  # Minimum lambda_m for mid-tail detection
DMAX_LAM_M_CLIP = 40.0  # Maximum lambda_m for mid-tail detection
DREL_ERR = 0.01  # Relative error threshold
DABS_ERR = 0.0  # Absolute error threshold
DGAP_THRESHOLD = 0.15  # Gap threshold for continuous-region detection (assumes 1.5 * dx with dx = 0.1 -- TODO confirm grid spacing)

## Default values for mid_tail_weights; MT -> Mid Tail
DMT_CENTER = 15.0
DMT_WIDTH = 3.0
DMT_LM_MIDTAIL = 10.0

## Default ranges for mid_tail_detect
DR_MIN = 5.0
DR_MAX = 40.0
DWIDTH_CLIP = (DMIN_WIDTH_CLIP, DMAX_WIDTH_CLIP)
DLAM_M_CLIP = (DMIN_LAM_M_CLIP, DMAX_LAM_M_CLIP)


##0000000000000000000000000000000000000000000000000000000000000000000000000 网格类型
class MeshTypeE(Enum):
    """Mesh geometry type of the dose grid."""

    Box = auto()  # Cartesian (box) mesh
    Cyliner = auto()  # cylindrical mesh; NOTE(review): typo of "Cylinder", kept because callers reference this name


##0000000000000000000000000000000000000000000000000000000000000000000000000 转换工具
@dataclass
class GaussParams:
    """
    Named-field view of the free parameters used during fitting.

    During optimization the independent degrees of freedom are packed into a
    flat array; this class converts between that array layout
    [norm_factor, w2..wn, s1..sn, (background)] and named fields for
    convenient access.
    """

    norm_factor: float  # overall normalization factor
    weights: np.ndarray  # weights [w2..wn]; the first weight is implicit (1 - sum of these)
    sigmas: np.ndarray  # sigmas [s1..sn], one per Gaussian component
    background: Optional[float] = None  # optional constant background

    @property
    def w1(self) -> float:
        """First component weight, 1 - sum(other weights), floored at zero."""
        residual = 1.0 - np.sum(self.weights)
        return residual if residual > 0.0 else 0.0

    @property
    def all_weights(self) -> np.ndarray:
        """All n weights, with the implicit first weight prepended."""
        return np.concatenate(([self.w1], self.weights))

    @classmethod
    def from_array(cls, params: np.ndarray, n_gauss: int, with_background: bool = False):
        """Unpack a raw parameter array into a GaussParams instance."""
        bg = params[-1] if with_background else None
        return cls(
            norm_factor=params[0],
            weights=params[1:n_gauss],  # weights for components 2..n
            sigmas=params[n_gauss : 2 * n_gauss],  # sigmas for all components
            background=bg,
        )

    def to_array(self, n_gauss: int, with_background: bool = False) -> np.ndarray:
        """Pack the named fields back into a raw parameter array."""
        parts = [self.norm_factor, *self.weights, *self.sigmas]
        # Background is appended only when requested and actually present.
        if with_background and self.background is not None:
            parts.append(self.background)
        return np.array(parts)


@dataclass
class FittingData:
    """Container for fitting inputs: coordinate grid and measured values."""

    coords: np.ndarray  # sample coordinates
    values: np.ndarray  # measured values at each coordinate

    @property
    def x(self) -> np.ndarray:
        """Shorthand alias for `coords`."""
        return self.coords

    @property
    def y(self) -> np.ndarray:
        """Shorthand alias for `values`."""
        return self.values


@dataclass
class FittingConfig:
    """
    Configuration parameters for multi-Gaussian fitting.
    """

    n_gauss: int = 2  # number of Gaussian components
    with_background: bool = False  # include a constant background term
    is_post_bragg: bool = False  # fitting the region past the Bragg peak (relaxed constraints)
    lam_n: float = DLM_NORM  # strength of the normalization-factor penalty
    info_verbose: bool = False  # print diagnostic information during fitting
    info_tag: Optional[str] = None  # tag prepended to diagnostic messages


@dataclass
class OptimizationParams:
    """
    Parameters for optimization bounds and constraints.
    """

    step_range: float = DSTEP_RANGE  # relative sigma step allowed w.r.t. the previous layer
    step_range_relax: float = DRLXD_STEP_RANGE  # relaxed step used in the post-Bragg region
    s_prev: Optional[np.ndarray] = None  # sigmas from the previously fitted (adjacent) layer


@dataclass
class MidTailConfig:
    """
    Configuration for mid-tail detection parameters.
    """

    r_min: float = DR_MIN  # minimum |x| of the search window
    r_max: float = DR_MAX  # maximum |x| of the search window
    smooth_window: int = DSMOOTH_WINDOW  # moving-average window for the relative error
    width_clip: Tuple[float, float] = (DMIN_WIDTH_CLIP, DMAX_WIDTH_CLIP)  # (min, max) detected width
    lam_m_clip: Tuple[float, float] = (DMIN_LAM_M_CLIP, DMAX_LAM_M_CLIP)  # (min, max) detected lambda_m
    rel_err: float = DREL_ERR  # relative under-prediction threshold
    abs_err: float = DABS_ERR  # absolute under-prediction threshold


##xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx shortcut functions
def cvt_params_to_stctd(params: np.ndarray, n_gauss: int, with_background: bool = False) -> GaussParams:
    """
    Shortcut: unpack a raw parameter array into a structured GaussParams.
    """
    structured = GaussParams.from_array(params, n_gauss, with_background)
    return structured


def cvt_stctd_to_params(gauss_params: GaussParams, n_gauss: int, with_background: bool = False) -> np.ndarray:
    """
    Shortcut: pack a structured GaussParams back into a raw parameter array.

    Returns:
        Raw parameter array [normFactor, w2..wn, s1..sn, (c)]
    """
    packed = gauss_params.to_array(n_gauss, with_background)
    return packed


##0000000000000000000000000000000000000000000000000000000000000000000000000 拟合工具箱
def normalize_by_sum(mtype: MeshTypeE, arr1D: np.ndarray, bin_width: float) -> np.ndarray:
    """
    Normalize a 1D binned dose array into a density distribution.

    The array is divided by its sum, then by each bin's measure (length for a
    box mesh, ring area for a cylinder mesh) to convert per-bin dose into a
    density.

    Parameters:
    - mtype: mesh type (Box or Cyliner)
    - arr1D: input array of per-bin dose values
    - bin_width: bin width used for the measure

    Returns:
    - Normalized density array (all zeros when the input sums to zero)

    Raises:
    - ValueError: if the mesh type is not recognized
    """
    total_sum = np.sum(arr1D)
    if total_sum == 0:
        return np.zeros_like(arr1D)

    if mtype == MeshTypeE.Box:
        return arr1D / total_sum / bin_width

    if mtype == MeshTypeE.Cyliner:
        # Ring i spans radii [i*dr, (i+1)*dr]; its area is
        # pi*((r+dr)^2 - r^2) = pi*dr*(2r + dr).  At i == 0 this reduces to
        # the central-disk area pi*dr^2, so no special case is needed.
        # (Bug fix: the previous code used pi*(1 + 2r) for the rings, which
        # is only correct when bin_width == 1.)
        rs = np.arange(len(arr1D), dtype=float) * bin_width
        areas = np.pi * bin_width * (2.0 * rs + bin_width)
        return arr1D / areas / total_sum

    raise ValueError(f"wrong mesh type:{mtype}")


def weighted_stddev(qty: np.ndarray, wts: np.ndarray, eps: float = DEPSILON) -> float:
    """
    Weighted standard deviation of `qty` with weights `wts`.

    Parameters:
    - qty: sample values
    - wts: weights (typically a density or probability distribution);
      floored at `eps` to avoid division by zero
    - eps: small positive floor applied to the weights

    Returns:
    - Weighted standard deviation; 0.0 when the total weight is zero
      (e.g. empty input)
    """
    clipped = np.maximum(wts, eps)
    total = np.sum(clipped)
    if total == 0:
        return 0.0
    probs = clipped / total
    mean = np.average(qty, weights=probs)
    variance = np.average((qty - mean) ** 2.0, weights=probs)
    return np.sqrt(variance)


def multi_gauss_model(x: np.ndarray, params: np.ndarray, n_gauss: int, with_background: bool = False) -> np.ndarray:
    """
    Evaluate the N-Gaussian model at coordinates `x`.

    Parameters:
        x: Input coordinates
        params: Parameters array [normFactor, w2..wn, s1..sn, (c)]
            - normFactor: overall normalization factor
            - w2..wn: weights of the 2nd..nth Gaussians (first weight is 1 - sum of others)
            - s1..sn: sigmas of each Gaussian
            - c: optional background constant
        n_gauss: Number of Gaussian components
        with_background: Whether to include the background term

    Returns:
        Predicted y values of the multi-Gaussian model.
    """
    # Structured view of the packed parameter array
    gp = cvt_params_to_stctd(params, n_gauss, with_background)

    # Every weight is scaled by DFACTOR_SYMMETRY: only the symmetric half of
    # each Gaussian is represented on the radial grid.
    weights = DFACTOR_SYMMETRY * gp.all_weights

    # Broadcast to shape (n_gauss, len(x)): one normalized Gaussian per row.
    w_col = weights[:, np.newaxis]
    s_col = gp.sigmas[:, np.newaxis]
    components = w_col * np.exp(-0.5 * (x[np.newaxis, :] / s_col) ** 2) / (np.sqrt(2 * np.pi) * s_col)

    # Sum the components and apply the overall normalization factor.
    y_pred = gp.norm_factor * np.sum(components, axis=0)

    if with_background and gp.background is not None:
        y_pred = y_pred + gp.background

    return y_pred


def sigma_smooth_penalty(sigmas: np.ndarray, s_prev: Optional[np.ndarray], lam_s: float = DLM_SIGMA_SMOOTH_) -> float:
    """
    Quadratic penalty on the relative change of sigmas between adjacent layers.

    Parameters:
        sigmas: Current sigma values
        s_prev: Sigma values from the previously fitted (adjacent) layer
        lam_s: Penalty strength

    Returns:
        Penalty value; 0.0 when no comparable previous sigmas exist.
    """
    # No penalty without a matching set of previous sigmas.
    if s_prev is None or len(sigmas) == 0 or len(s_prev) != len(sigmas):
        return 0.0

    # Relative change, guarded against division by zero.
    denom = np.maximum(s_prev, DEPSILON)
    rel = (sigmas - s_prev) / denom
    return lam_s * np.sum(np.square(rel))


def mid_tail_weights(x: np.ndarray, center: float = DMT_CENTER, width: float = DMT_WIDTH, lam_m: float = DMT_LM_MIDTAIL) -> np.ndarray:
    """
    Gaussian-bump weighting that emphasizes the region where |x| is near `center`.

    Parameters:
        x: Input coordinates
        center: Center of the emphasized region (in |x|)
        width: Width of the emphasized region (floored at DEPSILON)
        lam_m: Strength of the emphasis

    Returns:
        Weight array (>= 1 everywhere, peaking at 1 + lam_m where |x| == center)
    """
    safe_width = width if width > DEPSILON else DEPSILON
    z = (np.abs(x) - center) / safe_width
    return 1.0 + lam_m * np.exp(-0.5 * z**2)


def mid_tail_detect_v2(
    crds: np.ndarray,
    vals: np.ndarray,
    vals_pred0: np.ndarray,
    config: Optional[MidTailConfig] = None,
) -> Dict[str, Any]:
    """
    Detect the mid-tail region where the preliminary fit under-predicts.

    Finds the longest contiguous run of coordinates (|x| within [r_min, r_max])
    where the prediction falls below the measured values by more than a
    relative/absolute threshold, then derives the center, width and weighting
    strength (lam_m) used by the mid-tail-weighted fitting stage.

    Parameters:
        crds: Input coordinates
        vals: Actual values
        vals_pred0: Initial prediction values
        config: MidTailConfig with detection parameters (defaults when None)

    Returns:
        Dictionary with keys center, width, lam_m, x_left, x_right.
        Fallback defaults are returned when no suitable region is found.
    """
    # Use default config if not provided
    if config is None:
        config = MidTailConfig()

    r_min = config.r_min
    r_max = config.r_max
    smooth_window = config.smooth_window
    width_clip = config.width_clip
    lam_m_clip = config.lam_m_clip
    rel_err = config.rel_err
    abs_err = config.abs_err
    diff = vals_pred0 - vals
    eps = DEPSILON
    # Per-point tolerance: the larger of the absolute and relative thresholds.
    threshold = np.maximum(abs_err, rel_err * np.maximum(vals, eps))

    # Find regions where y_pred < y by more than threshold
    sel_mask = (diff < -threshold) & (np.abs(crds) >= r_min) & (np.abs(crds) <= r_max)
    if not np.any(sel_mask):
        # Nothing under-predicted: return benign defaults (weak weighting)
        return dict(center=DMT_CENTER, width=5.0, lam_m=1.0, x_left=None, x_right=None)

    x_selected = crds[sel_mask]
    rel_selected = -(diff[sel_mask]) / np.maximum(vals[sel_mask], eps)

    # Sort by x values
    idx_sort = np.argsort(x_selected)
    x_sorted = x_selected[idx_sort]
    rel_sorted = rel_selected[idx_sort]

    # Too few points to analyze: return the raw extent with default strength
    if len(x_sorted) < 3:
        return dict(
            x_left=x_sorted.min(),
            x_right=x_sorted.max(),
            center=np.mean(x_sorted),
            width=2.0,
            lam_m=8.0,
        )

    gap_threshold = DGAP_THRESHOLD  # assumes 1.5 * dx with dx = 0.1 -- TODO confirm grid spacing

    # Split the sorted selection into contiguous segments:
    # a gap larger than the threshold starts a new segment.
    x_diffs = np.diff(x_sorted)
    gap_indices = np.where(x_diffs > gap_threshold)[0]
    # Segment boundaries: start at 0, end at len(x_sorted)
    breakpoints = np.concatenate(([0], gap_indices + 1, [len(x_sorted)]))

    # Keep only the longest contiguous segment
    seg_lengths = np.diff(breakpoints)
    k_max = int(np.argmax(seg_lengths))
    left, right = breakpoints[k_max], breakpoints[k_max + 1]
    x_seg, rel_seg = x_sorted[left:right], rel_sorted[left:right]

    if len(x_seg) < 3:
        return dict(
            x_left=x_seg.min(),
            x_right=x_seg.max(),
            center=np.mean(x_seg),
            width=2.0,
            lam_m=8.0,
        )

    # Smooth the relative error with a moving average
    if smooth_window > 1 and smooth_window < len(rel_seg):
        kernel = np.ones(smooth_window) / smooth_window
        rel_smoothed = np.convolve(rel_seg, kernel, mode="same")
    else:
        rel_smoothed = rel_seg

    # Peak of the (smoothed) relative error defines the region center
    i_peak = np.argmax(rel_smoothed)
    x_peak = x_seg[i_peak]
    peak_val = rel_smoothed[i_peak]

    # Region extent: where the smoothed error drops below 10% of the peak.
    # (Note: this is a 10%-of-maximum width, not a half maximum, despite the
    # variable name.)  Removed leftover debug print of peak_val.
    half = 0.1 * peak_val

    # Walk left from the peak to the first point below the cut
    rel_to_left = rel_smoothed[: i_peak + 1][::-1]  # Reverse order from peak to left
    left_indices = np.where(rel_to_left < half)[0]
    iL = i_peak - left_indices[0] if len(left_indices) > 0 else 0

    # Walk right from the peak to the first point below the cut
    rel_to_right = rel_smoothed[i_peak:]
    right_indices = np.where(rel_to_right < half)[0]
    iR = i_peak + right_indices[0] if len(right_indices) > 0 else len(x_seg) - 1

    x_left, x_right = x_seg[iL], x_seg[iR]
    width = 0.5 * abs(x_right - x_left)
    width = np.clip(width, width_clip[0], width_clip[1])

    # Weighting strength grows with the peak relative error, then is clipped
    lam_m = 8.0 + 30.0 * (1 - np.exp(-3.0 * peak_val))
    lam_m = np.clip(lam_m, lam_m_clip[0], lam_m_clip[1])

    return dict(center=x_peak, width=width, lam_m=lam_m, x_left=x_left, x_right=x_right)


def objective_log(
    params: np.ndarray,
    fitting_data: FittingData,
    config: FittingConfig,
    s_prev: Optional[np.ndarray] = None,
    mid_tail_weight_para: Optional[Dict[str, Any]] = None,
) -> float:
    """
    First-stage objective: weighted sum of squared log residuals plus soft
    constraints.

    Parameters:
        params: Fitting parameters [normFactor, w2..wn, s1..sn, (c)]
        fitting_data: FittingData object with coordinates and values
        config: FittingConfig object with fitting parameters
        s_prev: Previous sigma values (for the smoothness penalty)
        mid_tail_weight_para: kwargs for mid_tail_weights, or None for
            uniform weighting

    Returns:
        Objective function value (1e20 when parameters are infeasible).
    """
    # Structured view of the packed parameter array
    gs = cvt_params_to_stctd(params, config.n_gauss, config.with_background)

    # Reject infeasible points: negative parameters or non-positive sigmas.
    if np.any(params < 0) or np.any(gs.sigmas <= 0):
        return 1e20

    y_pred = multi_gauss_model(fitting_data.x, params, config.n_gauss, config.with_background)

    # Optional mid-tail emphasis; otherwise uniform weighting.
    weights = 1.0 if mid_tail_weight_para is None else mid_tail_weights(fitting_data.x, **mid_tail_weight_para)

    # Squared residuals in log space emphasize the low-dose tails.
    residual = np.log(y_pred + DEPSILON) - np.log(fitting_data.y + DEPSILON)
    loss = np.sum(weights * residual**2.0)

    # Soft constraint keeping the normalization factor near 1.
    loss += config.lam_n * (gs.norm_factor - 1.0) ** 2.0

    # Smoothness penalty w.r.t. the adjacent layer, when available.
    if s_prev is not None:
        loss += sigma_smooth_penalty(gs.sigmas, s_prev)

    return loss


def objective_linear(
    params: np.ndarray,
    fitting_data: FittingData,
    config: FittingConfig,
    s_prev: Optional[np.ndarray] = None,
    mid_tail_weight_para: Optional[Dict[str, Any]] = None,
) -> float:
    """
    Second-stage objective: weighted sum of squared linear residuals plus soft
    constraints.

    Parameters:
        params: Fitting parameters [normFactor, w2..wn, s1..sn, (c)]
        fitting_data: FittingData object with coordinates and values
        config: FittingConfig object with fitting parameters
        s_prev: Previous sigma values (for the smoothness penalty)
        mid_tail_weight_para: kwargs for mid_tail_weights, or None for
            uniform weighting

    Returns:
        Objective function value (1e20 when parameters are infeasible).
    """
    # Structured view of the packed parameter array
    gs = cvt_params_to_stctd(params, config.n_gauss, config.with_background)

    # Reject infeasible points: negative parameters or non-positive sigmas.
    if np.any(params < 0) or np.any(gs.sigmas <= 0):
        return 1e20

    y_pred = multi_gauss_model(fitting_data.x, params, config.n_gauss, config.with_background)

    # Optional mid-tail emphasis; otherwise uniform weighting.
    weights = 1.0 if mid_tail_weight_para is None else mid_tail_weights(fitting_data.x, **mid_tail_weight_para)

    # Plain squared residuals in linear space.
    residual = y_pred - fitting_data.y
    loss = np.sum(weights * residual**2.0)

    # Soft constraint keeping the normalization factor near 1.
    loss += config.lam_n * (gs.norm_factor - 1.0) ** 2.0

    # Smoothness penalty w.r.t. the adjacent layer, when available.
    if s_prev is not None:
        loss += sigma_smooth_penalty(gs.sigmas, s_prev)

    return loss


def build_constraints(n_gauss: int, is_post_bragg: bool = False) -> list:
    """
    Build SLSQP inequality constraints for the multi-Gaussian fit.

    Two families of constraints are produced:
    1. The free weights w2..wn must sum to at most 1, so the implicit first
       weight (1 - sum) stays non-negative.
    2. Sigmas must be non-decreasing; for the post-Bragg region the first
       ordering constraint (between s1 and s2) is dropped.

    Parameters:
        n_gauss: Number of Gaussian components
        is_post_bragg: Whether this is for the post-Bragg-peak region

    Returns:
        List of constraint dictionaries for scipy.optimize.minimize.
    """
    # Weight-sum constraint: 1 - sum(w2..wn) >= 0
    weight_sum_ok = {"type": "ineq", "fun": lambda p, n=n_gauss: 1 - np.sum(p[1:n])}

    # Sigma ordering constraints: s_{i+1} - s_i >= 0 (sigmas start at index n)
    start = 1 if is_post_bragg else 0
    ordering = [
        {"type": "ineq", "fun": lambda p, i=i, n=n_gauss: p[n + i + 1] - p[n + i]}
        for i in range(start, n_gauss - 1)
    ]

    return [weight_sum_ok] + ordering


def build_bounds(
    x: np.ndarray,
    n_gauss: int,
    opt_params: Optional[OptimizationParams] = None,
    is_post_bragg: bool = False,
) -> list:
    """
    Build bounds for the optimization parameters.

    Parameters:
        x: Input coordinates (their span caps the sigma upper bound)
        n_gauss: Number of Gaussian components
        opt_params: OptimizationParams with step ranges and previous sigmas
            (defaults used when None)
        is_post_bragg: Whether this is for the post-Bragg-peak region
            (uses the relaxed step range)

    Returns:
        List of (min, max) bounds, one per parameter in the order
        [normFactor, w2..wn, s1..sn].
    """
    # Use default optimization parameters if not provided
    if opt_params is None:
        opt_params = OptimizationParams()

    s_prev = opt_params.s_prev

    # Base bounds for [normFactor, w2..wn, s1..sn]
    lower = [DMIN_NORM_FACTOR] + [DMIN_WEIGHT] * (n_gauss - 1) + [DMIN_SIGMA] * n_gauss
    upper = [DMAX_NORM_FACTOR] + [1.0] * (n_gauss - 1) + [(x.max() - x.min())] * n_gauss

    # Tighten sigma bounds around the previous layer's values, if available
    if s_prev is not None:
        # Relaxed range for the post-Bragg region (hoisted: loop-invariant)
        s_range = opt_params.step_range_relax if is_post_bragg else opt_params.step_range
        for j in range(n_gauss):
            idx_sigma = n_gauss + j  # index of the j-th sigma parameter
            lb = max(DMIN_SIGMA, (1.0 - s_range) * s_prev[j])
            # Ensure a non-degenerate interval even when s_range >= 1
            ub = max(lb * 1.05, (1.0 + s_range) * s_prev[j])
            lower[idx_sigma] = lb
            upper[idx_sigma] = ub

    # Bug fix: removed leftover debug print of the bounds list.
    return list(zip(lower, upper))


def fit_multi_gauss(
    fitting_data: FittingData,
    config: Optional[FittingConfig] = None,
    init_params: Optional[np.ndarray] = None,
    opt_params: Optional[OptimizationParams] = None,
) -> Tuple[np.ndarray, Any]:
    """
    Fit a multi-Gaussian model to the data.

    The fit runs in three SLSQP passes:
    0. a preliminary log-residual fit used only to locate the mid-tail region,
    1. a log-residual fit with mid-tail weighting (emphasizes the tails),
    2. a linear-residual refinement started from the stage-1 solution.

    Parameters:
        fitting_data: FittingData object with coordinates and values
        config: FittingConfig object with fitting parameters (defaults when None)
        init_params: Initial parameter values [normFactor, w2..wn, s1..sn, (c)]
        opt_params: OptimizationParams object; its step_range, step_range_relax
            and s_prev are honored (previously custom step ranges were
            silently discarded)

    Returns:
        Tuple of (fitted parameter array, OptimizeResult of the final stage)
    """
    # Use default configurations if not provided
    if config is None:
        config = FittingConfig()
    if opt_params is None:
        opt_params = OptimizationParams()

    crds = np.asarray(fitting_data.coords, dtype=float)
    vals = np.asarray(fitting_data.values, dtype=float)
    n_gauss = config.n_gauss
    with_background = config.with_background
    is_post_bragg = config.is_post_bragg
    s_prev = opt_params.s_prev

    # Initialize parameters if not provided
    if init_params is None:
        # Half the total weight split evenly over components 2..n;
        # the implicit first weight then starts at 0.5.
        w_init = np.ones(n_gauss - 1) * 0.5 / (n_gauss - 1)
        # Seed sigmas from the weighted standard deviation of the data.
        s_guess = weighted_stddev(crds, vals)
        s_init = np.array([s_guess * (0.8 + 2 * i) for i in range(n_gauss)])

        initial_gauss_params = GaussParams(
            norm_factor=1.0,
            weights=w_init,
            sigmas=s_init,
            background=vals.min() if with_background else None,
        )
        initial_params = cvt_stctd_to_params(initial_gauss_params, n_gauss, with_background)

        if config.info_verbose:
            print("Initial params: ", initial_params)
    else:
        initial_params = init_params.copy()

    # Re-wrap the coerced float arrays.  The caller's config and opt_params
    # are used as-is (bug fix: they were previously re-created with defaults,
    # discarding custom step_range / step_range_relax settings).
    fitting_data = FittingData(coords=crds, values=vals)

    # Build bounds and constraints
    bounds = build_bounds(crds, n_gauss, opt_params, is_post_bragg=is_post_bragg)
    constraints = build_constraints(n_gauss, is_post_bragg)

    # Stage 0: preliminary fit used only to locate the mid-tail region
    res0 = minimize(
        objective_log,
        initial_params,
        args=(fitting_data, config, s_prev, None),
        method="SLSQP",
        bounds=bounds,
        constraints=constraints,
        options={"maxiter": 500, "ftol": 1e-10},
    )
    y_pred0 = multi_gauss_model(crds, res0.x, n_gauss, with_background)

    # Detect mid-tail parameters for better fitting.
    # Bug fix: keep only the keys accepted by mid_tail_weights -- the
    # detection result also carries x_left/x_right, which raised a
    # TypeError when expanded as **kwargs in the objective functions.
    mt_info = mid_tail_detect_v2(crds, vals, y_pred0, MidTailConfig())
    mid_tail_weight_para = {k: mt_info[k] for k in ("center", "width", "lam_m")}

    if config.info_verbose:
        prefix = f"[{config.info_tag}] " if config.info_tag is not None else ""
        print(f"{prefix}mid_tail_weight_para: center={mid_tail_weight_para['center']:.3f}, width={mid_tail_weight_para['width']:.3f}, lam_m={mid_tail_weight_para['lam_m']:.1f}, n={n_gauss}")

    # Stage 1: log-residual fitting to emphasize the tails
    res1: OptimizeResult = minimize(
        objective_log,
        initial_params,
        args=(fitting_data, config, s_prev, mid_tail_weight_para),
        method="SLSQP",
        bounds=bounds,
        constraints=constraints,
        options={"maxiter": 3000, "ftol": 1e-10},
    )

    # Stage 2: linear-residual refinement from the stage-1 solution
    res2 = minimize(
        objective_linear,
        res1.x,
        args=(fitting_data, config, s_prev, mid_tail_weight_para),
        method="SLSQP",
        bounds=bounds,
        constraints=constraints,
        options={"maxiter": 2000, "ftol": 1e-10},
    )

    if not res2.success:
        print("Optimization failed to converge: ", res2.message)

    return res2.x, res2


## Backward compatibility functions
def fit_multi_gauss_legacy(
    crds: np.ndarray,
    vals: np.ndarray,
    n_gauss: int = 2,
    init_params: Optional[np.ndarray] = None,
    with_background: bool = False,
    is_post_bragg: bool = False,
    s_prev: Optional[np.ndarray] = None,
    lam_n: float = DLM_NORM,
    info_verbose: bool = False,
    info_tag: Optional[str] = None,
) -> Tuple[np.ndarray, Any]:
    """
    Backward-compatible wrapper around fit_multi_gauss taking flat arguments.
    """
    data = FittingData(coords=crds, values=vals)
    cfg = FittingConfig(
        n_gauss=n_gauss,
        with_background=with_background,
        is_post_bragg=is_post_bragg,
        lam_n=lam_n,
        info_verbose=info_verbose,
        info_tag=info_tag,
    )
    opts = OptimizationParams(s_prev=s_prev)
    return fit_multi_gauss(data, cfg, init_params, opts)


def objective_log_legacy(
    params: np.ndarray,
    x: np.ndarray,
    y: np.ndarray,
    n_gauss: int,
    with_background: bool,
    lam_n: float = DLM_NORM,
    s_prev: Optional[np.ndarray] = None,
    mid_tail_weight_para: Optional[Dict[str, Any]] = None,
) -> float:
    """
    Backward-compatible wrapper around objective_log taking flat arguments.
    """
    data = FittingData(coords=x, values=y)
    cfg = FittingConfig(n_gauss=n_gauss, with_background=with_background, lam_n=lam_n)
    return objective_log(params, data, cfg, s_prev, mid_tail_weight_para)


def objective_linear_legacy(
    params: np.ndarray,
    crds: np.ndarray,
    vals: np.ndarray,
    n_gauss: int,
    with_background: bool,
    lam_n: float = DLM_NORM,
    s_prev: Optional[np.ndarray] = None,
    mid_tail_weight_para: Optional[Dict[str, Any]] = None,
) -> float:
    """
    Backward-compatible wrapper around objective_linear taking flat arguments.
    """
    data = FittingData(coords=crds, values=vals)
    cfg = FittingConfig(n_gauss=n_gauss, with_background=with_background, lam_n=lam_n)
    return objective_linear(params, data, cfg, s_prev, mid_tail_weight_para)


def mid_tail_detect_v2_legacy(
    crds: np.ndarray,
    vals: np.ndarray,
    vals_pred0: np.ndarray,
    r_min: float = DR_MIN,
    r_max: float = DR_MAX,
    smooth_window: int = DSMOOTH_WINDOW,
    width_clip: Tuple[float, float] = DWIDTH_CLIP,
    lam_m_clip: Tuple[float, float] = DLAM_M_CLIP,
    rel_err: float = DREL_ERR,
    abs_err: float = DABS_ERR,
) -> Dict[str, Any]:
    """
    Backward-compatible wrapper around mid_tail_detect_v2 taking flat arguments.
    """
    cfg = MidTailConfig(
        r_min=r_min,
        r_max=r_max,
        smooth_window=smooth_window,
        width_clip=width_clip,
        lam_m_clip=lam_m_clip,
        rel_err=rel_err,
        abs_err=abs_err,
    )
    return mid_tail_detect_v2(crds, vals, vals_pred0, cfg)


def build_bounds_legacy(
    x: np.ndarray,
    n_gauss: int,
    s_prev: Optional[np.ndarray] = None,
    step_range: float = DSTEP_RANGE,
    is_post_bragg: bool = False,
    step_range_relax: float = DRLXD_STEP_RANGE,
) -> list:
    """
    Backward-compatible wrapper around build_bounds taking flat arguments.
    """
    opts = OptimizationParams(
        s_prev=s_prev,
        step_range=step_range,
        step_range_relax=step_range_relax,
    )
    return build_bounds(x, n_gauss, opts, is_post_bragg)
