"""
Utility functions for multi-Gaussian fitting of lateral dose profiles.
横向 Dose 分布拟合, 使用 多高斯分布

The module includes:
- 多高斯模型 Multi-Gaussian model
- 优化的目标函数 Objective functions
- 多高斯模型参数估计 和 约束函数 Parameters and constraints
- Bridge class, for 模型参数Tuple 与 结构体命名字段 互相转换
- 工具函数: 归一化, 统计量等
"""

from dataclasses import dataclass
from enum import Enum, auto
from typing import Any, Dict, Optional, Tuple

import numpy as np
from scipy.optimize import minimize

##0000000000000000000000000000000000000000000000000000000000000000000000000000000 Constants
## Prefixes: D -> Default; LM -> Lambda parameters; RLXD -> Relaxed
DFACTOR_SYMMETRY = 2.0  # For a cylinder detector there is no binning in phi and R runs over bins 1..N; only the symmetric half of the Gaussian is represented, so amplitudes are doubled
DEPSILON = 1e-12  # Small value to prevent division by zero
DLM_SIGMA_SMOOTH_ = 1e3  # Lambda for sigma smooth penalty
DLM_NORM = 1e3  # Lambda for normalization penalty
DMIN_SIGMA = 1e-3  # Minimum allowed sigma value
DMIN_WEIGHT = 1e-8  # Minimum allowed weight
DMIN_NORM_FACTOR = 0.5  # Minimum normalization factor
DMAX_NORM_FACTOR = 1.5  # Maximum normalization factor
DSTEP_RANGE = 0.2  # Parameter step range for adjacent layer fitting
DRLXD_STEP_RANGE = 5.0  # Relaxed step range for post-Bragg region
DSMOOTH_WINDOW = 15  # Window size for smoothing in mid-tail detection
DMIN_WIDTH_CLIP = 2.0  # Minimum width for mid-tail detection
DMAX_WIDTH_CLIP = 40.0  # Maximum width for mid-tail detection
DMIN_LAM_M_CLIP = 6.0  # Minimum lambda_m for mid-tail detection
DMAX_LAM_M_CLIP = 40.0  # Maximum lambda_m for mid-tail detection
DREL_ERR = 0.01  # Relative error threshold
DABS_ERR = 0.0  # Absolute error threshold
DGAP_THRESHOLD = 0.15  # Gap threshold for continuous region detection (1.5 * dx, assuming a bin pitch dx of 0.1 -- TODO confirm)

## Default values for mid_tail_weights; MT -> Mid Tail
DMT_CENTER = 15.0  # Default center of the mid-tail emphasis region
DMT_WIDTH = 3.0  # Default width of the mid-tail emphasis region
DMT_LM_MIDTAIL = 10.0  # Default mid-tail weighting strength

## Default ranges for mid_tail_detect
DR_MIN = 5.0  # Lower |x| bound considered in mid-tail detection
DR_MAX = 40.0  # Upper |x| bound considered in mid-tail detection
DWIDTH_CLIP = (DMIN_WIDTH_CLIP, DMAX_WIDTH_CLIP)  # (min, max) clip for detected width
DLAM_M_CLIP = (DMIN_LAM_M_CLIP, DMAX_LAM_M_CLIP)  # (min, max) clip for lambda_m


class MeshTypeE(Enum):
    """Mesh geometry type of the scoring detector.

    NOTE(review): "Cyliner" is a typo for "Cylinder", but the member name is
    part of the public interface and is kept for backward compatibility.
    """

    Box = auto()
    Cyliner = auto()


def normalize_by_sum(mtype: MeshTypeE, arr1D: np.ndarray, bin_width: float) -> np.ndarray:
    """
    Normalize arr1D by its sum, then divide by the bin geometry (length or
    ring area) to convert a binned dose into a density distribution.

    Parameters:
    - mtype: mesh type; box or cylinder
    - arr1D: Input array (dose values)
    - bin_width: Bin width for normalization

    Returns:
    - Normalized array (density distribution); all zeros if the input sums to 0

    Raises:
    - ValueError: for an unknown mesh type
    """
    total_sum = np.sum(arr1D)
    if total_sum == 0:
        return np.zeros_like(arr1D)

    if mtype == MeshTypeE.Box:
        return arr1D / total_sum / bin_width

    if mtype == MeshTypeE.Cyliner:
        # Radial bin i covers [i*bin_width, (i+1)*bin_width]; its ring area is
        # pi * ((i+1)^2 - i^2) * bin_width^2 = pi * bin_width * (2*r_i + bin_width),
        # where r_i = i * bin_width is the inner radius.  For i == 0 this
        # reduces naturally to the central disk area pi * bin_width^2.
        # BUGFIX: the previous formula pi * (1 + 2*r) was only correct for
        # bin_width == 1 (it was missing the bin_width factors).
        rs = np.arange(len(arr1D), dtype=float) * bin_width  # inner radii
        areas = np.pi * bin_width * (2.0 * rs + bin_width)  # ring areas
        return arr1D / areas / total_sum

    raise ValueError(f"wrong mesh type:{mtype}")


def weighted_stddev(qty: np.ndarray, wts: np.ndarray, eps: float = DEPSILON) -> float:
    """
    Calculate the weighted standard deviation of qty with weights wts.

    Parameters:
    - qty: Input values
    - wts: Weights (typically a density or probability distribution)
    - eps: Small value to prevent division by zero

    Returns:
    - Weighted standard deviation; 0.0 when the total weight is zero
      (only reachable for empty inputs, since weights are clamped to eps)
    """
    wts = np.maximum(wts, eps)  # clamp so no single weight is exactly zero
    wts_tot = np.sum(wts)
    if wts_tot == 0:
        # Only reachable for empty arrays (np.sum of an empty array is 0.0).
        return 0.0
    iwts = wts / wts_tot
    mu = np.average(qty, weights=iwts)
    # np.power instead of np.pow: np.pow is an alias that only exists in
    # NumPy >= 2.0, so np.power keeps the module compatible with NumPy 1.x.
    var = np.average(np.power(qty - mu, 2.0), weights=iwts)
    return np.sqrt(var)


def multi_gauss_model(x: np.ndarray, params: np.ndarray, n_gauss: int, with_background: bool = False) -> np.ndarray:
    """
    Evaluate an N-component Gaussian mixture, all components centered at zero.

    Parameters:
        x: Input coordinates
        params: Parameters array [normFactor, w2..wn, s1..sn, (c)]
            - normFactor: Overall normalization factor
            - w2..wn: Weights of Gaussians 2..n (weight 1 is 1 - sum of the rest)
            - s1..sn: Sigma of each Gaussian
            - c: Optional constant background
        n_gauss: Number of Gaussian components
        with_background: Whether a background term is present in params

    Returns:
        Model prediction evaluated at each x
    """
    norm_factor = params[0]
    tail_weights = params[1:n_gauss]  # weights of components 2..n
    sigmas = params[n_gauss : 2 * n_gauss]  # one sigma per component

    # The first component's weight is fixed by the sum-to-one convention,
    # and all weights carry the symmetry factor (half-Gaussian geometry).
    first_weight = 1 - np.sum(tail_weights)
    weights = DFACTOR_SYMMETRY * np.concatenate(([first_weight], tail_weights))

    # Broadcast to shape (n_gauss, len(x)): one row per Gaussian component.
    sig_col = sigmas[:, np.newaxis]
    row_x = x[np.newaxis, :]
    components = weights[:, np.newaxis] * np.exp(-0.5 * (row_x / sig_col) ** 2) / (np.sqrt(2 * np.pi) * sig_col)

    # Collapse the component axis and apply the overall normalization.
    y_pred = np.sum(components, axis=0) * norm_factor

    # Optional constant background is stored as the last parameter.
    if with_background:
        y_pred = y_pred + params[-1]

    return y_pred


def sigma_smooth_penalty(s: np.ndarray, s_prev: Optional[np.ndarray], lam_s: float = DLM_SIGMA_SMOOTH_) -> float:
    """
    Quadratic penalty on the relative change of sigma values between adjacent
    layers, encouraging smooth depth-wise evolution of the fitted widths.

    Parameters:
        s: Sigma values of the current layer
        s_prev: Sigma values of the adjacent (previous) layer, or None
        lam_s: Penalty strength

    Returns:
        Penalty value; 0.0 when no comparable set of previous sigmas exists
    """
    # Without a previous layer of matching size there is nothing to compare.
    if s_prev is None or len(s) == 0 or len(s_prev) != len(s):
        return 0.0

    # Per-component relative change, guarded against division by ~0.
    rel_change = (s - s_prev) / np.maximum(s_prev, DEPSILON)
    return lam_s * np.sum(rel_change**2)


def mid_tail_weights(x: np.ndarray, center: float = DMT_CENTER, width: float = DMT_WIDTH, lam_m: float = DMT_LM_MIDTAIL) -> np.ndarray:
    """
    Gaussian-bump weighting that boosts points with |x| near the given center.

    Parameters:
        x: Input coordinates
        center: Center of the emphasized region (in |x|)
        width: Width of the emphasized region
        lam_m: Extra weight added at the center of the region

    Returns:
        Weight array: 1 far from the region, up to 1 + lam_m at its center
    """
    safe_width = max(width, DEPSILON)  # guard against zero width
    z = (np.abs(x) - center) / safe_width
    return 1.0 + lam_m * np.exp(-0.5 * z**2)


def mid_tail_detect_v2(
    x: np.ndarray,
    y: np.ndarray,
    y_pred0: np.ndarray,
    r_min: float = DR_MIN,
    r_max: float = DR_MAX,
    smooth_window: int = DSMOOTH_WINDOW,
    width_clip: Tuple[float, float] = DWIDTH_CLIP,
    lam_m_clip: Tuple[float, float] = DLAM_M_CLIP,
    rel_err: float = DREL_ERR,
    abs_err: float = DABS_ERR,
) -> Dict[str, Any]:
    """
    Detect the region where the initial prediction falls significantly below
    the data (typically the mid-tail of the lateral profile) and derive
    weighting parameters (center, width, lam_m) for a refined fit.

    Parameters:
        x: Input coordinates
        y: Actual values
        y_pred0: Initial prediction values
        r_min: Minimum |x| to consider
        r_max: Maximum |x| to consider
        smooth_window: Window size for smoothing the relative error
        width_clip: (min, max) limits for the detected width
        lam_m_clip: (min, max) limits for the lambda_m parameter
        rel_err: Relative error threshold
        abs_err: Absolute error threshold

    Returns:
        Dictionary with keys center, width, lam_m, x_left, x_right
        (x_left/x_right are None when no under-predicted region is found)
    """
    diff = y_pred0 - y
    eps = DEPSILON
    # Per-point tolerance: the larger of the absolute and relative thresholds.
    threshold = np.maximum(abs_err, rel_err * np.maximum(y, eps))

    # Points within [r_min, r_max] where y_pred < y by more than the tolerance.
    sel_mask = (diff < -threshold) & (np.abs(x) >= r_min) & (np.abs(x) <= r_max)
    if not np.any(sel_mask):
        # Nothing detected: fall back to a mild default weighting.
        return dict(center=DMT_CENTER, width=5.0, lam_m=1.0, x_left=None, x_right=None)

    x_selected = x[sel_mask]
    rel_selected = -(diff[sel_mask]) / np.maximum(y[sel_mask], eps)

    # Sort the selected points by x.
    idx_sort = np.argsort(x_selected)
    x_sorted = x_selected[idx_sort]
    rel_sorted = rel_selected[idx_sort]

    # Too few points to analyze a region shape: return fixed fallback params.
    if len(x_sorted) < 3:
        return dict(
            x_left=x_sorted.min(),
            x_right=x_sorted.max(),
            center=np.mean(x_sorted),
            width=2.0,
            lam_m=8.0,
        )

    gap_threshold = DGAP_THRESHOLD  # 1.5 * dx, where dx is 0.1

    # Split the sorted points into continuous segments: a gap between
    # consecutive x values larger than gap_threshold starts a new segment.
    x_diffs = np.diff(x_sorted)
    gap_indices = np.where(x_diffs > gap_threshold)[0]
    # Segment boundaries: start at 0, break after each gap, end at len(x_sorted).
    breakpoints = np.concatenate(([0], gap_indices + 1, [len(x_sorted)]))

    # Keep only the longest continuous segment.
    seg_lengths = np.diff(breakpoints)
    k_max = int(np.argmax(seg_lengths))
    left, right = breakpoints[k_max], breakpoints[k_max + 1]
    x_seg, rel_seg = x_sorted[left:right], rel_sorted[left:right]

    if len(x_seg) < 3:
        return dict(
            x_left=x_seg.min(),
            x_right=x_seg.max(),
            center=np.mean(x_seg),
            width=2.0,
            lam_m=8.0,
        )

    # Smooth the relative error with a moving average.
    if smooth_window > 1 and smooth_window < len(rel_seg):
        kernel = np.ones(smooth_window) / smooth_window
        rel_smoothed = np.convolve(rel_seg, kernel, mode="same")
    else:
        rel_smoothed = rel_seg

    # Locate the peak of the (smoothed) relative error.
    i_peak = np.argmax(rel_smoothed)
    x_peak = x_seg[i_peak]
    peak_val = rel_smoothed[i_peak]
    # BUGFIX: removed a stray debug print of peak_val that was not gated by
    # any verbosity flag and spammed stdout on every call.

    # Region extent: where the smoothed error drops below 10% of its peak
    # (note: 10% of peak, not the conventional half-maximum).
    half = 0.1 * peak_val

    # Walk left from the peak to the first point below the 10% level.
    rel_to_left = rel_smoothed[: i_peak + 1][::-1]  # reversed, peak first
    left_indices = np.where(rel_to_left < half)[0]
    iL = i_peak - left_indices[0] if len(left_indices) > 0 else 0

    # Walk right from the peak to the first point below the 10% level.
    rel_to_right = rel_smoothed[i_peak:]
    right_indices = np.where(rel_to_right < half)[0]
    iR = i_peak + right_indices[0] if len(right_indices) > 0 else len(x_seg) - 1

    x_left, x_right = x_seg[iL], x_seg[iR]
    width = 0.5 * abs(x_right - x_left)
    width = np.clip(width, width_clip[0], width_clip[1])

    # Heuristic weighting strength: grows with the peak error, saturating.
    lam_m = 8.0 + 30.0 * (1 - np.exp(-3.0 * peak_val))
    lam_m = np.clip(lam_m, lam_m_clip[0], lam_m_clip[1])

    return dict(center=x_peak, width=width, lam_m=lam_m, x_left=x_left, x_right=x_right)


# Bridge class for converting between tuple and structured fields for minimize args
@dataclass
class FittingParams:
    """
    Bridge object bundling the extra arguments of the objective functions,
    so callers can use named fields instead of positional tuple indexing.
    """

    x: np.ndarray  # input coordinates
    y: np.ndarray  # observed values
    n_gauss: int  # number of Gaussian components
    with_background: bool  # whether a constant background term is fitted
    lam_n: float = DLM_NORM  # normalization penalty strength
    s_prev: Optional[np.ndarray] = None  # sigmas of the adjacent layer
    mid_tail_weight_para: Optional[Dict[str, Any]] = None  # mid_tail_weights kwargs

    def to_tuple(self) -> Tuple:
        """Pack the fields into the positional args tuple the objectives expect."""
        return (self.x, self.y, self.n_gauss, self.with_background, self.lam_n, self.s_prev, self.mid_tail_weight_para)

    @classmethod
    def from_tuple(cls, args: Tuple):
        """Rebuild a FittingParams from a positional args tuple (inverse of to_tuple)."""
        return cls(
            x=args[0],
            y=args[1],
            n_gauss=args[2],
            with_background=args[3],
            lam_n=args[4],
            s_prev=args[5],
            mid_tail_weight_para=args[6],
        )


def objective_log(params: np.ndarray, x: np.ndarray, y: np.ndarray, n_gauss: int, with_background: bool, lam_n: float = DLM_NORM, s_prev: Optional[np.ndarray] = None, mid_tail_weight_para: Optional[Dict[str, Any]] = None) -> float:
    """
    First-stage objective: weighted sum of squared log residuals plus soft
    constraints (normalization close to 1, smooth sigma evolution).

    Parameters:
        params: Fitting parameters [normFactor, w2..wn, s1..sn, (c)]
        x: Input coordinates
        y: Observed values
        n_gauss: Number of Gaussian components
        with_background: Whether a background term is included
        lam_n: Normalization penalty strength
        s_prev: Previous-layer sigma values (for the smoothness penalty)
        mid_tail_weight_para: Keyword arguments for mid_tail_weights

    Returns:
        Objective function value (a large constant for infeasible params)
    """
    # Reject infeasible points: negative parameters or non-positive sigmas.
    s = params[n_gauss : 2 * n_gauss]
    if np.any(params < 0) or np.any(s <= 0):
        return 1e20

    y_pred = multi_gauss_model(x, params, n_gauss, with_background)

    # Optional emphasis of the mid-tail region.
    weights = 1.0 if mid_tail_weight_para is None else mid_tail_weights(x, **mid_tail_weight_para)

    # Weighted sum of squared residuals in log space (emphasizes the tails).
    eps = DEPSILON
    residual = np.log(y_pred + eps) - np.log(y + eps)
    loss = np.sum(weights * residual**2.0)

    # Soft constraint keeping the normalization factor near 1.
    loss += lam_n * (params[0] - 1.0) ** 2.0

    # Inter-layer sigma smoothness penalty, when a previous layer exists.
    if s_prev is not None:
        loss += sigma_smooth_penalty(s, s_prev)

    return loss


def objective_linear(params: np.ndarray, x: np.ndarray, y: np.ndarray, n_gauss: int, with_background: bool, lam_n: float = DLM_NORM, s_prev: Optional[np.ndarray] = None, mid_tail_weight_para: Optional[Dict[str, Any]] = None) -> float:
    """
    Second-stage objective: weighted sum of squared linear residuals plus the
    same soft constraints as the log-residual objective.

    Parameters:
        params: Fitting parameters [normFactor, w2..wn, s1..sn, (c)]
        x: Input coordinates
        y: Observed values
        n_gauss: Number of Gaussian components
        with_background: Whether a background term is included
        lam_n: Normalization penalty strength
        s_prev: Previous-layer sigma values (for the smoothness penalty)
        mid_tail_weight_para: Keyword arguments for mid_tail_weights

    Returns:
        Objective function value (a large constant for infeasible params)
    """
    # Reject infeasible points: negative parameters or non-positive sigmas.
    s = params[n_gauss : 2 * n_gauss]
    if np.any(params < 0) or np.any(s <= 0):
        return 1e20

    y_pred = multi_gauss_model(x, params, n_gauss, with_background)

    # Optional emphasis of the mid-tail region.
    weights = 1.0 if mid_tail_weight_para is None else mid_tail_weights(x, **mid_tail_weight_para)

    # Weighted sum of squared residuals in linear space.
    residual = y_pred - y
    loss = np.sum(weights * residual**2.0)

    # Soft constraint keeping the normalization factor near 1.
    loss += lam_n * (params[0] - 1.0) ** 2.0

    # Inter-layer sigma smoothness penalty, when a previous layer exists.
    if s_prev is not None:
        loss += sigma_smooth_penalty(s, s_prev)

    return loss


def objective_log_wrapper(params: np.ndarray, fitting_params: FittingParams) -> float:
    """
    Adapter so scipy.optimize.minimize can pass a single FittingParams object
    as the extra argument instead of a positional tuple.

    Parameters:
        params: Fitting parameters [normFactor, w2..wn, s1..sn, (c)]
        fitting_params: FittingParams carrying x, y and the other settings

    Returns:
        Objective function value from objective_log
    """
    # to_tuple yields (x, y, n_gauss, with_background, lam_n, s_prev,
    # mid_tail_weight_para) — exactly the positional order objective_log takes.
    return objective_log(params, *fitting_params.to_tuple())


def objective_linear_wrapper(params: np.ndarray, fitting_params: FittingParams) -> float:
    """
    Adapter so scipy.optimize.minimize can pass a single FittingParams object
    as the extra argument instead of a positional tuple.

    Parameters:
        params: Fitting parameters [normFactor, w2..wn, s1..sn, (c)]
        fitting_params: FittingParams carrying x, y and the other settings

    Returns:
        Objective function value from objective_linear
    """
    # to_tuple yields (x, y, n_gauss, with_background, lam_n, s_prev,
    # mid_tail_weight_para) — exactly the positional order objective_linear takes.
    return objective_linear(params, *fitting_params.to_tuple())


def build_constraints(n_gauss: int, is_post_bragg: bool = False) -> list:
    """
    Assemble SLSQP inequality constraints for the multi-Gaussian fit.

    Parameters:
        n_gauss: Number of Gaussian components
        is_post_bragg: Whether fitting the post-Bragg-peak region (relaxes
            the ordering constraint on the first sigma pair)

    Returns:
        List of constraint dicts for scipy.optimize.minimize
    """
    # sum(w2..wn) <= 1, so the implicit first weight w1 = 1 - sum stays >= 0.
    cons = [{"type": "ineq", "fun": lambda p, n=n_gauss: 1 - np.sum(p[1:n])}]

    # Non-decreasing sigmas: s_{i+1} >= s_i for each adjacent pair.  In the
    # post-Bragg region the first pair is left unconstrained.
    start = 1 if is_post_bragg else 0
    for i in range(start, n_gauss - 1):
        cons.append({"type": "ineq", "fun": lambda p, i=i, n=n_gauss: p[n + i + 1] - p[n + i]})

    return cons


def build_bounds(x: np.ndarray, n_gauss: int, s_prev: Optional[np.ndarray] = None, step_range: float = DSTEP_RANGE, is_post_bragg: bool = False, step_range_relax: float = DRLXD_STEP_RANGE) -> list:
    """
    Build (min, max) bounds for each optimization parameter.

    Parameters:
        x: Input coordinates (their span bounds the largest allowed sigma)
        n_gauss: Number of Gaussian components
        s_prev: Previous-layer sigma values; when given, each sigma is
            confined to a band around its previous value
        step_range: Relative band half-width around s_prev
        is_post_bragg: Whether this is the post-Bragg-peak region
        step_range_relax: Relaxed band half-width used post-Bragg

    Returns:
        List of (min, max) bounds for [normFactor, w2..wn, s1..sn]

    NOTE(review): the bounds never cover an optional background parameter;
    if with_background fits are run with these bounds the lengths will not
    match the parameter vector — confirm background fits are bounded elsewhere.
    """
    # Base bounds for [normFactor, w2..wn, s1..sn]
    lower = [DMIN_NORM_FACTOR] + [DMIN_WEIGHT] * (n_gauss - 1) + [DMIN_SIGMA] * n_gauss
    upper = [DMAX_NORM_FACTOR] + [1.0] * (n_gauss - 1) + [(x.max() - x.min())] * n_gauss

    # Tighten sigma bounds around the previous layer's values if available
    if s_prev is not None:
        # Relaxed band in the post-Bragg region (hoisted: loop-invariant)
        s_range = step_range_relax if is_post_bragg else step_range

        for j in range(n_gauss):
            idx_sigma = n_gauss + j  # Index of j-th sigma parameter

            lb = max(DMIN_SIGMA, (1.0 - s_range) * s_prev[j])
            ub = max(lb * 1.05, (1.0 + s_range) * s_prev[j])  # keep ub strictly above lb
            lower[idx_sigma] = lb
            upper[idx_sigma] = ub

    # BUGFIX: removed a stray debug print of the bounds list that was not
    # gated by any verbosity flag and spammed stdout on every call.
    return list(zip(lower, upper))


def fit_multi_gauss(x: np.ndarray, y: np.ndarray, n_gauss: int = 2, init_params: Optional[np.ndarray] = None, with_background: bool = False, is_post_bragg: bool = False, s_prev: Optional[np.ndarray] = None, lam_n: float = DLM_NORM, info_verbose: bool = False, info_tag: Optional[str] = None) -> Tuple[np.ndarray, Any]:
    """
    Fit the multi-Gaussian model to the data in three SLSQP stages:
    a rough log-residual fit to locate the mid-tail region, a log-residual
    fit with mid-tail weighting, and a final linear-residual refinement.

    Parameters:
        x: Input coordinates
        y: Observed values
        n_gauss: Number of Gaussian components
        init_params: Optional starting parameters; estimated when None
        with_background: Whether a constant background term is fitted
        is_post_bragg: Whether this is the post-Bragg-peak region
        s_prev: Previous-layer sigma values used for bounds and smoothing
        lam_n: Normalization penalty strength
        info_verbose: Whether to print diagnostic information
        info_tag: Tag prepended to diagnostic output

    Returns:
        Tuple of (fitted parameter array, final scipy OptimizeResult)
    """
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)

    if init_params is not None:
        initial_params = init_params.copy()
    else:
        # Heuristic starting point: unit normalization, equal tail weights,
        # sigmas spread around the weighted-stddev estimate of the profile.
        w_init = np.ones(n_gauss - 1) * 0.5 / (n_gauss - 1)
        s_guess = weighted_stddev(x, y)
        s_init = [s_guess * (0.8 + 2 * i) for i in range(n_gauss)]
        initial_params = np.array([1.0] + list(w_init) + list(s_init) + ([y.min()] if with_background else []))
        if info_verbose:
            print("Initial params: ", initial_params)

    bounds = build_bounds(x, n_gauss, s_prev, is_post_bragg=is_post_bragg)
    constraints = build_constraints(n_gauss, is_post_bragg)

    def _run_slsqp(objective, p0, mtw_para, max_iter):
        # Shared SLSQP driver used by all three fitting stages.
        return minimize(
            objective,
            p0,
            args=(x, y, n_gauss, with_background, lam_n, s_prev, mtw_para),
            method="SLSQP",
            bounds=bounds,
            constraints=constraints,
            options={"maxiter": max_iter, "ftol": 1e-10},
        )

    # Stage 0: rough log-residual fit without mid-tail emphasis, used only
    # to locate the under-predicted mid-tail region.
    res_pre = _run_slsqp(objective_log, initial_params, None, 500)
    y_pred0 = multi_gauss_model(x, res_pre.x, n_gauss, with_background)

    # Derive the mid-tail weighting parameters from the rough fit.
    mid_tail_weight_para = mid_tail_detect_v2(x, y, y_pred0)

    if info_verbose:
        prefix = f"[{info_tag}] " if info_tag is not None else ""
        print(f"{prefix}mid_tail_weight_para: center={mid_tail_weight_para['center']:.3f}, width={mid_tail_weight_para['width']:.3f}, lam_m={mid_tail_weight_para['lam_m']:.1f}, n={n_gauss}")

    # Stage 1: log-residual fit with mid-tail weighting (emphasizes tails).
    res_log = _run_slsqp(objective_log, initial_params, mid_tail_weight_para, 3000)

    # Stage 2: linear-residual refinement starting from the stage-1 solution.
    res_lin = _run_slsqp(objective_linear, res_log.x, mid_tail_weight_para, 2000)

    if not res_lin.success:
        print("Optimization failed to converge: ", res_lin.message)

    return res_lin.x, res_lin


# Example usage function demonstrating the bridge class
def example_usage_with_bridge():
    """
    Demonstrate fitting with the FittingParams bridge object, avoiding raw
    tuple indexing when passing the extra arguments to the objective.
    """
    # Synthetic data: a sigma=2 Gaussian plus uniform noise
    x = np.linspace(-10, 10, 100)
    y = np.exp(-0.5 * (x / 2) ** 2) / (np.sqrt(2 * np.pi) * 2) + 0.1 * np.random.random(len(x))

    # Bundle the auxiliary fitting arguments into the structured object
    fitting_params = FittingParams(x=x, y=y, n_gauss=2, with_background=False, lam_n=1e3, s_prev=None, mid_tail_weight_para={"center": 15.0, "width": 3.0, "lam_m": 10.0})

    # Starting point: [normFactor, w2, s1, s2]
    start = np.array([1.0, 0.3, 1.5, 3.0])

    # Run the optimizer through the structured-parameter wrapper
    return minimize(
        objective_log_wrapper,
        start,
        args=(fitting_params,),
        method="SLSQP",
        bounds=[(0.1, 2.0), (1e-8, 1.0), (0.1, 10.0), (0.1, 10.0)],
        options={"maxiter": 1000, "ftol": 1e-10},
    )


@dataclass
class GaussParams:
    """
    Named-field view of the raw multi-Gaussian parameter array, so client
    code can read components by name instead of by array index.
    """

    norm_factor: float  # overall normalization factor
    weights: np.ndarray  # [w2..wn]: weights of components 2..n (w1 is implicit)
    sigmas: np.ndarray  # [s1..sn]: sigma of each Gaussian component
    background: Optional[float] = None  # optional constant background

    @property
    def w1(self) -> float:
        """Implicit first weight: 1 - sum(weights), clamped to be non-negative."""
        return max(0.0, 1.0 - np.sum(self.weights))

    @classmethod
    def from_array(cls, params: np.ndarray, n_gauss: int, with_background: bool = False):
        """
        Build a GaussParams from a raw array [normFactor, w2..wn, s1..sn, (c)].

        Parameters:
            params: Raw parameter array
            n_gauss: Number of Gaussian components
            with_background: Whether params ends with a background term
        """
        return cls(
            norm_factor=params[0],
            weights=params[1:n_gauss],
            sigmas=params[n_gauss : 2 * n_gauss],
            background=params[-1] if with_background else None,
        )

    def to_array(self, n_gauss: int, with_background: bool = False) -> np.ndarray:
        """
        Flatten back into the raw array format [normFactor, w2..wn, s1..sn, (c)].

        Parameters:
            n_gauss: Number of Gaussian components (kept for interface symmetry)
            with_background: Whether to append the background term (only done
                when a background value is actually stored)

        Returns:
            Raw parameter array
        """
        parts = [self.norm_factor, *self.weights, *self.sigmas]
        if with_background and self.background is not None:
            parts.append(self.background)
        return np.array(parts)


def convert_params_to_structured(params: np.ndarray, n_gauss: int, with_background: bool = False) -> GaussParams:
    """
    Thin functional wrapper around GaussParams.from_array: raw array in,
    named-field object out.

    Parameters:
        params: Raw parameter array [normFactor, w2..wn, s1..sn, (c)]
        n_gauss: Number of Gaussian components
        with_background: Whether the array ends with a background term

    Returns:
        GaussParams with named access to the parameters
    """
    return GaussParams.from_array(params, n_gauss, with_background)


def convert_params_from_structured(gauss_params: GaussParams, n_gauss: int, with_background: bool = False) -> np.ndarray:
    """
    Thin functional wrapper around GaussParams.to_array: named-field object
    in, raw array out.

    Parameters:
        gauss_params: GaussParams with named parameter access
        n_gauss: Number of Gaussian components
        with_background: Whether to append the background term to the array

    Returns:
        Raw parameter array [normFactor, w2..wn, s1..sn, (c)]
    """
    return gauss_params.to_array(n_gauss, with_background)


# Example usage demonstrating the parameter bridge
def example_usage_param_bridge():
    """
    Demonstrate round-tripping between the raw parameter array and the
    structured GaussParams representation.
    """
    # Raw layout for 3 components: [normFactor, w2, w3, s1, s2, s3]
    raw_params = np.array([1.2, 0.3, 0.2, 1.5, 2.0, 3.0])
    n_gauss, with_background = 3, False

    # Raw -> structured
    structured_params = convert_params_to_structured(raw_params, n_gauss, with_background)
    print(f"Raw params: {raw_params}")
    print(f"Structured norm_factor: {structured_params.norm_factor}")
    print(f"Structured weights: {structured_params.weights}")
    print(f"Structured sigmas: {structured_params.sigmas}")
    print(f"First weight (calculated as 1 - sum of others): {structured_params.w1}")

    # Structured -> raw, and verify the round trip
    raw_converted = convert_params_from_structured(structured_params, n_gauss, with_background)
    print(f"Converted back to raw: {raw_converted}")
    print(f"Arrays match: {np.allclose(raw_params, raw_converted)}")

    return structured_params