"""
Fitting script for lateral dose profiles using multiple Gaussians.

This script fits the lateral (cross-beam) dose profiles at various depths
with a sum of multiple Gaussian functions. It includes logic for
pre-fitting to detect tail regions needing extra emphasis,
two-stage fitting (log and linear), and iterative fitting across depths.

The original script was refactored for better readability and maintainability.
"""

import os
from dataclasses import dataclass
from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import minimize


# --- Configuration ---
@dataclass
class FitConfig:
    """Stores configuration parameters for the fitting process."""

    # Simulation and grid parameters
    particle: str = "he"
    energy: str = "275MeV"
    material: str = "WATER"
    z_bins: int = 1000
    z_max: float = 50.0
    y_bins: int = 1000
    y_min: float = -50.0
    y_max: float = 50.0
    y_min_fit: float = -50.0
    y_max_fit: float = 50.0
    # Data file path
    data_base_path: str = "/data/wenxiao/cmcvalidate_letd_new/cudaCMC_validate_Geant4/G4Project/"
    # Fitting parameters
    n_gauss: int = 5
    initial_step_range: float = 0.2  # For parameter bounds relative to previous layer
    relaxed_step_range: float = 5.0  # For parameter bounds post Bragg peak
    penalty_lambda: float = 1e3  # For normFactor proximity to 1
    # Peak detection and fitting range
    peak_detection_factor: float = 0.85  # Relative depth to start fitting (e.g., 0.85 * peak_idx)
    stop_dose_factor: float = 0.01  # Stop if dose is less than this fraction of peak dose
    post_bragg_start_offset: int = 40  # Number of bins after peak to start post-Bragg fitting
    # Plotting
    plot_dpi: int = 150

    # File paths (derived from other config)
    @property
    def data_path(self) -> Path:
        return Path(self.data_base_path) / f"{self.particle}_{self.energy}"

    @property
    def output_dir(self) -> Path:
        return Path(f"fit_plots_{self.energy}")

    @property
    def results_file(self) -> Path:
        return Path(f"fit_results_{self.energy}.txt")


@dataclass
class MidTailParams:
    """Parameters for the mid-tail weighting function.

    Describes a Gaussian bump (in |x|) added on top of a baseline weight of
    1.0; consumed by `get_mid_tail_weights`.
    """

    center: float = 15.0  # |x| position of the bump's center
    width: float = 3.0  # Gaussian width (sigma) of the bump
    multiplier: float = 10.0  # bump height added on top of the baseline weight of 1.0


# --- Utility Functions ---
def get_y_axis(config: FitConfig) -> np.ndarray:
    """Calculates the Y-axis bin centers."""
    y_bin_width = (config.y_max - config.y_min) / config.y_bins
    return np.arange(config.y_min + y_bin_width / 2, config.y_max, y_bin_width)


def get_z_axis(config: FitConfig) -> np.ndarray:
    """Calculates the Z-axis bin centers."""
    z_bin_width = config.z_max / config.z_bins
    return np.arange(z_bin_width / 2, config.z_max, z_bin_width)


def normalize_by_integral(arr: np.ndarray, bin_width: float) -> np.ndarray:
    """
    Scale `arr` so its integral (sum * bin_width) equals one.

    A zero integral yields an all-zero array instead of dividing by zero.
    """
    total = np.sum(arr) * bin_width
    return arr / total if total != 0 else np.zeros_like(arr)


def weighted_stddev(x: np.ndarray, y: np.ndarray, eps: float = 1e-12) -> float:
    """
    Weighted standard deviation of `x` using `y` as weights.

    Weights are floored at `eps` so an all-zero profile cannot divide by zero.
    """
    weights = np.maximum(y, eps)
    weights = weights / np.sum(weights)
    mean = np.sum(weights * x)
    variance = np.sum(weights * (x - mean) ** 2)
    return np.sqrt(variance)


# --- Model and Fitting Functions ---
def multi_gauss_model(x: np.ndarray, params: np.ndarray, n_gauss: int, with_background: bool = False) -> np.ndarray:
    """
    Evaluate a sum of `n_gauss` zero-centered Gaussians.

    Args:
        x: Input coordinates (1D array).
        params: [norm_factor, w2...wn, s1...sn, (background)]; the first
            weight is implicit: w1 = 1 - sum(w2...wn).
        n_gauss: Number of Gaussian components.
        with_background: If True, adds the trailing constant background term.

    Returns:
        Model values at `x`.
    """
    norm_factor = params[0]
    w_other = params[1:n_gauss]
    sigmas = params[n_gauss : 2 * n_gauss]

    # Implicit first weight keeps the component weights summing to one.
    first_weight = 1.0 - np.sum(w_other)
    weights = np.concatenate(([first_weight], w_other))

    result = np.zeros_like(x, dtype=float)
    for weight, sigma in zip(weights, sigmas):
        result += weight * np.exp(-0.5 * (x / sigma) ** 2) / (np.sqrt(2 * np.pi) * sigma)

    result *= norm_factor
    if with_background:
        result += params[-1]
    return result


def sigma_smooth_penalty(s_current: np.ndarray, s_previous: np.ndarray, lam_s: float = 1e3) -> float:
    """Quadratic penalty on the relative change of each sigma between layers."""
    # Floor the denominator so a vanishing previous sigma cannot blow up.
    safe_previous = np.maximum(s_previous, 1e-3)
    ratio = (s_current - s_previous) / safe_previous
    return lam_s * np.sum(ratio**2)


def get_mid_tail_weights(x: np.ndarray, params: MidTailParams) -> np.ndarray:
    """
    Generates a weighting array to emphasize fitting in the 'mid-tail' region.

    The weights are 1.0 everywhere, plus a Gaussian-shaped bump centered at `params.center`
    with a width `params.width` and a height controlled by `params.multiplier`.
    This function works symmetrically for x values around zero.
    """
    r = np.abs(x)  # Use absolute value to make it symmetric
    exponent_term = -0.5 * ((r - params.center) / max(params.width, 1e-6)) ** 2
    return 1.0 + params.multiplier * np.exp(exponent_term)


def build_fitting_bounds(x: np.ndarray, n_gauss: int, s_prev: np.ndarray = None, is_post_bragg: bool = False, config: FitConfig = FitConfig()) -> list[tuple[float, float]]:
    """Creates parameter bounds for the optimizer."""
    n_params = 1 + (n_gauss - 1) + n_gauss  # norm_factor + other_weights + sigmas
    if s_prev is not None and len(s_prev) != n_gauss:
        raise ValueError(f"s_prev length ({len(s_prev)}) must match n_gauss ({n_gauss}).")

    lower = [0.5] + [1e-8] * (n_gauss - 1) + [1e-3] * n_gauss
    upper = [1.5] + [1.0] * (n_gauss - 1) + [(x.max() - x.min())] * n_gauss

    if s_prev is not None:
        step_range = config.relaxed_step_range if is_post_bragg else config.initial_step_range
        for i in range(n_gauss):
            idx_sigma = 1 + (n_gauss - 1) + i  # Index of the i-th sigma parameter
            lb = max(1e-3, (1 - step_range) * s_prev[i])
            ub = max(lb * 1.05, (1 + step_range) * s_prev[i])
            lower[idx_sigma] = lb
            upper[idx_sigma] = ub

    bounds = list(zip(lower, upper))
    # Debug print for bounds if needed
    # print(bounds)
    return bounds


def build_fitting_constraints(n_gauss: int, is_post_bragg: bool = False) -> list[dict]:
    """
    Build SLSQP inequality constraints for the multi-Gaussian fit.

    Two families of constraints are produced:
    * sum of the explicit weights w2..wn stays <= 1 (so the implicit w1 >= 0);
    * sigmas are non-decreasing (s_i <= s_{i+1}), except that the first
      ordering constraint is dropped in post-Bragg fits, matching the
      behavior of the original script.
    """
    cons = [
        # 1 - sum(w2..wn) >= 0  <=>  total explicit weight <= 1
        {"type": "ineq", "fun": lambda p, n=n_gauss: 1 - np.sum(p[1:n])}
    ]

    for i in range(n_gauss - 1):
        if is_post_bragg and i == 0:
            continue  # post-Bragg: first sigma ordering deliberately unconstrained
        cons.append(
            {
                "type": "ineq",
                # s_{i+1} - s_i >= 0 enforces the ascending sigma order.
                "fun": lambda p, i=i, n=n_gauss: p[n + i + 1] - p[n + i],
            }
        )

    return cons


def objective_log(
    params: np.ndarray,
    x: np.ndarray,
    y: np.ndarray,
    n_gauss: int,
    with_background: bool,
    penalty_lambda: float = 1e3,
    s_prev: np.ndarray = None,
    mid_tail_params: MidTailParams = None,
) -> float:
    """
    Stage-1 objective: weighted sum of squared log-residuals.

    Working in log space emphasizes the low-dose tails. Adds a quadratic
    penalty pulling the norm factor toward 1 and, when `s_prev` is given,
    a smoothness penalty on the sigmas relative to the previous layer.
    Returns 1e20 for infeasible parameter vectors.
    """
    # Reject infeasible points outright so the optimizer backs away.
    if np.any(params < 0):
        return 1e20
    sigmas = params[n_gauss : 2 * n_gauss]
    if np.any(sigmas <= 0):
        return 1e20

    prediction = multi_gauss_model(x, params, n_gauss, with_background)

    # Optional per-point emphasis on the mid-tail region.
    weights = 1.0 if mid_tail_params is None else get_mid_tail_weights(x, mid_tail_params)

    eps = 1e-12
    residual = np.log(prediction + eps) - np.log(y + eps)
    loss = np.sum(weights * residual**2)

    # Keep the overall normalization close to unity.
    loss += penalty_lambda * (params[0] - 1.0) ** 2

    # Discourage sigma jumps between adjacent depth layers.
    if s_prev is not None:
        loss += sigma_smooth_penalty(sigmas, s_prev)

    return loss


def objective_linear(
    params: np.ndarray,
    x: np.ndarray,
    y: np.ndarray,
    n_gauss: int,
    with_background: bool,
    penalty_lambda: float = 1e3,
    s_prev: np.ndarray = None,
    mid_tail_params: MidTailParams = None,
) -> float:
    """
    Stage-2 objective: weighted sum of squared linear residuals.

    Refines the overall scale after the log-space stage. Adds a quadratic
    penalty pulling the norm factor toward 1 and, when `s_prev` is given,
    a smoothness penalty on the sigmas relative to the previous layer.
    Returns 1e20 for infeasible parameter vectors.
    """
    if np.any(params < 0):
        return 1e20
    sigmas = params[n_gauss : 2 * n_gauss]
    if np.any(sigmas <= 0):
        return 1e20

    y_pred = multi_gauss_model(x, params, n_gauss, with_background)

    # Optional per-point emphasis on the mid-tail region.
    W = 1.0 if mid_tail_params is None else get_mid_tail_weights(x, mid_tail_params)

    # Bug fix: the weights must be applied per point *inside* the sum
    # (as in objective_log). The previous `W * np.sum(...)` multiplied the
    # scalar sum by the whole weight array, producing an array-valued loss
    # that SLSQP cannot convert to a scalar.
    linear_residual_squared = (y_pred - y) ** 2
    loss = np.sum(W * linear_residual_squared)

    # Penalty term to keep the norm factor close to 1.
    norm_factor = params[0]
    loss += penalty_lambda * (norm_factor - 1.0) ** 2

    # Smoothness penalty for sigmas relative to previous layer.
    if s_prev is not None:
        loss += sigma_smooth_penalty(sigmas, s_prev)

    return loss


def run_fitting_stage(params_initial: np.ndarray, x: np.ndarray, y: np.ndarray, n_gauss: int, with_background: bool, objective_func, bounds: list[tuple[float, float]], constraints: list[dict], max_iter: int = 1000, ftol: float = 1e-10) -> tuple[np.ndarray, object]:
    """
    Run one SLSQP minimization pass of the given objective.

    Returns (best_params, full_result). A failed optimization is reported
    on stdout but the (possibly unconverged) parameters are still returned.
    """
    solver_options = {"maxiter": max_iter, "ftol": ftol}
    data_args = (x, y, n_gauss, with_background)

    result = minimize(
        objective_func,
        params_initial,
        args=data_args,
        method="SLSQP",
        bounds=bounds,
        constraints=constraints,
        options=solver_options,
    )

    if not result.success:
        # Warn but do not abort -- callers decide how to react via result.success.
        print(f"Fitting stage failed to converge: {result.message}")

    return result.x, result


def fit_profile(x: np.ndarray, y: np.ndarray, n_gauss: int, config: FitConfig, init_params: np.ndarray = None, s_prev: np.ndarray = None, is_post_bragg: bool = False, verbose: bool = False, info_tag: str = "") -> tuple[np.ndarray, object]:
    """
    Performs the two-stage fitting for a single lateral profile.

    This function executes the following steps:
    1. Generate initial parameters if not provided.
    2. Build constraints and bounds.
    3. Perform a preliminary log-fit to get an initial estimate `y_pred0`.
    4. Detect the mid-tail region based on the error of the preliminary fit.
    5. Perform the main two-stage fit (log, then linear) using the detected mid-tail weights.

    Args:
        x: Lateral coordinates of the profile points.
        y: Normalized dose values at `x`.
        n_gauss: Number of Gaussian components in the model.
        config: Supplies penalty strength and bound step ranges.
        init_params: Optional starting vector [norm, w2..wn, s1..sn];
            estimated from the data when None.
        s_prev: Previous depth's sigmas; tightens bounds and adds a
            smoothness penalty when given.
        is_post_bragg: Relaxes sigma bounds/constraints past the Bragg peak.
        verbose: Print diagnostic information for this profile.
        info_tag: Label prefixed to diagnostic prints.

    Returns:
        (final_params, result) where `final_params` comes from the linear
        (second) stage and `result` is its scipy OptimizeResult.
    """
    x = np.asarray(x, float)
    y = np.asarray(y, float)

    if init_params is None:
        norm_init = 1.0
        # Distribute remaining weight (0.5) equally among other Gaussians
        w_init = np.ones(n_gauss - 1) * 0.5 / (n_gauss - 1)
        # Estimate sigma using weighted standard deviation
        s_guess = weighted_stddev(x, y)
        # Create initial sigma values increasing for subsequent Gaussians
        s_init = [s_guess * (0.8 + 2 * i) for i in range(n_gauss)]
        p0 = np.array([norm_init] + list(w_init) + list(s_init))
        if verbose:
            print(f"[{info_tag}] Initial params: ", p0)
    else:
        p0 = init_params.copy()

    bounds = build_fitting_bounds(x, n_gauss, s_prev, is_post_bragg, config)
    constraints = build_fitting_constraints(n_gauss, is_post_bragg)

    # --- Pre-fitting Stage ---
    # A quick fit to estimate the shape and detect where fitting needs more emphasis.
    # This stage does not use mid-tail weights.
    y_pred0 = None
    mid_tail_params = None
    res0 = None
    try:
        # Run the preliminary log-fit (short iteration budget; only its shape matters).
        res0, _ = run_fitting_stage(
            p0,
            x,
            y,
            n_gauss,
            False,  # background not used in pre-fit
            objective_log,
            bounds,
            constraints,
            max_iter=500,  # Lower iterations for quick pre-fit
        )
        y_pred0 = multi_gauss_model(x, res0, n_gauss, with_background=False)
        # Detect mid-tail region based on the pre-fit's error
        mid_tail_params = detect_mid_tail_v2(x, y, y_pred0)

        if verbose:
            print(f"[{info_tag}] Mid-tail params: center={mid_tail_params.center:.3f}, width={mid_tail_params.width:.3f}, mult={mid_tail_params.multiplier:.1f}")
    except Exception as e:
        # If pre-fitting or detection fails, proceed without mid-tail weights
        print(f"[{info_tag}] Warning: Pre-fit or mid-tail detection failed: {e}. Continuing without.")
        mid_tail_params = None  # Fallback to no weighting

    # Note: The original script passed `mid_tail_params` to the log stage but not the linear stage.
    # This implementation follows that pattern if needed.
    # For consistency, we pass it to both, but it's only used if calculated.

    # --- Main Fitting Stage 1: Log Residuals ---
    # The lambda appends the extra objective arguments (penalty, s_prev, weights)
    # after the (x, y, n_gauss, with_background) args supplied by run_fitting_stage.
    p1, res_log = run_fitting_stage(p0, x, y, n_gauss, False, lambda p, *args: objective_log(p, *args, config.penalty_lambda, s_prev, mid_tail_params), bounds, constraints, max_iter=3000)

    # --- Main Fitting Stage 2: Linear Residuals ---
    final_params, res_linear = run_fitting_stage(
        p1,  # Use result from stage 1 as initial guess for stage 2
        x,
        y,
        n_gauss,
        False,
        lambda p, *args: objective_linear(p, *args, config.penalty_lambda, s_prev, mid_tail_params),
        bounds,
        constraints,
        max_iter=2000,
    )

    # Return the final parameters from the linear stage and the result object
    # (e.g., for checking success, message, etc.)
    return final_params, res_linear


# --- Mid-Tail Detection (Refactored) ---
def detect_mid_tail_v2(
    x: np.ndarray,
    y: np.ndarray,
    y_pred0: np.ndarray,
    r_min: float = 5.0,
    r_max: float = 40.0,
    smooth_window: int = 15,
    width_clip: tuple[float, float] = (2.0, 40.0),
    mult_clip: tuple[float, float] = (6.0, 40.0),
    rel_err_threshold: float = 0.01,
    abs_err_threshold: float = 0.0,
) -> MidTailParams:
    """
    Detects the region where the fit underestimates the data (y_pred < y)
    to define parameters for mid_tail_weights.
    """
    eps = 1e-12
    diff = y_pred0 - y  # Positive where pred > data
    threshold = np.maximum(abs_err_threshold, rel_err_threshold * np.maximum(y, eps))

    # Mask where prediction is significantly *less* than data, within r range
    underestimation_mask = (diff < -threshold) & (np.abs(x) >= r_min) & (np.abs(x) <= r_max)

    if not np.any(underestimation_mask):
        # Fallback if no under-estimated region is found
        return MidTailParams()

    x_unders = x[underestimation_mask]
    # Calculate magnitude of underestimation (always positive)
    rel_underestim = -(diff[underestimation_mask]) / np.maximum(y[underestimation_mask], eps)

    # Sort points by x coordinate to find continuous segments
    sorted_indices = np.argsort(x_unders)
    x_sorted = x_unders[sorted_indices]
    rel_sorted = rel_underestim[sorted_indices]

    if len(x_sorted) < 3:
        # Fallback if not enough points
        return MidTailParams(center=np.mean(x_sorted), width=2.0, multiplier=8.0)

    dx = 0.1  # Approximate bin spacing
    gap_threshold = 1.5 * dx

    # Find continuous segments of underestimation
    starts = [0]
    for i in range(1, len(x_sorted)):
        if (x_sorted[i] - x_sorted[i - 1]) > gap_threshold:
            starts.append(i)
    ends = starts[1:] + [len(x_sorted)]

    # Find the longest segment
    segment_lengths = [ends[k] - starts[k] for k in range(len(starts))]
    k_max = int(np.argmax(segment_lengths))
    seg_start, seg_end = starts[k_max], ends[k_max]
    x_seg, rel_seg = x_sorted[seg_start:seg_end], rel_sorted[seg_start:seg_end]

    if len(x_seg) < 3:
        return MidTailParams(center=np.mean(x_seg), width=2.0, multiplier=8.0)

    # Smooth the underestimation magnitude within the segment
    if 1 < smooth_window < len(rel_seg):
        kernel = np.ones(smooth_window) / smooth_window
        rel_smoothed = np.convolve(rel_seg, kernel, mode="same")
    else:
        rel_smoothed = rel_seg

    # Find the point of maximum underestimation within the segment
    i_peak = np.argmax(rel_smoothed)
    x_peak = x_seg[i_peak]
    peak_val = rel_smoothed[i_peak]
    # print(f"Detected peak underestimation: {peak_val:.3f} at x={x_peak:.3f}")

    # Determine the 'width' of the underestimation bump (similar to HWHM)
    half_level = 0.1 * peak_val  # Use 10% of peak as the width criterion
    iL, iR = 0, len(x_seg) - 1
    for i in range(i_peak, -1, -1):
        if rel_smoothed[i] < half_level:
            iL = i
            break
    for i in range(i_peak, len(x_seg)):
        if rel_smoothed[i] < half_level:
            iR = i
            break

    x_width = 0.5 * abs(x_seg[iR] - x_seg[iL])
    x_width = np.clip(x_width, width_clip[0], width_clip[1])

    # Calculate the multiplier based on the peak underestimation value
    multiplier_raw = 8.0 + 30.0 * (1 - np.exp(-3.0 * peak_val))
    multiplier_clipped = np.clip(multiplier_raw, mult_clip[0], mult_clip[1])

    return MidTailParams(center=abs(x_peak), width=x_width, multiplier=multiplier_clipped)  # Use abs(x_peak) as center is typically positive


# --- Main Execution Logic ---
def load_data(config: FitConfig) -> tuple[np.ndarray, np.ndarray]:
    """Loads the 3D dose array and calculates integrated depth dose (IDD)."""
    data_file = config.data_path / "eDep.bin"
    if not data_file.exists():
        raise FileNotFoundError(f"Data file not found: {data_file}")

    arr = np.fromfile(str(data_file), dtype=np.float64).reshape(config.z_bins, config.y_bins)
    idd = arr.sum(axis=1)  # Integrated Dose Depth
    return arr, idd


def find_peak_and_starting_depth(arr: np.ndarray, config: FitConfig) -> tuple[int, int, float]:
    """
    Locate the Bragg peak depth and the depth index where fitting begins.

    Returns (peak_index, start_index, peak_idd) where the start index is
    `peak_detection_factor` of the way to the peak.
    """
    peak_z_index = np.argmax(arr.sum(axis=1))  # depth with the largest lateral sum
    peak_idd_value = arr[peak_z_index].sum()
    start_z_index = int(config.peak_detection_factor * peak_z_index)

    print(f"Peak Z index: {peak_z_index}, Peak IDD: {peak_idd_value:.4e}")
    print(f"Starting fit at Z index: {start_z_index} (z = {get_z_axis(config)[start_z_index]:.3f})")

    return peak_z_index, start_z_index, peak_idd_value


def prepare_profile_for_fitting(raw_profile: np.ndarray, y_axis: np.ndarray, config: FitConfig) -> tuple[np.ndarray, np.ndarray]:
    """
    Normalize one lateral profile and restrict it to the fitting window.

    Returns (x, y) arrays covering only [y_min_fit, y_max_fit].
    """
    bin_width = (config.y_max - config.y_min) / config.y_bins
    profile = normalize_by_integral(raw_profile, bin_width)
    in_window = (y_axis >= config.y_min_fit) & (y_axis <= config.y_max_fit)
    return y_axis[in_window], profile[in_window]


def plot_and_save(x: np.ndarray, y_data: np.ndarray, y_fit: np.ndarray, z_index: int, z_axis: np.ndarray, output_dir: Path, dpi: int = 150):
    """
    Create and save a semilog-y plot of data vs. fit for one depth.

    Args:
        x: Lateral coordinates.
        y_data: Measured (normalized) profile.
        y_fit: Fitted model values at `x`.
        z_index: Depth bin index (used for the title and file name).
        z_axis: Depth bin centers, for annotating the physical depth.
        output_dir: Directory receiving `depth_###.png`.
        dpi: Figure resolution; exposed (previously hard-coded) so callers
            can pass FitConfig.plot_dpi. Default preserves old behavior.
    """
    y_min_plot = np.min(y_data)
    y_max_plot = np.max(y_data)
    # Floor the lower limit so log-scale plotting never receives <= 0.
    y_lower = max(y_min_plot * 0.5, 1e-12)
    y_upper = y_max_plot * 2

    plt.figure(dpi=dpi)
    plt.semilogy(x, y_data, "o", label="Data", markersize=4, markerfacecolor="none", alpha=0.8)
    plt.semilogy(x, y_fit, "-", label="Fit")
    plt.legend()
    plt.xlabel("x [mm]")
    plt.ylabel("Dose [a.u.]")
    plt.title(f"Depth idx={z_index} (z={z_axis[z_index]:.3f} cm)")
    plt.ylim(y_lower, y_upper)
    plot_path = output_dir / f"depth_{z_index:03d}.png"
    plt.savefig(plot_path)
    plt.close()


def save_results(results: np.ndarray, filename: Path, n_gauss: int):
    """
    Save the per-depth fitting results as a text table.

    Args:
        results: 2D array with one row per depth: [norm, w1..wn, s1..sn].
        filename: Destination text file.
        n_gauss: Number of Gaussian components (used in the header only).
    """
    header = f"norm, w1..w{n_gauss}, sigma1..sigma{n_gauss} (w1 calculated as 1-sum(other_weights))"
    np.savetxt(filename, results, header=header, fmt="%.6e")
    # Bug fix: previously printed a literal "(unknown)" placeholder instead of the path.
    print(f"Fitting complete. Results saved to {filename}")


def main():
    """Main execution function.

    Pipeline: load the dose array, pick a starting depth before the Bragg
    peak, fit that profile from scratch, then sweep shallower and deeper
    depths using each previous fit as the next initial guess. Results are
    written to a text table and per-depth plots to an output directory.
    """
    config = FitConfig()
    os.makedirs(config.output_dir, exist_ok=True)

    print(f"Loading data from {config.data_path / 'eDep.bin'}")
    arr_3d, integrated_depth_dose = load_data(config)
    peak_z_idx, start_z_idx, peak_idd_val = find_peak_and_starting_depth(arr_3d, config)

    y_axis_full = get_y_axis(config)
    z_axis = get_z_axis(config)
    # One row per depth: [norm, w1..wn, s1..sn] => 2*n_gauss + 1 columns.
    results_storage = np.zeros((config.z_bins, 2 * config.n_gauss + 1))

    # --- Fit the starting depth profile ---
    initial_profile_raw = arr_3d[start_z_idx, :]
    x_fit, y_fit_data = prepare_profile_for_fitting(initial_profile_raw, y_axis_full, config)

    initial_params, fit_result = fit_profile(x_fit, y_fit_data, config.n_gauss, config, verbose=True, info_tag=f"Initial (idx={start_z_idx})")

    if not fit_result.success:
        print(f"Initial fit at idx {start_z_idx} failed. Aborting.")
        return

    # Store results, converting parameters back to the format used for saving:
    # [norm, w1, w2..wn, s1..sn] where w1 = 1 - sum(w2..wn)
    first_weight = 1 - np.sum(initial_params[1 : config.n_gauss])
    results_storage[start_z_idx] = np.hstack(
        [
            [initial_params[0]],  # norm
            [first_weight],  # w1 (calculated)
            initial_params[1 : config.n_gauss],  # w2..wn (fitted)
            initial_params[config.n_gauss : 2 * config.n_gauss],  # s1..sn (fitted)
        ]
    )

    fitted_profile = multi_gauss_model(x_fit, initial_params, config.n_gauss, False)
    plot_and_save(x_fit, y_fit_data, fitted_profile, start_z_idx, z_axis, config.output_dir)
    print(f"Saved plot for initial depth idx {start_z_idx}")

    # --- Fit shallower depths (z_index decreasing from start_z_idx) ---
    # Each layer is seeded with the previous layer's parameters; bounds are
    # tightened around its sigmas via s_prev.
    current_params = initial_params
    for z_idx in range(start_z_idx - 1, -1, -1):
        profile_raw = arr_3d[z_idx, :]
        _, profile_normalized_masked = prepare_profile_for_fitting(profile_raw, y_axis_full, config)

        # Use previous fit's sigmas as reference for bounds
        s_prev_fit = current_params[config.n_gauss : 2 * config.n_gauss]

        current_params, fit_result = fit_profile(
            x_fit,
            profile_normalized_masked,
            config.n_gauss,
            config,
            init_params=current_params,
            s_prev=s_prev_fit,  # Use previous params and sigmas
            verbose=(z_idx % 20 == 0),
            info_tag=f"Shallow idx={z_idx}",  # Print every 20th
        )

        if fit_result.success:
            first_w = 1 - np.sum(current_params[1 : config.n_gauss])
            results_storage[z_idx] = np.hstack([[current_params[0]], [first_w], current_params[1 : config.n_gauss], current_params[config.n_gauss : 2 * config.n_gauss]])
            fitted_profile = multi_gauss_model(x_fit, current_params, config.n_gauss, False)
            plot_and_save(x_fit, profile_normalized_masked, fitted_profile, z_idx, z_axis, config.output_dir)
        else:
            print(f"Fit failed at shallow depth idx {z_idx}, stopping here.")
            break  # Or continue, depending on desired behavior

    # --- Fit deeper depths (z_index increasing from start_z_idx) ---
    # Reset params to the ones from the *initial* depth for the deeper pass
    # Note: The original code did `params = np.delete(results[z_depth_index], 1)`
    # which removed the *calculated* w1, leaving [norm, w2..wn, s1..sn].
    # This is unusual. I'll keep the full set of fitted params.
    current_params = initial_params
    for z_idx in range(start_z_idx + 1, min(config.z_bins, peak_z_idx + config.post_bragg_start_offset)):
        # Stop once the depth dose falls below the configured fraction of the peak.
        if integrated_depth_dose[z_idx] < config.stop_dose_factor * peak_idd_val:
            print(f"Depth {z_idx} has dose < {config.stop_dose_factor:.2%} of peak, stopping fit.")
            break

        profile_raw = arr_3d[z_idx, :]
        _, profile_normalized_masked = prepare_profile_for_fitting(profile_raw, y_axis_full, config)

        s_prev_fit = current_params[config.n_gauss : 2 * config.n_gauss]

        # Past the peak, bounds/constraints are relaxed via the post-Bragg flag.
        is_post_bragg = z_idx > peak_z_idx
        current_params, fit_result = fit_profile(
            x_fit,
            profile_normalized_masked,
            config.n_gauss,
            config,
            init_params=current_params,
            s_prev=s_prev_fit,
            is_post_bragg=is_post_bragg,  # Flag for different constraints/bounds
            verbose=(z_idx % 20 == 0),
            info_tag=f"Deep idx={z_idx}",
        )

        if fit_result.success:
            first_w = 1 - np.sum(current_params[1 : config.n_gauss])
            results_storage[z_idx] = np.hstack([[current_params[0]], [first_w], current_params[1 : config.n_gauss], current_params[config.n_gauss : 2 * config.n_gauss]])
            fitted_profile = multi_gauss_model(x_fit, current_params, config.n_gauss, False)
            plot_and_save(x_fit, profile_normalized_masked, fitted_profile, z_idx, z_axis, config.output_dir)
        else:
            print(f"Fit failed at deep depth idx {z_idx}, stopping here.")
            break  # Or continue, depending on desired behavior

    # --- Save Final Results ---
    save_results(results_storage, config.results_file, config.n_gauss)
    print(f"Plots saved to {config.output_dir}")


if __name__ == "__main__":
    main()
