class SiCDrudeParameters:
    """Physical constants and Drude-model parameters for a SiC epitaxial layer.

    The Drude coefficient is derived from first principles here (contrast with
    PhysicalModel, which hard-codes a literature value).
    """

    def __init__(self):
        # Fundamental constants (SI units).
        self.EPSILON_0 = 8.854187817e-12      # vacuum permittivity [F/m]
        self.E_CHARGE = 1.602176634e-19       # elementary charge [C]
        self.C_LIGHT = 299792458.0            # speed of light [m/s]
        self.M_ELECTRON = 9.1093837015e-31    # electron rest mass [kg]
        # Material parameters for SiC.
        self.EPSILON_L = 6.7                  # high-frequency lattice permittivity
        self.M_EFFECTIVE = 0.3 * self.M_ELECTRON
        # Drude coefficient: e^2 / (4 * pi^2 * c^2 * eps0 * m*).
        denominator = 4 * np.pi**2 * self.C_LIGHT**2 * self.EPSILON_0 * self.M_EFFECTIVE
        self.C_DRUDE = self.E_CHARGE**2 / denominator
        # Carrier concentration, given in cm^-3 and converted to m^-3.
        self.N_CARRIER_CM3 = 1e16
        self.N_CARRIER_SI = self.N_CARRIER_CM3 * 1e6

    def calculate_refractive_index(self, wavelength_m):
        """Return n(lambda) from n^2 = eps_L - C_drude * N * lambda^2.

        Returns NaN when the model gives a negative n^2 (non-physical regime).
        """
        drude_term = self.C_DRUDE * self.N_CARRIER_SI * wavelength_m**2
        n_squared = self.EPSILON_L - drude_term
        return np.nan if n_squared < 0 else np.sqrt(n_squared)
# Module-level default SiC parameter set, available for quick calculations.
siC_params = SiCDrudeParameters()
class PhysicalModel:
    """Optical model for thin-film interference on a SiC epilayer.

    Combines Snell refraction, the geometric optical path difference (OPD),
    the half-wave loss on reflection, and a Drude-model refractive index
    (literature Drude coefficient, unlike SiCDrudeParameters which derives it).
    """

    def __init__(self):
        self.C_LIGHT = 299792458.0   # speed of light [m/s]
        self.N_AIR = 1.0             # refractive index of the ambient (air)
        self.EPSILON_L = 6.52        # lattice permittivity of SiC
        self.C_DRUDE = 1.44e-18      # Drude coefficient [SI]
        self.N_CARRIER_CM3 = 1e16    # carrier concentration [cm^-3]
        self.N_CARRIER_SI = self.N_CARRIER_CM3 * 1e6  # converted to m^-3

    def calculate_geometric_path_difference(self, n1, d, theta1):
        """Geometric OPD of the two reflected beams: 2 * n1 * d * cos(theta1)."""
        return 2 * n1 * d * np.cos(theta1)

    def calculate_half_wave_loss(self, wavelength):
        """Extra path difference (lambda / 2) from the half-wave reflection loss."""
        return wavelength / 2

    def calculate_total_optical_path_difference(self, n1, d, theta1, wavelength):
        """Total OPD = geometric OPD + half-wave loss."""
        geometric = self.calculate_geometric_path_difference(n1, d, theta1)
        return geometric + self.calculate_half_wave_loss(wavelength)

    def snells_law_refraction_angle(self, theta0, n1):
        """Refraction angle inside the film from Snell's law.

        The sine is clamped to 1 so an out-of-range value cannot make
        arcsin raise/return NaN.
        """
        sine = self.N_AIR * np.sin(theta0) / n1
        if sine > 1:
            sine = 1.0
        return np.arcsin(sine)

    def drude_model_refractive_index(self, wavelength):
        """Film index n1 from n1^2 = eps_L - C_drude * N * lambda^2 (NaN if negative)."""
        n1_sq = self.EPSILON_L - self.C_DRUDE * self.N_CARRIER_SI * wavelength**2
        if n1_sq < 0:
            return np.nan
        return np.sqrt(n1_sq)

    def demonstrate_physical_model(self, theta0_deg=10):
        """Walk the whole model through sample wavelengths (demonstration only;
        computes intermediate quantities but produces no output)."""
        theta0_rad = np.deg2rad(theta0_deg)
        sample_wavelengths = np.array([10e-6, 12e-6, 15e-6])
        film_thickness = 5e-6
        for wl in sample_wavelengths:
            self.drude_model_refractive_index(wl)
        wavelength = sample_wavelengths[1]
        n1 = self.drude_model_refractive_index(wavelength)
        theta1 = self.snells_law_refraction_angle(theta0_rad, n1)
        self.calculate_geometric_path_difference(n1, film_thickness, theta1)
        self.calculate_half_wave_loss(wavelength)
        total_opd = self.calculate_total_optical_path_difference(
            n1, film_thickness, theta1, wavelength)
        # Fractional interference order at this configuration (demo value).
        k_float = total_opd / wavelength
class InterferenceModel(PhysicalModel):
    """Constructive-interference conditions and thickness extraction.

    With the half-wave loss included, the maxima satisfy
    2 * n1 * d * cos(theta1) = (k - 1/2) * lambda, i.e. total OPD = k * lambda.
    """

    def __init__(self):
        super().__init__()

    def interference_constructive_condition(self, k, wavelength):
        """Total OPD required for the k-th constructive maximum (k * lambda)."""
        return k * wavelength

    def core_model_equation(self, n1, d, theta1, k, wavelength):
        """Return (lhs, rhs) of 2*n1*d*cos(theta1) = (k - 1/2)*lambda."""
        lhs = 2 * n1 * d * np.cos(theta1)
        rhs = (k - 0.5) * wavelength
        return lhs, rhs

    def solve_thickness_from_single_peak(self, k, wavelength, theta0):
        """Film thickness implied by one maximum of order k; NaN if n1 is invalid."""
        n1 = self.drude_model_refractive_index(wavelength)
        if np.isnan(n1):
            return np.nan
        theta1 = self.snells_law_refraction_angle(theta0, n1)
        return (k - 0.5) * wavelength / (2 * n1 * np.cos(theta1))

    def verify_interference_condition(self, n1, d, theta1, k, wavelength, tolerance=1e-12):
        """Check |lhs - rhs| of the core equation; returns (satisfied, error)."""
        lhs, rhs = self.core_model_equation(n1, d, theta1, k, wavelength)
        residual = abs(lhs - rhs)
        return residual < tolerance, residual

    def calculate_wavelength_for_maximum(self, k, n1, d, theta0_deg):
        """Wavelength at which order k produces a maximum for given n1 and d."""
        theta1 = self.snells_law_refraction_angle(np.deg2rad(theta0_deg), n1)
        return 2 * n1 * d * np.cos(theta1) / (k - 0.5)

    def calculate_thickness_from_adjacent_maxima(self, lambda1, lambda2, k1, n1, theta0_deg):
        """Thickness from two adjacent maxima assuming a common n1.

        Note: k1 is part of the published interface but cancels out of the
        adjacent-maxima formula, so it is unused here.
        """
        theta1 = self.snells_law_refraction_angle(np.deg2rad(theta0_deg), n1)
        return lambda1 * lambda2 / (2 * n1 * np.cos(theta1) * (lambda1 - lambda2))

    def demonstrate_interference_conditions(self, theta0_deg=10):
        """Exercise the solve/verify round trip at sample points (demo only)."""
        theta0_rad = np.deg2rad(theta0_deg)
        for wavelength, k in zip(np.array([10e-6, 12e-6, 15e-6]), [10, 15, 20]):
            d = self.solve_thickness_from_single_peak(k, wavelength, theta0_rad)
            if np.isnan(d):
                continue
            n1 = self.drude_model_refractive_index(wavelength)
            theta1 = self.snells_law_refraction_angle(theta0_rad, n1)
            self.verify_interference_condition(n1, d, theta1, k, wavelength)
        wavelength, k = 12e-6, 15
        d = self.solve_thickness_from_single_peak(k, wavelength, theta0_rad)
        if not np.isnan(d):
            n1 = self.drude_model_refractive_index(wavelength)
            theta1 = self.snells_law_refraction_angle(theta0_rad, n1)
            self.core_model_equation(n1, d, theta1, k, wavelength)
            self.calculate_total_optical_path_difference(n1, d, theta1, wavelength)
            self.interference_constructive_condition(k, wavelength)

class DrudeModelIntegration(InterferenceModel):
    """Wavelength-dependent refractive-index analysis built on the Drude model."""

    def __init__(self):
        super().__init__()

    def drude_model_detailed_analysis(self, wavelength):
        """Break n1^2 = eps_L - C_drude * N * lambda^2 into its terms.

        Returns a dict with the individual terms, the resulting n1
        (NaN when n1^2 <= 0) and an 'is_valid' flag.
        """
        epsilon_L_term = self.EPSILON_L
        drude_correction_term = self.C_DRUDE * self.N_CARRIER_SI * (wavelength**2)
        n1_squared = epsilon_L_term - drude_correction_term
        analysis = {
            'wavelength_m': wavelength,
            'wavelength_um': wavelength * 1e6,
            'epsilon_L_term': epsilon_L_term,
            'drude_correction_term': drude_correction_term,
            'n1_squared': n1_squared,
            'n1': np.sqrt(n1_squared) if n1_squared > 0 else np.nan,
            'is_valid': n1_squared > 0
        }
        return analysis

    def validate_drude_model_physics(self, wavelength_range=(8e-6, 20e-6), num_points=100):
        """Scan a wavelength range and report where the Drude model stays physical.

        Bug fix: the monotonicity diagnostic (dn/dlambda and its sign check)
        was previously computed and then discarded; it is now returned in the
        results dict. 'is_monotonic' / 'dn_dlambda' are None when there are no
        valid points.
        """
        wavelengths = np.linspace(wavelength_range[0], wavelength_range[1], num_points)
        n1_values = []
        valid_wavelengths = []
        invalid_wavelengths = []
        for wl in wavelengths:
            analysis = self.drude_model_detailed_analysis(wl)
            if analysis['is_valid']:
                n1_values.append(analysis['n1'])
                valid_wavelengths.append(wl)
            else:
                invalid_wavelengths.append(wl)
        is_monotonic = None
        dn_dlambda = None
        if len(valid_wavelengths) > 0:
            n1_array = np.array(n1_values)
            wl_array = np.array(valid_wavelengths)
            # n1(lambda) should vary monotonically for a pure Drude correction.
            dn_dlambda = np.gradient(n1_array, wl_array)
            is_monotonic = bool(np.all(dn_dlambda >= 0) or np.all(dn_dlambda <= 0))
        validation_results = {
            'wavelength_range': wavelength_range,
            'total_points': len(wavelengths),
            'valid_points': len(valid_wavelengths),
            'invalid_points': len(invalid_wavelengths),
            'valid_wavelengths': valid_wavelengths,
            'n1_values': n1_values,
            'invalid_wavelengths': invalid_wavelengths,
            # New keys (backward-compatible addition).
            'is_monotonic': is_monotonic,
            'dn_dlambda': dn_dlambda
        }
        return validation_results

    def demonstrate_wavelength_dependent_calculation(self):
        """Evaluate n1 at a fixed wavelength grid; returns (wavelengths, n1_values)."""
        test_wavelengths = np.array([8e-6, 10e-6, 12e-6, 15e-6, 18e-6, 20e-6])
        n1_values = []
        for wl in test_wavelengths:
            analysis = self.drude_model_detailed_analysis(wl)
            n1_values.append(analysis['n1'] if analysis['is_valid'] else np.nan)
        valid_n1 = [n for n in n1_values if not np.isnan(n)]
        if valid_n1:
            # Spread of n1 over the grid, as an absolute range and a percentage.
            n1_range = max(valid_n1) - min(valid_n1)
            n1_relative_change = n1_range / np.mean(valid_n1) * 100
        return test_wavelengths, n1_values

    def compare_constant_vs_wavelength_dependent_n1(self, test_thickness=10e-6, theta0_deg=10):
        """Compare OPD errors made by a constant (mean) n1 against the
        wavelength-dependent Drude n1 (demonstration only; no return value)."""
        theta0_rad = np.deg2rad(theta0_deg)
        test_wavelengths = np.array([10e-6, 12e-6, 15e-6, 18e-6])
        n1_values = []
        for wl in test_wavelengths:
            n1 = self.drude_model_refractive_index(wl)
            if not np.isnan(n1):
                n1_values.append(n1)
        if not n1_values:
            return
        n1_constant = np.mean(n1_values)
        for wl in test_wavelengths:
            n1_actual = self.drude_model_refractive_index(wl)
            if np.isnan(n1_actual):
                continue
            theta1_actual = self.snells_law_refraction_angle(theta0_rad, n1_actual)
            opd_actual = self.calculate_total_optical_path_difference(
                n1_actual, test_thickness, theta1_actual, wl)
            theta1_constant = self.snells_law_refraction_angle(theta0_rad, n1_constant)
            opd_constant = self.calculate_total_optical_path_difference(
                n1_constant, test_thickness, theta1_constant, wl)
            # Relative errors (percent) introduced by the constant-n1 assumption.
            n1_error = abs(n1_actual - n1_constant) / n1_actual * 100
            opd_error = abs(opd_actual - opd_constant) / opd_actual * 100
class SolvingStrategy(DrudeModelIntegration):
    """Solve for the interference order k and the epilayer thickness d from
    pairs of adjacent reflectivity maxima.

    Convention: lambda_k is the longer wavelength (lower order k), and
    lambda_k_plus_1 the adjacent shorter wavelength (order k + 1).
    """

    def __init__(self):
        super().__init__()

    def establish_equation_system(self, lambda_k, lambda_k_plus_1, n1_k, n1_k_plus_1, theta0_rad):
        """Precompute refraction angles and coefficients for the two
        adjacent-maxima equations and bundle them into a lookup dict."""
        angle_a = self.snells_law_refraction_angle(theta0_rad, n1_k)
        angle_b = self.snells_law_refraction_angle(theta0_rad, n1_k_plus_1)
        cos_a = np.cos(angle_a)
        cos_b = np.cos(angle_b)
        return {
            'lambda_k': lambda_k,
            'lambda_k_plus_1': lambda_k_plus_1,
            'n1_k': n1_k,
            'n1_k_plus_1': n1_k_plus_1,
            'theta1_k': angle_a,
            'theta1_k_plus_1': angle_b,
            'cos_theta1_k': cos_a,
            'cos_theta1_k_plus_1': cos_b,
            'coeff_k': 2 * n1_k * cos_a,
            'coeff_k_plus_1': 2 * n1_k_plus_1 * cos_b,
            'rhs_k_coeff': lambda_k,
            'rhs_k_plus_1_coeff': lambda_k_plus_1,
        }

    def solve_interference_order_k(self, equation_system):
        """Solve the order k from the ratio of the two maxima equations.

        Uses k = n_k*lambda_{k+1} / (n_{k+1}*lambda_k - n_k*lambda_{k+1});
        k is accepted when it lies within 0.2 of an integer.
        """
        eq = equation_system
        numerator = eq['n1_k'] * eq['lambda_k_plus_1']
        denominator_term = eq['n1_k_plus_1'] * eq['lambda_k']
        cosine_ratio = (eq['n1_k'] * eq['cos_theta1_k']) / (
            eq['n1_k_plus_1'] * eq['cos_theta1_k_plus_1'])
        k_float = numerator / (denominator_term - numerator)
        k_rounded = round(k_float)
        deviation = abs(k_float - k_rounded)
        return {
            'k_float': k_float,
            'k_rounded': k_rounded,
            'k_error': deviation,
            'is_valid': deviation < 0.2,
            'left_ratio': cosine_ratio,
            'term1': numerator,
            'term2': denominator_term,
        }

    def solve_thickness_d(self, k, equation_system, use_equation=1):
        """Thickness from either equation: d = order * lambda / (2 n cos(theta)).

        use_equation=1 applies order k at lambda_k; otherwise order k+1 at
        lambda_{k+1}.
        """
        if use_equation == 1:
            order, suffix = k, 'k'
        else:
            order, suffix = k + 1, 'k_plus_1'
        wavelength = equation_system[f'lambda_{suffix}']
        index = equation_system[f'n1_{suffix}']
        cosine = equation_system[f'cos_theta1_{suffix}']
        return order * wavelength / (2 * index * cosine)

    def solve_adjacent_peaks_pair(self, lambda_k, lambda_k_plus_1, theta0_rad=0):
        """Full pipeline for one adjacent-peak pair.

        Returns a solution dict, or None when n1 is invalid at either
        wavelength or k is not close enough to an integer.
        """
        index_a = self.drude_model_refractive_index(lambda_k)
        index_b = self.drude_model_refractive_index(lambda_k_plus_1)
        if np.isnan(index_a) or np.isnan(index_b):
            return None
        system = self.establish_equation_system(
            lambda_k, lambda_k_plus_1, index_a, index_b, theta0_rad)
        k_info = self.solve_interference_order_k(system)
        if not k_info['is_valid']:
            return None
        k = k_info['k_rounded']
        d_first = self.solve_thickness_d(k, system, use_equation=1)
        d_second = self.solve_thickness_d(k, system, use_equation=2)
        d_mean = (d_first + d_second) / 2
        d_spread = abs(d_first - d_second)
        rel_error_pct = d_spread / d_mean * 100
        return {
            'lambda_k': lambda_k,
            'lambda_k_plus_1': lambda_k_plus_1,
            'n1_k': index_a,
            'n1_k_plus_1': index_b,
            'k': k,
            'k_float': k_info['k_float'],
            'k_error': k_info['k_error'],
            'd1': d_first,
            'd2': d_second,
            'd_avg': d_mean,
            'd_diff': d_spread,
            'd_relative_error': rel_error_pct,
            'is_consistent': rel_error_pct < 1.0,
        }

    def demonstrate_solving_strategy(self):
        """Demo: solve one pair built from the 800 and 900 cm^-1 maxima."""
        wavelengths = 1 / (np.array([800, 900]) * 100)  # wavenumber cm^-1 -> m
        solution = self.solve_adjacent_peaks_pair(
            wavelengths[0], wavelengths[1], theta0_rad=0)
        if solution and solution['is_consistent']:
            print(f"外延层厚度: d = {solution['d_avg']*1e6:.4f} ± {solution['d_diff']*1e6/2:.4f} μm")
            print(f"干涉级次: k = {solution['k']}")
        return solution

    def batch_solve_multiple_pairs(self, wavelength_pairs, theta0_rad=0):
        """Solve many peak pairs; keep and summarise the consistent solutions."""
        solutions = []
        for pair_a, pair_b in wavelength_pairs:
            candidate = self.solve_adjacent_peaks_pair(pair_a, pair_b, theta0_rad)
            if candidate and candidate['is_consistent']:
                solutions.append(candidate)
        if solutions:
            d_um = np.array([sol['d_avg'] for sol in solutions]) * 1e6
            d_mean = np.mean(d_um)
            d_std = np.std(d_um)
            d_low = np.min(d_um)
            d_high = np.max(d_um)
            print(f"平均厚度: {d_mean:.4f} μm")
            print(f"标准差: {d_std:.4f} μm")
        return solutions
class DataAnalysisValidation(SolvingStrategy):
    """End-to-end pipeline: load FTIR spectra, find interference peaks, solve
    for epilayer thickness from adjacent-peak pairs, and validate consistency.

    Fixes applied in this revision:
    - ``calculate_thickness_from_all_peak_pairs`` now honours
      ``override_params`` (previously accepted but silently ignored, which
      made the parameter grid search in ``auto_optimize_and_calculate``
      evaluate identical results for every combination).
    - It also accepts ``suppress_print`` — ``auto_optimize_and_calculate``
      already passed this keyword, which previously raised ``TypeError``.
    """

    def __init__(self):
        super().__init__()

    def load_and_prepare_data(self, filepath):
        """Read a wavenumber/reflectivity Excel file into a data-info dict.

        Returns None on any read/parse failure (best-effort loader).
        """
        try:
            df = pd.read_excel(filepath)
            df.columns = ['wavenumber_cm', 'reflectivity_percent']
            # wavenumber (cm^-1) -> wavelength (m)
            df['wavelength_m'] = 1.0 / (df['wavenumber_cm'] * 100.0)
            df['reflectivity_percent'] = df['reflectivity_percent'].clip(lower=0, upper=100)
            try:
                # Odd Savitzky-Golay window scaled to dataset size (min 5 points).
                window = max(5, (len(df) // 200) * 2 + 1)
                df['reflectivity_smoothed'] = savgol_filter(
                    df['reflectivity_percent'].values, window_length=window, polyorder=2)
            except Exception:
                # Smoothing is optional; fall back to the raw signal.
                df['reflectivity_smoothed'] = df['reflectivity_percent']
            valid_data = df.dropna()
            data_info = {
                'dataframe': valid_data,
                'filename': os.path.basename(filepath),
                'total_points': len(valid_data),
                'wavenumber_range': (valid_data['wavenumber_cm'].min(), valid_data['wavenumber_cm'].max()),
                'wavelength_range': (valid_data['wavelength_m'].min(), valid_data['wavelength_m'].max()),
                'reflectivity_range': (valid_data['reflectivity_percent'].min(), valid_data['reflectivity_percent'].max())
            }
            return data_info
        except Exception:
            return None

    def identify_interference_peaks(self, data_info, height_threshold=None, distance_threshold=None,
                                  prominence_threshold=None, show_plot=False, **kwargs):
        """Detect interference maxima in the (smoothed) reflectivity spectrum.

        Tries progressively looser find_peaks parameter sets until at least
        two peaks are found, then refines each peak position with a local
        parabolic fit. Returns a peaks-info dict.
        """
        df = data_info['dataframe']
        if height_threshold is None:
            height_threshold = df['reflectivity_percent'].max() * 0.3
        if distance_threshold is None:
            distance_threshold = len(df) // 100
        if prominence_threshold is None:
            prominence_threshold = df['reflectivity_percent'].std() * 2
        signal_series = df['reflectivity_smoothed'] if 'reflectivity_smoothed' in df.columns else df['reflectivity_percent']
        max_r = df['reflectivity_percent'].max()
        std_r = df['reflectivity_percent'].std()
        n_points = len(df)

        def attempt_detect(h, d, p):
            # scipy.signal.find_peaks requires distance >= 1.
            idx, props = find_peaks(signal_series, height=h, distance=int(max(1, d)), prominence=p)
            return idx, props

        # Candidate parameter sets, from strict to permissive.
        candidates = [
            (height_threshold, distance_threshold, prominence_threshold),
            (max_r * 0.25, max(n_points // 150, 1), max(std_r * 1.7, (prominence_threshold or 0))),
            (max_r * 0.15, max(n_points // 200, 1), std_r * 1.4),
            (max_r * 0.08, max(n_points // 250, 1), std_r * 1.2),
        ]
        chosen = None
        for h, d, p in candidates:
            idx, props = attempt_detect(h, d, p)
            if len(idx) >= 2:
                chosen = (idx, props, h, int(max(1, d)), p)
                break
        if chosen is None:
            # Even the loosest set found < 2 peaks; keep whatever it returns.
            idx, props = attempt_detect(*candidates[-1])
            chosen = (idx, props, candidates[-1][0], int(max(1, candidates[-1][1])), candidates[-1][2])
        peaks_indices, peaks_properties, used_h, used_d, used_p = chosen
        peak_wavenumbers = df['wavenumber_cm'].iloc[peaks_indices].values
        peak_wavelengths = df['wavelength_m'].iloc[peaks_indices].values
        peak_reflectivities = df['reflectivity_percent'].iloc[peaks_indices].values
        # Sub-sample peak refinement: parabola through +/-2 neighbours.
        refined_wavenumbers = []
        refined_wavelengths = []
        refined_reflectivities = []
        x_all = df['wavenumber_cm'].values
        y_all = (df['reflectivity_smoothed'] if 'reflectivity_smoothed' in df.columns else df['reflectivity_percent']).values
        half_win = 2
        for idx in peaks_indices:
            left = max(0, idx - half_win)
            right = min(len(x_all) - 1, idx + half_win)
            if right - left + 1 >= 3:
                x = x_all[left:right+1]
                y = y_all[left:right+1]
                try:
                    a, b, c = np.polyfit(x, y, 2)
                    if a < 0:  # concave-down fit: vertex is a maximum
                        x_peak = -b / (2 * a)
                        if x.min() <= x_peak <= x.max():
                            refined_wavenumbers.append(x_peak)
                            refined_wavelengths.append(1.0 / (x_peak * 100.0))
                            refined_reflectivities.append(np.polyval([a, b, c], x_peak))
                            continue
                except Exception:
                    pass
            # Fit failed or vertex outside the window: keep the raw sample.
            refined_wavenumbers.append(x_all[idx])
            refined_wavelengths.append(1.0 / (x_all[idx] * 100.0))
            refined_reflectivities.append(df['reflectivity_percent'].iloc[idx])
        refined_wavenumbers = np.array(refined_wavenumbers)
        refined_wavelengths = np.array(refined_wavelengths)
        refined_reflectivities = np.array(refined_reflectivities)
        peaks_info = {
            'indices': peaks_indices,
            'wavenumbers': peak_wavenumbers,
            'wavelengths': peak_wavelengths,
            'reflectivities': peak_reflectivities,
            'wavenumbers_refined': refined_wavenumbers,
            'wavelengths_refined': refined_wavelengths,
            'reflectivities_refined': refined_reflectivities,
            'count': len(peaks_indices),
            'properties': peaks_properties,
            'used_params': {'height': float(used_h), 'distance': int(used_d), 'prominence': float(used_p)}
        }
        return peaks_info

    def calculate_thickness_from_all_peak_pairs(self, peaks_info, theta0_deg=0,
                                              min_k_error_threshold=0.1, show_details=True,
                                              override_params=None, suppress_print=False):
        """Solve every adjacent peak pair for thickness and return statistics.

        Parameters
        ----------
        override_params : dict | None
            Attribute overrides (e.g. EPSILON_L, N_CARRIER_SI) applied to
            ``self`` for the duration of this call and restored afterwards.
        suppress_print : bool
            When True, the summary print statements are skipped.
        show_details : bool
            Kept for interface compatibility; currently unused.
        """
        theta0_rad = np.deg2rad(theta0_deg)
        # Temporarily apply model-parameter overrides (restored in `finally`).
        saved_attrs = {}
        if override_params:
            for attr, value in override_params.items():
                saved_attrs[attr] = getattr(self, attr)
                setattr(self, attr, value)
        try:
            thickness_values = []
            valid_pairs = []
            invalid_pairs = []
            # Prefer sub-sample refined wavelengths when available.
            wl_array = peaks_info['wavelengths_refined'] if 'wavelengths_refined' in peaks_info else peaks_info['wavelengths']
            wn_array = peaks_info['wavenumbers']
            for i in range(peaks_info['count'] - 1):
                # Order the pair so lambda_k is the longer wavelength
                # (lower wavenumber => lower interference order k).
                if wn_array[i] < wn_array[i+1]:
                    lo, hi = i, i + 1
                else:
                    lo, hi = i + 1, i
                pair_info = {
                    'pair_index': i,
                    'lambda_k': wl_array[lo],
                    'lambda_k_plus_1': wl_array[hi],
                    'wavenumber_k': wn_array[lo],
                    'wavenumber_k_plus_1': wn_array[hi]
                }
                solution = self.solve_adjacent_peaks_pair(
                    pair_info['lambda_k'], pair_info['lambda_k_plus_1'], theta0_rad)
                accepted = False
                accept_level = None
                if solution:
                    if solution.get('is_consistent', False) and solution['k_error'] <= min_k_error_threshold:
                        accept_level = 'primary'
                        accepted = True
                    else:
                        # Relaxed acceptance for slightly noisier pairs.
                        fallback_k_err = min(0.35, min_k_error_threshold + 0.15)
                        if (solution['k_error'] <= fallback_k_err) and (solution['d_relative_error'] <= 2.0):
                            accept_level = 'secondary'
                            accepted = True
                if accepted:
                    thickness_values.append(solution['d_avg'])
                    pair_info.update(solution)
                    pair_info['accept_level'] = accept_level
                    valid_pairs.append(pair_info)
                else:
                    invalid_pairs.append(pair_info)
            if len(thickness_values) > 0:
                thickness_array = np.array(thickness_values)
                statistics = {
                    'thickness_values': thickness_values,
                    'valid_pairs': valid_pairs,
                    'invalid_pairs': invalid_pairs,
                    'valid_count': len(valid_pairs),
                    'invalid_count': len(invalid_pairs),
                    'total_pairs': peaks_info['count'] - 1,
                    'success_rate': len(valid_pairs) / (peaks_info['count'] - 1) * 100,
                    'd_avg_um': np.mean(thickness_array) * 1e6,
                    'd_std_um': np.std(thickness_array) * 1e6,
                    'd_min_um': np.min(thickness_array) * 1e6,
                    'd_max_um': np.max(thickness_array) * 1e6,
                    'd_range_um': (np.max(thickness_array) - np.min(thickness_array)) * 1e6,
                    'd_avg_m': np.mean(thickness_array),
                    'd_std_m': np.std(thickness_array),
                    'd_values_m': thickness_array
                }
                if not suppress_print:
                    print(f"平均值: {statistics['d_avg_um']:.6f} μm")
                    print(f"标准差: {statistics['d_std_um']:.6f} μm")
            else:
                statistics = {
                    'thickness_values': [],
                    'valid_pairs': [],
                    'invalid_pairs': invalid_pairs,
                    'valid_count': 0,
                    'invalid_count': len(invalid_pairs),
                    'total_pairs': peaks_info['count'] - 1,
                    'success_rate': 0,
                    'd_avg_um': np.nan,
                    'd_std_um': np.nan
                }
            return statistics
        finally:
            # Always restore overridden model parameters.
            for attr, value in saved_attrs.items():
                setattr(self, attr, value)

    def generate_peak_plot(self, data_info, peaks_info, filename_suffix=''):
        """Generate and save a plot of the spectrum with the detected peaks,
        styled to match the user-provided reference image."""
        df = data_info['dataframe']
        wavelengths_um = df['wavelength_m'] * 1e6

        plt.figure(figsize=(12, 7))

        # Raw spectrum plus the Savitzky-Golay smoothed curve used for peak finding.
        plt.plot(wavelengths_um, df['reflectivity_percent'], color='lightgrey', label='原始光谱')
        plt.plot(wavelengths_um, df['reflectivity_smoothed'], color='#4CAF50', alpha=0.8, label='平滑后光谱 (用于寻峰)')

        # Mark the identified interference peaks.
        peak_wl_um = peaks_info['wavelengths_refined'] * 1e6
        peak_refl = peaks_info['reflectivities_refined']
        num_peaks = len(peak_wl_um)
        plt.plot(peak_wl_um, peak_refl, 'o', color='red', markersize=6, label=f'识别的干涉峰 ({num_peaks}个)')

        # Annotate each peak with its wavelength value.
        for i in range(num_peaks):
            plt.text(peak_wl_um[i], peak_refl[i] + 3, f'{peak_wl_um[i]:.2f} μm',
                     ha='center', va='bottom', fontsize=9, color='black')

        plt.title(f'干涉反射光谱及峰值识别 (基于{filename_suffix}数据)', fontsize=16)
        plt.xlabel('波长 (μm)', fontsize=12)
        plt.ylabel('反射率 (%)', fontsize=12)
        plt.legend(loc='upper right')
        plt.grid(True, which='both', linestyle='--', linewidth=0.5)
        plt.xlim(8, 25)
        plt.ylim(0, 105)

        output_filename = f'干涉峰分析图_{filename_suffix}.png'
        plt.savefig(output_filename, dpi=300, bbox_inches='tight')
        print(f"成功生成图像并保存至: {output_filename}")
        plt.close()

    def validate_model_consistency(self, statistics_list, sample_names):
        """Compare thickness results across samples; returns a validation dict
        (None when fewer than two samples produced valid results)."""
        valid_results = []
        valid_names = []
        for i, (stats, name) in enumerate(zip(statistics_list, sample_names)):
            if stats and stats['valid_count'] > 0 and not np.isnan(stats['d_avg_um']):
                valid_results.append(stats)
                valid_names.append(name)
                print(f"{name}: d = {stats['d_avg_um']:.6f} ± {stats['d_std_um']:.6f} μm "
                      f"({stats['valid_count']}/{stats['total_pairs']} 有效对)")
        if len(valid_results) < 2:
            return None
        sample_means = np.array([r['d_avg_um'] for r in valid_results])
        sample_stds = np.array([r['d_std_um'] for r in valid_results])
        validation = {
            'valid_samples': len(valid_results),
            'sample_names': valid_names,
            'individual_means': sample_means,
            'individual_stds': sample_stds,
            'overall_mean': np.mean(sample_means),
            'between_sample_std': np.std(sample_means),
            'within_sample_std_avg': np.mean(sample_stds),
            'coefficient_of_variation': np.std(sample_means) / np.mean(sample_means) * 100,
            'relative_std_between': np.std(sample_means) / np.mean(sample_means) * 100,
            'relative_std_within': np.mean(sample_stds) / np.mean(sample_means) * 100
        }
        # Qualitative consistency grade based on the coefficient of variation.
        if validation['coefficient_of_variation'] < 5:
            consistency_level = "优秀"
        elif validation['coefficient_of_variation'] < 10:
            consistency_level = "良好"
        elif validation['coefficient_of_variation'] < 20:
            consistency_level = "一般"
        else:
            consistency_level = "较差"
        validation['consistency_level'] = consistency_level
        return validation

    def generate_comprehensive_report(self, all_results, validation_result=None):
        """Print a summary report over all analysed samples."""
        print(f"第一题程序 - 综合分析报告")
        print(f"一、数据处理概况")
        for i, (sample_name, data_info, peaks_info, statistics) in enumerate(all_results):
            print(f"{i+1}. {sample_name}:")
            if statistics and statistics['valid_count'] > 0:
                print(f"厚度结果: {statistics['d_avg_um']:.6f} ± {statistics['d_std_um']:.6f} μm")
        print(f"二、模型验证结果")
        if validation_result:
            print(f"总体平均厚度: {validation_result['overall_mean']:.6f} μm")
            print(f"模型一致性: {validation_result['consistency_level']}")

    def auto_optimize_and_calculate(self, peaks_info, theta0_deg=0, min_k_error_threshold=0.2, show_plot=False, **kwargs):
        """Grid-search (EPSILON_L, N_carrier) for the combination that yields
        the most valid peak pairs (ties broken by lower thickness std-dev).

        Returns (best_params, best_stats). Relies on the override/suppress
        support in calculate_thickness_from_all_peak_pairs.
        """
        epsilon_L_range = np.linspace(6.5, 6.9, 3)
        N_carrier_cm3_range = np.logspace(15, 17, 5)
        best_result = {'valid_count': -1, 'std_dev': float('inf'), 'params': None, 'stats': None}
        param_grid = list(itertools.product(epsilon_L_range, N_carrier_cm3_range))
        for epsilon_L, N_cm3 in param_grid:
            override_params = {
                'EPSILON_L': epsilon_L,
                'N_CARRIER_CM3': N_cm3,
                'N_CARRIER_SI': N_cm3 * 1e6
            }
            stats = self.calculate_thickness_from_all_peak_pairs(
                peaks_info,
                theta0_deg=theta0_deg,
                min_k_error_threshold=min_k_error_threshold,
                override_params=override_params,
                show_details=False,
                suppress_print=True
            )
            valid_count = stats.get('valid_count', 0)
            std_dev = stats.get('d_std_um', float('inf'))
            if valid_count > best_result['valid_count']:
                best_result = {'valid_count': valid_count, 'std_dev': std_dev, 'params': override_params, 'stats': stats}
            elif valid_count == best_result['valid_count'] and valid_count > 0:
                if std_dev < best_result['std_dev']:
                    best_result = {'valid_count': valid_count, 'std_dev': std_dev, 'params': override_params, 'stats': stats}
        best_params = best_result['params']
        if show_plot and best_result['stats'] is not None and best_result['valid_count'] > 0:
            # Re-run with the best parameters (detailed pass for plotting).
            # NOTE(review): the results/title are computed but no plot call
            # follows here — presumably trimmed out; confirm against history.
            final_results = self.calculate_thickness_from_all_peak_pairs(
                peaks_info,
                theta0_deg=theta0_deg,
                min_k_error_threshold=min_k_error_threshold,
                override_params=best_params,
                show_details=True,
                suppress_print=True
            )
            title_suffix = kwargs.get('title_suffix', '')
        return best_params, best_result['stats']

    def optimize_incidence_angle_for_sample(self, peaks_info, min_k_error_threshold=0.2, show_details=False):
        """Pick the incidence angle (from a fixed candidate list) that maximises
        a score of valid-pair count penalised by mean d- and k-errors."""
        theta_candidates = [0, 2, 5, 8, 10, 12, 15]
        best = {
            'score': -1,
            'valid_count': 0,
            'theta0_deg': 0,
            'stats': None,
        }
        for theta in theta_candidates:
            stats = self.calculate_thickness_from_all_peak_pairs(
                peaks_info,
                theta0_deg=theta,
                min_k_error_threshold=min_k_error_threshold,
                show_details=False
            )
            valid_count = stats.get('valid_count', 0)
            if valid_count > 0:
                d_rel = [p['d_relative_error'] for p in stats['valid_pairs']]
                k_err = [p['k_error'] for p in stats['valid_pairs']]
                d_rel_mean = float(np.mean(d_rel)) if d_rel else 1e9
                k_err_mean = float(np.mean(k_err)) if k_err else 1e9
                score = valid_count * 100 - d_rel_mean - 50 * k_err_mean
            else:
                score = -1
            if score > best['score']:
                best.update({
                    'score': score,
                    'valid_count': valid_count,
                    'theta0_deg': theta,
                    'stats': stats,
                })
        return best