import numpy as np
import time
import os
import scipy.io
from scipy.optimize import curve_fit
from scipy.fftpack import fft, fftfreq
from scipy.signal import find_peaks
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from matplotlib.cm import ScalarMappable
from matplotlib import cm
import json
import h5py
import io
import ipywidgets as widgets
from IPython.display import display
from datetime import datetime
from collections import defaultdict
from lmfit import Model
import math
from scipy.special import laguerre
from uncertainties import ufloat
from scipy.signal import argrelextrema
from scipy.signal import periodogram

import csv
from pathlib import Path
import ast
from typing import Any, Dict, List, Tuple, Union, Optional

    
# Module-wide debug flag.
# (A module-level ``global channel_return`` statement previously followed;
# removed because ``global`` at module scope is a no-op and the name was
# never defined or used in this module.)
debug_mode = False

class DataDoc():
    """
    Container for one experiment sequence's count results: metadata
    (title, axis labels, system/ion/sequence parameters) plus the raw
    scan data, with helpers to persist everything as JSON + HDF5.
    """
    def __init__(self, disk_path=r'D:\Data', title="", path=None, debug_mode=False):
        """
        Parameters
        ----------
        disk_path : str, optional
            Root data directory used when *path* is not given.
        title : str, optional
            Experiment title; used as the data-file name prefix.
        path : str, optional
            Explicit output directory; overrides the date-based default.
        debug_mode : bool, optional
            Currently unused (kept for interface compatibility).
        """
        if path is None:  # BUG FIX: compare to None with ``is``, not ``==``
            # Date-based layout: <disk_path>\YYYY\YYYYMM\YYYYMMDD\
            self.path_prefix = disk_path + time.strftime("\%Y\%Y%m\%Y%m%d\\")
        else:
            self.path_prefix = path
        if not os.path.exists(self.path_prefix):
            print("Data will be saved into ", self.path_prefix)
            # exist_ok=True avoids a race if the directory appears between
            # the exists() check and the makedirs() call
            os.makedirs(self.path_prefix, exist_ok=True)

        self.title            = title
        self.xlabel           = "Duration"   # name of the scanned parameter
        self.ylabel           = "Counts"     # name of the measured quantity
        self.SystemParameters = {"CONTENT":"EMPTY"}
        self.IonParameters    = {"CONTENT":"EMPTY"}
        self.Sequence         = {"CONTENT":"EMPTY"}
        self.RawData          = {"Duration":np.array([],dtype=np.float32),"Counts":np.array([],dtype=np.float32)}
        self.DataFileName     = ""

    def new_data_file(self):
        """Return a timestamped file stem: '<title>-YYYYmmddHHMMSS'."""
        return self.title + time.strftime("-%Y%m%d%H%M%S")

    def jsonify(self, data):
        """Coerce an iterable (e.g. an ndarray) into a JSON-serializable list."""
        return list(data)

    def reset_raw_data(self, xtype="Duration"):
        """Clear accumulated data, set the x-axis label, pick a fresh file name."""
        self.xlabel = xtype
        self.RawData = {self.xlabel: np.array([], dtype=np.float32), self.ylabel: []}
        self.DataFileName = self.new_data_file()
        print(self.path_prefix + self.DataFileName + ".hdf5")

    def append_raw_data(self, x, y):
        """Append one scan point *x* and its repeated-shot results *y*."""
        self.RawData[self.xlabel] = np.append(self.RawData[self.xlabel], x)
        self.RawData[self.ylabel] = self.RawData[self.ylabel] + [y]

    def generate_json(self):
        """
        Persist metadata to <name>.json and raw data to <name>.hdf5.

        Returns
        -------
        str
            The file stem (no extension).  Side effect: ``self.DataFileName``
            becomes the full ``.hdf5`` path (behavior kept from the original).
        """
        file_name = self.DataFileName
        json_path = self.path_prefix + file_name + ".json"
        hdf5_path = self.path_prefix + file_name + ".hdf5"

        self.DataFileName = hdf5_path

        # Raw arrays go to HDF5; everything else is JSON-serializable metadata.
        meta = self.__dict__.copy()
        del meta["RawData"]
        with open(json_path, 'w', encoding='utf-8') as f:
            json.dump(meta, f, ensure_ascii=False, indent=4)

        with h5py.File(hdf5_path, 'w') as f:
            f[self.xlabel] = self.RawData[self.xlabel]
            f[self.ylabel] = np.array(self.RawData[self.ylabel], dtype=np.float32)
        return file_name

# Timestamp captured once at import time; ``folder_path`` therefore points at
# the day the module was imported, not necessarily "today" at call time.
now = datetime.now()
folder_path = f'D:/Data/{now.year}/{now.year}{now.month:02d}/{now.year}{now.month:02d}{now.day:02d}/'
def data_viewer(folder_path=folder_path):
    """
    Interactive ipywidgets browser for the ``.hdf5`` files in *folder_path*.

    Shows a file dropdown, previous/next buttons, the averaged-counts plot
    rendered to a PNG, and a read-only text field with the selected path.

    Parameters
    ----------
    folder_path : str, optional
        Directory scanned for ``*.hdf5`` files.  Defaults to the module-level
        date-based path computed at import time.

    Notes
    -----
    NOTE(review): assumes the folder contains at least one ``.hdf5`` file;
    otherwise ``dropdown.value`` is ``None`` and the initial render fails —
    confirm with callers.
    """
    file_names = [file for file in os.listdir(folder_path) if file.endswith('.hdf5')]

    dropdown = widgets.Dropdown(
        options=file_names,
        description='Files:',
    )

    prev_button = widgets.Button(
        description='Previous Image',
    )

    next_button = widgets.Button(
        description='Next Image',
    )

    image_widget = widgets.Image()
    file_path_widget = widgets.Text(description='File Path:', disabled=True)

    display(dropdown, prev_button, next_button, image_widget, file_path_widget)

    def update_plot(change):
        # Render the averaged trace for the selected file into an in-memory
        # PNG buffer and push the bytes to the image widget.
        selected_file = folder_path + dropdown.value
        buf = io.BytesIO()
        average_plot(selected_file)
        plt.savefig(buf, format='png')
        buf.seek(0)
        image_widget.value = buf.read()
        plt.close()
        file_path_widget.value = selected_file

    def on_prev_button_click(button):
        # Step one entry back in the file list (stops at the first file).
        current_index = file_names.index(dropdown.value)
        if current_index > 0:
            dropdown.value = file_names[current_index - 1]

    def on_next_button_click(button):
        # Step one entry forward in the file list (stops at the last file).
        current_index = file_names.index(dropdown.value)
        if current_index < len(file_names) - 1:
            dropdown.value = file_names[current_index + 1]

    # Observe the dropdown value changes and automatically update the plot
    dropdown.observe(update_plot, names='value')

    # Update the plot with the first file when the function is called
    update_plot(None)

    prev_button.on_click(on_prev_button_click)
    next_button.on_click(on_next_button_click)
    
class scanParameterType():
    """Constants naming which physical quantity a scan sweeps over."""
    # Kept as plain ints (not enum.Enum) so existing comparisons keep working.
    time = 1
    frequency = 2

def raw_count(path):
    """
    Read an experiment HDF5 file and return its raw contents.

    Parameters
    ----------
    path : str
        Path to the experimental data file.

    Returns
    -------
    list_time : list
        Scan-parameter points (time/frequency/phase), shape [scanlistlength].
    list_values : list
        Per-point repeated single-shot photon counts,
        shape [scanlistlength, repeat, ionnumber].
    xlabel : str
        Name of the scan parameter (the dataset key that is not 'Counts').

    Raises
    ------
    ValueError
        If the file holds fewer than two datasets, i.e. it is missing the
        scan parameter or the experimental data.
    """
    with h5py.File(path, 'r') as mat:
        keys = list(mat.keys())
        if len(keys) < 2:
            raise ValueError(f"Expected at least 2 keys in the HDF5 file, got {len(keys)}.")

        ylabel = 'Counts'
        # The scan axis is whichever dataset is not the counts dataset.
        xlabel = next(s for s in keys if s != 'Counts')
        # Read each dataset fully, then split into a list of rows
        # (same elements as indexing the dataset one row at a time).
        list_time = list(mat[xlabel][:])
        list_values = list(mat[ylabel][:])

    return list_time, list_values, xlabel

def cycle_raw_counts(list_scanparameter,list_rawcountsvalues, atol=1e-8):
    """
    Detect the period M of a cyclic parameter scan and merge the raw-count
    data so every repetition of the same parameter value is concatenated
    along the shot axis.

    Parameters
    ----------
    list_scanparameter : (L,) array-like
        Scan parameters that repeat every M points (L = cycles * M).
    list_rawcountsvalues : sequence of ndarray, length L
        Element i has shape (repeat, ionnumber): the single-shot counts
        taken at list_scanparameter[i].
    atol : float, optional
        Absolute tolerance for floating-point parameter comparison.

    Returns
    -------
    unique_params : (M,) ndarray
        The parameter values of one cycle, in order of appearance.
    merged : ndarray, shape (M, cycles*repeat, ionnumber)
        All shots recorded at unique_params[k] live in merged[k].

    Raises
    ------
    ValueError
        When no consistent cycle length M can be found.
    """
    params = np.asarray(list_scanparameter)
    raw = np.stack(list_rawcountsvalues, axis=0)          # (L, repeat, ionnumber)
    total_len, shots, n_ions = raw.shape

    # Candidate periods: every later index where the first value recurs.
    recurrences = np.where(np.isclose(params, params[0], atol=atol))[0][1:]

    if recurrences.size == 0:
        # The first value never comes back: treat the whole scan as one cycle.
        period = total_len
    else:
        period = None
        for m in recurrences:
            if total_len % m != 0:        # a period must tile the scan exactly
                continue
            if np.allclose(params, np.tile(params[:m], total_len // m), atol=atol):
                period = int(m)
                break
        if period is None:
            raise ValueError("Unable to determine a consistent cycle length M.")

    n_cycles = total_len // period

    # (cycles*M, repeat, ion) -> (cycles, M, repeat, ion)
    #                        -> (M, cycles, repeat, ion)
    #                        -> (M, cycles*repeat, ion)
    merged = raw.reshape(n_cycles, period, shots, n_ions)
    merged = np.swapaxes(merged, 0, 1)
    merged = merged.reshape(period, n_cycles * shots, n_ions)

    return params[:period], merged





def verify(total_channels, show_channels):
    """
    Validate and normalize the channel-selection arguments.

    (Never used on the 2d setup; purpose unclear — kept for compatibility.)

    Parameters
    ----------
    total_channels : int
        Total number of channels; must be a positive integer.
    show_channels : int, list or None
        A single channel, an explicit list of channels, or None for all.

    Returns
    -------
    tuple
        ``(total_channels, show_channels)`` with show_channels as a list.
    """
    assert isinstance(total_channels, int), "total_channels must be an integer."
    assert total_channels > 0, "total_channels must be greater than 0."

    # Normalize: None -> every channel, int -> one-element list.
    if show_channels is None:
        show_channels = list(range(total_channels))
    elif isinstance(show_channels, int):
        show_channels = [show_channels]
    assert isinstance(show_channels, list), "show_channels must be a list."

    return total_channels, show_channels

def average(filename, ionnumber,threshold=None):
    """
    Average the raw data of one scan over the repeated shots, per ion.

    Parameters
    ----------
    filename : str
        Path to the experimental data file.
    ionnumber : int
        Number of ions.
    threshold : float, optional
        Bright/dark threshold; currently unused by this function.

    Returns
    -------
    list
        Two-element list: [0] the scan-parameter points (1-D, length =
        number of scan points); [1] the shot-averaged photon counts with
        shape [ionnumber, scanlistlength].
    """
    list_time, list_values, xlabel = raw_count(filename)
    repeat = len(list_values[0])
    sorted_values = [[] for _ in range(ionnumber)]
    for idx in range(len(list_time)):
        # sum() over the shot arrays adds elementwise, giving one total per ion
        per_ion_mean = sum(list_values[idx]) / repeat
        for ion in range(ionnumber):
            sorted_values[ion].append(per_ion_mean[ion])
    return [list_time, sorted_values]

def average_plot(filename, threshold=None, show_channels=[0]):
    """
    Plot the shot-averaged counts of the selected channels of one scan.

    (Never used on the 2d setup; purpose unclear — kept for compatibility.)

    Parameters
    ----------
    filename : str
        Path to the experimental data file.
    threshold : float, optional
        Forwarded to ``average`` (currently unused there), by default None.
    show_channels : list, optional
        Channels to plot, by default [0].  (List default is safe here:
        it is only read, never mutated.)
    """
    total_channels = 4
    total_channels, show_channels = verify(total_channels, show_channels)
    # BUG FIX: ``average`` requires ``ionnumber`` as its second argument
    # (the old call raised TypeError), and it already returns the data in
    # [ion][scan] orientation, so the old np.transpose was wrong as well.
    list_time, list_values = average(filename, total_channels, threshold=threshold)
    ch_number = len(show_channels)
    plt.figure(figsize=(8,8))
    for i, ch_idx in enumerate(show_channels):
        plt.subplot(ch_number, 1, i+1)
        plt.plot(list_time, list_values[ch_idx])
        plt.title('This is channel {}'.format(ch_idx))
        plt.xlabel('time '+r'$(\mu s)$')
        plt.ylabel('average count')
        plt.tight_layout()
    return

def pre_process(list_matrices, convert_matrix, threshold):
    """
    Average each raw count matrix over its repeated shots, per channel.

    (Never used on the 2d setup; purpose unclear — kept for compatibility.)

    Parameters
    ----------
    list_matrices : sequence of ndarray
        Each element is a flat array of length repeat * 4 (4 channels).
    convert_matrix : ndarray
        4x4 channel-mixing matrix applied before averaging.
    threshold : sequence or None
        Per-channel bright/dark thresholds; when given, counts are
        binarized (1 above threshold, 0 otherwise) before averaging.

    Returns
    -------
    list of ndarray
        One length-4 array of per-channel averages per input matrix.
    """
    results = []
    total_channels = 4
    shots = len(list_matrices[0]) // total_channels
    for raw_matrix in list_matrices:
        # (shots, channels) -> (channels, shots), then mix channels
        mixed = convert_matrix @ raw_matrix.reshape(shots, total_channels).T
        if threshold is not None:
            # binarize each channel against its own threshold
            for ch in range(mixed.shape[0]):
                mixed[ch] = np.where(mixed[ch] > threshold[ch], 1, 0)
        results.append(mixed.sum(axis=1) / shots)

    return results

def average_fit(file_name, convert_matrix = None, threshold = None, para_type = scanParameterType.time):
    """
    Load a scan, average it per channel, optionally binarize the averages.

    (Never used on the 2d setup; purpose unclear — kept for compatibility.)

    Parameters
    ----------
    file_name : str
        Path to the experimental data file.
    convert_matrix : ndarray
        Required 4x4 channel-mixing matrix (asserted non-None).
    threshold : float, optional
        When given, each averaged value is binarized against it.
    para_type : int, optional
        Unused; kept for interface compatibility.

    Returns
    -------
    list of ndarray
        Per-scan-point channel averages (binarized if threshold given).
    """
    assert convert_matrix is not None, "convert_matrix is required"

    list_para, list_matrices, xlabel = raw_count(file_name)
    # BUG FIX: pre_process requires a third (threshold) argument — the old
    # two-argument call raised TypeError.  Thresholding of the *averages*
    # happens below, so pass None here.
    avrg_data = pre_process(list_matrices, convert_matrix, None)
    if threshold is None:
        return avrg_data
    return [(avrg > threshold).astype(int) for avrg in avrg_data]

def histogram_plot(bright_data, dark_data):
    """
    Plot normalized photon-count histograms of bright and dark reference
    data and pick the count threshold that best separates them.

    Parameters
    ----------
    bright_data, dark_data : sequence of int
        Photon counts taken with the ion prepared bright / dark.

    Returns
    -------
    list
        ``[threshold + 1, fidelity]`` — the separating count value and the
        discrimination fidelity at the chosen threshold.
    """
    # TODO: duplicates histogram_plot in the notebook; the two should be merged
    max_index = max(max(bright_data),max(dark_data))
    # NOTE(review): bins=np.arange(max_index) spans 0..max_index-1, so the
    # largest counts fall outside the binning — confirm this is intended.
    hist_bright = np.histogram(bright_data, bins=np.arange(max_index), density=True)
    hist_dark = np.histogram(dark_data, bins=np.arange(max_index), density=True)
    plt.figure()
    plt.bar(hist_bright[1][:-1]+0.5,hist_bright[0])
    plt.bar(hist_dark[1][:-1]+0.5,hist_dark[0])
    plt.show()
    # For candidate threshold i: fidelity = (P(bright >= i) + P(dark < i)) / 2
    fidelity_list = [(sum(hist_bright[0][i:])+sum(hist_dark[0][:i]))/2 for i in range(max_index)]
    threshold = max(range(len(fidelity_list)), key=fidelity_list.__getitem__)
    return [threshold + 1, fidelity_list[threshold]]

def correlation(file_name, convert_matrix = None, threshold = None, para_type = scanParameterType.time):
    """Placeholder for a correlation analysis — not implemented yet."""
    pass


def cosine_func(x,a0,a1,a2,a3):
    """Sinusoidal model a0*sin(a1*x + a2) + a3 (uses sin despite the name)."""
    return a0 * np.sin(a1*x+a2) + a3

def gaussian_func(x,a,mu,sigma):
    """Gaussian a*exp(-(x-mu)^2 / (2*sigma^2)) with no baseline offset."""
    return a*np.exp(-(x-mu)**2/(2*sigma**2))

def biased_gaussian_func(x,a,mu,sigma,b):
    """Gaussian with a constant baseline b added."""
    return a*np.exp(-(x-mu)**2/(2*sigma**2))+b

def gaussian_func2(x, a, mu, sigma_reverse):
    """Gaussian parameterized by an inverse width: a*exp(-(x-mu)^2 * sigma_reverse^2).

    NOTE(review): unlike gaussian_func this has no 1/2 factor in the
    exponent, so sigma_reverse is not simply 1/sigma — confirm intended.
    """
    return a*np.exp(-(x-mu)**2 * sigma_reverse**2)

def thermal_single_func(x, p0, gamma, omega):
    """Damped-oscillation population model: p0/2 * (1 - exp(-gamma*x)*cos(omega*x))."""
    return 1/2*p0*(1 - np.exp(-gamma*x)*np.cos(omega*x))

def combinatorial_number(n,m):
    """
    Binomial coefficient C(n, m).

    Uses math.comb (exact integer arithmetic) instead of building three
    large factorials.  Note: for m > n math.comb returns 0 where the old
    factorial formula raised ValueError — a strictly wider domain.
    """
    return math.comb(n, m)

###################################### Generated by GPT-4 ######################################
def set_plot_style():
    """Apply the module's light plotting theme (white axes, dashed grey grid)."""
    # Start from matplotlib defaults, then overlay our colors in one update.
    plt.style.use('default')
    plt.rcParams.update({
        'axes.facecolor':   '#ffffff',
        'axes.edgecolor':   '#000000',
        'axes.labelcolor':  '#000000',
        'xtick.color':      '#000000',
        'ytick.color':      '#000000',
        'figure.facecolor': '#ffffff',
        'grid.color':       '#d3d3d3',
        'grid.alpha':       0.5,
        'grid.linestyle':   '--',
    })

def pre_process_data(file_name,ionnumber=1):
    """
    Load and shot-average a scan, returning per-ion x/y curves.

    Similar to raw_count, but the counts are already averaged over the
    repeated shots; the result is ready for plotting one bright/dark
    curve per ion versus the scan parameter.

    Parameters
    ----------
    file_name : str
        Path to the experimental data file.
    ionnumber : int, optional
        Number of ions, by default 1.

    Returns
    -------
    x_data : list
        Scan-parameter points, shape [ionnumber, scanlistlength]
        (the same point list repeated once per ion).
    y_data : list
        Shot-averaged counts, shape [ionnumber, scanlistlength].
    """
    scan_points, per_ion_means = average(file_name, ionnumber=ionnumber)
    x_data = [scan_points] * ionnumber
    y_data = [per_ion_means[ion] for ion in range(ionnumber)]
    return x_data, y_data

def lorentzian(x, a, x0, gamma):
    """Lorentzian line shape: a * gamma^2 / ((x - x0)^2 + gamma^2)."""
    detuning = x - x0
    return a * gamma**2 / (detuning**2 + gamma**2)

def truncated_lorentzian(x, a, x0, gamma):
    """
    Lorentzian line shape zeroed above the center frequency x0
    (models the one-sided Doppler fluorescence profile).

    BUG FIX: the old in-place boolean mask (``y[x > x0] = 0``) required an
    ndarray and crashed on scalar x; np.where handles both without
    changing the array behavior.
    """
    x = np.asarray(x, dtype=float)
    y = a * gamma**2 / ((x - x0)**2 + gamma**2)
    return np.where(x > x0, 0.0, y)

def fit_plot_lorentzian(file_name):
    """
    Fit a Lorentzian line shape to (single-ion) scan data and plot the fit
    together with the original points.

    Data are pre-processed with pre_process_data, i.e. shot-averaged.

    Parameters
    ----------
    file_name : str
        Path to the experimental data file.

    Returns
    -------
    dict
        Fitted parameters:
            - amp: amplitude
            - f0: center frequency
            - gamma: full width
    """
    x_data, y_data = pre_process_data(file_name)
    # BUG FIX: pre_process_data returns nested per-ion lists; the old code
    # indexed the outer list, so the initial-guess values were garbage.
    # Unwrap ion 0 (this routine is single-ion, like spin_dependent_fit).
    x_data = np.asarray(x_data[0], dtype=float)
    y_data = np.asarray(y_data[0], dtype=float)

    # Seed the fit at the data maximum.
    max_index = np.argmax(y_data)
    initial_values = [y_data[max_index], x_data[max_index], 1]

    popt, _ = curve_fit(lorentzian, x_data, y_data, p0=initial_values)

    print("Fitted parameters:")
    print("Amplitude a =", popt[0])
    print("Center frequency x0 =", popt[1])
    print("Full width at half maximum gamma =", popt[2])

    set_plot_style()

    plt.figure()
    plt.scatter(x_data, y_data, label="Original data", s=10, color='#f39c12')
    plt.plot(x_data, lorentzian(x_data, *popt), label="lorentzian fit", color="#3498db", linewidth=2)
    plt.legend()
    plt.show()
    return {"amp": popt[0], "f0": popt[1], "gamma": popt[2]}

### spin dependent force related
def alpha(t, delta, omega_sb, phi_m):
    return (omega_sb/2/delta)*np.exp(-1j*phi_m)*(1-np.exp(-1j*delta*t))

def P_up(t, nbar, d_nbar, delta, omega_sb):
    phi_m = 0
    delta = delta*2*np.pi
    omega_sb = omega_sb*2*np.pi
    return 0.5 - 0.5 * np.exp(-0.5*d_nbar*t*np.abs(4*omega_sb/2/delta)**2 - (nbar+0.5)*np.abs(2*alpha(t, delta, omega_sb, phi_m))**2)

  # Fits/simulates a time scan of the spin-dependent-force signal.
def spin_dependent_fit(file_name, nbar0=0.05, d_nbar0=0.15, delta0=10, omega_sb0=10, raw_threshold=2):
    """
    Fit a single-ion spin-dependent-force time scan with P_up and plot it.

    Parameters
    ----------
    file_name : str
        Path to the experimental data file.
    nbar0 : float, optional
        Initial guess for the mean phonon number, by default 0.05.
    d_nbar0 : float, optional
        Initial guess for the heating rate, by default 0.15.
    delta0 : int, optional
        Initial detuning guess; replaced below by an FFT-based estimate.
    omega_sb0 : int, optional
        Initial sideband Rabi-frequency guess, by default 10.
    raw_threshold : int, optional
        Unused; kept for interface compatibility.
    """
    x_data, y_data = pre_process_data(file_name)
    x_data = np.array(x_data[0])/1E3
    y_data = np.array(y_data[0])

    # Bounds for (nbar, d_nbar, delta, omega_sb).
    bounds = ([0, 0.1, 0, 1], [0.1, 1, 100, 20])
    estimated_frequency = estimate_frequency(x_data, y_data)
    delta0 = estimated_frequency
    print(estimated_frequency)
    # BUG FIX: the caller-supplied guesses nbar0/d_nbar0/omega_sb0 were
    # previously ignored in favour of hard-coded [0.05, 0.15, delta0, 10]
    # (defaults unchanged, so default behavior is identical).
    initial_guess = [nbar0, d_nbar0, delta0, omega_sb0]
    popt, pcov = curve_fit(P_up, x_data, y_data, p0 = initial_guess, bounds= bounds)
    print("Fitted parameters:")
    print("nbar =", popt[0])
    print("d_nbar =", popt[1])
    print("delta =", popt[2])
    print("omega_sb =", popt[3])

    plt.figure()
    plt.scatter(x_data, y_data, label="Original data", s=10, color='blue')
    plt.plot(x_data, P_up(x_data, *popt), label="Fit", color="red", linewidth=2)
    plt.legend()
    plt.show()
    
  # Re-bases the x axis of a frequency scan to the detuning from the center frequency.
def PlotSingleForceFreScan(file_name, f_center,duration, nbar,d_nbar,sb_pitime):
    """
    Plot a single-ion spin-dependent-force frequency scan, x axis re-based
    to the detuning from f_center (kHz), overlaid with the P_up simulation.

    This routine is single-ion only, so the per-ion arrays are not iterated.

    Parameters
    ----------
    file_name : str
        Path to the experimental data file.
    f_center : float
        Center frequency subtracted from the scan axis (same unit as the
        stored scan parameter; the *1E3 scaling suggests MHz — TODO confirm).
    duration : float
        Pulse duration (divided by 1E3 below for the simulation — confirm units).
    nbar, d_nbar : float
        Mean phonon number and heating rate used in the simulation.
    sb_pitime : float
        Sideband pi-time; omega_sb = 1/(2*sb_pitime*1E-3).
    """
    x_data, y_data = pre_process_data(file_name)
    # BUG FIX: removed an h5py.File handle that was opened here but never
    # used nor closed (resource leak).
    f_list = (np.array(x_data[0])-f_center)*1E3
    omega_sb = 1/(2*sb_pitime*1E-3)
    plt.figure()
    # measured data
    plt.plot(f_list , np.array(y_data[0]), label='exp_data')
    # theoretical simulation
    plt.plot(f_list ,P_up(duration/1E3, nbar, d_nbar/1E3, f_list, omega_sb),color='r', ls='--', label='Simulation')
    plt.xlabel(r'Detuning $\delta / 2\pi$ [kHz]')
    plt.show()
    


def doppler_fit(file_name):
    """
    Fit a truncated Lorentzian to a Doppler frequency scan and plot the
    fit together with the original data.

    Parameters
    ----------
    file_name : str
        Path to the experimental data file.

    Returns
    -------
    dict
        Fitted parameters:
            - amp: amplitude
            - f0: center frequency
            - gamma: full width
    """
    x_data, y_data = pre_process_data(file_name)
    # BUG FIX: pre_process_data returns nested per-ion lists; the old code
    # indexed the outer list, so the initial-guess values were garbage.
    # Unwrap ion 0 (single-ion routine).
    x_data = np.asarray(x_data[0], dtype=float)
    y_data = np.asarray(y_data[0], dtype=float)

    # Seed the fit at the data maximum.
    max_index = np.argmax(y_data)
    initial_values = [y_data[max_index], x_data[max_index], 1]

    popt, _ = curve_fit(truncated_lorentzian, x_data, y_data, p0=initial_values)

    print("Fitted parameters:")
    print("Amplitude a =", popt[0])
    print("Center frequency x0 =", popt[1])
    print("Full width at half maximum gamma =", popt[2])

    set_plot_style()

    plt.figure()
    plt.scatter(x_data, y_data, label="Original data", s=10, color='#f39c12')
    plt.plot(x_data, truncated_lorentzian(x_data, *popt), label="lorentzian fit", color="#3498db", linewidth=2)
    plt.legend()
    plt.show()
    return {"amp": popt[0], "f0": popt[1], "gamma": popt[2]}

def rabi_oscillation_decay(x, a, omega, offset, tau):
    """Damped Rabi fringe: offset - a * e^(-x/tau) * cos(2*pi*omega*x)."""
    envelope = np.exp(-x / tau)
    return offset - a * envelope * np.cos(2 * np.pi * omega * x)

def rabi_oscillation_nodecay(x, a, omega, offset):
    """Undamped Rabi fringe: offset - a * cos(2*pi*omega*x)."""
    return offset - a * np.cos(2 * np.pi * omega * x)

def estimate_frequency(x_data, y_data):
    """
    Estimate the dominant frequency of a periodic time-scan trace from the
    peak of its FFT magnitude (inputs as returned by pre_process_data).

    Parameters
    ----------
    x_data : array-like
        Evenly-spaced scan points (spacing taken from the first two points).
    y_data : array-like
        Shot-averaged counts at each scan point.

    Returns
    -------
    float
        Estimated frequency (absolute value, DC removed).
    """
    # Remove the DC offset so the zero-frequency bin does not dominate.
    spectrum = fft(y_data - np.mean(y_data))
    sample_spacing = x_data[1] - x_data[0]
    freqs = fftfreq(len(x_data), sample_spacing)
    # Only the positive-frequency half is searched.
    half = len(spectrum) // 2
    peak = np.argmax(np.abs(spectrum[:half]))
    return np.abs(freqs[peak])



def ramsey(t, delta, tau):
    """Ramsey fringe with exponential decay: 0.5*(1 + e^(-t/tau)*cos(delta*t))."""
    envelope = np.exp(-t / tau)
    return 0.5 + 0.5 * envelope * np.cos(delta * t)

def ramsey_no_decay(t, delta):
    """Ramsey fringe in the tau -> infinity limit: 0.5*(1 + cos(delta*t))."""
    return 0.5 + 0.5 * np.cos(delta * t)

def guess_delta(t, y):
    """Angular-frequency guess (rad/s) from the dominant periodogram peak."""
    detrended = y - np.mean(y)              # strip the ~0.5 DC offset
    sample_rate = 1.0 / np.mean(np.diff(t))
    freqs, power = periodogram(detrended, fs=sample_rate, scaling='spectrum')
    peak_idx = 1 + np.argmax(power[1:])     # skip the DC bin
    return 2 * np.pi * freqs[peak_idx]


def fit_singleramsey(tdata, ydata, max_rel_err=1.0, max_tau_factor=10.0):
    """
    Fit one Ramsey trace; returns (delta, tau).

    If tau is ill-determined (relative error above *max_rel_err*, or tau
    larger than *max_tau_factor* times the trace length, or the full model
    fails to converge), the data are refit with the no-decay model and
    tau = np.inf is returned.
    """
    tdata = np.asarray(tdata, dtype=float)
    ydata = np.asarray(ydata, dtype=float)

    # Initial guesses: periodogram peak for delta, half the trace for tau.
    delta0 = guess_delta(tdata, ydata)
    tau0   = 0.5 * (tdata[-1] - tdata[0])

    try:
        popt, pcov = curve_fit(
            ramsey, tdata, ydata,
            p0=[delta0, tau0],
            bounds=([0.0, 0.0], [np.inf, np.inf]),
            maxfev=10000
        )
        d, tau = popt
        perr   = np.sqrt(np.diag(pcov))

        tau_rel_err  = perr[1] / tau if tau > 0 else np.inf
        tau_too_big  = tau > max_tau_factor * tdata.max()

        if tau_rel_err > max_rel_err or tau_too_big:
            # tau is unreliable -> refit with the tau -> infinity model
            d, = curve_fit(ramsey_no_decay, tdata, ydata, p0=[d])[0]
            tau = np.inf

    except RuntimeError:               # full model failed - fall back directly
        # BUG FIX: unpack the one-element popt so d is a scalar, matching
        # the branches above (previously this returned a length-1 ndarray).
        d, = curve_fit(ramsey_no_decay, tdata, ydata, p0=[delta0])[0]
        tau = np.inf

    return d, tau

def ramsey_fit(file_name,ionnumber):
    """
    Fit a Ramsey fringe for every ion in *file_name*, print the fitted
    detuning/pi-time (and coherence time when determined) and plot all
    data + fits in one figure.

    Parameters
    ----------
    file_name : str
        Path to the experimental data file.
    ionnumber : int
        Number of ions in the data set.

    Returns
    -------
    list
        Fitted detuning delta (angular frequency) for each ion.
    """
    x_data_list, y_data_list = pre_process_data(file_name,ionnumber=ionnumber)
    # Estimate the frequency using FFT
    delta_list=[]
    plt.figure()
    for i in range(ionnumber):
        x_data=np.array(x_data_list[i])
        y_data=np.array(y_data_list[i])
        
        # fit_singleramsey returns tau = np.inf when the decay constant
        # cannot be determined reliably; plotting below branches on that.
        delta,tau=fit_singleramsey(x_data, y_data)
        # Print the fitted parameters
        print("the",i+1," th ion Fitted parameters:")
        print("detuning 2pi*delta =2pi*",delta/(2*np.pi))
        print("Pi-time 0.5/omega =",np.pi/delta)
        if tau != np.inf:
            print("coherence time tau =", tau)
        
        # Set the plot style
        set_plot_style()

        # Create the figure and plot the data and fitted curve
        
        plt.scatter(x_data, y_data, label=f"{i+1}th ion Original data", s=10)
        if tau != np.inf:
            plt.plot(x_data, ramsey(x_data, delta, tau), label=f"{i+1}th ion Fit", linewidth=2)
        else:
            plt.plot(x_data, ramsey_no_decay(x_data, delta), label=f"{i+1}th ion Fit", linewidth=2)
        delta_list.append(delta)
        
    plt.legend()
    plt.show()
    print("all ion average Pi-time 0.5/omega =", np.average(np.pi/np.array(delta_list)))
    return delta_list


def greater_equal(x, y):
    """Elementwise x >= y; used as the comparator for argrelextrema."""
    return x >= y

def peak_decay(x_data, y_data):
    """
    Estimate the decay constant tau of a damped oscillation by fitting an
    exponential envelope through its local maxima (inputs as returned by
    pre_process_data).

    Parameters
    ----------
    x_data : ndarray
        Scan-parameter points.
    y_data : ndarray
        Shot-averaged counts at each scan point.

    Returns
    -------
    float
        Estimated decay constant tau of the envelope a * exp(-x / tau).
    """
    # Local maxima (>= comparator also admits flat-topped peaks).
    maxima_idx = argrelextrema(y_data, comparator=greater_equal)
    peak_x = x_data[maxima_idx]
    peak_y = y_data[maxima_idx]

    def _envelope(x, amplitude, tau):
        return amplitude * np.exp(-x / tau)

    # Seed with the data span and half the trace length.
    seed = [np.ptp(y_data), x_data[-1] / 2]
    fitted, _ = curve_fit(_envelope, peak_x, peak_y, p0=seed)
    return fitted[1]

def rabi_fitnodecay(file_name,ionnumber):
    """
    Fit an undamped Rabi oscillation for every ion and plot data + fits.

    Parameters
    ----------
    file_name : str
        Path to the experimental data file.
    ionnumber : int
        Number of ions.

    Returns
    -------
    list
        popt_list: one fitted-parameter array per ion, each of length 3:
            - a: amplitude
            - omega: frequency
            - offset: vertical offset
    """
    # Set the initial values for the fitting parameters
    # for multi ion, xdata and ydata will be a list of length ion number and data
    x_data_list, y_data_list = pre_process_data(file_name,ionnumber=ionnumber)
    # Estimate the frequency using FFT
    popt_list=[]
    pcov_list=[]
    plt.figure()
    for i in range(ionnumber):
        x_data=np.array(x_data_list[i])
        y_data=np.array(y_data_list[i])
        # FFT-based frequency seed for the fit below.
        estimated_frequency = estimate_frequency(x_data, y_data)
        # Estimate the decay time constant tau using peak_decay function

        # Set the initial guess for the fit parameters
        initial_guess = [
            np.ptp(y_data)/2,
            estimated_frequency,
            np.mean(y_data),
        ]

        # Fit the rabi_oscillation_decay function to the data using curve_fit
        popt, pcov = curve_fit(rabi_oscillation_nodecay, x_data, y_data, p0=initial_guess)

        # Print the fitted parameters
        print("the",i+1," th ion Fitted parameters:")
        print("Contrast 2a =", 2*popt[0])
        print("Pi-time 0.5/omega =", 0.5/popt[1])
        
        # Set the plot style
        set_plot_style()

        # Create the figure and plot the data and fitted curve
        
        plt.scatter(x_data, y_data, label=f"{i+1}th ion Original data", s=10)
        plt.plot(x_data, rabi_oscillation_nodecay(x_data, *popt), label=f"{i+1}th ion Fit", linewidth=2)
        popt_list.append(popt)
        pcov_list.append(pcov)
        
    plt.legend()
    plt.show()
    print("all ion average Pi-time 0.5/omega =", np.average(0.5/np.array([ popt_list[i][1] for i in range(ionnumber)])))
    return popt_list

def RedandBluesidebandPitimecomparision(file_name1, file_name2, ionnumber):
    """
    Compare red- and blue-sideband Rabi frequencies via their AC-Stark-shift
    microwave-Ramsey measurements.

    For an MS-gate measurement the red and blue sideband Rabi frequencies
    must match.  Each sideband AC-Stark-shifts the Zeeman levels in
    proportion to its Rabi frequency, displacing them from resonance, which
    a microwave-Ramsey measurement detects.  Fitting both Ramsey data sets
    and comparing the fitted pi-times therefore compares the sideband Rabi
    frequencies.

    Parameters
    ----------
    file_name1 : str
        Red-sideband microwave-Ramsey measurement data file.
    file_name2 : str
        Blue-sideband microwave-Ramsey measurement data file.
    ionnumber : int
        Number of ions.

    Returns
    -------
    list
        Per-ion ratio of the red/blue AC-Stark-shift Ramsey pi-times,
        length [ionnumber].
    """
    
    # Process the first file
    x_data_list1, y_data_list1 = pre_process_data(file_name1, ionnumber=ionnumber)
    # Process the second file
    x_data_list2, y_data_list2 = pre_process_data(file_name2, ionnumber=ionnumber)
    pi_ratio = []  # List to store pi_ratio

    
    plt.figure()
    for i in range(ionnumber):
        x_data1=np.array(x_data_list1[i])
        y_data1=np.array(y_data_list1[i])
        x_data2=np.array(x_data_list2[i])
        y_data2=np.array(y_data_list2[i])
        # Estimate the frequency using FFT
        estimated_frequency1 = estimate_frequency(x_data1, y_data1)
        estimated_frequency2 = estimate_frequency(x_data2, y_data2)
        # Estimate the decay time constant tau using peak_decay function
        # NOTE(review): tau_estimate1/2 are computed but never used below —
        # confirm they can be removed.
        tau_estimate1 = peak_decay(x_data1, y_data1)
        tau_estimate2 = peak_decay(x_data2, y_data2)
        # Set the initial guess for the fit parameters
        initial_guess1 = [
            np.ptp(y_data1)/2,
            estimated_frequency1,
            np.mean(y_data1),
        ]
        # Fit the rabi_oscillation_decay function to the data using curve_fit
        popt1, pcov1 = curve_fit(rabi_oscillation_nodecay, x_data1, y_data1, p0=initial_guess1)
        # Set the initial guess for the fit parameters
        initial_guess2 = [
            np.ptp(y_data2)/2,
            estimated_frequency2,
            np.mean(y_data2),
        ]
        # Fit the rabi_oscillation_decay function to the data using curve_fit
        popt2, pcov2 = curve_fit(rabi_oscillation_nodecay, x_data2, y_data2, p0=initial_guess2)
        # Print the fitted parameters
        plt.scatter(x_data1, y_data1, label=f"{i+1}th ion Original red sideband data", s=10)
        plt.plot(x_data1, rabi_oscillation_nodecay(x_data1, *popt1), label=f"{i+1}th ion Fit red sideband", linewidth=2)
        plt.scatter(x_data2, y_data2, label=f"{i+1}th ion Original blue sideband data", s=10)
        plt.plot(x_data2, rabi_oscillation_nodecay(x_data2, *popt2), label=f"{i+1}th ion Fit blue sideband", linewidth=2)
        pi_ratio.append((0.5/popt1[1])/(0.5/popt2[1]))
        
        print("the",i+1," th ion Fitted parameters:")
        print("red sideband AC stark shift ramsey Pi-time 0.5/omega =", 0.5/popt1[1])
        print("blue sideband AC stark shift ramsey Pi-time 0.5/omega =", 0.5/popt2[1])
        print("their ratio is", (0.5/popt1[1])/(0.5/popt2[1]))
    plt.legend()
    plt.show()
    return pi_ratio

def rabi_fit(file_name,ionnumber):
    """
    Fit a decaying Rabi oscillation to every ion's data and plot the results.

    Parameters
    ----------
    file_name : str
        Path to the experiment data file.
    ionnumber : int
        Number of ions.

    Returns
    -------
    list
        One fitted-parameter array per ion (length ``ionnumber``); each entry
        holds 4 parameters:
            - a: amplitude
            - omega: frequency
            - offset: offset
            - tau: decay time constant
    """
    # For multiple ions the pre-processing yields one x/y curve per ion.
    durations, signals = pre_process_data(file_name,ionnumber=ionnumber)
    fitted_params = []
    fitted_covs = []
    plt.figure()
    for ion in range(ionnumber):
        t = np.array(durations[ion])
        p = np.array(signals[ion])

        # Build the initial guess from simple estimators: FFT for the
        # oscillation frequency, peak envelope for the decay constant.
        freq0 = estimate_frequency(t, p)
        tau0 = peak_decay(t, p)
        guess = [
            np.ptp(p) / 2,
            freq0,
            np.mean(p),
            tau0,
        ]

        # Least-squares fit of the decaying Rabi model.
        popt, pcov = curve_fit(rabi_oscillation_decay, t, p, p0=guess)

        print("the",ion+1," th ion Fitted parameters:")
        print("Contrast 2a =", 2*popt[0])
        print("Pi-time 0.5/omega =", 0.5/popt[1])
        print("Decay time constant tau =", popt[3])

        # Apply the shared plot style before drawing this ion's curves.
        set_plot_style()

        plt.scatter(t, p, label=f"{ion+1}th ion Original data", s=10)
        plt.plot(t, rabi_oscillation_decay(t, *popt), label=f"{ion+1}th ion Fit", linewidth=2)
        fitted_params.append(popt)
        fitted_covs.append(pcov)

    plt.legend()
    plt.show()
    print("all ion average Pi-time 0.5/omega =", np.average(0.5/np.array([fp[1] for fp in fitted_params])))
    return fitted_params

def rabi_pi_time_fit(file_name1, file_name2, ionnumber):
    """
    Compare the carrier Rabi rate with the micromotion-sideband rate for
    several ions.  ``file_name1`` holds the carrier curves and ``file_name2``
    the micromotion-sideband curves.

    Parameters
    ----------
    file_name1 : str
        Path to the carrier Rabi-oscillation data file.
    file_name2 : str
        Path to the micromotion-sideband oscillation data file.
    ionnumber : int
        Number of ions.

    Returns
    -------
    tuple
        (popt_list, pi_times1, pi_times2, pi_time_ratios): fitted parameters
        for all curves (file1 ions first, then file2 ions), the per-ion
        Pi-times of each file, and the per-ion ratios file1/file2.
    """
    # Pre-process both files; each yields one x/y curve per ion.
    datasets = [
        pre_process_data(file_name1, ionnumber=ionnumber),
        pre_process_data(file_name2, ionnumber=ionnumber),
    ]

    popt_list = []   # fitted parameters, file1 ions first then file2 ions
    pcov_list = []   # matching covariance matrices
    per_file_pi_times = [[], []]

    for (x_lists, y_lists), pi_times in zip(datasets, per_file_pi_times):
        for ion in range(ionnumber):
            xs = np.array(x_lists[ion])
            ys = np.array(y_lists[ion])

            # Initial guess: FFT frequency estimate plus a peak-envelope
            # estimate for the decay constant.
            freq0 = estimate_frequency(xs, ys)
            tau0 = peak_decay(xs, ys)
            guess = [
                np.ptp(ys) / 2,
                freq0,
                np.mean(ys),
                tau0,
            ]

            # Fit the decaying Rabi model to this ion's curve.
            popt, pcov = curve_fit(rabi_oscillation_decay, xs, ys, p0=guess)

            # Pi-time is half a period: 0.5/omega.
            pi_times.append(0.5 / popt[1])

            popt_list.append(popt)
            pcov_list.append(pcov)

    pi_times1, pi_times2 = per_file_pi_times

    # Per-ion ratio of carrier Pi-time to sideband Pi-time.
    pi_time_ratios = [t1 / t2 for t1, t2 in zip(pi_times1, pi_times2)]

    print("Pi-time ratios (File1 / File2) for each ion:")
    for i in range(ionnumber):
        print(f"Ion {i + 1} Pi-time ratio = {pi_time_ratios[i]}")


    average_pi_time_ratio = np.average(pi_time_ratios)
    print("Average Pi-time ratio (File1 / File2) for all ions =", average_pi_time_ratio)

    return popt_list, pi_times1, pi_times2, pi_time_ratios  # Return the fitting parameters and Pi-times

def detect_peaks(x_data, y_data, threshold, prominence, resolution):
    """
    Detect peaks in a single curve with scipy.signal.find_peaks and thin
    them so that reported peaks are at least ``resolution`` apart in x;
    within one resolution window only the tallest peak survives.

    Parameters
    ----------
    x_data : list or array
        Scanned parameter values.
    y_data : list or array
        Measured photon counts at each scan point.
    threshold : float
        Minimum peak height passed to find_peaks.
    prominence : float
        Minimum peak prominence passed to find_peaks.
    resolution : float
        Minimum x-spacing between two reported peaks.

    Returns
    -------
    list
        Indices of the peaks that survived the filtering.
    """
    candidates, _ = find_peaks(y_data, height=threshold, prominence=prominence)

    kept = []
    for idx in candidates:
        if not kept:
            kept.append(idx)
            continue
        last = kept[-1]
        if (x_data[idx] - x_data[last]) >= resolution:
            kept.append(idx)
        elif y_data[idx] > y_data[last]:
            # Too close to the previous peak: keep only the taller one.
            kept[-1] = idx

    return kept

def plot_spectrum(file_name, threshold, prominence,ionnumber, resolution=0.1):
    """
    Plot each ion's bright/dark curve from a frequency scan, filter the
    detected peaks by threshold/prominence/resolution, and return the peak
    frequencies (not indices).

    Parameters
    ----------
    file_name : str
        Path to the experiment data file.
    threshold : float
        Peak-height threshold passed to detect_peaks.
    prominence : float
        Peak-prominence threshold passed to detect_peaks.
    ionnumber : int
        Number of ions.
    resolution : float, optional
        Minimum frequency spacing between reported peaks, by default 0.1.

    Returns
    -------
    list
        A list of lists with shape [ionnumber][n]; for each ion, the n
        frequencies of the peaks that passed the filter.
    """
    freq_lists, amp_lists = pre_process_data(file_name,ionnumber=ionnumber)
    plt.figure()
    peaks_list = [[] for _ in range(ionnumber)]
    for ion in range(ionnumber):
        freqs, amps = freq_lists[ion], amp_lists[ion]
        found = detect_peaks(freqs, amps, threshold, prominence, resolution)
        plt.plot(freqs, amps)
        plt.xlabel(f'{ion+1}th ion Frequency')
        plt.ylabel(f'{ion+1}th ion Amplitude')
        # Mark and annotate every surviving peak.
        for pk in found:
            plt.plot(freqs[pk], amps[pk], marker='o', markersize=5, color='red')
            plt.text(freqs[pk], amps[pk], f'({freqs[pk]:.3f}, {amps[pk]:.2f})', fontsize=8)
        for pk in found:
            peaks_list[ion].append(freqs[pk])
    plt.show()

    return peaks_list


###################################### END Generated by GPT-4 ######################################
def Laguerre(n, x):
    """
    Generalized Laguerre polynomial L_n^1(x), evaluated from the series

        L_n^1(x) = sum_{k=0}^{n} (-1)^k C(n+1, n-k) x^k / k!

    This is the alpha=1 associated Laguerre polynomial appearing in the
    sideband Rabi-frequency scaling used by ``thermal_func``.

    Parameters
    ----------
    n : int
        Polynomial order (n >= 0).
    x : float
        Evaluation point.

    Returns
    -------
    float
        L_n^1(x).
    """
    # math.comb is the stdlib binomial coefficient, replacing the
    # hand-rolled combinatorial_number helper; accumulate into a local
    # instead of shadowing the builtin ``sum``.
    total = 0.0
    for k in range(n + 1):
        total += (-1) ** k * math.comb(n + 1, n - k) * (x ** k / math.factorial(k))
    return total

def thermal_func(x, *args):
    '''
    Model of carrier + sideband flopping for a thermal ion.

    The parameters are packed into ``args`` as
    ``(p_0..p_{n-1}, gamma_0..gamma_{n-1}, omega)``: per-term populations,
    per-term decay rates, and the bare Rabi frequency.  The i-th term's
    frequency (i >= 1) is scaled by
    ``exp(-eta^2/2) * eta * sqrt(1/(i+1)) * L_i^1(eta^2)`` where L is the
    generalized Laguerre polynomial:
        L_n^a(x) = \sum_{k=0}^{n} (-1)^k C_{n+a}^{n-k} x^k/k!
    Has not been used on the 2d setup.

    Parameters
    ----------
    x : float or array_like
        Interrogation time(s).
    *args : float
        2*n+1 packed parameters as described above (length must be odd).

    Returns
    -------
    float or ndarray
        sum_i  p_i/2 * (1 - exp(-gamma_i x) cos(omega_i x))
    '''
    eta = 0.098 #eta is a pre-given constant

    n = len(args) // 2

    # Requires an odd argument count: n populations, n rates, one omega.
    assert (len(args) == 2*n+1)

    pn = np.array(args[0:n])
    gamma = np.array(args[n:2*n])
    omega = args[-1]

    # Fix: force a float dtype.  The previous np.array([omega]+[0,...])
    # became an int array whenever omega was an int, silently truncating
    # every sideband frequency to zero.
    omega_l = np.zeros(n, dtype=float)
    omega_l[0] = omega
    for i in range(1,n):
        omega_l[i] = omega * np.exp(-eta*eta/2) * eta * np.sqrt(1/(i+1)) * Laguerre(i,eta*eta)
    sum_p = 0
    for i in range(n):
        sum_p += 1/2*pn[i]*(1 - np.exp(-gamma[i]*x) * np.cos(omega_l[i]*x))
    return sum_p

def automatic_find_initial_omega(xdata, ydata):
    """Placeholder: automatically estimate an initial omega guess from data.

    Not implemented yet; currently does nothing and returns None.
    """
    pass

def check_fitting_quality(ion, xdata, ydata, y_fit):
    """Placeholder hook to assess how well ``y_fit`` matches one ion's data.

    Called by gaussian_fit; not implemented yet (does nothing, returns None).
    """
    pass

def gaussian_fit(fileName, convert_matrix = None, threshold = None, para_type = scanParameterType.frequency, plot_figure = False):
    """
    Fit a Gaussian to each ion's averaged-count spectrum from a frequency scan.

    Old implementation; cannot be used on the 2d setup.

    Parameters
    ----------
    fileName : str
        Path of the scan data file, forwarded to raw_count.
    convert_matrix : ndarray, optional
        Channel-to-ion conversion matrix; its first dimension fixes the ion
        count.  NOTE(review): the default None raises AttributeError at
        ``convert_matrix.shape[0]`` — callers apparently must always pass one;
        confirm before relying on the default.
    threshold : optional
        Count threshold forwarded to pre_process, by default None.
    para_type : optional
        Scan-parameter type, by default scanParameterType.frequency
        (not referenced anywhere in the body).
    plot_figure : bool, optional
        When True, plot data and fit; only channel index 2 is drawn.

    Returns
    -------
    list
        One fitted parameter triple [amplitude, mu, sigma] per ion; an ion
        whose curve is identically zero gets the placeholder [0, 0, 1].
    """
    list_frequency, list_matrices, xlabel = raw_count(fileName)
    avrg_data_all = pre_process(list_matrices, convert_matrix, threshold)

    ion_number = convert_matrix.shape[0]
    fit_paras = []

    for ion_index in range(ion_number):
        
        # This ion's average count at every scanned frequency.
        avrg_single_ion = [avrg[ion_index] for avrg in avrg_data_all]
        xdata = np.array(list_frequency)
        ydata = np.array(avrg_single_ion)

        #mean,std=scipy.stats.norm.fit(ydata)
        a0 = max(ydata)
        
        # All-zero curve: skip the fit and store a harmless placeholder.
        if a0 == 0:
            fit_paras.append([0,0,1])
            continue
        a1 = xdata[np.argmax(ydata)]
        #a2 = np.std(ydata)
        # Width guess scaled by the scan step and normalised by the amplitude.
        a2 = np.std(ydata) * (xdata[1] - xdata[0]) / a0
        p0 = [a0, a1, a2]

        #a2 = sum(y * (x - a1)**2)
        #sigma_reverse = 1/(a2 * np.sqrt(2)
        #p0 = [a0, a1, sigma_reverse]
        #p_l = [a0/2, xdata[0], a2/2]
        #p_h = [a0*2, xdata[-1], a2*2]

        #print(p0)
        popt, pcov = curve_fit(gaussian_func, xdata, ydata, p0=p0)
        #popt, pcov = curve_fit(gaussian_func2, xdata, ydata, p0=p0)
        fit_paras.append(popt)
        #print(popt)

        # Hook for fit-quality diagnostics (currently a no-op stub).
        fit_data = gaussian_func(xdata, *popt)
        check_fitting_quality(ion_index, xdata, ydata, fit_data)
        #print('fit_paras', popt)

    if plot_figure:
        plt.figure(figsize=(8,8))
        for ion_index in range(ion_number):
            if ion_index != 2:
                continue ## currently only the 3rd channel is needed
            avrg_single_ion = [avrg[ion_index] for avrg in avrg_data_all]
            x_fit = np.linspace(min(list_frequency),max(list_frequency), 100)
            avrg_fit = [gaussian_func(x, *fit_paras[ion_index]) for x in x_fit]

            plt.subplot(ion_number,1,ion_index+1)
            plt.plot(list_frequency, avrg_single_ion)

            xdata = np.array(list_frequency)
            ydata = np.array(avrg_single_ion)
            #a0 = max(ydata)
            #a1 = xdata[np.argmax(ydata)]
            #a2 = sum(y * (x - a1)**2)
            #ydata2 = gaussian_func(xdata, a0, a1, a2)
            #plt.plot(xdata, ydata2)

            plt.plot(x_fit, avrg_fit)
            #print(fit_paras[ion_index])
            plt.title(('This is channel {}, '+r'$\mu $'+'= {:.4f}, '+r'$\sigma = {:.4f}$').format(ion_index, fit_paras[ion_index][1], fit_paras[ion_index][2]))
            #plt.title(('This is ion {}, '+r'$\mu $'+'= {:.4f}, '+r'$\sigma = {:.4f}$').format(ion_index, fit_paras[ion_index][1], np.sqrt(2)/fit_paras[ion_index][2]))
            plt.xlabel('frequency '+' (MHz)')
            plt.ylabel('average count')

        plt.tight_layout()

    return fit_paras


#gaussian fit
def plot_spectrum_useGassianfit(file_name, threshold, prominence,ionnumber, resolution=0.1,average_flag=True):
    """
    Plot each ion's frequency-response curve and extract the peak frequency
    with a Gaussian fit.  For several ions the curves can first be averaged
    to obtain a single average peak frequency.

    Intended for single-peak frequency-scan curves; mainly used to locate
    the red/blue sideband peak while varying the light intensity, in order
    to analyse motional-mode frequencies and the AC Stark shift.

    Parameters
    ----------
    file_name : str
        Path to the frequency-scan data file.
    threshold : float
        Peak-height threshold passed to detect_peaks.
    prominence : float
        Peak-prominence threshold passed to detect_peaks.
    ionnumber : int
        Number of ions.
    resolution : float, optional
        Frequency resolution for peak thinning, by default 0.1.
    average_flag : bool, optional
        For several ions, whether to average all curves first, by default True.

    Returns
    -------
    float or list
        If average_flag is True, the fitted mean (mu) of the averaged curve;
        otherwise a list with each ion's fitted peak frequency.
    """
    x_data_list, y_data_list = pre_process_data(file_name,ionnumber=ionnumber)
    plt.figure()
    peaks_list=[[] for _ in range(ionnumber)]
    mu_list=[]
    if average_flag:
        # Point-by-point average of all ions' curves.
        x_data,y_data=np.sum(x_data_list,axis=0)/ionnumber,np.sum(y_data_list,axis=0)/ionnumber
        peaks = detect_peaks(x_data, y_data, threshold, prominence, resolution)
        plt.plot(x_data, y_data)
        popt, _ = curve_fit(gaussian_func, x_data,  y_data,
                            p0=[1,np.mean(x_data),np.std(x_data) ])
        # Get the fitted Gaussian parameters
        amplitude,mu, sigma  = popt
        mu_list.append(mu)
        print(f"Fitted Gaussian parameters: mu = {mu}, sigma = { sigma}, amplitude = {amplitude}")
        # Step 4: Plot the Gaussian fit
        plt.plot(x_data, gaussian_func(x_data, *popt))
        plt.xlabel(f'Frequency')
        plt.ylabel(f'Amplitude')
        # Mark and annotate the detected peaks of the averaged curve.
        for peak in peaks:
            plt.plot(x_data[peak], y_data[peak], marker='o', markersize=5, color='red')
            plt.text(x_data[peak], y_data[peak], f'({x_data[peak]:.3f}, {y_data[peak]:.2f})', fontsize=8)
        plt.show()
        return  mu
    else:
        # Fit every ion's curve individually.
        for i in range(ionnumber):
            x_data, y_data=x_data_list[i], y_data_list[i]
            peaks = detect_peaks(x_data, y_data, threshold, prominence, resolution)
            plt.plot(x_data, y_data)
            popt, _ = curve_fit(gaussian_func, x_data,  y_data,
                                p0=[1,np.mean(x_data),np.std(x_data) ])
            # Get the fitted Gaussian parameters
            amplitude,mu, sigma  = popt
            mu_list.append(mu)
            print(f"Fitted Gaussian parameters: mu = {mu}, sigma = { sigma}, amplitude = {amplitude}")
            # Step 4: Plot the Gaussian fit
            plt.plot(x_data, gaussian_func(x_data, *popt))
            plt.xlabel(f'{i+1}th ion Frequency')
            plt.ylabel(f'{i+1}th ion Amplitude')
            for peak in peaks:
                plt.plot(x_data[peak], y_data[peak], marker='o', markersize=5, color='red')
                plt.text(x_data[peak], y_data[peak], f'({x_data[peak]:.3f}, {y_data[peak]:.2f})', fontsize=8)
            for peak in peaks:
                peaks_list[i].append(x_data[peak])
        plt.show()
        return  mu_list
def plot_spectrum_usebiasedGassianfit(file_name, threshold, prominence,ionnumber, resolution=0.1,average_flag=True):
    """
    Plot each ion's frequency-response curve and extract the peak frequency
    by fitting a Gaussian with a constant offset (biased Gaussian).  For
    several ions the curves can first be averaged to obtain a single
    average peak frequency.

    Intended for single-peak frequency-scan curves; mainly used to locate
    the red/blue sideband peak while varying the light intensity, in order
    to analyse motional-mode frequencies and the AC Stark shift.

    Parameters
    ----------
    file_name : str
        Path to the frequency-scan data file.
    threshold : float
        Peak-height threshold passed to detect_peaks.
    prominence : float
        Peak-prominence threshold passed to detect_peaks.
    ionnumber : int
        Number of ions.
    resolution : float, optional
        Frequency resolution for peak thinning, by default 0.1.
    average_flag : bool, optional
        For several ions, whether to average all curves first, by default True.

    Returns
    -------
    float or list
        If average_flag is True, the fitted mean (mu) of the averaged curve;
        otherwise a list with each ion's fitted peak frequency.
    """
    x_data_list, y_data_list = pre_process_data(file_name,ionnumber=ionnumber)
    plt.figure()
    peaks_list=[[] for _ in range(ionnumber)]
    mu_list=[]
    if average_flag:
        # Point-by-point average of all ions' curves.
        x_data,y_data=np.sum(x_data_list,axis=0)/ionnumber,np.sum(y_data_list,axis=0)/ionnumber
        peaks = detect_peaks(x_data, y_data, threshold, prominence, resolution)
        plt.plot(x_data, y_data)
        # Note the negative initial amplitude: the biased fit is set up for
        # dip-shaped curves (peak below the offset).
        popt, _ = curve_fit(biased_gaussian_func, x_data,  y_data,
                            p0=[-1,np.mean(x_data),np.std(x_data) ,1])
        # Get the fitted Gaussian parameters
        amplitude,mu, sigma,b  = popt
        mu_list.append(mu)
        print(f"Fitted Gaussian parameters: mu = {mu}, sigma = { sigma}, amplitude = {amplitude}")
        # Step 4: Plot the Gaussian fit
        plt.plot(x_data, biased_gaussian_func(x_data, *popt))
        plt.xlabel(f'Frequency')
        plt.ylabel(f'Amplitude')
        for peak in peaks:
            plt.plot(x_data[peak], y_data[peak], marker='o', markersize=5, color='red')
            plt.text(x_data[peak], y_data[peak], f'({x_data[peak]:.3f}, {y_data[peak]:.2f})', fontsize=8)
        plt.show()
        return  mu
    else:
        # Fit every ion's curve individually.
        for i in range(ionnumber):
            x_data, y_data=x_data_list[i], y_data_list[i]
            peaks = detect_peaks(x_data, y_data, threshold, prominence, resolution)
            plt.plot(x_data, y_data)
            popt, _ = curve_fit(biased_gaussian_func, x_data,  y_data,
                                p0=[-1,np.mean(x_data),np.std(x_data),1 ])
            # Get the fitted Gaussian parameters
            amplitude,mu, sigma ,b = popt
            mu_list.append(mu)
            print(f"Fitted Gaussian parameters: mu = {mu}, sigma = { sigma}, amplitude = {amplitude}")
            # Step 4: Plot the Gaussian fit
            plt.plot(x_data, biased_gaussian_func(x_data, *popt))
            plt.xlabel(f'{i+1}th ion Frequency')
            plt.ylabel(f'{i+1}th ion Amplitude')
            for peak in peaks:
                plt.plot(x_data[peak], y_data[peak], marker='o', markersize=5, color='red')
                plt.text(x_data[peak], y_data[peak], f'({x_data[peak]:.3f}, {y_data[peak]:.2f})', fontsize=8)
            for peak in peaks:
                peaks_list[i].append(x_data[peak])
        plt.show()
        return  mu_list

def calculatesidebandpopulationbyGaussianfit(filename,correction_flag=False,ionfidelity_list=None,stdcacluation_flag=False,correction_method=1,cyclefileflag=False,fit_flag=True):
    """
    Compute the sideband-transition population from one frequency-scan data
    set (and thereby estimate the phonon number).  Heating measurements are
    usually single-ion data.

    Parameters
    ----------
    filename : str
        Path of the scan data file.
    correction_flag : bool, optional
        Whether to apply detection-error correction, by default False.
    ionfidelity_list : list, optional
        ionfidelity_list[ionindex] = [dark fidelity of this ion,
        bright fidelity of this ion]; required when correction_flag is True.
        By default None.
    stdcacluation_flag : bool, optional
        Whether to bootstrap a standard deviation per scan point, by default False.
    correction_method : int, optional
        Correction strategy forwarded to the correction routine, by default 1.
    cyclefileflag : bool, optional
        Whether the file stores cycled scans that must be unfolded first,
        by default False.
    fit_flag : bool, optional
        Whether to fit a biased Gaussian to the population curve; when False,
        the curve's maximum is returned instead of a fit amplitude.
        By default True.

    Returns
    -------
    float or tuple
        If fit_flag: |fitted amplitude| (plus its uncertainty when
        stdcacluation_flag is set).  Otherwise: the maximum population
        (plus the bootstrap std at that point when stdcacluation_flag is set).
    """
    list_scanparameter, list_rawcountsvalues, scanparametername = raw_count(path=filename)
    assert len(list_rawcountsvalues) > 0
    

    if cyclefileflag:
        # Unfold cycled scans into a flat scan-parameter/counts sequence.
        list_scanparameter,list_rawcountsvalues=cycle_raw_counts(list_scanparameter,list_rawcountsvalues)
        
    repeat, ionnumber = list_rawcountsvalues[0].shape
    time_coefficient=np.array(list_scanparameter)
    if correction_flag:
        assert ionfidelity_list is not None, "warning:ionfidelity_list is None"
        Populationresult = dataprocess_with_detection_error_croection(ionfidelity_list, list_rawcountsvalues, target='ion',correction_method=correction_method)
    else:
        Populationresult = dataprocess(list_rawcountsvalues, target='ion')


    if stdcacluation_flag:
        # Bootstrap a standard deviation for every scan point.
        stdS=np.zeros(len(list_rawcountsvalues))
        for i, result in enumerate( list_rawcountsvalues):
            stdS[i]=bootstrap_std_singledataset(dataset=result, function=datasetToSingleionpopulation,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)

    
    # Populationresult[stateindex] is a list of populations with length len(list_scanparameter)
    averageS=Populationresult[0]
    
    if fit_flag:
        if stdcacluation_flag:
            S_fit, sigma_S, params, params_covariance=calculate_Y_with_uncertainty_fit(t=time_coefficient, X=averageS, deltaX=stdS, function=biased_gaussian_func, p0=[1,np.mean(time_coefficient),np.std(time_coefficient) ,0])
        else:
            params, params_covariance = curve_fit(f=biased_gaussian_func, xdata=time_coefficient, ydata=averageS,p0=[1,np.mean(time_coefficient),np.std(time_coefficient) ,0])
        A, w, phi,b= params
   
    plt.figure(figsize=(10, 6))
    plt.scatter(list_scanparameter, averageS, label='exp Data')
    if fit_flag:
        plt.plot(list_scanparameter, biased_gaussian_func(time_coefficient, A, w, phi,b), label='Fit')
    if stdcacluation_flag:
        plt.errorbar(list_scanparameter, averageS, yerr=stdS,  fmt='none', color='red', label='exp Data with errorbar', capsize=4)
    plt.title(r'$P_{SB}$')
    plt.xlabel(r'freqency (MHz)')
    plt.ylabel('Population')
    plt.show()
    if fit_flag:
        print('fit_amplitude P', A)
    if fit_flag:
        if stdcacluation_flag:
            return np.abs(A),sigma_S
        else:
            return np.abs(A)
    else:
        if stdcacluation_flag:
            return np.max(averageS),stdS[np.argmax(averageS)]
        else:
            return np.max(averageS)


def thermal_state_fit(fileName, pt0, nth0, eta=0.03, plot=True):
    """
    Fit a thermal-state Rabi model (Laguerre-weighted Fock-state sum) to an
    averaged data set to extract the mean phonon number nth and pi-time.

    Has not been used on the 2d setup.

    Parameters
    ----------
    fileName : str
        Path of the data file, forwarded to ``average``.
    pt0 : float
        Initial guess for the pi-time ``pt``.
    nth0 : float
        Initial guess for the mean phonon number ``nth``; also sets the
        Fock-state cutoff maxN.
    eta : float, optional
        Lamb-Dicke parameter, by default 0.03.
    plot : bool, optional
        Whether to plot data and best fit, by default True.
        NOTE(review): a figure is created even when plot=False — confirm
        this is intended.

    Returns
    -------
    None
        Results are printed (nth and pi-time); nothing is returned even
        though R2 and ufloat confidence intervals are computed locally.
    """
    rawdata = average(fileName)
    rawdata = np.array([rawdata[0], [i[0] for i in rawdata[1]]])
    dat = np.transpose(rawdata)
    # Shift times so the scan starts at t = 0.
    dat[:, 0] = dat[:, 0] - np.min(dat[:, 0])
    # Fock cutoff: highest n whose thermal weight is still ~1% of the total.
    maxN = np.round(-np.log(0.01)/np.log(1 + 1 / nth0)).astype(int)
    print(f"maxN: {maxN}")  # print maxN for debugging

    # Sign s flips the model when the curve starts low and rises.
    if np.mean(dat[:3, 1]) < np.mean(dat[:, 1]):
        s = -1
    else:
        s = 1

    def func(t, A, B, nth, pt, lmbd, alpha):
        # Thermal sum over Fock states n: weight nth^n/(1+nth)^(n+1), with a
        # stretched-exponential dephasing exp(-lmbd (n+1)^0.7), carrier
        # frequency scaled by the Laguerre factor, and overall decay alpha.
        terms = [nth ** i / (1 + nth) ** (i + 1) * np.exp(-lmbd * (i + 1) ** 0.7) * np.cos(laguerre(i)(eta ** 2) / np.sqrt(i + 1) * np.pi/pt * t) for i in range(maxN)]
        return (A * s) / 2 * sum(terms) * np.exp(-alpha*t) + B

    model = Model(func)
    params = model.make_params(A=1, B=0.5, nth=nth0, pt=pt0, lmbd=0.1/np.max(dat[:, 0]), alpha = 0)
   # params['pt'].vary = False  # This will fix the value of pt at pt0
    params['nth'].min = 0
    params['A'].min = 0.5
    params['A'].max = 1.1
    params['lmbd'].max = 1/max(dat[:,0])
    params['lmbd'].min = 0
    params['alpha'].max = 0.004
    fit = model.fit(dat[:, 1], params, t=dat[:, 0])
    fit_params = fit.params
    # Coefficient of determination of the fit (computed but not returned).
    R2 = 1 - fit.residual.var() / np.var(dat[:, 1])
    conf_interval_ufloat = {param: ufloat(fit_params[param].value, fit_params[param].stderr)
                            for param in fit_params if fit_params[param].stderr is not None}
    plt.figure()
    if plot:
        plt.scatter(dat[:, 0], dat[:, 1], color='blue')
        plt.plot(dat[:, 0], fit.best_fit, color='red')
        plt.show()
    print("nth: ", fit.params['nth'].value)
    print("pi time: ", fit.params['pt'].value)
    
    
from scipy.optimize import minimize

def detection_error_croection_Plot(ionfidelity_list,filename,target='state',compare_flag=False,correction_method=1):
    """
    Plot detection-error-corrected state populations (or per-ion brightness)
    against the scanned parameter, optionally together with the raw curves.

    Correction strategy by system size:
    - <=9 ions  (small): maximum-likelihood estimate of the true state
      populations (Lu Yao's thesis), correction_method=1
    - <=14 ions (medium): multiply the inverse correction matrix onto the
      state populations and renormalise, correction_method=2
    - >=15 ions (large): the state-population distribution cannot be written
      out; quantities such as squeezing parameters need a rewrite,
      correction_method=3

    Parameters
    ----------
    ionfidelity_list : list
        ionfidelity_list[ionindex] = [dark fidelity of this ion,
        bright fidelity of this ion]
    filename : str
        Path of the scan data file.
    target : str, optional
        What to plot, 'state' or 'ion', by default 'state'.
    compare_flag : bool, optional
        Whether to draw the uncorrected curves in the same figure for
        comparison, by default False.
    correction_method : int, optional
        Correction strategy (see above), by default 1.

    Raises
    ------
    ValueError
        The correction matrix is singular; check the supplied ionfidelity_list.
    ValueError
        The optimisation did not converge; check the supplied ionfidelity_list.
    """

    assert target in {'state','ion'}
    list_scanparameter, list_rawcountsvalues, scanparametername=raw_count(path=filename)
    assert len(list_rawcountsvalues)>0
    repeat, ionnumber = list_rawcountsvalues[0].shape
    if ionnumber>=15:
        print("The number of ions is too large, state populartion is changed to ion brightness/darkness.")
        target='ion'

    num_states = 2 ** ionnumber
    correctionM_list=[]
    
    # Per-ion 2x2 confusion matrix built from [dark fidelity, bright fidelity].
    for i in range(ionnumber):
        A = np.array([[ionfidelity_list[i][0], 1 - ionfidelity_list[i][1]],[1 -  ionfidelity_list[i][0],  ionfidelity_list[i][1]]])
        correctionM_list.append(A)
        
    if target=='state':
        multicolors = plt.cm.viridis(np.linspace(0, 1, 2**ionnumber))
        # Kronecker product of the per-ion matrices gives the full 2^N x 2^N
        # confusion matrix of the register.
        correctioMpre = correctionM_list[0]  # Start with the first matrix
        for matrix in correctionM_list[1:]:  # Iterate over the remaining matrices
            correctioMpre = np.kron(correctioMpre, matrix)
        try:
            correctionM = np.linalg.inv(correctioMpre)
        except np.linalg.LinAlgError:
            raise ValueError("Matrix is singular and cannot be inverted.")
        
        if compare_flag:
            result_beforecorrect=[[] for _ in range(2**ionnumber)]
        result_aftercorrect=[[] for _ in range(2**ionnumber)]
        
        for raw_counts in list_rawcountsvalues:
            state_counts = np.zeros(num_states, dtype=int)
            # Iterate over each image and calculate the state of the ions
            for k in range(repeat):
            # Convert the brightness/darkness states into an integer (binary number)
                state = 0
                for j in range(ionnumber):
                    state |= int(raw_counts[k, j]) << (ionnumber-j-1) # Shift the bit and OR it
                state_counts[state] += 1
            # Calculate probabilities by dividing the count by the number of repetitions
            probabilities = np.array(state_counts / repeat,dtype='float')
            correctedprobabilities_initalguess=correctionM.dot(probabilities)
            Q_init = correctedprobabilities_initalguess/np.sum(correctedprobabilities_initalguess)
            if correction_method==1:
                # Constraint: sum of elements in Q is 1
                constraints = {'type': 'eq', 'fun': lambda Q: np.sum(Q) - 1}
                # Bounds:Q should be non-negative (e.g., Q >= 0)
                bounds = [(0, 1) for _ in range(len(probabilities))]  #
                # Solve the optimization problem
                result_correctedprobabilities = minimize(lambda Q: np.linalg.norm(Q - correctedprobabilities_initalguess), Q_init, constraints=constraints, bounds=bounds)
                if result_correctedprobabilities.success:
                    correctedprobabilities=result_correctedprobabilities.x
                else:
                    raise ValueError("Optimization did not converge.")
            elif correction_method==2:
                # Medium scale: renormalised inverse-matrix estimate only.
                correctedprobabilities= Q_init
            else:
                print("correction_method should be 1 or 2 when correcting state populartion.")
            for i in range(2**ionnumber):
                result_aftercorrect[i].append(correctedprobabilities[i])
                if compare_flag:
                    result_beforecorrect[i].append(probabilities [i])
                    
                    
        plt.figure(figsize=(10, 6))
        plt.xlabel("duration")
        plt.ylabel("state populartion")
        plt.title("timescanresult"+filename)
        for i in range(2**ionnumber):
            # Plot the line
            plt.plot(list_scanparameter, result_aftercorrect[i], label=format(i, f'0{ionnumber}b')+" state",color=multicolors[i])
            # Plot the scatter
            plt.scatter(list_scanparameter,result_aftercorrect[i],color=multicolors[i])
            if compare_flag:
                plt.plot(list_scanparameter, result_beforecorrect[i], label=format(i, f'0{ionnumber}b')+" state before correction",color=multicolors[i])
        plt.legend()
        plt.show()
        
    elif target=='ion':
        if compare_flag:
            result_beforecorrect=[[] for _ in range(ionnumber)]
        result_aftercorrect=[[] for _ in range(ionnumber)]
        
        for raw_counts in list_rawcountsvalues:
            # Per-ion bright fraction over the repetitions of this scan point.
            counts = np.array([sum(raw_counts)/repeat])
            counts=counts[0]
            for i in range(ionnumber):
                ionpopulartion=np.zeros(2)
                ionpopulartion[0]=1-counts[i]
                ionpopulartion[1]=counts[i]
                try:
                    correctionM = np.linalg.inv(correctionM_list[i])
                except np.linalg.LinAlgError:
                    raise ValueError("Matrix is singular and cannot be inverted.")
                correctedprobabilities_initalguess=correctionM.dot(ionpopulartion)
                Q_init = correctedprobabilities_initalguess/np.sum(correctedprobabilities_initalguess)
                constraints = {'type': 'eq', 'fun': lambda Q: np.sum(Q) - 1}
                # Bounds:Q should be non-negative (e.g., Q >= 0)
                bounds = [(0, 1) for _ in range(len(ionpopulartion))] 
                result_correctedprobabilities = minimize(lambda Q: np.linalg.norm(Q - correctionM.dot(ionpopulartion)), Q_init, constraints=constraints, bounds=bounds)
                if result_correctedprobabilities.success:
                    correctedprobabilities=result_correctedprobabilities.x
                else:
                    raise ValueError("Optimization did not converge.")
                result_aftercorrect[i].append(correctedprobabilities[1])
                if compare_flag:
                    result_beforecorrect[i].append(counts[i])
        
        plt.figure(figsize=(10, 6))
        plt.xlabel("duration")
        plt.ylabel("ion brightdark")
        plt.title("timescanresult"+filename)
        for i in range(ionnumber):
            # Plot the line
            plt.plot(list_scanparameter, result_aftercorrect[i], label=f'{i+1}th ion')
            # Plot the scatter
            plt.scatter(list_scanparameter,result_aftercorrect[i])
            if compare_flag:
                plt.plot(list_scanparameter, result_beforecorrect[i], label=f'{i+1}th ion before correction')
        plt.legend()
        plt.show()





def raw_counts_To_state_populartion(raw_counts,repeat,ionnumber):
    """
    Convert one scan point's raw counts (shape repeat x ionnumber) into the
    population of every multi-ion basis state.

    Bit order: the first ion is the most significant bit, 0 = dark and
    1 = bright.  E.g. with four ions, '1010' means ions 1 and 3 are bright
    and ions 2 and 4 dark; '1000' means only the first ion is bright.
    Classical memory limits this to at most 2**14 state populations.

    Parameters
    ----------
    raw_counts : array_like
        Single element of list_rawcountsvalues, shape (repeat, ionnumber).
    repeat : int
        Number of repetitions per scan point.
    ionnumber : int
        Number of ions.

    Returns
    -------
    array_like
        State populations, shape (2**ionnumber,).
    """
    assert ionnumber<=14, "to have state populartion,ionnumber should be less than or equal to 14"
    tallies = np.zeros(2 ** ionnumber, dtype=int)
    # Tally the basis state observed in every repetition.
    for shot in range(repeat):
        # Pack the row of 0/1 brightness values into one integer label,
        # first ion in the most significant bit.
        label = 0
        for bit in range(ionnumber):
            label = (label << 1) | int(raw_counts[shot, bit])
        tallies[label] += 1
    # Normalise the tallies by the repetition count to get probabilities.
    return np.array(tallies / repeat, dtype='float')

def raw_counts_To_ion_brightdarkcounts(raw_counts,repeat):
    """
    Convert one scan point's raw counts (shape repeat x ionnumber of 0/1
    values) into each ion's bright fraction over the repetitions.

    Parameters
    ----------
    raw_counts : array_like
        Single element of list_rawcountsvalues, shape [repeat, ionnumber].
        May be any array-like (also plain nested lists).
    repeat : int
        Number of repetitions per scan point.

    Returns
    -------
    ndarray
        Bright fraction per ion, shape [ionnumber].
    """
    # Sum over the repetition axis and normalize. Replaces the original's
    # builtin sum() plus redundant np.array([...])[0] wrapping, and also
    # accepts nested-list input.
    return np.asarray(raw_counts).sum(axis=0) / repeat

def singledataset_state_populartion_correction(statepopulation,ionnumber,ionfidelity_list,correction_method=1):
    """
    Correct a single state-population vector for detection errors.

    Parameters
    ----------
    statepopulation : array_like
        Measured state populations, shape [2**ionnumber].
    ionnumber : int
        Number of ions.
    ionfidelity_list : list
        ionfidelity_list[ionindex] = [dark_fidelity, bright_fidelity],
        shape [ionnumber, 2].
    correction_method : int, optional
        1: constrained least-squares projection onto the probability
        simplex (default); 2: plain renormalized matrix inversion.

    Returns
    -------
    ndarray
        Corrected populations, shape [2**ionnumber].

    Raises
    ------
    ValueError
        If the correction matrix is singular, the optimization fails to
        converge, or correction_method is not 1 or 2.
    """
    from scipy.optimize import minimize  # function-scope import keeps the block self-contained

    if correction_method not in (1, 2):
        # The original only printed a warning here and then hit an
        # UnboundLocalError when returning 'correctedprobabilities'.
        raise ValueError("correction_method should be 1 or 2 when correcting state populartion.")

    # Per-ion 2x2 detection matrices: column = true state, row = detected.
    correctionM_list = []
    for i in range(ionnumber):
        A = np.array([[ionfidelity_list[i][0], 1 - ionfidelity_list[i][1]],
                      [1 - ionfidelity_list[i][0], ionfidelity_list[i][1]]])
        correctionM_list.append(A)
    # Kronecker product builds the full 2^N x 2^N detection matrix.
    correctioMpre = correctionM_list[0]  # Start with the first matrix
    for matrix in correctionM_list[1:]:  # Iterate over the remaining matrices
        correctioMpre = np.kron(correctioMpre, matrix)
    try:
        correctionM = np.linalg.inv(correctioMpre)
    except np.linalg.LinAlgError:
        raise ValueError("Matrix is singular and cannot be inverted.")

    probabilities = statepopulation
    correctedprobabilities_initalguess = correctionM.dot(probabilities)
    Q_init = correctedprobabilities_initalguess / np.sum(correctedprobabilities_initalguess)

    if correction_method == 1:
        # Physical requirements of a probability distribution:
        # sum(Q) == 1 and 0 <= Q <= 1.
        constraints = {'type': 'eq', 'fun': lambda Q: np.sum(Q) - 1}
        bounds = [(0, 1) for _ in range(len(probabilities))]
        # Solve the constrained least-squares problem.
        result_correctedprobabilities = minimize(lambda Q: np.linalg.norm(Q - correctionM.dot(probabilities)),
                                                    Q_init, constraints=constraints, bounds=bounds)
        if result_correctedprobabilities.success:
            correctedprobabilities = result_correctedprobabilities.x
        else:
            raise ValueError("Optimization did not converge.")
    else:
        # correction_method == 2: renormalized inverse only.
        correctedprobabilities = Q_init
    return correctedprobabilities


def singledataset_ionbrightdarkcounts_correction(ionbrightdarkcounts,ionnumber,ionfidelity_list,correction_method=1):
    """
    Correct per-ion bright fractions for detection errors, ion by ion.

    For each ion the measured (dark, bright) distribution is multiplied by
    the inverse of its 2x2 detection matrix, then projected back onto a
    valid probability distribution by a constrained least-squares fit.

    Parameters
    ----------
    ionbrightdarkcounts : array_like
        Measured bright fraction per ion, shape [ionnumber].
    ionnumber : int
        Number of ions.
    ionfidelity_list : list
        ionfidelity_list[ionindex] = [dark_fidelity, bright_fidelity].
    correction_method : int, optional
        Present for signature compatibility; not consulted here.

    Returns
    -------
    ndarray
        Corrected bright fraction per ion, shape [ionnumber].

    Raises
    ------
    ValueError
        If a detection matrix is singular or the optimization fails.
    """
    corrected = np.zeros_like(ionbrightdarkcounts)
    for idx in range(ionnumber):
        dark_fid = ionfidelity_list[idx][0]
        bright_fid = ionfidelity_list[idx][1]
        detection = np.array([[dark_fid, 1 - bright_fid],
                              [1 - dark_fid, bright_fid]])
        try:
            inverse = np.linalg.inv(detection)
        except np.linalg.LinAlgError:
            raise ValueError("Matrix is singular and cannot be inverted.")

        # Measured (dark, bright) distribution for this ion.
        measured = np.array([1 - ionbrightdarkcounts[idx], ionbrightdarkcounts[idx]])
        raw_guess = inverse.dot(measured)
        normalized_guess = raw_guess / np.sum(raw_guess)

        # Project onto the probability simplex: sum(Q) == 1, 0 <= Q <= 1.
        fit = minimize(lambda Q: np.linalg.norm(Q - inverse.dot(measured)),
                       normalized_guess,
                       constraints={'type': 'eq', 'fun': lambda Q: np.sum(Q) - 1},
                       bounds=[(0, 1) for _ in range(len(measured))])
        if not fit.success:
            raise ValueError("Optimization did not converge.")
        corrected[idx] = fit.x[1]
    return corrected


def dataprocess(list_rawcountsvalues, target='state'):
    """
    Convert raw scan data into per-curve traces WITHOUT detection-error
    correction.

    Parameters
    ----------
    list_rawcountsvalues : list
        Output of raw_count(); shape [scanlistlength, repeat, ionnumber].
    target : str, optional
        'state' for basis-state populations, 'ion' for per-ion bright
        fractions, by default 'state'. Forced to 'ion' for 15+ ions.

    Returns
    -------
    list
        Shape [2**ionnumber, scanlistlength] for 'state' or
        [ionnumber, scanlistlength] for 'ion' — ready to plot against the
        scan-parameter list.
    """
    assert target in {'state', 'ion'}
    assert len(list_rawcountsvalues) > 0
    repeat, ionnumber = list_rawcountsvalues[0].shape

    # 2**15 states would exceed what we can reasonably handle; fall back.
    if ionnumber >= 15:
        print("The number of ions is too large, state populartion is changed to ion brightness/darkness.")
        target = 'ion'

    if target == 'state':
        per_point = [raw_counts_To_state_populartion(rc, repeat, ionnumber)
                     for rc in list_rawcountsvalues]
        n_curves = 2 ** ionnumber
    else:
        per_point = [raw_counts_To_ion_brightdarkcounts(rc, repeat)
                     for rc in list_rawcountsvalues]
        n_curves = ionnumber
    # Transpose [scan point][curve] -> [curve][scan point].
    return [[point[i] for point in per_point] for i in range(n_curves)]

def dataprocess_with_detection_error_croection(ionfidelity_list, list_rawcountsvalues, target='state',correction_method=1):
    """
    Convert raw scan data into per-curve traces WITH detection-error
    correction.

    Parameters
    ----------
    ionfidelity_list : list
        ionfidelity_list[ionindex] = [dark_fidelity, bright_fidelity],
        shape [ionnumber, 2].
    list_rawcountsvalues : list
        Output of raw_count(); shape [scanlistlength, repeat, ionnumber].
    target : str, optional
        'state' for basis-state populations, 'ion' for per-ion bright
        fractions, by default 'state'. Forced to 'ion' for 15+ ions.
    correction_method : int, optional
        1: constrained least-squares projection (default);
        2: plain renormalized matrix inversion. Only consulted for 'state'.

    Returns
    -------
    list
        Shape [2**ionnumber, scanlistlength] for 'state' or
        [ionnumber, scanlistlength] for 'ion'.

    Raises
    ------
    ValueError
        If the correction matrix is singular (check ionfidelity_list), the
        optimization does not converge, or correction_method is invalid
        for target='state'.
    """
    assert target in {'state', 'ion'}
    assert len(list_rawcountsvalues) > 0
    repeat, ionnumber = list_rawcountsvalues[0].shape
    if ionnumber >= 15:
        print("The number of ions is too large, state populartion is changed to ion brightness/darkness.")
        target = 'ion'
    if target == 'state' and correction_method not in (1, 2):
        # Fail fast: the original only printed a warning and then reused a
        # stale (or unbound) 'correctedprobabilities' value.
        raise ValueError("correction_method should be 1 or 2 when correcting state populartion.")

    # Per-ion 2x2 detection matrices: column = true state, row = detected.
    correctionM_list = []
    for i in range(ionnumber):
        A = np.array([[ionfidelity_list[i][0], 1 - ionfidelity_list[i][1]],
                      [1 - ionfidelity_list[i][0], ionfidelity_list[i][1]]])
        correctionM_list.append(A)

    if target == 'state':
        # Kronecker product builds the full 2^N x 2^N detection matrix.
        correctioMpre = correctionM_list[0]  # Start with the first matrix
        for matrix in correctionM_list[1:]:  # Iterate over the remaining matrices
            correctioMpre = np.kron(correctioMpre, matrix)
        try:
            correctionM = np.linalg.inv(correctioMpre)
        except np.linalg.LinAlgError:
            raise ValueError("Matrix is singular and cannot be inverted.")

        result_aftercorrect = [[] for _ in range(2 ** ionnumber)]

        for raw_counts in list_rawcountsvalues:
            probabilities = raw_counts_To_state_populartion(raw_counts, repeat, ionnumber)
            correctedprobabilities_initalguess = correctionM.dot(probabilities)
            Q_init = correctedprobabilities_initalguess / np.sum(correctedprobabilities_initalguess)
            if correction_method == 1:
                # Physical requirements: sum(Q) == 1 and 0 <= Q <= 1.
                constraints = {'type': 'eq', 'fun': lambda Q: np.sum(Q) - 1}
                bounds = [(0, 1) for _ in range(len(probabilities))]
                # Solve the constrained least-squares problem.
                result_correctedprobabilities = minimize(lambda Q: np.linalg.norm(Q - correctionM.dot(probabilities)),
                                                         Q_init, constraints=constraints, bounds=bounds)
                if result_correctedprobabilities.success:
                    correctedprobabilities = result_correctedprobabilities.x
                else:
                    raise ValueError("Optimization did not converge.")
            else:
                # correction_method == 2: renormalized inverse only.
                correctedprobabilities = Q_init
            for i in range(2 ** ionnumber):
                result_aftercorrect[i].append(correctedprobabilities[i])
        return result_aftercorrect

    elif target == 'ion':
        result_aftercorrect = [[] for _ in range(ionnumber)]

        for raw_counts in list_rawcountsvalues:
            counts = raw_counts_To_ion_brightdarkcounts(raw_counts, repeat)
            for i in range(ionnumber):
                # Measured (dark, bright) distribution for this ion.
                ionpopulartion = np.zeros(2)
                ionpopulartion[0] = 1 - counts[i]
                ionpopulartion[1] = counts[i]
                try:
                    correctionM = np.linalg.inv(correctionM_list[i])
                except np.linalg.LinAlgError:
                    raise ValueError("Matrix is singular and cannot be inverted.")
                correctedprobabilities_initalguess = correctionM.dot(ionpopulartion)
                Q_init = correctedprobabilities_initalguess / np.sum(correctedprobabilities_initalguess)
                constraints = {'type': 'eq', 'fun': lambda Q: np.sum(Q) - 1}
                bounds = [(0, 1) for _ in range(len(ionpopulartion))]
                result_correctedprobabilities = minimize(lambda Q: np.linalg.norm(Q - correctionM.dot(ionpopulartion)),
                                                         Q_init, constraints=constraints, bounds=bounds)
                if result_correctedprobabilities.success:
                    correctedprobabilities = result_correctedprobabilities.x
                else:
                    raise ValueError("Optimization did not converge.")
                result_aftercorrect[i].append(correctedprobabilities[1])
        return result_aftercorrect


def plotAnyScanwithHorizontalaxisreplaced(filename,newHorizontalaxis,addtionaltask='phaseReshape',newHorizontalname='phase/Pi',title='scanresult',target='state',correction_flag=False,ionfidelity_list=None,correction_method=1):
    """
    Plot any parameter scan (state populations or per-ion bright fractions)
    with externally supplied x-axis values/label, then run an optional
    additional analysis task. The additional tasks only handle the
    two-ion case.

    Additional tasks
    ----------------
    'phaseReshape'     : phase scan plotted with the x axis rescaled to phase/Pi
                         (no extra output beyond the main plot).
    'time_coefficient' : prints a table of gate-time coefficient vs the 00+11
                         population (intended for corrected/correlated data).
    'Amp_all'          : prints a table of overall sideband amplitude vs the
                         11 population.
    'Amp_ratio'        : prints a table of red/blue amplitude ratio vs the
                         11 population.
    'Fre_reverse'      : prints a table of opposite-direction sideband detuning
                         vs the 01+10 population.
    'Spin_freq_scan'   : plots 00+11-01-10 vs common-mode detuning and fits it
                         with a biased sine, printing the fit parameters.
    'Parity_measure'   : plots 00+11-01-10 vs the analysis-pulse phase after
                         the MS gate, fits a biased sine and prints the fit
                         parameters.

    Parameters
    ----------
    filename : str
        Path to the scan data file.
    newHorizontalaxis : list
        Replacement x-axis values, shape [scanlistlength].
    addtionaltask : str, optional
        One of the tasks listed above, by default 'phaseReshape'.
    newHorizontalname : str, optional
        X-axis label, by default 'phase/Pi'.
    title : str, optional
        Plot title, by default 'scanresult'.
    target : str, optional
        'state' for state populations, 'ion' for bright fractions,
        by default 'state'. Forced to 'ion' for 15+ ions.
    correction_flag : bool, optional
        Apply detection-error correction, by default False.
    ionfidelity_list : list, optional
        ionfidelity_list[ionindex] = [dark_fidelity, bright_fidelity],
        by default None. Required when correction_flag is True.
    correction_method : int, optional
        Correction method forwarded to the correction routine.
    """
    # This plots any scan, but with the horizontal axis replaced by the caller.
    # ionfidelity_list layout: ionfidelity_list[ionindex] = [dark_fidelity, bright_fidelity]
    assert target in {'state', 'ion'}
    list_scanparameter, list_rawcountsvalues, scanparametername = raw_count(path=filename)
    assert len(list_rawcountsvalues) > 0
    repeat, ionnumber = list_rawcountsvalues[0].shape
    if ionnumber >= 15:
        print("The number of ions is too large, state populartion is changed to ion brightness/darkness.")
        target = 'ion'
    if correction_flag:
        assert ionfidelity_list is not None, "warning:ionfidelity_list is None"
        result = dataprocess_with_detection_error_croection(ionfidelity_list, list_rawcountsvalues, target=target,correction_method=correction_method)
    else:
        result = dataprocess(list_rawcountsvalues, target=target)

    plotlength=len(result)

    plt.figure(figsize=(10, 6))
    plt.xlabel(newHorizontalname)
    if target == 'state':
        plt.ylabel("state populartion")
    else:
        plt.ylabel("ion brightdark")
    plt.title(title)
    for i in range(plotlength):
        # Plot the line
        if target == 'state':
            # Label states by their binary encoding, zero-padded to N bits.
            plotlabel = format(i, f'0{ionnumber}b') + " state"
        else:
            plotlabel = f'{i + 1}th ion'
        plt.plot(newHorizontalaxis, result[i], label=plotlabel)
        # Plot the scatter
        plt.scatter(newHorizontalaxis, result[i])
    plt.legend()
    plt.show()
    # NOTE(review): the task name is validated only after the main plot is drawn.
    assert addtionaltask in {'phaseReshape','time_coefficient','Amp_all','Amp_ratio','Fre_reverse','Spin_freq_scan','Parity_measure'}
    if addtionaltask=='time_coefficient':
        if ionnumber==2:
            # 00 is result[0] and 11 is result[-1] for the two-ion case.
            ydata=np.array(result[0])+np.array(result[-1])
            print(f"{'gate time ratio':<10}{'00+11 state populartion'}")
            for t, p in zip(newHorizontalaxis, ydata):
                print(f"{t:<10}{p}")
    elif addtionaltask=='Amp_all':
        ydata=np.array(result[-1])
        print(f"{'Amp_all':<10}{'11 state populartion'}")
        for a, p in zip(newHorizontalaxis, ydata):
            print(f"{a:<10}{p}")
    elif addtionaltask=='Amp_ratio':
        ydata=np.array(result[-1])
        print(f"{'Amp_ratio':<10}{'11 state populartion'}")
        for a, p in zip(newHorizontalaxis, ydata):
            print(f"{a:<10}{p}")
    elif addtionaltask=='Fre_reverse':
        # 01 and 10 are result[1] and result[2] for the two-ion case.
        ydata=np.array(result[1])+np.array(result[2])
        print(f"{'Fre_reverse':<10}{'01+10 state populartion'}")
        for f, p in zip(newHorizontalaxis, ydata):
            print(f"{f:<10}{p}")
    elif addtionaltask=='Spin_freq_scan':
        # Two-ion parity signal: 00 + 11 - 01 - 10.
        ydata = np.array(result[0])+np.array( result[-1])-np.array(result[1])-np.array(result[2])
        plt.figure(figsize=(10, 6))
        plt.scatter(newHorizontalaxis, ydata, label='real Data', color='red')
        plt.show()
        # Fit with curve_fit
        params, params_covariance = curve_fit(f=sin_with_bias, xdata=newHorizontalaxis,ydata= ydata, p0=[0.9, 3 * np.pi, 0, 0])
        # Unpack the fitted parameters
        A, w, phi, b = params
        # Evaluate the fitted curve on the same axis
        y_fit = sin_with_bias(np.array(newHorizontalaxis), A, w, phi, b)
        # Plot
        
        plt.plot(newHorizontalaxis, y_fit, label='Fit: A*sin(w*x + phi) + b', color='blue')
        plt.xlabel(newHorizontalname)
        plt.ylabel('00+11-01-10 state populartion')
        plt.legend()
        plt.show()
        print('A',A, 'w',w, 'phi',phi,'b', b)
    elif addtionaltask=='Parity_measure':
        # Two-ion parity signal: 00 + 11 - 01 - 10.
        ydata = np.array(result[0]) + np.array(result[-1]) - np.array(result[1]) - np.array(result[2])
        plt.figure(figsize=(10, 6))
        plt.scatter(newHorizontalaxis, ydata, label='real Data', color='red')
        plt.show()
        # Fit with curve_fit
        params, params_covariance = curve_fit(f=sin_with_bias, xdata=newHorizontalaxis, ydata=ydata, p0=[0.9, 3 * np.pi, 0, 0])
        # Unpack the fitted parameters
        A, w, phi, b = params
        # Evaluate the fitted curve on the same axis
        y_fit = sin_with_bias(np.array(newHorizontalaxis), A, w, phi, b)
        # Plot
        plt.figure(figsize=(10, 6))
        plt.scatter(newHorizontalaxis, ydata, label='real Data', color='red')
        plt.plot(newHorizontalaxis, y_fit, label='Fit: A*sin(w*x + phi) + b', color='blue')
        plt.xlabel(newHorizontalname)
        plt.ylabel('00+11-01-10 state populartion')
        plt.legend()
        plt.show()
        print('A', A, 'w', w, 'phi', phi, 'b', b)
    else:
        pass

def NionsParityPlot(filename,newHorizontalaxis,newHorizontalname='phase/Pi',title='scanresult',target='state',correction_flag=False,ionfidelity_list=None,correction_method=1):
    """
    Plot the parity signal of an N-ion parity-measurement experiment and
    fit its oscillation amplitude.

    The parity is the population-weighted sum of the per-state parity
    eigenvalues from NionstateParitylist; it is fitted with a biased sine
    whose angular frequency is pinned to (approximately) N*pi, and the fit
    parameters are printed.

    Parameters
    ----------
    filename : str
        Path to the parity-experiment data file.
    newHorizontalaxis : list
        Replacement x-axis values, shape [scanlistlength].
    newHorizontalname : str, optional
        X-axis label, by default 'phase/Pi'.
    title : str, optional
        Plot title, by default 'scanresult'.
    target : str, optional
        'state' for state populations, 'ion' for bright fractions,
        by default 'state'.
    correction_flag : bool, optional
        Apply detection-error correction, by default False.
    ionfidelity_list : list, optional
        ionfidelity_list[ionindex] = [dark_fidelity, bright_fidelity],
        by default None. Required when correction_flag is True.
    correction_method : int, optional
        Correction method forwarded to the correction routine; the value 3
        falls through to the (unimplemented) large-N branch.
    """
    assert target in {'state', 'ion'}
    list_scanparameter, list_rawcountsvalues, scanparametername = raw_count(path=filename)
    assert len(list_rawcountsvalues) > 0
    repeat, ionnumber = list_rawcountsvalues[0].shape
    if ionnumber <=14 and correction_method!=3:
        if correction_flag:
            assert ionfidelity_list is not None, "warning:ionfidelity_list is None"
            result = dataprocess_with_detection_error_croection(ionfidelity_list, list_rawcountsvalues, target=target,correction_method=correction_method)
        else:
            result = dataprocess(list_rawcountsvalues, target=target)
        # Parity = sum over basis states of (population * parity eigenvalue).
        paritylist=NionstateParitylist(ionnumber)
        ydata=np.array(result[0])*paritylist[0]
        for i   in range(1,2**ionnumber):
            ydata+=np.array(result[i])*paritylist[i]
        plt.figure(figsize=(10, 6))
        plt.scatter(newHorizontalaxis, ydata, label='real Data', color='red')
        plt.show()
        # Fit with curve_fit; w is constrained to a narrow band just below
        # ionnumber*pi, the expected parity-oscillation frequency.
        params, params_covariance = curve_fit(f=sin_with_bias, xdata=newHorizontalaxis, ydata=ydata, p0=[0.9, ionnumber * np.pi, 0, 0],bounds = ([-np.inf, ionnumber*np.pi-0.001, -np.inf, -np.inf], [np.inf,ionnumber*np.pi, np.inf, np.inf]))
        # Unpack the fitted parameters
        A, w, phi, b = params
        # Evaluate the fitted curve on the same axis
        y_fit = sin_with_bias(np.array(newHorizontalaxis), A, w, phi, b)
        # Plot data and fit together
        plt.figure(figsize=(10, 6))
        plt.scatter(newHorizontalaxis, ydata, label='real Data', color='red')
        plt.plot(newHorizontalaxis, y_fit, label='Fit: A*sin(w*x + phi) + b', color='blue')
        plt.xlabel(newHorizontalname)
        plt.ylabel('N ion Parity')
        plt.legend()
        plt.show()
        print('A', A, 'w', w, 'phi', phi, 'b', b)
    else:
        # TODO: write the code for N > 14 parity measurement
        pass

def getParity(i, N):
    """
    Return the parity eigenvalue (+1 or -1) of the N-ion basis state with
    integer encoding i.

    Counts the number of dark ions (0 bits) among the N least-significant
    bits of i: an even count gives +1, an odd count gives -1.
    """
    dark_ions = sum(1 - ((i >> bit) & 1) for bit in range(N))
    return (-1) ** (dark_ions % 2)

def NionstateParitylist(N):
    """
    Return the length-2**N array of parity eigenvalues (+1/-1) for all
    N-ion basis states, indexed by the integer encoding of the state.
    """
    # The check is N <= 14 but the original message claimed "less than 14";
    # the message now matches the condition.
    assert N <= 14, "N should be less than or equal to 14"
    return np.array([getParity(i, N) for i in range(2 ** N)])


def NionselectMionParity(targetionindex,filename,newHorizontalaxis,newHorizontalname='phase/Pi',title='scanresult',target='state',correction_flag=False,ionfidelity_list=None,correction_method=1):
    """
    Plot and fit the parity signal of a chosen subset of M ions out of an
    N-ion parity-measurement experiment.

    The full 2**N state distribution is marginalized down to the 2**M
    sub-states of the selected ions, the M-ion parity is computed, and the
    resulting oscillation is fitted with a biased sine whose frequency is
    pinned near M*pi. Fit parameters are printed.

    Parameters
    ----------
    targetionindex : list
        Indices of the M ions to keep (0-based; leftmost ion = 0, i.e. the
        most significant bit of the state encoding); shape [M].
    filename : str
        Path to the parity-experiment data file.
    newHorizontalaxis : list
        Replacement x-axis values, shape [scanlistlength].
    newHorizontalname : str, optional
        X-axis label, by default 'phase/Pi'.
    title : str, optional
        Plot title, by default 'scanresult'.
    target : str, optional
        'state' for state populations, 'ion' for bright fractions,
        by default 'state'.
    correction_flag : bool, optional
        Apply detection-error correction, by default False.
    ionfidelity_list : list, optional
        ionfidelity_list[ionindex] = [dark_fidelity, bright_fidelity],
        by default None. Required when correction_flag is True.
    correction_method : int, optional
        Correction method forwarded to the correction routine; the value 3
        falls through to the (unimplemented) large-N branch.
    """
    assert target in {'state', 'ion'}
    list_scanparameter, list_rawcountsvalues, scanparametername = raw_count(path=filename)
    assert len(list_rawcountsvalues) > 0
    repeat, ionnumber = list_rawcountsvalues[0].shape
    if ionnumber <= 14 and correction_method != 3:
        if correction_flag:
            assert ionfidelity_list is not None, "warning:ionfidelity_list is None"
            result = dataprocess_with_detection_error_croection(ionfidelity_list, list_rawcountsvalues, target=target, correction_method=correction_method)
        else:
            result = dataprocess(list_rawcountsvalues, target=target)
        M = len(targetionindex)
        # Indices of all 2**N full states. BUGFIX: the original used
        # np.arange(ionnumber**2), which only coincides with 2**ionnumber
        # for N in {2, 4}.
        indices = np.arange(2 ** ionnumber)

        # new_indices[s] = integer encoding of the M-ion sub-state extracted
        # from full state s. The leftmost ion (position 0) corresponds to
        # the most significant bit (bit N-1).
        new_indices = np.zeros_like(indices)
        for i, pos in enumerate(targetionindex):
            # Extract the bit at position (N - 1 - pos).
            bit = (indices >> (ionnumber - 1 - pos)) & 1
            # Place this bit into the correct position of the M-bit number.
            new_indices |= bit << (M - 1 - i)
        # Marginalize: sum the probabilities of all full states that map to
        # the same M-ion sub-state.
        prob = np.array(result).T
        sub_prob = []
        for scanpointprob in prob:
            sub_prob.append(np.bincount(new_indices, weights=scanpointprob, minlength=2 ** M))
        result = np.array(sub_prob).T
        # Parity eigenvalues for the M-ion subspace. BUGFIX: the original
        # used the N-ion parity list, which flips the sign of the parity
        # whenever N - M is odd.
        paritylist = NionstateParitylist(M)
        ydata = np.array(result[0]) * paritylist[0]
        for i in range(1, 2 ** M):
            ydata += np.array(result[i]) * paritylist[i]
        plt.figure(figsize=(10, 6))
        plt.scatter(newHorizontalaxis, ydata, label='real Data', color='red')
        plt.show()
        # Fit with w constrained to a narrow band just below M*pi, the
        # expected parity-oscillation frequency for M ions.
        params, params_covariance = curve_fit(f=sin_with_bias, xdata=newHorizontalaxis, ydata=ydata, p0=[0.9, M * np.pi, 0, 0], bounds=([-np.inf, M * np.pi - 0.001, -np.inf, -np.inf], [np.inf, M * np.pi, np.inf, np.inf]))
        # Unpack the fitted parameters
        A, w, phi, b = params
        # Evaluate the fitted curve on the same axis
        y_fit = sin_with_bias(np.array(newHorizontalaxis), A, w, phi, b)
        # Plot data and fit together
        plt.figure(figsize=(10, 6))
        plt.scatter(newHorizontalaxis, ydata, label='real Data', color='red')
        plt.plot(newHorizontalaxis, y_fit, label='Fit: A*sin(w*x + phi) + b', color='blue')
        plt.xlabel(newHorizontalname)
        plt.ylabel('M ion Parity')
        plt.legend()
        plt.show()
        print('A', A, 'w', w, 'phi', phi, 'b', b)
    else:
        # TODO: write the code for N > 14 parity measurement
        pass


def sin_with_bias(x, A, w, phi, b):
    """Sinusoid with a constant offset: A*sin(w*x + phi) + b."""
    phase = w * x + phi
    return b + A * np.sin(phase)

def PlotFreScan(file_name, f_center):
    """
    Plot a single-ion frequency scan with the x axis shifted to the
    detuning relative to the given center frequency.

    Parameters
    ----------
    file_name : str
        Path to the scan data file.
    f_center : float
        Center frequency subtracted from the scanned frequency values.
    """
    # Single-ion only, so only the first trace is plotted (no loop over ions).
    x_data, y_data = pre_process_data(file_name)
    # BUGFIX: the original also opened the file with h5py here but never used
    # (or closed) the handle; that leaked descriptor has been removed.
    f_list = (np.array(x_data[0]) - f_center)
    plt.figure()
    # Experimental data curve
    plt.plot(f_list, np.array(y_data[0]), label='exp_data')
    plt.xlabel(r'Detuning $\delta / 2\pi$ [MHz]')
    plt.show()
    
def to_nearest_multiple(number, step=0.04, ndigits=2):
    """
    Round *number* to the nearest multiple of *step*.

    The result is additionally rounded to *ndigits* decimal places
    (default 2, matching the previous hard-coded behaviour) to suppress
    binary floating-point noise such as 0.12000000000000001.
    """
    return round(round(number / step) * step, ndigits)


def bootstrap_std_singledatasetold(dataset, function,  *args,tol=0.01, initial_N=10, **kwargs):
    """
    Estimate the standard deviation of X = function(dataset) via bootstrap,
    increasing the number of resamples by a factor of 10 each iteration
    until the change in std is within tol * |X_original|.

    Legacy implementation, superseded by bootstrap_std_singledataset.

    NOTE(review): the convergence test is relative to |X0|, so it degenerates
    when X0 is close to 0, and the `N > M` bail-out couples the resample
    budget to the dataset size rather than to any statistical criterion —
    kept as-is for reproducibility of old results.

    Parameters
    ----------
    dataset : array-like, shape (M, ...)
        Original data samples, resampled along the first axis.
    function : callable
        Function that takes a dataset (same format as `dataset`) and returns X.
    tol : float, optional
        Relative tolerance for convergence (default 0.01 for 1%).
    initial_N : int, optional
        Starting number of bootstrap resamples (default 10).

    Returns
    -------
    float
        Estimate of the standard deviation of X (converged, or the last
        value computed before the `N > M` bail-out).
    """
    # ensure numpy array
    data = np.asarray(dataset)
    M = data.shape[0]

    # compute X on the original data
    X0 = function(data, *args, **kwargs)

    prev_std = None
    N = initial_N

    while True:
        # generate N bootstrap estimates of X (uses the global numpy RNG,
        # so results are not reproducible unless np.random.seed is set)
        Xs = []
        for _ in range(N):
            # sample indices with replacement
            indices = np.random.randint(0, M, size=M)
            sample = data[indices]
            Xs.append(function(sample, *args, **kwargs))
        Xs = np.array(Xs)

        # include the original X0 in the std calculation
        all_X = np.concatenate(([X0], Xs))
        current_std = np.std(all_X, ddof=1)

        # check convergence against previous iteration
        if prev_std is not None:
            if abs(current_std - prev_std) < tol * abs(X0):
                return current_std

        # not yet converged: prepare next iteration
        prev_std = current_std
        N *= 10
        if N > M:
            # avoid infinite loop if M is too small
            print("Warning: bootstrap std estimate did not converge")
            return current_std
def bootstrap_std_singledataset(
    dataset, function, *args,
    rel_tol=0.01, abs_tol=None,
    initial_N=100, growth=2,
    max_iters=8, random_state=None,
    **kwargs
):
    """
    Estimate the bootstrap standard deviation of X = function(dataset).

    Resamples are accumulated across iterations; the loop stops when the
    std estimate changes by less than rel_tol (relative to the current
    std) or by less than abs_tol (absolute, if given), or after max_iters
    accumulation rounds.

    Parameters
    ----------
    dataset : array-like, shape (M, ...)
        Original data samples, resampled along the first axis.
    function : callable
        Statistic evaluated on each resampled dataset (and on the original).
    rel_tol : float, optional
        Relative convergence threshold (default 0.01).
    abs_tol : float or None, optional
        Absolute convergence threshold; disabled when None.
    initial_N : int, optional
        Number of resamples added in the first iteration (default 100).
    growth : float, optional
        Factor by which the per-iteration batch size grows (default 2).
    max_iters : int, optional
        Maximum number of accumulation rounds; must be >= 1.
    random_state : int, Generator or None, optional
        Seed or generator for reproducible resampling.

    Returns
    -------
    float
        Bootstrap estimate of the standard deviation of X (last value
        computed if the loop exhausts max_iters without converging).

    Raises
    ------
    ValueError
        If the dataset has fewer than 2 samples or max_iters < 1.
    """
    if max_iters < 1:
        # BUGFIX: with max_iters < 1 the loop body never ran and the final
        # return referenced an unbound 'current_std'.
        raise ValueError("max_iters must be >= 1")
    rng = np.random.default_rng(random_state)
    data = np.asarray(dataset)
    M = data.shape[0]
    if M < 2:
        raise ValueError("dataset size M must be >= 2 for bootstrap with ddof=1")

    # Statistic on the original (un-resampled) data.
    X0 = function(data, *args, **kwargs)

    # Pool of bootstrap estimates, accumulated across iterations.
    Xs = []

    prev_std = None
    N = int(initial_N)

    for _iter in range(max_iters):
        # Append N fresh resamples to the pool.
        for _ in range(N):
            idx = rng.integers(0, M, size=M)
            sample = data[idx]
            Xs.append(function(sample, *args, **kwargs))

        # Current std estimate; the original X0 is included in the pool.
        all_X = np.concatenate(([X0], np.asarray(Xs)))
        current_std = np.std(all_X, ddof=1)

        if prev_std is not None:
            delta = abs(current_std - prev_std)
            # Converge on either the relative or the absolute criterion.
            scale = max(current_std, np.finfo(float).eps)
            rel_ok = (delta / scale) < rel_tol
            abs_ok = (abs_tol is not None) and (delta < abs_tol)
            if rel_ok or abs_ok:
                return current_std

        prev_std = current_std
        N = int(max(1, N * growth))  # size of the next batch of resamples

    # Not converged: report the number of resamples actually drawn.
    # BUGFIX: the original printed N, the size of the *next*, never-drawn batch.
    print("Warning: bootstrap std estimate did not converge after", len(Xs), "sampling")
    return current_std

def calculate_Y_with_uncertainty_fit(t, X, deltaX, function, p0=None):
    """
    Weighted least-squares fit of X_i = function(t_i; Y, ...) to the data
    (t, X) with measurement uncertainties deltaX, returning the best-fit
    first parameter Y and its 1-sigma uncertainty.

    Parameters
    ----------
    t : array_like, shape (L,)
        Independent variable values t_i.
    X : array_like, shape (L,)
        Measured dependent values X_i.
    deltaX : array_like, shape (L,)
        Standard deviations of X_i, treated as absolute uncertainties.
    function : callable
        Model function with signature function(t, Y, ...) -> X_model;
        Y must be the first fit parameter.
    p0 : sequence, optional
        Initial guess for the fit parameters, by default None.

    Returns
    -------
    Y_fit : float
        Best-fit value of Y (first parameter).
    sigma_Y : float
        Estimated 1-sigma standard deviation of Y.
    popt : ndarray
        Full array of best-fit parameters.
    pcov : ndarray
        Full covariance matrix of the fit.
    """
    # absolute_sigma=True tells curve_fit that deltaX are absolute
    # measurement errors, so pcov is not rescaled by the residual variance.
    fit_params, fit_cov = curve_fit(
        function, t, X,
        sigma=deltaX,
        absolute_sigma=True,
        p0=p0
    )
    best_Y = fit_params[0]
    sigma_Y = np.sqrt(fit_cov[0, 0])
    return best_Y, sigma_Y, fit_params, fit_cov



def sin_without_bias(x, A, w, phi):
    """Pure sinusoid A*sin(w*x + phi) with no constant offset."""
    return np.sin(w * x + phi) * A

from scipy.stats import norm

def build_detection_matrix(ionfidelity_list):
    """
    Build the (N+1)x(N+1) detection-error transfer matrix L by convolution.

    L[i, l] = P(detected i bright ions | l ions actually bright), so that
    L @ P_actual = P_detected for the distribution over the number of bright
    ions.

    ionfidelity_list[j] = [p0_j, p1_j] where
      p0_j = P(detect 0 | ion j actually 0)  (false-bright prob is 1 - p0_j)
      p1_j = P(detect 1 | ion j actually 1)  (false-dark prob is 1 - p1_j)

    For column l, the first l ions (after reordering) are taken as the ones
    actually bright, the remaining N-l as actually dark.

    NOTE(review): every per-ion fidelity is replaced by the MAXIMUM fidelity
    over all ions (uniform-fidelity approximation), exactly as in the
    original implementation — confirm this substitution is intended.
    """
    dark_fid = np.array([fid[0] for fid in ionfidelity_list], dtype=float)
    bright_fid = np.array([fid[1] for fid in ionfidelity_list], dtype=float)
    n_ions = len(dark_fid)

    # Uniform-fidelity substitution: all ions get the best observed fidelity.
    dark_fid = np.full(n_ions, np.max(dark_fid))
    bright_fid = np.full(n_ions, np.max(bright_fid))

    transfer = np.zeros((n_ions + 1, n_ions + 1), dtype=float)
    for actual_ones in range(n_ions + 1):
        # Distribution of detected-bright counts from the truly bright ions:
        # each contributes Bernoulli(p1) for being seen bright.
        bright_dist = np.array([1.0])
        for p in bright_fid[:actual_ones]:
            bright_dist = np.convolve(bright_dist, [1 - p, p])

        # Distribution from the truly dark ions: each contributes
        # Bernoulli(1 - p0) for a false-bright detection.
        dark_dist = np.array([1.0])
        for p in dark_fid[actual_ones:]:
            dark_dist = np.convolve(dark_dist, [p, 1 - p])

        # Total detected-bright count is the sum of both groups ⇒ convolve.
        transfer[:, actual_ones] = np.convolve(bright_dist, dark_dist)

    return transfer

def datasetToSingleionpopulation(dataset, repeat, ionnumber, correction_flag, ionfidelity_list, correction_method):
    """
    Mean per-ion bright population of one raw-counts dataset, optionally
    applying the detection-error correction before averaging.
    """
    bright_dark = raw_counts_To_ion_brightdarkcounts(raw_counts=dataset, repeat=repeat)
    apply_correction = correction_flag and ionfidelity_list is not None
    if apply_correction:
        bright_dark = singledataset_ionbrightdarkcounts_correction(
            bright_dark, ionnumber, ionfidelity_list, correction_method=correction_method
        )
    return np.mean(bright_dark, axis=0)

def datasetToTotalSpin(dataset, repeat, ionnumber, correction_flag, ionfidelity_list, correction_method):
    """
    Mean total spin <S> of one raw-counts dataset, mapping each ion result
    0 -> -1 and 1 -> +1 (so S = sum of +/-1 over the ions of one shot).

    Parameters
    ----------
    dataset : array_like, shape (repeat, ionnumber)
        Binary detection results for one scan point.
    repeat : int
        Number of repetitions (rows of `dataset`).
    ionnumber : int
        Number of ions (columns of `dataset`).
    correction_flag : bool
        Apply detection-error correction when True and fidelities are given.
    ionfidelity_list : list or None
        ionfidelity_list[ion] = [dark_fidelity, bright_fidelity].
    correction_method : int
        Correction selector; method 3 forces the large-N path below.

    Returns
    -------
    float
        The (possibly corrected) expectation value <S>.

    Raises
    ------
    ValueError
        If the detection matrix is singular or the constrained optimization
        does not converge.
    """
    if ionnumber <= 14 and correction_method != 3:
        # Small systems: work with the full 2**N state-population vector.
        probabilities = raw_counts_To_state_populartion(dataset, repeat, ionnumber)
        if correction_flag and ionfidelity_list is not None:
            probabilities = singledataset_state_populartion_correction(
                statepopulation=probabilities, ionnumber=ionnumber,
                ionfidelity_list=ionfidelity_list, correction_method=correction_method)
        return probabilities.dot(NionstateSpinlist(ionnumber))

    if correction_flag and ionfidelity_list is not None:
        # Local import: `minimize` is not among the imports visible at the
        # top of this file, so guarantee it is in scope here.
        from scipy.optimize import minimize

        L = build_detection_matrix(ionfidelity_list)
        try:
            correctionM = np.linalg.inv(L)
        except np.linalg.LinAlgError:
            raise ValueError("Matrix is singular and cannot be inverted.")

        # Possible total-spin values -N, -N+2, ..., N and their measured frequencies.
        unique_integers = np.arange(-ionnumber, ionnumber + 1, 2)
        S = (2 * np.asarray(dataset) - 1).sum(axis=1)
        summed_frequencies = np.array([np.mean(S == k) for k in unique_integers])

        # Invert the detection matrix, then project the estimate back onto the
        # probability simplex (non-negative, sums to one).
        raw_estimate = correctionM.dot(summed_frequencies)
        Q_init = raw_estimate / np.sum(raw_estimate)
        constraints = {'type': 'eq', 'fun': lambda Q: np.sum(Q) - 1}
        bounds = [(0, 1) for _ in range(len(summed_frequencies))]
        result = minimize(lambda Q: np.linalg.norm(Q - raw_estimate),
                          Q_init, constraints=constraints, bounds=bounds)
        if not result.success:
            raise ValueError("Optimization did not converge.")
        return result.x.dot(unique_integers)

    # No correction: plain sample mean of the per-shot summed +/-1 spins.
    spins = 2 * np.asarray(dataset) - 1
    return np.mean(spins.sum(axis=1))

def datasetToTotalSpinsquare(dataset, repeat, ionnumber, correction_flag, ionfidelity_list, correction_method):
    """
    Mean squared total spin <S^2> of one raw-counts dataset, mapping each
    ion result 0 -> -1 and 1 -> +1.

    Parameters
    ----------
    dataset : array_like, shape (repeat, ionnumber)
        Binary detection results for one scan point.
    repeat : int
        Number of repetitions (rows of `dataset`).
    ionnumber : int
        Number of ions (columns of `dataset`).
    correction_flag : bool
        Apply detection-error correction when True and fidelities are given.
    ionfidelity_list : list or None
        ionfidelity_list[ion] = [dark_fidelity, bright_fidelity].
    correction_method : int
        Correction selector; method 3 forces the large-N path below.

    Returns
    -------
    float
        The (possibly corrected) expectation value <S^2>.

    Raises
    ------
    ValueError
        If the detection matrix is singular or the constrained optimization
        does not converge.
    """
    if ionnumber <= 14 and correction_method != 3:
        # Small systems: work with the full 2**N state-population vector.
        probabilities = raw_counts_To_state_populartion(dataset, repeat, ionnumber)
        if correction_flag and ionfidelity_list is not None:
            probabilities = singledataset_state_populartion_correction(
                statepopulation=probabilities, ionnumber=ionnumber,
                ionfidelity_list=ionfidelity_list, correction_method=correction_method)
        return probabilities.dot((NionstateSpinlist(ionnumber)) ** 2)

    if correction_flag and ionfidelity_list is not None:
        # Local import: `minimize` is not among the imports visible at the
        # top of this file, so guarantee it is in scope here.
        from scipy.optimize import minimize

        L = build_detection_matrix(ionfidelity_list)
        try:
            correctionM = np.linalg.inv(L)
        except np.linalg.LinAlgError:
            raise ValueError("Matrix is singular and cannot be inverted.")

        # Possible total-spin values -N, -N+2, ..., N and their measured frequencies.
        unique_integers = np.arange(-ionnumber, ionnumber + 1, 2)
        S = (2 * np.asarray(dataset) - 1).sum(axis=1)
        summed_frequencies = np.array([np.mean(S == k) for k in unique_integers])

        # Invert the detection matrix, then project the estimate back onto the
        # probability simplex (non-negative, sums to one).
        raw_estimate = correctionM.dot(summed_frequencies)
        Q_init = raw_estimate / np.sum(raw_estimate)
        constraints = {'type': 'eq', 'fun': lambda Q: np.sum(Q) - 1}
        bounds = [(0, 1) for _ in range(len(summed_frequencies))]
        result = minimize(lambda Q: np.linalg.norm(Q - raw_estimate),
                          Q_init, constraints=constraints, bounds=bounds)
        if not result.success:
            raise ValueError("Optimization did not converge.")
        return result.x.dot(unique_integers ** 2)

    # No correction: plain sample mean of the squared per-shot total spin.
    spins = 2 * np.asarray(dataset) - 1
    S = spins.sum(axis=1)
    return np.mean(S ** 2)

def datasetSpinVarience(dataset, repeat, ionnumber, correction_flag, ionfidelity_list, correction_method):
    """
    Variance of the total spin, <S^2> - <S>^2, of one raw-counts dataset,
    mapping each ion result 0 -> -1 and 1 -> +1.

    Parameters
    ----------
    dataset : array_like, shape (repeat, ionnumber)
        Binary detection results for one scan point.
    repeat : int
        Number of repetitions (rows of `dataset`).
    ionnumber : int
        Number of ions (columns of `dataset`).
    correction_flag : bool
        Apply detection-error correction when True and fidelities are given.
    ionfidelity_list : list or None
        ionfidelity_list[ion] = [dark_fidelity, bright_fidelity].
    correction_method : int
        Correction selector; method 3 forces the large-N path below.

    Returns
    -------
    float
        The (possibly corrected) spin variance.

    Raises
    ------
    ValueError
        If the detection matrix is singular or the constrained optimization
        does not converge.
    """
    if ionnumber <= 14 and correction_method != 3:
        # Small systems: work with the full 2**N state-population vector.
        probabilities = raw_counts_To_state_populartion(dataset, repeat, ionnumber)
        if correction_flag and ionfidelity_list is not None:
            probabilities = singledataset_state_populartion_correction(
                statepopulation=probabilities, ionnumber=ionnumber,
                ionfidelity_list=ionfidelity_list, correction_method=correction_method)
        NstateSpin = NionstateSpinlist(ionnumber)
        return probabilities.dot(NstateSpin ** 2) - (probabilities.dot(NstateSpin)) ** 2

    if correction_flag and ionfidelity_list is not None:
        # Local import: `minimize` is not among the imports visible at the
        # top of this file, so guarantee it is in scope here.
        from scipy.optimize import minimize

        L = build_detection_matrix(ionfidelity_list)
        try:
            correctionM = np.linalg.inv(L)
        except np.linalg.LinAlgError:
            raise ValueError("Matrix is singular and cannot be inverted.")

        # Possible total-spin values -N, -N+2, ..., N and their measured frequencies.
        unique_integers = np.arange(-ionnumber, ionnumber + 1, 2)
        S = (2 * np.asarray(dataset) - 1).sum(axis=1)
        summed_frequencies = np.array([np.mean(S == k) for k in unique_integers])

        # Invert the detection matrix, then project the estimate back onto the
        # probability simplex (non-negative, sums to one).
        raw_estimate = correctionM.dot(summed_frequencies)
        Q_init = raw_estimate / np.sum(raw_estimate)
        constraints = {'type': 'eq', 'fun': lambda Q: np.sum(Q) - 1}
        bounds = [(0, 1) for _ in range(len(summed_frequencies))]
        result = minimize(lambda Q: np.linalg.norm(Q - raw_estimate),
                          Q_init, constraints=constraints, bounds=bounds)
        if not result.success:
            raise ValueError("Optimization did not converge.")
        Q = result.x
        return Q.dot(unique_integers ** 2) - (Q.dot(unique_integers)) ** 2

    # No correction: plain sample variance of the per-shot total spin.
    spins = 2 * np.asarray(dataset) - 1
    S = spins.sum(axis=1)
    return np.mean(S ** 2) - (np.mean(S)) ** 2


def datasetQfunction(dataset, repeat, ionnumber, correction_flag, ionfidelity_list, correction_method):
    """
    Q-function value of one raw-counts dataset: the probability of the
    all-dark outcome (every ion detected in state 0).

    Parameters
    ----------
    dataset : array_like, shape (repeat, ionnumber)
        Binary detection results for one scan point (list input accepted).
    repeat : int
        Number of repetitions (rows of `dataset`).
    ionnumber : int
        Number of ions (columns of `dataset`).
    correction_flag : bool
        Apply detection-error correction when True and fidelities are given.
    ionfidelity_list : list or None
        ionfidelity_list[ion] = [dark_fidelity, bright_fidelity].
    correction_method : int
        Correction selector; method 3 forces the large-N path below.

    Returns
    -------
    float
        The (possibly corrected) all-dark probability.

    Raises
    ------
    ValueError
        If the detection matrix is singular or the constrained optimization
        does not converge.
    """
    if ionnumber <= 14 and correction_method != 3:
        # Small systems: all-dark probability is the population of state 0.
        probabilities = raw_counts_To_state_populartion(dataset, repeat, ionnumber)
        if correction_flag and ionfidelity_list is not None:
            probabilities = singledataset_state_populartion_correction(
                statepopulation=probabilities, ionnumber=ionnumber,
                ionfidelity_list=ionfidelity_list, correction_method=correction_method)
        return probabilities[0]

    if correction_flag and ionfidelity_list is not None:
        # Local import: `minimize` is not among the imports visible at the
        # top of this file, so guarantee it is in scope here.
        from scipy.optimize import minimize

        L = build_detection_matrix(ionfidelity_list)
        try:
            correctionM = np.linalg.inv(L)
        except np.linalg.LinAlgError:
            raise ValueError("Matrix is singular and cannot be inverted.")

        # Possible total-spin values -N, -N+2, ..., N and their measured frequencies.
        unique_integers = np.arange(-ionnumber, ionnumber + 1, 2)
        S = (2 * np.asarray(dataset) - 1).sum(axis=1)
        summed_frequencies = np.array([np.mean(S == k) for k in unique_integers])

        # Invert the detection matrix, then project the estimate back onto the
        # probability simplex (non-negative, sums to one).
        raw_estimate = correctionM.dot(summed_frequencies)
        Q_init = raw_estimate / np.sum(raw_estimate)
        constraints = {'type': 'eq', 'fun': lambda Q: np.sum(Q) - 1}
        bounds = [(0, 1) for _ in range(len(summed_frequencies))]
        result = minimize(lambda Q: np.linalg.norm(Q - raw_estimate),
                          Q_init, constraints=constraints, bounds=bounds)
        if not result.success:
            raise ValueError("Optimization did not converge.")
        # First entry corresponds to total spin -N, i.e. the all-dark state.
        return result.x[0]

    # No correction: fraction of shots where every ion was detected dark.
    # np.asarray makes elementwise comparison work for plain-list input too.
    data = np.asarray(dataset)
    all_dark_rows = np.all(data == 0, axis=1)
    return np.count_nonzero(all_dark_rows) / len(data)

def getSpin(i, N):
    """
    Total z-spin of the N-ion basis state with index i: each set bit among
    the low N bits contributes +1, each clear bit contributes -1.
    """
    # popcount of the low N bits; spin = (+1)*ones + (-1)*(N - ones).
    bright = bin(i & ((1 << N) - 1)).count("1")
    return 2 * bright - N

def NionstateSpinlist(N):
    """
    Spin value of every one of the 2**N basis states of N ions.

    Entry i is the total z-spin of basis state i: each set bit of i
    contributes +1, each clear bit -1 (same convention as getSpin).

    Raises
    ------
    ValueError
        If N > 14 (2**N states would be prohibitively large). The original
        used `assert`, which is silently stripped under `python -O`, and its
        message ("less than 14") contradicted the actual `<= 14` condition.
    """
    if N > 14:
        raise ValueError("N must be at most 14")
    # popcount-based spin: 2*ones(i) - N, identical to getSpin(i, N).
    return np.array([2 * bin(i).count("1") - N for i in range(2 ** N)])

def plotTotalSpin(filename,time_coefficient,correction_flag=False,ionfidelity_list=None,stdcacluation_flag=False,correction_method=1,cyclefileflag=False,fit_flag=True):
    """
    Plot the measured total spin <S> against the scan parameter and,
    optionally, fit a biased sinusoid to extract its amplitude.

    Typically the scanned quantity is the carrier-rotation time, so the Z
    projection of the total spin follows a sine curve.

    Parameters
    ----------
    filename : str
        Path of the scan data file.
    time_coefficient : list
        Scan-parameter coefficients; length equals len(list_scanparameter).
    correction_flag : bool, optional
        Whether to apply the detection-error correction, by default False.
    ionfidelity_list : list, optional
        ionfidelity_list[ionindex] = [dark_fidelity, bright_fidelity],
        by default None.
    stdcacluation_flag : bool, optional
        If True, bootstrap a standard deviation for every scan point and use
        it as fit weights / error bars, by default False.
    correction_method : int, optional
        Correction algorithm selector; method 3 forces the large-N code
        path, by default 1.
    cyclefileflag : bool, optional
        If True, reorder cyclic scan data before processing, by default False.
    fit_flag : bool, optional
        If True, fit sin_with_bias and report the fitted amplitude; otherwise
        report the maximum measured <S>, by default True.

    Returns
    -------
    float
        Fitted |amplitude| of <S> (or max of <S> when fit_flag is False).
    float
        Its standard deviation; only returned when stdcacluation_flag is True.
    """
    list_scanparameter, list_rawcountsvalues, scanparametername = raw_count(path=filename)
    assert len(list_rawcountsvalues) > 0
    

    if cyclefileflag:
        list_scanparameter,list_rawcountsvalues=cycle_raw_counts(list_scanparameter,list_rawcountsvalues)
        
    repeat, ionnumber = list_rawcountsvalues[0].shape
    time_coefficient=np.array(time_coefficient)
    if ionnumber <=14 and correction_method!=3:
        # Small-system path: compute <S> from the full 2**N state populations.
        if correction_flag:
            assert ionfidelity_list is not None, "warning:ionfidelity_list is None"
            Populationresult = dataprocess_with_detection_error_croection(ionfidelity_list, list_rawcountsvalues, target='state',correction_method=correction_method)
        else:
            Populationresult = dataprocess(list_rawcountsvalues, target='state')


        if stdcacluation_flag:
            # Bootstrap one standard deviation of <S> per scan point.
            stdS=np.zeros(len(list_rawcountsvalues))
            for i, result in enumerate( list_rawcountsvalues):
                stdS[i]=bootstrap_std_singledataset(dataset=result, function=datasetToTotalSpin,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)

        
        # Populationresult[stateindex] is the population of that basis state
        # for every scan point (length == len(list_scanparameter)); after the
        # transpose, row i holds the state-population vector of scan point i.
        Populationresult=np.array(Populationresult).T
        averageS=np.zeros(len(Populationresult))
        NstateSpin=NionstateSpinlist(ionnumber)
        for i, result in enumerate(Populationresult):
            averageS[i]=Populationresult[i].dot(NstateSpin)
        
        if fit_flag:
            if stdcacluation_flag:
                # Weighted fit using the bootstrapped errors as absolute sigmas.
                S_fit, sigma_S, params, params_covariance=calculate_Y_with_uncertainty_fit(t=time_coefficient, X=averageS, deltaX=stdS, function=sin_with_bias, p0=[ionnumber, np.pi, 0,0])
            else:
                params, params_covariance = curve_fit(f=sin_with_bias, xdata=time_coefficient, ydata=averageS,p0=[ionnumber, np.pi, 0,0])
            A, w, phi,b= params
    else:
        # Large-system path: compute <S> per dataset directly from the shots.
        averageS=np.zeros(len(list_rawcountsvalues))
        for i, dataset in enumerate(list_rawcountsvalues):
            averageS[i]=datasetToTotalSpin(dataset=dataset,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
        if stdcacluation_flag:
            stdS=np.zeros(len(list_rawcountsvalues))
            for i, result in enumerate( list_rawcountsvalues):
                stdS[i]=bootstrap_std_singledataset(dataset=result, function=datasetToTotalSpin,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
        if fit_flag:
            if stdcacluation_flag:
                S_fit, sigma_S, params, params_covariance=calculate_Y_with_uncertainty_fit(t=time_coefficient, X=averageS, deltaX=stdS, function=sin_with_bias, p0=[ionnumber, np.pi, 0,0])
            else:
                params, params_covariance = curve_fit(f=sin_with_bias, xdata=time_coefficient, ydata=averageS,p0=[ionnumber, np.pi, 0,0])
            A, w, phi,b= params
    plt.figure(figsize=(10, 6))
    plt.scatter(time_coefficient, averageS, label='exp Data')
    if fit_flag:
        plt.plot(time_coefficient,sin_with_bias(time_coefficient, A, w, phi,b), label='Fit')
    if stdcacluation_flag:
        plt.errorbar(time_coefficient, averageS, yerr=stdS,  fmt='none', color='red', label='exp Data with errorbar', capsize=4)
    plt.title('<S>')
    plt.xlabel(r'$\Phi(*\pi)$')
    plt.ylabel('<S>')
    plt.show()
    if fit_flag:
        print('fit_amplitude <S>', np.abs(A))
    if fit_flag:
        if stdcacluation_flag:
            return np.abs(A),sigma_S
        else:
            return np.abs(A)
    else:
        # No fit: fall back to the largest measured <S> (with its error bar).
        if stdcacluation_flag:
            return np.max(averageS),stdS[np.argmax(averageS)]
        else:
            return np.max(averageS)
    
def plotSpinVarience(filename,time_coefficient,correction_flag=False,ionfidelity_list=None,correction_method=1,cyclefileflag=False):
    """
    Plot the total-spin variance <(dS)^2> versus the scan parameter and
    return its minimum value.

    Typically the scanned quantity is the rotation time (i.e. rotation
    angle); the plot shows where the variance is smallest, which is the
    squeezing direction.

    Parameters
    ----------
    filename : str
        Path of the scan data file.
    time_coefficient : list
        Scan-parameter coefficients; length equals len(list_scanparameter).
    correction_flag : bool, optional
        Whether to apply the detection-error correction, by default False.
    ionfidelity_list : list, optional
        ionfidelity_list[ionindex] = [dark_fidelity, bright_fidelity],
        by default None.
    correction_method : int, optional
        Correction algorithm selector; method 3 forces the large-N code
        path, by default 1.
    cyclefileflag : bool, optional
        If True, reorder cyclic scan data before processing, by default False.

    Returns
    -------
    float
        The smallest measured variance over the scan.
    """
    list_scanparameter, list_rawcountsvalues, scanparametername = raw_count(path=filename)
    assert len(list_rawcountsvalues) > 0
    if cyclefileflag:
        list_scanparameter,list_rawcountsvalues=cycle_raw_counts(list_scanparameter,list_rawcountsvalues)
    time_coefficient=np.array(time_coefficient)
    repeat, ionnumber = list_rawcountsvalues[0].shape

    if ionnumber <=14 and correction_method!=3:
        # Small-system path: variance from the full 2**N state populations.
        if correction_flag:
            assert ionfidelity_list is not None, "warning:ionfidelity_list is None"
            Populationresult = dataprocess_with_detection_error_croection(ionfidelity_list, list_rawcountsvalues, target='state',correction_method=correction_method)
        else:
            Populationresult = dataprocess(list_rawcountsvalues, target='state')
        # Populationresult[stateindex] is the population of that basis state
        # for every scan point; after the transpose, row i holds the
        # state-population vector of scan point i.
        Populationresult=np.array(Populationresult).T
        Svariancelist = np.zeros(len(Populationresult))
        NstateSpin = NionstateSpinlist(ionnumber)
        for i, result in enumerate(Populationresult):
            # Var(S) = <S^2> - <S>^2 from the population-weighted spin values.
            Svariancelist[i] = Populationresult[i].dot(NstateSpin**2)-(Populationresult[i].dot(NstateSpin))**2
    else:
        # Large-system path: variance per dataset directly from the shots.
        Svariancelist = np.zeros(len(list_rawcountsvalues))
        for i, dataset in enumerate(list_rawcountsvalues):
            Svariancelist[i] = datasetSpinVarience(dataset=dataset,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
    plt.figure(figsize=(10, 6))
    plt.scatter(time_coefficient,Svariancelist)
    plt.plot(time_coefficient, Svariancelist)
    plt.title(r'<$(\Delta S)^2$>')
    plt.xlabel(r'$\Phi(*\pi)$')
    plt.ylabel(r'<$(\Delta S)^2$>')
    print('< (ΔS)^2 >最小时候的Φ角度值，以及< (ΔS)^2 >值',time_coefficient[np.argmin(Svariancelist)],np.min(Svariancelist))
    plt.show()
    return np.min(Svariancelist)


def plotSpinVariencebyfiting(filename, time_coefficient, correction_flag=False, ionfidelity_list=None,stdcacluation_flag=False,correction_method=1,cyclefileflag=False):
    """
    Plot the total-spin variance <(dS)^2> versus the scan parameter and FIT
    its angular dependence to extract the minimum variance, overcoming the
    finite sampling of the raw data.

    Typically the scanned quantity is the rotation time (i.e. rotation
    angle); the fitted model Svariencemeasurefunction describes the variance
    as a function of that angle. The fitted minimum has a well-defined error
    bar (via Xmin_uncertainty_linear); the error bars of the angles at which
    the minimum/maximum occur are not estimated here.

    Parameters
    ----------
    filename : str
        Path of the scan data file.
    time_coefficient : list
        Scan-parameter coefficients (in units of pi); length equals
        len(list_scanparameter).
    correction_flag : bool, optional
        Whether to apply the detection-error correction, by default False.
    ionfidelity_list : list, optional
        ionfidelity_list[ionindex] = [dark_fidelity, bright_fidelity],
        by default None.
    stdcacluation_flag : bool, optional
        If True, bootstrap per-point standard deviations, use them as fit
        weights and propagate them to the fitted minimum, by default False.
    correction_method : int, optional
        Correction algorithm selector; method 3 forces the large-N code
        path, by default 1.
    cyclefileflag : bool, optional
        If True, reorder cyclic scan data before processing, by default False.

    Returns
    -------
    tuple
        (minSvariance, sigma_minSvariance, t0, tmax) when
        stdcacluation_flag is True, else (minSvariance, t0, tmax), where t0
        and tmax are the angles (in units of pi for t0) of the fitted
        minimum and maximum.
    """
    list_scanparameter, list_rawcountsvalues, scanparametername = raw_count(path=filename)
    assert len(list_rawcountsvalues) > 0
    if cyclefileflag:
        list_scanparameter,list_rawcountsvalues=cycle_raw_counts(list_scanparameter,list_rawcountsvalues)
    time_coefficient=np.array(time_coefficient)
    repeat, ionnumber = list_rawcountsvalues[0].shape
    if ionnumber <=14 and correction_method!=3:
        # Small-system path: variance from the full 2**N state populations.
        if correction_flag:
            assert ionfidelity_list is not None, "warning:ionfidelity_list is None"
            Populationresult = dataprocess_with_detection_error_croection(ionfidelity_list, list_rawcountsvalues,
                                                                        target='state',correction_method=correction_method)
        else:
            Populationresult = dataprocess(list_rawcountsvalues, target='state')
        # Populationresult[stateindex] is the population of that basis state
        # for every scan point; after the transpose, row i holds the
        # state-population vector of scan point i.
        Populationresult = np.array(Populationresult).T
        Svariancelist = np.zeros(len(Populationresult))
        NstateSpin = NionstateSpinlist(ionnumber)
        for i, result in enumerate(Populationresult):
            Svariancelist[i] = Populationresult[i].dot(NstateSpin ** 2) - (Populationresult[i].dot(NstateSpin)) ** 2

        if stdcacluation_flag:
            # Bootstrap one standard deviation of the variance per scan point.
            stdSvariance = np.zeros(len(list_rawcountsvalues))
            for i, result in enumerate(list_rawcountsvalues):
                stdSvariance[i] = bootstrap_std_singledataset(dataset=result, function=datasetSpinVarience, repeat=repeat,
                                                            ionnumber=ionnumber, correction_flag=correction_flag,
                                                            ionfidelity_list=ionfidelity_list,correction_method=correction_method)
    else:
        # Large-system path: variance per dataset directly from the shots.
        Svariancelist = np.zeros(len(list_rawcountsvalues))
        for i, dataset in enumerate(list_rawcountsvalues):
            Svariancelist[i] = datasetSpinVarience(dataset=dataset,repeat=repeat, 
                                                   ionnumber=ionnumber,correction_flag=correction_flag,
                                                   ionfidelity_list=ionfidelity_list,correction_method=correction_method)
        if stdcacluation_flag:
            stdSvariance = np.zeros(len(list_rawcountsvalues))
            for i, result in enumerate(list_rawcountsvalues):
                stdSvariance[i] = bootstrap_std_singledataset(dataset=result, function=datasetSpinVarience, repeat=repeat,
                                                            ionnumber=ionnumber, correction_flag=correction_flag,
                                                            ionfidelity_list=ionfidelity_list,correction_method=correction_method)
    plt.figure(figsize=(10, 6))
    plt.scatter(time_coefficient, Svariancelist, label='real Data', color='red')
    #plt.plot(time_coefficient, Svariancelist, label='real Data', color='red')
    plt.title(r'<$(\Delta S)^2$>')
    plt.xlabel(r'$\Phi(*\pi)$')
    plt.ylabel(r'<$(\Delta S)^2$>')
    print('实验上测出的< (ΔS)^2 >最小时候的Φ角度值，以及< (ΔS)^2 >值', time_coefficient[np.argmin(Svariancelist)],
          np.min(Svariancelist))
    # Dense grid (in units of pi) over the scanned range for locating the
    # fitted minimum and maximum.
    detaildtime_coefficient=np.linspace(np.min(time_coefficient),np.max(time_coefficient),1000)

    if stdcacluation_flag:
        # Weighted fit; propagate the parameter covariance to the curve minimum.
        _, __, params, params_covariance =calculate_Y_with_uncertainty_fit (t=np.pi*np.array(time_coefficient), X=Svariancelist, deltaX=stdSvariance, function=Svariencemeasurefunction, p0=[1, 1, 1])
        minSvariance,sigma_minSvariance,t0=Xmin_uncertainty_linear(t_grid=np.pi*detaildtime_coefficient,function= Svariencemeasurefunction, popt=params, pcov=params_covariance, eps_rel=1e-6)
        popt=params
        # Convert the minimizing angle back to units of pi for reporting.
        t0=t0/np.pi
    else:
        popt, _ = curve_fit(Svariencemeasurefunction, np.pi*np.array(time_coefficient), Svariancelist)
        minSvariance=np.min(Svariencemeasurefunction(np.pi*detaildtime_coefficient, *popt))
        t0= detaildtime_coefficient[np.argmin(Svariencemeasurefunction(np.pi*detaildtime_coefficient, *popt))]
    tmax=detaildtime_coefficient[np.argmax(Svariencemeasurefunction(np.pi*detaildtime_coefficient, *popt))]
    if stdcacluation_flag:
        plt.plot(detaildtime_coefficient, Svariencemeasurefunction(np.pi*detaildtime_coefficient, *popt), label='Fit ', color='blue')
        print('数据拟合的< (ΔS)^2 >最小时候的Φ角度值，以及< (ΔS)^2 >值', t0, minSvariance)
        print('数据拟合的< (ΔS)^2 >的标准差',sigma_minSvariance)
        print('数据拟合的< (ΔS)^2 >的最大值对应的角度值',tmax)
        plt.errorbar(time_coefficient, Svariancelist, yerr=stdSvariance,  fmt='none', color='red', label='exp Data errorbar',capsize=4)
        plt.errorbar([t0], [minSvariance], yerr=sigma_minSvariance,  fmt='none', color='blue', label='Fit minmum errorbar',capsize=4)
    else:
        plt.plot(detaildtime_coefficient, Svariencemeasurefunction(np.pi*detaildtime_coefficient, *popt), label='Fit ', color='blue')
        print('数据拟合的< (ΔS)^2 >最小时候的Φ角度值，以及< (ΔS)^2 >值',t0,minSvariance)
        print('数据拟合的< (ΔS)^2 >的最大值对应的角度值',tmax)
    plt.show()
    
    if stdcacluation_flag:
        return minSvariance,sigma_minSvariance,t0,tmax
    else:
        return minSvariance,t0,tmax


def _qfi_and_angle_from_moments(sx_mean, sy_mean, sx2_mean, sy2_mean, sxy2_mean):
    """
    Return (QFI estimate, optimal angle) from measured spin moments.

    Builds the 2x2 covariance matrix Gamma_Q of (Sx, Sy); the off-diagonal
    uses the second moment measured along the diagonal direction:
        cov = <S_xy^2> - (<Sx^2> + <Sy^2>)/2 - <Sx><Sy>
    The eigenvalue of largest magnitude is the QFI estimate and its
    eigenvector gives the optimal measurement angle.
    """
    cross = (sxy2_mean - 0.5 * (sx2_mean + sy2_mean)) - sx_mean * sy_mean
    gamma = np.array(
        [[sx2_mean - sx_mean ** 2, cross],
         [cross, sy2_mean - sy_mean ** 2]],
        dtype=np.complex128,
    )
    eigvals, eigvecs = np.linalg.eigh(gamma)
    idx = int(np.argmax(np.abs(eigvals)))
    # BUGFIX: np.linalg.eigh returns eigenvectors as COLUMNS of `eigvecs`;
    # the original code indexed a row (eigv[idx]) and so used the wrong
    # vector for the angle. Taking .real also avoids the complex->float
    # casting warning (gamma is Hermitian with real entries here).
    direction = np.asarray(eigvecs[:, idx].real, dtype=float)
    return float(np.abs(eigvals[idx])), float(np.arctan2(direction[1], direction[0]))


def plotTFIGHZQFIandangle(filename,correction_flag=False,ionfidelity_list=None,correction_method=1):
    """
    Plot the quantum Fisher information (QFI) estimate and the corresponding
    optimal measurement angle versus interaction time for a TFI-GHZ dataset.

    Parameters
    ----------
    filename : str
        Path of the HDF5 data file read by read_TFIGHZQFI_hdf5.
    correction_flag : bool, optional
        Whether to apply the detection-error correction, by default False.
    ionfidelity_list : list, optional
        ionfidelity_list[ionindex] = [dark_fidelity, bright_fidelity],
        by default None.
    correction_method : int, optional
        Correction algorithm selector; method 3 forces the large-N code
        path, by default 1.

    Returns
    -------
    anglelist : ndarray
        Optimal angle (radians) per interaction time.
    QFIlist : ndarray
        QFI estimate per interaction time.
    """
    Interaction_timelist, SxandSx2list, SyandSy2list, SxSyandSxSy2list = read_TFIGHZQFI_hdf5(filename)
    length = len(Interaction_timelist)
    repeat, ionnumber = SxandSx2list[0].shape
    QFIlist = np.zeros(length, dtype=float)
    anglelist = np.zeros(length, dtype=float)

    if ionnumber <= 14 and correction_method != 3:
        # Small-system path: moments from the full 2**N state populations.
        if correction_flag:
            assert ionfidelity_list is not None, "warning:ionfidelity_list is None"
            SxPopulationresult = dataprocess_with_detection_error_croection(
                ionfidelity_list, SxandSx2list, target='state', correction_method=correction_method)
            SyPopulationresult = dataprocess_with_detection_error_croection(
                ionfidelity_list, SyandSy2list, target='state', correction_method=correction_method)
            SxSyPopulationresult = dataprocess_with_detection_error_croection(
                ionfidelity_list, SxSyandSxSy2list, target='state', correction_method=correction_method)
        else:
            SxPopulationresult = dataprocess(SxandSx2list, target='state')
            SyPopulationresult = dataprocess(SyandSy2list, target='state')
            SxSyPopulationresult = dataprocess(SxSyandSxSy2list, target='state')
        # After the transpose, row i holds the state-population vector of
        # interaction time i.
        SxPopulationresult = np.array(SxPopulationresult).T
        SyPopulationresult = np.array(SyPopulationresult).T
        SxSyPopulationresult = np.array(SxSyPopulationresult).T
        NstateSpin = NionstateSpinlist(ionnumber)
        NstateSpin2 = NstateSpin ** 2
        for i in range(length):
            sx = SxPopulationresult[i].dot(NstateSpin)
            sy = SyPopulationresult[i].dot(NstateSpin)
            sx2 = SxPopulationresult[i].dot(NstateSpin2)
            sy2 = SyPopulationresult[i].dot(NstateSpin2)
            sxy2 = SxSyPopulationresult[i].dot(NstateSpin2)
            QFIlist[i], anglelist[i] = _qfi_and_angle_from_moments(sx, sy, sx2, sy2, sxy2)
    else:
        # Large-system path: moments per dataset directly from the shots.
        for i in range(length):
            sx = datasetToTotalSpin(dataset=SxandSx2list[i], repeat=repeat,
                                    ionnumber=ionnumber, correction_flag=correction_flag,
                                    ionfidelity_list=ionfidelity_list, correction_method=correction_method)
            sy = datasetToTotalSpin(dataset=SyandSy2list[i], repeat=repeat,
                                    ionnumber=ionnumber, correction_flag=correction_flag,
                                    ionfidelity_list=ionfidelity_list, correction_method=correction_method)
            sx2 = datasetToTotalSpinsquare(dataset=SxandSx2list[i], repeat=repeat,
                                           ionnumber=ionnumber, correction_flag=correction_flag,
                                           ionfidelity_list=ionfidelity_list, correction_method=correction_method)
            sy2 = datasetToTotalSpinsquare(dataset=SyandSy2list[i], repeat=repeat,
                                           ionnumber=ionnumber, correction_flag=correction_flag,
                                           ionfidelity_list=ionfidelity_list, correction_method=correction_method)
            sxy2 = datasetToTotalSpinsquare(dataset=SxSyandSxSy2list[i], repeat=repeat,
                                            ionnumber=ionnumber, correction_flag=correction_flag,
                                            ionfidelity_list=ionfidelity_list, correction_method=correction_method)
            QFIlist[i], anglelist[i] = _qfi_and_angle_from_moments(sx, sy, sx2, sy2, sxy2)

    plt.figure(figsize=(10, 6))
    plt.scatter(Interaction_timelist, QFIlist, label='QFI', color='red')
    plt.title(r'QFI')
    plt.xlabel(r'Interaction_time')
    plt.ylabel(r'QFI')
    plt.show()
    plt.figure(figsize=(10, 6))
    plt.scatter(Interaction_timelist, anglelist, label='angle', color='red')
    plt.title(r'angle')
    plt.xlabel(r'Interaction_time')
    plt.ylabel(r'angle')
    plt.show()
    print('实验上测出的QFI最大时候的交流时间，以及QFI值,以及此时的角度(弧度制)，角度是pi的多少倍数', Interaction_timelist[np.argmax(QFIlist)], np.max(QFIlist),anglelist[np.argmax(QFIlist)]/np.pi)
    return anglelist,QFIlist


def Xmin_uncertainty_linear(t_grid,function, popt, pcov, eps_rel=1e-6):
    """
    Linear error propagation to the minimum, holding t0 fixed.

    Evaluates ``function`` on ``t_grid`` with the fitted parameters,
    locates the grid point ``t0`` of the minimum, then propagates the
    parameter covariance ``pcov`` to the function value at ``t0`` using
    a central finite-difference gradient in parameter space.

    Parameters
    ----------
    t_grid : array_like
        Grid of t values on which the minimum is searched.
    function : callable
        Model of signature ``function(t, *params)``.
    popt : sequence of float
        Fitted parameter values.
    pcov : ndarray
        Parameter covariance matrix (e.g. from ``curve_fit``).
    eps_rel : float, optional
        Relative finite-difference step, by default 1e-6.

    Returns
    -------
    Xmin : float
        Minimum of the model over ``t_grid``.
    sigma_Xmin : float
        Propagated 1-sigma uncertainty of that minimum.
    t0 : float
        Grid point at which the minimum occurs.
    """
    
    Xlist = function(t_grid, *popt)
    idx = np.argmin(Xlist)
    t0 = t_grid[idx]
    
    # Central finite-difference gradient w.r.t. each parameter, at fixed t0.
    p0 = np.array(popt, dtype=float)
    grad = np.zeros_like(p0)
    for j in range(len(p0)):
        dp = np.zeros_like(p0)
        # Relative step for non-zero parameters; for a parameter that is
        # exactly zero fall back to the absolute step eps_rel.  (Previously
        # the zero case used eps_rel**2 ~ 1e-12, a step so small that the
        # central difference suffers catastrophic cancellation.)
        dp[j] = eps_rel * abs(p0[j]) if p0[j] != 0 else eps_rel
        f_plus  = function(t0, *(p0 + dp))
        f_minus = function(t0, *(p0 - dp))
        grad[j] = (f_plus - f_minus) / (2*dp[j])
    
    # First-order propagation: var = g^T C g.
    var_Xmin = grad @ pcov @ grad
    sigma_Xmin = np.sqrt(var_Xmin)
    return Xlist[idx], sigma_Xmin, t0


def Svariencemeasurefunction(theta, A, B, C):
    """Fit model for the measured spin variance vs. quadrature angle:
    A*sin(theta)**2 + B*cos(theta)**2 + C*sin(theta)*cos(theta)."""
    sin_t = np.sin(theta)
    cos_t = np.cos(theta)
    return A * sin_t**2 + B * cos_t**2 + C * sin_t * cos_t


def plotTotalSpinDistribution(Sresultlist,name,correction_flag=False,ionfidelity_list=None,correction_method=1,fit_flag=True):
    """
    Plot the measured distribution of the total spin S.

    Given single-shot measurement datasets (e.g. one squeezed and one
    coherent run), draw their total-spin histograms in one figure and
    optionally overlay a Gaussian fit, so squeezing can be judged from
    the widths of the distributions.

    Parameters
    ----------
    Sresultlist : list
        Measured datasets; each entry has shape [repeat, ionnumber]
        (single-run raw 0/1 detection results).
    name : list
        Labels for the datasets, same length as ``Sresultlist``.
    correction_flag : bool, optional
        Whether to apply detection-error correction, by default False.
    ionfidelity_list : list, optional
        ionfidelity_list[ionindex] = [dark_fidelity_of_this_ion,
        bright_fidelity_of_this_ion], by default None.
    correction_method : int, optional
        Correction scheme selector, by default 1.  Method 3 forces the
        per-shot (large ion number) branch below.
    fit_flag : bool, optional
        Whether to fit and plot a Gaussian over the histogram,
        by default True.
    """    
    plt.figure(figsize=(10, 6))

    for Sresult,Sname in zip(Sresultlist,name):
        repeat, ionnumber = Sresult.shape
        # Small systems: work in the state-population basis produced by
        # dataprocess / NionstateSpinlist (presumably one entry per N-ion
        # basis state -- TODO confirm against those helpers).
        if ionnumber <=14 and correction_method!=3 :
            if correction_flag:
                assert ionfidelity_list is not None, "warning:ionfidelity_list is None"
                Populationresult = dataprocess_with_detection_error_croection(ionfidelity_list, [Sresult],
                                                                            target='state',correction_method=correction_method)
            else:
                Populationresult = dataprocess([Sresult], target='state')
            Populationresult = np.array(Populationresult).T
            Populationresult = Populationresult[0]
            # print(Populationresult)
            # Step 1: Calculate the unique integers and their corresponding frequencies
            NstateSpin = NionstateSpinlist(ionnumber)
            unique_integers = np.unique(NstateSpin)
            summed_frequencies = np.zeros_like(unique_integers, dtype=float)
            # Summing frequencies for duplicate integers
            for i, integer in enumerate(unique_integers):
                summed_frequencies[i] = np.sum(Populationresult[NstateSpin == integer])
            
            print("mean",Populationresult.dot(NstateSpin))
            print("varence",Populationresult.dot(NstateSpin**2)-(Populationresult.dot(NstateSpin))**2)
            # Step 3: Fit a Gaussian distribution to the histogram data
            # Fit using curve_fit to the histogram data (summed frequencies as y-values)
            if fit_flag:
                popt, _ = curve_fit(gaussian_func, unique_integers, summed_frequencies,
                                    p0=[np.max(summed_frequencies),Populationresult.dot(NstateSpin), np.sqrt(Populationresult.dot(NstateSpin**2)-(Populationresult.dot(NstateSpin))**2)])
                # Get the fitted Gaussian parameters
                amplitude,mu, sigma  = popt
                print(f"Fitted Gaussian parameters: mu = {mu}, sigma = { sigma}, amplitude = {amplitude}")
                # Step 4: Plot the Gaussian fit
                x_range = np.linspace(min(unique_integers), max(unique_integers), 1000)
                plt.plot(x_range, gaussian_func(x_range, *popt) , label=f'{Sname} Var {  (Populationresult.dot(NstateSpin**2)-(Populationresult.dot(NstateSpin))**2):.2f}')
        else:
            # Large systems: histogram the total spin S per shot directly.
            # S takes values -N, -N+2, ..., N for N ions.
            unique_integers = np.arange(-ionnumber,ionnumber+1,2)
            summed_frequencies = np.zeros_like(unique_integers, dtype=float)
            # Map 0/1 detection outcomes to per-ion spins -1/+1.
            spins = 2*np.array(Sresult) - 1
            S = spins.sum(axis=1)
            for i, integer in enumerate(unique_integers):
                summed_frequencies[i] = np.sum(S == integer)/len(S)

            if correction_flag and ionfidelity_list is not None:
                # Invert the detection matrix, then project the result back
                # onto the probability simplex (non-negative, sums to 1).
                L=build_detection_matrix(ionfidelity_list)
                try:
                    correctionM = np.linalg.inv(L)
                except np.linalg.LinAlgError:
                    raise ValueError("Matrix is singular and cannot be inverted.")

                correctedprobabilities_initalguess = correctionM.dot(summed_frequencies)
                Q_init = correctedprobabilities_initalguess / np.sum(correctedprobabilities_initalguess)
                constraints = {'type': 'eq', 'fun': lambda Q: np.sum(Q) - 1}
                # Bounds:Q should be non-negative (e.g., Q >= 0)
                bounds = [(0, 1) for _ in range(len(summed_frequencies))]  #
                # Solve the optimization problem
                # NOTE(review): `minimize` is not among the imports visible at the top
                # of this file -- confirm scipy.optimize.minimize is imported elsewhere.
                result_correctedprobabilities = minimize(lambda Q: np.linalg.norm(Q - correctionM.dot(summed_frequencies)),
                                                        Q_init, constraints=constraints, bounds=bounds)
                if result_correctedprobabilities.success:
                    correctedprobabilities = result_correctedprobabilities.x
                else:
                    raise ValueError("Optimization did not converge.")
                totalspin=correctedprobabilities.dot(unique_integers)
                spinvarience = np.sum(correctedprobabilities*np.square(unique_integers)) - np.square(totalspin)
                summed_frequencies=correctedprobabilities
            else:
                totalspin=np.mean(S)
                S2 = S**2
                spinvarience = np.mean(S2) - (np.mean(S))**2
            print("mean",totalspin)
            print("varence",spinvarience)
            if fit_flag:
                popt, _ = curve_fit(gaussian_func, unique_integers, summed_frequencies,
                                    p0=[np.max(summed_frequencies),totalspin, np.sqrt(spinvarience)])
                # Get the fitted Gaussian parameters
                amplitude,mu, sigma  = popt
                print(f"Fitted Gaussian parameters: mu = {mu}, sigma = { sigma}, amplitude = {amplitude}")
                # Step 4: Plot the Gaussian fit
                x_range = np.linspace(min(unique_integers), max(unique_integers), 1000)
                plt.plot(x_range, gaussian_func(x_range, *popt) , label=f'{Sname} Var {  (spinvarience):.2f}')
            
        # Step 2: Plot the histogram of the frequency distribution
        # Set the width of each bar
        bar_width = 1  # Adjust this value based on how much space you want between bars
        # Create a histogram with bars only at the positions specified in A
        plt.bar(unique_integers  , summed_frequencies, width=bar_width, align='center',alpha=0.5)
        # plt.hist(unique_integers, weights=summed_frequencies, bins=len(unique_integers), alpha=0.6,
        #          label=f'{Sname}')
    # Add labels and legend
    plt.xlabel("total Spin S")
    plt.ylabel("occurrences")
    plt.legend()
    plt.title("Spin S  distribution")
    plt.show()



def plotMSEmeasurements(MSEfilelist,name,totalspinfilelist,time_coefficient,centerphiangle=5/4,correction_flag=False,ionfidelity_list=None,correction_method=1,stdcacluation_flag=False,formation='db'):
    """
    Plot the phase-estimation mean-squared error (MSE) versus the
    scanned phase for each dataset.

    For every file the scanned parameter is re-centered as
    ``phi = scanparameter - centerphiangle*pi`` and the MSE is computed
    from the measured <S>, <S**2> and the total spin S_tot as

        MSE = phi**2 + 2*<S>*phi/S_tot + <S**2>/S_tot**2

    which is the expansion of E[(phi + S/S_tot)**2].

    Parameters
    ----------
    MSEfilelist : list
        Data-file paths of the phase-scan (MSE) measurements.
    name : list
        Labels for the datasets, same length as ``MSEfilelist``.
    totalspinfilelist : list
        Data-file paths from which the total spin S_tot of each dataset
        is extracted via ``plotTotalSpin``.
    time_coefficient :
        Forwarded unchanged to ``plotTotalSpin``.
    centerphiangle : float, optional
        Center of the phase scan, in units of pi, by default 5/4.
    correction_flag : bool, optional
        Whether to apply detection-error correction, by default False.
    ionfidelity_list : list, optional
        ionfidelity_list[ionindex] = [dark_fidelity, bright_fidelity],
        by default None.
    correction_method : int, optional
        Correction scheme selector, by default 1.
    stdcacluation_flag : bool, optional
        If True, bootstrap error bars are computed and drawn,
        by default False.
    formation : str, optional
        'db' converts the MSE to decibels (10*log10), by default 'db'.
    """   
    # Error-bar colors; only the first len(colorlist) datasets get one.
    colorlist=['blue','red']
    totalspinresultlisyt=[]
    # Extract S_tot (and optionally its std) for each dataset first.
    for totalspinfilename in totalspinfilelist:
        totalspinresult=plotTotalSpin(totalspinfilename,time_coefficient=time_coefficient,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method,stdcacluation_flag=stdcacluation_flag)
        totalspinresultlisyt.append(totalspinresult)
    plt.figure(figsize=(10, 6))
    for filename,Sname,totalspinfilename,totalspinresult,color in zip(MSEfilelist,name,totalspinfilelist,totalspinresultlisyt,colorlist):
        list_scanparameter, list_rawcountsvalues, scanparametername = raw_count(path=filename)
        assert len(list_rawcountsvalues) > 0
        # Re-center the scanned phase around centerphiangle*pi.
        list_scanparameter=np.array(list_scanparameter)-centerphiangle*np.pi
        repeat, ionnumber = list_rawcountsvalues[0].shape
        # Small systems: compute <S>, <S^2> from the state-population basis.
        if ionnumber <=14 and correction_method!=3:
            
            if correction_flag:
                assert ionfidelity_list is not None, "warning:ionfidelity_list is None"
                Populationresult = dataprocess_with_detection_error_croection(ionfidelity_list, list_rawcountsvalues, target='state',correction_method=correction_method)
            else:
                Populationresult = dataprocess(list_rawcountsvalues, target='state')
            if stdcacluation_flag:
                stdS=np.zeros(len(list_rawcountsvalues))
                stdS2=np.zeros(len(list_rawcountsvalues))
                for i, result in enumerate( list_rawcountsvalues):
                    stdS[i]=bootstrap_std_singledataset(dataset=result, function=datasetToTotalSpin,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
                    stdS2[i]=bootstrap_std_singledataset(dataset=result, function=datasetToTotalSpinsquare,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
            # Populationresult structure: Populationresult[stateindex] = list of
            # state populations, one entry per value of list_scanparameter.
            Populationresult=np.array(Populationresult).T
            averageS=np.zeros(len(Populationresult))
            averageS2=np.zeros(len(Populationresult))
            NstateSpin=NionstateSpinlist(ionnumber)
            for i, result in enumerate(Populationresult):
                averageS[i]=Populationresult[i].dot(NstateSpin)
                averageS2[i]=Populationresult[i].dot(NstateSpin**2)
            
        else:
            # Large systems: compute <S> and <S^2> per dataset directly.
            averageS=np.zeros(len(list_rawcountsvalues))
            averageS2=np.zeros(len(list_rawcountsvalues))
            for i, dataset in enumerate(list_rawcountsvalues):
                averageS[i]=datasetToTotalSpin(dataset=dataset,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
                averageS2[i]=datasetToTotalSpinsquare(dataset=dataset,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
            if stdcacluation_flag:
                stdS=np.zeros(len(list_rawcountsvalues))
                stdS2=np.zeros(len(list_rawcountsvalues))
                for i, result in enumerate( list_rawcountsvalues):
                    stdS[i]=bootstrap_std_singledataset(dataset=result, function=datasetToTotalSpin,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
                    stdS2[i]=bootstrap_std_singledataset(dataset=result, function=datasetToTotalSpinsquare,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
        # plotTotalSpin returns (S_tot, std) when stdcacluation_flag, else S_tot.
        if stdcacluation_flag:
            totalspin, stdtotalspin = totalspinresult
        else:
            totalspin=totalspinresult
        MSE=list_scanparameter**2+2*averageS*list_scanparameter/totalspin+averageS2/totalspin**2
        if formation=='db':
            MSE=10*np.log10(MSE)
        plt.scatter(list_scanparameter,MSE, label=f'{Sname} MSE')
        plt.plot(list_scanparameter,MSE, label=f'{Sname} MSE')
        if stdcacluation_flag:
            # F_and_uncertainty returns (D, stdD, F, stdF); stdMSE2 is the dB-scale error.
            _, stdMSE1,_,stdMSE2 = F_and_uncertainty(A=list_scanparameter,C=averageS, stdC=stdS,B=totalspin, stdB=stdtotalspin,E=averageS2, stdE=stdS2)
            if formation=='db':
                stdMSE=stdMSE2
            else:
                stdMSE=stdMSE1
            plt.errorbar(list_scanparameter, MSE, yerr=stdMSE,  fmt='none',color=color, capsize=4)
    plt.title('MSE')
    plt.xlabel(r'$\Phi$')
    plt.ylabel('MSE')
    plt.legend()
    plt.show()


def plotMSEmeasurementsshift(MSEfilelist,name,totalspinfilelist,time_coefficient,phishiftlist,centerphiangle=5/4,correction_flag=False,ionfidelity_list=None,correction_method=1,stdcacluation_flag=False,formation='db'):
    """
    Variant of ``plotMSEmeasurements`` with a per-dataset phase shift.

    Identical pipeline, except that dataset k is re-centered as
    ``phi = scanparameter - (centerphiangle + phishiftlist[k])*pi`` and
    the error bars use matplotlib's default color cycle instead of a
    fixed color list.

    Parameters
    ----------
    MSEfilelist, name, totalspinfilelist, time_coefficient : see
        ``plotMSEmeasurements``.
    phishiftlist : list
        Additional per-dataset phase offsets, in units of pi.
    centerphiangle : float, optional
        Common scan center in units of pi, by default 5/4.
    correction_flag, ionfidelity_list, correction_method,
    stdcacluation_flag, formation : see ``plotMSEmeasurements``.
    """
    # colorlist=['blue','red']
    totalspinresultlisyt=[]
    # Extract S_tot (and optionally its std) for each dataset first.
    for totalspinfilename in totalspinfilelist:
        totalspinresult=plotTotalSpin(totalspinfilename,time_coefficient=time_coefficient,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method,stdcacluation_flag=stdcacluation_flag)
        totalspinresultlisyt.append(totalspinresult)
    plt.figure(figsize=(10, 6))
    for filename,Sname,totalspinfilename,totalspinresult,phishift in zip(MSEfilelist,name,totalspinfilelist,totalspinresultlisyt,phishiftlist):
        list_scanparameter, list_rawcountsvalues, scanparametername = raw_count(path=filename)
        assert len(list_rawcountsvalues) > 0
        # Re-center the scanned phase with the dataset-specific shift.
        list_scanparameter=np.array(list_scanparameter)-(centerphiangle+phishift)*np.pi
        repeat, ionnumber = list_rawcountsvalues[0].shape
        # Small systems: compute <S>, <S^2> from the state-population basis.
        if ionnumber <=14 and correction_method!=3:

            if correction_flag:
                assert ionfidelity_list is not None, "warning:ionfidelity_list is None"
                Populationresult = dataprocess_with_detection_error_croection(ionfidelity_list, list_rawcountsvalues, target='state',correction_method=correction_method)
            else:
                Populationresult = dataprocess(list_rawcountsvalues, target='state')
            if stdcacluation_flag:
                stdS=np.zeros(len(list_rawcountsvalues))
                stdS2=np.zeros(len(list_rawcountsvalues))
                for i, result in enumerate( list_rawcountsvalues):
                    stdS[i]=bootstrap_std_singledataset(dataset=result, function=datasetToTotalSpin,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
                    stdS2[i]=bootstrap_std_singledataset(dataset=result, function=datasetToTotalSpinsquare,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
            # Populationresult structure: Populationresult[stateindex] = list of
            # state populations, one entry per value of list_scanparameter.
            Populationresult=np.array(Populationresult).T
            averageS=np.zeros(len(Populationresult))
            averageS2=np.zeros(len(Populationresult))
            NstateSpin=NionstateSpinlist(ionnumber)
            for i, result in enumerate(Populationresult):
                averageS[i]=Populationresult[i].dot(NstateSpin)
                averageS2[i]=Populationresult[i].dot(NstateSpin**2)

        else:
            # Large systems: compute <S> and <S^2> per dataset directly.
            averageS=np.zeros(len(list_rawcountsvalues))
            averageS2=np.zeros(len(list_rawcountsvalues))
            for i, dataset in enumerate(list_rawcountsvalues):
                averageS[i]=datasetToTotalSpin(dataset=dataset,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
                averageS2[i]=datasetToTotalSpinsquare(dataset=dataset,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
            if stdcacluation_flag:
                stdS=np.zeros(len(list_rawcountsvalues))
                stdS2=np.zeros(len(list_rawcountsvalues))
                for i, result in enumerate( list_rawcountsvalues):
                    stdS[i]=bootstrap_std_singledataset(dataset=result, function=datasetToTotalSpin,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
                    stdS2[i]=bootstrap_std_singledataset(dataset=result, function=datasetToTotalSpinsquare,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
        # plotTotalSpin returns (S_tot, std) when stdcacluation_flag, else S_tot.
        if stdcacluation_flag:
            totalspin, stdtotalspin = totalspinresult
        else:
            totalspin=totalspinresult
        MSE=list_scanparameter**2+2*averageS*list_scanparameter/totalspin+averageS2/totalspin**2
        if formation=='db':
            MSE=10*np.log10(MSE)
        plt.scatter(list_scanparameter,MSE, label=f'{Sname} MSE')
        plt.plot(list_scanparameter,MSE, label=f'{Sname} MSE')
        if stdcacluation_flag:
            # F_and_uncertainty returns (D, stdD, F, stdF); stdMSE2 is the dB-scale error.
            _, stdMSE1,_,stdMSE2 = F_and_uncertainty(A=list_scanparameter,C=averageS, stdC=stdS,B=totalspin, stdB=stdtotalspin,E=averageS2, stdE=stdS2)
            if formation=='db':
                stdMSE=stdMSE2
            else:
                stdMSE=stdMSE1
            plt.errorbar(list_scanparameter, MSE, yerr=stdMSE,  fmt='none', capsize=4)
    plt.title('MSE')
    plt.xlabel(r'$\Phi$')
    plt.ylabel('MSE')
    plt.legend()
    plt.show()

def D_and_uncertainty(A,
                      C, stdC,
                      B, stdB,
                      E, stdE,
                      covCB=None, covCE=None, covBE=None):
    """
    Evaluate D = A**2 + 2*A*C/B + E/B**2 and propagate its 1-sigma error.

    Parameters
    ----------
    A        : float
        Exact (error-free) constant.
    C, B, E  : float or ndarray
        Measured quantities.
    stdC, stdB, stdE : float or ndarray
        Their 1-sigma standard deviations.
    covCB, covCE, covBE : float or ndarray, optional
        Pairwise covariances; None means the pair is independent.

    Returns
    -------
    D      : value(s) of D
    stdD   : propagated 1-sigma uncertainty of D

    Raises
    ------
    ZeroDivisionError
        If any element of B is zero.
    """
    C = np.asarray(C, dtype=float)
    B = np.asarray(B, dtype=float)
    E = np.asarray(E, dtype=float)
    sigC = np.asarray(stdC, dtype=float)
    sigB = np.asarray(stdB, dtype=float)
    sigE = np.asarray(stdE, dtype=float)

    if np.any(B == 0):
        raise ZeroDivisionError("B must be non-zero.")

    # nominal value
    D = A**2 + 2*A*C/B + E/B**2

    # gradient of D with respect to (C, B, E)
    dD_dC = 2*A / B
    dD_dB = -2*A*C / B**2 - 2*E / B**3
    dD_dE = 1.0 / B**2

    # independent-variable contributions
    varD = (dD_dC * sigC)**2 + (dD_dB * sigB)**2 + (dD_dE * sigE)**2

    # optional covariance cross terms, added in a fixed order
    for cov, g1, g2 in ((covCB, dD_dC, dD_dB),
                        (covCE, dD_dC, dD_dE),
                        (covBE, dD_dB, dD_dE)):
        if cov is not None:
            varD = varD + 2 * g1 * g2 * np.asarray(cov)

    return D, np.sqrt(varD)




def F_and_uncertainty(A,
                      C, stdC,
                      B, stdB,
                      E, stdE,
                      covCB=None, covCE=None, covBE=None):
    """
    Propagate the uncertainties of C, B, E to

        D = A**2 + 2*A*C/B + E/B**2
        F = 10*log10(D)          (dB scale)

    Parameters
    ----------
    A        : float            (exact constant)
    C, B, E  : float or ndarray (measurements)
    stdC, stdB, stdE : float or ndarray (1-sigma uncertainties)
    covCB, covCE, covBE : float or ndarray, optional
        Covariances; leave None if the variables are independent.

    Returns
    -------
    D, stdD : value and 1-sigma of D
    F, stdF : value and 1-sigma of F (dB)

    Raises
    ------
    ZeroDivisionError
        If any element of B is zero.
    ValueError
        If D is not strictly positive (log10 undefined).
    """
    C = np.asarray(C, dtype=float)
    B = np.asarray(B, dtype=float)
    E = np.asarray(E, dtype=float)
    sigC = np.asarray(stdC, dtype=float)
    sigB = np.asarray(stdB, dtype=float)
    sigE = np.asarray(stdE, dtype=float)

    if np.any(B == 0):
        raise ZeroDivisionError("B must not be zero.")

    # nominal values
    D = A**2 + 2*A*C/B + E/B**2
    if np.any(D <= 0):
        raise ValueError("D must be positive for log10().")
    F = 10.0 * np.log10(D)

    # gradient of D with respect to (C, B, E)
    dD_dC = 2*A / B
    dD_dB = -2*A*C / B**2 - 2*E / B**3
    dD_dE = 1.0 / B**2

    # variance of D: independent terms, then optional covariance cross terms
    varD = (dD_dC * sigC)**2 + (dD_dB * sigB)**2 + (dD_dE * sigE)**2
    for cov, g1, g2 in ((covCB, dD_dC, dD_dB),
                        (covCE, dD_dC, dD_dE),
                        (covBE, dD_dB, dD_dE)):
        if cov is not None:
            varD = varD + 2 * g1 * g2 * np.asarray(cov)
    stdD = np.sqrt(varD)

    # F = (10/ln 10) * ln(D)  =>  sigma_F = (10/ln 10) * sigma_D / D
    scale = 10.0 / np.log(10.0)
    stdF = scale * stdD / D

    return D, stdD, F, stdF



def plotS0population(interactiontimelist,S0populationdatalst,correction_flag=False,ionfidelity_list=None,correction_method=1):
    """
    Plot the population of the total-spin S = 0 sector versus interaction time.

    Parameters
    ----------
    interactiontimelist : array_like
        Interaction times (x axis), one per dataset.
    S0populationdatalst : list
        Raw-count datasets, each of shape [repeat, ionnumber].
    correction_flag : bool, optional
        Whether to apply detection-error correction, by default False.
    ionfidelity_list : list, optional
        ionfidelity_list[ionindex] = [dark_fidelity, bright_fidelity],
        by default None.
    correction_method : int, optional
        Correction scheme selector, by default 1.  Method 3 forces the
        per-shot (large ion number) branch below.
    """
    plt.figure(figsize=(10, 6))
    repeat, ionnumber = S0populationdatalst[0].shape
    # Small systems: sum the populations of all basis states with spin 0.
    if ionnumber <=14 and correction_method!=3 :
        if correction_flag:
            assert ionfidelity_list is not None, "warning:ionfidelity_list is None"
            Populationresult = dataprocess_with_detection_error_croection(ionfidelity_list, S0populationdatalst,
                                                                        target='state',correction_method=correction_method)
        else:
            Populationresult = dataprocess(S0populationdatalst, target='state')
        # Populationresult structure: Populationresult[stateindex] = list of
        # state populations, one entry per value of list_scanparameter.
        NstateSpin = NionstateSpinlist(ionnumber)
        # NOTE(review): unlike the sibling plotting functions, Populationresult is
        # masked here without np.array(...).T first -- confirm dataprocess returns
        # an array indexed [state, scanpoint] so this boolean mask is valid.
        S0population= np.sum(Populationresult[NstateSpin == 0],axis=0)
    else:
        # Large systems: histogram total spin S per shot and read off the
        # relative frequency of S == 0.
        S0population=np.zeros(len(S0populationdatalst))
        unique_integers = np.arange(-ionnumber,ionnumber+1,2)
        for i in range(len(S0populationdatalst)):
            summed_frequencies = np.zeros_like(unique_integers, dtype=float)
            # Map 0/1 detection outcomes to per-ion spins -1/+1.
            spins = 2*np.array(S0populationdatalst[i]) - 1
            S = spins.sum(axis=1)
            for j, integer in enumerate(unique_integers):
                summed_frequencies[j] = np.sum(S == integer)/len(S)

            if correction_flag and ionfidelity_list is not None:
                # Invert the detection matrix, then project the result back
                # onto the probability simplex (non-negative, sums to 1).
                L=build_detection_matrix(ionfidelity_list)
                try:
                    correctionM = np.linalg.inv(L)
                except np.linalg.LinAlgError:
                    raise ValueError("Matrix is singular and cannot be inverted.")

                correctedprobabilities_initalguess = correctionM.dot(summed_frequencies)
                Q_init = correctedprobabilities_initalguess / np.sum(correctedprobabilities_initalguess)
                constraints = {'type': 'eq', 'fun': lambda Q: np.sum(Q) - 1}
                # Bounds:Q should be non-negative (e.g., Q >= 0)
                bounds = [(0, 1) for _ in range(len(summed_frequencies))]  #
                # Solve the optimization problem
                # NOTE(review): `minimize` is not among the imports visible at the top
                # of this file -- confirm scipy.optimize.minimize is imported elsewhere.
                result_correctedprobabilities = minimize(lambda Q: np.linalg.norm(Q - correctionM.dot(summed_frequencies)),
                                                        Q_init, constraints=constraints, bounds=bounds)
                if result_correctedprobabilities.success:
                    correctedprobabilities = result_correctedprobabilities.x
                else:
                    raise ValueError("Optimization did not converge.")
                summed_frequencies=correctedprobabilities
            S0population[i]=summed_frequencies[unique_integers==0]
    plt.plot(interactiontimelist,S0population,label=f'S0 population')
    plt.xlabel("time")
    plt.ylabel("population")
    plt.legend()
    plt.title("S=0 population")
    plt.show()



def plotMeanfieldresult(thetalist,philist,meanfieldresult,interactiontype='OAT',operatorname='Sz',angelname="X",correction_flag=False,ionfidelity_list=None,stdcacluation_flag=False,correction_method=1,cyclefileflag=False):
    """
    Plot the mean-field observable <operator> versus the scan angle phi,
    one colored curve per theta value.

    Parameters
    ----------
    thetalist : list
        Theta values; each selects one dataset in ``meanfieldresult`` and
        sets the curve color via a coolwarm colormap.
    philist : list
        Phi values used as the x axis.
    meanfieldresult : list
        meanfieldresult[i] is the list of raw-count datasets measured at
        thetalist[i]; each dataset has shape [repeat, ionnumber].
    interactiontype : str, optional
        Title prefix, by default 'OAT'.
    operatorname : str, optional
        Operator name used for the y-axis label, by default 'Sz'.
    angelname : str, optional
        Angle-name subscript in the colorbar and x-axis labels, by default "X".
    correction_flag : bool, optional
        Whether to apply detection-error correction, by default False.
    ionfidelity_list : list, optional
        ionfidelity_list[ionindex] = [dark_fidelity, bright_fidelity],
        by default None.
    stdcacluation_flag : bool, optional
        If True, bootstrap error bars are drawn, by default False.
    correction_method : int, optional
        Correction scheme selector, by default 1.
    cyclefileflag : bool, optional
        Not used inside this function, by default False.
    """
    list_scanparameter=np.array(thetalist)
    
    # Color-code each theta curve with a coolwarm map over the theta range.
    norm = Normalize(vmin=np.min(list_scanparameter), vmax=np.max(list_scanparameter))
    cmap = plt.cm.coolwarm
    sm = ScalarMappable(norm=norm, cmap=cmap)
    sm.set_array([])    

    scanparametername = operatorname
    
    time_coefficient=np.array(philist)
    fig, ax = plt.subplots(figsize=(6, 4))

    # NOTE(review): the inner loops below rebind the loop variable `i`; the outer
    # `i` is only used before them (meanfieldresult[i]), so the shadowing is
    # currently harmless but fragile if this body is ever extended.
    for i,theta in enumerate(list_scanparameter):
        color = cmap(norm(theta))
        list_rawcountsvalues=np.array(meanfieldresult[i])
        assert len(list_rawcountsvalues) > 0
        repeat, ionnumber = list_rawcountsvalues[0].shape
        # Small systems: compute <S> from the state-population basis.
        if ionnumber <=14 and correction_method!=3:
            if correction_flag:
                assert ionfidelity_list is not None, "warning:ionfidelity_list is None"
                Populationresult = dataprocess_with_detection_error_croection(ionfidelity_list, list_rawcountsvalues, target='state',correction_method=correction_method)
            else:
                Populationresult = dataprocess(list_rawcountsvalues, target='state')
            if stdcacluation_flag:
                stdS=np.zeros(len(list_rawcountsvalues))
                for i, result in enumerate( list_rawcountsvalues):
                    stdS[i]=bootstrap_std_singledataset(dataset=result, function=datasetToTotalSpin,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)

            
            # Populationresult structure: Populationresult[stateindex] = list of
            # state populations, one entry per value of list_scanparameter.
            Populationresult=np.array(Populationresult).T
            averageS=np.zeros(len(Populationresult))
            NstateSpin=NionstateSpinlist(ionnumber)
            for i, result in enumerate(Populationresult):
                averageS[i]=Populationresult[i].dot(NstateSpin)
            
            
          #  if stdcacluation_flag:
            #    S_fit, sigma_S, params, params_covariance=calculate_Y_with_uncertainty_fit(t=time_coefficient, X=averageS, deltaX=stdS, function=sin_with_bias, p0=[ionnumber, np.pi, 0,0])
           # else:
          #      params, params_covariance = curve_fit(f=sin_with_bias, xdata=time_coefficient, ydata=averageS,p0=[ionnumber, np.pi, 0,0])
          #  A, w, phi,b= params
        else:
            # Large systems: compute <S> per dataset directly.
            averageS=np.zeros(len(list_rawcountsvalues))
            for i, dataset in enumerate(list_rawcountsvalues):
                averageS[i]=datasetToTotalSpin(dataset=dataset,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
            if stdcacluation_flag:
                stdS=np.zeros(len(list_rawcountsvalues))
                for i, result in enumerate( list_rawcountsvalues):
                    stdS[i]=bootstrap_std_singledataset(dataset=result, function=datasetToTotalSpin,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
           # if stdcacluation_flag:
             #   S_fit, sigma_S, params, params_covariance=calculate_Y_with_uncertainty_fit(t=time_coefficient, X=averageS, deltaX=stdS, function=sin_with_bias, p0=[ionnumber, np.pi, 0,0])
           # else:
           #     params, params_covariance = curve_fit(f=sin_with_bias, xdata=time_coefficient, ydata=averageS,p0=[ionnumber, np.pi, 0,0])
           # A, w, phi,b= params
    
        ax.scatter(time_coefficient, averageS, c=[color], s=20, edgecolor='k', alpha=0.8)
        ax.plot(time_coefficient,averageS,c=color, linewidth=1)
        if stdcacluation_flag:
            ax.errorbar(time_coefficient, averageS, yerr=stdS,  fmt='none', color=color, capsize=4)
    # Colorbar maps curve color back to theta, ticked at multiples of pi/4.
    cbar = fig.colorbar(sm, ax=ax, pad=0.02)
    cbar.set_label(rf"$\theta_{{{angelname}}}$(rad)")
    cbar_ticks = [0, np.pi/4, np.pi/2, 3*np.pi/4, np.pi]
    cbar.set_ticks(cbar_ticks)
    cbar.set_ticklabels([r'$0$', r'$\frac{\pi}{4}$', r'$\frac{\pi}{2}$',r'$\frac{3\pi}{4}$', r'$\pi$'])

    ax.set_xlim(0, 2*np.pi)
    xt = [0, np.pi/2, np.pi, 3*np.pi/2, 2*np.pi]
    xl = [r'$0$', r'$\frac{\pi}{2}$', r'$\pi$',r'$\frac{3\pi}{2}$', r'$2\pi$']
    ax.set_xticks(xt)
    ax.set_xticklabels(xl)

    ax.set_xlabel(rf'$\Phi_{{{angelname}}}$')
    ax.set_ylabel('<'+scanparametername+'>')
    ax.set_title(interactiontype+'<'+scanparametername+'>')
    plt.tight_layout()
    plt.show()

def plotQfunctionresult(thetalist,philist,Qfunctionresult,interactiontime,interactiontype='OAT',correction_flag=False,ionfidelity_list=None,stdcacluation_flag=False,correction_method=1):
    """
    Plot the measured Husimi Q function Q(theta, phi) as a 2-D colour map.

    Parameters
    ----------
    thetalist : list of float
        Polar-angle sample points theta (rad); assumed uniformly spaced.
    philist : list of float
        Azimuthal sample points phi (rad); assumed uniformly spaced.
    Qfunctionresult : nested sequence
        Raw count data; Qfunctionresult[i][j] is a (repeat, ionnumber)
        array measured at (thetalist[i], philist[j]).
    interactiontime : float
        Interaction time in us; shown in the plot title.
    interactiontype : str, optional
        Interaction label used in the title, by default 'OAT'.
    correction_flag : bool, optional
        Whether to apply detection-error correction, by default False.
    ionfidelity_list : list, optional
        ionfidelity_list[ionindex] = [darkfidelityofthision,
        brightfidelityofthision], by default None.
    stdcacluation_flag : bool, optional
        If True, bootstrap a standard deviation for every grid point into
        ``stdQ`` (computed but not drawn on this 2-D map), by default False.
    correction_method : int, optional
        Detection-error-correction algorithm selector, by default 1.

    Returns
    -------
    None
        Displays the figure as a side effect.
    """
    fig, ax = plt.subplots(figsize=(6, 4))
    Qfunctiondata=np.array(Qfunctionresult)
    Qfunction=np.zeros((len(thetalist),len(philist)))
    stdQ=np.zeros((len(thetalist),len(philist)))
    # All datasets share the same (repeat, ionnumber) shape; read it once.
    repeat, ionnumber = Qfunctiondata[0,0].shape
    thetaarray=np.array(thetalist)
    phiarray=np.array(philist)
    for i in range(len(thetalist)):
        for j in range(len(philist)):
            Qfunction[i,j]=datasetQfunction(dataset=Qfunctiondata[i,j],repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
            if stdcacluation_flag:
                stdQ[i,j]=bootstrap_std_singledataset(dataset=Qfunctiondata[i,j], function=datasetQfunction,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
    
    # Normalisation constant I = integral of Q sin(theta) dtheta dphi,
    # approximated as a Riemann sum on the (assumed uniform) grid.
    dtheta =thetaarray[1] -thetaarray[0]
    dphi   =phiarray[1]   - phiarray[0]
    sin_theta = np.sin(thetaarray)[:, None]

    I = np.sum(Qfunction* sin_theta) * dtheta * dphi
    
    
    Phi, Theta = np.meshgrid(phiarray, thetaarray)

    pcm = ax.pcolormesh(Phi, Theta, Qfunction/I,cmap=plt.cm.viridis,shading='auto')
    ax.set_aspect('equal')
    ax.set_xlabel(r'$\phi$(rad)')
    ax.set_xlim(0, 5*np.pi/2)
    xt = [0, np.pi/2, np.pi, 3*np.pi/2, 2*np.pi, 5*np.pi/2]
    xl = [r'$0$', r'$\frac{\pi}{2}$', r'$\pi$',r'$\frac{3\pi}{2}$', r'$2\pi$', r'$5\frac{\pi}{2}$']
    ax.set_xticks(xt)
    ax.set_xticklabels(xl)

    ax.set_ylabel(r'$\theta$(rad)')
    ax.set_ylim(0, np.pi)
    yt = [0, np.pi/2, np.pi]
    yl = [r'$0$', r'$\frac{\pi}{2}$', r'$\pi$']
    ax.set_yticks(yt)
    ax.set_yticklabels(yl)

    cbar = fig.colorbar(pcm, ax=ax, pad=0.02)
    cbar.set_label(r'$Q(\theta,\phi)$')

    ax.set_title(interactiontype+' T='+str(interactiontime)+'us Q function')
    plt.tight_layout()
    plt.show()


def plotQfunctionresultonBlochsphere(thetalist,philist,Qfunctionresult,interactiontime,interactiontype='OAT',correction_flag=False,ionfidelity_list=None,stdcacluation_flag=False,correction_method=1):
    """
    Plot the measured Husimi Q function as a colour texture on a unit
    Bloch sphere (3-D surface plot with x/y/z axis arrows).

    Parameters
    ----------
    thetalist : list of float
        Polar-angle sample points theta (rad); assumed uniformly spaced.
    philist : list of float
        Azimuthal sample points phi (rad); assumed uniformly spaced.
    Qfunctionresult : nested sequence
        Raw count data; Qfunctionresult[i][j] is a (repeat, ionnumber)
        array measured at (thetalist[i], philist[j]).
    interactiontime : float
        Interaction time in us; shown in the plot title.
    interactiontype : str, optional
        Interaction label used in the title, by default 'OAT'.
    correction_flag : bool, optional
        Whether to apply detection-error correction, by default False.
    ionfidelity_list : list, optional
        ionfidelity_list[ionindex] = [darkfidelityofthision,
        brightfidelityofthision], by default None.
    stdcacluation_flag : bool, optional
        If True, bootstrap a standard deviation for every grid point into
        ``stdQ`` (computed but not rendered on the sphere), by default False.
    correction_method : int, optional
        Detection-error-correction algorithm selector, by default 1.

    Returns
    -------
    None
        Displays the figure as a side effect.
    """
    fig = plt.figure(figsize=(6,6))
    ax  = fig.add_subplot(111, projection='3d')

    Qfunctiondata=np.array(Qfunctionresult)
    Qfunction=np.zeros((len(thetalist),len(philist)))
    stdQ=np.zeros((len(thetalist),len(philist)))
    # All datasets share the same (repeat, ionnumber) shape; read it once.
    repeat, ionnumber = Qfunctiondata[0,0].shape
    thetaarray=np.array(thetalist)
    phiarray=np.array(philist)
    for i in range(len(thetalist)):
        for j in range(len(philist)):
            Qfunction[i,j]=datasetQfunction(dataset=Qfunctiondata[i,j],repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
            if stdcacluation_flag:
                stdQ[i,j]=bootstrap_std_singledataset(dataset=Qfunctiondata[i,j], function=datasetQfunction,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
    
    # Normalisation constant I = integral of Q sin(theta) dtheta dphi,
    # approximated as a Riemann sum on the (assumed uniform) grid.
    dtheta =thetaarray[1] -thetaarray[0]
    dphi   =phiarray[1]   - phiarray[0]
    sin_theta = np.sin(thetaarray)[:, None]

    I = np.sum(Qfunction* sin_theta) * dtheta * dphi
    
    
    Phi, Theta = np.meshgrid(phiarray, thetaarray)


    # Spherical -> Cartesian for the unit sphere surface.
    x = np.sin(Theta) * np.cos(Phi)               # x = r sinθ cosφ, with r = 1
    y = np.sin(Theta) * np.sin(Phi)               # y = r sinθ sinφ
    z = np.cos(Theta)                             # z = r cosθ
    Q_norm=Qfunction/I
    norm   = Normalize(vmin=0, vmax=Q_norm.max()) # lock colour scale
    cmap   = cm.viridis
    colors = cmap(norm(Q_norm))                   # colour array for every (θ,φ)


    surf = ax.plot_surface(
    x, y, z,
    rstride=1, cstride=1,               # plot every grid square
    facecolors=colors,
    linewidth=0,
    antialiased=False,
    shade=False                         # keep colours un-shaded
    )
    # plot_surface with facecolors carries no colour mapping of its own,
    # so build a ScalarMappable just for the colourbar.
    mappable = cm.ScalarMappable(norm=norm, cmap=cmap)
    mappable.set_array([])                 # dummy, so colourbar knows limits
    cbar = fig.colorbar(mappable, shrink=0.6, pad=0.05)
    cbar.set_label(r'$Q(\theta,\phi)$')

    ax.set_axis_off()                 # hide the Matplotlib 3-D box

    axis_len = 1.5                   # a bit longer than the unit sphere


    # Draw x/y/z reference arrows from the origin through the sphere.
    arrow_kw = dict(pivot='tail', arrow_length_ratio=0.05, linewidth=2)
    
    ax.quiver(0, 0, 0,  axis_len, 0, 0,color='tab:red', **arrow_kw)   # +x
    ax.quiver(0, 0, 0,  0, axis_len, 0,color='tab:green', **arrow_kw)   # +y
    ax.quiver(0, 0, 0,  0, 0, axis_len,color='tab:blue', **arrow_kw)   # +z
    # x-axis label (red arrow)
    ax.text(axis_len*1.05, 0, 0, 'x', color='k', fontsize=12,
            va='center', ha='left')

    # y-axis label (green arrow)
    ax.text(0, axis_len*1.05, 0, 'y', color='k', fontsize=12,
            va='bottom', ha='center')

    # z-axis label (blue arrow)
    ax.text(0, 0, axis_len*1.05, 'z', color='k', fontsize=12,
            va='bottom', ha='center')

    ax.set_title(interactiontype+' T='+str(interactiontime)+'us Q function')
    plt.tight_layout()
    plt.show()


def plotQfunctionresultonBlochsphereCircularprojection(thetalist,philist,Qfunctionresult,interactiontime,interactiontype='OAT',correction_flag=False,ionfidelity_list=None,stdcacluation_flag=False,correction_method=1):
    """
    Plot the measured Husimi Q function as a polar-disk projection of the
    upper Bloch-sphere hemisphere (after a frame rotation by ``rotate_Q``).

    Parameters
    ----------
    thetalist : list of float
        Polar-angle sample points theta (rad); assumed uniformly spaced.
    philist : list of float
        Azimuthal sample points phi (rad); assumed uniformly spaced.
    Qfunctionresult : nested sequence
        Raw count data; Qfunctionresult[i][j] is a (repeat, ionnumber)
        array measured at (thetalist[i], philist[j]).
    interactiontime : float
        Interaction time in us (accepted for API symmetry with the other
        Q-function plotters; not shown on this figure).
    interactiontype : str, optional
        Interaction label, by default 'OAT' (not shown on this figure).
    correction_flag : bool, optional
        Whether to apply detection-error correction, by default False.
    ionfidelity_list : list, optional
        ionfidelity_list[ionindex] = [darkfidelityofthision,
        brightfidelityofthision], by default None.
    stdcacluation_flag : bool, optional
        If True, bootstrap a standard deviation for every grid point into
        ``stdQ`` (computed but not drawn), by default False.
    correction_method : int, optional
        Detection-error-correction algorithm selector, by default 1.

    Returns
    -------
    None
        Displays the figure as a side effect.
    """

    Qfunctiondata=np.array(Qfunctionresult)
    Qfunction=np.zeros((len(thetalist),len(philist)))
    stdQ=np.zeros((len(thetalist),len(philist)))
    # All datasets share the same (repeat, ionnumber) shape; read it once.
    repeat, ionnumber = Qfunctiondata[0,0].shape
    thetaarray=np.array(thetalist)
    phiarray=np.array(philist)
    for i in range(len(thetalist)):
        for j in range(len(philist)):
            Qfunction[i,j]=datasetQfunction(dataset=Qfunctiondata[i,j],repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
            if stdcacluation_flag:
                stdQ[i,j]=bootstrap_std_singledataset(dataset=Qfunctiondata[i,j], function=datasetQfunction,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
    
    # Normalisation constant I = integral of Q sin(theta) dtheta dphi,
    # approximated as a Riemann sum on the (assumed uniform) grid.
    dtheta =thetaarray[1] -thetaarray[0]
    dphi   =phiarray[1]   - phiarray[0]
    sin_theta = np.sin(thetaarray)[:, None]

    I = np.sum(Qfunction* sin_theta) * dtheta * dphi

    Q_norm=Qfunction/I

    # Rotate the frame (x'=x, y'=z, z'=-y) so the feature of interest sits
    # on the pole before projecting the northern hemisphere onto a disk.
    Q_norm = rotate_Q( thetaarray, phiarray, Q_norm)
    
    # 1. keep only the upper hemisphere (theta <= pi/2)
    mask        = thetaarray <= np.pi/2
    theta_cap   = thetaarray[mask]          # 1-D (M_cap,)
    Q_cap       = Q_norm[mask, :]      # 2-D (M_cap, N)
    
    # 2. polar-coordinate mapping  ρ = N·sinθ
    N_scale =10                      # radius of the final disk (pick any ≥1)
    rho_cap = N_scale * np.sin(theta_cap)
    
    # 3. build 2-D grids for pcolormesh
    Phi, Rho = np.meshgrid(phiarray, rho_cap)
    
    # 4. plot on a polar axis ---------------------------------------------
    fig, ax = plt.subplots(subplot_kw=dict(projection='polar'), figsize=(6,6))

    cmap=plt.cm.viridis
    pcm  = ax.pcolormesh(Phi, Rho, Q_cap,
                         cmap=cmap, shading='auto')
    
    # tidy up the polar plot
    ax.set_rlim(0, N_scale)          # make full radius = N_scale
    ax.set_yticklabels([])           # optional: hide radial tick labels
    ax.set_theta_zero_location('N')  # 0° at the top
    ax.set_theta_direction(-1)       # φ increases clockwise (like usual φ)
    
    # colourbar
    cbar = fig.colorbar(pcm, ax=ax, pad=0.08)
    cbar.set_label(r'$Q(\theta,\phi)$')
    
    plt.tight_layout()
    plt.show()

from scipy.interpolate import RegularGridInterpolator

def rotate_Q(theta, phi, Q):
    """
    Resample the spherical scalar field Q(θ,φ) under the frame change

        x' =  x,   y' =  z,   z' = -y

    while keeping the θ and φ sample grids themselves unchanged.

    Parameters
    ----------
    theta : (M,) array_like
        Polar angles, 0 … π (measured from the old +z axis).
    phi : (N,) array_like
        Azimuthal angles, 0 … 2π (from old +x toward +y).
    Q : (M,N) array_like
        Scalar field sampled on the (theta, phi) grid.

    Returns
    -------
    Q_rot : (M,N) ndarray
        The field expressed in the new frame, sampled on the same θ–φ grid.
        Points that fall just outside the interpolation domain are NaN.
    """
    th = np.asarray(theta)
    ph = np.asarray(phi)
    field = np.asarray(Q)

    # Grid of directions in the NEW frame.
    T, P = np.meshgrid(th, ph, indexing='ij')            # both (M,N)

    # Cartesian unit vector of every new-frame direction.
    sx = np.sin(T) * np.cos(P)
    sy = np.sin(T) * np.sin(P)
    sz = np.cos(T)

    # Pull each direction back to the OLD frame:
    #   x_old = x',  y_old = -z',  z_old = y'
    ox = sx
    oy = -sz
    oz = sy

    # Old-frame spherical angles of those directions.
    t_old = np.arccos(np.clip(oz, -1.0, 1.0))            # 0 … π
    p_old = np.mod(np.arctan2(oy, ox) + 2*np.pi, 2*np.pi)  # wrap to 0 … 2π

    # Linearly interpolate Q at the pulled-back angles; NaN off-grid.
    sampler = RegularGridInterpolator(
        (th, ph), field,
        bounds_error=False, fill_value=np.nan)

    query = np.stack((t_old.ravel(), p_old.ravel()), axis=-1)
    return sampler(query).reshape(field.shape)



def fit_ellipse(x, y):
    """
    Robust direct least-squares fit of an ellipse to 2-D points (x, y).

    Returns
    -------
    xc, yc : float          centre
    a, b   : float          semi-major and semi-minor axes (a ≥ b)
    theta  : float          tilt angle of major axis w.r.t. +x (radians, ccw)
    """
    x = x[:, None]
    y = y[:, None]

    # ----- 1.  Solve the generalized eigen-problem --------------------------
    D = np.hstack([x*x, x*y, y*y, x, y, np.ones_like(x)])
    S = D.T @ D
    C = np.zeros((6, 6))
    C[0, 2] = C[2, 0] = 2
    C[1, 1] = -1
    eigvals, eigvecs = np.linalg.eig(np.linalg.inv(S) @ C)
    a_vec = eigvecs[:, np.isreal(eigvals) & (eigvals > 0)][:, 0].real  # (A,B,C,D,E,F)

    # ----- 2.  Unpack and move to centre ------------------------------------
    A, B, Cc, Dd, Ee, Ff = a_vec
    # centre formulae
    denom = B*B - 4*A*Cc
    xc = (2*Cc*Dd - B*Ee) / denom
    yc = (2*A*Ee - B*Dd) / denom

    # constant term at the centre
    F_c = (A*xc*xc + B*xc*yc + Cc*yc*yc + Dd*xc + Ee*yc + Ff)

    # If F_c > 0 we flip all coefficients (same geometry, opposite sign)
    if F_c > 0:
        A, B, Cc, Dd, Ee, Ff = -A, -B, -Cc, -Dd, -Ee, -Ff
        F_c = -F_c

    # ----- 3.  Orientation ---------------------------------------------------
    theta = 0.5 * np.arctan2(B, A - Cc)
    cos_t, sin_t = np.cos(theta), np.sin(theta)

    # rotated second-order coefficients
    Ap = A*cos_t**2 + B*cos_t*sin_t + Cc*sin_t**2
    Cp = A*sin_t**2 - B*cos_t*sin_t + Cc*cos_t**2

    # ----- 4.  Semi-axes -----------------------------------------------------
    #  In the rotated frame, ellipse is  Ap·x'^2 + Cp·y'^2 = -F_c
    a_len = np.sqrt(np.abs(-F_c / Ap))          # Ap > 0 after the sign test
    b_len = np.sqrt(np.abs(-F_c / Cp))      # Cp > 0 after the sign test

    # Ensure a ≥ b
    if b_len > a_len:
        a_len, b_len = b_len, a_len
        theta = (theta + np.pi/2) % np.pi

    return xc, yc, a_len, b_len, theta % np.pi


def radial_length(a, b, theta, phi):
    """
    Distance from the ellipse centre to its perimeter along direction `phi`.

    Parameters
    ----------
    a, b  : semi-major / semi-minor axes (a ≥ b)
    theta : tilt of major axis (radians, ccw from +x)
    phi   : direction of ray (radians, ccw from +x)

    Returns
    -------
    float  length s(phi)
    """
    # Rotate the ray direction into the ellipse's principal-axis frame.
    delta = phi - theta
    u = np.cos(delta)          # component along the major axis
    v = np.sin(delta)          # component along the minor axis
    # Solve (s·u/a)² + (s·v/b)² = 1 for s.
    return 1.0 / np.sqrt((u / a)**2 + (v / b)**2)
# ------------------------------------------------------------
# 2.  Visualisation helper
# ------------------------------------------------------------
def plot_points_and_ellipse(averageSX, averageSY, 
                            xc, yc, a, b, theta,ThetaZ,stdSX=None, stdSY=None,stdcacluation_flag=False):
    """
    Plot measured (⟨Sx⟩, ⟨Sy⟩) points together with their fitted ellipse.

    Draws the data points (with error bars when available), the fitted
    ellipse, its major/minor axes, and the centre-to-perimeter rays at
    ±45°, annotating each ray with its length.

    Parameters
    ----------
    averageSX, averageSY : array_like
        Measured ⟨Sx⟩ / ⟨Sy⟩ values.
    xc, yc : float
        Fitted ellipse centre.
    a, b : float
        Fitted semi-major / semi-minor axes (a ≥ b).
    theta : float
        Tilt of the major axis (radians, ccw from +x).
    ThetaZ : float
        Scan angle θ_Z shown in the title (rendered via ``phi_to_tex``).
    stdSX, stdSY : array_like, optional
        1-sigma errors of the points; used only when
        ``stdcacluation_flag`` is True.
    stdcacluation_flag : bool, optional
        Whether to draw error bars, by default False.
    """
    fig, ax = plt.subplots(figsize=(6, 6))
    # points with error bars
    if stdcacluation_flag:
        ax.errorbar(averageSX, averageSY, xerr=stdSX, yerr=stdSY,fmt='o', ecolor='grey', capsize=3, label='data')
    else :
        ax.scatter(averageSX, averageSY, c='b', s=20, edgecolor='k', alpha=0.8, label='data')
    # centre-to-perimeter rays at ±45°
    length_pos45 = radial_length(a, b, theta,  np.pi/4)
    length_neg45 = radial_length(a, b, theta, -np.pi/4)
    for phi, s, colour in [( np.pi/4, length_pos45, 'g'),(-np.pi/4, length_neg45, 'm')]:
        dx, dy = s*np.cos(phi), s*np.sin(phi)
        # BUGFIX: phi is in radians — convert to degrees for the "°" label
        # (previously printed e.g. "+1°" for π/4).
        ax.plot([xc, xc+dx], [yc, yc+dy], colour+'-', lw=2,label=fr'$s({np.degrees(phi):+.0f}^{{\circ}})={s:.2f}$')
    # parametric ellipse
    t = np.linspace(0, 2*np.pi, 400)
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    ex = xc + a*np.cos(t)*cos_t - b*np.sin(t)*sin_t
    ey = yc + a*np.cos(t)*sin_t + b*np.sin(t)*cos_t
    ax.plot(ex, ey, 'r-', lw=2, label='fitted ellipse')

    # major/minor cross
    ax.plot([xc - a*cos_t, xc + a*cos_t],
            [yc - a*sin_t, yc + a*sin_t], 'r--', lw=1)
    ax.plot([xc - b*(-sin_t), xc + b*(-sin_t)],
            [yc - b*( cos_t),  yc + b*( cos_t)], 'r--', lw=1)

    ax.set_aspect('equal')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.legend()
    ax.set_title(rf"$\theta_Z={phi_to_tex(ThetaZ)},a={a:.3g},\; b={b:.3g},\; \theta={np.degrees(theta):.1f}°$")
    plt.show()


def plotMeanfieldEllipse(thetalist,SxSymeanfieldresult,angelname="X",correction_flag=False,ionfidelity_list=None,stdcacluation_flag=False,correction_method=1,cyclefileflag=False):
    """
    Fit and plot the mean-field (⟨Sx⟩, ⟨Sy⟩) ellipse for every scanned angle.

    For each angle in ``thetalist`` the measured (⟨Sx⟩, ⟨Sy⟩) point cloud is
    fitted with ``fit_ellipse`` and drawn via ``plot_points_and_ellipse``;
    the ratio of the ellipse radii along the +π/4 and −π/4 directions is
    collected and finally plotted versus the scan angle.

    Parameters
    ----------
    thetalist : list of float
        Scanned rotation angles (rad); x axis of the summary plot.
    SxSymeanfieldresult : sequence
        Pair [Sx_result, Sy_result]; each indexed as result[j][i] giving a
        (repeat, ionnumber) raw-count array for scan point j.
    angelname : str, optional
        Axis-label subscript for the scanned angle, by default "X".
    correction_flag : bool, optional
        Whether to apply detection-error correction, by default False.
    ionfidelity_list : list, optional
        ionfidelity_list[ionindex] = [darkfidelityofthision,
        brightfidelityofthision], by default None.
    stdcacluation_flag : bool, optional
        Whether to bootstrap standard deviations and draw error bars.
    correction_method : int, optional
        Detection-error-correction algorithm selector, by default 1.
    cyclefileflag : bool, optional
        Unused here; kept for API compatibility with sibling plotters.
    """
    list_scanparameter=np.array(thetalist)
    
    Sxmeanfieldresult=SxSymeanfieldresult[0]
    Symeanfieldresult=SxSymeanfieldresult[1]
    Lratio=np.zeros_like(list_scanparameter)
    for j,theta in enumerate(list_scanparameter):

        list_rawcountsvaluesX=np.array(Sxmeanfieldresult[j])
        list_rawcountsvaluesY=np.array(Symeanfieldresult[j])
        assert len(list_rawcountsvaluesX) > 0
        assert len(list_rawcountsvaluesY) > 0
        repeat, ionnumber = list_rawcountsvaluesX[0].shape
        # Small ion numbers: compute ⟨S⟩ from full state populations (the
        # state space is 2^ionnumber, hence the <=14 cut-off); otherwise use
        # the direct per-dataset spin average below.
        if ionnumber <=14 and correction_method!=3:
            if correction_flag:
                assert ionfidelity_list is not None, "warning:ionfidelity_list is None"
                PopulationresultX = dataprocess_with_detection_error_croection(ionfidelity_list, list_rawcountsvaluesX, target='state',correction_method=correction_method)
                PopulationresultY = dataprocess_with_detection_error_croection(ionfidelity_list, list_rawcountsvaluesY, target='state',correction_method=correction_method)
            else:
                PopulationresultX = dataprocess(list_rawcountsvaluesX, target='state')
                PopulationresultY = dataprocess(list_rawcountsvaluesY, target='state')
            if stdcacluation_flag:
                stdSX=np.zeros(len(list_rawcountsvaluesX))
                stdSY=np.zeros(len(list_rawcountsvaluesY))
                for i, result in enumerate( list_rawcountsvaluesX):
                    stdSX[i]=bootstrap_std_singledataset(dataset=result, function=datasetToTotalSpin,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
                for i, result in enumerate( list_rawcountsvaluesY):    
                    stdSY[i]=bootstrap_std_singledataset(dataset=result, function=datasetToTotalSpin,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
            
            # Populationresult[stateindex] is the list of state populations,
            # with length len(list_scanparameter).
            PopulationresultX=np.array(PopulationresultX).T
            PopulationresultY=np.array(PopulationresultY).T
            averageSX=np.zeros(len(PopulationresultX))
            averageSY=np.zeros(len(PopulationresultY))
            NstateSpin=NionstateSpinlist(ionnumber)
            # ⟨S⟩ = populations · per-state total spin.
            for i, result in enumerate(PopulationresultX):
                averageSX[i]=PopulationresultX[i].dot(NstateSpin)
            for i, result in enumerate(PopulationresultY):
                averageSY[i]=PopulationresultY[i].dot(NstateSpin)
            
            
          #  if stdcacluation_flag:
            #    S_fit, sigma_S, params, params_covariance=calculate_Y_with_uncertainty_fit(t=time_coefficient, X=averageS, deltaX=stdS, function=sin_with_bias, p0=[ionnumber, np.pi, 0,0])
           # else:
          #      params, params_covariance = curve_fit(f=sin_with_bias, xdata=time_coefficient, ydata=averageS,p0=[ionnumber, np.pi, 0,0])
          #  A, w, phi,b= params
        else:
            averageSX=np.zeros(len(list_rawcountsvaluesX))
            averageSY=np.zeros(len(list_rawcountsvaluesY))
            for i, dataset in enumerate(list_rawcountsvaluesX):
                averageSX[i]=datasetToTotalSpin(dataset=dataset,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
            for i, dataset in enumerate(list_rawcountsvaluesY):
                averageSY[i]=datasetToTotalSpin(dataset=dataset,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
            if stdcacluation_flag:
                stdSX=np.zeros(len(list_rawcountsvaluesX))
                for i, result in enumerate( list_rawcountsvaluesX):
                    stdSX[i]=bootstrap_std_singledataset(dataset=result, function=datasetToTotalSpin,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
                stdSY=np.zeros(len(list_rawcountsvaluesY))
                for i, result in enumerate( list_rawcountsvaluesY):
                    stdSY[i]=bootstrap_std_singledataset(dataset=result, function=datasetToTotalSpin,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
           # if stdcacluation_flag:
             #   S_fit, sigma_S, params, params_covariance=calculate_Y_with_uncertainty_fit(t=time_coefficient, X=averageS, deltaX=stdS, function=sin_with_bias, p0=[ionnumber, np.pi, 0,0])
           # else:
           #     params, params_covariance = curve_fit(f=sin_with_bias, xdata=time_coefficient, ydata=averageS,p0=[ionnumber, np.pi, 0,0])
           # A, w, phi,b= params
        # Fit the ellipse and record the ±45° radial-length ratio for this angle.
        xc, yc, a_len, b_len, thetabetwenXandLongaxis=fit_ellipse(x=averageSX, y=averageSY)
        length_pos45 = radial_length(a=a_len, b=b_len, theta=thetabetwenXandLongaxis,  phi=np.pi/4)
        length_neg45 = radial_length(a=a_len, b=b_len, theta=thetabetwenXandLongaxis, phi=-np.pi/4)
        Lratio[j]=length_pos45/length_neg45
        if stdcacluation_flag:
            plot_points_and_ellipse(averageSX=averageSX, averageSY=averageSY,  xc=xc, yc=yc, a=a_len, b=b_len, theta=thetabetwenXandLongaxis,ThetaZ=theta,stdSX=stdSX, stdSY=stdSY,stdcacluation_flag=True)
        else:
            plot_points_and_ellipse(averageSX=averageSX, averageSY=averageSY, xc=xc, yc=yc, a=a_len, b=b_len, theta=thetabetwenXandLongaxis,ThetaZ=theta)
    
    # Summary plot: radial-length ratio versus scan angle.
    fig, ax = plt.subplots(figsize=(6, 4))
    ax.scatter(list_scanparameter, Lratio,  s=20, edgecolor='k', alpha=0.8)
    ax.plot(list_scanparameter,Lratio, linewidth=1)

    
   # ax.set_xlim(0, 2*np.pi)
   # xt = [0, np.pi/2, np.pi, 3*np.pi/2, 2*np.pi]
   # xl = [r'$0$', r'$\frac{\pi}{2}$', r'$\pi$',r'$\frac{3\pi}{2}$', r'$2\pi$']
   # ax.set_xticks(xt)
    #ax.set_xticklabels(xl)

    ax.set_xlabel(rf'$\theta_{{{angelname}}}$')
    ax.set_ylabel(r'$L_{\pi/4}/L_{-\pi/4}$')
    ax.set_title('MeanField_Ellipse')
    plt.tight_layout()
    plt.show()

from fractions import Fraction

def phi_to_tex(phi, max_denominator: int = 24) -> str:
    """
    Render a numeric angle `phi` (radians) as a LaTeX string expressed as a
    rational multiple of π.

    Examples
    --------
    >>> phi_to_tex(np.pi/4)          #  'π/4'
    >>> phi_to_tex(3*np.pi/2)        #  '3π/2'
    >>> phi_to_tex(-np.pi)           #  '-π'
    >>> phi_to_tex(0)                #  '0'
    """
    # Best rational approximation of phi / π with a bounded denominator.
    ratio = Fraction(phi / np.pi).limit_denominator(max_denominator)
    num, den = ratio.numerator, ratio.denominator

    if num == 0:
        return "0"

    prefix = "-" if num < 0 else ""
    num = abs(num)

    if den == 1:
        # Integer multiple of π; a coefficient of 1 stays implicit.
        body = r"\pi" if num == 1 else rf"{num}\pi"
        return prefix + body

    if num == 1:
        # Unit numerator:  π / d
        return prefix + rf"\pi/{den}"

    # General case:  nπ / d
    return prefix + rf"{num}\pi/{den}"

def DeltaThetaphipluspi4( theta0,k, t):
    """
    Evaluate  arccos( tanh[ +k t + arctanh(cos θ0) ] ) − θ0.

    Small-θ0 (m → 1) limit of the full elliptic solution, for the
    positive-drive (+k) branch.
    """
    argument = k * t + np.arctanh(np.cos(theta0))
    return np.arccos(np.tanh(argument)) - theta0
def DeltaThetaphiminuspi4( theta0,k, t):
    """
    Evaluate  arccos( tanh[ −k t + arctanh(cos θ0) ] ) − θ0.

    Small-θ0 (m → 1) limit of the full elliptic solution, for the
    negative-drive (−k) branch.
    """
    argument = np.arctanh(np.cos(theta0)) - k * t
    return np.arccos(np.tanh(argument)) - theta0


def plotMeanfieldThetaZDeltaTheta(thetalist,philist,SxSySzmeanfieldresult,angelname="X",correction_flag=False,ionfidelity_list=None,stdcacluation_flag=False,correction_method=1,cyclefileflag=False,plottheoryline=False):
    """
    Plot the mean-field polar-angle deviation Δ⟨θ_Z⟩ = θ_Z − (π − θ) versus
    the prepared polar angle θ, one curve per scanned azimuth φ.

    θ_Z is reconstructed from the three measured spin projections as
    arccos(⟨Sz⟩ / |⟨S⟩|).

    Parameters
    ----------
    thetalist : list of float
        Prepared polar angles θ (rad); used as the x axis.
    philist : list of float
        Scanned azimuth angles φ (rad); one curve is drawn per entry.
    SxSySzmeanfieldresult : sequence
        Triple [Sx, Sy, Sz] of raw-count results; each indexed as
        result[j][i] giving a (repeat, ionnumber) array for azimuth j and
        polar angle i.
    angelname : str, optional
        Axis-label subscript for the rotation angle, by default "X".
    correction_flag : bool, optional
        Whether to apply detection-error correction, by default False.
    ionfidelity_list : list, optional
        ionfidelity_list[ionindex] = [darkfidelityofthision,
        brightfidelityofthision], by default None.
    stdcacluation_flag : bool, optional
        Whether to bootstrap standard deviations and draw error bars.
    correction_method : int, optional
        Detection-error-correction algorithm selector, by default 1.
    cyclefileflag, plottheoryline : bool, optional
        Unused here; kept for API compatibility with sibling plotters.
    """
    list_scanparameter=np.array(philist)
    time_coefficient=np.array(thetalist)
    fig, ax = plt.subplots(figsize=(6, 4))
    Sxmeanfieldresult=SxSySzmeanfieldresult[0]
    Symeanfieldresult=SxSySzmeanfieldresult[1]
    Szmeanfieldresult=SxSySzmeanfieldresult[2]
    
    for j,phi in enumerate(list_scanparameter):

        list_rawcountsvaluesX=np.array(Sxmeanfieldresult[j])
        list_rawcountsvaluesY=np.array(Symeanfieldresult[j])
        list_rawcountsvaluesZ=np.array(Szmeanfieldresult[j])
        assert len(list_rawcountsvaluesX) > 0
        assert len(list_rawcountsvaluesY) > 0
        assert len(list_rawcountsvaluesZ) > 0
        repeat, ionnumber = list_rawcountsvaluesX[0].shape
        # Small ion numbers: compute ⟨S⟩ from full state populations (the
        # state space is 2^ionnumber, hence the <=14 cut-off); otherwise use
        # the direct per-dataset spin average below.
        if ionnumber <=14 and correction_method!=3:
            if correction_flag:
                assert ionfidelity_list is not None, "warning:ionfidelity_list is None"
                PopulationresultX = dataprocess_with_detection_error_croection(ionfidelity_list, list_rawcountsvaluesX, target='state',correction_method=correction_method)
                PopulationresultY = dataprocess_with_detection_error_croection(ionfidelity_list, list_rawcountsvaluesY, target='state',correction_method=correction_method)
                PopulationresultZ = dataprocess_with_detection_error_croection(ionfidelity_list, list_rawcountsvaluesZ, target='state',correction_method=correction_method)
            else:
                PopulationresultX = dataprocess(list_rawcountsvaluesX, target='state')
                PopulationresultY = dataprocess(list_rawcountsvaluesY, target='state')
                PopulationresultZ = dataprocess(list_rawcountsvaluesZ, target='state')
            if stdcacluation_flag:
                stdSX=np.zeros(len(list_rawcountsvaluesX))
                stdSY=np.zeros(len(list_rawcountsvaluesY))
                stdSZ=np.zeros(len(list_rawcountsvaluesZ))
                for i, result in enumerate( list_rawcountsvaluesX):
                    stdSX[i]=bootstrap_std_singledataset(dataset=result, function=datasetToTotalSpin,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
                for i, result in enumerate( list_rawcountsvaluesY):    
                    stdSY[i]=bootstrap_std_singledataset(dataset=result, function=datasetToTotalSpin,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
                for i, result in enumerate( list_rawcountsvaluesZ):    
                    stdSZ[i]=bootstrap_std_singledataset(dataset=result, function=datasetToTotalSpin,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
            # Populationresult[stateindex] is the list of state populations,
            # with length len(list_scanparameter).
            PopulationresultX=np.array(PopulationresultX).T
            PopulationresultY=np.array(PopulationresultY).T
            PopulationresultZ=np.array(PopulationresultZ).T
            averageSX=np.zeros(len(PopulationresultX))
            averageSY=np.zeros(len(PopulationresultY))
            averageSZ=np.zeros(len(PopulationresultZ))
            NstateSpin=NionstateSpinlist(ionnumber)
            # ⟨S⟩ = populations · per-state total spin.
            for i, result in enumerate(PopulationresultX):
                averageSX[i]=PopulationresultX[i].dot(NstateSpin)
            for i, result in enumerate(PopulationresultY):
                averageSY[i]=PopulationresultY[i].dot(NstateSpin)
            for i, result in enumerate(PopulationresultZ):
                averageSZ[i]=PopulationresultZ[i].dot(NstateSpin)
        else:
            averageSX=np.zeros(len(list_rawcountsvaluesX))
            averageSY=np.zeros(len(list_rawcountsvaluesY))
            averageSZ=np.zeros(len(list_rawcountsvaluesZ))
            for i, dataset in enumerate(list_rawcountsvaluesX):
                averageSX[i]=datasetToTotalSpin(dataset=dataset,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
            for i, dataset in enumerate(list_rawcountsvaluesY):
                averageSY[i]=datasetToTotalSpin(dataset=dataset,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
            for i, dataset in enumerate(list_rawcountsvaluesZ):
                averageSZ[i]=datasetToTotalSpin(dataset=dataset,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
            if stdcacluation_flag:
                stdSX=np.zeros(len(list_rawcountsvaluesX))
                for i, result in enumerate( list_rawcountsvaluesX):
                    stdSX[i]=bootstrap_std_singledataset(dataset=result, function=datasetToTotalSpin,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
                stdSY=np.zeros(len(list_rawcountsvaluesY))
                for i, result in enumerate( list_rawcountsvaluesY):
                    stdSY[i]=bootstrap_std_singledataset(dataset=result, function=datasetToTotalSpin,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
                stdSZ=np.zeros(len(list_rawcountsvaluesZ))
                for i, result in enumerate( list_rawcountsvaluesZ):
                    stdSZ[i]=bootstrap_std_singledataset(dataset=result, function=datasetToTotalSpin,repeat=repeat, ionnumber=ionnumber,correction_flag=correction_flag,ionfidelity_list=ionfidelity_list,correction_method=correction_method)
        # Reconstructed polar angle of the mean spin vector.
        ThetaZ=np.arccos(averageSZ/np.sqrt(averageSX**2+averageSY**2+averageSZ**2))
        ax.scatter(time_coefficient, ThetaZ-(np.pi-time_coefficient), s=20, edgecolor='k', alpha=0.8,label = rf'$\Phi_{{{angelname}}} = {phi_to_tex(phi)}$')
        ax.plot(time_coefficient,ThetaZ-(np.pi-time_coefficient), linewidth=1)
        if stdcacluation_flag:
            # BUGFIX: uncertainty_A returns the tuple (value, sigma); only the
            # propagated 1-sigma uncertainty goes into yerr.  Passing the whole
            # tuple made matplotlib treat it as asymmetric (lower, upper) errors.
            stdThetaZ=uncertainty_A(averageSX, averageSY, averageSZ, stdSX, stdSY, stdSZ)[1]
            ax.errorbar(time_coefficient,ThetaZ-(np.pi-time_coefficient), yerr=stdThetaZ,  fmt='none', capsize=4)
    
    ax.set_xlabel(rf'$\theta_{{{angelname}}}$')
    ax.set_ylabel(r'$\Delta<\theta_Z >$')
    ax.set_title('MeanfieldThetaZDeltaTheta')
    plt.legend()
    plt.tight_layout()
    plt.show()

def uncertainty_A(B, C, D, sigma_B, sigma_C, sigma_D,
                  cov=None):
    """
    Propagate uncertainties of B, C, D to A = -arccos(D/sqrt(B^2+C^2+D^2)).

    Parameters
    ----------
    B, C, D : float or array_like
        Input values (same shape if arrays).
    sigma_B, sigma_C, sigma_D : float or array_like
        Corresponding 1-sigma uncertainties, same shape as B, C, D.
    cov : array_like, optional
        3x3 covariance matrix of [B, C, D].
        If None, variables are treated as uncorrelated.

    Returns
    -------
    A : float or numpy.ndarray
        Value of A (radians).
    sigma_A : float or numpy.ndarray
        1-sigma standard deviation of A.

    Notes
    -----
    The gradient is singular when B == C == 0 (arccos argument is +/-1);
    NaN/inf will propagate through in that case.
    """
    B, C, D = np.asarray(B), np.asarray(C), np.asarray(D)
    R2 = B*B + C*C + D*D
    R = np.sqrt(R2)
    t = D / R
    # value
    A = -np.arccos(t)

    # gradient components of A with respect to (B, C, D)
    common_denom = R2 * np.sqrt(B*B + C*C)
    dAdB = -B*D / common_denom
    dAdC = -C*D / common_denom
    dAdD =  (B*B + C*C) / common_denom
    grad = np.stack([dAdB, dAdC, dAdD], axis=-1)

    if cov is None:
        # independent uncertainties.
        # BUGFIX: stack the variances along the LAST axis so they line up
        # with grad's trailing (B, C, D) axis.  The previous
        # np.array([sigma_B**2, ...]) produced a (3, n) array for array
        # inputs, which cannot broadcast against grad's (n, 3).
        variances = np.stack(
            [np.asarray(sigma_B)**2,
             np.asarray(sigma_C)**2,
             np.asarray(sigma_D)**2],
            axis=-1,
        )
        sigma_A = np.sqrt(np.sum((grad**2)*variances, axis=-1))
    else:
        # full covariance propagation: sigma^2 = g^T C g
        cov = np.asarray(cov)
        sigma_A = np.sqrt(np.einsum('...i,ij,...j->...', grad, cov, grad))

    return A, sigma_A



def plotMeanfieldThetaZFlux(theta,phi,interactiontimelist,SxSySzmeanfieldresult,angelname="Z",correction_flag=False,ionfidelity_list=None,stdcacluation_flag=False,correction_method=1,cyclefileflag=False):
    """
    Plot the mean-field polar angle pi - arccos(Sz/|S|) versus interaction
    time.

    Parameters
    ----------
    theta, phi : float
        Initial-state angles; used only in the legend label.
    interactiontimelist : sequence of float
        Interaction times (us), one per dataset.
    SxSySzmeanfieldresult : sequence
        [Sx, Sy, Sz] raw-count results; each element is a list of arrays
        of shape (repeat, ionnumber), one per interaction time.
    angelname : str, optional
        Subscript shown in the legend label.
    correction_flag : bool, optional
        Apply detection-error correction (requires ionfidelity_list).
    ionfidelity_list : list, optional
        Per-ion [dark_fidelity, bright_fidelity] pairs.
    stdcacluation_flag : bool, optional
        Also compute bootstrap error bars.
    correction_method : int, optional
        Correction-scheme selector; method 3 always uses the direct path.
    cyclefileflag : bool, optional
        Unused; kept for backward compatibility of the signature.
    """
    time_coefficient = np.array(interactiontimelist)
    fig, ax = plt.subplots(figsize=(6, 4))
    list_rawcountsvaluesX = np.array(SxSySzmeanfieldresult[0])
    list_rawcountsvaluesY = np.array(SxSySzmeanfieldresult[1])
    list_rawcountsvaluesZ = np.array(SxSySzmeanfieldresult[2])
    assert len(list_rawcountsvaluesX) > 0
    assert len(list_rawcountsvaluesY) > 0
    assert len(list_rawcountsvaluesZ) > 0
    repeat, ionnumber = list_rawcountsvaluesX[0].shape

    def _bootstrap_stds(datasets):
        # One bootstrap standard deviation of the total spin per time point.
        stds = np.zeros(len(datasets))
        for i, dataset in enumerate(datasets):
            stds[i] = bootstrap_std_singledataset(
                dataset=dataset, function=datasetToTotalSpin, repeat=repeat,
                ionnumber=ionnumber, correction_flag=correction_flag,
                ionfidelity_list=ionfidelity_list,
                correction_method=correction_method)
        return stds

    if ionnumber <= 14 and correction_method != 3:
        # Small registers: go through the full 2^N state populations.
        if correction_flag:
            assert ionfidelity_list is not None, "warning:ionfidelity_list is None"
            PopulationresultX = dataprocess_with_detection_error_croection(ionfidelity_list, list_rawcountsvaluesX, target='state', correction_method=correction_method)
            PopulationresultY = dataprocess_with_detection_error_croection(ionfidelity_list, list_rawcountsvaluesY, target='state', correction_method=correction_method)
            PopulationresultZ = dataprocess_with_detection_error_croection(ionfidelity_list, list_rawcountsvaluesZ, target='state', correction_method=correction_method)
        else:
            PopulationresultX = dataprocess(list_rawcountsvaluesX, target='state')
            PopulationresultY = dataprocess(list_rawcountsvaluesY, target='state')
            PopulationresultZ = dataprocess(list_rawcountsvaluesZ, target='state')
        if stdcacluation_flag:
            stdSX = _bootstrap_stds(list_rawcountsvaluesX)
            stdSY = _bootstrap_stds(list_rawcountsvaluesY)
            stdSZ = _bootstrap_stds(list_rawcountsvaluesZ)
        # Populationresult[stateindex] holds one population list per scan
        # point; transpose so axis 0 runs over interaction times.
        PopulationresultX = np.array(PopulationresultX).T
        PopulationresultY = np.array(PopulationresultY).T
        PopulationresultZ = np.array(PopulationresultZ).T
        NstateSpin = NionstateSpinlist(ionnumber)
        averageSX = np.array([populations.dot(NstateSpin) for populations in PopulationresultX])
        averageSY = np.array([populations.dot(NstateSpin) for populations in PopulationresultY])
        averageSZ = np.array([populations.dot(NstateSpin) for populations in PopulationresultZ])
    else:
        # Large registers: compute the total spin directly per dataset.
        def _total_spins(datasets):
            vals = np.zeros(len(datasets))
            for i, dataset in enumerate(datasets):
                vals[i] = datasetToTotalSpin(
                    dataset=dataset, repeat=repeat, ionnumber=ionnumber,
                    correction_flag=correction_flag,
                    ionfidelity_list=ionfidelity_list,
                    correction_method=correction_method)
            return vals

        averageSX = _total_spins(list_rawcountsvaluesX)
        averageSY = _total_spins(list_rawcountsvaluesY)
        averageSZ = _total_spins(list_rawcountsvaluesZ)
        if stdcacluation_flag:
            stdSX = _bootstrap_stds(list_rawcountsvaluesX)
            stdSY = _bootstrap_stds(list_rawcountsvaluesY)
            stdSZ = _bootstrap_stds(list_rawcountsvaluesZ)

    # Polar angle of the mean spin vector.
    ThetaZ = np.arccos(averageSZ / np.sqrt(averageSX**2 + averageSY**2 + averageSZ**2))
    ax.scatter(time_coefficient, np.pi - ThetaZ, s=20, edgecolor='k', alpha=0.8, label=rf'$\Phi_{{{angelname}}}={phi_to_tex(phi)}$, $\Theta_{{{angelname}}}={phi_to_tex(theta)}$')
    ax.plot(time_coefficient, np.pi - ThetaZ, linewidth=1)
    if stdcacluation_flag:
        # BUGFIX: uncertainty_A returns (A, sigma_A); only sigma_A is the
        # error bar.  The old code passed the whole tuple as yerr, which
        # matplotlib interprets as asymmetric (lower, upper) errors.
        _, stdThetaZ = uncertainty_A(averageSX, averageSY, averageSZ, stdSX, stdSY, stdSZ)
        ax.errorbar(time_coefficient, np.pi - ThetaZ, yerr=stdThetaZ, fmt='none', capsize=4)

    ax.set_xlabel('interaction time/us')
    ax.set_ylabel(r'$<\theta_Z >$')
    ax.set_title('MeanfieldThetaZFlux')
    plt.legend()
    plt.tight_layout()
    plt.show()





def store_Squeezddata_hdf5(best_angle, coherence, squeezed):
    """
    Save squeezing-measurement results into a date-stamped HDF5 file.

    Parameters
    ----------
    best_angle : float
        Optimal analysis angle.
    coherence : numpy.ndarray
        Measurement data of the coherent state.
    squeezed : numpy.ndarray
        Measurement data of the squeezed state.

    Returns
    -------
    str
        Full path of the HDF5 file that was written.
    """
    disk_path = r'D:\Data'
    path_prefix = disk_path + time.strftime("\%Y\%Y%m\%Y%m%d\\")
    # Create today's directory tree on first use.
    if not os.path.exists(path_prefix):
        print("Data will be save into ", path_prefix)
        os.makedirs(path_prefix)
    stamp = time.strftime("-%Y%m%d%H%M%S")
    hdf5_path = path_prefix + "Squeezed_data" + stamp + ".hdf5"
    with h5py.File(hdf5_path, 'w') as f:
        f.create_dataset('best_angle', data=best_angle)   # scalar
        f.create_dataset('coherence', data=coherence)     # array
        f.create_dataset('squeezed', data=squeezed)       # array
    print(hdf5_path)
    return hdf5_path

# Function to read data from HDF5
def read_Squeezddata_hdf5(filename):
    """
    Load squeezing-measurement results written by store_Squeezddata_hdf5.

    Parameters
    ----------
    filename : str
        Path of the HDF5 file.

    Returns
    -------
    best_angle : float
        Optimal analysis angle.
    coherence : numpy.ndarray
        Measurement data of the coherent state.
    squeezed : numpy.ndarray
        Measurement data of the squeezed state.
    """
    with h5py.File(filename, 'r') as f:
        best_angle = f['best_angle'][()]          # stored as a scalar
        coherence, squeezed = f['coherence'][:], f['squeezed'][:]
    return best_angle, coherence, squeezed

def store_meanfielddata_hdf5(interactiontype, operatorname, thetalist, philist, meanfieldresult):
    """
    Save mean-field measurement data (OAT/TAT comparisons or TAT ellipse
    scans) into a date-stamped HDF5 file.

    Parameters
    ----------
    interactiontype : str
        Interaction label; becomes part of the file name.
    operatorname : str
        Measured-operator label; becomes part of the file name.
    thetalist, philist : array_like
        Scanned angles.
    meanfieldresult : array_like
        Measured mean-field values.

    Returns
    -------
    str
        Full path of the HDF5 file that was written.
    """
    disk_path = r'D:\Data'
    path_prefix = disk_path + time.strftime("\%Y\%Y%m\%Y%m%d\\")
    # Create today's directory tree on first use.
    if not os.path.exists(path_prefix):
        print("Data will be save into ", path_prefix)
        os.makedirs(path_prefix)
    stamp = time.strftime("-%Y%m%d%H%M%S")
    hdf5_path = path_prefix + str(interactiontype) + str(operatorname) + "meanfielddata" + stamp + ".hdf5"
    with h5py.File(hdf5_path, 'w') as f:
        f.create_dataset('thetalist', data=np.array(thetalist))
        f.create_dataset('philist', data=np.array(philist))
        f.create_dataset('meanfieldresult', data=np.array(meanfieldresult))
    print(hdf5_path)
    return hdf5_path


def store_Thetafluxmeanfielddata_hdf5(interactiontype, operatorname, interactiontime, meanfieldresult):
    """
    Save theta-flux mean-field measurement data into a date-stamped HDF5
    file.

    Parameters
    ----------
    interactiontype : str
        Interaction label; becomes part of the file name.
    operatorname : str
        Measured-operator label; becomes part of the file name.
    interactiontime : array_like
        Interaction times for each measurement.
    meanfieldresult : array_like
        Measured mean-field values.

    Returns
    -------
    str
        Full path of the HDF5 file that was written.
    """
    disk_path = r'D:\Data'
    path_prefix = disk_path + time.strftime("\%Y\%Y%m\%Y%m%d\\")
    # Create today's directory tree on first use.
    if not os.path.exists(path_prefix):
        print("Data will be save into ", path_prefix)
        os.makedirs(path_prefix)
    stamp = time.strftime("-%Y%m%d%H%M%S")
    hdf5_path = path_prefix + str(interactiontype) + str(operatorname) + "Thetafluxmeanfielddata" + stamp + ".hdf5"
    with h5py.File(hdf5_path, 'w') as f:
        f.create_dataset('interactiontime', data=np.array(interactiontime))
        f.create_dataset('meanfieldresult', data=np.array(meanfieldresult))
    print(hdf5_path)
    return hdf5_path

def store_TFIGHZQFI_hdf5(Interaction_timelist, SxandSx2list, SyandSy2list, SxSyandSxSy2list):
    """
    Save TFIM-GHZ quantum-Fisher-information datasets into a date-stamped
    HDF5 file.

    Parameters
    ----------
    Interaction_timelist : array_like
        Interaction times.
    SxandSx2list : array_like
        <Sx> and <Sx^2> measurement results.
    SyandSy2list : array_like
        <Sy> and <Sy^2> measurement results.
    SxSyandSxSy2list : array_like
        Cross-term measurement results.

    Returns
    -------
    str
        Full path of the HDF5 file that was written.
    """
    disk_path = r'D:\Data'
    path_prefix = disk_path + time.strftime("\%Y\%Y%m\%Y%m%d\\")
    # Create today's directory tree on first use.
    if not os.path.exists(path_prefix):
        print("Data will be save into ", path_prefix)
        os.makedirs(path_prefix)
    stamp = time.strftime("-%Y%m%d%H%M%S")
    hdf5_path = path_prefix + "TFIGHZQFI" + stamp + ".hdf5"
    with h5py.File(hdf5_path, 'w') as f:
        f.create_dataset('Interaction_timelist', data=np.array(Interaction_timelist))
        f.create_dataset('SxandSx2list', data=np.array(SxandSx2list))
        f.create_dataset('SyandSy2list', data=np.array(SyandSy2list))
        f.create_dataset('SxSyandSxSy2list', data=np.array(SxSyandSxSy2list))
    print(hdf5_path)
    return hdf5_path

def store_TFIGHZdistribution_hdf5(GHZdisrtibution):
    """
    Save a TFIM-GHZ state-distribution array into a date-stamped HDF5
    file.

    Parameters
    ----------
    GHZdisrtibution : array_like
        Measured GHZ state distribution.

    Returns
    -------
    str
        Full path of the HDF5 file that was written.
    """
    disk_path = r'D:\Data'
    path_prefix = disk_path + time.strftime("\%Y\%Y%m\%Y%m%d\\")
    # Create today's directory tree on first use.
    if not os.path.exists(path_prefix):
        print("Data will be save into ", path_prefix)
        os.makedirs(path_prefix)
    stamp = time.strftime("-%Y%m%d%H%M%S")
    hdf5_path = path_prefix + "TFIGHZdistribution" + stamp + ".hdf5"
    with h5py.File(hdf5_path, 'w') as f:
        # NOTE: the dataset key keeps the historical misspelling
        # 'GHZdisrtibution' so existing readers continue to work.
        f.create_dataset('GHZdisrtibution', data=np.array(GHZdisrtibution))
    print(hdf5_path)
    return hdf5_path

def read_TFIGHZdisrtibution_hdf5(filename):
    """
    Load the GHZ distribution written by store_TFIGHZdistribution_hdf5.

    Parameters
    ----------
    filename : str
        Path of the HDF5 file.

    Returns
    -------
    numpy.ndarray
        The stored GHZ state distribution.
    """
    # Dataset key keeps the historical misspelling used by the writer.
    with h5py.File(filename, 'r') as f:
        distribution = f['GHZdisrtibution'][:]
    return distribution



def read_TFIGHZQFI_hdf5(filename):
    """
    Load the TFIM-GHZ QFI datasets written by store_TFIGHZQFI_hdf5.

    Parameters
    ----------
    filename : str
        Path of the HDF5 file.

    Returns
    -------
    tuple of numpy.ndarray
        (Interaction_timelist, SxandSx2list, SyandSy2list,
        SxSyandSxSy2list).
    """
    keys = ('Interaction_timelist', 'SxandSx2list',
            'SyandSy2list', 'SxSyandSxSy2list')
    with h5py.File(filename, 'r') as f:
        loaded = tuple(f[key][:] for key in keys)
    return loaded



def read_meanfielddata_hdf5(filename):
    """
    Load mean-field data written by store_meanfielddata_hdf5.

    Parameters
    ----------
    filename : str
        Path of the HDF5 file.

    Returns
    -------
    thetalist : numpy.ndarray
        Scanned theta angles.
    philist : numpy.ndarray
        Scanned phi angles.
    meanfieldresult : numpy.ndarray
        Measured mean-field values.
    """
    with h5py.File(filename, 'r') as f:
        thetalist, philist, meanfieldresult = (
            f['thetalist'][:], f['philist'][:], f['meanfieldresult'][:]
        )
    return thetalist, philist, meanfieldresult

def read_Thetafluxmeanfielddata_hdf5(filename):
    """
    Load theta-flux mean-field data written by
    store_Thetafluxmeanfielddata_hdf5.

    Parameters
    ----------
    filename : str
        Path of the HDF5 file.

    Returns
    -------
    interactiontime : numpy.ndarray
        Interaction times.
    meanfieldresult : numpy.ndarray
        Measured mean-field values.
    """
    with h5py.File(filename, 'r') as f:
        interactiontime, meanfieldresult = (
            f['interactiontime'][:], f['meanfieldresult'][:]
        )
    return interactiontime, meanfieldresult

def store_Qfunction_hdf5(interactiontype, interactiontime, thetalist, philist, Qfunctionresult):
    """
    Save a measured Q-function into a date-stamped HDF5 file.

    Parameters
    ----------
    interactiontype : str
        Interaction label; becomes part of the file name.
    interactiontime : float
        Interaction time; stored as a scalar and put in the file name.
    thetalist, philist : array_like
        Sampled angles on the sphere.
    Qfunctionresult : array_like
        Measured Q-function values.

    Returns
    -------
    str
        Full path of the HDF5 file that was written.
    """
    disk_path = r'D:\Data'
    path_prefix = disk_path + time.strftime("\%Y\%Y%m\%Y%m%d\\")
    # Create today's directory tree on first use.
    if not os.path.exists(path_prefix):
        print("Data will be save into ", path_prefix)
        os.makedirs(path_prefix)
    stamp = time.strftime("-%Y%m%d%H%M%S")
    hdf5_path = path_prefix + str(interactiontype) + str(interactiontime) + "Qfunctiondata" + stamp + ".hdf5"
    with h5py.File(hdf5_path, 'w') as f:
        f.create_dataset('interactiontime', data=interactiontime)  # scalar
        f.create_dataset('thetalist', data=np.array(thetalist))
        f.create_dataset('philist', data=np.array(philist))
        f.create_dataset('Qfunctionresult', data=np.array(Qfunctionresult))
    print(hdf5_path)
    return hdf5_path


def read_Qfunction_hdf5(filename):
    """
    Load Q-function data written by store_Qfunction_hdf5.

    Parameters
    ----------
    filename : str
        Path of the HDF5 file.

    Returns
    -------
    interactiontime : float
        Interaction time (stored as a scalar).
    thetalist : numpy.ndarray
        Sampled theta angles.
    philist : numpy.ndarray
        Sampled phi angles.
    Qfunctionresult : numpy.ndarray
        Measured Q-function values.
    """
    with h5py.File(filename, 'r') as f:
        interactiontime = f['interactiontime'][()]   # scalar dataset
        thetalist = f['thetalist'][:]
        philist = f['philist'][:]
        Qfunctionresult = f['Qfunctionresult'][:]
    return interactiontime, thetalist, philist, Qfunctionresult


def store_ionfidelitydata_hdf5(ionfidelity_list):
    """
    Save per-ion detection fidelities into a date-stamped HDF5 file.

    Parameters
    ----------
    ionfidelity_list : list
        ionfidelity_list[ionindex] = [dark_fidelity, bright_fidelity]
        for that ion.

    Returns
    -------
    str
        Full path of the HDF5 file that was written.
    """
    disk_path = r'D:\Data'
    path_prefix = disk_path + time.strftime("\%Y\%Y%m\%Y%m%d\\")
    # Create today's directory tree on first use.
    if not os.path.exists(path_prefix):
        print("Data will be save into ", path_prefix)
        os.makedirs(path_prefix)
    stamp = time.strftime("-%Y%m%d%H%M%S")
    hdf5_path = path_prefix + "ionfidelity_data" + stamp + ".hdf5"
    with h5py.File(hdf5_path, 'w') as f:
        f.create_dataset('ionfidelity_list', data=ionfidelity_list)
    print(hdf5_path)
    return hdf5_path

# Function to read data from HDF5
def read_ionfidelitydata_hdf5(filename):
    """
    Load per-ion detection fidelities written by
    store_ionfidelitydata_hdf5.

    Parameters
    ----------
    filename : str
        Path of the HDF5 file.

    Returns
    -------
    ionfidelity_list : numpy.ndarray
        ionfidelity_list[ionindex] = [dark_fidelity, bright_fidelity]
        for that ion.
    """
    with h5py.File(filename, 'r') as f:
        fidelities = f['ionfidelity_list'][()]
    return fidelities



# Type aliases, spelled with typing.Union for Python 3.8 compatibility
# (the `str | Path` syntax needs 3.10+).
StrOrPath   = Union[str, Path]        # anything usable as a filesystem path
ScalarOrStr = Union[float, int, str]  # a numeric value or its string form


class ExperimentLogger:
    """
    CSV logger for squeezing-measurement experiment runs.

    File layout
    -----------
    row 0 : parameter names
    row 1 : repr() of parameter values
    row 2 : blank separator
    row 3 : data header (_DATA_HEADER)
    row 4+: one measurement per row

    Raises
    ------
    RuntimeError
        If log_parameters() is called more than once per file.
    """
    _DATA_HEADER = (
        "compressed_interaction_time",
        "total_spin_file",
        "spin_fluct_file",
    )

    def __init__(self,
                 experiment_name: str = "Experiment",
                 log_dir: StrOrPath = "./logs") -> None:
        self.log_dir = Path(log_dir)
        self.log_dir.mkdir(parents=True, exist_ok=True)
        self._open_new_file(experiment_name)

    # ───────────────────────────────── section I: parameters ──
    def log_parameters(self, **params: Any) -> None:
        """Write the parameter rows plus the data header; allowed once per file."""
        if self._params_written:
            raise RuntimeError("Parameters already written once.")
        self._writer.writerow(list(params.keys()))
        self._writer.writerow([repr(v) for v in params.values()])
        self._writer.writerow([])                 # blank separator row
        self._writer.writerow(self._DATA_HEADER)  # data header row
        self._params_written = True
        self._fh.flush()

    # ───────────────────────────────── section II: data lines ─
    def add_measurement(self,
                        compressed_t: ScalarOrStr,
                        total_spin_file: StrOrPath,
                        spin_fluct_file: StrOrPath) -> None:
        """Append one measurement row (time + the two result-file paths)."""
        self._writer.writerow([
            compressed_t,
            Path(total_spin_file).as_posix(),
            Path(spin_fluct_file).as_posix(),
        ])
        self._fh.flush()

    # ──────────────────────────────── utility methods ──
    def new_file(self,
                 experiment_name: Optional[str] = None,
                 reset_params: bool = True) -> None:
        """Close the current log and start a fresh file."""
        self.close()
        self._open_new_file(experiment_name or self._experiment_name)
        self._params_written = not reset_params

    def close(self) -> None:
        """Close the underlying file handle if it was ever opened."""
        try:
            self._fh.close()
        except AttributeError:
            # _open_new_file never completed; nothing to close.
            pass

    def log_path(self) -> Path:
        """Return the absolute Path to the log file currently being written."""
        return self._file_path.resolve()

    # ─────────────────────────────── private helpers ──
    def _open_new_file(self, experiment_name: str) -> None:
        """Create logs/YYYY/YYYYMM/YYYYMMDD/<timestamp>_<name>.csv and open it."""
        now = datetime.now()
        # hierarchical date path:  logs/YYYY/YYYYMM/YYYYMMDD
        datedir = (self.log_dir
                   / now.strftime("%Y")
                   / now.strftime("%Y%m")
                   / now.strftime("%Y%m%d"))
        datedir.mkdir(parents=True, exist_ok=True)
        self._experiment_name = experiment_name or "Experiment"
        stamp = now.strftime("%Y%m%d_%H%M%S")
        self._file_path = datedir / f"{stamp}_{self._experiment_name}.csv"
        print("实验信息存储在", self._file_path)
        self._fh = self._file_path.open("w", newline="", encoding="utf-8")
        self._writer = csv.writer(self._fh)
        self._params_written = False

    # enable `with ExperimentLogger(...) as log:`
    def __enter__(self): return self
    def __exit__(self, exc_type, exc, tb): self.close()



def load_experiment_log(
    file_path: Union[str, Path],
) -> Tuple[Dict[str, Any], List[Any], List[str], List[str]]:
    """
    Parse an experiment-log CSV produced by *ExperimentLogger*.

    Parameters
    ----------
    file_path
        Path-like pointing to the `.csv` file.

    Returns
    -------
    params : dict
        Key/value pairs from the parameter section; values are
        round-tripped through ast.literal_eval when possible.
    interaction_times : list
        "compressed_interaction_time" column, floats when parseable.
    total_spin_files : list[str]
        Filenames from the "total_spin_file" column.
    spin_fluct_files : list[str]
        Filenames from the "spin_fluct_file" column.
    """
    path = Path(file_path)
    if not path.is_file():
        raise FileNotFoundError(path)

    params: Dict[str, Any] = {}
    interaction_times: List[Any] = []
    total_spin_files: List[str] = []
    spin_fluct_files: List[str] = []

    with path.open(newline="", encoding="utf-8") as fh:
        rdr = csv.reader(fh)

        # ── section I: first two rows are keys then values ────
        try:
            key_row = next(rdr)
            value_row = next(rdr)
        except StopIteration:
            raise ValueError("CSV is missing the parameter rows")
        for key, raw in zip(key_row, value_row):
            try:
                params[key] = ast.literal_eval(raw)
            except Exception:
                params[key] = raw  # not a Python literal: keep raw string

        # ── skip forward to the data-header row ───────────────
        for row in rdr:
            if row and row[0] == "compressed_interaction_time":
                break
        else:
            raise ValueError("CSV is missing the data-header row")

        # ── section II: measurement rows ──────────────────────
        for row in rdr:
            if not row or all(not cell.strip() for cell in row):
                continue  # skip blank rows
            try:
                t_val: Any = float(row[0])
            except ValueError:
                t_val = row[0]  # non-numeric: keep as string
            interaction_times.append(t_val)
            total_spin_files.append(row[1] if len(row) > 1 else "")
            spin_fluct_files.append(row[2] if len(row) > 2 else "")

    return params, interaction_times, total_spin_files, spin_fluct_files





class MeanFieldEllipseLogger:
    """
    Record experiment parameters + (interaction_time, ellipse_file) pairs.

    File layout
    -----------
    row 0 : parameter names
    row 1 : repr() of parameter values
    row 2 : blank separator
    row 3 : interaction_time, ellipse_file   (header)
    row 4+: t₁, ellipse_1.h5 ...
    """

    _DATA_HEADER = ("interaction_time", "ellipse_file")

    def __init__(self,
                 experiment_name: str = "Experiment",
                 log_dir: StrOrPath = "./logs") -> None:
        self.log_dir = Path(log_dir)
        self.log_dir.mkdir(parents=True, exist_ok=True)
        self._open_new_file(experiment_name)

    # ───────────────────────── parameters section ───────────────
    def log_parameters(self, **params: Any) -> None:
        """Write the parameter rows plus the data header; allowed once per file."""
        if self._params_written:
            raise RuntimeError("Parameters already logged.")
        self._writer.writerow(params.keys())
        self._writer.writerow([repr(v) for v in params.values()])
        self._writer.writerow([])                # blank separator row
        self._writer.writerow(self._DATA_HEADER) # data header row
        self._params_written = True
        self._fh.flush()

    # ───────────────────────── data lines section ───────────────
    def add_entry(self,
                  interaction_time: ScalarOrStr,
                  ellipse_file: StrOrPath) -> None:
        """Append one (interaction_time, ellipse_file) row."""
        row = [interaction_time, Path(ellipse_file).as_posix()]
        self._writer.writerow(row)
        self._fh.flush()

    # ─────────────────────────── housekeeping ──────────────────
    def new_file(self,
                 experiment_name: Optional[str] = None,
                 reset_params: bool = True) -> None:
        """Close the current log and start a fresh file."""
        self.close()
        self._open_new_file(experiment_name or self._experiment_name)
        self._params_written = not reset_params

    def close(self) -> None:
        """Close the underlying file handle if it was ever opened."""
        try:
            self._fh.close()
        except AttributeError:
            # _open_new_file never completed; nothing to close.
            pass

    def log_path(self) -> Path:
        """Return the absolute Path to the log file currently being written."""
        return self._file_path.resolve()

    # context-manager sugar
    def __enter__(self): return self
    def __exit__(self, exc_type, exc, tb): self.close()

    # ───────────────────────── private helpers ──────────────────
    def _open_new_file(self, experiment_name: str) -> None:
        """Create logs/YYYY/YYYYMM/YYYYMMDD/<timestamp>_<name>.csv and open it."""
        now = datetime.now()
        # hierarchical date path:  logs/YYYY/YYYYMM/YYYYMMDD
        datedir = (self.log_dir
                   / now.strftime("%Y")
                   / now.strftime("%Y%m")
                   / now.strftime("%Y%m%d"))
        datedir.mkdir(parents=True, exist_ok=True)
        self._experiment_name = experiment_name or "Experiment"
        stamp = now.strftime("%Y%m%d_%H%M%S")
        self._file_path = datedir / f"{stamp}_{self._experiment_name}.csv"
        print("实验信息存储在", self._file_path)
        self._fh = self._file_path.open("w", newline="", encoding="utf-8")
        self._writer = csv.writer(self._fh)
        self._params_written = False


# reader ----------------------------------------------------------

def load_Ellipsemeanfield_log(
    file_path: StrOrPath,
) ->  Tuple[Dict[str, Any], List[Any], List[str]]:
    """
    Read a CSV produced by MeanFieldEllipseLogger.

    Parameters
    ----------
    file_path
        Path-like pointing to the `.csv` file.

    Returns
    -------
    params : dict
        Initial parameters (literal-eval'd when possible).
    interaction_times : list
        Interaction-time column, floats when parseable.
    ellipse_files : list[str]
        Filenames from the ellipse_file column.
    """
    path = Path(file_path)
    if not path.is_file():
        raise FileNotFoundError(path)

    with path.open(newline="", encoding="utf-8") as fh:
        rdr = csv.reader(fh)

        # parameter section: first two rows are keys then values
        try:
            key_row = next(rdr)
            value_row = next(rdr)
        except StopIteration:
            raise ValueError("Missing parameter rows.")

        params = {}
        for key, raw in zip(key_row, value_row):
            try:
                params[key] = ast.literal_eval(raw)
            except Exception:
                params[key] = raw  # not a Python literal: keep raw string

        # skip forward to the data-header row
        for row in rdr:
            if row and row[0] == "interaction_time":
                break
        else:
            raise ValueError("Data header not found.")

        times:   List[Any]  = []
        efiles:  List[str]  = []

        # measurement rows
        for row in rdr:
            if not row or all(not c.strip() for c in row):
                continue  # skip blank rows
            try:
                times.append(float(row[0]))
            except ValueError:
                times.append(row[0])  # non-numeric: keep as string
            efiles.append(row[1] if len(row) > 1 else "")

    return params, times, efiles
