"""
更高效的twirling set选择
"""
import itertools
import os
import pickle
from pkgutil import extend_path
import random
import re
import statistics
import sys
from telnetlib import EC
from threading import local
import time
from collections import defaultdict,deque
from copy import copy, deepcopy
from tkinter import EXCEPTION 

import cirq
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd 
import seaborn as sns
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from qiskit import QuantumCircuit 
from qiskit.circuit.library import U3Gate
from qiskit_aer.noise import NoiseModel,ReadoutError,depolarizing_error,amplitude_damping_error,pauli_error,coherent_unitary_error,phase_damping_error
from qiskit.quantum_info import Operator
from qiskit.circuit.library import UnitaryGate,CXGate
import json
from qiskit_aer import AerSimulator,StatevectorSimulator 
from scipy.linalg import expm
from itertools import combinations 
import torch.nn as nn
import torch
import torch.nn.functional as F
import torch.optim as optim
import math 
import warnings 
warnings.filterwarnings("ignore")


# NOTE(review): module-level counter; locals of the same name shadow it below — confirm it is still needed.
idx=0
# Single- and two-qubit gate error rates (presumably device benchmark values — TODO confirm source).
sy23_1e=1.6*10**-3
sy23_2e=6*10**-3
# Two-qubit gate duration in seconds (20 ns).
sy23_2dur=20*10**-9
# Mean T1 over the listed qubits, converted from microseconds to seconds.
sy23_t1dur=np.mean(np.array([25,18.8,29.1,24.4,14.9,21.9,20.6,27,23.9,24.1,18,18.5,13.3,19,21.3,26,30.1,33.3,16.6,25.3,21.2,18.9,22.9,10.6,21,23.8,24.6,32,29.9,28.5,21,21,18.2,22.2,22.8,22,23,26.7,22.6,29.6,14.4,21.9,23.9,20.7,26.9,21.4,22.5,4.8,20.8,27.3,25.7,34.4,20.7,30,19.5,21.9,19.3,23.8,23.7,33.4,28.7,17,22.7,23.3,24.8,35.3,22.8,24.3,31.9,24.6]))*10**-6
# Amplitude-damping probability accumulated over one two-qubit gate: 1 - exp(-t_gate / T1).
sy23_decay=1-np.exp(-sy23_2dur/sy23_t1dur) 
#In[]
#// ANCHOR settings
datapath="D:/data/chainTPN"
def cx_power_gate(a):
    """Return CX**a (fractional power of CNOT) wrapped as a qiskit UnitaryGate."""
    cx_gate = CXGate() 
    cx_power_a = cx_gate.power(a)
    cx_power_a_gate = UnitaryGate(cx_power_a) 
    return cx_power_a_gate 
# Generator matrices for correlated two-qubit readout errors (consumed by
# random_MER_corr via scipy.linalg.expm). Rows/columns index the joint 2-bit
# state {00,01,10,11}; each generator moves probability between two states.
corr_matrix_1=np.array([[0,0,0,0],
                    [0,-1,1,0],
                    [0,0,0,0],
                    [0,0,0,0]])
corr_matrix_2=np.array([[0,0,0,0],
                    [0,0,0,0],
                    [0,1,-1,0],
                    [0,0,0,0]])
corr_matrix_3=np.array([[-1,0,0,1],
                    [0,0,0,0],
                    [0,0,0,0],
                    [0,0,0,0]])
# NOTE(review): corr_matrix_4 is not referenced in the visible code — confirm
# it is intentionally unused (kept for symmetry?).
corr_matrix_4=np.array([[0,0,0,0],
                    [0,0,0,0],
                    [0,0,0,0],
                    [1,0,0,-1]]) 
def rx(theta):
    """Single-qubit X-rotation matrix exp(-i*theta/2 * X)."""
    c = np.cos(theta / 2)
    s = np.sin(theta / 2)
    return np.array([[c, -1j * s], [-1j * s, c]])
def int2string(n,base,qn)->str:
    """Digits of n in the given base, left-padded with '0' to at least qn chars.

    Note: digits >= 10 are rendered via str() (multi-character), matching the
    original behavior.
    """
    digits = []
    while True:
        n, rem = divmod(n, base)
        digits.append(str(rem))
        if n == 0:
            break
    return "".join(reversed(digits)).rjust(qn, "0")
def int2Zstring(n,qn):
    """Z/I string of length >= qn whose 'Z' positions are the set bits of n."""
    return "".join("Z" if bit == "1" else "I" for bit in int2string(n, 2, qn))
def find_factors_or_sqrt(n):
    """Return (sqrt, sqrt) for perfect squares, otherwise the factor pair
    (a, b) with a*b == n whose members are closest to each other."""
    root = math.isqrt(n)
    if root * root == n:
        return (root, root)
    pairs = [(d, n // d) for d in range(1, int(math.sqrt(n)) + 1) if n % d == 0]
    return min(pairs, key=lambda pair: abs(pair[0] - pair[1]))
def Pauli_nontrivial_qubit(obs,inverse=False,Z_ok=False):
    """Indices where the Pauli string obs acts non-trivially.

    With Z_ok=True, Z counts as trivial (only X/Y positions are reported).
    With inverse=True, each index i is reflected to qn-1-i (endianness flip).
    """
    qn = len(obs)
    watched = ("X", "Y") if Z_ok else ("X", "Y", "Z")
    return [qn - 1 - pos if inverse else pos
            for pos, ch in enumerate(obs) if ch in watched]
def continuous_tensor(A_list):
    """Kronecker product of all matrices in A_list, left to right."""
    it = iter(A_list)
    result = next(it)
    for factor in it:
        result = np.kron(result, factor)
    return result
def savedata(data,fname,cover_old):
    """Persist a pandas DataFrame to CSV at fname.

    If the file exists: append without header when cover_old is False,
    otherwise overwrite it. If the file does not exist it is created
    (header included, since append-mode on a new file still writes one).
    """
    joinedpath=fname
    if os.path.isfile(joinedpath):
        if cover_old==False:
            data.to_csv(joinedpath,mode='a',header=False)
        else:
            data.to_csv(joinedpath)
    else:
        data.to_csv(joinedpath,mode='a')
def random_continoue_obs(qn,c_weight,base=4,seed=-1): 
    """Random Pauli observable whose non-identity part fills a contiguous
    window of c_weight qubits placed at a random offset.

    Raises ValueError when c_weight > qn.
    """
    if seed == -1:
        seed = np.random.randint(0, 10000)
    rnd = np.random.RandomState(seed)
    if qn < c_weight:
        raise ValueError("Wrong c weight!")
    if qn == c_weight:
        return random_obs(qn, qn, base=base, seed=seed)
    start = rnd.randint(0, qn + 1 - c_weight)
    core = random_obs(c_weight, c_weight, base=base, seed=seed)
    return "I" * start + core + "I" * (qn - start - c_weight)
def random_obs(qn,weight,base=4,seed=-1):
    """Random Pauli string of length qn, returned as a string.

    weight==False (or 0): draw a uniform non-identity Pauli index and decode
    its base-4 digits. Otherwise: choose `weight` distinct qubit positions and
    fill them with random X/Y/Z letters (base==4) or all 'Z' (any other base).
    """
    if seed==-1:
        seed=np.random.randint(0,10000)
    rnd=np.random.RandomState(seed)
    if weight==False:
        # Decode the base-4 digits of a random Pauli index into letters.
        digits=int2string(rnd.randint(1,4**qn),4,qn)
        table={"0":"I","1":"X","2":"Y","3":"Z"}
        return "".join(table[d] for d in digits)
    if weight>qn:
        weight=qn
    chosen_idx=rnd.choice(range(qn),size=weight,replace=False)
    if base==4:
        # One RNG draw per letter, matching the original call sequence.
        letters=[rnd.choice(["X","Y","Z"],size=1)[0] for _ in range(weight)]
    else:
        letters=["Z"]*weight
    next_letter=iter(letters)
    return "".join(next(next_letter) if q in chosen_idx else "I" for q in range(qn))
 
#// ANCHOR Twirling
# 2x2 unitary matrices of the single-qubit Paulis, extracted from cirq.
I=cirq.unitary(cirq.I)
X=cirq.unitary(cirq.X)
Y=cirq.unitary(cirq.Y)
Z=cirq.unitary(cirq.Z)  
def add_pauli(qc,pauli_s):
    """Append the Pauli gate pauli_s[i] to qubit i of circuit qc ('I' skipped).

    Returns the same circuit object for chaining.
    """
    for qubit, letter in enumerate(pauli_s):
        if letter == "X":
            qc.x(qubit)
        elif letter == "Y":
            qc.y(qubit)
        elif letter == "Z":
            qc.z(qubit)
    return qc
def Pauli_XY_qubit(pauli_s,inverse=False):
    """Indices of qubits where pauli_s is X or Y; reflected (i -> qn-1-i)
    when inverse is True."""
    qn = len(pauli_s)
    return [qn - 1 - pos if inverse else pos
            for pos, ch in enumerate(pauli_s) if ch in ("X", "Y")]
def add_pauli_post_process(pauli_s,counts):
    """Classically undo a Pauli twirl on measurement counts: flip the measured
    bit of every qubit where pauli_s acts as X or Y (Z does not change a
    Z-basis outcome, hence Z_ok=True below).

    counts maps qiskit bit-strings (qubit 0 rightmost) to shot counts; the
    returned dict keeps the same convention.
    """
    new_counts={}
    qn=len(pauli_s)
    # Qubits whose outcome bit must be flipped.
    qidx_list=Pauli_nontrivial_qubit(pauli_s,Z_ok=True)
    for state_str,count in counts.items(): 
        new_state_str=""
        for i in range(qn):
            # state_str[::-1][i] is qubit i's bit; prepending while i ascends
            # rebuilds the string in the original (qubit 0 rightmost) order.
            if i in qidx_list:
                new_state_str=str((int(state_str[::-1][i])+1)%2)+new_state_str
            else:
                new_state_str=state_str[::-1][i]+new_state_str
        new_counts[new_state_str]=count
    return new_counts 
def pauli_tranfer_idx(pauli,idx):
    """Index of the basis state obtained from idx by flipping the bit of every
    qubit where pauli acts as X or Y (big-endian bit order)."""
    qn = len(pauli)
    bits = int2string(idx, 2, qn)
    flipped = [str(1 - int(b)) if p in ("X", "Y") else b
               for p, b in zip(pauli, bits)]
    return int("".join(flipped), base=2)
def apply_pauli_post_process_prob(pauli,prob):
    """Permute a probability vector to undo a Pauli twirl: entry i is moved to
    the index with the X/Y-affected bits flipped."""
    qn = len(pauli)
    permuted = np.zeros(2 ** qn)
    for src in range(2 ** qn):
        permuted[pauli_tranfer_idx(pauli, src)] = prob[src]
    return permuted
def Pauli2Tableau(obs):
    """Encode a Pauli string into a cirq CliffordTableau by writing its X/Z
    components into the tableau's first X-row and first Z-row (Y sets both)."""
    qn=len(obs)
    x=np.zeros(qn,dtype=bool)
    z=np.zeros(qn,dtype=bool)
    for i, char in enumerate(obs):
        if char=='X':
            x[i]=True
        elif char=='Z':
            z[i]=True
        elif char=='Y':
            x[i]=True
            z[i]=True 
    temp_tableau=cirq.CliffordTableau(num_qubits=qn)
    # Only row 0 carries the encoded Pauli; the rest stays the identity tableau.
    temp_tableau.xs[0]=x
    temp_tableau.zs[0]=z
    return temp_tableau
def Tableau2Pauli(tableau,qn):
    """Decode the first row of a Clifford tableau back into a Pauli string
    (inverse of Pauli2Tableau). Returns the observable as a string."""
    xs = tableau.xs[0]
    zs = tableau.zs[0]
    lookup = {(True, True): "Y", (True, False): "X",
              (False, True): "Z", (False, False): "I"}
    return "".join(lookup[(bool(xs[i]), bool(zs[i]))] for i in range(qn))
def qiskit2Tableau(qc,qn):
    """Convert a CX-only qiskit circuit into a cirq CliffordTableau and return
    its inverse (so Paulis can be propagated backwards through the circuit).

    Raises ValueError for any gate other than cx / id.
    """
    tableau=cirq.CliffordTableau(num_qubits=qn)
    for instruction,qargs,_ in qc.data:
        op_name=instruction.name
        if op_name=='cx':
            # find_bit maps the qiskit Qubit object to its integer index.
            tableau.apply_cx(qc.find_bit(qargs[0]).index,qc.find_bit(qargs[1]).index)
        elif op_name=="id":
            pass
        else:
            raise ValueError(f"Unsupported operation: {op_name}")
    return tableau.inverse()
def Clifford_conjugation(cliff_circ,qn,pauli_s):
    """Conjugate the Pauli pauli_s by the Clifford (CX-chain) circuit.

    pauli_s sits after the circuit; it is propagated backwards through the
    CX chain via tableau composition.
    """
    pauli_tableau=Pauli2Tableau(pauli_s)
    cx_chain_tableau=qiskit2Tableau(cliff_circ.copy(),qn)
    result_pauli_tableau=pauli_tableau.then(cx_chain_tableau) 
    result_pauli=Tableau2Pauli(result_pauli_tableau,qn)
    return result_pauli 
def obs2bitstring(obs):
    """'0' for identity positions, '1' for any non-trivial Pauli."""
    return "".join("0" if ch == "I" else "1" for ch in obs)
def bitstring2obs(string):
    """Map a bit-string to a Z/I observable: '0' -> 'I', anything else -> 'Z'."""
    return "".join("I" if ch == "0" else "Z" for ch in string)
def bitstring2Pauliobs(string):
    """Map base-4 digits to Paulis: 0->I, 1->X, 2->Y, anything else -> Z."""
    table = {"0": "I", "1": "X", "2": "Y"}
    return "".join(table.get(ch, "Z") for ch in string)
#In[]
#// ANCHOR expectation  
def readjson(fname):
    """Read a JSON file under datapath and return the parsed object, or the
    string 'fail' when the file is missing or not valid JSON.

    The 'fail' sentinel return is kept for backward compatibility with
    existing callers that compare against it.
    """
    joindpath=os.path.join(datapath,fname)
    try:
        with open(joindpath,'r') as f:
            return json.loads(f.read())
    # Narrowed from a bare `except:` so programming errors (NameError,
    # KeyboardInterrupt, ...) are no longer silently swallowed.
    except (OSError,json.JSONDecodeError):
        return 'fail'
def binlist2num(n,l):
    """Integer value of the first n entries of bit-list l, l[0] most significant."""
    value = 0
    for bit in l[:n]:
        value = value * 2 + bit
    return value

def read0630_6():
    """Build the 6-qubit readout transition matrix from the 2023-06-30
    calibration data.

    Column i is the empirical distribution of measured bit-strings when
    basis state i was prepared; normalization assumes 5000 shots per state.
    """
    fname="20230630readoutdata/all.json"
    dict3=readjson(fname)['dataall']
    n=6
    T=np.zeros((2**n,2**n))
    for i in range(2**n):
        arr=dict3[i]
        for result in arr:
            # result is a list of booleans, one per qubit.
            lst = [1 if elem else 0 for elem in result]
            T[binlist2num(n,lst),i]+=1
    # 5000 shots per prepared state — TODO confirm against the data file.
    T=T/5000
    return T

def partial_tran(T,dimA):
    """Trace out the B subsystem of a transition matrix (transition-matrix
    specific partial trace).

    T is (dimA*dimB) x (dimA*dimB); the result b satisfies
    b[k, i] = sum_{j, l} T[k*dimB + j, i*dimB + l].

    The original O(dimA^2 * dimB^2) Python quadruple loop is replaced by a
    reshape plus a sum over the two B axes.
    """
    total=np.shape(T)[0]
    dimB=total//dimA
    reshaped=np.reshape(T,(dimA,dimB,dimA,dimB))
    # Cast keeps the float dtype the original np.zeros accumulation produced.
    return reshaped.sum(axis=(1,3)).astype(float)

def make_transition_mat(n):
    """Readout transition matrix for n (<= 6) qubits, obtained by tracing the
    calibrated 6-qubit matrix down to n qubits and renormalizing."""
    calied_n=6
    T=read0630_6()
    # Partial trace over the remaining 6-n qubits, renormalized per traced state.
    T=partial_tran(T,2**n)/2**(calied_n-n)
    return T

def count_elements(arr,condition):
    """Number of elements of arr for which the predicate `condition` holds."""
    return sum(1 for item in arr if condition(item))

def calied_transition_submats(idx_list):
    """Per-qubit 2x2 readout matrices [[p00, 1-p11], [1-p00, p11]] estimated
    from the per-qubit calibration files.

    idx_list entries are mapped mod 6 onto the calibrated physical qubit ids
    in `qid`; dataall[0]/dataall[1] hold outcomes for prepared |0>/|1>.
    """
    idx_list=[i%6 for i in idx_list]
    # Physical qubit ids of the calibrated device — TODO confirm mapping.
    qid=[2,5,6,9,10,11]
    submats=[]
    for i in idx_list:
        fname=f"20230630readoutdata/{qid[i]}.json"
        dict3=readjson(fname)
        dataall=dict3['dataall']
        p00=count_elements(dataall[0],lambda x:x ==[False])/len(dataall[0])
        p11=count_elements(dataall[1],lambda x:x ==[True])/len(dataall[1])
        T=np.array([[p00,1-p11],[1-p00,p11]])
        submats.append(T)
    return submats

def calied_transition_mat(idx_list):
    """Kronecker product of the per-qubit calibrated readout matrices
    (real part), in idx_list order."""
    acc = 1
    for sub in calied_transition_submats(idx_list):
        acc = np.kron(acc, sub)
    return np.real(acc)

def read_T_cali(n):
    """Load the n-qubit transition matrix T and calibration matrix cali_T,
    building and caching each as a .npy file on first use."""
    datafile_T=datapath+"/TPN_cali/T_{}.npy".format(n)
    datafile_caliT=datapath+"/TPN_cali/caliT_{}.npy".format(n)
    if os.path.exists(datafile_T):
        T=np.array(np.load(datafile_T))
    else:
        T=make_transition_mat(n)
        np.save(datafile_T,T)
    if os.path.exists(datafile_caliT):
        cali_T=np.array(np.load(datafile_caliT))
    else:
        cali_T=calied_transition_mat(list(range(n)))
        np.save(datafile_caliT,cali_T)
    return T,cali_T

def query_T(qn):
    """Return [T, op]: the qn-qubit readout transition matrix and a function
    applying the inverse calibration matrix to a probability vector.

    For qn > 6 the 6-qubit matrices are extended by a Kronecker product with
    a (qn-6)-qubit block.
    """
    if qn>6:
        n=6
    else:
        n=qn
    T,cali_T=read_T_cali(n)
    if qn>6:
        sup_T,sup_cali_T=read_T_cali(qn-n)
        T=np.kron(T,sup_T)
        cali_T=np.kron(cali_T,sup_cali_T)
    # NOTE(review): the inverse is recomputed on every call of op — consider
    # precomputing it (or using np.linalg.solve) if this is on a hot path.
    op=lambda x: np.linalg.inv(cali_T)@np.array(x) 
    return [np.array(T),op] 
def query_cali_T(qn):
    """Calibration matrix reduced to n = min(qn, 6) qubits via partial trace.

    NOTE(review): T from read_T_cali is loaded but unused here, and qn > 6 is
    silently truncated to 6 — confirm both are intentional.
    """
    if qn>6:
        n=6
    else:
        n=qn
    calied_n=6
    T,cali_T=read_T_cali(n) 
    cali_T=partial_tran(cali_T,2**n)/2**(calied_n-n)
    return cali_T
def random_MER_ind(qn,measure_error,std,seed=1,max_retries=1,data_qubit_scale=1,max_qn=20):
    """Draw per-qubit measurement-error rates ~ N(measure_error, std).

    max_qn samples are drawn up front (so the random stream does not depend
    on qn) and the first qn are kept; negative entries are redrawn up to
    max_retries times, then clamped to zero. When data_qubit_scale is
    positive and != 1, every data-qubit rate is scaled by it at the end.
    Returns {qubit_index: rate}.
    """
    rng = np.random.RandomState(seed)
    rates = rng.normal(measure_error, std, max_qn)[:qn]
    attempt = 0
    while attempt < max_retries and np.any(rates < 0):
        negatives = np.where(rates < 0)[0]
        rates[negatives] = rng.normal(measure_error, std, len(negatives))
        attempt += 1
    rates[rates < 0] = 0
    error_map = {q: r for q, r in zip(range(qn), rates)}
    if data_qubit_scale > 0 and data_qubit_scale != 1:
        error_map = {q: r * data_qubit_scale for q, r in error_map.items()}
    return error_map
#Assumes a 1D chain topology by default
def random_MER_corr(qn,corr_mean,corr_std,seed=1): 
    """Random correlated-readout error matrices for the neighboring qubit
    pairs of a 1D chain.

    For each pair, three N(corr_mean, corr_std) rates weight the generators
    corr_matrix_1..3 (corr_matrix_1 is doubled); the matrix exponential of
    the combined generator is transposed, negative entries are clipped, and
    each row is renormalized to sum to 1.
    Returns {(q, q+1): 4x4 stochastic matrix}.

    NOTE(review): corr_matrix_4 is never used here — confirm intentional.
    """
    rnd=np.random.RandomState(seed=seed)
    p_corr=[rnd.normal(loc=corr_mean,scale=corr_std,size=3) for _ in range(qn-1)]
    corr_dict={}
    for q in range(qn-1):
        corr_matrix=p_corr[q][0]*2*corr_matrix_1+ \
            p_corr[q][1]*corr_matrix_2+ \
            p_corr[q][2]*corr_matrix_3 
        error_matrix=expm(corr_matrix).T
        normalized_err_mat=[]
        for err_row in error_matrix:
            # Clip negatives introduced by the transpose/exponential, then
            # renormalize so each row is a valid probability distribution.
            err_row=[0 if x<0 else x for x in err_row]
            normalized_err_mat.append(np.array(err_row)/np.sum(err_row))
        corr_dict[(q,q+1)]=np.array(normalized_err_mat)
    return corr_dict
def random_cx_error_dict(qn,cx_ger_mean,cx_ger_std,seed=1,min_value=0):
    """Per-bond CX gate-error rates for a 1D chain: N(mean, std) samples
    clipped from below at min_value, keyed by (i, i+1)."""
    rng = np.random.RandomState(seed=seed)
    rates = rng.normal(loc=cx_ger_mean, scale=cx_ger_std, size=qn - 1)
    rates = np.maximum(rates, min_value)
    return {(i, i + 1): rates[i] for i in range(qn - 1)}
def rz(theta):
    """Single-qubit Z-rotation matrix exp(-i*theta/2 * Z)."""
    phase = np.exp(1j * theta / 2)
    return np.array([[phase.conjugate(), 0], [0, phase]])
def ry(theta):
    """Single-qubit Y-rotation matrix exp(-i*theta/2 * Y)."""
    c = np.cos(theta / 2)
    s = np.sin(theta / 2)
    return np.array([[c, -s], [s, c]])
def compute_coherent_matrix(rate):
    """Two-qubit coherent-error unitary exp(-i*(rate/2) * X⊗X)."""
    pauli_x = np.array([[0, 1], [1, 0]])
    xx = np.kron(pauli_x, pauli_x)
    return expm(-1j * (rate / 2) * xx)
#In[]
def generate_noise_model(ger1,cx_ger_dict,measure_error_dict,error_type,co_error=0.01):
    """Assemble a qiskit-aer NoiseModel.

    Parameters:
        ger1: single-qubit gate error rate (applied to all listed 1q gates).
        cx_ger_dict: {(q1, q2): rate} two-qubit depolarizing rates per bond.
        measure_error_dict: {qubit: rate} readout error rates (asymmetric:
            p(1|0) = 0.4*rate, p(0|1) = 1.6*rate).
        error_type: one of "dephase", "depo", "ampl", "co", "composite"
            (and their "real-" prefixed variants); "real" maps to "composite".
        co_error: rotation angle of the coherent X⊗X error component.
    """
    noise_model=NoiseModel()
    if error_type=="real":
        error_type="composite"
    if ger1>0: 
        if error_type=="dephase":
            bit_flip=pauli_error([('Z',ger1),('I',1-ger1)])
            error1=bit_flip
        elif error_type=="depo":
            error1=depolarizing_error(ger1,1) 
        else:
            # Default single-qubit channel is depolarizing.
            error1=depolarizing_error(ger1,1)  
        noise_model.add_all_qubit_quantum_error(error1,["h","sdg","x","y","z","ry","rz","rx"])
    if len(cx_ger_dict)>0:
        if error_type=="depo":
            for cx_pair,rate in cx_ger_dict.items():
                error_depo=depolarizing_error(rate,2) 
                error2=error_depo
                # Attach to both qubit orderings of the bond.
                noise_model.add_quantum_error(error2,'cx',qubits=cx_pair)
                noise_model.add_quantum_error(error2,'cx',qubits=cx_pair[::-1])
        elif error_type in ["ampl","real-ampl"]:
            for cx_pair,rate in cx_ger_dict.items(): 
                error_depo=depolarizing_error(rate,2) 
                # Per-qubit amplitude damping over one CX duration, tensored
                # to act on both qubits.
                error_ampl=amplitude_damping_error(sy23_decay)
                # error_ampl=phase_damping_error(sy23_decay)
                error_ampl=error_ampl.tensor(error_ampl)
                error2=error_depo.compose(error_ampl)
                noise_model.add_quantum_error(error2,'cx',qubits=cx_pair)
                noise_model.add_quantum_error(error2,'cx',qubits=cx_pair[::-1])
        elif error_type in ["co","real-co"]: 
            for cx_pair,rate in cx_ger_dict.items(): 
                error_depo=depolarizing_error(rate,2) 
                error_co=coherent_unitary_error(compute_coherent_matrix(co_error)) 
                error2=error_depo.compose(error_co)  
                noise_model.add_quantum_error(error2,'cx',qubits=cx_pair)
                noise_model.add_quantum_error(error2,'cx',qubits=cx_pair[::-1])
        elif error_type in ["composite","real-composite"]:
            for cx_pair,rate in cx_ger_dict.items(): 
                # Depolarizing + coherent + amplitude damping combined.
                error_depo=depolarizing_error(rate,2)  
                error_ampl=amplitude_damping_error(sy23_decay)
                error_co=coherent_unitary_error(compute_coherent_matrix(co_error))
                error2=(error_depo.compose(error_co)).compose(error_ampl) 
                noise_model.add_quantum_error(error2,'cx',qubits=cx_pair)
                noise_model.add_quantum_error(error2,'cx',qubits=cx_pair[::-1])
    if len(measure_error_dict)>0:
        for q_idx,measure_error in measure_error_dict.items():
            # Asymmetric readout: |1> misread more often than |0>.
            p0=measure_error*0.4
            p1=measure_error*1.6 
            prob_matrix=[[1-p0,p0],[p1,1-p1] 
            ]    
            error=ReadoutError(prob_matrix) 
            noise_model.add_readout_error(error,[q_idx])
    return noise_model 
def apply_correlated_noise(counts,noise_mats,n_qubits,big_endian= False):
    """Apply pairwise correlated readout-noise matrices to a counts dict.

    counts: {bit-string: shots}. noise_mats: {(q1, q2): 4x4 row-stochastic
    matrix} mapping the true joint 2-bit state to measured outcomes. The
    noisy distribution is converted back to integer counts preserving the
    total shot number.
    """
    total_shots=sum(counts.values())
    prob=defaultdict(float)
    for k,v in counts.items():
        prob[k]=v/total_shots
    for (q1,q2),mat in noise_mats.items():
        new_prob=defaultdict(float)
        # Bit positions inside the integer representation of the state string.
        bit1_pos=(n_qubits-1-q1) if big_endian else q1
        bit2_pos=(n_qubits-1-q2) if big_endian else q2
        mask=(1<<bit1_pos)|(1<<bit2_pos) 
        for state_str,p in prob.items():
            state=int(state_str,2)
            bit1=(state>>bit1_pos)&1
            bit2=(state>>bit2_pos)&1
            bits=(bit1<<1)|bit2
            for meas in range(4):
                p_trans=mat[bits][meas]
                if p_trans<=1e-8:
                    continue
                # Clear the two affected bits and write the measured values.
                new_state=state&(~mask) 
                new_bit1=(meas>>1)&1
                new_bit2=meas & 1
                new_state|=(new_bit1<<bit1_pos)|(new_bit2<<bit2_pos)
                new_str=format(new_state,f'0{n_qubits}b')
                new_prob[new_str]+=p*p_trans
        prob=new_prob
    # Convert probabilities back to integer counts, largest first, capping at
    # the remaining shots; any rounding leftover is dumped on the first key.
    new_counts={}
    remaining=total_shots
    for k,v in sorted(prob.items(),key=lambda x:-x[1]):
        cnt=round(v*total_shots)
        cnt=min(cnt,remaining)
        if cnt>0:
            new_counts[k]=cnt
            remaining-=cnt
    if remaining>0:
        new_counts[list(prob.keys())[0]]=new_counts.get(list(prob.keys())[0],0)+remaining
    return new_counts
def apply_correlated_noise_to_probability(prob_vector,noise_mats,n_qubits,big_endian=False): 
    """Apply pairwise correlated readout-noise matrices to a full probability
    vector over 2**n_qubits basis states.

    noise_mats: {(q1, q2): 4x4 row-stochastic matrix} mapping the true joint
    2-bit state to measured outcomes. Entries below 1e-12 (probability) or
    1e-8 (transition) are skipped for speed. Returns the noisy vector.

    Improvement over the original: the bit mask is loop-invariant per qubit
    pair, so it is computed once instead of inside the innermost loop.
    """
    prob = prob_vector.copy()
    total_states = 2**n_qubits 
    for (q1, q2), mat in noise_mats.items():
        new_prob = np.zeros_like(prob) 
        bit1_pos = (n_qubits - 1 - q1) if big_endian else q1
        bit2_pos = (n_qubits - 1 - q2) if big_endian else q2 
        # Hoisted out of the state/meas loops: depends only on bit positions.
        mask = (1 << bit1_pos) | (1 << bit2_pos)
        for state in range(total_states):
            p = prob[state]
            if p < 1e-12:  
                continue 
            bit1 = (state >> bit1_pos) & 1
            bit2 = (state >> bit2_pos) & 1
            bits = (bit1 << 1) | bit2
            # State with the two affected bits cleared, ready to receive the
            # measured values.
            base_state = state & (~mask)
            for meas in range(4):
                p_trans = mat[bits, meas]
                if p_trans < 1e-8:
                    continue 
                new_state = base_state | (((meas >> 1) & 1) << bit1_pos) | ((meas & 1) << bit2_pos)
                new_prob[new_state] += p * p_trans 
        prob = new_prob   
    return prob
def get_Zobs(obs,qn):
    """Diagonal observable matrix for a Z/I string: Kronecker product with Z
    at every non-'I' position and identity elsewhere."""
    def _single(ch):
        return np.eye(2) if ch == "I" else np.array([[1, 0], [0, -1]])
    op = _single(obs[0])
    for pos in range(1, qn):
        op = np.kron(op, _single(obs[pos]))
    return op
def probs_expectation(p,obs):
    """Expectation value of a diagonal observable under distribution p.

    Asserts obs is square and numerically diagonal (total off-diagonal
    magnitude < 1e-3), then returns Re(sum_i p[i] * obs[i, i]).

    The original O(n^2) Python double loop for the diagonality check is
    replaced by a vectorized sum; np.dot also accepts plain lists for p.
    """
    obs=np.asarray(obs)
    a,b=obs.shape
    assert a==b
    # Sum of |off-diagonal| entries == total |entries| minus |diagonal|.
    off_diag=np.sum(np.abs(obs))-np.sum(np.abs(np.diag(obs)))
    assert off_diag<1e-3
    d=np.diag(obs)
    return np.real(np.dot(p,d))
def compute_expectation(qn,popu,obs):
    """Expectation of the Z/I observable `obs` under population vector popu."""
    observable = get_Zobs(obs, qn)
    return probs_expectation(popu, observable)
def add_obs_basis(qn,qc,obs):
    """Return a copy of qc with basis-change gates appended so that measuring
    in Z realizes the Pauli string obs: H for X, Sdg+H for Y, nothing for Z/I."""
    rotated = qc.copy()
    for qubit in range(qn):
        basis = obs[qubit]
        if basis == "X":
            rotated.h(qubit)
        elif basis == "Y":
            rotated.sdg(qubit)
            rotated.h(qubit)
    return rotated
def query_idx_obs_eigen(meas_len,qidx): 
    """Eigenvalue (+1 or -1) of the all-Z observable on basis state qidx:
    -1 iff qidx has an odd number of set bits.

    meas_len is retained for interface compatibility; the padded-string loop
    of the original reduced exactly to the population count of qidx.
    """
    return -1 if bin(qidx).count("1")%2 else 1
def probs_expectation_value(meas_len,popu):
    """Expectation of the all-Z observable on meas_len qubits from the
    probability vector popu."""
    return sum(popu[i] * query_idx_obs_eigen(meas_len, i)
               for i in range(2 ** meas_len))
def check_string_non_meas_1(bit_string:str,meas_qubit): 
    """True when any qubit outside meas_qubit reads '1' (bit_string is in
    qiskit order, qubit 0 rightmost)."""
    for qubit, bit in enumerate(reversed(bit_string)):
        if bit == "1" and qubit not in meas_qubit:
            return True
    return False
def clear_non_measure(counts,meas_qubits):
    """Drop count entries where any non-measured qubit reads '1'."""
    return {state: shots for state, shots in counts.items()
            if not check_string_non_meas_1(state, meas_qubits)}
def check_corr_dict(corr_dict,meas_qubits):
    """Keep only correlated-noise entries that touch a measured qubit and
    have a non-zero rate matrix; empty input yields an empty dict."""
    if len(corr_dict) == 0:
        return {}
    filtered = {}
    for (qa, qb), rate in corr_dict.items():
        touches_measured = (qa in meas_qubits) or (qb in meas_qubits)
        if touches_measured and np.sum(rate) > 0:
            filtered[(qa, qb)] = rate
    return filtered
def get_counts(qc_list,pauli_list,nshots,meas_qubits,noise_model=None,corr_dict={}):
    """Run each circuit on AerSimulator and accumulate post-processed counts.

    For circuit i, pauli_list[i] is the twirling Pauli that was applied; its
    effect is undone classically. Correlated readout noise is injected when
    corr_dict has entries touching the measured qubits. Returns a merged
    {bit-string: shots} dict across all circuits.

    NOTE(review): corr_dict is a mutable default argument — safe only while
    it is never mutated inside.
    """
    simulator=AerSimulator()  
    counts_dict=defaultdict(int)
    for i,qc in enumerate(qc_list):
        pauli_s=pauli_list[i]
        if noise_model is None:
            counts=simulator.run(qc,shots=nshots).result().get_counts()
        else: 
            counts=simulator.run(qc,noise_model=noise_model,shots=nshots).result().get_counts()  
        corr_dict_check=check_corr_dict(corr_dict,meas_qubits)
        if len(corr_dict_check)>0: 
            qn=qc.num_qubits
            counts=apply_correlated_noise(counts,corr_dict_check,qn)  
            # Noise can populate states with stray 1s on unmeasured qubits.
            counts=clear_non_measure(counts,meas_qubits)  
        counts=add_pauli_post_process(pauli_s,counts)  
        for key,value in counts.items():
            counts_dict[key]+=value
    return counts_dict
def count2popu(meas_qubits,nshots,counts):
    """Convert a counts dict to a probability vector over the measured qubits.

    The bit of meas_qubits[k] becomes the k-th (most significant first) bit of
    the vector index. Raises Exception (type kept for compatibility) with the
    offending state attached when a state string cannot be indexed.
    """
    meas_len=len(meas_qubits)
    prob_vector=[0.0]*(2**meas_len)  
    for state_str,count in counts.items(): 
        # Hoist the reversal: the original re-reversed the string per qubit.
        reversed_str=state_str[::-1]
        try:
            index=int("".join(reversed_str[q] for q in meas_qubits),2) 
        except (IndexError,ValueError) as exc:
            # Was: bare except + print + raise Exception() with no context.
            raise Exception(
                f"cannot map state '{state_str}' onto measured qubits "
                f"{meas_qubits} (counts={counts})"
            ) from exc
        prob_vector[index]=count/nshots
    return prob_vector 
def check_sub_prob(prob,qn,meas_qubits):
    """Marginalize a 2**qn probability vector onto meas_qubits.

    Returns prob unchanged when all qubits are measured; otherwise sums over
    the traced-out qubits and renormalizes the result.
    """
    if len(meas_qubits) == qn:
        return prob
    meas_len = len(meas_qubits)
    reduced = np.zeros(2 ** meas_len)
    for full_idx in range(2 ** qn):
        full_bits = int2string(full_idx, 2, qn)
        sub_idx = int("".join(full_bits[q] for q in meas_qubits), base=2)
        reduced[sub_idx] += prob[full_idx]
    return reduced / np.sum(reduced)
def remove_measurements(qc):
    """Return a copy of qc with all measure and barrier operations stripped."""
    stripped = QuantumCircuit(qc.qubits, qc.clbits)
    for inst in qc.data:
        if inst.operation.name not in ("measure", "barrier"):
            stripped.append(inst.operation, inst.qubits, inst.clbits)
    return stripped
def extract_ind_mem(noise_model,qn):
    """Extract per-qubit 2x2 readout matrices from a qiskit NoiseModel,
    returning identity for qubits that carry no single-qubit readout error.

    NOTE(review): relies on the private _local_readout_errors attribute,
    which may change between qiskit-aer versions.
    """
    T_list=[np.eye(2) for _ in range(qn)]  
    for qubits,readout_error in noise_model._local_readout_errors.items():
        if len(qubits)==1:
            qidx=qubits[0]
            if qidx<qn:
                # probabilities rows are P(meas|prep=0) and P(meas|prep=1);
                # the transpose makes columns index the prepared state —
                # TODO confirm this matches the convention of query_T.
                p0,p1=readout_error.probabilities 
                T_list[qidx]=np.array([p0,p1]).T
    return T_list 
def get_probability(qc_list,pauli_list,nshots,meas_qubits,noise_model=None,corr_dict={},real_machine=False,no_sub=False):
    """Average probability vector over a list of (Pauli-twirled) circuits.

    nshots > 0: sample counts on AerSimulator; nshots <= 0: use the exact
    density matrix. real_machine additionally applies the measured readout
    transition matrix T; no_sub keeps the full-register vector instead of
    marginalizing onto meas_qubits. pauli_list[i] is the twirling Pauli of
    circuit i, undone classically at the end.

    NOTE(review): inverse_popu, generalized_kron_matvec and inverse_vec are
    not defined in this chunk — presumably helpers elsewhere in the file.
    NOTE(review): in the exact (nshots<=0), non-real_machine branch with
    no_sub=True, prob_vector is never assigned before use — possible
    UnboundLocalError; confirm whether that combination is ever exercised.
    """
    simulator=AerSimulator()  
    prob_vector_list=[] 
    for i,qc in enumerate(qc_list):
        pauli_s=pauli_list[i]
        if nshots>0:
            # Sampling path.
            if noise_model is None:
                counts=simulator.run(qc,shots=nshots).result().get_counts()
            else: 
                counts=simulator.run(qc,noise_model=noise_model,shots=nshots).result().get_counts()   
            if not real_machine:  
                qn=qc.num_qubits 
                counts=apply_correlated_noise(counts,corr_dict,qn)
                counts=clear_non_measure(counts,meas_qubits)  
            if real_machine:  
                qn=qc.num_qubits 
                prob_vector=count2popu(list(range(qn)),nshots,counts)  
                # Apply the measured readout transition matrix.
                T,_=query_T(qn)
                prob_vector=T.dot(np.real(prob_vector))  
                if no_sub:
                    prob_vector=apply_pauli_post_process_prob(pauli_s,prob_vector)
                else:   
                    prob_vector=check_sub_prob(prob_vector,qn,meas_qubits)
                    # Undo the twirl restricted to the measured qubits.
                    sub_pauli_s=""
                    for qidx in meas_qubits:
                        sub_pauli_s+=pauli_s[qidx] 
                    prob_vector=apply_pauli_post_process_prob(sub_pauli_s,prob_vector)
            else:
                counts=add_pauli_post_process(pauli_s,counts) 
                if no_sub:
                    qn=qc.num_qubits 
                    prob_vector=count2popu(list(range(qn)),nshots,counts) 
                else:
                    prob_vector=count2popu(meas_qubits,nshots,counts) 
        else:
            # Exact path: diagonal of the density matrix.
            qn=qc.num_qubits 
            new_qc=remove_measurements(qc.copy())  
            new_qc.save_density_matrix(qubits=list(range(qn)))  
            if noise_model is None:
                result=simulator.run(new_qc,shots=None).result()
            else:
                result=simulator.run(new_qc,noise_model=noise_model,shots=None).result() 
            rho=result.data()['density_matrix']
            prob=np.diag(np.asarray(rho))
            prob=inverse_popu(prob,qn) 

            # corr_dict_check=check_corr_dict(corr_dict,meas_qubits)  
            if real_machine:  
                # T,_=query_T(len(meas_qubits))
                # prob_vector=check_sub_prob(prob,qn,meas_qubits) 
                # prob_vector=T.dot(np.real(prob_vector)) 

                T,_=query_T(qn)
                prob_vector=T.dot(np.real(prob))  
                if not no_sub:
                    prob_vector=check_sub_prob(prob_vector,qn,meas_qubits) 
            else: 
                # Apply independent per-qubit readout matrices, then the
                # correlated readout channel.
                T_list=extract_ind_mem(noise_model,qn) 
                prob=generalized_kron_matvec(T_list,prob)
                prob=prob.ravel()
                prob=inverse_vec(prob,qn)
                # add corr mem error
                # if len(corr_dict_check)>0:
                prob=apply_correlated_noise_to_probability(prob,corr_dict,qn) 
                if not no_sub:
                    prob_vector=check_sub_prob(prob,qn,meas_qubits)
                
            sub_pauli_s=""
            for qidx in meas_qubits:
                sub_pauli_s+=pauli_s[qidx] 
            prob_vector=apply_pauli_post_process_prob(sub_pauli_s,prob_vector) 
        prob_vector_list.append(np.array(prob_vector))  
    prob=np.mean(prob_vector_list,axis=0)
    return prob/np.sum(prob)  

def get_expectation_value_probs(qn,qc,nshots,obs,ri=0,ri_seed=1,noise_model=None,corr_dict={},real_machine=False): 
    """Expectation value of the Pauli observable obs on circuit qc.

    When ri > 0, builds ri randomized (Pauli-twirled) copies of the circuit
    (one random Pauli layer each, seeded ri_seed+i) whose effect is undone in
    post-processing; otherwise a single identity-twirl copy is used. The
    basis change for obs is appended, only the non-trivial qubits of obs are
    measured, and the averaged probability vector is contracted to the
    all-Z expectation on those qubits.
    """
    if ri>0:
        pauli_list=[random_obs(qn,False,seed=ri_seed+i) for i in range(ri)]
    else:
        pauli_list=["I"*qn]
    qc=add_obs_basis(qn,qc,obs)
    meas_qubits=Pauli_nontrivial_qubit(obs) 
    meas_len=len(meas_qubits)
    qc_list=[]
    for pauli in pauli_list:
        circ=qc.copy()
        circ=add_pauli(circ,pauli)
        circ.measure(meas_qubits,meas_qubits)
        qc_list.append(circ)
    probs=get_probability(qc_list,pauli_list,nshots,meas_qubits,noise_model=noise_model,corr_dict=corr_dict,real_machine=real_machine)
    return probs_expectation_value(meas_len,probs) 
def omega_compute(obs):
    """Support mask of an observable as an integer: bit 1 where obs is
    non-identity, read most-significant-first."""
    bits = "".join("0" if ch == "I" else "1" for ch in obs)
    return int(bits, base=2)
def compute_inner(qn,idx,w): 
    """Parity (mod 2) of the bitwise AND between idx (bits reversed) and w,
    over the first qn bit positions."""
    bs1 = int2string(idx, 2, qn)[::-1]
    bs2 = int2string(w, 2, qn)
    overlap = sum(1 for i in range(qn) if bs1[i] == "1" and bs2[i] == "1")
    return overlap % 2
def ideal_exp_fast(qn,qc,obs):
    """Noiseless expectation of Pauli obs: rotate to the measurement basis,
    simulate the statevector, and accumulate (+/-1)-weighted probabilities
    using the bit-parity trick against the observable's support mask.

    NOTE(review): vec2popu is not defined in this chunk — presumably a helper
    elsewhere in the file.
    """
    qc=add_obs_basis(qn,qc,obs)
    qc.save_statevector() 
    simulator=AerSimulator()
    result=simulator.run(qc).result()
    statevector=result.get_statevector()
    prob=vec2popu(statevector,qn) 
    w=omega_compute(obs)
    exp=0
    for i in range(2**qn):
        # (-1)^<i, w> is the eigenvalue of the diagonalized observable.
        exp+=(((-1)**compute_inner(qn,i,w))*prob[i])
    return np.real(exp)

#// ANCHOR circuit
def add_TS_block(circ,q1,q2,theta):
    """Append a ZZ-interaction block, CX(q1,q2)·RZ(theta,q2)·CX(q1,q2),
    i.e. exp(-i*theta/2 * Z⊗Z) up to global phase."""
    circ.cx(q1,q2)
    circ.rz(theta,q2)
    circ.cx(q1,q2)
    return circ
def add_TS_block_cirq(circ,q1,q2,theta,q):
    """cirq version of add_TS_block: CX·RZ(theta)·CX on qubits q[q1], q[q2]."""
    circ.append(cirq.CX(q[q1],q[q2]))
    circ.append(cirq.rz(theta).on(q[q2]))
    circ.append(cirq.CX(q[q1],q[q2])) 
    return circ
def TS_circ(qn,step,delta,ratio):
    """Trotterized transverse-field-Ising-style circuit: `step` layers of
    RX(delta) on every qubit followed by ZZ blocks on even then odd bonds,
    each with angle -delta*ratio."""
    # No RC (randomized-compiling) version yet (looking directly at coherent
    # errors, IBM-style).
    circ=QuantumCircuit(qn,qn)
    for _ in range(step):
        for i in range(qn):
            circ.rx(delta,i)
        for i in range(int(qn/2)):
            circ=add_TS_block(circ,2*i,2*i+1,-delta*ratio)
        for i in range(int((qn-1)/2)):
            circ=add_TS_block(circ,2*i+1,2*i+2,-delta*ratio)
    return circ
def TS_circ_cirq(qn,step,delta,ratio):
    """cirq version of TS_circ: RX layers interleaved with even/odd-bond ZZ
    blocks on a line of qn qubits."""
    circ=cirq.Circuit()
    q=cirq.LineQubit.range(qn)
    for _ in range(step):
        for i in range(qn):
            circ.append(cirq.rx(delta).on(q[i])) 
        for i in range(int(qn/2)):
            circ=add_TS_block_cirq(circ,2*i,2*i+1,-delta*ratio,q)
        for i in range(int((qn-1)/2)):
            circ=add_TS_block_cirq(circ,2*i+1,2*i+2,-delta*ratio,q)
    return circ
def random_TS_circuit(qn,depth,circ_seed=-1):
    """Random Trotter-style circuit: `depth` layers of RX(angle) on every
    qubit plus even/odd ZZ blocks, closed by a final RX layer.

    Angles come from a seeded RNG (2*depth+1 values, one per layer); returns
    (circuit, params) so the circuit is reproducible from the seed.
    """
    circ=QuantumCircuit(qn,qn)
    if circ_seed==-1:
        circ_seed=np.random.randint(0,1000000)
    rnd=np.random.RandomState(seed=circ_seed)
    idx=0
    params=rnd.uniform(0,2*np.pi,2*depth+1)     
    for _ in range(depth):
        for i in range(qn):
            circ.rx(params[idx],i)
        idx+=1
        for i in range(int(qn/2)):
            circ=add_TS_block(circ,2*i,2*i+1,params[idx])
        for i in range(int((qn-1)/2)):
            circ=add_TS_block(circ,2*i+1,2*i+2,params[idx])
        idx+=1
    for i in range(qn):
        circ.rx(params[idx],i)
    return circ,params
def random_TS_circuit_cirq(qn,depth,circ_seed=-1):
    """cirq counterpart of random_TS_circuit.

    Bug fix: the single-qubit rotation layers previously called
    ``cirq.cx(params[idx])`` — ``cirq.CX`` is the two-qubit CNOT and takes no
    angle, so these lines could never have worked. They now use ``cirq.rx``,
    matching the qiskit twin ``random_TS_circuit`` and ``TS_circ_cirq``.
    Returns (circuit, params) with 2*depth+1 sampled angles.
    """
    circ=cirq.Circuit()
    q=cirq.LineQubit.range(qn)
    if circ_seed==-1:
        circ_seed=np.random.randint(0,1000000)
    rnd=np.random.RandomState(seed=circ_seed)
    idx=0
    params=rnd.uniform(0,2*np.pi,2*depth+1)
    for _ in range(depth):
        for i in range(qn):
            circ.append(cirq.rx(params[idx]).on(q[i]))
        idx+=1
        # brick-wall ZZ layers (even pairs, then odd pairs), one shared angle
        for i in range(int(qn/2)):
            circ=add_TS_block_cirq(circ,2*i,2*i+1,params[idx],q)
        for i in range(int((qn-1)/2)):
            circ=add_TS_block_cirq(circ,2*i+1,2*i+2,params[idx],q)
        idx+=1
    # final RX layer with the last sampled angle
    for i in range(qn):
        circ.append(cirq.rx(params[idx]).on(q[i]))
    return circ,params
def HE_param_circ(qn, depth, circ_seed=-1):
    """Hardware-efficient ansatz: ``depth`` layers of per-qubit RY followed by
    a linear CX chain, plus one final RY layer.

    ``circ_seed == -1`` draws a fresh seed from the global RNG. Returns
    (circuit, params) with (depth+1)*qn angles in [0, 2*pi).
    """
    if circ_seed == -1:
        circ_seed = np.random.randint(0, 1000000)
    rng = np.random.RandomState(seed=circ_seed)
    params = rng.uniform(0, 2 * np.pi, (depth + 1) * qn)
    circ = QuantumCircuit(qn, qn)
    cursor = 0
    for layer in range(depth + 1):
        for qubit in range(qn):
            circ.ry(params[cursor], qubit)
            cursor += 1
        if layer < depth:  # no entangling chain after the last RY layer
            for ctrl in range(qn - 1):
                circ.cx(ctrl, ctrl + 1)
    return circ, params
def GHZ_circuit(qn):
    """GHZ preparation (H on qubit 0 + CX chain) followed by H on every qubit."""
    circ = QuantumCircuit(qn, qn)
    circ.h(0)
    for ctrl in range(qn - 1):
        circ.cx(ctrl, ctrl + 1)
    for qubit in range(qn):
        circ.h(qubit)
    return circ
def GHZ_statevec(qn):
    """Normalized GHZ statevector (|0...0> + |1...1>)/sqrt(2), dim 2**qn, complex64."""
    dim = 2 ** qn
    vec = np.zeros(dim, dtype=np.complex64)
    vec[0] = vec[dim - 1] = 1
    return vec / np.linalg.norm(vec)
def rand_vec(qn, seed, obs_check="", threshold=1e-3):
    """Draw a random normalized complex vector of dimension 2**qn.

    If ``obs_check`` is a Pauli string of length ``qn``, keep resampling until
    the ideal expectation value of that observable exceeds ``threshold`` in
    magnitude; otherwise the first draw is returned.
    """
    rng = np.random.RandomState(seed)
    dim = 2 ** qn
    while True:
        vec = rng.standard_normal(dim).astype(complex)
        vec = vec + 1j * rng.standard_normal(dim)
        vec = vec / np.linalg.norm(vec)
        if len(obs_check) != qn:
            return vec
        qc = QuantumCircuit(qn, qn)
        qc.set_statevector(vec)
        ideal = get_expectation_value_probs(qn, qc.copy(), int(1e6), obs_check)
        if abs(ideal) > threshold:
            return vec
def inverse_index(qn, idx):
    """Index obtained by reversing the qn-bit binary string of ``idx``
    (converts between little- and big-endian basis-state orderings)."""
    bits = int2string(idx, 2, qn)
    return int(bits[::-1], base=2)
def vec2popu(vec, qn, inverse=False):
    """Population (|amplitude|**2) vector of a statevector.

    With ``inverse=True`` the populations are written at bit-reversed indices
    (endianness flip via ``inverse_index``); otherwise order is preserved.
    """
    if not inverse:
        return np.array([abs(amp) ** 2 for amp in vec])
    popu = np.zeros(2 ** qn)
    for k in range(2 ** qn):
        popu[inverse_index(qn, k)] = abs(vec[k]) ** 2
    return popu
def inverse_popu(popu, qn):
    """Return ``popu`` with each entry moved to its bit-reversed index."""
    out = np.zeros(2 ** qn)
    for k in range(2 ** qn):
        out[inverse_index(qn, k)] = popu[k]
    return out
#In[]
#// ANCHOR TPN 
def calculate_probability(qn, counts, bit_index, measured_outcome: int):
    """Marginal probability that qubit ``bit_index`` reads ``measured_outcome``.

    ``counts`` maps qiskit bitstrings (qubit 0 is the RIGHTMOST character) to
    shot counts; ``measured_outcome`` is 0 or 1.
    """
    total = sum(counts.values())
    target = str(measured_outcome)
    pos = qn - 1 - bit_index  # qiskit bitstrings index qubits from the right
    matched = sum(cnt for bits, cnt in counts.items() if bits[pos] == target)
    return matched / total
def sparse_p01(qn, counts):
    """Per-qubit probability of reading 0, one entry per qubit index."""
    return np.array([calculate_probability(qn, counts, qubit, 0) for qubit in range(qn)])
def sparse_p10(qn, counts):
    """Per-qubit probability of reading 1, one entry per qubit index."""
    return np.array([calculate_probability(qn, counts, qubit, 1) for qubit in range(qn)])
def parallel_tpn_calibration(qn, calishot, noise_model, corr_dict={}):
    """Calibrate per-qubit 2x2 readout-transition matrices in parallel.

    An all-|0> circuit estimates p01 = P(read 1 | prepared 0); an all-|1>
    circuit estimates p10 = P(read 0 | prepared 1). Returns one matrix per
    qubit with columns [P(read s | prepared 0), P(read s | prepared 1)].
    """
    qubits = list(range(qn))
    # prepared |0...0>: 1 - P(read 0) is the 0 -> 1 flip probability
    qc = QuantumCircuit(qn, qn)
    qc.measure(qubits, qubits)
    counts = get_counts([qc], ["I" * qn], calishot, qubits, noise_model=noise_model, corr_dict=corr_dict)
    p01 = 1 - sparse_p01(qn, counts)
    # prepared |1...1>: 1 - P(read 1) is the 1 -> 0 flip probability
    qc = QuantumCircuit(qn, qn)
    for qubit in qubits:
        qc.x(qubit)
    qc.measure(qubits, qubits)
    counts = get_counts([qc], ["I" * qn], calishot, qubits, noise_model=noise_model, corr_dict=corr_dict)
    p10 = 1 - sparse_p10(qn, counts)
    return [np.array([[1 - p01[i], p10[i]], [p01[i], 1 - p10[i]]]) for i in range(qn)]
def save_load_cali(qn, cali_T_filename, calishots, noise_model, corr_dict={}):
    """Load cached per-qubit calibration matrices, or calibrate and cache them.

    The .npy file stores a dict {qubit_index: 2x2 matrix} (saved per qubit).
    """
    if os.path.exists(cali_T_filename):
        stored = np.load(cali_T_filename, allow_pickle=True).item()
        return [np.array(mat) for mat in stored.values()]
    cali_T = parallel_tpn_calibration(qn, calishots, noise_model, corr_dict=corr_dict)
    np.save(cali_T_filename, {qubit: cali_T[qubit] for qubit in range(qn)})
    return cali_T
def get_qubit_probabilities(counts, meas_qubits):
    """Per-qubit (p0, p1) marginals for the qubits listed in ``meas_qubits``.

    The register size is inferred from the length of the first counts key;
    bitstrings are reversed so index 0 addresses qubit 0. Tuples are returned
    in ascending qubit order; an empty ``counts`` yields [].
    """
    if not counts:
        return []
    n_qubits = len(next(iter(counts)))  # infer qubit count from the first key
    total_shots = sum(counts.values())
    probs = []
    for qubit in range(n_qubits):
        if qubit not in meas_qubits:
            continue
        ones = sum(cnt for bits, cnt in counts.items() if bits[::-1][qubit] == '1')
        # every bit is '0' or '1', so p0 = 1 - p1 exactly
        probs.append(((total_shots - ones) / total_shots, ones / total_shots))
    return probs
def generalized_kron_matvec(mats,vec):
    """Apply the Kronecker product of ``mats`` to ``vec`` without forming it.

    Contracts one factor at a time via tensor reshapes so the full product
    matrix is never materialized; returns a flat 1-D array. NOTE(review):
    intended to match chaining the factors in list order (as the explicit
    ``continuous_tensor`` branch of the TPN caller does) — the reversed axis
    handling below is what the original author marked as the key fix; verify
    against that branch before changing anything here.
    """
    total_cols=np.prod([mat.shape[1] for mat in mats])
    assert vec.size==total_cols,f"向量维度{vec.size}与矩阵总列数{total_cols}不匹配"

    # Key change (per original note): axis order is reversed so the last
    # matrix acts on the fastest-varying index of the Fortran-order reshape.
    dims=[mat.shape[1] for mat in reversed(mats)]
    current=vec.reshape(dims, order='F').copy()

    # Key change (per original note): process the matrices in reverse order to
    # match the reversed axis layout above.
    for i, mat in enumerate(reversed(mats)):
        axis_pos=i
        # move the target axis last, contract against mat's column index, move back
        current=np.moveaxis(current,axis_pos,-1)
        current=np.tensordot(current,mat,axes=(-1, 1))
        current=np.moveaxis(current,-1,axis_pos)

    # undo the reversed axis layout before flattening back to a vector
    current=np.transpose(current,axes=list(reversed(range(current.ndim))))
    return current.reshape(-1,1,order='F').ravel()
def inverse_index(qn,idx):
    """Bit-reverse ``idx`` as a qn-bit binary string and return the new index.

    NOTE(review): duplicate redefinition — an identical ``inverse_index`` is
    already defined earlier in this file; this later copy is the one in effect
    at import time.
    """
    idx_string=int2string(idx,2,qn)
    return int(idx_string[::-1],base=2)
def inverse_vec(vec,qn):
    """Return ``vec`` with each entry moved to its bit-reversed index.

    Fix: the output buffer now follows the input dtype (promoted to at least
    float) — the previous float-only ``np.zeros`` silently discarded the
    imaginary part of complex input. Real/float callers see no change.
    """
    vec=np.asarray(vec)
    new_vec=np.zeros(2**qn,dtype=np.result_type(vec.dtype,float))
    for i in range(2**qn):
        new_vec[inverse_index(qn,i)]=vec[i]
    return new_vec
def change_pauli(pauli_s, meas_qubits):
    """Mask a Pauli string: keep letters at positions in ``meas_qubits`` and
    replace every other position with identity 'I'."""
    return "".join(s if i in meas_qubits else "I" for i, s in enumerate(pauli_s))
def get_compectation_value_with_TPN(qn,qc,nshots,noise_model,cali_T,obs,corr_dict={},ri=0,ri_seed=1,only_prob=False,real_machine=False): 
    """Expectation value of ``obs`` with tensor-product-noise (TPN) readout
    mitigation.

    Rotates the circuit into the measurement basis of ``obs``, optionally adds
    ``ri`` seeded random Pauli twirling layers, then corrects the measured
    marginal distribution on the nontrivial qubits with the inverted per-qubit
    calibration matrices ``cali_T`` (a factor-by-factor Kronecker contraction
    when more than 4 qubits are measured, an explicit tensor-product inverse
    otherwise). Negative quasi-probabilities are clipped and the result is
    renormalized. Returns the mitigated probabilities when ``only_prob``.
    NOTE(review): the misspelled name ("compectation") is kept — callers use it.
    """
    if ri>0:
        pauli_list=[random_obs(qn,False,seed=ri_seed+50+i) for i in range(ri)]
    else:
        pauli_list=["I"*qn]
    qc_list=[]
    qc=add_obs_basis(qn,qc,obs)
    meas_qubits=Pauli_nontrivial_qubit(obs) 
    meas_len=len(meas_qubits) 
    for pauli in pauli_list:
        circ=qc.copy()  
        circ=add_pauli(circ,pauli)
        circ.measure(meas_qubits,meas_qubits)
        qc_list.append(circ)
    qn_vec=get_probability(qc_list,pauli_list,nshots,meas_qubits,noise_model=noise_model,corr_dict=corr_dict,real_machine=real_machine,no_sub=True) 
    if real_machine:
        # hardware path: use the stored calibration operator for the register
        _,op=query_T(qn)
        mitigated_prob=op(qn_vec)
        mitigated_prob=check_sub_prob(mitigated_prob,qn,meas_qubits)
    else:
        qn_vec=check_sub_prob(qn_vec,qn,meas_qubits)
        if meas_len>4:
            # large registers: contract the inverted 2x2 factors one at a time
            # instead of building the full 2**meas_len inverse matrix
            cali_T_matrix=[np.linalg.inv(cali_T[i]) for i in meas_qubits] 
            mitigated_prob=generalized_kron_matvec(cali_T_matrix,qn_vec)
            mitigated_prob=mitigated_prob.ravel() 
            mitigated_prob=inverse_vec(mitigated_prob,meas_len)
        else: 
            # small registers: explicit tensor product of the inverted factors
            cali_T_matrix=continuous_tensor([np.linalg.inv(cali_T[i]) for i in meas_qubits])
            mitigated_prob=cali_T_matrix@qn_vec
    # clip negative quasi-probabilities, then renormalize to a distribution
    mitigated_prob[mitigated_prob<0]=0.0
    mitigated_prob=np.array(mitigated_prob)/np.sum(mitigated_prob)
    if only_prob:
        return mitigated_prob
    return probs_expectation_value(meas_len,mitigated_prob) 
def random_ri_pauli(qn, ri):
    """Sample ``ri`` distinct random qn-qubit Pauli strings without replacement.

    ``ri`` is capped at 4**qn (the Pauli group size); when ``ri`` equals the
    cap exactly, the full group is returned in shuffled order. Uses the global
    numpy RNG.
    """
    pauli_indices = list(range(4 ** qn))
    full = 4 ** qn
    if ri > full:
        ri = full
    elif ri == full:
        pauli_list = [bitstring2Pauliobs(int2string(a, 4, qn)) for a in range(full)]
        np.random.shuffle(pauli_list)
        return pauli_list
    chosen = np.random.choice(pauli_indices, size=ri, replace=False)
    return [bitstring2Pauliobs(int2string(a, 4, qn)) for a in chosen]

def get_compectation_value_with_MF(qn,qc,nshots,noise_model,obs,corr_dict={},ri=0,real_machine=False,fast_twirling=False): 
    """Expectation value of ``obs`` under Pauli twirling, with no readout
    matrix correction.

    ``ri`` sets the number of random twirling layers; the string "max" maps to
    4*qn (NOTE(review): possibly intended as 4**qn, the full Pauli-group size
    used elsewhere — confirm against ``random_ri_pauli``'s cap). The shot
    budget ``nshots`` is split evenly over the twirled circuits.
    """
    qc=add_obs_basis(qn,qc,obs)
    meas_qubits=Pauli_nontrivial_qubit(obs) 
    meas_len=len(meas_qubits) 
    if ri=="max":
        ri=4*qn
    if ri>0:
        # pauli_list=[random_obs(qn,False) for i in range(ri)]
        if fast_twirling:
            pauli_list=fast_twirling_set(qn,meas_qubits,ri)
        else:
            pauli_list=random_ri_pauli(qn,ri)
    else:
        pauli_list=["I"*qn]
    # divide the shot budget over the circuits actually executed
    MS_nshots=int(nshots/len(pauli_list))
    qc_list=[]
    for pauli in pauli_list:
        circ=qc.copy()  
        circ=add_pauli(circ,pauli)
        circ.measure(meas_qubits,meas_qubits)
        qc_list.append(circ)
    qn_vec=get_probability(qc_list,pauli_list,MS_nshots,meas_qubits,noise_model=noise_model,corr_dict=corr_dict,real_machine=real_machine)  
    return probs_expectation_value(meas_len,qn_vec) 
#In[]
#// ANCHOR MS with fixed pattern
# Still adopting the local-greedy approach
def find_continuous_obs_indices(s, repre_type="I"):
    """Group consecutive indices whose character differs from ``repre_type``.

    Example: 'XXIZZ' -> [[0, 1], [3, 4]].
    """
    groups = []
    run = []
    for pos, ch in enumerate(s):
        if ch == repre_type:
            if run:
                groups.append(run)
                run = []
        else:
            run.append(pos)
    if run:
        groups.append(run)
    return groups
def split_list_of_lists(input_list, max_batch):
    """Split every sublist longer than ``max_batch`` into consecutive chunks of
    at most ``max_batch`` elements; shorter sublists pass through unchanged."""
    out = []
    for sub in input_list:
        if len(sub) <= max_batch:
            out.append(sub)
        else:
            out.extend(sub[i:i + max_batch] for i in range(0, len(sub), max_batch))
    return out
def update_measure_status(id_list, measure_status, set_value):
    """Set ``measure_status[qid] = set_value`` for each qid in ``id_list`` that
    is already a key; unknown ids are ignored. Mutates and returns the dict."""
    for qid in id_list:
        if qid in measure_status:
            measure_status[qid] = set_value
    return measure_status
def check_unify(term,measure_error_dict,cx_dict,local_greedy=False,index=0):
    """Build a 'unify' measurement action for a contiguous qubit group.

    Chooses a target qubit — the minimum-readout-error member when
    ``local_greedy``, otherwise ``term[index]`` — and totals its readout error
    plus the CX cost along the chain. Returns [2, term, target, cost].

    Fix: removed the unreachable ``elif local_greedy and (index==0)`` branch —
    it could never fire after ``if local_greedy`` already matched, and
    ``term[0] == term[index]`` when index is 0 anyway, so behavior is
    unchanged.
    """
    if local_greedy:
        qidx=min(term,key=lambda x:measure_error_dict[x])
    else:
        qidx=term[index]
    q_cost=measure_error_dict[qidx]
    # CX chain cost over consecutive neighbours of the group
    for idx in term[:-1]:
        q_cost+=cx_dict[(idx,idx+1)]
    return [2,term,qidx,q_cost]
def check_latter_Z(string, idx):
    """Indices of the maximal run of consecutive 'Z' characters immediately
    after position ``idx``."""
    run = []
    for pos in range(idx + 1, len(string)):
        if string[pos] != "Z":
            break
        run.append(pos)
    return run
def query_cost(action_id, qubits, target_qubit, ind_mem_dict, cx_ger_dict):
    """Cost of a measurement action: summed CX-chain error plus the target
    qubit's measurement/memory error.

    action_id 2 funnels the chain ``qubits`` into ``target_qubit`` (CXs from
    both ends toward the target); action_id 3 swaps a pair (two CXs). Any
    other id raises ValueError. Missing dict entries cost 0.
    """
    if action_id == 2:
        n = len(qubits)
        t = qubits.index(target_qubit)
        cost = sum(cx_ger_dict.get((qubits[i], qubits[i + 1]), 0) for i in range(t))
        cost += sum(cx_ger_dict.get((qubits[n - 1 - i], qubits[n - 2 - i]), 0)
                    for i in range(n - 1 - t))
    elif action_id == 3:
        cost = cx_ger_dict.get((qubits[0], qubits[1]), 0) * 2
    else:
        raise ValueError("Unsupported action id!")
    return cost + ind_mem_dict.get(target_qubit, 0)
def alphas(n, s):
    """Binary pattern matrix of shape (2*s, 2**n - 1).

    One column per compression circuit; pairs of rows encode which CX slots of
    the 'upper' side circuits fire (consumed by ``side_circuits``).
    """
    rows, cols = 2 * s, 2 ** n - 1
    nat_para = np.zeros((rows, cols))
    for r in range(rows):
        for c in range(cols):
            if r % 2 == 0:
                if c + 1 >= 2 ** (int(r / 2) + n - s):
                    nat_para[r, c] = 1
            elif c + 1 >= 2 ** (int((r - 1) / 2) + n - s) and (c + 1) % (2 ** (int((r - 1) / 2) + n - s)) < 2 ** (int((r - 3) / 2) + n - s):
                nat_para[r, c] = 1
    return nat_para
def betas(n, s):
    """Binary pattern matrix of shape (2*(n-s-1), 2**n - 1).

    One column per compression circuit; pairs of rows encode which CX slots of
    the 'lower' side circuits fire (consumed by ``side_circuits``).
    """
    rows, cols = 2 * (n - s - 1), 2 ** n - 1
    out = np.zeros((rows, cols))
    for r in range(rows):
        for c in range(cols):
            if r % 2 == 0:
                if c % (2 ** (n - s - r / 2 - 1)) < 2 ** (n - s - r / 2 - 1) - 1:
                    out[r, c] = 1
            elif r == 1:
                # row 1 has its own boundary condition
                if c < 2 ** (n - s - (r + 1) / 2) - 1:
                    out[r, c] = 1
            elif c % (2 ** (n - s - (r - 1) / 2)) < 2 ** (n - s - (r + 1) / 2) - 1:
                out[r, c] = 1
    return out
def side_circuits(t, qlist, qn):
    """Turn a pattern matrix into a list of qn-qubit QuantumCircuits.

    ``t`` has one column per circuit and 2*(n-1) rows of CX slots over the
    chain ``qlist``: odd rows place CX(qlist[i], qlist[i+1]) and even rows the
    reversed direction, filled from the far end of the chain inward.
    """
    n_rows, n_circs = t.shape
    pair_count = int((n_rows + 2) / 2)  # the caller's chain length n
    circs = []
    for col in range(n_circs):
        qc = QuantumCircuit(qn, qn)
        for i in reversed(range(pair_count - 1)):
            if t[2 * i + 1, col] == 1:
                qc.cx(qlist[i], qlist[i + 1])
            if t[2 * i, col] == 1:
                qc.cx(qlist[i + 1], qlist[i])
        circs.append(qc)
    return circs
def query_actions_cost(actions, ind_mem_dict, cx_ger_dict):
    """Total cost of a list of actions shaped [action_id, qubits, target, ...]."""
    return sum(query_cost(a[0], a[1], a[2], ind_mem_dict, cx_ger_dict) for a in actions)
def q1_circuits(n, s):
    """All 2**n - 1 compression circuits that funnel an n-qubit Z-string onto
    qubit ``s``: upper side circuits act on qubits s..0, lower on s..n-1."""
    q = list(range(n))
    upper = side_circuits(alphas(n, s), q[s::-1], n)
    lower = side_circuits(betas(n, s), q[s:n], n)
    return [upper[k].compose(lower[k]) for k in range(2 ** n - 1)]
def compression_intervals(n, sl):
    """Partition qubits 0..n-1 into contiguous intervals, one per anchor in the
    sorted list ``sl``; interval boundaries sit at the midpoints between
    neighbouring anchors. Returns [] when ``sl`` is empty."""
    if not sl:
        return []
    anchors = len(sl)
    intervals = []
    for k in range(anchors):
        left = 0 if k == 0 else (sl[k - 1] + sl[k]) // 2 + 1
        right = n - 1 if k == anchors - 1 else (sl[k] + sl[k + 1]) // 2
        intervals.append(list(range(left, right + 1)))
    return intervals
def interval_circuits_obs(qn,interval, point): 
    """Compression circuits and observables for one interval around an anchor.

    ``interval`` is a contiguous global qubit range [a..b] containing
    ``point``; upper side circuits run from ``point`` down to ``a`` and lower
    ones from ``point`` up to ``b``. Returns (circuits, observables) of length
    2**len(interval): entry 0 is the empty circuit paired with the identity
    PauliString, the rest pair a compression circuit with Z on the anchor.
    """
    a=interval[0]
    b=interval[-1]
    n=len(interval)
    q=list(range(100))  # generous fixed-size index pool; indices are global qubit ids
    # pt(interval,"interval")
    # pt(point,"point")
    # pt(q[point:a-1:-1])
    # upper chain point..a (the slice stops before a, so a is appended explicitly)
    up_c=side_circuits(alphas(n,point-a),q[point:a:-1]+[q[a]],qn)
    # pt(q[point:b+1])
    down_c=side_circuits(betas(n,point-a),q[point:b+1],qn)
    outc=[QuantumCircuit(qn,qn)]+[up_c[i].compose(down_c[i]) for i in range(2**n-1)] 
    q=cirq.LineQubit.range(100)
    outo=[cirq.PauliString()]+[cirq.PauliString([cirq.Z(q[point])]) for i in range(2**n-1)]
    return outc,outo
def sm_circuits_obs(qn, sl):
    """Cartesian-product assembly of per-interval compression circuits and
    their Z observables for the anchors ``sl`` (sorted in place).

    The intervals partition the qn qubits, so the product has exactly 2**qn
    combinations; the all-identity combination (index 0) is dropped, giving
    two lists of length 2**qn - 1.
    """
    sl.sort()
    per_interval_circs = []
    per_interval_obs = []
    for interval, anchor in zip(compression_intervals(qn, sl), sl):
        circs, obs = interval_circuits_obs(qn, interval, anchor)
        per_interval_circs.append(circs)
        per_interval_obs.append(obs)
    circ_combos = list(itertools.product(*per_interval_circs))
    obs_combos = list(itertools.product(*per_interval_obs))
    out_circs = []
    out_obs = []
    for combo_c, combo_o in zip(circ_combos, obs_combos):
        merged_c = QuantumCircuit(qn, qn)
        for sub in combo_c:
            merged_c = merged_c.compose(sub)
        merged_o = cirq.PauliString()
        for sub in combo_o:
            merged_o *= sub
        out_circs.append(merged_c)
        out_obs.append(merged_o)
    return out_circs[1:], out_obs[1:]

#In[]
# qn=6
# sl=[0,3]
# index=int("110110",base=2)-1 
# cs,os=sm_circuits_obs(qn,sl)
# print(cs[index])
#In[]
#In[]
def measurement_scheduling(measure_operator,qn,measure_error_dict,cx_dict,error_type,max_batch=4,local_greedy=False,index=0):
    """Build a unify-action scheme for the nontrivial runs of an observable.

    Runs of non-'I' characters are split into chunks of at most ``max_batch``;
    each multi-qubit chunk becomes a check_unify action. ``error_type`` is
    accepted for interface compatibility but unused here.
    """
    terms = split_list_of_lists(find_continuous_obs_indices(measure_operator), max_batch)
    status = {qubit: 0 for qubit in range(qn)}
    scheme = []
    for term in terms:
        if len(term) <= 1:
            continue
        action = check_unify(term, measure_error_dict, cx_dict, local_greedy=local_greedy, index=index)
        scheme.append(action)
        status = update_measure_status(action[1], status, 1)
    return scheme
def count_two_qubit_cost(qc, cx_ger_dict):
    """Sum ``cx_ger_dict`` costs over every two-qubit gate in ``qc``.

    Qubit pairs are looked up in sorted order; pairs missing from the dict
    contribute 0.
    """
    total = 0
    for _, qargs, _ in qc.data:
        if len(qargs) != 2:
            continue
        pair = tuple(sorted((qc.qubits.index(qargs[0]), qc.qubits.index(qargs[1]))))
        total += cx_ger_dict.get(pair, 0)
    return total
def query_qc_cost(qc, cx_ger_dict, mem):
    """Two-qubit gate cost of ``qc`` plus a flat memory/readout cost ``mem``."""
    return count_two_qubit_cost(qc, cx_ger_dict) + mem
def query_MS_qc(obs, loc_idx, cx_ger_dict, ind_mem_dict, local_greedy=False):
    """Pick the compression circuit for a Z/I observable string ``obs``.

    The Z/I pattern read as binary (minus 1) indexes into q1_circuits. With
    ``local_greedy``, every candidate location is scored via query_qc_cost and
    the cheapest wins; otherwise ``loc_idx`` is used directly. Returns
    (circuit, chosen_location).
    """
    qn = len(obs)
    index = int(obs.replace("I", "0").replace("Z", "1"), base=2) - 1
    if not local_greedy:
        return q1_circuits(qn, loc_idx)[index], loc_idx
    candidates = []
    costs = []
    for loc in range(qn):
        qc = q1_circuits(qn, loc)[index]
        candidates.append(qc)
        costs.append(query_qc_cost(qc, cx_ger_dict, ind_mem_dict[loc]))
    best = np.argmin(costs)
    return candidates[best], best
def compute_eff_obs(measure_scheme, obs):
    """Effective observable string after applying a measurement scheme.

    Unify actions (id 2) blank every group qubit except the target; transfer
    actions (id 3) move the Z from the pair partner onto the target.
    """
    chars = list(obs)
    for action in measure_scheme:
        kind, qubits, target = action[0], action[1], action[2]
        if kind == 2:
            for qidx in qubits:
                if qidx != target:
                    chars[qidx] = "I"
        if kind == 3:
            partner = qubits[(qubits.index(target) + 1) % 2]
            chars[target] = "Z"
            chars[partner] = "I"
    return "".join(chars)

#In[]
#// ANCHOR MS-cali
def compose_pauli(p1, p2):
    """Qubit-wise product of two equal-length Pauli strings, ignoring phases.

    'I' acts as identity, equal letters cancel to 'I', and two distinct
    non-identity letters yield the remaining third letter (X*Y -> Z etc.).
    """
    out = []
    for a, b in zip(p1, p2):
        if a == "I":
            out.append(b)
        elif b == "I":
            out.append(a)
        elif a == b:
            out.append("I")
        else:
            out.append(({"X", "Y", "Z"} - {a, b}).pop())
    return "".join(out)
def add_MS_circuit(circ,measure_scheme,pauli_s,pauli_meas,sub=False,MS_qc=None):
    # Assumes the observable's basis change has already been applied to ``circ``.
    # The trailing Pauli and the pre-measurement Pauli are merged so only one
    # layer is applied after the MS circuit.
    """Append the measurement-scheduling (MS) circuit wrapped in Pauli twirls.

    Either builds the MS circuit from ``measure_scheme`` (unify actions, id 2,
    funnel a group into its target with CX chains; transfer actions, id 3, move
    a value to the pair partner with two CXs) or copies a pre-built ``MS_qc``.
    The leading Pauli is obtained by conjugating through the (Clifford) MS
    circuit so the twirl cancels ideally; ``sub`` swaps which side absorbs the
    measurement Pauli ``pauli_meas``.
    """
    qn=len(pauli_s)
    if MS_qc is None:
        MS_circ=QuantumCircuit(qn,qn)
        for action in measure_scheme:
            if action[0]==2:
                #unified action
                term=action[1] 
                l=len(term)
                target_id=action[2]
                target_index=term.index(target_id)
                # CX chains from both ends of the group toward the target
                for i in range(target_index):
                    MS_circ.cx(term[i],term[i+1])
                for i in range(l-1-target_index):
                    MS_circ.cx(term[l-1-i],term[l-2-i])
            elif action[0]==3:
                #transfer
                term=action[1] 
                target_id=action[2]
                target_index=term.index(target_id)
                qidx=action[1][(target_index+1)%2]
                MS_circ.cx(target_id,qidx)
                MS_circ.cx(qidx,target_id)
    else:
        MS_circ=MS_qc.copy()
    #add twirled circuit
    if sub: 
        pauli_former=Clifford_conjugation(MS_circ.copy(),qn,compose_pauli(pauli_s,pauli_meas)) 
        pauli_latter=pauli_s 
    else:
        pauli_former=Clifford_conjugation(MS_circ.copy(),qn,pauli_s) 
        pauli_latter=compose_pauli(pauli_s,pauli_meas)  
    circ=add_pauli(circ,pauli_former) 
    circ=circ.compose(MS_circ)
    circ=add_pauli(circ,pauli_latter)
    return circ 
def get_ori_circ(qn, ms=[], obs=None):
    """Return an empty qn-qubit circuit; ``ms`` and ``obs`` are accepted for
    interface compatibility but unused."""
    return QuantumCircuit(qn, qn)
def effective_Z_obs(obs):
    """Replace every 'X' and 'Y' in a Pauli string with 'Z' (the observable
    measured in the computational basis after the basis change)."""
    return "".join("Z" if ch in ("X", "Y") else ch for ch in obs)

def generate_subtwirling_obs(qn, meas_qubits, pauli_s):
    """Embed ``pauli_s`` (one letter per measured qubit, in order) into a
    qn-character string with 'I' on every unmeasured position."""
    letters = iter(pauli_s)
    return "".join(next(letters) if i in meas_qubits else "I" for i in range(qn))
def query_sub_cali_T(meas_qubits):
    """Per-qubit calibrated transition matrices for the given qubit indices."""
    return [calied_transition_mat([qidx]) for qidx in meas_qubits]
def combine_pauli(s1, s2, qn, meas_qubits):
    """Interleave two Pauli strings into a qn-character string: positions in
    ``meas_qubits`` consume the next letter of ``s1``, all other positions the
    next letter of ``s2``."""
    it1, it2 = iter(s1), iter(s2)
    return "".join(next(it1) if i in meas_qubits else next(it2) for i in range(qn))

def fast_twirling_set(qn, meas_qubits, ri):
    """Twirling set biased toward the measured qubits.

    Distinct Paulis are drawn on ``meas_qubits`` and cycled up to length
    ``ri`` (rounded down to a multiple of the distinct count when needed);
    each is paired with a random Pauli on the remaining qubits (the remainder
    pool is cycled once exhausted). Falls back to a plain full-group sample
    when ri == 4**qn.
    """
    if ri == 4 ** qn:
        return random_ri_pauli(qn, ri)
    meas_len = len(meas_qubits)
    first = random_ri_pauli(meas_len, ri)
    num = len(first)
    if num < ri:
        ri = int(ri / num) * num  # round ri down to a multiple of num
    first = [p for _ in range(int(ri / num)) for p in first]
    rest = random_ri_pauli(qn - meas_len, ri)
    if ri > 4 ** (qn - meas_len):
        rest = rest * int(ri / (4 ** (qn - meas_len))) + rest[:int(ri % (4 ** (qn - meas_len)))]
    return [combine_pauli(first[k], rest[k], qn, meas_qubits) for k in range(ri)]
#In[]
# fast_twirling_set(3,[0,2],12)
#In[]
def get_MS_exp(qn,qc,nshots,measure_scheme,obs,noise_model,ri=0,corr_dict={},fast_twirling=True,real_machine=False,MS_qc=None,loc=[]):
    """Expectation value of ``obs`` under a measurement-scheduling (MS) scheme.

    Either ``measure_scheme`` (unify/transfer actions) or a pre-built circuit
    ``MS_qc`` with target locations ``loc`` must be supplied. With ``ri`` > 0
    the MS circuit is Pauli-twirled over ``ri`` random layers (restricted to
    the measured qubits when ``fast_twirling``).

    Fix: the per-circuit shot budget now divides ``nshots`` by the actual
    number of twirled circuits instead of by ``ri`` — the old expression
    raised ZeroDivisionError for ri=0 (the untwirled path) and mis-sized the
    budget whenever the twirling set was capped below ``ri``. This matches
    get_compectation_value_with_MF.
    """
    # determine the effective Z/I observable and which qubits are measured
    if len(measure_scheme)==0:
        if MS_qc is None:
            raise Exception("unsupported ms scheme and circuit!")
        elif len(loc)==0:
            raise Exception("Unsupported loc")
        else:
            eff_obs="".join("Z" if i in loc else "I" for i in range(qn))
            meas_qubits=loc
    else:
        eff_obs=compute_eff_obs(measure_scheme,obs)
        meas_qubits=Pauli_nontrivial_qubit(eff_obs)
    meas_len=len(meas_qubits)
    if ri=="max":
        ri=4*qn  # NOTE(review): possibly intended as 4**qn; kept as-is
    if ri>0:
        if fast_twirling:
            MS_pauli_list=fast_twirling_set(qn,meas_qubits,ri)
        else:
            MS_pauli_list=random_ri_pauli(qn,ri)
        meas_pauli_list=MS_pauli_list[:]
    else:
        MS_pauli_list=["I"*qn]
        meas_pauli_list=["I"*qn]
    # split the shot budget over the circuits that actually run
    MS_nshots=int(nshots/len(MS_pauli_list))
    qc=add_obs_basis(qn,qc,obs)
    circ_list=[]
    readout_pauli_list=[]
    for MS_pauli,meas_pauli in zip(MS_pauli_list,meas_pauli_list):
        circ=qc.copy()
        circ=add_MS_circuit(circ,measure_scheme,MS_pauli,meas_pauli,MS_qc=MS_qc)
        circ.measure(meas_qubits,meas_qubits)
        circ_list.append(circ)
        readout_pauli_list.append(meas_pauli)
    qn_vec=get_probability(circ_list,readout_pauli_list,MS_nshots,meas_qubits,noise_model=noise_model,corr_dict=corr_dict,real_machine=real_machine)
    return probs_expectation_value(meas_len,qn_vec)

def MS_cali_circ(qn, obs):
    """Calibration circuit for an observable: H for 'X', Sdg-H for 'Y', and an
    X flip for 'Z' or 'I' positions."""
    qc = QuantumCircuit(qn, qn)
    for qubit, letter in enumerate(obs):
        if letter == "X":
            qc.h(qubit)
        elif letter == "Y":
            qc.sdg(qubit)
            qc.h(qubit)
        elif letter in ("Z", "I"):
            qc.x(qubit)
    return qc
#In[] 
# obs="ZZIZZI"
# n=len(obs)
# index_string=obs.replace("I","0").replace("Z","1")
# index=int(index_string,base=2)-1
# loc=3
# print(q1_circuits(n,loc)[index])
#In[]
#// ANCHOR RL
class StringEnv:
    """RL environment over a binary observable string.

    The state is a 0/1 vector (1 = position still carries a nontrivial
    measurement). Actions either funnel a contiguous run of 1s into a single
    target qubit via a CX chain (action id 1) or swap a 1 onto an adjacent 0
    (action id 2); costs come from per-qubit measurement errors
    (``ind_mem_dict``) and per-pair CX errors (``cx_ger_dict``). Episodes are
    bounded by ``max_step`` actions and a CX-schedule depth of ``max_depth``.
    """
    def __init__(self,string,ind_mem_dict,cx_ger_dict,max_step=5,max_depth=3):
        self.ori_string=string
        self.qn=len(string)
        # ``string`` is expected to contain only '0'/'1' characters
        self.state=np.array(list(string),dtype=int) 
        self.step_count=0
        self.mem_dict=ind_mem_dict   # per-qubit measurement/memory error
        self.ger_dict=cx_ger_dict    # per-pair CX gate error
        self.action_log=[]
        self.cur_depth=0             # depth of the CX schedule implied by action_log
        self.total_cost=0
        self.max_step=max_step
        self.max_depth=max_depth

        # best terminal reward across episodes plus the actions/state behind it
        self.highest_reward=-100
        self.best_actions=[]
        self.alter_state=copy(self.state)
    def _get_state(self):
        # state as a float tensor (network input)
        return torch.tensor([int(c) for c in self.state], dtype=torch.float32)
    def reset(self):
        """Restore the initial string; best-so-far records persist on purpose."""
        self.state=np.array(list(self.ori_string),dtype=int)
        self.step_count=0
        self.action_log=[]
        self.cur_depth=0
        self.total_cost=0
        return self._get_state()
    def turn2cx_chain(self):
        """Expand the logged actions into an ordered list of [control, target] CXs."""
        cx_chain=[]
        for action in self.action_log:
            if action[0]==1:
                # funnel action: CXs from both ends of the group toward the target
                term=action[1] 
                l=len(term)
                target_id=action[2]
                target_index=term.index(target_id)
                for i in range(target_index):
                    cx_chain.append([term[i],term[i+1]])
                for i in range(l-1-target_index):
                    cx_chain.append([term[l-1-i],term[l-2-i]])
            elif action[0]==2:
                # swap action: two opposite CXs on the pair
                target_id=action[1][0]
                qidx=action[1][1]
                cx_chain.append([target_id,qidx])
                cx_chain.append([qidx,target_id])
            else:
                raise ValueError("Unsupported action idx!")
        return cx_chain 
    def check_already(self,cx_chain,cx_gate,compute_cx_list):
        """True iff every gate preceding ``cx_gate`` that shares a qubit with it
        has already been scheduled (index present in ``compute_cx_list``)."""
        for idx,cx in enumerate(cx_chain):
            if tuple(cx)==tuple(cx_gate):
                break
            else:
                if not set(cx).isdisjoint(set(cx_gate)):
                    if idx not in compute_cx_list:
                        return False
        return True
    def update_depth(self):
        # compute the depth of the current MS circuit: greedily pack gates into
        # layers, respecting qubit overlap within a layer and gate ordering
        cx_queue=[]
        cx_chain=self.turn2cx_chain()  
        computed_cx_list=[]
        while True:
            cx_queue.append([])
            for i,cx_gate in enumerate(cx_chain):
                if i in computed_cx_list:
                    continue
                occupied_qubits=[]
                for a in cx_queue[-1]:
                    occupied_qubits.extend(a)
                if set(occupied_qubits).isdisjoint(set(cx_gate)):
                    if self.check_already(cx_chain,cx_gate,computed_cx_list):
                        cx_queue[-1].append(cx_gate)
                        computed_cx_list.append(i)
            if len(computed_cx_list)==len(cx_chain):
                break 
        self.cur_depth=len(cx_queue) 
    def query_cost(self,action_id,qubits,target_qubit):
        """CX-chain error plus the target's measurement error for one action."""
        cost=0
        l_num=len(qubits)
        if action_id==1:
            # funnel: CX costs from both ends toward the target's position
            target_index=qubits.index(target_qubit)
            for i in range(target_index):
                cost+=self.ger_dict.get((qubits[i],qubits[i+1]),0)
            for i in range(l_num-1-target_index): 
                cost+=self.ger_dict.get((qubits[l_num-1-i],qubits[l_num-2-i]),0)
        elif action_id==2:
            # swap: two CXs on the same pair
            cost+=self.ger_dict.get((qubits[0],qubits[1]),0)*2 
        else:
            raise ValueError("Unsupported action id!")
        cost+=self.mem_dict.get(target_qubit,0)
        return cost
    def get_valid_actions(self):
        """Enumerate valid actions as [action_id, qubits, target, cost]."""
        valid_actions=[]
        unified_terms=find_continuous_obs_indices("".join([str(int(s)) for s in self.state]),repre_type="0")
        #reduction
        for term in unified_terms:
            if len(term)>1: 
                for m in range(2,self.max_depth+2): 
                    # NOTE(review): local name shadows itertools.combinations
                    # imported at the top of the file
                    combinations=[term[i:i+m] for i in range(len(term)-1) if i+m<=len(term)] 
                    for comb in combinations:
                        for c in comb:
                            valid_actions.append([1,comb,c,self.query_cost(1,comb,c)])
        #swap
        # only consider the edges of each run of 1s
        for term in unified_terms:
            if len(term)==1:
                qidx=term[0]
                qidx1=qidx-1 
                qidx2=qidx+1 
                if (qidx1>=0) and (qidx1<self.qn):
                    if self.state[qidx1]==0: 
                        valid_actions.append([2,[qidx1,qidx],qidx1,self.query_cost(2,[qidx1,qidx],qidx1)])
                if (qidx2>=0) and (qidx2<self.qn):
                    if self.state[qidx2]==0:
                        valid_actions.append([2,[qidx,qidx2],qidx2,self.query_cost(2,[qidx,qidx2],qidx2)])
            else:
                edge1=term[0]
                qidx1=edge1-1
                edge2=term[-1]
                qidx2=edge2+1 
                if (qidx1>=0) and (qidx1<self.qn):
                    if self.state[qidx1]==0:
                        valid_actions.append([2,[qidx1,edge1],qidx1,self.query_cost(2,[qidx1,edge1],qidx1)]) 
                if (qidx2>=0) and (qidx2<self.qn):
                    if self.state[qidx2]==0:
                        valid_actions.append([2,[edge2,qidx2],qidx2,self.query_cost(2,[edge2,qidx2],qidx2)])
        return valid_actions
    def is_done(self):
        # episode terminates exactly when the schedule reaches max_depth
        if self.cur_depth==self.max_depth:
            return True
        else:
            return False
    def calculate_reward(self): 
        """Weighted reward: fewer remaining 1s and lower total cost are better."""
        ones_reward=-(np.sum(self.state)/self.qn)
        # Reward for minimizing total cost
        cost_reward=-self.total_cost 
        # Weights for each reward component 
        beta=0.3   # Weight for ones reward
        gamma=0.7  # Weight for cost reward 

        reward=beta*ones_reward+gamma*cost_reward
        return reward
    def step(self,action):
        """Apply ``action``; returns (state, reward, done, info).

        Returns reward -100 and terminates when no valid action exists, the
        step budget is exceeded, or the schedule grows past ``max_depth``.
        """
        self.step_count+=1 
        valid_actions=self.get_valid_actions()
        if (len(valid_actions)==0) or (self.step_count>self.max_step):
            return self._get_state(),-100,True,{} 
        self.action_log.append(action)
        self.total_cost+=action[3]
        #execute the action
        if action[0]==1:
            for q in action[1]:
                if q!=action[2]:
                    self.state[q]=0
        elif action[0]==2:
            qidx=(action[1].index(action[2])+1)%2
            self.state[action[1][qidx]]=0
            self.state[action[2]]=1
        #check max depth
        self.update_depth()
        if self.cur_depth>self.max_depth:
            return self._get_state(),-100,True,{}
        #stop condition
        done=self.is_done()
        #compute reward
        reward=self.calculate_reward() 

        #update record
        if done:
            if self.highest_reward<reward:
                self.highest_reward=reward
                self.best_actions=self.action_log[:]
                self.alter_state=copy(self.state)
        return self._get_state(),reward,done,{}
class DQN(nn.Module):
    def __init__(self,action_input_dim,state_input_dim,max_len=12,max_depth=2):
        super(DQN, self).__init__()
        self.max_len=max_len
        self.max_depth=max_depth
        self.action_fc=nn.Linear(action_input_dim,64)
        self.state_fc=nn.Linear(state_input_dim,64)
        self.fc1=nn.Linear(128,64)
        self.fc2=nn.Linear(64,64)
        self.fc3=nn.Linear(64,1)

    def forward(self,action,state): 
        action_id=torch.tensor([action[0]],dtype=torch.float)
        qubits=action[1]+[0]*(self.max_depth+1-len(action[1]))
        qubits=torch.tensor(qubits,dtype=torch.float)
        target_id=torch.tensor([action[2]],dtype=torch.float)
        cost=torch.tensor([action[3]],dtype=torch.float)
 
        action_vector=torch.cat([action_id.unsqueeze(0).float(),qubits.unsqueeze(0).float(),target_id.unsqueeze(0).float(),cost.unsqueeze(0)],dim=1)
        action_vector=F.relu(self.action_fc(action_vector))
 
        if len(state.shape)==1:
            state=state.unsqueeze(0).float()
        current_len=state.shape[1]
        pad_size=self.max_len-current_len
        if pad_size>0:
            pad=torch.zeros(state.size(0),pad_size)
            state=torch.cat([state,pad],dim=1) 
        state_vector=F.relu(self.state_fc(state))

        combined=torch.cat([action_vector,state_vector],dim=1)

        x=F.relu(self.fc1(combined))
        x=F.relu(self.fc2(x))
        x=self.fc3(x)
        return x

#// ANCHOR  train RL
def train_dqn(env,dqn,episodes=1000,lr=0.001,batch_size=32,gamma=0.99,epsilon=1.0,epsilon_decay=0.995,epsilon_min=0.01,plot=True):
    """Train `dqn` on `env` with epsilon-greedy exploration and experience replay.

    Parameters mirror a standard DQN setup: replay memory of 2000 transitions,
    Adam optimizer, MSE TD loss, multiplicative epsilon decay per episode.
    Returns (env.best_actions, env.alter_state) — the best action sequence and
    corresponding state recorded by the environment across all episodes.
    """
    memory=deque(maxlen=2000)
    optimizer=torch.optim.Adam(dqn.parameters(),lr=lr)
    criterion=nn.MSELoss()
    for episode in range(episodes):
        state=env.reset()
        done=False

        while not done:
            valid_actions=env.get_valid_actions()
            if random.uniform(0,1)<epsilon:
                action=random.choice(valid_actions)
            else:
                # Greedy action: score every valid action; no grad needed here.
                with torch.no_grad():
                    q_estimates=[dqn(a,env._get_state()).numpy() for a in valid_actions]
                action=valid_actions[np.argmax(q_estimates)]
            next_state,reward,done,_=env.step(action)
            memory.append((state,action,reward,next_state,done))
            state=next_state

            if len(memory)>batch_size:
                batch=random.sample(memory,batch_size)
                # BUG FIX: the original unpacked into `states`/`actions` and then
                # reassigned the outer `state`/`action` inside the loop below,
                # corrupting the transition recorded on the next env.step.
                # Use batch-local names throughout.
                b_states,b_actions,b_rewards,b_next_states,b_dones=zip(*batch)
                rewards_t=torch.tensor(b_rewards,dtype=torch.float32)
                dones_t=torch.tensor(b_dones,dtype=torch.float32)

                q_values_list=[]
                next_q_values_list=[]
                for i in range(batch_size):
                    q_values_list.append(dqn(b_actions[i],b_states[i]))
                    next_q_values_list.append(dqn(b_actions[i],b_next_states[i]))
                q_batch=torch.cat(q_values_list,dim=0)
                # TD target must not propagate gradients (standard DQN): detach.
                next_q_batch=torch.cat(next_q_values_list,dim=0).detach()
                expected_q=rewards_t.unsqueeze(1)+gamma*next_q_batch*(1-dones_t.unsqueeze(1))
                loss=criterion(q_batch,expected_q)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

        epsilon=max(epsilon_min,epsilon*epsilon_decay)
        if (episode+1)%50==0 and plot:
            print(f"Episode:{episode+1}/{episodes},Cur State:{env.alter_state},Total Reward:{env.highest_reward},Epsilon:{epsilon}")
    return env.best_actions,env.alter_state

def RL_measure_scheduling(measure_operator,error_seed,ind_mean,corr_mean,ger2,error_type,ind_measure_dict,cx_ger_dict,max_depth=3,max_len=12,max_step=20,episodes=1000,lr=0.001,batch_size=32,gamma=0.99,epsilon=1.0,epsilon_decay=0.995,epsilon_min=0.01):
    """Search for a measurement scheme for `measure_operator` with a DQN agent.

    Results are cached as a pickle under `datapath`/RL_modality, keyed on the
    error model parameters, and reloaded on later calls with the same key.

    max_depth: limit on the MS circuit depth (marked "to be completed" in the
        original author's note).
    Returns the measurement scheme list, or None when the scheme fails the
    consistency check against the altered bit string.
    """
    filename=datapath+f"/RL_modality/{error_type}_{error_seed}_{measure_operator}_{ind_mean}_{corr_mean}_{ger2}_{max_depth}.pkl"
    if os.path.exists(filename):
        # Cache hit: reuse the previously computed scheme.
        with open(filename,'rb') as f:
            measure_scheme=pickle.load(f)
        return measure_scheme
    else:
        if error_type=="composite":
            # Fold the T1-decay contribution into the two-qubit error rate.
            ger2+=sy23_decay
        bit_string=obs2bitstring(measure_operator)
        env=StringEnv(bit_string,ind_measure_dict,cx_ger_dict,max_depth=max_depth,max_step=max_step)
        # Action feature layout: id(1) + qubits(max_depth+1) + target(1) + cost(1).
        action_input_dim=1+max_depth+1+1+1
        state_input_dim=max_len
        agent=DQN(action_input_dim,state_input_dim,max_len=max_len,max_depth=max_depth)
        measure_schemes,alter_bit_string=train_dqn(env,agent,episodes=episodes,lr=lr,batch_size=batch_size,gamma=gamma,epsilon=epsilon,epsilon_decay=epsilon_decay,epsilon_min=epsilon_min,plot=False)
        measure_scheme=[]
        for action in measure_schemes:
            # NOTE(review): mutates the action lists in place (also visible
            # through env.best_actions); presumably shifts ids to 1-based —
            # confirm against the action encoding.
            action[0]+=1
            measure_scheme.append(action) 
        # Consistency check: the effective Z-observable implied by the altered
        # bit string must match the one computed from the scheme itself.
        eff_Z_obs=bitstring2obs("".join([str(a) for a in alter_bit_string]))
        eff_obs=compute_eff_obs(measure_scheme,measure_operator) 
        if eff_Z_obs==eff_obs:
            with open(filename,'wb') as f:
                pickle.dump(measure_scheme,f)
            return measure_scheme
        else:
            # Inconsistent scheme: report and return None (best-effort, no raise).
            print(eff_Z_obs,eff_obs,measure_scheme)
            # raise Exception("Wrong measure scheme!!")
            return None
# #In[]
# measure_operator="ZZIZ"
# qn=len(measure_operator)
# error_seed=0
# ind_mean=0.015
# ind_std=0.01
# corr_mean=0.008
# ger2=5e-3+0.01
# ger2_std=0.001
# error_type="co"
# ind_measure_dict=random_MER_ind(qn,ind_mean,ind_std,seed=error_seed)
# cx_ger_dict=cx_ger_dict=random_cx_error_dict(qn,ger2,ger2_std,seed=error_seed)  
# RL_measure_scheduling(measure_operator,error_seed,ind_mean,corr_mean,ger2,error_type,ind_measure_dict,cx_ger_dict,max_depth=4,max_len=12,max_step=20)
# #In[]
#// ANCHOR collect data
def query_obs(obs_type,qn,seed=1,interval=2):
    """Return the Pauli-string observable for `qn` qubits of the given type.

    NOTE: this definition is shadowed by a later 2-argument `query_obs`
    redefinition in this file; callers using keyword `interval` hit this one
    only if imported before the redefinition.
    """
    if obs_type=="global":
        # All-Z observable.
        return "Z"*qn
    if obs_type=="two-local":
        # Precomputed ZZI... patterns for the supported qubit counts.
        two_local={3:"ZZI",4:"ZZIZ",6:"ZZIZZI",8:"ZZIZZIZZ",10:"ZZIZZIZZIZ",12:"ZZIZZIZZIZZI",14:"ZZIZZIZZIZZIZZ",16:"ZZIZZIZZIZZIZZIZ"}
        return two_local[qn]
    if obs_type=="interval":
        # Repeat `interval` Z's followed by one I, then truncate to qn.
        block="Z"*interval+"I"
        repeats=qn//(interval+1)+1
        return (block*repeats)[:qn]
def setting_pre(qn,ind_mean,ind_std,corr_mean,corr_std,ger1,ger2,ger2_std,error_type,error_seed,co_error=0.01,no_meas_model=False): 
    """Build the seeded error dictionaries, noise model and TPN calibration matrix.

    Returns (cx_ger_dict, ind_mem_dict, corr_dict, noise_model, cali_T), plus a
    sixth noise model without measurement error when `no_meas_model` is True.
    """
    cx_ger_dict=random_cx_error_dict(qn,ger2,ger2_std,seed=error_seed)
    ind_mem_dict=random_MER_ind(qn,ind_mean,ind_std,seed=error_seed)
    corr_dict=random_MER_corr(qn,corr_mean,corr_std,seed=error_seed)
    noise_model=generate_noise_model(ger1,cx_ger_dict,ind_mem_dict,error_type,co_error=co_error)
    # Calibration matrix is cached on disk, keyed on the full error setting.
    cali_path=datapath+f"/TPN_cali/{error_type}_{ind_mean}_{ind_std}_{ger1}_{ger2}_{qn}_{error_seed}_{corr_mean}.npy"
    cali_T=save_load_cali(qn,cali_path,int(1e6),noise_model,corr_dict=corr_dict)
    if not no_meas_model:
        return cx_ger_dict,ind_mem_dict,corr_dict,noise_model,cali_T
    # Additionally build a noise model with no measurement error (empty dict).
    noise_model_no_meas=generate_noise_model(ger1,cx_ger_dict,{},error_type)
    return cx_ger_dict,ind_mem_dict,corr_dict,noise_model,cali_T,noise_model_no_meas
def get_MS_cali(qn,measure_scheme,obs,noise_model,corr_dict={},ri=0,calishot=int(1e5),fast_twirling=True,real_machine=False,MS_qc=None,loc=[]): 
    """Calibration value for the MS estimator: |expectation| on the reference circuit."""
    reference_circ=get_ori_circ(qn,ms=measure_scheme,obs=obs)
    raw=get_MS_exp(qn,reference_circ.copy(),calishot,measure_scheme,obs,noise_model,corr_dict=corr_dict,ri=ri,fast_twirling=fast_twirling,real_machine=real_machine,MS_qc=MS_qc,loc=loc)
    return np.abs(raw)
def get_MF_cali(qn,noise_model,obs,corr_dict={},ri=0,calishot=int(1e5),real_machine=False,fast_twirling=False):
    """Calibration value for the MF estimator: |expectation| on the reference circuit."""
    reference_circ=get_ori_circ(qn)
    raw=get_compectation_value_with_MF(qn,reference_circ.copy(),calishot,noise_model,obs,corr_dict=corr_dict,ri=ri,real_machine=real_machine,fast_twirling=fast_twirling)
    return np.abs(raw)
def rand_basis_state(n,seed):
    """Return a random computational-basis state of `n` qubits as a length-2**n
    one-hot numpy vector.

    Deterministic for a fixed `seed` (uses its own RandomState, so it does not
    disturb the global numpy RNG).
    """
    state=np.zeros(2**n)
    rng=np.random.RandomState(seed=seed)
    state[rng.randint(2**n)]=1
    # (dropped the original redundant np.array(...) re-wrap of an ndarray)
    return state
def initial_circ(qn,circ_type,circ_seed,obs):
    """Build the initial circuit and its ideal expectation value.

    circ_type: "rand" (random statevector checked against `obs`), "GHZ"
    (GHZ statevector followed by a layer of H gates, ideal value 1), or
    "basis" (random computational-basis state).
    Returns (circ, ideal).
    Raises ValueError for an unknown circ_type (the original fell through and
    crashed with UnboundLocalError on `ideal`).
    """
    circ=QuantumCircuit(qn,qn)
    if circ_type=="rand": 
        circ.set_statevector(rand_vec(qn,circ_seed,obs_check=obs))
        ideal=ideal_exp_fast(qn,circ.copy(),obs)
    elif circ_type=="GHZ":
        circ.set_statevector(GHZ_statevec(qn))
        for i in range(qn):
            circ.h(i)
        # NOTE(review): `obs` is reassigned here but never used afterwards in
        # this function — the caller's obs is unaffected; confirm intent.
        obs="Z"*qn
        ideal=1 
    elif circ_type=="basis":
        circ.set_statevector(rand_basis_state(qn,circ_seed))
        ideal=ideal_exp_fast(qn,circ.copy(),obs)
    else:
        raise ValueError(f"Unknown circ_type: {circ_type}")
    return circ,ideal

def collect_data_MF(qn,ind_mean,ind_std,corr_mean,corr_std,ger1,ger2,ger2_std,error_type,error_seed,circ_seed_list,nshot_list,obs,filename,co_error=0.01,ri=0,circ_type="rand"):
    """Collect measurement-filter (MF) benchmarking data for one observable.

    For every (circ_seed, nshots) combination, records the ideal value, raw
    noisy value, TPN-mitigated value and the MF-mitigated values (standard and
    fast twirling) as one CSV row appended to `filename`.
    """
    # Check the effect of twirling in the global case.
    if error_type in ["real-ampl","real-co","real-composite"]:
        # "real-*" error types: run against the measurement-error-free model
        # (returned as the extra sixth value of setting_pre).
        cx_ger_dict,ind_mem_dict,corr_dict,_,cali_T,noise_model=setting_pre(qn,ind_mean,ind_std,corr_mean,corr_std,ger1,ger2,ger2_std,error_type,error_seed,co_error=co_error,no_meas_model=True)
        real_machine=True
    else:
        cx_ger_dict,ind_mem_dict,corr_dict,noise_model,cali_T=setting_pre(qn,ind_mean,ind_std,corr_mean,corr_std,ger1,ger2,ger2_std,error_type,error_seed,co_error=co_error)
        real_machine=False  

    for circ_seed in circ_seed_list:
        circ,ideal=initial_circ(qn,circ_type,circ_seed,obs)
        for nshots in nshot_list: 
            # Map nshots to a label for the CSV: "qn" scales with qubit count,
            # 0/negative means exact ("inf"), otherwise its order of magnitude.
            if nshots=="qn":
                nshot_degree="qn"
                nshots=qn*(1e5)
            else:
                if nshots>0:
                    nshot_degree=int(np.log10(nshots))
                else:
                    nshot_degree="inf"
            noisy=get_expectation_value_probs(qn,circ.copy(),nshots,obs,noise_model=noise_model,corr_dict=corr_dict,real_machine=real_machine)
            noisy_TPN=get_compectation_value_with_TPN(qn,circ.copy(),nshots,noise_model,cali_T,obs,corr_dict=corr_dict,real_machine=real_machine)  
            
            # MF estimator: calibration and raw value, standard twirling.
            MF_cali=get_MF_cali(qn,noise_model,obs,corr_dict=corr_dict,ri=ri,real_machine=real_machine,calishot=nshots)
            noisy_MF=get_compectation_value_with_MF(qn,circ.copy(),nshots,noise_model,obs,corr_dict=corr_dict,ri=ri,real_machine=real_machine)   
            
            # Same with fast twirling.
            MF_cali_fast=get_MF_cali(qn,noise_model,obs,corr_dict=corr_dict,ri=ri,real_machine=real_machine,calishot=nshots,fast_twirling=True)
            noisy_MF_fast=get_compectation_value_with_MF(qn,circ.copy(),nshots,noise_model,obs,corr_dict=corr_dict,ri=ri,real_machine=real_machine,fast_twirling=True) 
 
            # One CSV row; all values stringified. MF values are rescaled by
            # their calibration factors.
            data={"qn":[str(qn)],"ind_mean":[str(ind_mean)],"ind_std":[str(ind_std)],"corr_std":[str(corr_std)],"corr_mean":[str(corr_mean)],"error_seed":[str(error_seed)],"nshots":[str(nshot_degree)],"ger1":[str(ger1)],"ger2":[str(ger2)],"error_type":[str(error_type)],"co_rate":[str(co_error)],"ri":[str(ri)],"obs":[str(obs)],"circ_seed":[str(circ_seed)],"ideal":[str(ideal)],"noisy":[str(noisy)],"noisy_TPN":[str(noisy_TPN)],"noisy_MF":[str(noisy_MF/MF_cali)],"noisy_MF_fast":[str(noisy_MF_fast/MF_cali_fast)]}

            df=pd.DataFrame(data=data)
            # Global progress counter shared with the other collectors.
            global idx 
            idx+=1
            print(idx)   
            # print(ideal,noisy,MS_fixed1_sub/MS_fixed_cali1_sub,MS_fixed2_sub/MS_fixed_cali2_sub) 
            # print(ideal,MS_fixed2_sub,MS_fixed_cali2_sub,MS_fixed2_sub/MS_fixed_cali2_sub)
            # raise Exception()
            savedata(df,filename,False) 


def collect_data_global(qn,ind_mean,ind_std,corr_mean,corr_std,ger1,ger2,ger2_std,error_type,error_seed,circ_seed_list,nshot_list,filename,co_error=0.01,ri=0,circ_type="rand"):
    """Collect benchmarking data for the global all-Z observable.

    Compares raw noisy, TPN, MF and two fixed measurement schemes (depth-2
    batch vs full-depth batch), each with standard and fast twirling, and
    appends one CSV row per (circ_seed, nshots) to `filename`.
    """
    # Check the effect of twirling in the global case.
    if error_type in ["real-ampl","real-co","real-composite"]:
        # "real-*" error types: run against the measurement-error-free model.
        cx_ger_dict,ind_mem_dict,corr_dict,_,cali_T,noise_model=setting_pre(qn,ind_mean,ind_std,corr_mean,corr_std,ger1,ger2,ger2_std,error_type,error_seed,co_error=co_error,no_meas_model=True)
        real_machine=True
    else:
        cx_ger_dict,ind_mem_dict,corr_dict,noise_model,cali_T=setting_pre(qn,ind_mean,ind_std,corr_mean,corr_std,ger1,ger2,ger2_std,error_type,error_seed,co_error=co_error)
        real_machine=False 
    obs="Z"*qn 
    obs_type="global"
    # Initialize the two fixed measurement schemes (small vs full batch depth).
    fixed_ms_one_depth=measurement_scheduling(obs,qn,ind_mem_dict,cx_ger_dict,error_type,max_batch=2)
    fixed_ms_full_depth=measurement_scheduling(obs,qn,ind_mem_dict,cx_ger_dict,error_type,max_batch=qn)
 
    for circ_seed in circ_seed_list:
        circ,ideal=initial_circ(qn,circ_type,circ_seed,obs)
        for nshots in nshot_list: 
            # Map nshots to a CSV label; "qn" scales with qubit count,
            # 0/negative means exact ("inf"), otherwise order of magnitude.
            if nshots=="qn":
                nshot_degree="qn"
                nshots=qn*(1e5)
            else:
                if nshots>0:
                    nshot_degree=int(np.log10(nshots))
                else:
                    nshot_degree="inf"
            noisy=get_expectation_value_probs(qn,circ.copy(),nshots,obs,noise_model=noise_model,corr_dict=corr_dict,real_machine=real_machine)
            noisy_TPN=get_compectation_value_with_TPN(qn,circ.copy(),nshots,noise_model,cali_T,obs,corr_dict=corr_dict,real_machine=real_machine) 
            
            # Calibration values: fast_twirling defaults to True in get_MS_cali,
            # so the first two calls use fast_twirling=False explicitly.
            MS_fixed_cali1=get_MS_cali(qn,fixed_ms_one_depth,obs,noise_model,corr_dict=corr_dict,ri=ri,fast_twirling=False,real_machine=real_machine,calishot=nshots)
            MS_fixed_cali2=get_MS_cali(qn,fixed_ms_full_depth,obs,noise_model,corr_dict=corr_dict,ri=ri,fast_twirling=False,real_machine=real_machine,calishot=nshots)
            MF_cali=get_MF_cali(qn,noise_model,obs,corr_dict=corr_dict,ri=ri,real_machine=real_machine,calishot=nshots)
            MS_fixed_cali1_fast=get_MS_cali(qn,fixed_ms_one_depth,obs,noise_model,corr_dict=corr_dict,ri=ri,real_machine=real_machine,calishot=nshots)
            MS_fixed_cali2_fast=get_MS_cali(qn,fixed_ms_full_depth,obs,noise_model,corr_dict=corr_dict,ri=ri,real_machine=real_machine,calishot=nshots) 
            
            noisy_MF=get_compectation_value_with_MF(qn,circ.copy(),nshots,noise_model,obs,corr_dict=corr_dict,ri=ri,real_machine=real_machine)  

            # MS estimates with standard twirling...
            MS_fixed1=get_MS_exp(qn,circ.copy(),nshots,fixed_ms_one_depth,obs,noise_model,corr_dict=corr_dict,ri=ri,fast_twirling=False,real_machine=real_machine)  
            MS_fixed2=get_MS_exp(qn,circ.copy(),nshots,fixed_ms_full_depth,obs,noise_model,corr_dict=corr_dict,ri=ri,fast_twirling=False,real_machine=real_machine)   

            # ...and with fast twirling (the get_MS_exp default).
            MS_fixed1_fast=get_MS_exp(qn,circ.copy(),nshots,fixed_ms_one_depth,obs,noise_model,corr_dict=corr_dict,ri=ri,real_machine=real_machine)  
            MS_fixed2_fast=get_MS_exp(qn,circ.copy(),nshots,fixed_ms_full_depth,obs,noise_model,corr_dict=corr_dict,ri=ri,real_machine=real_machine)  
 
            # One CSV row; mitigated values are rescaled by their calibrations.
            data={"qn":[str(qn)],"ind_mean":[str(ind_mean)],"ind_std":[str(ind_std)],"corr_std":[str(corr_std)],"corr_mean":[str(corr_mean)],"error_seed":[str(error_seed)],"nshots":[str(nshot_degree)],"ger1":[str(ger1)],"ger2":[str(ger2)],"error_type":[str(error_type)],"co_rate":[str(co_error)],"ri":[str(ri)],"obs":[str(obs)],"obs_type":[str(obs_type)],"circ_seed":[str(circ_seed)],"ideal":[str(ideal)],"noisy":[str(noisy)],"noisy_TPN":[str(noisy_TPN)],"noisy_MF":[str(noisy_MF/MF_cali)],"MS_fixed1":[str(MS_fixed1/MS_fixed_cali1)],"MS_fixed2":[str(MS_fixed2/MS_fixed_cali2)],"MS_fixed1_fast":[str(MS_fixed1_fast/MS_fixed_cali1_fast)],"MS_fixed2_fast":[str(MS_fixed2_fast/MS_fixed_cali2_fast)],"circ_type":[str(circ_type)]}

            df=pd.DataFrame(data=data)
            # Global progress counter shared with the other collectors.
            global idx 
            idx+=1
            print(idx)   
            # print(ideal,noisy,MS_fixed1_sub/MS_fixed_cali1_sub,MS_fixed2_sub/MS_fixed_cali2_sub) 
            # print(ideal,MS_fixed2_sub,MS_fixed_cali2_sub,MS_fixed2_sub/MS_fixed_cali2_sub)
            # raise Exception()
            savedata(df,filename,False) 

def query_obs(obs_type,qn):
    """Return the Pauli-string observable for `qn` qubits of the given type.

    NOTE: this redefinition shadows the earlier 3-argument `query_obs` above;
    its "interval" pattern (ZI repeated) also differs from the earlier one.
    """
    if obs_type=="global":
        # All-Z observable.
        return "Z"*qn
    if obs_type=="two-local":
        # Precomputed ZZI... patterns for the supported qubit counts.
        table={3:"ZZI",4:"ZZIZ",6:"ZZIZZI",8:"ZZIZZIZZ",10:"ZZIZZIZZIZ",12:"ZZIZZIZZIZZI",14:"ZZIZZIZZIZZIZZ",16:"ZZIZZIZZIZZIZZIZ"}
        return table[qn]
    if obs_type=="interval":
        # Alternating ZI pattern; a trailing Z when qn is odd.
        pattern="ZI"*(qn//2)
        return pattern if qn%2==0 else pattern+"Z"

def collect_data_loc(qn,ind_mean,ind_std,corr_mean,corr_std,ger1,ger2,ger2_std,error_type,error_seed,obs_type,circ_seed_list,nshots,filename,ri,co_rate=0.01,circ_type="rand"):
    """Collect mitigation data for local observables ("two-local"/"interval").

    Compares raw noisy, TPN, MF (standard + fast twirling) and two
    subset-based MS estimators, appending one CSV row per circ_seed.
    """
    if error_type in ["real-ampl","real-co","real-composite"]:
        # "real-*" error types: run against the measurement-error-free model.
        cx_ger_dict,ind_mem_dict,corr_dict,_,cali_T,noise_model=setting_pre(qn,ind_mean,ind_std,corr_mean,corr_std,ger1,ger2,ger2_std,error_type,error_seed,no_meas_model=True,co_error=co_rate)
        real_machine=True
    else:
        cx_ger_dict,ind_mem_dict,corr_dict,noise_model,cali_T=setting_pre(qn,ind_mean,ind_std,corr_mean,corr_std,ger1,ger2,ger2_std,error_type,error_seed,co_error=co_rate)
        real_machine=False
    obs=query_obs(obs_type,qn)
    # NOTE(review): unlike the other collectors, a string nshots=="qn" is never
    # converted to a number here but is still passed to every estimator below
    # before the nshot_degree branch — confirm "qn" is never used as input.
    for circ_seed in circ_seed_list:
        circ,ideal=initial_circ(qn,circ_type,circ_seed,obs)
        noisy=get_expectation_value_probs(qn,circ.copy(),nshots,obs,noise_model=noise_model,corr_dict=corr_dict,real_machine=real_machine)
        noisy_TPN=get_compectation_value_with_TPN(qn,circ.copy(),nshots,noise_model,cali_T,obs,corr_dict=corr_dict,real_machine=real_machine) 

        # MF estimator, standard twirling.
        MF_cali=get_MF_cali(qn,noise_model,obs,corr_dict=corr_dict,ri=ri,real_machine=real_machine,calishot=nshots)
        noisy_MF=get_compectation_value_with_MF(qn,circ.copy(),nshots,noise_model,obs,corr_dict=corr_dict,ri=ri,real_machine=real_machine)
        
        # MF estimator, fast twirling.
        MF_cali_fast=get_MF_cali(qn,noise_model,obs,corr_dict=corr_dict,ri=ri,real_machine=real_machine,calishot=nshots,fast_twirling=True)
        noisy_MF_fast=get_compectation_value_with_MF(qn,circ.copy(),nshots,noise_model,obs,corr_dict=corr_dict,ri=ri,real_machine=real_machine,fast_twirling=True) 
 
 
        # MS estimator 1: scheme circuit queried for this observable, qubit 0.
        ms_qc,_=query_MS_qc(obs,0,cx_ger_dict,ind_mem_dict)
        MS_cali_sub=get_MS_cali(qn,[],obs,noise_model,corr_dict=corr_dict,ri=ri,real_machine=real_machine,calishot=nshots,MS_qc=ms_qc.copy(),loc=[0])
        MS_sub=get_MS_exp(qn,circ.copy(),nshots,[],obs,noise_model,corr_dict=corr_dict,ri=ri,real_machine=real_machine,MS_qc=ms_qc.copy(),loc=[0])  
        MS1=MS_sub/MS_cali_sub 
        
        # MS estimator 2: subset locations depend on the observable layout.
        if obs_type=="two-local":
            sl=[0,3]
        elif obs_type=="interval":
            sl=[0,2]
        else:
            raise Exception("Unsupported obs type for loc!")
        # Encode the observable as a binary index (Z->1, I->0); the -1 offset
        # presumably skips the all-I string in the circuit table — verify.
        obs_int=deepcopy(obs)
        obs_int=obs_int.replace("Z","1").replace("I","0") 
        index=int(obs_int,base=2)-1
        ms_qc,_=sm_circuits_obs(qn,sl)
        ms_qc=ms_qc[index]
        MS_cali_sub=get_MS_cali(qn,[],obs,noise_model,corr_dict=corr_dict,ri=ri,real_machine=real_machine,calishot=nshots,MS_qc=ms_qc.copy(),loc=sl)
        MS_sub=get_MS_exp(qn,circ.copy(),nshots,[],obs,noise_model,corr_dict=corr_dict,ri=ri,real_machine=real_machine,MS_qc=ms_qc.copy(),loc=sl)   
        MS2=MS_sub/MS_cali_sub 

        #data saving
        if nshots=="qn":
            nshot_degree=-1
        else:
            if nshots>0:
                nshot_degree=int(np.log10(nshots))
            else:
                nshot_degree="inf"
        data={"qn":[str(qn)],"ind_mean":[str(ind_mean)],"ind_std":[str(ind_std)],"corr_std":[str(corr_std)],"corr_mean":[str(corr_mean)],"error_seed":[str(error_seed)],"nshots":[str(nshot_degree)],"ger1":[str(ger1)],"ger2":[str(ger2)],"ger2_std":[str(ger2_std)],"co_rate":[str(co_rate)],"error_type":[str(error_type)],"ri":[str(ri)],"obs":[str(obs)],"obs_type":[str(obs_type)],"circ_seed":[str(circ_seed)],"ideal":[str(ideal)],"noisy":[str(noisy)],"noisy_TPN":[str(noisy_TPN)],"noisy_MF":[str(noisy_MF/MF_cali)],"noisy_MF_fast":[str(noisy_MF_fast/MF_cali_fast)],"MS1":[str(MS1)],"MS2":[str(MS2)]}

        df=pd.DataFrame(data=data)
        # Global progress counter shared with the other collectors.
        global idx
        idx+=1 
        print(idx) 
        # print(ideal,noisy_TPN,noisy_MF_fast/MF_cali_fast,MS1,MS2)
        savedata(df,filename,False)
# #In[]
# qn=6
# error_type="real-co"
# ind_mean=0.015  
# ind_std=0.01  
# corr_mean=0.008  
# corr_std=0.005  
# ger1,ger2=5e-4,5e-3
# ger2_std=0.005
# co_rate=0.01
# error_seed=1
# obs="ZZIZZI"
# j=int("100100",base=2)-1
# cx_ger_dict,ind_mem_dict,corr_dict,_,cali_T,noise_model=setting_pre(qn,ind_mean,ind_std,corr_mean,corr_std,ger1,ger2,ger2_std,error_type,error_seed,no_meas_model=True,co_error=co_rate)
# ms_qc,_=query_MS_qc(obs,j,cx_ger_dict,ind_mem_dict)
# print(ms_qc)
# #In[]
  
#// ANCHOR test
#In[]
if __name__=="__main__":
    # Entry point: a command-line task id selects which data-collection sweep
    # to run.
    task_id=int(sys.argv[1])

    # Shared default error-model parameters for all tasks.
    ind_mean=0.015  
    ind_std=0.01  
    corr_mean=0.008  
    corr_std=0.005  
    ger1,ger2=5e-4,5e-3
    ger2_std=0.005
    circ_seed_list=list(range(100))

    filename=datapath+f"/rand_RL_{task_id}.csv"
    # Circuit type per task: task 2 uses basis states, others random states.
    if task_id==1: 
        circ_type="rand"
    elif task_id==2:
        circ_type="basis"
    else:
        circ_type="rand"
    if task_id in [1,2]:
        # NOTE(review): tasks 1 and 2 both overwrite `filename` with
        # rand_RL_1.csv — confirm they are meant to share one output file.
        filename=datapath+f"/rand_RL_1.csv"
        error_seed_list=list(range(1))
        nshot_list=[0]
        # qn_list=[6,2,3,4,5]
        qn_list=[6]
        ri_list=[4,64] 
        error_type_list=["real-co"]
        circ_seed_list=list(range(100))
        for qn in qn_list:
            for error_type in error_type_list:
                for error_seed in error_seed_list:  
                    for ri in ri_list:
                        collect_data_global(qn,ind_mean,ind_std,corr_mean,corr_std,ger1,ger2,ger2_std,error_type,error_seed,circ_seed_list,nshot_list,filename,ri=ri,circ_type=circ_type)
    
    
    elif task_id==3:
        # Sweep over qubit counts with a finite shot budget.
        #finite varying
        filename=datapath+f"/rand_RL_2.csv"
        error_type="co"
        ri=4
        circ_seed_list=list(range(100))
        error_seed=0
        # nshot_list=[int(1e5)]
        nshot_list=[int(1e5)]
        for qn in [6,8,2,4,10,12,14]:
            collect_data_global(qn,ind_mean,ind_std,corr_mean,corr_std,ger1,ger2,ger2_std,error_type,error_seed,circ_seed_list,nshot_list,filename,ri=ri,circ_type=circ_type)
    elif task_id==4:
        filename=datapath+f"/rand_RL_6.csv"
        # Sweep 1: number of shots.
        #nshots
        error_type="real-co"
        ri=16
        error_seed=0
        qn=6
        nshot_list=[int(a) for a in [1e2,1e3,1e4,1e5,1e6]]
        collect_data_global(qn,ind_mean,ind_std,corr_mean,corr_std,ger1,ger2,ger2_std,error_type,error_seed,circ_seed_list,nshot_list,filename,ri=ri,circ_type=circ_type)
        # Sweep 2: number of twirling instances (ri).
        #ri
        nshot_list=[0]
        ri_list=[4,8,12,16,20,40,60,80,100]
        for ri in ri_list:
            collect_data_global(qn,ind_mean,ind_std,corr_mean,corr_std,ger1,ger2,ger2_std,error_type,error_seed,circ_seed_list,nshot_list,filename,ri=ri,circ_type=circ_type)
        
        error_type="co"
        ri=16
        nshot_list=[int(1e5)]
        # Sweep 3: mean independent measurement error.
        # ind_mean
        ind_mean_list=[1e-4,1e-3,5e-3,8e-3,0.01,0.02,0.05]
        for ind_mean in ind_mean_list:
            collect_data_global(qn,ind_mean,ind_std,corr_mean,corr_std,ger1,ger2,ger2_std,error_type,error_seed,circ_seed_list,nshot_list,filename,ri=ri,circ_type=circ_type)
        # Sweep 4: mean correlated measurement error.
        # corr_mean
        ind_mean=0.015 
        corr_mean_list=[1e-4,1e-3,0.003,0.005,0.008,0.01,0.02]
        for corr_mean in corr_mean_list:
            collect_data_global(qn,ind_mean,ind_std,corr_mean,corr_std,ger1,ger2,ger2_std,error_type,error_seed,circ_seed_list,nshot_list,filename,ri=ri,circ_type=circ_type)
        corr_mean=0.008
        error_type="real-co"
        # Sweep 5: coherent error rate.
        #co_rate
        co_rate_list=[0.001,0.005,0.008,0.01,0.02,0.03,0.05]
        for co_rate in co_rate_list:
            collect_data_global(qn,ind_mean,ind_std,corr_mean,corr_std,ger1,ger2,ger2_std,error_type,error_seed,circ_seed_list,nshot_list,filename,ri=ri,circ_type=circ_type,co_error=co_rate)

    elif task_id==5:
        # Local observables: two-local and interval patterns.
        filename=datapath+f"/rand_RL_5.csv"
        qn=6
        error_type="real-co" 
        error_seed_list=[0]
        circ_seed_list=list(range(100))
        nshots=int(1e5)
        ri_list=[16] 
        obs_type_list=["two-local","interval"]
        for error_seed in error_seed_list:
            for ri in ri_list:
                for obs_type in obs_type_list:
                    collect_data_loc(qn,ind_mean,ind_std,corr_mean,corr_std,ger1,ger2,ger2_std,error_type,error_seed,obs_type,circ_seed_list,nshots,filename,ri)
    elif task_id==6:
        # MF sweep over explicit observables and twirling instance counts.
        filename=datapath+f"/rand_RL_4.csv"
        qn=6
        error_type="real-co" 
        error_seed_list=[0]
        circ_seed_list=list(range(100))
        nshots=0
        ri_list=[2,4,16,32,64] 
        obs_list=["ZIIIII","ZZIIII","ZZZIII"]
        for error_seed in error_seed_list:
            for ri in ri_list:
                for obs in obs_list:
                    collect_data_MF(qn,ind_mean,ind_std,corr_mean,corr_std,ger1,ger2,ger2_std,error_type,error_seed,circ_seed_list,[nshots],obs,filename,ri=ri)
#In[]