#!/usr/bin/env python
from astropy.io import fits
from scipy.stats import sigmaclip
from multiprocessing import Pool, cpu_count
from glob import glob
from astropy.table import Table
from testlib.overscan import crop_overscan,corr_overscan
from testlib.imgconfig import load_imgconfig
from testlib.utils import create_folder,read_monitor
from scipy.stats import sigmaclip,trim_mean
from astropy.table import Table
from scipy.optimize import curve_fit
from astropy.time import Time
import numpy as np
import os
import matplotlib.pyplot as plt
from argparse import ArgumentParser

def calculate_median(filepath):
    """Return (filepath, median pixel value) for one FITS image.

    Kept at module level so multiprocessing.Pool can pickle it.
    """
    pixels = fits.getdata(filepath).astype(float)
    return (filepath, np.median(pixels))
def parallel_fits_median(d, key_word, num_processes=30):
    """Compute per-file medians for every FITS file in d matching key_word.

    Files are processed in a worker pool (num_processes workers; passing
    None selects one worker per CPU). Returns two parallel numpy arrays:
    (filenames, medians), in Pool.map (i.e. glob) order.
    """
    fits_files = glob(d + "/" + key_word)
    if num_processes is None:
        # explicit None requests one worker per available core
        num_processes = cpu_count()
    print(f"Processing {len(fits_files)} FITS files using {num_processes} processes...")
    with Pool(processes=num_processes) as pool:
        pairs = pool.map(calculate_median, fits_files)
    names = np.array([p[0] for p in pairs])
    medians = np.array([p[1] for p in pairs])
    return names, medians
def get_ch_stats(chi_data):
    """Compute signal level and lag covariances for one readout channel.

    Parameters are packed in a tuple so Pool.map can ship them to a worker:
        chi_data[0] -- mean: pixel-wise mean of a flat pair, 2-D array
        chi_data[1] -- sub:  pixel-wise difference of the pair, 2-D array
        chi_data[2] -- dcorr: number of covariance lags per axis

    The channel is tiled into 200x200 regions (a trailing remainder smaller
    than 200 pixels is ignored). Per region: a 10%-trimmed mean of `mean`,
    and for every lag (i, j) with 0 <= i, j < dcorr the covariance of the
    4-sigma-clipped difference image with a shifted copy of itself. The
    final /2 accounts for `sub` being the difference of two frames, which
    doubles the single-frame variance.

    Returns (median trimmed mean over regions,
             (dcorr, dcorr) array of median lag covariances over regions).
    """
    mean, sub, dcorr = chi_data[0], chi_data[1], chi_data[2]
    ny, nx = mean.shape[0], mean.shape[1]
    ry, rx = ny // 200, nx // 200
    mean_data = np.zeros((ry, rx))
    cov_data = np.zeros((dcorr, dcorr, ry, rx))
    for yi in range(ry):
        for xi in range(rx):
            meanr = mean[yi*200:(yi+1)*200, xi*200:(xi+1)*200]
            subr = sub[yi*200:(yi+1)*200, xi*200:(xi+1)*200]
            mean_data[yi, xi] = trim_mean(meanr.flatten(), 0.1)
            # 4-sigma clip of the difference image; dedicated names for the
            # clip bounds (the original reused `l` for the pixel count below,
            # shadowing the lower bound).
            _, clip_lo, clip_hi = sigmaclip(subr.flatten(), low=4, high=4)
            # float validity mask: 1 for kept pixels, 0 for clipped ones
            mask = ((subr > clip_lo) * (subr < clip_hi)).astype(float)
            for i in range(dcorr):
                for j in range(dcorr):
                    # overlap of the image with itself shifted by (i, j)
                    data1, mask1 = subr[i:, j:], mask[i:, j:]
                    if i == 0 and j == 0:
                        data2, mask2 = subr, mask
                    elif i == 0:
                        data2, mask2 = subr[:, :-j], mask[:, :-j]
                    elif j == 0:
                        data2, mask2 = subr[:-i, :], mask[:-i, :]
                    else:
                        data2, mask2 = subr[:-i, :-j], mask[:-i, :-j]
                    mask12 = mask1 * mask2
                    npix = np.sum(mask12)
                    mean1 = np.sum(data1 * mask12) / npix
                    mean2 = np.sum(data2 * mask12) / npix
                    # /2: sub is a frame difference, so its variance is twice
                    # the per-frame variance
                    cov = np.sum((data1 - mean1) * (data2 - mean2) * mask12) / npix / 2
                    cov_data[i, j, yi, xi] = cov
    return (np.median(mean_data), np.median(cov_data, axis=(2, 3)))
                        
def get_fpair_data(f1, f2, conf, dcorr):
    """Load a pair of exposures and compute per-channel statistics.

    The two frames are averaged (signal) and differenced (noise); when
    conf.overscan == 1 both frames are overscan-corrected and cropped
    first. Each readout channel is then cut out of the mosaic and handed
    to get_ch_stats in its own worker process.

    Returns the list of get_ch_stats results, one entry per channel.
    """
    frame1 = fits.getdata(f1).astype(float)
    frame2 = fits.getdata(f2).astype(float)
    if conf.overscan == 1:
        # apply the identical overscan correction + crop to both frames
        frame1 = crop_overscan(corr_overscan(frame1, conf), conf)
        frame2 = crop_overscan(corr_overscan(frame2, conf), conf)
    mean = (frame1 + frame2) / 2
    sub = frame1 - frame2
    ch_data = []
    for ch in range(conf.nchan):
        x1 = conf.mos_x0[ch]
        x2 = x1 + conf.nx[ch]
        y1 = conf.mos_y0[ch]
        y2 = y1 + conf.ny[ch]
        ch_data.append((mean[y1:y2, x1:x2], sub[y1:y2, x1:x2], dcorr))
    with Pool(processes=conf.nchan) as pool:
        return pool.map(get_ch_stats, ch_data)
    
def linear_func(x, a):
    """Line through the origin, y = a * x; the PTC/response fit model."""
    return x * a

def cov(a, b):
    """Partial-overlap correlation of a with b, central quadrant only.

    For each offset (i, j) the sum of the element-wise product of a's
    top-left (i+1, j+1) corner with the matching corner of the
    180-degree-rotated b is accumulated. The lower-right quadrant of the
    accumulation buffer (centre included) is returned, flipped so that
    lag (0, 0) lands at index [0, 0].
    """
    h, w = a.shape[0], a.shape[1]
    b_rot = b[::-1, ::-1]
    buf = np.zeros((h + h // 2, w + w // 2))
    for i in range(h):
        for j in range(w):
            buf[i, j] = np.sum(a[:i + 1, :j + 1] * b_rot[-i - 1:, -j - 1:])
    central = buf[h // 2:, w // 2:]
    return central[:h // 2 + 1, :w // 2 + 1][::-1, ::-1]

def get_bnum_gnum(din):
    """Infer the bias-frame count and per-level flat counts from filenames.

    Files in din are expected to be named ptc-<level>-*.fits, where level 0
    holds bias frames and levels 1..N are increasing flat illumination
    levels. Returns (bias_num, gnums) with gnums[k] = file count of
    level k+1.
    """
    print("getting bnum_gnum")
    bias_num = len(glob(din + "/ptc-0-*.fits"))
    fs = glob(din + "/*.fits")
    print("all fs", fs)
    imidx, gnums = [], []
    for f in fs:
        # os.path.basename instead of split('/'): also correct on Windows
        fname = os.path.basename(f)
        imidx.append(int(fname.split("-")[1]))
    print("imidx", imidx)
    imnum = np.max(imidx)
    for i in range(imnum):
        gnums.append(len(glob(din + "/ptc-" + str(i + 1) + "-*.fits")))
    return bias_num, gnums

def run_gain_corr(conf, din, dout, dcorr, key_word, fmin_ratio, fmax_ratio, bias_num, gnums, fs):
    """Run the photon-transfer-curve (PTC) gain and correlation analysis.

    fs is the median-sorted file list: the first bias_num entries are bias
    frames, followed by gnums[k] flats for each illumination level k. For
    every channel, total variance vs. mean is fitted with a line through
    the origin over the signal range [fmin_ratio, fmax_ratio] * fmax and
    gain = 1 / slope.

    Writes mean.fits/cov.fits (bias-subtracted statistics, reused as a
    cache on reruns), gain.fits/gain.tab, corr_coeff.fits and per-channel
    diagnostic plots into dout.

    key_word is accepted for interface compatibility but unused here.
    Returns (list of per-channel PTC tables, list of gains).
    """
    if not os.path.exists(dout + "/cov.fits"):
        # --- bias statistics: median over all bias pairs ---
        bias_mean = np.zeros((bias_num//2, conf.nchan))
        bias_cov = np.zeros((bias_num//2, conf.nchan, dcorr, dcorr))
        for i in range(bias_num//2):
            print(f"processing bias pair {i}/{bias_num//2}")
            bf1, bf2 = fs[2*i], fs[2*i+1]
            print("f1", bf1, "f2", bf2)
            results = get_fpair_data(bf1, bf2, conf, dcorr)
            for j in range(conf.nchan):
                bias_mean[i, j] = results[j][0]
                bias_cov[i, j] = results[j][1]
        bias_mean, bias_cov = np.median(bias_mean, axis=0), np.median(bias_cov, axis=0)
        # --- flat statistics: one (mean, cov) per illumination level ---
        flat_means = np.zeros((len(gnums), conf.nchan))
        flat_covs = np.zeros((len(gnums), conf.nchan, dcorr, dcorr))
        num = bias_num  # index in fs of the first file of the current level
        for k, gnum in enumerate(gnums):
            print("gnum", gnum)
            print(f"processing flat level {k}/{len(gnums)}")
            flat_mean = np.zeros((gnum//2, conf.nchan))
            flat_cov = np.zeros((gnum//2, conf.nchan, dcorr, dcorr))
            for i in range(gnum//2):
                print(f"processing flat pair {i}/{gnum//2}")
                # (the original's k==0 and k>0 branches were identical)
                f1, f2 = fs[num+2*i], fs[num+2*i+1]
                print("f1", f1, "f2", f2)
                results = get_fpair_data(f1, f2, conf, dcorr)
                for j in range(conf.nchan):
                    flat_mean[i, j] = results[j][0]
                    flat_cov[i, j] = results[j][1]
            num += gnum
            # bias-subtract the per-level medians
            flat_mean = np.median(flat_mean, axis=0) - bias_mean
            flat_cov = np.median(flat_cov, axis=0) - bias_cov
            flat_means[k], flat_covs[k] = flat_mean, flat_cov
        fits.writeto(dout + "/mean.fits", data=flat_means, overwrite=True)
        fits.writeto(dout + "/cov.fits", data=flat_covs, overwrite=True)
    else:
        # cached products from a previous run
        flat_means = fits.getdata(dout + "/mean.fits")
        flat_covs = fits.getdata(dout + "/cov.fits")
    var_chs = []
    gains = []
    corrs = np.zeros((conf.nchan, dcorr, dcorr))
    # portable replacement for the former os.system("mkdir ...")
    os.makedirs(dout + "/plot", exist_ok=True)
    gain_tab = Table()
    gain_tab['channel'] = np.arange(conf.nchan) + 1
    tab_datas = []
    for i in range(conf.nchan):
        # total variance including inter-pixel covariances: off-axis lags
        # count twice (one axis offset) or four times (both axes) for the
        # +/- shift directions
        var = 0
        for j in range(dcorr):
            for k in range(dcorr):
                if j == 0 and k == 0:
                    var += flat_covs[:, i, j, k]
                elif j == 0 or k == 0:
                    var += 2*flat_covs[:, i, j, k]
                else:
                    var += 4*flat_covs[:, i, j, k]
        var_chs.append(var)
        mean = flat_means[:, i]
        # restrict the linear fit to the rising part of the PTC
        fmax = mean[np.argmax(var)]
        idx = (mean > fmin_ratio*fmax)*(mean < fmax_ratio*fmax)
        mean_data, var_data = mean[idx], var[idx]
        params, _ = curve_fit(linear_func, mean_data, var_data, p0=[1/1.5])
        Var = flat_covs[:, i, 0, 0]

        tab_data = Table()
        tab_data['mean'] = mean
        tab_data['var'] = var
        tab_data['fit_flag'] = idx
        tab_data.write(dout + "/ptc_result_ch%i.tab" % (i+1), format='ipac', overwrite=True)
        tab_datas.append(tab_data)

        slope = params[0]
        gain = 1/slope
        gains.append(gain)
        plt.switch_backend('agg')
        plt.plot(mean, Var, marker='^', label='variance', color='blue')
        plt.plot(mean, var, marker='o', label='total variance_all', color='green')
        plt.plot(mean_data, var_data, marker='o', label='total variance_fit', color='red')
        plt.plot(mean, slope*mean, linestyle='--', label='gain=%.3fe/ADU' % (gain), color='black')
        plt.legend(frameon=False)
        plt.savefig(dout + "/plot/ch%i_var.jpg" % (i+1))
        plt.clf()
        plt.close()
        # correlation coefficients: covariances normalised by the squared mean
        corri = np.zeros((flat_covs.shape[0], dcorr, dcorr))
        for k in range(flat_covs.shape[0]):
            corri[k] = flat_covs[k, i, :, :]/flat_means[k, i]**2
        corrs[i] = np.median(corri, axis=0)
    gain_tab['gains'] = gains
    gain_map = np.zeros((conf.mos_shape))
    for i in range(conf.nchan):
        gain_map[conf.row[i]*conf.ny[i]:(conf.row[i]+1)*conf.ny[i], conf.col[i]*conf.nx[i]:(conf.col[i]+1)*conf.nx[i]] = gains[i]
    fits.writeto(dout + "/gain.fits", data=gain_map, overwrite=True)
    gain_tab.write(dout + "/gain.tab", format='ipac', overwrite=True)
    fits.writeto(dout + "/corr_coeff.fits", data=corrs, overwrite=True)
    return tab_datas, gains

def get_current(conf, fs, din, dout, bias_num, gnums, tabs, gains, threshold=5e-9, nl_threshold1=0.1, nl_threshold2=0.5, tshutter=0, current_flag=False):
    """Fit the detector response vs. (current-corrected) exposure time.

    For each channel the mean signal is fitted linearly against the
    effective exposure time; points whose non-linearity deviates from the
    median by more than the applicable nl_threshold are excluded and the
    fit is repeated. Non-linearity and full-well results are appended to
    the per-channel PTC tables and written to dout together with
    diagnostic plots and nl_tab.tab.

    When current_flag is set, the monitor photodiode log
    (din/monitor_sphere.txt) is used to normalise exposure times by the
    measured light-source current; otherwise a constant current of 1 is
    assumed.
    """
    ftxt = din + "/monitor_sphere.txt"
    print("getting catalog")
    fs_flat = fs[bias_num:]
    print("plotting")
    if current_flag:
        cat = read_monitor(ftxt, smooth=0, montype='sphere')
        plt.scatter(cat['mjd'], cat['value'], s=2)
        flat_mjds = []
        for f in fs_flat:
            flat_mjd = Time(fits.open(f)[0].header['DATE']).mjd
            flat_mjds.append(flat_mjd)
            plt.axvline(x=flat_mjd, linestyle='--')
        plt.savefig(dout + "/monitor.jpg", bbox_inches='tight')
        plt.close()

    print("getting rsp data")
    if current_flag:
        # NOTE(review): np.percentile(..., 0.9) is the 0.9th percentile, not
        # the 90th -- confirm whether 90 was intended here.
        threshold = np.percentile(cat['value'], 0.9)
        # dark level of the monitor: mean of samples before the first flat
        bias = np.mean(cat['value'][cat['mjd'] < flat_mjds[0]])
        if np.isnan(bias):
            # fall back to sigma-clipped sub-threshold monitor values
            # (fixed: the original averaged the boolean mask, not the values)
            bias_data = cat['value'][cat['value'] < threshold]
            bias_data, _, _ = sigmaclip(bias_data, low=4, high=4)
            bias = np.mean(bias_data)
        print("bias", bias)
    count = 0
    texps, currents = [], []
    for gnum in gnums:
        texp = fits.open(fs_flat[count])[0].header['EXPTIME']
        texps.append(texp)
        if not current_flag:
            # no monitor data: unit current, skip the whole level
            currents.append(1)
            count += gnum
            continue
        current = 0
        cnum = 0
        for i in range(gnum):
            # monitor samples between this exposure and the next one
            if count != len(fs_flat)-1:
                idx = (cat['mjd'] > flat_mjds[count])*(cat['mjd'] < flat_mjds[count+1])*(cat['value'] > threshold)
                idx *= (cat['value'] > 0.8*np.mean(cat['value'][idx]))
            else:
                idx = (cat['mjd'] > flat_mjds[count])*(cat['value'] > threshold)
                idx *= (cat['value'] > 0.8*np.mean(cat['value'][idx]))
            print("datanum", np.sum(idx))
            if np.sum(idx) != 0:
                current += np.mean(cat['value'][idx]) - bias
                cnum += 1
            count += 1
        if cnum == 0:
            current = np.nan
        else:
            current /= cnum
        currents.append(current)
    print("currents", currents)
    texps = np.array(texps)
    print("texps", len(texps))
    print(len(tabs[0]['mean']))
    currents = np.array(currents)
    # effective exposure time corrected by relative source brightness
    Texps = (texps - tshutter)*currents/np.nanmean(currents)
    # discard the current correction where it deviates strongly (>20%)
    # from the median current
    invalid_idx = (currents < 0.8*np.median(currents))
    Texps[invalid_idx] = texps[invalid_idx] - tshutter
    invalid_idx = (currents > 1.2*np.median(currents))
    Texps[invalid_idx] = texps[invalid_idx] - tshutter
    fullwells = []
    print("writting tables")
    for i in range(conf.nchan):
        tab = tabs[i]
        tab['texp'] = texps
        tab['texp'].unit = 's'
        tab['currents'] = currents
        tab['currents'].unit = 'A'  # fixed: was .unitt, so the unit never got set
        tab['texp_modi'] = Texps
        tab['texp_modi'].unit = 's'
        print(dout + "/ptc_result_ch%i.tab" % (i+1))
        print("table", tab)
        # first-pass linear fit on all non-NaN points
        dataidx = ~np.isnan(Texps)
        Texps_data = Texps[dataidx]
        mean_data = tab['mean'][dataidx]
        params, _ = curve_fit(linear_func, Texps_data, mean_data, p0=[np.nanmedian(mean_data/Texps_data)])
        slope = params[0]
        # percentage non-linearity relative to the first-pass fit
        nl = (tab['mean'] - slope*Texps)/(slope*Texps)*100
        nl_median = np.nanmedian(nl)
        # tighter threshold when there are many exposure levels
        if len(Texps) > 20:
            nl_threshold = nl_threshold1
        else:
            nl_threshold = nl_threshold2
        idx = abs(nl - nl_median) < nl_threshold
        Texps_data = Texps[idx]
        tab['rsp_fit'] = idx
        mean_data = tab['mean'][idx]
        # second-pass fit on the linearity-selected points only
        params, _ = curve_fit(linear_func, Texps_data, mean_data, p0=[np.nanmedian(mean_data/Texps_data)])
        slope = params[0]
        fig, axes = plt.subplots(2, 1, figsize=(5, 10))
        axes[0].scatter(Texps, tab['mean'], s=5, label='data', c='r')
        axes[0].scatter(Texps_data, mean_data, label='data_fit', c='g')
        axes[0].plot(Texps, slope*Texps, linestyle='--', c='k')
        axes[1].plot(mean_data, (mean_data - slope*Texps_data)/(slope*Texps_data)*100, marker='o')
        tab['linear_data'] = slope*Texps
        tab['nl_rsp'] = (tab['mean'] - slope*Texps)/(slope*Texps)*100
        tab['nl_rsp'].unit = '%'
        tab.write(dout + "/ptc_result_ch%i.tab" % (i+1), format='ipac', overwrite=True)
        # full well: highest signal level whose non-linearity stays within 2%
        idx = len(tab) - 1
        for j in range(len(tab) - 1):
            if abs(tab['nl_rsp'][j]) > 2:
                idx -= 1
            else:
                fullwell_ADU = tab['mean'][idx]
                fullwells.append(fullwell_ADU*gains[i])
                break
        print(dout + "/plot/rsp_ch%i.jpg" % (i+1))
        plt.savefig(dout + "/plot/rsp_ch%i.jpg" % (i+1), bbox_inches='tight')
        plt.close()
    fullwells = np.array(fullwells)
    nl_tab = Table()
    nl_tab['ch'] = np.arange(conf.nchan) + 1
    nl_tab['fullwell'] = fullwells
    nl_tab['fullwell'].unit = 'e'
    nl_tab.write(dout + "/nl_tab.tab", format='ipac', overwrite=True)

def grab_qy_data(dpro, waves, conf):
    """Collect relative quantum-yield curves across wavelengths.

    The gain measured at each wavelength is divided into the gain at a
    reference wavelength (850 nm when present in waves, else 650 nm); the
    per-channel ratios are tabulated (dpro/qy.tab) and plotted
    (dpro/qy.jpg).
    """
    qy_result = np.zeros((len(waves), conf.nchan))
    qy_tab = Table()
    qy_tab['wave'] = waves
    wave_ref = 850 if 850.0 in waves else 650
    dout_ref = glob(dpro + "/*_" + str(wave_ref) + "*")[0]
    gain_tab_ref = Table.read(dout_ref + "/gain.tab", format='ipac')
    gain_ref = gain_tab_ref['gains']
    print("gain_ref", gain_ref)

    for i, wave in enumerate(waves):
        print("wave", wave)
        dout = glob(dpro + "/*_" + str(wave))[0]
        gain_tab = Table.read(dout + "/gain.tab", format='ipac')
        print("gains", gain_tab['gains'])
        qy_result[i, :] = gain_ref / gain_tab['gains']
    for ch in range(conf.nchan):
        qy_tab['ch' + str(ch + 1)] = qy_result[:, ch]
        plt.plot(waves, qy_result[:, ch], marker='o', label='ch' + str(ch + 1))
    plt.legend(frameon=False)
    plt.savefig(dpro + "/qy.jpg", bbox_inches='tight')
    qy_tab.write(dpro + "/qy.tab", format='ipac', overwrite=True)

def run(ddata,dpro,dcorr,chiptype,qy_all_wave,fmin_ratio=0.02,fmax_ratio=0.8,bias_num=None,gnums=None,key_word="*.fits",height=None,width=None,rsp_process=False,current_flag=False,tshutter=0.03,dir_sort=False):
    """Top-level driver: run the PTC analysis on every subdirectory of ddata.

    Each data directory is processed independently: files are median-sorted
    (cached in file_median.tab), gain/correlation products are computed
    (run_gain_corr) and the response linearity is fitted (get_current).
    When the frames carry DATETAG/WAVELEN headers ("lab suite" data) a
    quantum-yield summary across wavelengths is produced at the end.

    width and rsp_process are accepted for interface compatibility but are
    currently unused.
    """
    print("ddata", ddata)
    print("dirsortin", dir_sort)
    # keep only subdirectories (fixed: the original removed items from the
    # list while iterating over it, which can skip entries)
    ds = [d for d in glob(ddata + "/*") if not os.path.isfile(d)]
    print("ds", ds)
    if len(ds) == 0:
        raise Exception("no directory found in " + ddata)
    # conf = load_imgconfig(chiptype,height,width)
    conf = load_imgconfig(chiptype, height)
    print("ds", ds)
    print("dir_sort", dir_sort)
    waves = []
    for din in ds:
        print("din", din)
        f = glob(din + "/*.fits")[0]
        header = fits.open(f)[0].header
        if not dir_sort:
            try:
                tag, wave = header['DATETAG'], header['WAVELEN']
                waves.append(wave)
                dout = dpro + "/ptc_" + tag + "_" + str(wave)
                lab_suite = True
            except KeyError:
                # frames without lab-suite headers: name output after the directory
                dout = dpro + "/" + din.split('/')[-1]
                lab_suite = False
        else:
            dout = dpro + "/" + din.split('/')[-1]
            print("din", din)
            print("dout", dout)
            lab_suite = False
        create_folder(dout + "/")
        if bias_num is None or gnums is None:
            bn, gns = get_bnum_gnum(din)
        else:
            bn = bias_num
            gns = gnums
        if not os.path.exists(dout + "/file_median.tab"):
            fs, medians = parallel_fits_median(din, key_word)
            if dir_sort:
                # sort by median, then by the 2-digit group id in the filename
                ggs = []
                for f in fs:
                    print("group", f.split('/')[-1], int(f.split('/')[-1][5:7]))
                    ggs.append(int(f.split('/')[-1][5:7]))
                ggs = np.array(ggs)
                tab_sort = Table()
                tab_sort['group'] = ggs
                tab_sort['fs'] = fs
                tab_sort['median'] = medians
                tab_sort.sort(['median', 'group'])
                fs = tab_sort['fs'].data
                medians = tab_sort['median']
            else:
                fs = fs[np.argsort(medians)]
            medians = np.sort(medians)
            tab = Table()
            tab['file'] = fs
            tab['median'] = medians
            tab.write(dout + "/file_median.tab", format='ipac')
        else:
            # reuse the cached median-sorted file list
            tab = Table.read(dout + "/file_median.tab", format='ipac')
            fs, medians = tab['file'], tab['median']
        print("biasnum", bn, "gnums", gns)
        ptc_tabs, gains = run_gain_corr(conf, din, dout, dcorr, key_word, fmin_ratio, fmax_ratio, bn, gns, fs)
        print("din", din)
        get_current(conf, fs, din, dout, bn, gns, ptc_tabs, gains, current_flag=current_flag, tshutter=tshutter)
    if not qy_all_wave and lab_suite:
        waves = np.array(waves)
    elif lab_suite:
        # gather every wavelength already present in dpro, not just this run's
        waves = []
        ds = glob(dpro + "/ptc*")
        print(dpro + "/ptc*")
        for d in ds:
            try:
                waves.append(float(d.split('_')[-1]))
            except ValueError:
                # directory name does not end in a wavelength
                continue
        print("waves", waves)
        waves = np.array(waves)
    if lab_suite:
        waves = np.sort(waves)
        grab_qy_data(dpro, waves, conf)


def str2bool(value):
    """Parse a boolean command-line value ('true'/'false'/'1'/'0'/...).

    argparse's type=bool treats ANY non-empty string -- including the
    literal "False" -- as True, so boolean options need this explicit
    converter.
    """
    if isinstance(value, bool):
        return value
    lowered = value.strip().lower()
    if lowered in ("true", "t", "yes", "y", "1"):
        return True
    if lowered in ("false", "f", "no", "n", "0"):
        return False
    raise ValueError("expected a boolean value, got %r" % value)


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument('datadir', type=str)
    parser.add_argument('outdir', type=str)
    parser.add_argument('dcorr', type=int)
    parser.add_argument('--chiptype', '-c', type=str)
    # boolean flags use str2bool: type=bool would parse "--qa False" as True
    parser.add_argument('--qy_all_wave', '-qa', type=str2bool, default=True)
    parser.add_argument('--fmin_ratio', '-fmin', type=float, default=0.02)
    parser.add_argument('--fmax_ratio', '-fmax', type=float, default=0.8)
    parser.add_argument('--bias_num', '-bn', type=int, default=None)
    parser.add_argument('--group_nums', '-gn', type=str, default=None)
    parser.add_argument('--key_word', '-ky', type=str, default="*.fits")
    parser.add_argument('--height', '-he', type=int, default=None)
    parser.add_argument('--width', '-w', type=int, default=None)
    parser.add_argument('--current_flag', '-cf', type=str2bool, default=False)
    parser.add_argument('--tshutter', '-ts', type=float, default=0.03)
    parser.add_argument('--dir_sort', '-ds', type=str2bool, default=False)
    args = parser.parse_args()

    # "--group_nums 2,2,3" -> [2, 2, 3]
    if args.group_nums is not None:
        gnums = [int(s) for s in args.group_nums.split(',')]
    else:
        gnums = None
    print("args.dir_sort", args.dir_sort)
    run(args.datadir, args.outdir, args.dcorr, args.chiptype, args.qy_all_wave,
        fmin_ratio=args.fmin_ratio, fmax_ratio=args.fmax_ratio, bias_num=args.bias_num,
        gnums=gnums, key_word=args.key_word, width=args.width, height=args.height,
        current_flag=args.current_flag, tshutter=args.tshutter, dir_sort=args.dir_sort)