"""
PTC (Photon Transfer Curve) low-level implementation
====================================================

Core functions for building, fitting and plotting PTC data:
  - get_ptcdata: build the per-channel / per-sub-region mean-variance table
    from paired exposures (optional binning, flat-field correction and
    monitoring-diode normalisation)
  - fit_ptc:     estimate the gain with an ODR linear fit; compute the full
    well and non-linearity metrics; supports residual clipping (clipflag)
    and an optional fixed intercept (fixflag)
  - plot_ptc:    visualise the fit and the data as a four-panel figure
    (DN-variance, PTC non-linearity, time-signal, response non-linearity)

Helpers:
  - _get_boundary: split a length into evenly spaced boxes (returns the
    start/stop indices of each sub-region)
  - _bindata:      n x n spatial binning of an image
  - smooth_line/_add_thresh_lines: plotting helpers
"""
from scipy.signal import savgol_filter
from tqdm import trange
import numpy as np
from astropy.io import fits
from astropy.table import Table
from astropy.time import Time
from scipy import odr
from scipy.ndimage import median_filter
from scipy.stats import trim_mean
from scipy.stats import sigmaclip
from matplotlib import pyplot as plt
from matplotlib.ticker import MultipleLocator


def _get_boundary(length, box):
    """将长度 length 尽量均匀地分割成多个宽度为 box 的子区间

    返回值：两个等长数组（x1, x2），分别为每个子区间的起止位置（左闭右开）
    分割策略会在两端留出尽量均匀的间隙，使子区间尽可能分布均匀。
    """
    n = length // box
    res = length - box * n
    gap = res // (n + 1)

    res2 = res - gap * (n + 1)
    x = [gap] * n
    for i in range(res2):
        x[i] = x[i] + 1
    for i in range(len(x) - 1):
        x[i+1] = x[i+1] + box

    x1 = np.cumsum(x)
    x2 = x1 + box
    return x1, x2


def _bindata(image, n):
    """对二维图像做 n×n 空间 binning 并取均值

    中心裁剪使得宽高能被 n 整除后再重排、平均，返回降采样后的图像。
    """
    ny, nx = image.shape

    nx2 = nx // n
    x_res = nx - nx2 * n
    x1 = x_res // 2
    x2 = x_res - x1

    ny2 = ny // n
    y_res = ny - ny2 * n
    y1 = y_res // 2
    y2 = y_res - y1

    data = image[y1:ny-y2, x1:nx-x2]
    data = data.reshape((ny2, n, nx2, n)).mean(axis=1).mean(axis=-1)
    return data


# def get_ptcdata(flist1, flist2, region=None, boxsize=100, binsize=4, channel=0, flat_flag=False, flat_file=None):
def get_ptcdata(flist1, flist2, region=None, boxsize=100, binsize=4, channel=0, flat_flag=False, flat_file=None, \
                monitor_mjd=None, monitor_flux=None, monitor_flux_mean=None):
    """Build the PTC statistics table from paired exposure sequences.

    Parameters
    ----------
    flist1, flist2 : list[str]
        Paths of paired exposure images (the two frames of each exposure
        group correspond one-to-one).
    region : [x1, x2, y1, y2] or None
        ROI used for the statistics; the full frame when None.
    boxsize : int
        The ROI is divided into boxsize x boxsize sub-regions for robust
        mean/variance statistics.
    binsize : int
        Additional n x n binning applied to each sub-region to stabilise
        the variance estimate.
    channel : int
        Channel number of the current data (written into the output table).
    flat_flag, flat_file :
        Whether to normalise the pixel response with a flat field;
        ``flat_file`` gives the path of the flat-field image.
    monitor_mjd, monitor_flux, monitor_flux_mean :
        Monitoring-diode times (MJD), fluxes and stable-phase mean flux.
        When all three are given, each frame is rescaled by the diode flux
        near its exposure time; otherwise no normalisation is applied.

    Returns
    -------
    astropy.table.Table
        Columns: channel, imgid, regid, x, y (sub-region centres), t,
        dn_raw/var_raw, dn_bin/var_bin.  Extra "aggregate" rows with
        regid == 0 give trimmed-mean statistics per exposure.

    Processing
    ----------
    - Read the ROI; the first exposure pair serves as the bias reference,
      later frames are bias-subtracted before averaging (m1) and
      differencing (m2).
    - If ``flat_flag``, m1/m2 are divided by the flat field.
    - For every sub-region:
          flux = trimmed_mean(m1)
          varn = var(sigmaclip(m2, 4 sigma)) / 2
      and likewise for the binned r1_bin/r2_bin (flux_bin, varn_bin, the
      latter multiplied by binsize**2).
    - Within each sub-region the variance of the earliest imgid (i.e. the
      bias variance) is subtracted, and t is shifted to start at zero.
    """
    if flat_flag == True:
        flat_field = fits.getdata(flat_file).astype('float')
        flat_field /= np.mean(flat_field)
    # split the whole region into several subregions
    ny, nx = fits.getdata(flist1[0]).shape
    if region is not None:
        x1, x2, y1, y2 = region
    else:
        x1, x2, y1, y2 = 0, nx, 0, ny
    reg_x1, reg_x2 = _get_boundary(x2-x1, boxsize)
    reg_y1, reg_y2 = _get_boundary(y2-y1, boxsize)
    reg_x1, reg_y1 = np.meshgrid(reg_x1, reg_y1)
    reg_x2, reg_y2 = np.meshgrid(reg_x2, reg_y2)
    reg_x1 = reg_x1.ravel()
    reg_x2 = reg_x2.ravel()
    reg_y1 = reg_y1.ravel()
    reg_y2 = reg_y2.ravel()
    nreg = len(reg_x1)

    # allocate space for output
    n = len(flist1)
    imgid = np.zeros((n, nreg), dtype=int)
    regid = np.zeros((n, nreg), dtype=int)
    xcent = np.zeros((n, nreg), dtype=np.float32)
    ycent = np.zeros((n, nreg), dtype=np.float32)
    t = np.zeros((n, nreg))
    flux = np.zeros((n, nreg))
    varn = np.zeros((n, nreg))
    flux_bin = np.zeros((n, nreg))
    varn_bin = np.zeros((n, nreg))

    # iterate exposure group by exposure group
    for i in trange(n, desc='processing PTC images'):
        with fits.open(flist1[i]) as img1, fits.open(flist2[i]) as img2:
            date1 = img1[0].header['DATE']
            date2 = img2[0].header['DATE']
            mjd1 = Time(date1).mjd
            mjd2 = Time(date2).mjd

            if 'IMGINDEX' in img1[0].header:
                dd = img1[0].header['IMGINDEX']
            else:
                dd = i + 1
            # for the last pair, dump the flat-corrected frames to disk —
            # presumably for visual inspection (TODO confirm it is needed)
            if i==n-1:
                if flat_flag == True:
                    fits.writeto("ptc_flat.fits",img1[0].data/flat_field,overwrite=True)
                    fits.writeto("ptc_flat_1.fits",img2[0].data/flat_field,overwrite=True)
            d1 = img1[0].data[y1:y2, x1:x2].astype(float)
            d2 = img2[0].data[y1:y2, x1:x2].astype(float)

            if i==0:
                # first pair: its mean is the bias reference
                m1 = (d1 + d2) / 2
                bias = m1.copy()
                m2 = d1 - d2
            else:
                # NOTE(review): this m1 is a dead store — it is overwritten
                # below by the bias-subtracted average
                m1 = (d1 + d2) / 2
                m1_a = d1 - bias
                m1_b = d2 - bias
                # the first group serves as bias; both later frames subtract it

                # If monitoring-diode (sphere) data are supplied, normalise by
                # the diode current around each exposure: median diode flux
                # within +/-20 s of each frame, scaled by its ratio to the
                # stable-phase mean.
                if (monitor_mjd is not None) and (monitor_flux is not None) and (monitor_flux_mean is not None):
                    try:
                        dt = 20.0 / 86400.0
                        idx1 = np.logical_and(monitor_mjd >= mjd1 - dt, monitor_mjd <= mjd1 + dt)
                        idx2 = np.logical_and(monitor_mjd >= mjd2 - dt, monitor_mjd <= mjd2 + dt)
                        flux1 = np.median(monitor_flux[idx1]) if np.any(idx1) else np.nan
                        flux2 = np.median(monitor_flux[idx2]) if np.any(idx2) else np.nan
                        if np.isfinite(flux1) and flux1 > 0 and np.isfinite(flux2) and flux2 > 0:
                            scale1 = monitor_flux_mean / flux1
                            scale2 = monitor_flux_mean / flux2
                            # only rescale when the diode reading is close to
                            # the stable phase (> 80%), to avoid over-correction
                            if (flux1 > 0.8 * monitor_flux_mean) and (flux2 > 0.8 * monitor_flux_mean):
                                m1_a = m1_a * scale1
                                m1_b = m1_b * scale2
                    except Exception:
                        # monitoring data unusable: skip the normalisation and
                        # proceed with the raw bias-subtracted frames
                        pass

                # average after the (optional) normalisation
                m1 = (m1_a + m1_b)/2

                if flat_flag == True:
                    m1 /= flat_field[y1:y2, x1:x2]
                    m2 = (d1 - d2)/flat_field[y1:y2, x1:x2]
                else:
                    m2 = d1 - d2

            # iterate over the sub-regions, accumulating mean and variance
            for j in range(nreg):
                imgid[i, j] = dd
                regid[i, j] = j + 1
                xcent[i, j] = x1 + (reg_x1[j] + reg_x2[j]) / 2
                ycent[i, j] = y1 + (reg_y1[j] + reg_y2[j]) / 2
                t[i, j] = img1[0].header['EXPTIME']
                r1 = m1[reg_y1[j]:reg_y2[j], reg_x1[j]:reg_x2[j]]
                r2 = m2[reg_y1[j]:reg_y2[j], reg_x1[j]:reg_x2[j]]
                # 10% trimmed mean suppresses outliers; the difference image is
                # 4-sigma clipped before the variance estimate, then divided by
                # 2 (a two-frame difference carries twice the single-frame variance)
                flux[i, j] = trim_mean(r1, 0.1, axis=None)
                varn[i, j] = sigmaclip(r2, 4, 4)[0].var(ddof=1) / 2
                r1_bin = _bindata(r1, binsize)
                r2_bin = _bindata(r2, binsize)
                # the binned variance is multiplied by binsize**2 to return to
                # the per-pixel scale
                flux_bin[i, j] = trim_mean(r1_bin, 0.1, axis=None)
                varn_bin[i, j] = sigmaclip(r2_bin, 4, 4)[0].var(ddof=1) / 2 * binsize ** 2

    # output table
    tab = Table()
    tab['channel'] = [channel] * (n * nreg)
    tab['imgid'] = imgid.ravel()
    tab['regid'] = regid.ravel()
    tab['x'] = xcent.ravel()
    tab['y'] = ycent.ravel()
    tab['t'] = t.ravel().astype(np.float32)
    tab['dn_raw'] = flux.ravel().astype(np.float32)
    tab['var_raw'] = varn.ravel().astype(np.float32)
    tab['dn_bin'] = flux_bin.ravel().astype(np.float32)
    tab['var_bin'] = varn_bin.ravel().astype(np.float32)

    # subtract bias variance: within each sub-region, remove the variance of
    # the earliest imgid; the time axis is zeroed at the same point
    regid = np.unique(tab['regid'])
    for i in regid:
        index = tab['regid'] == i
        ind = np.argmin(tab['imgid'][index])
        tab['var_raw'][index] = tab['var_raw'][index] - tab['var_raw'][index][ind]
        tab['var_bin'][index] = tab['var_bin'][index] - tab['var_bin'][index][ind]
        tab['t'][index] = tab['t'][index] - tab['t'][index][ind]

    # add trimmed mean value for each exposure time (imgid)
    # one aggregate row per exposure (regid = 0), used as the combined track
    # by the fitting and plotting code
    imgid = np.unique(tab['imgid'])
    for i in imgid:
        index = tab['imgid'] == i
        fall_raw = trim_mean(tab['dn_raw'][index], 0.1)
        vall_raw = trim_mean(tab['var_raw'][index], 0.1)
        fall_bin = trim_mean(tab['dn_bin'][index], 0.1)
        vall_bin = trim_mean(tab['var_bin'][index], 0.1)
        x = np.mean(tab['x'][index])
        y = np.mean(tab['y'][index])
        t = tab['t'][index][0]
        tab.add_row([channel, i, 0, x, y, t, fall_raw, vall_raw, fall_bin, vall_bin])

    tab.sort(['imgid', 'regid'])

    return tab


def fit_ptc(tab_in, tag_flux='dn', tag_var='var', tag_t='t', fullwell=7e4, clipflag=False, fixflag=True):
    """Fit the PTC linearly and derive gain / full-well / non-linearity metrics.

    Parameters
    ----------
    tab_in : Table
        Output of ``get_ptcdata`` (or a subset of it).
    tag_flux, tag_var, tag_t : str
        Column names to use (e.g. 'dn_bin' and 'var_bin').
    fullwell : float
        Nominal full well (electrons) used to define the 10% and 20%-80%
        linear ranges.
    clipflag : bool
        If True, iteratively clip points whose relative fit residual
        exceeds 5% and refit until the mask stabilises.
    fixflag : bool
        If True the intercept is fixed at 0 (slope-only fit); otherwise
        the intercept is fitted as well.

    Returns
    -------
    (cat, tab)
        ``cat`` is a one-row result table (gain, full, nl*, rsp, ...);
        ``tab`` is the data table with 'valid'/'disp' flags attached.

    Raises
    ------
    Exception
        If fewer than 4 usable rows remain, or the fitted slope is zero.

    Steps
    -----
    1) Locate the turning point fmax (before the peak of DN or variance)
       and build the display mask 'disp' and the fitting mask 'valid'.
    2) ODR linear fit: var ~ (1/gain) * dn (+ intercept).
    3) Estimate the full well fw by inter/extrapolating to the +/-5%
       non-linearity threshold.
    4) Compute the PTC non-linearity (inside the ptc10/ptc20 ranges) and
       the response non-linearity (from the time-signal relation).
    """
    # drop the bias point (t == 0, imgid == 1) before fitting
    index = np.logical_and(tab_in['t'] > 0, tab_in['imgid'] > 1)
    tab = tab_in[index]
    tab.sort('imgid')
    n = len(tab)
    if n < 4:
        raise Exception('No valid data')
    mod = odr.polynomial(1)
    x, y, t = tab[tag_flux].data, tab[tag_var].data, tab[tag_t].data

    # decide where the turning point is (and build the disp/valid masks)
    if ('valid' in tab.colnames) and ('disp' in tab.colnames):
        fmax = max(tab[tag_flux][tab['disp']])
    else:
        if n > 10:
            # median-filter the signal for a robust peak position
            dn = median_filter(x, 3)
            peak_dn = np.argmax(dn)
            peak_var = np.argmax(y)
            peak = min(peak_dn, peak_var)
            tab['disp'] = np.zeros(n, dtype=bool)
            tab['disp'][:peak+3] = True
            fmax = x[peak]
            tab['valid'] = (x <= fmax * 0.8)*(x >= fmax * 0.02)
        else:
            peak_dn = np.argmax(x)
            peak_var = np.argmax(y)
            peak = min(peak_dn, peak_var)
            tab['disp'] = np.ones(n, dtype=bool)
            fmax = x[peak]
            tab['valid'] = (x <= fmax)

    # fit gain: ODR linear fit of var against dn, weights ~ 1/dn^2 and 1/var^2
    if not clipflag:
        xfit, yfit = x[tab['valid']], y[tab['valid']]
        data = odr.Data(xfit, yfit, wd=1 / np.power(xfit, 2), we=1 / np.power(yfit, 2))
        if fixflag:
            fitter = odr.ODR(data, mod, ifixb=[0, 1], beta0=[0, np.median(yfit / xfit)])
        else:
            fitter = odr.ODR(data, mod, ifixb=[1, 1], beta0=[0, np.median(yfit / xfit)])
        res = fitter.run()
        if res.beta[1] == 0:
            raise Exception('invalid fitting: slope is 0')
        gain = 1 / res.beta[1]
        if gain == 0:
            raise Exception('invalid fitting: gain is 0')
        gain_err = res.sd_beta[1]
        tab['gain_err'] = gain_err
        if not fixflag:
            intercep = res.beta[0]
            intercep_err = res.sd_beta[0]
            tab['intercep'] = intercep
            tab['intercep_err'] = intercep_err
    else:
        # iterative clipping: refit until the 5%-residual mask stabilises
        valid = np.array([True]*len(tab['valid']))
        cliptime = 0
        while True:
            xfit, yfit = x[valid], y[valid]
            data = odr.Data(xfit, yfit, wd=1 / np.power(xfit, 2), we=1 / np.power(yfit, 2))
            if fixflag:
                fitter = odr.ODR(data, mod, ifixb=[0, 1], beta0=[0, np.median(yfit / xfit)])
            else:
                fitter = odr.ODR(data, mod, ifixb=[1, 1], beta0=[0, np.median(yfit / xfit)])
            res = fitter.run()
            if res.beta[1] == 0:
                raise Exception('invalid fitting: slope is 0')
            gain = 1 / res.beta[1]
            if gain == 0:
                raise Exception('invalid fitting: gain is 0')
            # relative residual of each fitted point against the current line
            if fixflag:
                valid_new = abs((yfit-xfit/gain)/(xfit/gain))<0.05
            else:
                valid_new = abs((yfit-xfit/gain-res.beta[0])/(xfit/gain))<0.05
            if np.all(valid_new == valid[valid]):
                break
            valid[valid] = valid_new.copy()
            cliptime += 1
            if cliptime == 50:
                print("warning: clipping is not successful")
                break
        tab['valid'] = valid
        gain_err = res.sd_beta[1]
        tab['gain_err'] = gain_err
        if not fixflag:
            intercep = res.beta[0]
            intercep_err = res.sd_beta[0]
            tab['intercep'] = intercep
            tab['intercep_err'] = intercep_err
    # pre-defined regions: 10% and 20%-80% linear ranges (in DN) used for the
    # non-linearity statistics
    if ('ptc20' not in tab.colnames) or ('ptc10' not in tab.colnames):
        fw_use = fullwell / gain
        tab['ptc10'] = (x < fw_use) * (x > fw_use * 0.1)
        tab['ptc20'] = (x < fw_use * 0.8) * (x > fw_use * 0.2)

    fw = np.nan
    nonlin_ptc_10 = np.nan
    nonlin_ptc_20 = np.nan
    t_factor = np.nan
    nonlin_rsp_10 = np.nan
    nonlin_rsp_20 = np.nan
    if tab['valid'].sum() > 3:
        # full well: inter/extrapolate at the +/-5% non-linearity threshold
        # in the high-signal range
        if fixflag:
            nonlin = (y / (x / gain) - 1) * 100
        else:
            nonlin = ((y - intercep) / (x / gain) - 1) * 100
        if not clipflag:
            index = (x > fmax * 0.3) * (x <= fmax) * (np.abs(nonlin) > 5)
        else:
            index = (x > fmax * 0.6) * (x <= fmax) * (np.abs(nonlin) > 5) * tab['valid'].data
        if index.sum() == 0:
            fw = fmax
        else:
            p2 = min(np.arange(len(x), dtype=int)[index])
            p1 = p2 - 1
            if nonlin[p2] > 5:
                fw = np.interp(5, [nonlin[p1], nonlin[p2]], [x[p1], x[p2]])
            else:
                fw = np.interp(-5, [nonlin[p1], nonlin[p2]], [x[p1], x[p2]])

        # PTC non-linearity: max |nonlin| inside each linear range
        if tab['ptc10'].sum() > 0:
            nonlin_ptc_10 = np.max(abs(nonlin[np.where((tab['ptc10'].data)*(tab['valid'].data))]))
        if tab['ptc20'].sum() > 0:
            nonlin_ptc_20 = np.max(abs(nonlin[np.where((tab['ptc20'].data)*(tab['valid'].data))]))

        # fit the time-signal curve to estimate the response slope, then
        # evaluate the response non-linearity against that line
        if np.isfinite(t).sum() == len(t):
            tfit, xfit = t[tab['ptc10']] - t[0], x[tab['ptc10']] - x[0]
            data = odr.Data(tfit, xfit, wd=1 / np.power(tfit, 2), we=1 / np.power(xfit, 2))
            fitter = odr.ODR(data, mod, ifixb=[0, 1], beta0=[0, np.median(xfit / tfit)])
            res = fitter.run()
            t_factor = res.beta[1]
            nonlin = np.zeros(len(x))
            nonlin[1:] = ((x[1:] - x[0]) / (t_factor * (t[1:] - t[0])) - 1) * 100
            nonlin_rsp_10 = np.max(abs(nonlin[np.where((tab['ptc10'].data)*(tab['valid'].data))]))
            nonlin_rsp_20 = np.max(abs(nonlin[np.where((tab['ptc20'].data)*(tab['valid'].data))]))

    cat = Table()
    cat['channel'] = [tab['channel'][0], ]
    cat['regid'] = tab['regid'][0]
    cat['gain'] = np.float32(gain)
    cat['gain'].unit = 'ph/adu'
    cat['gain_err'] = np.float32(gain_err)
    cat['gain_err'].unit = 'ph/adu'
    if not fixflag:
        cat['intercep'] = np.float32(intercep)
        cat['intercep'].unit = 'adu^2'
        cat['intercep_err'] = np.float32(intercep_err)
        cat['intercep_err'].unit = 'adu^2'
    cat['full_dn'] = np.float32(fw)
    cat['full_dn'].unit = 'adu'
    cat['full'] = np.float32(fw * gain)
    cat['full'].unit = 'ph'
    cat['nlptc20'] = np.float32(nonlin_ptc_20)
    cat['nlptc20'].unit = '%'
    cat['nlptc10'] = np.float32(nonlin_ptc_10)
    cat['nlptc10'].unit = '%'
    cat['rsp'] = np.float32(t_factor)
    cat['rsp'].unit = 'adu/s'
    cat['nlrsp20'] = np.float32(nonlin_rsp_20)
    cat['nlrsp20'].unit = '%'
    cat['nlrsp10'] = np.float32(nonlin_rsp_10)
    cat['nlrsp10'].unit = '%'

    return cat, tab


def _add_thresh_lines(xr, thresh1, thresh2):
    """Draw non-linearity guide lines on the current axes.

    A solid grey zero line, dashed lines at +/-thresh1 and dotted lines
    at +/-thresh2, all behind the data (zorder=-1).
    """
    plt.plot(xr, [0, 0], color='grey', zorder=-1)
    for level, style in ((thresh1, '--'), (thresh2, 'dotted')):
        plt.plot(xr, [level, level], color='grey', ls=style, zorder=-1)
        plt.plot(xr, [-level, -level], color='grey', ls=style, zorder=-1)


def smooth_line(x, y):
    """对曲线做 Savitzky-Golay 平滑，并剔除 4σ 之外的异常点

    返回剔除后的 (xx, yy)；用于绘图时辅助生成更平滑的参考线。
    """
    smth = savgol_filter(y, 7, 2)
    diff = y - smth
    clip = sigmaclip(diff, 4, 4)[0]
    index = (diff > clip.mean() - clip.std() * 5) * (diff < clip.mean() + clip.std() * 5)
    xx = x[index]
    yy = y[index]
    yy = savgol_filter(yy, 7, 2)
    return xx, yy


def plot_ptc(res, data, ffig, raw=None, tag_flux='dn', tag_var='var', fixflag=True):
    """Visualise the PTC fit and data as a four-panel figure.

    Panels:
        (1) DN-variance scatter + fitted line; (2) PTC non-linearity;
        (3) time-signal scatter + linear fit;  (4) response non-linearity.
    Parameters
    ----------
    res :
        One-row result table from ``fit_ptc``.
    data :
        Data table used for the fit (with valid/disp/ptc10/ptc20 flags).
    ffig : str
        Output figure path.
    raw :
        Optional raw scatter shown as a light-grey background.
    tag_flux, tag_var : str
        Column names to plot (must match the fit).
    fixflag : bool
        Must match the fit; decides whether the line includes an intercept.
    """
    ptc_thresh = 5
    ptc_thresh2 = 3
    resp_thresh = 2
    resp_thresh2 = 1
    # nominal full well in ke-, used only for the legend range labels
    # (TODO confirm it should not be derived from res['full'] instead)
    fw_nominal_e = 70
    plt.ioff()
    plt.figure(figsize=[9, 7.5])

    gain = res['gain'][0]
    if fixflag == False:
        intercep = res['intercep'][0]
    # full well and response slope, converted to k-units for plotting
    fw = res['full'][0] / 1000
    rsp = res['rsp'][0] / 1000

    # dn-variance curve
    x, y = data[tag_flux] / 1000, data[tag_var] / 1000
    plt.subplot(221)
    plt.xlabel('Signal (k ADU)')
    plt.ylabel('Corrected Variance (k ADU^2)')
    plt.scatter(x[data['disp']], y[data['disp']], s=10, color='coral', zorder=2)
    ax = plt.gca()
    # freeze axis ranges from the disp points before overplotting
    yr = np.array([0, ax.get_ylim()[1]])
    xr = np.array([0, ax.get_xlim()[1]])
    s2 = plt.scatter(x[data['valid']], y[data['valid']], s=10, color='green', zorder=3)
    if raw is not None:
        plt.scatter(raw[tag_flux] / 1000, raw[tag_var] / 1000, s=1.5, color='lightgray', zorder=1)
    if fixflag == True:
        l1, = plt.plot(xr, xr / gain, color='orange', zorder=4)
    else:
        # intercep is in ADU^2; divide by 1000 to match the k-unit axes
        l1, = plt.plot(xr, xr / gain + intercep/1000, color='orange', zorder=4)
    ax.set_xlim(xr), ax.set_ylim(yr)
    plt.legend([l1, s2], ['gain = {:.3f} e-/ADU'.format(gain), 'used data'], loc='upper left')

    # the remaining three panels need a finite full-well estimate
    if np.isfinite(fw):
        fw_use = fw_nominal_e

        # PTC non-linearity
        plt.subplot(223)
        plt.xlabel('Signal (k ADU)')
        plt.ylabel('PTC Linearity (%)')
        ax = plt.gca()
        ax.yaxis.set_minor_locator(MultipleLocator(1))
        _add_thresh_lines(xr, ptc_thresh, ptc_thresh2)
        if fixflag == True:
            nonlin = (y / (x / gain) - 1) * 100
        else:
            nonlin = ((y - intercep/1000) / (x / gain) - 1)*100
        plt.scatter(x[data['valid']], nonlin[data['valid']], s=10, color='green', zorder=2)
        # clamp the y-range so outliers do not dominate the panel
        yr = ax.get_ylim()
        yr = np.array([max(yr[0], -20), min(yr[1], 15)])
        if raw is not None:
            plt.scatter(raw['dn'], (raw['var_sub'] / (raw['dn'] / gain) - 1) * 100,
                        s=1.5, color='lightgray', zorder=1)
        plt.scatter(x[data['disp']], nonlin[data['disp']], s=10, color='coral', zorder=1)
        l2, = plt.plot([fw / gain, fw / gain], yr, color='lightskyblue', ls='--', zorder=2)
        handlers = [l2, ]
        txts = ['PTC-full: {:.1f} ke-'.format(fw)]
        if data['ptc10'].sum() > 0:
            s2 = plt.scatter(x[data['ptc10']], nonlin[data['ptc10']], s=10, color='blue', zorder=3)
            handlers.append(s2)
            txts.append('{:.1f}~{:.1f} ke-'.format(fw_use * 0.1, fw_use))
        if data['ptc20'].sum() > 0:
            s3 = plt.scatter(x[data['ptc20']], nonlin[data['ptc20']], s=10, color='magenta', zorder=4)
            handlers.append(s3)
            txts.append('{:.1f}~{:.1f} ke-'.format(fw_use * 0.2, fw_use * 0.8))
        ax.set_xlim(xr), ax.set_ylim(yr)
        plt.legend(handlers, txts)

        # time-signal curve
        x, y = data['t'].data, data[tag_flux].data / 1000
        plt.subplot(222)
        plt.xlabel('Time (s)')
        plt.ylabel('Signal (k ADU)')
        ax = plt.gca()
        plt.scatter(x[data['disp']], y[data['disp']], s=10, color='coral', zorder=2)
        xr, yr = np.array(ax.get_xlim()), np.array(ax.get_ylim())
        xr, yr = np.array([0, xr[1]]), np.array([0, yr[1]])
        if raw is not None:
            plt.scatter(raw['t'], raw[tag_flux] / 1000, s=1.5, color='lightgray', zorder=1)
        s2 = plt.scatter(x[data['valid']], y[data['valid']], s=10, color='green', zorder=3)
        l1, = plt.plot(xr, xr * rsp, color='orange', zorder=4)
        ax.set_xlim(xr), ax.set_ylim(yr)
        plt.legend([l1, s2], ['linear fitting', 'used data'], loc='lower right')

        # time-signal non-linearity
        plt.subplot(224)
        plt.xlabel('Time (s)')
        plt.ylabel('Response Linearity (%)')
        ax = plt.gca()
        _add_thresh_lines(xr, resp_thresh, resp_thresh2)
        # deviation of the signal from the fitted linear response, in percent
        nonlin = (y / (x * rsp) - 1) * 100
        plt.scatter(x[data['disp']], nonlin[data['disp']], s=10, color='coral', zorder=2)
        yr = ax.get_ylim()
        plt.scatter(x[data['valid']], nonlin[data['valid']], s=10, color='green', zorder=3)
        if raw is not None:
            plt.scatter(raw['t'], (raw[tag_flux] / (raw['t'] * rsp * 1000) - 1) * 100,
                        s=1.5, color='lightgray', zorder=1)
        s2 = plt.scatter(x[data['ptc10']], nonlin[data['ptc10']], s=10, color='blue', zorder=4)
        s3 = plt.scatter(x[data['ptc20']], nonlin[data['ptc20']], s=10, color='magenta', zorder=5)
        yr = np.array([max(yr[0], -10), min(yr[1], 15)])
        ax.set_xlim(xr), ax.set_ylim(yr)
        plt.legend([s2, s3], ['{:.1f}~{:.1f} ke-'.format(fw_use * 0.1, fw_use),
                              '{:.1f}~{:.1f} ke-'.format(fw_use * 0.2, fw_use * 0.8)])

    plt.tight_layout()
    plt.savefig(ffig)
    plt.close()
