# -*- coding: utf-8 -*-
"""ECG_EDA_Data.py
-------------------
本模块用于处理 Biopac .acq 数据文件，提取 ECG（心电）与 EDA（皮电）信号并计算常用生理指标。

主要功能流程：
1. 读入 .acq → 转存为仅包含 ECG & EDA 的 .csv
2. ECG 信号预处理（高通 → 低通 → 50 Hz 陷波）
3. ECG 特征提取（R 峰检测 → RR 校正 → 平均心率 & HRV-LF/HF）
4. EDA 信号预处理（低通滤波 → 下采样）
5. EDA 特征提取（cvxEDA 分解 → SCL & SCR）
6. 将结果追加写入 * _eda_metrics.csv 汇总文件

脚本亦可作为命令行工具使用：
>>> python ECG_EDA_Data.py  /path/to/file.acq
若直接运行将进入交互模式；也可在其它脚本中 import 调用 convert_acq_to_csv、pipeline 等函数复用。
"""

from __future__ import annotations

import os
import time
import traceback
from datetime import datetime
from typing import Tuple, List, Optional, Sequence

import numpy as np
import pandas as pd
import sqlite3
import neurokit2 as nk
from scipy.signal import butter, filtfilt, iirnotch
from cvxEDA import cvxEDA
import re

# 尝试导入 tqdm 进度条
try:
    from tqdm import tqdm  # type: ignore
    _TQDM_AVAILABLE = True
except ImportError:  # pragma: no cover
    _TQDM_AVAILABLE = False

# =============================================================================
# 全局常量与配置
# =============================================================================
MIN_RR_SEC = 0.4                # RR 最小阈值 (s)
SCR_THRESHOLD = 0.05            # SCR 峰值检测阈值 (μS)
ECG_UNIT_DIVISOR = 1000.0       # µV → mV
# 默认数据库文件（与数据上传脚本一致）
DB_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "LHL_hrv.db3")
# acq_meta 表名常量
META_TABLE = "acq_meta"
DEFAULT_SAVE_ENCODING = "utf-8-sig"


# =============================================================================
# 基础工具函数
# =============================================================================

def user_confirm(prompt: str) -> bool:
    """Non-interactive "auto" confirmation for CLI prompts.

    Prints the prompt followed by an auto-continue notice and always
    returns True (no actual user input is requested).
    """
    print(prompt + " —— 已自动继续")
    return True


def adjust_rr_intervals(rr: np.ndarray) -> np.ndarray:
    """Clean and correct an RR-interval series (unit: seconds).

    Three passes:
    1. drop intervals shorter than ``MIN_RR_SEC``;
    2. pairwise-merge the surviving intervals that are still under 0.6 s
       (an unpaired leftover — or a single short interval — becomes 0.6 s);
    3. split overly long intervals into 2–5 equal parts depending on length.
    """
    work = np.asarray(rr, dtype=float).copy()

    # Pass 1: discard segments below the hard minimum
    work = work[work >= MIN_RR_SEC]

    # Pass 2: merge 0.4–0.6 s intervals in consecutive pairs
    short = work[work < 0.6]
    kept = work[work >= 0.6]

    merged: List[float] = []
    if short.size == 1:
        merged.append(0.6)
    elif short.size > 1:
        pair_count, leftover = divmod(short.size, 2)
        for j in range(pair_count):
            merged.append(float(short[2 * j] + short[2 * j + 1]))
        if leftover:
            merged.append(0.6)

    combined = np.concatenate([kept, np.array(merged)]) if merged else kept

    # Pass 3: split long intervals — thresholds at 1.2/2.4/3.6/4.8 s give
    # divisors 1 through 5 respectively
    corrected: List[float] = []
    for value in combined:
        divisor = 1
        for bound in (1.2, 2.4, 3.6, 4.8):
            if value < bound:
                break
            divisor += 1
        corrected.append(value / divisor)

    return np.array(corrected)


def butter_filter(data: np.ndarray, low: float, high: float, fs: float, *, btype: str = "band", order: int = 4) -> np.ndarray:
    """通用 Butterworth 滤波器封装。"""
    nyq = 0.5 * fs
    if btype == "band":
        b, a = butter(order, [low / nyq, high / nyq], btype="band")
    elif btype == "lowpass":
        b, a = butter(order, high / nyq, btype="lowpass")
    elif btype == "highpass":
        b, a = butter(order, low / nyq, btype="highpass")
    else:
        raise ValueError(f"暂不支持的滤波类型: {btype}")
    return filtfilt(b, a, data)


def notch_filter(data: np.ndarray, freq: float, fs: float, *, quality: int = 30) -> np.ndarray:
    """陷波滤波，用于去除工频干扰。"""
    nyq = 0.5 * fs
    b, a = iirnotch(freq / nyq, quality)
    return filtfilt(b, a, data)


# =============================================================================
# 文件 & 通道相关函数
# =============================================================================

def find_ecg_eda_indices(data) -> Tuple[Optional[int], Optional[int]]:
    """Locate the ECG and EDA channel indices by channel name.

    ``data`` is any object exposing ``.channels`` where each channel has a
    ``.name`` (str) and ``.data`` (array-like) — e.g. a bioread file object
    or the duck-typed stand-in built in ``pipeline_table``.  Among multiple
    name matches, the channel with the largest standard deviation wins.

    Returns ``(ecg_idx, eda_idx)``; either may be None when no channel name
    contains the relevant keywords (ecg/ekg resp. eda/gsr).

    Fix: the original annotation referenced ``bioread.Bioread`` although
    ``bioread`` is never imported in this module (a dangling name that only
    survived thanks to ``from __future__ import annotations``); the
    annotation was removed and the function duck-types instead.
    """
    ecg_candidates: List[Tuple[int, float]] = []
    eda_candidates: List[Tuple[int, float]] = []
    for idx, ch in enumerate(data.channels):
        name = ch.name.lower()
        is_ecg = "ecg" in name or "ekg" in name
        is_eda = "eda" in name or "gsr" in name
        if is_ecg or is_eda:
            # Compute the std only for candidate channels, and only once
            # even when a name matches both keyword sets.
            spread = float(np.std(ch.data))
            if is_ecg:
                ecg_candidates.append((idx, spread))
            if is_eda:
                eda_candidates.append((idx, spread))
    ecg_idx = max(ecg_candidates, key=lambda c: c[1])[0] if ecg_candidates else None
    eda_idx = max(eda_candidates, key=lambda c: c[1])[0] if eda_candidates else None
    return ecg_idx, eda_idx


# ------------------------------------------------------------
# 数据库读取函数
# ------------------------------------------------------------

def list_tables(conn: sqlite3.Connection) -> Sequence[str]:
    """Return the user data-table names, excluding SQLite internals and acq_meta."""
    rows = conn.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall()
    excluded = ("sqlite_sequence", META_TABLE)
    return [name for (name,) in rows if name not in excluded]


def match_tables(pattern: str, tables: Sequence[str]) -> Sequence[str]:
    """Case-insensitive substring filter over table names."""
    needle = pattern.lower()
    return [name for name in tables if needle in name.lower()]


def load_table(conn: sqlite3.Connection, table_name: str) -> pd.DataFrame:
    """Read an entire table into a DataFrame (table name is double-quoted)."""
    query = 'SELECT * FROM "{}"'.format(table_name)
    return pd.read_sql_query(query, conn)


# =============================================================================
# ECG 处理函数
# =============================================================================

def preprocess_ecg(raw_ecg: np.ndarray, fs: float) -> np.ndarray:
    """Three-stage ECG filtering: 0.5 Hz high-pass → 40 Hz low-pass → 50 Hz notch."""
    signal = butter_filter(raw_ecg, 0.5, 0, fs, btype="highpass")
    signal = butter_filter(signal, 0, 40, fs, btype="lowpass")
    return notch_filter(signal, 50, fs)


def extract_ecg_metrics(clean_ecg: np.ndarray, fs: float) -> Optional[dict]:
    """Detect R peaks and compute common HRV metrics.

    Returns a merged dict of time-domain, frequency-domain and non-linear
    HRV indices plus a convenience ``Mean_HR_bpm`` value; returns None when
    fewer than 3 R peaks are found, when the corrected RR series is empty,
    or on any processing failure (which is printed, not raised).
    """
    try:
        _, info = nk.ecg_process(clean_ecg, sampling_rate=fs)
        r_peaks = info.get("ECG_R_Peaks", [])
        if len(r_peaks) < 3:
            print("[警告] R 峰数量不足，跳过 ECG 特征计算。")
            return None

        rr_sec = adjust_rr_intervals(np.diff(r_peaks) / fs)
        if len(rr_sec) == 0:
            return None

        # NeuroKit2's HRV functions accept the RR series (in ms) as a dict
        rri_dict = {"RRI": rr_sec * 1000.0}

        metrics: dict = {}
        for hrv_func in (nk.hrv_time, nk.hrv_frequency, nk.hrv_nonlinear):
            metrics.update(hrv_func(rri_dict, show=False).iloc[0].to_dict())

        # Convenience value: mean heart rate in beats per minute
        metrics["Mean_HR_bpm"] = 60.0 / np.mean(rr_sec)
        return metrics
    except Exception as exc:
        print(f"[错误] ECG 指标计算失败：{exc}")
        return None


# =============================================================================
# EDA 处理函数
# =============================================================================

def preprocess_eda(raw_eda: np.ndarray, fs: float, *, lowpass_hz: float = 5.0, resample_hz: int = 10) -> Tuple[np.ndarray, int]:
    """EDA low-pass filtering followed by downsampling.

    Returns ``(processed_signal, new_sampling_rate)`` where the new rate is
    ``resample_hz``.
    """
    smoothed = butter_filter(raw_eda, 0, lowpass_hz, fs, btype="lowpass")
    target_len = int(len(smoothed) * resample_hz / fs)
    downsampled = nk.signal_resample(smoothed, sampling_rate=fs, desired_length=target_len)
    return downsampled, resample_hz


def extract_eda_features(eda_processed: np.ndarray, fs: int) -> Tuple[float, float]:
    """Decompose EDA via cvxEDA and return (mean SCL, SCR peaks per minute).

    On any failure the error is printed and the defaults (0.0, 0.0) are
    returned instead of raising.
    """
    scl_mean = 0.0
    scr_freq = 0.0
    try:
        phasic, _, tonic, *_ = cvxEDA(eda_processed, 1.0 / fs)
        scl_mean = float(np.mean(tonic))
        minutes = len(phasic) / fs / 60.0
        detected = nk.signal_findpeaks(phasic, height_min=SCR_THRESHOLD)
        if minutes > 0:
            scr_freq = len(detected["Peaks"]) / minutes
    except Exception as exc:
        print(f"[错误] EDA 特征提取失败：{exc}")
    return scl_mean, scr_freq


# =============================================================================
# 结果保存
# =============================================================================

def save_summary_csv(src_path: str, *, duration: float, mean_hr: float, lf_hf: float, scl_mean: float, scr_freq: float) -> None:
    """Append one row of metrics to the ``*_eda_metrics.csv`` summary file.

    The summary path is derived from *src_path* by replacing its extension;
    the header row is written only when the file does not yet exist.
    """
    summary_path = os.path.splitext(src_path)[0] + "_eda_metrics.csv"
    row = {
        "File": os.path.basename(src_path),
        "Duration_s": duration,
        "Mean_HR_bpm": mean_hr,
        "LF_HF": lf_hf,
        "SCL_mean_uS": scl_mean,
        "SCR_freq_per_min": scr_freq,
    }
    is_new_file = not os.path.exists(summary_path)
    pd.DataFrame([row]).to_csv(summary_path, mode="a", index=False, header=is_new_file, encoding=DEFAULT_SAVE_ENCODING)
    action = "创建" if is_new_file else "追加"
    print(f"[{action}] 汇总文件：{summary_path}")


# =============================================================================
# 主管道
# =============================================================================

def pipeline_table(df: pd.DataFrame, table_name: str, fs: float, tag: int = 0) -> None:
    """
    Run the full ECG & EDA analysis on one table's DataFrame and print a
    detailed Chinese-language report: analysis parameters, filtering
    details, method descriptions, and the core metrics.

    Parameters
    ----------
    df : pd.DataFrame
        Raw channel data; non-numeric columns plus the bookkeeping columns
        ``sample_idx`` / ``tag`` are excluded from channel detection.
    table_name : str
        Source table name, used only for display.
    fs : float
        Sampling rate in Hz.
    tag : int, default 0
        Phase tag; 1 and 2 map to Chinese labels via ``tag_map`` below,
        other values are displayed verbatim.

    Side effects: prints the report to stdout; returns nothing.
    """
    import sys
    import contextlib

    # Channel detection: keep only non-object (i.e. numeric) columns
    数值列 = [c for c in df.columns if df[c].dtype != object]
    # Exclude bookkeeping columns.  NOTE(review): the original comment also
    # listed modify_time / upload_time, but only sample_idx and tag are
    # actually excluded here — confirm whether the others can be numeric.
    数值列 = [c for c in 数值列 if c not in ("sample_idx", "tag")]

    # Locate the ECG & EDA columns by feeding find_ecg_eda_indices a
    # duck-typed stand-in (an anonymous class with .channels, each channel
    # exposing .name and .data) that mimics a bioread object
    ecg_idx, eda_idx = find_ecg_eda_indices(type("obj", (), {"channels": [type("ch", (), {"name": n, "data": df[n].values}) for n in 数值列]}))
    if ecg_idx is None or eda_idx is None:
        print(f"[跳过] 表 {table_name} 未找到 ECG/EDA 通道")
        return

    ecg_col = 数值列[ecg_idx]
    eda_col = 数值列[eda_idx]

    # =====================
    # Basic parameter statistics
    # =====================
    原始_ecg = df[ecg_col].values / ECG_UNIT_DIVISOR  # µV → mV
    原始_eda = df[eda_col].values
    信号点数 = len(原始_ecg)
    时长 = 信号点数 / fs
    ecg_均值 = float(np.mean(原始_ecg))
    ecg_最大 = float(np.max(原始_ecg))
    ecg_最小 = float(np.min(原始_ecg))
    eda_均值 = float(np.mean(原始_eda))
    eda_最大 = float(np.max(原始_eda))
    eda_最小 = float(np.min(原始_eda))

    # =====================
    # ECG processing and detailed statistics
    # =====================
    ecg_滤波 = preprocess_ecg(原始_ecg, fs)
    # R-peak detection and HRV analysis
    try:
        _, info = nk.ecg_process(ecg_滤波, sampling_rate=fs)
        r_peaks = info.get("ECG_R_Peaks", [])
        原始_r波数 = len(r_peaks)
        if 原始_r波数 < 3:
            print("[警告] R波数量不足，跳过ECG特征计算。")
            metrics_ecg = None
            rr_原始数 = 0
            rr_过滤后数 = 0
            rr_被过滤数 = 0
            rr_合并说明 = "无"
        else:
            raw_rr = np.diff(r_peaks) / fs
            rr_原始数 = len(raw_rr)
            # Filtering rule: drop RR intervals shorter than 0.4 s
            rr_过滤条件 = "去除小于0.4秒的RR间期"
            rr_过滤后 = raw_rr[raw_rr >= MIN_RR_SEC]
            rr_被过滤数 = rr_原始数 - len(rr_过滤后)
            # Special merge/split correction (see adjust_rr_intervals)
            adj_rr = adjust_rr_intervals(raw_rr)
            rr_合并说明 = "对0.4-0.6秒区间合并，对过长RR拆分"
            rr_最终数 = len(adj_rr)
            nn_ms = adj_rr * 1000.0
            rri_dict = {"RRI": nn_ms}
            # HRV indices: time-domain, frequency-domain and non-linear
            metrics_ecg = {}
            try:
                metrics_ecg.update(nk.hrv_time(rri_dict, show=False).iloc[0].to_dict())
                metrics_ecg.update(nk.hrv_frequency(rri_dict, show=False).iloc[0].to_dict())
                metrics_ecg.update(nk.hrv_nonlinear(rri_dict, show=False).iloc[0].to_dict())
                metrics_ecg["Mean_HR_bpm"] = 60.0 / np.mean(adj_rr)
            except Exception as exc:
                print(f"[错误] HRV计算失败：{exc}")
                metrics_ecg = None
    except Exception as exc:
        print(f"[错误] ECG处理失败：{exc}")
        metrics_ecg = None
        原始_r波数 = 0
        rr_原始数 = 0
        rr_过滤后数 = 0
        rr_被过滤数 = 0
        rr_合并说明 = "无"

    # =====================
    # EDA processing and detailed statistics
    # =====================
    eda_proc, new_fs = preprocess_eda(原始_eda, fs)
    # Suppress cvxEDA's low-level stdout logging.
    # NOTE(review): if cvxEDA itself raises, the bare retry below will raise
    # the same error again, uncaught — confirm this is intended.
    try:
        import contextlib
        import io
        with contextlib.redirect_stdout(io.StringIO()):
            phasic, _, tonic, *_ = cvxEDA(eda_proc, 1.0 / new_fs)
    except Exception:
        phasic, _, tonic, *_ = cvxEDA(eda_proc, 1.0 / new_fs)
    scl_mean = float(np.mean(tonic))
    scl_first = float(tonic[0])
    scl_last = float(tonic[-1])
    scl_slope = (scl_last - scl_first) / (len(tonic) / new_fs)
    # SCR peak detection on the phasic component
    peaks = nk.signal_findpeaks(phasic, height_min=SCR_THRESHOLD)
    scr_count = len(peaks["Peaks"])
    scr_freq = scr_count / (len(phasic) / new_fs / 60.0) if len(phasic) > 0 else 0.0

    # =====================
    # Chinese labels and data-source description
    # =====================
    tag_map = {1: "记忆采集", 2: "记忆激活"}
    tag_label = tag_map.get(tag, f"标签{tag}")
    data_source = f"{table_name}（{tag_label}）"

    print("\n==============================")
    print("【分析参数】")
    print(f"数据源表名：{data_source}")
    print(f"采样率：{fs:.1f} Hz")
    print(f"信号总长度：{信号点数} 点（{时长:.1f} 秒）")
    print(f"ECG原始均值：{ecg_均值:.3f} mV，最大值：{ecg_最大:.3f} mV，最小值：{ecg_最小:.3f} mV")
    print(f"EDA原始均值：{eda_均值:.3f} μS，最大值：{eda_最大:.3f} μS，最小值：{eda_最小:.3f} μS")
    print("------------------------------")
    print("【R波检测与RR间期处理】")
    print(f"原始R波数量：{原始_r波数}")
    print(f"原始RR间期数量：{rr_原始数}")
    print(f"过滤条件：去除小于0.4秒的RR间期")
    # NOTE(review): rr_过滤后 is only bound on the successful branch above;
    # the rr_原始数>0 guard is what keeps this line from raising NameError.
    print(f"过滤后剩余RR间期数量：{len(rr_过滤后) if rr_原始数>0 else 0}")
    print(f"被过滤掉的RR间期数量：{rr_被过滤数}")
    print(f"特殊合并/拆分处理说明：{rr_合并说明}")
    # Short-circuit evaluation keeps rr_最终数 from being read when unbound
    print(f"最终用于HRV分析的RR间期数量：{metrics_ecg and len(metrics_ecg)>0 and rr_最终数 or 0}")
    print("------------------------------")
    print("【SCL与SCR详细参数】")
    print(f"SCL均值：{scl_mean:.6f} μS，首点：{scl_first:.6f} μS，末点：{scl_last:.6f} μS")
    print(f"SCL斜率：{scl_slope:.6f} μS/秒")
    print(f"SCR峰值个数：{scr_count}，分析区间：{len(phasic)/new_fs:.1f} 秒")
    print("------------------------------")
    print("【计算方法说明】")
    print("Mean RR (ms)：R波间期均值（单位：毫秒）")
    print("平均心率：60/Mean RR")
    print("LF/HF：HRV频域分析，低频/高频功率比")
    print("SCL均值：cvxEDA分解后tonic分量均值")
    print("SCL斜率：cvxEDA分解后tonic分量首末点差/总时长")
    print("SCR频率：cvxEDA分解后phasic分量峰值数/分钟")
    print("------------------------------")
    print("【核心结果】")
    if metrics_ecg:
        mean_rr = metrics_ecg.get('HRV_MeanNN', float('nan'))
        mean_hr = metrics_ecg.get('Mean_HR_bpm', float('nan'))
        lf_hf = metrics_ecg.get('HRV_LFHF', float('nan'))
        print(f"Mean RR (ms)        : {mean_rr:.1f}")
        print(f"平均心率                : {mean_hr:.1f}")
        print(f"LF/HF               : {lf_hf:.2f}")
    else:
        print("ECG指标计算失败/数据不足")
    print(f"SCL 均值 (μS)         : {scl_mean:.3f}")
    print(f"SCL 斜率              : {scl_slope:.6f}")
    print(f"SCR 频率（次/分）         : {scr_freq:.1f}")
    print("==============================\n")


# =============================================================================
# 元数据工具
# =============================================================================

def get_sample_rate(conn: sqlite3.Connection, table_name: str) -> Optional[float]:
    """Look up a table's sampling rate from acq_meta via a LIKE suffix match.

    acq_meta.file_name stores a (possibly multi-level) relative path, so any
    entry ending in ".../<person>.acq" is treated as a match.  The table
    name has the form "<name>_<stage>-<task>-<group>"; only the leading
    <name> part is used.  The newest record by modify_time wins; rows with
    NULL sample_rate are ignored.  Returns None when nothing matches.
    """
    person = table_name.split("_")[0]
    sql = (
        f"SELECT sample_rate FROM {META_TABLE} "
        "WHERE file_name LIKE ? AND sample_rate IS NOT NULL "
        "ORDER BY modify_time DESC LIMIT 1"
    )
    row = conn.execute(sql, (f"%{person}.acq",)).fetchone()
    return None if row is None else row[0]


def ensure_meta_columns(conn: sqlite3.Connection) -> None:
    """Ensure acq_meta has the bookkeeping/metric columns, adding any missing.

    Idempotent: existing columns are left untouched.  Replaces the original
    eight copy-pasted ``if``/``ALTER`` blocks with a single data-driven loop
    (same columns, same ALTER order).  Commits once at the end.
    """
    # Column name -> SQL type/default, mirroring the metrics written back
    # by update_meta_for_table and the CLI batch loop.
    required = {
        "compute_status": "INTEGER DEFAULT 0",  # 1=done, -1=failed, 0=pending
        "failure_reason": "TEXT",
        "mean_rr": "REAL",
        "mean_hr": "REAL",
        "lf_hf": "REAL",
        "scl_mean": "REAL",
        "scl_slope": "REAL",
        "scr_freq": "REAL",
    }
    # PRAGMA table_info rows: (cid, name, type, notnull, dflt_value, pk)
    existing = {row[1] for row in conn.execute(f"PRAGMA table_info({META_TABLE})")}
    for column, decl in required.items():
        if column not in existing:
            conn.execute(f"ALTER TABLE {META_TABLE} ADD COLUMN {column} {decl}")
    conn.commit()


# =============================================================================
# 辅助函数
# =============================================================================

def extract_person_name(base: str) -> str:
    """Concatenate all Chinese-character runs in *base*.

    Used to map a table name back to its acq_meta entry.  Falls back to
    returning *base* unchanged when it contains no Chinese characters.
    """
    runs = re.findall(r"[\u4e00-\u9fa5]+", base)
    if not runs:
        return base
    return "".join(runs)


def compute_core_metrics(df: pd.DataFrame, fs: float):
    """Compute the core ECG/EDA metrics for one table's DataFrame.

    Returns a dict with mean_rr, mean_hr, lf_hf, scl_mean, scl_slope and
    scr_freq, or None when the data are unusable (fewer than two numeric
    columns, fewer than 3 R peaks, or any ECG/HRV processing failure).
    """
    excluded = ("sample_idx", "modify_time", "upload_time", "level1", "level2", "level3", "level4")
    num_cols = [c for c in df.columns if df[c].dtype != object and c not in excluded]
    if len(num_cols) < 2:
        return None

    # Prefer columns whose names mention ecg/ekg resp. eda/gsr; otherwise
    # fall back to positional defaults (first / second numeric column).
    ecg_col = next((c for c in num_cols if "ecg" in c.lower() or "ekg" in c.lower()), num_cols[0])
    eda_col = next((c for c in num_cols if "eda" in c.lower() or "gsr" in c.lower()), num_cols[1])

    ecg_mv = df[ecg_col].values / 1000.0  # µV → mV
    eda = df[eda_col].values

    # ---- ECG / HRV ----
    try:
        _, info = nk.ecg_process(ecg_mv, sampling_rate=fs)
        r_peaks = info.get("ECG_R_Peaks", [])
        if len(r_peaks) < 3:
            return None
        rr = np.diff(r_peaks) / fs
        if len(rr) == 0:
            return None
        # NeuroKit2's HRV functions take the RR series (ms) as a dict
        rri_dict = {"RRI": rr * 1000.0}
        metrics = {}
        for hrv_fn in (nk.hrv_time, nk.hrv_frequency):
            metrics.update(hrv_fn(rri_dict, show=False).iloc[0].to_dict())
        metrics["Mean_HR_bpm"] = 60.0 / np.mean(rr)
        mean_rr = metrics.get("HRV_MeanNN", np.nan)
        mean_hr = metrics.get("Mean_HR_bpm", np.nan)
        lf_hf = metrics.get("HRV_LFHF", np.nan)
    except Exception:
        return None

    # ---- EDA ----
    eda_proc, new_fs = preprocess_eda(eda, fs)
    try:
        import contextlib, io
        # Suppress cvxEDA's solver output
        with contextlib.redirect_stdout(io.StringIO()):
            phasic, _, tonic, *_ = cvxEDA(eda_proc, 1.0 / new_fs)
    except Exception:
        # Retry without redirection (re-raises if cvxEDA itself fails)
        phasic, _, tonic, *_ = cvxEDA(eda_proc, 1.0 / new_fs)

    scl_mean = float(np.mean(tonic))
    scl_slope = (tonic[-1] - tonic[0]) / (len(tonic) / new_fs) if len(tonic) else 0.0
    peaks = nk.signal_findpeaks(phasic, height_min=SCR_THRESHOLD)
    scr_freq = len(peaks["Peaks"]) / (len(phasic) / new_fs / 60.0) if len(phasic) else 0.0

    return {
        "mean_rr": float(mean_rr),
        "mean_hr": float(mean_hr),
        "lf_hf": float(lf_hf),
        "scl_mean": scl_mean,
        "scl_slope": scl_slope,
        "scr_freq": float(scr_freq),
    }


def update_meta_for_table(conn: sqlite3.Connection, table_name: str, metrics: dict) -> None:
    """Write computed metrics back to acq_meta for the matching file entry.

    The acq_meta row(s) are found by a fuzzy LIKE on file_name using the
    person-name prefix of *table_name* (text before the first "_"), i.e.
    any path ending in "<person>.acq".  Sets compute_status=1, clears
    failure_reason, and commits immediately.

    Fix: use the META_TABLE constant instead of a hardcoded "acq_meta"
    table name, for consistency with get_sample_rate / ensure_meta_columns.
    """
    person = table_name.split("_")[0]  # person-name part of the table name
    like_pattern = f"%{person}.acq"
    conn.execute(
        f"UPDATE {META_TABLE} SET mean_rr=?, mean_hr=?, lf_hf=?, scl_mean=?, scl_slope=?, scr_freq=?, "
        "compute_status=1, failure_reason=NULL WHERE file_name LIKE ?",
        (
            metrics["mean_rr"], metrics["mean_hr"], metrics["lf_hf"],
            metrics["scl_mean"], metrics["scl_slope"], metrics["scr_freq"],
            like_pattern,
        ),
    )
    conn.commit()


# =============================================================================
# CLI 入口
# =============================================================================

if __name__ == "__main__":
    import sys

    print("=== ECG & EDA 指标批量计算 ===")
    pattern_input = " ".join(sys.argv[1:]) if len(sys.argv) > 1 else input("输入表名关键字(留空=全部): ").strip()

    if not os.path.exists(DB_PATH):
        print(f"[错误] 数据库不存在: {DB_PATH}")
        sys.exit(1)

    with sqlite3.connect(DB_PATH) as conn:
        ensure_meta_columns(conn)
        tables = list_tables(conn)
        matched_tables = tables if pattern_input == "" else match_tables(pattern_input, tables)
        if not matched_tables:
            print("[提示] 未匹配到任何表")
            sys.exit(0)

        modified, skipped, failed = [], [], []

        # 进度迭代器
        if _TQDM_AVAILABLE:
            iterator = tqdm(matched_tables, desc="表处理进度", ncols=80)
        else:
            iterator = matched_tables

        for tbl in iterator:
            # 判断是否需要计算：若 compute_status!=1 或任一指标为空则重算
            cur = conn.execute(
                "SELECT compute_status, mean_rr, mean_hr, lf_hf, scl_mean, scl_slope, scr_freq "
                "FROM acq_meta WHERE file_name LIKE ? LIMIT 1",
                (f"%{tbl}.acq",),
            )
            row = cur.fetchone()
            if row:
                status_flag = row[0]
                metrics_any_none = any(x is None for x in row[1:])
                if status_flag == 1 and not metrics_any_none:
                    skipped.append((tbl, "已计算"))
                    continue

            fs_val = get_sample_rate(conn, tbl)
            if fs_val is None:
                skipped.append((tbl, "acq_meta 无采样率"))
                continue

            df = load_table(conn, tbl)
            metrics = compute_core_metrics(df, fs_val)
            if metrics is None:
                failed.append((tbl, "指标计算失败/数据不足"))
                conn.execute("UPDATE acq_meta SET compute_status=-1, failure_reason=? WHERE file_name LIKE ?", ("指标计算失败/数据不足", f"%{tbl}.acq"))
                conn.commit()
                continue

            update_meta_for_table(conn, tbl, metrics)
            modified.append(tbl)

            # 若无 tqdm，则手动打印进度
            if not _TQDM_AVAILABLE:
                processed = len(modified) + len(skipped) + len(failed)
                total = len(matched_tables)
                print(f"[进度] {processed}/{total} 已处理 \r", end="", flush=True)

        # 生成 CSV 汇总
        cur = conn.execute("SELECT file_name, modify_time, sample_rate, mean_rr, mean_hr, lf_hf, scl_mean, scl_slope, scr_freq FROM acq_meta WHERE compute_status=1")
        rows = cur.fetchall()
        summary_rows = []
        for r in rows:
            file_name, mtime, fs, mean_rr, mean_hr, lf_hf, scl_mean, scl_slope, scr_freq = r
            tbl_name = extract_person_name(os.path.splitext(os.path.basename(file_name))[0])
            # 直接根据相对路径解析阶段(level1)、任务类型(level2)、组别(level3)
            rel_parts = os.path.normpath(file_name).split(os.sep)
            # 去掉文件名部分，只保留目录层级
            if rel_parts and rel_parts[-1].lower().endswith('.acq'):
                rel_parts = rel_parts[:-1]
            stage = rel_parts[0] if len(rel_parts) >= 1 else None
            task  = rel_parts[1] if len(rel_parts) >= 2 else None
            group = rel_parts[2] if len(rel_parts) >= 3 else None

            summary_rows.append({
                "文件": tbl_name,
                "阶段": stage,
                "任务类型": task,
                "组别": group,
                "平均RR_ms": mean_rr,
                "平均心率_bpm": mean_hr,
                "LF_HF": lf_hf,
                "SCL均值_uS": scl_mean,
                "SCL斜率": scl_slope,
                "SCR频率_次每分": scr_freq,
                "修改时间": mtime,
                "采样率": fs,
            })

        if summary_rows:
            out_csv = os.path.join(os.path.dirname(DB_PATH), "生理指标汇总.csv")
            df_new = pd.DataFrame(summary_rows)

            if os.path.exists(out_csv):
                # 已存在：读取现有文件，判断重复（按 file_name 字段）
                try:
                    df_exist = pd.read_csv(out_csv, encoding="utf-8-sig")
                except UnicodeDecodeError:
                    df_exist = pd.read_csv(out_csv, encoding="utf-8")

                # 基于 "文件+阶段" 组合键判断是否已存在
                df_new["_key"] = df_new["文件"].astype(str) + "|" + df_new["阶段"].astype(str)
                df_exist["_key"] = df_exist["文件"].astype(str) + "|" + df_exist["阶段"].astype(str)
                dup_mask = df_new["_key"].isin(df_exist["_key"])
                df_append = df_new[~dup_mask]

                if df_append.empty:
                    print("[CSV] 无新增记录需要写入，已跳过保存。")
                else:
                    # 写入前移除临时列
                    if "_key" in df_append.columns:
                        df_append = df_append.drop(columns=["_key"])
                    df_append.to_csv(out_csv, mode="a", index=False, header=False, encoding="utf-8-sig")
                    print(f"[CSV] 已追加 {len(df_append)} 行至 {out_csv}")

                # 清理临时列（无论是否追加）
                for _df in (df_new, df_exist):
                    if "_key" in _df.columns:
                        _df.drop(columns=["_key"], inplace=True)
            else:
                # 首次创建
                df_new.to_csv(out_csv, index=False, encoding="utf-8-sig")
                print(f"[CSV] 新建 CSV: {out_csv} （{len(df_new)} 行）")

        # 打印汇总
        print("\n===== 处理汇总 =====")
        print(f"修改成功 {len(modified)} 个表")
        print(f"跳过 {len(skipped)} 个表")
        print(f"失败 {len(failed)} 个表")
        if skipped:
            print("[跳过列表]")
            for t, rsn in skipped:
                print(f"  - {t}: {rsn}")
        if failed:
            print("[失败列表]")
            for t, rsn in failed:
                print(f"  - {t}: {rsn}")