import os
import sqlite3
from datetime import datetime
import traceback
import re
from collections import defaultdict
import numpy as np  # 仅用于底部备用工具函数的类型注解

import bioread  # 读取 .acq 文件

# -----------------------------
# Configuration
# -----------------------------
# Directory containing this script
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Root of the preprocessed-data tree (folder name is Chinese for "preprocessed files")
PREPROCESS_DIR = os.path.join(BASE_DIR, "预处理文件")
DB_PATH = os.path.join(BASE_DIR, "LHL_hrv.db3")  # SQLite database file name
BATCH = 5000  # number of rows written per batch insert

# -----------------------------
# Metadata table helpers
# -----------------------------

def ensure_meta_table(conn: sqlite3.Connection):
    """Ensure the ``acq_meta`` table exists and carries every expected column.

    Creates the table on first run; for databases produced by older script
    versions, any missing column is added in place via ALTER TABLE.
    """
    conn.execute(
        """CREATE TABLE IF NOT EXISTS acq_meta (
                file_name   TEXT PRIMARY KEY,
                modify_time TEXT,
                sample_rate REAL
        );"""
    )
    # Columns newer versions of the pipeline rely on, with their SQL types.
    required = {
        "sample_rate": "REAL",
        "compute_status": "INTEGER DEFAULT 0",  # 0: pending, 1: success, -1: failed
        "failure_reason": "TEXT",
        "mean_rr": "REAL",
        "mean_hr": "REAL",
        "lf_hf": "REAL",
        "scl_mean": "REAL",
        "scl_slope": "REAL",
        "scr_freq": "REAL",
    }
    # Add whatever an older schema is still missing.
    existing = {row[1] for row in conn.execute("PRAGMA table_info(acq_meta)")}
    for name, sql_type in required.items():
        if name not in existing:
            conn.execute(f"ALTER TABLE acq_meta ADD COLUMN {name} {sql_type}")
    conn.commit()

def get_stored_mtime(conn: sqlite3.Connection, file_name: str):
    """Return the stored modify_time for *file_name*, or None if not recorded."""
    row = conn.execute(
        "SELECT modify_time FROM acq_meta WHERE file_name=?", (file_name,)
    ).fetchone()
    return None if row is None else row[0]

def get_stored_fs(conn: sqlite3.Connection, file_name: str):
    """Return the stored sample rate for *file_name*; None when absent or NULL."""
    row = conn.execute(
        "SELECT sample_rate FROM acq_meta WHERE file_name=?", (file_name,)
    ).fetchone()
    if row is None or row[0] is None:
        return None
    return row[0]

def upsert_meta(conn: sqlite3.Connection, file_name: str, modify_time: str, sample_rate: float):
    """Insert or refresh one metadata row, resetting compute_status to 0 (pending)."""
    sql = (
        "INSERT INTO acq_meta(file_name, modify_time, sample_rate, compute_status) "
        "VALUES(?, ?, ?, 0) "
        "ON CONFLICT(file_name) DO UPDATE SET modify_time=excluded.modify_time, "
        "sample_rate=excluded.sample_rate, compute_status=0"
    )
    conn.execute(sql, (file_name, modify_time, sample_rate))
    conn.commit()

# -----------------------------
# SQLite utility functions
# -----------------------------

def ensure_table_for_acq(conn: sqlite3.Connection, table_name: str, channel_names):
    """Create the per-recording signal table if it does not exist yet.

    Columns: ``sample_idx`` (primary key), one REAL column per channel,
    ``level1``~``level3`` (up to three folder-name levels — the original
    docstring incorrectly claimed level1~level4), plus ``modify_time``
    and ``upload_time`` metadata columns.
    """
    cols_sql = ["sample_idx INTEGER"]
    # One column per signal channel; quoted because channel names may
    # contain spaces or non-ASCII characters.
    for ch in channel_names:
        cols_sql.append(f'"{ch}" REAL')
    # Folder-hierarchy columns (level1~level3 only).
    cols_sql.extend([f"level{i} TEXT" for i in range(1, 4)])
    # File metadata columns.
    cols_sql.extend([
        "modify_time TEXT",
        "upload_time TEXT",
    ])
    ddl = f'CREATE TABLE IF NOT EXISTS "{table_name}" ({", ".join(cols_sql)}, PRIMARY KEY(sample_idx))'
    conn.execute(ddl)
    conn.commit()


def insert_signal_rows(conn: sqlite3.Connection, table_name: str, rows):
    """Bulk-insert *rows* (list of tuples) into *table_name*.

    Uses INSERT OR REPLACE so rows sharing a primary key are overwritten
    on re-runs.  An empty *rows* list is a no-op — the original code
    raised IndexError on ``rows[0]`` in that case.
    """
    if not rows:
        return
    placeholders = ", ".join(["?"] * len(rows[0]))
    sql = f'INSERT OR REPLACE INTO "{table_name}" VALUES ({placeholders})'
    conn.executemany(sql, rows)
    conn.commit()


# -----------------------------
# Main processing logic
# -----------------------------

def extract_person_name(base: str) -> str:
    """Concatenate every run of CJK (Chinese) characters found in *base*.

    Returns *base* unchanged when it contains no Chinese characters.
    """
    parts = re.findall(r"[\u4e00-\u9fa5]+", base)
    if not parts:
        return base
    return "".join(parts)

def process_acq_to_db(acq_path: str, folder_levels: list[str], conn: sqlite3.Connection, *, verbose: bool = True):
    """Incrementally ingest one .acq recording into its own SQLite table.

    Returns a ``(status, key)`` tuple where status is one of "added",
    "skipped" or "failed" and key is the meta key (or the raw path on
    failure).

    NOTE(review): on overwrite of an updated file, the table dropped is
    named after the person only, while data is written to
    ``{person}_{stage_key}`` — these names can differ, so stale rows may
    survive an overwrite. Confirm intended behavior.

    NOTE(review): the overwrite confirmation calls ``input()`` even when
    ``verbose=False``, so unattended batch runs can block on this prompt.
    """
    try:
        file_name = os.path.basename(acq_path)
        # Use the path relative to PREPROCESS_DIR as the meta key so that
        # same-named files in different folders are tracked separately.
        rel_path_key = os.path.relpath(acq_path, PREPROCESS_DIR)
        modify_time = datetime.fromtimestamp(os.path.getmtime(acq_path)).isoformat(sep=" ", timespec="seconds")

        # ---- duplicate detection ----
        stored_mtime = get_stored_mtime(conn, rel_path_key)
        if stored_mtime is not None:
            if stored_mtime == modify_time:
                if verbose:
                    print(f"[跳过] 文件未更新（{rel_path_key}）")
                return "skipped", rel_path_key
            else:
                ans = input(f"[提示] 检测到同名文件但修改时间不同，是否覆盖已存数据？(y/N)：").strip().lower()
                if ans != 'y':
                    if verbose:
                        print("  已跳过该文件。")
                    return "skipped", rel_path_key
                # Overwrite: drop the old table first.
                table_to_drop = extract_person_name(os.path.splitext(file_name)[0])
                conn.execute(f'DROP TABLE IF EXISTS "{table_to_drop}"')
                conn.commit()

        # Read the recording with bioread.
        data = bioread.read_file(acq_path)
        # Sampling rate: assumes all channels share one rate; falls back
        # to the first channel when the file-level attribute is missing.
        sample_rate = getattr(data, "sample_rate", None) or data.channels[0].samples_per_second
        channel_names = [ch.name for ch in data.channels]
        # Table name = person_stage-task-group (missing parts are dropped).
        person = extract_person_name(os.path.splitext(file_name)[0])
        stage_key_parts = folder_levels[:3] + [None] * 3  # pad to at least 3 entries
        stage_key_parts = [p for p in stage_key_parts[:3] if p]  # drop None / empty
        stage_key = "-".join(stage_key_parts) if stage_key_parts else "unknown"
        table_name = f"{person}_{stage_key}"

        # Create the table if it does not exist.
        ensure_table_for_acq(conn, table_name, channel_names)

        # Rebuild legacy tables that predate the level1~level3 columns.
        cur = conn.execute(f'PRAGMA table_info("{table_name}")')
        existing_cols = [r[1] for r in cur.fetchall()]
        if "level1" not in existing_cols:
            if verbose:
                print(f"[提示] 表 '{table_name}' 结构旧版，自动重建。")
            conn.execute(f'DROP TABLE IF EXISTS "{table_name}"')
            conn.commit()
            ensure_table_for_acq(conn, table_name, channel_names)

        # Highest sample_idx already stored (-1 when the table is empty).
        cur = conn.execute(f'SELECT MAX(sample_idx) FROM "{table_name}"')
        res = cur.fetchone()[0]
        existing_max = res if res is not None else -1

        n_samples = len(data.channels[0].data)
        if existing_max + 1 >= n_samples:
            if verbose:
                print(f"[跳过] 无新增数据（已有 {existing_max+1} 行，文件 {n_samples} 行），采样率 {sample_rate} Hz")
            upsert_meta(conn, rel_path_key, modify_time, sample_rate)
            return "skipped", rel_path_key

        upload_time = datetime.now().isoformat(sep=" ", timespec="seconds")

        start_idx = existing_max + 1
        rows_buffer = []
        inserted = 0
        for i in range(start_idx, n_samples):
            row = [i]
            for ch in data.channels:
                row.append(float(ch.data[i]))
            # Pad/truncate folder names to exactly three level columns.
            levels = (folder_levels + [None] * 3)[:3]
            row.extend(levels)
            row.extend([modify_time, upload_time])
            rows_buffer.append(tuple(row))
            if len(rows_buffer) >= BATCH:
                insert_signal_rows(conn, table_name, rows_buffer)
                inserted += len(rows_buffer)
                rows_buffer.clear()
        if rows_buffer:
            insert_signal_rows(conn, table_name, rows_buffer)
            inserted += len(rows_buffer)

        if verbose:
            print(f"[追加] {file_name}: 新增 {inserted} 行 (总 {n_samples}) -> 表 '{table_name}', 采样率 {sample_rate} Hz")

        # Record/refresh the file's metadata row.
        upsert_meta(conn, rel_path_key, modify_time, sample_rate)

        return "added", rel_path_key

    except Exception as exc:
        if verbose:
            print(f"[错误] 处理 {acq_path} 失败: {exc}")
        traceback.print_exc()
        return "failed", acq_path


def main():
    """Walk PREPROCESS_DIR, ingest every .acq file, then print a summary."""
    print("\n=== ACQ→SQLite 增量转储程序 ===")
    print(f"数据库文件: {DB_PATH}\n")

    conn = sqlite3.connect(DB_PATH)
    ensure_meta_table(conn)

    # Initialise all counters BEFORE the try block: the summary code in
    # ``finally`` reads them, and the original version raised NameError
    # whenever the early return (missing directory) or an exception fired
    # before they were assigned inside the try body.
    total_files = 0
    added = 0
    skipped = 0
    failed = []
    name_counter = defaultdict(int)

    try:
        if not os.path.isdir(PREPROCESS_DIR):
            print(f"[错误] 未找到预处理目录: {PREPROCESS_DIR}")
            return

        print(f"\n--- 遍历预处理目录 '{PREPROCESS_DIR}' ---")
        for root, _, files in os.walk(PREPROCESS_DIR):
            for fname in files:
                if not fname.lower().endswith('.acq'):
                    continue

                fpath = os.path.join(root, fname)
                # Folder levels = relative path components below PREPROCESS_DIR.
                rel_dir = os.path.relpath(root, PREPROCESS_DIR)
                if rel_dir == '.':
                    levels = []
                else:
                    levels = rel_dir.split(os.sep)

                # Count each extracted Chinese name so duplicate files can
                # be reported in the summary.
                cname = extract_person_name(os.path.splitext(fname)[0])
                name_counter[cname] += 1

                # verbose=False suppresses per-file progress output.
                status, key = process_acq_to_db(fpath, levels, conn, verbose=False)
                total_files += 1
                if status == "added":
                    added += 1
                elif status == "skipped":
                    skipped += 1
                elif status == "failed":
                    failed.append(key)
    finally:
        conn.close()

        success = added + skipped
        print("\n===== 处理汇总 =====")
        print(f"共检测到 {total_files} 个 .acq 文件")
        print(f"成功处理 {success} 个，其中新增 {added} 个，跳过 {skipped} 个")
        print(f"失败 {len(failed)} 个")
        if failed:
            print("失败文件列表：")
            for fp in failed:
                print(f"  - {fp}")
        print("====================\n")

        if name_counter:
            duplicate_names = {n: c for n, c in name_counter.items() if c > 1}
            if duplicate_names:
                dup_files = sum(duplicate_names.values())
                print(f"其中 {len(duplicate_names)} 个中文姓名出现重复，共 {dup_files} 份同名文件：")
                for n, c in duplicate_names.items():
                    print(f"  - {n}: {c} 份")
            else:
                print("无同名文件。")

# ---- Script entry point ----
if __name__ == "__main__":
    main()

# === NOTE: the "__main__" guard above runs BEFORE the helpers below are
# defined. main() does not use them, so the script still works, but these
# utilities are only available to code that imports this module. ===

# RR-interval cleaning utility
def _adjust_rr_intervals(rr: np.ndarray) -> np.ndarray:
    MIN_RR_SEC = 0.4
    rr = rr[rr >= MIN_RR_SEC]
    # 针对 0.4–0.6 s 合并相邻 RR
    small = rr[rr < 0.6]
    rr = rr[rr >= 0.6]
    merged = []
    i = 0
    while i < len(small):
        if i + 1 < len(small):
            merged.append(small[i] + small[i + 1])
            i += 2
        else:
            merged.append(0.6)
            i += 1
    rr = np.concatenate([rr, np.array(merged)]) if merged else rr

    # 拆分过长 RR (>1.2s)
    fixed = []
    for r in rr:
        if r < 1.2:
            fixed.append(r)
        elif r < 2.4:
            fixed.append(r / 2)
        elif r < 3.6:
            fixed.append(r / 3)
        elif r < 4.8:
            fixed.append(r / 4)
        else:
            fixed.append(r / 5)
    return np.array(fixed)


# Simple signal filters
from scipy.signal import butter, filtfilt, iirnotch

def _butter_bandpass(data: np.ndarray, low: float, high: float, fs: float, order: int = 4):
    ny = 0.5 * fs
    b, a = butter(order, [low / ny, high / ny], btype="band")
    return filtfilt(b, a, data)

def _butter_lowpass(data: np.ndarray, cutoff: float, fs: float, order: int = 4):
    ny = 0.5 * fs
    b, a = butter(order, cutoff / ny, btype="lowpass")
    return filtfilt(b, a, data)

def _butter_highpass(data: np.ndarray, cutoff: float, fs: float, order: int = 4):
    ny = 0.5 * fs
    b, a = butter(order, cutoff / ny, btype="highpass")
    return filtfilt(b, a, data)

def _notch_filter(data: np.ndarray, freq: float, fs: float, q: int = 30):
    ny = 0.5 * fs
    b, a = iirnotch(freq / ny, q)
    return filtfilt(b, a, data)

def _preprocess_ecg(raw: np.ndarray, fs: float) -> np.ndarray:
    """ECG cleanup chain: 0.5 Hz high-pass, 40 Hz low-pass, 50 Hz notch."""
    filtered = _butter_highpass(raw, 0.5, fs)
    filtered = _butter_lowpass(filtered, 40, fs)
    return _notch_filter(filtered, 50, fs)

def _preprocess_eda(raw: np.ndarray, fs: float, lowpass_hz: float = 5.0, resample_hz: int = 10):
    """Low-pass filter EDA, then decimate towards *resample_hz*.

    Returns ``(signal, effective_sampling_rate)``.  Decimation keeps
    every *factor*-th sample, relying on the preceding low-pass stage
    as the anti-alias filter.  When no decimation happens the original
    *fs* is returned unchanged.
    """
    smoothed = _butter_lowpass(raw, lowpass_hz, fs)
    if fs > resample_hz:
        factor = int(round(fs / resample_hz))
    else:
        factor = 1
    if factor > 1:
        return smoothed[::factor], resample_hz
    return smoothed, fs


SCR_THRESHOLD = 0.05  # μS

def compute_core_metrics(df, fs: float):
    """Compute core HRV/EDA metrics from a signal DataFrame; None on failure.

    NOTE(review): ``nk`` (presumably neurokit2) and ``cvxEDA`` are never
    imported in this file — calling this function as-is raises NameError.
    Confirm where these are meant to come from.

    NOTE(review): *df* is presumably a pandas DataFrame with one column
    per channel plus the bookkeeping columns excluded below — verify
    against the caller.
    """
    # Numeric signal columns, excluding bookkeeping columns.
    num_cols = [c for c in df.columns if df[c].dtype != object and c not in ("sample_idx", "modify_time", "upload_time", "level1", "level2", "level3", "level4")]
    if len(num_cols) < 2:
        return None
    # Heuristic channel matching by name; falls back to positional columns.
    ecg_col = next((c for c in num_cols if "ecg" in c.lower() or "ekg" in c.lower()), num_cols[0])
    eda_col = next((c for c in num_cols if "eda" in c.lower() or "gsr" in c.lower()), num_cols[1])

    ecg_mv = df[ecg_col].values / 1000.0  # assumes ECG stored in μV; convert to mV — TODO confirm
    eda = df[eda_col].values

    ecg_filt = _preprocess_ecg(ecg_mv, fs)
    try:
        _, info = nk.ecg_process(ecg_filt, sampling_rate=fs)
        r_peaks = info.get("ECG_R_Peaks", [])
        if len(r_peaks) < 3:
            return None
        raw_rr = np.diff(r_peaks) / fs
        adj_rr = _adjust_rr_intervals(raw_rr)
        if len(adj_rr) == 0:
            return None
        nn_ms = adj_rr * 1000.0
        rri_dict = {"RRI": nn_ms}
        metrics = {}
        metrics.update(nk.hrv_time(rri_dict, show=False).iloc[0].to_dict())
        metrics.update(nk.hrv_frequency(rri_dict, show=False).iloc[0].to_dict())
        metrics["Mean_HR_bpm"] = 60.0 / np.mean(adj_rr)
        mean_rr = metrics.get("HRV_MeanNN", np.nan)
        mean_hr = metrics.get("Mean_HR_bpm", np.nan)
        lf_hf = metrics.get("HRV_LFHF", np.nan)
    except Exception:
        return None

    # EDA decomposition into phasic/tonic components.
    eda_proc, new_fs = _preprocess_eda(eda, fs)
    try:
        import contextlib, io
        # Suppress cvxEDA's solver chatter on stdout.
        with contextlib.redirect_stdout(io.StringIO()):
            phasic, _, tonic, *_ = cvxEDA(eda_proc, 1.0 / new_fs)
    except Exception:
        # NOTE(review): this retry re-runs cvxEDA without redirection; if
        # the first call failed for a real reason the retry fails too.
        phasic, _, tonic, *_ = cvxEDA(eda_proc, 1.0 / new_fs)

    scl_mean = float(np.mean(tonic))
    scl_slope = (tonic[-1] - tonic[0]) / (len(tonic) / new_fs) if len(tonic) > 0 else 0.0
    peaks = nk.signal_findpeaks(phasic, height_min=SCR_THRESHOLD)
    scr_freq = len(peaks["Peaks"]) / (len(phasic) / new_fs / 60.0) if len(phasic) > 0 else 0.0

    return {
        "mean_rr": float(mean_rr),
        "mean_hr": float(mean_hr),
        "lf_hf": float(lf_hf),
        "scl_mean": scl_mean,
        "scl_slope": scl_slope,
        "scr_freq": float(scr_freq),
    } 