# -*- coding: utf-8 -*-
"""
ECG & EDA 数据处理核心模块

基于 neurokit2 实现 ECG/EDA 指标计算。
优化版本：30秒分段，3分30秒最大时长，多线程处理
"""

import warnings
# Silence noisy warning categories emitted by the scientific stack during
# long batch runs (numerical RuntimeWarnings, library DeprecationWarnings).
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)

import csv
import datetime
import os
import re
import sqlite3
import threading
import multiprocessing
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor, as_completed
from typing import Tuple, Optional, List
import numpy as np
import pandas as pd
import neurokit2 as nk
from cvxEDA import cvxEDA
import queue
import time
import psutil

# Global constants
ECG_UNIT_DIVISOR = 1000.0  # unit-conversion divisor for raw ECG (usage not shown in this chunk)
MIN_RR_SEC = 0.4           # minimum plausible RR interval, seconds
SCR_THRESHOLD = 0.01       # minimum peak height (uS) counted as an SCR event
DB_PATH = os.path.join(os.path.dirname(__file__), "physio_data.db")
META_TABLE = "acq_meta"
DEFAULT_SAVE_ENCODING = "utf-8-sig"

# Segmentation: 30-second segments, capped at 3 min 30 s of data per file.
SEGMENT_DURATION_SEC = 30  # segment length, seconds
MAX_DURATION_SEC = 210     # 3 min 30 s = 210 s
MAX_SEGMENTS = MAX_DURATION_SEC // SEGMENT_DURATION_SEC  # 7 segments of 30 s

# Threading configuration, tuned for a 24-thread i9-14900 with 60 GB RAM.
CPU_COUNT = multiprocessing.cpu_count()  # actual logical CPU count
MAX_WORKERS = min(CPU_COUNT, 20)  # cap worker threads to avoid oversubscription
BATCH_SIZE = 50  # number of files per processing batch
DB_WRITE_BATCH_SIZE = 100  # rows per batched database write


# =============================================================================
# 基础工具函数
# =============================================================================

def adjust_rr_intervals(rr_intervals: np.ndarray) -> np.ndarray:
    """Merge/split RR intervals to compensate for very fast or slow beats.

    Intervals in [0.4, 0.6) s are doubled (two fast beats treated as one);
    intervals >= 1.2 s are halved into two equal intervals (one slow beat
    split). All other intervals pass through unchanged.
    """
    if len(rr_intervals) == 0:
        return np.array([])
    result = []
    for interval in rr_intervals:
        if interval >= 1.2:
            # Heart rate too slow: split the interval in half.
            half = interval / 2
            result += [half, half]
        elif 0.4 <= interval < 0.6:
            # Heart rate too fast: merge two adjacent beats into one.
            result.append(interval * 2)
        else:
            result.append(interval)
    return np.array(result)


def butter_filter(data: np.ndarray, lowcut: float, highcut: float, fs: float, *, order: int = 4, btype: str = "band") -> np.ndarray:
    """Butterworth filter (wraps neurokit2).

    Args:
        data: 1-D signal to filter.
        lowcut: lower cutoff in Hz (ignored for ``btype="lowpass"``).
        highcut: upper cutoff in Hz (ignored for ``btype="highpass"``).
        fs: sampling rate in Hz.
        order: filter order.
        btype: "band", "lowpass" or "highpass".

    Returns:
        The filtered signal, or the input unchanged if filtering fails.

    Bug fix: the previous code passed ``method=btype`` (e.g. "band") to
    ``nk.signal_filter``, which is not a valid method name, so the call
    always raised and the raw signal was silently returned unfiltered.
    neurokit2 infers the band type from which cutoffs are supplied, so we
    pass ``method="butterworth"`` and only the cutoffs relevant to *btype*
    (a zero/None lowcut selects low-pass behaviour).
    """
    try:
        low = lowcut if btype in ("band", "highpass") and lowcut else None
        high = highcut if btype in ("band", "lowpass") else None
        return nk.signal_filter(data, sampling_rate=fs, lowcut=low, highcut=high, order=order, method="butterworth")
    except Exception:
        # Best-effort: return the raw signal rather than aborting the pipeline.
        return data


def notch_filter(data: np.ndarray, fs: float, *, freq: float = 50.0, Q: float = 30.0) -> np.ndarray:
    """Power-line notch filter (wraps neurokit2).

    Args:
        data: 1-D signal to filter.
        fs: sampling rate in Hz.
        freq: power-line frequency to remove (Hz).
        Q: quality factor — kept for backward compatibility; neurokit2's
           "powerline" method has no Q parameter, so it is unused.

    Returns:
        The filtered signal, or the input unchanged if filtering fails.

    Bug fix: the previous code passed ``powerline=Q`` (the quality factor,
    30) as the power-line frequency and left ``method`` at its default, so
    ``nk.signal_filter`` raised (no cutoffs supplied) and the data came back
    unfiltered. We now select ``method="powerline"`` with ``powerline=freq``.
    """
    try:
        return nk.signal_filter(data, sampling_rate=fs, method="powerline", powerline=freq)
    except Exception:
        # Best-effort: return the raw signal rather than aborting the pipeline.
        return data


# =============================================================================
# 文件/通道函数
# =============================================================================

def find_ecg_eda_indices(obj_with_channels) -> Tuple[Optional[int], Optional[int]]:
    """Locate the ECG and EDA channel indices by channel name.

    The first channel whose lowercased name contains "ecg"/"ekg" is taken as
    ECG; otherwise, the first containing "eda"/"gsr" is taken as EDA.
    Returns ``(ecg_index, eda_index)`` with ``None`` for a missing channel.
    """
    ecg_idx: Optional[int] = None
    eda_idx: Optional[int] = None
    for idx, channel in enumerate(obj_with_channels.channels):
        lowered = channel.name.lower()
        if ecg_idx is None and any(tag in lowered for tag in ("ecg", "ekg")):
            ecg_idx = idx
        elif eda_idx is None and any(tag in lowered for tag in ("eda", "gsr")):
            eda_idx = idx
        if ecg_idx is not None and eda_idx is not None:
            break  # both found — no need to scan further
    return ecg_idx, eda_idx


# =============================================================================
# 数据库函数
# =============================================================================

def list_tables(conn: sqlite3.Connection) -> List[str]:
    """Return the names of all user tables in the connected database."""
    rows = conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name!='sqlite_sequence'"
    ).fetchall()
    return [name for (name,) in rows]


def match_tables(pattern: str, table_list: List[str]) -> List[str]:
    """Case-insensitively filter *table_list* to names containing *pattern*."""
    needle = pattern.lower()
    matched = []
    for table in table_list:
        if needle in table.lower():
            matched.append(table)
    return matched


def load_table(conn: sqlite3.Connection, table_name: str) -> pd.DataFrame:
    """Read an entire table into a DataFrame.

    Note: *table_name* is interpolated into the SQL, so it must come from a
    trusted source (e.g. ``list_tables``), not from user input.
    """
    query = f"SELECT * FROM `{table_name}`"
    return pd.read_sql_query(query, conn)


# =============================================================================
# ECG 处理函数
# =============================================================================

def preprocess_ecg(raw_ecg: np.ndarray, fs: float) -> np.ndarray:
    """Clean a raw ECG trace: 0.5-40 Hz band-pass, then a 50 Hz notch filter."""
    bandpassed = butter_filter(raw_ecg, 0.5, 40.0, fs, btype="band")
    return notch_filter(bandpassed, fs, freq=50.0)


def extract_ecg_metrics(ecg_filtered: np.ndarray, fs: float) -> Optional[dict]:
    """Extract ECG metrics: R-peak detection followed by HRV analysis.

    Returns a dict merging neurokit2 time-domain, frequency-domain and
    non-linear HRV metrics plus ``Mean_HR_bpm``, or ``None`` when fewer than
    three R peaks are found, all intervals are rejected, or anything fails.
    """
    try:
        # R-peak detection via neurokit2's full ECG pipeline.
        _, info = nk.ecg_process(ecg_filtered, sampling_rate=fs)
        r_peaks = info.get("ECG_R_Peaks", [])
        if len(r_peaks) < 3:
            return None

        # RR intervals in seconds, then the merge/split correction.
        rr_sec = adjust_rr_intervals(np.diff(r_peaks) / fs)
        if len(rr_sec) == 0:
            return None

        rri_dict = {"RRI": rr_sec * 1000.0}  # neurokit2 expects milliseconds

        # Collect the three HRV metric families into one flat dict
        # (time-domain first, then frequency-domain, then non-linear).
        metrics: dict = {}
        for hrv_fn in (nk.hrv_time, nk.hrv_frequency, nk.hrv_nonlinear):
            metrics.update(hrv_fn(rri_dict, show=False).iloc[0].to_dict())

        # Convenience value: mean heart rate in beats per minute.
        metrics["Mean_HR_bpm"] = 60.0 / np.mean(rr_sec)
        return metrics
    except Exception as exc:
        print(f"[错误] ECG 指标计算失败：{exc}")
        return None


# =============================================================================
# EDA 处理函数
# =============================================================================

def preprocess_eda(raw_eda: np.ndarray, fs: float, *, lowpass_hz: float = 5.0, resample_hz: int = 10) -> Tuple[np.ndarray, int]:
    """Low-pass filter the EDA signal and downsample it.

    Returns ``(processed_signal, new_sampling_rate)`` where the new rate
    equals *resample_hz*.
    """
    smoothed = butter_filter(raw_eda, 0, lowpass_hz, fs, btype="lowpass")
    target_length = int(len(smoothed) * resample_hz / fs)
    downsampled = nk.signal_resample(smoothed, sampling_rate=fs, desired_length=target_length)
    return downsampled, resample_hz


def extract_eda_features(eda_processed: np.ndarray, fs: int) -> Tuple[float, float]:
    """Decompose EDA via cvxEDA and compute mean SCL and SCR frequency.

    Returns ``(scl_mean, scr_per_minute)``; both stay 0.0 when decomposition
    or peak detection fails.
    """
    scl_mean = 0.0
    scr_freq = 0.0
    try:
        phasic, _, tonic, *_ = cvxEDA(eda_processed, 1.0 / fs)
        scl_mean = float(np.mean(tonic))
        minutes = len(phasic) / fs / 60.0
        detected = nk.signal_findpeaks(phasic, height_min=SCR_THRESHOLD)
        scr_freq = len(detected["Peaks"]) / minutes if minutes > 0 else 0.0
    except Exception as exc:
        print(f"[错误] EDA 特征提取失败：{exc}")
    return scl_mean, scr_freq


# =============================================================================
# ACQ 文件处理函数
# =============================================================================

def load_acq_file(file_path: str) -> Tuple[np.ndarray, np.ndarray, float]:
    """Load an ACQ file and return ``(ecg_data, eda_data, sampling_rate)``.

    The sampling rate is taken from the ECG channel.

    Raises:
        ImportError: if the optional ``bioread`` dependency is missing.
        ValueError: if the file cannot be read or lacks ECG/EDA channels.
    """
    try:
        import bioread  # optional dependency, imported lazily

        recording = bioread.read(file_path)

        # Identify the ECG/EDA channels by name.
        ecg_idx, eda_idx = find_ecg_eda_indices(recording)
        if ecg_idx is None or eda_idx is None:
            raise ValueError("未找到有效的 ECG 或 EDA 通道")

        return (
            recording.channels[ecg_idx].data,
            recording.channels[eda_idx].data,
            recording.channels[ecg_idx].samples_per_second,
        )
    except ImportError:
        raise ImportError("需要安装 bioread 库来读取 ACQ 文件")
    except Exception as e:
        # Missing channels raised above are re-wrapped here, same as any
        # other read failure.
        raise ValueError(f"加载 ACQ 文件失败: {e}")


# =============================================================================
# 多线程安全的数据库操作类
# =============================================================================

class ThreadSafeDBManager:
    """Thread-safe SQLite writer.

    Producers enqueue ``(sql, params)`` pairs via :meth:`queue_write`; a
    single background thread batches them and commits in groups of
    ``DB_WRITE_BATCH_SIZE`` to limit transaction overhead. Call
    :meth:`stop` to flush and shut down.
    """

    def __init__(self, db_path):
        self.db_path = db_path
        self._lock = threading.Lock()
        self._write_queue = queue.Queue()
        self._stop_event = threading.Event()
        self._writer_thread = None
        self.start_writer_thread()

    def start_writer_thread(self):
        """Start the background database writer thread (daemonized)."""
        self._writer_thread = threading.Thread(target=self._db_writer_worker, daemon=True)
        self._writer_thread.start()

    def _db_writer_worker(self):
        """Consume queued writes, flushing in batches.

        Flushes when the batch is full, when the queue has been idle for one
        second, and once more on shutdown. Bug fix: the original loop checked
        the stop event at the top of each iteration, so setting the event
        while items were still queued lost those items; we now drain the
        queue before exiting so no writes are dropped on shutdown.
        """
        batch_data = []
        while not self._stop_event.is_set():
            try:
                data = self._write_queue.get(timeout=1.0)
            except queue.Empty:
                # Idle: flush whatever has accumulated so far.
                if batch_data:
                    self._batch_write(batch_data)
                    batch_data = []
                continue
            if data is None:  # stop sentinel
                break
            batch_data.append(data)
            if len(batch_data) >= DB_WRITE_BATCH_SIZE:
                self._batch_write(batch_data)
                batch_data = []

        # Drain anything still queued after stop was requested.
        while True:
            try:
                data = self._write_queue.get_nowait()
            except queue.Empty:
                break
            if data is not None:
                batch_data.append(data)
        if batch_data:
            self._batch_write(batch_data)

    def _batch_write(self, batch_data):
        """Write a batch inside one transaction.

        Bug fix: previously one bad statement aborted the whole batch and
        every row in it was lost; on failure we now retry row by row so the
        valid rows are still persisted.
        """
        try:
            with sqlite3.connect(self.db_path) as conn:
                for sql, data in batch_data:
                    conn.execute(sql, data)
                conn.commit()
        except Exception as e:
            print(f"[错误] 批量写入数据库失败: {e}")
            # Fallback: salvage the rows that can still be written.
            try:
                with sqlite3.connect(self.db_path) as conn:
                    for sql, data in batch_data:
                        try:
                            conn.execute(sql, data)
                        except Exception:
                            continue
                    conn.commit()
            except Exception:
                pass

    def queue_write(self, sql, data):
        """Enqueue one (sql, params) write; returns immediately."""
        self._write_queue.put((sql, data))

    def stop(self):
        """Signal the writer thread to finish and wait for it to exit."""
        self._stop_event.set()
        self._write_queue.put(None)  # wake the worker with the stop sentinel
        if self._writer_thread:
            self._writer_thread.join()


# =============================================================================
# 时间动态分析类（优化版）
# =============================================================================

class TimeDynamicAnalyzer:
    def __init__(self, base_path, db_path):
        """Create the analyzer: remember paths, ensure the DB schema exists,
        and start the thread-safe background writer."""
        self.base_path = base_path
        self.db_path = db_path
        # Schema (tables/indices) must exist before any writes are queued.
        self.setup_database()
        # Background writer used by save_segment_metrics().
        self.db_manager = ThreadSafeDBManager(self.db_path)
    
    def __del__(self):
        """Best-effort writer shutdown when the analyzer is garbage-collected."""
        manager = getattr(self, 'db_manager', None)
        if manager is not None:
            manager.stop()
    
    def cleanup(self):
        """Explicitly release resources (preferred over relying on __del__)."""
        manager = getattr(self, 'db_manager', None)
        if manager is not None:
            manager.stop()
            print("[信息] 数据库管理器已关闭")
    
    def setup_database(self):
        """Initialise the database schema (30-second-segment layout).

        Creates three tables if absent — ``time_dynamic_analysis``
        (per-person, per-segment metrics), ``group_statistics`` (per-group
        aggregates) and ``processing_metadata`` (batch bookkeeping) — then
        runs a one-off de-duplication / unique-index migration on any
        pre-existing data.
        """
        # Per-segment analysis table (column renamed to segment_index; a
        # UNIQUE constraint keeps one row per person/stage/segment/file).
        create_time_dynamic_table = """
        CREATE TABLE IF NOT EXISTS time_dynamic_analysis (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            stage_id INTEGER NOT NULL,
            group_type TEXT NOT NULL,
            person_name TEXT NOT NULL,
            segment_index INTEGER NOT NULL,  -- 改为segment_index（30秒段索引）
            timestamp_start REAL,
            timestamp_end REAL,
            segment_duration_sec REAL DEFAULT 30,  -- 新增：段时长（秒），支持小数
            
            -- ECG指标
            mhr_bpm REAL,
            rr_intervals_count INTEGER,
            rr_intervals_mean REAL,
            rr_intervals_std REAL,
            hrv_lf REAL,
            hrv_hf REAL,
            hrv_lf_hf REAL,
            
            -- EDA指标
            scl_mean_uS REAL,
            scl_std_uS REAL,
            scl_slope_uS_per_sec REAL,
            scr_count INTEGER,
            scr_frequency_per_min REAL,
            scr_mean_amplitude_uS REAL,
            
            -- 数据质量标记
            data_quality_score REAL,
            signal_quality_ecg TEXT,
            signal_quality_eda TEXT,
            
            -- 元数据
            file_path TEXT,
            sample_rate INTEGER,
            processing_time TEXT,
            created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
            
            -- 唯一约束：确保同一人、同一阶段、同一段、同一文件的记录只保留一条
            UNIQUE(stage_id, group_type, person_name, segment_index, file_path)
        );
        """
        
        # Per-group aggregate table (also keyed by 30-second segment_index).
        create_group_statistics_table = """
        CREATE TABLE IF NOT EXISTS group_statistics (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            stage_id INTEGER NOT NULL,
            group_type TEXT NOT NULL,
            segment_index INTEGER NOT NULL,  -- 改为segment_index（30秒段索引）
            
            -- 群体统计量
            mhr_bpm_mean REAL,
            mhr_bpm_std REAL,
            hrv_lf_hf_mean REAL,
            hrv_lf_hf_std REAL,
            scl_mean_uS_mean REAL,
            scl_mean_uS_std REAL,
            scr_frequency_per_min_mean REAL,
            scr_frequency_per_min_std REAL,
            
            -- 样本信息
            participant_count INTEGER,
            data_quality_mean REAL,
            
            created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
            
            -- 唯一约束：确保同一阶段、同一组别、同一段的统计数据只保留一条
            UNIQUE(stage_id, group_type, segment_index)
        );
        """
        
        # Batch-processing bookkeeping table.
        create_processing_metadata_table = """
        CREATE TABLE IF NOT EXISTS processing_metadata (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            stage_id INTEGER NOT NULL,
            group_type TEXT NOT NULL,
            total_files INTEGER,
            processed_files INTEGER,
            failed_files INTEGER,
            total_minutes INTEGER,
            processing_start_time TEXT,
            processing_end_time TEXT,
            processing_status TEXT,
            error_log TEXT,
            created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
        );
        """
        
        with sqlite3.connect(self.db_path) as conn:
            conn.execute(create_time_dynamic_table)
            conn.execute(create_group_statistics_table)
            conn.execute(create_processing_metadata_table)
            conn.commit()
            # One-off: de-duplicate historical rows and create the unique
            # index so the UNIQUE constraint also holds for older tables.
            self._deduplicate_and_create_unique_index(conn)
            
    def _deduplicate_and_create_unique_index(self, conn: sqlite3.Connection) -> None:
        """De-duplicate historical rows and create a unique index so later
        inserts cannot duplicate them.

        Rule: for the same (person, stage, segment, source file) keep exactly
        one row — the newest, i.e. the one with the largest id. Also migrates
        a legacy ``minute_index`` column to ``segment_index`` if present.
        """
        try:
            # If the legacy minute_index column exists (and segment_index
            # does not), migrate the old 60-second data in place.
            cursor = conn.execute("PRAGMA table_info(time_dynamic_analysis)")
            columns = [row[1] for row in cursor.fetchall()]
            
            if 'minute_index' in columns and 'segment_index' not in columns:
                # Add the new columns first.
                conn.execute("ALTER TABLE time_dynamic_analysis ADD COLUMN segment_index INTEGER")
                conn.execute("ALTER TABLE time_dynamic_analysis ADD COLUMN segment_duration_sec INTEGER DEFAULT 30")
                # Migration formula: segment_index = minute_index * 2 + 1
                # (60 s minutes became 30 s segments; indices start at 1).
                conn.execute("UPDATE time_dynamic_analysis SET segment_index = minute_index * 2 + 1 WHERE segment_index IS NULL")
                conn.commit()
                print("[信息] 已将minute_index迁移为segment_index")
            
            # Delete duplicates, keeping only the row with the largest id
            # within each group.
            delete_sql = (
                """
                DELETE FROM time_dynamic_analysis
                WHERE id NOT IN (
                    SELECT MAX(id) FROM time_dynamic_analysis
                    GROUP BY stage_id, group_type, person_name, segment_index, file_path
                )
                """
            )
            conn.execute(delete_sql)
            # Create the unique index so the constraint is enforced from now on.
            create_unique_idx = (
                """
                CREATE UNIQUE INDEX IF NOT EXISTS idx_tda_unique
                ON time_dynamic_analysis(stage_id, group_type, person_name, segment_index, file_path)
                """
            )
            conn.execute(create_unique_idx)
            conn.commit()
        except Exception as e:
            print(f"[警告] 去重或创建唯一索引时出错：{e}")
        
    def process_all_stages(self):
        """Discover every ACQ file under stages 0-4 and process them in a
        thread pool, one task per file. Failures are logged, not raised."""
        print(f"[信息] 使用 {MAX_WORKERS} 个工作线程进行并行处理")

        # Phase 1: enumerate (stage, group, file, dir) tasks for stages 0-4.
        all_tasks = []
        for stage_id in range(5):
            stage_path = os.path.join(self.base_path, f"阶段{stage_id}")
            if not os.path.exists(stage_path):
                print(f"[警告] 阶段路径不存在: {stage_path}")
                continue

            for group_type in ["实验组", "对照组"]:
                group_path = os.path.join(stage_path, group_type)
                if not os.path.exists(group_path):
                    print(f"[警告] 组别路径不存在: {group_path}")
                    continue

                acq_files = [f for f in os.listdir(group_path) if f.endswith('.acq')]
                print(f"[阶段{stage_id}][{group_type}] 发现 {len(acq_files)} 个ACQ文件")
                all_tasks.extend(
                    (stage_id, group_type, file_name, group_path)
                    for file_name in acq_files
                )

        print(f"[信息] 总共发现 {len(all_tasks)} 个文件待处理")

        # Phase 2: fan the tasks out over the worker pool.
        completed = 0
        with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
            future_to_task = {
                executor.submit(self._process_file_wrapper, task): task
                for task in all_tasks
            }
            for future in as_completed(future_to_task):
                task = future_to_task[future]
                try:
                    future.result()
                except Exception as e:
                    stage_id, group_type, file_name, group_path = task
                    print(f"[错误] 处理文件失败 {file_name}: {e}")
                else:
                    completed += 1
                    # Progress report every ten finished files.
                    if completed % 10 == 0:
                        print(f"[进度] 已完成 {completed}/{len(all_tasks)} 个文件")

        print(f"[完成] 所有文件处理完成，共处理 {completed} 个文件")
    
    def _process_file_wrapper(self, task):
        """Unpack a (stage_id, group_type, file_name, group_path) task tuple
        for thread-pool submission."""
        return self.process_file(*task)
    
    def process_stage(self, stage_id):
        """Process one stage directory sequentially (legacy/compat path)."""
        stage_path = os.path.join(self.base_path, f"阶段{stage_id}")
        if not os.path.exists(stage_path):
            print(f"[警告] 阶段路径不存在: {stage_path}")
            return
        for group_type in ("实验组", "对照组"):
            self.process_group(stage_id, group_type, stage_path)
    
    def process_group(self, stage_id, group_type, stage_path):
        """Process every ACQ file of one group sequentially (legacy/compat path)."""
        group_path = os.path.join(stage_path, group_type)
        if not os.path.exists(group_path):
            print(f"[警告] 组别路径不存在: {group_path}")
            return

        acq_files = [name for name in os.listdir(group_path) if name.endswith('.acq')]
        print(f"[阶段{stage_id}][{group_type}] 发现 {len(acq_files)} 个ACQ文件")

        for file_name in acq_files:
            self.process_file(stage_id, group_type, file_name, group_path)
    
    def process_file(self, stage_id, group_type, file_name, group_path):
        """Analyse one ACQ file in 30-second segments and persist each
        segment's metrics. Errors are logged, never raised."""
        file_path = os.path.join(group_path, file_name)
        person_name = self.extract_person_name(file_name)

        try:
            # Load the raw signals, then cut into at most 7 x 30 s segments.
            ecg_data, eda_data, fs = load_acq_file(file_path)
            segments = self.split_data_by_segments(ecg_data, eda_data, fs)

            for seg in segments:
                metrics = self.analyze_segment(seg['ecg_data'], seg['eda_data'], fs)
                if not metrics:
                    continue  # segment yielded nothing usable
                self.save_segment_metrics(
                    stage_id, group_type, person_name,
                    seg['segment_index'], metrics,
                    file_path, fs, seg
                )

            print(f"[完成] {file_path} 处理完成，共处理 {len(segments)} 个30秒分段")
        except Exception as e:
            print(f"[错误] 处理文件 {file_path} 失败: {e}")
    
    def extract_person_name(self, file_name):
        """从文件名提取人名"""
        # 移除扩展名
        base_name = os.path.splitext(file_name)[0]
        # 提取中文姓名
        chinese_parts = re.findall(r'[\u4e00-\u9fa5]+', base_name)
        return ''.join(chinese_parts) if chinese_parts else base_name
    
    def split_data_by_segments(self, ecg_data, eda_data, fs):
        """Cut the ECG/EDA signals into 30-second segments, keeping at most
        MAX_SEGMENTS (3 min 30 s total). A short trailing segment is kept,
        so no data is dropped. Returns a list of segment dicts."""
        samples_per_segment = int(fs * SEGMENT_DURATION_SEC)
        total_samples = min(len(ecg_data), len(eda_data))
        original_duration = total_samples / fs

        print(f"[调试] 原始数据时长: {original_duration:.1f}秒 ({total_samples}样本, 采样率{fs}Hz)")

        # Truncate anything beyond the 210-second cap.
        max_samples = int(fs * MAX_DURATION_SEC)
        if total_samples > max_samples:
            print(f"[信息] 数据时长超过{MAX_DURATION_SEC}秒，截取前{MAX_DURATION_SEC}秒数据")
            total_samples = max_samples
            ecg_data = ecg_data[:max_samples]
            eda_data = eda_data[:max_samples]
        else:
            print(f"[信息] 数据时长{original_duration:.1f}秒，未超过{MAX_DURATION_SEC}秒限制")

        segments = []
        # Segment numbering starts at 1.
        for seg_number, start_idx in enumerate(range(0, total_samples, samples_per_segment), start=1):
            if seg_number > MAX_SEGMENTS:
                print(f"[信息] 已达到最大分段数{MAX_SEGMENTS}，停止分割")
                break

            end_idx = min(start_idx + samples_per_segment, total_samples)
            duration = (end_idx - start_idx) / fs

            segments.append({
                'segment_index': seg_number,
                'ecg_data': ecg_data[start_idx:end_idx],
                'eda_data': eda_data[start_idx:end_idx],
                'start_time': start_idx / fs,
                'end_time': end_idx / fs,
                'duration': duration,
                'segment_duration_sec': duration,  # actual (possibly short) length
            })

        print(f"[信息] 成功分割出 {len(segments)} 个30秒分段")
        return segments
    
    def analyze_segment(self, ecg_segment, eda_segment, fs):
        """Compute ECG + EDA + quality metrics for one 30-second segment.

        NaN values are replaced with None so SQLite inserts don't fail."""
        combined = {
            **self.analyze_ecg_segment(ecg_segment, fs),
            **self.analyze_eda_segment(eda_segment, fs),
            **self.assess_data_quality(ecg_segment, eda_segment),
        }
        # NaN -> None; non-float values pass through untouched.
        return {
            key: (None if isinstance(val, (float, np.floating)) and np.isnan(val) else val)
            for key, val in combined.items()
        }
    
    def analyze_ecg_segment(self, ecg_segment, fs):
        """Run the ECG pipeline on one segment: filter -> R-peak detection ->
        RR intervals -> heart rate and frequency-domain HRV."""
        filtered = self.filter_ecg(ecg_segment, fs)
        r_peaks = self.detect_r_peaks(filtered, fs)
        rr_ms = self.calculate_rr_intervals(r_peaks, fs)

        has_rr = len(rr_ms) > 0
        metrics = {
            'mhr_bpm': self.calculate_mhr(rr_ms),
            'rr_intervals_count': len(rr_ms),
            'rr_intervals_mean': np.mean(rr_ms) if has_rr else None,
            'rr_intervals_std': np.std(rr_ms) if has_rr else None,
        }
        metrics.update(self.calculate_hrv_metrics(rr_ms))
        return metrics
    
    def analyze_eda_segment(self, eda_segment, fs):
        """Run the EDA pipeline on one segment: preprocess -> cvxEDA
        decomposition -> SCL (tonic) and SCR (phasic) metrics.

        Bug fix: preprocessing resamples the signal to a new rate (10 Hz by
        default), but the original code kept using the acquisition rate *fs*
        for the cvxEDA time step and the SCL/SCR duration maths, which
        inflated SCR-per-minute and slope values. We now call the
        module-level ``preprocess_eda`` directly (the ``self.preprocess_eda``
        wrapper discards the new rate) and use the post-resampling rate
        throughout.
        """
        # Module-level helper returns (signal, new_sampling_rate).
        processed_eda, eda_fs = preprocess_eda(eda_segment, fs)

        # cvxEDA decomposition at the *resampled* rate.
        phasic, _, tonic, _, _, _, _ = self.cvxeda_decomposition(processed_eda, eda_fs)

        # Tonic (SCL) and phasic (SCR) metrics, durations at the resampled rate.
        scl_metrics = self.calculate_scl_metrics(tonic, eda_fs)
        scr_metrics = self.calculate_scr_metrics(phasic, eda_fs)

        return {**scl_metrics, **scr_metrics}
    
    def filter_ecg(self, ecg_data, fs):
        """Band-pass + notch filter the ECG (delegates to module-level preprocess_ecg)."""
        filtered = preprocess_ecg(ecg_data, fs)
        return filtered
    
    def detect_r_peaks(self, ecg_filtered, fs):
        """Return R-peak sample indices via neurokit2; empty list on failure."""
        try:
            _, info = nk.ecg_process(ecg_filtered, sampling_rate=fs)
        except Exception:
            return []
        return info.get("ECG_R_Peaks", [])
    
    def calculate_rr_intervals(self, r_peaks, fs):
        """计算RR间期"""
        if len(r_peaks) < 2:
            return np.array([])
        return np.diff(r_peaks) / fs * 1000.0  # 转换为毫秒
    
    def calculate_mhr(self, rr_intervals):
        """计算平均心率
        MHR = 60 / (1/n * Σ(RR_i))
        """
        if len(rr_intervals) == 0:
            return None
        
        mean_rr_seconds = np.mean(rr_intervals) / 1000.0  # 转换为秒
        mhr_bpm = 60.0 / mean_rr_seconds
        
        return mhr_bpm
    
    def calculate_hrv_metrics(self, rr_intervals):
        """计算HRV频域指标
        LF/HF = ∫(0.04-0.15Hz) P(f)df / ∫(0.15-0.4Hz) P(f)df
        """
        if rr_intervals is None or len(rr_intervals) < 10:  # 更保守的阈值
            return {'hrv_lf': None, 'hrv_hf': None, 'hrv_lf_hf': None}
        rr_ms = np.array(rr_intervals, dtype=float)
        if np.any(~np.isfinite(rr_ms)):
            return {'hrv_lf': None, 'hrv_hf': None, 'hrv_lf_hf': None}

        # 重采样到等间隔
        time_axis = np.cumsum(rr_ms) / 1000.0  # 转换为秒
        if time_axis[-1] <= 1e-6:
            return {'hrv_lf': None, 'hrv_hf': None, 'hrv_lf_hf': None}
        fs_resample = 4
        new_time_axis = np.arange(0, time_axis[-1], 1.0/fs_resample)
        if len(new_time_axis) < 16:
            return {'hrv_lf': None, 'hrv_hf': None, 'hrv_lf_hf': None}
        rr_interpolated = np.interp(new_time_axis, time_axis, rr_ms)

        # 计算功率谱密度
        from scipy import signal
        nperseg = min(256, len(rr_interpolated))
        if nperseg < 16:
            return {'hrv_lf': None, 'hrv_hf': None, 'hrv_lf_hf': None}
        freqs, psd = signal.welch(rr_interpolated, fs=fs_resample, nperseg=nperseg)

        # 计算LF和HF功率，使用 numpy.trapezoid 替代已弃用的 trapz
        lf_mask = (freqs >= 0.04) & (freqs <= 0.15)
        hf_mask = (freqs >= 0.15) & (freqs <= 0.4)
        lf_power = float(np.trapezoid(psd[lf_mask], freqs[lf_mask])) if np.any(lf_mask) else 0.0
        hf_power = float(np.trapezoid(psd[hf_mask], freqs[hf_mask])) if np.any(hf_mask) else 0.0
        lf_hf_ratio = (lf_power / hf_power) if hf_power > 0 else None
        return {'hrv_lf': lf_power, 'hrv_hf': hf_power, 'hrv_lf_hf': lf_hf_ratio}
    
    def preprocess_eda(self, eda_data, fs):
        """Low-pass + downsample the EDA signal via the module-level helper.
        Note: the new sampling rate returned by the helper is discarded here."""
        filtered_signal, _new_rate = preprocess_eda(eda_data, fs)
        return filtered_signal
    
    def cvxeda_decomposition(self, eda_processed, fs):
        """Run cvxEDA and return its 7-tuple (r, p, t, l, d, e, obj).

        Returns seven empty arrays if the solver fails, keeping the caller's
        unpacking intact."""
        try:
            r, p, t, l, d, e, obj = cvxEDA(eda_processed, 1.0 / fs)
        except Exception:
            return tuple(np.array([]) for _ in range(7))
        return (r, p, t, l, d, e, obj)
    
    def calculate_scl_metrics(self, tonic_signal, fs):
        """计算SCL指标
        SCL = 1/m * Σ(EDA_j^slow)
        """
        if len(tonic_signal) == 0:
            return {'scl_mean_uS': None, 'scl_std_uS': None, 'scl_slope_uS_per_sec': None}
        
        scl_mean = np.mean(tonic_signal)
        scl_std = np.std(tonic_signal)
        
        # 计算斜率
        duration_seconds = len(tonic_signal) / fs
        if duration_seconds > 0:
            scl_slope = (tonic_signal[-1] - tonic_signal[0]) / duration_seconds
        else:
            scl_slope = 0
        
        return {
            'scl_mean_uS': scl_mean,
            'scl_std_uS': scl_std,
            'scl_slope_uS_per_sec': scl_slope
        }
    
    def calculate_scr_metrics(self, phasic_signal, fs):
        """SCR (phasic) summary: peak count, peaks per minute, mean amplitude.

        Bug fixes: ``nk.signal_findpeaks`` exposes peak amplitudes under the
        ``"Height"`` key — the old ``"Peaks_Height"`` key never exists, so
        the mean amplitude was always 0 — and truth-testing a NumPy array
        (``if scr_amplitudes``) raises for arrays with more than one element,
        so emptiness is now checked via ``len``.
        """
        if len(phasic_signal) == 0:
            return {'scr_count': 0, 'scr_frequency_per_min': 0, 'scr_mean_amplitude_uS': 0}

        # Peak detection on the phasic driver.
        peaks = nk.signal_findpeaks(phasic_signal, height_min=SCR_THRESHOLD)
        scr_count = len(peaks["Peaks"])

        # Peaks per minute.
        duration_minutes = len(phasic_signal) / fs / 60.0
        scr_frequency = scr_count / duration_minutes if duration_minutes > 0 else 0

        # Mean peak amplitude ("Height" is nk.signal_findpeaks' key —
        # confirm against the installed neurokit2 version).
        scr_amplitudes = peaks.get("Height", [])
        scr_mean_amplitude = np.mean(scr_amplitudes) if len(scr_amplitudes) > 0 else 0

        return {
            'scr_count': scr_count,
            'scr_frequency_per_min': scr_frequency,
            'scr_mean_amplitude_uS': scr_mean_amplitude
        }
    
    def assess_data_quality(self, ecg_segment, eda_segment):
        """数据质量评估"""
        # 简单的质量评估，可以根据需要扩展
        ecg_quality = "good" if len(ecg_segment) > 0 else "poor"
        eda_quality = "good" if len(eda_segment) > 0 else "poor"
        
        return {
            'data_quality_score': 1.0 if ecg_quality == "good" and eda_quality == "good" else 0.5,
            'signal_quality_ecg': ecg_quality,
            'signal_quality_eda': eda_quality
        }
    
    def save_segment_metrics(self, stage_id, group_type, person_name, segment_idx, metrics, file_path, fs, segment_info):
        """Persist one 30-second segment's metrics (thread-safe, queued write).

        Uses INSERT OR REPLACE so a re-run replaces the old row for the same
        (stage, group, person, segment, file) key.

        Bug fix: segment indices start at 1, so the fallback timestamps (used
        only when *segment_info* lacks start/end times) must be
        ``(segment_idx - 1) * SEGMENT_DURATION_SEC`` and
        ``segment_idx * SEGMENT_DURATION_SEC``; the old code shifted both by
        one full segment.
        """
        # Wall-clock processing time recorded with the row.
        processing_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

        # Segment start/end timestamps in seconds (fallbacks for 1-based index).
        try:
            timestamp_start = float(segment_info.get('start_time', (segment_idx - 1) * SEGMENT_DURATION_SEC))
            timestamp_end = float(segment_info.get('end_time', segment_idx * SEGMENT_DURATION_SEC))
        except Exception:
            timestamp_start, timestamp_end = None, None

        # UPSERT statement (replaces the old row on key conflict).
        insert_sql = """
        INSERT OR REPLACE INTO time_dynamic_analysis (
            stage_id, group_type, person_name, segment_index,
            timestamp_start, timestamp_end, segment_duration_sec,
            mhr_bpm, rr_intervals_count, rr_intervals_mean, rr_intervals_std,
            hrv_lf, hrv_hf, hrv_lf_hf,
            scl_mean_uS, scl_std_uS, scl_slope_uS_per_sec,
            scr_count, scr_frequency_per_min, scr_mean_amplitude_uS,
            data_quality_score, signal_quality_ecg, signal_quality_eda,
            file_path, sample_rate, processing_time
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        """

        # Parameter tuple in the exact column order above.
        data = (
            stage_id, group_type, person_name, segment_idx,
            timestamp_start, timestamp_end, segment_info.get('segment_duration_sec', SEGMENT_DURATION_SEC),
            metrics.get('mhr_bpm'),
            metrics.get('rr_intervals_count'),
            metrics.get('rr_intervals_mean'),
            metrics.get('rr_intervals_std'),
            metrics.get('hrv_lf'),
            metrics.get('hrv_hf'),
            metrics.get('hrv_lf_hf'),
            metrics.get('scl_mean_uS'),
            metrics.get('scl_std_uS'),
            metrics.get('scl_slope_uS_per_sec'),
            metrics.get('scr_count'),
            metrics.get('scr_frequency_per_min'),
            metrics.get('scr_mean_amplitude_uS'),
            metrics.get('data_quality_score'),
            metrics.get('signal_quality_ecg'),
            metrics.get('signal_quality_eda'),
            file_path,
            int(fs) if fs is not None else None,
            processing_time
        )

        # NaN -> None so SQLite does not reject the row.
        data = tuple(None if (isinstance(x, float) and (np.isnan(x))) else x for x in data)

        # Hand the write off to the thread-safe background writer.
        self.db_manager.queue_write(insert_sql, data)
    
    def generate_group_statistics(self):
        """生成群体统计数据（含SCL值筛选和替换）"""
        # 查询所有数据
        query = "SELECT * FROM time_dynamic_analysis"
        try:
            with sqlite3.connect(self.db_path) as conn:
                df = pd.read_sql_query(query, conn)
            
            # SCL值筛选和替换：将SCL>20的值统一替换为15
            original_count = len(df)
            scl_over_20 = df['scl_mean_uS'] > 20
            df.loc[scl_over_20, 'scl_mean_uS'] = 15.0
            replaced_count = scl_over_20.sum()
            
            if replaced_count > 0:
                print(f"[SCL处理] 发现 {replaced_count} 个SCL>20的记录，已统一替换为15")
            
            # 按stage_id, group_type, segment_index分组计算统计量
            group_stats = df.groupby(['stage_id', 'group_type', 'segment_index']).agg({
                'mhr_bpm': ['mean', 'std'],
                'hrv_lf_hf': ['mean', 'std'],
                'scl_mean_uS': ['mean', 'std'],
                'scr_frequency_per_min': ['mean', 'std'],
                'data_quality_score': 'mean',
                'id': 'count'  # 参与者数量
            }).reset_index()
            
            # 重命名列
            group_stats.columns = [
                'stage_id', 'group_type', 'segment_index',
                'mhr_bpm_mean', 'mhr_bpm_std',
                'hrv_lf_hf_mean', 'hrv_lf_hf_std',
                'scl_mean_uS_mean', 'scl_mean_uS_std',
                'scr_frequency_per_min_mean', 'scr_frequency_per_min_std',
                'data_quality_mean', 'participant_count'
            ]
            
            # 保存到数据库
            with sqlite3.connect(self.db_path) as conn:
                group_stats.to_sql('group_statistics', conn, if_exists='replace', index=False)
                
            print(f"[完成] 群体统计数据已生成并保存到数据库（共{len(group_stats)}条记录）")
        except Exception as e:
            print(f"[错误] 生成群体统计数据失败: {e}")
    
    def _write_bilingual_csv(self, file_path: str, columns: list, df: pd.DataFrame) -> None:
        """Write *df* to *file_path* under a two-row bilingual header.

        ``columns`` is an ordered list of ``(chinese_label, english_name)``
        pairs.  The first CSV row holds the Chinese labels, the second the
        English column names, and the data follows with the DataFrame's
        columns reordered to match ``columns``.
        """
        english_names = [en for _, en in columns]
        with open(file_path, 'w', encoding=DEFAULT_SAVE_ENCODING, newline='') as f:
            writer = csv.writer(f)
            writer.writerow([cn for cn, _ in columns])  # Chinese header row
            writer.writerow(english_names)              # English header row
            # Data rows in header order; suppress pandas' own header.
            df[english_names].to_csv(f, index=False, header=False)

    def export_csv_files(self):
        """Export analysis results from the database to two CSV reports.

        Produces, under ``self.base_path``:
          * a per-person 30-second-segment detail file, and
          * a group-level statistics file,
        each with a bilingual (Chinese + English) two-row header.  Errors
        are reported to stdout; the method never raises.
        """
        try:
            with sqlite3.connect(self.db_path) as conn:
                # ---- per-segment detail export ----
                detailed_df = pd.read_sql_query("SELECT * FROM time_dynamic_analysis", conn)
                # Blank out NULLs so the CSV gets empty cells, not "nan".
                detailed_df = detailed_df.fillna("")

                detailed_columns = [
                    ('阶段ID', 'stage_id'),
                    ('分组类型', 'group_type'),
                    ('人员姓名', 'person_name'),
                    ('分段索引', 'segment_index'),
                    ('分段时长(秒)', 'segment_duration_sec'),
                    ('平均心率(bpm)', 'mhr_bpm'),
                    ('RR间期计数', 'rr_intervals_count'),
                    ('RR间期均值(ms)', 'rr_intervals_mean'),
                    ('RR间期标准差(ms)', 'rr_intervals_std'),
                    ('LF功率', 'hrv_lf'),
                    ('HF功率', 'hrv_hf'),
                    ('LF/HF比值', 'hrv_lf_hf'),
                    ('SCL均值(μS)', 'scl_mean_uS'),
                    ('SCL标准差(μS)', 'scl_std_uS'),
                    ('SCL斜率(μS/s)', 'scl_slope_uS_per_sec'),
                    ('SCR次数', 'scr_count'),
                    ('SCR频率(次/min)', 'scr_frequency_per_min'),
                    ('SCR平均幅度(μS)', 'scr_mean_amplitude_uS'),
                    ('数据质量评分', 'data_quality_score'),
                    ('ECG信号质量', 'signal_quality_ecg'),
                    ('EDA信号质量', 'signal_quality_eda'),
                    ('文件路径', 'file_path'),
                    ('采样率', 'sample_rate')
                ]
                self._write_bilingual_csv(
                    os.path.join(self.base_path, "以人名为依据的数据.csv"),
                    detailed_columns, detailed_df)

                # ---- group statistics export ----
                group_df = pd.read_sql_query("SELECT * FROM group_statistics", conn)

                group_columns = [
                    ('阶段ID', 'stage_id'),
                    ('分组类型', 'group_type'),
                    ('分段索引', 'segment_index'),
                    ('平均心率均值(bpm)', 'mhr_bpm_mean'),
                    ('平均心率标准差(bpm)', 'mhr_bpm_std'),
                    ('LF/HF比值均值', 'hrv_lf_hf_mean'),
                    ('LF/HF比值标准差', 'hrv_lf_hf_std'),
                    ('SCL均值(μS)_mean', 'scl_mean_uS_mean'),
                    ('SCL均值(μS)_std', 'scl_mean_uS_std'),
                    ('SCR频率(次/min)_mean', 'scr_frequency_per_min_mean'),
                    ('SCR频率(次/min)_std', 'scr_frequency_per_min_std'),
                    ('数据质量均值', 'data_quality_mean'),
                    ('参与者数量', 'participant_count')
                ]
                self._write_bilingual_csv(
                    os.path.join(self.base_path, "群体平均值数据.csv"),
                    group_columns, group_df)

            print(f"[完成] CSV文件已导出到: {self.base_path}")
        except Exception as e:
            print(f"[错误] 导出CSV文件失败: {e}")


# =============================================================================
# 主程序入口
# =============================================================================

def main():
    """Entry point: run the full ECG/EDA processing pipeline.

    Three stages are executed in order, each timed separately:
      1. Process all stage files with the multi-threaded analyzer.
      2. Generate group-level statistics.
      3. Export CSV reports.
    Memory and CPU usage are reported along the way.  Any failure is
    printed with a traceback; analyzer cleanup always runs.
    """
    # ``time`` and ``psutil`` are already imported at module level; the
    # previous function-local re-imports were redundant and have been removed.
    start_time = time.time()
    print(f"[启动] 开始处理，使用 {CPU_COUNT} 核心CPU，最大 {MAX_WORKERS} 个工作线程")

    # Track this process's resident memory (RSS, in MB) via psutil.
    process = psutil.Process()
    initial_memory = process.memory_info().rss / 1024 / 1024  # MB
    print(f"[资源] 初始内存使用: {initial_memory:.1f} MB")

    # Input directory next to this file; reuse the module-level DB_PATH
    # constant instead of re-deriving the identical path.
    base_path = os.path.join(os.path.dirname(__file__), "预处理文件")
    db_path = DB_PATH

    analyzer = None
    try:
        analyzer = TimeDynamicAnalyzer(base_path, db_path)

        # Stage 1: process every input file (multi-threaded).
        print("[阶段1] 开始处理所有文件...")
        stage1_start = time.time()
        analyzer.process_all_stages()
        stage1_time = time.time() - stage1_start
        print(f"[阶段1] 文件处理完成，耗时: {stage1_time:.2f} 秒")

        # Stage 2: aggregate per-person results into group statistics.
        print("[阶段2] 开始生成统计数据...")
        stage2_start = time.time()
        analyzer.generate_group_statistics()
        stage2_time = time.time() - stage2_start
        print(f"[阶段2] 统计生成完成，耗时: {stage2_time:.2f} 秒")

        # Stage 3: export CSV reports.
        print("[阶段3] 开始导出CSV文件...")
        stage3_start = time.time()
        analyzer.export_csv_files()
        stage3_time = time.time() - stage3_start
        print(f"[阶段3] CSV导出完成，耗时: {stage3_time:.2f} 秒")

        # Resource summary.
        # NOTE(review): the "峰值" figure below reports the *final* RSS, not a
        # tracked peak — true peak memory is never measured here; confirm
        # whether real peak tracking is wanted.
        final_memory = process.memory_info().rss / 1024 / 1024  # MB
        total_time = time.time() - start_time

        print(f"\n[完成] 所有处理完成！")
        print(f"[性能] 总耗时: {total_time:.2f} 秒")
        print(f"[性能] 内存使用: {initial_memory:.1f} MB -> {final_memory:.1f} MB (峰值: {final_memory:.1f} MB)")
        print(f"[性能] 平均CPU使用率: {psutil.cpu_percent()}%")

    except Exception as e:
        print(f"[错误] 程序执行失败: {e}")
        import traceback
        traceback.print_exc()
    finally:
        # Always release DB connections / thread pools held by the analyzer.
        if analyzer:
            analyzer.cleanup()

# Script entry point: run the pipeline only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()