import os
import numpy as np
import pandas as pd
from scipy.io import loadmat
from typing import Dict, Any, List
from denoise import butter_lowpass_filter, normalization, butter_highpass_filter, adaptive_kalman_filter_bank, remove_baseline_drift
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
import threading
from queue import Queue
import multiprocessing

def process_segment(args):
    start, window_length, ecg_signal, save_path, label, file_id, segment_idx = args
    segment = ecg_signal[start : start + window_length]
    if len(segment) == window_length:
        sample = {
            'ecg': segment,
            'label': label,
            'file_id': file_id,
            'segment_idx': segment_idx
        }
        np.save(save_path, sample)
        return True
    return False

def process_file(file_name: str, data_dir: str, save_dir: str, labels_dict: Dict[str, Any], split_name: str, fs: float) -> None:
    print(f'\n开始处理文件: {file_name}')
    file_path: str = os.path.join(data_dir, file_name)
    data: Dict[str, Any] = loadmat(file_path)
    ecg_signal: np.ndarray = data.get('segmentData')
    
    if ecg_signal is None:
        print(f'文件 {file_name} 中未找到变量 "segmentData"，请检查MAT文件结构。')
        return

    ecg_signal = np.squeeze(ecg_signal)
    original_signal = ecg_signal.copy()
    
    # 使用多线程处理信号滤波
    print(f'正在对 {file_name} 进行信号滤波...')
    with ThreadPoolExecutor(max_workers=2) as executor:
        future1 = executor.submit(remove_baseline_drift, ecg_signal.copy(), fs)
        future2 = executor.submit(adaptive_kalman_filter_bank, ecg_signal.copy(), fs)
        ecg_signal = future1.result()
        ecg_signal = future2.result()

    print(f'信号滤波完成，开始分段处理...')
    total_length: int = len(ecg_signal)
    window_length: int = int(fs * 5)
    step_size: int = int(fs * 1)

    segment_count = len(range(0, total_length - window_length + 1, step_size))
    print(f'预计生成 {segment_count} 个片段')

    # 准备分段处理的参数
    segment_args = []
    
    if split_name == 'predict_data':
        file_id = file_name.split('.')[0]
        for segment_idx, start in enumerate(range(0, total_length - window_length + 1, step_size)):
            if segment_idx % 20 == 0:  # 每处理20个片段打印一次进度
                print(f'\r处理进度: {segment_idx}/{segment_count} ({(segment_idx/segment_count)*100:.1f}%)', end='')
            save_path = os.path.join(save_dir, f'{file_id}_segment_{segment_idx}.npy')
            segment_args.append((start, window_length, ecg_signal, save_path, -1, file_id, segment_idx))
    else:
        # 修改样本ID的提取方式，保留完整的文件名（不包括.mat扩展名）
        full_sample_id = file_name.split('.')[0]  # 例如: "001-1"
        base_sample_id = full_sample_id.split('-')[0].zfill(3)  # 提取基础ID（例如: "001"）
        label: Any = labels_dict.get(base_sample_id)
        if label is None:
            print(f'样本ID {base_sample_id} 在标签文件中未找到。标签字典：{labels_dict}')
            return
        
        for sample_idx, start in enumerate(range(0, total_length - window_length + 1, step_size)):
            if sample_idx % 20 == 0:  # 每处理20个片段打印一次进度
                print(f'\r处理进度: {sample_idx}/{segment_count} ({(sample_idx/segment_count)*100:.1f}%)', end='')
            # 使用完整的样本ID作为文件名前缀
            save_path = os.path.join(save_dir, f'{full_sample_id}_sample_{sample_idx}.npy')
            segment_args.append((start, window_length, ecg_signal, save_path, label, full_sample_id, sample_idx))
            

    print(f'\n正在保存处理结果...')
    # 使用线程池并行处理信号片段
    with ThreadPoolExecutor(max_workers=min(32, len(segment_args))) as executor:
        results = list(executor.map(process_segment, segment_args))
    
    processed_segments = sum(results)
    print(f'文件 {file_name} 处理完成，成功生成 {processed_segments} 个片段')

def preprocess_data(
    data_dirs: Dict[str, str],
    label_file: str,
    output_dir: str,
    segment_length: int = 2560,
    fs: float = 512.0
) -> None:
    """
    预处理ECG数据，将.mat文件读取并切割为固定长度的片段。

    Parameters:
    - data_dirs: dict，包含数据集名称和对应的目录路径，例如 {'train_data': 'data/train_data', 'test_data': 'data/test_data'}
    - label_file: str，标签文件的路径
    - output_dir: str，输出数据的根目录
    - segment_length: int，信号片段的长度，默认2560（5秒 * 512Hz）
    """

    # 1. 读取标签文件
    labels_df: pd.DataFrame = pd.read_csv(label_file)
    # 删除列名中的空格并确保ID为3位数字格式
    labels_df.columns = labels_df.columns.str.strip()
    labels_df['ID'] = labels_df['ID'].astype(str).str.zfill(3)
    labels_dict: Dict[str, Any] = dict(zip(labels_df['ID'], labels_df['Label']))
    
    #打印标签文件中的ID
    print("标签文件中的ID:", list(labels_dict.keys()))

    # 获取CPU核心数
    num_cores = multiprocessing.cpu_count()
    
    for split_name, data_dir in data_dirs.items():
        print(f'\n开始处理 {split_name} 数据集...')
        save_dir: str = os.path.join(output_dir, split_name)
        os.makedirs(save_dir, exist_ok=True)

        file_list: list = [f for f in os.listdir(data_dir) if f.endswith('.mat')]
        total_files = len(file_list)
        print(f'找到 {total_files} 个.mat文件待处理')
        
        processed_files = 0
        # 使用进程池处理文件，设置进程数为CPU核心数
        with ProcessPoolExecutor(max_workers=num_cores) as executor:
            futures = [
                executor.submit(process_file, file_name, data_dir, save_dir, labels_dict, split_name, fs)
                for file_name in file_list
            ]
            for future in futures:
                try:
                    future.result()
                    processed_files += 1
                    print(f'\n总体进度: {processed_files}/{total_files} ({(processed_files/total_files)*100:.1f}%)')
                except Exception as e:
                    print(f"\n处理文件时发生错误: {str(e)}")
        
        print(f'\n{split_name} 数据集处理完成！')

def process_signal_component(signal: np.ndarray, fs: float) -> np.ndarray:
    """使用pykalman处理信号分量"""
    # 自适应估计初始参数
    kf = estimate_parameters_em_with_pykalman(signal, num_iterations=5)
    
    # 确保信号中不包含NaN值
    if np.isnan(signal).any():
        signal = np.nan_to_num(signal)
    
    # 应用滤波和平滑
    try:
        smoothed_state_means, smoothed_state_cov = kf.smooth(signal.reshape(-1, 1))
    except ValueError as e:
        # 处理滤波过程中可能出现的错误
        print(f"Kalman filter error: {e}")
        return signal  # 返回原始信号或进行其他处理
    
    return smoothed_state_means.flatten()

if __name__ == '__main__':
    """
        Run this script under /scripts directory
    """    
    data_dirs = {
        'train_data': 'data/train_data',
        'test_data': 'data/test_data',
        'predict_data': 'data/predict_data'  # 添加预测数据目录
    }
    # 使用完整的标签文件路径
    label_file = 'data/Label.csv'
    output_dir = 'data/processed'

    preprocess_data(data_dirs, label_file, output_dir)
