import pandas as pd
import numpy as np
import glob
import os
import logging
from scipy import stats

# Configure logging (per-file exceptions are caught and logged below)
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

def load_and_extract_features(file_path):
    """Extract eye-tracking / expression features from one subject CSV.

    Parameters
    ----------
    file_path : str
        Path to a CSV file expected to contain at least the columns
        Frame, Gaze_X, Gaze_Y and Expression.

    Returns
    -------
    dict | None
        Feature dict (statistics, saccade velocity, expression summary,
        plus 'Subject_ID' derived from the file name), or None when the
        file is unreadable or is missing required columns.
    """
    try:
        # Explicit UTF-8 so behavior does not depend on the platform default.
        data = pd.read_csv(file_path, encoding='utf-8')

        # Validate the schema up front; skip files we cannot process.
        required_columns = {'Frame', 'Gaze_X', 'Gaze_Y', 'Expression'}
        if not required_columns.issubset(data.columns):
            missing = required_columns - set(data.columns)
            logging.warning(f"文件 {os.path.basename(file_path)} 缺少列: {', '.join(missing)}")
            return None

        features = {}

        # 1. Basic per-axis statistics (vectorized; NaNs excluded).
        for axis in ['X', 'Y']:
            gaze_data = data[f'Gaze_{axis}'].dropna()
            key = f'gaze_{axis.lower()}'
            features[f'{key}_mean'] = gaze_data.mean()
            features[f'{key}_std'] = gaze_data.std()
            features[f'{key}_range'] = gaze_data.max() - gaze_data.min()

        # 2. Dynamic feature: mean frame-to-frame gaze displacement.
        #    (.abs() before squaring is redundant and was removed.)
        gaze_diff = data[['Gaze_X', 'Gaze_Y']].diff().dropna()
        if gaze_diff.empty:
            features['saccade_velocity'] = 0
        else:
            features['saccade_velocity'] = np.sqrt((gaze_diff ** 2).sum(axis=1)).mean()

        # 3. Expression summary.
        #    BUG FIX: the old metric used value_counts(...).nunique(), which
        #    counts distinct *frequencies* and collapses expressions that
        #    occur equally often; count distinct expression labels instead.
        #    Guarding on expr_counts (not data) also avoids idxmax() raising
        #    when the Expression column is entirely NaN.
        expr_counts = data['Expression'].value_counts(normalize=True)
        if not expr_counts.empty:
            features['main_expression'] = expr_counts.idxmax()
            features['expression_variability'] = len(expr_counts)

        # 4. Use the file name (without extension) as the subject ID.
        features['Subject_ID'] = os.path.splitext(os.path.basename(file_path))[0]

        return features
    except Exception as e:
        logging.error(f"处理文件 {file_path} 失败: {str(e)}")
        return None

def clean_data(asd_path, td_path):
    """Build a per-subject feature table from two directories of CSVs.

    Parameters
    ----------
    asd_path, td_path : str
        Directories containing per-subject CSV files for the ASD and TD
        groups respectively (every ``*.csv`` inside is processed).

    Returns
    -------
    pandas.DataFrame
        One row per successfully processed file, tagged with a 'Group'
        column ('ASD' or 'TD'); numeric NaNs are filled with the column
        median.

    Raises
    ------
    ValueError
        If no file yields a valid feature set.
    """
    from concurrent.futures import ThreadPoolExecutor

    asd_files = glob.glob(os.path.join(asd_path, '*.csv'))
    td_files = glob.glob(os.path.join(td_path, '*.csv'))
    all_files = [('ASD', f) for f in asd_files] + [('TD', f) for f in td_files]

    all_features = []
    logging.info(f"发现ASD文件: {len(asd_files)}个, TD文件: {len(td_files)}个")

    # Threads mostly overlap file I/O; CSV parsing holds the GIL, so the
    # actual speedup is workload-dependent.
    with ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
        # Submit the extractor directly. The previous
        # `lambda f: load_and_extract_features(f) or {}` wrapper only turned
        # None into an empty dict — both are falsy, so the check below
        # already filters them identically.
        futures = [
            (group, executor.submit(load_and_extract_features, file))
            for group, file in all_files
        ]
        for group, future in futures:
            features = future.result()
            if features:
                features['Group'] = group
                all_features.append(features)

    if not all_features:
        raise ValueError("无有效数据！请检查文件路径和格式")

    feature_df = pd.DataFrame(all_features)

    # Fill numeric NaNs with each column's median (robust to outliers).
    num_cols = feature_df.select_dtypes(include=np.number).columns
    for col in num_cols:
        if feature_df[col].isna().any():
            feature_df[col] = feature_df[col].fillna(feature_df[col].median())

    logging.info(f"清洗完成: {feature_df.shape[0]}样本, {feature_df.shape[1]}特征")
    return feature_df