#!/usr/bin/env python
# coding: utf-8

import pandas as pd
import numpy as np
import os
import glob
from tqdm import tqdm
import warnings
from datetime import datetime, timedelta
import multiprocessing as mp
from scipy import stats
from scipy.fft import fft, fftfreq
from sklearn.model_selection import train_test_split, KFold, RandomizedSearchCV
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
import joblib
import sys
from scipy.stats import randint as sp_randint
from scipy.stats import uniform as sp_uniform
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import time
import psutil # Ensure this is imported if visualization functions use it

# --- Matplotlib and Seaborn Settings (English Labels) ---
# Using default settings is usually safer for cross-platform compatibility
# plt.rcParams['font.sans-serif'] = ['SimHei'] # Commented out
plt.rcParams['axes.unicode_minus'] = False # Keep this for correct minus sign display
sns.set_theme(style="whitegrid")

# --- Configuration parameters ---
# Data paths (change to match your local layout)
BASE_DATA_DIR = r"C:\\Users\\ASUS\\Desktop\\B题-全部数据" #<--- change to your local path

DATA_DIR_TRAIN = os.path.join(BASE_DATA_DIR, "附件1")  # labelled training data (P*.csv with 'annotation')
METADATA_TRAIN = os.path.join(DATA_DIR_TRAIN, "Metadata1.csv")
DATA_DIR_TEST = os.path.join(BASE_DATA_DIR, "附件2")  # unlabelled test data
METADATA_TEST = os.path.join(DATA_DIR_TEST, "Metadata2.csv")

# Output paths (per the competition's required layout)
RESULTS_DIR = "results_task2" # Changed folder name for clarity
FIGURES_SAVE_PATH = os.path.join(RESULTS_DIR, "figures")
MODEL_SAVE_PATH = os.path.join(RESULTS_DIR, "met_prediction_lgbm.joblib") # Save model in results dir
OUTPUT_PREDICTIONS_DIR = "result_2" # Save individual predictions here
OUTPUT_SUMMARY_FILE = os.path.join(RESULTS_DIR, "result_2_summary.xlsx") # Summary file

# Sliding-window parameters
WINDOW_SIZE_SEC = 6  # window length in seconds
STEP_SEC = 3  # step between consecutive windows; windows overlap since STEP_SEC < WINDOW_SIZE_SEC
SAMPLING_RATE_HZ = 10 # assumed sampling rate

# Feature-engineering settings
N_FFT_COMPONENTS = 10  # number of leading FFT power bins kept per axis

# MET classification thresholds (from TDB.md); each is the exclusive upper
# bound of its category — see classify_activity_by_met
MET_SLEEP = 1.0
MET_STATIC = 1.6
MET_LOW = 3.0
MET_MODERATE = 6.0

# Number of worker processes for parallel processing
NUM_PROCESSES = mp.cpu_count() # use all cores

# Hyperparameter tuning settings
N_ITER_SEARCH = 50  # RandomizedSearchCV candidates to sample
CV_FOLDS = 5  # cross-validation folds

warnings.filterwarnings('ignore')

# --- Helper functions ---

def classify_activity_by_met(met_value):
    """Map a MET value to an activity-intensity category (thresholds per TDB.md).

    Each threshold is an exclusive upper bound; values at or above
    MET_MODERATE fall into 'high'.
    """
    # Checked in ascending order — the first bound that exceeds the value wins.
    bounds = (
        (MET_SLEEP, 'sleep'),
        (MET_STATIC, 'static'),
        (MET_LOW, 'low'),
        (MET_MODERATE, 'moderate'),
    )
    for upper_bound, category in bounds:
        if met_value < upper_bound:
            return category
    return 'high'

def parse_time(df):
    """Parse the 'time' column into a sorted 'datetime' column.

    The expected format is "%Y-%m-%d %H:%M:%S.%f". Values that do not match
    are retried with pandas' automatic format inference; rows that still
    cannot be parsed are dropped.

    Args:
        df: DataFrame that must contain a 'time' column.

    Returns:
        DataFrame with a 'datetime' column, sorted ascending by it. May be
        empty if no timestamp could be parsed.

    Raises:
        ValueError: if the 'time' column is missing.
    """
    if 'time' not in df.columns:
        raise ValueError("数据文件缺少 'time' 列")

    # BUG FIX: with errors='coerce', to_datetime never raises — unparseable
    # values silently become NaT — so the old try/except fallback was dead
    # code and mismatched rows were simply dropped. Instead, retry only the
    # NaT rows with automatic format inference.
    df['datetime'] = pd.to_datetime(df['time'], format="%Y-%m-%d %H:%M:%S.%f", errors='coerce')
    unparsed = df['datetime'].isna()
    if unparsed.any():
        df.loc[unparsed, 'datetime'] = pd.to_datetime(df.loc[unparsed, 'time'], errors='coerce')

    # Drop rows whose timestamp could not be parsed by either attempt.
    original_len = len(df)
    df = df.dropna(subset=['datetime'])
    dropped_rows = original_len - len(df)
    if dropped_rows > 0:
        # print(f"警告: 因时间戳解析失败，删除了 {dropped_rows} 行数据。") # Keep commented
        pass

    if df.empty:
         print("警告: 解析时间戳后没有有效数据行。")
    df = df.sort_values('datetime')
    return df

def extract_met_from_annotation(annotation):
    """Extract the numeric MET value from an annotation string (附件1 P*.csv).

    Annotations look like "...;...;MET 3.5": the MET value is expected in the
    last semicolon-separated part, after the final space.

    Args:
        annotation: annotation cell value; coerced to str before parsing.

    Returns:
        float: the parsed MET value, or np.nan when the annotation does not
        match the expected format.
    """
    try:
        parts = str(annotation).split(';')
        met_part = parts[-1].strip()  # MET is usually in the last part
        if "MET" in met_part:
            # Take the number after the last space, e.g. "MET 3.5" -> 3.5
            return float(met_part.split(' ')[-1])
        return np.nan  # unexpected format
    except (ValueError, IndexError, TypeError):
        # Narrowed from a bare `except:` — only parsing failures are expected
        # here; anything else should surface rather than be swallowed.
        return np.nan

def calculate_magnitude(df):
    """Add a 'magnitude' column: the Euclidean norm of the x/y/z axes."""
    squared_sum = df['x'] ** 2 + df['y'] ** 2 + df['z'] ** 2
    df['magnitude'] = squared_sum ** 0.5
    return df

def extract_window_features(window_data, age, sex):
    """Extract time- and frequency-domain features for one sliding window.

    Shared by the training and prediction paths so both produce the same
    feature schema.

    Args:
        window_data: DataFrame slice for one window; must contain the
            'x', 'y', 'z', 'magnitude' and 'datetime' columns.
        age, sex: static subject attributes passed through as features
            (encoded later by the preprocessing pipeline).

    Returns:
        dict mapping feature name -> value, or None when the window has
        fewer than two samples.
    """
    features = {}

    # Fewer than two samples carries no usable signal statistics.
    if window_data.empty or len(window_data) <= 1:
        return None

    # Basic per-axis statistics
    for col in ['x', 'y', 'z', 'magnitude']:
        series = window_data[col].dropna()
        if series.empty:
            # All-NaN axis: emit zeros so the feature schema stays fixed.
            for stat in ['mean', 'std', 'var', 'min', 'max', 'median', 'iqr', 'skew', 'kurt', 'rms']:
                features[f'{col}_{stat}'] = 0.0
            continue

        features[f'{col}_mean'] = series.mean()
        features[f'{col}_std'] = series.std(ddof=0)
        features[f'{col}_var'] = series.var(ddof=0)
        features[f'{col}_min'] = series.min()
        features[f'{col}_max'] = series.max()
        features[f'{col}_median'] = series.median()
        try:
            q75, q25 = np.percentile(series, [75 ,25])
            features[f'{col}_iqr'] = q75 - q25
        except IndexError:
             features[f'{col}_iqr'] = 0.0
        features[f'{col}_skew'] = series.skew()
        features[f'{col}_kurt'] = series.kurtosis()
        features[f'{col}_rms'] = np.sqrt(np.mean(series**2))

    # Signal Magnitude Area (SMA): mean of summed absolute axis values
    sma_cols = ['x', 'y', 'z']
    valid_sma_data = window_data[sma_cols].dropna()
    if not valid_sma_data.empty:
        features['sma'] = valid_sma_data.abs().sum().sum() / len(valid_sma_data)
    else:
        features['sma'] = 0.0

    # Pairwise inter-axis correlations
    if len(window_data) > 1:
        features['corr_xy'] = window_data['x'].corr(window_data['y'])
        features['corr_xz'] = window_data['x'].corr(window_data['z'])
        features['corr_yz'] = window_data['y'].corr(window_data['z'])
    else:
        features['corr_xy'] = 0.0
        features['corr_xz'] = 0.0
        features['corr_yz'] = 0.0

    # Frequency-domain features (FFT)
    n_samples = len(window_data)
    if n_samples > 1:
        # NOTE(review): time_diffs is computed but never used — the interval
        # below comes from the configured sampling rate instead.
        time_diffs = window_data['datetime'].diff().dt.total_seconds().dropna()
        # Use the configured sampling rate for the sample interval rather
        # than the (potentially noisy) observed time differences.
        avg_interval = 1.0 / SAMPLING_RATE_HZ

        if avg_interval > 0:
            freq = fftfreq(n_samples, d=avg_interval)[:n_samples//2]

            for col in ['x', 'y', 'z', 'magnitude']:
                series = window_data[col].dropna()
                if len(series) < 2:
                    # Not enough points for an FFT: zero-fill this axis's slots.
                    for i in range(N_FFT_COMPONENTS):
                        features[f'{col}_fft_power_{i}'] = 0.0
                    features[f'{col}_fft_energy'] = 0.0
                    features[f'{col}_fft_entropy'] = 0.0
                    continue

                fft_vals = fft(series.values)
                # Keep only the positive-frequency half of the spectrum.
                valid_fft_len = min(len(fft_vals)//2, n_samples // 2)
                fft_power = np.abs(fft_vals[:valid_fft_len])**2

                # First N_FFT_COMPONENTS power bins; pad with zeros if fewer exist.
                n_fft_actual = min(N_FFT_COMPONENTS, len(fft_power))
                for i in range(n_fft_actual):
                    features[f'{col}_fft_power_{i}'] = fft_power[i] if i < len(fft_power) else 0.0
                for i in range(n_fft_actual, N_FFT_COMPONENTS):
                     features[f'{col}_fft_power_{i}'] = 0.0

                total_energy = np.sum(fft_power)
                features[f'{col}_fft_energy'] = total_energy
                if total_energy > 0:
                   # Spectral entropy over the normalized power spectrum.
                   normalized_power = fft_power / total_energy
                   features[f'{col}_fft_entropy'] = -np.sum(normalized_power[normalized_power > 0] * np.log2(normalized_power[normalized_power > 0] + 1e-12)) # Avoid log(0)
                else:
                   features[f'{col}_fft_entropy'] = 0.0
        else:
             for col in ['x', 'y', 'z', 'magnitude']:
                 for i in range(N_FFT_COMPONENTS):
                    features[f'{col}_fft_power_{i}'] = 0.0
                 features[f'{col}_fft_energy'] = 0.0
                 features[f'{col}_fft_entropy'] = 0.0
    else:
        for col in ['x', 'y', 'z', 'magnitude']:
             for i in range(N_FFT_COMPONENTS):
                features[f'{col}_fft_power_{i}'] = 0.0
             features[f'{col}_fft_energy'] = 0.0
             features[f'{col}_fft_entropy'] = 0.0

    # Replace NaN correlations (e.g. from a constant axis) with 0
    for key in ['corr_xy', 'corr_xz', 'corr_yz']:
        if key not in features or pd.isna(features[key]):
            features[key] = 0.0

    # Static subject features
    features['age'] = age # numeric, or encoded later by the pipeline
    features['sex'] = sex # encoded later by the pipeline

    # Final sweep: zero-fill any NaN produced during computation
    for k, v in features.items():
        if pd.isna(v):
             features[k] = 0.0

    return features

# --- Visualization functions (all labels in English) ---
def plot_metrics(y_true, y_pred, dataset_type='Train', save_path=FIGURES_SAVE_PATH):
    """Save two diagnostic plots for a dataset split: a predicted-vs-actual
    scatter and a histogram of the signed prediction errors."""
    os.makedirs(save_path, exist_ok=True)
    tag = dataset_type.lower()

    # Scatter of predictions against ground truth, with the y = x reference line.
    plt.figure(figsize=(10, 6))
    plt.scatter(y_true, y_pred, alpha=0.3, label='Predicted vs Actual')
    lo, hi = y_true.min(), y_true.max()
    plt.plot([lo, hi], [lo, hi], 'r--', label='Perfect Prediction Line')
    plt.xlabel('Actual MET Value')
    plt.ylabel('Predicted MET Value')
    plt.title(f'Predicted vs Actual MET Values ({dataset_type} Set)')
    plt.legend()
    plt.grid(True)
    plt.savefig(f'{save_path}/prediction_vs_actual_{tag}.png', dpi=300, bbox_inches='tight')
    plt.close()

    # Distribution of signed errors (with KDE overlay).
    residuals = y_pred - y_true
    plt.figure(figsize=(10, 6))
    sns.histplot(residuals, kde=True)
    plt.xlabel('Prediction Error (Predicted - Actual)')
    plt.ylabel('Frequency')
    plt.title(f'Prediction Error Distribution ({dataset_type} Set)')
    plt.grid(True)
    plt.savefig(f'{save_path}/error_distribution_{tag}.png', dpi=300, bbox_inches='tight')
    plt.close()

def plot_residuals(y_true, y_pred, dataset_type='Train', save_path=FIGURES_SAVE_PATH):
    """Save a residuals-vs-predicted scatter plot.

    A horizontal zero line is drawn so systematic bias is easy to spot.
    """
    os.makedirs(save_path, exist_ok=True)
    residuals = y_pred - y_true

    plt.figure(figsize=(10, 6))
    plt.scatter(y_pred, residuals, alpha=0.3)
    plt.axhline(0, color='red', linestyle='--')
    plt.xlabel('Predicted MET Value')
    plt.ylabel('Residuals (Predicted - Actual)')
    plt.title(f'Residuals vs Predicted Values ({dataset_type} Set)')
    plt.grid(True)
    out_file = f'{save_path}/residuals_vs_predicted_{dataset_type.lower()}.png'
    plt.savefig(out_file, dpi=300, bbox_inches='tight')
    plt.close()

def plot_feature_importance(model, feature_names, save_path=FIGURES_SAVE_PATH):
    """Save a horizontal bar chart of the model's top-30 feature importances.

    Accepts either a fitted sklearn Pipeline with a 'regressor' step or a
    bare estimator exposing `feature_importances_`.
    """
    os.makedirs(save_path, exist_ok=True)

    # Pull importances out of a Pipeline or a bare estimator.
    try:
        if hasattr(model, 'named_steps') and 'regressor' in model.named_steps:
            importances = model.named_steps['regressor'].feature_importances_
        elif hasattr(model, 'feature_importances_'):
            importances = model.feature_importances_
        else:
            print("Model structure not recognized for feature importance.")
            return
    except AttributeError:
        print("Model does not have feature_importances_ attribute.")
        return

    if importances is None or len(importances) == 0:
        print("Feature importances are empty.")
        return

    # If lengths disagree, truncate both to the common prefix so a plot can
    # still be drawn.
    if len(importances) != len(feature_names):
        print(f"Warning: Mismatch between feature importances ({len(importances)}) and names ({len(feature_names)}). Trying to reconcile.")
        common = min(len(importances), len(feature_names))
        importances = importances[:common]
        feature_names = feature_names[:common]
        if common == 0:
            return

    order = np.argsort(importances)[::-1]
    top_n = min(len(importances), 30)
    # Reverse so the most important feature ends up at the top of the barh chart.
    ranked_values = importances[order][:top_n][::-1]
    ranked_labels = [feature_names[i] for i in order][:top_n][::-1]

    plt.figure(figsize=(12, max(8, top_n * 0.3)))
    plt.barh(range(top_n), ranked_values, align='center')
    plt.yticks(range(top_n), ranked_labels)
    plt.xlabel('Feature Importance Score')
    plt.ylabel('Feature Name')
    plt.title(f'Feature Importance Analysis (Top {top_n})')
    plt.tight_layout()
    plt.savefig(f'{save_path}/feature_importance.png', dpi=300, bbox_inches='tight')
    plt.close()

def plot_cv_results(cv_results_df, save_path=FIGURES_SAVE_PATH):
    """Save plots summarizing RandomizedSearchCV results.

    Produces (1) a histogram of cross-validation MSE scores and (2) a scatter
    of score rank over the n_estimators / learning_rate grid.
    """
    os.makedirs(save_path, exist_ok=True)

    # 1. CV score distribution. Scores are negated MSE, so flip the sign back.
    mse_scores = cv_results_df['mean_test_score'] * -1
    plt.figure(figsize=(10, 6))
    sns.histplot(mse_scores, kde=True)
    plt.xlabel('Mean Squared Error (MSE) during CV')
    plt.ylabel('Frequency')
    plt.title('Distribution of Cross-Validation Scores (Lower MSE is Better)')
    plt.savefig(f'{save_path}/cv_score_distribution.png', dpi=300, bbox_inches='tight')
    plt.close()

    # 2. Rank of each candidate over two key hyperparameters.
    plt.figure(figsize=(14, 6))
    scatter = plt.scatter(
        cv_results_df['param_regressor__n_estimators'],
        cv_results_df['param_regressor__learning_rate'],
        c=cv_results_df['rank_test_score'],
        cmap='viridis_r',  # reversed so better (lower) ranks appear brighter
        alpha=0.7,
    )
    plt.colorbar(scatter, label='Rank of CV Score (1 is Best)')
    plt.xlabel('Number of Estimators')
    plt.ylabel('Learning Rate')
    plt.title('CV Score Rank vs. n_estimators and learning_rate')
    plt.grid(True)
    plt.savefig(f'{save_path}/cv_score_vs_hyperparams.png', dpi=300, bbox_inches='tight')
    plt.close()

def plot_predicted_met_distribution_test(all_predictions_df, save_path=FIGURES_SAVE_PATH):
    """Save a histogram (with KDE) of predicted MET values on the test set."""
    os.makedirs(save_path, exist_ok=True)
    if all_predictions_df.empty:
        print("Info: No test predictions available to plot distribution.")
        return

    predicted_values = all_predictions_df['predicted_met']
    plt.figure(figsize=(10, 6))
    sns.histplot(predicted_values, bins=50, kde=True)
    plt.xlabel('Predicted MET Value')
    plt.ylabel('Frequency')
    plt.title('Distribution of Predicted MET Values (Test Set)')
    plt.grid(True)
    out_png = f'{save_path}/predicted_met_distribution_test.png'
    plt.savefig(out_png, dpi=300, bbox_inches='tight')
    plt.close()

def plot_met_category_comparison(y_true_train, all_predictions_df_test, save_path=FIGURES_SAVE_PATH):
    """Compare activity-category distribution: train (actual) vs test (predicted).

    Buckets MET values with classify_activity_by_met and plots, per category,
    the percentage of time windows side by side for the two splits.

    Args:
        y_true_train: Series of actual MET values from the training windows.
        all_predictions_df_test: DataFrame with a 'predicted_met' column.
        save_path: directory the PNG is written to (created if needed).
    """
    os.makedirs(save_path, exist_ok=True)
    if all_predictions_df_test.empty:
        print("Info: No test predictions available for category comparison.")
        return

    # Percentage of windows per category for each split.
    train_categories = y_true_train.apply(classify_activity_by_met).value_counts(normalize=True) * 100
    test_categories = all_predictions_df_test['predicted_met'].apply(classify_activity_by_met).value_counts(normalize=True) * 100

    # Combine into a DataFrame for plotting; reindex fixes the category order
    # and inserts any missing category as 0.
    df_compare = pd.DataFrame({'Train (Actual)': train_categories, 'Test (Predicted)': test_categories}).fillna(0)
    df_compare = df_compare.reindex(['sleep', 'static', 'low', 'moderate', 'high']).fillna(0)

    # BUG FIX: the old code called plt.figure(figsize=...) and then
    # DataFrame.plot(), which creates its *own* new figure — the sized figure
    # stayed empty and leaked, and figsize was ignored. Pass figsize to
    # DataFrame.plot and draw on the axes it returns.
    ax = df_compare.plot(kind='bar', rot=0, figsize=(10, 6))
    ax.set_title('Comparison of Activity Category Distribution (%)')
    ax.set_xlabel('Activity Category')
    ax.set_ylabel('Percentage of Time Windows (%)')
    ax.legend(title='Dataset')
    plt.tight_layout()
    plt.savefig(f'{save_path}/met_category_comparison.png', dpi=300, bbox_inches='tight')
    plt.close()

# --- Data processing and model training/prediction functions ---

def process_subject_train_data(args):
    """Extract per-window features and MET labels for one training subject (附件1).

    Designed for multiprocessing.Pool, so it takes a single dict argument.

    Args:
        args: dict with
            'subject_id': full subject ID string, e.g. 'P001'
            'metadata': metadata row providing 'age' and 'sex'

    Returns:
        pd.DataFrame with one row per valid window (features plus a 'met'
        label column), or None when the file is missing/invalid or no valid
        windows are produced.
    """
    subject_id_str = args['subject_id'] # e.g., 'P001'
    metadata_row = args['metadata']
    data_file = os.path.join(DATA_DIR_TRAIN, f"{subject_id_str}.csv") # filename uses the full ID, e.g. 'P001.csv'

    if not os.path.exists(data_file):
        print(f"警告: 训练文件 {data_file} 不存在，跳过受试者 {subject_id_str}")
        return None

    try:
        df = pd.read_csv(data_file)
        if 'annotation' not in df.columns:
             print(f"警告: 训练文件 {data_file} 缺少 'annotation' 列，无法提取标签，跳过。")
             return None

        df = parse_time(df)
        if df.empty: return None # parse_time may return an empty df
        df = calculate_magnitude(df)
        # Per-row MET label parsed from the annotation text.
        df['met'] = df['annotation'].apply(extract_met_from_annotation)
        df = df.dropna(subset=['met'])

        if df.empty:
            # Subject has no rows with a parseable MET value.
            return None

        age = metadata_row['age']
        sex = metadata_row['sex']

        features_list = []
        window_start = df['datetime'].iloc[0]
        window_end_time = df['datetime'].iloc[-1]

        # Slide a WINDOW_SIZE_SEC window in STEP_SEC increments; each window's
        # label is the mean MET of the rows it covers.
        while window_start < window_end_time:
            window_end = window_start + timedelta(seconds=WINDOW_SIZE_SEC)
            window_data = df[(df['datetime'] >= window_start) & (df['datetime'] < window_end)]

            if not window_data.empty and len(window_data) >= 2:
                window_met = window_data['met'].mean()
                if pd.notna(window_met):
                    features = extract_window_features(window_data, age, sex)
                    if features is not None:
                        features['met'] = window_met
                        features_list.append(features)

            window_start += timedelta(seconds=STEP_SEC)

        if not features_list:
            # No window produced a valid feature set.
            return None

        return pd.DataFrame(features_list)

    except FileNotFoundError:
         print(f"错误: 训练文件 {data_file} 未找到。")
         return None
    except Exception as e:
        print(f"处理训练受试者 {subject_id_str} 时出错: {str(e)}")
        import traceback
        traceback.print_exc()
        return None

def process_subject_predict_data(args):
    """Process one test subject: window the signal, predict per-window MET,
    map predictions back onto the raw samples, and compute a time summary.

    Designed for multiprocessing.Pool, so it takes a single dict argument.

    Args:
        args: dict with
            'subject_id': full subject ID string, e.g. 'P001'
            'metadata': metadata row providing 'age' and 'sex'
            'model': fitted sklearn Pipeline with 'preprocessor' and
                'regressor' steps

    Returns:
        (summary_dict, output_df) on success; (summary_dict, None) when
        predictions exist but the per-sample output cannot be built;
        (None, None) on any failure.
    """
    subject_pid_str = args['subject_id']
    metadata_row = args['metadata']
    model = args['model']
    subject_numeric_id = subject_pid_str[1:]
    data_file = os.path.join(DATA_DIR_TEST, f"P{subject_numeric_id}.csv")  # test files use a 'P' prefix

    if not os.path.exists(data_file):
        print(f"警告: 测试文件 {data_file} (使用P前缀) 不存在，跳过受试者 {subject_pid_str}")
        return None, None

    try:
        df = pd.read_csv(data_file)
        # Test files should be unlabelled; drop a stray annotation column.
        if 'annotation' in df.columns:
            print(f"警告: 测试文件 {data_file} 包含 'annotation' 列，这不符合预期，将忽略该列。")
            df = df.drop(columns=['annotation'])

        df = parse_time(df)
        if df.empty: return None, None
        df = calculate_magnitude(df)

        age = metadata_row['age']
        sex = metadata_row['sex']
        features_list = []
        timestamps = []
        if len(df['datetime']) < 2: return None, None
        window_start = df['datetime'].iloc[0]
        window_end_time = df['datetime'].iloc[-1]
        if window_start >= window_end_time: return None, None

        # Slide fixed-size windows and extract features for each.
        while window_start < window_end_time:
            window_end = window_start + timedelta(seconds=WINDOW_SIZE_SEC)
            window_data = df[(df['datetime'] >= window_start) & (df['datetime'] < window_end)]
            if not window_data.empty and len(window_data) >= 2:
                features = extract_window_features(window_data, age, sex)
                if features is not None:
                    features_list.append(features)
                    timestamps.append(window_start)
            window_start += timedelta(seconds=STEP_SEC)

        if not features_list: return None, None

        features_df = pd.DataFrame(features_list)
        try:
            X_test_processed = model.named_steps['preprocessor'].transform(features_df)
            y_pred = model.named_steps['regressor'].predict(X_test_processed)
        except Exception as pred_err:
             print(f"Error during prediction for {subject_pid_str}: {pred_err}")
             return None, None

        # --- Summary statistics over *window* predictions.
        # BUG FIX: computed BEFORE the per-sample output below, so summary_dict
        # exists on the early-return path (the old code referenced it there
        # before assignment, raising NameError).
        # The summary is intentionally window-based: we care about time spent
        # in activity windows of each intensity, counted as STEP_SEC each.
        results_df = pd.DataFrame({'timestamp': timestamps, 'predicted_met': y_pred})

        total_duration_hours = 0
        if timestamps:
            duration_timedelta = (timestamps[-1] + timedelta(seconds=WINDOW_SIZE_SEC)) - timestamps[0]
            total_duration_hours = duration_timedelta.total_seconds() / 3600
        high_intensity_minutes = len(results_df[results_df['predicted_met'] >= MET_MODERATE]) * STEP_SEC / 60
        moderate_intensity_minutes = len(results_df[(results_df['predicted_met'] >= MET_LOW) & (results_df['predicted_met'] < MET_MODERATE)]) * STEP_SEC / 60
        low_intensity_minutes = len(results_df[(results_df['predicted_met'] >= MET_STATIC) & (results_df['predicted_met'] < MET_LOW)]) * STEP_SEC / 60
        static_activity_minutes = len(results_df[(results_df['predicted_met'] >= MET_SLEEP) & (results_df['predicted_met'] < MET_STATIC)]) * STEP_SEC / 60

        summary_dict = {
            '志愿者ID': f"T{subject_numeric_id}",
            '记录总时长（小时）': total_duration_hours,
            '高等强度运动总时长（分钟）': high_intensity_minutes,
            '中等强度运动总时长（分钟）': moderate_intensity_minutes,
            '低等强度运动总时长（分钟）': low_intensity_minutes,
            '静态活动总时长（分钟）': static_activity_minutes
        }

        # --- Map window predictions back to every raw sample via merge_asof.
        # Both sides must be sorted on their time keys.
        window_predictions_df = results_df.sort_values('timestamp')
        df = df.sort_values('datetime')

        # direction='backward': each sample receives the prediction of the
        # most recent window that started at or before its timestamp.
        merged_df = pd.merge_asof(
            df,
            window_predictions_df,
            left_on='datetime',
            right_on='timestamp',
            direction='backward'
        )
        # The helper 'timestamp' key is no longer needed after the merge.
        if 'timestamp' in merged_df.columns:
            merged_df = merged_df.drop(columns=['timestamp'])

        # --- Build the final per-sample output DataFrame.
        output_columns = ['time', 'x', 'y', 'z', 'predicted_met']
        missing_output_cols = [col for col in output_columns if col not in merged_df.columns]
        if missing_output_cols:
            print(f"警告: merged_df 中缺少构造输出所需的列: {missing_output_cols}。无法为 {subject_pid_str} 生成输出文件。")
            return summary_dict, None  # summary exists, per-sample output does not

        output_df = merged_df[output_columns].copy()
        # Back-fill leading NaNs produced by the backward merge (samples before
        # the first window start). fillna(method='bfill') is deprecated in
        # modern pandas — use .bfill() instead.
        output_df['predicted_met'] = output_df['predicted_met'].bfill()
        output_df = output_df.dropna(subset=['predicted_met'])  # drop rows no window ever covered

        # The subject ID is attached in the main loop, not here.
        return summary_dict, output_df

    except FileNotFoundError:
         print(f"错误: 测试文件 {data_file} 未找到。")
         return None, None
    except Exception as e:
        print(f"处理测试受试者 {subject_pid_str} 时出错: {str(e)}")
        import traceback
        traceback.print_exc()
        return None, None

# --- Main function ---
def main():
    start_overall = time.time()
    os.makedirs(OUTPUT_PREDICTIONS_DIR, exist_ok=True)
    os.makedirs(RESULTS_DIR, exist_ok=True)
    os.makedirs(FIGURES_SAVE_PATH, exist_ok=True)

    # --- 1. 加载元数据 ---
    print("加载元数据...")
    try:
        train_metadata = pd.read_csv(METADATA_TRAIN)
        test_metadata = pd.read_csv(METADATA_TEST)
        # 验证必需的列是否存在
        if 'pid' not in train_metadata.columns or 'age' not in train_metadata.columns or 'sex' not in train_metadata.columns:
            raise ValueError(f"训练元数据 {METADATA_TRAIN} 缺少 'pid', 'age', 或 'sex' 列。实际列: {train_metadata.columns.tolist()}")
        if 'pid' not in test_metadata.columns or 'age' not in test_metadata.columns or 'sex' not in test_metadata.columns:
            raise ValueError(f"测试元数据 {METADATA_TEST} 缺少 'pid', 'age', 或 'sex' 列。实际列: {test_metadata.columns.tolist()}")
        print(f"训练集元数据: {len(train_metadata)} 条记录")
        print(f"测试集元数据: {len(test_metadata)} 条记录")
    except FileNotFoundError as e:
        print(f"错误: 无法找到元数据文件 {e.filename}。请检查路径配置: {BASE_DATA_DIR}")
        sys.exit(1)
    except ValueError as e:
         print(f"错误: 元数据文件格式不正确。{e}")
         sys.exit(1)

    # --- 2. 并行处理训练数据，提取特征 --- #
    print(f"\n开始并行处理训练数据 ({NUM_PROCESSES} 个进程)...")
    start_feature_extraction = time.time()
    train_tasks = []
    # 使用 'pid' 列
    for _, row in train_metadata.iterrows():
        # 传递完整的 subject ID，例如 'P001'
        train_tasks.append({'subject_id': row['pid'], 'metadata': row})

    all_train_features_list = []
    with mp.Pool(processes=NUM_PROCESSES) as pool:
        for result in tqdm(pool.imap_unordered(process_subject_train_data, train_tasks), total=len(train_tasks), desc="提取训练特征"):
            if result is not None and not result.empty:
                all_train_features_list.append(result)

    if not all_train_features_list:
        print("\n错误: 未能从训练数据中提取任何有效特征。请检查:")
        print(f"  - 训练数据文件是否存在于: {DATA_DIR_TRAIN}")
        print("  - 文件格式是否正确 (包含 time, x, y, z, annotation 列)")
        print("  - 'annotation' 列中的 MET 值是否可解析")
        print("  - 时间戳格式是否正确 (毫秒)")
        sys.exit(1)

    all_train_features = pd.concat(all_train_features_list, ignore_index=True)
    end_feature_extraction = time.time()
    print(f"训练特征提取完成。耗时: {end_feature_extraction - start_feature_extraction:.2f} 秒")
    print(f"总共提取到 {len(all_train_features)} 个有效特征窗口。")

    # --- 3. 准备训练数据和标签 --- #
    y_train = all_train_features['met']
    X_train = all_train_features.drop('met', axis=1)

    # --- 4. 定义预处理器和模型 --- #
    print("\n定义预处理器和模型...")
    # 从实际数据推断数值和分类特征，避免硬编码
    numerical_features = X_train.select_dtypes(include=np.number).columns.tolist()
    # 'age' 可能需要特殊处理或已经是数值；'sex' 是分类的
    categorical_features = X_train.select_dtypes(include='object').columns.tolist()
    if 'sex' not in categorical_features and 'sex' in X_train.columns:
         categorical_features.append('sex') # 确保 sex 被包括
    # 如果 age 是 '30-37' 这种格式，需要先编码处理
    if 'age' in categorical_features:
         print("信息: 'age' 列被检测为对象类型，将进行独热编码。如果需要数值处理，请预先转换。")

    # 从数值特征中移除分类特征（如果错误地包含了）
    numerical_features = [f for f in numerical_features if f not in categorical_features]

    print(f"识别到的数值特征: {numerical_features}")
    print(f"识别到的分类特征: {categorical_features}")

    # 检查是否有遗漏的特征
    all_defined_features = set(numerical_features + categorical_features)
    all_actual_features = set(X_train.columns)
    if all_defined_features != all_actual_features:
         print(f"警告: 特征定义可能不完整。未定义的特征: {all_actual_features - all_defined_features}")

    transformers = []
    if numerical_features:
        transformers.append(('num', StandardScaler(), numerical_features))
    if categorical_features:
         transformers.append(('cat', OneHotEncoder(handle_unknown='ignore', sparse_output=False), categorical_features))

    if not transformers:
         print("错误: 无法定义任何特征转换器。检查数据类型。")
         sys.exit(1)

    preprocessor = ColumnTransformer(
        transformers=transformers,
        remainder='drop' # 明确丢弃未定义的列
    )

    lgbm = lgb.LGBMRegressor(random_state=42, n_jobs=1) # n_jobs=1 避免 LightGBM 和 multiprocessing 冲突

    pipeline = Pipeline(steps=[('preprocessor', preprocessor),
                             ('regressor', lgbm)])

    # --- 5. 超参数调优 (RandomizedSearchCV) --- #
    print("\n开始使用 RandomizedSearchCV 进行超参数调优...")
    start_tuning = time.time()
    param_dist = {
        'regressor__n_estimators': sp_randint(100, 800),
        'regressor__learning_rate': sp_uniform(0.01, 0.15),
        'regressor__num_leaves': sp_randint(20, 50),
        'regressor__max_depth': sp_randint(5, 12),
        'regressor__min_child_samples': sp_randint(10, 40),
        'regressor__subsample': sp_uniform(0.6, 0.35),
        'regressor__colsample_bytree': sp_uniform(0.6, 0.35)
    }

    # 使用 KFold 进行交叉验证，确保数据不泄露
    cv_strategy = KFold(n_splits=CV_FOLDS, shuffle=True, random_state=42)

    random_search = RandomizedSearchCV(
        pipeline,
        param_distributions=param_dist,
        n_iter=N_ITER_SEARCH,
        cv=cv_strategy,
        scoring='neg_mean_squared_error',
        n_jobs=NUM_PROCESSES, # 让 RandomizedSearchCV 控制并行
        random_state=42,
        verbose=1,
        error_score='raise' # 捕获CV中的错误
    )

    try:
        random_search.fit(X_train, y_train)
    except Exception as e:
         print(f"\n错误: 超参数调优过程中发生错误: {e}")
         print("请检查特征工程、预处理步骤和数据类型。")
         import traceback
         traceback.print_exc()
         sys.exit(1)

    end_tuning = time.time()
    print(f"超参数调优完成。耗时: {end_tuning - start_tuning:.2f} 秒")
    print("最佳参数:", random_search.best_params_)
    print(f"最佳交叉验证得分 (Negative MSE): {random_search.best_score_:.4f}")

    # --- Store and Plot CV Results ---
    cv_results_df = pd.DataFrame(random_search.cv_results_)
    plot_cv_results(cv_results_df)
    print("Cross-validation results plots generated.")

    # --- 6. Retrain the final model on the full training set with the best params --- #
    print("\n使用最佳参数在完整训练集上训练最终模型...")
    start_train_final = time.time()
    best_model = random_search.best_estimator_
    try:
        best_model.fit(X_train, y_train)
    except Exception as e:
        print(f"\n错误: 训练最终模型时发生错误: {e}")
        sys.exit(1)
    end_train_final = time.time()
    print(f"最终模型训练完成。耗时: {end_train_final - start_train_final:.2f} 秒")

    # --- 7. Persist the trained pipeline --- #
    print(f"\n保存最终模型到 {MODEL_SAVE_PATH}...")
    try:
        joblib.dump(best_model, MODEL_SAVE_PATH)
        print("模型保存成功。")
    except Exception as e:
        print(f"错误: 保存模型时出错: {e}")

    # --- Evaluate and plot on the training set ---
    print("Evaluating model on training set and generating plots...")
    try:
        # The preprocessor step must already be fitted before
        # get_feature_names_out() can be called.
        feature_names_out = best_model.named_steps['preprocessor'].get_feature_names_out()
        plot_feature_importance(best_model, feature_names_out)
        print("Feature importance plot generated.")

        y_train_pred = best_model.predict(X_train)
        plot_metrics(y_train, y_train_pred, dataset_type='Train')
        plot_residuals(y_train, y_train_pred, dataset_type='Train')
        print("Training set evaluation plots (Metrics, Residuals) generated.")

        mae_train = mean_absolute_error(y_train, y_train_pred)
        rmse_train = np.sqrt(mean_squared_error(y_train, y_train_pred))
        r2_train = r2_score(y_train, y_train_pred)
        print(f"Training Set Metrics: MAE={mae_train:.4f}, RMSE={rmse_train:.4f}, R2={r2_train:.4f}")
    except Exception as e:
        print(f"Error generating plots or metrics for training set: {e}")

    # --- 8. Reload the saved model and predict on the test set --- #
    print(f"\n加载模型 {MODEL_SAVE_PATH} 并对测试集进行预测...")
    try:
        loaded_model = joblib.load(MODEL_SAVE_PATH)
    except FileNotFoundError:
        print(f"错误: 找不到已保存的模型文件 {MODEL_SAVE_PATH}。请确保模型已成功训练并保存。")
        sys.exit(1)
    except Exception as e:
        print(f"加载模型时出错: {e}")
        sys.exit(1)

    print(f"开始并行处理测试数据 ({NUM_PROCESSES} 个进程)...")
    start_prediction = time.time()
    # Build one task per test subject; 'pid' holds the full subject ID (e.g. 'T001').
    # NOTE(review): the model is embedded in every task dict, so it gets pickled
    # once per task when dispatched to the worker pool — acceptable here, but a
    # pool initializer would avoid the repeated serialization.
    predict_tasks = []
    test_subject_pids = test_metadata['pid'].unique()
    for subject_pid in test_subject_pids:
        metadata_row = test_metadata[test_metadata['pid'] == subject_pid].iloc[0]
        predict_tasks.append({'subject_id': subject_pid, 'metadata': metadata_row, 'model': loaded_model})

    prediction_summary_results = []
    processed_subjects = 0  # number of subjects whose per-subject CSV was written
    with mp.Pool(processes=NUM_PROCESSES) as pool:
        # Each worker returns (summary_dict, output_df). The detailed predictions
        # are written to disk immediately instead of being accumulated in memory.
        for result_tuple in tqdm(pool.imap_unordered(process_subject_predict_data, predict_tasks), total=len(predict_tasks), desc="Predicting Test Set"):
            if result_tuple is not None and result_tuple[0] is not None:  # summary exists
                summary_dict, output_df = result_tuple
                prediction_summary_results.append(summary_dict)

                # Save this subject's detailed predictions, if any were produced.
                if output_df is not None and not output_df.empty:
                    subject_id = summary_dict['志愿者ID']  # subject ID comes from the summary dict
                    output_csv_path = os.path.join(OUTPUT_PREDICTIONS_DIR, f"{subject_id}.csv")
                    try:
                        output_df.to_csv(output_csv_path, index=False, float_format='%.6f')  # fixed precision
                        processed_subjects += 1
                    except Exception as e_save:
                        print(f"错误: 保存预测文件 {output_csv_path} 时出错: {e_save}")

    end_prediction = time.time()
    print(f"测试集预测完成。处理并保存了 {processed_subjects} 个志愿者的预测文件。耗时: {end_prediction - start_prediction:.2f} 秒")

    # --- 9. Build the summary workbook (result_2.xlsx) --- #
    if prediction_summary_results:
        print(f"Generating summary file {OUTPUT_SUMMARY_FILE}...")
        summary_df = pd.DataFrame(prediction_summary_results)
        # Column names and order required by Table 2 of the competition spec.
        required_columns = [
            '志愿者ID',
            '记录总时长（小时）',
            '高等强度运动总时长（分钟）',
            '中等强度运动总时长（分钟）',
            '低等强度运动总时长（分钟）',
            '静态活动总时长（分钟）'
        ]
        # If some required columns are missing, fall back to whatever is available
        # while preserving the required ordering.
        missing_cols = [col for col in required_columns if col not in summary_df.columns]
        if missing_cols:
            print(f"警告: 汇总DataFrame缺少以下必需列: {missing_cols}。将使用可用列创建文件。")
            summary_df = summary_df[[col for col in required_columns if col in summary_df.columns]]
        else:
            summary_df = summary_df[required_columns]

        summary_df = summary_df.round(4)  # keep 4 decimal places
        try:
            # Use the openpyxl engine explicitly for .xlsx compatibility.
            summary_df.to_excel(OUTPUT_SUMMARY_FILE, index=False, engine='openpyxl')
            print(f"预测结果已保存到 {OUTPUT_PREDICTIONS_DIR} 目录")
            print(f"汇总统计已保存到 {OUTPUT_SUMMARY_FILE}")
        except ImportError:
            # openpyxl missing: degrade gracefully to a CSV summary.
            print("警告: 未安装 'openpyxl'。请运行 'pip install openpyxl' 来支持 .xlsx 文件写入。尝试保存为 CSV。")
            try:
                csv_output_file = OUTPUT_SUMMARY_FILE.replace('.xlsx', '.csv')
                summary_df.to_csv(csv_output_file, index=False)
                print(f"汇总统计已保存为 CSV 文件: {csv_output_file}")
            except Exception as e_csv:
                print(f"保存为 CSV 时也出错: {e_csv}")
        except Exception as e_excel:
            print(f"保存汇总 Excel 文件时出错: {e_excel}")
    else:
        print("\nWarning: No valid prediction summaries generated, cannot create summary file.")

    end_overall = time.time()
    print(f"\n--- Task 2 processing completed --- Total duration: {end_overall - start_overall:.2f} seconds ---")

# --- Script entry point ---
if __name__ == "__main__":
    import traceback

    try:
        print(f"=== 脚本启动时间: {datetime.now()} ===")
        sys.stdout.flush()

        # Required when the script is frozen into a Windows executable that
        # spawns multiprocessing workers; a no-op in normal runs.
        if sys.platform == 'win32':
            mp.freeze_support()

        main()

        print(f"=== 程序正常结束时间: {datetime.now()} ===")
    except Exception as exc:
        # Top-level boundary: report the failure with a full traceback.
        print("\n=== 程序异常终止 ===")
        print(f"错误信息: {str(exc)}")
        print(traceback.format_exc())
    finally:
        # Flush on both the success and failure paths so log output is not lost.
        sys.stdout.flush()