# Complete pipeline: feature extraction, data loading, stratified split, random-forest training, evaluation
import os
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit, GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report

# 1. Preprocessing: turn one recording into a fixed-length feature vector
def process_sample(file_path, n_frames=2000):
    """Extract a 14-dimensional feature vector from one gaze-tracking CSV.

    Reads at most *n_frames* leading rows, then for each of the three
    signal columns computes [mean, std, max, min] (12 values), plus the
    mean and std of the frame-to-frame differences ("velocity") pooled
    across all columns (2 values).

    Parameters
    ----------
    file_path : str
        Path to a CSV containing 'Gaze_X', 'Gaze_Y' and 'Expression'
        columns (assumed numeric — TODO confirm 'Expression' encoding).
    n_frames : int, optional
        Maximum number of leading rows to use (default 2000).

    Returns
    -------
    np.ndarray
        1-D float vector of length 14.
    """
    df = pd.read_csv(file_path).head(n_frames)
    # to_numpy(dtype=float) guarantees a numeric ndarray; .values could
    # silently yield an object array on mixed-dtype frames.
    features = df[['Gaze_X', 'Gaze_Y', 'Expression']].to_numpy(dtype=float)

    # Empty recording: return an all-zero vector instead of NaNs (np.mean
    # on an empty slice returns NaN and emits RuntimeWarnings).
    if features.shape[0] == 0:
        return np.zeros(3 * 4 + 2)

    stats = []
    for col in range(features.shape[1]):
        column = features[:, col]
        stats.extend([np.mean(column), np.std(column),
                      np.max(column), np.min(column)])

    # First-order differences approximate per-frame velocity; a single
    # frame yields an empty diff, hence the size guard.
    velocity = np.diff(features, axis=0)
    velocity_stats = [np.mean(velocity), np.std(velocity)] if velocity.size != 0 else [0, 0]
    return np.concatenate([stats, velocity_stats])

# 2. Load data: one feature vector per subject file.
# os.listdir order is filesystem-dependent and may include non-CSV
# artifacts (e.g. .DS_Store) that would crash pd.read_csv — filter to
# .csv files and sort so the sample order is reproducible across runs.
def _list_csvs(folder):
    # Sorted paths of the CSV files directly inside *folder*.
    return sorted(os.path.join(folder, f) for f in os.listdir(folder)
                  if f.lower().endswith('.csv'))

asd_files = _list_csvs('ASD')
td_files = _list_csvs('TD')
X_asd = [process_sample(f) for f in asd_files]
X_td = [process_sample(f) for f in td_files]
# Flat concatenation also works if one group happens to be empty.
X = np.vstack(X_asd + X_td)
# Labels: 1 = ASD, 0 = TD (matches the order expected by the report step).
y = np.hstack([np.ones(len(X_asd)), np.zeros(len(X_td))])

# 3. Stratified train/test split (preserves the ASD/TD class ratio).
# n_splits=1 is made explicit: the default is 10 splits, but only the
# first is consumed via next(); random_state pins the split for
# reproducibility.
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
train_idx, test_idx = next(sss.split(X, y))
X_train, X_test = X[train_idx], X[test_idx]
y_train, y_test = y[train_idx], y[test_idx]

# 4. Train and tune a random forest.
# Small 3-fold grid search over ensemble size and tree depth;
# GridSearchCV refits the best configuration on the full training set,
# so `clf` can be used directly for prediction afterwards.
base_model = RandomForestClassifier(random_state=42)
param_grid = {
    'n_estimators': [100, 200],
    'max_depth': [None, 15],
}
clf = GridSearchCV(base_model, param_grid, cv=3)
clf.fit(X_train, y_train)

# 5. Evaluate on the held-out test set.
# classification_report lists classes in sorted label order, so
# target_names maps label 0 -> "TD" and label 1 -> "ASD".
y_pred = clf.predict(X_test)
report = classification_report(y_test, y_pred, target_names=["TD", "ASD"])
print(report)