import ast

import numpy as np
import pandas as pd
from catboost import CatBoostClassifier
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
from sklearn.model_selection import GroupKFold
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier

# Load the per-stage statistics exported from the smartwatch experiments.
data_path = 'D:\\学习&科研\\华为手表项目\\华为数据\\试验记录表\\all_stages_df_statistics.csv'
df = pd.read_csv(data_path)


def _first_element_or_zero(cell):
    """Parse a stringified-list CSV cell and return its first element, or 0.

    Replaces the original ``eval()`` usage: ``eval`` executes arbitrary code
    from file content (a security hazard) and was called up to three times
    per cell; ``ast.literal_eval`` safely parses Python literals exactly once.
    Returns 0 when the parsed value is not a non-empty list.
    """
    value = ast.literal_eval(cell) if isinstance(cell, str) else cell
    return value[0] if isinstance(value, list) and len(value) > 0 else 0


# Collapse the polar_hr / polar_rr columns (stored as stringified lists)
# into one scalar per row so the models can consume them.
df['polar_hr'] = df['polar_hr'].apply(_first_element_or_zero)
df['polar_rr'] = df['polar_rr'].apply(_first_element_or_zero)

# Keep only rows recorded while the subject was in the 'running' state.
df = df[df['state'] == 'running']

# Grouped 10-fold CV: a subject's rows never appear in both train and test.
gkf = GroupKFold(n_splits=10)

# Feature matrix and target. NOTE: 'hight' is the dataset's own (misspelled)
# column name — do not "fix" it here or the lookup breaks.
X = df[['speed', 'polar_hr_mean', 'polar_hr_min', 'polar_hr_max', 'polar_hr_median',
         'polar_hr_q1', 'polar_hr_q3', 'polar_rr_mean', 'polar_rr_median', 'polar_rr_q1',
         'polar_rr_q3', 'sex', 'age', 'hight', 'weight']].values
y = df['physiology_RPE'].values
groups = df['number']  # subject identifier used to form the CV groups

# Discretize the RPE target into integer classes in [0, 10].
# astype(int) matters: np.round leaves float labels, which XGBoost rejects.
y = np.round(np.clip(y, 0, 10)).astype(int)

# Candidate classifiers, all evaluated on identical folds for a fair comparison.
models = {
    'Random Forest': RandomForestClassifier(n_estimators=200, max_depth=10, random_state=42),
    'Logistic Regression': LogisticRegression(max_iter=5000),
    'Support Vector Machine': SVC(kernel='linear'),
    'Decision Tree': DecisionTreeClassifier(random_state=42),
    'Gradient Boosting': GradientBoostingClassifier(n_estimators=100, random_state=42),
    'KNN': KNeighborsClassifier(n_neighbors=5),
    # Fixes vs. original: `use_label_encoder` is deprecated/removed in modern
    # xgboost and only produced warnings; the target has 11 classes (RPE 0-10),
    # so the multi-class metric 'mlogloss' is required — binary 'logloss' is
    # invalid for this problem.
    'XGBoost': XGBClassifier(eval_metric='mlogloss'),
    'CatBoost': CatBoostClassifier(iterations=100, learning_rate=0.1, depth=6, verbose=0)
}

# Per-model list; each entry will be one fold's metric dict.
results = {model_name: [] for model_name in models.keys()}

# Grouped cross-validation: every subject lands in exactly one test fold.
for fold_no, (idx_train, idx_test) in enumerate(gkf.split(X, y, groups=groups), start=1):
    X_tr, y_tr = X[idx_train], y[idx_train]
    X_te, y_te = X[idx_test], y[idx_test]

    for name, clf in models.items():
        # Fit on this fold's training subjects, predict the held-out subjects.
        clf.fit(X_tr, y_tr)
        predictions = clf.predict(X_te)

        # Weighted averaging accounts for the imbalanced multi-class target.
        results[name].append({
            'Fold': fold_no,
            'Accuracy': accuracy_score(y_te, predictions),
            'Precision': precision_score(y_te, predictions, average='weighted', zero_division=0),
            'Recall': recall_score(y_te, predictions, average='weighted', zero_division=0),
            'F1 Score': f1_score(y_te, predictions, average='weighted', zero_division=0),
        })

# Report the per-fold metrics for each model.
for name in results:
    print(f"Model: {name}")
    for rec in results[name]:
        print(
            f"Fold {rec['Fold']}: Accuracy = {rec['Accuracy']:.4f}, "
            f"Precision = {rec['Precision']:.4f}, "
            f"Recall = {rec['Recall']:.4f}, "
            f"F1 Score = {rec['F1 Score']:.4f}"
        )
    print()