{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "a86e32e7",
   "metadata": {},
   "outputs": [],
   "source": [
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\", message=\".*valid feature names.*\", category=UserWarning)\n",
    "\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from sklearn.model_selection import StratifiedGroupKFold, GridSearchCV, cross_val_predict, cross_val_score\n",
    "from sklearn.preprocessing import StandardScaler, FunctionTransformer\n",
    "from sklearn.pipeline import Pipeline\n",
    "from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, StackingClassifier, VotingClassifier\n",
    "from sklearn.svm import SVC\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.feature_selection import SelectKBest, f_classif\n",
    "from sklearn.metrics import classification_report, confusion_matrix, ConfusionMatrixDisplay, roc_curve, auc, precision_recall_curve\n",
    "from lightgbm import LGBMClassifier\n",
    "import matplotlib.pyplot as plt\n",
    "import seaborn as sns\n",
    "import files\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "f352836d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Dropped constant features: ['num_segments', 'length_to_max_ratio']\n",
      "Remaining features: 4\n",
      "\n",
      "=== Tuning RandomForest ===\n",
      "Fitting 5 folds for each of 64 candidates, totalling 320 fits\n",
      "RandomForest best params: {'clf__max_depth': 10, 'clf__max_features': 'sqrt', 'clf__n_estimators': 200}\n",
      "RandomForest CV acc: 0.8129\n",
      "\n",
      "RandomForest Classification Report:\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "          No       0.83      0.98      0.90       181\n",
      "         Yes       0.20      0.03      0.05        38\n",
      "\n",
      "    accuracy                           0.81       219\n",
      "   macro avg       0.51      0.50      0.47       219\n",
      "weighted avg       0.72      0.81      0.75       219\n",
      "\n",
      "\n",
      "=== Tuning SVM ===\n",
      "Fitting 5 folds for each of 20 candidates, totalling 100 fits\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\scien\\AppData\\Local\\Temp\\ipykernel_10684\\123070383.py:124: FutureWarning: \n",
      "\n",
      "Passing `palette` without assigning `hue` is deprecated and will be removed in v0.14.0. Assign the `y` variable to `hue` and set `legend=False` for the same effect.\n",
      "\n",
      "  sns.barplot(x=importances, y=selected_features, palette='viridis')\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "SVM best params: {'clf__C': 0.1, 'clf__gamma': 'scale'}\n",
      "SVM CV acc: 0.8266\n",
      "\n",
      "SVM Classification Report:\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "          No       0.83      1.00      0.91       181\n",
      "         Yes       1.00      0.00      0.00        38\n",
      "\n",
      "    accuracy                           0.83       219\n",
      "   macro avg       0.91      0.50      0.45       219\n",
      "weighted avg       0.86      0.83      0.75       219\n",
      "\n",
      "\n",
      "=== Tuning GradientBoosting ===\n",
      "Fitting 5 folds for each of 192 candidates, totalling 960 fits\n",
      "GradientBoosting best params: {'clf__learning_rate': 0.01, 'clf__max_depth': 7, 'clf__n_estimators': 100, 'clf__subsample': 0.6}\n",
      "GradientBoosting CV acc: 0.8312\n",
      "\n",
      "GradientBoosting Classification Report:\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "          No       0.83      1.00      0.91       181\n",
      "         Yes       1.00      0.03      0.05        38\n",
      "\n",
      "    accuracy                           0.83       219\n",
      "   macro avg       0.92      0.51      0.48       219\n",
      "weighted avg       0.86      0.83      0.76       219\n",
      "\n",
      "\n",
      "=== Tuning LightGBM ===\n",
      "Fitting 5 folds for each of 256 candidates, totalling 1280 fits\n",
      "LightGBM best params: {'clf__learning_rate': 0.1, 'clf__max_depth': 5, 'clf__n_estimators': 300, 'clf__num_leaves': 20}\n",
      "LightGBM CV acc: 0.7580\n",
      "\n",
      "LightGBM Classification Report:\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "          No       0.84      0.87      0.86       181\n",
      "         Yes       0.26      0.21      0.23        38\n",
      "\n",
      "    accuracy                           0.76       219\n",
      "   macro avg       0.55      0.54      0.54       219\n",
      "weighted avg       0.74      0.76      0.75       219\n",
      "\n",
      "\n",
      "All plots saved as PNG files.\n"
     ]
    }
   ],
   "source": [
    "## 2) Load & Merge Data\n",
    "feat = pd.read_csv(files.PPG_MY_OWN)\n",
    "meta = pd.read_csv(files.METADATA_PATH)\n",
    "df = feat.merge(meta[['subject_ID', 'diabetes_label']], on=\"subject_ID\", how=\"inner\")\n",
    "\n",
    "# 3) Define Features and Targets, Drop Constant Features\n",
    "FEATURE_COLS = [c for c in df.columns if c not in ('subject_ID', 'diabetes_label')]\n",
    "# Drop constant features: a column with <= 1 unique value carries no signal\n",
    "# and can break variance/F-statistic based selectors.\n",
    "df_features = df[FEATURE_COLS]\n",
    "constant_features = df_features.columns[df_features.nunique() <= 1]\n",
    "df = df.drop(columns=constant_features)\n",
    "print(f\"Dropped constant features: {list(constant_features)}\")\n",
    "FEATURE_COLS = [c for c in df.columns if c not in ('subject_ID', 'diabetes_label')]\n",
    "print(f\"Remaining features: {len(FEATURE_COLS)}\")\n",
    "\n",
    "X = df[FEATURE_COLS].values\n",
    "y = df['diabetes_label'].values\n",
    "groups = df['subject_ID'].values\n",
    "\n",
    "# 4) Plot Class Distribution\n",
    "plt.figure(figsize=(6, 4))\n",
    "sns.countplot(x='diabetes_label', data=df, hue='diabetes_label', palette='Set2', legend=False)\n",
    "plt.title('Class Distribution of Diabetes Labels', fontsize=14)\n",
    "plt.xlabel('Diabetes Label (0: No, 1: Yes)')\n",
    "plt.ylabel('Count')\n",
    "plt.savefig('class_distribution.png')\n",
    "plt.close()\n",
    "\n",
    "# 5) Subject-wise Stratified CV. Generate the splits ONCE so every model\n",
    "#    (and the ensembles below) is tuned and evaluated on identical folds.\n",
    "cv = StratifiedGroupKFold(n_splits=5, shuffle=True, random_state=42)\n",
    "cv_splits = list(cv.split(X, y, groups))\n",
    "\n",
    "# 6) Helper to restore a DataFrame for LightGBM, keeping column names\n",
    "#    through the pipeline (avoids the 'valid feature names' warning).\n",
    "to_df = FunctionTransformer(lambda X: pd.DataFrame(X, columns=FEATURE_COLS))\n",
    "\n",
    "# 7) Define Pipelines with Adjusted Feature Selection\n",
    "models = {\n",
    "    \"RandomForest\": {\n",
    "        \"pipe\": Pipeline([\n",
    "            (\"scaler\", StandardScaler()),\n",
    "            (\"selector\", SelectKBest(score_func=f_classif, k='all')),\n",
    "            (\"clf\", RandomForestClassifier(random_state=42))\n",
    "        ]),\n",
    "        \"params\": {\n",
    "            \"clf__n_estimators\": [50, 100, 200, 300],\n",
    "            \"clf__max_depth\": [None, 10, 20, 30],\n",
    "            \"clf__max_features\": [\"sqrt\", \"log2\", 0.5, 0.7]\n",
    "        }\n",
    "    },\n",
    "    \"SVM\": {\n",
    "        \"pipe\": Pipeline([\n",
    "            (\"scaler\", StandardScaler()),\n",
    "            (\"selector\", SelectKBest(score_func=f_classif, k='all')),\n",
    "            (\"clf\", SVC(kernel=\"rbf\", probability=True, random_state=42))\n",
    "        ]),\n",
    "        \"params\": {\n",
    "            \"clf__C\": [0.1, 1, 10, 100],\n",
    "            \"clf__gamma\": [\"scale\", \"auto\", 0.01, 0.1, 1]\n",
    "        }\n",
    "    },\n",
    "    \"GradientBoosting\": {\n",
    "        \"pipe\": Pipeline([\n",
    "            (\"scaler\", StandardScaler()),\n",
    "            (\"selector\", SelectKBest(score_func=f_classif, k='all')),\n",
    "            (\"clf\", GradientBoostingClassifier(random_state=42))\n",
    "        ]),\n",
    "        \"params\": {\n",
    "            \"clf__n_estimators\": [50, 100, 200, 300],\n",
    "            \"clf__learning_rate\": [0.001, 0.01, 0.1, 0.2],\n",
    "            \"clf__max_depth\": [3, 5, 7, 9],\n",
    "            \"clf__subsample\": [0.6, 0.8, 1.0]\n",
    "        }\n",
    "    },\n",
    "    \"LightGBM\": {\n",
    "        \"pipe\": Pipeline([\n",
    "            (\"scaler\", StandardScaler()),\n",
    "            (\"selector\", SelectKBest(score_func=f_classif, k='all')),\n",
    "            (\"to_df\", to_df),\n",
    "            (\"clf\", LGBMClassifier(random_state=42, verbosity=-1, class_weight='balanced'))\n",
    "        ]),\n",
    "        \"params\": {\n",
    "            \"clf__n_estimators\": [50, 100, 200, 300],\n",
    "            \"clf__learning_rate\": [0.001, 0.01, 0.1, 0.2],\n",
    "            \"clf__num_leaves\": [20, 31, 50, 70],\n",
    "            \"clf__max_depth\": [-1, 5, 10, 15]\n",
    "        }\n",
    "    }\n",
    "}\n",
    "\n",
    "# 8) Tune Models and Collect OOF Predictions\n",
    "best_estimators = {}\n",
    "cv_scores = {}  # name -> best CV accuracy; reused for the weighted ensemble below\n",
    "y_pred_all = {}\n",
    "y_proba_all = {}\n",
    "model_names = list(models.keys())\n",
    "\n",
    "for name, spec in models.items():\n",
    "    print(f\"\\n=== Tuning {name} ===\")\n",
    "    search = GridSearchCV(\n",
    "        estimator=spec[\"pipe\"],\n",
    "        param_grid=spec[\"params\"],\n",
    "        scoring=\"accuracy\",\n",
    "        cv=cv_splits,\n",
    "        n_jobs=-1,\n",
    "        verbose=1\n",
    "    )\n",
    "    search.fit(X, y)\n",
    "    best = search.best_estimator_\n",
    "    best_estimators[name] = best\n",
    "    cv_scores[name] = search.best_score_\n",
    "    print(f\"{name} best params: {search.best_params_}\")\n",
    "    print(f\"{name} CV acc: {search.best_score_:.4f}\")\n",
    "\n",
    "    # Out-of-fold predictions on the same folds used for tuning.\n",
    "    y_pred_all[name] = cross_val_predict(best, X, y, cv=cv_splits, n_jobs=-1)\n",
    "    y_proba_all[name] = cross_val_predict(\n",
    "        best, X, y, method=\"predict_proba\", cv=cv_splits, n_jobs=-1\n",
    "    )[:, 1]\n",
    "\n",
    "    print(f\"\\n{name} Classification Report:\")\n",
    "    # zero_division=0: report 0.0 rather than a misleading 1.0 for a class the\n",
    "    # model never predicts -- the minority ('Yes') class is often missed here.\n",
    "    print(classification_report(y, y_pred_all[name], target_names=[\"No\", \"Yes\"], zero_division=0))\n",
    "\n",
    "    if name == \"RandomForest\":\n",
    "        importances = best.named_steps['clf'].feature_importances_\n",
    "        selected_features = FEATURE_COLS\n",
    "        plt.figure(figsize=(10, 6))\n",
    "        # hue= + legend=False: seaborn >= 0.13 deprecates palette without hue.\n",
    "        sns.barplot(x=importances, y=selected_features, hue=selected_features, palette='viridis', legend=False)\n",
    "        plt.title('RandomForest Feature Importances', fontsize=14)\n",
    "        plt.xlabel('Importance')\n",
    "        plt.ylabel('Feature')\n",
    "        plt.savefig('randomforest_feature_importances.png')\n",
    "        plt.close()\n",
    "\n",
    "# 8b) OOF evaluation plots: per-model confusion matrix + combined ROC curves.\n",
    "#     Defined here so the notebook survives Restart & Run All (previously the\n",
    "#     call in step 9 relied on a definition from a deleted cell -> NameError).\n",
    "def plot_evaluation_metrics(y_true, y_pred_all, y_proba_all, model_names, plot_prefix=\"base\"):\n",
    "    \"\"\"Save confusion-matrix and ROC-curve PNGs from out-of-fold predictions.\n",
    "\n",
    "    y_true      : ground-truth labels (0/1)\n",
    "    y_pred_all  : dict {model_name: OOF hard predictions}\n",
    "    y_proba_all : dict {model_name: OOF positive-class probabilities}\n",
    "    model_names : keys present in both dicts\n",
    "    plot_prefix : filename prefix for the saved figures\n",
    "    \"\"\"\n",
    "    for name in model_names:\n",
    "        cm = confusion_matrix(y_true, y_pred_all[name])\n",
    "        disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=['No', 'Yes'])\n",
    "        fig, ax = plt.subplots(figsize=(5, 4))\n",
    "        disp.plot(ax=ax, cmap='Blues', colorbar=False)\n",
    "        ax.set_title(f'{name} Confusion Matrix')\n",
    "        fig.savefig(f'{plot_prefix}_{name}_confusion_matrix.png')\n",
    "        plt.close(fig)  # avoid figure buildup inside the loop\n",
    "\n",
    "    fig, ax = plt.subplots(figsize=(8, 6))\n",
    "    for name in model_names:\n",
    "        fpr, tpr, _ = roc_curve(y_true, y_proba_all[name])\n",
    "        ax.plot(fpr, tpr, label=f'{name} (AUC = {auc(fpr, tpr):.3f})')\n",
    "    ax.plot([0, 1], [0, 1], 'k--', label='Chance')\n",
    "    ax.set(title='ROC Curves (out-of-fold)', xlabel='False Positive Rate', ylabel='True Positive Rate')\n",
    "    ax.legend(loc='lower right')\n",
    "    fig.savefig(f'{plot_prefix}_roc_curves.png')\n",
    "    plt.close(fig)\n",
    "\n",
    "# 9) Plot Evaluation Metrics for Base Models\n",
    "plot_evaluation_metrics(y, y_pred_all, y_proba_all, model_names, plot_prefix=\"base\")\n",
    "\n",
    "# 10) Build Ensembles from the tuned base models\n",
    "estimators = list(best_estimators.items())\n",
    "\n",
    "stack = StackingClassifier(\n",
    "    estimators=estimators,\n",
    "    final_estimator=LogisticRegression(),\n",
    "    cv=cv_splits,\n",
    "    n_jobs=-1\n",
    ")\n",
    "vote = VotingClassifier(\n",
    "    estimators=estimators,\n",
    "    voting=\"soft\",\n",
    "    n_jobs=-1\n",
    ")\n",
    "# Weight each model by the CV accuracy recorded during tuning. The previous\n",
    "# version re-ran ALL four grid searches (thousands of redundant fits) just to\n",
    "# read back the same best_score_ values.\n",
    "weighted_vote = VotingClassifier(\n",
    "    estimators=estimators,\n",
    "    voting=\"soft\",\n",
    "    weights=[cv_scores[n] for n in model_names],\n",
    "    n_jobs=-1\n",
    ")\n",
    "# TODO(review): stack / vote / weighted_vote are constructed but never fitted\n",
    "# or scored -- evaluate them with cross_val_score(..., cv=cv_splits) next.\n",
    "\n",
    "print(\"\\nAll plots saved as PNG files.\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "992d3075",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "JB",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.13.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
