{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Data Loading and Feature Selection"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Third-party imports, deduplicated (pandas and StandardScaler were imported\n",
    "# twice) and with the unused, fragile `torchgen.selective_build` import removed.\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from torch.utils.data import DataLoader, TensorDataset\n",
    "\n",
    "from sklearn.feature_selection import SelectFromModel, SelectKBest, f_classif\n",
    "from sklearn.impute import SimpleImputer\n",
    "from sklearn.linear_model import Lasso, LassoCV\n",
    "from sklearn.metrics import accuracy_score, roc_auc_score, confusion_matrix, recall_score, roc_curve\n",
    "from sklearn.model_selection import StratifiedKFold, train_test_split\n",
    "from sklearn.preprocessing import StandardScaler"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class DataPreprocessor:\n",
    "    \"\"\"Load tabular data from Excel, standardize it, and select features with Lasso.\n",
    "\n",
    "    Column layout: first column = labels, remaining columns = features.\n",
    "    \"\"\"\n",
    "    def __init__(self, data_path: str = 'output_processed.xlsx', data_save_path: str = 'output_processed.xlsx'):\n",
    "        self.data_save_path = data_save_path  # bug fix: referenced in save_selected_features but never stored\n",
    "        try:\n",
    "            self.df = pd.read_excel(data_path)\n",
    "            # Extract label column and feature columns\n",
    "            self.X = self.df.iloc[:, 1:].values   # bug fix: was bare `df` (NameError)\n",
    "            self.y = self.df.iloc[:, 0].values\n",
    "            self.feature_names = self.df.columns[1:]\n",
    "            # Lasso regularization strength; tune this value for your data\n",
    "            self.alpha = 0.01\n",
    "        except Exception as e:\n",
    "            # NOTE(review): swallowing the load error leaves the object half-built and\n",
    "            # the calls below will then raise AttributeError; consider re-raising.\n",
    "            print(f\"Error loading data: {e}\")\n",
    "        self.standarlize()\n",
    "        self.feature_select()\n",
    "\n",
    "    def standarlize(self):\n",
    "        # Standardize features; keep the fitted scaler so it can be reused on new data\n",
    "        self.scaler = StandardScaler().fit(self.X)\n",
    "        self.X_scaled = self.scaler.transform(self.X)  # bug fix: was bare `scaler` (NameError)\n",
    "\n",
    "    def feature_select(self):\n",
    "        # L1-penalized fit; features with non-zero coefficients are kept\n",
    "        self.lasso = Lasso(alpha=self.alpha, max_iter=10000)\n",
    "        self.lasso.fit(self.X_scaled, self.y)  # bug fix: was bare `X_scaled`, `y`\n",
    "        self.model = SelectFromModel(self.lasso, prefit=True)  # bug fix: was bare `lasso`\n",
    "        self.X_selected = self.model.transform(self.X_scaled)\n",
    "        self.selected_features = self.feature_names[self.model.get_support()]  # bug fix: was bare `model`\n",
    "\n",
    "    def print_selected_features(self):\n",
    "        # Print the names of the selected features\n",
    "        print(\"Selected features:\", self.selected_features)\n",
    "\n",
    "    def save_selected_features(self):\n",
    "        # Persist the selected feature names to CSV at the path given at construction\n",
    "        self.selected_features.to_series().to_csv(self.data_save_path, index=False)\n",
    "\n",
    "    def handle_missing_values(self):\n",
    "        # Mean-impute missing values; keep the fitted imputer for reuse on new data\n",
    "        self.imputer = SimpleImputer(strategy='mean')\n",
    "        features = self.imputer.fit_transform(self.X)\n",
    "        return features\n",
    "\n",
    "    def get_tensordataset(self):\n",
    "        # Convert imputed features and labels to tensors and bundle them together\n",
    "        features_tensor = torch.tensor(self.handle_missing_values(), dtype=torch.float32)  # bug fix: missing `self.`\n",
    "        labels_tensor = torch.tensor(self.y, dtype=torch.long)\n",
    "        return TensorDataset(features_tensor, labels_tensor)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Model Definition and Training"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define the deep learning model\n",
    "class MyModel(nn.Module):\n",
    "    \"\"\"Two-layer MLP classifier: Linear -> ReLU -> Linear (returns raw logits).\"\"\"\n",
    "\n",
    "    def __init__(self, n_features=19, n_classes=2, n_hidden=256):\n",
    "        super(MyModel, self).__init__()\n",
    "        # Attribute names (fc1/relu/fc2) are kept stable so saved state_dicts still load.\n",
    "        self.fc1 = nn.Linear(n_features, n_hidden)\n",
    "        self.relu = nn.ReLU()\n",
    "        self.fc2 = nn.Linear(n_hidden, n_classes)\n",
    "\n",
    "    def forward(self, x):\n",
    "        # Softmax is applied by the callers / CrossEntropyLoss, not here.\n",
    "        return self.fc2(self.relu(self.fc1(x)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define performance metrics\n",
    "def specificity_score(y_true, y_pred):\n",
    "    \"\"\"Binary specificity (true-negative rate): TN / (TN + FP).\"\"\"\n",
    "    tn, fp, _, _ = confusion_matrix(y_true, y_pred).ravel()\n",
    "    return tn / (tn + fp)\n",
    "\n",
    "# Define validation function\n",
    "def validate_model(model, val_loader):\n",
    "    \"\"\"Evaluate `model` on `val_loader` with a fixed 0.5 decision threshold.\n",
    "\n",
    "    Returns (accuracy, auc, sensitivity, threshold, specificity, confusion_matrix).\n",
    "    bug fix: this order now matches how every caller in this notebook unpacks the\n",
    "    result; the old version returned (acc, auc, cm, threshold, preds, probs), so\n",
    "    callers silently bound the confusion matrix to `sen` and raw preds to `spe`.\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    all_labels = []\n",
    "    all_preds_prob = []\n",
    "    fixed_threshold = 0.5  # Fixed threshold\n",
    "    # bug fix: use the model's own device instead of hardcoding 'cuda',\n",
    "    # so evaluation also works on CPU-only machines.\n",
    "    device = next(model.parameters()).device\n",
    "\n",
    "    with torch.no_grad():\n",
    "        for batch_features, batch_labels in val_loader:\n",
    "            batch_features = batch_features.to(device)\n",
    "\n",
    "            # Forward pass\n",
    "            outputs = model(batch_features)\n",
    "\n",
    "            # Probability of the positive class (index 1)\n",
    "            preds_prob = torch.nn.functional.softmax(outputs, dim=1)[:, 1].cpu().numpy()\n",
    "            all_preds_prob.extend(preds_prob)\n",
    "            all_labels.extend(batch_labels.cpu().numpy())\n",
    "\n",
    "    # Binarize probabilities at the fixed threshold\n",
    "    fixed_preds = (np.array(all_preds_prob) > fixed_threshold).astype(int)\n",
    "\n",
    "    # Compute performance metrics\n",
    "    acc = accuracy_score(all_labels, fixed_preds)\n",
    "    auc = roc_auc_score(all_labels, all_preds_prob)\n",
    "    cm = confusion_matrix(all_labels, fixed_preds)\n",
    "    sen = recall_score(all_labels, fixed_preds)       # sensitivity (TPR)\n",
    "    spe = specificity_score(all_labels, fixed_preds)  # specificity (TNR)\n",
    "\n",
    "    return acc, auc, sen, fixed_threshold, spe, cm"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Variable Definitions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Preprocess data\n",
    "dataprocess = DataPreprocessor()  # bug fix: the class is named DataPreprocessor, not DataProcess\n",
    "dataset = dataprocess.get_tensordataset()\n",
    "\n",
    "# Expose the arrays/tensors the cross-validation cell below relies on\n",
    "# (bug fix: `features`, `labels`, `features_tensor`, `labels_tensor` were\n",
    "# never defined anywhere, causing a NameError during training).\n",
    "features_tensor, labels_tensor = dataset.tensors\n",
    "features = features_tensor.numpy()\n",
    "labels = labels_tensor.numpy()\n",
    "\n",
    "# Create DataLoader\n",
    "batch_size = 32\n",
    "\n",
    "# Split data into five folds\n",
    "num_folds = 5\n",
    "stratified_kfold = StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=42)\n",
    "\n",
    "# Initialize lists of performance metrics\n",
    "train_acc_list, train_auc_list, train_sen_list, train_cutoff_list, train_spe_list = [], [], [], [], []\n",
    "test_acc_list, test_auc_list, test_sen_list, test_cutoff_list, test_spe_list = [], [], [], [], []\n",
    "\n",
    "# Create model instance\n",
    "# NOTE(review): the default n_features=19 must match the dataset's feature count — verify.\n",
    "model = MyModel()\n",
    "\n",
    "# Move model to GPU (if available)\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "model.to(device)\n",
    "\n",
    "# Define loss function and optimizer\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "optimizer = optim.Adam(model.parameters(), lr=0.001)\n",
    "\n",
    "# Initialize an empty DataFrame to store the results\n",
    "results_df = pd.DataFrame()\n",
    "\n",
    "# Load a fresh test set\n",
    "test_df_path = 'integrated_validation_set_2024-03-18.xlsx'  # Update to actual test set file path\n",
    "test_df = pd.read_excel(test_df_path)\n",
    "\n",
    "# Apply the same preprocessing as training: impute, then scale.\n",
    "# bug fix: `imputer` and `scaler` were undefined at this point; fit an imputer\n",
    "# on the training features and reuse the scaler fitted inside DataPreprocessor.\n",
    "imputer = SimpleImputer(strategy='mean').fit(dataprocess.X)\n",
    "test_features = imputer.transform(test_df.iloc[:, 3:].values)  # assumes the first three columns are not features — TODO confirm\n",
    "test_features = dataprocess.scaler.transform(test_features)\n",
    "test_labels = test_df['label'].values  # Adjust based on actual label column name\n",
    "\n",
    "# Convert processed data to tensors\n",
    "test_features_tensor = torch.tensor(test_features, dtype=torch.float32)\n",
    "test_labels_tensor = torch.tensor(test_labels, dtype=torch.long)\n",
    "test_dataset = TensorDataset(test_features_tensor, test_labels_tensor)\n",
    "test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Training"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# Training and validation (stratified 5-fold cross-validation).\n",
    "# NOTE(review): `model` and `optimizer` are created once before this loop, so\n",
    "# weights carry over between folds; consider re-initializing them per fold for\n",
    "# an unbiased CV estimate.\n",
    "for fold, (train_index, val_index) in enumerate(stratified_kfold.split(features, labels)):\n",
    "    print(f\"Fold {fold + 1}/{num_folds}\")\n",
    "\n",
    "    # Split data sets\n",
    "    train_dataset = TensorDataset(features_tensor[train_index], labels_tensor[train_index])\n",
    "    val_dataset = TensorDataset(features_tensor[val_index], labels_tensor[val_index])\n",
    "\n",
    "    train_data_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\n",
    "    val_data_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)\n",
    "\n",
    "    # Train the model\n",
    "    num_epochs = 10\n",
    "    best_val_acc = 0.0\n",
    "    best_model = None\n",
    "\n",
    "    for epoch in range(num_epochs):\n",
    "        model.train()\n",
    "        for batch_features, batch_labels in train_data_loader:\n",
    "            batch_features, batch_labels = batch_features.to(device), batch_labels.to(device)\n",
    "\n",
    "            optimizer.zero_grad()\n",
    "            outputs = model(batch_features)\n",
    "            loss = criterion(outputs, batch_labels)\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "        # Validate at the end of each epoch\n",
    "        val_acc, val_auc, val_sen, val_cutoff, val_spe, _ = validate_model(model, val_data_loader)\n",
    "        print(f'Epoch {epoch + 1}/{num_epochs}, Validation Accuracy: {val_acc}')\n",
    "\n",
    "        # Save the best-performing model on the validation set\n",
    "        if val_acc > best_val_acc:\n",
    "            best_val_acc = val_acc\n",
    "            # bug fix: state_dict() returns live references to the parameters, so the\n",
    "            # snapshot must be cloned or later training silently overwrites it.\n",
    "            best_model = {k: v.detach().clone() for k, v in model.state_dict().items()}\n",
    "            # Save the best model for each fold\n",
    "            fold_model_save_path = f'model_best_fold_{fold + 1}.pth'\n",
    "            torch.save(best_model, fold_model_save_path)\n",
    "\n",
    "    # At the end of training for each fold, restore the best epoch's weights\n",
    "    model.load_state_dict(best_model)\n",
    "    model.eval()\n",
    "\n",
    "    # Compute training set performance (non-shuffled loader for stable ordering)\n",
    "    train_dataset = TensorDataset(features_tensor[train_index], labels_tensor[train_index])\n",
    "    train_data_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=False)\n",
    "    train_acc, train_auc, train_sen, train_cutoff, train_spe, train_cm = validate_model(model, train_data_loader)\n",
    "\n",
    "    # Compute held-out-fold performance.\n",
    "    # bug fix: renamed from `test_dataset`/`test_data_loader` — the old names\n",
    "    # clobbered the external test-set variables defined in the setup cell.\n",
    "    fold_val_dataset = TensorDataset(features_tensor[val_index], labels_tensor[val_index])\n",
    "    fold_val_loader = DataLoader(fold_val_dataset, batch_size=batch_size, shuffle=False)\n",
    "    test_acc, test_auc, test_sen, test_cutoff, test_spe, test_cm = validate_model(model, fold_val_loader)\n",
    "\n",
    "    # Recompute the training confusion matrix from explicit predictions\n",
    "    all_train_preds = []\n",
    "    all_train_labels = []\n",
    "    # NOTE(review): `fold_results` is never populated before being concatenated\n",
    "    # into `results_df` below, so `results_df` stays empty — flagged, kept as-is.\n",
    "    fold_results = pd.DataFrame()\n",
    "    with torch.no_grad():\n",
    "        for batch_features, batch_labels in train_data_loader:\n",
    "            batch_features, batch_labels = batch_features.to(device), batch_labels.to(device)\n",
    "\n",
    "            # Forward pass\n",
    "            outputs = model(batch_features)\n",
    "\n",
    "            # Positive-class probabilities, thresholded at 0.5\n",
    "            preds_prob = torch.nn.functional.softmax(outputs, dim=1)[:, 1].cpu().numpy()\n",
    "            preds = (preds_prob > 0.5).astype(int)\n",
    "\n",
    "            all_train_preds.extend(preds)\n",
    "            all_train_labels.extend(batch_labels.cpu().numpy())\n",
    "\n",
    "    train_cm = confusion_matrix(all_train_labels, all_train_preds)\n",
    "\n",
    "    # Recompute the held-out-fold confusion matrix.\n",
    "    # bug fix: renamed from `test_preds`/`test_labels` — the old `test_labels`\n",
    "    # overwrote the external test-set labels, breaking the Evaluation cell's AUC.\n",
    "    fold_val_preds = []\n",
    "    fold_val_labels = []\n",
    "\n",
    "    with torch.no_grad():\n",
    "        for batch_features, batch_labels in fold_val_loader:\n",
    "            batch_features, batch_labels = batch_features.to(device), batch_labels.to(device)\n",
    "\n",
    "            # Forward pass\n",
    "            outputs = model(batch_features)\n",
    "\n",
    "            # Positive-class probabilities, thresholded at 0.5\n",
    "            preds_prob = torch.nn.functional.softmax(outputs, dim=1)[:, 1].cpu().numpy()\n",
    "            preds = (preds_prob > 0.5).astype(int)\n",
    "\n",
    "            fold_val_preds.extend(preds)\n",
    "            fold_val_labels.extend(batch_labels.cpu().numpy())\n",
    "\n",
    "        results_df = pd.concat([results_df, fold_results], ignore_index=True)\n",
    "\n",
    "    test_cm = confusion_matrix(fold_val_labels, fold_val_preds)\n",
    "\n",
    "    # Print training set performance\n",
    "    print(f'Fold {fold + 1} Train Accuracy: {train_acc}')\n",
    "    print('Train Confusion Matrix:')\n",
    "    print(train_cm)\n",
    "\n",
    "    # Print held-out-fold performance\n",
    "    print(f'Fold {fold + 1} Test Accuracy: {test_acc}')\n",
    "    print('Test Confusion Matrix:')\n",
    "    print(test_cm)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Evaluation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# Evaluate each fold's model and identify the one that performs best on the test set.\n",
    "# NOTE(review): `test_labels` must still hold the external test-set labels here;\n",
    "# ensure the training cell does not overwrite that name.\n",
    "best_auc = 0\n",
    "best_model_path = ''\n",
    "\n",
    "for fold in range(1, 6):\n",
    "    model_path = f'model_best_fold_{fold}.pth'\n",
    "    model.load_state_dict(torch.load(model_path))\n",
    "    model.eval()\n",
    "    model.to(device)\n",
    "\n",
    "    # Collect positive-class probabilities over the whole external test loader\n",
    "    all_preds_prob = []\n",
    "    with torch.no_grad():\n",
    "        for batch_features, _ in test_loader:\n",
    "            logits = model(batch_features.to(device))\n",
    "            all_preds_prob.extend(torch.nn.functional.softmax(logits, dim=1)[:, 1].cpu().numpy())\n",
    "\n",
    "    auc = roc_auc_score(test_labels, all_preds_prob)\n",
    "    print(f\"Fold {fold}, Test AUC: {auc:.4f}\")\n",
    "\n",
    "    # Track the checkpoint with the best test AUC\n",
    "    if auc > best_auc:\n",
    "        best_auc = auc\n",
    "        best_model_path = model_path\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Testing"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# Use the best-performing model on the test set for final predictions\n",
    "print(f\"Best Model Path: {best_model_path}\")\n",
    "model.load_state_dict(torch.load(best_model_path))\n",
    "model.eval()\n",
    "\n",
    "all_preds_prob = []\n",
    "all_preds = []\n",
    "with torch.no_grad():\n",
    "    for batch_features, _ in test_loader:\n",
    "        logits = model(batch_features.to(device))\n",
    "        # Positive-class probability and argmax class prediction per sample\n",
    "        all_preds_prob.extend(torch.nn.functional.softmax(logits, dim=1)[:, 1].cpu().numpy())\n",
    "        all_preds.extend(np.argmax(logits.cpu().numpy(), axis=1))\n",
    "\n",
    "# Attach predictions to the test-set frame\n",
    "test_df['Probability'] = all_preds_prob\n",
    "test_df['Prediction'] = all_preds\n",
    "\n",
    "# Keep only the identifier, label, and prediction columns\n",
    "output_columns = ['编号', '患者姓名', 'label', 'Probability', 'Prediction']\n",
    "output_df = test_df.loc[:, output_columns]\n",
    "\n",
    "# Save to Excel file\n",
    "output_df.to_excel('final_test_predictions_88feature.xlsx', index=False)"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
