{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "8e7bcaae",
   "metadata": {},
   "source": [
    "# import packages"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "261adf52",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import seaborn as sns\n",
    "import matplotlib.pyplot as plt\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from torch.utils.data import TensorDataset, DataLoader\n",
    "from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV, StratifiedKFold\n",
    "from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier, StackingClassifier\n",
    "from sklearn.model_selection import StratifiedKFold\n",
    "from sklearn.metrics import f1_score, matthews_corrcoef, accuracy_score, confusion_matrix, ConfusionMatrixDisplay\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "from lightgbm import LGBMClassifier\n",
    "from sklearn.svm import SVC\n",
    "from sklearn.metrics import accuracy_score, roc_auc_score, classification_report, confusion_matrix\n",
    "from sklearn.feature_selection import SelectFromModel, RFE\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d9055486",
   "metadata": {},
   "source": [
    "# Data loading and reading "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1f783563",
   "metadata": {},
   "outputs": [],
   "source": [
     "# NOTE(review): hardcoded absolute Windows path — consider a configurable DATA_DIR\n",
     "# (pathlib.Path) so the notebook runs on other machines.\n",
     "train=pd.read_csv(r'H:\\深度学习\\Kaggle_playground_降水预测\\train.csv')\n",
     "test=pd.read_csv(r'H:\\深度学习\\Kaggle_playground_降水预测\\test.csv')\n",
     "# Quick sanity check on dataset sizes\n",
     "print(train.shape,test.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "438fb5fd",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Remove exact duplicate rows from the training set (in place).\n",
     "train.drop_duplicates(inplace=True)\n",
     "# Per-column missing-value counts; the last expression is the cell's rich output.\n",
     "train.isnull().sum()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c32050ee",
   "metadata": {},
   "source": [
    "# EDA"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a33981b9",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Column groups reused throughout the EDA.\n",
     "# NOTE: 'temparature' is spelled exactly as in the source CSV — do not \"fix\" it.\n",
     "numerical_variables=['pressure','maxtemp','temparature','mintemp','dewpoint','humidity','cloud','sunshine','windspeed']\n",
     "target_variable='rainfall'\n",
     "categorical_variables=['winddirection']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bf443596",
   "metadata": {},
   "outputs": [],
   "source": [
    "custom_palette=['#3498db','#e74c3c','#2ecc71']\n",
    "train['Source']='Train'\n",
    "test['Source']='Test'\n",
    "\n",
    "def generate_feature_visualizations(feature_name):\n",
    "    sns.set(style='whitegrid')\n",
    "\n",
    "    fig,ax=plt.subplots(1,2,figsize=(14,5))\n",
    "    plt.subplot(1,2,1)\n",
    "    sns.boxplot(data=pd.concat([train,test]),x=feature_name,y='Source',palette=custom_palette)\n",
    "    plt.xlabel(feature_name)\n",
    "    plt.title(f'Box Plot for {feature_name} Across Datasets')\n",
    "    plt.subplot(1,2,2)\n",
    "    sns.histplot(\n",
    "    data=pd.concat([train,test]), x=feature_name, hue='Source', kde=True, palette=custom_palette, multiple='layer',alpha=0.6,bins=30,legend=True)\n",
    "    plt.xlabel(feature_name)\n",
    "    plt.ylabel('Frequency')\n",
    "    plt.title(f'Histogram for {feature_name}')\n",
    "    plt.legend\n",
    "    plt.tight_layout()\n",
    "    plt.show()\n",
    "for feature in numerical_variables:\n",
    "    generate_feature_visualizations(feature)\n",
    "train.drop('Source',axis=1,inplace=True)\n",
    "test.drop('Source',axis=1,inplace=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c50cc18e",
   "metadata": {},
   "outputs": [],
   "source": [
    "custom_palette = ['#1f77b4', '#ff7f0e'] \n",
    "def create_grouped_countplot(variable):\n",
    "    sns.set_style('whitegrid')\n",
    "    train_data_copy = train.copy()\n",
    "    test_data_copy = test.copy()\n",
    "    train_data_copy['Dataset'] = 'Train'\n",
    "    test_data_copy['Dataset'] = 'Test'\n",
    "    combined_data = pd.concat([train_data_copy, test_data_copy],ignore_index=True)\n",
    "    train_counts=train[variable].value_counts().sort_values(ascending=True).index.tolist()\n",
    "    plt.figure(figsize=(10, 6))\n",
    "    sns.countplot(x=variable, hue='Dataset', data=combined_data, order=train_counts, palette=custom_palette,width=0.85)\n",
    "    plt.title(f'Grouped Countplot of {variable} (Train vs Test)')\n",
    "    plt.xlabel(variable)\n",
    "    plt.ylabel('Count')\n",
    "    plt.xticks(rotation=45, ha='right')\n",
    "    plt.show()\n",
    "for variable in categorical_variables:\n",
    "    create_grouped_countplot(variable)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e2ee4825",
   "metadata": {},
   "outputs": [],
   "source": [
    "unique_palette = ['#9b59b6', '#f39c12']\n",
    "\n",
    "def generate_wind_rose_plot(ax, dataset, name, color):\n",
    "    wind_direction_radians = np.radians(dataset['winddirection'].dropna())\n",
    "\n",
    "    bins = np.linspace(0, 2 * np.pi, 37)\n",
    "    counts, bin_edges = np.histogram(wind_direction_radians, bins=bins)\n",
    "\n",
    "    bars = ax.bar(bin_edges[:-1], counts, width=np.radians(10), color=color, edgecolor='black', alpha=0.75)\n",
    "\n",
    "    ax.set_theta_zero_location(\"N\")\n",
    "    ax.set_theta_direction(-1)\n",
    "    ax.set_xticks(np.radians(np.arange(0, 360, 45)))\n",
    "    ax.set_xticklabels(['N', 'NE', 'E', 'SE', 'S', 'SW', 'W', 'NW'], fontsize=12, fontweight='bold')\n",
    "\n",
    "    ax.yaxis.grid(True, linestyle=\"--\", alpha=0.6)\n",
    "    ax.set_yticklabels([])\n",
    "    ax.set_title(f\"Wind Direction - {name}\", fontsize=14, fontweight='bold', pad=15)\n",
    "\n",
    "fig, axes = plt.subplots(1, 2, figsize=(14, 6), subplot_kw={'projection': 'polar'})\n",
    "\n",
    "generate_wind_rose_plot(axes[0], train, \"Training Data\", unique_palette[0])\n",
    "generate_wind_rose_plot(axes[1], test, \"Test Data\", unique_palette[1])\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "777cc559",
   "metadata": {},
   "outputs": [],
   "source": [
    "fig,ax=plt.subplots(6,2,figsize=(20,20))\n",
    "ax=ax.flatten()\n",
    "i=0\n",
    "for col in train.columns:\n",
    "    if col!='rainfall':\n",
    "        sns.kdeplot(data=train,x=col,ax=ax[i],label='Train',fill=True)\n",
    "        sns.kdeplot(data=test, x=col, ax=ax[i], label=\"Test\", fill=True)\n",
    "        ax[i].set_title(col)\n",
    "        ax[i].legend()\n",
    "        i += 1\n",
    "        plt.tight_layout()\n",
    "for j in range(i,len(ax)):\n",
    "    ax[j].axis('off')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d1fca460",
   "metadata": {},
   "outputs": [],
   "source": [
    "import seaborn as sns\n",
    "import matplotlib.pyplot as plt\n",
    "import itertools\n",
    "features = ['day', 'pressure', 'maxtemp', 'temparature', 'mintemp', 'dewpoint', 'humidity', 'cloud', 'sunshine', 'winddirection', 'windspeed']\n",
    "pairs=list(itertools.combinations(features,2))\n",
    "n_cols=2\n",
    "n_rows=-(-len(pairs)//n_cols)\n",
    "fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=(20, 5 * n_rows))\n",
    "axes = axes.flatten()\n",
    "for i, (x,y) in enumerate(pairs):\n",
    "    sns.scatterplot(data=train,x=x,y=y,hue='rainfall',ax=axes[i],palette='coolwarm')\n",
    "    axes[i].set_title(f'{x} vs. {y} (Hue: Rainfall)', fontsize=14)\n",
    "for j in range(i + 1, len(axes)):\n",
    "    axes[j].axis('off')\n",
    "plt.tight_layout()\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4bbe55f4",
   "metadata": {},
   "outputs": [],
   "source": [
    "variables = [col for col in train.columns if col in numerical_variables]+['day']\n",
    "\n",
    "test_variables = variables\n",
    "train_variables = variables+ ['rainfall']\n",
    "\n",
    "corr_train = train[train_variables].corr()\n",
    "corr_test = test[test_variables].corr()\n",
    "\n",
    "mask_train = np.triu(np.ones_like(corr_train, dtype=bool))\n",
    "mask_test = np.triu(np.ones_like(corr_test, dtype=bool))\n",
    "\n",
    "annot_kws = {\"size\": 8, \"rotation\": 45}\n",
    "\n",
    "plt.figure(figsize=(15, 5))\n",
    "plt.subplot(1, 2, 1)\n",
    "ax_train = sns.heatmap(corr_train, mask=mask_train, cmap='viridis', annot=True,\n",
    "                      square=True, linewidths=.5, xticklabels=1, yticklabels=1, annot_kws=annot_kws)\n",
    "plt.title('Correlation Heatmap - Train Data')\n",
    "\n",
    "plt.subplot(1, 2, 2)\n",
    "ax_test = sns.heatmap(corr_test, mask=mask_test, cmap='viridis', annot=True,\n",
    "                     square=True, linewidths=.5, xticklabels=1, yticklabels=1, annot_kws=annot_kws)\n",
    "plt.title('Correlation Heatmap - Test Data')\n",
    "\n",
    "plt.tight_layout()\n",
    "\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a4fda7e6",
   "metadata": {},
   "outputs": [],
   "source": [
     "#ax0: Train vs Test. Significant differences could indicate data drift.\n",
     "#ax1: Scatter plot. Showing the relationship between day, the feature and rainfall status, revealing time-based patterns and individual data points.\n",
     "#ax2: KDE plot. Showing the distribution of the feature values for each rainfall status, providing insights into the feature's distribution across different conditions.\n",
     "from matplotlib.lines import Line2D\n",
     "\n",
     "train_color = '#8e44ad'  \n",
     "test_color = '#e67e22'   \n",
     "rainfall_colors = {0: '#f39c12', 1: '#3498db'} \n",
     "\n",
     "# Numeric feature columns shared by train/test, minus identifiers and the target.\n",
     "numerical_columns = test.select_dtypes(include=['int64', 'float64']).columns.tolist()\n",
     "for col in ['id', 'day', 'rainfall']:\n",
     "    if col in numerical_columns:\n",
     "        numerical_columns.remove(col)\n",
     "\n",
     "for column in numerical_columns:\n",
     "    \n",
     "    # One dashboard figure per feature: trend (top), scatter + KDE (bottom).\n",
     "    fig = plt.figure(figsize=(16, 10))\n",
     "    gs = fig.add_gridspec(2, 2, height_ratios=[1, 1])\n",
     "\n",
     "    ax0 = fig.add_subplot(gs[0, :])\n",
     "    ax0.plot(train['id'], train[column], linestyle='-', color=train_color, label='Train Data', alpha=0.8)\n",
     "    ax0.plot(test['id'], test[column], linestyle='-', color=test_color, label='Test Data', alpha=0.8)\n",
     "\n",
     "    ax0.set_xlabel('ID', fontsize=14)\n",
     "    ax0.set_ylabel(column, fontsize=14)\n",
     "    ax0.set_title(f'Trend Plot: {column} vs ID', fontsize=16, fontweight='bold')\n",
     "    ax0.legend(fontsize=12)\n",
     "    ax0.grid(True, linestyle='--', alpha=0.5)\n",
     "\n",
     "    ax1 = fig.add_subplot(gs[1, 0])\n",
     "    scatter = ax1.scatter(\n",
     "        train['day'], train[column],\n",
     "        c=train['rainfall'].map(rainfall_colors), alpha=0.8\n",
     "    )\n",
     "    ax1.set_xlabel('Day', fontsize=14)\n",
     "    ax1.set_ylabel(column, fontsize=14)\n",
     "    ax1.set_title(f'Scatter Plot: {column} vs Day (by Rainfall)', fontsize=16, fontweight='bold')\n",
     "\n",
     "    # Manual legend: scatter colors come from .map(), so build proxy handles.\n",
     "    legend_elements = [\n",
     "        Line2D([0], [0], marker='o', color='w', label='No Rainfall',\n",
     "               markersize=10, markerfacecolor=rainfall_colors[0]),\n",
     "        Line2D([0], [0], marker='o', color='w', label='Rainfall',\n",
     "               markersize=10, markerfacecolor=rainfall_colors[1])\n",
     "    ]\n",
     "    ax1.legend(handles=legend_elements, title=\"Rainfall\", fontsize=12, title_fontsize=12)\n",
     "    ax1.grid(True, linestyle='--', alpha=0.5)\n",
     "\n",
     "    ax2 = fig.add_subplot(gs[1, 1])\n",
     "    # common_norm=False normalizes each rainfall class's density separately.\n",
     "    sns.kdeplot(data=train, x=column, hue='rainfall', palette=rainfall_colors, ax=ax2, fill=True, common_norm=False, alpha=0.6)\n",
     "\n",
     "    ax2.set_xlabel(column, fontsize=14)\n",
     "    ax2.set_ylabel('Density', fontsize=14)\n",
     "    ax2.set_title(f'Distribution (KDE) of {column} by Rainfall', fontsize=16, fontweight='bold')\n",
     "    ax2.legend(title='Rainfall', fontsize=12, title_fontsize=12)\n",
     "    ax2.grid(True, linestyle='--', alpha=0.5)\n",
     "\n",
     "    plt.tight_layout(pad=3.0)\n",
     "    plt.show()\n",
     "\n",
     "    # Thin horizontal rule rendered between dashboards as a visual separator.\n",
     "    plt.figure(figsize=(16, 0.3)) \n",
     "    plt.axhline(y=0, color='gray', linewidth=5, linestyle='-') \n",
     "    plt.axis('off')\n",
     "    plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0e610e20",
   "metadata": {},
   "source": [
    "# Feature Engineering"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5404679d",
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.feature_selection import mutual_info_regression\n",
    "\n",
    "X=train.drop(columns=['rainfall'])\n",
    "y=train['rainfall']\n",
    "mi=mutual_info_regression(X,y)\n",
    "mi_df=pd.DataFrame({'Cols':X.columns,'MI':mi})\n",
    "mi_df.sort_values(ascending=False, inplace=True, by='MI')\n",
    "print(mi_df['Cols'].tolist())\n",
    "plt.figure(figsize=(20, 8))\n",
    "sns.barplot(data=mi_df,x='MI',y='Cols',palette='rainbow')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fb5cc42b",
   "metadata": {},
   "outputs": [],
   "source": [
    "def feature_engineering(df):\n",
    "    df = df.copy()\n",
    "    \n",
    "    df['hci'] = df['humidity'] * df['cloud']\n",
    "    df['hsi'] = df['humidity'] * df['sunshine']\n",
    "    df['csr'] = df['cloud'] / (df['sunshine'] + 1e-5)\n",
    "    df['rd'] = 100 - df['humidity']\n",
    "    df['sp'] = df['sunshine'] / (df['sunshine'] + df['cloud'] + 1e-5)\n",
    "    df['wi'] = (0.4 * df['humidity']) + (0.3 * df['cloud']) - (0.3 * df['sunshine'])\n",
    "    \n",
    "    df['temp_range'] = df['maxtemp'] - df['mintemp']\n",
    "    df['temp_dew_diff'] = df['temparature'] - df['dewpoint']\n",
    "    df['humidity_cloud_ratio'] = df['humidity'] / (df['cloud'] + 1e-3)\n",
    "    df['sunshine_cloud_ratio'] = df['sunshine'] / (df['cloud'] + 1e-3)\n",
    "    df['pressure_wind_interaction'] = df['pressure'] * df['winddirection']\n",
    "    df['temp_pressure_ratio'] = df['temparature'] / (df['pressure'] + 1e-3)\n",
    "    df['wind_pressure_ratio'] = df['windspeed'] / (df['pressure'] + 1e-3)\n",
    "    \n",
    "    return df\n",
    "\n",
    "train_comb = feature_engineering(train)\n",
    "test = feature_engineering(test)\n",
    "if test.isnull().sum().sum() > 0:\n",
    "    print(\"\\nHandling missing values in test data...\")\n",
    "\n",
    "    for col in test.columns:\n",
    "        if test[col].isnull().sum() > 0:\n",
    "            test[col] = test[col].fillna(train_comb[col].median())\n",
    "train_comb"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ce6b7ffa",
   "metadata": {},
   "outputs": [],
   "source": [
    "plt.figure(figsize=(20,8))\n",
    "sns.heatmap(train_comb.corr(),annot=True)\n",
    "plt.show()\n",
    "\n",
    "X = train_comb.drop(columns=['id', 'rainfall'])\n",
    "y = train_comb['rainfall']\n",
    "mi=mutual_info_regression(X,y)\n",
    "mi_df=pd.DataFrame({\"Cols\":X.columns,'MI':mi})\n",
    "mi_df.sort_values(ascending=False,inplace=True,by='MI')\n",
    "\n",
    "plt.figure(figsize=(20,8))\n",
    "sns.barplot(data=mi_df,x='MI',y='Cols',palette='rainbow')\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "130acfdd",
   "metadata": {},
   "source": [
    "# Model：LSTM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5077a34b",
   "metadata": {},
   "outputs": [],
   "source": [
    "from imblearn.over_sampling import SMOTE\n",
    "X=train_comb.drop(['id','rainfall'],axis=1)\n",
    "y=train_comb['rainfall']\n",
    "scaler = StandardScaler()\n",
    "X_scaled=scaler.fit_transform(X)\n",
    "smote=SMOTE(random_state=42)\n",
    "X_resampled, y_resampled = smote.fit_resample(X_scaled, y)\n",
    "X_train, X_val, y_train, y_val = train_test_split(X_resampled, y_resampled, test_size=0.2, random_state=42)\n",
    "X_train_lstm = X_train.reshape((X_train.shape[0], 1, X_train.shape[1]))\n",
    "X_val_lstm = X_val.reshape((X_val.shape[0], 1, X_val.shape[1]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9a1a021f",
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.metrics import roc_curve \n",
    "from sklearn.metrics import auc\n",
    "class SimpleLSTMModel(nn.Module):\n",
    "    def __init__(self, input_dim, hidden_dim=50, output_dim=1):\n",
    "        super(SimpleLSTMModel, self).__init__()\n",
    "        \n",
    "        # First LSTM layer - PyTorch does not use return_sequences parameter\n",
    "        self.lstm1 = nn.LSTM(\n",
    "            input_size=input_dim, \n",
    "            hidden_size=hidden_dim, \n",
    "            batch_first=True\n",
    "        )\n",
    "        self.dropout1 = nn.Dropout(0.2)\n",
    "        \n",
    "        # Second LSTM layer\n",
    "        self.lstm2 = nn.LSTM(\n",
    "            input_size=hidden_dim, \n",
    "            hidden_size=hidden_dim//2, \n",
    "            batch_first=True\n",
    "        )\n",
    "        self.dropout2 = nn.Dropout(0.2)\n",
    "        \n",
    "        # Output layer\n",
    "        self.fc = nn.Linear(hidden_dim//2, output_dim)\n",
    "        self.sigmoid = nn.Sigmoid()\n",
    "    \n",
    "    def forward(self, x):\n",
    "        # First LSTM layer\n",
    "        # PyTorch LSTM returns (output, (h_n, c_n)) by default\n",
    "        lstm1_out, _ = self.lstm1(x)\n",
    "        lstm1_out = self.dropout1(lstm1_out)\n",
    "        \n",
    "        # Second LSTM layer - only take the output from the last time step\n",
    "        lstm2_out, _ = self.lstm2(lstm1_out)\n",
    "        # Get the output from the last time step\n",
    "        lstm2_out = self.dropout2(lstm2_out[:, -1, :])\n",
    "        \n",
    "        # Output layer\n",
    "        output = self.fc(lstm2_out)\n",
    "        output = self.sigmoid(output)\n",
    "        \n",
    "        return output\n",
     "\n",
     "# Initialize model and training parameters\n",
     "input_dim = X_train.shape[1]  # Input feature dimension\n",
     "model = SimpleLSTMModel(input_dim, hidden_dim=50, output_dim=1)\n",
     "\n",
     "# Define loss function and optimizer - using standard binary cross-entropy, relying on SMOTE to handle imbalance\n",
     "# NOTE(review): with the model's final Sigmoid, BCELoss is correct;\n",
     "# BCEWithLogitsLoss would be more numerically stable but requires removing\n",
     "# the Sigmoid from the model — left as-is to preserve behavior.\n",
     "criterion = nn.BCELoss()  # Simple binary cross-entropy loss\n",
     "optimizer = optim.Adam(model.parameters(), lr=0.001)\n",
     "\n",
     "# Training settings\n",
     "num_epochs = 80  # Consistent with the reference code\n",
     "batch_size = 16  # Consistent with the reference code\n",
     "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
     "model.to(device)\n",
     "\n",
     "# Create data loaders; TensorDataset pairs each (1, n_features) window with its label\n",
     "train_dataset = TensorDataset(torch.FloatTensor(X_train_lstm), torch.FloatTensor(y_train.values.reshape(-1, 1)))\n",
     "val_dataset = TensorDataset(torch.FloatTensor(X_val_lstm), torch.FloatTensor(y_val.values.reshape(-1, 1)))\n",
     "train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\n",
     "val_loader = DataLoader(val_dataset, batch_size=batch_size)\n",
     "\n",
     "# Per-epoch metric history (used for the accuracy/AUC plots below)\n",
     "train_losses = []\n",
     "val_losses = []\n",
     "train_accs = []\n",
     "val_accs = []\n",
     "train_aucs = []\n",
     "val_aucs = []\n",
     "\n",
     "for epoch in range(num_epochs):\n",
     "    model.train()\n",
     "    train_loss = 0.0\n",
     "    train_preds = []\n",
     "    train_true = []\n",
     "    \n",
     "    # Training loop\n",
     "    for inputs, targets in train_loader:\n",
     "        inputs, targets = inputs.to(device), targets.to(device)\n",
     "        \n",
     "        # Forward propagation\n",
     "        outputs = model(inputs)\n",
     "        \n",
     "        # Calculate loss\n",
     "        loss = criterion(outputs, targets)\n",
     "        \n",
     "        # Backpropagation and optimization\n",
     "        optimizer.zero_grad()\n",
     "        loss.backward()\n",
     "        optimizer.step()\n",
     "        \n",
     "        # Accumulate loss (weighted by batch size so the epoch average is exact)\n",
     "        train_loss += loss.item() * inputs.size(0)\n",
     "        \n",
     "        # Store predictions and true values for AUC calculation\n",
     "        train_preds.extend(outputs.cpu().detach().numpy())\n",
     "        train_true.extend(targets.cpu().numpy())\n",
     "    \n",
     "    # Calculate average loss and metrics for training set\n",
     "    train_loss = train_loss / len(train_dataset)\n",
     "    train_pred_labels = (np.array(train_preds) > 0.5).astype(int)\n",
     "    train_acc = accuracy_score(np.array(train_true).flatten(), train_pred_labels.flatten())\n",
     "    train_auc = roc_auc_score(np.array(train_true).flatten(), np.array(train_preds).flatten())\n",
     "    \n",
     "    # Validation loop (no gradient tracking)\n",
     "    model.eval()\n",
     "    val_loss = 0.0\n",
     "    val_preds = []\n",
     "    val_true = []\n",
     "    \n",
     "    with torch.no_grad():\n",
     "        for inputs, targets in val_loader:\n",
     "            inputs, targets = inputs.to(device), targets.to(device)\n",
     "            \n",
     "            # Forward propagation\n",
     "            outputs = model(inputs)\n",
     "            \n",
     "            # Calculate loss\n",
     "            loss = criterion(outputs, targets)\n",
     "            \n",
     "            # Accumulate loss\n",
     "            val_loss += loss.item() * inputs.size(0)\n",
     "            \n",
     "            # Store predictions and true values for AUC calculation\n",
     "            val_preds.extend(outputs.cpu().numpy())\n",
     "            val_true.extend(targets.cpu().numpy())\n",
     "    \n",
     "    # Calculate average loss and metrics for validation set\n",
     "    val_loss = val_loss / len(val_dataset)\n",
     "    val_pred_labels = (np.array(val_preds) > 0.5).astype(int)\n",
     "    val_acc = accuracy_score(np.array(val_true).flatten(), val_pred_labels.flatten())\n",
     "    val_auc = roc_auc_score(np.array(val_true).flatten(), np.array(val_preds).flatten())\n",
     "    \n",
     "    # Save metrics\n",
     "    train_losses.append(train_loss)\n",
     "    val_losses.append(val_loss)\n",
     "    train_accs.append(train_acc)\n",
     "    val_accs.append(val_acc)\n",
     "    train_aucs.append(train_auc)\n",
     "    val_aucs.append(val_auc)\n",
     "    \n",
     "    # Print training and validation metrics\n",
     "    print(f'Epoch {epoch+1}/{num_epochs}')\n",
     "    print(f'Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.4f}, Train AUC: {train_auc:.4f}')\n",
     "    print(f'Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.4f}, Val AUC: {val_auc:.4f}')\n",
     "    print('-' * 50)\n",
     "\n",
     "# Plot ROC curve — uses the validation predictions collected in the FINAL epoch\n",
     "val_probs = np.array(val_preds).flatten()\n",
     "val_true_flat = np.array(val_true).flatten()\n",
     "\n",
     "fpr, tpr, thresholds = roc_curve(val_true_flat, val_probs)\n",
     "roc_auc = auc(fpr, tpr)\n",
     "\n",
     "plt.figure(figsize=(10, 8))\n",
     "plt.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.3f})')\n",
     "plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')\n",
     "plt.xlim([0.0, 1.0])\n",
     "plt.ylim([0.0, 1.05])\n",
     "plt.xlabel('False Positive Rate', fontsize=14)\n",
     "plt.ylabel('True Positive Rate', fontsize=14)\n",
     "plt.title('Receiver Operating Characteristic (ROC) Curve', fontsize=16)\n",
     "plt.legend(loc=\"lower right\", fontsize=12)\n",
     "plt.grid(alpha=0.3)\n",
     "plt.show()\n",
     "\n",
     "# Plot training and validation metrics\n",
     "epochs = np.arange(1, len(train_aucs) + 1)\n",
     "\n",
     "plt.figure(figsize=(12, 8))\n",
     "plt.subplot(2, 1, 1)\n",
     "plt.plot(epochs, train_accs, 'b-', label='Training Accuracy')\n",
     "plt.plot(epochs, val_accs, 'r-', label='Validation Accuracy')\n",
     "plt.title('Training and Validation Accuracy vs Epochs', fontsize=14)\n",
     "plt.xlabel('Epochs', fontsize=12)\n",
     "plt.ylabel('Accuracy', fontsize=12)\n",
     "plt.legend(fontsize=12)\n",
     "plt.grid(alpha=0.3)\n",
     "\n",
     "plt.subplot(2, 1, 2)\n",
     "plt.plot(epochs, train_aucs, 'b-', label='Training AUC')\n",
     "plt.plot(epochs, val_aucs, 'r-', label='Validation AUC')\n",
     "plt.title('Training and Validation AUC vs Epochs', fontsize=14)\n",
     "plt.xlabel('Epochs', fontsize=12)\n",
     "plt.ylabel('AUC Value', fontsize=12)\n",
     "plt.legend(fontsize=12)\n",
     "plt.grid(alpha=0.3)\n",
     "plt.ylim([0.5, 1.0])\n",
     "\n",
     "plt.tight_layout()\n",
     "plt.show()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "aac66416",
   "metadata": {},
   "outputs": [],
   "source": [
    "#Set time steps manually\n",
    "# Define time steps\n",
    "time_steps = 1 #It's better to have a time step of 1\n",
    "\n",
    "# Create proper sequence function for sequence data\n",
    "def create_sequences(data, time_steps):\n",
    "    # Print data shape for debugging\n",
    "    print(\"Input data shape:\", data.shape)\n",
    "    \n",
    "    # Handle if data is already 3D\n",
    "    if len(data.shape) == 3:\n",
    "        # If it's 3D format with singleton dimension, extract the actual 2D data\n",
    "        if data.shape[1] == 1:\n",
    "            data = data.reshape(data.shape[0], data.shape[2])\n",
    "            print(\"Reshaped to:\", data.shape)\n",
    "    \n",
    "    # Now create sequences from 2D data\n",
    "    n_samples = len(data) - time_steps + 1\n",
    "    X_seq = np.zeros((n_samples, time_steps, data.shape[1]))\n",
    "    for i in range(n_samples):\n",
    "        for t in range(time_steps):\n",
    "            X_seq[i, t] = data[i + t]\n",
    "    \n",
    "    return X_seq\n",
    "\n",
    "# Create sequence data\n",
    "X_train_seq = create_sequences(X_train_lstm, time_steps)\n",
    "X_val_seq = create_sequences(X_val_lstm, time_steps)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b67de1fd",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Adjust labels to match sequences (assuming each sequence maps to the label of its last timestep)\n",
     "# With time_steps = 1 this slice is the identity and lengths are unchanged.\n",
     "y_train_seq = y_train.values[time_steps-1:]\n",
     "y_val_seq = y_val.values[time_steps-1:]\n",
    "\n",
    "# LSTM model for sequence data\n",
    "class SequenceLSTMModel(nn.Module):\n",
    "    def __init__(self, input_dim, hidden_dim=50, output_dim=1):\n",
    "        super(SequenceLSTMModel, self).__init__()\n",
    "        \n",
    "        # First LSTM layer\n",
    "        self.lstm1 = nn.LSTM(\n",
    "            input_size=input_dim, \n",
    "            hidden_size=hidden_dim, \n",
    "            batch_first=True\n",
    "        )\n",
    "        self.dropout1 = nn.Dropout(0.2)\n",
    "        \n",
    "        # Second LSTM layer\n",
    "        self.lstm2 = nn.LSTM(\n",
    "            input_size=hidden_dim, \n",
    "            hidden_size=hidden_dim//2, \n",
    "            batch_first=True\n",
    "        )\n",
    "        self.dropout2 = nn.Dropout(0.2)\n",
    "        \n",
    "        # Output layer\n",
    "        self.fc = nn.Linear(hidden_dim//2, output_dim)\n",
    "        self.sigmoid = nn.Sigmoid()\n",
    "    \n",
    "    def forward(self, x):\n",
    "        # x shape: [batch_size, time_steps, features]\n",
    "        \n",
    "        # First LSTM layer processes the entire sequence\n",
    "        lstm1_out, _ = self.lstm1(x)\n",
    "        # lstm1_out shape: [batch_size, time_steps, hidden_dim]\n",
    "        lstm1_out = self.dropout1(lstm1_out)\n",
    "        \n",
    "        # Second LSTM layer\n",
    "        lstm2_out, _ = self.lstm2(lstm1_out)\n",
    "        # lstm2_out shape: [batch_size, time_steps, hidden_dim//2]\n",
    "        \n",
    "        # We only use the output from the last time step for classification\n",
    "        lstm2_out = self.dropout2(lstm2_out[:, -1, :])\n",
    "        # lstm2_out shape: [batch_size, hidden_dim//2]\n",
    "        \n",
    "        # Output layer\n",
    "        output = self.fc(lstm2_out)\n",
    "        output = self.sigmoid(output)\n",
    "        \n",
    "        return output\n",
    "\n",
    "# Initialize model and training parameters\n",
    "# Important: Use the correct feature dimension for model initialization\n",
    "input_dim = 24  # Use the actual feature dimension of the data, or use X_train_seq.shape[2]\n",
    "model = SequenceLSTMModel(input_dim, hidden_dim=50, output_dim=1)\n",
    "\n",
    "# Define loss function and optimizer\n",
    "criterion = nn.BCELoss()  # Binary cross entropy loss\n",
    "optimizer = optim.Adam(model.parameters(), lr=0.005)\n",
    "\n",
    "# Training settings\n",
    "num_epochs = 80\n",
    "batch_size = 16\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "model.to(device)\n",
    "\n",
    "# Create data loaders\n",
    "train_dataset = TensorDataset(\n",
    "    torch.FloatTensor(X_train_seq), \n",
    "    torch.FloatTensor(y_train_seq.reshape(-1, 1))\n",
    ")\n",
    "val_dataset = TensorDataset(\n",
    "    torch.FloatTensor(X_val_seq), \n",
    "    torch.FloatTensor(y_val_seq.reshape(-1, 1))\n",
    ")\n",
    "train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\n",
    "val_loader = DataLoader(val_dataset, batch_size=batch_size)\n",
    "\n",
    "# Training loop\n",
    "train_losses = []\n",
    "val_losses = []\n",
    "train_accs = []\n",
    "val_accs = []\n",
    "train_aucs = []\n",
    "val_aucs = []\n",
    "\n",
    "# Early stopping parameters\n",
    "patience = 10\n",
    "best_val_auc = 0\n",
    "counter = 0\n",
    "best_model_state = None\n",
    "\n",
    "for epoch in range(num_epochs):\n",
    "    # ---- Training phase ----\n",
    "    model.train()\n",
    "    train_loss = 0.0\n",
    "    train_preds = []\n",
    "    train_true = []\n",
    "\n",
    "    for inputs, targets in train_loader:\n",
    "        inputs, targets = inputs.to(device), targets.to(device)\n",
    "\n",
    "        # Forward pass and loss\n",
    "        outputs = model(inputs)\n",
    "        loss = criterion(outputs, targets)\n",
    "\n",
    "        # Backward propagation and optimizer step\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        # Weight by batch size so the epoch average is exact even when the\n",
    "        # final batch is smaller than batch_size.\n",
    "        train_loss += loss.item() * inputs.size(0)\n",
    "\n",
    "        # Keep raw outputs / labels for epoch-level accuracy and AUC.\n",
    "        train_preds.extend(outputs.cpu().detach().numpy())\n",
    "        train_true.extend(targets.cpu().numpy())\n",
    "\n",
    "    train_loss = train_loss / len(train_dataset)\n",
    "    # NOTE(review): thresholding at 0.5 assumes `outputs` are already\n",
    "    # probabilities (sigmoid applied inside the model) -- confirm against the\n",
    "    # model/criterion definition in the cell above.\n",
    "    train_pred_labels = (np.array(train_preds) > 0.5).astype(int)\n",
    "    train_acc = accuracy_score(np.array(train_true).flatten(), train_pred_labels.flatten())\n",
    "    train_auc = roc_auc_score(np.array(train_true).flatten(), np.array(train_preds).flatten())\n",
    "\n",
    "    # ---- Validation phase (no gradients) ----\n",
    "    model.eval()\n",
    "    val_loss = 0.0\n",
    "    val_preds = []\n",
    "    val_true = []\n",
    "\n",
    "    with torch.no_grad():\n",
    "        for inputs, targets in val_loader:\n",
    "            inputs, targets = inputs.to(device), targets.to(device)\n",
    "            outputs = model(inputs)\n",
    "            loss = criterion(outputs, targets)\n",
    "            val_loss += loss.item() * inputs.size(0)\n",
    "            val_preds.extend(outputs.cpu().numpy())\n",
    "            val_true.extend(targets.cpu().numpy())\n",
    "\n",
    "    val_loss = val_loss / len(val_dataset)\n",
    "    val_pred_labels = (np.array(val_preds) > 0.5).astype(int)\n",
    "    val_acc = accuracy_score(np.array(val_true).flatten(), val_pred_labels.flatten())\n",
    "    val_auc = roc_auc_score(np.array(val_true).flatten(), np.array(val_preds).flatten())\n",
    "\n",
    "    # Record this epoch's metrics for the plots below.\n",
    "    train_losses.append(train_loss)\n",
    "    val_losses.append(val_loss)\n",
    "    train_accs.append(train_acc)\n",
    "    val_accs.append(val_acc)\n",
    "    train_aucs.append(train_auc)\n",
    "    val_aucs.append(val_auc)\n",
    "\n",
    "    print(f'Epoch {epoch+1}/{num_epochs}')\n",
    "    print(f'Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.4f}, Train AUC: {train_auc:.4f}')\n",
    "    print(f'Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.4f}, Val AUC: {val_auc:.4f}')\n",
    "    print('-' * 50)\n",
    "\n",
    "    # ---- Early stopping on validation AUC ----\n",
    "    if val_auc > best_val_auc:\n",
    "        best_val_auc = val_auc\n",
    "        counter = 0\n",
    "        # BUG FIX: the original `model.state_dict().copy()` is a *shallow*\n",
    "        # copy -- the dict is new but the tensors inside are still the live\n",
    "        # parameters, so the saved \"best\" snapshot silently tracked every\n",
    "        # later update and restoring it was a no-op. Clone each tensor so the\n",
    "        # snapshot is truly frozen at this epoch.\n",
    "        best_model_state = {k: v.detach().clone() for k, v in model.state_dict().items()}\n",
    "        print(f\"New best validation AUC: {val_auc:.4f}\")\n",
    "    else:\n",
    "        counter += 1\n",
    "        print(f\"EarlyStopping counter: {counter} out of {patience}\")\n",
    "\n",
    "    if counter >= patience:\n",
    "        print(f\"Early stopping triggered at epoch {epoch+1}\")\n",
    "        break\n",
    "\n",
    "# Restore the best-by-validation-AUC weights captured in the loop above;\n",
    "# best_model_state stays None only if no epoch ever beat the initial\n",
    "# best_val_auc of 0.\n",
    "if best_model_state is not None:\n",
    "    model.load_state_dict(best_model_state)\n",
    "    print(f\"Loaded best model with validation AUC: {best_val_auc:.4f}\")\n",
    "\n",
    "# Plot the ROC curve for the restored best model on the validation set.\n",
    "# BUG FIX: `roc_curve` and `auc` are not among the sklearn.metrics imports in\n",
    "# the imports cell at the top of the notebook, so this cell raised NameError\n",
    "# on a fresh Restart-&-Run-All. Import them explicitly here.\n",
    "from sklearn.metrics import roc_curve, auc\n",
    "\n",
    "# Re-evaluate the validation set using the best model\n",
    "model.eval()\n",
    "val_preds = []\n",
    "val_true = []\n",
    "\n",
    "with torch.no_grad():\n",
    "    for inputs, targets in val_loader:\n",
    "        inputs, targets = inputs.to(device), targets.to(device)\n",
    "        outputs = model(inputs)\n",
    "        val_preds.extend(outputs.cpu().numpy())\n",
    "        val_true.extend(targets.cpu().numpy())\n",
    "\n",
    "val_probs = np.array(val_preds).flatten()\n",
    "val_true_flat = np.array(val_true).flatten()\n",
    "\n",
    "fpr, tpr, thresholds = roc_curve(val_true_flat, val_probs)\n",
    "roc_auc = auc(fpr, tpr)\n",
    "\n",
    "plt.figure(figsize=(10, 8))\n",
    "plt.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.3f})')\n",
    "plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')\n",
    "plt.xlim([0.0, 1.0])\n",
    "plt.ylim([0.0, 1.05])\n",
    "plt.xlabel('False Positive Rate', fontsize=14)\n",
    "plt.ylabel('True Positive Rate', fontsize=14)\n",
    "plt.title('Receiver Operating Characteristic (ROC) Curve', fontsize=16)\n",
    "plt.legend(loc=\"lower right\", fontsize=12)\n",
    "plt.grid(alpha=0.3)\n",
    "plt.show()\n",
    "\n",
    "# Plot accuracy and AUC histories over the epochs that actually ran\n",
    "# (may be fewer than num_epochs if early stopping fired).\n",
    "epochs = np.arange(1, len(train_aucs) + 1)\n",
    "\n",
    "plt.figure(figsize=(12, 8))\n",
    "plt.subplot(2, 1, 1)\n",
    "plt.plot(epochs, train_accs, 'b-', label='Training Accuracy')\n",
    "plt.plot(epochs, val_accs, 'r-', label='Validation Accuracy')\n",
    "plt.title('Training and Validation Accuracy vs Epochs', fontsize=14)\n",
    "plt.xlabel('Epochs', fontsize=12)\n",
    "plt.ylabel('Accuracy', fontsize=12)\n",
    "plt.legend(fontsize=12)\n",
    "plt.grid(alpha=0.3)\n",
    "\n",
    "plt.subplot(2, 1, 2)\n",
    "plt.plot(epochs, train_aucs, 'b-', label='Training AUC')\n",
    "plt.plot(epochs, val_aucs, 'r-', label='Validation AUC')\n",
    "plt.title('Training and Validation AUC vs Epochs', fontsize=14)\n",
    "plt.xlabel('Epochs', fontsize=12)\n",
    "plt.ylabel('AUC Value', fontsize=12)\n",
    "plt.legend(fontsize=12)\n",
    "plt.grid(alpha=0.3)\n",
    "plt.ylim([0.5, 1.0])\n",
    "\n",
    "# Mark the best-model epoch if early stopping fired. FIX: the original\n",
    "# guard's second clause, `len(epochs) > (len(train_aucs) - patience)`, was\n",
    "# always true because `epochs` is built from len(train_aucs); it is dropped.\n",
    "# After an early stop the last improvement was `patience` epochs before the\n",
    "# final one. NOTE: axvline lands on the currently active axes, i.e. the AUC\n",
    "# subplot only.\n",
    "if counter >= patience:\n",
    "    early_stop_epoch = len(train_aucs) - patience\n",
    "    plt.axvline(x=early_stop_epoch, color='g', linestyle='--', label='Early Stop Point')\n",
    "    plt.legend(fontsize=12)\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.show()\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
