{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19",
    "_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5",
    "execution": {
     "iopub.execute_input": "2025-06-25T00:33:17.897681Z",
     "iopub.status.busy": "2025-06-25T00:33:17.896677Z",
     "iopub.status.idle": "2025-06-25T00:33:17.906207Z",
     "shell.execute_reply": "2025-06-25T00:33:17.905542Z",
     "shell.execute_reply.started": "2025-06-25T00:33:17.897649Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "./data\\autoencoder_model.keras\n",
      "./data\\encoder_model.keras\n",
      "./data\\Fertilizer Prediction.csv\n",
      "./data\\sample_submission.csv\n",
      "./data\\test.csv\n",
      "./data\\train.csv\n"
     ]
    }
   ],
   "source": [
    "# This Python 3 environment comes with many helpful analytics libraries installed\n",
    "# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n",
    "# For example, here's several helpful packages to load\n",
    "\n",
    "import numpy as np # linear algebra\n",
    "import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n",
    "\n",
    "# Input data files are available in the read-only \"../input/\" directory\n",
    "# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n",
    "\n",
    "import os\n",
    "for dirname, _, filenames in os.walk('./data'):\n",
    "    for filename in filenames:\n",
    "        print(os.path.join(dirname, filename))\n",
    "\n",
    "# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using \"Save & Run All\" \n",
    "# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-06-25T00:33:17.907788Z",
     "iopub.status.busy": "2025-06-25T00:33:17.907545Z",
     "iopub.status.idle": "2025-06-25T00:33:26.838864Z",
     "shell.execute_reply": "2025-06-25T00:33:26.838035Z",
     "shell.execute_reply.started": "2025-06-25T00:33:17.907769Z"
    },
    "trusted": true
   },
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "import gc\n",
    "import warnings\n",
    "from sklearn.model_selection import StratifiedKFold\n",
    "from sklearn.preprocessing import LabelEncoder, StandardScaler\n",
    "from sklearn.decomposition import PCA\n",
    "from xgboost import XGBClassifier\n",
    "from lightgbm import LGBMClassifier\n",
    "import lightgbm as lgb\n",
    "\n",
    "warnings.simplefilter(action='ignore')\n",
    "SEED = 42"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-06-25T00:33:26.840215Z",
     "iopub.status.busy": "2025-06-25T00:33:26.839711Z",
     "iopub.status.idle": "2025-06-25T00:33:28.345239Z",
     "shell.execute_reply": "2025-06-25T00:33:28.344545Z",
     "shell.execute_reply.started": "2025-06-25T00:33:26.840196Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loading all datasets...\n",
      "Augmenting external data (3x)...\n"
     ]
    }
   ],
   "source": [
    "print(\"Loading all datasets...\")\n",
    "# train_df = pd.read_csv(\"/kaggle/input/playground-series-s5e6/train.csv\")\n",
    "# test_df = pd.read_csv(\"/kaggle/input/playground-series-s5e6/test.csv\")\n",
    "# original_df = pd.read_csv(\"/kaggle/input/fertilizer-prediction/Fertilizer Prediction.csv\")\n",
    "# submission = pd.read_csv(\"/kaggle/input/playground-series-s5e6/sample_submission.csv\")\n",
    "train_df = pd.read_csv(\"./data/train.csv\")  \n",
    "test_df = pd.read_csv(\"./data/test.csv\")\n",
    "original_df = pd.read_csv(\"./data/Fertilizer Prediction.csv\")\n",
    "submission = pd.read_csv(\"./data/sample_submission.csv\")\n",
    "\n",
    "for df in [train_df, test_df, original_df]:\n",
    "    df.rename(columns={'Temparature': 'Temperature'}, inplace=True)\n",
    "\n",
    "# FIX: Reduced data augmentation to prevent memory errors\n",
    "print(\"Augmenting external data (3x)...\")\n",
    "original_copy = original_df.copy()\n",
    "for _ in range(2): # Total of 3 copies (original + 2)\n",
    "    original_df = pd.concat([original_df, original_copy], axis=0, ignore_index=True)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-06-25T00:33:28.346936Z",
     "iopub.status.busy": "2025-06-25T00:33:28.346694Z",
     "iopub.status.idle": "2025-06-25T00:33:30.644585Z",
     "shell.execute_reply": "2025-06-25T00:33:30.643898Z",
     "shell.execute_reply.started": "2025-06-25T00:33:28.346918Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Applying lean and mean feature engineering...\n"
     ]
    }
   ],
   "source": [
     "print(\"Applying lean and mean feature engineering...\")\n",
     "train_ref = train_df.copy()\n",
     "numerical_cols = ['Temperature', 'Humidity', 'Moisture', 'Nitrogen', 'Phosphorous', 'Potassium']\n",
     "\n",
     "# Scaler, PCA and per-class centroids are fitted on TRAIN only, then applied\n",
     "# to train, test and original below (no information from test leaks into the fit).\n",
     "scaler = StandardScaler().fit(train_ref[numerical_cols])\n",
     "pca = PCA(n_components=2, random_state=SEED).fit(scaler.transform(train_ref[numerical_cols])) # Reduced PCA components\n",
     "centroids = train_ref.groupby('Fertilizer Name')[numerical_cols].mean().to_dict('index')\n",
     "\n",
     "def create_lean_features(df, scaler, pca, centroids):\n",
     "    \"\"\"Add quantile bins, 2 PCA components and per-class centroid distances to df (mutates and returns df).\"\"\"\n",
     "    # FIX: Using memory-efficient quantile binning instead of astype(str)\n",
     "    # NOTE(review): qcut edges are recomputed per dataframe, so the same raw\n",
     "    # value can fall into different bins for train vs test/original — confirm intended.\n",
     "    for col in numerical_cols:\n",
     "        df[f'{col}_qbin'] = pd.qcut(df[col], q=255, labels=False, duplicates='drop')\n",
     "        \n",
     "    # Advanced Numerical Features\n",
     "    df_scaled = scaler.transform(df[numerical_cols])\n",
     "    \n",
     "    pca_features = pca.transform(df_scaled)\n",
     "    df['PCA_1'] = pca_features[:, 0]\n",
     "    df['PCA_2'] = pca_features[:, 1]\n",
     "    \n",
     "    # Euclidean distance (in scaled space) from each row to each fertilizer's\n",
     "    # training-set centroid. Relies on the dict's value order matching\n",
     "    # numerical_cols, which to_dict('index') of the groupby above guarantees.\n",
     "    for fertilizer, center in centroids.items():\n",
     "        center_scaled = scaler.transform([list(center.values())])\n",
     "        distance = np.linalg.norm(df_scaled - center_scaled, axis=1)\n",
     "        df[f'dist_to_{fertilizer.replace(\" \", \"_\")}'] = distance\n",
     "    return df\n",
     "\n",
     "train = create_lean_features(train_df, scaler, pca, centroids)\n",
     "test = create_lean_features(test_df, scaler, pca, centroids)\n",
     "original = create_lean_features(original_df, scaler, pca, centroids)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-06-25T00:33:30.645526Z",
     "iopub.status.busy": "2025-06-25T00:33:30.645314Z",
     "iopub.status.idle": "2025-06-25T00:34:00.552991Z",
     "shell.execute_reply": "2025-06-25T00:34:00.552202Z",
     "shell.execute_reply.started": "2025-06-25T00:33:30.645509Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Encoding features...\n"
     ]
    }
   ],
   "source": [
    "print(\"Encoding features...\")\n",
    "train['is_test'] = 0; test['is_test'] = 1; original['is_test'] = 0\n",
    "combined = pd.concat([train, test, original], ignore_index=True)\n",
    "categorical_cols = ['Soil Type', 'Crop Type'] # Only original categoricals need encoding now\n",
    "for col in categorical_cols:\n",
    "    combined[col] = combined[col].astype('category').cat.codes\n",
    "target_encoder = LabelEncoder().fit(pd.concat([train_df['Fertilizer Name'], original_df['Fertilizer Name']]).dropna().unique())\n",
    "combined['Fertilizer Name'] = combined['Fertilizer Name'].map(lambda s: target_encoder.transform([s])[0] if pd.notnull(s) else s)\n",
    "train = combined[combined['is_test'] == 0].drop(columns=['is_test']); test = combined[combined['is_test'] == 1].drop(columns=['is_test', 'Fertilizer Name'])\n",
    "original = train[len(train_df):].copy(); train = train[:len(train_df)].copy()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-06-25T00:34:00.554113Z",
     "iopub.status.busy": "2025-06-25T00:34:00.553889Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "PCA_2               100505.617614\n",
      "PCA_1                98381.551910\n",
      "Crop Type            85704.362684\n",
      "Phosphorous          68700.563117\n",
      "Nitrogen             65873.062437\n",
      "Phosphorous_qbin     63124.268456\n",
      "Moisture             62614.303041\n",
      "Moisture_qbin        56885.333629\n",
      "Nitrogen_qbin        53384.575512\n",
      "Humidity             46135.193959\n",
      "Soil Type            46031.785227\n",
      "Potassium            45194.905888\n",
      "dist_to_DAP          39582.781877\n",
      "Humidity_qbin        37565.174050\n",
      "dist_to_17-17-17     37136.311582\n",
      "Potassium_qbin       37107.509932\n",
      "dist_to_14-35-14     35178.186342\n",
      "Temperature          33837.533496\n",
      "dist_to_10-26-26     32309.960346\n",
      "dist_to_28-28        30806.757456\n",
      "dist_to_20-20        30664.116244\n",
      "dist_to_Urea         29464.268875\n",
      "Temperature_qbin     27680.214185\n",
      "dtype: float64\n"
     ]
    }
   ],
   "source": [
    "# Define the function FIRST\n",
    "def lightgbm_gain_rank(X, y, folds=5, seed=42, n_estimators=1000):\n",
    "    gains = pd.Series(0.0, index=X.columns)\n",
    "    skf = StratifiedKFold(n_splits=folds, shuffle=True, random_state=seed)\n",
    "\n",
    "    for tr_idx, va_idx in skf.split(X, y):\n",
    "        model = lgb.LGBMClassifier(\n",
    "            objective='multiclass',\n",
    "            num_class=y.nunique(),\n",
    "            learning_rate=0.05,\n",
    "            n_estimators=n_estimators,\n",
    "            max_depth=10,\n",
    "            subsample=0.8,\n",
    "            colsample_bytree=0.8,\n",
    "            device='gpu',\n",
    "            verbose=-1,\n",
    "            random_state=seed\n",
    "        )\n",
    "        model.fit(X.iloc[tr_idx], y.iloc[tr_idx],\n",
    "                  eval_set=[(X.iloc[va_idx], y.iloc[va_idx])],\n",
    "                  callbacks=[lgb.early_stopping(100, verbose=False)])\n",
    "        \n",
    "        gains += pd.Series(model.booster_.feature_importance(importance_type=\"gain\"), index=X.columns)\n",
    "\n",
    "    gains /= folds\n",
    "    return gains.sort_values(ascending=False)\n",
    "\n",
    "X = train.drop(columns=[\"id\", \"Fertilizer Name\"])\n",
    "y = train[\"Fertilizer Name\"].astype(int)\n",
    "X_test = test.drop(columns=[\"id\"])\n",
    "X_original = original.drop(columns=[\"id\", \"Fertilizer Name\"])\n",
    "y_original = original[\"Fertilizer Name\"].astype(int)\n",
    "\n",
    "imp = lightgbm_gain_rank(X, y)\n",
    "print(imp.head(25))  # Show top 25 for visibility\n",
    "\n",
    "# Keep top 25 features based on gain\n",
    "core_cols = imp.head(25).index.tolist()\n",
    "\n",
    "# Create filtered datasets\n",
    "X_core = X[core_cols]\n",
    "X_test_core = X_test[core_cols]\n",
    "X_original_core = X_original[core_cols]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "🚀 Training XGBoost on top features with hybrid augmentation...\n",
      "🔁 Fold 1/5\n"
     ]
    },
    {
     "ename": "TypeError",
     "evalue": "XGBClassifier.fit() got an unexpected keyword argument 'early_stopping_rounds'",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mTypeError\u001b[39m                                 Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[7]\u001b[39m\u001b[32m, line 35\u001b[39m\n\u001b[32m     32\u001b[39m y_valid = y.iloc[valid_idx]\n\u001b[32m     34\u001b[39m model = XGBClassifier(**params)\n\u001b[32m---> \u001b[39m\u001b[32m35\u001b[39m \u001b[43mmodel\u001b[49m\u001b[43m.\u001b[49m\u001b[43mfit\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m     36\u001b[39m \u001b[43m    \u001b[49m\u001b[43mx_train\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my_train\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m     37\u001b[39m \u001b[43m    \u001b[49m\u001b[43meval_set\u001b[49m\u001b[43m=\u001b[49m\u001b[43m[\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx_valid\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my_valid\u001b[49m\u001b[43m)\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m     38\u001b[39m \u001b[43m    \u001b[49m\u001b[43mearly_stopping_rounds\u001b[49m\u001b[43m=\u001b[49m\u001b[32;43m50\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[32m     39\u001b[39m \u001b[43m    \u001b[49m\u001b[43mverbose\u001b[49m\u001b[43m=\u001b[49m\u001b[32;43m1000\u001b[39;49m\n\u001b[32m     40\u001b[39m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m     42\u001b[39m final_test_preds += model.predict_proba(X_test_core) / FOLDS\n\u001b[32m     44\u001b[39m \u001b[38;5;28;01mdel\u001b[39;00m x_train, x_valid, y_train, y_valid, model\n",
      "\u001b[36mFile \u001b[39m\u001b[32mc:\\Users\\fh\\.conda\\envs\\kaggle312\\Lib\\site-packages\\xgboost\\core.py:729\u001b[39m, in \u001b[36mrequire_keyword_args.<locals>.throw_if.<locals>.inner_f\u001b[39m\u001b[34m(*args, **kwargs)\u001b[39m\n\u001b[32m    727\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m k, arg \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mzip\u001b[39m(sig.parameters, args):\n\u001b[32m    728\u001b[39m     kwargs[k] = arg\n\u001b[32m--> \u001b[39m\u001b[32m729\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "\u001b[31mTypeError\u001b[39m: XGBClassifier.fit() got an unexpected keyword argument 'early_stopping_rounds'"
     ]
    }
   ],
   "source": [
    "from xgboost import XGBClassifier\n",
    "\n",
    "# Winning XGBoost params\n",
    "params = {\n",
    "    'objective': 'multi:softprob',\n",
    "    'num_class': y.nunique(),\n",
    "    'max_depth': 7,\n",
    "    'learning_rate': 0.03,\n",
    "    'subsample': 0.8,\n",
    "    'max_bin': 128,\n",
    "    'colsample_bytree': 0.3,\n",
    "    'tree_method': 'hist',\n",
    "    'random_state': 42,\n",
    "    'eval_metric': 'mlogloss',\n",
    "    'device': \"cuda\",\n",
    "    'enable_categorical': True,\n",
    "    'n_estimators': 10000\n",
    "}\n",
    "\n",
    "FOLDS = 5\n",
    "skf = StratifiedKFold(n_splits=FOLDS, shuffle=True, random_state=42)\n",
    "\n",
    "final_test_preds = np.zeros((len(X_test_core), y.nunique()))\n",
    "\n",
    "print(\"\\n🚀 Training XGBoost on top features with hybrid augmentation...\")\n",
    "for fold, (train_idx, valid_idx) in enumerate(skf.split(X_core, y)):\n",
    "    print(f\"🔁 Fold {fold + 1}/{FOLDS}\")\n",
    "    \n",
    "    x_train = pd.concat([X_core.iloc[train_idx], X_original_core], axis=0)\n",
    "    y_train = pd.concat([y.iloc[train_idx], y_original], axis=0)\n",
    "    x_valid = X_core.iloc[valid_idx]\n",
    "    y_valid = y.iloc[valid_idx]\n",
    "    \n",
    "    model = XGBClassifier(**params)\n",
    "    model.fit(\n",
    "        x_train, y_train,\n",
    "        eval_set=[(x_valid, y_valid)],\n",
    "        early_stopping_rounds=50,\n",
    "        verbose=1000\n",
    "    )\n",
    "    \n",
    "    final_test_preds += model.predict_proba(X_test_core) / FOLDS\n",
    "\n",
    "    del x_train, x_valid, y_train, y_valid, model\n",
    "    gc.collect()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "trusted": true
   },
   "outputs": [],
   "source": [
    "print(\"\\nTraining complete. Creating final submission file...\")\n",
    "top_3_preds_indices = np.argsort(final_test_preds, axis=1)[:, ::-1][:, :3]\n",
    "target_encoder.fit(train_df['Fertilizer Name'].unique()) # Re-fit on original labels\n",
    "top_3_labels = target_encoder.inverse_transform(top_3_preds_indices.ravel()).reshape(top_3_preds_indices.shape)\n",
    "submission['Fertilizer Name'] = [' '.join(row) for row in top_3_labels]\n",
    "submission.to_csv('submission_lean_stack.csv', index=False)\n",
    "print(\"✅ Lean Stacking submission saved successfully!\")\n",
    "print(submission.head())"
   ]
  }
 ],
 "metadata": {
  "kaggle": {
   "accelerator": "nvidiaTeslaT4",
   "dataSources": [
    {
     "databundleVersionId": 12184666,
     "sourceId": 91717,
     "sourceType": "competition"
    },
    {
     "datasetId": 7269189,
     "sourceId": 11592231,
     "sourceType": "datasetVersion"
    }
   ],
   "dockerImageVersionId": 31041,
   "isGpuEnabled": true,
   "isInternetEnabled": true,
   "language": "python",
   "sourceType": "notebook"
  },
  "kernelspec": {
   "display_name": "kaggle312",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
