{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/py38/lib/python3.8/site-packages/sklearn/datasets/_openml.py:932: FutureWarning: The default value of `parser` will change from `'liac-arff'` to `'auto'` in 1.4. You can set `parser='auto'` to silence this warning. Therefore, an `ImportError` will be raised from 1.4 if the dataset is dense and pandas is not installed. Note that the pandas parser may return different data types. See the Notes Section in fetch_openml's API doc for details.\n",
      "  warn(\n",
      "/tmp/ipykernel_395798/923413905.py:21: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  data[col] = le.fit_transform(data[col])\n"
     ]
    }
   ],
   "source": [
    "\n",
    "from sklearn.datasets import fetch_openml\n",
    "import numpy as np\n",
    "from types import SimpleNamespace\n",
    "from typing import Tuple, List\n",
    "import pandas as pd\n",
    "from sklearn.preprocessing import LabelEncoder\n",
    "from sklearn.preprocessing import OneHotEncoder, MinMaxScaler\n",
    "    \n",
    "boston = fetch_openml(data_id = 531, data_home='./data_cache')\n",
    "\n",
    "data = boston.data\n",
    "\n",
    "label = pd.Series(boston.target)\n",
    "\n",
    "\n",
    "category_cols = [\"CHAS\"]\n",
    "continuous_cols = [x for x in data.columns if x not in category_cols]\n",
    "\n",
    "le = LabelEncoder()\n",
    "for col in category_cols:\n",
    "    data[col] = le.fit_transform(data[col])\n",
    "\n",
    "\n",
    "    \n",
    "    \n",
    "temp = None\n",
    "for col in category_cols:\n",
    "    oh_values = OneHotEncoder().fit_transform(data[col].values.reshape((-1, 1))).toarray()\n",
    "    new_cols = [col + \"-\" + str(i) for i in range(len(data[col].unique()))]\n",
    "    oh_values = pd.DataFrame(oh_values, columns = new_cols, dtype=np.int8, index=data.index)\n",
    "    if temp is None:\n",
    "        temp = oh_values\n",
    "    else:\n",
    "        temp = temp.merge(oh_values, left_index=True, right_index=True)\n",
    "\n",
    "data = data.merge(temp, left_index=True, right_index=True)\n",
    "data.drop(category_cols, inplace=True, axis=1)\n",
    "\n",
    "category_cols = temp.columns\n",
    "\n",
    "scaler = MinMaxScaler()\n",
    "data[continuous_cols] = scaler.fit_transform(data[continuous_cols])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/py38/lib/python3.8/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "source": [
    "import torch.nn as nn\n",
    "model_hparams = {\n",
    "    \"encoder_dim\" : data.shape[1],\n",
    "    \"predictor_hidden_dim\" : 64,\n",
    "    \"predictor_output_dim\" : 1,\n",
    "    'alpha1' : 0.5,\n",
    "    'alpha2' : 0.5,\n",
    "    'beta' : 0.5,\n",
    "    'K' : 10\n",
    "}\n",
    "data_hparams = {\n",
    "    \"K\" : 10,\n",
    "    \"p_m\" : 0.2\n",
    "}\n",
    "optim_hparams = {\n",
    "    \"lr\" : 0.05\n",
    "}\n",
    "scheduler_hparams = {\n",
    "    'gamma' : 0.3,\n",
    "    'step_size' : 30\n",
    "}\n",
    "num_categoricals = len(continuous_cols)\n",
    "num_continuous = len(continuous_cols)\n",
    "consistency_loss = nn.MSELoss\n",
    "loss_fn = nn.MSELoss\n",
    "metric =  \"mean_squared_error\"\n",
    "random_seed = 0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
     "from misc.scorer import BaseScorer\n",
     "\n",
     "\n",
     "class MSEScorer(BaseScorer):\n",
     "    \"\"\"Scorer that applies the metric resolved by BaseScorer from its name.\"\"\"\n",
     "\n",
     "    def __init__(self, metric: str) -> None:\n",
     "        super().__init__(metric)\n",
     "    \n",
     "    def __call__(self, y, y_hat) -> float:\n",
     "        # self.metric is set up by BaseScorer from the metric-name string.\n",
     "        return self.metric(y, y_hat)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
     "from sklearn.model_selection import train_test_split\n",
     "\n",
     "# 70/30 split into training and held-out validation sets.\n",
     "X_train, X_valid, y_train, y_valid = train_test_split(data, label, train_size = 0.7, random_state=random_seed)\n",
     "\n",
     "# Keep only 10% of the training split as labeled data; the remaining 90%\n",
     "# becomes the unlabeled pool for pretraining (its labels are discarded via `_`).\n",
     "X_train, X_unlabeled, y_train, _ = train_test_split(X_train, y_train, train_size = 0.1, random_state=random_seed)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "from pytorch_lightning import Trainer\n",
    "from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint\n",
    "from data_utils import *\n",
    "\n",
    "gpus = [1]\n",
    "n_jobs = 32\n",
    "max_epochs = 30\n",
    "batch_size = 64\n",
    "\n",
    "pretraining_patience = 10\n",
    "early_stopping_patience = 10\n",
    "\n",
    "def fit_model(\n",
    "            model,\n",
    "            data_hparams\n",
    "    ):\n",
    "    \n",
    "    train_ds = VIMESelfDataset(X_train.append(X_unlabeled), data_hparams, continuous_cols, category_cols)\n",
    "    test_ds = VIMESelfDataset(X_valid, data_hparams, continuous_cols, category_cols)\n",
    "    \n",
    "    pl_datamodule = PLDataModule(train_ds, test_ds, batch_size=batch_size, is_regression=True)\n",
    "\n",
    "    model.do_pretraining()\n",
    "\n",
    "    callbacks = [\n",
    "        EarlyStopping(\n",
    "            monitor= 'val_loss', \n",
    "            mode = 'min',\n",
    "            patience = pretraining_patience,\n",
    "            verbose = False\n",
    "        )\n",
    "    ]\n",
    "    pretraining_path = f'temporary_ckpt_data/pretraining'\n",
    "    checkpoint_callback = ModelCheckpoint(\n",
    "        monitor='val_loss',\n",
    "        dirpath=pretraining_path,\n",
    "        filename='pretraining-{epoch:02d}-{val_f1:.4f}',\n",
    "        save_top_k=1,\n",
    "        mode = 'min'\n",
    "    )\n",
    "\n",
    "    callbacks.append(checkpoint_callback)\n",
    "\n",
    "    trainer = Trainer(\n",
    "                    devices = gpus,\n",
    "                    accelerator=\"cuda\" if len(gpus) >= 1 else 'cpu',\n",
    "                    max_epochs = max_epochs,\n",
    "                    num_sanity_val_steps = 2,\n",
    "                    callbacks = callbacks,\n",
    "    )\n",
    "\n",
    "    trainer.fit(model, pl_datamodule)\n",
    "    \n",
    "    pretraining_path = checkpoint_callback.best_model_path\n",
    "\n",
    "    model = model.load_from_checkpoint(pretraining_path)\n",
    "\n",
    "    model.do_finetunning()\n",
    "    \n",
    "        \n",
    "    train_ds = VIMERegressionDataset(X_train, y_train.values, data_hparams, X_unlabeled, continuous_cols, category_cols)\n",
    "    test_ds = VIMERegressionDataset(X_valid, y_valid.values, data_hparams, None, continuous_cols, category_cols)\n",
    "\n",
    "    pl_datamodule = PLDataModule(train_ds, test_ds, batch_size = batch_size, is_regression=True, n_jobs=32)\n",
    "        \n",
    "    callbacks = [\n",
    "        EarlyStopping(\n",
    "            monitor= 'val_' + metric, \n",
    "            mode = 'min',\n",
    "            patience = early_stopping_patience,\n",
    "            verbose = False\n",
    "        )\n",
    "    ]\n",
    "\n",
    "    checkpoint_path = None\n",
    "\n",
    "    checkpoint_path = f'temporary_ckpt_data/'\n",
    "    checkpoint_callback = ModelCheckpoint(\n",
    "        monitor='val_' + metric,\n",
    "        dirpath=checkpoint_path,\n",
    "        filename='{epoch:02d}-{val_f1:.4f}',\n",
    "        save_top_k=1,\n",
    "        mode = 'min'\n",
    "    )\n",
    "\n",
    "    callbacks.append(checkpoint_callback)\n",
    "\n",
    "    trainer = Trainer(\n",
    "                    devices = gpus,\n",
    "                    accelerator = \"cuda\" if len(gpus) >= 1 else 'cpu',\n",
    "                    # replace_sampler_ddp=False,\n",
    "                    max_epochs = max_epochs,\n",
    "                    num_sanity_val_steps = 2,\n",
    "                    callbacks = callbacks,\n",
    "    )\n",
    "\n",
    "    trainer.fit(model, pl_datamodule)\n",
    "\n",
    "    model = model.load_from_checkpoint(checkpoint_callback.best_model_path)\n",
    "    \n",
    "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Optuna search space: each entry maps a hyperparameter name to\n",
     "# [trial suggest-method name, positional args for that method].\n",
     "# Keys are routed into model/data/optim/scheduler hparam dicts by objective().\n",
     "hparams_range = {\n",
     "    \n",
     "    'predictor_hidden_dim' : ['suggest_int', ['predictor_hidden_dim', 16, 512]],\n",
     "    # 'predictor_output_dim' : ['suggest_int', ['emb_dim', 16, 512]],\n",
     "    \n",
     "    'p_m' : [\"suggest_float\", [\"p_m\", 0.1, 0.9]],\n",
     "    'alpha1' : [\"suggest_float\", [\"alpha1\", 0.1, 5]],\n",
     "    'alpha2' : [\"suggest_float\", [\"alpha2\", 0.1, 5]],\n",
     "    'beta' : [\"suggest_float\", [\"beta\", 0.0, 3]],\n",
     "    'K' : [\"suggest_int\", [\"K\", 2, 20]],\n",
     "\n",
     "\n",
     "    # Optimizer / LR-scheduler hyperparameters.\n",
     "    'lr' : ['suggest_float', ['lr', 0.0001, 0.05]],\n",
     "    'gamma' : ['suggest_float', ['gamma', 0.1, 0.95]],\n",
     "    'step_size' : ['suggest_int', ['step_size', 10, 100]],\n",
     "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
     "import optuna\n",
     "import torch.nn.functional as F\n",
     "from sklearn.metrics import accuracy_score\n",
     "\n",
     "def objective(      trial: optuna.trial.Trial,\n",
     "        ) -> float:\n",
     "        \"\"\"Optuna objective: fit PLVIME with trial-suggested hyperparameters\n",
     "        and return its validation mean squared error (minimized by the study).\n",
     "\n",
     "        Args:\n",
     "            trial: Optuna trial used to suggest hyperparameter values.\n",
     "\n",
     "        Returns:\n",
     "            Mean squared error of the fitted model on the validation split.\n",
     "        \"\"\"\n",
     "        # Hyperparameter templates; None placeholders are filled from hparams_range.\n",
     "        model_hparams = {\n",
     "            \"encoder_dim\" : data.shape[1],\n",
     "            \"predictor_hidden_dim\" : None,\n",
     "            \"predictor_output_dim\" : 1,\n",
     "            'alpha1' : None,\n",
     "            'alpha2' : None,\n",
     "            'beta' : None,\n",
     "            'K' : None\n",
     "        }\n",
     "        \n",
     "        data_hparams = {\n",
     "            \"K\" : None,\n",
     "            \"p_m\" : None\n",
     "        }\n",
     "        optim_hparams = {\n",
     "            \"lr\" : None\n",
     "        }\n",
     "        scheduler_hparams = {\n",
     "            'gamma' : None,\n",
     "            'step_size' : None\n",
     "        }\n",
     "\n",
     "        # Route each search-space entry into every hparams dict that declares\n",
     "        # its key, calling trial.<suggest_method>(*args) to draw the value.\n",
     "        for k, v in hparams_range.items():\n",
     "            if k in model_hparams.keys():\n",
     "                model_hparams[k] = getattr(trial, v[0])(*v[1])\n",
     "            if k in data_hparams.keys():\n",
     "                data_hparams[k] = getattr(trial, v[0])(*v[1])\n",
     "            if k in optim_hparams.keys():\n",
     "                optim_hparams[k] = getattr(trial, v[0])(*v[1])\n",
     "            if k in scheduler_hparams.keys():\n",
     "                scheduler_hparams[k] = getattr(trial, v[0])(*v[1])\n",
     "\n",
     "        \n",
     "        from pl_vime import PLVIME  \n",
     "        pl_vime = PLVIME(model_hparams, \"Adam\", optim_hparams, \"StepLR\", scheduler_hparams, \n",
     "        num_categoricals, num_continuous, -1, loss_fn,\n",
     "        MSEScorer(\"mean_squared_error\"), random_seed)\n",
     "        \n",
     "        # Pretrain + finetune; returns the model restored from its best checkpoint.\n",
     "        pl_vime = fit_model(pl_vime, data_hparams)\n",
     "        \n",
     "\n",
     "        trainer = Trainer(\n",
     "                    devices = gpus,\n",
     "                    accelerator = \"cuda\" if len(gpus)>= 1 else 'cpu',\n",
     "                    max_epochs = max_epochs,\n",
     "                    num_sanity_val_steps = 2,\n",
     "                    callbacks = None,\n",
     "        )\n",
     "        # Score on the validation split in deterministic (sequential) order.\n",
     "        test_ds = VIMERegressionDataset(X_valid, y_valid.values, data_hparams, None, continuous_cols, category_cols)\n",
     "        test_dl = DataLoader(test_ds, batch_size, shuffle=False, sampler = SequentialSampler(test_ds), num_workers=n_jobs)\n",
     "\n",
     "        preds = trainer.predict(pl_vime, test_dl)\n",
     "\n",
     "        preds = np.concatenate([out.cpu().numpy() for out in preds])\n",
     "\n",
     "        from sklearn.metrics import mean_squared_error\n",
     "        mse = mean_squared_error(y_valid, preds.squeeze())\n",
     "\n",
     "        return mse"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[I 2023-12-19 15:30:24,562] A new study created in memory with name: no-name-723a4137-3f79-485b-ad74-402ba56ae847\n",
      "Global seed set to 0\n",
      "/tmp/ipykernel_395798/2137398929.py:18: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.\n",
      "  train_ds = VIMESelfDataset(X_train.append(X_unlabeled), data_hparams, continuous_cols, category_cols)\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "You are using a CUDA device ('NVIDIA GeForce RTX 4090') that has Tensor Cores. To properly utilize them, you should set `torch.set_float32_matmul_precision('medium' | 'high')` which will trade-off precision for performance. For more details, read https://pytorch.org/docs/stable/generated/torch.set_float32_matmul_precision.html#torch.set_float32_matmul_precision\n",
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/callbacks/model_checkpoint.py:615: UserWarning: Checkpoint directory /workspace/vime/temporary_ckpt_data/pretraining exists and is not empty.\n",
      "  rank_zero_warn(f\"Checkpoint directory {dirpath} exists and is not empty.\")\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "\n",
      "  | Name                      | Type             | Params\n",
      "---------------------------------------------------------------\n",
      "0 | model                     | VIME             | 88.5 K\n",
      "1 | pretraining_mask_loss     | BCELoss          | 0     \n",
      "2 | pretraining_feature_loss1 | CrossEntropyLoss | 0     \n",
      "3 | pretraining_feature_loss2 | MSELoss          | 0     \n",
      "4 | consistency_loss          | MSELoss          | 0     \n",
      "5 | loss_fn                   | MSELoss          | 0     \n",
      "---------------------------------------------------------------\n",
      "88.5 K    Trainable params\n",
      "0         Non-trainable params\n",
      "88.5 K    Total params\n",
      "0.354     Total estimated model params size (MB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                                                                           \r"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/loops/fit_loop.py:280: PossibleUserWarning: The number of training batches (6) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 27: 100%|██████████| 6/6 [00:03<00:00,  1.80it/s, v_num=510, train_loss=34.10, val_loss=34.50]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Global seed set to 0\n",
      "/workspace/vime/data_utils.py:134: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.\n",
      "  X = X.append(unlabeled_data)\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/callbacks/model_checkpoint.py:615: UserWarning: Checkpoint directory /workspace/vime/temporary_ckpt_data exists and is not empty.\n",
      "  rank_zero_warn(f\"Checkpoint directory {dirpath} exists and is not empty.\")\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "\n",
      "  | Name                      | Type             | Params\n",
      "---------------------------------------------------------------\n",
      "0 | model                     | VIME             | 88.5 K\n",
      "1 | pretraining_mask_loss     | BCELoss          | 0     \n",
      "2 | pretraining_feature_loss1 | CrossEntropyLoss | 0     \n",
      "3 | pretraining_feature_loss2 | MSELoss          | 0     \n",
      "4 | consistency_loss          | MSELoss          | 0     \n",
      "5 | loss_fn                   | MSELoss          | 0     \n",
      "---------------------------------------------------------------\n",
      "88.5 K    Trainable params\n",
      "0         Non-trainable params\n",
      "88.5 K    Total params\n",
      "0.354     Total estimated model params size (MB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                                                                            \r"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/loops/fit_loop.py:280: PossibleUserWarning: The number of training batches (6) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.88it/s, v_num=511, train_loss=30.70, train_mean_squared_error=17.30, val_mean_squared_error=51.30, val_loss=52.60]"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "`Trainer.fit` stopped: `max_epochs=30` reached.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.87it/s, v_num=511, train_loss=30.70, train_mean_squared_error=17.30, val_mean_squared_error=51.30, val_loss=52.60]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Global seed set to 0\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|██████████| 3/3 [00:00<00:00, 69.07it/s]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[I 2023-12-19 15:33:45,370] Trial 0 finished with value: 39.38805838838944 and parameters: {'predictor_hidden_dim': 288, 'p_m': 0.6721514930979355, 'alpha1': 3.053540542751055, 'alpha2': 2.769927596684795, 'beta': 1.270964398016714, 'K': 14, 'lr': 0.021935601842008354, 'gamma': 0.8580070506647678, 'step_size': 97}. Best is trial 0 with value: 39.38805838838944.\n",
      "Global seed set to 0\n",
      "/tmp/ipykernel_395798/2137398929.py:18: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.\n",
      "  train_ds = VIMESelfDataset(X_train.append(X_unlabeled), data_hparams, continuous_cols, category_cols)\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/callbacks/model_checkpoint.py:615: UserWarning: Checkpoint directory /workspace/vime/temporary_ckpt_data/pretraining exists and is not empty.\n",
      "  rank_zero_warn(f\"Checkpoint directory {dirpath} exists and is not empty.\")\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "\n",
      "  | Name                      | Type             | Params\n",
      "---------------------------------------------------------------\n",
      "0 | model                     | VIME             | 46.6 K\n",
      "1 | pretraining_mask_loss     | BCELoss          | 0     \n",
      "2 | pretraining_feature_loss1 | CrossEntropyLoss | 0     \n",
      "3 | pretraining_feature_loss2 | MSELoss          | 0     \n",
      "4 | consistency_loss          | MSELoss          | 0     \n",
      "5 | loss_fn                   | MSELoss          | 0     \n",
      "---------------------------------------------------------------\n",
      "46.6 K    Trainable params\n",
      "0         Non-trainable params\n",
      "46.6 K    Total params\n",
      "0.186     Total estimated model params size (MB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                                                                            \r"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/loops/fit_loop.py:280: PossibleUserWarning: The number of training batches (6) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.92it/s, v_num=513, train_loss=30.30, val_loss=30.80]"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "`Trainer.fit` stopped: `max_epochs=30` reached.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.92it/s, v_num=513, train_loss=30.30, val_loss=30.80]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Global seed set to 0\n",
      "/workspace/vime/data_utils.py:134: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.\n",
      "  X = X.append(unlabeled_data)\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/callbacks/model_checkpoint.py:615: UserWarning: Checkpoint directory /workspace/vime/temporary_ckpt_data exists and is not empty.\n",
      "  rank_zero_warn(f\"Checkpoint directory {dirpath} exists and is not empty.\")\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "\n",
      "  | Name                      | Type             | Params\n",
      "---------------------------------------------------------------\n",
      "0 | model                     | VIME             | 46.6 K\n",
      "1 | pretraining_mask_loss     | BCELoss          | 0     \n",
      "2 | pretraining_feature_loss1 | CrossEntropyLoss | 0     \n",
      "3 | pretraining_feature_loss2 | MSELoss          | 0     \n",
      "4 | consistency_loss          | MSELoss          | 0     \n",
      "5 | loss_fn                   | MSELoss          | 0     \n",
      "---------------------------------------------------------------\n",
      "46.6 K    Trainable params\n",
      "0         Non-trainable params\n",
      "46.6 K    Total params\n",
      "0.186     Total estimated model params size (MB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                                                                            \r"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/loops/fit_loop.py:280: PossibleUserWarning: The number of training batches (6) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.94it/s, v_num=514, train_loss=90.90, train_mean_squared_error=69.20, val_mean_squared_error=57.90, val_loss=57.30]"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "`Trainer.fit` stopped: `max_epochs=30` reached.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.93it/s, v_num=514, train_loss=90.90, train_mean_squared_error=69.20, val_mean_squared_error=57.90, val_loss=57.30]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Global seed set to 0\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|██████████| 3/3 [00:00<00:00, 70.23it/s] "
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[I 2023-12-19 15:37:08,479] Trial 1 finished with value: 57.913076579079586 and parameters: {'predictor_hidden_dim': 206, 'p_m': 0.7333800304661316, 'alpha1': 2.691585106789232, 'alpha2': 2.8834183493602685, 'beta': 2.776789914877983, 'K': 3, 'lr': 0.004447752055106882, 'gamma': 0.11718563782427686, 'step_size': 85}. Best is trial 0 with value: 39.38805838838944.\n",
      "Global seed set to 0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/tmp/ipykernel_395798/2137398929.py:18: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.\n",
      "  train_ds = VIMESelfDataset(X_train.append(X_unlabeled), data_hparams, continuous_cols, category_cols)\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/callbacks/model_checkpoint.py:615: UserWarning: Checkpoint directory /workspace/vime/temporary_ckpt_data/pretraining exists and is not empty.\n",
      "  rank_zero_warn(f\"Checkpoint directory {dirpath} exists and is not empty.\")\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "\n",
      "  | Name                      | Type             | Params\n",
      "---------------------------------------------------------------\n",
      "0 | model                     | VIME             | 169 K \n",
      "1 | pretraining_mask_loss     | BCELoss          | 0     \n",
      "2 | pretraining_feature_loss1 | CrossEntropyLoss | 0     \n",
      "3 | pretraining_feature_loss2 | MSELoss          | 0     \n",
      "4 | consistency_loss          | MSELoss          | 0     \n",
      "5 | loss_fn                   | MSELoss          | 0     \n",
      "---------------------------------------------------------------\n",
      "169 K     Trainable params\n",
      "0         Non-trainable params\n",
      "169 K     Total params\n",
      "0.676     Total estimated model params size (MB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                                                                            \r"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/loops/fit_loop.py:280: PossibleUserWarning: The number of training batches (6) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.99it/s, v_num=516, train_loss=54.80, val_loss=55.60]"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "`Trainer.fit` stopped: `max_epochs=30` reached.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.99it/s, v_num=516, train_loss=54.80, val_loss=55.60]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Global seed set to 0\n",
      "/workspace/vime/data_utils.py:134: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.\n",
      "  X = X.append(unlabeled_data)\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/callbacks/model_checkpoint.py:615: UserWarning: Checkpoint directory /workspace/vime/temporary_ckpt_data exists and is not empty.\n",
      "  rank_zero_warn(f\"Checkpoint directory {dirpath} exists and is not empty.\")\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "\n",
      "  | Name                      | Type             | Params\n",
      "---------------------------------------------------------------\n",
      "0 | model                     | VIME             | 169 K \n",
      "1 | pretraining_mask_loss     | BCELoss          | 0     \n",
      "2 | pretraining_feature_loss1 | CrossEntropyLoss | 0     \n",
      "3 | pretraining_feature_loss2 | MSELoss          | 0     \n",
      "4 | consistency_loss          | MSELoss          | 0     \n",
      "5 | loss_fn                   | MSELoss          | 0     \n",
      "---------------------------------------------------------------\n",
      "169 K     Trainable params\n",
      "0         Non-trainable params\n",
      "169 K     Total params\n",
      "0.676     Total estimated model params size (MB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                                                                            \r"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/loops/fit_loop.py:280: PossibleUserWarning: The number of training batches (6) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.72it/s, v_num=517, train_loss=66.40, train_mean_squared_error=46.50, val_mean_squared_error=49.90, val_loss=49.00]"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "`Trainer.fit` stopped: `max_epochs=30` reached.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.72it/s, v_num=517, train_loss=66.40, train_mean_squared_error=46.50, val_mean_squared_error=49.90, val_loss=49.00]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Global seed set to 0\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|██████████| 3/3 [00:00<00:00, 59.80it/s] \n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[I 2023-12-19 15:40:35,911] Trial 2 finished with value: 49.755526691753836 and parameters: {'predictor_hidden_dim': 402, 'p_m': 0.7960097185974554, 'alpha1': 4.895229876940544, 'alpha2': 4.015876964661945, 'beta': 1.3844380867587955, 'K': 16, 'lr': 0.0060018938508597675, 'gamma': 0.6439328681283952, 'step_size': 23}. Best is trial 0 with value: 39.38805838838944.\n",
      "Global seed set to 0\n",
      "/tmp/ipykernel_395798/2137398929.py:18: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.\n",
      "  train_ds = VIMESelfDataset(X_train.append(X_unlabeled), data_hparams, continuous_cols, category_cols)\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/callbacks/model_checkpoint.py:615: UserWarning: Checkpoint directory /workspace/vime/temporary_ckpt_data/pretraining exists and is not empty.\n",
      "  rank_zero_warn(f\"Checkpoint directory {dirpath} exists and is not empty.\")\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "\n",
      "  | Name                      | Type             | Params\n",
      "---------------------------------------------------------------\n",
      "0 | model                     | VIME             | 244 K \n",
      "1 | pretraining_mask_loss     | BCELoss          | 0     \n",
      "2 | pretraining_feature_loss1 | CrossEntropyLoss | 0     \n",
      "3 | pretraining_feature_loss2 | MSELoss          | 0     \n",
      "4 | consistency_loss          | MSELoss          | 0     \n",
      "5 | loss_fn                   | MSELoss          | 0     \n",
      "---------------------------------------------------------------\n",
      "244 K     Trainable params\n",
      "0         Non-trainable params\n",
      "244 K     Total params\n",
      "0.976     Total estimated model params size (MB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                                                                            \r"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/loops/fit_loop.py:280: PossibleUserWarning: The number of training batches (6) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 19: 100%|██████████| 6/6 [00:02<00:00,  2.00it/s, v_num=519, train_loss=23.90, val_loss=24.30]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Global seed set to 0\n",
      "/workspace/vime/data_utils.py:134: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.\n",
      "  X = X.append(unlabeled_data)\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/callbacks/model_checkpoint.py:615: UserWarning: Checkpoint directory /workspace/vime/temporary_ckpt_data exists and is not empty.\n",
      "  rank_zero_warn(f\"Checkpoint directory {dirpath} exists and is not empty.\")\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "\n",
      "  | Name                      | Type             | Params\n",
      "---------------------------------------------------------------\n",
      "0 | model                     | VIME             | 244 K \n",
      "1 | pretraining_mask_loss     | BCELoss          | 0     \n",
      "2 | pretraining_feature_loss1 | CrossEntropyLoss | 0     \n",
      "3 | pretraining_feature_loss2 | MSELoss          | 0     \n",
      "4 | consistency_loss          | MSELoss          | 0     \n",
      "5 | loss_fn                   | MSELoss          | 0     \n",
      "---------------------------------------------------------------\n",
      "244 K     Trainable params\n",
      "0         Non-trainable params\n",
      "244 K     Total params\n",
      "0.976     Total estimated model params size (MB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                                                                            \r"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/loops/fit_loop.py:280: PossibleUserWarning: The number of training batches (6) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.86it/s, v_num=520, train_loss=40.90, train_mean_squared_error=24.00, val_mean_squared_error=46.50, val_loss=45.60]   "
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "`Trainer.fit` stopped: `max_epochs=30` reached.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.86it/s, v_num=520, train_loss=40.90, train_mean_squared_error=24.00, val_mean_squared_error=46.50, val_loss=45.60]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Global seed set to 0\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|██████████| 3/3 [00:00<00:00, 74.43it/s] \n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[I 2023-12-19 15:43:29,189] Trial 3 finished with value: 44.89095514772699 and parameters: {'predictor_hidden_dim': 485, 'p_m': 0.5174786574000574, 'alpha1': 2.1318435059535656, 'alpha2': 1.3963224993126724, 'beta': 2.32270106830265, 'K': 10, 'lr': 0.02846485404854556, 'gamma': 0.11597133037090188, 'step_size': 66}. Best is trial 0 with value: 39.38805838838944.\n",
      "Global seed set to 0\n",
      "/tmp/ipykernel_395798/2137398929.py:18: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.\n",
      "  train_ds = VIMESelfDataset(X_train.append(X_unlabeled), data_hparams, continuous_cols, category_cols)\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/callbacks/model_checkpoint.py:615: UserWarning: Checkpoint directory /workspace/vime/temporary_ckpt_data/pretraining exists and is not empty.\n",
      "  rank_zero_warn(f\"Checkpoint directory {dirpath} exists and is not empty.\")\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "\n",
      "  | Name                      | Type             | Params\n",
      "---------------------------------------------------------------\n",
      "0 | model                     | VIME             | 108 K \n",
      "1 | pretraining_mask_loss     | BCELoss          | 0     \n",
      "2 | pretraining_feature_loss1 | CrossEntropyLoss | 0     \n",
      "3 | pretraining_feature_loss2 | MSELoss          | 0     \n",
      "4 | consistency_loss          | MSELoss          | 0     \n",
      "5 | loss_fn                   | MSELoss          | 0     \n",
      "---------------------------------------------------------------\n",
      "108 K     Trainable params\n",
      "0         Non-trainable params\n",
      "108 K     Total params\n",
      "0.434     Total estimated model params size (MB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                                                                            \r"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/loops/fit_loop.py:280: PossibleUserWarning: The number of training batches (6) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 27: 100%|██████████| 6/6 [00:03<00:00,  1.80it/s, v_num=522, train_loss=52.30, val_loss=53.20]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Global seed set to 0\n",
      "/workspace/vime/data_utils.py:134: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.\n",
      "  X = X.append(unlabeled_data)\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/callbacks/model_checkpoint.py:615: UserWarning: Checkpoint directory /workspace/vime/temporary_ckpt_data exists and is not empty.\n",
      "  rank_zero_warn(f\"Checkpoint directory {dirpath} exists and is not empty.\")\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "\n",
      "  | Name                      | Type             | Params\n",
      "---------------------------------------------------------------\n",
      "0 | model                     | VIME             | 108 K \n",
      "1 | pretraining_mask_loss     | BCELoss          | 0     \n",
      "2 | pretraining_feature_loss1 | CrossEntropyLoss | 0     \n",
      "3 | pretraining_feature_loss2 | MSELoss          | 0     \n",
      "4 | consistency_loss          | MSELoss          | 0     \n",
      "5 | loss_fn                   | MSELoss          | 0     \n",
      "---------------------------------------------------------------\n",
      "108 K     Trainable params\n",
      "0         Non-trainable params\n",
      "108 K     Total params\n",
      "0.434     Total estimated model params size (MB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                                                                            \r"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/loops/fit_loop.py:280: PossibleUserWarning: The number of training batches (6) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.74it/s, v_num=523, train_loss=32.20, train_mean_squared_error=17.00, val_mean_squared_error=40.50, val_loss=38.70]"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "`Trainer.fit` stopped: `max_epochs=30` reached.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.74it/s, v_num=523, train_loss=32.20, train_mean_squared_error=17.00, val_mean_squared_error=40.50, val_loss=38.70]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Global seed set to 0\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|██████████| 3/3 [00:00<00:00, 72.66it/s] "
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[I 2023-12-19 15:46:49,975] Trial 4 finished with value: 38.92002251030006 and parameters: {'predictor_hidden_dim': 320, 'p_m': 0.5935471974998056, 'alpha1': 4.724365584721658, 'alpha2': 3.440919465607069, 'beta': 1.078523701721358, 'K': 10, 'lr': 0.034911796676770517, 'gamma': 0.15119165088487935, 'step_size': 70}. Best is trial 4 with value: 38.92002251030006.\n",
      "Global seed set to 0\n",
      "/tmp/ipykernel_395798/2137398929.py:18: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.\n",
      "  train_ds = VIMESelfDataset(X_train.append(X_unlabeled), data_hparams, continuous_cols, category_cols)\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/callbacks/model_checkpoint.py:615: UserWarning: Checkpoint directory /workspace/vime/temporary_ckpt_data/pretraining exists and is not empty.\n",
      "  rank_zero_warn(f\"Checkpoint directory {dirpath} exists and is not empty.\")\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "\n",
      "  | Name                      | Type             | Params\n",
      "---------------------------------------------------------------\n",
      "0 | model                     | VIME             | 128 K \n",
      "1 | pretraining_mask_loss     | BCELoss          | 0     \n",
      "2 | pretraining_feature_loss1 | CrossEntropyLoss | 0     \n",
      "3 | pretraining_feature_loss2 | MSELoss          | 0     \n",
      "4 | consistency_loss          | MSELoss          | 0     \n",
      "5 | loss_fn                   | MSELoss          | 0     \n",
      "---------------------------------------------------------------\n",
      "128 K     Trainable params\n",
      "0         Non-trainable params\n",
      "128 K     Total params\n",
      "0.513     Total estimated model params size (MB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "                                                                            \r"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/loops/fit_loop.py:280: PossibleUserWarning: The number of training batches (6) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.81it/s, v_num=525, train_loss=8.330, val_loss=8.520]"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "`Trainer.fit` stopped: `max_epochs=30` reached.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.81it/s, v_num=525, train_loss=8.330, val_loss=8.520]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Global seed set to 0\n",
      "/workspace/vime/data_utils.py:134: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.\n",
      "  X = X.append(unlabeled_data)\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/callbacks/model_checkpoint.py:615: UserWarning: Checkpoint directory /workspace/vime/temporary_ckpt_data exists and is not empty.\n",
      "  rank_zero_warn(f\"Checkpoint directory {dirpath} exists and is not empty.\")\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "\n",
      "  | Name                      | Type             | Params\n",
      "---------------------------------------------------------------\n",
      "0 | model                     | VIME             | 128 K \n",
      "1 | pretraining_mask_loss     | BCELoss          | 0     \n",
      "2 | pretraining_feature_loss1 | CrossEntropyLoss | 0     \n",
      "3 | pretraining_feature_loss2 | MSELoss          | 0     \n",
      "4 | consistency_loss          | MSELoss          | 0     \n",
      "5 | loss_fn                   | MSELoss          | 0     \n",
      "---------------------------------------------------------------\n",
      "128 K     Trainable params\n",
      "0         Non-trainable params\n",
      "128 K     Total params\n",
      "0.513     Total estimated model params size (MB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                                                                            \r"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/loops/fit_loop.py:280: PossibleUserWarning: The number of training batches (6) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.81it/s, v_num=526, train_loss=17.30, train_mean_squared_error=8.180, val_mean_squared_error=38.50, val_loss=34.20]"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "`Trainer.fit` stopped: `max_epochs=30` reached.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.81it/s, v_num=526, train_loss=17.30, train_mean_squared_error=8.180, val_mean_squared_error=38.50, val_loss=34.20]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Global seed set to 0\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|██████████| 3/3 [00:00<00:00, 64.32it/s] \n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[I 2023-12-19 15:50:18,474] Trial 5 finished with value: 32.349984983121324 and parameters: {'predictor_hidden_dim': 349, 'p_m': 0.2683060488590727, 'alpha1': 0.7317388585087812, 'alpha2': 1.6455989195285012, 'beta': 1.0911323128278678, 'K': 12, 'lr': 0.021986215521769784, 'gamma': 0.9401177623503422, 'step_size': 19}. Best is trial 5 with value: 32.349984983121324.\n",
      "Global seed set to 0\n",
      "/tmp/ipykernel_395798/2137398929.py:18: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.\n",
      "  train_ds = VIMESelfDataset(X_train.append(X_unlabeled), data_hparams, continuous_cols, category_cols)\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/callbacks/model_checkpoint.py:615: UserWarning: Checkpoint directory /workspace/vime/temporary_ckpt_data/pretraining exists and is not empty.\n",
      "  rank_zero_warn(f\"Checkpoint directory {dirpath} exists and is not empty.\")\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "\n",
      "  | Name                      | Type             | Params\n",
      "---------------------------------------------------------------\n",
      "0 | model                     | VIME             | 16.8 K\n",
      "1 | pretraining_mask_loss     | BCELoss          | 0     \n",
      "2 | pretraining_feature_loss1 | CrossEntropyLoss | 0     \n",
      "3 | pretraining_feature_loss2 | MSELoss          | 0     \n",
      "4 | consistency_loss          | MSELoss          | 0     \n",
      "5 | loss_fn                   | MSELoss          | 0     \n",
      "---------------------------------------------------------------\n",
      "16.8 K    Trainable params\n",
      "0         Non-trainable params\n",
      "16.8 K    Total params\n",
      "0.067     Total estimated model params size (MB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                                                                            \r"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/loops/fit_loop.py:280: PossibleUserWarning: The number of training batches (6) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:02<00:00,  2.12it/s, v_num=528, train_loss=36.30, val_loss=36.80]"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "`Trainer.fit` stopped: `max_epochs=30` reached.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:02<00:00,  2.11it/s, v_num=528, train_loss=36.30, val_loss=36.80]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Global seed set to 0\n",
      "/workspace/vime/data_utils.py:134: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.\n",
      "  X = X.append(unlabeled_data)\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/callbacks/model_checkpoint.py:615: UserWarning: Checkpoint directory /workspace/vime/temporary_ckpt_data exists and is not empty.\n",
      "  rank_zero_warn(f\"Checkpoint directory {dirpath} exists and is not empty.\")\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "\n",
      "  | Name                      | Type             | Params\n",
      "---------------------------------------------------------------\n",
      "0 | model                     | VIME             | 16.8 K\n",
      "1 | pretraining_mask_loss     | BCELoss          | 0     \n",
      "2 | pretraining_feature_loss1 | CrossEntropyLoss | 0     \n",
      "3 | pretraining_feature_loss2 | MSELoss          | 0     \n",
      "4 | consistency_loss          | MSELoss          | 0     \n",
      "5 | loss_fn                   | MSELoss          | 0     \n",
      "---------------------------------------------------------------\n",
      "16.8 K    Trainable params\n",
      "0         Non-trainable params\n",
      "16.8 K    Total params\n",
      "0.067     Total estimated model params size (MB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                                                                            \r"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/loops/fit_loop.py:280: PossibleUserWarning: The number of training batches (6) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.82it/s, v_num=529, train_loss=39.90, train_mean_squared_error=27.70, val_mean_squared_error=40.10, val_loss=37.10]"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "`Trainer.fit` stopped: `max_epochs=30` reached.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.82it/s, v_num=529, train_loss=39.90, train_mean_squared_error=27.70, val_mean_squared_error=40.10, val_loss=37.10]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Global seed set to 0\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|██████████| 3/3 [00:00<00:00, 80.39it/s] \n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[I 2023-12-19 15:53:40,974] Trial 6 finished with value: 40.14257828455112 and parameters: {'predictor_hidden_dim': 119, 'p_m': 0.22904761430799703, 'alpha1': 3.3002307947804526, 'alpha2': 1.3411288524449325, 'beta': 1.3989323185689189, 'K': 6, 'lr': 0.008032582223911433, 'gamma': 0.19381886998965936, 'step_size': 69}. Best is trial 5 with value: 32.349984983121324.\n",
      "Global seed set to 0\n",
      "/tmp/ipykernel_395798/2137398929.py:18: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.\n",
      "  train_ds = VIMESelfDataset(X_train.append(X_unlabeled), data_hparams, continuous_cols, category_cols)\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/callbacks/model_checkpoint.py:615: UserWarning: Checkpoint directory /workspace/vime/temporary_ckpt_data/pretraining exists and is not empty.\n",
      "  rank_zero_warn(f\"Checkpoint directory {dirpath} exists and is not empty.\")\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "\n",
      "  | Name                      | Type             | Params\n",
      "---------------------------------------------------------------\n",
      "0 | model                     | VIME             | 9.1 K \n",
      "1 | pretraining_mask_loss     | BCELoss          | 0     \n",
      "2 | pretraining_feature_loss1 | CrossEntropyLoss | 0     \n",
      "3 | pretraining_feature_loss2 | MSELoss          | 0     \n",
      "4 | consistency_loss          | MSELoss          | 0     \n",
      "5 | loss_fn                   | MSELoss          | 0     \n",
      "---------------------------------------------------------------\n",
      "9.1 K     Trainable params\n",
      "0         Non-trainable params\n",
      "9.1 K     Total params\n",
      "0.036     Total estimated model params size (MB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                                                                            \r"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/loops/fit_loop.py:280: PossibleUserWarning: The number of training batches (6) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.99it/s, v_num=531, train_loss=21.20, val_loss=21.50]"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "`Trainer.fit` stopped: `max_epochs=30` reached.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.99it/s, v_num=531, train_loss=21.20, val_loss=21.50]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Global seed set to 0\n",
      "/workspace/vime/data_utils.py:134: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.\n",
      "  X = X.append(unlabeled_data)\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/callbacks/model_checkpoint.py:615: UserWarning: Checkpoint directory /workspace/vime/temporary_ckpt_data exists and is not empty.\n",
      "  rank_zero_warn(f\"Checkpoint directory {dirpath} exists and is not empty.\")\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "\n",
      "  | Name                      | Type             | Params\n",
      "---------------------------------------------------------------\n",
      "0 | model                     | VIME             | 9.1 K \n",
      "1 | pretraining_mask_loss     | BCELoss          | 0     \n",
      "2 | pretraining_feature_loss1 | CrossEntropyLoss | 0     \n",
      "3 | pretraining_feature_loss2 | MSELoss          | 0     \n",
      "4 | consistency_loss          | MSELoss          | 0     \n",
      "5 | loss_fn                   | MSELoss          | 0     \n",
      "---------------------------------------------------------------\n",
      "9.1 K     Trainable params\n",
      "0         Non-trainable params\n",
      "9.1 K     Total params\n",
      "0.036     Total estimated model params size (MB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                                                                            \r"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/loops/fit_loop.py:280: PossibleUserWarning: The number of training batches (6) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.88it/s, v_num=532, train_loss=24.90, train_mean_squared_error=21.10, val_mean_squared_error=35.60, val_loss=32.20]"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "`Trainer.fit` stopped: `max_epochs=30` reached.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.88it/s, v_num=532, train_loss=24.90, train_mean_squared_error=21.10, val_mean_squared_error=35.60, val_loss=32.20]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Global seed set to 0\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|██████████| 3/3 [00:00<00:00, 46.81it/s] \n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[I 2023-12-19 15:57:08,724] Trial 7 finished with value: 35.58855043478925 and parameters: {'predictor_hidden_dim': 84, 'p_m': 0.2572658893440428, 'alpha1': 1.9067533362387243, 'alpha2': 4.1228668262548815, 'beta': 0.2913038273791838, 'K': 17, 'lr': 0.004895310553908757, 'gamma': 0.9299905452613864, 'step_size': 52}. Best is trial 5 with value: 32.349984983121324.\n",
      "Global seed set to 0\n",
      "/tmp/ipykernel_395798/2137398929.py:18: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.\n",
      "  train_ds = VIMESelfDataset(X_train.append(X_unlabeled), data_hparams, continuous_cols, category_cols)\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/callbacks/model_checkpoint.py:615: UserWarning: Checkpoint directory /workspace/vime/temporary_ckpt_data/pretraining exists and is not empty.\n",
      "  rank_zero_warn(f\"Checkpoint directory {dirpath} exists and is not empty.\")\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "\n",
      "  | Name                      | Type             | Params\n",
      "---------------------------------------------------------------\n",
      "0 | model                     | VIME             | 260 K \n",
      "1 | pretraining_mask_loss     | BCELoss          | 0     \n",
      "2 | pretraining_feature_loss1 | CrossEntropyLoss | 0     \n",
      "3 | pretraining_feature_loss2 | MSELoss          | 0     \n",
      "4 | consistency_loss          | MSELoss          | 0     \n",
      "5 | loss_fn                   | MSELoss          | 0     \n",
      "---------------------------------------------------------------\n",
      "260 K     Trainable params\n",
      "0         Non-trainable params\n",
      "260 K     Total params\n",
      "1.041     Total estimated model params size (MB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                                                                           \r"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/loops/fit_loop.py:280: PossibleUserWarning: The number of training batches (6) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:02<00:00,  2.08it/s, v_num=534, train_loss=41.30, val_loss=42.10]"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "`Trainer.fit` stopped: `max_epochs=30` reached.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:02<00:00,  2.07it/s, v_num=534, train_loss=41.30, val_loss=42.10]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Global seed set to 0\n",
      "/workspace/vime/data_utils.py:134: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.\n",
      "  X = X.append(unlabeled_data)\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/callbacks/model_checkpoint.py:615: UserWarning: Checkpoint directory /workspace/vime/temporary_ckpt_data exists and is not empty.\n",
      "  rank_zero_warn(f\"Checkpoint directory {dirpath} exists and is not empty.\")\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "\n",
      "  | Name                      | Type             | Params\n",
      "---------------------------------------------------------------\n",
      "0 | model                     | VIME             | 260 K \n",
      "1 | pretraining_mask_loss     | BCELoss          | 0     \n",
      "2 | pretraining_feature_loss1 | CrossEntropyLoss | 0     \n",
      "3 | pretraining_feature_loss2 | MSELoss          | 0     \n",
      "4 | consistency_loss          | MSELoss          | 0     \n",
      "5 | loss_fn                   | MSELoss          | 0     \n",
      "---------------------------------------------------------------\n",
      "260 K     Trainable params\n",
      "0         Non-trainable params\n",
      "260 K     Total params\n",
      "1.041     Total estimated model params size (MB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                                                                            \r"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/loops/fit_loop.py:280: PossibleUserWarning: The number of training batches (6) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 23: 100%|██████████| 6/6 [00:03<00:00,  1.77it/s, v_num=535, train_loss=28.20, train_mean_squared_error=15.90, val_mean_squared_error=36.20, val_loss=34.20]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Global seed set to 0\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|██████████| 3/3 [00:00<00:00, 53.53it/s] \n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[I 2023-12-19 16:00:12,876] Trial 8 finished with value: 35.67752130200119 and parameters: {'predictor_hidden_dim': 501, 'p_m': 0.5838764157960368, 'alpha1': 3.7223915390516784, 'alpha2': 0.2920201820461713, 'beta': 0.8484208877292287, 'K': 4, 'lr': 0.014877395856355032, 'gamma': 0.20091856111110745, 'step_size': 38}. Best is trial 5 with value: 32.349984983121324.\n",
      "Global seed set to 0\n",
      "/tmp/ipykernel_395798/2137398929.py:18: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.\n",
      "  train_ds = VIMESelfDataset(X_train.append(X_unlabeled), data_hparams, continuous_cols, category_cols)\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/callbacks/model_checkpoint.py:615: UserWarning: Checkpoint directory /workspace/vime/temporary_ckpt_data/pretraining exists and is not empty.\n",
      "  rank_zero_warn(f\"Checkpoint directory {dirpath} exists and is not empty.\")\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "\n",
      "  | Name                      | Type             | Params\n",
      "---------------------------------------------------------------\n",
      "0 | model                     | VIME             | 53.2 K\n",
      "1 | pretraining_mask_loss     | BCELoss          | 0     \n",
      "2 | pretraining_feature_loss1 | CrossEntropyLoss | 0     \n",
      "3 | pretraining_feature_loss2 | MSELoss          | 0     \n",
      "4 | consistency_loss          | MSELoss          | 0     \n",
      "5 | loss_fn                   | MSELoss          | 0     \n",
      "---------------------------------------------------------------\n",
      "53.2 K    Trainable params\n",
      "0         Non-trainable params\n",
      "53.2 K    Total params\n",
      "0.213     Total estimated model params size (MB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                                                                            \r"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/loops/fit_loop.py:280: PossibleUserWarning: The number of training batches (6) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.85it/s, v_num=537, train_loss=38.10, val_loss=38.80]"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "`Trainer.fit` stopped: `max_epochs=30` reached.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.85it/s, v_num=537, train_loss=38.10, val_loss=38.80]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Global seed set to 0\n",
      "/workspace/vime/data_utils.py:134: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.\n",
      "  X = X.append(unlabeled_data)\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/callbacks/model_checkpoint.py:615: UserWarning: Checkpoint directory /workspace/vime/temporary_ckpt_data exists and is not empty.\n",
      "  rank_zero_warn(f\"Checkpoint directory {dirpath} exists and is not empty.\")\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "\n",
      "  | Name                      | Type             | Params\n",
      "---------------------------------------------------------------\n",
      "0 | model                     | VIME             | 53.2 K\n",
      "1 | pretraining_mask_loss     | BCELoss          | 0     \n",
      "2 | pretraining_feature_loss1 | CrossEntropyLoss | 0     \n",
      "3 | pretraining_feature_loss2 | MSELoss          | 0     \n",
      "4 | consistency_loss          | MSELoss          | 0     \n",
      "5 | loss_fn                   | MSELoss          | 0     \n",
      "---------------------------------------------------------------\n",
      "53.2 K    Trainable params\n",
      "0         Non-trainable params\n",
      "53.2 K    Total params\n",
      "0.213     Total estimated model params size (MB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                                                                            \r"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/loops/fit_loop.py:280: PossibleUserWarning: The number of training batches (6) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.73it/s, v_num=538, train_loss=23.20, train_mean_squared_error=16.10, val_mean_squared_error=33.90, val_loss=31.20]"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "`Trainer.fit` stopped: `max_epochs=30` reached.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.73it/s, v_num=538, train_loss=23.20, train_mean_squared_error=16.10, val_mean_squared_error=33.90, val_loss=31.20]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Global seed set to 0\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|██████████| 3/3 [00:00<00:00, 51.73it/s] \n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[I 2023-12-19 16:03:38,983] Trial 9 finished with value: 33.86572824976366 and parameters: {'predictor_hidden_dim': 221, 'p_m': 0.1513179970790275, 'alpha1': 3.4931133849130975, 'alpha2': 2.8763471256122184, 'beta': 0.7961684728183362, 'K': 11, 'lr': 0.00478763148684624, 'gamma': 0.5895545212227524, 'step_size': 94}. Best is trial 5 with value: 32.349984983121324.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Number of finished trials:  10\n",
      "Best trial:\n",
      "  MSE: 32.349984983121324\n",
      "  Best hyperparameters:  FrozenTrial(number=5, state=TrialState.COMPLETE, values=[32.349984983121324], datetime_start=datetime.datetime(2023, 12, 19, 15, 46, 49, 976058), datetime_complete=datetime.datetime(2023, 12, 19, 15, 50, 18, 474094), params={'predictor_hidden_dim': 349, 'p_m': 0.2683060488590727, 'alpha1': 0.7317388585087812, 'alpha2': 1.6455989195285012, 'beta': 1.0911323128278678, 'K': 12, 'lr': 0.021986215521769784, 'gamma': 0.9401177623503422, 'step_size': 19}, user_attrs={}, system_attrs={}, intermediate_values={}, distributions={'predictor_hidden_dim': IntDistribution(high=512, log=False, low=16, step=1), 'p_m': FloatDistribution(high=0.9, log=False, low=0.1, step=None), 'alpha1': FloatDistribution(high=5.0, log=False, low=0.1, step=None), 'alpha2': FloatDistribution(high=5.0, log=False, low=0.1, step=None), 'beta': FloatDistribution(high=3.0, log=False, low=0.0, step=None), 'K': IntDistribution(high=20, log=False, low=2, step=1), 'lr': FloatDistribution(high=0.05, log=False, low=0.0001, step=None), 'gamma': FloatDistribution(high=0.95, log=False, low=0.1, step=None), 'step_size': IntDistribution(high=100, log=False, low=10, step=1)}, trial_id=5, value=None)\n"
     ]
    }
   ],
   "source": [
    "study = optuna.create_study(direction=\"minimize\",sampler=optuna.samplers.TPESampler(seed=random_seed))\n",
    "study.optimize(objective, n_trials=10, show_progress_bar=False)\n",
    "\n",
    "print(\"Number of finished trials: \", len(study.trials))\n",
    "print(\"Best trial:\")\n",
    "\n",
    "\n",
    "trial = study.best_trial\n",
    "\n",
    "print(\"  MSE: {}\".format(trial.value))\n",
    "print(\"  Best hyperparameters: \", trial)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Global seed set to 0\n",
      "/tmp/ipykernel_395798/2137398929.py:18: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.\n",
      "  train_ds = VIMESelfDataset(X_train.append(X_unlabeled), data_hparams, continuous_cols, category_cols)\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/callbacks/model_checkpoint.py:615: UserWarning: Checkpoint directory /workspace/vime/temporary_ckpt_data/pretraining exists and is not empty.\n",
      "  rank_zero_warn(f\"Checkpoint directory {dirpath} exists and is not empty.\")\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "\n",
      "  | Name                      | Type             | Params\n",
      "---------------------------------------------------------------\n",
      "0 | model                     | VIME             | 128 K \n",
      "1 | pretraining_mask_loss     | BCELoss          | 0     \n",
      "2 | pretraining_feature_loss1 | CrossEntropyLoss | 0     \n",
      "3 | pretraining_feature_loss2 | MSELoss          | 0     \n",
      "4 | consistency_loss          | MSELoss          | 0     \n",
      "5 | loss_fn                   | MSELoss          | 0     \n",
      "---------------------------------------------------------------\n",
      "128 K     Trainable params\n",
      "0         Non-trainable params\n",
      "128 K     Total params\n",
      "0.513     Total estimated model params size (MB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                                                                            \r"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/loops/fit_loop.py:280: PossibleUserWarning: The number of training batches (6) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.98it/s, v_num=540, train_loss=8.330, val_loss=8.520]"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "`Trainer.fit` stopped: `max_epochs=30` reached.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.98it/s, v_num=540, train_loss=8.330, val_loss=8.520]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Global seed set to 0\n",
      "/workspace/vime/data_utils.py:134: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.\n",
      "  X = X.append(unlabeled_data)\n",
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/callbacks/model_checkpoint.py:615: UserWarning: Checkpoint directory /workspace/vime/temporary_ckpt_data exists and is not empty.\n",
      "  rank_zero_warn(f\"Checkpoint directory {dirpath} exists and is not empty.\")\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "\n",
      "  | Name                      | Type             | Params\n",
      "---------------------------------------------------------------\n",
      "0 | model                     | VIME             | 128 K \n",
      "1 | pretraining_mask_loss     | BCELoss          | 0     \n",
      "2 | pretraining_feature_loss1 | CrossEntropyLoss | 0     \n",
      "3 | pretraining_feature_loss2 | MSELoss          | 0     \n",
      "4 | consistency_loss          | MSELoss          | 0     \n",
      "5 | loss_fn                   | MSELoss          | 0     \n",
      "---------------------------------------------------------------\n",
      "128 K     Trainable params\n",
      "0         Non-trainable params\n",
      "128 K     Total params\n",
      "0.513     Total estimated model params size (MB)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                                                                            \r"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/py38/lib/python3.8/site-packages/pytorch_lightning/loops/fit_loop.py:280: PossibleUserWarning: The number of training batches (6) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.73it/s, v_num=541, train_loss=17.30, train_mean_squared_error=8.180, val_mean_squared_error=38.50, val_loss=34.20]"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "`Trainer.fit` stopped: `max_epochs=30` reached.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: 100%|██████████| 6/6 [00:03<00:00,  1.73it/s, v_num=541, train_loss=17.30, train_mean_squared_error=8.180, val_mean_squared_error=38.50, val_loss=34.20]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Global seed set to 0\n"
     ]
    }
   ],
   "source": [
    "model_hparams = {\n",
    "            \"encoder_dim\" : data.shape[1],\n",
    "            \"predictor_hidden_dim\" : None,\n",
    "            \"predictor_output_dim\" : 1,\n",
    "            'alpha1' : None,\n",
    "            'alpha2' : None,\n",
    "            'beta' : None,\n",
    "            'K' : None\n",
    "        }\n",
    "        \n",
    "data_hparams = {\n",
    "        \"K\" : None,\n",
    "        \"p_m\" : None\n",
    "}\n",
    "optim_hparams = {\n",
    "        \"lr\" : None\n",
    "}\n",
    "scheduler_hparams = {\n",
    "        'gamma' : None,\n",
    "        'step_size' : None\n",
    "}\n",
    "\n",
    "for k, v in study.best_trial.params.items():\n",
    "        if k in model_hparams.keys():\n",
    "                model_hparams[k] = study.best_trial.params[k]\n",
    "        if k in data_hparams.keys():\n",
    "                data_hparams[k] = study.best_trial.params[k]\n",
    "        if k in optim_hparams.keys():\n",
    "                optim_hparams[k] = study.best_trial.params[k]\n",
    "        if k in scheduler_hparams.keys():\n",
    "                scheduler_hparams[k] = study.best_trial.params[k]\n",
    "\n",
    "\n",
    "from pl_vime import PLVIME\n",
    "pl_vime = PLVIME(model_hparams, \"Adam\", optim_hparams, \"StepLR\", scheduler_hparams, \n",
    "       num_categoricals, num_continuous, -1, loss_fn,\n",
    "       MSEScorer(\"mean_squared_error\"), random_seed)\n",
    "pl_vime = fit_model(pl_vime, data_hparams)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "GPU available: True (cuda), used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|██████████| 3/3 [00:00<00:00, 76.57it/s] \n"
     ]
    }
   ],
   "source": [
    "trainer = Trainer(\n",
    "                    devices = gpus,\n",
    "                    accelerator = \"cuda\" if len(gpus) >= 1 else 'cpu',\n",
    "                    max_epochs = max_epochs,\n",
    "                    num_sanity_val_steps = 2,\n",
    "                    callbacks = None,\n",
    "    )\n",
    "test_ds = VIMERegressionDataset(X_valid, y_valid.values, data_hparams, None, continuous_cols, category_cols)\n",
    "test_dl = DataLoader(test_ds, batch_size, shuffle=False, sampler = SequentialSampler(test_ds), num_workers=n_jobs)\n",
    "\n",
    "preds = trainer.predict(pl_vime, test_dl)\n",
    "\n",
    "preds = np.concatenate([out.cpu().numpy() for out in preds])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "32.349984983121324"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from sklearn.metrics import mean_squared_error\n",
    "mean_squared_error(y_valid, preds.squeeze())"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py37",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
