{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 数据读取 (Data Loading)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/ai4learning/miniconda3/envs/wxy-cognitive/lib/python3.11/site-packages/torch/cuda/__init__.py:129: UserWarning: CUDA initialization: CUDA driver initialization failed, you might not have a CUDA gpu. (Triggered internally at /opt/conda/conda-bld/pytorch_1729647429097/work/c10/cuda/CUDAFunctions.cpp:108.)\n",
      "  return torch._C._cuda_getDeviceCount() > 0\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import pandas as pd\n",
    "from tqdm import tqdm\n",
    "from pathlib import Path\n",
    "from model import MCD\n",
    "from model.base import Module\n",
    "import json\n",
    "from sklearn.metrics import accuracy_score, roc_auc_score, root_mean_squared_error\n",
    "import numpy as np\n",
    "# alias torch.nn.functional as F for the loss functions used below\n",
    "from torch.nn import functional as F\n",
    "\n",
    "\n",
    "# Dataset selection; on-disk layout is ./dataset/<name>/{train,test}.csv\n",
    "DATASET = \"junyi\"\n",
    "\n",
    "DATASET_DIR = Path(\"./dataset\") / DATASET\n",
    "\n",
    "# Convert 1-based user/item ids in the CSVs to 0-based indices.\n",
    "data = pd.read_csv(DATASET_DIR / \"train.csv\")\n",
    "data[\"user_id\"] = data[\"user_id\"] - 1\n",
    "data[\"item_id\"] = data[\"item_id\"] - 1\n",
    "\n",
    "test_data = pd.read_csv(DATASET_DIR / \"test.csv\")\n",
    "test_data[\"user_id\"] = test_data[\"user_id\"] - 1\n",
    "test_data[\"item_id\"] = test_data[\"item_id\"] - 1\n",
    "\n",
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "# device = torch.device(\"cpu\")\n",
    "\n",
    "# Dataset statistics: counts of knowledge concepts, items, and users.\n",
    "describe = json.load(open(DATASET_DIR / \"describe.json\"))\n",
    "KNOW_NUM = describe[\"knowledge_num\"]\n",
    "ITEM_NUM = describe[\"item_num\"]\n",
    "USER_NUM = describe[\"user_num\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "class StableDRMCD(nn.Module):\n",
    "    \"\"\"Stable Doubly-Robust (Stable-DR) trainer built on two MCD models.\n",
    "\n",
    "    Wraps a prediction model and an error-imputation model (both MCD) plus\n",
    "    a learnable propensity-smoothing scalar ``mu``. Per batch, ``fit``\n",
    "    alternates three updates: imputation model, ``mu``, prediction model.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, num_users, num_items, latent_dim, *args, **kwargs):\n",
    "        super().__init__()\n",
    "        self.num_users = num_users\n",
    "        self.num_items = num_items\n",
    "        self.latent_dim = latent_dim\n",
    "        # Two structurally identical MCD models with separate parameters.\n",
    "        self.prediction_model = MCD(\n",
    "            self.num_users, self.num_items, self.latent_dim)\n",
    "        self.imputation = MCD(\n",
    "            self.num_users, self.num_items, self.latent_dim)\n",
    "\n",
    "        self.sigmoid = torch.nn.Sigmoid()\n",
    "        self.xent_func = torch.nn.BCELoss()\n",
    "\n",
    "    def fit(self, x, y, y_ips, mu=0, eta=1, stop=5,\n",
    "        num_epoch=1000, batch_size=128, lr=0.05, lr1=10, lamb=0,\n",
    "        tol=1e-4, G=1, verbose=False):\n",
    "        \"\"\"Train both models with the Stable-DR objective.\n",
    "\n",
    "        Args:\n",
    "            x: LongTensor [N, 2] of observed (user_id, item_id) pairs.\n",
    "            y: Tensor [N] of binary responses for those pairs.\n",
    "            y_ips: sample of responses used to estimate P(y=1); if None,\n",
    "                uniform propensities are used.\n",
    "            mu: initial value of the learnable propensity scalar.\n",
    "            eta: weight of the Stable-DR stabilizer term.\n",
    "            stop, tol: early-stopping patience and relative-loss tolerance.\n",
    "            lr, lr1, lamb: learning rates (models / mu) and weight decay.\n",
    "            G: counterfactual over-sampling factor per batch.\n",
    "        \"\"\"\n",
    "        # BUGFIX: build mu directly on the target device as a leaf tensor.\n",
    "        # The original Tensor -> requires_grad_() -> to(device) chain makes\n",
    "        # a non-leaf tensor on CUDA, which nn.Parameter cannot wrap.\n",
    "        mu = torch.nn.Parameter(torch.tensor([float(mu)], device=device))\n",
    "\n",
    "        optimizer_prediction = torch.optim.Adam(\n",
    "            self.prediction_model.parameters(), lr=lr, weight_decay=lamb)\n",
    "        optimizer_imputation = torch.optim.Adam(\n",
    "            self.imputation.parameters(), lr=lr, weight_decay=lamb)\n",
    "        optimizer_propensity = torch.optim.Adam(\n",
    "            [mu], lr=lr1, weight_decay=lamb)\n",
    "\n",
    "        last_loss = 1e9\n",
    "\n",
    "        # Binary observation indicator over the user x item matrix,\n",
    "        # flattened to length num_users * num_items.\n",
    "        observation = torch.zeros([self.num_users, self.num_items])\n",
    "        for i in range(len(x)):\n",
    "            observation[x[i][0], x[i][1]] = 1\n",
    "        observation = observation.reshape(self.num_users * self.num_items)\n",
    "        observation = observation.to(device)\n",
    "\n",
    "        # Flat matrix indices of observed positive responses (y == 1).\n",
    "        y1 = []\n",
    "        for i in range(len(x)):\n",
    "            if y[i] == 1:\n",
    "                y1.append(self.num_items * x[i][0] + x[i][1])\n",
    "        y1 = torch.LongTensor(y1)\n",
    "        y1 = y1.to(device)\n",
    "\n",
    "        # Enumerate all user-item pairs (factuals and counterfactuals).\n",
    "        x_all = []\n",
    "        for i in range(self.num_users):\n",
    "            x_all.extend([[i, j] for j in range(self.num_items)])\n",
    "        x_all = np.array(x_all)\n",
    "\n",
    "        num_sample = len(x)\n",
    "        total_batch = num_sample // batch_size\n",
    "\n",
    "        if y_ips is None:\n",
    "            one_over_zl = self._compute_IPS(x, y, y1, mu)\n",
    "        else:\n",
    "            one_over_zl = self._compute_IPS(x, y, y1, mu, y_ips)\n",
    "\n",
    "        # Inverse propensities restricted to observed cells.\n",
    "        # NOTE(review): these are ordered by flattened matrix index, while\n",
    "        # selected_idx below indexes rows of x; the two only align when x\n",
    "        # is sorted by (user, item) -- confirm against the data pipeline.\n",
    "        one_over_zl_obs = one_over_zl[np.where(observation.cpu() == 1)].detach()\n",
    "\n",
    "        early_stop = 0\n",
    "        for epoch in range(num_epoch):\n",
    "            all_idx = np.arange(num_sample)  # observed-sample order\n",
    "            np.random.shuffle(all_idx)\n",
    "\n",
    "            # sampling order over all (counterfactual) pairs\n",
    "            ul_idxs = np.arange(x_all.shape[0])\n",
    "            np.random.shuffle(ul_idxs)\n",
    "\n",
    "            epoch_loss = 0\n",
    "\n",
    "            for idx in range(total_batch):\n",
    "                selected_idx = all_idx[batch_size*idx:(idx+1)*batch_size]\n",
    "                sub_x = x[selected_idx]\n",
    "                sub_y = y[selected_idx]\n",
    "                # inverse propensity scores for this batch\n",
    "                inv_prop = one_over_zl_obs[selected_idx]\n",
    "\n",
    "                sub_y = torch.Tensor(sub_y)\n",
    "\n",
    "                sub_x = sub_x.T\n",
    "\n",
    "                # --- step 1: update the imputation model ---\n",
    "                pred = self.prediction_model.forward(*sub_x)\n",
    "                imputation_y = self.imputation.forward(*sub_x)\n",
    "                pred = self.sigmoid(pred)\n",
    "                imputation_y = self.sigmoid(imputation_y)\n",
    "\n",
    "                # squared gap between observed loss and imputed loss,\n",
    "                # weighted by inverse propensity\n",
    "                e_loss = F.binary_cross_entropy(pred.detach(), sub_y, reduction=\"none\")\n",
    "                e_hat_loss = F.binary_cross_entropy(imputation_y, pred.detach(), reduction=\"none\")\n",
    "                imp_loss = (((e_loss - e_hat_loss) ** 2) * inv_prop.detach()).sum()\n",
    "\n",
    "                optimizer_imputation.zero_grad()\n",
    "                imp_loss.backward()\n",
    "                optimizer_imputation.step()\n",
    "\n",
    "                # --- step 2: update the propensity scalar mu ---\n",
    "                x_all_idx = ul_idxs[G*idx*batch_size : G*(idx+1)*batch_size]\n",
    "                x_sampled = x_all[x_all_idx]\n",
    "\n",
    "                x_sampled = x_sampled.T\n",
    "                x_sampled = torch.LongTensor(x_sampled)\n",
    "                x_sampled = x_sampled.to(device)\n",
    "                imputation_y1 = self.imputation(*x_sampled)\n",
    "                imputation_y1 = self.sigmoid(imputation_y1)\n",
    "\n",
    "                # BCE between estimated propensity (1/one_over_zl) and the\n",
    "                # observation indicator, plus the Stable-DR stabilizer.\n",
    "                prop_loss = F.binary_cross_entropy(1/one_over_zl[x_all_idx], observation[x_all_idx], reduction=\"sum\")\n",
    "                pred_y1 = self.prediction_model(*x_sampled)\n",
    "                pred_y1 = self.sigmoid(pred_y1)\n",
    "\n",
    "                imputation_loss = F.binary_cross_entropy(imputation_y1, pred_y1, reduction=\"none\")\n",
    "\n",
    "                loss = prop_loss + eta * ((1 - observation[x_all_idx] * one_over_zl[x_all_idx]) * (imputation_loss - imputation_loss.mean())).sum() ** 2\n",
    "\n",
    "                optimizer_propensity.zero_grad()\n",
    "                loss.backward()\n",
    "                optimizer_propensity.step()\n",
    "\n",
    "                # recompute propensities with the updated mu\n",
    "                one_over_zl = self._compute_IPS(x, y, y1, mu, y_ips)\n",
    "                one_over_zl_obs = one_over_zl[np.where(observation.cpu() == 1)]\n",
    "                inv_prop = one_over_zl_obs[selected_idx].detach()\n",
    "\n",
    "                # --- step 3: update the prediction model (IPS-weighted) ---\n",
    "                pred = self.prediction_model.forward(*sub_x)\n",
    "                pred = self.sigmoid(pred)\n",
    "\n",
    "                # self-normalized IPS cross entropy\n",
    "                xent_loss = F.binary_cross_entropy(pred, sub_y, weight=inv_prop.detach(), reduction=\"sum\")\n",
    "                xent_loss = xent_loss / inv_prop.detach().sum()\n",
    "\n",
    "                optimizer_prediction.zero_grad()\n",
    "                xent_loss.backward()\n",
    "                optimizer_prediction.step()\n",
    "\n",
    "                epoch_loss += xent_loss.detach().cpu().numpy()\n",
    "\n",
    "            # Early stopping on relative improvement of the epoch loss.\n",
    "            relative_loss_div = (last_loss-epoch_loss)/(last_loss+1e-10)\n",
    "            if relative_loss_div < tol:\n",
    "                if early_stop > stop:\n",
    "                    print(\"[MF-Stable-DR] epoch:{}, xent:{}\".format(epoch, epoch_loss))\n",
    "                    break\n",
    "                early_stop += 1\n",
    "\n",
    "            last_loss = epoch_loss\n",
    "\n",
    "            if epoch % 10 == 0 and verbose:\n",
    "                print(\"[MF-Stable-DR] epoch:{}, xent:{}\".format(epoch, epoch_loss))\n",
    "\n",
    "            if epoch == num_epoch - 1:\n",
    "                print(\"[MF-Stable-DR] Reach preset epochs, it seems does not converge.\")\n",
    "\n",
    "    def predict(self, x):\n",
    "        \"\"\"Return sigmoid probabilities for (user, item) pairs in x.\"\"\"\n",
    "        x = torch.LongTensor(x).T\n",
    "        pred = self.prediction_model(*x)\n",
    "        pred = self.sigmoid(pred)\n",
    "        return pred.detach().cpu().numpy()\n",
    "\n",
    "    def _compute_IPS(self, x, y, y1, mu, y_ips=None):\n",
    "        \"\"\"Inverse propensity score 1/p for every user-item cell.\n",
    "\n",
    "        Uses p(o=1|y) = p(y|o=1) * p(o=1) / p(y), smoothed by mu; p(y) is\n",
    "        estimated from y_ips and p(y|o=1) from the observed labels y.\n",
    "        \"\"\"\n",
    "        if y_ips is None:\n",
    "            # BUGFIX: the original branch only printed and left one_over_zl\n",
    "            # undefined, raising NameError on return. Fall back to uniform\n",
    "            # propensities (weight 1 for every cell).\n",
    "            print(\"y_ips is none\")\n",
    "            one_over_zl = torch.ones(self.num_users * self.num_items, device=device)\n",
    "        else:\n",
    "            py1 = y_ips.sum() / len(y_ips)  # P(y=1) from the sample\n",
    "            py0 = 1 - py1\n",
    "            # NOTE(review): max() of 0-based ids equals count-1; verify this\n",
    "            # denominator should not be self.num_users * self.num_items.\n",
    "            po1 = (len(x) + mu)/ (x[:,0].max() * x[:,1].max() + 2*mu)\n",
    "            py1o1 = (y.sum() + mu)/ (len(y) +2*mu)  # P(y=1 | o=1)\n",
    "            py0o1 = 1 - py1o1\n",
    "\n",
    "            propensity = torch.zeros(self.num_users * self.num_items, device=device)\n",
    "            propensity += (py0o1 * po1) / py0\n",
    "\n",
    "            propensity[y1] = (py1o1 * po1) / py1\n",
    "\n",
    "            one_over_zl = (1 / propensity)\n",
    "\n",
    "        return one_over_zl"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Stable-DR training hyperparameters (see StableDRMCD.fit).\n",
    "STOP = 5\n",
    "ETA = 3\n",
    "LR = 0.003\n",
    "G = 5\n",
    "BATCH_SIZE = 256\n",
    "LR1 = 100\n",
    "LAMB = 0.00015\n",
    "TOL = 4e-5\n",
    "VERBOSE = True\n",
    "\n",
    "model = StableDRMCD(num_users=USER_NUM, num_items=ITEM_NUM, latent_dim=KNOW_NUM)\n",
    "model.to(device)\n",
    "\n",
    "# Observed (user, item) pairs and binary scores for training.\n",
    "x_train = data[[\"user_id\", \"item_id\"]].values\n",
    "x_train = torch.LongTensor(x_train).to(device)\n",
    "y_train = torch.Tensor(data[\"score\"].values).to(device)\n",
    "\n",
    "x_test = test_data[[\"user_id\", \"item_id\"]].values\n",
    "x_test = torch.LongTensor(x_test).to(device)\n",
    "y_test = test_data[\"score\"].values\n",
    "\n",
    "# Draw a 5% sample of responses to estimate the marginal P(y) for IPS.\n",
    "# NOTE(review): this samples from the *test* labels, which leaks test\n",
    "# information into training -- confirm this matches the intended protocol\n",
    "# (Stable-DR normally uses a small MCAR sample).\n",
    "# NOTE(review): no random seed is set, so the sample is not reproducible.\n",
    "ips_idxs = np.arange(len(y_test))\n",
    "np.random.shuffle(ips_idxs)\n",
    "y_ips = y_test[ips_idxs[:int(0.05 * len(ips_idxs))]]\n",
    "y_ips = torch.Tensor(y_ips).to(device)\n",
    "\n",
    "model.fit(x_train, y_train, y_ips, mu=2, eta=ETA, stop=STOP, num_epoch=1000, batch_size=BATCH_SIZE, lr=LR, lr1=LR1, lamb=LAMB, tol=TOL, G=G, verbose=VERBOSE)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "RMSE: 0.42559257\n",
      "AUC: 0.8119112479045232\n",
      "ACC: 0.6923208482982727\n"
     ]
    }
   ],
   "source": [
    "# Score the held-out split with the trained prediction model.\n",
    "logits = model.prediction_model(*x_test.T)\n",
    "y_pred = torch.sigmoid(logits).detach().cpu().numpy()\n",
    "\n",
    "# Regression- and classification-style metrics on predicted probabilities.\n",
    "print(\"RMSE:\", root_mean_squared_error(y_test, y_pred))\n",
    "print(\"AUC:\", roc_auc_score(y_test, y_pred))\n",
    "print(\"ACC:\", accuracy_score(y_test, y_pred > 0.5))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "wxy-cognitive",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
