{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Data Loading"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using CPU\n"
     ]
    }
   ],
   "source": [
    "# --- Environment setup: imports, dataset loading, device, global sizes ---\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import pandas as pd\n",
    "from tqdm import tqdm\n",
    "from pathlib import Path\n",
    "from model import MCD\n",
    "from model.base import Module\n",
    "import json\n",
    "from sklearn.metrics import accuracy_score, roc_auc_score, root_mean_squared_error\n",
    "import numpy as np\n",
    "# torch.nn.functional, used below for the loss computations\n",
    "from torch.nn import functional as F\n",
    "\n",
    "\n",
    "DATASET = \"junyi\"\n",
    "# DATASET = \"a0910\"\n",
    "\n",
    "DATASET_DIR = Path(\"./dataset\") / DATASET\n",
    "\n",
    "# Training interactions; ids in the CSVs are 1-based, shift to 0-based.\n",
    "data = pd.read_csv(DATASET_DIR / \"train.csv\")\n",
    "data[\"user_id\"] = data[\"user_id\"] - 1\n",
    "data[\"item_id\"] = data[\"item_id\"] - 1\n",
    "\n",
    "# Held-out interactions, same 0-based shift.\n",
    "test_data = pd.read_csv(DATASET_DIR / \"test.csv\")\n",
    "test_data[\"user_id\"] = test_data[\"user_id\"] - 1\n",
    "test_data[\"item_id\"] = test_data[\"item_id\"] - 1\n",
    "\n",
    "# Pick the compute device once; `device` is read by the cells below.\n",
    "if torch.cuda.is_available():\n",
    "    print(\"Using CUDA\")\n",
    "    device = torch.device(\"cuda:0\")\n",
    "else:\n",
    "    print(\"Using CPU\")\n",
    "    device = torch.device(\"cpu\")\n",
    "# device = torch.device(\"cpu\")\n",
    "\n",
    "# Dataset cardinalities published alongside the CSVs.\n",
    "# NOTE(review): file handle from open() is never closed -- prefer a `with`.\n",
    "describe = json.load(open(DATASET_DIR / \"describe.json\"))\n",
    "KNOW_NUM = describe[\"knowledge_num\"]\n",
    "ITEM_NUM = describe[\"item_num\"]\n",
    "USER_NUM = describe[\"user_num\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class StableDRMCD(nn.Module):\n",
    "    \"\"\"Stable Doubly-Robust (Stable-DR) trainer built on two MCD models.\n",
    "\n",
    "    One MCD instance (``prediction_model``) predicts response correctness;\n",
    "    a second (``imputation``) imputes the prediction error on unobserved\n",
    "    user-item pairs, and a learnable scalar ``mu`` smooths the propensity\n",
    "    estimate used for inverse-propensity (IPS) weighting.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, num_users, num_items, latent_dim, *args, **kwargs):\n",
    "        super().__init__()\n",
    "        self.num_users = num_users\n",
    "        self.num_items = num_items\n",
    "        self.latent_dim = latent_dim\n",
    "\n",
    "        # Prediction model\n",
    "        self.prediction_model = MCD(\n",
    "            self.num_users, self.num_items, self.latent_dim)\n",
    "        # Imputation model (estimates the prediction model's error)\n",
    "        self.imputation = MCD(\n",
    "            self.num_users, self.num_items, self.latent_dim)\n",
    "        \n",
    "        self.sigmoid = torch.nn.Sigmoid()\n",
    "        self.xent_func = torch.nn.BCELoss()\n",
    "\n",
    "    def fit(self, x, y, y_ips, mu = 0, eta = 1, stop = 5,\n",
    "        num_epoch=1000, batch_size=128, lr=0.05, lr1 = 10, lamb=0, \n",
    "        tol=1e-4, G=1, verbose = False): \n",
    "        \"\"\"Run the alternating Stable-DR training loop.\n",
    "\n",
    "        Each mini-batch performs three updates in order: (1) the imputation\n",
    "        model, (2) the propensity parameter ``mu``, (3) the prediction model\n",
    "        with freshly recomputed IPS weights.\n",
    "\n",
    "        Args:\n",
    "            x: (N, 2) LongTensor of (user_id, item_id) rows, 0-based ids.\n",
    "            y: length-N tensor of binary outcomes aligned with ``x``.\n",
    "            y_ips: sample of outcomes used to estimate P(y=1); passing None\n",
    "                is effectively unsupported (see NOTE in _compute_IPS).\n",
    "            mu: initial value of the learnable propensity offset.\n",
    "            eta: weight of the variance (stabilisation) term in the\n",
    "                propensity loss.\n",
    "            stop: patience in epochs before an early-stop round fires.\n",
    "            num_epoch: maximum number of epochs.\n",
    "            batch_size: mini-batch size over observed interactions.\n",
    "            lr: learning rate for both MCD models.\n",
    "            lr1: learning rate for ``mu``.\n",
    "            lamb: weight decay applied to all three optimizers.\n",
    "            tol: relative-loss threshold counted as \"no improvement\".\n",
    "            G: multiplier for the counterfactual sample size per batch.\n",
    "            verbose: print the epoch loss each epoch when True.\n",
    "        \"\"\"\n",
    "\n",
    "        # Learnable propensity offset, used to update the inverse-propensity\n",
    "        # (IPS) weights.\n",
    "        mu = torch.Tensor([mu])\n",
    "        mu.requires_grad_(True)\n",
    "        mu = mu.to(device)\n",
    "        mu = torch.nn.Parameter(mu)\n",
    "        \n",
    "        optimizer_prediction = torch.optim.Adam(\n",
    "            self.prediction_model.parameters(), lr=lr, weight_decay=lamb)\n",
    "        optimizer_imputation = torch.optim.Adam(\n",
    "            self.imputation.parameters(), lr=lr, weight_decay=lamb)\n",
    "        \n",
    "        # Separate optimizer that only updates mu (the propensity offset).\n",
    "        optimizer_propensity = torch.optim.Adam(\n",
    "            [mu], lr=lr1, weight_decay=lamb)\n",
    "\n",
    "        # LR decay is stepped only when an early-stop round fires (below).\n",
    "        scheduler_prediction = torch.optim.lr_scheduler.StepLR(optimizer_prediction, step_size=1, gamma=0.2)\n",
    "        scheduler_imputation = torch.optim.lr_scheduler.StepLR(optimizer_imputation, step_size=1, gamma=0.2)\n",
    "        scheduler_propensity = torch.optim.lr_scheduler.StepLR(optimizer_propensity, step_size=1, gamma=0.2)\n",
    "\n",
    "        # Number of LR-decay restarts allowed before training really stops.\n",
    "        wait = 3\n",
    "        \n",
    "        last_loss = 1e9\n",
    "\n",
    "        # Binary indicator of whether each user attempted each item,\n",
    "        # flattened to length num_users * num_items (user-major order).\n",
    "        observation = torch.zeros([self.num_users, self.num_items])\n",
    "        for i in range(len(x)):\n",
    "            observation[x[i][0], x[i][1]] = 1\n",
    "        observation = observation.reshape(self.num_users * self.num_items)\n",
    "        observation = observation.to(device)\n",
    "        \n",
    "        # Flattened indices of the interactions answered correctly (y == 1).\n",
    "        y1 = []\n",
    "        for i in range(len(x)):\n",
    "            if y[i] == 1:\n",
    "                y1.append(self.num_items * x[i][0] + x[i][1])\n",
    "        y1 = torch.LongTensor(y1)\n",
    "        y1 = y1.to(device)\n",
    "        \n",
    "        \n",
    "        # generate all counterfactuals and factuals\n",
    "        # x_all enumerates every (user, item) pair, observed or not.\n",
    "        x_all = []\n",
    "        for i in range(self.num_users):\n",
    "            x_all.extend([[i, j] for j in range(self.num_items)])\n",
    "        x_all = np.array(x_all)\n",
    "        \n",
    "        \n",
    "        num_sample = len(x)\n",
    "        total_batch = num_sample // batch_size\n",
    "\n",
    "        # y_ips is a sample of outcomes used to estimate the marginal P(y=1).\n",
    "        if y_ips is None:\n",
    "            one_over_zl = self._compute_IPS(x, y, y1, mu)\n",
    "        else:\n",
    "            one_over_zl = self._compute_IPS(x, y, y1, mu, y_ips)\n",
    "        \n",
    "        # NOTE(review): this gathers observed entries in flattened user-major\n",
    "        # order, but below it is indexed with positions into x; the two only\n",
    "        # agree if x is sorted by (user_id, item_id) -- confirm upstream.\n",
    "        one_over_zl_obs = one_over_zl[np.where(observation.cpu() == 1)].detach()\n",
    "        \n",
    "        early_stop = 0\n",
    "        for epoch in range(num_epoch):\n",
    "            all_idx = np.arange(num_sample) # observation\n",
    "            np.random.shuffle(all_idx)\n",
    "\n",
    "            # sampling counterfactuals\n",
    "            ul_idxs = np.arange(x_all.shape[0]) # all\n",
    "            np.random.shuffle(ul_idxs)\n",
    "\n",
    "            epoch_loss = 0\n",
    "\n",
    "            for idx in range(total_batch):\n",
    "                selected_idx = all_idx[batch_size*idx:(idx+1)*batch_size]\n",
    "                sub_x = x[selected_idx]\n",
    "                sub_y = y[selected_idx]\n",
    "                # propensity score\n",
    "                inv_prop = one_over_zl_obs[selected_idx]                \n",
    "\n",
    "                sub_y = torch.Tensor(sub_y)\n",
    "\n",
    "                sub_x = sub_x.T\n",
    "  \n",
    "                pred = self.prediction_model.forward(*sub_x)\n",
    "                imputation_y = self.imputation.forward(*sub_x)\n",
    "                pred = self.sigmoid(pred)\n",
    "                imputation_y = self.sigmoid(imputation_y)\n",
    "                \n",
    "                # Cross-entropy between the (detached) prediction and the label.\n",
    "                e_loss = F.binary_cross_entropy(pred.detach(), sub_y, reduction=\"none\")\n",
    "                # Cross-entropy between the imputation and the (detached) prediction.\n",
    "                e_hat_loss = F.binary_cross_entropy(imputation_y, pred.detach(), reduction=\"none\")\n",
    "                # IPS-weighted squared error of the imputed error; only the\n",
    "                # imputation model receives gradients (pred/inv_prop detached).\n",
    "                imp_loss = (((e_loss - e_hat_loss) ** 2) * inv_prop.detach()).sum()\n",
    "                \n",
    "                optimizer_imputation.zero_grad()\n",
    "                imp_loss.backward()\n",
    "                optimizer_imputation.step()  \n",
    "                \n",
    "                \n",
    "                # Counterfactual batch: G * batch_size pairs drawn from ALL\n",
    "                # (user, item) combinations, observed or not.\n",
    "                x_all_idx = ul_idxs[G*idx* batch_size : G*(idx+1)*batch_size]\n",
    "                x_sampled = x_all[x_all_idx] \n",
    "\n",
    "                x_sampled = x_sampled.T             \n",
    "                x_sampled = torch.LongTensor(x_sampled)\n",
    "                x_sampled = x_sampled.to(device)\n",
    "                imputation_y1 = self.imputation(*x_sampled)  \n",
    "                imputation_y1 = self.sigmoid(imputation_y1)\n",
    "                \n",
    "                # Likelihood of the observation indicator under the current\n",
    "                # propensity estimate (1 / one_over_zl is the propensity).\n",
    "                prop_loss = F.binary_cross_entropy(1/one_over_zl[x_all_idx], observation[x_all_idx], reduction=\"sum\")                \n",
    "                pred_y1 = self.prediction_model(*x_sampled)\n",
    "                pred_y1 = self.sigmoid(pred_y1)\n",
    "\n",
    "                imputation_loss = F.binary_cross_entropy(imputation_y1, pred_y1, reduction = \"none\")\n",
    "                \n",
    "                # Propensity (mu) update: observation likelihood plus the\n",
    "                # eta-weighted squared bias (stabilisation) term.\n",
    "                loss = prop_loss + eta * ((1 - observation[x_all_idx] * one_over_zl[x_all_idx]) * (imputation_loss - imputation_loss.mean())).sum() ** 2      \n",
    "                \n",
    "                optimizer_propensity.zero_grad()\n",
    "                loss.backward()\n",
    "                optimizer_propensity.step()\n",
    "                \n",
    "                # Recompute the IPS weights with the freshly updated mu\n",
    "                # before stepping the prediction model.\n",
    "                one_over_zl = self._compute_IPS(x, y, y1, mu, y_ips)        \n",
    "                one_over_zl_obs = one_over_zl[np.where(observation.cpu() == 1)]\n",
    "                inv_prop = one_over_zl_obs[selected_idx].detach()                                                \n",
    "                \n",
    "                # IPS-weighted prediction loss, self-normalised by the total\n",
    "                # weight of the batch.\n",
    "                xent_loss = F.binary_cross_entropy(pred, sub_y, weight = inv_prop.detach(), reduction=\"sum\")\n",
    "                xent_loss = (xent_loss)/(inv_prop.detach().sum())\n",
    "                \n",
    "                optimizer_prediction.zero_grad()\n",
    "                xent_loss.backward()\n",
    "                optimizer_prediction.step()\n",
    "\n",
    "                epoch_loss += xent_loss.detach().cpu().numpy()      \n",
    "                \n",
    "            # Relative improvement of the epoch loss; below tol counts as\n",
    "            # \"no improvement\" toward early stopping.\n",
    "            relative_loss_div = (last_loss-epoch_loss)/(last_loss+1e-10)\n",
    "            if  relative_loss_div < tol:\n",
    "                if early_stop > stop:\n",
    "                    print(\"[MF-Stable-DR] epoch:{}, xent:{}\".format(epoch, epoch_loss))\n",
    "                    # Restore the best snapshot, decay all LRs, and grant a\n",
    "                    # limited number of restarts (wait) before stopping.\n",
    "                    self.tmp_best_model(save=False)\n",
    "\n",
    "                    print(\"[MF-Stable-DR] Early stopping at epoch {}\".format(epoch))\n",
    "                    scheduler_imputation.step()\n",
    "                    scheduler_prediction.step()\n",
    "                    scheduler_propensity.step()\n",
    "\n",
    "                    if wait > 0:\n",
    "                        early_stop = 0\n",
    "                        wait -= 1\n",
    "                    else:\n",
    "                        break\n",
    "                    \n",
    "                early_stop += 1\n",
    "            else:\n",
    "                # The loss improved: snapshot the current weights as best.\n",
    "                self.tmp_best_model(save=True)\n",
    "                \n",
    "            last_loss = epoch_loss\n",
    "\n",
    "            if verbose:\n",
    "                print(\"[MF-Stable-DR] epoch:{}, xent:{}\".format(epoch, epoch_loss))\n",
    "\n",
    "            if epoch == num_epoch - 1:\n",
    "                print(\"[MF-Stable-DR] Reach preset epochs, it seems does not converge.\")\n",
    "\n",
    "    def predict(self, x):\n",
    "        \"\"\"Return sigmoid correctness probabilities for (user, item) rows in x.\"\"\"\n",
    "        x = torch.LongTensor(x).T\n",
    "        pred = self.prediction_model(*x)\n",
    "        pred = self.sigmoid(pred)\n",
    "        return pred.detach().cpu().numpy()\n",
    "    \n",
    "    def tmp_best_model(self, save=True):\n",
    "        \"\"\"Snapshot (save=True) or restore (save=False) the best model weights.\n",
    "\n",
    "        NOTE(review): state_dict() returns references to the live tensors,\n",
    "        not copies, so the \"snapshot\" tracks subsequent training unless it\n",
    "        is deep-copied -- confirm whether this is intended.\n",
    "        \"\"\"\n",
    "        if save:\n",
    "            self.best_prediction = self.prediction_model.state_dict()\n",
    "            self.best_imputation = self.imputation.state_dict()\n",
    "        else:\n",
    "            self.prediction_model.load_state_dict(self.best_prediction)\n",
    "            self.imputation.load_state_dict(self.best_imputation)\n",
    "\n",
    "    def _compute_IPS(self, x, y, y1, mu, y_ips=None):\n",
    "        \"\"\"Compute 1/propensity for every (user, item) cell, smoothed by mu.\n",
    "\n",
    "        NOTE(review): when y_ips is None, one_over_zl is never assigned and\n",
    "        the return raises NameError -- that branch is effectively unsupported.\n",
    "        \"\"\"\n",
    "        if y_ips is None:\n",
    "            y_ips = 1\n",
    "            print(\"y_ips is none\")\n",
    "        else:\n",
    "            py1 = y_ips.sum() / len(y_ips)\n",
    "            py0 = 1 - py1\n",
    "            # NOTE(review): the denominator uses the product of the maximum\n",
    "            # 0-based ids rather than num_users * num_items -- likely off by\n",
    "            # one per dimension; confirm against the Stable-DR reference.\n",
    "            po1 = (len(x) + mu)/ (x[:,0].max() * x[:,1].max() + 2*mu)\n",
    "            py1o1 = (y.sum() + mu)/ (len(y) +2*mu)\n",
    "            py0o1 = 1 - py1o1\n",
    "\n",
    "            # Bayes rule: P(o=1 | y) = P(y | o=1) * P(o=1) / P(y).\n",
    "            propensity = torch.zeros(self.num_users * self.num_items, device=device)\n",
    "            propensity += (py0o1 * po1) / py0\n",
    "            \n",
    "            # Overwrite cells answered correctly with the y=1 propensity.\n",
    "            propensity[y1] = (py1o1 * po1) / py1\n",
    "            \n",
    "            one_over_zl = (1 / propensity)\n",
    "            \n",
    "        return one_over_zl           \n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[MF-Stable-DR] epoch:0, xent:95.46868896484375\n",
      "[MF-Stable-DR] epoch:1, xent:91.76045989990234\n",
      "[MF-Stable-DR] epoch:2, xent:89.88001251220703\n",
      "[MF-Stable-DR] epoch:3, xent:88.83606719970703\n",
      "[MF-Stable-DR] epoch:4, xent:88.32862091064453\n",
      "[MF-Stable-DR] epoch:5, xent:87.99452209472656\n",
      "[MF-Stable-DR] epoch:6, xent:87.74978637695312\n",
      "[MF-Stable-DR] epoch:7, xent:87.4984130859375\n",
      "[MF-Stable-DR] epoch:8, xent:87.32357788085938\n",
      "[MF-Stable-DR] epoch:9, xent:87.09154510498047\n",
      "[MF-Stable-DR] epoch:10, xent:86.85212707519531\n",
      "[MF-Stable-DR] epoch:11, xent:86.69689178466797\n",
      "[MF-Stable-DR] epoch:12, xent:86.5179214477539\n",
      "[MF-Stable-DR] epoch:13, xent:86.33753967285156\n",
      "[MF-Stable-DR] epoch:14, xent:86.26246643066406\n",
      "[MF-Stable-DR] epoch:15, xent:86.07740783691406\n",
      "[MF-Stable-DR] epoch:16, xent:86.03898620605469\n",
      "[MF-Stable-DR] epoch:17, xent:85.95752716064453\n",
      "[MF-Stable-DR] epoch:18, xent:85.86419677734375\n",
      "[MF-Stable-DR] epoch:19, xent:85.83934783935547\n",
      "[MF-Stable-DR] epoch:20, xent:85.7505111694336\n",
      "[MF-Stable-DR] epoch:21, xent:85.7158432006836\n",
      "[MF-Stable-DR] epoch:22, xent:85.64090728759766\n",
      "[MF-Stable-DR] epoch:23, xent:85.62687683105469\n",
      "[MF-Stable-DR] epoch:24, xent:85.52005767822266\n",
      "[MF-Stable-DR] epoch:25, xent:85.50948333740234\n",
      "[MF-Stable-DR] epoch:26, xent:85.4659423828125\n",
      "[MF-Stable-DR] epoch:27, xent:85.43324279785156\n",
      "[MF-Stable-DR] epoch:28, xent:85.40422058105469\n",
      "[MF-Stable-DR] epoch:29, xent:85.3744888305664\n",
      "[MF-Stable-DR] epoch:30, xent:85.36646270751953\n",
      "[MF-Stable-DR] epoch:31, xent:85.35006713867188\n",
      "[MF-Stable-DR] epoch:32, xent:85.30716705322266\n",
      "[MF-Stable-DR] epoch:33, xent:85.26758575439453\n",
      "[MF-Stable-DR] epoch:34, xent:85.23243713378906\n",
      "[MF-Stable-DR] epoch:35, xent:85.25634002685547\n",
      "[MF-Stable-DR] epoch:36, xent:85.26348114013672\n",
      "[MF-Stable-DR] epoch:37, xent:85.21070861816406\n",
      "[MF-Stable-DR] epoch:38, xent:85.18727111816406\n",
      "[MF-Stable-DR] epoch:39, xent:85.1831283569336\n",
      "[MF-Stable-DR] epoch:40, xent:85.19416046142578\n",
      "[MF-Stable-DR] epoch:41, xent:85.1764144897461\n",
      "[MF-Stable-DR] epoch:42, xent:85.13082885742188\n",
      "[MF-Stable-DR] epoch:43, xent:85.1502456665039\n",
      "[MF-Stable-DR] epoch:44, xent:85.14313507080078\n",
      "[MF-Stable-DR] epoch:45, xent:85.08995819091797\n",
      "[MF-Stable-DR] epoch:46, xent:85.11491394042969\n",
      "[MF-Stable-DR] epoch:47, xent:85.1456069946289\n",
      "[MF-Stable-DR] epoch:48, xent:85.0567855834961\n",
      "[MF-Stable-DR] epoch:49, xent:85.1045913696289\n",
      "[MF-Stable-DR] Early stopping at epoch 49\n",
      "[MF-Stable-DR] epoch:49, xent:85.1045913696289\n",
      "[MF-Stable-DR] epoch:50, xent:84.66577911376953\n",
      "[MF-Stable-DR] epoch:51, xent:84.66436004638672\n",
      "[MF-Stable-DR] epoch:52, xent:84.66547393798828\n",
      "[MF-Stable-DR] epoch:53, xent:84.63002014160156\n",
      "[MF-Stable-DR] epoch:54, xent:84.65690612792969\n",
      "[MF-Stable-DR] epoch:55, xent:84.6043930053711\n",
      "[MF-Stable-DR] epoch:56, xent:84.62510681152344\n",
      "[MF-Stable-DR] epoch:57, xent:84.6478271484375\n",
      "[MF-Stable-DR] epoch:58, xent:84.61946105957031\n",
      "[MF-Stable-DR] epoch:59, xent:84.60752868652344\n",
      "[MF-Stable-DR] epoch:60, xent:84.61642456054688\n",
      "[MF-Stable-DR] Early stopping at epoch 60\n",
      "[MF-Stable-DR] epoch:60, xent:84.61642456054688\n",
      "[MF-Stable-DR] epoch:61, xent:84.47769165039062\n",
      "[MF-Stable-DR] epoch:62, xent:84.48358917236328\n",
      "[MF-Stable-DR] epoch:63, xent:84.50912475585938\n",
      "[MF-Stable-DR] epoch:64, xent:84.45600891113281\n",
      "[MF-Stable-DR] epoch:65, xent:84.497314453125\n",
      "[MF-Stable-DR] epoch:66, xent:84.50029754638672\n",
      "[MF-Stable-DR] epoch:67, xent:84.4963150024414\n",
      "[MF-Stable-DR] epoch:68, xent:84.48138427734375\n",
      "[MF-Stable-DR] epoch:69, xent:84.47518157958984\n",
      "[MF-Stable-DR] epoch:70, xent:84.49259948730469\n",
      "[MF-Stable-DR] epoch:71, xent:84.48889923095703\n",
      "[MF-Stable-DR] epoch:72, xent:84.48629760742188\n",
      "[MF-Stable-DR] Early stopping at epoch 72\n",
      "[MF-Stable-DR] epoch:72, xent:84.48629760742188\n",
      "[MF-Stable-DR] epoch:73, xent:84.47019958496094\n",
      "[MF-Stable-DR] epoch:74, xent:84.47410583496094\n",
      "[MF-Stable-DR] epoch:75, xent:84.47830200195312\n",
      "[MF-Stable-DR] epoch:76, xent:84.46919250488281\n",
      "[MF-Stable-DR] epoch:77, xent:84.44352722167969\n",
      "[MF-Stable-DR] epoch:78, xent:84.45503234863281\n",
      "[MF-Stable-DR] epoch:79, xent:84.44637298583984\n",
      "[MF-Stable-DR] epoch:80, xent:84.45580291748047\n",
      "[MF-Stable-DR] epoch:81, xent:84.44343566894531\n",
      "[MF-Stable-DR] epoch:82, xent:84.45579528808594\n",
      "[MF-Stable-DR] epoch:83, xent:84.4688491821289\n",
      "[MF-Stable-DR] Early stopping at epoch 83\n"
     ]
    }
   ],
   "source": [
    "# Hyperparameters for the Stable-DR training run (see StableDRMCD.fit).\n",
    "STOP = 5\n",
    "ETA = 3\n",
    "LR = 0.003\n",
    "BATCH_SIZE = 256\n",
    "LR1 = 10\n",
    "LAMB = 0.00015\n",
    "TOL = 4e-5\n",
    "MU = 100\n",
    "VERBOSE = True\n",
    "\n",
    "# Counterfactual sample multiplier per mini-batch.\n",
    "G = 1\n",
    "\n",
    "model = StableDRMCD(num_users=USER_NUM, num_items=ITEM_NUM, latent_dim=KNOW_NUM)\n",
    "model.to(device)\n",
    "\n",
    "x_train = data[[\"user_id\", \"item_id\"]].values\n",
    "x_train = torch.LongTensor(x_train).to(device)\n",
    "y_train = torch.Tensor(data[\"score\"].values).to(device)\n",
    "\n",
    "x_test = test_data[[\"user_id\", \"item_id\"]].values\n",
    "x_test = torch.LongTensor(x_test).to(device)\n",
    "y_test = test_data[\"score\"].values\n",
    "\n",
    "# 5% random sample of test labels, presumably serving as the unbiased\n",
    "# outcome sample the IPS estimator needs -- confirm this is intended, as\n",
    "# it reads labels from the evaluation split.\n",
    "# NOTE(review): no random seed is set, so this sample (and the whole run)\n",
    "# is not reproducible across restarts.\n",
    "ips_idxs = np.arange(len(y_test))\n",
    "np.random.shuffle(ips_idxs)\n",
    "y_ips = y_test[ips_idxs[:int(0.05 * len(ips_idxs))]]\n",
    "y_ips = torch.Tensor(y_ips).to(device)\n",
    "\n",
    "model.fit(x_train, y_train, y_ips, mu=MU, eta=ETA, stop=STOP, num_epoch=1000, batch_size=BATCH_SIZE, lr=LR, lr1=LR1, lamb=LAMB, tol=TOL, G=G, verbose=VERBOSE)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "RMSE: 0.42579263\n",
      "AUC: 0.8128510713297022\n",
      "ACC: 0.6923208482982727\n"
     ]
    }
   ],
   "source": [
    "# Restore the best snapshot saved during training, then score the\n",
    "# prediction model on the held-out split.\n",
    "model.tmp_best_model(save=False)\n",
    "\n",
    "y_pred = model.prediction_model(*(x_test.T))\n",
    "y_pred = torch.sigmoid(y_pred).detach().cpu().numpy()\n",
    "\n",
    "print(\"RMSE:\", root_mean_squared_error(y_test, y_pred))\n",
    "print(\"AUC:\", roc_auc_score(y_test, y_pred))\n",
    "# Threshold probabilities at 0.5 for accuracy.\n",
    "print(\"ACC:\", accuracy_score(y_test, y_pred > 0.5))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "wxy-cognitive",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
