{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 数据准备\n",
    "\n",
    "执行dataset.ipynb中的全部代码即可"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Human-readable dataset name -> directory name under ./dataset\n",
    "datasets = {\n",
    "    \"junyi\": \"junyi\",\n",
    "    \"ASSISTments 2009-2010\": \"a0910\",\n",
    "    \"ASSISTments 2017\": \"assistment-2017\",\n",
    "    \"NIPS 2020\": \"NIPS2020\",\n",
    "}\n",
    "\n",
    "# Pick exactly one dataset by uncommenting the matching line.\n",
    "#selected_dataset = \"junyi\"\n",
    "selected_dataset = \"ASSISTments 2009-2010\"\n",
    "#selected_dataset = \"ASSISTments 2017\"\n",
    "#selected_dataset = \"NIPS 2020\"\n",
    "\n",
    "# Directory name actually used for file paths in the cells below.\n",
    "dataset = datasets[selected_dataset]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Length:  186049 55760 25606\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "from pathlib import Path\n",
    "import json\n",
    "\n",
    "BASE_DIR = Path('.') / 'dataset' / dataset\n",
    "\n",
    "# Pre-split interaction logs plus the item -> knowledge-code mapping.\n",
    "train_data = pd.read_csv(BASE_DIR / 'train.csv')\n",
    "test_data = pd.read_csv(BASE_DIR / 'test.csv')\n",
    "valid_data = pd.read_csv(BASE_DIR / 'valid.csv')\n",
    "item_data = pd.read_csv(BASE_DIR / 'item.csv')\n",
    "\n",
    "with open(BASE_DIR / 'describe.json') as f:\n",
    "    describe = json.load(f)\n",
    "\n",
    "# +1 presumably because ids are 1-based and used directly as embedding\n",
    "# indices (knowledge codes are 1-based, see onehot_knowledge below).\n",
    "user_num = describe['user_num'] + 1\n",
    "item_num = describe['item_num'] + 1\n",
    "knowledge_num = describe['knowledge_num'] + 1\n",
    "\n",
    "# Cast scores to int (vectorized; same truncation as the previous apply(int)).\n",
    "train_data['score'] = train_data['score'].astype(int)\n",
    "test_data['score'] = test_data['score'].astype(int)\n",
    "valid_data['score'] = valid_data['score'].astype(int)\n",
    "\n",
    "def onehot_knowledge(knowledge_code):\n",
    "    \"\"\"Parse a '[k1,k2,...]' string into a multi-hot list of length knowledge_num.\"\"\"\n",
    "    res = [0] * knowledge_num\n",
    "    for ti in knowledge_code[1:-1].split(','):\n",
    "        res[int(ti) - 1] = 1  # codes are 1-based\n",
    "    return res\n",
    "\n",
    "item_data['knowledge'] = item_data['knowledge_code'].apply(onehot_knowledge)\n",
    "item_data = item_data.drop(columns=['knowledge_code'])\n",
    "\n",
    "# Attach the multi-hot knowledge vector to every interaction (DINA/NCDM input).\n",
    "dina_train_data = pd.merge(train_data, item_data, on='item_id')\n",
    "dina_test_data = pd.merge(test_data, item_data, on='item_id')\n",
    "dina_valid_data = pd.merge(valid_data, item_data, on='item_id')\n",
    "\n",
    "print(\"Length: \", len(train_data), len(test_data), len(valid_data))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch.utils.data import DataLoader, TensorDataset\n",
    "\n",
    "batch_size = 32\n",
    "\n",
    "def transform(x, y, k, z, batch_size, **params):\n",
    "    \"\"\"Wrap (user, item[, knowledge], score) columns into a DataLoader.\n",
    "\n",
    "    `k` may be None for models (e.g. MIRT) that take no knowledge vector;\n",
    "    the score tensor `z` is always last so callers can split inputs/label.\n",
    "    \"\"\"\n",
    "    tensors = [\n",
    "        torch.tensor(x, dtype=torch.int64),\n",
    "        torch.tensor(y, dtype=torch.int64),\n",
    "    ]\n",
    "    if k is not None:\n",
    "        tensors.append(torch.tensor(k, dtype=torch.float32))\n",
    "    tensors.append(torch.tensor(z, dtype=torch.float32))\n",
    "    return DataLoader(TensorDataset(*tensors), batch_size=batch_size, **params)\n",
    "\n",
    "# One [train, valid, test] loader triple per model.\n",
    "loaders = {\n",
    "    \"DINA\": [transform(data[\"user_id\"], data[\"item_id\"], data[\"knowledge\"], data[\"score\"], batch_size=batch_size) for data in [dina_train_data, dina_valid_data, dina_test_data]],\n",
    "    \"MIRT\": [transform(data[\"user_id\"], data[\"item_id\"], None, data[\"score\"], batch_size=batch_size) for data in [train_data, valid_data, test_data]],\n",
    "    \"NCDM\": [transform(data[\"user_id\"], data[\"item_id\"], data[\"knowledge\"], data[\"score\"], batch_size=batch_size) for data in [dina_train_data, dina_valid_data, dina_test_data]]\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "# device = torch.device(\"cpu\")\n",
    "\n",
    "from EduCDM.DINA.GD.DINA import DINANet\n",
    "from EduCDM.MIRT.MIRT import MIRTNet\n",
    "from EduCDM.NCDM.NCDM import Net as NCDMNNet\n",
    "\n",
    "# One network per cognitive-diagnosis model, moved to `device`.\n",
    "# NOTE(review): NCDM's constructor receives knowledge_num first, the reverse\n",
    "# of the other two — presumably its signature is (knowledge_n, exer_n,\n",
    "# student_n); confirm against the EduCDM package.\n",
    "dina = DINANet(user_num, item_num, knowledge_num).to(device)\n",
    "mirt = MIRTNet(user_num, item_num, knowledge_num, 1).to(device)\n",
    "ncdm = NCDMNNet(knowledge_num, item_num, user_num).to(device)\n",
    "\n",
    "models = {\n",
    "    \"DINA\": dina,\n",
    "    \"MIRT\": mirt,\n",
    "    \"NCDM\": ncdm\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "from tqdm import tqdm\n",
    "from sklearn.metrics import roc_auc_score, accuracy_score, root_mean_squared_error\n",
    "import numpy as np\n",
    "import os\n",
    "import time\n",
    "\n",
    "def train(model, loader, valid_loader=None, epochs=5, lr=0.01, name=None):\n",
    "    \"\"\"Train `model` on `loader`; evaluate on `valid_loader` after each epoch.\n",
    "\n",
    "    Returns three per-epoch lists (AUC, ACC, RMSE); they stay empty when\n",
    "    no valid_loader is given.\n",
    "    \"\"\"\n",
    "    optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n",
    "    criterion = torch.nn.BCELoss()\n",
    "    loader_len = len(loader)\n",
    "    auc_list, acc_list, rmse_list = [], [], []\n",
    "    description = \"Epoch %d\" if name is None else f\"[{name}] Epoch %d\"\n",
    "\n",
    "    for epoch in range(epochs):\n",
    "        model.train()\n",
    "        total_loss = 0\n",
    "        for batch_data in tqdm(loader, description % epoch):\n",
    "            batch_data = [data.to(device) for data in batch_data]\n",
    "            # Last tensor is the score label; everything before it feeds the model.\n",
    "            input_data = batch_data[:-1]\n",
    "            score = batch_data[-1]\n",
    "            try:\n",
    "                output = model(*input_data)\n",
    "            except Exception:  # was a bare except; dump shapes/ranges, then re-raise\n",
    "                for i in input_data:\n",
    "                    print(i.shape)\n",
    "                    print(i.max())\n",
    "                    print(i.min())\n",
    "                print()\n",
    "                raise\n",
    "            loss = criterion(output, score)\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "            total_loss += loss.item()\n",
    "        log = f\"[{name}] Epoch: {epoch}, Loss: {total_loss / loader_len}\"\n",
    "        if valid_loader is not None:\n",
    "            auc, acc, rmse = evaluate(model, valid_loader, name=name)\n",
    "            auc_list.append(auc)\n",
    "            acc_list.append(acc)\n",
    "            rmse_list.append(rmse)\n",
    "            log += f\", AUC: {auc}, ACC: {acc}, RMSE: {rmse}\"\n",
    "        print(log)\n",
    "    return auc_list, acc_list, rmse_list\n",
    "\n",
    "def evaluate(model, loader, name=None):\n",
    "    \"\"\"Compute (AUC, ACC, RMSE) of `model` over `loader`.\n",
    "\n",
    "    Leaves the model back in train mode, matching how the training loop\n",
    "    calls it between epochs.\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    y_pred = []\n",
    "    y_true = []\n",
    "    description = \"evaluating\" if name is None else f\"[{name}] evaluating\"\n",
    "    # Inference only: skip autograd bookkeeping to save time and memory.\n",
    "    with torch.no_grad():\n",
    "        for batch_data in tqdm(loader, description):\n",
    "            batch_data = [data.to(device) for data in batch_data]\n",
    "            input_data = batch_data[:-1]\n",
    "            score = batch_data[-1]\n",
    "            output = model(*input_data)\n",
    "            y_pred.extend(output.tolist())\n",
    "            y_true.extend(score.tolist())\n",
    "    auc = roc_auc_score(y_true, y_pred)\n",
    "    # Labels are already 0/1; the >= 0.5 on both sides binarizes predictions.\n",
    "    acc = accuracy_score(np.array(y_true) >= 0.5, np.array(y_pred) >= 0.5)\n",
    "    rmse = root_mean_squared_error(y_true, y_pred)\n",
    "    model.train()\n",
    "    return auc, acc, rmse\n",
    "\n",
    "aucs, accs, rmses = {}, {}, {}\n",
    "\n",
    "def save(name, timestamp=True):\n",
    "    \"\"\"Save model weights (.pt) and metric history (.json) under\n",
    "    outputs/<dataset>/<name>/model_<seed>[_<timestamp>].\n",
    "    \"\"\"\n",
    "    # BUGFIX: torch.seed() RE-SEEDS the global RNG as a side effect;\n",
    "    # torch.initial_seed() reads the current seed without disturbing state.\n",
    "    seed = torch.initial_seed()\n",
    "    sd = selected_dataset.replace(\" \", \"_\")\n",
    "    path = f\"outputs/{sd}/{name}\"\n",
    "    os.makedirs(path, exist_ok=True)\n",
    "    if timestamp:\n",
    "        path += f\"/model_{seed}_{int(time.time())}\"\n",
    "    else:\n",
    "        path += f\"/model_{seed}\"\n",
    "    torch.save(models[name].state_dict(), path + \".pt\")\n",
    "    with open(path + \".json\", \"w\") as f:\n",
    "        json.dump({\n",
    "            \"aucs\": aucs[name],\n",
    "            \"accs\": accs[name],\n",
    "            \"rmses\": rmses[name]\n",
    "        }, f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[DINA] Epoch 0: 100%|██████████| 7534/7534 [00:18<00:00, 396.57it/s]\n",
      "[DINA] evaluating: 100%|██████████| 1036/1036 [00:01<00:00, 874.44it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[DINA] Epoch: 0, Loss: 0.588148703404564, AUC: 0.7075924544892297, ACC: 0.6659623917177266, RMSE: 0.4843224464116004\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[DINA] Epoch 1: 100%|██████████| 7534/7534 [00:19<00:00, 391.72it/s]\n",
      "[DINA] evaluating: 100%|██████████| 1036/1036 [00:01<00:00, 874.81it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[DINA] Epoch: 1, Loss: 0.5844604765068229, AUC: 0.7115453722548939, ACC: 0.6692825450484441, RMSE: 0.4834774556821813\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[DINA] Epoch 2: 100%|██████████| 7534/7534 [00:18<00:00, 408.86it/s]\n",
      "[DINA] evaluating: 100%|██████████| 1036/1036 [00:01<00:00, 1002.47it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[DINA] Epoch: 2, Loss: 0.5819057790265337, AUC: 0.7149754313925637, ACC: 0.6733874618936947, RMSE: 0.4822822954009497\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[DINA] Epoch 3: 100%|██████████| 7534/7534 [00:18<00:00, 404.83it/s]\n",
      "[DINA] evaluating: 100%|██████████| 1036/1036 [00:01<00:00, 892.77it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[DINA] Epoch: 3, Loss: 0.5805011223126445, AUC: 0.716474986199194, ACC: 0.6739005764993511, RMSE: 0.4826799804696968\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[DINA] Epoch 4: 100%|██████████| 7534/7534 [00:18<00:00, 416.19it/s]\n",
      "[DINA] evaluating: 100%|██████████| 1036/1036 [00:01<00:00, 980.79it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[DINA] Epoch: 4, Loss: 0.580806904361898, AUC: 0.7184696540191242, ACC: 0.6763152334671456, RMSE: 0.48199924744133704\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[DINA] evaluating: 100%|██████████| 2248/2248 [00:02<00:00, 891.40it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[DINA]\t Test AUC:  0.7207998775907917  ACC:  0.67438496947446  RMSE:  0.4822985394360826\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[MIRT] Epoch 0: 100%|██████████| 5815/5815 [00:11<00:00, 485.58it/s]\n",
      "[MIRT] evaluating: 100%|██████████| 801/801 [00:00<00:00, 1330.21it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[MIRT] Epoch: 0, Loss: 1.7668689902551555, AUC: 0.6291454964306527, ACC: 0.6386784347418574, RMSE: 0.5506153094601801\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[MIRT] Epoch 1: 100%|██████████| 5815/5815 [00:12<00:00, 465.88it/s]\n",
      "[MIRT] evaluating: 100%|██████████| 801/801 [00:00<00:00, 877.49it/s] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[MIRT] Epoch: 1, Loss: 0.4590992196689678, AUC: 0.6604830924866489, ACC: 0.6520346793720222, RMSE: 0.5406234727521523\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[MIRT] Epoch 2: 100%|██████████| 5815/5815 [00:12<00:00, 451.25it/s]\n",
      "[MIRT] evaluating: 100%|██████████| 801/801 [00:00<00:00, 908.68it/s] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[MIRT] Epoch: 2, Loss: 0.17993931125626064, AUC: 0.6859600768284887, ACC: 0.669452472076857, RMSE: 0.530569480985894\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[MIRT] Epoch 3: 100%|██████████| 5815/5815 [00:12<00:00, 447.48it/s]\n",
      "[MIRT] evaluating: 100%|██████████| 801/801 [00:01<00:00, 672.36it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[MIRT] Epoch: 3, Loss: 0.09572137944588097, AUC: 0.7116475607919754, ACC: 0.6848785440912286, RMSE: 0.5213936909368464\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[MIRT] Epoch 4: 100%|██████████| 5815/5815 [00:12<00:00, 456.20it/s]\n",
      "[MIRT] evaluating: 100%|██████████| 801/801 [00:00<00:00, 1200.79it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[MIRT] Epoch: 4, Loss: 0.05123454828187113, AUC: 0.7240891464334795, ACC: 0.6968288682340077, RMSE: 0.5145709374259774\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[MIRT] evaluating: 100%|██████████| 1743/1743 [00:01<00:00, 985.50it/s] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[MIRT]\t Test AUC:  0.7174657090391428  ACC:  0.6904591104734576  RMSE:  0.5192391156967201\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[NCDM] Epoch 0: 100%|██████████| 7534/7534 [00:27<00:00, 270.60it/s]\n",
      "[NCDM] evaluating: 100%|██████████| 1036/1036 [00:01<00:00, 649.35it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[NCDM] Epoch: 0, Loss: 0.6161841152654677, AUC: 0.7417888624265083, ACC: 0.7200808910084211, RMSE: 0.4354158235983963\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[NCDM] Epoch 1: 100%|██████████| 7534/7534 [00:27<00:00, 270.15it/s]\n",
      "[NCDM] evaluating: 100%|██████████| 1036/1036 [00:01<00:00, 739.16it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[NCDM] Epoch: 1, Loss: 0.5110198894007819, AUC: 0.7487467582215527, ACC: 0.719929974947934, RMSE: 0.43721447088761606\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[NCDM] Epoch 2: 100%|██████████| 7534/7534 [00:27<00:00, 275.79it/s]\n",
      "[NCDM] evaluating: 100%|██████████| 1036/1036 [00:01<00:00, 567.23it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[NCDM] Epoch: 2, Loss: 0.481716339364905, AUC: 0.7487341068309575, ACC: 0.7190848450092059, RMSE: 0.43904986460414475\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[NCDM] Epoch 3: 100%|██████████| 7534/7534 [00:27<00:00, 272.97it/s]\n",
      "[NCDM] evaluating: 100%|██████████| 1036/1036 [00:01<00:00, 788.38it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[NCDM] Epoch: 3, Loss: 0.46883443436027367, AUC: 0.747170795674354, ACC: 0.7172134858591651, RMSE: 0.4412705032545068\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[NCDM] Epoch 4: 100%|██████████| 7534/7534 [00:26<00:00, 281.20it/s]\n",
      "[NCDM] evaluating: 100%|██████████| 1036/1036 [00:01<00:00, 798.09it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[NCDM] Epoch: 4, Loss: 0.4617279687273841, AUC: 0.745563324987293, ACC: 0.7155534091938064, RMSE: 0.4429249940793277\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[NCDM] evaluating: 100%|██████████| 2248/2248 [00:03<00:00, 697.33it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[NCDM]\t Test AUC:  0.7486970729854616  ACC:  0.7189703366848846  RMSE:  0.4422683001543817\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# Train on the train split, report on the test split, then persist each model.\n",
    "for name in models:\n",
    "    train_loader, valid_loader, test_loader = loaders[name]\n",
    "    aucs[name], accs[name], rmses[name] = train(models[name], train_loader, valid_loader, epochs=5, name=name)\n",
    "    auc, acc, rmse = evaluate(models[name], test_loader, name=name)\n",
    "    print(f\"[{name}]\\t\", \"Test AUC: \", auc, \" ACC: \", acc, \" RMSE: \", rmse)\n",
    "    save(name)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "cognitive",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
