{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import time\n",
    "import json\n",
    "import sys\n",
    "from torch import nn\n",
    "import torch\n",
    "from pytorch_transformers import *\n",
    "import importlib\n",
    "from tensorboardX import SummaryWriter\n",
    "import torch.nn.utils.rnn as rnn_utils\n",
    "import numpy as np\n",
    "import TNews_Loader\n",
    "from sklearn.metrics import accuracy_score\n",
    "import pickle\n",
    "import random"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "ename": "AssertionError",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mAssertionError\u001b[0m                            Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-2-574cc5250ff4>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m      1\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mos\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0;32massert\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msys\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0margv\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m==\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      3\u001b[0m \u001b[0mos\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0menviron\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"CUDA_VISIBLE_DEVICES\"\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msys\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0margv\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mAssertionError\u001b[0m: "
     ]
    }
   ],
   "source": [
    "import os\n",
    "assert(len(sys.argv)==2)\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"] = sys.argv[1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "def WeightsForUmbalanced(data_label):\n",
    "    _, _, labels = data_label.shape\n",
    "    label_cnt = data_label.reshape([-1, labels]).sum(axis=0)\n",
    "    weights = 1.0/label_cnt\n",
    "    normalized_weights = weights/sum(weights)\n",
    "    return normalized_weights"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def sent_list2bert_tensors(sent_list, cuda):\n",
    "    sent_len = [len(sent) for sent in sent_list]\n",
    "    max_sent_len = max(sent_len)\n",
    "    sent_padding = torch.zeros([len(sent_list), max_sent_len], dtype=torch.int64)\n",
    "#     attn_mask = torch.ones_like(sent_padding)\n",
    "    input_mask = torch.zeros_like(sent_padding)\n",
    "    for i, sent in enumerate(sent_list):\n",
    "        sent_padding[i][-len(sent):] = torch.tensor(sent, dtype=torch.int32)\n",
    "        input_mask[i][:-len(sent)].fill_(1)\n",
    "    if cuda:\n",
    "        sent_padding = sent_padding.cuda()\n",
    "        input_mask = input_mask.cuda()\n",
    "    return sent_padding, input_mask\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def thu_cls_train(tr_reader, val_reader, te_reader, \n",
    "                   xlnet, classifier, dense_layer, activation=nn.ReLU, \n",
    "                   train_epochs=3, cuda=True, log_dir = \"XLNetCLS\"\n",
    "                  ):\n",
    "    \"\"\"Fine-tune XLNet plus a dense+linear head on THUCnews batches.\n",
    "\n",
    "    Args:\n",
    "        tr_reader, val_reader, te_reader: reader objects whose .label is a\n",
    "            (batches, batchsize, n_labels) one-hot array and whose .iter()\n",
    "            yields (token_id_lists, one_hot_labels, lengths) batches.\n",
    "        xlnet: pretrained XLNetModel (on GPU when cuda=True).\n",
    "        classifier: linear layer mapping pooled features to class scores.\n",
    "        dense_layer: projection applied to the last-position feature.\n",
    "        activation: activation module OR class; a bare class such as nn.ReLU\n",
    "            is instantiated here (calling the class on a tensor used to build\n",
    "            a module instead of applying the activation -- bug fix).\n",
    "        train_epochs: number of passes over tr_reader.\n",
    "        cuda: run on GPU and move labels/loss weights accordingly.\n",
    "        log_dir: TensorBoard run dir; checkpoints are also saved under it.\n",
    "    \"\"\"\n",
    "    # Accept either an activation instance or its class.\n",
    "    if isinstance(activation, type):\n",
    "        activation = activation()\n",
    "    cls_weights = torch.tensor(\n",
    "            WeightsForUmbalanced(\n",
    "                tr_reader.label\n",
    "            ),\n",
    "            dtype = torch.float32\n",
    "    )\n",
    "    cls_loss_fn = nn.CrossEntropyLoss(weight=cls_weights) if not cuda else nn.CrossEntropyLoss(weight=cls_weights.cuda())\n",
    "\n",
    "    optim = torch.optim.Adagrad([\n",
    "                                {'params': xlnet.parameters(), 'lr':1e-5},\n",
    "                                {'params': dense_layer.parameters(), 'lr':5e-5},\n",
    "                                {'params': classifier.parameters(), 'lr': 5e-5}\n",
    "                             ]\n",
    "    )\n",
    "    losses = np.zeros([10])  # rolling window: last 10 training losses\n",
    "    accs = np.zeros([10])    # rolling window: last 10 training accuracies\n",
    "    optim.zero_grad()\n",
    "    writer = SummaryWriter(log_dir)\n",
    "    \n",
    "    tr_reader.reset_batchsize(2)\n",
    "    batches_tr, batchsize, _ = tr_reader.label.shape\n",
    "    \n",
    "    step = 0\n",
    "    for epoch in range(train_epochs):\n",
    "        for xsj, ysj, lsj in tr_reader.iter():\n",
    "            # Encode one sentence at a time; keep only the final position's\n",
    "            # feature vector of the first (only) sequence in each batch.\n",
    "            cls_feature = torch.cat( [ xlnet(torch.tensor([sent]).cuda())[0][0][-1:] for sent in xsj])\n",
    "            pooled_feat = activation(dense_layer(cls_feature))\n",
    "            cls_scores = classifier(pooled_feat)\n",
    "            y_label = torch.tensor(ysj.argmax(axis=1)).cuda() if cuda else torch.tensor(ysj.argmax(axis=1))\n",
    "            cls_loss = cls_loss_fn(cls_scores, y_label)\n",
    "            cls_loss.backward()\n",
    "            torch.cuda.empty_cache()\n",
    "            # NOTE(review): step() runs every iteration while zero_grad()\n",
    "            # only runs every 10 steps, so gradients accumulate between\n",
    "            # resets -- confirm this matches the intended scheme.\n",
    "            optim.step()\n",
    "            cls_acc = accuracy_score(ysj.argmax(axis=1), cls_scores.argmax(axis=1) if not cuda else cls_scores.argmax(axis=1).cpu())\n",
    "            losses[int(step%10)] = cls_loss.cpu()\n",
    "            accs[int(step%10)] = cls_acc\n",
    "            writer.add_scalar('Train Loss', cls_loss.cpu(), step)\n",
    "            writer.add_scalar('Train Accuracy', cls_acc, step)\n",
    "            print(\"step:%d | loss/acc = %.3f/%.3f\"%(step, cls_loss, cls_acc))\n",
    "            if step %10 == 9:\n",
    "                print('classification task: %6d | %6d : [%5d/%5d], senti_loss/senti_acc = %6.8f/%6.7f ' % ( step%batches_tr,batches_tr, \n",
    "                                                                                epoch, train_epochs,\n",
    "                                                                                losses.mean(), accs.mean(),\n",
    "                                                                            )\n",
    "                         )\n",
    "                writer.add_scalar('Train Loss', losses.mean(), step/10)\n",
    "                writer.add_scalar('Train Accuracy', accs.mean(), step/10)\n",
    "                optim.step()\n",
    "                optim.zero_grad()\n",
    "            step += 1 \n",
    "        # ---- end-of-epoch validation (no gradients) ----\n",
    "        with torch.no_grad():\n",
    "            bs_cnt, bs, l_cnt = val_reader.label.shape\n",
    "            preds = []\n",
    "            # Separate array: reusing `losses` here clobbered the training\n",
    "            # rolling window above (bug fix).\n",
    "            val_losses = np.zeros(bs_cnt)\n",
    "            it = 0\n",
    "            for xsj, ysj, lsj in val_reader.iter():\n",
    "                cls_feature = torch.cat([xlnet(torch.tensor([sent]).cuda())[0][0][-1:] for sent in xsj])\n",
    "                pooled_feat = activation(dense_layer(cls_feature))\n",
    "                cls_scores = classifier(pooled_feat)\n",
    "                y_label = torch.tensor(ysj.argmax(axis=1)).cuda() if cuda else torch.tensor(ysj.argmax(axis=1))\n",
    "                cls_loss = cls_loss_fn(cls_scores, y_label)\n",
    "                val_losses[it] = cls_loss\n",
    "                preds.append(cls_scores)\n",
    "                it += 1  # was missing: every batch overwrote val_losses[0]\n",
    "                torch.cuda.empty_cache()\n",
    "        val_preds = torch.cat(preds).cpu()\n",
    "        val_acc = accuracy_score(val_reader.label.reshape(bs_cnt*bs, l_cnt).argmax(axis=1), val_preds.argmax(axis=1))\n",
    "        val_loss = val_losses.mean()\n",
    "        writer.add_scalar('valid Loss', val_loss, epoch)\n",
    "        writer.add_scalar('valid Accuracy', val_acc, epoch)\n",
    "        cls_save_as = './%s/THUModel_epoch%03d.pkl'% (log_dir, epoch)\n",
    "        torch.save(\n",
    "                            {\n",
    "                                # Key kept as \"bert\" for checkpoint compatibility;\n",
    "                                # previously referenced an undefined `bert` name\n",
    "                                # (NameError at save time).\n",
    "                                \"bert\": xlnet.state_dict(),\n",
    "                                \"dense_layer\":dense_layer.state_dict(),\n",
    "                                \"classifier\": classifier.state_dict(),\n",
    "                            },\n",
    "                            cls_save_as\n",
    "                        )\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the local pretrained XLNet tokenizer and encoder.\n",
    "# NOTE(review): ./XLBaseModel/ is a repo-local path -- confirm it exists.\n",
    "xt = XLNetTokenizer.from_pretrained(\"./XLBaseModel/\")\n",
    "xlnet = XLNetModel.from_pretrained(\"./XLBaseModel/\").cuda()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Paths to the THUCnews test/validation/train splits.\n",
    "test_file = './THUCnews/cnews.test.txt'\n",
    "val_file = './THUCnews/cnews.val.txt'\n",
    "train_file = './THUCnews/cnews.train.txt'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load each split, shuffle it out-of-place (random.sample over the full\n",
    "# length), and wrap it in a THUReader.  NOTE(review): the `20` argument is\n",
    "# presumably the batch size -- confirm against TNews_Loader.THUReader.\n",
    "tr_data = TNews_Loader.load_data(train_file)\n",
    "tr_data = random.sample(tr_data, len(tr_data))\n",
    "tr_reader = TNews_Loader.THUReader(tr_data, 20, xt)\n",
    "\n",
    "val_data = TNews_Loader.load_data(val_file)\n",
    "val_data = random.sample(val_data, len(val_data))\n",
    "val_reader = TNews_Loader.THUReader(val_data, 20, xt)\n",
    "\n",
    "te_data = TNews_Loader.load_data(test_file)\n",
    "te_data = random.sample(te_data, len(te_data))\n",
    "te_reader = TNews_Loader.THUReader(te_data, 20, xt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Restore pre-tokenized readers from the pickle cache (alternative to the\n",
    "# tokenizing cell above).  NOTE(review): pickle.load can execute arbitrary\n",
    "# code -- only load cache files this notebook itself produced.\n",
    "with open(\"tr_reader.pkl\", \"rb\") as fr:\n",
    "    tr_reader = pickle.load(fr)\n",
    "\n",
    "with open(\"val_reader.pkl\", \"rb\") as fr:\n",
    "    val_reader = pickle.load(fr)\n",
    "\n",
    "with open(\"te_reader.pkl\", \"rb\") as fr:\n",
    "    te_reader = pickle.load(fr)    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Cache the tokenized readers so later sessions can skip re-tokenization.\n",
    "for path, reader in [(\"tr_reader.pkl\", tr_reader),\n",
    "                     (\"val_reader.pkl\", val_reader),\n",
    "                     (\"te_reader.pkl\", te_reader)]:\n",
    "    with open(path, \"wb\") as fw:\n",
    "        pickle.dump(reader, fw, protocol=pickle.HIGHEST_PROTOCOL)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Classification head: 768-d encoder feature -> 768-d pooled -> 14 classes.\n",
    "classifier = nn.Linear(768, 14).cuda()\n",
    "dense_layer = nn.Linear(768, 768).cuda()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Wrap the encoder in DataParallel when several GPUs are visible.\n",
    "# NOTE(review): this relies on sys.argv[1] holding the comma-separated GPU\n",
    "# ids -- in a notebook kernel that may not be set; see the env-setup cell.\n",
    "if torch.cuda.device_count() > 1:\n",
    "    device_ids = list( range( len( sys.argv[1].split(\",\") ) ) )\n",
    "    xlnet = nn.DataParallel(xlnet, device_ids=device_ids)\n",
    "    device_name = \"cuda:%d\"%device_ids[0]\n",
    "    device = torch.device(device_name)\n",
    "    xlnet.to(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/hadoop/.conda/envs/py37_torch/lib/python3.6/site-packages/ipykernel_launcher.py:4: RuntimeWarning: divide by zero encountered in true_divide\n",
      "  after removing the cwd from sys.path.\n",
      "/home/hadoop/.conda/envs/py37_torch/lib/python3.6/site-packages/ipykernel_launcher.py:5: RuntimeWarning: invalid value encountered in true_divide\n",
      "  \"\"\"\n"
     ]
    },
    {
     "ename": "NameError",
     "evalue": "name 'subj_reader' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-8-53b4079d048b>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m      1\u001b[0m thu_cls_train(tr_reader, val_reader, te_reader, \n\u001b[1;32m      2\u001b[0m                    \u001b[0mxlnet\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mclassifier\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdense_layer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mactivation\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mReLU\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m                    \u001b[0mtrain_epochs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m100\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcuda\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlog_dir\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m\"XLNetCLS\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      4\u001b[0m                   )\n",
      "\u001b[0;32m<ipython-input-4-1bcd7297b24e>\u001b[0m in \u001b[0;36mthu_cls_train\u001b[0;34m(tr_reader, val_reader, te_reader, xlnet, classifier, dense_layer, activation, train_epochs, cuda, log_dir)\u001b[0m\n\u001b[1;32m     26\u001b[0m     \u001b[0mstep\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     27\u001b[0m     \u001b[0;32mfor\u001b[0m \u001b[0mepoch\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtrain_epochs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 28\u001b[0;31m         \u001b[0;32mfor\u001b[0m \u001b[0mxsj\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mysj\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlsj\u001b[0m \u001b[0;32min\u001b[0m \u001b[0msubj_reader\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0miter\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     29\u001b[0m             \u001b[0msent_tensors\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msent_mask\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msenti_data2bert_tensors\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mxsj\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcuda\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     30\u001b[0m             \u001b[0mwd_embs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mxlnet\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msent_tensors\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput_mask\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msent_mask\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mNameError\u001b[0m: name 'subj_reader' is not defined"
     ]
    }
   ],
   "source": [
    "# Kick off end-to-end training.  (The recorded NameError traceback came\n",
    "# from a stale, since-edited definition of thu_cls_train that referenced\n",
    "# `subj_reader`; re-run the function cell above before this one.)\n",
    "thu_cls_train(tr_reader, val_reader, te_reader, \n",
    "                   xlnet, classifier, dense_layer, activation=nn.ReLU, \n",
    "                   train_epochs=100, cuda=True, log_dir = \"XLNetCLS\"\n",
    "                  )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Configuration for running the training logic cell-by-cell below,\n",
    "# mirroring thu_cls_train's defaults (note: an instantiated nn.ReLU()).\n",
    "activation=nn.ReLU()\n",
    "train_epochs=3\n",
    "cuda=True\n",
    "log_dir = \"XLNetCLS\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/hadoop/.conda/envs/py37_torch/lib/python3.6/site-packages/ipykernel_launcher.py:4: RuntimeWarning: divide by zero encountered in true_divide\n",
      "  after removing the cwd from sys.path.\n",
      "/home/hadoop/.conda/envs/py37_torch/lib/python3.6/site-packages/ipykernel_launcher.py:5: RuntimeWarning: invalid value encountered in true_divide\n",
      "  \"\"\"\n"
     ]
    }
   ],
   "source": [
    "# Inlined copy of thu_cls_train's setup, for stepping through training\n",
    "# manually.  NOTE(review): duplicates the function above -- keep in sync.\n",
    "# The recorded divide-by-zero RuntimeWarnings come from WeightsForUmbalanced\n",
    "# when some class count is zero.\n",
    "cls_weights = torch.tensor(\n",
    "            WeightsForUmbalanced(\n",
    "                tr_reader.label\n",
    "            ),\n",
    "            dtype = torch.float32\n",
    "    )\n",
    "cls_loss_fn = nn.CrossEntropyLoss(weight=cls_weights) if not cuda else nn.CrossEntropyLoss(weight=cls_weights.cuda())\n",
    "\n",
    "optim = torch.optim.Adagrad([\n",
    "                            {'params': xlnet.parameters(), 'lr':1e-5},\n",
    "                            {'params': dense_layer.parameters(), 'lr':5e-5},\n",
    "                            {'params': classifier.parameters(), 'lr': 5e-5}\n",
    "                         ]\n",
    ")\n",
    "# Rolling windows for the last 10 training losses / accuracies.\n",
    "losses = np.zeros([10]) \n",
    "accs = np.zeros([10])\n",
    "optim.zero_grad()\n",
    "writer = SummaryWriter(log_dir)\n",
    "\n",
    "batches_tr, batchsize, _ = tr_reader.label.shape\n",
    "\n",
    "step = 0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Validation pass over val_reader, then checkpointing.  NOTE(review):\n",
    "# `epoch`, `activation`, `cls_loss_fn` and `writer` come from earlier cells'\n",
    "# kernel state -- run the setup cells first.\n",
    "with torch.no_grad():\n",
    "    bs_cnt, bs, l_cnt = val_reader.label.shape\n",
    "    preds = []\n",
    "    losses = np.zeros(bs_cnt)\n",
    "    it = 0\n",
    "    for xsj, ysj, lsj in val_reader.iter():\n",
    "        # Last-position feature of each sentence, encoded one at a time.\n",
    "        cls_feature = torch.cat([xlnet(torch.tensor([sent]).cuda())[0][0][-1:] for sent in xsj])\n",
    "        pooled_feat = activation(dense_layer(cls_feature))\n",
    "        cls_scores = classifier(pooled_feat)\n",
    "        y_label = torch.tensor(ysj.argmax(axis=1)).cuda() if cuda else torch.tensor(ysj.argmax(axis=1))\n",
    "        cls_loss = cls_loss_fn(cls_scores, y_label)\n",
    "        losses[it] = cls_loss\n",
    "        it += 1  # was missing: every batch overwrote losses[0]\n",
    "        preds.append(cls_scores)\n",
    "        torch.cuda.empty_cache()\n",
    "    val_preds = torch.cat(preds).cpu()\n",
    "    val_acc = accuracy_score(val_reader.label.reshape(bs_cnt*bs, l_cnt).argmax(axis=1), val_preds.argmax(axis=1))\n",
    "    val_loss = losses.mean()\n",
    "writer.add_scalar('valid Loss', val_loss, epoch)\n",
    "writer.add_scalar('valid Accuracy', val_acc, epoch)\n",
    "cls_save_as = './%s/THUModel_epoch%03d.pkl'% (log_dir, epoch)\n",
    "torch.save(\n",
    "    {\n",
    "        # Key kept as \"bert\" for checkpoint compatibility; previously\n",
    "        # referenced an undefined `bert` name (NameError).\n",
    "        \"bert\": xlnet.state_dict(),\n",
    "        \"dense_layer\": dense_layer.state_dict(),\n",
    "        \"classifier\": classifier.state_dict(),\n",
    "    },\n",
    "    cls_save_as\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Release cached GPU memory back to the driver (does not free tensors\n",
    "# still referenced by live Python variables).\n",
    "torch.cuda.empty_cache()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
