{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# vocab object from harvardnlp/opennmt-py\n",
    "class Vocab(object):\n",
    "    # Bidirectional token <-> index mapping with optional special entries.\n",
    "    def __init__(self, filename=None, data=None, lower=False):\n",
    "        self.idxToLabel = {}\n",
    "        self.labelToIdx = {}\n",
    "        self.lower = lower\n",
    "\n",
    "        # Special entries will not be pruned.\n",
    "        self.special = []\n",
    "\n",
    "        if data is not None:\n",
    "            self.addSpecials(data)\n",
    "        if filename is not None:\n",
    "            self.loadFile(filename)\n",
    "\n",
    "    def size(self):\n",
    "        # Number of entries in the vocabulary.\n",
    "        return len(self.idxToLabel)\n",
    "\n",
    "    # Load entries from a file (one token per line).\n",
    "    def loadFile(self, filename):\n",
    "        # FIX: close the file deterministically; the old manual `idx`\n",
    "        # counter was dead code and has been removed.\n",
    "        with open(filename, 'r', encoding='utf8', errors='ignore') as f:\n",
    "            for line in f:\n",
    "                self.add(line.rstrip('\\n'))\n",
    "\n",
    "    def getIndex(self, key, default=None):\n",
    "        # Index of `key`, or `default` when the token is unknown.\n",
    "        key = key.lower() if self.lower else key\n",
    "        try:\n",
    "            return self.labelToIdx[key]\n",
    "        except KeyError:\n",
    "            return default\n",
    "\n",
    "    def getLabel(self, idx, default=None):\n",
    "        # Token stored at `idx`, or `default` when absent.\n",
    "        try:\n",
    "            return self.idxToLabel[idx]\n",
    "        except KeyError:\n",
    "            return default\n",
    "\n",
    "    # Mark this `label` (optionally at a fixed `idx`) as special.\n",
    "    def addSpecial(self, label, idx=None):\n",
    "        # FIX: forward `idx` to add() instead of silently discarding it,\n",
    "        # and avoid duplicate indices in self.special.\n",
    "        idx = self.add(label, idx)\n",
    "        if idx not in self.special:\n",
    "            self.special += [idx]\n",
    "\n",
    "    # Mark all labels in `labels` as specials\n",
    "    def addSpecials(self, labels):\n",
    "        for label in labels:\n",
    "            self.addSpecial(label)\n",
    "\n",
    "    # Add `label` in the dictionary. Use `idx` as its index if given.\n",
    "    def add(self, label, idx=None):\n",
    "        # FIX: the comment above promised an `idx` parameter that the old\n",
    "        # signature did not have; accept it (default None keeps old behavior).\n",
    "        label = label.lower() if self.lower else label\n",
    "        if idx is not None:\n",
    "            # Caller pinned an explicit index for this label.\n",
    "            self.idxToLabel[idx] = label\n",
    "            self.labelToIdx[label] = idx\n",
    "        elif label in self.labelToIdx:\n",
    "            idx = self.labelToIdx[label]\n",
    "        else:\n",
    "            idx = len(self.idxToLabel)\n",
    "            self.idxToLabel[idx] = label\n",
    "            self.labelToIdx[label] = idx\n",
    "        return idx\n",
    "\n",
    "    # Convert `labels` to indices. Use `unkWord` if not found.\n",
    "    # Optionally insert `bosWord` at the beginning and `eosWord` at the end.\n",
    "    def convertToIdx(self, labels, unkWord, bosWord=None, eosWord=None):\n",
    "        vec = []\n",
    "\n",
    "        if bosWord is not None:\n",
    "            vec += [self.getIndex(bosWord)]\n",
    "\n",
    "        unk = self.getIndex(unkWord)\n",
    "        vec += [self.getIndex(label, default=unk) for label in labels]\n",
    "\n",
    "        if eosWord is not None:\n",
    "            vec += [self.getIndex(eosWord)]\n",
    "\n",
    "        return vec\n",
    "\n",
    "    # Convert `idx` to labels. If index `stop` is reached, convert it and return.\n",
    "    def convertToLabels(self, idx, stop):\n",
    "        labels = []\n",
    "\n",
    "        for i in idx:\n",
    "            labels += [self.getLabel(i)]\n",
    "            if i == stop:\n",
    "                break\n",
    "\n",
    "        return labels"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): this cell raises IsADirectoryError on a fresh run —\n",
    "# Vocab(filename=...) expects a vocab *file*, but './' is a directory.\n",
    "Vocab('./')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import os\n",
    "import math\n",
    "\n",
    "import torch\n",
    "\n",
    "# loading GLOVE word vectors\n",
    "# if .pth file is found, will load that\n",
    "# else will load from .txt file & save\n",
    "def load_word_vectors(path):\n",
    "    # Fast path: previously converted tensor (.pth) + vocab (.vocab) exist.\n",
    "    if os.path.isfile(path + '.pth') and os.path.isfile(path + '.vocab'):\n",
    "        print('==> File found, loading to memory')\n",
    "        vectors = torch.load(path + '.pth')\n",
    "        vocab = Vocab(filename=path + '.vocab')\n",
    "        return vocab, vectors\n",
    "    # saved file not found, read from txt file\n",
    "    # and create tensors for word vectors\n",
    "    print('==> File not found, preparing, be patient')\n",
    "    # FIX: use context managers (the old line-count generator leaked a file\n",
    "    # handle) and pass encoding/errors consistently on every open() — the\n",
    "    # dim-probing open below previously used the locale default encoding.\n",
    "    with open(path + '.txt', 'r', encoding='utf8', errors='ignore') as f:\n",
    "        count = sum(1 for line in f)\n",
    "    with open(path + '.txt', 'r', encoding='utf8', errors='ignore') as f:\n",
    "        # Infer the embedding dimensionality from the first line.\n",
    "        contents = f.readline().rstrip('\\n').split(' ')\n",
    "        dim = len(contents[1:])\n",
    "    words = [None] * count\n",
    "    vectors = torch.zeros(count, dim, dtype=torch.float, device='cpu')\n",
    "    with open(path + '.txt', 'r', encoding='utf8', errors='ignore') as f:\n",
    "        # One word + `dim` float components per line.\n",
    "        for idx, line in enumerate(f):\n",
    "            contents = line.rstrip('\\n').split(' ')\n",
    "            words[idx] = contents[0]\n",
    "            values = list(map(float, contents[1:]))\n",
    "            vectors[idx] = torch.tensor(values, dtype=torch.float, device='cpu')\n",
    "    # Persist the vocab and tensors so the fast path triggers next time.\n",
    "    with open(path + '.vocab', 'w', encoding='utf8', errors='ignore') as f:\n",
    "        for word in words:\n",
    "            f.write(word + '\\n')\n",
    "    vocab = Vocab(filename=path + '.vocab')\n",
    "    torch.save(vectors, path + '.pth')\n",
    "    return vocab, vectors\n",
    "\n",
    "\n",
    "# write unique words from a set of files to a new file\n",
    "def build_vocab(filenames, vocabfile):\n",
    "    vocab = set()\n",
    "    for filename in filenames:\n",
    "        # FIX: pass encoding/errors explicitly, matching every other\n",
    "        # open() in this notebook (the locale default can differ).\n",
    "        with open(filename, 'r', encoding='utf8', errors='ignore') as f:\n",
    "            for line in f:\n",
    "                tokens = line.rstrip('\\n').split(' ')\n",
    "                vocab |= set(tokens)\n",
    "    # Sorted output keeps the vocab file deterministic across runs.\n",
    "    with open(vocabfile, 'w', encoding='utf8', errors='ignore') as f:\n",
    "        for token in sorted(vocab):\n",
    "            f.write(token + '\\n')\n",
    "\n",
    "\n",
    "# mapping from scalar to vector\n",
    "def map_label_to_target(label, num_classes):\n",
    "    # Turn a (possibly fractional) gold score into a 1 x num_classes\n",
    "    # target distribution over classes 1..num_classes.\n",
    "    target = torch.zeros(1, num_classes, dtype=torch.float, device='cpu')\n",
    "    lo = int(math.floor(label))\n",
    "    hi = int(math.ceil(label))\n",
    "    if lo == hi:\n",
    "        # Integer score: all probability mass on that class.\n",
    "        target[0, lo - 1] = 1\n",
    "    else:\n",
    "        # Fractional score: linearly split mass between the two neighbours.\n",
    "        target[0, lo - 1] = hi - label\n",
    "        target[0, hi - 1] = label - lo\n",
    "    return target"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "\n",
    "# Special-token indices. NOTE(review): assumed to match the order the\n",
    "# special words are inserted into the Vocab — confirm against the caller.\n",
    "PAD = 0\n",
    "UNK = 1\n",
    "BOS = 2\n",
    "EOS = 3\n",
    "\n",
    "# Surface forms of the special tokens.\n",
    "PAD_WORD = '<blank>'\n",
    "UNK_WORD = '<unk>'\n",
    "BOS_WORD = '<s>'\n",
    "EOS_WORD = '</s>'\n",
    "\n",
    "\n",
    "# module for childsumtreelstm\n",
    "class ChildSumTreeLSTM(nn.Module):\n",
    "    # Child-Sum Tree-LSTM: a node's gates are computed from its input and\n",
    "    # the sum of its children's hidden states; each child gets its own\n",
    "    # forget gate.\n",
    "    def __init__(self, in_dim, mem_dim):\n",
    "        super(ChildSumTreeLSTM, self).__init__()\n",
    "        self.in_dim = in_dim\n",
    "        self.mem_dim = mem_dim\n",
    "        # Fused input/output/update projections (3 * mem_dim outputs each).\n",
    "        self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim)\n",
    "        self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)\n",
    "        # Forget-gate projections.\n",
    "        self.fx = nn.Linear(self.in_dim, self.mem_dim)\n",
    "        self.fh = nn.Linear(self.mem_dim, self.mem_dim)\n",
    "\n",
    "    def node_forward(self, inputs, child_c, child_h):\n",
    "        # Sum the children's hidden states into a single vector.\n",
    "        child_h_sum = torch.sum(child_h, dim=0, keepdim=True)\n",
    "\n",
    "        iou = self.ioux(inputs) + self.iouh(child_h_sum)\n",
    "        i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)\n",
    "        # FIX: torch.sigmoid/torch.tanh replace the deprecated\n",
    "        # F.sigmoid/F.tanh (removed from torch.nn.functional's public API).\n",
    "        i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)\n",
    "\n",
    "        # One forget gate per child, conditioned on this node's input.\n",
    "        f = torch.sigmoid(\n",
    "            self.fh(child_h) +\n",
    "            self.fx(inputs).repeat(len(child_h), 1)\n",
    "        )\n",
    "        fc = torch.mul(f, child_c)\n",
    "\n",
    "        c = torch.mul(i, u) + torch.sum(fc, dim=0, keepdim=True)\n",
    "        h = torch.mul(o, torch.tanh(c))\n",
    "        return c, h\n",
    "\n",
    "    def forward(self, tree, inputs):\n",
    "        # Post-order traversal: children's states must exist before the\n",
    "        # parent's node_forward reads them via tree.children[*].state.\n",
    "        for idx in range(tree.num_children):\n",
    "            self.forward(tree.children[idx], inputs)\n",
    "\n",
    "        if tree.num_children == 0:\n",
    "            # Leaf node: zero cell/hidden states stand in for children.\n",
    "            child_c = inputs[0].detach().new(1, self.mem_dim).fill_(0.).requires_grad_()\n",
    "            child_h = inputs[0].detach().new(1, self.mem_dim).fill_(0.).requires_grad_()\n",
    "        else:\n",
    "            child_c, child_h = zip(* map(lambda x: x.state, tree.children))\n",
    "            child_c, child_h = torch.cat(child_c, dim=0), torch.cat(child_h, dim=0)\n",
    "\n",
    "        tree.state = self.node_forward(inputs[tree.idx], child_c, child_h)\n",
    "        return tree.state"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from tqdm import tqdm\n",
    "\n",
    "import torch\n",
    "\n",
    "\n",
    "class Trainer(object):\n",
    "    # Wraps one model/criterion/optimizer and runs per-sample epochs.\n",
    "    def __init__(self, args, model, criterion, optimizer, device):\n",
    "        super(Trainer, self).__init__()\n",
    "        self.args = args\n",
    "        self.model = model\n",
    "        self.criterion = criterion\n",
    "        self.optimizer = optimizer\n",
    "        self.device = device\n",
    "        self.epoch = 0\n",
    "\n",
    "    # helper function for training\n",
    "    def train(self, dataset):\n",
    "        self.model.train()\n",
    "        self.optimizer.zero_grad()\n",
    "        total_loss = 0.0\n",
    "        # Fresh random visiting order every epoch.\n",
    "        indices = torch.randperm(len(dataset), dtype=torch.long, device='cpu')\n",
    "        for idx in tqdm(range(len(dataset)), desc='Training epoch ' + str(self.epoch + 1) + ''):\n",
    "            ltree, linput, rtree, rinput, label = dataset[indices[idx]]\n",
    "            # FIX: map_label_to_target is defined in this notebook, not in\n",
    "            # a `utils` module — `utils` was never imported (NameError).\n",
    "            target = map_label_to_target(label, dataset.num_classes)\n",
    "            linput, rinput = linput.to(self.device), rinput.to(self.device)\n",
    "            target = target.to(self.device)\n",
    "            output = self.model(ltree, linput, rtree, rinput)\n",
    "            loss = self.criterion(output, target)\n",
    "            total_loss += loss.item()\n",
    "            loss.backward()\n",
    "            # Gradient accumulation: step once every `batchsize` samples.\n",
    "            # NOTE(review): gradients of the trailing partial batch are\n",
    "            # discarded by the zero_grad() at the start of the next epoch.\n",
    "            if idx % self.args.batchsize == 0 and idx > 0:\n",
    "                self.optimizer.step()\n",
    "                self.optimizer.zero_grad()\n",
    "        self.epoch += 1\n",
    "        return total_loss / len(dataset)\n",
    "\n",
    "    # helper function for testing\n",
    "    def test(self, dataset):\n",
    "        self.model.eval()\n",
    "        with torch.no_grad():\n",
    "            total_loss = 0.0\n",
    "            predictions = torch.zeros(len(dataset), dtype=torch.float, device='cpu')\n",
    "            # Class values 1..num_classes for the expected-score dot product.\n",
    "            indices = torch.arange(1, dataset.num_classes + 1, dtype=torch.float, device='cpu')\n",
    "            for idx in tqdm(range(len(dataset)), desc='Testing epoch  ' + str(self.epoch) + ''):\n",
    "                ltree, linput, rtree, rinput, label = dataset[idx]\n",
    "                # FIX: same `utils.` NameError as in train().\n",
    "                target = map_label_to_target(label, dataset.num_classes)\n",
    "                linput, rinput = linput.to(self.device), rinput.to(self.device)\n",
    "                target = target.to(self.device)\n",
    "                output = self.model(ltree, linput, rtree, rinput)\n",
    "                loss = self.criterion(output, target)\n",
    "                total_loss += loss.item()\n",
    "                output = output.squeeze().to('cpu')\n",
    "                # Expected class value under exp(output).\n",
    "                # NOTE(review): assumes the model emits log-probabilities —\n",
    "                # confirm against the model's final layer.\n",
    "                predictions[idx] = torch.dot(indices, torch.exp(output))\n",
    "        return total_loss / len(dataset), predictions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "fakenews",
   "language": "python",
   "name": "fakenews"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
