{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.autograd import Variable\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n",
    "import torch.optim as optim\n",
    "import torch.nn.functional as F"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from tqdm import tqdm"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Direct ChildSumTreeLSTM Code"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "class OutputModule(nn.Module):\n",
    "    # Linear + log-softmax classification head: maps a mem_dim hidden\n",
    "    # state to log-probabilities over num_classes, with optional dropout.\n",
    "    def __init__(self, cuda, mem_dim, num_classes, device, dropout = False):\n",
    "        super(OutputModule, self).__init__()\n",
    "        self.cudaFlag = cuda\n",
    "        self.mem_dim = mem_dim\n",
    "        self.num_classes = num_classes\n",
    "        self.dropout = dropout\n",
    "        self.device = device\n",
    "\n",
    "        self.l1 = nn.Linear(self.mem_dim, self.num_classes)\n",
    "        # FIX: dim made explicit -- implicit-dim LogSoftmax is deprecated, and\n",
    "        # for the 2D (1, mem_dim) states fed in here it resolves to dim=1.\n",
    "        self.logsoftmax = nn.LogSoftmax(dim=1)\n",
    "\n",
    "    def forward(self, vec, training = False):\n",
    "        # `training` gates dropout explicitly because callers invoke this\n",
    "        # module directly rather than via the train()/eval() switch.\n",
    "        if self.dropout:\n",
    "            out = self.logsoftmax(self.l1(F.dropout(vec, training = training)))\n",
    "        else:\n",
    "            out = self.logsoftmax(self.l1(vec))\n",
    "        return out"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {},
   "outputs": [],
   "source": [
    "class ChildSumTreeLSTM(nn.Module):\n",
    "    # Child-Sum Tree-LSTM (Tai et al., 2015): i/o/u gates are computed from\n",
    "    # the node input and the SUM of child hidden states; the forget gate is\n",
    "    # computed separately per child.\n",
    "    def __init__(self,cuda, in_dim, mem_dim,userVects,labels,labelMap,criterion,device):\n",
    "        super(ChildSumTreeLSTM, self).__init__()\n",
    "        self.in_dim = in_dim\n",
    "        self.mem_dim = mem_dim\n",
    "        # ioux/iouh emit the concatenated i, o, u pre-activations in one matmul.\n",
    "        self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim)\n",
    "        self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)\n",
    "        self.fx = nn.Linear(self.in_dim, self.mem_dim)\n",
    "        self.fh = nn.Linear(self.mem_dim, self.mem_dim)\n",
    "        self.fc = nn.Linear(self.mem_dim,4)\n",
    "\n",
    "    def node_forward(self, inputs, child_c, child_h):\n",
    "        # child_c/child_h: (num_children, mem_dim). Returns (c, h), each (1, mem_dim).\n",
    "        child_h_sum = torch.sum(child_h, dim=0, keepdim=True)\n",
    "\n",
    "        iou = self.ioux(inputs) + self.iouh(child_h_sum)\n",
    "        i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)\n",
    "        i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)\n",
    "\n",
    "        # One forget gate per child: broadcast the input term over children.\n",
    "        f = torch.sigmoid(\n",
    "            self.fh(child_h) +\n",
    "            self.fx(inputs).repeat(len(child_h), 1)\n",
    "        )\n",
    "        fc = torch.mul(f, child_c)\n",
    "\n",
    "        c = torch.mul(i, u) + torch.sum(fc, dim=0, keepdim=True)\n",
    "        h = torch.mul(o, torch.tanh(c))\n",
    "        return c, h\n",
    "\n",
    "    def forward(self, tree, inputs):\n",
    "        # Post-order traversal: children are encoded before this node.\n",
    "        for idx in range(tree.num_children):\n",
    "            self.forward(tree.childrenList[idx], inputs)\n",
    "\n",
    "        if tree.num_children == 0:\n",
    "            child_c = inputs[0].detach().new(1, self.mem_dim).fill_(0.).requires_grad_()\n",
    "            child_h = inputs[0].detach().new(1, self.mem_dim).fill_(0.).requires_grad_()\n",
    "        else:\n",
    "            # BUG FIX: was `tree.children[x].state` mapped over `tree.children`,\n",
    "            # but the recursion above (and every other class in this notebook)\n",
    "            # stores children in `tree.childrenList`.\n",
    "            child_c, child_h = zip(*map(lambda x: x.state, tree.childrenList))\n",
    "            child_c, child_h = torch.cat(child_c, dim=0), torch.cat(child_h, dim=0)\n",
    "        tree.state = self.node_forward(inputs[tree.uid], child_c, child_h)\n",
    "        \n",
    "        out = self.fc(tree.state[1])\n",
    "        return tree.state, out"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "class treeEncoder(nn.Module):\n",
    "    # Recursive Child-Sum Tree-LSTM over a cascade tree. Each node consumes\n",
    "    # the user vector userVects[node.uid]; a shared OutputModule emits a\n",
    "    # per-node 4-class log-probability whose loss is summed over the tree.\n",
    "    # forward(node) returns (node state, accumulated loss).\n",
    "    def __init__(self, cuda,in_dim, mem_dim,userVects,labels,labelMap,criterion,device):\n",
    "        super(treeEncoder, self).__init__()\n",
    "        self.cudaFlag = cuda\n",
    "        self.in_dim = in_dim\n",
    "        self.mem_dim = mem_dim\n",
    "        self.device = device\n",
    "        self.labels = labels\n",
    "        self.labelMap = labelMap\n",
    "        self.criterion = criterion\n",
    "\n",
    "        # Per-gate input (x) and hidden (h) projections:\n",
    "        # input, forget, update (candidate) and output gates.\n",
    "        self.ix = nn.Linear(self.in_dim,self.mem_dim)\n",
    "        self.ih = nn.Linear(self.mem_dim,self.mem_dim)\n",
    "\n",
    "        self.fx = nn.Linear(self.in_dim,self.mem_dim)\n",
    "        self.fh = nn.Linear(self.mem_dim, self.mem_dim)\n",
    "\n",
    "        self.ux = nn.Linear(self.in_dim,self.mem_dim)\n",
    "        self.uh = nn.Linear(self.mem_dim,self.mem_dim)\n",
    "\n",
    "        self.ox = nn.Linear(self.in_dim,self.mem_dim)\n",
    "        self.oh = nn.Linear(self.mem_dim,self.mem_dim)\n",
    "        \n",
    "        self.userVects = userVects\n",
    "        self.outputModule = OutputModule(self.cudaFlag,mem_dim,4,self.device,dropout=False)\n",
    "    \n",
    "    def predict(self,node):\n",
    "        # Inference pass: encode the subtrees (reusing forward, as the\n",
    "        # original did), then classify the root state with dropout disabled.\n",
    "        for i in range(node.num_children):\n",
    "            _, _ = self.forward(node.childrenList[i])\n",
    "        child_c, child_h = self.getChildStates(node)\n",
    "        node.state = self.nodeForward(self.userVects[node.uid].to(self.device),child_c,child_h)\n",
    "        \n",
    "        output = self.outputModule.forward(node.state[1], False)\n",
    "        \n",
    "        return output\n",
    "    \n",
    "    def forward(self,node):\n",
    "        loss = Variable(torch.zeros(1))\n",
    "        \n",
    "        if self.cudaFlag:\n",
    "            loss = loss.to(self.device)\n",
    "        \n",
    "        # Post-order: children first so getChildStates can read their .state.\n",
    "        for i in range(node.num_children):\n",
    "            _, child_loss = self.forward(node.childrenList[i])\n",
    "            loss = loss + child_loss\n",
    "        child_c, child_h = self.getChildStates(node)\n",
    "        node.state = self.nodeForward(self.userVects[node.uid].to(self.device),child_c,child_h)\n",
    "        \n",
    "        output = self.outputModule.forward(node.state[1], True)\n",
    "        node.output = output\n",
    "\n",
    "        label = Variable(torch.tensor(self.labelMap[node.label]))\n",
    "\n",
    "        # BUG FIX: label moved to the output's device before the criterion;\n",
    "        # previously a CPU label was paired with a (possibly CUDA) output.\n",
    "        loss = loss + self.criterion(output.reshape(-1,4), label.reshape(-1).to(output.device))\n",
    "        \n",
    "        return node.state,loss\n",
    "        \n",
    "    def nodeForward(self, inputs, child_c, child_h):\n",
    "        # Child-Sum Tree-LSTM cell: i/o/u gates see the summed child hidden\n",
    "        # states; the forget gate is computed separately per child.\n",
    "        child_h_sum = torch.sum(child_h, dim=0, keepdim=True)\n",
    "\n",
    "        i = torch.sigmoid(self.ix(inputs) + self.ih(child_h_sum))\n",
    "        o = torch.sigmoid(self.ox(inputs)+self.oh(child_h_sum))\n",
    "        u = torch.tanh(self.ux(inputs)+self.uh(child_h_sum))\n",
    "        \n",
    "        f = torch.sigmoid(\n",
    "            self.fh(child_h) +\n",
    "            self.fx(inputs).repeat(len(child_h), 1)\n",
    "        )\n",
    "        fc = torch.mul(f, child_c)\n",
    "\n",
    "        c = torch.mul(i, u) + torch.sum(fc, dim=0, keepdim=True)\n",
    "        h = torch.mul(o, torch.tanh(c))\n",
    "        return c, h\n",
    "    \n",
    "    def getChildStates(self,node):\n",
    "        # Gather the children's (c, h) into (num_children, mem_dim) tensors;\n",
    "        # a leaf gets one zero row so nodeForward sees a uniform shape.\n",
    "        if node.num_children==0:\n",
    "            child_c = Variable(torch.zeros(1,self.mem_dim))\n",
    "            child_h = Variable(torch.zeros(1,self.mem_dim))\n",
    "            if self.cudaFlag:\n",
    "                child_c, child_h = child_c.to(self.device), child_h.to(self.device)\n",
    "        \n",
    "        else:\n",
    "            child_c = Variable(torch.Tensor(node.num_children,self.mem_dim))\n",
    "            child_h = Variable(torch.Tensor(node.num_children,self.mem_dim))\n",
    "            if self.cudaFlag:\n",
    "                child_c, child_h = child_c.to(self.device), child_h.to(self.device)\n",
    "            \n",
    "            for idx in range(node.num_children):\n",
    "                child_c[idx] = node.childrenList[idx].state[0]\n",
    "                child_h[idx] = node.childrenList[idx].state[1]\n",
    "        return child_c, child_h"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Temporal Tree LSTM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Scratch cell: example tensor list, unused by the models below.\n",
    "a = [torch.tensor([1,2]),torch.tensor([3,4]),torch.tensor([2,5])]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [],
   "source": [
    "class lstmTreeEncoder(nn.Module):\n",
    "    # Temporal model: encode each incremental tree snapshot with a\n",
    "    # treeEncoder, run an LSTM over the sequence of root hidden states, and\n",
    "    # classify from the last timestep's output.\n",
    "    def __init__(self, cuda,in_dim, mem_dim,userVects,labels,labelMap,criterion,device, maxTrees=20):\n",
    "        super(lstmTreeEncoder, self).__init__()\n",
    "        self.device = device\n",
    "        self.hidden_size = mem_dim\n",
    "        # Generalized: the snapshot cap was a hard-coded [:20] slice.\n",
    "        self.maxTrees = maxTrees\n",
    "        \n",
    "        self.treeEnc = treeEncoder(cuda,in_dim,mem_dim,userVects,labels,labelMap,criterion,device)\n",
    "        self.topLevelLSTM = nn.LSTM(mem_dim,self.hidden_size,batch_first=False)\n",
    "        self.fc = nn.Linear(self.hidden_size, 4)\n",
    "    \n",
    "    def forward(self,listOfIncTrees):\n",
    "        h0 = torch.zeros(1, 1, self.hidden_size).requires_grad_().to(self.device)\n",
    "        c0 = torch.zeros(1, 1, self.hidden_size).requires_grad_().to(self.device)\n",
    "        \n",
    "        # Root hidden state (shape (1, mem_dim)) of each snapshot; stacking\n",
    "        # yields (seq_len, batch=1, mem_dim), matching batch_first=False.\n",
    "        inp = [self.treeEnc(tree.root)[0][1] for tree in listOfIncTrees[:self.maxTrees]]\n",
    "        inp = torch.stack(inp)\n",
    "        out, (hn,cn) = self.topLevelLSTM(inp, (h0, c0))\n",
    "\n",
    "        # Classify from the final timestep only.\n",
    "        out = out[-1]\n",
    "        out = self.fc(out)\n",
    "        return out"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Temporal Decay Tree Model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import math"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "class decayTreeEncoder(nn.Module):\n",
    "    # Child-Sum Tree-LSTM whose forget gate is damped by the time elapsed\n",
    "    # between the root post (self.startTime) and each node (temporal decay).\n",
    "    # forward(node) returns (node state, accumulated loss).\n",
    "    def __init__(self, cuda,in_dim, mem_dim,userVects,labels,labelMap,criterion,device):\n",
    "        super(decayTreeEncoder, self).__init__()\n",
    "        self.cudaFlag = cuda\n",
    "        self.in_dim = in_dim\n",
    "        self.mem_dim = mem_dim\n",
    "        self.device = device\n",
    "        self.labels = labels\n",
    "        self.labelMap = labelMap\n",
    "        self.criterion = criterion\n",
    "\n",
    "        # Per-gate input (x) and hidden (h) projections:\n",
    "        # input, forget, update (candidate) and output gates.\n",
    "        self.ix = nn.Linear(self.in_dim,self.mem_dim)\n",
    "        self.ih = nn.Linear(self.mem_dim,self.mem_dim)\n",
    "\n",
    "        self.fx = nn.Linear(self.in_dim,self.mem_dim)\n",
    "        self.fh = nn.Linear(self.mem_dim, self.mem_dim)\n",
    "\n",
    "        self.ux = nn.Linear(self.in_dim,self.mem_dim)\n",
    "        self.uh = nn.Linear(self.mem_dim,self.mem_dim)\n",
    "\n",
    "        self.ox = nn.Linear(self.in_dim,self.mem_dim)\n",
    "        self.oh = nn.Linear(self.mem_dim,self.mem_dim)\n",
    "        \n",
    "        self.userVects = userVects\n",
    "        self.outputModule = OutputModule(self.cudaFlag,mem_dim,4,self.device,dropout=False)\n",
    "    \n",
    "    def predict(self,node):\n",
    "        # Inference pass from the root: pin the reference time once, encode\n",
    "        # the subtrees, classify the root state with dropout disabled.\n",
    "        self.startTime = node.time_stamp\n",
    "        for i in range(node.num_children):\n",
    "            _, _ = self._forward(node.childrenList[i])\n",
    "        child_c, child_h = self.getChildStates(node)\n",
    "        timediff = self.startTime - node.time_stamp\n",
    "        # FIX: user vector moved to self.device (forward already did this;\n",
    "        # predict was inconsistent and would crash on CUDA).\n",
    "        node.state = self.nodeForward(self.userVects[node.uid].to(self.device),child_c,child_h,timediff)\n",
    "        \n",
    "        output = self.outputModule.forward(node.state[1], False)\n",
    "        \n",
    "        return output\n",
    "    \n",
    "    def forward(self,node):\n",
    "        # BUG FIX: the reference timestamp is now fixed once at the root.\n",
    "        # Previously every recursive call reset self.startTime to the current\n",
    "        # node's time_stamp, so timediff was always 0 and the decay a no-op.\n",
    "        self.startTime = node.time_stamp\n",
    "        return self._forward(node)\n",
    "    \n",
    "    def _forward(self,node):\n",
    "        # Recursive worker; assumes self.startTime was set by forward()/predict().\n",
    "        loss = Variable(torch.zeros(1))\n",
    "        \n",
    "        if self.cudaFlag:\n",
    "            loss = loss.to(self.device)\n",
    "        \n",
    "        # Post-order: children first so getChildStates can read their .state.\n",
    "        for i in range(node.num_children):\n",
    "            _, child_loss = self._forward(node.childrenList[i])\n",
    "            loss = loss + child_loss\n",
    "        child_c, child_h = self.getChildStates(node)\n",
    "        timediff = self.startTime - node.time_stamp\n",
    "        node.state = self.nodeForward(self.userVects[node.uid].to(self.device),child_c,child_h,timediff)\n",
    "        \n",
    "        output = self.outputModule.forward(node.state[1], True)\n",
    "        node.output = output\n",
    "\n",
    "        label = Variable(torch.tensor(self.labelMap[node.label]))\n",
    "\n",
    "        # BUG FIX: label moved to the output's device before the criterion,\n",
    "        # matching the CUDA-safe variant that was left commented out here.\n",
    "        loss = loss + self.criterion(output.reshape(-1,4), label.reshape(-1).to(output.device))\n",
    "        \n",
    "        return node.state,loss\n",
    "    \n",
    "    def nodeForward(self, inputs, child_c, child_h,timediff):\n",
    "        child_h_sum = torch.sum(child_h, dim=0, keepdim=True)\n",
    "\n",
    "        i = torch.sigmoid(self.ix(inputs) + self.ih(child_h_sum))\n",
    "        o = torch.sigmoid(self.ox(inputs)+self.oh(child_h_sum))\n",
    "        u = torch.tanh(self.ux(inputs)+self.uh(child_h_sum))\n",
    "        \n",
    "        # NOTE(review): the decay applies exp twice -- exp(-exp(timediff)/60).\n",
    "        # Kept as written; confirm whether exp(-timediff/60) was intended.\n",
    "        try:\n",
    "            decayF = math.exp(timediff)\n",
    "        except OverflowError:\n",
    "            decayF = float('inf')\n",
    "        \n",
    "        f = torch.sigmoid(\n",
    "            (self.fh(child_h) +\n",
    "            self.fx(inputs).repeat(len(child_h), 1))*(math.exp(-1*decayF/60))\n",
    "        )\n",
    "        fc = torch.mul(f, child_c)\n",
    "\n",
    "        c = torch.mul(i, u) + torch.sum(fc, dim=0, keepdim=True)\n",
    "        h = torch.mul(o, torch.tanh(c))\n",
    "        return c, h\n",
    "    \n",
    "    def getChildStates(self,node):\n",
    "        # Gather the children's (c, h) into (num_children, mem_dim) tensors;\n",
    "        # a leaf gets one zero row so nodeForward sees a uniform shape.\n",
    "        if node.num_children==0:\n",
    "            child_c = Variable(torch.zeros(1,self.mem_dim))\n",
    "            child_h = Variable(torch.zeros(1,self.mem_dim))\n",
    "            if self.cudaFlag:\n",
    "                child_c, child_h = child_c.to(self.device), child_h.to(self.device)\n",
    "        \n",
    "        else:\n",
    "            child_c = Variable(torch.Tensor(node.num_children,self.mem_dim))\n",
    "            child_h = Variable(torch.Tensor(node.num_children,self.mem_dim))\n",
    "            if self.cudaFlag:\n",
    "                child_c, child_h = child_c.to(self.device), child_h.to(self.device)\n",
    "            \n",
    "            for idx in range(node.num_children):\n",
    "                child_c[idx] = node.childrenList[idx].state[0]\n",
    "                child_h[idx] = node.childrenList[idx].state[1]\n",
    "        return child_c, child_h"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class temporalDecayTreeEncoder(nn.Module):\n",
    "    # Temporal model over decayTreeEncoder snapshots: encode each incremental\n",
    "    # tree, run an LSTM over the sequence of root hidden states, and classify\n",
    "    # from the last timestep's output.\n",
    "    def __init__(self, cuda,in_dim, mem_dim,userVects,labels,labelMap,criterion,device, maxTrees=20):\n",
    "        super(temporalDecayTreeEncoder, self).__init__()\n",
    "        self.device = device\n",
    "        self.hidden_size = mem_dim\n",
    "        # Generalized: the snapshot cap was a hard-coded [:20] slice.\n",
    "        self.maxTrees = maxTrees\n",
    "        \n",
    "        self.treeEnc = decayTreeEncoder(cuda,in_dim,mem_dim,userVects,labels,labelMap,criterion,device)\n",
    "        self.topLevelLSTM = nn.LSTM(mem_dim,self.hidden_size,batch_first=False)\n",
    "        self.fc = nn.Linear(self.hidden_size, 4)\n",
    "    \n",
    "    def forward(self,listOfIncTrees):\n",
    "        h0 = torch.zeros(1, 1, self.hidden_size).requires_grad_().to(self.device)\n",
    "        c0 = torch.zeros(1, 1, self.hidden_size).requires_grad_().to(self.device)\n",
    "        \n",
    "        # Root hidden state (shape (1, mem_dim)) of each snapshot; stacking\n",
    "        # yields (seq_len, batch=1, mem_dim), matching batch_first=False.\n",
    "        inp = [self.treeEnc(tree.root)[0][1] for tree in listOfIncTrees[:self.maxTrees]]\n",
    "        inp = torch.stack(inp)\n",
    "        out, (hn,cn) = self.topLevelLSTM(inp, (h0, c0))\n",
    "        \n",
    "        # Classify from the final timestep only.\n",
    "        out = out[-1]\n",
    "        out = self.fc(out)\n",
    "        return out"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Tree Encoder + Text Model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'nn' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-18-c6d4d07187be>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mclass\u001b[0m \u001b[0mtreeText\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mModule\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      2\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcuda\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0min_dim\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmem_dim\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0muserVects\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mlabels\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mlabelMap\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mcriterion\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      3\u001b[0m         \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtreeText\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      4\u001b[0m         \u001b[0mcheckpoint\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'../baselines/gruTextEnc.pth'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      5\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtreeEnc\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mtreeEncoder\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcuda\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0min_dim\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mmem_dim\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0muserVects\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mlabels\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mlabelMap\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mcriterion\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mNameError\u001b[0m: name 'nn' is not defined"
     ]
    }
   ],
   "source": [
    "class treeText(nn.Module):\n",
    "    # Joint model: concatenates the tree-LSTM root hidden state with a GRU\n",
    "    # text encoding and classifies the pair with one linear layer.\n",
    "    def __init__(self, cuda,in_dim, mem_dim,userVects,labels,labelMap,criterion,device,vocabSize,textEncState=None):\n",
    "        super(treeText,self).__init__()\n",
    "        \n",
    "        self.treeEnc = treeEncoder(cuda,in_dim,mem_dim,userVects,labels,labelMap,criterion,device)\n",
    "        self.textEnc = RecArch(vocabSize, 256, 50, 4, 1, 'gru',device)\n",
    "        \n",
    "        # Without a supplied pretrained state the text encoder keeps its\n",
    "        # random initialization.\n",
    "        if textEncState is not None:\n",
    "            self.textEnc.load_state_dict(textEncState)\n",
    "        \n",
    "        # Fused feature: mem_dim tree features + 50 text features.\n",
    "        self.fc = nn.Linear(mem_dim+50,4)\n",
    "        \n",
    "    def forward(self,tree,text):\n",
    "        # Root hidden state of the encoded tree, flattened to 1-D.\n",
    "        rootHidden = self.treeEnc(tree)[0][1].reshape(-1)\n",
    "        # Text encoding, flattened to 1-D (fc above expects 50 features here).\n",
    "        textHidden = self.textEnc(text).reshape(-1)\n",
    "        fused = torch.cat((rootHidden,textHidden))\n",
    "        return self.fc(fused)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class decayTreeText(nn.Module):\n",
    "    # Joint model: concatenates the decay-tree-LSTM root hidden state with a\n",
    "    # GRU text encoding and classifies the pair with one linear layer.\n",
    "    # Generalized to match the sibling treeText signature: vocabSize and\n",
    "    # textEncState are new defaulted parameters, so existing callers keep the\n",
    "    # old behavior (vocab 3370, weights loaded from the checkpoint on disk).\n",
    "    def __init__(self, cuda,in_dim, mem_dim,userVects,labels,labelMap,criterion,device,vocabSize=3370,textEncState=None):\n",
    "        super(decayTreeText,self).__init__()\n",
    "        self.treeEnc = decayTreeEncoder(cuda,in_dim,mem_dim,userVects,labels,labelMap,criterion,device)\n",
    "\n",
    "        self.textEnc = RecArch(vocabSize, 256, 50, 4, 1, 'gru',device)\n",
    "        if textEncState is None:\n",
    "            # Backward-compatible default: load the pretrained GRU text encoder.\n",
    "            checkpoint = torch.load('../baselines/gruTextEnc_twit16.pth')\n",
    "            self.textEnc.load_state_dict(checkpoint['state_dict'])\n",
    "        else:\n",
    "            self.textEnc.load_state_dict(textEncState)\n",
    "        \n",
    "        # Fused feature: mem_dim tree features + 50 text features.\n",
    "        self.fc = nn.Linear(mem_dim+50,4)\n",
    "        \n",
    "    def forward(self,tree,text):\n",
    "        # Root hidden state of the encoded tree, flattened to 1-D.\n",
    "        treeVec = self.treeEnc(tree)\n",
    "        treeVec = treeVec[0][1].reshape(-1)\n",
    "        \n",
    "        # Text encoding, flattened to 1-D (fc above expects 50 features here).\n",
    "        textVec = self.textEnc(text)\n",
    "        textVec = textVec.reshape(-1)\n",
    "        combVec =  torch.cat((treeVec,textVec))\n",
    "        out = self.fc(combVec)\n",
    "        return out"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "fakenews",
   "language": "python",
   "name": "fakenews"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
