{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Overwriting MAS_based_Training.py\n"
     ]
    }
   ],
   "source": [
    "%%writefile MAS_based_Training.py\n",
    "from __future__ import print_function, division\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from torch.autograd import Variable\n",
    "import numpy as np\n",
    "import torchvision\n",
    "from torchvision import datasets, models, transforms\n",
    "import matplotlib.pyplot as plt\n",
    "import torch.optim as optim\n",
    "import time\n",
    "import copy\n",
    "import os\n",
    "import pdb\n",
    "import math\n",
    "import shutil\n",
    "from torch.utils.data import DataLoader\n",
    "#end of imports\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Appending to MAS_based_Training.py\n"
     ]
    }
   ],
   "source": [
    "%%writefile -a MAS_based_Training.py\n",
    "\n",
    "#\n",
     "class Weight_Regularized_SGD(optim.SGD):\n",
     "    r\"\"\"SGD that adds the MAS importance penalty to each parameter gradient.\n",
     "\n",
     "    Inherits stochastic gradient descent (optionally with momentum / Nesterov\n",
     "    momentum) from torch.optim.SGD; only step() differs: it receives reg_params,\n",
     "    the per-parameter importance weights (omega) plus initial values, and adds\n",
     "    2 * lambda * omega * (weight - init_val) to the gradient before the update.\n",
     "    \"\"\"\n",
     "\n",
     "    def __init__(self, params, lr=0.001, momentum=0, dampening=0,\n",
     "                 weight_decay=0, nesterov=False):\n",
     "        \n",
     "        super(Weight_Regularized_SGD, self).__init__(params, lr,momentum,dampening,weight_decay,nesterov)\n",
     "\n",
     "    def __setstate__(self, state):\n",
     "        super(Weight_Regularized_SGD, self).__setstate__(state)\n",
     "       \n",
     "        \n",
     "    def step(self, reg_params,closure=None):\n",
     "        \"\"\"Performs a single optimization step.\n",
     "        Arguments:\n",
     "            reg_params: dict mapping parameter tensors to their MAS data\n",
     "                (keys: omega, init_val); also holds the global key lambda.\n",
     "            closure (callable, optional): A closure that reevaluates the model\n",
     "                and returns the loss.\n",
     "        \"\"\"\n",
     "       \n",
     "\n",
     "        loss = None\n",
     "        if closure is not None:\n",
     "            loss = closure()\n",
     "        \n",
     "        # global regularization strength shared by all parameters\n",
     "        reg_lambda=reg_params.get('lambda')\n",
     "       \n",
     "        for group in self.param_groups:\n",
     "            weight_decay = group['weight_decay']\n",
     "            momentum = group['momentum']\n",
     "            dampening = group['dampening']\n",
     "            nesterov = group['nesterov']\n",
     "           \n",
     "            for p in group['params']:\n",
     "                if p.grad is None:\n",
     "                    continue\n",
     "                d_p = p.grad.data\n",
     "               \n",
     "                #MAS PART CODE GOES HERE\n",
     "                #if this param has an omega to use for regularization\n",
     "                if p in reg_params:\n",
     "                    \n",
     "                    reg_param=reg_params.get(p)\n",
     "                    #get omega for this parameter\n",
     "                    omega=reg_param.get('omega')\n",
     "                    #initial value of the parameter when training started\n",
     "                    init_val=reg_param.get('init_val')\n",
     "                    \n",
     "                    curr_wegiht_val=p.data\n",
     "                    #move the tensors to cuda\n",
     "                    init_val=init_val.cuda()\n",
     "                    omega=omega.cuda()\n",
     "                    \n",
     "                    #get the difference (legacy add(scalar, tensor) API: current - init)\n",
     "                    weight_dif=curr_wegiht_val.add(-1,init_val)\n",
     "                    #compute the MAS penalty gradient: 2 * lambda * omega * (w - w_init)\n",
     "                    regulizer=weight_dif.mul(2*reg_lambda*omega)\n",
     "                    # free intermediates eagerly to limit GPU memory usage\n",
     "                    del weight_dif\n",
     "                    del curr_wegiht_val\n",
     "                    del omega\n",
     "                    del init_val\n",
     "                    #add the MAS regularizer to the gradient\n",
     "                    d_p.add_(regulizer)\n",
     "                    del regulizer\n",
     "                #MAS PART CODE ENDS\n",
     "                if weight_decay != 0:\n",
     "                   \n",
     "                    # NOTE: uses sign(w), i.e. an L1-style decay, unlike standard\n",
     "                    # SGD which adds weight_decay * w itself\n",
     "                    d_p.add_(weight_decay,p.data.sign())\n",
     "                   \n",
     " \n",
     "                # standard (legacy PyTorch) SGD momentum handling below\n",
     "                if momentum != 0:\n",
     "                    param_state = self.state[p]\n",
     "                    if 'momentum_buffer' not in param_state:\n",
     "                        buf = param_state['momentum_buffer'] = d_p.clone()\n",
     "                    else:\n",
     "                        buf = param_state['momentum_buffer']\n",
     "                        buf.mul_(momentum).add_(1 - dampening, d_p)\n",
     "                    if nesterov:\n",
     "                        d_p = d_p.add(momentum, buf)\n",
     "                    else:\n",
     "                        d_p = buf\n",
     "                \n",
     "               \n",
     "                \n",
     "                # parameter update: w <- w - lr * d_p (legacy add_(scalar, tensor))\n",
     "                p.data.add_(-group['lr'], d_p)\n",
     "                \n",
     "        return loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Appending to MAS_based_Training.py\n"
     ]
    }
   ],
   "source": [
    "%%writefile -a MAS_based_Training.py\n",
    "#ELASTIC SGD\n",
    "#from  torch.optim import Optimizer, required\n",
    "#from  torch.optim import Optimizer, required\n",
     "class MAS_Omega_update(optim.SGD):\n",
     "    \"\"\"\n",
     "    Update the parameter importance (omega) using the gradient of the function\n",
     "    output norm. To be used at deployment time.\n",
     "    reg_params: dict of per-parameter omega values to be updated\n",
     "    batch_index,batch_size: used to keep a running average over the seen samples\n",
     "    \"\"\"\n",
     "\n",
     "    def __init__(self, params, lr=0.001, momentum=0, dampening=0,\n",
     "                 weight_decay=0, nesterov=False):\n",
     "        \n",
     "        super(MAS_Omega_update, self).__init__(params, lr,momentum,dampening,weight_decay,nesterov)\n",
     "        \n",
     "    def __setstate__(self, state):\n",
     "        super(MAS_Omega_update, self).__setstate__(state)\n",
     "       \n",
     "\n",
     "    def step(self, reg_params,batch_index,batch_size,closure=None):\n",
     "        \"\"\"\n",
     "        Performs a single parameter-importance update step. No weights are\n",
     "        modified here; only the omega values stored in reg_params change.\n",
     "        \"\"\"\n",
     "\n",
     "        #print('************************DOING A STEP************************')\n",
     " \n",
     "        loss = None\n",
     "        if closure is not None:\n",
     "            loss = closure()\n",
     "             \n",
     "        for group in self.param_groups:\n",
     "   \n",
     "            #if the parameter has an omega to be updated\n",
     "            for p in group['params']:\n",
     "          \n",
     "                #print('************************ONE PARAM************************')\n",
     "                \n",
     "                if p.grad is None:\n",
     "                    continue\n",
     "               \n",
     "                if p in reg_params:\n",
     "                    d_p = p.grad.data\n",
     "                  \n",
     "                    \n",
     "                    #HERE MAS IMPORTANCE UPDATE GOES\n",
     "                    #copy of the gradient (abs_ below mutates it in place)\n",
     "                    unreg_dp = p.grad.data.clone()\n",
     "                    reg_param=reg_params.get(p)\n",
     "                    \n",
     "                    # all-zeros tensor, only used to warn if omega is still zero\n",
     "                    zero=torch.FloatTensor(p.data.size()).zero_()\n",
     "                    #get parameter omega\n",
     "                    omega=reg_param.get('omega')\n",
     "                    omega=omega.cuda()\n",
     "    \n",
     "                    \n",
     "                    #running average of the gradient magnitude over samples:\n",
     "                    #omega <- (omega * prev_size + |grad|) / curr_size\n",
     "                    prev_size=batch_index*batch_size\n",
     "                    curr_size=(batch_index+1)*batch_size\n",
     "                    omega=omega.mul(prev_size)\n",
     "                    \n",
     "                    omega=omega.add(unreg_dp.abs_())\n",
     "                    #update omega value\n",
     "                    omega=omega.div(curr_size)\n",
     "                    if omega.equal(zero.cuda()):\n",
     "                        print('omega after zero')\n",
     "\n",
     "                    reg_param['omega']=omega\n",
     "                   \n",
     "                    reg_params[p]=reg_param\n",
     "                    #HERE MAS IMPORTANCE UPDATE ENDS\n",
     "        return loss#HAS NOTHING TO DO\n",
     "\n",
     "  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Appending to MAS_based_Training.py\n"
     ]
    }
   ],
   "source": [
    "%%writefile -a MAS_based_Training.py\n",
    "\n",
     "class MAS_Omega_Vector_Grad_update(optim.SGD):\n",
     "    \"\"\"\n",
     "    Update the parameter importance (omega) using the gradient of each output\n",
     "    unit of the function separately. To be used at deployment time.\n",
     "    reg_params: dict of per-parameter omega values to be updated\n",
     "    batch_index,batch_size: used to keep a running average over the seen samples\n",
     "    \"\"\"\n",
     "\n",
     "    def __init__(self, params, lr=0.001, momentum=0, dampening=0,\n",
     "                 weight_decay=0, nesterov=False):\n",
     "        \n",
     "        super(MAS_Omega_Vector_Grad_update, self).__init__(params, lr,momentum,dampening,weight_decay,nesterov)\n",
     "        \n",
     "    def __setstate__(self, state):\n",
     "        super(MAS_Omega_Vector_Grad_update, self).__setstate__(state)\n",
     "       \n",
     "\n",
     "    def step(self, reg_params,intermediate,batch_index,batch_size,closure=None):\n",
     "        \"\"\"\n",
     "        Performs a single parameter-importance update step.\n",
     "        intermediate=True: only accumulate the gradient magnitude into w.\n",
     "        intermediate=False: fold the accumulated w into the running-average\n",
     "        omega and reset w to zero. No weights are modified either way.\n",
     "        \"\"\"\n",
     "\n",
     "        #print('************************DOING A STEP************************')\n",
     "\n",
     "        loss = None\n",
     "        if closure is not None:\n",
     "            loss = closure()\n",
     "        index=0\n",
     "     \n",
     "        for group in self.param_groups:\n",
     "            # hyper-parameters are read for parity with SGD but not used here\n",
     "            weight_decay = group['weight_decay']\n",
     "            momentum = group['momentum']\n",
     "            dampening = group['dampening']\n",
     "            nesterov = group['nesterov']\n",
     "    \n",
     "            for p in group['params']:\n",
     "          \n",
     "                #print('************************ONE PARAM************************')\n",
     "                \n",
     "                if p.grad is None:\n",
     "                    continue\n",
     "                \n",
     "                if p in reg_params:\n",
     "                    d_p = p.grad.data\n",
     "                    #copy of the gradient (abs_ below mutates it in place)\n",
     "                    unreg_dp = p.grad.data.clone()\n",
     "                    #HERE MAS CODE GOES\n",
     "                    reg_param=reg_params.get(p)\n",
     "                    \n",
     "                    # all-zeros tensor used to reset w and to warn on zero omega\n",
     "                    zero=torch.FloatTensor(p.data.size()).zero_()\n",
     "                    omega=reg_param.get('omega')\n",
     "                    omega=omega.cuda()\n",
     "    \n",
     "                    \n",
     "                    #get the magnitude of the gradient\n",
     "                    if intermediate:\n",
     "                        # accumulate |grad| of the current output unit into w\n",
     "                        if 'w' in reg_param.keys():\n",
     "                            w=reg_param.get('w')\n",
     "                        else:\n",
     "                            w=torch.FloatTensor(p.data.size()).zero_()\n",
     "                        w=w.cuda()\n",
     "                        w=w.add(unreg_dp.abs_())\n",
     "                        reg_param['w']=w\n",
     "                    else:\n",
     "                       \n",
     "                       #running average over samples:\n",
     "                       #omega <- (omega * prev_size + w) / curr_size\n",
     "                        w=reg_param.get('w')\n",
     "                        prev_size=batch_index*batch_size\n",
     "                        curr_size=(batch_index+1)*batch_size\n",
     "                        omega=omega.mul(prev_size)\n",
     "                        omega=omega.add(w)\n",
     "                        omega=omega.div(curr_size)\n",
     "                        reg_param['w']=zero.cuda()\n",
     "                        \n",
     "                        if omega.equal(zero.cuda()):\n",
     "                            print('omega after zero')\n",
     "\n",
     "                    reg_param['omega']=omega\n",
     "                    #pdb.set_trace()\n",
     "                    reg_params[p]=reg_param\n",
     "                # index counts parameters; it is not used further here\n",
     "                index+=1\n",
     "        return loss\n",
     "#importance_dictionary: contains all the information needed for computing the w and omega\n",
     "  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Appending to MAS_based_Training.py\n"
     ]
    }
   ],
   "source": [
    "%%writefile -a MAS_based_Training.py\n",
    "\n",
    "\n",
    "  \n",
     "def train_model(model, criterion, optimizer, lr_scheduler,lr,dset_loaders,dset_sizes,use_gpu, num_epochs,exp_dir='./',resume=''):\n",
     "    \"\"\"Train a given model using the MAS optimizer.\n",
     "\n",
     "    The only MAS-specific part is passing the importance params\n",
     "    (model.reg_params) to optimizer.step so weight changes are penalized.\n",
     "    The best model by validation accuracy is saved to exp_dir/best_model.pth.tar\n",
     "    and a checkpoint is written every epoch; if resume names an existing\n",
     "    checkpoint file, training restarts from its stored epoch.\n",
     "    \"\"\"\n",
     "    print('dictoinary length'+str(len(dset_loaders)))\n",
     "    #reg_params=model.reg_params\n",
     "    since = time.time()\n",
     "\n",
     "    # NOTE: best_model is never reassigned; the best weights go to disk instead\n",
     "    best_model = model\n",
     "    best_acc = 0.0\n",
     "    if os.path.isfile(resume):\n",
     "        print(\"=> loading checkpoint '{}'\".format(resume))\n",
     "        checkpoint = torch.load(resume)\n",
     "        start_epoch = checkpoint['epoch']\n",
     "        model.load_state_dict(checkpoint['state_dict'])\n",
     "        print('load')\n",
     "        optimizer.load_state_dict(checkpoint['optimizer'])\n",
     "\n",
     "        \n",
     "        print(\"=> loaded checkpoint '{}' (epoch {})\"\n",
     "              .format(resume, checkpoint['epoch']))\n",
     "    else:\n",
     "            start_epoch=0\n",
     "            print(\"=> no checkpoint found at '{}'\".format(resume))\n",
     "    \n",
     "    print(str(start_epoch))\n",
     "    #pdb.set_trace()\n",
     "    for epoch in range(start_epoch, num_epochs):\n",
     "        print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n",
     "        print('-' * 10)\n",
     "\n",
     "        \n",
     "        # Each epoch has a training and validation phase\n",
     "        for phase in ['train', 'val']:\n",
     "            if phase == 'train':\n",
     "            \n",
     "                optimizer = lr_scheduler(optimizer, epoch,lr)\n",
     "                model.train(True)  # Set model to training mode\n",
     "            else:\n",
     "                model.train(False)  # Set model to evaluate mode\n",
     "\n",
     "            running_loss = 0.0\n",
     "            running_corrects = 0\n",
     "\n",
     "            # Iterate over data.\n",
     "            for data in dset_loaders[phase]:\n",
     "                # get the inputs\n",
     "                inputs, labels = data\n",
     "                #FOR MNIST DATASET: drop singleton dims added by the transforms\n",
     "                inputs=inputs.squeeze()\n",
     "                \n",
     "                # wrap them in Variable\n",
     "                if use_gpu:\n",
     "                    inputs, labels = Variable(inputs.cuda()), \\\n",
     "                        Variable(labels.cuda())\n",
     "                else:\n",
     "                    inputs, labels = Variable(inputs), Variable(labels)\n",
     "\n",
     "                # zero the parameter gradients\n",
     "                optimizer.zero_grad()\n",
     "\n",
     "                # forward\n",
     "                outputs = model(inputs)\n",
     "                _, preds = torch.max(outputs.data, 1)\n",
     "                loss = criterion(outputs, labels)\n",
     "\n",
     "                # backward + optimize only if in training phase\n",
     "                if phase == 'train':\n",
     "                    loss.backward()\n",
     "                    #pass omega to the optimizer to use for penalizing the weights changes\n",
     "                    optimizer.step(model.reg_params)\n",
     "\n",
     "                # statistics (legacy PyTorch < 0.4 scalar access via .data[0])\n",
     "                running_loss += loss.data[0]\n",
     "                running_corrects += torch.sum(preds == labels.data)\n",
     "\n",
     "            epoch_loss = running_loss / dset_sizes[phase]\n",
     "            epoch_acc = running_corrects / dset_sizes[phase]\n",
     "\n",
     "            print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n",
     "                phase, epoch_loss, epoch_acc))\n",
     "\n",
     "            # deep copy the model\n",
     "            if phase == 'val' and epoch_acc > best_acc:\n",
     "                # free batch tensors before saving to reduce GPU memory pressure\n",
     "                del outputs\n",
     "                del labels\n",
     "                del inputs\n",
     "                del loss\n",
     "                del preds\n",
     "                best_acc = epoch_acc\n",
     "                #best_model = copy.deepcopy(model)\n",
     "                torch.save(model,os.path.join(exp_dir, 'best_model.pth.tar'))\n",
     "                \n",
     "      \n",
     "        epoch_file_name=exp_dir+'/'+'epoch'+'.pth.tar'\n",
     "        save_checkpoint({\n",
     "            'epoch': epoch + 1,\n",
     "            'arch': 'alexnet',\n",
     "            'model': model,\n",
     "            'state_dict': model.state_dict(),\n",
     "            'optimizer' : optimizer.state_dict(),\n",
     "                },epoch_file_name)\n",
     "        print()\n",
     "\n",
     "    time_elapsed = time.time() - since\n",
     "    print('Training complete in {:.0f}m {:.0f}s'.format(\n",
     "        time_elapsed // 60, time_elapsed % 60))\n",
     "    print('Best val Acc: {:4f}'.format(best_acc))\n",
     "    # the last-epoch model is returned; the best one was saved to disk above\n",
     "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Appending to MAS_based_Training.py\n"
     ]
    }
   ],
   "source": [
    "%%writefile -a MAS_based_Training.py\n",
    "\n",
    "#importance_dictionary: contains all the information needed for computing the w and omega\n",
    "\n",
    "\n",
     "def compute_importance_l2(model, optimizer, lr_scheduler,dset_loaders,use_gpu):\n",
     "    \"\"\"Mimic the deployment setup where the model is applied to some samples\n",
     "       which are then used to update the importance params.\n",
     "       Uses the L2 norm of the function output; this is the MAS default.\n",
     "       optimizer is expected to be a MAS_Omega_update instance.\n",
     "    \"\"\"\n",
     "    print('dictoinary length'+str(len(dset_loaders)))\n",
     "    #reg_params=model.reg_params\n",
     "    since = time.time()\n",
     "\n",
     "    best_model = model\n",
     "    best_acc = 0.0\n",
     "    \n",
     "    \n",
     "    \n",
     "        \n",
     "    epoch=1\n",
     "    #it does nothing here\n",
     "    optimizer = lr_scheduler(optimizer, epoch,1)\n",
     "    model.eval()  # evaluation mode; autograd still provides the gradients we need\n",
     "\n",
     "\n",
     "    running_loss = 0.0\n",
     "    running_corrects = 0\n",
     "   \n",
     "    # Iterate over data.\n",
     "    index=0\n",
     "    for dset_loader in dset_loaders:\n",
     "        for data in dset_loader:\n",
     "            # get the inputs\n",
     "            inputs, labels = data\n",
     "            if inputs.size(1)==1 and len(inputs.size())==3:\n",
     "                \n",
     "                #for mnist, there is no channel \n",
     "                #and  to avoid problems we remove that additional dimension generated by pytorch transformation\n",
     "                inputs=inputs.view(inputs.size(0),inputs.size(2))            \n",
     "            # wrap them in Variable\n",
     "            if use_gpu:\n",
     "                inputs, labels = Variable(inputs.cuda()), \\\n",
     "                Variable(labels.cuda())\n",
     "            else:\n",
     "                inputs, labels = Variable(inputs), Variable(labels)\n",
     "\n",
     "            # zero the parameter gradients\n",
     "            optimizer.zero_grad()\n",
     "            \n",
     "            # forward\n",
     "            outputs = model(inputs)\n",
     "            _, preds = torch.max(outputs.data, 1)\n",
     "\n",
     "\n",
     "        \n",
     "            #compute the L2 norm of output \n",
     "            Target_zeros=torch.zeros(outputs.size())\n",
     "            Target_zeros=Target_zeros.cuda()\n",
     "            Target_zeros=Variable(Target_zeros)\n",
     "            #note no averaging happens here: MSE against zeros with the legacy\n",
     "            #size_average=False flag sums the squared outputs, i.e. the squared\n",
     "            #L2 norm of the output\n",
     "            loss = torch.nn.MSELoss(size_average=False)\n",
     "\n",
     "            targets = loss(outputs,Target_zeros) \n",
     "            #compute the gradients\n",
     "            targets.backward()\n",
     "\n",
     "            #update the parameters importance (running average over batches)\n",
     "            optimizer.step(model.reg_params,index,labels.size(0))\n",
     "            print('batch number ',index)\n",
     "            #necessary index to keep the running average\n",
     "            index+=1\n",
     "   \n",
     "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Appending to MAS_based_Training.py\n"
     ]
    }
   ],
   "source": [
    "%%writefile -a MAS_based_Training.py\n",
    "\n",
    "\n",
    "\n",
     "def compute_importance(model, optimizer, lr_scheduler,dset_loaders,use_gpu):\n",
     "    \"\"\"Mimic the deployment setup where the model is applied to some samples\n",
     "       which are then used to update the importance params.\n",
     "       Uses the L1 norm of the function output.\n",
     "       optimizer is expected to be a MAS_Omega_update instance.\n",
     "    \"\"\"\n",
     "    print('dictoinary length'+str(len(dset_loaders)))\n",
     "   \n",
     "    since = time.time()\n",
     "\n",
     "    best_model = model\n",
     "    best_acc = 0.0\n",
     "    \n",
     "    #pdb.set_trace()\n",
     "    \n",
     "\n",
     "        \n",
     "    epoch=1\n",
     "    #it does nothing here, can be removed\n",
     "    optimizer = lr_scheduler(optimizer, epoch,1)\n",
     "    model.eval()  # evaluation mode; autograd still provides the gradients we need\n",
     "\n",
     "\n",
     "    running_loss = 0.0\n",
     "    running_corrects = 0\n",
     "   \n",
     "    # Iterate over data.\n",
     "    index=0\n",
     "    for dset_loader in dset_loaders:\n",
     "        #pdb.set_trace()\n",
     "        for data in dset_loader:\n",
     "            # get the inputs\n",
     "            inputs, labels = data\n",
     "\n",
     "            # wrap them in Variable\n",
     "            if use_gpu:\n",
     "                inputs, labels = Variable(inputs.cuda()), \\\n",
     "                Variable(labels.cuda())\n",
     "            else:\n",
     "                inputs, labels = Variable(inputs), Variable(labels)\n",
     "\n",
     "            # zero the parameters gradients\n",
     "            optimizer.zero_grad()\n",
     "\n",
     "            # forward\n",
     "            outputs = model(inputs)\n",
     "            _, preds = torch.max(outputs.data, 1)\n",
     "      \n",
     "\n",
     "           #compute the L1 norm of the function output\n",
     "        \n",
     "            Target_zeros=torch.zeros(outputs.size())\n",
     "            Target_zeros=Target_zeros.cuda()\n",
     "            Target_zeros=Variable(Target_zeros,requires_grad=False)\n",
     "       \n",
     "            #L1 loss against zeros with the legacy size_average=False flag\n",
     "            #sums the absolute outputs, i.e. the L1 norm of the output\n",
     "            loss = torch.nn.L1Loss(size_average=False)\n",
     "\n",
     "            targets = loss(outputs,Target_zeros) \n",
     "            #compute gradients\n",
     "            targets.backward()\n",
     "        \n",
     "            \n",
     "            print('batch number ',index)\n",
     "            #update parameters importance (running average over batches)\n",
     "            optimizer.step(model.reg_params,index,labels.size(0))\n",
     "            #necessary index to keep the running average\n",
     "            index+=1\n",
     "   \n",
     "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Appending to MAS_based_Training.py\n"
     ]
    }
   ],
   "source": [
    "%%writefile -a MAS_based_Training.py\n",
    "\n",
    "\n",
    "\n",
     "def compute_importance_gradient_vector(model, optimizer, lr_scheduler,dset_loaders,use_gpu):\n",
     "    \"\"\"Mimic the deployment setup where the model is applied to some samples\n",
     "       which are then used to update the importance params.\n",
     "       Uses the gradient of each output unit of the function separately.\n",
     "       optimizer is expected to be a MAS_Omega_Vector_Grad_update instance.\n",
     "    \"\"\"\n",
     "    print('dictoinary length'+str(len(dset_loaders)))\n",
     "    #reg_params=model.reg_params\n",
     "    since = time.time()\n",
     "\n",
     "    best_model = model\n",
     "    best_acc = 0.0\n",
     "    \n",
     "    \n",
     "    \n",
     "        \n",
     "    epoch=1\n",
     "    #the scheduler call does nothing here, can be removed\n",
     "    optimizer = lr_scheduler(optimizer, epoch,1)\n",
     "    model.eval()  # evaluation mode; autograd still provides the gradients we need\n",
     "\n",
     "\n",
     "    running_loss = 0.0\n",
     "    running_corrects = 0\n",
     "   \n",
     "    # Iterate over data.\n",
     "    index=0\n",
     "    for dset_loader in dset_loaders:\n",
     "        for data in dset_loader:\n",
     "            # get the inputs\n",
     "            inputs, labels = data\n",
     "\n",
     "            # wrap them in Variable\n",
     "            if use_gpu:\n",
     "                inputs, labels = Variable(inputs.cuda()), \\\n",
     "                Variable(labels.cuda())\n",
     "            else:\n",
     "                inputs, labels = Variable(inputs), Variable(labels)\n",
     "\n",
     "            # zero the parameter gradients\n",
     "            optimizer.zero_grad()\n",
     "            \n",
     "            # forward\n",
     "            outputs = model(inputs)\n",
     "            _, preds = torch.max(outputs.data, 1)\n",
     "     \n",
     "           \n",
     "            # backprop each output unit separately to collect per-unit gradients\n",
     "            for output_i in range(0,outputs.size(1)):\n",
     "                # mask selecting output unit output_i for every sample\n",
     "                Target_zeros=torch.zeros(outputs.size())\n",
     "                Target_zeros=Target_zeros.cuda()\n",
     "                Target_zeros[:,output_i]=1\n",
     "                Target_zeros=Variable(Target_zeros,requires_grad=False)\n",
     "                targets=torch.sum(outputs*Target_zeros)\n",
     "                # retain the graph until the last output unit is backpropagated\n",
     "                if output_i==(outputs.size(1)-1):\n",
     "                    targets.backward()\n",
     "                else:\n",
     "                    targets.backward(retain_graph=True )\n",
     "                    \n",
     "                # intermediate step: accumulate the gradient magnitude into w\n",
     "                optimizer.step(model.reg_params,True,index,labels.size(0))\n",
     "                optimizer.zero_grad()\n",
     "            \n",
     "        #print('step')\n",
     "            # final step: fold the accumulated w into the running-average omega\n",
     "            optimizer.step(model.reg_params,False,index,labels.size(0))\n",
     "            print('batch number ',index)\n",
     "            index+=1\n",
     "   \n",
     "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Appending to MAS_based_Training.py\n"
     ]
    }
   ],
   "source": [
    "%%writefile -a MAS_based_Training.py\n",
    "\n",
     "def initialize_reg_params(model,freeze_layers=[]):\n",
     "    \"\"\"Initialize a zero omega and store the starting value for each parameter.\n",
     "\n",
     "    freeze_layers: names of parameters to exclude from regularization.\n",
     "    Returns a dict keyed by the parameter tensors themselves, each value being\n",
     "    a dict with keys omega (importance, zeros) and init_val (initial weights).\n",
     "    \"\"\"\n",
     "    reg_params={}\n",
     "    for name, param in model.named_parameters():\n",
     "        if not name in freeze_layers:\n",
     "            print('initializing param',name)\n",
     "            omega=torch.FloatTensor(param.size()).zero_()\n",
     "            init_val=param.data.clone()\n",
     "            reg_param={}\n",
     "            reg_param['omega'] = omega\n",
     "            #initialize the initial value to that before starting training\n",
     "            reg_param['init_val'] = init_val\n",
     "            reg_params[param]=reg_param\n",
     "    return reg_params\n",
     "   "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Appending to MAS_based_Training.py\n"
     ]
    }
   ],
   "source": [
    "%%writefile -a MAS_based_Training.py\n",
    "\n",
    "\n",
    "\n",
     "def initialize_store_reg_params(model,freeze_layers=[]):\n",
     "    \"\"\"Reset omega to zero after storing its value under prev_omega, so that\n",
     "    the omegas of the previous and the new task can be accumulated later.\n",
     "    Parameters listed in freeze_layers have their omega entries removed.\n",
     "    \"\"\"\n",
     "    reg_params=model.reg_params\n",
     "    for name, param in model.named_parameters():\n",
     "        #in case there are some layers that are not trained\n",
     "        if not name in freeze_layers:\n",
     "            if param in reg_params:\n",
     "                reg_param=reg_params.get(param)\n",
     "                print('storing previous omega',name)\n",
     "                prev_omega=reg_param.get('omega')\n",
     "                new_omega=torch.FloatTensor(param.size()).zero_()\n",
     "                init_val=param.data.clone()\n",
     "                reg_param['prev_omega']=prev_omega   \n",
     "                reg_param['omega'] = new_omega\n",
     "                \n",
     "                #initialize the initial value to that before starting training\n",
     "                reg_param['init_val'] = init_val\n",
     "                reg_params[param]=reg_param\n",
     "                \n",
     "        else:\n",
     "            if param in reg_params: \n",
     "                reg_param=reg_params.get(param)\n",
     "                print('removing unused omega',name)\n",
     "                del reg_param['omega'] \n",
     "                del reg_params[param]\n",
     "    return reg_params\n",
     "   "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Appending to MAS_based_Training.py\n"
     ]
    }
   ],
   "source": [
    "%%writefile -a MAS_based_Training.py\n",
    "\n",
    "\n",
    "\n",
     "def accumelate_reg_params(model,freeze_layers=[]):\n",
     "    \"\"\"Accumulate the newly computed omega with the previously stored one\n",
     "    (prev_omega) from the earlier tasks, then drop the prev_omega entry.\n",
     "    Parameters listed in freeze_layers have their omega entries removed.\n",
     "    \"\"\"\n",
     "    reg_params=model.reg_params\n",
     "    for name, param in model.named_parameters():\n",
     "        if not name in freeze_layers:\n",
     "            if param in reg_params:\n",
     "                reg_param=reg_params.get(param)\n",
     "                print('restoring previous omega',name)\n",
     "                prev_omega=reg_param.get('prev_omega')\n",
     "                prev_omega=prev_omega.cuda()\n",
     "                \n",
     "                new_omega=(reg_param.get('omega')).cuda()\n",
     "                acc_omega=torch.add(prev_omega,new_omega)\n",
     "                \n",
     "                del reg_param['prev_omega']\n",
     "                reg_param['omega'] = acc_omega\n",
     "               \n",
     "                reg_params[param]=reg_param\n",
     "                # drop references eagerly to limit GPU memory usage\n",
     "                del acc_omega\n",
     "                del new_omega\n",
     "                del prev_omega\n",
     "        else:\n",
     "            if param in reg_params: \n",
     "                reg_param=reg_params.get(param)\n",
     "                print('removing unused omega',name)\n",
     "                del reg_param['omega'] \n",
     "                del reg_params[param]             \n",
     "    return reg_params"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Appending to MAS_based_Training.py\n"
     ]
    }
   ],
   "source": [
    "%%writefile -a MAS_based_Training.py\n",
    "\n",
     "def save_checkpoint(state, filename='checkpoint.pth.tar'):\n",
     "    \"\"\"Serialize a checkpoint dict (epoch, model, state dicts) to filename.\"\"\"\n",
     "    torch.save(state, filename)\n",
     "   "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
