{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<h2><center>Train network</center></h2>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "(c) DI Dominik Hirner BSc. \n",
    "Institute of Computer Graphics and Vision (ICG)\n",
    "Graz University of Technology, Austria\n",
    "E-mail: dominik.hirner@tugraz.at\n",
    "\n",
    "This notebook is equivalent to train.py in the root of this repository"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Imports, grouped stdlib -> third-party.\n",
    "# (The original cell imported torch, torch.nn and Variable twice.)\n",
    "import sys\n",
    "import re\n",
    "import glob\n",
    "import random\n",
    "import argparse\n",
    "\n",
    "import numpy as np\n",
    "import numpy.matlib\n",
    "import cv2\n",
    "from PIL import Image\n",
    "from typing import Tuple\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch import optim\n",
    "from torch.autograd import Variable\n",
    "\n",
    "from guided_filter_pytorch.guided_filter import GuidedFilter"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Dataset to train on: MB, KITTI2012, KITTI2015 or ETH\n",
    "dataset = 'KITTI2015'\n",
    "# used as prefix for the weight files saved by the training loop below\n",
    "model_name = 'mb_64f'\n",
    "\n",
    "# root folder of the training data; expected layout depends on `dataset`\n",
    "input_folder = '/media/HDD/TrainingsData/kitti2015/training/'\n",
    "\n",
    "# patches per batch\n",
    "batch_size = 40\n",
    "# batches per epoch\n",
    "nr_batches = 10\n",
    "# effectively \"train until stopped by hand\"\n",
    "nr_epochs = 20000000\n",
    "# feature maps per conv layer of the Siamese branch\n",
    "num_conv_feature_maps = 64\n",
    "# save weights every x epochs\n",
    "save_weights = 1\n",
    "# square crop size (in pixels) for a training patch\n",
    "patch_size = 21\n",
    "# half patch size, used to keep crops inside the image\n",
    "ps_h = int(patch_size/2)\n",
    "# range for the random offset of the negative example o_neg\n",
    "r_low = 1\n",
    "r_high = 25"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def loadMB():\n",
    "    \"\"\"Load a Middlebury-style training set.\n",
    "\n",
    "    Expects one sub-folder per scene inside `input_folder`, each holding\n",
    "    im0.png (left), im1.png (right) and disp0GT.pfm (ground-truth\n",
    "    disparity).  Returns (left_list, right_list, disp_list).\n",
    "    \"\"\"\n",
    "    left_filelist = sorted(glob.glob(input_folder + '*/im0.png'))\n",
    "    right_filelist = sorted(glob.glob(input_folder + '*/im1.png'))\n",
    "    disp_filelist = sorted(glob.glob(input_folder + '*/disp0GT.pfm'))\n",
    "\n",
    "    left_list = []\n",
    "    right_list = []\n",
    "    disp_list = []\n",
    "\n",
    "    for left_path, right_path, disp_path in zip(left_filelist, right_filelist, disp_filelist):\n",
    "        left_list.append(cv2.imread(left_path))\n",
    "        right_list.append(cv2.imread(right_path))\n",
    "        # readPFM returns (data, scale); only the disparity data is needed\n",
    "        disp_list.append(readPFM(disp_path)[0])\n",
    "\n",
    "    return left_list, right_list, disp_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def loadETH3D():\n",
    "    \"\"\"Load an ETH3D training set (same on-disk layout as Middlebury).\n",
    "\n",
    "    Each scene folder under `input_folder` must contain im0.png, im1.png\n",
    "    and disp0GT.pfm.  Returns (left_list, right_list, disp_list).\n",
    "    \"\"\"\n",
    "    left_paths = sorted(glob.glob(input_folder + '*/im0.png'))\n",
    "    right_paths = sorted(glob.glob(input_folder + '*/im1.png'))\n",
    "    disp_paths = sorted(glob.glob(input_folder + '*/disp0GT.pfm'))\n",
    "\n",
    "    left_list = [cv2.imread(p) for p in left_paths]\n",
    "    right_list = [cv2.imread(p) for p in right_paths]\n",
    "    # readPFM returns (data, scale); keep the disparity data only\n",
    "    disp_list = [readPFM(p)[0] for p in disp_paths]\n",
    "\n",
    "    return left_list, right_list, disp_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def loadKitti2012():\n",
    "    \"\"\"Load the KITTI 2012 training set.\n",
    "\n",
    "    Pairs images from colored_0/ (left) and colored_1/ (right) with the\n",
    "    non-occluded ground truth in disp_noc/, keeping only file names that\n",
    "    exist in all three folders.  Returns (left_list, right_list, disp_list).\n",
    "    \"\"\"\n",
    "    left_filelist = sorted(glob.glob(input_folder + 'colored_0/*.png'))\n",
    "    right_filelist = sorted(glob.glob(input_folder + 'colored_1/*.png'))\n",
    "    disp_filelist = sorted(glob.glob(input_folder + 'disp_noc/*.png'))\n",
    "\n",
    "    left_names = set(p.split('/')[-1] for p in left_filelist)\n",
    "    right_names = set(p.split('/')[-1] for p in right_filelist)\n",
    "    gt_names = set(p.split('/')[-1] for p in disp_filelist)\n",
    "\n",
    "    # only file names present in all three folders\n",
    "    common_names = list(left_names & right_names & gt_names)\n",
    "\n",
    "    left_list = []\n",
    "    right_list = []\n",
    "    disp_list = []\n",
    "\n",
    "    for name in common_names:\n",
    "        left_list.append(cv2.imread(input_folder + 'colored_0/' + name))\n",
    "        right_list.append(cv2.imread(input_folder + 'colored_1/' + name))\n",
    "\n",
    "        # NOTE(review): cv2.imread with default flags reads the 16-bit KITTI\n",
    "        # disparity PNG as 8-bit BGR; averaging the channels reproduces the\n",
    "        # original behaviour -- confirm this matches the intended scaling\n",
    "        cur_disp = cv2.imread(input_folder + 'disp_noc/' + name)\n",
    "        disp_list.append(np.mean(cur_disp,axis=2))\n",
    "\n",
    "    return left_list, right_list, disp_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def loadKitti2015():\n",
    "    \"\"\"Load the KITTI 2015 training set.\n",
    "\n",
    "    Pairs images from image_2/ (left) and image_3/ (right) with the\n",
    "    non-occluded ground truth in disp_noc_0/, keeping only file names that\n",
    "    exist in all three folders.  Returns (left_list, right_list, disp_list).\n",
    "    \"\"\"\n",
    "    left_filelist = sorted(glob.glob(input_folder + 'image_2/*.png'))\n",
    "    right_filelist = sorted(glob.glob(input_folder + 'image_3/*.png'))\n",
    "    disp_filelist = sorted(glob.glob(input_folder + 'disp_noc_0/*.png'))\n",
    "\n",
    "    left_names = set(p.split('/')[-1] for p in left_filelist)\n",
    "    right_names = set(p.split('/')[-1] for p in right_filelist)\n",
    "    gt_names = set(p.split('/')[-1] for p in disp_filelist)\n",
    "\n",
    "    # only file names present in all three folders\n",
    "    common_names = list(left_names & right_names & gt_names)\n",
    "\n",
    "    left_list = []\n",
    "    right_list = []\n",
    "    disp_list = []\n",
    "\n",
    "    for name in common_names:\n",
    "        left_list.append(cv2.imread(input_folder + 'image_2/' + name))\n",
    "        right_list.append(cv2.imread(input_folder + 'image_3/' + name))\n",
    "\n",
    "        # NOTE(review): cv2.imread with default flags reads the 16-bit KITTI\n",
    "        # disparity PNG as 8-bit BGR; averaging the channels reproduces the\n",
    "        # original behaviour -- confirm this matches the intended scaling\n",
    "        cur_disp = cv2.imread(input_folder + 'disp_noc_0/' + name)\n",
    "        disp_list.append(np.mean(cur_disp,axis=2))\n",
    "\n",
    "    return left_list, right_list, disp_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class SiameseBranch(nn.Module):\n",
    "    \"\"\"Densely connected 5-layer convolutional feature extractor.\n",
    "\n",
    "    Each layer receives the concatenation of all previous feature maps\n",
    "    (DenseNet-style skip connections).  The final layer is linear (no Tanh)\n",
    "    so its output can be compared with cosine similarity.\n",
    "\n",
    "    Args:\n",
    "        img_ch: number of input channels (3 for BGR images).\n",
    "        n_feat: feature maps per conv layer; defaults to the notebook-level\n",
    "            `num_conv_feature_maps`, so existing callers are unchanged.\n",
    "    \"\"\"\n",
    "    def __init__(self, img_ch=3, n_feat=None):\n",
    "        super(SiameseBranch, self).__init__()\n",
    "        if n_feat is None:\n",
    "            n_feat = num_conv_feature_maps\n",
    "\n",
    "        self.Tanh = nn.Tanh()\n",
    "        self.Conv1 = nn.Conv2d(img_ch, n_feat, kernel_size=3, stride=1, padding=1, dilation=1, bias=True)\n",
    "        self.Conv2 = nn.Conv2d(n_feat, n_feat, kernel_size=3, stride=1, padding=1, dilation=1, bias=True)\n",
    "        self.Conv3 = nn.Conv2d(2*n_feat, n_feat, kernel_size=3, stride=1, padding=1, dilation=1, bias=True)\n",
    "        self.Conv4 = nn.Conv2d(3*n_feat, n_feat, kernel_size=3, stride=1, padding=1, dilation=1, bias=True)\n",
    "        self.Conv5 = nn.Conv2d(4*n_feat, n_feat, kernel_size=3, stride=1, padding=1, dilation=1, bias=True)\n",
    "\n",
    "    def forward(self, x_in):\n",
    "        \"\"\"Map a (N, img_ch, H, W) batch to (N, n_feat, H, W) features.\"\"\"\n",
    "        x1 = self.Tanh(self.Conv1(x_in))\n",
    "        x2 = self.Tanh(self.Conv2(x1))\n",
    "        x3 = self.Tanh(self.Conv3(torch.cat((x1, x2), dim=1)))\n",
    "        x4 = self.Tanh(self.Conv4(torch.cat((x1, x2, x3), dim=1)))\n",
    "        # final layer stays linear: features are compared via cosine similarity\n",
    "        x5 = self.Conv5(torch.cat((x1, x2, x3, x4), dim=1))\n",
    "\n",
    "        return x5"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# one shared branch; both stereo views are passed through the same weights\n",
    "branch = SiameseBranch()\n",
    "# training is CUDA-only in this notebook (see Tensor / .cuda() usage below)\n",
    "branch = branch.cuda()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# total number of trainable parameters of the branch\n",
    "pytorch_total_params = sum(p.numel() for p in branch.parameters() if p.requires_grad)\n",
    "print(\"Nr feat: \" ,pytorch_total_params)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "##python3 version!!!!\n",
    "def readPFM(file):\n",
    "    \"\"\"Read a PFM image file.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    file : str -- path to the .pfm file.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    (data, scale) : float32 array of shape (H, W, 3) for colour ('PF') or\n",
    "    (H, W) for greyscale ('Pf') files, flipped so row 0 is the top row,\n",
    "    and the positive PFM scale factor.\n",
    "\n",
    "    Raises\n",
    "    ------\n",
    "    Exception on an unknown header or a malformed dimension line.\n",
    "    \"\"\"\n",
    "    # context manager: the file handle is closed on every path\n",
    "    # (the original never closed it)\n",
    "    with open(file, 'rb') as f:\n",
    "        header = f.readline().decode('utf-8').rstrip()\n",
    "        if header == 'PF':\n",
    "            color = True\n",
    "        elif header == 'Pf':\n",
    "            color = False\n",
    "        else:\n",
    "            raise Exception('Not a PFM file.')\n",
    "\n",
    "        dim_match = re.match(r'^(\\d+)\\s(\\d+)\\s$', f.readline().decode('utf-8'))\n",
    "        if dim_match:\n",
    "            width, height = map(int, dim_match.groups())\n",
    "        else:\n",
    "            raise Exception('Malformed PFM header.')\n",
    "\n",
    "        scale = float(f.readline().decode('utf-8').rstrip())\n",
    "        if scale < 0:  # negative scale marks little-endian data\n",
    "            endian = '<'\n",
    "            scale = -scale\n",
    "        else:\n",
    "            endian = '>'  # big-endian\n",
    "\n",
    "        data = np.fromfile(f, endian + 'f')\n",
    "\n",
    "    shape = (height, width, 3) if color else (height, width)\n",
    "    data = np.reshape(data, shape)\n",
    "    # PFM stores rows bottom-to-top; flip so row 0 is the top of the image\n",
    "    data = np.flipud(data)\n",
    "    return data, scale"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def writePFM(file, image, scale=1):\n",
    "    \"\"\"Write a numpy array to a PFM image file.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    file : str -- output path.\n",
    "    image : float32 array of shape H x W x 3, H x W x 1 or H x W.\n",
    "    scale : PFM scale factor; written negative to mark little-endian data.\n",
    "\n",
    "    Raises\n",
    "    ------\n",
    "    Exception on a non-float32 dtype or an unsupported shape.\n",
    "    \"\"\"\n",
    "    if image.dtype.name != 'float32':\n",
    "        raise Exception('Image dtype must be float32.')\n",
    "\n",
    "    if len(image.shape) == 3 and image.shape[2] == 3:  # color image\n",
    "        color = True\n",
    "    elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1:  # greyscale\n",
    "        color = False\n",
    "    else:\n",
    "        raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')\n",
    "\n",
    "    # PFM stores rows bottom-to-top\n",
    "    image = np.flipud(image)\n",
    "\n",
    "    endian = image.dtype.byteorder\n",
    "    if endian == '<' or endian == '=' and sys.byteorder == 'little':\n",
    "        scale = -scale  # negative scale marks little-endian data\n",
    "\n",
    "    # context manager: guarantees the data is flushed and the file closed\n",
    "    # (the original never closed the handle)\n",
    "    with open(file, 'wb') as f:\n",
    "        f.write('PF\\n'.encode() if color else 'Pf\\n'.encode())\n",
    "        f.write('%d %d\\n'.encode() % (image.shape[1], image.shape[0]))\n",
    "        f.write('%f\\n'.encode() % scale)\n",
    "        image.tofile(f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# all tensors are created directly on the GPU (training is CUDA-only here)\n",
    "Tensor = torch.cuda.FloatTensor\n",
    "# cosine similarity over the feature/channel dimension (dim=1 by default)\n",
    "cos = torch.nn.CosineSimilarity()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def createCostVol(left_im,right_im,max_disp):\n",
    "    \"\"\"Compute a cosine-similarity cost volume for one stereo pair.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    left_im, right_im : H x W x C arrays (cast to uint8 internally).\n",
    "    max_disp : int -- maximum disparity; the volume gets max_disp+1 slices.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    (max_disp+1, H, W) CUDA tensor; entry [d, y, x] is the cosine\n",
    "    similarity between the left feature at (y, x) and the right feature\n",
    "    at (y, x-d).  Uses the module-level `branch`, `cos` and `Tensor`.\n",
    "    \"\"\"\n",
    "    a_h, a_w,c = left_im.shape\n",
    "    # HWC -> CHW layout expected by the network\n",
    "    left_im = np.transpose(left_im, (2,0,1)).astype(np.uint8)\n",
    "    right_im = np.transpose(right_im, (2,0,1)).astype(np.uint8)\n",
    "    \n",
    "    left_im = np.reshape(left_im, [1,c,a_h,a_w])\n",
    "    right_im = np.reshape(right_im, [1,c,a_h,a_w])\n",
    "    \n",
    "    # inference only -> no autograd graph needed\n",
    "    with torch.no_grad():\n",
    "\n",
    "        left_imT = Variable(Tensor(left_im.astype(np.uint8)))\n",
    "        right_imT = Variable(Tensor(right_im.astype(np.uint8)))\n",
    "\n",
    "        left_feat = branch(left_imT)\n",
    "        right_feat = branch(right_imT)\n",
    "        \n",
    "        _,f,h,w = left_feat.shape\n",
    "        \n",
    "        # NOTE(review): assumes the feature maps keep the input resolution\n",
    "        # (holds for the stride=1, padding=1 convs of SiameseBranch)\n",
    "        cost_vol = np.zeros((max_disp+1,a_h,a_w))\n",
    "        cost_volT = Variable(Tensor(cost_vol))   \n",
    "\n",
    "        #0 => max_disp => one less disp!\n",
    "        for disp in range(0,max_disp+1):\n",
    "            if(disp == 0):\n",
    "                sim_score = cos(left_feat, right_feat)\n",
    "                cost_volT[disp,:,:] = torch.squeeze(sim_score) \n",
    "            else:\n",
    "                # shift right features `disp` px to the right, zero-padding\n",
    "                # the left border, then compare per-pixel\n",
    "                right_shifted = torch.cuda.FloatTensor(1,f,h,w).fill_(0)                      \n",
    "                right_shift = torch.cuda.FloatTensor(1,f,h,disp).fill_(0)  \n",
    "                right_appended = torch.cat([right_shift,right_feat],3)\n",
    "\n",
    "                _,f,h_ap,w_ap = right_appended.shape\n",
    "                right_shifted[:,:,:,:] = right_appended[:,:,:,:(w_ap-disp)]\n",
    "                sim_score = cos(left_feat, right_shifted)\n",
    "                cost_volT[disp,:,:] = torch.squeeze(sim_score)              \n",
    "    return cost_volT"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def getBatch():\n",
    "    \"\"\"Assemble one training batch of patch triplets.\n",
    "\n",
    "    Returns three (batch_size, 3, patch_size, patch_size) arrays: anchor\n",
    "    patches from the left image, the matching (positive) patch and a\n",
    "    non-matching (negative) patch from the right image.  Reads the\n",
    "    module-level left_list / right_list / gt_list and the config constants\n",
    "    batch_size, patch_size, ps_h, r_low, r_high.\n",
    "    \"\"\"\n",
    "    batch_xl = np.zeros((batch_size,3,patch_size,patch_size))\n",
    "    batch_xr_pos = np.zeros((batch_size,3,patch_size,patch_size))\n",
    "    batch_xr_neg = np.zeros((batch_size,3,patch_size,patch_size))\n",
    "    \n",
    "    for el in range(batch_size):\n",
    "        \n",
    "        # draw a new random stereo pair every 25 patches; the same pair is\n",
    "        # reused for the patches in between\n",
    "        if(el % 25 == 0):\n",
    "            \n",
    "            ridx = np.random.randint(0,len(left_list),1)\n",
    "            left_im = left_list[ridx[0]]\n",
    "            right_im = right_list[ridx[0]]\n",
    "            gt_im = gt_list[ridx[0]]\n",
    "\n",
    "        \n",
    "        #get random position\n",
    "        h,w,c = left_im.shape\n",
    "        r_h = 0\n",
    "        r_w = 0\n",
    "        d = 0\n",
    "#        print('Draw for random position')\n",
    "        # rejection-sample a centre (r_h, r_w) whose ground-truth disparity\n",
    "        # is valid and whose positive patch stays inside the right image\n",
    "        #also check height! should not draw corner pixels!!\n",
    "        while True:\n",
    "            r_h = random.sample(range(ps_h,h-(ps_h+1)), 1)\n",
    "            r_w = random.sample(range(ps_h,w-(ps_h+1)),1)            \n",
    "            # NOTE(review): only inf (the MB/ETH invalid marker) is rejected;\n",
    "            # KITTI marks invalid pixels with 0, which passes this check --\n",
    "            # confirm this is intended\n",
    "            if(not np.isinf(gt_im[r_h,r_w])):\n",
    "                d = int(np.round(gt_im[r_h,r_w]))\n",
    "                if((r_w[0]-ps_h-d-1) >= 0):\n",
    "                     if((r_w[0]+(ps_h+1)-d+1) <= w):\n",
    "                        break\n",
    "        \n",
    "        d = int(np.round(gt_im[r_h,r_w]))\n",
    "                \n",
    "        cur_left = left_im[r_h[0]-ps_h:r_h[0]+(ps_h+1), r_w[0]-ps_h:r_w[0]+(ps_h+1),:]\n",
    "        #choose offset\n",
    "        \n",
    "        # positive example: same row in the right image, shifted left by the\n",
    "        # true disparity d\n",
    "        o_pos = 0                \n",
    "        cur_right_pos = right_im[r_h[0]-ps_h:r_h[0]+(ps_h+1), (r_w[0]-ps_h-d+o_pos):(r_w[0]+(ps_h+1)-d+o_pos),:]\n",
    "\n",
    "        \n",
    "        # negative example: random offset o_neg in [r_low, r_high) with a\n",
    "        # random sign -- should not be too close to the real match and must\n",
    "        # stay inside the image\n",
    "        o_neg = 0\n",
    "        while True:\n",
    "            #range 6-8??? range(2,6)\n",
    "            o_neg = random.sample(range(r_low,r_high), 1)\n",
    "            if np.random.randint(-1, 1) == -1:\n",
    "                o_neg = -o_neg[0]\n",
    "            else:\n",
    "                o_neg = o_neg[0]\n",
    "            #try without d-+1   and(o_neg != (d-1)) and(o_neg != (d+1))\n",
    "            if((o_neg != d) and ((r_w[0]-ps_h-d+o_neg) > 0)  and ((r_w[0]+(ps_h+1)-d+o_neg) < w)):\n",
    "                break\n",
    "        \n",
    "        \n",
    "        cur_right_neg = right_im[r_h[0]-ps_h:r_h[0]+(ps_h+1), (r_w[0]-ps_h-d+o_neg):(r_w[0]+(ps_h+1)-d+o_neg),:]\n",
    "\n",
    "        \n",
    "        # HWC -> CHW for the network\n",
    "        batch_xl[el,:,:,:] =  np.transpose(cur_left, (2,0,1)).astype(np.uint8)\n",
    "        batch_xr_pos[el,:,:,:] = np.transpose(cur_right_pos, (2,0,1)).astype(np.uint8)\n",
    "        batch_xr_neg[el,:,:,:] = np.transpose(cur_right_neg, (2,0,1)).astype(np.uint8)\n",
    "            \n",
    "    return batch_xl, batch_xr_pos, batch_xr_neg\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): duplicates the parameter-count cell further up\n",
    "pytorch_total_params = sum(p.numel() for p in branch.parameters() if p.requires_grad)\n",
    "print(\"Nr feat: \" ,pytorch_total_params)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def my_hinge_loss(s_p, s_n, margin=0.2):\n",
    "    \"\"\"Hinge loss on similarity scores: max(0, margin - (s_p - s_n)).\n",
    "\n",
    "    Pushes the positive similarity s_p to exceed the negative similarity\n",
    "    s_n by at least `margin`.  Element-wise; callers reduce with .mean().\n",
    "\n",
    "    Args:\n",
    "        s_p: similarity of the matching (positive) patch pair.\n",
    "        s_n: similarity of the non-matching (negative) patch pair.\n",
    "        margin: required similarity gap (default 0.2, as before).\n",
    "    \"\"\"\n",
    "    # torch.clamp(x, min=0) == ReLU(x); avoids constructing a fresh nn.ReLU\n",
    "    # module (and calling .cuda(), which fails on CPU-only machines) on\n",
    "    # every invocation.\n",
    "    return torch.clamp(margin - (s_p - s_n), min=0.0)"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# select the loader matching the configured dataset\n",
    "if(dataset == 'KITTI2012'):\n",
    "    left_list, right_list, gt_list = loadKitti2012()\n",
    "elif(dataset == 'KITTI2015'):\n",
    "    left_list, right_list, gt_list = loadKitti2015()\n",
    "elif(dataset == 'MB'):\n",
    "    left_list, right_list, gt_list = loadMB()\n",
    "elif(dataset == 'ETH'):\n",
    "    left_list, right_list, gt_list = loadETH3D()\n",
    "else:\n",
    "    # fail loudly instead of leaving the lists undefined and crashing later\n",
    "    raise ValueError('Unknown dataset: ' + dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# number of stereo pairs available for patch sampling\n",
    "# (not referenced again in the visible cells; presumably for inspection)\n",
    "nr_samples = len(left_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#If you want to use pre-trained weights as a starting point\n",
    "#load them here!\n",
    "#branch.load_state_dict(torch.load('/media/HDD/FC-DCNN-githubICPR/eth_13700'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "optimizer_G = optim.Adam(branch.parameters(), lr=0.00006)\n",
    "\n",
    "# per-epoch mean losses, for later inspection/plotting\n",
    "loss_list = []\n",
    "\n",
    "for i in range(nr_epochs):\n",
    "\n",
    "    epoch_loss = 0.0\n",
    "    # nr_batches batches per epoch (the original looped batch_size times,\n",
    "    # which disagreed with the per-epoch normalisation below)\n",
    "    for cur_batch in range(nr_batches):\n",
    "        # reset gradients\n",
    "        optimizer_G.zero_grad()\n",
    "\n",
    "        batch_xl, batch_xr_pos, batch_xr_neg = getBatch()\n",
    "\n",
    "        xl = Variable(Tensor(batch_xl.astype(np.uint8)))\n",
    "        xr_pos = Variable(Tensor(batch_xr_pos.astype(np.uint8)))\n",
    "        xr_neg = Variable(Tensor(batch_xr_neg.astype(np.uint8)))\n",
    "\n",
    "        # all three patch stacks go through the same shared branch\n",
    "        left_out = branch(xl)\n",
    "        right_pos_out = branch(xr_pos)\n",
    "        right_neg_out = branch(xr_neg)\n",
    "\n",
    "        sp = cos(left_out, right_pos_out)\n",
    "        sn = cos(left_out, right_neg_out)\n",
    "\n",
    "        batch_loss = my_hinge_loss(sp, sn).mean()\n",
    "\n",
    "        batch_loss.backward()\n",
    "        optimizer_G.step()\n",
    "        # .item() detaches the scalar so no autograd graph is kept alive\n",
    "        epoch_loss = epoch_loss + batch_loss.item()\n",
    "\n",
    "    # bug fix: was `batch_loss/nr_batches`, which discarded every batch\n",
    "    # except the last when reporting the epoch loss\n",
    "    epoch_loss = epoch_loss/nr_batches\n",
    "    loss_list.append(epoch_loss)\n",
    "\n",
    "    if(i % save_weights == 0):\n",
    "        torch.save(branch.state_dict(), '../save_weights/' + model_name + '_%04i' %(i)) \n",
    "        print(\"EPOCH: {} loss: {}\".format(i,epoch_loss))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
