{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Global notebook flags (read by cells below):\n",
    "#   notebook_mode: True when running interactively in Jupyter (enables magics and demo cells)\n",
    "#   viz_mode: True to load a saved experiment config for visualization instead of training\n",
    "notebook_mode = True\n",
    "viz_mode = False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import json\n",
    "import argparse\n",
    "import time\n",
    "\n",
    "import numpy as np\n",
    "\n",
    "import torch\n",
    "from torch.autograd import Variable\n",
    "import torch.nn.functional as F\n",
    "import torch.nn as nn\n",
    "\n",
    "import matplotlib\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "import networkx as nx\n",
    "from sklearn.utils.class_weight import compute_class_weight\n",
    "\n",
    "from tensorboardX import SummaryWriter\n",
    "from fastprogress import master_bar, progress_bar\n",
    "\n",
    "# Remove warning\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\", category=UserWarning)\n",
    "from scipy.sparse import SparseEfficiencyWarning\n",
    "warnings.simplefilter('ignore', SparseEfficiencyWarning)\n",
    "\n",
    "from config import *\n",
    "from utils.graph_utils import *\n",
    "from utils.google_tsp_reader import GoogleTSPReader\n",
    "from utils.plot_utils import *\n",
    "from models.gcn_model import ResidualGatedGCNModel\n",
    "from utils.model_utils import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if notebook_mode:\n",
    "    # Notebook-only setup: hot-reload edited modules and render figures inline as PNGs\n",
    "    %load_ext autoreload\n",
    "    %autoreload 2\n",
    "    %matplotlib inline\n",
    "    # NOTE(review): IPython.display.set_matplotlib_formats is deprecated in IPython >= 7.23;\n",
    "    # newer environments should use matplotlib_inline.backend_inline.set_matplotlib_formats\n",
    "    from IPython.display import set_matplotlib_formats\n",
    "    set_matplotlib_formats('png')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Load configurations"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Resolve the config path: CLI flag in script mode, a saved experiment config\n",
    "# in viz_mode, otherwise the repo default.\n",
    "if not notebook_mode:\n",
    "    parser = argparse.ArgumentParser(description='gcn_tsp_parser')\n",
    "    parser.add_argument('-c', '--config', type=str, default=\"configs/default.json\")\n",
    "    args = parser.parse_args()\n",
    "    config_path = args.config\n",
    "elif viz_mode:\n",
    "    # Reuse the config that was saved alongside a trained model\n",
    "    config_path = \"logs/tsp100/config.json\"\n",
    "else:\n",
    "    config_path = \"configs/default.json\"\n",
    "\n",
    "config = get_config(config_path)\n",
    "print(\"Loaded {}:\\n{}\".format(config_path, config))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Over-ride config params (for viz_mode)\n",
    "if viz_mode:\n",
    "    # Visualization favours one instance per batch and a wide beam for the best tours\n",
    "    config.gpu_id = \"0\"\n",
    "    config.batch_size = 1\n",
    "    config.accumulation_steps = 1\n",
    "    config.beam_size = 1280\n",
    "\n",
    "    # Uncomment below to evaluate generalization to variable sizes in viz_mode\n",
    "#     config.num_nodes = 50\n",
    "#     config.num_neighbors = 20\n",
    "#     config.train_filepath = f\"./data/tsp{config.num_nodes}_train_concorde.txt\"\n",
    "#     config.val_filepath = f\"./data/tsp{config.num_nodes}_val_concorde.txt\"\n",
    "#     config.test_filepath = f\"./data/tsp{config.num_nodes}_test_concorde.txt\""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Configure GPU options"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Pin CUDA device enumeration to PCI bus order so gpu_id is stable across runs\n",
    "os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n",
    "# Restrict this process to the GPU(s) named in the experiment config\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(config.gpu_id)  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Pick tensor types once so downstream cells are device-agnostic, and seed the\n",
    "# active device's RNG for reproducibility.\n",
    "# NOTE: the FloatTensor/LongTensor aliases are legacy typed-tensor constructors;\n",
    "# modern code would pass device=/dtype= arguments instead.\n",
    "if torch.cuda.is_available():\n",
    "    print(\"CUDA available, using GPU ID {}\".format(config.gpu_id))\n",
    "    dtypeFloat = torch.cuda.FloatTensor\n",
    "    dtypeLong = torch.cuda.LongTensor\n",
    "    torch.cuda.manual_seed(1)\n",
    "else:\n",
    "    print(\"CUDA not available\")\n",
    "    dtypeFloat = torch.FloatTensor\n",
    "    dtypeLong = torch.LongTensor\n",
    "    torch.manual_seed(1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Test data loading"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if notebook_mode:\n",
    "    # Smoke-test the data pipeline: read one batch and inspect tensor shapes\n",
    "    num_nodes = config.num_nodes\n",
    "    num_neighbors = config.num_neighbors\n",
    "    batch_size = config.batch_size\n",
    "    train_filepath = config.train_filepath\n",
    "    dataset = GoogleTSPReader(num_nodes, num_neighbors, batch_size, train_filepath)\n",
    "    print(\"Number of batches of size {}: {}\".format(batch_size, dataset.max_iter))\n",
    "\n",
    "    t = time.time()\n",
    "    batch = next(iter(dataset))  # Generate a batch of TSPs\n",
    "    print(\"Batch generation took: {:.3f} sec\".format(time.time() - t))\n",
    "\n",
    "    # Label fixed: the attribute is `edges_target` (was printed as \"edges_targets\")\n",
    "    print(\"edges:\", batch.edges.shape)\n",
    "    print(\"edges_values:\", batch.edges_values.shape)\n",
    "    print(\"edges_target:\", batch.edges_target.shape)\n",
    "    print(\"nodes:\", batch.nodes.shape)\n",
    "    print(\"nodes_target:\", batch.nodes_target.shape)\n",
    "    print(\"nodes_coord:\", batch.nodes_coord.shape)\n",
    "    print(\"tour_nodes:\", batch.tour_nodes.shape)\n",
    "    print(\"tour_len:\", batch.tour_len.shape)\n",
    "\n",
    "    # Plot the first instance of the batch (input graph + groundtruth edges)\n",
    "    idx = 0\n",
    "    f = plt.figure(figsize=(5, 5))\n",
    "    a = f.add_subplot(111)\n",
    "    plot_tsp(a, batch.nodes_coord[idx], batch.edges[idx], batch.edges_values[idx], batch.edges_target[idx])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Uncomment below to automatically compute the number of nearest neighbors to use in k-NN input graph\n",
    "# if config.num_neighbors == -1:\n",
    "#     dataset = GoogleTSPReader(config.num_nodes, config.num_neighbors, config.batch_size, config.train_filepath)\n",
    "#     config.num_neighbors = get_max_k(dataset, max_iter=10000//config.batch_size)\n",
    "#     print(f\"New num_neighbors for k-NN input: {config.num_neighbors}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Instantiate model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if notebook_mode == True:\n",
    "    # Instantiate the network\n",
    "    # (DataParallel wrapping is harmless on a single GPU / CPU)\n",
    "    net = nn.DataParallel(ResidualGatedGCNModel(config, dtypeFloat, dtypeLong))\n",
    "    if torch.cuda.is_available():\n",
    "        net.cuda()\n",
    "    print(net)\n",
    "\n",
    "    # Compute number of network parameters\n",
    "    nb_param = 0\n",
    "    for param in net.parameters():\n",
    "        nb_param += np.prod(list(param.data.size()))\n",
    "    print('Number of parameters:', nb_param)\n",
    "    \n",
    "    # Define optimizer\n",
    "    learning_rate = config.learning_rate\n",
    "    optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)\n",
    "    print(optimizer)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Test forward pass"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if notebook_mode and not viz_mode:\n",
    "    # Generate a batch of TSPs\n",
    "    num_nodes = config.num_nodes\n",
    "    num_neighbors = config.num_neighbors\n",
    "    batch_size = config.batch_size\n",
    "    train_filepath = config.train_filepath\n",
    "    dataset = iter(GoogleTSPReader(num_nodes, num_neighbors, batch_size, train_filepath))\n",
    "    batch = next(dataset)\n",
    "\n",
    "    # Convert batch to torch Variables\n",
    "    x_edges = Variable(torch.LongTensor(batch.edges).type(dtypeLong), requires_grad=False)\n",
    "    x_edges_values = Variable(torch.FloatTensor(batch.edges_values).type(dtypeFloat), requires_grad=False)\n",
    "    x_nodes = Variable(torch.LongTensor(batch.nodes).type(dtypeLong), requires_grad=False)\n",
    "    x_nodes_coord = Variable(torch.FloatTensor(batch.nodes_coord).type(dtypeFloat), requires_grad=False)\n",
    "    y_edges = Variable(torch.LongTensor(batch.edges_target).type(dtypeLong), requires_grad=False)\n",
    "    y_nodes = Variable(torch.LongTensor(batch.nodes_target).type(dtypeLong), requires_grad=False)\n",
    "\n",
    "    # Compute class weights to balance the edge/non-edge terms of the loss\n",
    "    edge_labels = y_edges.cpu().numpy().flatten()\n",
    "    edge_cw = compute_class_weight(\"balanced\", classes=np.unique(edge_labels), y=edge_labels)\n",
    "    print(\"Class weights: {}\".format(edge_cw))\n",
    "\n",
    "    # Forward pass (call the module, not .forward(), so Module.__call__/DataParallel dispatch runs)\n",
    "    y_preds, loss = net(x_edges, x_edges_values, x_nodes, x_nodes_coord, y_edges, edge_cw)\n",
    "    loss = loss.mean()  # Take mean of loss across multiple GPUs\n",
    "    print(\"Output size: {}\".format(y_preds.size()))\n",
    "    print(\"Loss value:\", loss.item())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Test backward pass"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if notebook_mode == True and viz_mode == False:\n",
    "    # Smoke-test one optimization step, using `loss`/`y_preds` produced by\n",
    "    # the forward-pass cell above (this cell depends on that kernel state).\n",
    "    # Backward pass\n",
    "    optimizer.zero_grad()\n",
    "    loss.backward()\n",
    "    \n",
    "    # Optimizer step\n",
    "    optimizer.step()\n",
    "    \n",
    "    # Compute error metrics \n",
    "    err_edges, err_tour, err_tsp, tour_err_idx, tsp_err_idx = edge_error(y_preds, y_edges, x_edges)\n",
    "    print(\"Edge error: {:.3f}\\nTour error: {:.3f}\\nTSP error: {:.3f}\".format(err_edges, err_tour, err_tsp))\n",
    "    \n",
    "    # Compute mean predicted and groundtruth tour length\n",
    "    # (predicted length is presumably derived from edge predictions rather than\n",
    "    # a decoded tour -- see utils.model_utils for the exact definition)\n",
    "    pred_tour_len = mean_tour_len_edges(x_edges_values, y_preds)\n",
    "    gt_tour_len = np.mean(batch.tour_len) \n",
    "    print(\"Predicted tour length: {:.3f}\\nGroundtruth tour length: {:.3f}\".format(pred_tour_len, gt_tour_len))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Training Loop (one epoch)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train_one_epoch(net, optimizer, config, master_bar):\n",
    "    \"\"\"Train the network for one epoch over the training set.\n",
    "\n",
    "    Args:\n",
    "        net: Model to train (wrapped in nn.DataParallel).\n",
    "        optimizer: Optimizer, stepped once every `accumulation_steps` batches.\n",
    "        config: Experiment settings (nodes, neighbors, batch size, filepaths, ...).\n",
    "        master_bar: fastprogress master bar to attach the batch progress bar to\n",
    "            (NB: this parameter shadows the imported `master_bar` factory).\n",
    "\n",
    "    Returns:\n",
    "        Tuple of (epoch_time_sec, loss, err_edges, err_tour, err_tsp,\n",
    "        pred_tour_len, gt_tour_len); the err_* entries are placeholder zeros.\n",
    "    \"\"\"\n",
    "    # Set training mode\n",
    "    net.train()\n",
    "\n",
    "    # Assign parameters\n",
    "    num_nodes = config.num_nodes\n",
    "    num_neighbors = config.num_neighbors\n",
    "    batch_size = config.batch_size\n",
    "    batches_per_epoch = config.batches_per_epoch\n",
    "    accumulation_steps = config.accumulation_steps\n",
    "    train_filepath = config.train_filepath\n",
    "\n",
    "    # Load TSP data\n",
    "    dataset = GoogleTSPReader(num_nodes, num_neighbors, batch_size, train_filepath)\n",
    "    if batches_per_epoch != -1:\n",
    "        batches_per_epoch = min(batches_per_epoch, dataset.max_iter)\n",
    "    else:\n",
    "        batches_per_epoch = dataset.max_iter\n",
    "\n",
    "    # Convert dataset to iterable\n",
    "    dataset = iter(dataset)\n",
    "\n",
    "    # Initially set loss class weights as None\n",
    "    edge_cw = None\n",
    "\n",
    "    # Initialize running data\n",
    "    running_loss = 0.0\n",
    "    # running_err_edges = 0.0\n",
    "    # running_err_tour = 0.0\n",
    "    # running_err_tsp = 0.0\n",
    "    running_pred_tour_len = 0.0\n",
    "    running_gt_tour_len = 0.0\n",
    "    running_nb_data = 0\n",
    "    running_nb_batch = 0\n",
    "\n",
    "    start_epoch = time.time()\n",
    "    for batch_num in progress_bar(range(batches_per_epoch), parent=master_bar):\n",
    "        # Generate a batch of TSPs\n",
    "        try:\n",
    "            batch = next(dataset)\n",
    "        except StopIteration:\n",
    "            break\n",
    "\n",
    "        # Convert batch to torch Variables\n",
    "        x_edges = Variable(torch.LongTensor(batch.edges).type(dtypeLong), requires_grad=False)\n",
    "        x_edges_values = Variable(torch.FloatTensor(batch.edges_values).type(dtypeFloat), requires_grad=False)\n",
    "        x_nodes = Variable(torch.LongTensor(batch.nodes).type(dtypeLong), requires_grad=False)\n",
    "        x_nodes_coord = Variable(torch.FloatTensor(batch.nodes_coord).type(dtypeFloat), requires_grad=False)\n",
    "        y_edges = Variable(torch.LongTensor(batch.edges_target).type(dtypeLong), requires_grad=False)\n",
    "        y_nodes = Variable(torch.LongTensor(batch.nodes_target).type(dtypeLong), requires_grad=False)\n",
    "\n",
    "        # Compute class weights once, from the first batch only.\n",
    "        # (The previous guard `type(edge_cw) != torch.Tensor` was always true --\n",
    "        # compute_class_weight returns a numpy array -- so the weights were\n",
    "        # silently recomputed for every batch.)\n",
    "        if edge_cw is None:\n",
    "            edge_labels = y_edges.cpu().numpy().flatten()\n",
    "            edge_cw = compute_class_weight(\"balanced\", classes=np.unique(edge_labels), y=edge_labels)\n",
    "\n",
    "        # Forward pass (call the module so Module.__call__/DataParallel dispatch runs)\n",
    "        y_preds, loss = net(x_edges, x_edges_values, x_nodes, x_nodes_coord, y_edges, edge_cw)\n",
    "        loss = loss.mean()  # Take mean of loss across multiple GPUs\n",
    "        loss = loss / accumulation_steps  # Scale loss by accumulation steps\n",
    "        loss.backward()\n",
    "\n",
    "        # Backward pass: step only every `accumulation_steps` batches (gradient accumulation)\n",
    "        if (batch_num+1) % accumulation_steps == 0:\n",
    "            optimizer.step()\n",
    "            optimizer.zero_grad()\n",
    "\n",
    "        # Compute error metrics and mean tour lengths\n",
    "        # err_edges, err_tour, err_tsp, tour_err_idx, tsp_err_idx = edge_error(y_preds, y_edges, x_edges)\n",
    "        pred_tour_len = mean_tour_len_edges(x_edges_values, y_preds)\n",
    "        gt_tour_len = np.mean(batch.tour_len)\n",
    "\n",
    "        # Update running data (`.item()` replaces the deprecated `.data.item()`)\n",
    "        running_nb_data += batch_size\n",
    "        running_loss += batch_size * loss.item() * accumulation_steps  # Re-scale loss\n",
    "        # running_err_edges += batch_size* err_edges\n",
    "        # running_err_tour += batch_size* err_tour\n",
    "        # running_err_tsp += batch_size* err_tsp\n",
    "        running_pred_tour_len += batch_size * pred_tour_len\n",
    "        running_gt_tour_len += batch_size * gt_tour_len\n",
    "        running_nb_batch += 1\n",
    "\n",
    "        # Log intermediate statistics\n",
    "        result = ('loss:{loss:.4f} pred_tour_len:{pred_tour_len:.3f} gt_tour_len:{gt_tour_len:.3f}'.format(\n",
    "            loss=running_loss/running_nb_data,\n",
    "            pred_tour_len=running_pred_tour_len/running_nb_data,\n",
    "            gt_tour_len=running_gt_tour_len/running_nb_data))\n",
    "        master_bar.child.comment = result\n",
    "\n",
    "    # Compute statistics for full epoch\n",
    "    loss = running_loss / running_nb_data\n",
    "    err_edges = 0  # running_err_edges/ running_nb_data\n",
    "    err_tour = 0  # running_err_tour/ running_nb_data\n",
    "    err_tsp = 0  # running_err_tsp/ running_nb_data\n",
    "    pred_tour_len = running_pred_tour_len / running_nb_data\n",
    "    gt_tour_len = running_gt_tour_len / running_nb_data\n",
    "\n",
    "    return time.time()-start_epoch, loss, err_edges, err_tour, err_tsp, pred_tour_len, gt_tour_len\n",
    "\n",
    "\n",
    "def metrics_to_str(epoch, time, learning_rate, loss, err_edges, err_tour, err_tsp, pred_tour_len, gt_tour_len):\n",
    "    \"\"\"Format one epoch's metrics as a single tab-separated log line.\n",
    "\n",
    "    `time` is elapsed seconds (reported in hours); NB it shadows the `time`\n",
    "    module inside this function. The err_* arguments are currently unused --\n",
    "    their format fields are commented out below.\n",
    "    \"\"\"\n",
    "    result = ( 'epoch:{epoch:0>2d}\\t'\n",
    "               'time:{time:.1f}h\\t'\n",
    "               'lr:{learning_rate:.2e}\\t'\n",
    "               'loss:{loss:.4f}\\t'\n",
    "               # 'err_edges:{err_edges:.2f}\\t'\n",
    "               # 'err_tour:{err_tour:.2f}\\t'\n",
    "               # 'err_tsp:{err_tsp:.2f}\\t'\n",
    "               'pred_tour_len:{pred_tour_len:.3f}\\t'\n",
    "               'gt_tour_len:{gt_tour_len:.3f}'.format(\n",
    "                   epoch=epoch,\n",
    "                   time=time/3600,\n",
    "                   learning_rate=learning_rate,\n",
    "                   loss=loss,\n",
    "                   # err_edges=err_edges,\n",
    "                   # err_tour=err_tour,\n",
    "                   # err_tsp=err_tsp,\n",
    "                   pred_tour_len=pred_tour_len,\n",
    "                   gt_tour_len=gt_tour_len))\n",
    "    return result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if notebook_mode and not viz_mode:\n",
    "    # Sanity check: run a single training epoch end-to-end\n",
    "    epoch_bar = master_bar(range(1))\n",
    "    for epoch in epoch_bar:\n",
    "        train_time, train_loss, train_err_edges, train_err_tour, train_err_tsp, train_pred_tour_len, train_gt_tour_len = train_one_epoch(net, optimizer, config, epoch_bar)\n",
    "        epoch_bar.write('t: ' + metrics_to_str(epoch, train_time, learning_rate, train_loss, train_err_edges, train_err_tour, train_err_tsp, train_pred_tour_len, train_gt_tour_len))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Evaluation Loop (for validation and test sets)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def test(net, config, master_bar, mode='test'):\n",
    "    \"\"\"Evaluate the network on the validation or test split.\n",
    "\n",
    "    Runs the forward pass per batch under torch.no_grad(), then decodes a\n",
    "    tour per instance with beamsearch: plain beamsearch for 'val' (faster),\n",
    "    beamsearch with the shortest-tour heuristic for 'test'.\n",
    "\n",
    "    Args:\n",
    "        net: Model to evaluate (wrapped in nn.DataParallel).\n",
    "        config: Experiment settings (nodes, neighbors, beam size, filepaths, ...).\n",
    "        master_bar: fastprogress master bar to attach the batch progress bar to.\n",
    "        mode: 'val' or 'test' -- selects both the dataset split and the decoder.\n",
    "\n",
    "    Returns:\n",
    "        Tuple of (eval_time_sec, loss, err_edges, err_tour, err_tsp,\n",
    "        pred_tour_len, gt_tour_len); the err_* entries are placeholder zeros.\n",
    "\n",
    "    Raises:\n",
    "        ValueError: if `mode` is neither 'val' nor 'test' (previously an\n",
    "        invalid mode surfaced later as an UnboundLocalError on `dataset`).\n",
    "    \"\"\"\n",
    "    if mode not in ('val', 'test'):\n",
    "        raise ValueError(\"mode must be 'val' or 'test', got {!r}\".format(mode))\n",
    "\n",
    "    # Set evaluation mode\n",
    "    net.eval()\n",
    "\n",
    "    # Assign parameters\n",
    "    num_nodes = config.num_nodes\n",
    "    num_neighbors = config.num_neighbors\n",
    "    batch_size = config.batch_size\n",
    "    batches_per_epoch = config.batches_per_epoch\n",
    "    beam_size = config.beam_size\n",
    "    val_filepath = config.val_filepath\n",
    "    test_filepath = config.test_filepath\n",
    "\n",
    "    # Load TSP data\n",
    "    if mode == 'val':\n",
    "        dataset = GoogleTSPReader(num_nodes, num_neighbors, batch_size=batch_size, filepath=val_filepath)\n",
    "    else:\n",
    "        dataset = GoogleTSPReader(num_nodes, num_neighbors, batch_size=batch_size, filepath=test_filepath)\n",
    "    batches_per_epoch = dataset.max_iter\n",
    "\n",
    "    # Convert dataset to iterable\n",
    "    dataset = iter(dataset)\n",
    "\n",
    "    # Initially set loss class weights as None\n",
    "    edge_cw = None\n",
    "\n",
    "    # Initialize running data\n",
    "    running_loss = 0.0\n",
    "    # running_err_edges = 0.0\n",
    "    # running_err_tour = 0.0\n",
    "    # running_err_tsp = 0.0\n",
    "    running_pred_tour_len = 0.0\n",
    "    running_gt_tour_len = 0.0\n",
    "    running_nb_data = 0\n",
    "    running_nb_batch = 0\n",
    "\n",
    "    with torch.no_grad():\n",
    "        start_test = time.time()\n",
    "        for batch_num in progress_bar(range(batches_per_epoch), parent=master_bar):\n",
    "            # Generate a batch of TSPs\n",
    "            try:\n",
    "                batch = next(dataset)\n",
    "            except StopIteration:\n",
    "                break\n",
    "\n",
    "            # Convert batch to torch Variables\n",
    "            x_edges = Variable(torch.LongTensor(batch.edges).type(dtypeLong), requires_grad=False)\n",
    "            x_edges_values = Variable(torch.FloatTensor(batch.edges_values).type(dtypeFloat), requires_grad=False)\n",
    "            x_nodes = Variable(torch.LongTensor(batch.nodes).type(dtypeLong), requires_grad=False)\n",
    "            x_nodes_coord = Variable(torch.FloatTensor(batch.nodes_coord).type(dtypeFloat), requires_grad=False)\n",
    "            y_edges = Variable(torch.LongTensor(batch.edges_target).type(dtypeLong), requires_grad=False)\n",
    "            y_nodes = Variable(torch.LongTensor(batch.nodes_target).type(dtypeLong), requires_grad=False)\n",
    "\n",
    "            # Compute class weights once, from the first batch only.\n",
    "            # (The previous guard `type(edge_cw) != torch.Tensor` was always true --\n",
    "            # compute_class_weight returns a numpy array -- so the weights were\n",
    "            # silently recomputed for every batch.)\n",
    "            if edge_cw is None:\n",
    "                edge_labels = y_edges.cpu().numpy().flatten()\n",
    "                edge_cw = compute_class_weight(\"balanced\", classes=np.unique(edge_labels), y=edge_labels)\n",
    "\n",
    "            # Forward pass (call the module so Module.__call__/DataParallel dispatch runs)\n",
    "            y_preds, loss = net(x_edges, x_edges_values, x_nodes, x_nodes_coord, y_edges, edge_cw)\n",
    "            loss = loss.mean()  # Take mean of loss across multiple GPUs\n",
    "\n",
    "            # Compute error metrics\n",
    "            # err_edges, err_tour, err_tsp, tour_err_idx, tsp_err_idx = edge_error(y_preds, y_edges, x_edges)\n",
    "\n",
    "            # Get batch beamsearch tour prediction\n",
    "            if mode == 'val':  # Validation: faster 'vanilla' beamsearch\n",
    "                bs_nodes = beamsearch_tour_nodes(\n",
    "                    y_preds, beam_size, batch_size, num_nodes, dtypeFloat, dtypeLong, probs_type='logits')\n",
    "            else:  # Testing: beamsearch with shortest tour heuristic\n",
    "                bs_nodes = beamsearch_tour_nodes_shortest(\n",
    "                    y_preds, x_edges_values, beam_size, batch_size, num_nodes, dtypeFloat, dtypeLong, probs_type='logits')\n",
    "\n",
    "            # Compute mean tour length\n",
    "            pred_tour_len = mean_tour_len_nodes(x_edges_values, bs_nodes)\n",
    "            gt_tour_len = np.mean(batch.tour_len)\n",
    "\n",
    "            # Update running data (`.item()` replaces the deprecated `.data.item()`)\n",
    "            running_nb_data += batch_size\n",
    "            running_loss += batch_size * loss.item()\n",
    "            # running_err_edges += batch_size* err_edges\n",
    "            # running_err_tour += batch_size* err_tour\n",
    "            # running_err_tsp += batch_size* err_tsp\n",
    "            running_pred_tour_len += batch_size * pred_tour_len\n",
    "            running_gt_tour_len += batch_size * gt_tour_len\n",
    "            running_nb_batch += 1\n",
    "\n",
    "            # Log intermediate statistics\n",
    "            result = ('loss:{loss:.4f} pred_tour_len:{pred_tour_len:.3f} gt_tour_len:{gt_tour_len:.3f}'.format(\n",
    "                loss=running_loss/running_nb_data,\n",
    "                pred_tour_len=running_pred_tour_len/running_nb_data,\n",
    "                gt_tour_len=running_gt_tour_len/running_nb_data))\n",
    "            master_bar.child.comment = result\n",
    "\n",
    "    # Compute statistics for full epoch\n",
    "    loss = running_loss / running_nb_data\n",
    "    err_edges = 0  # running_err_edges/ running_nb_data\n",
    "    err_tour = 0  # running_err_tour/ running_nb_data\n",
    "    err_tsp = 0  # running_err_tsp/ running_nb_data\n",
    "    pred_tour_len = running_pred_tour_len / running_nb_data\n",
    "    gt_tour_len = running_gt_tour_len / running_nb_data\n",
    "\n",
    "    return time.time()-start_test, loss, err_edges, err_tour, err_tsp, pred_tour_len, gt_tour_len"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if notebook_mode and not viz_mode:\n",
    "    # Sanity check: one evaluation pass over the validation and test sets\n",
    "    epoch_bar = master_bar(range(1))\n",
    "    for epoch in epoch_bar:\n",
    "        # Validation\n",
    "        val_time, val_loss, val_err_edges, val_err_tour, val_err_tsp, val_pred_tour_len, val_gt_tour_len = test(net, config, epoch_bar, mode='val')\n",
    "        epoch_bar.write('v: ' + metrics_to_str(epoch, val_time, learning_rate, val_loss, val_err_edges, val_err_tour, val_err_tsp, val_pred_tour_len, val_gt_tour_len))\n",
    "        # Testing\n",
    "        test_time, test_loss, test_err_edges, test_err_tour, test_err_tsp, test_pred_tour_len, test_gt_tour_len = test(net, config, epoch_bar, mode='test')\n",
    "        epoch_bar.write('T: ' + metrics_to_str(epoch, test_time, learning_rate, test_loss, test_err_edges, test_err_tour, test_err_tsp, test_pred_tour_len, test_gt_tour_len))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Run full training pipeline "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def main(config):\n",
    "    \"\"\"Run the full training pipeline described by `config`.\n",
    "\n",
    "    Builds the model and optimizer, logs to ./logs/<expt_name>/ (Tensorboard\n",
    "    scalars + a copy of the config), then alternates training with periodic\n",
    "    validation/testing. The learning rate is divided by `config.decay_rate`\n",
    "    whenever validation loss fails to improve by more than 1% over the\n",
    "    previous validation. Saves the best-validation checkpoint, a rolling\n",
    "    last-epoch checkpoint, and a snapshot every 250 epochs.\n",
    "\n",
    "    Returns:\n",
    "        The trained network (nn.DataParallel-wrapped).\n",
    "    \"\"\"\n",
    "    # Instantiate the network\n",
    "    net = nn.DataParallel(ResidualGatedGCNModel(config, dtypeFloat, dtypeLong))\n",
    "    if torch.cuda.is_available():\n",
    "        net.cuda()\n",
    "    print(net)\n",
    "\n",
    "    # Compute number of network parameters\n",
    "    nb_param = 0\n",
    "    for param in net.parameters():\n",
    "        nb_param += np.prod(list(param.data.size()))\n",
    "    print('Number of parameters:', nb_param)\n",
    "\n",
    "    # Create log directory\n",
    "    log_dir = f\"./logs/{config.expt_name}/\"\n",
    "    os.makedirs(log_dir, exist_ok=True)\n",
    "    # Persist settings next to the logs so viz_mode can reload them later\n",
    "    # (assumes config is JSON-serializable). The context manager closes the\n",
    "    # file handle, which was previously leaked by json.dump(config, open(...)).\n",
    "    with open(f\"{log_dir}/config.json\", \"w\") as config_file:\n",
    "        json.dump(config, config_file, indent=4)\n",
    "    writer = SummaryWriter(log_dir)  # Define Tensorboard writer\n",
    "\n",
    "    # Training parameters\n",
    "    num_nodes = config.num_nodes\n",
    "    num_neighbors = config.num_neighbors\n",
    "    max_epochs = config.max_epochs\n",
    "    val_every = config.val_every\n",
    "    test_every = config.test_every\n",
    "    batch_size = config.batch_size\n",
    "    batches_per_epoch = config.batches_per_epoch\n",
    "    accumulation_steps = config.accumulation_steps\n",
    "    learning_rate = config.learning_rate\n",
    "    decay_rate = config.decay_rate\n",
    "    val_loss_old = 1e6  # For decaying LR based on validation loss\n",
    "    best_pred_tour_len = 1e6  # For saving checkpoints\n",
    "\n",
    "    # Define optimizer\n",
    "    optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)\n",
    "    print(optimizer)\n",
    "\n",
    "    epoch_bar = master_bar(range(max_epochs))\n",
    "    for epoch in epoch_bar:\n",
    "        # Log to Tensorboard\n",
    "        writer.add_scalar('learning_rate', learning_rate, epoch)\n",
    "\n",
    "        # Train\n",
    "        train_time, train_loss, train_err_edges, train_err_tour, train_err_tsp, train_pred_tour_len, train_gt_tour_len = train_one_epoch(net, optimizer, config, epoch_bar)\n",
    "        epoch_bar.write('t: ' + metrics_to_str(epoch, train_time, learning_rate, train_loss, train_err_edges, train_err_tour, train_err_tsp, train_pred_tour_len, train_gt_tour_len))\n",
    "        writer.add_scalar('loss/train_loss', train_loss, epoch)\n",
    "        writer.add_scalar('pred_tour_len/train_pred_tour_len', train_pred_tour_len, epoch)\n",
    "        writer.add_scalar('optimality_gap/train_opt_gap', train_pred_tour_len/train_gt_tour_len - 1, epoch)\n",
    "\n",
    "        if epoch % val_every == 0 or epoch == max_epochs-1:\n",
    "            # Validate\n",
    "            val_time, val_loss, val_err_edges, val_err_tour, val_err_tsp, val_pred_tour_len, val_gt_tour_len = test(net, config, epoch_bar, mode='val')\n",
    "            epoch_bar.write('v: ' + metrics_to_str(epoch, val_time, learning_rate, val_loss, val_err_edges, val_err_tour, val_err_tsp, val_pred_tour_len, val_gt_tour_len))\n",
    "            writer.add_scalar('loss/val_loss', val_loss, epoch)\n",
    "            writer.add_scalar('pred_tour_len/val_pred_tour_len', val_pred_tour_len, epoch)\n",
    "            writer.add_scalar('optimality_gap/val_opt_gap', val_pred_tour_len/val_gt_tour_len - 1, epoch)\n",
    "\n",
    "            # Save checkpoint whenever validation tour length improves\n",
    "            if val_pred_tour_len < best_pred_tour_len:\n",
    "                best_pred_tour_len = val_pred_tour_len  # Update best prediction\n",
    "                torch.save({\n",
    "                    'epoch': epoch,\n",
    "                    'model_state_dict': net.state_dict(),\n",
    "                    'optimizer_state_dict': optimizer.state_dict(),\n",
    "                    'train_loss': train_loss,\n",
    "                    'val_loss': val_loss,\n",
    "                }, log_dir+\"best_val_checkpoint.tar\")\n",
    "\n",
    "            # Decay LR if val loss did not improve by more than 1%\n",
    "            if val_loss > 0.99 * val_loss_old:\n",
    "                learning_rate /= decay_rate\n",
    "                optimizer = update_learning_rate(optimizer, learning_rate)\n",
    "\n",
    "            val_loss_old = val_loss  # Update old validation loss\n",
    "\n",
    "        if epoch % test_every == 0 or epoch == max_epochs-1:\n",
    "            # Test\n",
    "            test_time, test_loss, test_err_edges, test_err_tour, test_err_tsp, test_pred_tour_len, test_gt_tour_len = test(net, config, epoch_bar, mode='test')\n",
    "            epoch_bar.write('T: ' + metrics_to_str(epoch, test_time, learning_rate, test_loss, test_err_edges, test_err_tour, test_err_tsp, test_pred_tour_len, test_gt_tour_len))\n",
    "            writer.add_scalar('loss/test_loss', test_loss, epoch)\n",
    "            writer.add_scalar('pred_tour_len/test_pred_tour_len', test_pred_tour_len, epoch)\n",
    "            writer.add_scalar('optimality_gap/test_opt_gap', test_pred_tour_len/test_gt_tour_len - 1, epoch)\n",
    "\n",
    "        # Save training checkpoint at the end of epoch\n",
    "        torch.save({\n",
    "            'epoch': epoch,\n",
    "            'model_state_dict': net.state_dict(),\n",
    "            'optimizer_state_dict': optimizer.state_dict(),\n",
    "            'train_loss': train_loss,\n",
    "            'val_loss': val_loss,\n",
    "        }, log_dir+\"last_train_checkpoint.tar\")\n",
    "\n",
    "        # Save checkpoint after every 250 epochs\n",
    "        if epoch != 0 and (epoch % 250 == 0 or epoch == max_epochs-1):\n",
    "            torch.save({\n",
    "                'epoch': epoch,\n",
    "                'model_state_dict': net.state_dict(),\n",
    "                'optimizer_state_dict': optimizer.state_dict(),\n",
    "                'train_loss': train_loss,\n",
    "                'val_loss': val_loss,\n",
    "            }, log_dir+f\"checkpoint_epoch{epoch}.tar\")\n",
    "\n",
    "    return net"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if not viz_mode:\n",
    "    # del net  # optionally free the demo model instantiated above before the full run\n",
    "    net = main(config)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Load saved checkpoint"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if notebook_mode==True:\n",
    "    # Load checkpoint\n",
    "    log_dir = f\"./logs/{config.expt_name}/\"\n",
    "    if torch.cuda.is_available():\n",
    "        checkpoint = torch.load(log_dir+\"best_val_checkpoint.tar\")\n",
    "    else:\n",
    "        # Remap CUDA-saved tensors onto the CPU\n",
    "        checkpoint = torch.load(log_dir+\"best_val_checkpoint.tar\", map_location='cpu')\n",
    "    # Load network state\n",
    "    net.load_state_dict(checkpoint['model_state_dict'])\n",
    "    # Load optimizer state\n",
    "    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n",
    "    # Load other training parameters\n",
    "    epoch = checkpoint['epoch']\n",
    "    train_loss = checkpoint['train_loss']\n",
    "    val_loss = checkpoint['val_loss']\n",
    "    # Recover the current learning rate from the restored optimizer state\n",
    "    for param_group in optimizer.param_groups:\n",
    "        learning_rate = param_group['lr']\n",
    "    print(f\"Loaded checkpoint from epoch {epoch}\")    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Visualize model predictions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if notebook_mode == True:\n",
    "    # Switch to evaluation mode (disables dropout / batch-norm updates)\n",
    "    net.eval()\n",
    "    \n",
    "    batch_size = 10\n",
    "    num_nodes = config.num_nodes\n",
    "    num_neighbors = config.num_neighbors\n",
    "    beam_size = config.beam_size\n",
    "    test_filepath = config.test_filepath\n",
    "    # Pull a single batch from the test set for visualization\n",
    "    dataset = iter(GoogleTSPReader(num_nodes, num_neighbors, batch_size, test_filepath))\n",
    "    batch = next(dataset)\n",
    "\n",
    "    with torch.no_grad():\n",
    "        # Convert batch to torch Variables\n",
    "        x_edges = Variable(torch.LongTensor(batch.edges).type(dtypeLong), requires_grad=False)\n",
    "        x_edges_values = Variable(torch.FloatTensor(batch.edges_values).type(dtypeFloat), requires_grad=False)\n",
    "        x_nodes = Variable(torch.LongTensor(batch.nodes).type(dtypeLong), requires_grad=False)\n",
    "        x_nodes_coord = Variable(torch.FloatTensor(batch.nodes_coord).type(dtypeFloat), requires_grad=False)\n",
    "        y_edges = Variable(torch.LongTensor(batch.edges_target).type(dtypeLong), requires_grad=False)\n",
    "        y_nodes = Variable(torch.LongTensor(batch.nodes_target).type(dtypeLong), requires_grad=False)\n",
    "        \n",
    "        # Compute class weights for the edge loss (tour edges are a small minority class)\n",
    "        edge_labels = y_edges.cpu().numpy().flatten()\n",
    "        edge_cw = compute_class_weight(\"balanced\", classes=np.unique(edge_labels), y=edge_labels)\n",
    "        print(\"Class weights: {}\".format(edge_cw))\n",
    "        \n",
    "        # Forward pass: call the module itself (not .forward) so that\n",
    "        # __call__ hooks and any wrappers (e.g. DataParallel) are honoured\n",
    "        y_preds, loss = net(x_edges, x_edges_values, x_nodes, x_nodes_coord, y_edges, edge_cw)\n",
    "        loss = loss.mean()\n",
    "        \n",
    "        # Get batch beamsearch tour prediction (shortest-tour heuristic;\n",
    "        # beamsearch_tour_nodes is the heuristic-free alternative)\n",
    "        bs_nodes = beamsearch_tour_nodes_shortest(\n",
    "            y_preds, x_edges_values, beam_size, batch_size, num_nodes, dtypeFloat, dtypeLong, probs_type='logits')\n",
    "\n",
    "        # Compute mean tour length for predictions vs. groundtruth\n",
    "        pred_tour_len = mean_tour_len_nodes(x_edges_values, bs_nodes)\n",
    "        gt_tour_len = np.mean(batch.tour_len)\n",
    "        print(\"Predicted tour length: {:.3f} (mean)\\nGroundtruth tour length: {:.3f} (mean)\".format(pred_tour_len, gt_tour_len))\n",
    "\n",
    "        # Sanity check: every predicted tour must visit each node exactly once\n",
    "        for idx, nodes in enumerate(bs_nodes):\n",
    "            if not is_valid_tour(nodes, num_nodes):\n",
    "                print(idx, \" Invalid tour: \", nodes)\n",
    "\n",
    "        # Plot prediction visualizations\n",
    "        plot_predictions_beamsearch(x_nodes_coord, x_edges, x_edges_values, y_edges, y_preds, bs_nodes, num_plots=batch_size)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Evaluate checkpoint on test set"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if notebook_mode==True:\n",
    "    # Evaluate the loaded checkpoint for one extra \"epoch\" on the test set\n",
    "    epoch_bar = master_bar(range(epoch+1, epoch+2))\n",
    "    config.batch_size = 200\n",
    "    \n",
    "    def run_search(label, beam_size, mode):\n",
    "        \"\"\"Run one evaluation pass with the given beam width / split and report its metrics.\n",
    "        \n",
    "        Returns the metrics tuple produced by test():\n",
    "        (time, loss, err_edges, err_tour, err_tsp, pred_tour_len, gt_tour_len).\n",
    "        \"\"\"\n",
    "        config.beam_size = beam_size\n",
    "        t = time.time()\n",
    "        metrics = test(net, config, epoch_bar, mode=mode)\n",
    "        print(\"{} time: {}s\".format(label, time.time()-t))\n",
    "        epoch_bar.write(label + ': ' + metrics_to_str(epoch, metrics[0], learning_rate, *metrics[1:]))\n",
    "        return metrics\n",
    "    \n",
    "    for epoch in epoch_bar:\n",
    "        # Set validation dataset as the test dataset so that we can perform \n",
    "        # greedy and vanilla beam search on test data without hassle!\n",
    "        config.val_filepath = config.test_filepath\n",
    "        \n",
    "        # Greedy search (beam width 1)\n",
    "        val_time, val_loss, val_err_edges, val_err_tour, val_err_tsp, val_pred_tour_len, val_gt_tour_len = run_search('G', 1, 'val')\n",
    "        \n",
    "        # Vanilla beam search\n",
    "        val_time, val_loss, val_err_edges, val_err_tour, val_err_tsp, val_pred_tour_len, val_gt_tour_len = run_search('BS', 1280, 'val')\n",
    "        \n",
    "        # Beam search with shortest tour heuristic\n",
    "        test_time, test_loss, test_err_edges, test_err_tour, test_err_tsp, test_pred_tour_len, test_gt_tour_len = run_search('BS*', 1280, 'test')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
