{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Config"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%matplotlib inline\n",
    "%load_ext autoreload\n",
    "%autoreload 2\n",
    "from common import *\n",
    "from competitions import dogscats;\n",
    "from pathlib import Path\n",
    "\n",
    "'''\n",
    "Command Line Tool\n",
    "1) User 'create_project' page with name and directory ?\n",
    "2) Redirect to the label page (with project_name as the argument) <--- interesting to figure this out\n",
    "3) Call 'init_dataset' and display images on page (refactor so it doesn't require label names ahead) - simpler ATM\n",
    "4) Manually kick off the infinite loop and monitor\n",
    "5) Start labeling images and watch the infinite loop\n",
    "6) Make sure model accuracy is increasing and label counts\n",
    "12) Create new project and test end-to-end that things are updating/training/saving etc.\n",
    "\n",
    "\n",
    "Uncertainty Filtering\n",
    "13) Update 'Next' call to return most uncertain images (update Flask API)\n",
    "    -load labelai.csv prediction into Pandas DF\n",
    "    -filter for unlabeled images (no userTags)\n",
    "    -if len(unlabeled < N_REQUESTED)\n",
    "        return all_unlabeled\n",
    "    -call get_most_uncertain(df, n)\n",
    "    -if len(uncertain < N_REQUESTED)\n",
    "        -get_basic_unlabeled(df, n)\n",
    "14) Get_most_uncertain(df, n)   <------- binary classification for now\n",
    "    -Filter for unlabeled with modelPreds\n",
    "    -shuffle\n",
    "    -min_prob = .4, max_prob = .6\n",
    "    -fnames = []\n",
    "    while len(fnames < n):\n",
    "        for i in df.size():\n",
    "            -if modelProb > min and modelProb < max:\n",
    "                fnames.append(df.iloc[i].pop()) <--- need to pop()\n",
    "        min_prob -= .1\n",
    "        max_prob += .1\n",
    "    return fnames\n",
    "''';"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# --- Paths: input images and per-project label storage ---\n",
    "TRAIN_PATH = cfg.PATHS['datasets']['inputs']['trn_jpg'] \n",
    "TEST_PATH = cfg.PATHS['datasets']['inputs']['tst_jpg'] \n",
    "LABELS_PATH = os.path.join(cfg.PATHS['project'], 'labels')\n",
    "\n",
    "# --- Hardware: pin the GPU and enable cudnn autotuning ---\n",
    "HARDWARE_CONFIG = {\n",
    "    'hostname': socket.gethostname(),\n",
    "    'random_seed': 3,\n",
    "    'gpu_device':0\n",
    "}\n",
    "torch.cuda.set_device(HARDWARE_CONFIG['gpu_device'])\n",
    "cudnn.benchmark = True  # autotune conv algorithms for fixed-size inputs\n",
    "\n",
    "# --- Data / training hyperparameters ---\n",
    "DATA_CONFIG = {\n",
    "    'img_rescale': 256,\n",
    "    'dset_fold': 'labelai',\n",
    "    'n_classes': len(dogscats.LABEL_NAMES),\n",
    "    'label_names': dogscats.LABEL_NAMES\n",
    "}\n",
    "\n",
    "TRAIN_CONFIG = {\n",
    "    'initial_lr': 1e-4,\n",
    "    'weight_decay': 1e-4,\n",
    "    'n_epochs': 50,\n",
    "    'n_cycles': 9,\n",
    "    'early_stop_metric': metric.Loss().name,\n",
    "    'max_patience': 5,\n",
    "    'batch_size': 32,\n",
    "    'threshold': 0.5,\n",
    "    'save_weights_cadence': 1, #every epoch\n",
    "    'lr_schedule': {50:1e-4}\n",
    "}\n",
    "OTHER_CONFIG = {}\n",
    "\n",
    "# Per-split transform pipelines. Only TRAIN adds augmentation (random\n",
    "# horizontal flip); VAL/TEST/UNLABELED share the same deterministic\n",
    "# resize -> tensor -> ImageNet-normalize pipeline.\n",
    "TRANSFORMS = {\n",
    "    c.TRAIN: torchsample.transforms.Compose([\n",
    "        transforms.Scale(size=[DATA_CONFIG['img_rescale'], \n",
    "                               DATA_CONFIG['img_rescale']]),\n",
    "        transforms.RandomHorizontalFlip(),\n",
    "        transforms.ToTensor(),\n",
    "        data_aug.IMAGENET_NORMALIZE\n",
    "    ]),\n",
    "    c.VAL: torchsample.transforms.Compose([\n",
    "        transforms.Scale(size=[DATA_CONFIG['img_rescale'], \n",
    "                               DATA_CONFIG['img_rescale']]),\n",
    "        transforms.ToTensor(),\n",
    "        data_aug.IMAGENET_NORMALIZE\n",
    "    ]),\n",
    "    c.TEST: torchsample.transforms.Compose([\n",
    "        transforms.Scale(size=[DATA_CONFIG['img_rescale'], \n",
    "                               DATA_CONFIG['img_rescale']]),\n",
    "        transforms.ToTensor(),\n",
    "        data_aug.IMAGENET_NORMALIZE\n",
    "    ]),\n",
    "    c.UNLABELED: torchsample.transforms.Compose([\n",
    "        transforms.Scale(size=[DATA_CONFIG['img_rescale'], \n",
    "                               DATA_CONFIG['img_rescale']]),\n",
    "        transforms.ToTensor(),\n",
    "        data_aug.IMAGENET_NORMALIZE\n",
    "    ])\n",
    "}\n",
    "\n",
    "def _proj_meta_fpath(name, fname):\n",
    "    # Shared helper: path to a metadata file inside a project's label dir.\n",
    "    return os.path.join(cfg.PATHS['labels'], name, fname)\n",
    "\n",
    "def get_labels_fpath(name):\n",
    "    \"\"\"Path to the project's fold/labels JSON.\"\"\"\n",
    "    return _proj_meta_fpath(name, 'labels.json')\n",
    "\n",
    "def get_scores_fpath(name):\n",
    "    \"\"\"Path to the project's metrics JSON.\"\"\"\n",
    "    return _proj_meta_fpath(name, 'metrics.json')\n",
    "\n",
    "def get_preds_fpath(name):\n",
    "    \"\"\"Path to the project's predictions JSON.\"\"\"\n",
    "    return _proj_meta_fpath(name, 'predictions.json')\n",
    "\n",
    "def get_uncertainty_fpath(name):\n",
    "    \"\"\"Path to the project's uncertainty rankings CSV.\"\"\"\n",
    "    return _proj_meta_fpath(name, 'rankings.csv')\n",
    "\n",
    "def init_dataset(name, input_dir, file_ext, label_names=None):\n",
    "    \"\"\"Create and save a fresh labeling fold for project `name`.\n",
    "\n",
    "    Every image id found in `input_dir` starts out in 'unlabeled';\n",
    "    labeling later moves ids into trn/val/tst. Returns the fold dict.\n",
    "    \"\"\"\n",
    "    fpaths, ids = utils.files.get_paths_to_files(input_dir, strip_ext=True)\n",
    "    label_names = [] if label_names is None else label_names\n",
    "    fold = {\n",
    "        'name': name,\n",
    "        'file_ext': file_ext,\n",
    "        'inputs_dir': input_dir,\n",
    "        'label_names': sorted(label_names),\n",
    "        'trn': {},\n",
    "        'val': {},\n",
    "        'tst': {}, #auditing purposes\n",
    "        'unlabeled': {}, #these need to be queried and popped by key\n",
    "        'metrics': {},\n",
    "        'created': time.strftime(\"%m/%d/%Y %H:%M:%S\", time.localtime())\n",
    "    }\n",
    "    for id_ in ids:\n",
    "        fold['unlabeled'][id_] = id_\n",
    "    # NOTE(review): the directory is created under LABELS_PATH but the JSON\n",
    "    # is saved at get_labels_fpath(name) (built from cfg.PATHS['labels']) --\n",
    "    # confirm both resolve to the same directory.\n",
    "    os.makedirs(os.path.join(LABELS_PATH, name), exist_ok=True)\n",
    "    fold_fpath = get_labels_fpath(name)\n",
    "    utils.files.save_json(fold_fpath, fold)\n",
    "    return fold\n",
    "\n",
    "def make_entry(labels=None, model_labels=None, model_probs=None):\n",
    "    \"\"\"Build a per-image record holding user labels and model outputs.\"\"\"\n",
    "    labels = [] if labels is None else labels\n",
    "    model_labels = [] if model_labels is None else model_labels\n",
    "    model_probs = [] if model_probs is None else model_probs\n",
    "    return {\n",
    "        'labels': labels,\n",
    "        'model_labels': model_labels,\n",
    "        'model_probs': model_probs,\n",
    "    }\n",
    "\n",
    "def add_or_update_entry(fold, dset, id_, entry):\n",
    "    \"\"\"Insert or overwrite the record for image `id_` in fold[dset].\"\"\"\n",
    "    fold[dset][id_] = entry\n",
    "\n",
    "def move_unlabeled_to_labeled(fold, dset, id_, entry):\n",
    "    \"\"\"Remove `id_` from the unlabeled pool and file it under `dset`.\"\"\"\n",
    "    del fold['unlabeled'][id_]\n",
    "    add_or_update_entry(fold, dset, id_, entry)\n",
    "\n",
    "def get_model(fold):\n",
    "    \"\"\"Pretrained resnet34 trunk + fresh softmax classifier head, on GPU.\"\"\"\n",
    "    # n_freeze=10**5: presumably freezes the entire trunk -- TODO confirm.\n",
    "    resnet = models.resnet.get_resnet34(pretrained=True, n_freeze=10**5, verbose=False)\n",
    "    resnet = models.builder.cut_model(resnet, -1)  # cut off the final layer\n",
    "    classifier = models.builder.get_classifier(in_feat=512, n_classes=len(fold['label_names']),\n",
    "                                               activation=nn.Softmax(), p=0.5)\n",
    "    model = models.resnet.SimpleResnet(resnet, classifier)\n",
    "    return model.cuda()\n",
    "\n",
    "def get_loader(fold, dset, shuffle):\n",
    "    \"\"\"DataLoader over fold[dset] images using that split's transforms.\"\"\"\n",
    "    fpaths, targs = metadata.get_fpaths_targs_from_label_fold(fold, dset)\n",
    "    data = datasets.datasets.FileDataset(fpaths, 'pil', targs, TRANSFORMS[dset])\n",
    "    return data_loaders.get_data_loader(data, TRAIN_CONFIG['batch_size'], \n",
    "                                        shuffle=shuffle, n_workers=4, pin_memory=True)\n",
    "\n",
    "def get_criterion():\n",
    "    \"\"\"Loss function: binary cross-entropy.\"\"\"\n",
    "    return F.binary_cross_entropy\n",
    "\n",
    "def get_optimizer(model):\n",
    "    \"\"\"Adam over trainable (requires_grad) parameters only.\"\"\"\n",
    "    return optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), \n",
    "        TRAIN_CONFIG['initial_lr'], weight_decay=TRAIN_CONFIG['weight_decay'])\n",
    "\n",
    "def get_lr_adjuster():\n",
    "    \"\"\"Epoch-based scheduled LR from TRAIN_CONFIG['lr_schedule'].\"\"\"\n",
    "    return learning_rates.ScheduledLR(TRAIN_CONFIG['initial_lr'], 'epoch', \n",
    "                                      TRAIN_CONFIG['lr_schedule'])\n",
    "\n",
    "def get_trainer(crit, optim, lr_adjuster):\n",
    "    # NOTE(review): `crit` is passed twice -- presumably (loss_fn, metric_fn);\n",
    "    # confirm against trainers.Trainer's signature.\n",
    "    return trainers.Trainer(crit, crit, optim, lr_adjuster)\n",
    "\n",
    "def load_weights(model, exp_name, epoch=None):\n",
    "    \"\"\"Load saved weights for experiment `exp_name` at `epoch` into `model`.\n",
    "\n",
    "    Bug fix: `epoch` was previously a free (undefined) variable, so every\n",
    "    call raised NameError. It is now an explicit parameter.\n",
    "    \"\"\"\n",
    "    exp_utils.load_weights_by_exp_and_epoch(model, exp_name, epoch)\n",
    "    \n",
    "def make_config(proj_name, model, optimizer, criterion, lr_adjuster):\n",
    "    \"\"\"Assemble the experiment config dict (display name, metrics, components).\"\"\"\n",
    "    # The display name encodes the key hyperparameters for quick scanning.\n",
    "    EXP_NAME_ARGS = [utils.general.get_class_name(model), \n",
    "                    utils.general.get_class_name(optimizer), \n",
    "                    utils.general.get_class_name(lr_adjuster),\n",
    "                    'img'+str(DATA_CONFIG['img_rescale']),\n",
    "                    'lr'+str(TRAIN_CONFIG['initial_lr']),\n",
    "                    'wd'+str(TRAIN_CONFIG['weight_decay']),\n",
    "                    'bs'+str(TRAIN_CONFIG['batch_size']),\n",
    "                    str(DATA_CONFIG['dset_fold'])]\n",
    "    EXPERIMENT_NAME = exp_utils.generate_display_name(proj_name, EXP_NAME_ARGS)\n",
    "    METRICS = [metric.Loss(), metric.Accuracy(), metric.F2Score()]\n",
    "    AUX_METRICS = [metric.AuxiliaryMetric('LearningRate', 'lr'), \n",
    "                   metric.AuxiliaryMetric('SystemMemory', 'mb')]\n",
    "    VISUALIZERS = [Viz(EXPERIMENT_NAME)]\n",
    "    return {\n",
    "        'name': EXPERIMENT_NAME,\n",
    "        'parent_dir': cfg.PATHS['experiments'],\n",
    "        'metrics': METRICS,\n",
    "        'aux_metrics': AUX_METRICS,\n",
    "        'visualizers': VISUALIZERS,\n",
    "        'data': DATA_CONFIG,\n",
    "        'training': TRAIN_CONFIG,\n",
    "        'other': OTHER_CONFIG,\n",
    "        'transforms': TRANSFORMS[c.TRAIN],  # only the train pipeline is recorded\n",
    "        'hardware': HARDWARE_CONFIG,\n",
    "        'model': model,\n",
    "        'optimizer': optimizer,\n",
    "        'lr_adjuster': lr_adjuster,\n",
    "        'criterion': criterion }\n",
    "\n",
    "def create_experiment(config):\n",
    "    \"\"\"Create and initialize a new Experiment from `config`.\"\"\"\n",
    "    exp = Experiment(config['name'], cfg.PATHS['experiments'])\n",
    "    exp.init(config)\n",
    "    print(exp.name)\n",
    "    return exp\n",
    "\n",
    "def resume_experiment(name):\n",
    "    \"\"\"Reload a previously saved Experiment by name.\"\"\"\n",
    "    exp = Experiment(name, cfg.PATHS['experiments'])\n",
    "    exp.resume(verbose=False)\n",
    "    return exp\n",
    "\n",
    "def create_project(name, img_path, label_names):\n",
    "    \"\"\"Create a new labeling project backed by the images in `img_path`.\n",
    "\n",
    "    Bug fix: init_dataset expects (name, input_dir, file_ext, label_names);\n",
    "    the old call passed (img_path, fold_fpath, ...), corrupting both the\n",
    "    project name and the input directory. init_dataset also computes its\n",
    "    own save path from `name`, so the precomputed fpath was never used.\n",
    "    \"\"\"\n",
    "    fold = init_dataset(name, img_path, c.JPG_EXT, label_names)\n",
    "    return fold\n",
    "\n",
    "def get_img_count(fold, dset):\n",
    "    \"\"\"Number of images currently filed under fold[dset].\"\"\"\n",
    "    return len(fold[dset].keys())\n",
    "\n",
    "def get_img_counts(proj_name):\n",
    "    \"\"\"Per-split image counts for a project, loaded fresh from disk.\"\"\"\n",
    "    fold = load_fold(proj_name)\n",
    "    return {\n",
    "        c.TRAIN: get_img_count(fold, c.TRAIN),\n",
    "        c.VAL: get_img_count(fold, c.VAL),\n",
    "        c.TEST: get_img_count(fold, c.TEST),\n",
    "        c.UNLABELED: get_img_count(fold, c.UNLABELED)\n",
    "    }\n",
    "\n",
    "def load_scores(fpath):\n",
    "    \"\"\"Load the metrics doc from `fpath`, or an empty skeleton if absent.\"\"\"\n",
    "    if os.path.isfile(fpath):\n",
    "        return utils.files.load_json(fpath)\n",
    "    return {\n",
    "        \"experiments\":{}, \n",
    "        \"latest\":{},\n",
    "        \"counts\":{}\n",
    "    }\n",
    "    \n",
    "def get_preds(exp, loader):\n",
    "    \"\"\"Run the model over `loader`; return (probabilities, thresholded preds).\"\"\"\n",
    "    probs = predictions.get_probabilities(exp.model, loader)\n",
    "    # NOTE(review): hardcoded 0.5 here, not TRAIN_CONFIG['threshold'] -- confirm intended.\n",
    "    preds = predictions.get_predictions(probs, 0.5)\n",
    "    return probs, preds\n",
    "\n",
    "def save_scores(exp, proj_name, loader):\n",
    "    \"\"\"Evaluate `exp` on `loader` and merge the results into metrics.json.\"\"\"\n",
    "    print(\"Saving scores\")\n",
    "    probs, preds = get_preds(exp, loader)\n",
    "    targs = loader.dataset.targets\n",
    "    loss = metric_utils.get_cross_entropy_loss(probs, targs)\n",
    "    \n",
    "    scores_fpath = get_scores_fpath(proj_name)\n",
    "    scores = load_scores(scores_fpath)\n",
    "    scores[\"experiments\"][exp.name] = exp.history.metrics_history\n",
    "    scores[\"counts\"] = get_img_counts(proj_name)\n",
    "    scores[\"experiments\"][exp.name]['created'] = time.strftime(\n",
    "        \"%m/%d/%Y %H:%M:%S\", time.localtime())\n",
    "    # Refresh the 'latest' block with metrics from this evaluation.\n",
    "    for m in exp.metrics:\n",
    "        scores[\"latest\"][m.name] = m.evaluate(\n",
    "            loss, preds, probs, targs)\n",
    "    utils.files.save_json(scores_fpath, scores)\n",
    "    \n",
    "def load_fold(name):\n",
    "    \"\"\"Load a project's fold JSON from disk.\"\"\"\n",
    "    fpath = get_labels_fpath(name)\n",
    "    return utils.files.load_json(fpath)\n",
    "\n",
    "def save_fold(fold):\n",
    "    \"\"\"Persist `fold` back to its labels JSON path.\"\"\"\n",
    "    fpath = get_labels_fpath(fold['name'])\n",
    "    return utils.files.save_json(fpath, fold)    \n",
    "\n",
    "def uncertainty_sort(preds_df):\n",
    "    \"\"\"TODO: unimplemented stub -- currently returns None.\n",
    "\n",
    "    Candidate strategies for ranking predictions by uncertainty:\n",
    "      - single argmax: top-class probability closest to .5\n",
    "      - margin: difference between the top two class probabilities\n",
    "      - entropy: entropy of the full probability vector\n",
    "    \"\"\"\n",
    "    \n",
    "def build_argmax_df(probs, ids, labels):\n",
    "    \"\"\"Build a DataFrame of class probabilities indexed by image id.\n",
    "\n",
    "    Adds a 'max_val' column (top-class probability) and sorts ascending,\n",
    "    so the most uncertain predictions appear first.\n",
    "\n",
    "    Bug fix: argmax indices were cast to uint8, which silently wraps for\n",
    "    more than 256 classes; native integer indices are used instead.\n",
    "    \"\"\"\n",
    "    argmax_idxs = np.argmax(probs, axis=1)\n",
    "    max_vals = probs[np.arange(len(probs)), argmax_idxs].reshape(-1, 1)\n",
    "    probs_w_max_vals = np.concatenate([probs, max_vals], axis=1)\n",
    "    columns = labels + [\"max_val\"]\n",
    "    pred_df = pd.DataFrame(data=probs_w_max_vals, index=ids, columns=columns)\n",
    "    return pred_df.sort_values(by=\"max_val\")\n",
    "\n",
    "def save_preds(exp, proj_name):\n",
    "    \"\"\"Predict on all training images and persist predictions + rankings.\n",
    "\n",
    "    Writes an uncertainty-sorted rankings CSV and a JSON doc mapping\n",
    "    image id -> {labels, probs}. Returns (probs, preds).\n",
    "    \"\"\"\n",
    "    print(\"Saving predictions\")\n",
    "    fpaths, ids = utils.files.get_paths_to_files(TRAIN_PATH, strip_ext=True)\n",
    "    # Use the deterministic VAL transform (no augmentation) for inference.\n",
    "    data = datasets.datasets.FileDataset(fpaths, 'pil', None, TRANSFORMS[c.VAL])\n",
    "    loader = data_loaders.get_data_loader(\n",
    "        data, TRAIN_CONFIG['batch_size'], n_workers=2)\n",
    "    probs, preds = get_preds(exp, loader)\n",
    "    # Consistency fix: take label names from the experiment config once and\n",
    "    # use them for both the tags and the ranking columns, instead of the\n",
    "    # hardcoded dogscats.LABEL_NAMES (same values, single source of truth).\n",
    "    label_names = exp.config.data['label_names']\n",
    "    tags = metadata.get_tags_from_preds(preds, label_names)\n",
    "    \n",
    "    pred_df = build_argmax_df(probs, ids, label_names)\n",
    "    pred_df.to_csv(get_uncertainty_fpath(proj_name))\n",
    "    pred_doc = {}\n",
    "    for idx, id_ in enumerate(ids):\n",
    "        pred_doc[id_] = {\n",
    "            'labels': tags[idx],\n",
    "            'probs': probs[idx].tolist()\n",
    "        }\n",
    "    preds_fpath = get_preds_fpath(proj_name)\n",
    "    utils.files.save_json(preds_fpath, pred_doc)\n",
    "    return probs, preds\n",
    "\n",
    "MAX_RUNS = 1000  # safety cap on polling iterations\n",
    "def run_project(proj_name):\n",
    "    \"\"\"Poll the label fold; retrain whenever new training images appear.\n",
    "\n",
    "    Runs up to MAX_RUNS iterations, sleeping 10s between polls when\n",
    "    there is nothing new to train on.\n",
    "    \"\"\"\n",
    "    n_trn_imgs = 0\n",
    "    for i in range(MAX_RUNS):\n",
    "        fold_fpath = get_labels_fpath(proj_name)\n",
    "        fold = utils.files.load_json(fold_fpath)\n",
    "        trn_count = get_img_count(fold, c.TRAIN)\n",
    "        if trn_count > n_trn_imgs:\n",
    "            print(\"Found new trn images\")\n",
    "            exp = run_experiment(proj_name)\n",
    "            del exp  # drop the reference so the experiment can be garbage collected\n",
    "            n_trn_imgs = trn_count\n",
    "        else:\n",
    "            print(\"No new trn images, sleeping\")\n",
    "            time.sleep(10)\n",
    "\n",
    "def run_experiment(proj_name):\n",
    "    \"\"\"Train one experiment on the project's current labels.\n",
    "\n",
    "    Builds model/loaders/trainer, trains, reloads the best-epoch weights,\n",
    "    then saves predictions and scores. Returns the Experiment.\n",
    "    \"\"\"\n",
    "    print(\"Starting Experiment\")\n",
    "    fold = utils.files.load_json(get_labels_fpath(proj_name))\n",
    "    trn_loader = get_loader(fold, c.TRAIN, shuffle=True)\n",
    "    val_loader = get_loader(fold, c.VAL, shuffle=False)\n",
    "    model = get_model(fold)\n",
    "    crit = get_criterion()\n",
    "    optim = get_optimizer(model)\n",
    "    lr_adjuster = get_lr_adjuster()\n",
    "    trainer = get_trainer(crit, optim, lr_adjuster)\n",
    "    config = make_config(proj_name, model, optim, crit, lr_adjuster)\n",
    "    exp = create_experiment(config)\n",
    "    exp.train(trainer, trn_loader, val_loader)\n",
    "    # Restore the best-epoch weights before predicting/scoring.\n",
    "    exp_utils.load_weights_by_exp_and_epoch(exp.model, exp.name, \n",
    "                                            exp.best_epoch)\n",
    "    probs, preds = save_preds(exp, fold['name'])\n",
    "    save_scores(exp, proj_name, val_loader)\n",
    "    return exp"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Train"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true,
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "# One-time project setup: all images start in the 'unlabeled' pool.\n",
    "PROJECT_NAME = 'test_project'\n",
    "_ = init_dataset(PROJECT_NAME, TRAIN_PATH, c.JPG_EXT, label_names=DATA_CONFIG['label_names'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Found new trn images\n",
      "Starting Experiment\n",
      "test_projectSimpleResnet-Adam-ScheduledLR-img256-lr0.0001-wd0.0001-bs32-labelai-idED2D5\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1\n",
      "Trn - Loss .7498 | Accuracy .5610 | F2 .5610 | LR .0001 | Time 0.0m 0.34s\n",
      "Val - Loss .6563 | Accuracy .5833 | F2 .5833 | LR .0001 | Time 0.0m 0.15s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2\n",
      "Trn - Loss .6816 | Accuracy .5610 | F2 .5610 | LR .0001 | Time 0.0m 0.28s\n",
      "Val - Loss .6563 | Accuracy .5833 | F2 .5833 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3\n",
      "Trn - Loss .6537 | Accuracy .5854 | F2 .5854 | LR .0001 | Time 0.0m 0.32s\n",
      "Val - Loss .6539 | Accuracy .5833 | F2 .5833 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 4\n",
      "Trn - Loss .7339 | Accuracy .5854 | F2 .5854 | LR .0001 | Time 0.0m 0.29s\n",
      "Val - Loss .6532 | Accuracy .5833 | F2 .5833 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 5\n",
      "Trn - Loss .6519 | Accuracy .5488 | F2 .5488 | LR .0001 | Time 0.0m 0.32s\n",
      "Val - Loss .6520 | Accuracy .5000 | F2 .5000 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 6\n",
      "Trn - Loss .6954 | Accuracy .5122 | F2 .5122 | LR .0001 | Time 0.0m 0.26s\n",
      "Val - Loss .6507 | Accuracy .5417 | F2 .5417 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 7\n",
      "Trn - Loss .6451 | Accuracy .5976 | F2 .5976 | LR .0001 | Time 0.0m 0.31s\n",
      "Val - Loss .6453 | Accuracy .5833 | F2 .5833 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 8\n",
      "Trn - Loss .6310 | Accuracy .6341 | F2 .6341 | LR .0001 | Time 0.0m 0.31s\n",
      "Val - Loss .6380 | Accuracy .5833 | F2 .5833 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 9\n",
      "Trn - Loss .6698 | Accuracy .5976 | F2 .5976 | LR .0001 | Time 0.0m 0.31s\n",
      "Val - Loss .6347 | Accuracy .6250 | F2 .6250 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 10\n",
      "Trn - Loss .6350 | Accuracy .6098 | F2 .6098 | LR .0001 | Time 0.0m 0.28s\n",
      "Val - Loss .6301 | Accuracy .6250 | F2 .6250 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 11\n",
      "Trn - Loss .5937 | Accuracy .6463 | F2 .6463 | LR .0001 | Time 0.0m 0.28s\n",
      "Val - Loss .6216 | Accuracy .6667 | F2 .6667 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 12\n",
      "Trn - Loss .6146 | Accuracy .6585 | F2 .6585 | LR .0001 | Time 0.0m 0.28s\n",
      "Val - Loss .6138 | Accuracy .6667 | F2 .6667 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 13\n",
      "Trn - Loss .6030 | Accuracy .7195 | F2 .7195 | LR .0001 | Time 0.0m 0.31s\n",
      "Val - Loss .6095 | Accuracy .6667 | F2 .6667 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 14\n",
      "Trn - Loss .5627 | Accuracy .7683 | F2 .7683 | LR .0001 | Time 0.0m 0.29s\n",
      "Val - Loss .6027 | Accuracy .6667 | F2 .6667 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 15\n",
      "Trn - Loss .6079 | Accuracy .6951 | F2 .6951 | LR .0001 | Time 0.0m 0.27s\n",
      "Val - Loss .5978 | Accuracy .6667 | F2 .6667 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 16\n",
      "Trn - Loss .5770 | Accuracy .6951 | F2 .6951 | LR .0001 | Time 0.0m 0.29s\n",
      "Val - Loss .5870 | Accuracy .7083 | F2 .7083 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 17\n",
      "Trn - Loss .5782 | Accuracy .7439 | F2 .7439 | LR .0001 | Time 0.0m 0.33s\n",
      "Val - Loss .5821 | Accuracy .7083 | F2 .7083 | LR .0001 | Time 0.0m 0.17s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 18\n",
      "Trn - Loss .4881 | Accuracy .7805 | F2 .7805 | LR .0001 | Time 0.0m 0.29s\n",
      "Val - Loss .5786 | Accuracy .7083 | F2 .7083 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 19\n",
      "Trn - Loss .5652 | Accuracy .6707 | F2 .6707 | LR .0001 | Time 0.0m 0.28s\n",
      "Val - Loss .5750 | Accuracy .7500 | F2 .7500 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 20\n",
      "Trn - Loss .5304 | Accuracy .8049 | F2 .8049 | LR .0001 | Time 0.0m 0.27s\n",
      "Val - Loss .5668 | Accuracy .7917 | F2 .7917 | LR .0001 | Time 0.0m 0.15s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 21\n",
      "Trn - Loss .5585 | Accuracy .7927 | F2 .7927 | LR .0001 | Time 0.0m 0.28s\n",
      "Val - Loss .5593 | Accuracy .7917 | F2 .7917 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 22\n",
      "Trn - Loss .5240 | Accuracy .7683 | F2 .7683 | LR .0001 | Time 0.0m 0.29s\n",
      "Val - Loss .5535 | Accuracy .7917 | F2 .7917 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 23\n",
      "Trn - Loss .5216 | Accuracy .7561 | F2 .7561 | LR .0001 | Time 0.0m 0.31s\n",
      "Val - Loss .5445 | Accuracy .7917 | F2 .7917 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 24\n",
      "Trn - Loss .5325 | Accuracy .8171 | F2 .8171 | LR .0001 | Time 0.0m 0.31s\n",
      "Val - Loss .5392 | Accuracy .7917 | F2 .7917 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 25\n",
      "Trn - Loss .4888 | Accuracy .7683 | F2 .7683 | LR .0001 | Time 0.0m 0.32s\n",
      "Val - Loss .5360 | Accuracy .7917 | F2 .7917 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 26\n",
      "Trn - Loss .5303 | Accuracy .7439 | F2 .7439 | LR .0001 | Time 0.0m 0.33s\n",
      "Val - Loss .5353 | Accuracy .7917 | F2 .7917 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 27\n",
      "Trn - Loss .4979 | Accuracy .7805 | F2 .7805 | LR .0001 | Time 0.0m 0.27s\n",
      "Val - Loss .5316 | Accuracy .7917 | F2 .7917 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 28\n",
      "Trn - Loss .5107 | Accuracy .8049 | F2 .8049 | LR .0001 | Time 0.0m 0.27s\n",
      "Val - Loss .5267 | Accuracy .7917 | F2 .7917 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 29\n",
      "Trn - Loss .5036 | Accuracy .8415 | F2 .8415 | LR .0001 | Time 0.0m 0.31s\n",
      "Val - Loss .5197 | Accuracy .7917 | F2 .7917 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 30\n",
      "Trn - Loss .4588 | Accuracy .8293 | F2 .8293 | LR .0001 | Time 0.0m 0.27s\n",
      "Val - Loss .5115 | Accuracy .7917 | F2 .7917 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 31\n",
      "Trn - Loss .4953 | Accuracy .7805 | F2 .7805 | LR .0001 | Time 0.0m 0.28s\n",
      "Val - Loss .5088 | Accuracy .7917 | F2 .7917 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 32\n",
      "Trn - Loss .4812 | Accuracy .7439 | F2 .7439 | LR .0001 | Time 0.0m 0.27s\n",
      "Val - Loss .5057 | Accuracy .7917 | F2 .7917 | LR .0001 | Time 0.0m 0.15s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 33\n",
      "Trn - Loss .4590 | Accuracy .8780 | F2 .8780 | LR .0001 | Time 0.0m 0.28s\n",
      "Val - Loss .4972 | Accuracy .7917 | F2 .7917 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 34\n",
      "Trn - Loss .4616 | Accuracy .8293 | F2 .8293 | LR .0001 | Time 0.0m 0.28s\n",
      "Val - Loss .4884 | Accuracy .7917 | F2 .7917 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 35\n",
      "Trn - Loss .4607 | Accuracy .8659 | F2 .8659 | LR .0001 | Time 0.0m 0.31s\n",
      "Val - Loss .4840 | Accuracy .7917 | F2 .7917 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 36\n",
      "Trn - Loss .4365 | Accuracy .9024 | F2 .9024 | LR .0001 | Time 0.0m 0.31s\n",
      "Val - Loss .4813 | Accuracy .7917 | F2 .7917 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 37\n",
      "Trn - Loss .4307 | Accuracy .8537 | F2 .8537 | LR .0001 | Time 0.0m 0.34s\n",
      "Val - Loss .4763 | Accuracy .7917 | F2 .7917 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 38\n",
      "Trn - Loss .4664 | Accuracy .8780 | F2 .8780 | LR .0001 | Time 0.0m 0.28s\n",
      "Val - Loss .4716 | Accuracy .7917 | F2 .7917 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 39\n",
      "Trn - Loss .4242 | Accuracy .8902 | F2 .8902 | LR .0001 | Time 0.0m 0.29s\n",
      "Val - Loss .4698 | Accuracy .7917 | F2 .7917 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 40\n",
      "Trn - Loss .4288 | Accuracy .8659 | F2 .8659 | LR .0001 | Time 0.0m 0.29s\n",
      "Val - Loss .4670 | Accuracy .7917 | F2 .7917 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 41\n",
      "Trn - Loss .4109 | Accuracy .8659 | F2 .8659 | LR .0001 | Time 0.0m 0.28s\n",
      "Val - Loss .4603 | Accuracy .7917 | F2 .7917 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 42\n",
      "Trn - Loss .3951 | Accuracy .9146 | F2 .9146 | LR .0001 | Time 0.0m 0.30s\n",
      "Val - Loss .4574 | Accuracy .7917 | F2 .7917 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 43\n",
      "Trn - Loss .3976 | Accuracy .9024 | F2 .9024 | LR .0001 | Time 0.0m 0.34s\n",
      "Val - Loss .4497 | Accuracy .7917 | F2 .7917 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 44\n",
      "Trn - Loss .3908 | Accuracy .9024 | F2 .9024 | LR .0001 | Time 0.0m 0.28s\n",
      "Val - Loss .4453 | Accuracy .8333 | F2 .8333 | LR .0001 | Time 0.0m 0.15s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 45\n",
      "Trn - Loss .4062 | Accuracy .8780 | F2 .8780 | LR .0001 | Time 0.0m 0.29s\n",
      "Val - Loss .4426 | Accuracy .8333 | F2 .8333 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 46\n",
      "Trn - Loss .3835 | Accuracy .8780 | F2 .8780 | LR .0001 | Time 0.0m 0.27s\n",
      "Val - Loss .4426 | Accuracy .8333 | F2 .8333 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 47\n",
      "Trn - Loss .3604 | Accuracy .9146 | F2 .9146 | LR .0001 | Time 0.0m 0.32s\n",
      "Val - Loss .4408 | Accuracy .8333 | F2 .8333 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 48\n",
      "Trn - Loss .3773 | Accuracy .9024 | F2 .9024 | LR .0001 | Time 0.0m 0.27s\n",
      "Val - Loss .4364 | Accuracy .8333 | F2 .8333 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 49\n",
      "Trn - Loss .3737 | Accuracy .8902 | F2 .8902 | LR .0001 | Time 0.0m 0.28s\n",
      "Val - Loss .4328 | Accuracy .8333 | F2 .8333 | LR .0001 | Time 0.0m 0.16s\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 50\n",
      "Trn - Loss .3711 | Accuracy .9146 | F2 .9146 | LR .0001 | Time 0.0m 0.27s\n",
      "Val - Loss .4281 | Accuracy .8750 | F2 .8750 | LR .0001 | Time 0.0m 0.15s\n",
      "Experiment Complete!\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory usage (): 2237.00 MB\n",
      "\n",
      "Saving predictions\n",
      "Saving scores\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n",
      "No new trn images, sleeping\n"
     ]
    }
   ],
   "source": [
    "run_project(PROJECT_NAME)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Predict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Load model from exp epoch\n",
    "exp.load_model_state(epoch=49)\n",
    "model = exp.model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# OR load custom model weights\n",
    "exp_name = RESUME_EXP_NAME\n",
    "w_path = os.path.join(cfg.PATHS['experiments'], exp_name, 'weights', 'weights-30.th')\n",
    "models.utils.load_weights(model, w_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true,
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "%time val_probs = predictions.get_probabilities(model, tst_loader)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "metric_utils.get_accuracy(val_probs > 0.5, tst_targs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "ks = list(label_fold['val'].keys())\n",
    "print(len(ks), len(val_loader.dataset.fpaths))\n",
    "for k in ks:\n",
    "    fpath = os.path.join(TRAIN_PATH, k+c.JPG_EXT)\n",
    "    assert fpath in val_fpaths\n",
    "    assert fpath in val_loader.dataset.fpaths"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "for f,v in zip(val_fpaths, val_targs):\n",
    "    print(os.path.basename(f),metadata.convert_one_hot_to_tags(v, labels))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "utils.imgs.plot_sample_preds(tst_fpaths, val_probs > 0.5, tst_targs, labels, shuffle=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "pred_fpath = predictions.get_prediction_fpath(basename='my_exp', dset=c.VAL)\n",
    "_ = predictions.save_or_append_pred_to_file(pred_fpath, val_probs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true,
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "thresh = TRAIN_CONFIG['threshold']\n",
    "acc = metric_utils.get_accuracy(val_probs > thresh, val_targs)\n",
    "f2 = metric_utils.get_metric_in_blocks(val_probs > thresh, val_targs, \n",
    "                                       1000, metric_utils.get_f2_score)\n",
    "loss = metric_utils.get_cross_entropy_loss(val_probs, val_targs)\n",
    "print(\"Acc\",acc,\"F2\",f2,\"BCE\",loss)\n",
    "utils.imgs.plot_sample_preds(val_fpaths, val_probs > 0.5, val_targs, \n",
    "                             dogscats.LABEL_NAMES)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true,
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "%time tst_probs = predictions.get_probabilities(model, tst_loader)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "pred_fpath = predictions.get_prediction_fpath(basename='my_exp', dset=c.TEST)\n",
    "_ = predictions.save_or_append_pred_to_file(pred_fpath, tst_probs)\n",
    "tst_probs = predictions.load_pred(pred_fpath, numpy=True)\n",
    "utils.imgs.plot_sample_preds(tst_fpaths, tst_probs > thresh, \n",
    "                             None, dogscats.LABEL_NAMES)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Evaluate"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true,
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "# Review Experiment\n",
    "exp_name = 'BaselineSimpleResnet-Adam-ScheduledLR-img256-lr0.001-wd0.0005-bs64-fold4K-id84E8D'\n",
    "exp = Experiment(exp_name, cfg.PATHS['experiments'])\n",
    "exp.review(verbose=False)\n",
    "exp.history.plot()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Load Pred\n",
    "fname = 'my_exp_val.bc'\n",
    "thresh = TRAIN_CONFIG['threshold']\n",
    "probs = predictions.load_pred(os.path.join(cfg.PATHS['predictions'], fname))\n",
    "preds = predictions.get_predictions(probs, thresh)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# View preds, probs, and targets\n",
    "eval_df = evaluate.get_evaluate_df(preds, probs, val_targs, \n",
    "                                   val_fpaths, dogscats.LABEL_NAMES)\n",
    "eval_df[:5]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# View preds by label\n",
    "LABEL = 'dog'\n",
    "dog_preds_by_targ = evaluate.get_preds_by_target_label(\n",
    "    eval_df, LABEL, condensed=False)\n",
    "dog_preds_by_pred = evaluate.get_preds_by_predicted_label(\n",
    "    eval_df, LABEL, condensed=False)\n",
    "dog_preds_by_targ[:5]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# View preds by probability\n",
    "\n",
    "# Confident dogs and right (TP)\n",
    "confident_dogs_tp = evaluate.get_preds_by_target_and_prob(\n",
    "    eval_df, 'dog', 1, p_min=0.9, p_max=1.0)\n",
    "# Confident dogs and wrong (FP)\n",
    "confident_dogs_fp = evaluate.get_preds_by_target_and_prob(\n",
    "    eval_df, 'dog', 0, p_min=0.9, p_max=1.0)\n",
    "# Unconfident dogs and right (TN)\n",
    "unconfident_dogs_tn = evaluate.get_preds_by_target_and_prob(\n",
    "    eval_df, 'dog', 0, p_min=0.0, p_max=0.1)\n",
    "# Unconfident dogs and wrong (FN)\n",
    "unconfident_dogs_fn = evaluate.get_preds_by_target_and_prob(\n",
    "    eval_df, 'dog', 1, p_min=0.0, p_max=0.1)\n",
    "\n",
    "# Annotation errors?\n",
    "evaluate.plot_predictions(unconfident_dogs_fn, dogscats.LABEL_NAMES)\n",
    "unconfident_dogs_fn"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# View predictions and probabilities\n",
    "evaluate.plot_predictions(eval_df, dogscats.LABEL_NAMES)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Confusion Matrix\n",
    "evaluate.plot_label_level_cms(eval_df, dogscats.LABEL_NAMES)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Plot ROC Curve\n",
    "evaluate.plot_roc_curve(np.array(probs), val_targs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.0"
  },
  "latex_envs": {
   "LaTeX_envs_menu_present": true,
   "autocomplete": true,
   "bibliofile": "biblio.bib",
   "cite_by": "apalike",
   "current_citInitial": 1,
   "eqLabelWithNumbers": true,
   "eqNumInitial": 1,
   "hotkeys": {
    "equation": "Ctrl-E",
    "itemize": "Ctrl-I"
   },
   "labels_anchors": false,
   "latex_user_defs": false,
   "report_style_numbering": false,
   "user_envs_cfg": false
  },
  "toc": {
   "colors": {
    "hover_highlight": "#DAA520",
    "navigate_num": "#000000",
    "navigate_text": "#333333",
    "running_highlight": "#FF0000",
    "selected_highlight": "#FFD700",
    "sidebar_border": "#EEEEEE",
    "wrapper_background": "#FFFFFF"
   },
   "moveMenuLeft": true,
   "nav_menu": {
    "height": "4px",
    "width": "254px"
   },
   "navigate_menu": true,
   "number_sections": true,
   "sideBar": true,
   "threshold": 4,
   "toc_cell": false,
   "toc_section_display": "block",
   "toc_window_display": false,
   "widenNotebook": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
