{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# %reload_ext autoreload\n",
    "# %autoreload 2\n",
    "%matplotlib inline\n",
    "import matplotlib.pyplot as plt\n",
    "# Large default figure size so the joint plots further below stay readable\n",
    "plt.rcParams['figure.figsize'] = [40, 30]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import fastai as fai\n",
    "from fastai.basic_data import DataLoader\n",
    "import numpy as np\n",
    "from torch.utils.data import SubsetRandomSampler\n",
    "import torch \n",
    "import pandas as pd\n",
    "import seaborn as sns\n",
    "from os import environ\n",
    "from torch import optim\n",
    "from bisect import bisect_left, bisect_right\n",
    "import time\n",
    "from tqdm import tqdm\n",
    "from fastai.callbacks import EarlyStoppingCallback\n",
    "from functools import partial\n",
    "from fastai import train\n",
    "from sklearn.metrics import log_loss\n",
    "\n",
    "from src.data.dataset import *\n",
    "from src.model.model_bn import *\n",
    "from src.model.model_bn_ELU import *\n",
    "from src.model.model_bn_Tanh import *\n",
    "from src.model.model_bn_LeakyReLU import *\n",
    "from src.model.model_bn_PReLU import *\n",
    "from src.model.model_bn_Sigmoid import *\n",
    "from src.model.classifier_model_bn_Tanh import *\n",
    "from src.model.classifier_model_bn_ELU import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "tags": [
     "parameters"
    ]
   },
   "outputs": [],
   "source": [
    "# Run configuration; every value can be overridden through environment\n",
    "# variables (this cell carries the 'parameters' tag for parameterized runs).\n",
    "optimizer = environ.get('optimizer', 'Adam')\n",
    "num_workers= int(environ.get('num_workers', '8'))\n",
    "batch_size=int(environ.get('batch_size', '2048'))\n",
    "n_epochs=int(environ.get('n_epochs', '500'))\n",
    "batch_norm = environ.get('batch_norm', 'True') == 'True'\n",
    "dataset= environ.get('dataset', 'data/speedup_dataset2.pkl')\n",
    "loss_func = environ.get('loss_func', 'MSE')\n",
    "# log: if True, targets are treated as normalized log-speedups\n",
    "# (they are mapped back with exp(x*std + mean) in get_results_df)\n",
    "log = environ.get('log', 'True') == 'True'\n",
    "wd = float(environ.get('weight_decay', '0.01'))\n",
    "cuda_device = environ.get('cuda_device', 'cuda:0')\n",
    "\n",
    "# Network architecture: hidden-layer sizes and per-layer dropout probabilities\n",
    "layers_sizes = list(map(int, environ.get('layers', '300 200 120 80 30').split()))\n",
    "drops = list(map(float, environ.get('dropouts', '0.2 0.2 0.1 0.1 0.1').split()))\n",
    "device = torch.device(cuda_device if torch.cuda.is_available() else \"cpu\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train_model(model, criterion, optimizer, dataloader, num_epochs=100):\n",
    "    \"\"\"Run a train/val loop and collect per-epoch losses.\n",
    "\n",
    "    Args:\n",
    "        model: network to train; moved onto the notebook-global `device`.\n",
    "        criterion: loss callable applied as criterion(outputs, labels).\n",
    "        optimizer: torch optimizer over model's parameters.\n",
    "        dataloader: dict with 'train' and 'val' DataLoaders.\n",
    "        num_epochs: number of epochs; each epoch runs both phases.\n",
    "\n",
    "    Returns:\n",
    "        List of (train_loss, val_loss) tuples, one per epoch, where each\n",
    "        loss is the average of the per-batch mean losses.\n",
    "    \"\"\"\n",
    "    since = time.time()\n",
    "    \n",
    "    losses = []\n",
    "    train_loss = 0\n",
    "    # `device` is defined in the configuration cell above\n",
    "    model = model.to(device)\n",
    "\n",
    "    for epoch in range(num_epochs):\n",
    "        print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n",
    "        print('-' * 10)\n",
    "\n",
    "        # Each epoch has a training and validation phase\n",
    "        for phase in ['train', 'val']:\n",
    "            if phase == 'train':\n",
    "                #scheduler.step()\n",
    "                model.train()  \n",
    "            else:\n",
    "                model.eval()\n",
    "\n",
    "            running_loss = 0.0\n",
    "           \n",
    "            # Iterate over data.\n",
    "            for inputs, labels in tqdm(dataloader[phase], total=len(dataloader[phase])):       \n",
    "                inputs = inputs.to(device)\n",
    "                labels = labels.to(device)\n",
    "                # zero the parameter gradients\n",
    "                optimizer.zero_grad()\n",
    "\n",
    "                # forward\n",
    "                # track history if only in train\n",
    "                with torch.set_grad_enabled(phase == 'train'):\n",
    "                    outputs = model(inputs)  \n",
    "                    assert outputs.shape == labels.shape\n",
    "\n",
    "                    loss = criterion(outputs, labels)\n",
    "\n",
    "                    # backward + optimize only if in training phase\n",
    "                    if phase == 'train':\n",
    "                        loss.backward()\n",
    "                        optimizer.step()\n",
    "                       # print(loss.item())\n",
    "                        \n",
    "            \n",
    "                # statistics\n",
    "                # accumulate this batch's mean loss\n",
    "                running_loss += loss.item()     \n",
    "                \n",
    "                #running_corrects += torch.sum((outputs.data - labels.data) < e)/inputs.shape[0]\n",
    "\n",
    "            # average over batches; NOTE(review): an empty phase would raise ZeroDivisionError\n",
    "            epoch_loss = running_loss / len(dataloader[phase])\n",
    "            \n",
    "            print('{} Loss: {:.4f}'.format(\n",
    "               phase, epoch_loss))\n",
    "\n",
    "            \n",
    "            # pair each epoch's train loss with its val loss\n",
    "            if phase == 'val':\n",
    "                losses.append((train_loss, epoch_loss))\n",
    "            else:\n",
    "                train_loss = epoch_loss\n",
    "\n",
    "        print()\n",
    "\n",
    "    time_elapsed = time.time() - since\n",
    "    print('Training complete in {:.0f}m {:.0f}s'.format(\n",
    "        time_elapsed // 60, time_elapsed % 60))\n",
    "    \n",
    "    \n",
    "    return losses\n",
    "\n",
    "\n",
    "\n",
    "def get_results_df(dl, model, log=False):\n",
    "    \"\"\"Build a per-sample regression results DataFrame.\n",
    "\n",
    "    Runs `model` over every sample referenced by dl.sampler.indices and\n",
    "    collects predictions, targets, measured speedups, error metrics\n",
    "    (abs_diff, APE, SMAPE) and schedule metadata.\n",
    "\n",
    "    If `log` is True, predictions/targets are mapped back from the\n",
    "    normalized log domain via exp(x * std + mean).\n",
    "    \"\"\"\n",
    "    df = pd.DataFrame()\n",
    "\n",
    "    indices = dl.sampler.indices\n",
    "    \n",
    "    inputs, targets = dl.dataset[indices]\n",
    "    names = [dl.dataset.programs[dl.dataset.restricted_program_indexes[i]].name for i in indices]\n",
    "    sched_names = [dl.dataset.schedules[i].name for i in indices]\n",
    "    speedups = [dl.dataset.Y_speedups[i] for i in indices]\n",
    "    inputs = torch.Tensor(inputs)\n",
    "    \n",
    "    model.eval()\n",
    "    # inference only: skip building the autograd graph\n",
    "    with torch.no_grad():\n",
    "        preds = model(inputs.to(device))\n",
    "\n",
    "    interchange, tile, unroll = zip(*[dl.dataset.schedules[index].binary_repr for index in indices])\n",
    "\n",
    "    preds = preds.cpu().numpy().reshape((-1,))\n",
    "    targets = targets.reshape((-1,))\n",
    "    \n",
    "    if log:\n",
    "        # undo the dataset's log/standardization transform\n",
    "        preds = np.exp(preds*dl.dataset.std + dl.dataset.mean)\n",
    "        targets = np.exp(targets*dl.dataset.std + dl.dataset.mean)\n",
    "\n",
    "    df['index'] = indices\n",
    "    df['name'] = names\n",
    "    df['sched_name'] = sched_names\n",
    "    df['prediction'] = preds\n",
    "    df['target'] = targets\n",
    "    df['speedup'] = np.array(speedups)\n",
    "    df['abs_diff'] = np.abs(preds - targets)\n",
    "    df['APE'] = np.abs(df.target - df.prediction)/df.target * 100\n",
    "    df['SMAPE'] = 100*np.abs(df.target - df.prediction)/((np.abs(df.target) + np.abs(df.prediction))/2)\n",
    "    \n",
    "    df['interchange'] = interchange\n",
    "    df['tile'] = tile\n",
    "    df['unroll'] = unroll\n",
    "    \n",
    "    return df\n",
    "\n",
    "# returns classification results dataframe\n",
    "def get_results_df_classif(dl, model, log=False):\n",
    "    \"\"\"Build a per-sample classification results DataFrame.\n",
    "\n",
    "    Same layout as get_results_df, but reports a BCELoss column instead of\n",
    "    APE/SMAPE; the loss is a single scalar broadcast to every row.\n",
    "    \"\"\"\n",
    "    df = pd.DataFrame()\n",
    "\n",
    "    indices = dl.sampler.indices\n",
    "    \n",
    "    inputs, targets = dl.dataset[indices]\n",
    "    names = [dl.dataset.programs[dl.dataset.restricted_program_indexes[i]].name for i in indices]\n",
    "    sched_names = [dl.dataset.schedules[i].name for i in indices]\n",
    "    speedups = [dl.dataset.Y_speedups[i] for i in indices]\n",
    "    inputs = torch.Tensor(inputs)\n",
    "    \n",
    "    model.eval()\n",
    "    # inference only: skip building the autograd graph\n",
    "    with torch.no_grad():\n",
    "        preds = model(inputs.to(device))\n",
    "\n",
    "    interchange, tile, unroll = zip(*[dl.dataset.schedules[index].binary_repr for index in indices])\n",
    "\n",
    "    preds = preds.cpu().numpy().reshape((-1,))\n",
    "    targets = targets.reshape((-1,))\n",
    "    \n",
    "    if log:\n",
    "        # undo the dataset's log/standardization transform\n",
    "        preds = np.exp(preds*dl.dataset.std + dl.dataset.mean)\n",
    "        targets = np.exp(targets*dl.dataset.std + dl.dataset.mean)\n",
    "\n",
    "    df['index'] = indices\n",
    "    df['name'] = names\n",
    "    df['sched_name'] = sched_names\n",
    "    df['prediction'] = preds\n",
    "    df['target'] = targets\n",
    "    df['speedup'] = np.array(speedups)\n",
    "    df['abs_diff'] = np.abs(preds - targets)\n",
    "    # scalar log-loss over the whole split, broadcast to every row\n",
    "    df['BCELoss'] = log_loss(df.target , df.prediction)\n",
    "    \n",
    "    df['interchange'] = interchange\n",
    "    df['tile'] = tile\n",
    "    df['unroll'] = unroll\n",
    "    \n",
    "    return df\n",
    "\n",
    "def train_dev_split(dataset, batch_size, num_workers, val_size=10000, test_size=10000, log=False, seed=42):\n",
    "    \"\"\"Split a pickled dataset into train/val/test DataLoaders.\n",
    "\n",
    "    The first `test_size` samples form the test set, the next `val_size`\n",
    "    the validation set, and the remainder the training set. Each split is\n",
    "    shuffled internally by a SubsetRandomSampler.\n",
    "\n",
    "    NOTE(review): `seed` is unused -- the split boundaries are deterministic\n",
    "    and the samplers draw from torch's global RNG.\n",
    "\n",
    "    Returns (train_dl, val_dl, test_dl).\n",
    "    \"\"\"\n",
    "    ds = DatasetFromPkl(dataset, maxsize=None, log=log)\n",
    "    \n",
    "    indices = range(len(ds))\n",
    "    test_indices = indices[:test_size]\n",
    "    val_indices = indices[test_size:test_size + val_size]\n",
    "    train_indices = indices[test_size + val_size:]\n",
    "\n",
    "    def make_dl(subset):\n",
    "        # one loader per split, shuffled within the split\n",
    "        return DataLoader(ds, batch_size=batch_size,\n",
    "                          sampler=SubsetRandomSampler(subset),\n",
    "                          num_workers=num_workers)\n",
    "\n",
    "    return make_dl(train_indices), make_dl(val_indices), make_dl(test_indices)\n",
    "\n",
    "# loads test, val and train data, filtering on speedup bounds and excluding some functions\n",
    "def train_dev_split_filter(dataset, batch_size, num_workers, val_size=10000, test_size=10000, log=False, seed=42, speedup_lo_bound=0, speedup_up_bound=np.inf, exlude_funcs=None):\n",
    "    \"\"\"Like train_dev_split, but filters samples by speedup bounds and\n",
    "    excludes the functions listed in `exlude_funcs`.\n",
    "\n",
    "    NOTE(review): `seed` is unused; splits are deterministic slices.\n",
    "    \"\"\"\n",
    "    # default to a fresh list: avoids the shared-mutable-default pitfall of `exlude_funcs=[]`\n",
    "    if exlude_funcs is None:\n",
    "        exlude_funcs = []\n",
    "    ds = DatasetFromPkl_Filter(dataset, maxsize=None, log=log, speedup_lo_bound=speedup_lo_bound, speedup_up_bound=speedup_up_bound, exlude_funcs=exlude_funcs)\n",
    "    \n",
    "    indices = range(len(ds))\n",
    "    test_indices = indices[:test_size]\n",
    "    val_indices = indices[test_size:test_size + val_size]\n",
    "    train_indices = indices[test_size + val_size:]\n",
    "\n",
    "    def make_dl(subset):\n",
    "        # one loader per split, shuffled within the split\n",
    "        return DataLoader(ds, batch_size=batch_size,\n",
    "                          sampler=SubsetRandomSampler(subset),\n",
    "                          num_workers=num_workers)\n",
    "\n",
    "    return make_dl(train_indices), make_dl(val_indices), make_dl(test_indices)\n",
    "\n",
    "# loads test, val and train data, applying a filter and a transform function to speedups\n",
    "def train_dev_split_transform(dataset, batch_size, num_workers, val_size=10000, test_size=10000, log=False, seed=42, filter_func=None, transform_func=None):\n",
    "    \"\"\"Like train_dev_split, but lets the dataset filter samples with\n",
    "    `filter_func` and transform speedups with `transform_func`.\n",
    "\n",
    "    NOTE(review): `seed` is unused; splits are deterministic slices.\n",
    "    \"\"\"\n",
    "    ds = DatasetFromPkl_Transform(dataset, maxsize=None, log=log, filter_func=filter_func, transform_func=transform_func)\n",
    "    \n",
    "    indices = range(len(ds))\n",
    "    test_indices = indices[:test_size]\n",
    "    val_indices = indices[test_size:test_size + val_size]\n",
    "    train_indices = indices[test_size + val_size:]\n",
    "\n",
    "    def make_dl(subset):\n",
    "        # one loader per split, shuffled within the split\n",
    "        return DataLoader(ds, batch_size=batch_size,\n",
    "                          sampler=SubsetRandomSampler(subset),\n",
    "                          num_workers=num_workers)\n",
    "\n",
    "    return make_dl(train_indices), make_dl(val_indices), make_dl(test_indices)\n",
    "\n",
    "\n",
    "def mape_criterion(inputs, targets):\n",
    "    eps = 1e-5\n",
    "    return 100*torch.mean(torch.abs(targets - inputs)/(targets+eps))\n",
    "\n",
    "def smape_criterion(inputs, targets):\n",
    "    return 100*torch.mean(torch.abs(targets - inputs)/((torch.abs(targets)+torch.abs(inputs))/2))\n",
    "\n",
    "def rmse_criterion(inputs, targets):\n",
    "    return torch.sqrt(nn.MSELoss()(inputs, targets))\n",
    "\n",
    "def mse_criterion(inputs, targets):\n",
    "    return nn.MSELoss()(inputs, targets)\n",
    "\n",
    "def bce_criterion(inputs, targets):\n",
    "    return nn.BCELoss()(inputs, targets)\n",
    "        \n",
    "def get_data_with_names(dl):\n",
    "    dataset = dl.dataset\n",
    "    names, X, Y = zip(*[(dataset.get_sched_name(index), *dataset[index]) for index in dl.sampler.indices])    \n",
    "    return names, X, Y\n",
    "\n",
    "def get_schedule_data(dl, schedule):\n",
    "    dataset = dl.dataset        \n",
    "    indices = [index for index in dl.sampler.indices \n",
    "                    if np.all(np.array(dataset.schedules[index].binary_repr) + np.array(schedule) != 1)]    \n",
    "    return indices\n",
    "    \n",
    "    \n",
    "def get_data_with_prog_names(dl):\n",
    "    dataset = dl.dataset\n",
    "    names, X, Y = zip(*[(dataset.get_prog_name(index), *dataset[index]) for index in dl.sampler.indices])\n",
    "    \n",
    "    return names, X, Y\n",
    "\n",
    "def joint_plot(df, title, val_range=list(range(-1, 15))):\n",
    "    \"\"\"Seaborn joint plot of prediction vs target with a y=x reference line.\"\"\"\n",
    "    grid = sns.jointplot('target', 'prediction', df)\n",
    "    plt.suptitle(title)\n",
    "    axis = grid.ax_joint\n",
    "    axis.set_xticks(val_range)\n",
    "    axis.set_yticks(val_range)\n",
    "    # dotted black diagonal: perfect predictions fall on this line\n",
    "    axis.plot(val_range, val_range, ':k')\n",
    "    \n",
    "class NameGetter(object):\n",
    "    def __init__(self, dataset):\n",
    "        self.dataset = dataset\n",
    "    \n",
    "    def __len__(self):\n",
    "        return len(self.dataset)\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        return self.dataset.get_prog_name(index)\n",
    "    \n",
    "def get_program_data(dl, prog_name):\n",
    "    \"\"\"Gather all (X, Y) samples of one program, located by name.\n",
    "\n",
    "    Assumes samples are ordered by program name, so one program occupies a\n",
    "    contiguous index range that binary search can find.\n",
    "    \"\"\"\n",
    "    dataset = dl.dataset\n",
    "    name_g = NameGetter(dataset)\n",
    "    \n",
    "    # contiguous [index1, index2) range of samples belonging to prog_name\n",
    "    # (bisect_right was previously referenced without being imported)\n",
    "    index1 = bisect_left(name_g, prog_name, 0)\n",
    "    index2 = bisect_right(name_g, prog_name, index1)\n",
    "    \n",
    "    if index1 == index2:\n",
    "        # program absent: return empty tensors instead of crashing in zip()\n",
    "        return torch.Tensor(), torch.Tensor()\n",
    "\n",
    "    X, Y = zip(*[dataset[index] for index in range(index1, index2)])\n",
    "   \n",
    "    return torch.Tensor(X), torch.Tensor(Y)\n",
    "\n",
    "def joint_plot_one_program(dl, prog_name, model):\n",
    "    \"\"\"Joint plot of predictions vs targets for a single program.\"\"\"\n",
    "    model.eval()\n",
    "    \n",
    "    X, Y = get_program_data(dl, prog_name)\n",
    "    \n",
    "    # no_grad + cpu(): avoid autograd and make .numpy() safe for CUDA or\n",
    "    # grad-tracking tensors (np.array(Y_hat.view(-1,)) failed on those)\n",
    "    with torch.no_grad():\n",
    "        Y_hat = model(X.to(device))\n",
    "    df = pd.DataFrame()\n",
    "    df['prediction'] = Y_hat.cpu().view(-1,).numpy()\n",
    "    df['target'] = np.array(Y)\n",
    "    \n",
    "    joint_plot(df, prog_name)\n",
    "    \n",
    "def joint_plot_one_schedule(dl, schedule, model, log=False):\n",
    "    \"\"\"Joint plot of predictions vs targets for samples matching `schedule`.\n",
    "\n",
    "    NOTE(review): `log` is unused; kept for interface compatibility.\n",
    "    \"\"\"\n",
    "    indices = get_schedule_data(dl, schedule)\n",
    "    \n",
    "    X, Y = dl.dataset[indices]\n",
    "    X, Y = torch.Tensor(X), torch.Tensor(Y)\n",
    "    \n",
    "    model.eval()  # was missing; mirrors joint_plot_one_program\n",
    "    # no_grad + cpu(): avoid autograd and make .numpy() safe for CUDA or\n",
    "    # grad-tracking tensors (np.array(Y_hat.view(-1,)) failed on those)\n",
    "    with torch.no_grad():\n",
    "        Y_hat = model(X.to(device))\n",
    "\n",
    "    df = pd.DataFrame()\n",
    "    df['prediction'] = Y_hat.cpu().view(-1,).numpy()\n",
    "    df['target'] = np.array(Y)\n",
    "    \n",
    "    joint_plot(df, schedule)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
