{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Imports: stdlib -> third-party -> local helpers.\n",
    "# (Fixed: pickle was imported twice; one-name-per-line for the stdlib block.)\n",
    "import inspect\n",
    "import json\n",
    "import os\n",
    "import pickle\n",
    "import sys\n",
    "\n",
    "import matplotlib\n",
    "import matplotlib.colors as mplcolors\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "from IPython.display import display, HTML\n",
    "from matplotlib.ticker import MaxNLocator\n",
    "\n",
    "# NOTE(review): this wildcard import supplies ALGORITHMS, styles, b_to_gb,\n",
    "# mb_to_gb, s_to_m used below -- confirm against utils.py before narrowing it.\n",
    "from utils import *\n",
    "\n",
    "pd.set_option('display.max_colwidth', 500)\n",
    "\n",
    "# all except 2,17,20 because they dominate costs by orders of magnitude\n",
    "# (re-assigned per benchmark section further down)\n",
    "QUERIES = [1,3,4,5,6,7,8,9,10,11,12,13,14,15,16,18,19,21,22]\n",
    "X_AXIS_TITLE = 'Index Storage Consumption (GB)'\n",
    "LABEL_FONT_SIZE = 14\n",
    "TITLE_FONT_SIZE = 15\n",
    "MARKER_SIZE = 6\n",
    "LINE_WIDTH = 1\n",
    "GRAPH_SIZE = (5,3.45)\n",
    "\n",
    "matplotlib.rcParams['xtick.labelsize'] = LABEL_FONT_SIZE\n",
    "matplotlib.rcParams['ytick.labelsize'] = LABEL_FONT_SIZE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_costs(df):\n",
    "    \"\"\"Return one summed plan cost per result row.\n",
    "\n",
    "    Every column whose name starts with 'q' holds a JSON plan string with a\n",
    "    'Cost' entry; those per-query costs are accumulated row by row.\n",
    "    \"\"\"\n",
    "    query_columns = [column for column in df.columns if column[0] == 'q']\n",
    "    return [\n",
    "        sum(float(json.loads(row[column])['Cost']) for column in query_columns)\n",
    "        for _, row in df.iterrows()\n",
    "    ]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def draw_cost_graph(cophy_costs=None, cophy_memory_consumption=None, legend=True):\n",
    "    \"\"\"Plot workload cost (% of the no-index cost) vs. index storage budget.\n",
    "\n",
    "    CoPhy has no result CSV, so its costs and memory values (MB) are passed in\n",
    "    directly. Reads globals CSV_PATH, BENCHMARK, QUERIES, XLIM, ALGORITHMS and\n",
    "    styles; saves the figure as a PDF next to the CSVs.\n",
    "    \"\"\"\n",
    "    no_index_df = pd.read_csv(f\"{CSV_PATH}/results_no_index_{BENCHMARK}_{len(QUERIES)}_queries.csv\", sep=';')\n",
    "    no_index_cost = get_costs(no_index_df)[0]\n",
    "    fig, ax = plt.subplots()\n",
    "    for algorithm in ALGORITHMS:\n",
    "        if algorithm == 'auto_admin_naive_2':\n",
    "            continue\n",
    "\n",
    "        style = styles[algorithm]\n",
    "\n",
    "        if algorithm == 'cophy':\n",
    "            if cophy_costs is not None and cophy_memory_consumption is not None:\n",
    "                relative_costs = list(map(lambda x: x / no_index_cost * 100, cophy_costs))\n",
    "                memory_consumptions_gb = list(map(lambda x: mb_to_gb(x), cophy_memory_consumption))\n",
    "                memory_consumptions_gb = list(filter(lambda x: x < XLIM, memory_consumptions_gb))\n",
    "                # Keep costs aligned with the memory values surviving the XLIM filter\n",
    "                relative_costs = relative_costs[:len(memory_consumptions_gb)]\n",
    "                ax.step(memory_consumptions_gb, relative_costs, where='post', color=style.color, linewidth=LINE_WIDTH)\n",
    "                ax.plot(memory_consumptions_gb, relative_costs, f'{style.marker}', color=style.color, label=style.label, markersize=MARKER_SIZE)\n",
    "            continue\n",
    "\n",
    "        csv_path = f\"{CSV_PATH}/results_{algorithm}_{BENCHMARK}_{len(QUERIES)}_queries.csv\"\n",
    "        try:\n",
    "            df = pd.read_csv(csv_path, sep=';')\n",
    "        except Exception:\n",
    "            # Best effort: skip algorithms without a readable result file\n",
    "            # (was a bare except; Exception keeps KeyboardInterrupt working)\n",
    "            continue\n",
    "        if algorithm == 'dexter':\n",
    "            # Dexter results are stored in reverse budget order\n",
    "            df = df.iloc[::-1]\n",
    "        df['memory consumption'] = df['memory consumption'].apply(b_to_gb)\n",
    "        df = df.query(f'`memory consumption` < {XLIM}')\n",
    "        # Don't draw measurements that did not identify any indexes\n",
    "        df = df.query('`indexed columns` != \"[]\"')\n",
    "\n",
    "        costs = get_costs(df)\n",
    "        relative_costs = list(map(lambda x: x / no_index_cost * 100, costs))\n",
    "\n",
    "        ax.step(df['memory consumption'], relative_costs, where='post', color=style.color, linewidth=LINE_WIDTH)\n",
    "        ax.plot(df['memory consumption'], relative_costs, f'{style.marker}', color=style.color, label=style.label, markersize=MARKER_SIZE)\n",
    "\n",
    "    if legend:\n",
    "        fig.legend(fontsize=LABEL_FONT_SIZE - 1)\n",
    "    plt.xlabel(X_AXIS_TITLE, fontsize=LABEL_FONT_SIZE)\n",
    "    # Fixed: a first ylabel call was immediately overwritten by this one\n",
    "    plt.ylabel('Relative workload cost (%)', fontsize=LABEL_FONT_SIZE)\n",
    "    ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n",
    "    fig.set_size_inches(GRAPH_SIZE, forward=True)\n",
    "    fig.tight_layout()\n",
    "    ax.set_xlim([-0.1, XLIM])\n",
    "    fig.savefig(f\"{CSV_PATH}/{BENCHMARK.lower()}_cost_estimation.pdf\", bbox_inches='tight', pad_inches=0)\n",
    "\n",
    "def draw_runtime_graph(minutes=False, cophy_runtimes=None, cophy_memory_consumption=None, legend=True):\n",
    "    \"\"\"Plot algorithm runtime against the index storage budget and save a PDF.\n",
    "\n",
    "    minutes -- plot runtimes in minutes instead of seconds.\n",
    "    cophy_runtimes / cophy_memory_consumption -- CoPhy has no result CSV, so\n",
    "    its runtimes (seconds) and memory budgets (MB) are passed in directly.\n",
    "    Reads globals CSV_PATH, BENCHMARK, QUERIES, XLIM, ALGORITHMS, styles.\n",
    "    \"\"\"\n",
    "    fig, ax = plt.subplots()\n",
    "    for algorithm in ALGORITHMS:\n",
    "        if algorithm == 'auto_admin_naive_2' or (algorithm == 'cophy' and cophy_runtimes is None):\n",
    "            continue\n",
    "        style = styles[algorithm]\n",
    "        \n",
    "        if algorithm == 'cophy':\n",
    "            # Convert and trim CoPhy's hand-supplied series to the plotted budget range\n",
    "            cophy_runtimes = list(map(lambda x: x / 60, cophy_runtimes)) if minutes else cophy_runtimes\n",
    "            memory_consumptions_gb = list(map(lambda x: mb_to_gb(x), cophy_memory_consumption))\n",
    "            memory_consumptions_gb = list(filter(lambda x: x < XLIM, memory_consumptions_gb))\n",
    "            cophy_runtimes = cophy_runtimes[:len(memory_consumptions_gb)]\n",
    "            ax.step(memory_consumptions_gb, cophy_runtimes, where='post', color=style.color, linewidth=LINE_WIDTH)\n",
    "            ax.plot(memory_consumptions_gb, cophy_runtimes, f'{style.marker}', color=style.color, label=style.label, markersize=MARKER_SIZE)\n",
    "            continue\n",
    "            \n",
    "        \n",
    "        csv_path= f\"{CSV_PATH}/results_{algorithm}_{BENCHMARK}_{len(QUERIES)}_queries.csv\"\n",
    "        df = pd.read_csv(csv_path, sep=';')\n",
    "        df['memory consumption'] = df['memory consumption'].apply(b_to_gb)\n",
    "        df = df.query(f'`memory consumption` < {XLIM}')\n",
    "        # Don't draw measurements that did not identify any indexes\n",
    "        df = df.query('`indexed columns` != \"[]\"')\n",
    "        \n",
    "        runtime = df['algorithm runtime'].apply(s_to_m) if minutes else df['algorithm runtime']\n",
    "        ax.step(df['memory consumption'], runtime, where='post', color=style.color, linewidth=LINE_WIDTH)\n",
    "        ax.plot(df['memory consumption'], runtime, f'{style.marker}', color=style.color, label=style.label, markersize=MARKER_SIZE)\n",
    "\n",
    "    if legend:\n",
    "        fig.legend(fontsize=LABEL_FONT_SIZE - 1)\n",
    "    plt.xlabel(X_AXIS_TITLE, fontsize=LABEL_FONT_SIZE)\n",
    "    ylabel_unit = 'min' if minutes else 'sec'\n",
    "    plt.ylabel(f'Algorithm runtime ({ylabel_unit})', fontsize=LABEL_FONT_SIZE)\n",
    "    # sf_string only feeds the commented-out title below\n",
    "    sf_string = f' (SF {SCALE_FACTOR})' if SCALE_FACTOR is not None else ''\n",
    "#     plt.title(f'{BENCHMARK.upper()}{sf_string}: Algorithm Runtime vs Memory Budget', fontsize=TITLE_FONT_SIZE)\n",
    "\n",
    "    ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n",
    "    fig.set_size_inches(GRAPH_SIZE, forward=True)\n",
    "    fig.tight_layout()\n",
    "    ax.set_xlim([-0.1, XLIM])\n",
    "    fig.savefig(f\"{CSV_PATH}/{BENCHMARK.lower()}_runtime.pdf\", bbox_inches='tight', pad_inches=0)\n",
    "    \n",
    "def draw_what_if_graph(million=False):\n",
    "    \"\"\"Plot cost requests (total vs. cached) per number of indexes, save as PDF.\n",
    "\n",
    "    million -- scale request counts to millions.\n",
    "    Reads globals CSV_PATH, BENCHMARK, QUERIES, XLIM, ALGORITHMS, styles.\n",
    "    \"\"\"\n",
    "    def determine_max_created_indexes_in_epic(df_epic):\n",
    "        # Count created indexes ('I(C' occurrences) per algorithm step.\n",
    "        # Fixed: previously read the outer 'df' instead of the df_epic parameter\n",
    "        # (same object at the only call site, but a latent shadowing bug).\n",
    "        indexed_column_steps = df_epic['indexed columns'].values\n",
    "        numbers_of_indexes = []\n",
    "        for indexed_column_step in indexed_column_steps:\n",
    "            number_of_indexes = indexed_column_step.count('I(C')\n",
    "            numbers_of_indexes.append(number_of_indexes)\n",
    "\n",
    "        return numbers_of_indexes\n",
    "\n",
    "    print('Average cache rates:')\n",
    "    width = 0.18\n",
    "    pos = None\n",
    "    labels = None\n",
    "    fig, ax = plt.subplots()\n",
    "    idx = 0\n",
    "    # Algorithms whose CSV already has one row per number of indexes\n",
    "    for algorithm in ALGORITHMS:\n",
    "        style = styles[algorithm]\n",
    "        if algorithm == 'cophy':\n",
    "            continue\n",
    "        if algorithm not in ['auto_admin', 'drop', 'auto_admin_naive_2']:\n",
    "            continue\n",
    "        csv_path = f\"{CSV_PATH}/results_{algorithm}_{BENCHMARK}_{len(QUERIES)}_queries.csv\"\n",
    "        df = pd.read_csv(csv_path, sep=';')\n",
    "        # NOTE(review): XLIM is a storage budget (GB) elsewhere but a row count\n",
    "        # here -- confirm that reuse is intended.\n",
    "        df = df.head(XLIM)\n",
    "        if idx == 0:\n",
    "            labels = df.index + 1\n",
    "            pos = np.arange(len(labels))\n",
    "        requests = df['cost requests'] / 1000000 if million else df['cost requests']\n",
    "        hits = df['cache hits'] / 1000000 if million else df['cache hits']\n",
    "        # Stacked bars: translucent cached part below, uncached part above\n",
    "        ax.bar([p + width * idx for p in pos], requests - hits, width, bottom=hits, label=style.label, color=style.color, hatch=style.hatch)\n",
    "        ax.bar([p + width * idx for p in pos], hits, width, color=style.color, alpha=0.5, hatch=style.hatch)\n",
    "\n",
    "        avg_cache_rate = (hits / requests).mean()\n",
    "        print(f'  {style.label}: {avg_cache_rate}')\n",
    "\n",
    "        idx += 1\n",
    "\n",
    "    # Step-based algorithms: group their steps by the number of created indexes\n",
    "    for algorithm in ['extend', 'relaxation']:\n",
    "        style = styles[algorithm]\n",
    "        csv_path = f\"{CSV_PATH}/results_{algorithm}_{BENCHMARK}_{len(QUERIES)}_queries.csv\"\n",
    "        df = pd.read_csv(csv_path, sep=';')\n",
    "        df['number of indexes'] = determine_max_created_indexes_in_epic(df)\n",
    "        df = df.sort_values(by=['number of indexes'])\n",
    "        df = df.groupby('number of indexes').first()\n",
    "        cache_hits = list(df['cache hits'].values)\n",
    "        cost_requests = list(df['cost requests'].values)\n",
    "        # Pad or truncate so the bar groups line up with `labels`\n",
    "        for i in range(len(labels) - len(cache_hits)):\n",
    "            cache_hits.append(0)\n",
    "            cost_requests.append(0)\n",
    "        if len(cache_hits) > len(labels):\n",
    "            cache_hits = cache_hits[:len(labels)]\n",
    "            cost_requests = cost_requests[:len(labels)]\n",
    "        requests = list(map(lambda x: x / 1000000, cost_requests)) if million else cost_requests\n",
    "        hits = list(map(lambda x: x / 1000000, cache_hits)) if million else cache_hits\n",
    "        requests_hits_difference = list(map(lambda x, y: x - y, requests, hits))\n",
    "        ax.bar([p + width * idx for p in pos], requests_hits_difference, width, bottom=hits, label=style.label, color=style.color, hatch=style.hatch)\n",
    "        ax.bar([p + width * idx for p in pos], hits, width, color=style.color, alpha=0.5, hatch=style.hatch)\n",
    "        idx += 1\n",
    "\n",
    "        # Average cache rate over steps that issued at least one request\n",
    "        rates = []\n",
    "        for hit, request in zip(hits, requests):\n",
    "            if request <= 0:\n",
    "                continue\n",
    "            rates.append(hit / request)\n",
    "        avg_cache_rate = np.mean(rates)\n",
    "        print(f'  {style.label}: {avg_cache_rate}')\n",
    "\n",
    "    ax.set_xticks([p + ((idx - 1) / 2) * width for p in pos])\n",
    "    ax.set_xticklabels(labels)\n",
    "    ax.legend(fontsize=LABEL_FONT_SIZE - 1)\n",
    "    ylabel_string = '(millions)' if million else ''\n",
    "    plt.ylabel(f'Cost requests {ylabel_string}', fontsize=LABEL_FONT_SIZE)\n",
    "    plt.xlabel('Number of indexes', fontsize=LABEL_FONT_SIZE)\n",
    "    fig.tight_layout()\n",
    "    fig.set_size_inches(GRAPH_SIZE)\n",
    "    ax.set_xlim([-0.2, len(labels) - 0.2])\n",
    "    plt.show()\n",
    "\n",
    "    fig.savefig(f\"{CSV_PATH}/{BENCHMARK.lower()}_what_if_graph.pdf\", bbox_inches='tight')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def draw_legend():\n",
    "    \"\"\"Render a standalone horizontal legend for all algorithms into ../legend.pdf.\n",
    "\n",
    "    Plots one invisible dummy point per algorithm so that only the legend\n",
    "    entries remain visible in the exported figure.\n",
    "    \"\"\"\n",
    "    fig, ax = plt.subplots()\n",
    "    for algorithm in ALGORITHMS:\n",
    "        if algorithm == 'auto_admin_naive_2':\n",
    "            continue\n",
    "        if algorithm == 'auto_admin':\n",
    "            marker_size = MARKER_SIZE + 5\n",
    "        else:\n",
    "            marker_size = MARKER_SIZE + 3\n",
    "        # Removed: the returned line objects were collected but never used\n",
    "        ax.plot([1], label=styles[algorithm].label, linewidth=0, color=styles[algorithm].color, marker=styles[algorithm].marker, markersize=marker_size)\n",
    "    plt.legend(loc=\"center\", ncol=len(ALGORITHMS), fontsize=LABEL_FONT_SIZE, frameon=False)\n",
    "    # Hide the dummy points; keep only the legend itself\n",
    "    for line in ax.lines:\n",
    "        line.set_visible(False)\n",
    "    fig.patch.set_visible(False)\n",
    "    ax.axis('off')\n",
    "    plt.show()\n",
    "    fig.set_size_inches((8.5, 0.4))\n",
    "    fig.tight_layout()\n",
    "    fig.savefig(\"../legend.pdf\", bbox_inches='tight', pad_inches = 0)\n",
    "draw_legend()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## TPCH - Cost"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# TPC-H (SF 10) section: result CSVs live in CSV_PATH; XLIM = x-axis limit (GB)\n",
    "CSV_PATH = '../tpch_wo_2_17_20'\n",
    "BENCHMARK = 'tpch'\n",
    "SCALE_FACTOR = 10\n",
    "# all except 2,17,20 because they dominate costs by orders of magnitude\n",
    "QUERIES = [1,3,4,5,6,7,8,9,10,11,12,13,14,15,16,18,19,21,22]\n",
    "# QUERIES = range(1, 23)\n",
    "XLIM = 10"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# old\n",
    "cophy_memory_consumptions_mb = [250, 500, 1000, 1500, 2000, 2500, 3000, 3500, 4250, 5000, 6500]\n",
    "cophy_costs = [34396397.79, 33854446.79, 33133046.48, 32783525.42, 27986156.77, 27258434.02, 26896370.87, 26442926.89, 24993712.59, 24976459.99, 24954672.03]\n",
    "draw_cost_graph(cophy_costs, cophy_memory_consumptions_mb, legend=True)\n",
    "# new cophy\n",
    "# cophy_memory_consumptions_mb = [250,500,1000,1500,2000,2500,3000,3500,4250,5000,5750,6500,8000,10000,12500,15000]\n",
    "# cophy_costs = [34746687.52, 34380645.08, 33818483.44, 33175526.84, 32898379.09, 28243364.04, 27814746.6, 27314826.62, 26407652.82, 25598119.51, 25039547.51, 24841420.84, 24442037.82, 23998252.03, 23497725.26, 23408087.08, 23332660.05]\n",
    "# draw_cost_graph(cophy_costs, cophy_memory_consumptions_mb)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## TPCH - Runtime"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Cophy What-If time: 151.91098499298096 - cost_requests: 82676 - cache_hits: 45776 - Gurobi Times:\n",
    "cophy_what_if_time = 151.91098499298096\n",
    "cophy_solver_times = [0.067291, 0.067734, 0.066856, 0.08871, 0.102708, 0.104063, 0.122531, 0.117007, 0.21763, 0.451052, 0.481237, 0.504284, 0.502142, 0.466632, 1.860344, 1.843925]\n",
    "cophy_times = list(map(lambda x: x + cophy_what_if_time, cophy_solver_times))\n",
    "draw_runtime_graph(cophy_runtimes=cophy_times, cophy_memory_consumption=cophy_memory_consumptions_mb)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## TPCH - What If Graph"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "draw_what_if_graph()\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## TPCH - Query Chart"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Query-level analysis: uses the all-queries run and the selection framework itself\n",
    "CSV_PATH = '../tpch_wo_2_17_20/all_queries'\n",
    "QUERIES = range(1, 23)\n",
    "\n",
    "# Make the repository root importable so the `selection` package resolves\n",
    "currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n",
    "parentdir = os.path.dirname(currentdir)\n",
    "parentdir = os.path.dirname(parentdir)\n",
    "sys.path.insert(0,parentdir)\n",
    "\n",
    "from selection.index import Index\n",
    "from selection.workload import Workload\n",
    "from selection.cost_evaluation import CostEvaluation\n",
    "from selection.dbms.postgres_dbms import PostgresDatabaseConnector\n",
    "from selection.what_if_index_creation import WhatIfIndexCreation\n",
    "\n",
    "# Shared connector and HypoPG what-if simulator for this section\n",
    "pdc = PostgresDatabaseConnector('indexselection_tpch___10')\n",
    "what_if = WhatIfIndexCreation(pdc)\n",
    "\n",
    "# cache: (workload key, frozenset of indexes) -> estimated workload cost\n",
    "cache = {}\n",
    "\n",
    "def workload_to_str(workload):\n",
    "    \"\"\"Stable string key for a workload (repr of its query list).\"\"\"\n",
    "    return str(workload.queries)\n",
    "\n",
    "def what_if_cost(workload, index_combination):\n",
    "    \"\"\"Estimated cost of `workload` under `index_combination`.\n",
    "\n",
    "    Memoized in the module-level `cache` dict keyed by (workload string,\n",
    "    frozenset of indexes).\n",
    "    \"\"\"\n",
    "    cache_key = (workload_to_str(workload), frozenset(index_combination))\n",
    "    if cache_key not in cache:\n",
    "        # NOTE(review): a fresh connector is opened per cache miss -- presumably\n",
    "        # to isolate what-if state between evaluations; confirm before reusing pdc.\n",
    "        database_connector = PostgresDatabaseConnector('indexselection_tpch___10')\n",
    "        cost_evaluation = CostEvaluation(database_connector)\n",
    "        cost = cost_evaluation.calculate_cost(workload, index_combination)\n",
    "        cache[cache_key]= cost\n",
    "        database_connector.close()\n",
    "    else:\n",
    "        cost = cache[cache_key]\n",
    "    return cost\n",
    "\n",
    "def exploit_indexes(recommended_indexes_per_query, indexes):\n",
    "    \"\"\"Simulate `indexes` and record per-query cost plus the indexes each plan uses.\n",
    "\n",
    "    Fills recommended_indexes_per_query[query.nr] = (cost, [indexes in plan])\n",
    "    for every query of the global `workload`, using the global `pdc` connector\n",
    "    and `what_if` simulator. Returns the summed estimated index size\n",
    "    (assumed bytes -- callers apply b_to_gb; confirm against Index.estimated_size).\n",
    "    \"\"\"\n",
    "    total_storage_consumption = 0\n",
    "    \n",
    "    for index in indexes:\n",
    "        what_if.simulate_index(index, store_size=True)\n",
    "        total_storage_consumption += index.estimated_size\n",
    "\n",
    "    for query in workload.queries:\n",
    "        plan = pdc.get_plan(query)\n",
    "        plan_string = str(plan)\n",
    "        cost = plan['Total Cost']\n",
    "#         recommended_indexes_per_query[query.nr] = (cost / no_index_costs[query.nr] * 100, [])\n",
    "        recommended_indexes_per_query[query.nr] = (cost, [])\n",
    "        # An index counts as used when its hypothetical name appears in the plan text\n",
    "        for index in indexes:\n",
    "            if index.hypopg_name in plan_string:\n",
    "                recommended_indexes_per_query[query.nr][1].append(index)\n",
    "    what_if.drop_all_simulated_indexes()\n",
    "    \n",
    "    return total_storage_consumption\n",
    "\n",
    "def costs_from_dict(d):\n",
    "    \"\"\"Return the cost component of every (cost, indexes) value of d, in key order.\"\"\"\n",
    "    return [d[key][0] for key in sorted(d.keys())]\n",
    "\n",
    "def unpickle_indexes(file):\n",
    "    \"\"\"Load every object that was pickled back-to-back into `file`.\"\"\"\n",
    "    loaded = []\n",
    "    with open(file, 'rb') as stream:\n",
    "        # pickle.load raises EOFError once the stream is exhausted\n",
    "        while True:\n",
    "            try:\n",
    "                loaded.append(pickle.load(stream))\n",
    "            except EOFError:\n",
    "                return loaded\n",
    "\n",
    "def best_indexes_fitting_budget(algorithm):\n",
    "    \"\"\"Return the cheapest index configuration of `algorithm` within the budget.\n",
    "\n",
    "    Reads the algorithm's result CSV, keeps rows below\n",
    "    MEMORY_CONSUMPTION_FILTER_B (bytes) and loads the matching pickled index\n",
    "    set. Returns [] when no configuration fits the budget.\n",
    "    \"\"\"\n",
    "    csv_path = f\"{CSV_PATH}/results_{algorithm}_{BENCHMARK}_{len(QUERIES)}_queries.csv\"\n",
    "    df = pd.read_csv(csv_path, sep=';')\n",
    "    if algorithm == 'dexter':\n",
    "        df = df.iloc[::-1]\n",
    "    # idx indicates Pandas indexes while index(es) indicates database index\n",
    "    df['original idx'] = df.index\n",
    "    df = df[df['memory consumption'] < MEMORY_CONSUMPTION_FILTER_B]\n",
    "    costs = get_costs(df)  # computed once (was re-evaluated three times)\n",
    "    if len(costs) < 1:\n",
    "        return []\n",
    "    row_with_best_configuration = costs.index(min(costs))\n",
    "    best_row = df.iloc[row_with_best_configuration]\n",
    "    best_config_idx = best_row['original idx']\n",
    "    \n",
    "    indexes_file = f\"{CSV_PATH}/indexes_{algorithm}_{BENCHMARK}_{len(QUERIES)}_queries.pickle\"\n",
    "    indexes = unpickle_indexes(indexes_file)\n",
    "    \n",
    "    return indexes[best_config_idx]\n",
    "\n",
    "\n",
    "def draw_tpch_query_graph(yscale='linear', small=False):    \n",
    "    \"\"\"Bar chart of per-query cost for each algorithm's final index set.\n",
    "\n",
    "    yscale -- matplotlib y-axis scale ('linear' or 'log').\n",
    "    small -- compact variant: raw costs, no legend, smaller figure.\n",
    "    Uses styles[algorithm].index_history (set by the surrounding script) and\n",
    "    the globals workload, pdc, what_if via exploit_indexes().\n",
    "    \"\"\"\n",
    "    width = 0.12\n",
    "    pos = None\n",
    "    labels = None\n",
    "    fig, ax = plt.subplots()\n",
    "    idx = 0\n",
    "    for algorithm in ALGORITHMS + ['no_index']:\n",
    "        style = styles[algorithm]\n",
    "        if style.index_history is None:\n",
    "            continue\n",
    "        recommended_indexes = {}\n",
    "        total_storage_consumption = exploit_indexes(recommended_indexes, style.index_history)\n",
    "        costs = costs_from_dict(recommended_indexes)\n",
    "        if idx == 0:\n",
    "            labels = list(recommended_indexes.keys())\n",
    "            pos = np.arange(len(labels))\n",
    "        if not small:\n",
    "            costs = list(map(lambda x: x / 1000000, costs))\n",
    "        # Calculate color so that the hatches are visible but not pushy\n",
    "        hsv = mplcolors.rgb_to_hsv(style.color)\n",
    "        # NOTE(review): hatch_color_hsv aliases hsv, so the next line mutates hsv\n",
    "        # in place; harmless because hsv is not reused, but use .copy() if it ever is.\n",
    "        hatch_color_hsv = hsv\n",
    "        hatch_color_hsv[2] = hsv[2] + 0.2 if hsv[2] < 0.5 else hsv[2] - 0.2\n",
    "        edgecolor = mplcolors.hsv_to_rgb(hatch_color_hsv)\n",
    "        bar = ax.bar([p + width * idx for p in pos], costs, width, label=(f\"{style.label} ({len(style.index_history)})\"), color=style.color, hatch=style.hatch, edgecolor=edgecolor, linewidth=0)\n",
    "        idx += 1\n",
    "\n",
    "        print(f\"{algorithm} storage comsumption: {b_to_gb(total_storage_consumption)}\")\n",
    "\n",
    "    ax.set_xticks([p + ((idx - 1) / 2) * width for p in pos])\n",
    "    ax.set_xticklabels(labels)\n",
    "    if not small:\n",
    "        legend = ax.legend(title=r'Algorithm ($|S|$)', fontsize=LABEL_FONT_SIZE - 3, loc='upper left', ncol=2)\n",
    "        plt.setp(legend.get_title(),fontsize=LABEL_FONT_SIZE - 2)\n",
    "    # plt.ylabel('Query cost in % of w/o indexes', fontsize=LABEL_FONT_SIZE)\n",
    "    plt.ylabel(f\"Query cost{'' if small else ' (million)'}\", fontsize=LABEL_FONT_SIZE)\n",
    "    plt.xlabel('Query ID', fontsize=LABEL_FONT_SIZE)\n",
    "    plt.yscale(yscale)\n",
    "    # plt.title('Performance impact of final index combination per algorithm', fontsize=TITLE_FONT_SIZE)\n",
    "    fig.tight_layout()\n",
    "    size = (12, 2.3) if not small else (3.6, 2.23)\n",
    "    fig.set_size_inches(size)\n",
    "    plt.show()\n",
    "\n",
    "    fig.savefig(f\"{CSV_PATH}/{BENCHMARK.lower()}_query_graph_{'small' if small else 'large'}.pdf\", bbox_inches='tight', pad_inches = 0)\n",
    "\n",
    "####### CONFIG\n",
    "    \n",
    "# Budget filter in bytes (5 GB); configurations above it are ignored\n",
    "MEMORY_CONSUMPTION_FILTER_B = 5000000000\n",
    "    \n",
    "# Attach each algorithm's best budget-fitting index set to its plot style\n",
    "for algorithm in ALGORITHMS:\n",
    "    if algorithm == 'cophy' or algorithm == 'auto_admin_naive_2':\n",
    "        continue\n",
    "    styles[algorithm].index_history = best_indexes_fitting_budget(algorithm)\n",
    "    \n",
    "# NOTE(review): pickle.load is acceptable here only because the workload pickle\n",
    "# is a locally produced artifact, not untrusted input.\n",
    "workload = pickle.load(open(f'{CSV_PATH}/workload_{BENCHMARK}_{len(QUERIES)}_queries.pickle', 'rb'))\n",
    "styles['no_index'].index_history = []\n",
    "# Filter uninteresting ones\n",
    "workload.queries = [query for query in workload.queries if query.nr not in [1, 3, 6, 7, 10, 13, 14, 15, 16]]\n",
    "# Log Scale queries\n",
    "workload.queries = [query for query in workload.queries if query.nr not in [2, 17, 20]]\n",
    "\n",
    "no_index_costs = {}\n",
    "for query in workload.queries:\n",
    "    no_index_costs[query.nr] = what_if_cost(Workload([query]), [])\n",
    "    \n",
    "draw_tpch_query_graph()\n",
    "\n",
    "# Separate small log-scale chart for the cost-dominating queries 2, 17, 20\n",
    "workload = pickle.load(open(f'{CSV_PATH}/workload_{BENCHMARK}_{len(QUERIES)}_queries.pickle', 'rb'))\n",
    "workload.queries = [query for query in workload.queries if query.nr in [2, 17, 20]]\n",
    "\n",
    "draw_tpch_query_graph(yscale='log', small=True)\n",
    "\n",
    "pdc.close()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## TPCDS - Cost"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "CSV_PATH = '../tpcds_wo_4_6_9_10_11_32_35_41_95'\n",
    "BENCHMARK = 'tpcds'\n",
    "SCALE_FACTOR = 10\n",
    "QUERIES = [1, 2, 3, 5, 7, 8, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 34, 36, 37, 38, 39, 40, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 96, 97, 98, 99]\n",
    "XLIM = 12"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "cophy_memory_consumptions_mb = [250,500,1000,1500,2000,2500,3000,3500,4250,5000,5750,6500,8000,10000,12500,15000]\n",
    "cophy_costs = [93552156.13, 88577114.46, 84585359.84, 80591549.13, 79399996.85, 78585000.31, 77435060.04, 76792132.12, 75967528.65, 75365979.03, 74943369.35, 74457654.57, 73613863.17, 72363984.63, 71776031.09, 71534655.96]\n",
    "draw_cost_graph(cophy_costs, cophy_memory_consumptions_mb)\n",
    "# new cophy\n",
    "# cophy_costs = [94009579.24, 89908958.77, 86949597.35, 83122923.62, 82171561.17, 81545983.11, 80889568.68, 80249071.74, 79435135.96, 78835484.06, 78171122.91, 77577292.75, 76784020.72, 76237071.04, 75169511.67, 74351448.47]\n",
    "# draw_cost_graph(cophy_costs, cophy_memory_consumptions_mb)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## TPCDS - Runtime"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Cophy What-If time: 579.6870040893555 - cost_requests: 394317 - cache_hits: 342140 - Gurobi Times:\n",
    "cophy_what_if_time = 579.69\n",
    "cophy_solver_times = [0.715391, 2.118141, 3.091555, 3.272472, 2.53551, 27.41455, 24.389079, 2.613326, 25.794448, 25.912374, 27.815068, 26.617466, 25.222031, 25.490362, 24.821388, 25.060508]\n",
    "cophy_times = list(map(lambda x: x + cophy_what_if_time, cophy_solver_times))\n",
    "draw_runtime_graph(minutes=True, cophy_runtimes=cophy_times, cophy_memory_consumption=cophy_memory_consumptions_mb)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## TPCDS - What If Graph"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# draw_what_if_graph(million=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## JOB - Cost"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Join Order Benchmark section (no scale factor); XLIM = x-axis limit (GB)\n",
    "CSV_PATH = '../job/'\n",
    "BENCHMARK = 'JOB'\n",
    "SCALE_FACTOR = None\n",
    "QUERIES = range(0, 113)\n",
    "XLIM = 12"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# draw_cost_graph()\n",
    "# new cophy\n",
    "cophy_memory_consumptions_mb = [250,500,1000,1500,2000,2500,3000,3500,5000,5750,6500,8000]\n",
    "cophy_costs = [75079495.33, 69086234.95, 54977085.42, 50687539.75, 46159506.78, 37484609.32, 28011167.16, 27618166.14, 25963809.01, 25943071.1, 24859616.99, 24763974.91]\n",
    "draw_cost_graph(cophy_costs, cophy_memory_consumptions_mb)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## JOB - Runtime"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Cophy What-If time: 822.8340845108032 - cost_requests: 305326 - cache_hits: 267996 - Gurobi Times:\n",
    "cophy_what_if_time = 822.83\n",
    "cophy_solver_times = [34.712032, 8.646469, 40.455446, 8.850123, 48.282356, 68.443695, 29.195716, 40.542401, 19.277244, 18.437171, 7.715893, 5.603071]\n",
    "cophy_times = list(map(lambda x: x + cophy_what_if_time, cophy_solver_times))\n",
    "draw_runtime_graph(minutes=True, cophy_runtimes=cophy_times, cophy_memory_consumption=cophy_memory_consumptions_mb)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## JOB - What If Graph"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# GRAPH_SIZE = (5,2.2)\n",
    "# draw_what_if_graph(million=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# TPC-H on SQL Server (SF 10); XLIM = x-axis limit (GB)\n",
    "CSV_PATH = '../tpch_mssql'\n",
    "BENCHMARK = 'tpch'\n",
    "SCALE_FACTOR = 10\n",
    "# all except 2,17,20 because they dominate costs by orders of magnitude\n",
    "QUERIES = [1,3,4,5,6,7,8,9,10,11,12,13,14,15,16,18,19,21,22]\n",
    "XLIM = 10"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "draw_cost_graph()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
