{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Lucid: Workload Estimator"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import seaborn as sns\n",
    "\n",
    "from sklearn import preprocessing, metrics\n",
    "from interpret.glassbox import ExplainableBoostingRegressor\n",
    "\n",
    "\n",
    "# --- Global plot styling (applies to any figures rendered in this notebook) ---\n",
    "sns.set_style(\"ticks\")\n",
    "font = {\n",
    "    \"font.family\": \"Roboto\",\n",
    "    \"font.size\": 12,\n",
    "}\n",
    "sns.set_style(font)\n",
    "paper_rc = {\n",
    "    \"lines.linewidth\": 3,\n",
    "    \"lines.markersize\": 10,\n",
    "}\n",
    "sns.set_context(\"paper\", font_scale=1.8, rc=paper_rc)\n",
    "current_palette = sns.color_palette()\n",
    "\n",
    "# Show every column when a DataFrame is displayed\n",
    "pd.set_option(\"display.max_columns\", None)\n",
    "\n",
    "# --- Configuration: select which cluster trace to analyse ---\n",
    "idx = 0  # 0: Venus, 1: Saturn, 2: Philly\n",
    "save = False  # NOTE(review): unused in this cell -- presumably toggles saving outputs; confirm\n",
    "experiment_list = [\"Venus_Sept\", \"Saturn_Sept\", \"Philly\"]\n",
    "cluster_list = [\"Venus\", \"Saturn\", \"Philly\"]\n",
    "cluster = cluster_list[idx]\n",
    "experiment = experiment_list[idx]\n",
    "\n",
    "datapath = f\"../data/{cluster}\"\n",
    "\n",
    "\n",
    "# Accumulator for per-cluster metric scores; filled by later cells\n",
    "result = pd.DataFrame()\n",
    "# The Philly trace lacks the derived time features (cpu_num, month, day,\n",
    "# hour, dayofweek), so a narrower column set is loaded for that cluster.\n",
    "if cluster == \"Philly\":\n",
    "    df = pd.read_csv(\n",
    "        f\"{datapath}/cluster_full_log.csv\",\n",
    "        parse_dates=[\"submit_time\"],\n",
    "        usecols=[\n",
    "            \"job_id\",\n",
    "            \"user\",\n",
    "            \"vc\",\n",
    "            \"gpu_num\",\n",
    "            \"submit_time\",\n",
    "            \"amp\",\n",
    "            \"gpu_util\",\n",
    "            \"gmem_util\",\n",
    "            \"gmem\",\n",
    "            \"duration\",\n",
    "        ],\n",
    "    )\n",
    "else:\n",
    "    df = pd.read_csv(\n",
    "        f\"{datapath}/cluster_full_log.csv\",\n",
    "        parse_dates=[\"submit_time\"],\n",
    "        usecols=[\n",
    "            \"job_id\",\n",
    "            \"user\",\n",
    "            \"vc\",\n",
    "            # \"jobname\",\n",
    "            \"gpu_num\",\n",
    "            \"cpu_num\",\n",
    "            \"submit_time\",\n",
    "            \"month\",\n",
    "            \"day\",\n",
    "            \"hour\",\n",
    "            \"dayofweek\",\n",
    "            \"amp\",\n",
    "            \"gpu_util\",\n",
    "            \"gmem_util\",\n",
    "            \"gmem\",\n",
    "            \"duration\",\n",
    "        ],\n",
    "    )\n",
    "\n",
    "# --- Train / validation split around a fixed trace window ---\n",
    "if cluster == \"Philly\":\n",
    "    trace_range = (\"2017-10-01 00:00:00\", \"2017-10-07 23:59:00\")\n",
    "    # NOTE(review): for Philly the training set is jobs submitted AFTER the\n",
    "    # validation window (submit_time > range end) -- confirm this is intended.\n",
    "    train_df = df[(df[\"submit_time\"] > trace_range[1])]\n",
    "    val_df = df[(df[\"submit_time\"] >= trace_range[0]) & (df[\"submit_time\"] <= trace_range[1])]\n",
    "else:\n",
    "    # trace_range = (\"2020-09-01 00:00:00\", \"2020-09-26 23:59:59\")\n",
    "    trace_range = (\"2020-09-01 00:00:00\", \"2020-09-27 00:10:00\")  # Add a bit more job for prediction\n",
    "    # Venus/Saturn: train on everything before September, validate on September\n",
    "    train_df = df[(df[\"submit_time\"] < trace_range[0])]\n",
    "    val_df = df[(df[\"submit_time\"] >= trace_range[0]) & (df[\"submit_time\"] <= trace_range[1])]\n",
    "\n",
    "\n",
    "train_df = train_df.sort_values(by=\"submit_time\")\n",
    "train_df.reset_index(inplace=True, drop=True)\n",
    "val_df = val_df.sort_values(by=\"submit_time\")\n",
    "val_df.reset_index(inplace=True, drop=True)\n",
    "\n",
    "# Features exclude the prediction target (duration) and the raw timestamp\n",
    "train_data = train_df.drop(columns=[\"duration\", \"submit_time\"])\n",
    "test_data = val_df.drop(columns=[\"duration\", \"submit_time\"])\n",
    "train_label = train_df[[\"duration\"]]\n",
    "test_label = val_df[[\"duration\"]]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Weekly-Updated Lucid Model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Retrain the EBM duration estimator at the start of each week on all jobs\n",
    "# submitted before that week, then predict durations for the week's jobs.\n",
    "# NOTE(review): depends on `df`, `result`, and `cluster` defined in the first\n",
    "# code cell (Venus/Saturn traces only -- the dates below are September 2020).\n",
    "trace_range_list = [\n",
    "    (\"2020-09-01 00:00:00\", \"2020-09-07 00:00:00\"), # Week 1\n",
    "    (\"2020-09-07 00:00:00\", \"2020-09-14 00:00:00\"), # Week 2\n",
    "    (\"2020-09-14 00:00:00\", \"2020-09-21 00:00:00\"), # Week 3\n",
    "    (\"2020-09-21 00:00:00\", \"2020-09-27 00:10:00\"), # Week 4\n",
    "]\n",
    "week_df = pd.DataFrame()\n",
    "for trace_range in trace_range_list:\n",
    "\n",
    "    # Training set grows each week: everything submitted before the week start\n",
    "    train_df = df[(df[\"submit_time\"] < trace_range[0])]\n",
    "    val_df = df[(df[\"submit_time\"] >= trace_range[0]) & (df[\"submit_time\"] <= trace_range[1])]\n",
    "\n",
    "\n",
    "    train_df = train_df.sort_values(by=\"submit_time\")\n",
    "    train_df.reset_index(inplace=True, drop=True)\n",
    "    val_df = val_df.sort_values(by=\"submit_time\")\n",
    "    val_df.reset_index(inplace=True, drop=True)\n",
    "\n",
    "    train_data = train_df.drop(columns=[\"duration\", \"submit_time\"])\n",
    "    test_data = val_df.drop(columns=[\"duration\", \"submit_time\"])\n",
    "    train_label = train_df[[\"duration\"]]\n",
    "    test_label = val_df[[\"duration\"]]\n",
    "\n",
    "    print(f\"Train Data Len: {len(train_data)}\")\n",
    "\n",
    "    ebm = ExplainableBoostingRegressor(learning_rate=0.01, binning=\"uniform\", interactions=20)\n",
    "    ebm.fit(train_data, train_label)\n",
    "    pred = ebm.predict(test_data)\n",
    "\n",
    "    mae_score = metrics.mean_absolute_error(test_label, pred)\n",
    "    mape_score = metrics.mean_absolute_percentage_error(test_label, pred)\n",
    "    r2_score = metrics.r2_score(test_label, pred)\n",
    "    # NOTE(review): this slot is overwritten every iteration, so only the\n",
    "    # final week's r2 survives in `result` -- confirm that is intended.\n",
    "    result.at[\"ebm_r2\", cluster] = r2_score\n",
    "    print(f\"mae_score: {mae_score:.2f}, mape_score: {mape_score:.2f}, r2_score: {r2_score:.4f}\")\n",
    "\n",
    "    # Integer-truncated predicted duration is stored as the job's priority\n",
    "    pred = pred.astype(int)\n",
    "    val_df.loc[:,'priority'] = pred\n",
    "    week_df = pd.concat([week_df, val_df])\n",
    "# week_df.to_csv(f\"ebm/{experiment}_Sept_ebm_weekly_updated.csv\", index=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Job Name Affinity Propagation\n",
    "\n",
    "Scripts below need the original jobname information, which cannot be released"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# --- Job-name canonicalisation via Affinity Propagation ---\n",
    "# Requires the un-anonymised `jobname` column (private trace copy only).\n",
    "# `distance` is the third-party Levenshtein edit-distance package.\n",
    "import distance\n",
    "import time\n",
    "import warnings\n",
    "import random\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from itertools import groupby\n",
    "from sklearn.cluster import AffinityPropagation\n",
    "from sklearn.preprocessing import LabelEncoder\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "\n",
    "top_high_freq_num = 100  # the 100 most frequent names are kept verbatim\n",
    "\n",
    "idx = 1  # 0: Venus, 1: Saturn, 2: Philly\n",
    "experiment_list = [\"Venus_Sept\", \"Saturn_Sept\", \"Philly\"]\n",
    "cluster_list = [\"Venus\", \"Saturn\", \"Philly\"]\n",
    "cluster = cluster_list[idx]\n",
    "experiment = experiment_list[idx]\n",
    "\n",
    "# Reloads the full log including jobname (overwrites `df` from earlier cells)\n",
    "df = pd.read_csv(f'../data/{cluster}/cluster_full_log.csv',\n",
    "                 parse_dates=['submit_time', 'start_time', 'end_time'])\n",
    "\n",
    "# Keep GPU jobs only and drop columns irrelevant to name clustering\n",
    "df = df[df['gpu_num']>0]\n",
    "df.drop(columns=['year', 'nodelist', 'priority', 'minute'], inplace=True)\n",
    "df.reset_index(drop=True, inplace=True)\n",
    "\n",
    "# Names sorted by descending frequency\n",
    "count = df['jobname'].value_counts()\n",
    "name_list = list(count.index)\n",
    "\n",
    "high_freq = name_list[:top_high_freq_num]\n",
    "to_cluster = name_list[top_high_freq_num:]\n",
    "to_cluster.sort()\n",
    "\n",
    "# Bucket names by first character so each pairwise distance matrix stays small\n",
    "groups = [list(g) for k, g in groupby(to_cluster, key=lambda x: x[0])]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "label_dict = {}  # maps each original jobname -> its exemplar (canonical) name\n",
    "\n",
    "for group in groups:\n",
    "    if len(group) == 1:\n",
    "        # Singleton bucket: the name is its own exemplar\n",
    "        label_dict.update({group[0]: group[0]})\n",
    "    else:\n",
    "        print(f\"Processing First Character: {group[0][0]}, Length: {len(group)}\")\n",
    "        ts = time.time()\n",
    "\n",
    "        names = np.asarray(group)\n",
    "        # Negated pairwise Levenshtein distances as the precomputed affinity matrix\n",
    "        lev_similarity = -1 * np.array([[distance.levenshtein(w1, w2) for w1 in names] for w2 in names])\n",
    "\n",
    "        affprop = AffinityPropagation(affinity=\"precomputed\", damping=0.9, random_state=6)\n",
    "        affprop.fit(lev_similarity)\n",
    "\n",
    "        for cluster_id in np.unique(affprop.labels_):\n",
    "            exemplar = names[affprop.cluster_centers_indices_[cluster_id]]\n",
    "            # BUGFIX: use a dedicated name for the member array -- the previous\n",
    "            # code assigned it to `cluster`, clobbering the cluster-name string\n",
    "            # that the export cell below uses to build its output path.\n",
    "            members = np.unique(names[np.nonzero(affprop.labels_ == cluster_id)])\n",
    "\n",
    "            for ori in members:\n",
    "                label_dict.update({ori: exemplar})\n",
    "\n",
    "        print(f\"Time Cost: {time.time()-ts} s\")\n",
    "\n",
    "# High-frequency names are kept verbatim (no clustering)\n",
    "for i in high_freq:\n",
    "    label_dict.update({i: i})\n",
    "# Every observed jobname must end up with a canonical label\n",
    "assert len(label_dict) == len(name_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"Replace Name\"\"\"\n",
    "# Vectorized replacement: Series.map is O(n) at C speed, versus the original\n",
    "# Python-level row loop using df.at.\n",
    "df['jobname'] = df['jobname'].map(label_dict)\n",
    "# map() yields NaN for names missing from label_dict, whereas the original\n",
    "# loop raised KeyError -- keep the failure explicit instead of writing NaNs.\n",
    "assert df['jobname'].notna().all(), \"some jobnames missing from label_dict\"\n",
    "\n",
    "df.to_csv(f\"./{cluster}/cluster_full_log.csv\", index=None)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.9.13 ('base')",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.13"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "784914556ea7aafefbdcb0c4fefea2600a01efb6a6b7916d4154dc17a3e6434f"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
