{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "c8b7c85d",
"metadata": {},
"outputs": [],
"source": [
"from haven import haven_jupyter as hj\n",
"from haven import haven_results as hr\n",
"from haven import haven_utils as hu\n",
"\n",
"# Path where the experiments were saved.\n",
"# NOTE(review): hardcoded absolute path -- adjust for your machine.\n",
"savedir_base = '/home/toolkit/trl/results'\n",
"exp_list = None\n",
"\n",
"# Optionally build the experiment list from an exp-config .py file.\n",
"exp_config_fname = None\n",
"if exp_config_fname:\n",
"    config = hu.load_py(exp_config_fname)\n",
"    exp_list = []\n",
"    for exp_group in [\"example\"]:\n",
"        exp_list += config.EXP_GROUPS[exp_group]\n",
"\n",
"# Optionally filter experiments, e.g. filterby_list = [{'dataset': 'mnist'}]\n",
"filterby_list = None\n",
"\n",
"# Collect the experiment results.\n",
"rm = hr.ResultManager(\n",
"    exp_list=exp_list,\n",
"    savedir_base=savedir_base,\n",
"    filterby_list=filterby_list,\n",
"    verbose=0,\n",
"    exp_groups=None,\n",
"    job_scheduler='slurm',\n",
")\n",
"\n",
"# Display parameters (uncomment to group/label the dashboard plots).\n",
"# groupby_list = ['dataset']\n",
"# title_list = ['dataset']\n",
"# legend_list = ['model']\n",
"y_metrics = ['train_loss']\n",
"x_metric = 'epoch'\n",
"\n",
"# Launch the interactive results dashboard.\n",
"hj.get_dashboard(rm, vars(), wide_display=False, enable_datatables=False)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "87acb700",
"metadata": {},
"outputs": [],
"source": [
"# Summary table of scores (head only, to keep the output small).\n",
"rm.get_score_df().head()\n",
"\n",
"# LaTeX export example:\n",
"# print(rm.get_latex_table(legend=['dataset'], metrics=['train_loss'], decimals=1, caption=\"Results\", label='tab:results'))\n",
"\n",
"# Custom plots; uncomment keyword arguments below to tweak appearance.\n",
"fig = rm.get_plot_all(\n",
"    # order='metrics_by_groups',\n",
"    # avg_across='runs',\n",
"    y_metric_list=y_metrics,\n",
"    x_metric=x_metric,\n",
"    # legend_fontsize=18,\n",
"    # x_fontsize=20,\n",
"    # y_fontsize=20,\n",
"    # xtick_fontsize=20,\n",
"    # ytick_fontsize=20,\n",
"    # title_fontsize=24,\n",
"    # legend_list=['model'],\n",
"    # title_list=['dataset'],\n",
"    # title_format='Dataset:{}',\n",
"    # log_metric_list=['train_loss'],\n",
"    # groupby_list=['dataset'],\n",
"    # map_ylabel_list=[{'train_loss':'Train loss'}],\n",
"    # map_xlabel_list=[{'epoch':'Epoch'}],\n",
"    # figsize=(15, 5),\n",
"    # plot_confidence=False,\n",
"    # savedir_plots='%s' % (name)\n",
")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1820c04c",
"metadata": {},
"outputs": [],
"source": [
"# Use %pip (not !pip) so the install targets this kernel's environment.\n",
"# NOTE(review): the original line was indented, which raises IndentationError\n",
"# on a fresh run; pinning to a tag/commit is recommended for reproducibility.\n",
"%pip install --upgrade git+https://github.com/haven-ai/haven-ai\n"
]
}
],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 5
}