{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| default_exp hierarchical"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Hierarchical Datasets"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Here we host a collection of datasets used in previous hierarchical research by Rangapuram et al. [2021], Olivares et al. [2023], and Kamarthi et al. [2022]. The benchmark datasets utilized include Australian Monthly Labour (`Labour`), SF Bay Area daily Traffic (`Traffic`, `OldTraffic`), Quarterly Australian Tourism Visits (`TourismSmall`), Monthly Australian Tourism visits (`TourismLarge`, `OldTourismLarge`), and daily Wikipedia article views (`Wiki2`). Old datasets favor the original datasets with minimal target variable preprocessing (Rangapuram et al. [2021], Olivares et al. [2023]), while the remaining datasets follow PROFHIT experimental settings.\n",
    "\n",
    "## References<br>\n",
    "- [Syama Sundar Rangapuram, Lucien D Werner, Konstantinos Benidis, Pedro Mercado, Jan Gasthaus, Tim Januschowski. (2021). \"End-to-End Learning of Coherent Probabilistic Forecasts for Hierarchical Time Series\". Proceedings of the 38th International Conference on Machine Learning (ICML).](https://proceedings.mlr.press/v139/rangapuram21a.html)<br>\n",
     "- [Kin G. Olivares, O. Nganba Meetei, Ruijun Ma, Rohan Reddy, Mengfei Cao, Lee Dicker (2023). \"Probabilistic Hierarchical Forecasting with Deep Poisson Mixtures\". International Journal of Forecasting, special issue.](https://doi.org/10.1016/j.ijforecast.2023.04.007)<br>\n",
     "- [Harshavardhan Kamarthi, Lingkai Kong, Alexander Rodriguez, Chao Zhang, and B. Prakash (2022). \"PROFHIT: Probabilistic Robust Forecasting for Hierarchical Time-series\". Computing Research Repository. URL: https://arxiv.org/abs/2206.07940.](https://arxiv.org/abs/2206.07940)<br>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "from dataclasses import dataclass\n",
    "from pathlib import Path\n",
    "from typing import Tuple\n",
    "\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "\n",
    "from datasetsforecast.utils import download_file, Info"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "@dataclass\n",
    "class Labour:\n",
    "    freq: str = 'MS'\n",
    "    horizon: int = 8\n",
    "    papers_horizon: int = 12\n",
    "    seasonality: int = 12\n",
    "    test_size: int = 125\n",
    "    tags_names: Tuple[str] = (\n",
    "        'Country',\n",
    "        'Country/Region',\n",
    "        'Country/Gender/Region',\n",
    "        'Country/Employment/Gender/Region',\n",
    "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "@dataclass\n",
    "class TourismLarge:\n",
    "    freq: str = 'MS'\n",
    "    horizon: int = 12\n",
    "    papers_horizon: int = 12\n",
    "    seasonality: int = 12\n",
    "    test_size: int = 57\n",
    "    tags_names: Tuple[str] = (\n",
    "        'Country',\n",
    "        'Country/State',\n",
    "        'Country/State/Zone',\n",
    "        'Country/State/Zone/Region',\n",
    "        'Country/Purpose',\n",
    "        'Country/State/Purpose',\n",
    "        'Country/State/Zone/Purpose',\n",
    "        'Country/State/Zone/Region/Purpose',\n",
    "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "@dataclass\n",
    "class TourismSmall:\n",
    "    freq: str = 'Q'\n",
    "    horizon: int = 4\n",
    "    papers_horizon: int = 4\n",
    "    seasonality: int = 4\n",
    "    test_size: int = 9\n",
    "    tags_names: Tuple[str] = (\n",
    "        'Country',\n",
    "        'Country/Purpose',\n",
    "        'Country/Purpose/State',\n",
    "        'Country/Purpose/State/CityNonCity',\n",
    "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "@dataclass\n",
    "class Traffic:\n",
    "    freq: str = 'D'\n",
    "    horizon: int = 14\n",
    "    papers_horizon: int = 7\n",
    "    seasonality: int = 7\n",
    "    test_size: int = 91\n",
    "    tags_names: Tuple[str] = (\n",
    "        'Level1',\n",
    "        'Level2',\n",
    "        'Level3',\n",
    "        'Level4',\n",
    "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "@dataclass\n",
    "class Wiki2:\n",
    "    freq: str = 'D'\n",
    "    horizon: int = 14\n",
    "    papers_horizon: int = 7\n",
    "    seasonality: int = 7\n",
    "    test_size: int = 91\n",
    "    tags_names: Tuple[str] = (\n",
    "        'Views',\n",
    "        'Views/Country',\n",
    "        'Views/Country/Access',\n",
    "        'Views/Country/Access/Agent',\n",
    "        'Views/Country/Access/Agent/Topic'\n",
    "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "@dataclass\n",
    "class OldTraffic:\n",
    "    freq: str = 'D'\n",
    "    horizon: int = 1\n",
    "    papers_horizon: int = 1\n",
    "    seasonality: int = 7\n",
    "    test_size: int = 91\n",
    "    tags_names: Tuple[str] = (\n",
    "        'Level1',\n",
    "        'Level2',\n",
    "        'Level3',\n",
    "        'Level4',\n",
    "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "@dataclass\n",
    "class OldTourismLarge:\n",
    "    freq: str = 'MS'\n",
    "    horizon: int = 12\n",
    "    papers_horizon: int = 12\n",
    "    seasonality: int = 12\n",
    "    test_size: int = 57\n",
    "    tags_names: Tuple[str] = (\n",
    "        'Country',\n",
    "        'Country/State',\n",
    "        'Country/State/Zone',\n",
    "        'Country/State/Zone/Region',\n",
    "        'Country/Purpose',\n",
    "        'Country/State/Purpose',\n",
    "        'Country/State/Zone/Purpose',\n",
    "        'Country/State/Zone/Region/Purpose',\n",
    "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "HierarchicalInfo = Info(\n",
    "    (\n",
    "        Labour, TourismLarge, \n",
    "        TourismSmall,\n",
    "        Traffic, Wiki2,\n",
    "        OldTraffic, OldTourismLarge\n",
    "    )\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "class HierarchicalData:\n",
    "    \n",
    "    source_url: str = 'https://nixtla-public.s3.amazonaws.com/hierarchical-data/datasets.zip'\n",
    "    source_url_old_traffic: str ='https://www.dropbox.com/s/4nl5afkdr4djpuy/OldTraffic.zip?dl=1'\n",
    "    source_url_old_tourisml: str = 'https://www.dropbox.com/s/ye78jnujhbxyggo/OldTourismLarge.zip?dl=1'\n",
    "\n",
    "    @staticmethod\n",
    "    def load(directory: str,\n",
    "             group: str,\n",
    "             cache: bool = True) -> Tuple[pd.DataFrame, pd.DataFrame]:\n",
    "        \"\"\"\n",
    "        Downloads hierarchical forecasting benchmark datasets.\n",
    "        \n",
    "            Parameters\n",
    "            ----------\n",
    "            directory: str\n",
    "                Directory where data will be downloaded.\n",
    "            group: str\n",
    "                Group name.\n",
    "            cache: bool\n",
    "                If `True` saves and loads\n",
    "                \n",
    "            Returns\n",
    "            -------\n",
    "            Y_df: pd.DataFrame\n",
    "                Target time series with columns ['unique_id', 'ds', 'y'].\n",
    "                Containes the base time series.\n",
    "            S_df: pd.DataFrame\n",
    "                Summing matrix of size (hierarchies, bottom).\n",
    "        \"\"\"\n",
    "        if group not in HierarchicalInfo.groups:\n",
    "            raise Exception(f'group not found {group}')\n",
    "\n",
    "        path = f'{directory}/hierarchical/'\n",
    "        file_cache = Path(f'{path}/{group}.p')\n",
    "\n",
    "        if file_cache.is_file() and cache:\n",
    "            Y_df, S_df, tags = pd.read_pickle(file_cache)\n",
    "\n",
    "            return Y_df, S_df, tags\n",
    "\n",
    "        HierarchicalData.download(directory)\n",
    "        path = Path(f'{path}/{group}')\n",
    "        S_df = pd.read_csv(path / 'agg_mat.csv', index_col=0) \n",
    "        Y_df = pd.read_csv(path / 'data.csv', index_col=0).T\n",
    "        Y_df = Y_df.stack()\n",
    "        Y_df.name = 'y'\n",
    "        Y_df.index = Y_df.index.set_names(['unique_id', 'ds'])\n",
    "        Y_df = Y_df.reset_index()\n",
    "        \n",
    "        if group == 'Labour':\n",
    "            #for labour we avoid covid periods\n",
    "            Y_df = Y_df.query('ds < \"2020-01-01\"').reset_index(drop=True)\n",
    "        \n",
    "        if not all(Y_df['unique_id'].unique() == S_df.index):\n",
    "            raise Exception('mismatch order between `Y_df` and `S_df`')\n",
    "        \n",
    "        def get_levels_from_S(S_df):\n",
    "            cut_idxs, = np.where(S_df.sum(axis=1).cumsum() % S_df.shape[1] == 0.)\n",
    "            levels = [S_df.iloc[(cut_idxs[i] + 1):(cut_idxs[i+1] + 1)].index.values for i in range(cut_idxs.size-1)]\n",
    "            levels = [S_df.iloc[[0]].index.values] + levels\n",
    "            assert sum([len(lv) for lv in levels]) == S_df.shape[0]\n",
    "            return levels\n",
    "\n",
    "        cls_group = HierarchicalInfo[group]\n",
    "        tags = dict(zip(cls_group.tags_names, get_levels_from_S(S_df)))\n",
    "\n",
    "        if cache:\n",
    "            pd.to_pickle((Y_df, S_df, tags), file_cache)\n",
    "\n",
    "        return Y_df, S_df, tags\n",
    "\n",
    "    @staticmethod\n",
    "    def download(directory: str) -> None:\n",
    "        \"\"\"\n",
    "        Download Hierarchical Datasets.\n",
    "        \n",
    "            Parameters\n",
    "            ----------\n",
    "            directory: str\n",
    "                Directory path to download dataset.\n",
    "        \"\"\"\n",
    "        path = f'{directory}/hierarchical/'\n",
    "        if not Path(path).exists():\n",
    "            download_file(path, HierarchicalData.source_url, decompress=True)\n",
    "            download_file(path, HierarchicalData.source_url_old_traffic, decompress=True)\n",
    "            download_file(path, HierarchicalData.source_url_old_tourisml, decompress=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| hide\n",
    "from fastcore.test import test_close"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| hide\n",
    "for group, _ in HierarchicalInfo:\n",
    "    #if group not in ['OldTraffic', 'OldTourismLarge']:\n",
    "    Y_df, S_df, tags = HierarchicalData.load('./data', group)\n",
    "    assert all(S_df.loc[cats].values.sum() == S_df.shape[1] for _, cats in tags.items())\n",
    "    assert len(S_df) == sum(len(v) for _, v in tags.items()), group\n",
    "    S_hiers = [S_df.loc[cats].values * np.arange(1, len(cats) + 1).reshape(-1, 1) for _, cats in tags.items()]\n",
    "    S_hiers = np.vstack(S_hiers)\n",
    "    S_hiers = S_hiers.sum(axis=0)\n",
    "    is_strictly_hierarchical = np.array_equal(S_hiers, np.sort(S_hiers))\n",
    "    print(f'Is {group} strictly hierarchical? {is_strictly_hierarchical}')\n",
    "    \n",
    "    # test S recovers Y_df\n",
    "    for key, hiers in tags.items():\n",
    "        for ts, bottom_ts in S_df.loc[hiers].iterrows():\n",
    "            actual_bottom_ts = bottom_ts.loc[lambda x: x == 1].index\n",
    "            test_close(\n",
    "                Y_df.query('unique_id == @ts')['y'].sum(), \n",
    "                Y_df.query('unique_id in @actual_bottom_ts')['y'].sum()\n",
    "            )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| hide\n",
    "# Meta information\n",
    "meta = pd.DataFrame(\n",
    "    columns=['Frequency', 'Series', 'Levels', 'Observations per Series', 'Test Observations per Series', 'Horizon'],\n",
    "    index=pd.Index(HierarchicalInfo.groups, name='Dataset')\n",
    ")\n",
    "for group, cls_group in HierarchicalInfo:\n",
    "    #if group not in ['OldTraffic', 'OldTourismLarge']:\n",
    "    Y_df, S_df, tags = HierarchicalData.load('./data', group)\n",
    "    meta.loc[group, 'Frequency'] = cls_group.freq\n",
    "    meta.loc[group, 'Horizon'] = cls_group.horizon\n",
    "    meta.loc[group, 'Papers\\' horizon'] = int(cls_group.papers_horizon)\n",
    "    meta.loc[group, 'Series'] = Y_df['unique_id'].nunique()\n",
    "    meta.loc[group, 'Levels'] = len(tags)\n",
    "    meta.loc[group, 'Observations per Series'] = Y_df.groupby('unique_id').size().unique().item()\n",
    "    meta.loc[group, 'Test Observations per Series'] =  meta.loc[group, 'Observations per Series'] // 4\n",
    "meta"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "python3",
   "language": "python",
   "name": "python3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
