{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| default_exp long_horizon"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Long-Horizon Datasets\n",
    "\n",
    "> Utilities for downloading and wrangling long-horizon datasets."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| hide\n",
    "from nbdev import *\n",
    "%load_ext autoreload\n",
    "%autoreload 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "import os\n",
    "from dataclasses import dataclass\n",
    "from typing import Optional, Tuple\n",
    "\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "\n",
    "from datasetsforecast.utils import download_file, Info"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "@dataclass\n",
    "class ETTh1:\n",
    "    \"\"\"\n",
    "    The ETTh1 dataset monitors an electricity transformer \n",
    "    from a region of a province of China including oil temperature \n",
    "    and variants of load (such as high useful load and high useless load) \n",
    "    from July 2016 to July 2018 at an hourly frequency.\n",
    "    \"\"\"\n",
    "    freq: str = 'H'\n",
    "    name: str = 'ETTh1'\n",
    "    n_ts: int = 1\n",
    "    test_size: int = 11_520\n",
    "    val_size: int = 11_520\n",
    "    horizons: Tuple[int] = (96, 192, 336, 720)\n",
    "        \n",
    "@dataclass\n",
    "class ETTh2:\n",
    "    \"\"\"\n",
    "    The ETTh2 dataset monitors an electricity transformer \n",
    "    from a region of a province of China including oil temperature \n",
    "    and variants of load (such as high useful load and high useless load) \n",
    "    from July 2016 to July 2018 at an hourly frequency.\n",
    "    \"\"\"    \n",
    "    freq: str = 'H'\n",
    "    name: str = 'ETTh2'\n",
    "    n_ts: int = 1\n",
    "    test_size: int = 11_520\n",
    "    val_size: int = 11_520\n",
    "    horizons: Tuple[int] = (96, 192, 336, 720)\n",
    "\n",
    "@dataclass\n",
    "class ETTm1:\n",
    "    \"\"\"\n",
    "    The ETTm1 dataset monitors an electricity transformer \n",
    "    from a region of a province of China including oil temperature \n",
    "    and variants of load (such as high useful load and high useless load) \n",
    "    from July 2016 to July 2018 at a fifteen minute frequency.\n",
    "    \"\"\"    \n",
    "    freq: str = '15T'\n",
    "    name: str = 'ETTm1'\n",
    "    n_ts: int = 7\n",
    "    test_size: int = 11_520\n",
    "    val_size: int = 11_520\n",
    "    horizons: Tuple[int] = (96, 192, 336, 720)\n",
    "        \n",
    "@dataclass\n",
    "class ETTm2:\n",
    "    \"\"\"\n",
    "    The ETTm2 dataset monitors an electricity transformer \n",
    "    from a region of a province of China including oil temperature \n",
    "    and variants of load (such as high useful load and high useless load) \n",
    "    from July 2016 to July 2018 at a fifteen minute frequency.\n",
    "    \n",
    "        Reference:\n",
    "        Zhou, et al. Informer: Beyond Efficient Transformer \n",
    "        for Long Sequence Time-Series Forecasting. AAAI 2021.\n",
    "        https://arxiv.org/abs/2012.07436\n",
    "    \"\"\"\n",
    "    freq: str = '15T'\n",
    "    name: str = 'ETTm2'\n",
    "    n_ts: int = 7\n",
    "    test_size: int = 11_520\n",
    "    val_size: int = 11_520\n",
    "    horizons: Tuple[int] = (96, 192, 336, 720)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export    \n",
    "@dataclass\n",
    "class ECL:\n",
    "    \"\"\"\n",
    "    The Electricity dataset reports the fifteen minute electricity \n",
    "    consumption (KWh) of 321 customers from 2012 to 2014. \n",
    "    For comparability, we aggregate it hourly.\n",
    "    \n",
    "        Reference:\n",
    "        Li, S et al. Enhancing the locality and breaking the memory \n",
    "        bottleneck of Transformer on time series forecasting.\n",
    "        NeurIPS 2019. http://arxiv.org/abs/1907.00235.\n",
    "    \"\"\"\n",
    "    freq: str = '15T'\n",
    "    name: str = 'ECL'\n",
    "    n_ts: int = 321\n",
    "    test_size: int = 5_260\n",
    "    val_size: int = 2_632\n",
    "    horizons: Tuple[int] = (96, 192, 336, 720)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "@dataclass\n",
    "class Exchange:\n",
    "    \"\"\"\n",
    "    The Exchange dataset is a collection of daily exchange rates of \n",
    "    eight countries relative to the US dollar. The countries include \n",
    "    Australia, UK, Canada, Switzerland, China, Japan, New Zealand and \n",
    "    Singapore from 1990 to 2016.\n",
    "\n",
    "        Reference:\n",
    "        Lai, G., Chang, W., Yang, Y., and Liu, H. Modeling Long and\n",
    "        Short-Term Temporal Patterns with Deep Neural Networks.\n",
    "        SIGIR 2018. http://arxiv.org/abs/1703.07015.\n",
    "    \"\"\"\n",
    "    freq: str = 'D'\n",
    "    name: str = 'Exchange'\n",
    "    n_ts: int = 8\n",
    "    test_size: int = 1_517\n",
    "    val_size: int = 760\n",
    "    horizons: Tuple[int] = (96, 192, 336, 720)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "@dataclass\n",
    "class TrafficL:\n",
    "    \"\"\"\n",
    "    This large Traffic dataset was collected by the California Department \n",
    "    of Transportation, it reports road hourly occupancy rates of 862 sensors, \n",
    "    from January 2015 to December 2016.\n",
    "    \n",
    "        Reference:\n",
    "        Lai, G., Chang, W., Yang, Y., and Liu, H. Modeling Long and\n",
    "        Short-Term Temporal Patterns with Deep Neural Networks.\n",
    "        SIGIR 2018. http://arxiv.org/abs/1703.07015.\n",
    "        \n",
    "        Wu, H., Xu, J., Wang, J., and Long, M. Autoformer:\n",
    "        Decomposition Transformers with auto-correlation for\n",
    "        long-term series forecasting. NeurIPS 2021. \n",
    "        https://arxiv.org/abs/2106.13008.        \n",
    "    \"\"\"\n",
    "    freq: str = 'H'\n",
    "    name: str = 'traffic'\n",
    "    n_ts: int = 862\n",
    "    test_size: int = 3_508\n",
    "    val_size: int = 1_756\n",
    "    horizons: Tuple[int] = (96, 192, 336, 720)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "@dataclass\n",
    "class ILI:\n",
    "    \"\"\"\n",
    "    This dataset reports weekly recorded influenza-like illness (ILI) \n",
    "    patients from Centers for Disease Control and Prevention of the \n",
    "    United States from 2002 to 2021. It is measured as a ratio of ILI \n",
    "    patients versus the total patients in the week.\n",
    "    \n",
    "        Reference:\n",
    "        Wu, H., Xu, J., Wang, J., and Long, M. Autoformer:\n",
    "        Decomposition Transformers with auto-correlation for\n",
    "        long-term series forecasting. NeurIPS 2021. \n",
    "        https://arxiv.org/abs/2106.13008.    \n",
    "    \"\"\"\n",
    "    freq: str = 'W'\n",
    "    name: str = 'ili'\n",
    "    n_ts: int = 7\n",
    "    test_size: int = 193\n",
    "    val_size: int = 97\n",
    "    horizons: Tuple[int] = (24, 36, 48, 60)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "@dataclass\n",
    "class Weather:\n",
    "    \"\"\"\n",
    "    This Weather dataset contains the 2020 year of 21 meteorological \n",
    "    measurements\n",
    "    recorded every 10 minutes from the Weather Station of the Max Planck Biogeochemistry \n",
    "    Institute in Jena, Germany.\n",
    "\n",
    "        Reference:\n",
    "        Wu, H., Xu, J., Wang, J., and Long, M. Autoformer:\n",
    "        Decomposition Transformers with auto-correlation for\n",
    "        long-term series forecasting. NeurIPS 2021. \n",
    "        https://arxiv.org/abs/2106.13008.\n",
    "    \"\"\"\n",
    "    freq: str = '10M'\n",
    "    name: str = 'weather'\n",
    "    n_ts: int = 21\n",
    "    test_size: int = 10_539\n",
    "    val_size: int = 5_270\n",
    "    horizons: Tuple[int] = (96, 192, 336, 720)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "# Registry of all long-horizon dataset metadata classes. As used elsewhere\n",
    "# in this module, `Info` exposes `.groups` (membership test), `__getitem__`\n",
    "# by group name, and iteration over (group, meta) pairs — TODO confirm\n",
    "# against `datasetsforecast.utils.Info`.\n",
    "LongHorizonInfo = Info((\n",
    "        ETTh1, ETTh2, ETTm1, ETTm2, \n",
    "        ECL, Exchange, TrafficL, ILI, Weather\n",
    "))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "@dataclass\n",
    "class LongHorizon:\n",
    "    \"\"\"\n",
    "    Long-Horizon datasets wrapper class. Provides utilities to download\n",
    "    and wrangle the following datasets:\n",
    "    ETT, ECL, Exchange, Traffic, ILI and Weather.\n",
    "\n",
    "    - Each set is normalized with the train data mean and standard deviation.\n",
    "    - Datasets are partitioned into train, validation and test splits.\n",
    "    - For all datasets: 70%, 10%, and 20% of observations are train,\n",
    "      validation and test respectively, except ETT which uses 20% validation.\n",
    "    \"\"\"\n",
    "\n",
    "    source_url: str = 'https://nhits-experiments.s3.amazonaws.com/datasets.zip'\n",
    "\n",
    "    @staticmethod\n",
    "    def load(directory: str,\n",
    "             group: str,\n",
    "             cache: bool = True) -> Tuple[pd.DataFrame,\n",
    "                                          Optional[pd.DataFrame],\n",
    "                                          Optional[pd.DataFrame]]:\n",
    "        \"\"\"\n",
    "        Downloads and wrangles a long-horizon forecasting benchmark dataset.\n",
    "\n",
    "            Parameters\n",
    "            ----------\n",
    "            directory: str\n",
    "                Directory where data will be downloaded.\n",
    "            group: str\n",
    "                Group name.\n",
    "                Allowed groups: 'ETTh1', 'ETTh2',\n",
    "                                'ETTm1', 'ETTm2',\n",
    "                                'ECL', 'Exchange',\n",
    "                                'TrafficL', 'Weather', 'ILI'.\n",
    "            cache: bool\n",
    "                If `True`, saves the wrangled dataframes as a pickle and\n",
    "                loads them on subsequent calls, skipping the download and\n",
    "                wrangling steps.\n",
    "\n",
    "            Returns\n",
    "            -------\n",
    "            y_df: pd.DataFrame\n",
    "                Target time series with columns ['unique_id', 'ds', 'y'].\n",
    "            X_df: pd.DataFrame\n",
    "                Exogenous time series with columns ['unique_id', 'ds']\n",
    "                plus the exogenous features.\n",
    "            S_df: pd.DataFrame\n",
    "                Static exogenous variables; always `None` for these datasets.\n",
    "\n",
    "            Raises\n",
    "            ------\n",
    "            ValueError\n",
    "                If `group` is not one of the allowed groups.\n",
    "        \"\"\"\n",
    "        if group not in LongHorizonInfo.groups:\n",
    "            # ValueError is an Exception subclass, so existing callers that\n",
    "            # catch the old generic Exception keep working.\n",
    "            raise ValueError(f'group not found {group}')\n",
    "\n",
    "        path = f'{directory}/longhorizon/datasets'\n",
    "        file_cache = f'{path}/{group}.p'\n",
    "\n",
    "        if cache and os.path.exists(file_cache):\n",
    "            y_df, X_df, S_df = pd.read_pickle(file_cache)\n",
    "            return y_df, X_df, S_df\n",
    "\n",
    "        LongHorizon.download(directory)\n",
    "\n",
    "        # The ETTh groups ship a univariate ('S') variant; every other group\n",
    "        # is stored as multivariate ('M').\n",
    "        kind = 'S' if group in ('ETTh1', 'ETTh2') else 'M'\n",
    "        name = LongHorizonInfo[group].name\n",
    "        y_df = pd.read_csv(f'{path}/{name}/{kind}/df_y.csv')\n",
    "        y_df = y_df.sort_values(['unique_id', 'ds'], ignore_index=True)\n",
    "        y_df = y_df[['unique_id', 'ds', 'y']]\n",
    "        X_df = pd.read_csv(f'{path}/{name}/{kind}/df_x.csv')\n",
    "        # Exogenous features are keyed only by 'ds': broadcast them onto\n",
    "        # every (unique_id, ds) pair of the target frame.\n",
    "        X_df = y_df.drop('y', axis=1).merge(X_df, how='left', on=['ds'])\n",
    "\n",
    "        S_df = None  # no static variables for these datasets\n",
    "        if cache:\n",
    "            pd.to_pickle((y_df, X_df, S_df), file_cache)\n",
    "\n",
    "        return y_df, X_df, S_df\n",
    "\n",
    "    @staticmethod\n",
    "    def download(directory: str) -> None:\n",
    "        \"\"\"\n",
    "        Downloads the long-horizon datasets bundle (all groups) if it is\n",
    "        not already present on disk.\n",
    "\n",
    "        Parameters\n",
    "        ----------\n",
    "        directory: str\n",
    "            Directory path to download the dataset into.\n",
    "        \"\"\"\n",
    "        path = f'{directory}/longhorizon/datasets/'\n",
    "        if not os.path.exists(path):\n",
    "            download_file(path, LongHorizon.source_url, decompress=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| hide\n",
    "# Smoke test: wrangle every group and validate two invariants of the\n",
    "# target frame: no duplicated (unique_id, ds) records, and the number\n",
    "# of series matches the group's declared `n_ts` metadata.\n",
    "for group, meta in LongHorizonInfo:\n",
    "    data, *_ = LongHorizon.load(directory='data', group=group)\n",
    "    unique_elements = data.groupby(['unique_id', 'ds']).size()\n",
    "    unique_ts = data.groupby('unique_id').size()\n",
    "\n",
    "    assert (unique_elements != 1).sum() == 0, f'Duplicated records found: {group}'\n",
    "    assert unique_ts.shape[0] == meta.n_ts, f'Number of time series not match: {group}'"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "python3",
   "language": "python",
   "name": "python3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
