{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| hide\n",
    "%set_env PYTORCH_ENABLE_MPS_FALLBACK=1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| default_exp models.rmok"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| hide\n",
    "%load_ext autoreload\n",
    "%autoreload 2"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Reversible Mixture of KAN - RMoK\n",
    "The Reversible Mixture of KAN (RMoK) is a KAN-based model for time series forecasting which uses a mixture-of-experts structure to assign variables to different KAN experts, such as WaveKAN, TaylorKAN and JacobiKAN.\n",
    "\n",
    "**References**<br>\n",
    "[Xiao Han, Xinfeng Zhang, Yiling Wu, Zhenduo Zhang, Zhe Wu. \"KAN4TSF: Are KAN and KAN-based models Effective for Time Series Forecasting?\"](https://arxiv.org/abs/2408.11306)<br>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "![Figure 1. Architecture of RMoK.](imgs_models/rmok.png)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| hide\n",
    "import logging\n",
    "import warnings\n",
    "from fastcore.test import test_eq\n",
    "from nbdev.showdoc import show_doc\n",
    "from neuralforecast.common._model_checks import check_model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "import math\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "\n",
    "from neuralforecast.losses.pytorch import MAE\n",
    "from neuralforecast.common._base_model import BaseModel\n",
    "from neuralforecast.common._modules import RevINMultivariate\n",
    "from typing import Optional"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1. Auxiliary functions\n",
    "### 1.1 WaveKAN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "\n",
    "class WaveKANLayer(nn.Module):\n",
    "    '''Kolmogorov-Arnold layer whose learnable univariate functions are wavelets.\n",
    "\n",
    "    This is a sample code for the simulations of the paper:\n",
    "    Bozorgasl, Zavareh and Chen, Hao, Wav-KAN: Wavelet Kolmogorov-Arnold Networks (May, 2024)\n",
    "\n",
    "    https://arxiv.org/abs/2405.12832\n",
    "    and also available at:\n",
    "    https://papers.ssrn.com/sol3/papers.cfm?abstract_id=4835325\n",
    "    We used efficient KAN notation and some part of the code.\n",
    "\n",
    "    **Parameters:**<br>\n",
    "    `in_features`: int, size of each input sample.<br>\n",
    "    `out_features`: int, size of each output sample.<br>\n",
    "    `wavelet_type`: str, mother wavelet, one of ['mexican_hat', 'morlet', 'dog', 'meyer', 'shannon'].<br>\n",
    "    `with_bn`: bool, if True apply BatchNorm1d to the layer output.<br>\n",
    "    `device`: str, not used inside the layer; kept for API compatibility.<br>\n",
    "    '''\n",
    "\n",
    "    def __init__(self, in_features, out_features, wavelet_type='mexican_hat', with_bn=True, device=\"cpu\"):\n",
    "        super(WaveKANLayer, self).__init__()\n",
    "        self.in_features = in_features\n",
    "        self.out_features = out_features\n",
    "        self.wavelet_type = wavelet_type\n",
    "        self.with_bn = with_bn\n",
    "\n",
    "        # Parameters for wavelet transformation: one learnable (scale, translation)\n",
    "        # pair per (output, input) connection.\n",
    "        self.scale = nn.Parameter(torch.ones(out_features, in_features))\n",
    "        self.translation = nn.Parameter(torch.zeros(out_features, in_features))\n",
    "\n",
    "        # self.weight1 is not used; you may use it for weighting base activation and adding it like Spl-KAN paper\n",
    "        self.weight1 = nn.Parameter(torch.Tensor(out_features, in_features))\n",
    "        self.wavelet_weights = nn.Parameter(torch.Tensor(out_features, in_features))\n",
    "\n",
    "        nn.init.kaiming_uniform_(self.wavelet_weights, a=math.sqrt(5))\n",
    "        nn.init.kaiming_uniform_(self.weight1, a=math.sqrt(5))\n",
    "\n",
    "        # Base activation function #not used for this experiment\n",
    "        self.base_activation = nn.SiLU()\n",
    "\n",
    "        # Batch normalization\n",
    "        if self.with_bn:\n",
    "            self.bn = nn.BatchNorm1d(out_features)\n",
    "\n",
    "    def wavelet_transform(self, x):\n",
    "        \"\"\"Evaluate the mother wavelet on scaled/translated inputs and sum over inputs.\n",
    "\n",
    "        For a 2D input of shape (batch, in_features) the result has shape\n",
    "        (batch, out_features). A 3D input is used as-is (no unsqueeze); its\n",
    "        broadcasting against the parameters then depends on the caller's shapes.\n",
    "        \"\"\"\n",
    "        if x.dim() == 2:\n",
    "            # (batch, in_features) -> (batch, 1, in_features) so it broadcasts\n",
    "            # against the (out_features, in_features) wavelet parameters below.\n",
    "            x_expanded = x.unsqueeze(1)\n",
    "        else:\n",
    "            x_expanded = x\n",
    "\n",
    "        # Per-connection affine rescaling: (x - translation) / scale.\n",
    "        translation_expanded = self.translation.unsqueeze(0).expand(x.size(0), -1, -1)\n",
    "        scale_expanded = self.scale.unsqueeze(0).expand(x.size(0), -1, -1)\n",
    "        x_scaled = (x_expanded - translation_expanded) / scale_expanded\n",
    "\n",
    "        # Implementation of different wavelet types\n",
    "        if self.wavelet_type == 'mexican_hat':\n",
    "            term1 = ((x_scaled ** 2) - 1)\n",
    "            term2 = torch.exp(-0.5 * x_scaled ** 2)\n",
    "            wavelet = (2 / (math.sqrt(3) * math.pi ** 0.25)) * term1 * term2\n",
    "            wavelet_weighted = wavelet * self.wavelet_weights.unsqueeze(0).expand_as(wavelet)\n",
    "            wavelet_output = wavelet_weighted.sum(dim=2)\n",
    "        elif self.wavelet_type == 'morlet':\n",
    "            omega0 = 5.0  # Central frequency\n",
    "            real = torch.cos(omega0 * x_scaled)\n",
    "            envelope = torch.exp(-0.5 * x_scaled ** 2)\n",
    "            wavelet = envelope * real\n",
    "            wavelet_weighted = wavelet * self.wavelet_weights.unsqueeze(0).expand_as(wavelet)\n",
    "            wavelet_output = wavelet_weighted.sum(dim=2)\n",
    "\n",
    "        elif self.wavelet_type == 'dog':\n",
    "            # Implementing Derivative of Gaussian Wavelet\n",
    "            dog = -x_scaled * torch.exp(-0.5 * x_scaled ** 2)\n",
    "            wavelet = dog\n",
    "            wavelet_weighted = wavelet * self.wavelet_weights.unsqueeze(0).expand_as(wavelet)\n",
    "            wavelet_output = wavelet_weighted.sum(dim=2)\n",
    "        elif self.wavelet_type == 'meyer':\n",
    "            # Implement Meyer Wavelet here\n",
    "            # Constants for the Meyer wavelet transition boundaries\n",
    "            v = torch.abs(x_scaled)\n",
    "            pi = math.pi\n",
    "\n",
    "            def meyer_aux(v):\n",
    "                return torch.where(v <= 1 / 2, torch.ones_like(v),\n",
    "                                   torch.where(v >= 1, torch.zeros_like(v), torch.cos(pi / 2 * nu(2 * v - 1))))\n",
    "\n",
    "            def nu(t):\n",
    "                # Polynomial smoothing function used by the Meyer auxiliary window.\n",
    "                return t ** 4 * (35 - 84 * t + 70 * t ** 2 - 20 * t ** 3)\n",
    "\n",
    "            # Meyer wavelet calculation using the auxiliary function\n",
    "            wavelet = torch.sin(pi * v) * meyer_aux(v)\n",
    "            wavelet_weighted = wavelet * self.wavelet_weights.unsqueeze(0).expand_as(wavelet)\n",
    "            wavelet_output = wavelet_weighted.sum(dim=2)\n",
    "        elif self.wavelet_type == 'shannon':\n",
    "            # Windowing the sinc function to limit its support\n",
    "            pi = math.pi\n",
    "            sinc = torch.sinc(x_scaled / pi)  # sinc(x) = sin(pi*x) / (pi*x)\n",
    "\n",
    "            # Applying a Hamming window to limit the infinite support of the sinc function\n",
    "            window = torch.hamming_window(x_scaled.size(-1), periodic=False, dtype=x_scaled.dtype,\n",
    "                                          device=x_scaled.device)\n",
    "            # Shannon wavelet is the product of the sinc function and the window\n",
    "            wavelet = sinc * window\n",
    "            wavelet_weighted = wavelet * self.wavelet_weights.unsqueeze(0).expand_as(wavelet)\n",
    "            wavelet_output = wavelet_weighted.sum(dim=2)\n",
    "            # You can try many more wavelet types ...\n",
    "        else:\n",
    "            raise ValueError(\"Unsupported wavelet type\")\n",
    "\n",
    "        return wavelet_output\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"Compute the weighted wavelet mixture; apply batch norm when enabled.\"\"\"\n",
    "        wavelet_output = self.wavelet_transform(x)\n",
    "        # You may like test the cases like Spl-KAN\n",
    "        # wav_output = F.linear(wavelet_output, self.weight)\n",
    "        # base_output = F.linear(self.base_activation(x), self.weight1)\n",
    "\n",
    "        # base_output = F.linear(x, self.weight1)\n",
    "        combined_output = wavelet_output  # + base_output\n",
    "\n",
    "        # Apply batch normalization\n",
    "        if self.with_bn:\n",
    "            return self.bn(combined_output)\n",
    "        else:\n",
    "            return combined_output"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 1.2 TaylorKAN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "\n",
    "class TaylorKANLayer(nn.Module):\n",
    "    \"\"\"\n",
    "    https://github.com/Muyuzhierchengse/TaylorKAN/\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, input_dim, out_dim, order, addbias=True):\n",
    "        super(TaylorKANLayer, self).__init__()\n",
    "        self.input_dim = input_dim\n",
    "        self.out_dim = out_dim\n",
    "        self.order = order\n",
    "        self.addbias = addbias\n",
    "\n",
    "        self.coeffs = nn.Parameter(torch.randn(out_dim, input_dim, order) * 0.01)\n",
    "        if self.addbias:\n",
    "            self.bias = nn.Parameter(torch.zeros(1, out_dim))\n",
    "\n",
    "    def forward(self, x):\n",
    "        shape = x.shape\n",
    "        outshape = shape[0:-1] + (self.out_dim,)\n",
    "        x = torch.reshape(x, (-1, self.input_dim))\n",
    "        x_expanded = x.unsqueeze(1).expand(-1, self.out_dim, -1)\n",
    "\n",
    "        y = torch.zeros((x.shape[0], self.out_dim), device=x.device)\n",
    "\n",
    "        for i in range(self.order):\n",
    "            term = (x_expanded ** i) * self.coeffs[:, :, i]\n",
    "            y += term.sum(dim=-1)\n",
    "\n",
    "        if self.addbias:\n",
    "            y += self.bias\n",
    "\n",
    "        y = torch.reshape(y, outshape)\n",
    "        return y"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 1.3. JacobiKAN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "\n",
    "class JacobiKANLayer(nn.Module):\n",
    "    \"\"\"Kolmogorov-Arnold layer based on Jacobi polynomial features.\n",
    "\n",
    "    https://github.com/SpaceLearner/JacobiKAN/blob/main/JacobiKANLayer.py\n",
    "\n",
    "    **Parameters:**<br>\n",
    "    `input_dim`: int, size of each input sample.<br>\n",
    "    `output_dim`: int, size of each output sample.<br>\n",
    "    `degree`: int, maximum polynomial degree used in the expansion.<br>\n",
    "    `a`: float, first Jacobi shape parameter.<br>\n",
    "    `b`: float, second Jacobi shape parameter.<br>\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, input_dim, output_dim, degree, a=1.0, b=1.0):\n",
    "        super(JacobiKANLayer, self).__init__()\n",
    "        self.inputdim = input_dim\n",
    "        self.outdim = output_dim\n",
    "        self.a = a\n",
    "        self.b = b\n",
    "        self.degree = degree\n",
    "\n",
    "        # One coefficient per (input, output, polynomial-degree) triple.\n",
    "        self.jacobi_coeffs = nn.Parameter(torch.empty(input_dim, output_dim, degree + 1))\n",
    "\n",
    "        nn.init.normal_(self.jacobi_coeffs, mean=0.0, std=1 / (input_dim * (degree + 1)))\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"Project inputs onto Jacobi polynomial features and mix them linearly.\n",
    "\n",
    "        Input is flattened to (batch_size, inputdim); output has shape (batch_size, outdim).\n",
    "        \"\"\"\n",
    "        x = torch.reshape(x, (-1, self.inputdim))  # shape = (batch_size, inputdim)\n",
    "        # Since Jacobian polynomial is defined in [-1, 1]\n",
    "        # We need to normalize x to [-1, 1] using tanh\n",
    "        x = torch.tanh(x)\n",
    "        # Initialize Jacobian polynomial tensors\n",
    "        jacobi = torch.ones(x.shape[0], self.inputdim, self.degree + 1, device=x.device)\n",
    "        if self.degree > 0:  ## degree = 0: jacobi[:, :, 0] = 1 (already initialized) ; degree = 1: jacobi[:, :, 1] = x ; d\n",
    "            jacobi[:, :, 1] = ((self.a - self.b) + (self.a + self.b + 2) * x) / 2\n",
    "        # Three-term recurrence for the higher degrees; .clone() detaches the\n",
    "        # previously written columns so the in-place column writes stay safe for autograd.\n",
    "        for i in range(2, self.degree + 1):\n",
    "            theta_k = (2 * i + self.a + self.b) * (2 * i + self.a + self.b - 1) / (2 * i * (i + self.a + self.b))\n",
    "            theta_k1 = (2 * i + self.a + self.b - 1) * (self.a * self.a - self.b * self.b) / (\n",
    "                    2 * i * (i + self.a + self.b) * (2 * i + self.a + self.b - 2))\n",
    "            theta_k2 = (i + self.a - 1) * (i + self.b - 1) * (2 * i + self.a + self.b) / (\n",
    "                    i * (i + self.a + self.b) * (2 * i + self.a + self.b - 2))\n",
    "            jacobi[:, :, i] = (theta_k * x + theta_k1) * jacobi[:, :, i - 1].clone() - theta_k2 * jacobi[:, :,\n",
    "                                                                                                  i - 2].clone()  # 2 * x * jacobi[:, :, i - 1].clone() - jacobi[:, :, i - 2].clone()\n",
    "        # Compute the Jacobian interpolation\n",
    "        y = torch.einsum('bid,iod->bo', jacobi, self.jacobi_coeffs)  # shape = (batch_size, outdim)\n",
    "        y = y.view(-1, self.outdim)\n",
    "        return y"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2. Model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "\n",
    "class RMoK(BaseModel):\n",
    "    \"\"\" Reversible Mixture of KAN\n",
    "\n",
    "    Multivariate model that normalizes the input window with RevIN, routes each\n",
    "    series through a softmax-gated mixture of KAN experts (Taylor, Jacobi,\n",
    "    Wavelet) plus a linear expert, and denormalizes the mixed forecasts.\n",
    "\n",
    "    **Parameters:**<br>\n",
    "    `h`: int, Forecast horizon. <br>\n",
    "    `input_size`: int, autoregressive inputs size, y=[1,2,3,4] input_size=2 -> y_[t-2:t]=[1,2].<br>\n",
    "    `n_series`: int, number of time-series.<br>\n",
    "    `futr_exog_list`: str list, future exogenous columns.<br>\n",
    "    `hist_exog_list`: str list, historic exogenous columns.<br>\n",
    "    `stat_exog_list`: str list, static exogenous columns.<br>\n",
    "    `taylor_order`: int, order of the Taylor polynomial.<br>\n",
    "    `jacobi_degree`: int, degree of the Jacobi polynomial.<br>\n",
    "    `wavelet_function`: str, wavelet function to use in the WaveKAN. Choose from [\"mexican_hat\", \"morlet\", \"dog\", \"meyer\", \"shannon\"]<br>\n",
    "    `dropout`: float, dropout rate.<br>\n",
    "    `revin_affine`: bool=True, bool to use affine in RevIn.<br>\n",
    "    `loss`: PyTorch module, instantiated train loss class from [losses collection](https://nixtla.github.io/neuralforecast/losses.pytorch.html).<br>\n",
    "    `valid_loss`: PyTorch module=`loss`, instantiated valid loss class from [losses collection](https://nixtla.github.io/neuralforecast/losses.pytorch.html).<br>\n",
    "    `max_steps`: int=1000, maximum number of training steps.<br>\n",
    "    `learning_rate`: float=1e-3, Learning rate between (0, 1).<br>\n",
    "    `num_lr_decays`: int=-1, Number of learning rate decays, evenly distributed across max_steps.<br>\n",
    "    `early_stop_patience_steps`: int=-1, Number of validation iterations before early stopping.<br>\n",
    "    `val_check_steps`: int=100, Number of training steps between every validation loss check.<br>\n",
    "    `batch_size`: int=32, number of different series in each batch.<br>\n",
    "    `valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.<br>\n",
    "    `windows_batch_size`: int=32, number of windows to sample in each training batch, default uses all.<br>\n",
    "    `inference_windows_batch_size`: int=32, number of windows to sample in each inference batch, -1 uses all.<br>\n",
    "    `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.<br>\n",
    "    `step_size`: int=1, step size between each window of temporal data.<br>\n",
    "    `scaler_type`: str='identity', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).<br>\n",
    "    `random_seed`: int=1, random_seed for pytorch initializer and numpy generators.<br>\n",
    "    `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.<br>\n",
    "    `alias`: str, optional,  Custom name of the model.<br>\n",
    "    `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).<br>\n",
    "    `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.<br>\n",
    "    `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).<br>\n",
    "    `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.<br>\n",
    "    `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`. <br>\n",
    "    `**trainer_kwargs`: int,  keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).<br>\n",
    "\n",
    "    **References**<br>\n",
    "    - [Xiao Han, Xinfeng Zhang, Yiling Wu, Zhenduo Zhang, Zhe Wu.\"KAN4TSF: Are KAN and KAN-based models Effective for Time Series Forecasting?\". arXiv.](https://arxiv.org/abs/2408.11306)<br>\n",
    "    \"\"\"\n",
    "\n",
    "    # Class attributes\n",
    "    EXOGENOUS_FUTR = False\n",
    "    EXOGENOUS_HIST = False\n",
    "    EXOGENOUS_STAT = False\n",
    "    MULTIVARIATE = True    # If the model produces multivariate forecasts (True) or univariate (False)\n",
    "    RECURRENT = False       # If the model produces forecasts recursively (True) or direct (False)\n",
    "\n",
    "    def __init__(self,\n",
    "                 h,\n",
    "                 input_size,\n",
    "                 n_series: int,\n",
    "                 futr_exog_list = None,\n",
    "                 hist_exog_list = None,\n",
    "                 stat_exog_list = None,\n",
    "                 taylor_order: int = 3,\n",
    "                 jacobi_degree: int = 6,\n",
    "                 wavelet_function: str = 'mexican_hat',\n",
    "                 dropout: float = 0.1,\n",
    "                 revin_affine: bool = True,\n",
    "                 loss = MAE(),\n",
    "                 valid_loss = None,\n",
    "                 max_steps: int = 1000,\n",
    "                 learning_rate: float = 1e-3,\n",
    "                 num_lr_decays: int = -1,\n",
    "                 early_stop_patience_steps: int = -1,\n",
    "                 val_check_steps: int = 100,\n",
    "                 batch_size: int = 32,\n",
    "                 valid_batch_size: Optional[int] = None,\n",
    "                 windows_batch_size = 32,\n",
    "                 inference_windows_batch_size = 32,\n",
    "                 start_padding_enabled = False,\n",
    "                 step_size: int = 1,\n",
    "                 scaler_type: str = 'identity',\n",
    "                 random_seed: int = 1,\n",
    "                 drop_last_loader: bool = False,\n",
    "                 alias: Optional[str] = None,\n",
    "                 optimizer = None,\n",
    "                 optimizer_kwargs = None,\n",
    "                 lr_scheduler = None,\n",
    "                 lr_scheduler_kwargs = None,\n",
    "                 dataloader_kwargs = None,\n",
    "                 **trainer_kwargs):\n",
    "\n",
    "        # BUG FIX: the exogenous lists were previously rotated across keywords\n",
    "        # (futr<-hist, hist<-stat, stat<-futr); pass each list to its own argument.\n",
    "        super(RMoK, self).__init__(h=h,\n",
    "                                   input_size=input_size,\n",
    "                                   n_series=n_series,\n",
    "                                   futr_exog_list=futr_exog_list,\n",
    "                                   hist_exog_list=hist_exog_list,\n",
    "                                   stat_exog_list=stat_exog_list,\n",
    "                                   loss=loss,\n",
    "                                   valid_loss=valid_loss,\n",
    "                                   max_steps=max_steps,\n",
    "                                   learning_rate=learning_rate,\n",
    "                                   num_lr_decays=num_lr_decays,\n",
    "                                   early_stop_patience_steps=early_stop_patience_steps,\n",
    "                                   val_check_steps=val_check_steps,\n",
    "                                   batch_size=batch_size,\n",
    "                                   valid_batch_size=valid_batch_size,\n",
    "                                   windows_batch_size=windows_batch_size,\n",
    "                                   inference_windows_batch_size=inference_windows_batch_size,\n",
    "                                   start_padding_enabled=start_padding_enabled,\n",
    "                                   step_size=step_size,\n",
    "                                   scaler_type=scaler_type,\n",
    "                                   random_seed=random_seed,\n",
    "                                   drop_last_loader=drop_last_loader,\n",
    "                                   alias=alias,\n",
    "                                   optimizer=optimizer,\n",
    "                                   optimizer_kwargs=optimizer_kwargs,\n",
    "                                   lr_scheduler=lr_scheduler,\n",
    "                                   lr_scheduler_kwargs=lr_scheduler_kwargs,\n",
    "                                   dataloader_kwargs=dataloader_kwargs,\n",
    "                                   **trainer_kwargs)\n",
    "\n",
    "        self.input_size = input_size\n",
    "        self.h = h\n",
    "        self.n_series = n_series\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        self.revin_affine = revin_affine\n",
    "\n",
    "        self.taylor_order = taylor_order\n",
    "        self.jacobi_degree = jacobi_degree\n",
    "        self.wavelet_function = wavelet_function\n",
    "\n",
    "        # Each expert maps the flattened input window to the full forecast horizon.\n",
    "        self.experts = nn.ModuleList([\n",
    "            TaylorKANLayer(self.input_size, self.h * self.loss.outputsize_multiplier, order=self.taylor_order, addbias=True),\n",
    "            JacobiKANLayer(self.input_size, self.h * self.loss.outputsize_multiplier, degree=self.jacobi_degree),\n",
    "            WaveKANLayer(self.input_size, self.h * self.loss.outputsize_multiplier, wavelet_type=self.wavelet_function),\n",
    "            nn.Linear(self.input_size, self.h * self.loss.outputsize_multiplier),\n",
    "        ])\n",
    "\n",
    "        self.num_experts = len(self.experts)\n",
    "        self.gate = nn.Linear(self.input_size, self.num_experts)\n",
    "        self.softmax = nn.Softmax(dim=-1)  # unused in forward (F.softmax is used); kept for backward compatibility\n",
    "        self.rev = RevINMultivariate(self.n_series, affine=self.revin_affine)\n",
    "\n",
    "    def forward(self, windows_batch):\n",
    "        # insample_y: (batch, input_size, n_series)\n",
    "        insample_y = windows_batch['insample_y']\n",
    "        B, L, N = insample_y.shape\n",
    "\n",
    "        # Reversible instance normalization, then fold the series axis into the batch.\n",
    "        x = self.rev(insample_y, 'norm')\n",
    "        x = self.dropout(x).transpose(1, 2).reshape(B * N, L)\n",
    "\n",
    "        # Softmax gate assigns per-series mixing weights to every expert.\n",
    "        score = F.softmax(self.gate(x), dim=-1)\n",
    "        expert_outputs = torch.stack([self.experts[i](x) for i in range(self.num_experts)], dim=-1)\n",
    "\n",
    "        # Mix the expert forecasts, restore (batch, horizon, series) and denormalize.\n",
    "        y_pred = torch.einsum(\"BLE, BE -> BL\", expert_outputs, score).reshape(B, N, self.h * self.loss.outputsize_multiplier).permute(0, 2, 1)\n",
    "        y_pred = self.rev(y_pred, 'denorm')\n",
    "        y_pred = y_pred.reshape(B, self.h, -1)\n",
    "\n",
    "        return y_pred"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "show_doc(RMoK)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "show_doc(RMoK.fit, name='RMoK.fit')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "show_doc(RMoK.predict, name='RMoK.predict')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| hide\n",
    "# Unit tests for models\n",
    "logging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n",
    "logging.getLogger(\"lightning_fabric\").setLevel(logging.ERROR)\n",
    "with warnings.catch_warnings():\n",
    "    warnings.simplefilter(\"ignore\")\n",
    "    check_model(RMoK, [\"airpassengers\"])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3. Usage example"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| eval: false\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "from neuralforecast import NeuralForecast\n",
    "from neuralforecast.models import RMoK\n",
    "from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic\n",
    "from neuralforecast.losses.pytorch import MSE\n",
    "\n",
    "# Hold out the last 12 observations of each series for testing.\n",
    "Y_train_df = AirPassengersPanel[AirPassengersPanel.ds<AirPassengersPanel['ds'].values[-12]].reset_index(drop=True) # 132 train\n",
    "Y_test_df = AirPassengersPanel[AirPassengersPanel.ds>=AirPassengersPanel['ds'].values[-12]].reset_index(drop=True) # 12 test\n",
    "\n",
    "# RMoK configured with MSE training loss, MAE validation loss and early stopping.\n",
    "model = RMoK(h=12,\n",
    "             input_size=24,\n",
    "             n_series=2,\n",
    "             taylor_order=3,\n",
    "             jacobi_degree=6,\n",
    "             wavelet_function='mexican_hat',\n",
    "             dropout=0.1,\n",
    "             revin_affine=True,\n",
    "             loss=MSE(),\n",
    "             valid_loss=MAE(),\n",
    "             early_stop_patience_steps=3,\n",
    "             batch_size=32)\n",
    "\n",
    "# Fit with a 12-step validation window, then forecast the horizon.\n",
    "fcst = NeuralForecast(models=[model], freq='ME')\n",
    "fcst.fit(df=Y_train_df, static_df=AirPassengersStatic, val_size=12)\n",
    "forecasts = fcst.predict(futr_df=Y_test_df)\n",
    "\n",
    "# Plot predictions\n",
    "fig, ax = plt.subplots(1, 1, figsize = (20, 7))\n",
    "Y_hat_df = forecasts.reset_index(drop=False).drop(columns=['unique_id','ds'])\n",
    "plot_df = pd.concat([Y_test_df, Y_hat_df], axis=1)\n",
    "plot_df = pd.concat([Y_train_df, plot_df])\n",
    "\n",
    "# Show a single series ('Airline1') with truth and forecast overlaid.\n",
    "plot_df = plot_df[plot_df.unique_id=='Airline1'].drop('unique_id', axis=1)\n",
    "plt.plot(plot_df['ds'], plot_df['y'], c='black', label='True')\n",
    "plt.plot(plot_df['ds'], plot_df['RMoK'], c='blue', label='Forecast')\n",
    "ax.set_title('AirPassengers Forecast', fontsize=22)\n",
    "ax.set_ylabel('Monthly Passengers', fontsize=20)\n",
    "ax.set_xlabel('Year', fontsize=20)\n",
    "ax.legend(prop={'size': 15})\n",
    "ax.grid()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "python3",
   "language": "python",
   "name": "python3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
