{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#|default_exp models.multimodal"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Multimodal"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    ">Functionality used for multiple data modalities.\n",
    "\n",
    "A common scenario in time-series related tasks is the use of multiple types of inputs:\n",
    "\n",
    "* static: data that doesn't change with time\n",
    "* observed: temporal data only available in the past\n",
    "* known: temporal data available in the past and in the future\n",
    "\n",
    "At the same time, these different modalities may contain:\n",
    "\n",
    "* categorical data\n",
    "* continuous or numerical data\n",
    "\n",
    "Based on that, there are situations where we have up to 6 different types of input features:\n",
    "\n",
    "* s_cat: static continuous variables\n",
    "* o_cat: observed categorical variables\n",
    "* o_cont: observed continuous variables\n",
    "* k_cat: known categorical variables\n",
    "* k_cont: known continuous variables"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import numpy as np\n",
    "from collections import OrderedDict\n",
    "from fastcore.test import test_eq\n",
    "from fastcore.xtras import listify\n",
    "from fastcore.xtras import L\n",
    "from fastai.tabular.model import emb_sz_rule\n",
    "from tsai.imports import default_device\n",
    "from tsai.data.core import TSDataLoaders\n",
    "from tsai.data.preprocessing import PatchEncoder\n",
    "from tsai.learner import get_arch\n",
    "from tsai.models.utils import build_ts_model, output_size_calculator\n",
    "from tsai.models.layers import Reshape, LinBnDrop, get_act_fn, lin_nd_head, rocket_nd_head, GAP1d"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "def _to_list(idx):\n",
    "    if idx is None:\n",
    "        return []\n",
    "    elif isinstance(idx, int):\n",
    "        return [idx]\n",
    "    elif isinstance(idx, list):\n",
    "        return idx\n",
    "\n",
    "\n",
    "def get_o_cont_idxs(c_in, s_cat_idxs=None, s_cont_idxs=None, o_cat_idxs=None):\n",
    "    \"Calculate the indices of the observed continuous features.\"\n",
    "    all_features = np.arange(c_in).tolist()\n",
    "    for idxs in [s_cat_idxs, s_cont_idxs, o_cat_idxs]:\n",
    "        if idxs is not None:\n",
    "            if not isinstance(idxs, list): idxs = [idxs]\n",
    "            for idx in idxs:\n",
    "                all_features.remove(idx)\n",
    "    o_cont_idxs = all_features\n",
    "    return o_cont_idxs\n",
    "\n",
    "\n",
    "def get_feat_idxs(c_in, s_cat_idxs=None, s_cont_idxs=None, o_cat_idxs=None, o_cont_idxs=None):\n",
    "    \"Calculate the indices of the features used for training.\"\n",
    "    idx_list = [s_cat_idxs, s_cont_idxs, o_cat_idxs, o_cont_idxs]\n",
    "    s_cat_idxs, s_cont_idxs, o_cat_idxs, o_cont_idxs = list(map(_to_list, idx_list))\n",
    "    if not o_cont_idxs:\n",
    "        o_cont_idxs = get_o_cont_idxs(c_in, s_cat_idxs=s_cat_idxs, s_cont_idxs=s_cont_idxs, o_cat_idxs=o_cat_idxs)\n",
    "    return s_cat_idxs, s_cont_idxs, o_cat_idxs, o_cont_idxs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "c_in = 7\n",
    "s_cat_idxs = 3\n",
    "s_cont_idxs = [1, 4, 5]\n",
    "o_cat_idxs = None\n",
    "o_cont_idxs = None\n",
    "\n",
    "s_cat_idxs, s_cont_idxs, o_cat_idxs, o_cont_idxs = get_feat_idxs(c_in, s_cat_idxs=s_cat_idxs, s_cont_idxs=s_cont_idxs, o_cat_idxs=o_cat_idxs, o_cont_idxs=o_cont_idxs)\n",
    "\n",
    "test_eq(s_cat_idxs, [3])\n",
    "test_eq(s_cont_idxs, [1, 4, 5])\n",
    "test_eq(o_cat_idxs, [])\n",
    "test_eq(o_cont_idxs, [0, 2, 6])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "class TensorSplitter(nn.Module):\n",
    "    \"Split a 3D input tensor into static/observed/known, categorical/continuous slices.\"\n",
    "    def __init__(self,\n",
    "        s_cat_idxs:list=None, # list of indices for static categorical variables\n",
    "        s_cont_idxs:list=None, # list of indices for static continuous variables\n",
    "        o_cat_idxs:list=None, # list of indices for observed categorical variables\n",
    "        o_cont_idxs:list=None, # list of indices for observed continuous variables\n",
    "        k_cat_idxs:list=None, # list of indices for known categorical variables\n",
    "        k_cont_idxs:list=None, # list of indices for known continuous variables\n",
    "        horizon:int=None, # number of time steps to predict ahead\n",
    "        ):\n",
    "        super().__init__()\n",
    "        assert s_cat_idxs or s_cont_idxs or o_cat_idxs or o_cont_idxs, \"must specify at least one of s_cat_idxs, s_cont_idxs, o_cat_idxs, o_cont_idxs\"\n",
    "        if k_cat_idxs or k_cont_idxs:\n",
    "            assert horizon is not None, \"must specify horizon if using known variables\"\n",
    "        assert horizon is None or isinstance(horizon, int), \"horizon must be an integer\"\n",
    "        self.s_cat_idxs = self._to_list(s_cat_idxs)\n",
    "        self.s_cont_idxs = self._to_list(s_cont_idxs)\n",
    "        self.o_cat_idxs = self._to_list(o_cat_idxs)\n",
    "        self.o_cont_idxs = self._to_list(o_cont_idxs)\n",
    "        self.k_cat_idxs = self._to_list(k_cat_idxs)\n",
    "        self.k_cont_idxs = self._to_list(k_cont_idxs)\n",
    "        # known groups are only split off when a horizon is provided\n",
    "        idx_list = [self.s_cat_idxs, self.s_cont_idxs, self.o_cat_idxs, self.o_cont_idxs]\n",
    "        if horizon:\n",
    "            idx_list += [self.k_cat_idxs, self.k_cont_idxs]\n",
    "        self.idx_list = idx_list\n",
    "        self._check_overlap()\n",
    "        self.horizon = horizon\n",
    "\n",
    "    def _check_overlap(self):\n",
    "        \"Ensure no feature index is assigned to more than one group.\"\n",
    "        indices = []\n",
    "        for idxs in self.idx_list:\n",
    "            indices += idxs\n",
    "        if len(indices) != len(set(indices)):\n",
    "            raise ValueError(\"Indices must not overlap between the s_cat, s_cont, o_cat, o_cont, k_cat and k_cont index groups\")\n",
    "\n",
    "    @staticmethod\n",
    "    def _to_list(idx):\n",
    "        \"Normalize an index spec (None, an int, or a sequence of ints) to a plain list.\"\n",
    "        if idx is None:\n",
    "            return []\n",
    "        elif isinstance(idx, int):\n",
    "            return [idx]\n",
    "        else:\n",
    "            return list(idx)\n",
    "\n",
    "    def forward(self, input_tensor):\n",
    "        slices = []\n",
    "        for idx, idxs in enumerate(self.idx_list):\n",
    "            if idxs:\n",
    "                if idx == 0:  # s_cat_idxs: static categorical -> 2D long tensor\n",
    "                    slices.append(input_tensor[:, idxs, 0].long())\n",
    "                elif idx == 1:  # s_cont_idxs: static continuous -> 2D tensor, input dtype preserved (no lossy long cast)\n",
    "                    slices.append(input_tensor[:, idxs, 0])\n",
    "                elif idx < 4 and self.horizon is not None:  # o_cat_idxs or o_cont_idxs when known variables are used: drop the horizon steps\n",
    "                    slices.append(input_tensor[:, idxs, :-self.horizon])\n",
    "                else:  # k_cat_idxs or k_cont_idxs, or o_cat_idxs/o_cont_idxs when horizon is None: keep the full sequence\n",
    "                    slices.append(input_tensor[:, idxs, :])\n",
    "            else:\n",
    "                if idx < 2:  # empty static group -> 2D empty tensor\n",
    "                    slices.append(torch.empty((input_tensor.size(0), 0), device=input_tensor.device))\n",
    "                elif idx < 4 and self.horizon is not None:  # empty observed group (horizon steps removed)\n",
    "                    slices.append(torch.empty((input_tensor.size(0), 0, input_tensor.size(2)-self.horizon), device=input_tensor.device))\n",
    "                else:  # empty known/observed group over the full sequence\n",
    "                    slices.append(torch.empty((input_tensor.size(0), 0, input_tensor.size(2)), device=input_tensor.device))\n",
    "        return slices\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Slice 1: torch.Size([4, 1]) torch.int64\n",
      "Slice 2: torch.Size([4, 2]) torch.float32\n",
      "Slice 3: torch.Size([4, 3, 10]) torch.float32\n",
      "Slice 4: torch.Size([4, 0, 10]) torch.float32\n"
     ]
    }
   ],
   "source": [
    "# Example usage\n",
    "bs = 4\n",
    "s_cat_idxs = 1\n",
    "s_cont_idxs = [0, 2]\n",
    "o_cat_idxs = [3, 4, 5]\n",
    "o_cont_idxs = None\n",
    "k_cat_idxs = None\n",
    "k_cont_idxs = None\n",
    "horizon = None\n",
    "input_tensor = torch.randn(bs, 6, 10)  # 3D input tensor\n",
    "splitter = TensorSplitter(s_cat_idxs=s_cat_idxs, s_cont_idxs=s_cont_idxs,\n",
    "                          o_cat_idxs=o_cat_idxs, o_cont_idxs=o_cont_idxs)\n",
    "slices = splitter(input_tensor)\n",
    "for i, slice_tensor in enumerate(slices):\n",
    "    print(f\"Slice {i+1}: {slice_tensor.shape} {slice_tensor.dtype}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Slice 1: torch.Size([4, 1]) torch.int64\n",
      "Slice 2: torch.Size([4, 2]) torch.float32\n",
      "Slice 3: torch.Size([4, 3, 7]) torch.float32\n",
      "Slice 4: torch.Size([4, 0, 7]) torch.float32\n",
      "Slice 5: torch.Size([4, 2, 10]) torch.float32\n",
      "Slice 6: torch.Size([4, 1, 10]) torch.float32\n"
     ]
    }
   ],
   "source": [
    "# Example usage with known variables and a forecasting horizon\n",
    "bs = 4\n",
    "s_cat_idxs = 1\n",
    "s_cont_idxs = [0, 2]\n",
    "o_cat_idxs = [3, 4, 5]\n",
    "o_cont_idxs = None\n",
    "k_cat_idxs = [6, 7]\n",
    "k_cont_idxs = 8\n",
    "horizon = 3\n",
    "input_tensor = torch.randn(bs, 9, 10)  # 3D input tensor\n",
    "splitter = TensorSplitter(s_cat_idxs=s_cat_idxs, s_cont_idxs=s_cont_idxs,\n",
    "                          o_cat_idxs=o_cat_idxs, o_cont_idxs=o_cont_idxs,\n",
    "                          k_cat_idxs=k_cat_idxs, k_cont_idxs=k_cont_idxs, horizon=horizon)\n",
    "slices = splitter(input_tensor)\n",
    "for i, slice_tensor in enumerate(slices):\n",
    "    print(f\"Slice {i+1}: {slice_tensor.shape} {slice_tensor.dtype}\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "class Embeddings(nn.Module):\n",
    "    \"Embedding layers for each categorical variable in a 2D or 3D tensor\"\n",
    "    def __init__(self,\n",
    "        n_embeddings:list, # List of num_embeddings for each categorical variable\n",
    "        embedding_dims:list=None, # List of embedding dimensions for each categorical variable. None (globally or per variable) applies fastai's emb_sz_rule.\n",
    "        padding_idx:int=0, # Embedding padding_idx\n",
    "        embed_dropout:float=0., # Dropout probability for `Embedding` layer\n",
    "        **kwargs\n",
    "        ):\n",
    "        super().__init__()\n",
    "        if not isinstance(n_embeddings, list): n_embeddings = [n_embeddings]\n",
    "        if embedding_dims is None: embedding_dims = [None] * len(n_embeddings)\n",
    "        if not isinstance(embedding_dims, list): embedding_dims = [embedding_dims]\n",
    "        assert len(n_embeddings) == len(embedding_dims)\n",
    "        # fall back to emb_sz_rule for any embedding dimension left as None, keeping user-provided dims\n",
    "        embedding_dims = [emb_sz_rule(n) if d is None else d for d, n in zip(embedding_dims, n_embeddings)]\n",
    "        self.embedding_dims = sum(embedding_dims)\n",
    "        self.embedding_layers = nn.ModuleList([nn.Sequential(nn.Embedding(n,d,padding_idx=padding_idx, **kwargs),\n",
    "                                                             nn.Dropout(embed_dropout)) for n,d in zip(n_embeddings, embedding_dims)])\n",
    "\n",
    "    def forward(self, x):\n",
    "        # 2D input (bs, n_cat) -> (bs, sum(dims)); 3D input (bs, n_cat, seq_len) -> (bs, sum(dims), seq_len)\n",
    "        if x.ndim == 2:\n",
    "            return torch.cat([e(x[:,i].long()) for i,e in enumerate(self.embedding_layers)],1)\n",
    "        elif x.ndim == 3:\n",
    "            return torch.cat([e(x[:,i].long()).transpose(1,2) for i,e in enumerate(self.embedding_layers)],1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "t1 = torch.randint(0, 7, (16, 1))\n",
    "t2 = torch.randint(0, 5, (16, 1))\n",
    "t = torch.cat([t1, t2], 1).float()\n",
    "emb = Embeddings([7, 5], None, embed_dropout=0.1)\n",
    "test_eq(emb(t).shape, (16, 9))  # emb_sz_rule: 7 -> 5, 5 -> 4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "t1 = torch.randint(0, 7, (16, 1))\n",
    "t2 = torch.randint(0, 5, (16, 1))\n",
    "t = torch.cat([t1, t2], 1).float()\n",
    "emb = Embeddings([7, 5], [4, 3])\n",
    "test_eq(emb(t).shape, (16, 7))  # explicit embedding dims: 4 + 3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "t1 = torch.randint(0, 7, (16, 1, 10))\n",
    "t2 = torch.randint(0, 5, (16, 1, 10))\n",
    "t = torch.cat([t1, t2], 1).float()\n",
    "emb = Embeddings([7, 5], None)\n",
    "test_eq(emb(t).shape, (16, 9, 10))  # emb_sz_rule: 7 -> 5, 5 -> 4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "class StaticBackbone(nn.Module):\n",
    "    \"Static backbone model to embed static features\"\n",
    "    def __init__(self, c_in, c_out, seq_len, d=None, layers=[200, 100], dropouts=[0.1, 0.2], act=nn.ReLU(inplace=True), use_bn=False, lin_first=False):\n",
    "        super().__init__()\n",
    "        layers, dropouts = L(layers), L(dropouts)\n",
    "        if len(dropouts) <= 1: dropouts = dropouts * len(layers)\n",
    "        assert len(layers) == len(dropouts), '#layers and #dropout must match'\n",
    "        self.flatten = Reshape()\n",
    "        nf = [c_in * seq_len] + layers\n",
    "        self.mlp = nn.ModuleList()\n",
    "        for i in range(len(layers)): self.mlp.append(LinBnDrop(nf[i], nf[i+1], bn=use_bn, p=dropouts[i], act=get_act_fn(act), lin_first=lin_first))\n",
    "        self.head_nf = nf[-1]\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.flatten(x)\n",
    "        for mlp in self.mlp: x = mlp(x)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Input shape: torch.Size([4, 6, 10]) Output shape: torch.Size([4, 100])\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "StaticBackbone(\n",
       "  (flatten): Reshape(bs)\n",
       "  (mlp): ModuleList(\n",
       "    (0): LinBnDrop(\n",
       "      (0): Dropout(p=0.1, inplace=False)\n",
       "      (1): Linear(in_features=60, out_features=200, bias=True)\n",
       "      (2): ReLU(inplace=True)\n",
       "    )\n",
       "    (1): LinBnDrop(\n",
       "      (0): Dropout(p=0.2, inplace=False)\n",
       "      (1): Linear(in_features=200, out_features=100, bias=True)\n",
       "      (2): ReLU(inplace=True)\n",
       "    )\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Example usage\n",
    "bs = 4\n",
    "c_in = 6\n",
    "c_out = 8\n",
    "seq_len = 10\n",
    "input_tensor = torch.randn(bs, c_in, seq_len)  # 3D input tensor\n",
    "backbone = StaticBackbone(c_in, c_out, seq_len)\n",
    "output_tensor = backbone(input_tensor)\n",
    "print(f\"Input shape: {input_tensor.shape} Output shape: {output_tensor.shape}\")\n",
    "backbone"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# class MultInputWrapper(nn.Module):\n",
    "#     \"Model wrapper for input tensors with static and/ or observed, categorical and/ or numerical features.\"\n",
    "\n",
    "#     def __init__(self,\n",
    "#         arch,\n",
    "#         c_in:int=None, # number of input variables\n",
    "#         c_out:int=None, # number of output variables\n",
    "#         seq_len:int=None, # input sequence length\n",
    "#         d:tuple=None, # shape of the output tensor\n",
    "#         dls:TSDataLoaders=None, # TSDataLoaders object\n",
    "#         s_cat_idxs:list=None, # list of indices for static categorical variables\n",
    "#         s_cat_embeddings:list=None, # list of num_embeddings for each static categorical variable\n",
    "#         s_cat_embedding_dims:list=None, # list of embedding dimensions for each static categorical variable\n",
    "#         s_cont_idxs:list=None, # list of indices for static continuous variables\n",
    "#         o_cat_idxs:list=None, # list of indices for observed categorical variables\n",
    "#         o_cat_embeddings:list=None, # list of num_embeddings for each observed categorical variable\n",
    "#         o_cat_embedding_dims:list=None, # list of embedding dimensions for each observed categorical variable\n",
    "#         o_cont_idxs:list=None, # list of indices for observed continuous variables. All features not in s_cat_idxs, s_cont_idxs, o_cat_idxs are considered observed continuous variables.\n",
    "#         patch_len:int=None, # Number of time steps in each patch.\n",
    "#         patch_stride:int=None, # Stride of the patch.\n",
    "#         flatten:bool=False, # boolean indicating whether to flatten bacbone's output tensor\n",
    "#         use_bn:bool=False, # boolean indicating whether to use batch normalization in the head\n",
    "#         fc_dropout:float=0., # dropout probability for the fully connected layer in the head\n",
    "#         custom_head=None, # custom head to replace the default head\n",
    "#         **kwargs\n",
    "#     ):\n",
    "#         super().__init__()\n",
    "\n",
    "#         # attributes\n",
    "#         c_in = c_in or dls.vars\n",
    "#         c_out = c_out or dls.c\n",
    "#         seq_len = seq_len or dls.len\n",
    "#         d = d or (dls.d if dls is not None else None)\n",
    "#         self.c_in, self.c_out, self.seq_len, self.d = c_in, c_out, seq_len, d\n",
    "\n",
    "#         # tensor splitter\n",
    "#         if o_cont_idxs is None:\n",
    "#             o_cont_idxs = get_o_cont_idxs(c_in, s_cat_idxs=s_cat_idxs, s_cont_idxs=s_cont_idxs, o_cat_idxs=o_cat_idxs)\n",
    "#         self.splitter = TensorSplitter(s_cat_idxs, s_cont_idxs, o_cat_idxs, o_cont_idxs)\n",
    "#         s_cat_idxs, s_cont_idxs, o_cat_idxs, o_cont_idxs = self.splitter.s_cat_idxs, self.splitter.s_cont_idxs, self.splitter.o_cat_idxs, self.splitter.o_cont_idxs\n",
    "#         assert c_in == sum([len(s_cat_idxs), len(s_cont_idxs), len(o_cat_idxs), len(o_cont_idxs)])\n",
    "\n",
    "#         # embeddings\n",
    "#         self.s_embeddings = Embeddings(s_cat_embeddings, s_cat_embedding_dims)\n",
    "#         self.o_embeddings = Embeddings(o_cat_embeddings, o_cat_embedding_dims)\n",
    "\n",
    "#         # patch encoder\n",
    "#         if patch_len is not None:\n",
    "#             patch_stride = patch_stride or patch_len\n",
    "#             self.patch_encoder = PatchEncoder(patch_len, patch_stride, seq_len=seq_len)\n",
    "#             c_mult = patch_len\n",
    "#             seq_len = (seq_len + self.patch_encoder.pad_size - patch_len) // patch_stride + 1\n",
    "#         else:\n",
    "#             self.patch_encoder = nn.Identity()\n",
    "#             c_mult = 1\n",
    "\n",
    "#         # backbone\n",
    "#         n_s_features = len(s_cont_idxs) + self.s_embeddings.embedding_dims\n",
    "#         n_o_features = (len(o_cont_idxs) + self.o_embeddings.embedding_dims) * c_mult\n",
    "#         s_backbone = StaticBackbone(c_in=n_s_features, c_out=c_out, seq_len=1, **kwargs)\n",
    "#         if isinstance(arch, str):\n",
    "#             arch = get_arch(arch)\n",
    "#         if isinstance(arch, nn.Module):\n",
    "#             o_model = arch\n",
    "#         else:\n",
    "#             o_model = build_ts_model(arch, c_in=n_o_features, c_out=c_out, seq_len=seq_len, d=d, **kwargs)\n",
    "#         assert hasattr(o_model, \"backbone\"), \"the selected arch must have a backbone\"\n",
    "#         o_backbone = getattr(o_model, \"backbone\")\n",
    "\n",
    "#         # head\n",
    "#         o_head_nf = output_size_calculator(o_backbone, n_o_features, seq_len)[0]\n",
    "#         s_head_nf = s_backbone.head_nf\n",
    "#         self.backbone = nn.ModuleList([o_backbone, s_backbone])\n",
    "#         self.head_nf = o_head_nf + s_head_nf\n",
    "#         if custom_head is not None:\n",
    "#             if isinstance(custom_head, nn.Module): self.head = custom_head\n",
    "#             else:self. head = custom_head(self.head_nf, c_out, seq_len, d=d)\n",
    "#         else:\n",
    "#             if \"rocket\" in o_model.__name__.lower():\n",
    "#                 self.head = rocket_nd_head(self.head_nf, c_out, seq_len=seq_len, d=d, use_bn=use_bn, fc_dropout=fc_dropout)\n",
    "#             else:\n",
    "#                 self.head = lin_nd_head(self.head_nf, c_out, seq_len=seq_len, d=d, flatten=flatten, use_bn=use_bn, fc_dropout=fc_dropout)\n",
    "\n",
    "#     def forward(self, x):\n",
    "#         # split x into static cat, static cont, observed cat, and observed cont\n",
    "#         s_cat, s_cont, o_cat, o_cont = self.splitter(x)\n",
    "\n",
    "#         # create categorical embeddings\n",
    "#         s_cat = self.s_embeddings(s_cat)\n",
    "#         o_cat = self.o_embeddings(o_cat)\n",
    "\n",
    "#         # contatenate static and observed features\n",
    "#         s_x = torch.cat([s_cat, s_cont], 1)\n",
    "#         o_x = torch.cat([o_cat, o_cont], 1)\n",
    "\n",
    "#         # patch encoder\n",
    "#         o_x = self.patch_encoder(o_x)\n",
    "\n",
    "#         # pass static and observed features through their respective backbones\n",
    "#         for i,(b,xi) in enumerate(zip(self.backbone, [o_x, s_x])):\n",
    "#             if i == 0:\n",
    "#                 x = b(xi)\n",
    "#                 if x.ndim == 2:\n",
    "#                     x = x[..., None]\n",
    "#             else:\n",
    "#                 x = torch.cat([x,  b(xi)[..., None].repeat(1, 1, x.shape[-1])], 1)\n",
    "\n",
    "#         # head\n",
    "#         x = self.head(x)\n",
    "#         return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# from tsai.models.InceptionTimePlus import InceptionTimePlus"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# c_in = 6\n",
    "# c_out = 3\n",
    "# seq_len = 97\n",
    "# d = None\n",
    "\n",
    "# s_cat_idxs=2\n",
    "# s_cont_idxs=4\n",
    "# o_cat_idxs=[0, 3]\n",
    "# o_cont_idxs=None\n",
    "# s_cat_embeddings = 5\n",
    "# s_cat_embedding_dims = None\n",
    "# o_cat_embeddings = [7, 3]\n",
    "# o_cat_embedding_dims = [3, None]\n",
    "\n",
    "# t0 = torch.randint(0, 7, (16, 1, seq_len)) # cat\n",
    "# t1 = torch.randn(16, 1, seq_len)\n",
    "# t2 = torch.randint(0, 5, (16, 1, seq_len)) # cat\n",
    "# t3 = torch.randint(0, 3, (16, 1, seq_len)) # cat\n",
    "# t4 = torch.randn(16, 1, seq_len)\n",
    "# t5 = torch.randn(16, 1, seq_len)\n",
    "\n",
    "# t = torch.cat([t0, t1, t2, t3, t4, t5], 1).float()\n",
    "\n",
    "# patch_lens = [None, 5, 5, 5, 5]\n",
    "# patch_strides = [None, None, 1, 3, 5]\n",
    "# for patch_len, patch_stride in zip(patch_lens, patch_strides):\n",
    "#     for arch in [\"InceptionTimePlus\", InceptionTimePlus, \"MultiRocketPlus\"]:\n",
    "#         print(f\"arch: {arch}, patch_len: {patch_len}, patch_stride: {patch_stride}\")\n",
    "\n",
    "#         model = MultInputWrapper(\n",
    "#             arch=arch,\n",
    "#             c_in=c_in,\n",
    "#             c_out=c_out,\n",
    "#             seq_len=seq_len,\n",
    "#             d=d,\n",
    "#             s_cat_idxs=s_cat_idxs, s_cat_embeddings=s_cat_embeddings, s_cat_embedding_dims=s_cat_embedding_dims,\n",
    "#             s_cont_idxs=s_cont_idxs,\n",
    "#             o_cat_idxs=o_cat_idxs, o_cat_embeddings=o_cat_embeddings, o_cat_embedding_dims=o_cat_embedding_dims,\n",
    "#             o_cont_idxs=o_cont_idxs,\n",
    "#             patch_len=patch_len,\n",
    "#             patch_stride=patch_stride,\n",
    "#         )\n",
    "\n",
    "#         test_eq(model(t).shape, (16,3))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "class FusionMLP(nn.Module):\n",
    "    def __init__(self, comb_dim, layers, act='relu', dropout=0., use_bn=True):\n",
    "        super().__init__()\n",
    "        self.avg_pool = GAP1d(1)\n",
    "        layers = listify(layers)\n",
    "        if not isinstance(dropout, list): dropout = [dropout]\n",
    "        if len(dropout) != len(layers): dropout = dropout * len(layers)\n",
    "        l = []\n",
    "        for i,s in enumerate(layers):\n",
    "            if use_bn: l.append(nn.BatchNorm1d(comb_dim if i == 0 else prev_s))\n",
    "            if dropout[i]: l.append(nn.Dropout(dropout[i]))\n",
    "            l.append(nn.Linear(comb_dim if i == 0 else prev_s, s))\n",
    "            if act: l.append(get_act_fn(act))\n",
    "            prev_s = s\n",
    "        if l:\n",
    "            self.mlp = nn.Sequential(*l)\n",
    "        else:\n",
    "            self.mlp = nn.Identity()\n",
    "\n",
    "    def forward(self, x_cat, x_cont, x_emb):\n",
    "        if x_emb.ndim == 3:\n",
    "            x_emb = self.avg_pool(x_emb)\n",
    "        output = torch.cat([x_cat, x_cont, x_emb], 1)\n",
    "        output = self.mlp(output)\n",
    "        return output"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "bs = 16\n",
    "emb_dim = 128\n",
    "seq_len = 20\n",
    "cat_dim = 24\n",
    "cont_feat = 3\n",
    "\n",
    "comb_dim = emb_dim + cat_dim + cont_feat\n",
    "emb = torch.randn(bs, emb_dim, seq_len)\n",
    "cat = torch.randn(bs, cat_dim)\n",
    "cont = torch.randn(bs, cont_feat)\n",
    "fusion_mlp = FusionMLP(comb_dim, layers=comb_dim, act='relu', dropout=.1)\n",
    "output = fusion_mlp(cat, cont, emb)\n",
    "test_eq(output.shape, (bs, comb_dim))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "bs = 16\n",
    "emb_dim = 50000\n",
    "cat_dim = 24\n",
    "cont_feat = 3\n",
    "\n",
    "comb_dim = emb_dim + cat_dim + cont_feat\n",
    "emb = torch.randn(bs, emb_dim)\n",
    "cat = torch.randn(bs, cat_dim)\n",
    "cont = torch.randn(bs, cont_feat)\n",
    "fusion_mlp = FusionMLP(comb_dim, layers=[128], act='relu', dropout=.1)\n",
    "output = fusion_mlp(cat, cont, emb)\n",
    "test_eq(output.shape, (bs, 128))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "class MultInputBackboneWrapper(nn.Module):\n",
    "    \"Model backbone wrapper for input tensors with static and/ or observed, categorical and/ or numerical features.\"\n",
    "\n",
    "    def __init__(self,\n",
    "        arch,\n",
    "        c_in:int=None, # number of input variables\n",
    "        seq_len:int=None, # input sequence length\n",
    "        d:tuple=None, # shape of the output tensor\n",
    "        dls:TSDataLoaders=None, # TSDataLoaders object\n",
    "        s_cat_idxs:list=None, # list of indices for static categorical variables\n",
    "        s_cat_embeddings:list=None, # list of num_embeddings for each static categorical variable\n",
    "        s_cat_embedding_dims:list=None, # list of embedding dimensions for each static categorical variable\n",
    "        s_cont_idxs:list=None, # list of indices for static continuous variables\n",
    "        o_cat_idxs:list=None, # list of indices for observed categorical variables\n",
    "        o_cat_embeddings:list=None, # list of num_embeddings for each observed categorical variable\n",
    "        o_cat_embedding_dims:list=None, # list of embedding dimensions for each observed categorical variable\n",
    "        o_cont_idxs:list=None, # list of indices for observed continuous variables. All features not in s_cat_idxs, s_cont_idxs, o_cat_idxs are considered observed continuous variables.\n",
    "        patch_len:int=None, # Number of time steps in each patch.\n",
    "        patch_stride:int=None, # Stride of the patch.\n",
    "        fusion_layers:list=[128], # list of layer dimensions for the fusion MLP\n",
    "        fusion_act:str='relu', # activation function for the fusion MLP\n",
    "        fusion_dropout:float=0., # dropout probability for the fusion MLP\n",
    "        fusion_use_bn:bool=True, # boolean indicating whether to use batch normalization in the fusion MLP\n",
    "        **kwargs\n",
    "    ):\n",
    "        super().__init__()\n",
    "\n",
    "        # attributes\n",
    "        c_in = c_in or dls.vars\n",
    "        seq_len = seq_len or dls.len\n",
    "        d = d or (dls.d if dls is not None else None)\n",
    "        self.c_in, self.seq_len, self.d = c_in, seq_len, d\n",
    "\n",
    "        # tensor splitter\n",
    "        if o_cont_idxs is None:\n",
    "            o_cont_idxs = get_o_cont_idxs(c_in, s_cat_idxs=s_cat_idxs, s_cont_idxs=s_cont_idxs, o_cat_idxs=o_cat_idxs)\n",
    "        self.splitter = TensorSplitter(s_cat_idxs, s_cont_idxs, o_cat_idxs, o_cont_idxs)\n",
    "        s_cat_idxs, s_cont_idxs, o_cat_idxs, o_cont_idxs = self.splitter.s_cat_idxs, self.splitter.s_cont_idxs, self.splitter.o_cat_idxs, self.splitter.o_cont_idxs\n",
    "        assert c_in == sum([len(s_cat_idxs), len(s_cont_idxs), len(o_cat_idxs), len(o_cont_idxs)])\n",
    "\n",
    "        # embeddings\n",
    "        self.s_embeddings = Embeddings(s_cat_embeddings, s_cat_embedding_dims) if s_cat_idxs else nn.Identity()\n",
    "        self.o_embeddings = Embeddings(o_cat_embeddings, o_cat_embedding_dims) if o_cat_idxs else nn.Identity()\n",
    "\n",
    "        # patch encoder\n",
    "        if patch_len is not None:\n",
    "            patch_stride = patch_stride or patch_len\n",
    "            self.patch_encoder = PatchEncoder(patch_len, patch_stride, seq_len=seq_len)\n",
    "            c_mult = patch_len\n",
    "            seq_len = (seq_len + self.patch_encoder.pad_size - patch_len) // patch_stride + 1\n",
    "        else:\n",
    "            self.patch_encoder = nn.Identity()\n",
    "            c_mult = 1\n",
    "\n",
    "        # backbone\n",
    "        n_s_features = len(s_cont_idxs) + (self.s_embeddings.embedding_dims if s_cat_idxs else 0)\n",
    "        n_o_features = (len(o_cont_idxs) + (self.o_embeddings.embedding_dims if o_cat_idxs else 0)) * c_mult\n",
    "        if isinstance(arch, str):\n",
    "            arch = get_arch(arch)\n",
    "        if isinstance(arch, nn.Module):\n",
    "            o_model = arch\n",
    "        else:\n",
    "            o_model = build_ts_model(arch, c_in=n_o_features, c_out=1, seq_len=seq_len, d=d, **kwargs)\n",
    "        assert hasattr(o_model, \"backbone\"), \"the selected arch must have a backbone\"\n",
    "        o_backbone = getattr(o_model, \"backbone\")\n",
    "        self.o_backbone = o_backbone\n",
    "        backbone_features = output_size_calculator(o_backbone, n_o_features, seq_len)[0]\n",
    "\n",
    "        # fusion layer\n",
    "        fusion_layers = listify(fusion_layers)\n",
    "        self.fusion_layer = FusionMLP(n_s_features + backbone_features, layers=fusion_layers, act=fusion_act, dropout=fusion_dropout, use_bn=fusion_use_bn)\n",
    "        self.head_nf = fusion_layers[-1]\n",
    "\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"Run the multimodal backbone on `x`.\n",
    "\n",
    "        `x` is a (bs, c_in, seq_len) tensor with all static/observed,\n",
    "        categorical/continuous variables stacked along the channel dim.\n",
    "        The channels are split by group, categorical channels are embedded,\n",
    "        the observed stream is (optionally) patched and encoded by the\n",
    "        time-series backbone, and the result is combined with the static\n",
    "        features by the fusion layer, whose output is returned.\n",
    "        \"\"\"\n",
    "        # split x into static cat, static cont, observed cat, and observed cont\n",
    "        s_cat, s_cont, o_cat, o_cont = self.splitter(x)\n",
    "\n",
    "        # create categorical embeddings (nn.Identity no-op when a group has no cat idxs)\n",
    "        s_cat = self.s_embeddings(s_cat)\n",
    "        o_cat = self.o_embeddings(o_cat)\n",
    "\n",
    "        # concatenate observed features along the channel dimension\n",
    "        o_x = torch.cat([o_cat, o_cont], 1)\n",
    "\n",
    "        # patch encoder (nn.Identity when patch_len is None)\n",
    "        o_x = self.patch_encoder(o_x)\n",
    "\n",
    "        # pass static and observed features through their respective backbones\n",
    "        o_x = self.o_backbone(o_x)\n",
    "\n",
    "        # fusion layer combines static features with the backbone output\n",
    "        x = self.fusion_layer(s_cat, s_cont, o_x)\n",
    "\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#| export\n",
    "class MultInputWrapper(nn.Sequential):\n",
    "    \"Combines a `MultInputBackboneWrapper` and an output head into one sequential model.\"\n",
    "    def __init__(self,\n",
    "        arch,\n",
    "        c_in:int=None, # number of input variables\n",
    "        c_out:int=1, # number of output variables\n",
    "        seq_len:int=None, # input sequence length\n",
    "        d:tuple=None, # shape of the output tensor\n",
    "        dls:TSDataLoaders=None, # TSDataLoaders object\n",
    "        s_cat_idxs:list=None, # list of indices for static categorical variables\n",
    "        s_cat_embeddings:list=None, # list of num_embeddings for each static categorical variable\n",
    "        s_cat_embedding_dims:list=None, # list of embedding dimensions for each static categorical variable\n",
    "        s_cont_idxs:list=None, # list of indices for static continuous variables\n",
    "        o_cat_idxs:list=None, # list of indices for observed categorical variables\n",
    "        o_cat_embeddings:list=None, # list of num_embeddings for each observed categorical variable\n",
    "        o_cat_embedding_dims:list=None, # list of embedding dimensions for each observed categorical variable\n",
    "        o_cont_idxs:list=None, # list of indices for observed continuous variables. All features not in s_cat_idxs, s_cont_idxs, o_cat_idxs are considered observed continuous variables.\n",
    "        patch_len:int=None, # Number of time steps in each patch.\n",
    "        patch_stride:int=None, # Stride of the patch.\n",
    "        fusion_layers:list=128, # list of layer dimensions for the fusion MLP\n",
    "        fusion_act:str='relu', # activation function for the fusion MLP\n",
    "        fusion_dropout:float=0., # dropout probability for the fusion MLP\n",
    "        fusion_use_bn:bool=True, # boolean indicating whether to use batch normalization in the fusion MLP\n",
    "        custom_head=None, # custom head to replace the default head\n",
    "        **kwargs\n",
    "    ):\n",
    "\n",
    "        # backbone: splits modalities, embeds categoricals and encodes the observed stream\n",
    "        body = MultInputBackboneWrapper(\n",
    "            arch, c_in=c_in, seq_len=seq_len, d=d, dls=dls,\n",
    "            s_cat_idxs=s_cat_idxs, s_cat_embeddings=s_cat_embeddings, s_cat_embedding_dims=s_cat_embedding_dims,\n",
    "            s_cont_idxs=s_cont_idxs,\n",
    "            o_cat_idxs=o_cat_idxs, o_cat_embeddings=o_cat_embeddings, o_cat_embedding_dims=o_cat_embedding_dims,\n",
    "            o_cont_idxs=o_cont_idxs, patch_len=patch_len, patch_stride=patch_stride,\n",
    "            fusion_layers=fusion_layers, fusion_act=fusion_act,\n",
    "            fusion_dropout=fusion_dropout, fusion_use_bn=fusion_use_bn, **kwargs)\n",
    "\n",
    "        # attributes exposed for downstream tooling\n",
    "        self.head_nf = body.head_nf  # number of features entering the head\n",
    "        self.c_out = c_out\n",
    "        self.seq_len = seq_len\n",
    "\n",
    "        # head: custom module/callable when provided, plain linear layer otherwise\n",
    "        if not custom_head:\n",
    "            final_head = nn.Linear(self.head_nf, c_out)\n",
    "        elif isinstance(custom_head, nn.Module):\n",
    "            final_head = custom_head\n",
    "        else:\n",
    "            final_head = custom_head(self.head_nf, c_out, seq_len, d=d)\n",
    "\n",
    "        # register both stages so the model runs as backbone -> head\n",
    "        super().__init__(OrderedDict([('backbone', body), ('head', final_head)]))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from tsai.models.InceptionTimePlus import InceptionTimePlus"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "arch: InceptionTimePlus, patch_len: None, patch_stride: None\n",
      "arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: None, patch_stride: None\n",
      "arch: TSiTPlus, patch_len: None, patch_stride: None\n",
      "arch: InceptionTimePlus, patch_len: 5, patch_stride: None\n",
      "arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: None\n",
      "arch: TSiTPlus, patch_len: 5, patch_stride: None\n",
      "arch: InceptionTimePlus, patch_len: 5, patch_stride: 1\n",
      "arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 1\n",
      "arch: TSiTPlus, patch_len: 5, patch_stride: 1\n",
      "arch: InceptionTimePlus, patch_len: 5, patch_stride: 3\n",
      "arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 3\n",
      "arch: TSiTPlus, patch_len: 5, patch_stride: 3\n",
      "arch: InceptionTimePlus, patch_len: 5, patch_stride: 5\n",
      "arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 5\n",
      "arch: TSiTPlus, patch_len: 5, patch_stride: 5\n"
     ]
    }
   ],
   "source": [
    "# Integration test: all four input groups present (static cat, static cont,\n",
    "# observed cat, observed cont), across several archs and patch settings.\n",
    "bs = 8\n",
    "c_in = 6\n",
    "c_out = 3\n",
    "seq_len = 97\n",
    "d = None\n",
    "\n",
    "s_cat_idxs=2\n",
    "s_cont_idxs=4\n",
    "o_cat_idxs=[0, 3]\n",
    "o_cont_idxs=None  # remaining channels (1, 5) become observed continuous\n",
    "s_cat_embeddings = 5\n",
    "s_cat_embedding_dims = None\n",
    "o_cat_embeddings = [7, 3]\n",
    "o_cat_embedding_dims = [3, None]\n",
    "\n",
    "fusion_layers = 128\n",
    "\n",
    "t0 = torch.randint(0, 7, (bs, 1, seq_len)) # cat\n",
    "t1 = torch.randn(bs, 1, seq_len)\n",
    "t2 = torch.randint(0, 5, (bs, 1, seq_len)) # cat\n",
    "t3 = torch.randint(0, 3, (bs, 1, seq_len)) # cat\n",
    "t4 = torch.randn(bs, 1, seq_len)\n",
    "t5 = torch.randn(bs, 1, seq_len)\n",
    "\n",
    "t = torch.cat([t0, t1, t2, t3, t4, t5], 1).float().to(default_device())\n",
    "\n",
    "patch_lens = [None, 5, 5, 5, 5]\n",
    "patch_strides = [None, None, 1, 3, 5]\n",
    "for patch_len, patch_stride in zip(patch_lens, patch_strides):\n",
    "    for arch in [\"InceptionTimePlus\", InceptionTimePlus, \"TSiTPlus\"]:\n",
    "        print(f\"arch: {arch}, patch_len: {patch_len}, patch_stride: {patch_stride}\")\n",
    "\n",
    "        model = MultInputWrapper(\n",
    "            arch=arch,\n",
    "            c_in=c_in,\n",
    "            c_out=c_out,\n",
    "            seq_len=seq_len,\n",
    "            d=d,\n",
    "            s_cat_idxs=s_cat_idxs, s_cat_embeddings=s_cat_embeddings, s_cat_embedding_dims=s_cat_embedding_dims,\n",
    "            s_cont_idxs=s_cont_idxs,\n",
    "            o_cat_idxs=o_cat_idxs, o_cat_embeddings=o_cat_embeddings, o_cat_embedding_dims=o_cat_embedding_dims,\n",
    "            o_cont_idxs=o_cont_idxs,\n",
    "            patch_len=patch_len,\n",
    "            patch_stride=patch_stride,\n",
    "            fusion_layers=fusion_layers,\n",
    "        ).to(default_device())\n",
    "\n",
    "        test_eq(model(t).shape, (bs, c_out))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "arch: InceptionTimePlus, patch_len: None, patch_stride: None\n",
      "arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: None, patch_stride: None\n",
      "arch: TSiTPlus, patch_len: None, patch_stride: None\n",
      "arch: InceptionTimePlus, patch_len: 5, patch_stride: None\n",
      "arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: None\n",
      "arch: TSiTPlus, patch_len: 5, patch_stride: None\n",
      "arch: InceptionTimePlus, patch_len: 5, patch_stride: 1\n",
      "arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 1\n",
      "arch: TSiTPlus, patch_len: 5, patch_stride: 1\n",
      "arch: InceptionTimePlus, patch_len: 5, patch_stride: 3\n",
      "arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 3\n",
      "arch: TSiTPlus, patch_len: 5, patch_stride: 3\n",
      "arch: InceptionTimePlus, patch_len: 5, patch_stride: 5\n",
      "arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 5\n",
      "arch: TSiTPlus, patch_len: 5, patch_stride: 5\n"
     ]
    }
   ],
   "source": [
    "# Integration test: no static categorical variables (s_cat_idxs=None),\n",
    "# across several archs and patch settings.\n",
    "bs = 8\n",
    "c_in = 6\n",
    "c_out = 3\n",
    "seq_len = 97\n",
    "d = None\n",
    "\n",
    "s_cat_idxs=None\n",
    "s_cont_idxs=4\n",
    "o_cat_idxs=[0, 3]\n",
    "o_cont_idxs=None  # remaining channels (1, 2, 5) become observed continuous\n",
    "s_cat_embeddings = None\n",
    "s_cat_embedding_dims = None\n",
    "o_cat_embeddings = [7, 3]\n",
    "o_cat_embedding_dims = [3, None]\n",
    "\n",
    "fusion_layers = 128\n",
    "\n",
    "t0 = torch.randint(0, 7, (bs, 1, seq_len)) # cat\n",
    "t1 = torch.randn(bs, 1, seq_len)\n",
    "t2 = torch.randint(0, 5, (bs, 1, seq_len)) # cat\n",
    "t3 = torch.randint(0, 3, (bs, 1, seq_len)) # cat\n",
    "t4 = torch.randn(bs, 1, seq_len)\n",
    "t5 = torch.randn(bs, 1, seq_len)\n",
    "\n",
    "t = torch.cat([t0, t1, t2, t3, t4, t5], 1).float().to(default_device())\n",
    "\n",
    "patch_lens = [None, 5, 5, 5, 5]\n",
    "patch_strides = [None, None, 1, 3, 5]\n",
    "for patch_len, patch_stride in zip(patch_lens, patch_strides):\n",
    "    for arch in [\"InceptionTimePlus\", InceptionTimePlus, \"TSiTPlus\"]:\n",
    "        print(f\"arch: {arch}, patch_len: {patch_len}, patch_stride: {patch_stride}\")\n",
    "\n",
    "        model = MultInputWrapper(\n",
    "            arch=arch,\n",
    "            c_in=c_in,\n",
    "            c_out=c_out,\n",
    "            seq_len=seq_len,\n",
    "            d=d,\n",
    "            s_cat_idxs=s_cat_idxs, s_cat_embeddings=s_cat_embeddings, s_cat_embedding_dims=s_cat_embedding_dims,\n",
    "            s_cont_idxs=s_cont_idxs,\n",
    "            o_cat_idxs=o_cat_idxs, o_cat_embeddings=o_cat_embeddings, o_cat_embedding_dims=o_cat_embedding_dims,\n",
    "            o_cont_idxs=o_cont_idxs,\n",
    "            patch_len=patch_len,\n",
    "            patch_stride=patch_stride,\n",
    "            fusion_layers=fusion_layers,\n",
    "        ).to(default_device())\n",
    "\n",
    "        test_eq(model(t).shape, (bs, c_out))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "arch: InceptionTimePlus, patch_len: None, patch_stride: None\n",
      "arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: None, patch_stride: None\n",
      "arch: TSiTPlus, patch_len: None, patch_stride: None\n",
      "arch: InceptionTimePlus, patch_len: 5, patch_stride: None\n",
      "arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: None\n",
      "arch: TSiTPlus, patch_len: 5, patch_stride: None\n",
      "arch: InceptionTimePlus, patch_len: 5, patch_stride: 1\n",
      "arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 1\n",
      "arch: TSiTPlus, patch_len: 5, patch_stride: 1\n",
      "arch: InceptionTimePlus, patch_len: 5, patch_stride: 3\n",
      "arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 3\n",
      "arch: TSiTPlus, patch_len: 5, patch_stride: 3\n",
      "arch: InceptionTimePlus, patch_len: 5, patch_stride: 5\n",
      "arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 5\n",
      "arch: TSiTPlus, patch_len: 5, patch_stride: 5\n"
     ]
    }
   ],
   "source": [
    "# Integration test: no observed categorical variables (o_cat_idxs=None),\n",
    "# across several archs and patch settings.\n",
    "bs = 8\n",
    "c_in = 6\n",
    "c_out = 3\n",
    "seq_len = 97\n",
    "d = None\n",
    "\n",
    "s_cat_idxs=2\n",
    "s_cont_idxs=4\n",
    "o_cat_idxs=None\n",
    "o_cont_idxs=None  # remaining channels (0, 1, 3, 5) become observed continuous\n",
    "s_cat_embeddings = 5\n",
    "s_cat_embedding_dims = None\n",
    "o_cat_embeddings = None\n",
    "o_cat_embedding_dims = None\n",
    "\n",
    "fusion_layers = 128\n",
    "\n",
    "t0 = torch.randint(0, 7, (bs, 1, seq_len)) # cat\n",
    "t1 = torch.randn(bs, 1, seq_len)\n",
    "t2 = torch.randint(0, 5, (bs, 1, seq_len)) # cat\n",
    "t3 = torch.randint(0, 3, (bs, 1, seq_len)) # cat\n",
    "t4 = torch.randn(bs, 1, seq_len)\n",
    "t5 = torch.randn(bs, 1, seq_len)\n",
    "\n",
    "t = torch.cat([t0, t1, t2, t3, t4, t5], 1).float().to(default_device())\n",
    "\n",
    "patch_lens = [None, 5, 5, 5, 5]\n",
    "patch_strides = [None, None, 1, 3, 5]\n",
    "for patch_len, patch_stride in zip(patch_lens, patch_strides):\n",
    "    for arch in [\"InceptionTimePlus\", InceptionTimePlus, \"TSiTPlus\"]:\n",
    "        print(f\"arch: {arch}, patch_len: {patch_len}, patch_stride: {patch_stride}\")\n",
    "\n",
    "        model = MultInputWrapper(\n",
    "            arch=arch,\n",
    "            c_in=c_in,\n",
    "            c_out=c_out,\n",
    "            seq_len=seq_len,\n",
    "            d=d,\n",
    "            s_cat_idxs=s_cat_idxs, s_cat_embeddings=s_cat_embeddings, s_cat_embedding_dims=s_cat_embedding_dims,\n",
    "            s_cont_idxs=s_cont_idxs,\n",
    "            o_cat_idxs=o_cat_idxs, o_cat_embeddings=o_cat_embeddings, o_cat_embedding_dims=o_cat_embedding_dims,\n",
    "            o_cont_idxs=o_cont_idxs,\n",
    "            patch_len=patch_len,\n",
    "            patch_stride=patch_stride,\n",
    "            fusion_layers=fusion_layers,\n",
    "        ).to(default_device())\n",
    "\n",
    "        test_eq(model(t).shape, (bs, c_out))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "arch: InceptionTimePlus, patch_len: None, patch_stride: None\n",
      "arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: None, patch_stride: None\n",
      "arch: TSiTPlus, patch_len: None, patch_stride: None\n",
      "arch: InceptionTimePlus, patch_len: 5, patch_stride: None\n",
      "arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: None\n",
      "arch: TSiTPlus, patch_len: 5, patch_stride: None\n",
      "arch: InceptionTimePlus, patch_len: 5, patch_stride: 1\n",
      "arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 1\n",
      "arch: TSiTPlus, patch_len: 5, patch_stride: 1\n",
      "arch: InceptionTimePlus, patch_len: 5, patch_stride: 3\n",
      "arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 3\n",
      "arch: TSiTPlus, patch_len: 5, patch_stride: 3\n",
      "arch: InceptionTimePlus, patch_len: 5, patch_stride: 5\n",
      "arch: <class 'tsai.models.InceptionTimePlus.InceptionTimePlus'>, patch_len: 5, patch_stride: 5\n",
      "arch: TSiTPlus, patch_len: 5, patch_stride: 5\n"
     ]
    }
   ],
   "source": [
    "# Integration test: no index lists at all — every channel falls back to\n",
    "# observed continuous. Run across several archs and patch settings.\n",
    "bs = 8\n",
    "c_in = 6\n",
    "c_out = 3\n",
    "seq_len = 97\n",
    "d = None\n",
    "\n",
    "s_cat_idxs=None\n",
    "s_cont_idxs=None\n",
    "o_cat_idxs=None\n",
    "o_cont_idxs=None  # with no other groups, all 6 channels become observed continuous\n",
    "s_cat_embeddings = None\n",
    "s_cat_embedding_dims = None\n",
    "o_cat_embeddings = None\n",
    "o_cat_embedding_dims = None\n",
    "\n",
    "fusion_layers = 128\n",
    "\n",
    "t0 = torch.randint(0, 7, (bs, 1, seq_len)) # cat\n",
    "t1 = torch.randn(bs, 1, seq_len)\n",
    "t2 = torch.randint(0, 5, (bs, 1, seq_len)) # cat\n",
    "t3 = torch.randint(0, 3, (bs, 1, seq_len)) # cat\n",
    "t4 = torch.randn(bs, 1, seq_len)\n",
    "t5 = torch.randn(bs, 1, seq_len)\n",
    "\n",
    "t = torch.cat([t0, t1, t2, t3, t4, t5], 1).float().to(default_device())\n",
    "\n",
    "patch_lens = [None, 5, 5, 5, 5]\n",
    "patch_strides = [None, None, 1, 3, 5]\n",
    "for patch_len, patch_stride in zip(patch_lens, patch_strides):\n",
    "    for arch in [\"InceptionTimePlus\", InceptionTimePlus, \"TSiTPlus\"]:\n",
    "        print(f\"arch: {arch}, patch_len: {patch_len}, patch_stride: {patch_stride}\")\n",
    "\n",
    "        model = MultInputWrapper(\n",
    "            arch=arch,\n",
    "            c_in=c_in,\n",
    "            c_out=c_out,\n",
    "            seq_len=seq_len,\n",
    "            d=d,\n",
    "            s_cat_idxs=s_cat_idxs, s_cat_embeddings=s_cat_embeddings, s_cat_embedding_dims=s_cat_embedding_dims,\n",
    "            s_cont_idxs=s_cont_idxs,\n",
    "            o_cat_idxs=o_cat_idxs, o_cat_embeddings=o_cat_embeddings, o_cat_embedding_dims=o_cat_embedding_dims,\n",
    "            o_cont_idxs=o_cont_idxs,\n",
    "            patch_len=patch_len,\n",
    "            patch_stride=patch_stride,\n",
    "            fusion_layers=fusion_layers,\n",
    "        ).to(default_device())\n",
    "\n",
    "        test_eq(model(t).shape, (bs, c_out))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/javascript": "IPython.notebook.save_checkpoint();",
      "text/plain": [
       "<IPython.core.display.Javascript object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/Users/nacho/notebooks/tsai/nbs/077_models.multimodal.ipynb saved at 2024-02-10 21:58:47\n",
      "Correct notebook to script conversion! 😃\n",
      "Saturday 10/02/24 21:58:50 CET\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "\n",
       "                <audio  controls=\"controls\" autoplay=\"autoplay\">\n",
       "                    <source src=\"data:audio/wav;base64,UklGRvQHAABXQVZFZm10IBAAAAABAAEAECcAACBOAAACABAAZGF0YdAHAAAAAPF/iPh/gOoOon6w6ayCoR2ZeyfbjobxK+F2Hs0XjKc5i3DGvzaTlEaraE+zz5uLUl9f46fHpWJdxVSrnfmw8mYEScqUP70cb0Q8X41uysJ1si6Eh1jYzXp9IE2DzOYsftYRyoCY9dJ/8QICgIcEun8D9PmAaBPlfT7lq4MFIlh61tYPiCswIHX+yBaOqT1QbuW7qpVQSv9lu6+xnvRVSlyopAypbGBTUdSalrSTaUBFYpInwUpxOzhti5TOdndyKhCGrdwAfBUcXIJB69p+Vw1egB76+n9q/h6ADglbf4LvnIHfF/981ODThF4m8HiS0riJVjQ6c+/EOZCYQfJrGrhBmPVNMmNArLKhQlkXWYqhbaxXY8ZNHphLuBJsZUEckCTFVHMgNKGJytIDeSUmw4QN4Qx9pReTgb3vYX/TCBuApf75f+P5Y4CRDdN+B+tngk8c8nt03CKGqipgd13OhotwOC5x9MCAknFFcmlmtPmagFFFYOCo0qRzXMhVi57pryNmIEqJlRi8bm52PfuNM8k4dfQv+4cO12l6zCGdg3jl730uE/KAPvS+f0wEAoAsA89/XfXQgBESIn6S5luDtiC8eh/YmIfpLqt1OMp5jXg8/24MveqUNUnPZsqw0Z3yVDldnaUOqIZfXlKrm36zzWhjRhaT+r+ncHI5/otUzfd2uSt7hl/bqXtoHaCC6+mqfrAOeoDD+PJ/xf8RgLMHfH/b8GeBihZIfSXidoQSJWB52NM1iRkzz3MkxpKPbUCrbDu5d5fgTAxkSK3JoEhYD1p2omere2LZTuqYLbdWa49Cx5Dww7tyXDUnioXRkHhwJyKFvd/AfPoYy4Fl7j1/LQorgEr9/X89+0qAOAwAf13sJoL8Gkd8wt25hWIp3Heez/eKODfPcSPCzpFNRDVqf7UlmnNQKGHgqd+jgVvJVm2f265QZTpLS5byur1tpT6ajvrHq3Q2MXWIxtUCehoj8YMk5LB9hRQegeTypn+nBQWA0QHgf7f2q4C5EFt+5ucOg2YfHXtq2SSHpS0ydnTL4IxFO6pvNb4ulBdInWfcsfSc7VMmXpSmE6eeXmZThJxpsgRohEfOk86+AHCoOpOMFsx1dv8s6oYT2k17uR7ngpXod34IEJqAaPfnfyABCIBZBpl/NPI2gTQVjX134x2ExSPMeR7VtYjZMWJ0W8ftjkA/YW1durCWykvjZFKu4p9LVwVbZKNkqpxh6U+6mRC2mGq2Q3SRvsIgcpc2sIpD0Bp4uiiFhW3ecXxOGgaCDe0Vf4cLPoDv+/5/mfw1gN4KKX+17emBqBmYfBHfVYUZKFR44NBtiv41bHJUwx+RJkP1apu2VJlkTwli4qrwoo1ax1dToNCtemRSTBGXz7kJbdM/PY/Dxht0dTLziH7Ul3loJEiE0uJsfdsVTYGL8Yt/AgcMgHYA7X8S+IqAYA+QfjzpxIIVHnp7tdqzhmAstXaxzEqMETpScGC/dJP3Rmdo8LIZnOVSEF+Opxumsl1sVF+dVrE5Z6NIiZSkvVdv2zsqjdnK8HVDLlyHyNjuegogM4NA5z9+YRG9gA722H97AgOA/gSyf43zCIHdE899yuTIg3ciNXpm1jmImTDwdJPITI4RPhRugbvslbFKt2Vfr/6eTFb4W1WkY6m6YPdQjJr2tNZp3EQlko7BgXHRNz2LAc+gdwMq7IUf3R58ohtFgrbr6n7hDFWAlPr8f/T9I4CECU9/De+vgVQY5nxh4POEzybJeCTS5YnCNAZzhsRzkP1Bsmu4t4aYU07nYuerA6KWWcJYO6HHrKJjaE3Zl624UWz/QOOPjcWHc7QzdIk40yl5tCWjhIDhJX0xF4CBMvBsf10IF4Ac//Z/bPlsgAcO
wn6S6n6CwxzUewLcRoYaKzV38M23i9o493CNwL6S1UUuaQe0QpvbUfdfiqglpcRccFU+nkWwambASUiVfLyqbg49xY2eyWh1hy/Sh37XjHpaIYKD7OUEfrgS5IC09MV/1gMBgKMDyH/n9N6AhhINfh7mdoMoIZt6r9fAh1cvfHXNya6N4DzDbqi8K5WWSYlmbbAdnkpV6FxJpWSo1V8DUmGb3rMRaQBG2JJgwN9wCDnNi8HNI3dKK1aG0dvHe/UciIJf6rt+Og5wgDn59X9P/xWAKQhxf2XweYH+FjB9suGVhIMlOnlo02GJhTOdc7vFyo/TQGxs2Li7lz9NwmPurBihnVi7WSWiwKvGYntOpJiOt5drKUKMkFnE8HLxNPmJ9NG4eP8mAYUv4Np8hhi3gdruSX+3CSWAwP38f8f6UoCuDPF+6Os8gnAbKnxQ3d2F0imydzDPKIuiN5lxu8EKkrFE82kftW2az1DbYImpMqTUW3FWIJ83r5hl2koJlla7+m0+PmSOZcjcdMgwS4g11iZ6qCLUg5jkxn0QFA6BWvOvfzEFBIBHAtp/Qfa3gC4RSH5y5yeD2B/8evnYS4cULgR2CMsUja47cG/QvW6UeEhXZ3+xP51GVNVdP6Zpp+1eDFM5nMeySWghR4+TNL85cD46YIyCzKJ2kCzEhoTabXtGHs+CCemJfpMPjoDe9+t/qQALgM8Gj3++8UaBqRV2fQTjO4Q3JKd5r9TgiEYyMHTxxiWPpz8jbfq585YpTJpk960xoKFXsVoTo7yq6GGMTw==\" type=\"audio/wav\" />\n",
       "                    Your browser does not support the audio element.\n",
       "                </audio>\n",
       "              "
      ],
      "text/plain": [
       "<IPython.lib.display.Audio object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "#|eval: false\n",
    "#|hide\n",
    "# nbdev housekeeping: export this notebook's `#|export` cells to the tsai package\n",
    "from tsai.export import get_nb_name; nb_name = get_nb_name(locals())\n",
    "from tsai.imports import create_scripts; create_scripts(nb_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "python3",
   "language": "python",
   "name": "python3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
