{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Author: Pedro Herruzo\n",
    "# Copyright 2021 Institute of Advanced Research in Artificial Intelligence (IARAI) GmbH.\n",
    "# IARAI licenses this file to You under the Apache License, Version 2.0\n",
    "# (the \"License\"); you may not use this file except in compliance with\n",
    "# the License. You may obtain a copy of the License at\n",
    "#\n",
    "# http://www.apache.org/licenses/LICENSE-2.0\n",
    "#\n",
    "# Unless required by applicable law or agreed to in writing, software\n",
    "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
    "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
    "# See the License for the specific language governing permissions and\n",
    "# limitations under the License."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Create a submission using pretrained UNet models\n",
    "\n",
    "In this notebook we will perform the following actions:\n",
    "* Create a valid submission for the core-competition (R1, R2, R3, R7, and R8) using pretrained UNets per region i.e. individual models per region\n",
    "* Create a valid submission for the transfer-learning-competition (R4, R5, R6, R9, R10, and R11) using a single UNet trained on region R1\n",
    "* Use the ensemble of models trained in regions R1, R2, R3, R7, and R8 to generate a valid submission for the transfer-learning-competition (R4, R5, R6, R9, R10, and R11) by averaging their predictions\n",
    "\n",
    "[Download the weights for the pre-trained models here](https://www.iarai.ac.at/weather4cast/forums/topic/weights-of-unet-baselines-trained-on-regions-r1-r2-and-r3/)\n",
    "\n",
    "Dependencies required:\n",
    "* torch \n",
    "* pytorch_lightning\n",
    "* numpy\n",
    "\n",
    "The model is defined in weather4cast/benchmarks:\n",
    "* unet.py: architecture definition \n",
    "* FeaturesSysUNet.py:  Inherits from pytorch_lightning.LightningModule. In this notebook we only use it for the forward pass\n",
    "\n",
    "Please, refer to those files if you want to know more about the architecture. You can also read the [docs of pytorch_lightning](https://pytorch-lightning.readthedocs.io/en/latest/), but it is not necessary in order to understand how to produce predictions, which is the main topic of this notebook.\n",
    "\n",
    "In this notebook, we will use the folder paths and parameters defined in **weather4cast/config.py**. Please, set there the paths to the data before starting with the notebook (check the `Start here` section in the README.md if you want to see how).\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Let us first define the functions that will perform the main tasks:\n",
    "* create the submission directory structure\n",
    "* load the data & the model \n",
    "* compute predictions per day in the test split for a given region"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "%load_ext autoreload\n",
    "%autoreload 2\n",
    "\n",
    "from torch.utils.data import DataLoader\n",
    "\n",
    "import pathlib\n",
    "import sys\n",
    "import os\n",
    "module_dir = str(pathlib.Path(os.getcwd()).parent)\n",
    "sys.path.append(module_dir)\n",
    "\n",
    "import data_utils\n",
    "import config as cf\n",
    "from w4c_dataloader import create_dataset\n",
    "from benchmarks.FeaturesSysUNet import FeaturesSysUNet as Model\n",
    "\n",
    "# ------------\n",
    "# 1. create folder structures for the submission\n",
    "# ------------\n",
    "def create_directory_structure(root, folder_name='submission'):\n",
    "    \"\"\"\n",
    "    Create the competition output directory structure at the given root path.\n",
    "\n",
    "    Builds <root>/<folder_name>/<challenge>/<region>/test for every region of\n",
    "    both the core and the transfer-learning challenge. Existing directories\n",
    "    are reported and left untouched.\n",
    "    \"\"\"\n",
    "    challenges = {'core-w4c': ['R1', 'R2', 'R3', 'R7', 'R8'], 'transfer-learning-w4c': ['R4', 'R5', 'R6', 'R9', 'R10', 'R11']}\n",
    "    \n",
    "    for f_name, regions in challenges.items():\n",
    "        for region in regions:\n",
    "            r_path = os.path.join(root, folder_name, f_name, region, 'test')\n",
    "            try:\n",
    "                os.makedirs(r_path)\n",
    "                print(f'created path: {r_path}')\n",
    "            except OSError:  # only filesystem errors; a bare except would also swallow KeyboardInterrupt\n",
    "                print(f'failed to create directory structure, maybe they already exist: {r_path}')\n",
    "\n",
    "# ------------\n",
    "# 2. load data & model\n",
    "# ------------\n",
    "def get_data_iterator(region_id='R1', data_split= 'test', collapse_time=True, \n",
    "                      batch_size=32, shuffle=False, num_workers=0):\n",
    "    \"\"\" Creates an iterator over batches of region 'region_id' for the 'data_split' partition.\n",
    "\n",
    "    Args:\n",
    "        region_id (str): region whose data to load (e.g. 'R1').\n",
    "        data_split (str): data partition to iterate over (e.g. 'test').\n",
    "        collapse_time (bool): forwarded to the dataset via params['data_params'].\n",
    "        batch_size (int): batch size of the DataLoader.\n",
    "        shuffle (bool): keep False so batches arrive sorted by date, which the\n",
    "            date/batch matching in predictions_per_day relies on.\n",
    "        num_workers (int): DataLoader worker processes (0 = main process).\n",
    "\n",
    "    Returns:\n",
    "        (iterator over the DataLoader, sorted array of test-split dates, params dict)\n",
    "    \"\"\"\n",
    "    \n",
    "    # region-specific configuration comes from weather4cast/config.py\n",
    "    params = cf.get_params(region_id=region_id)\n",
    "    params['data_params']['collapse_time'] = collapse_time\n",
    "\n",
    "    ds = create_dataset(data_split, params['data_params'])\n",
    "    dataloader = DataLoader(ds, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)\n",
    "    \n",
    "    # test dates sorted ascending so they line up with the (unshuffled) batches\n",
    "    data_splits, test_sequences = data_utils.read_splits(params['data_params']['train_splits'], params['data_params']['test_splits'])\n",
    "    test_dates = data_splits[data_splits.split=='test'].id_date.sort_values().values\n",
    "\n",
    "    return iter(dataloader), test_dates, params\n",
    "\n",
    "def load_model(Model, params, checkpoint_path='', device=None):\n",
    "    \"\"\" Loads a model from a checkpoint, or builds it from scratch if checkpoint_path=''.\n",
    "\n",
    "    Args:\n",
    "        Model: pytorch_lightning.LightningModule subclass to instantiate or restore.\n",
    "        params (dict): expects 'model_params' and 'data_params' keys; only used\n",
    "            when building the model from scratch.\n",
    "        checkpoint_path (str): path to a .ckpt file; '' means no checkpoint.\n",
    "        device: GPU id to move the model to; None leaves the model on CPU and\n",
    "            in whatever train/eval mode it was created with.\n",
    "    \"\"\"\n",
    "    \n",
    "    if checkpoint_path == '':\n",
    "        model = Model(params['model_params'], **params['data_params'])            \n",
    "    else:\n",
    "        print(\"model:\", Model)\n",
    "        print(f'-> Loading model checkpoint: {checkpoint_path}')\n",
    "        model = Model.load_from_checkpoint(checkpoint_path)\n",
    "        \n",
    "    if device is not None:\n",
    "        # inference only: switch to eval mode and move to the requested GPU\n",
    "        model = model.eval().cuda(device)\n",
    "        \n",
    "    return model\n",
    "\n",
    "# ------------\n",
    "# 3. make predictions & loop over regions\n",
    "# ------------\n",
    "def get_preds(model, batch, device=None):\n",
    "    \"\"\" Computes the model's output on one batch.\n",
    "\n",
    "    Args:\n",
    "        model: network used for the forward pass.\n",
    "        batch: (input sequence, target, metadata) triple from the dataloader.\n",
    "        device: GPU id to run on; None runs on whatever device the model is on.\n",
    "\n",
    "    Returns:\n",
    "        (prediction as a numpy array, day-of-year of the batch)\n",
    "    \"\"\"\n",
    "    \n",
    "    in_seq, out, metadata = batch\n",
    "    # date taken from the first sample only — presumably all samples in the\n",
    "    # batch share the same date (see the assert in predictions_per_day)\n",
    "    day_in_year = metadata['in']['day_in_year'][0][0].item()\n",
    "    \n",
    "    if device is not None:\n",
    "        in_seq = in_seq.cuda(device=device)\n",
    "    y_hat = model(in_seq)\n",
    "    # detach to host memory so downstream numpy postprocessing works\n",
    "    y_hat = y_hat.data.cpu().numpy()  \n",
    "    \n",
    "    return y_hat, day_in_year\n",
    "\n",
    "def predictions_per_day(test_dates, model, ds_iterator, device, file_path, data_params):\n",
    "    \"\"\" Computes predictions for all test dates and saves each to disk as <date>.h5.\n",
    "\n",
    "    Args:\n",
    "        test_dates: test-split dates, sorted ascending (must match batch order).\n",
    "        model: model used for the forward pass.\n",
    "        ds_iterator: iterator yielding one batch per date, in date order.\n",
    "        device: GPU id or None (CPU).\n",
    "        file_path: directory where the .h5 files are written.\n",
    "        data_params (dict): needs 'target_vars' and 'preprocess' for postprocessing.\n",
    "    \"\"\"\n",
    "    \n",
    "    for target_date in test_dates:\n",
    "        print(f'generating submission for date: {target_date}...')\n",
    "        batch = next(ds_iterator)\n",
    "        y_hat, predicted_day = get_preds(model, batch, device)\n",
    "        \n",
    "        # force data to be in the valid range [0, 1]\n",
    "        y_hat[y_hat>1] = 1\n",
    "        y_hat[y_hat<0] = 0\n",
    "        \n",
    "        # batches are sorted by date for the dataloader, that's why they coincide\n",
    "        assert predicted_day==target_date, f\"Error, the loaded date {predicted_day} is different than the target: {target_date}\"\n",
    "\n",
    "        f_path = os.path.join(file_path, f'{predicted_day}.h5')\n",
    "        # undo the input preprocessing so values are in the submission's expected units\n",
    "        y_hat = data_utils.postprocess_fn(y_hat, data_params['target_vars'], data_params['preprocess']['source'])\n",
    "        data_utils.write_data(y_hat, f_path)\n",
    "        print(f'--> saved in: {f_path}')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Let us now generate and save the predictions for the core-competition:\n",
    "\n",
    "Note that you need to specify the path to save your predictions (root) and the path to the weights for the already trained model. You can find these files in the following section of our competition website: https://www.iarai.ac.at/weather4cast/forums/topic/weights-of-unet-baselines-trained-on-regions-r1-r2-and-r3/\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1. Define model's checkpoints, regions per task & gpu id to use\n",
    "# Attention, if you work on Windows OS, modify the following paths accordingly\n",
    "root_to_ckps = '~/projects/weather4cast/ligh_logs_old/old'\n",
    "checkpoint_paths = {'R1': f'{root_to_ckps}/version_21/checkpoints/epoch=03-val_loss_epoch=0.027697.ckpt', \n",
    "                    'R2': f'{root_to_ckps}/version_19/checkpoints/epoch=01-val_loss_epoch=0.042129.ckpt', \n",
    "                    'R3': f'{root_to_ckps}/version_20/checkpoints/epoch=06-val_loss_epoch=0.058147.ckpt', \n",
    "                    'R7': f'{root_to_ckps}/version_8/checkpoints/epoch=02-val_loss_epoch=0.043889.ckpt', \n",
    "                    'R8': f'{root_to_ckps}/version_8/checkpoints/R8epoch=02-val_loss_epoch=0.049549.ckpt'}\n",
    "challenges = {'core-w4c': ['R1', 'R2', 'R3', 'R7', 'R8'], 'transfer-learning-w4c': ['R4', 'R5', 'R6', 'R9', 'R10', 'R11']}\n",
    "\n",
    "device = 0 # gpu id - SET THE ID OF THE GPU YOU WANT TO USE ---> Use `None` for CPU\n",
    "\n",
    "# 2. define root and name of the submission to create the folders' structure\n",
    "root = '/iarai/home/pedro.herruzo/projects/Weather4cast2021/utils/submission_examples'\n",
    "folder_name = 'UNet_submission'\n",
    "create_directory_structure(root, folder_name=folder_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 3. compute and save predictions for each region for all dates in the test split\n",
    "task_name = 'core-w4c'\n",
    "for region in challenges[task_name]:\n",
    "    # load data and model\n",
    "    ds_iterator, test_dates, params = get_data_iterator(region_id=region)\n",
    "    model = load_model(Model, params, checkpoint_path=checkpoint_paths[region], device=device)\n",
    "\n",
    "    r_path = os.path.join(root, folder_name, task_name, region, 'test')\n",
    "    predictions_per_day(test_dates, model, ds_iterator, device, r_path, params['data_params']) "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We have just computed a valid submission for all regions using a pretrained UNet model. \n",
    "\n",
    "Now we should submit a zip containing all regions and we are done. Please, follow the instructions in weather4cast/README.md to generate the zip file."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Transfer Learning competition submission\n",
    "\n",
    "### Let us first generate the predictions using only a single model from the core-competition"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 4. compute and save predictions for each region for all dates in the test split\n",
    "region_model = 'R1' # use this region weights to compute predictions on all transfer learning regions\n",
    "task_name = 'transfer-learning-w4c'\n",
    "for region in challenges[task_name]:\n",
    "    # load data and model\n",
    "    ds_iterator, test_dates, params = get_data_iterator(region_id=region)\n",
    "    model = load_model(Model, params, checkpoint_path=checkpoint_paths[region_model], device=device)\n",
    "\n",
    "    r_path = os.path.join(root, folder_name, task_name, region, 'test')\n",
    "    predictions_per_day(test_dates, model, ds_iterator, device, r_path, params['data_params']) "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Let us use an ensemble of the models learned in regions (1, 2, 3, 7, and 8) by getting individual predictions and just averaging across them"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# create a new folder structure (note that we will only use the transfer-learning regions R4-R6 and R9-R11)\n",
    "folder_name = 'transfer-ensample'\n",
    "create_directory_structure(root, folder_name=folder_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "def predictions_per_day_ensamble(test_dates, models, ds_iterator, device, file_path, data_params):\n",
    "    \"\"\" Computes predictions for all dates and saves them to disk.\n",
    "\n",
    "    Same as predictions_per_day, but averages the predictions of several\n",
    "    models (a simple ensemble) before postprocessing and saving.\n",
    "\n",
    "    Args:\n",
    "        models (list): models whose predictions are averaged.\n",
    "        (remaining arguments as in predictions_per_day)\n",
    "    \"\"\"\n",
    "    \n",
    "    for target_date in test_dates:\n",
    "        print(f'generating submission for date: {target_date}...')\n",
    "        batch = next(ds_iterator)\n",
    "        \n",
    "        # collect one clipped prediction per model, all on the same batch\n",
    "        ensamble = []\n",
    "        for model in models:\n",
    "            y_hat, predicted_day = get_preds(model, batch, device)\n",
    "\n",
    "            # force data to be in the valid range [0, 1]\n",
    "            y_hat[y_hat>1] = 1\n",
    "            y_hat[y_hat<0] = 0\n",
    "\n",
    "            # batches are sorted by date for the dataloader, that's why they coincide\n",
    "            assert predicted_day==target_date, f\"Error, the loaded date {predicted_day} is different than the target: {target_date}\"\n",
    "            \n",
    "            ensamble.append(y_hat)\n",
    "            \n",
    "        # ensemble prediction = element-wise mean over the models' outputs\n",
    "        ensamble = np.asarray(ensamble)\n",
    "        y_hat = np.mean(ensamble, axis=0)\n",
    "\n",
    "        f_path = os.path.join(file_path, f'{predicted_day}.h5')\n",
    "        y_hat = data_utils.postprocess_fn(y_hat, data_params['target_vars'], data_params['preprocess']['source'])\n",
    "        data_utils.write_data(y_hat, f_path)\n",
    "        print(f'--> saved in: {f_path}')\n",
    "        \n",
    "# load all 5 core-region models into a list\n",
    "models = [load_model(Model, params, checkpoint_path=checkpoint_paths[reg_id], device=device) for reg_id in challenges['core-w4c']]\n",
    "\n",
    "# compute and save averaged predictions with the ensemble of models\n",
    "task_name = 'transfer-learning-w4c'\n",
    "for region in challenges[task_name]:\n",
    "    # load data\n",
    "    ds_iterator, test_dates, params = get_data_iterator(region_id=region)\n",
    "\n",
    "    # compute predictions in the ensamble of models and save them to disk\n",
    "    r_path = os.path.join(root, folder_name, task_name, region, 'test')\n",
    "    predictions_per_day_ensamble(test_dates, models, ds_iterator, device, r_path, params['data_params']) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "bfd871e7596a95c98d3089431820ee0f5836f58f8f402f3f1aefb00e4fb50f2a"
  },
  "kernelspec": {
   "display_name": "Python 3.7.10 64-bit ('w4c': conda)",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}