{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "1c28ae25",
   "metadata": {},
   "source": [
    "### This example is almost equivalent to 05.Federated_Training_Inference. The differences are: i) preprocessing is performed using a global maximum and minimum, ii) we perform predictions on a testing set, and iii) we save the model in .h5 format"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3bef5ebe",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:30.927751Z",
     "end_time": "2023-08-14T20:07:31.547930Z"
    }
   },
   "outputs": [],
   "source": [
    "import sys\n",
    "import os\n",
    "\n",
    "from pathlib import Path\n",
    "\n",
    "parent = Path(os.path.abspath(\"\")).resolve().parents[0]\n",
    "if parent not in sys.path:\n",
    "    sys.path.insert(0, str(parent))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c9a35493",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:30.940279Z",
     "end_time": "2023-08-14T20:07:31.673933Z"
    }
   },
   "outputs": [],
   "source": [
    "import copy\n",
    "\n",
    "import random\n",
    "\n",
    "from collections import OrderedDict\n",
    "\n",
    "import numpy as np\n",
    "import torch\n",
    "import pandas as pd\n",
    "\n",
    "from matplotlib import pyplot as plt\n",
    "\n",
    "from argparse import Namespace"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a4d24d90",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:30.957279Z",
     "end_time": "2023-08-14T20:07:31.791934Z"
    }
   },
   "outputs": [],
   "source": [
    "from ml.utils.data_utils import read_data, generate_time_lags, time_to_feature, handle_nans, to_Xy, \\\n",
    "    to_torch_dataset, to_timeseries_rep, assign_statistics, \\\n",
    "    to_train_val, scale_features, get_data_by_area, remove_identifiers, get_exogenous_data_by_area, handle_outliers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "347b9dc6",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:30.973279Z",
     "end_time": "2023-08-14T20:07:31.791934Z"
    }
   },
   "outputs": [],
   "source": [
    "from ml.utils.train_utils import train, test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "30840402",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:30.990287Z",
     "end_time": "2023-08-14T20:07:31.874365Z"
    }
   },
   "outputs": [],
   "source": [
    "from ml.models.mlp import MLP\n",
    "from ml.models.rnn import RNN\n",
    "from ml.models.lstm import LSTM\n",
    "from ml.models.gru import GRU\n",
    "from ml.models.cnn import CNN\n",
    "from ml.models.rnn_autoencoder import DualAttentionAutoEncoder"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "85c123cc",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:31.004279Z",
     "end_time": "2023-08-14T20:07:31.874365Z"
    }
   },
   "outputs": [],
   "source": [
    "from ml.fl.defaults import create_regression_client\n",
    "from ml.fl.client_proxy import SimpleClientProxy\n",
    "from ml.fl.server.server import Server\n",
    "from ml.utils.helpers import accumulate_metric"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1b96f7d1",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:31.021282Z",
     "end_time": "2023-08-14T20:07:32.048367Z"
    }
   },
   "outputs": [],
   "source": [
    "args = Namespace(\n",
    "    data_path='../mydatase/marketData/train_dataset.csv', # dataset\n",
    "\n",
    "    test_size=0.2, # validation size \n",
    "    targets=['Weekly_Sales'], # the target columns\n",
    "    num_lags=5, # the number of past observations to feed as input\n",
    "\n",
     "    identifier='District', # the column name that identifies a client (e.g., a district/area)\n",
    "\n",
    "    nan_constant=0, # the constant to transform nan values\n",
    "    x_scaler='standard', # x_scaler\n",
    "    y_scaler='standard', # y_scaler\n",
    "    outlier_detection=None, # whether to perform flooring and capping\n",
    "\n",
    "    criterion='mse', # optimization criterion, mse or l1\n",
    "    fl_rounds=50, # the number of federated rounds\n",
    "    fraction=1., # the percentage of available client to consider for random selection\n",
    "    aggregation=\"fedavg\", # federated aggregation algorithm\n",
    "    epochs=5, # the number of maximum local epochs\n",
    "    lr=0.001, # learning rate\n",
    "    optimizer='adam', # the optimizer, it can be sgd or adam\n",
    "    batch_size=128, # the batch size to use\n",
    "    local_early_stopping=False, # whether to use early stopping\n",
    "    local_patience=50, # patience value for the early stopping parameter (if specified)\n",
    "    max_grad_norm=0.0, # whether to clip grad norm\n",
    "    reg1=0.0, # l1 regularization\n",
    "    reg2=0.0, # l2 regularization\n",
    "\n",
    "    cuda=True, # whether to use gpu\n",
    "    \n",
    "    seed=0, # reproducibility\n",
    "\n",
    "    assign_stats=None, # whether to use statistics as exogenous data, [\"mean\", \"median\", \"std\", \"variance\", \"kurtosis\", \"skew\"]\n",
    "    use_time_features=False # whether to use datetime features\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4cf1b32a",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:31.035279Z",
     "end_time": "2023-08-14T20:07:32.049367Z"
    }
   },
   "outputs": [],
   "source": [
    "print(f\"Script arguments: {args}\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f8d3f353",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:31.051281Z",
     "end_time": "2023-08-14T20:07:32.049367Z"
    }
   },
   "outputs": [],
   "source": [
    "device = \"cuda\" if args.cuda and torch.cuda.is_available() else \"cpu\"\n",
    "print(f\"Using {device}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "33528856",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:31.067930Z",
     "end_time": "2023-08-14T20:07:32.050366Z"
    }
   },
   "outputs": [],
   "source": [
    "# Outlier detection specification\n",
    "# if args.outlier_detection is not None:\n",
    "#     outlier_columns = ['rb_down', 'rb_up', 'down', 'up']\n",
    "#     outlier_kwargs = {\"ElBorn\": (10, 90), \"LesCorts\": (10, 90), \"PobleSec\": (5, 95)}\n",
    "#     args.outlier_columns = outlier_columns\n",
    "#     args.outlier_kwargs = outlier_kwargs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b45e8da8",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:31.083931Z",
     "end_time": "2023-08-14T20:07:32.050366Z"
    }
   },
   "outputs": [],
   "source": [
    "def seed_all():\n",
    "    # ensure reproducibility\n",
    "    random.seed(args.seed)\n",
    "    np.random.seed(args.seed)\n",
    "    torch.manual_seed(args.seed)\n",
    "    torch.cuda.manual_seed_all(args.seed)\n",
    "    torch.backends.cudnn.deterministic = True\n",
    "    torch.backends.cudnn.benchmark = False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "49abed55",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:31.099930Z",
     "end_time": "2023-08-14T20:07:32.050366Z"
    }
   },
   "outputs": [],
   "source": [
    "seed_all()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5f8dcc6a",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:31.120930Z",
     "end_time": "2023-08-14T20:07:32.050366Z"
    }
   },
   "outputs": [],
   "source": [
    "def make_preprocessing():\n",
    "    \"\"\"Preprocess a given .csv\"\"\"\n",
    "    # read data\n",
    "    df = read_data(args.data_path)\n",
    "    # handle nans\n",
    "    df = handle_nans(train_data=df, constant=args.nan_constant,\n",
    "                     identifier=args.identifier)\n",
    "    # split to train/validation\n",
    "    train_data, val_data = to_train_val(df)\n",
    "    \n",
    "    # handle outliers (if specified)\n",
    "    if args.outlier_detection is not None:\n",
    "        train_data = handle_outliers(df=train_data, columns=args.outlier_columns,\n",
    "                                     identifier=args.identifier, kwargs=args.outlier_kwargs)\n",
    "    \n",
    "    # get X and y\n",
    "    X_train, X_val, y_train, y_val = to_Xy(train_data=train_data, val_data=val_data,\n",
    "                                          targets=args.targets)\n",
    "    \n",
    "    # scale X\n",
    "    X_train, X_val, x_scaler = scale_features(train_data=X_train, val_data=X_val,\n",
    "                                              scaler=args.x_scaler,\n",
    "                                              # per_area=True,\n",
    "                                              identifier=args.identifier)\n",
    "    # scale y\n",
    "    y_train, y_val, y_scaler = scale_features(train_data=y_train, val_data=y_val,\n",
    "                                              scaler=args.y_scaler, \n",
    "                                              # per_area=True,\n",
    "                                              identifier=args.identifier)\n",
    "    \n",
    "    # generate time lags\n",
    "    X_train = generate_time_lags(X_train, args.num_lags)\n",
    "    X_val = generate_time_lags(X_val, args.num_lags)\n",
    "    y_train = generate_time_lags(y_train, args.num_lags, is_y=True)\n",
    "    y_val = generate_time_lags(y_val, args.num_lags, is_y=True)\n",
    "    \n",
    "    # get datetime features as exogenous data\n",
    "    date_time_df_train = time_to_feature(\n",
    "        X_train, args.use_time_features, identifier=args.identifier\n",
    "    )\n",
    "    date_time_df_val = time_to_feature(\n",
    "        X_val, args.use_time_features, identifier=args.identifier\n",
    "    )\n",
    "    \n",
    "    # get statistics as exogenous data\n",
    "    stats_df_train = assign_statistics(X_train, args.assign_stats, args.num_lags,\n",
    "                                       targets=args.targets, identifier=args.identifier)\n",
    "    stats_df_val = assign_statistics(X_val, args.assign_stats, args.num_lags, \n",
    "                                       targets=args.targets, identifier=args.identifier)\n",
    "    \n",
    "    # concat the exogenous features (if any) to a single dataframe\n",
    "    if date_time_df_train is not None or stats_df_train is not None:\n",
    "        exogenous_data_train = pd.concat([date_time_df_train, stats_df_train], axis=1)\n",
    "        # remove duplicate columns (if any)\n",
    "        exogenous_data_train = exogenous_data_train.loc[:, ~exogenous_data_train.columns.duplicated()].copy()\n",
    "        assert len(exogenous_data_train) == len(X_train) == len(y_train)\n",
    "    else:\n",
    "        exogenous_data_train = None\n",
    "    if date_time_df_val is not None or stats_df_val is not None:\n",
    "        exogenous_data_val = pd.concat([date_time_df_val, stats_df_val], axis=1)\n",
    "        exogenous_data_val = exogenous_data_val.loc[:, ~exogenous_data_val.columns.duplicated()].copy()\n",
    "        assert len(exogenous_data_val) == len(X_val) == len(y_val)\n",
    "    else:\n",
    "        exogenous_data_val = None\n",
    "        \n",
    "    return X_train, X_val, y_train, y_val, exogenous_data_train, exogenous_data_val, x_scaler, y_scaler"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ee3b8085",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:31.132931Z",
     "end_time": "2023-08-14T20:07:32.614941Z"
    }
   },
   "outputs": [],
   "source": [
    "X_train, X_val, y_train, y_val, exogenous_data_train, exogenous_data_val, x_scaler, y_scaler = make_preprocessing()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "X_train.head()"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:32.615940Z",
     "end_time": "2023-08-14T20:07:32.630722Z"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "354196ed",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:32.630722Z",
     "end_time": "2023-08-14T20:07:32.657119Z"
    }
   },
   "outputs": [],
   "source": [
    "def make_postprocessing(X_train, X_val, y_train, y_val, exogenous_data_train, exogenous_data_val, x_scaler, y_scaler):\n",
    "    \"\"\"Make data ready to be fed into ml algorithms\"\"\"\n",
    "    # if there are more than one specified areas, get the data per area\n",
    "    if X_train[args.identifier].nunique() != 1:\n",
    "        area_X_train, area_X_val, area_y_train, area_y_val = get_data_by_area(X_train, X_val,\n",
    "                                                                              y_train, y_val, \n",
    "                                                                              identifier=args.identifier)\n",
    "    else:\n",
    "        area_X_train, area_X_val, area_y_train, area_y_val = None, None, None, None\n",
    "\n",
    "    # Get the exogenous data per area.\n",
    "    if exogenous_data_train is not None:\n",
    "        exogenous_data_train, exogenous_data_val = get_exogenous_data_by_area(exogenous_data_train,\n",
    "                                                                              exogenous_data_val)\n",
    "    # transform to np\n",
    "    if area_X_train is not None:\n",
    "        for area in area_X_train:\n",
    "            tmp_X_train, tmp_y_train, tmp_X_val, tmp_y_val = remove_identifiers(\n",
    "                area_X_train[area], area_y_train[area], area_X_val[area], area_y_val[area])\n",
    "            tmp_X_train, tmp_y_train = tmp_X_train.to_numpy(), tmp_y_train.to_numpy()\n",
    "            tmp_X_val, tmp_y_val = tmp_X_val.to_numpy(), tmp_y_val.to_numpy()\n",
    "            area_X_train[area] = tmp_X_train\n",
    "            area_X_val[area] = tmp_X_val\n",
    "            area_y_train[area] = tmp_y_train\n",
    "            area_y_val[area] = tmp_y_val\n",
    "    \n",
    "    if exogenous_data_train is not None:\n",
    "        for area in exogenous_data_train:\n",
    "            exogenous_data_train[area] = exogenous_data_train[area].to_numpy()\n",
    "            exogenous_data_val[area] = exogenous_data_val[area].to_numpy()\n",
    "    \n",
    "    # remove identifiers from features, targets\n",
    "    X_train, y_train, X_val, y_val = remove_identifiers(X_train, y_train, X_val, y_val)\n",
    "    assert len(X_train.columns) == len(X_val.columns)\n",
    "    \n",
    "    num_features = len(X_train.columns) // args.num_lags\n",
    "    \n",
    "    # to timeseries representation\n",
    "    X_train = to_timeseries_rep(X_train.to_numpy(), num_lags=args.num_lags,\n",
    "                                            num_features=num_features)\n",
    "    X_val = to_timeseries_rep(X_val.to_numpy(), num_lags=args.num_lags,\n",
    "                                          num_features=num_features)\n",
    "    \n",
    "    if area_X_train is not None:\n",
    "        area_X_train = to_timeseries_rep(area_X_train, num_lags=args.num_lags,\n",
    "                                                     num_features=num_features)\n",
    "        area_X_val = to_timeseries_rep(area_X_val, num_lags=args.num_lags,\n",
    "                                                   num_features=num_features)\n",
    "    \n",
    "    # transform targets to numpy\n",
    "    y_train, y_val = y_train.to_numpy(), y_val.to_numpy()\n",
    "    \n",
    "    if exogenous_data_train is not None:\n",
    "        exogenous_data_train_combined, exogenous_data_val_combined = [], []\n",
    "        for area in exogenous_data_train:\n",
    "            exogenous_data_train_combined.extend(exogenous_data_train[area])\n",
    "            exogenous_data_val_combined.extend(exogenous_data_val[area])\n",
    "        exogenous_data_train_combined = np.stack(exogenous_data_train_combined)\n",
    "        exogenous_data_val_combined = np.stack(exogenous_data_val_combined)\n",
    "        exogenous_data_train[\"all\"] = exogenous_data_train_combined\n",
    "        exogenous_data_val[\"all\"] = exogenous_data_val_combined\n",
    "    return X_train, X_val, y_train, y_val, area_X_train, area_X_val, area_y_train, area_y_val, exogenous_data_train, exogenous_data_val"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "47e3286f",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:32.647120Z",
     "end_time": "2023-08-14T20:07:32.814691Z"
    }
   },
   "outputs": [],
   "source": [
    "X_train, X_val, y_train, y_val, client_X_train, client_X_val, client_y_train, client_y_val, exogenous_data_train, exogenous_data_val = make_postprocessing(X_train, X_val, y_train, y_val, exogenous_data_train, exogenous_data_val, x_scaler, y_scaler)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "283dca1b",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:32.744120Z",
     "end_time": "2023-08-14T20:07:32.815691Z"
    }
   },
   "outputs": [],
   "source": [
    "for client in client_X_train:\n",
    "    print(f\"\\nClient: {client}\")\n",
    "    print(f\"X_train shape: {client_X_train[client].shape}, y_train shape: {client_y_train[client].shape}\")\n",
    "    print(f\"X_val shape: {client_X_val[client].shape}, y_val shape: {client_y_val[client].shape}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4252029a",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:32.760692Z",
     "end_time": "2023-08-14T20:07:32.815691Z"
    }
   },
   "outputs": [],
   "source": [
    "def get_input_dims(X_train, exogenous_data_train):\n",
    "    if args.model_name == \"mlp\":\n",
    "        input_dim = X_train.shape[1] * X_train.shape[2]\n",
    "    else:\n",
    "        input_dim = X_train.shape[2]\n",
    "    \n",
    "    if exogenous_data_train is not None:\n",
    "        if len(exogenous_data_train) == 1:\n",
    "            cid = next(iter(exogenous_data_train.keys()))\n",
    "            exogenous_dim = exogenous_data_train[cid].shape[1]\n",
    "        else:\n",
    "            exogenous_dim = exogenous_data_train[\"all\"].shape[1]\n",
    "    else:\n",
    "        exogenous_dim = 0\n",
    "    \n",
    "    return input_dim, exogenous_dim"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3c9e9a0b",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:32.776692Z",
     "end_time": "2023-08-14T20:07:32.816691Z"
    }
   },
   "outputs": [],
   "source": [
    "def get_model(model: str,\n",
    "              input_dim: int,\n",
    "              out_dim: int,\n",
    "              lags: int = 5,\n",
    "              exogenous_dim: int = 0,\n",
    "              seed=0):\n",
    "    if model == \"mlp\":\n",
    "        model = MLP(input_dim=input_dim, layer_units=[256, 128, 64], num_outputs=out_dim)\n",
    "    elif model == \"rnn\":\n",
    "        model = RNN(input_dim=input_dim, rnn_hidden_size=128, num_rnn_layers=1, rnn_dropout=0.0,\n",
    "                    layer_units=[128], num_outputs=out_dim, matrix_rep=True, exogenous_dim=exogenous_dim)\n",
    "    elif model == \"lstm\":\n",
    "        model = LSTM(input_dim=input_dim, lstm_hidden_size=128, num_lstm_layers=1, lstm_dropout=0.0,\n",
    "                     layer_units=[128], num_outputs=out_dim, matrix_rep=True, exogenous_dim=exogenous_dim)\n",
    "    elif model == \"gru\":\n",
    "        model = GRU(input_dim=input_dim, gru_hidden_size=128, num_gru_layers=1, gru_dropout=0.0,\n",
    "                    layer_units=[128], num_outputs=out_dim, matrix_rep=True, exogenous_dim=exogenous_dim)\n",
    "    elif model == \"cnn\":\n",
    "        model = CNN(num_features=input_dim, lags=lags, exogenous_dim=exogenous_dim, out_dim=out_dim)\n",
    "    elif model == \"da_encoder_decoder\":\n",
    "        model = DualAttentionAutoEncoder(input_dim=input_dim, architecture=\"lstm\", matrix_rep=True)\n",
    "    else:\n",
     "        raise NotImplementedError(\"Specified model is not implemented. Please define your own model or choose one from ['mlp', 'rnn', 'lstm', 'gru', 'cnn', 'da_encoder_decoder']\")\n",
    "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7154f923",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:32.790690Z",
     "end_time": "2023-08-14T20:07:32.816691Z"
    }
   },
   "outputs": [],
   "source": [
    "# define the model\n",
    "args.model_name = \"lstm\"\n",
    "\n",
    "input_dim, exogenous_dim = get_input_dims(X_train, exogenous_data_train)\n",
    "\n",
    "print(input_dim, exogenous_dim)\n",
    "\n",
    "model = get_model(model=args.model_name,\n",
    "                  input_dim=input_dim,\n",
    "                  out_dim=y_train.shape[1],\n",
    "                  lags=args.num_lags,\n",
    "                  exogenous_dim=exogenous_dim,\n",
    "                  seed=args.seed)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "741ca873",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:32.807692Z",
     "end_time": "2023-08-14T20:07:32.857692Z"
    }
   },
   "outputs": [],
   "source": [
    "model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6f8e41d8",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:32.828692Z",
     "end_time": "2023-08-14T20:07:32.929691Z"
    }
   },
   "outputs": [],
   "source": [
    "def fit(model, X_train, y_train, X_val, y_val, \n",
    "        exogenous_data_train=None, exogenous_data_val=None, \n",
    "        idxs=[1], # the indices of our targets in X\n",
    "        log_per=1,\n",
    "        client_creation_fn = None, # client specification\n",
    "        local_train_params=None, # local params\n",
    "        aggregation_params=None, # aggregation params\n",
    "        use_carbontracker=True\n",
    "       ):\n",
    "    # client creation definition\n",
    "    if client_creation_fn is None:\n",
    "        client_creation_fn = create_regression_client\n",
    "    # local params\n",
    "    if local_train_params is None:\n",
    "        local_train_params = {\n",
    "            \"epochs\": args.epochs, \"optimizer\": args.optimizer, \"lr\": args.lr,\n",
    "            \"criterion\": args.criterion, \"early_stopping\": args.local_early_stopping,\n",
    "            \"patience\": args.local_patience, \"device\": device\n",
    "        }\n",
    "    \n",
    "    train_loaders, val_loaders = [], []\n",
    "    \n",
    "    # get data per client\n",
    "    for client in X_train:\n",
    "        if client == \"all\":\n",
    "            continue\n",
    "        if exogenous_data_train is not None:\n",
    "            tmp_exogenous_data_train = exogenous_data_train[client]\n",
     "            tmp_exogenous_data_val = exogenous_data_val[client]\n",
    "        else:\n",
    "            tmp_exogenous_data_train = None\n",
    "            tmp_exogenous_data_val = None\n",
    "    \n",
    "        num_features = len(X_train[client][0][0])\n",
    "        \n",
    "        # to torch loader\n",
    "        train_loaders.append(\n",
    "            to_torch_dataset(\n",
    "                X_train[client], y_train[client],\n",
    "                num_lags=args.num_lags,\n",
    "                num_features=num_features,\n",
    "                exogenous_data=tmp_exogenous_data_train,\n",
    "                indices=idxs,\n",
    "                batch_size=args.batch_size,\n",
    "                shuffle=False\n",
    "            )\n",
    "        )\n",
    "        val_loaders.append(\n",
    "            to_torch_dataset(\n",
    "                X_val[client], y_val[client],\n",
    "                num_lags=args.num_lags,\n",
    "                exogenous_data=tmp_exogenous_data_val,\n",
    "                indices=idxs,\n",
    "                batch_size=args.batch_size,\n",
    "                shuffle=False\n",
    "            )\n",
    "            \n",
    "        )\n",
    "        \n",
    "    # create clients with their local data\n",
    "    cids = [k for k in X_train.keys() if k != \"all\"]\n",
    "    clients = [\n",
    "        client_creation_fn(\n",
    "            cid=cid, # client id\n",
    "            model=model, # the global model\n",
    "            train_loader=train_loader, # the local train loader\n",
    "            test_loader=val_loader, # the local val loader\n",
    "            local_params=local_train_params # local parameters\n",
    "        )\n",
    "        for cid, train_loader, val_loader in zip(cids, train_loaders, val_loaders)\n",
    "    ]\n",
    "    \n",
    "    # represent clients to server\n",
    "    client_proxies = [\n",
    "        SimpleClientProxy(cid, client) for cid, client in zip(cids, clients)\n",
    "    ]\n",
    "    \n",
    "    # represent the server\n",
    "    server = Server(\n",
    "        client_proxies=client_proxies, # the client representations\n",
    "        aggregation=args.aggregation, # the aggregation algorithm\n",
    "        aggregation_params=aggregation_params, # aggregation specific params\n",
    "        local_params_fn=None, # we can change the local params on demand\n",
    "    )\n",
    "    # Note that the client manager instance will be initialized automatically. You can define your own client manager.\n",
    "\n",
    "    # train with FL\n",
    "    model_params, history = server.fit(args.fl_rounds, args.fraction, use_carbontracker=use_carbontracker)\n",
    "    \n",
    "    params_dict = zip(model.state_dict().keys(), model_params)\n",
    "    state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict})\n",
    "    model = copy.deepcopy(model)\n",
    "    model.load_state_dict(state_dict, strict=True)\n",
    "    \n",
    "    return model, history"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "786dd0da",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:32.838692Z",
     "end_time": "2023-08-14T20:07:32.944691Z"
    }
   },
   "outputs": [],
   "source": [
    "# federated local params\n",
    "local_train_params = {\"epochs\": args.epochs, \"optimizer\": args.optimizer, \"lr\": args.lr,\n",
    "                      \"criterion\": args.criterion, \"early_stopping\": args.local_early_stopping,\n",
    "                      \"patience\": args.local_patience, \"device\": device\n",
    "                      }"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "35a8e842",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:07:32.854692Z",
     "end_time": "2023-08-14T20:09:57.822698Z"
    }
   },
   "outputs": [],
   "source": [
    "global_model, history = fit(\n",
    "    model,\n",
    "    client_X_train,\n",
    "    client_y_train, \n",
    "    client_X_val, \n",
    "    client_y_val, \n",
    "    local_train_params=local_train_params\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "429c7d8d",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:09:57.824772Z",
     "end_time": "2023-08-14T20:09:57.853183Z"
    }
   },
   "outputs": [],
   "source": [
    "def transform_preds(y_pred_train, y_pred_val):\n",
    "    if not isinstance(y_pred_train, np.ndarray):\n",
    "        y_pred_train = y_pred_train.cpu().numpy()\n",
    "    if not isinstance(y_pred_val, np.ndarray):\n",
    "        y_pred_val = y_pred_val.cpu().numpy()\n",
    "    return y_pred_train, y_pred_val\n",
    "\n",
    "def round_predictions(y_pred_train, y_pred_val, dims):\n",
    "    # round to closest integer\n",
    "    if dims is None or len(dims) == 0:\n",
    "        return y_pred_train, y_pred_val\n",
    "    for dim in dims:\n",
    "        y_pred_train[:, dim] = np.rint(y_pred_train[:, dim])\n",
    "        y_pred_val[:, dim] = np.rint(y_pred_val[:, dim])\n",
    "    return y_pred_train, y_pred_val\n",
    "\n",
    "def inverse_transform(y_train, y_val, y_pred_train, y_pred_val,\n",
    "                     y_scaler=None, \n",
    "                     round_preds=False, dims=None):\n",
    "    y_pred_train, y_pred_val = transform_preds(y_pred_train, y_pred_val)\n",
    "    \n",
    "    if y_scaler is not None:\n",
    "        y_train = y_scaler.inverse_transform(y_train)\n",
    "        y_val = y_scaler.inverse_transform(y_val)\n",
    "        y_pred_train = y_scaler.inverse_transform(y_pred_train)\n",
    "        y_pred_val = y_scaler.inverse_transform(y_pred_val)\n",
    "    \n",
    "    # to zeroes\n",
    "    y_pred_train[y_pred_train < 0.] = 0.\n",
    "    y_pred_val[y_pred_val < 0.] = 0.\n",
    "    \n",
    "    if round_preds:\n",
    "        y_pred_train, y_pred_val = round_predictions(y_pred_train, y_pred_val, dims)\n",
    "    \n",
    "    return y_train, y_val, y_pred_train, y_pred_val"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "08a3e1e0",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:09:57.832178Z",
     "end_time": "2023-08-14T20:09:57.854183Z"
    }
   },
   "outputs": [],
   "source": [
    "def make_plot(y_true, y_pred, \n",
    "              title, \n",
    "              feature_names=None, \n",
    "              client=None):\n",
    "    if feature_names is None:\n",
    "        feature_names = [f\"feature_{i}\" for i in range(y_pred.shape[1])]\n",
    "    assert len(feature_names) == y_pred.shape[1]\n",
    "\n",
    "    for i in range(y_pred.shape[1]):\n",
    "        plt.figure(figsize=(8, 6))\n",
    "        plt.ticklabel_format(style='plain')\n",
    "        plt.plot(y_true[:, i], label=\"Actual\")\n",
    "        plt.plot(y_pred[:, i], label=\"Predicted\")\n",
    "        if client is not None:\n",
    "            plt.title(f\"[{client} {title}] {feature_names[i]} prediction\")\n",
    "        else:\n",
    "            plt.title(f\"[{title}] {feature_names[i]} prediction\")\n",
    "        plt.legend()\n",
    "        plt.show()\n",
    "        plt.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a54aa4db",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:09:57.848187Z",
     "end_time": "2023-08-14T20:09:57.872184Z"
    }
   },
   "outputs": [],
   "source": [
    "def inference(\n",
    "    model,  # the global model\n",
    "    client_X_train,  # train features per client\n",
    "    client_y_train,  # train targets per client\n",
    "    client_X_val,  # validation features per client\n",
    "    client_y_val,  # validation targets per client\n",
    "    exogenous_data_train,  # exogenous train data per client (or None)\n",
    "    exogenous_data_val,  # exogenous validation data per client (or None)\n",
    "    y_scaler,  # the scaler used to transform the targets\n",
    "    idxs=[1],\n",
    "    apply_round=True,  # round predictions to the closest integer\n",
    "    round_dimensions=[0],  # the target dimensions to apply rounding to\n",
    "    plot=False,  # plot predictions\n",
    "):\n",
    "    \"\"\"Evaluate the global model on each client's train/val split and\n",
    "    report per-client metrics plus the validation metrics averaged over\n",
    "    all clients.\n",
    "\n",
    "    NOTE(review): relies on the notebook-level globals `args`, `device`,\n",
    "    `test`, `inverse_transform`, `accumulate_metric` and `make_plot`.\n",
    "    \"\"\"\n",
    "    # load per client data to torch\n",
    "    train_loaders, val_loaders = [], []\n",
    "\n",
    "    # build one train and one val loader per client (skip the merged \"all\" key)\n",
    "    for client in client_X_train:\n",
    "        if client == \"all\":\n",
    "            continue\n",
    "        if exogenous_data_train is not None:\n",
    "            tmp_exogenous_data_train = exogenous_data_train[client]\n",
    "            tmp_exogenous_data_val = exogenous_data_val[client]\n",
    "        else:\n",
    "            tmp_exogenous_data_train = None\n",
    "            tmp_exogenous_data_val = None\n",
    "\n",
    "        num_features = len(client_X_train[client][0][0])\n",
    "\n",
    "        # to torch loader\n",
    "        train_loaders.append(\n",
    "            to_torch_dataset(\n",
    "                client_X_train[client], client_y_train[client],\n",
    "                num_lags=args.num_lags,\n",
    "                num_features=num_features,\n",
    "                exogenous_data=tmp_exogenous_data_train,\n",
    "                indices=idxs,\n",
    "                batch_size=1,\n",
    "                shuffle=False\n",
    "            )\n",
    "        )\n",
    "        val_loaders.append(\n",
    "            to_torch_dataset(\n",
    "                client_X_val[client], client_y_val[client],\n",
    "                num_lags=args.num_lags,\n",
    "                num_features=num_features,  # fix: was missing here, unlike the train loader above\n",
    "                exogenous_data=tmp_exogenous_data_val,\n",
    "                indices=idxs,\n",
    "                batch_size=1,\n",
    "                shuffle=False\n",
    "            )\n",
    "        )\n",
    "\n",
    "    # get client ids\n",
    "    cids = [k for k in client_X_train.keys() if k != \"all\"]\n",
    "\n",
    "    # predict per client using the global model\n",
    "    y_preds_train, y_preds_val = dict(), dict()\n",
    "    for cid, train_loader, val_loader in zip(cids, train_loaders, val_loaders):\n",
    "        print(f\"Prediction on {cid}\")\n",
    "        train_mse, train_rmse, train_mae, train_r2, train_nrmse, y_pred_train = test(\n",
    "            model, train_loader, None, device=device\n",
    "        )\n",
    "        val_mse, val_rmse, val_mae, val_r2, val_nrmse, y_pred_val = test(\n",
    "            model, val_loader, None, device=device\n",
    "        )\n",
    "        y_preds_train[cid] = y_pred_train\n",
    "        y_preds_val[cid] = y_pred_val\n",
    "\n",
    "    # compute per-client metrics in the original units and accumulate the\n",
    "    # validation metrics so they can be averaged over all clients\n",
    "    val_mse_avg, val_rmse_avg, val_mae_avg, val_r2_avg, val_nrmse_avg = 0, 0, 0, 0, 0\n",
    "    for cid in cids:\n",
    "        y_train, y_val = client_y_train[cid], client_y_val[cid]\n",
    "        y_pred_train, y_pred_val = y_preds_train[cid], y_preds_val[cid]\n",
    "\n",
    "        # undo the target scaling before computing metrics\n",
    "        y_train, y_val, y_pred_train, y_pred_val = inverse_transform(\n",
    "            y_train, y_val, y_pred_train, y_pred_val,\n",
    "            y_scaler, round_preds=apply_round, dims=round_dimensions\n",
    "        )\n",
    "        train_mse, train_rmse, train_mae, train_r2, train_nrmse, train_res_per_dim = accumulate_metric(\n",
    "            y_train, y_pred_train, True, return_all=True\n",
    "        )\n",
    "        val_mse, val_rmse, val_mae, val_r2, val_nrmse, val_res_per_dim = accumulate_metric(\n",
    "            y_val, y_pred_val, True, return_all=True\n",
    "        )\n",
    "\n",
    "        print(f\"\\nFinal Prediction on {cid} (Inference Stage)\")\n",
    "        print(f\"[Train]: mse: {train_mse}, \"\n",
    "              f\"rmse: {train_rmse}, mae {train_mae}, r2: {train_r2}, nrmse: {train_nrmse}\")\n",
    "        print(f\"[Val]: mse: {val_mse}, \"\n",
    "              f\"rmse: {val_rmse}, mae {val_mae}, r2: {val_r2}, nrmse: {val_nrmse}\\n\\n\")\n",
    "\n",
    "        val_mse_avg += val_mse\n",
    "        val_rmse_avg += val_rmse\n",
    "        val_mae_avg += val_mae\n",
    "        val_r2_avg += val_r2\n",
    "        val_nrmse_avg += val_nrmse\n",
    "\n",
    "        if plot:\n",
    "            make_plot(y_train, y_pred_train, title=\"Train\", feature_names=args.targets, client=cid)\n",
    "            make_plot(y_val, y_pred_val, title=\"Val\", feature_names=args.targets, client=cid)\n",
    "    print(\"总长度：\" + str(len(cids)))\n",
    "    val_mse_avg /= len(cids)\n",
    "    val_rmse_avg /= len(cids)\n",
    "    val_mae_avg /= len(cids)\n",
    "    val_r2_avg /= len(cids)\n",
    "    val_nrmse_avg /= len(cids)\n",
    "    print(f\"[Val]: val_mse_avg: {val_mse_avg}, \"\n",
    "              f\"val_rmse_avg: {val_rmse_avg}, val_mae_avg: {val_mae_avg}, val_r2_avg: {val_r2_avg}, val_nrmse_avg: {val_nrmse_avg}\\n\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "daf5c488",
   "metadata": {
    "scrolled": false,
    "ExecuteTime": {
     "start_time": "2023-08-14T20:09:57.864185Z",
     "end_time": "2023-08-14T20:10:00.276404Z"
    }
   },
   "outputs": [],
   "source": [
    "# run inference of the global model on every client's train/val split\n",
    "inference(\n",
    "    global_model,\n",
    "    client_X_train, \n",
    "    client_y_train,\n",
    "    client_X_val, \n",
    "    client_y_val,\n",
    "    exogenous_data_train, \n",
    "    exogenous_data_val,\n",
    "    y_scaler\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "10890c8c",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:00.266403Z",
     "end_time": "2023-08-14T20:10:00.326088Z"
    }
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "161c9dfb",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:00.282403Z",
     "end_time": "2023-08-14T20:10:00.330090Z"
    }
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "2931ee5a",
   "metadata": {},
   "source": [
    "### Here we test our trained model on previously unseen test data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "be358dc6",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:00.301403Z",
     "end_time": "2023-08-14T20:10:00.330090Z"
    }
   },
   "outputs": [],
   "source": [
    "def transform_preds_test(y_pred_test):\n",
    "    \"\"\"Return the predictions as a numpy array, moving torch tensors to the CPU first.\"\"\"\n",
    "    if isinstance(y_pred_test, np.ndarray):\n",
    "        return y_pred_test\n",
    "    return y_pred_test.cpu().numpy()\n",
    "\n",
    "def round_predictions_test(y_pred_test, dims):\n",
    "    \"\"\"Round the listed output dimensions to the nearest integer (in place).\n",
    "\n",
    "    A None/empty `dims` leaves the predictions untouched.\n",
    "    \"\"\"\n",
    "    if dims:\n",
    "        for dim in dims:\n",
    "            y_pred_test[:, dim] = np.rint(y_pred_test[:, dim])\n",
    "    return y_pred_test\n",
    "\n",
    "def inverse_transform_test(\n",
    "    y_test, y_pred_test,\n",
    "    y_scaler=None, \n",
    "    round_preds=False,\n",
    "    dims=None):\n",
    "    \"\"\"Undo the target scaling on test targets/predictions, clamp negative\n",
    "    predictions to zero and optionally round selected output dimensions.\"\"\"\n",
    "    preds = transform_preds_test(y_pred_test)\n",
    "\n",
    "    if y_scaler is not None:\n",
    "        y_test = y_scaler.inverse_transform(y_test)\n",
    "        preds = y_scaler.inverse_transform(preds)\n",
    "\n",
    "    # negative predictions are clamped to zero (in place)\n",
    "    preds[preds < 0.] = 0.\n",
    "\n",
    "    if round_preds:\n",
    "        preds = round_predictions_test(preds, dims)\n",
    "\n",
    "    return y_test, preds\n",
    "\n",
    "def predict(\n",
    "            model,\n",
    "            cid, \n",
    "            X_test,\n",
    "            y_test,\n",
    "            exogenous_data_test,\n",
    "            plot,\n",
    "            idxs=[1],\n",
    "            apply_round=True,\n",
    "            round_dimensions=[0]\n",
    "           ):\n",
    "    \"\"\"Predict on one client's test set with the global model and print metrics.\n",
    "\n",
    "    NOTE(review): relies on the notebook-level globals `args`, `device`,\n",
    "    `y_scaler`, `test`, `accumulate_metric` and `make_plot` -- confirm they\n",
    "    are defined before calling.\n",
    "    \"\"\"\n",
    "    \n",
    "    # strip the \"test\" marker from ids such as \"Stort1_test\" -> \"Stort1\"\n",
    "    if \"test\" in cid:\n",
    "        tmp_cid = cid.split(\"_\")\n",
    "        for s_t in tmp_cid:\n",
    "            if s_t != \"test\":\n",
    "                cid = s_t\n",
    "                break\n",
    "    \n",
    "    # number of raw features per time lag\n",
    "    num_features = len(X_test[0][0])\n",
    "    \n",
    "    test_loader = to_torch_dataset(\n",
    "        X_test, y_test,\n",
    "        num_lags=args.num_lags,\n",
    "        num_features=num_features,\n",
    "        exogenous_data=exogenous_data_test,\n",
    "        indices=idxs,\n",
    "        batch_size=1,\n",
    "        shuffle=False\n",
    "    )\n",
    "    test_mse, test_rmse, test_mae, test_r2, test_nrmse, y_pred_test = test(\n",
    "                model, test_loader, None, device=device\n",
    "    )\n",
    "    \n",
    "    # back to original units; negatives clamped and (optionally) rounded\n",
    "    y_test, y_pred_test = inverse_transform_test(\n",
    "        y_test, y_pred_test, y_scaler, round_preds=apply_round, dims=round_dimensions\n",
    "    )\n",
    "    \n",
    "    test_mse, test_rmse, test_mae, test_r2, test_nrmse, test_res_per_dim = accumulate_metric(\n",
    "        y_test, y_pred_test, log_per_output=True, return_all=True\n",
    "    )\n",
    "    print(f\"Final Prediction in {cid}\")\n",
    "    print(f\"[Test]: mse: {test_mse}, rmse: {test_rmse}, mae {test_mae}, \"\n",
    "        f\"r2: {test_r2}, nrmse: {test_nrmse}\\n\\n\")\n",
    "    \n",
    "    if plot:\n",
    "        make_plot(y_test, y_pred_test, title=\"Test\", feature_names=args.targets, client=cid)\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "79d5c5e1",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:00.313090Z",
     "end_time": "2023-08-14T20:10:00.330090Z"
    }
   },
   "outputs": [],
   "source": [
    "def model_test(\n",
    "    model, \n",
    "    data_paths, \n",
    "    x_scaler, \n",
    "    y_scaler,\n",
    "    plot=True,\n",
    "    idxs=[1],\n",
    "    round_predictions=False,\n",
    "    round_dimensions=[0, 3, 4],\n",
    "):\n",
    "    \"\"\"Pre-process each raw test csv exactly like the training data and run\n",
    "    `predict` on it with the global model.\n",
    "\n",
    "    NOTE(review): relies on the notebook-level `args`. If a csv already\n",
    "    contains the identifier column and no exogenous features are produced,\n",
    "    `cid` would be undefined at the `predict` call -- verify against the\n",
    "    data pipeline.\n",
    "    \"\"\"\n",
    "    # In testing we should pre-process and post-process our data and\n",
    "    # instruct our model to perform predictions\n",
    "\n",
    "    # read the data\n",
    "    for client in data_paths:\n",
    "        df = read_data(client)\n",
    "\n",
    "        # derive the client id from the file name when the column is missing\n",
    "        if args.identifier not in df.columns:\n",
    "            cid = os.path.splitext(os.path.basename(client))[0]\n",
    "            df[args.identifier] = cid\n",
    "\n",
    "        test_data = df.copy()\n",
    "\n",
    "        # get X_test, y_test\n",
    "        X_test, y_test = to_Xy(test_data, targets=args.targets)\n",
    "\n",
    "        # scale features, targets with the global (train-time) scalers\n",
    "        X_test = scale_features(X_test, scaler=x_scaler, per_area=False)\n",
    "        y_test = scale_features(y_test, scaler=y_scaler, per_area=False)\n",
    "\n",
    "        # generate time lags\n",
    "        X_test = generate_time_lags(X_test, args.num_lags)\n",
    "        y_test = generate_time_lags(y_test, args.num_lags, is_y=True)\n",
    "\n",
    "        # get datetime features (if specified)\n",
    "        date_time_df_test = time_to_feature(\n",
    "            X_test, args.use_time_features, identifier=args.identifier\n",
    "        )\n",
    "\n",
    "        # get statistics as features (if specified)\n",
    "        stats_df_test = assign_statistics(X_test, args.assign_stats, args.num_lags,\n",
    "                          targets=args.targets, identifier=args.identifier)\n",
    "\n",
    "        if date_time_df_test is not None or stats_df_test is not None:\n",
    "            exogenous_data_test = pd.concat([date_time_df_test, stats_df_test], axis=1)\n",
    "            exogenous_data_test = exogenous_data_test.loc[:, ~exogenous_data_test.columns.duplicated()].copy()\n",
    "        else:\n",
    "            exogenous_data_test = None\n",
    "\n",
    "        # transform to numpy\n",
    "        if exogenous_data_test is not None:\n",
    "            exogenous_data_test = get_exogenous_data_by_area(\n",
    "                exogenous_data_test, identifier=args.identifier\n",
    "            )\n",
    "\n",
    "            for cid in exogenous_data_test:\n",
    "                exogenous_data_test[cid] = exogenous_data_test[cid].to_numpy()\n",
    "\n",
    "        # remove identifiers\n",
    "        X_test, y_test = remove_identifiers(X_test, y_test)\n",
    "\n",
    "        num_features = len(X_test.columns) // args.num_lags\n",
    "\n",
    "        # to timeseries representation\n",
    "        X_test = to_timeseries_rep(X_test.to_numpy(), args.num_lags, num_features=num_features)\n",
    "\n",
    "        y_test = y_test.to_numpy()\n",
    "\n",
    "        if exogenous_data_test is not None:\n",
    "            assert len(exogenous_data_test) == 1\n",
    "            exogenous_data_test = exogenous_data_test[next(iter(exogenous_data_test))]\n",
    "\n",
    "        # make predictions (fix: forward the rounding options, which were\n",
    "        # previously accepted by this function but silently ignored)\n",
    "        predict(model, cid, X_test, y_test, exogenous_data_test, plot, idxs,\n",
    "                apply_round=round_predictions, round_dimensions=round_dimensions)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8108d7ea",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:00.331091Z",
     "end_time": "2023-08-14T20:10:00.344090Z"
    }
   },
   "outputs": [],
   "source": [
    "# paths of the per-store hold-out test sets (Stort1 .. Stort45)\n",
    "test_data_paths = [f\"../mydatase/marketData/Stort{i}_test.csv\" for i in range(1, 46)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e90538b6",
   "metadata": {
    "scrolled": false,
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:00.345090Z",
     "end_time": "2023-08-14T20:10:06.673943Z"
    }
   },
   "outputs": [],
   "source": [
    "# evaluate the trained global model on the unseen per-store test files\n",
    "model_test(global_model, \n",
    "           test_data_paths, \n",
    "           x_scaler, y_scaler,\n",
    "           round_predictions=True,\n",
    "          )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4fefe332",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:06.673943Z",
     "end_time": "2023-08-14T20:10:06.719943Z"
    }
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1ae5bb49",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:06.692945Z",
     "end_time": "2023-08-14T20:10:06.745944Z"
    }
   },
   "outputs": [],
   "source": [
    "# display the fitted global feature/target scalers\n",
    "x_scaler, y_scaler"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "44bd4afc",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:06.705943Z",
     "end_time": "2023-08-14T20:10:06.745944Z"
    }
   },
   "outputs": [],
   "source": [
    "def get_minmax_attrs(scaler):\n",
    "    \"\"\"Collect the fitted state of a MinMaxScaler into a dict of numpy arrays.\n",
    "\n",
    "    Scalar attributes are wrapped in 1-element arrays so every value can be\n",
    "    written to an hdf5 group as-is.\n",
    "    \"\"\"\n",
    "    scaler_attrs = {\n",
    "        \"min_\": scaler.min_,\n",
    "        \"scale_\": scaler.scale_,\n",
    "        \"data_min_\": scaler.data_min_,\n",
    "        \"data_max_\": scaler.data_max_,\n",
    "        \"data_range_\": scaler.data_range_,\n",
    "        \"n_features_in_\": np.array([scaler.n_features_in_]),\n",
    "        \"n_samples_seen_\": np.array([scaler.n_samples_seen_]),\n",
    "        \"feature_names_in_\": scaler.feature_names_in_,\n",
    "    }\n",
    "    \n",
    "    print(scaler_attrs)\n",
    "    \n",
    "    return scaler_attrs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7b5339b8",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:06.725946Z",
     "end_time": "2023-08-14T20:10:06.745944Z"
    }
   },
   "outputs": [],
   "source": [
    "# x_scaler_attrs = get_minmax_attrs(x_scaler)\n",
    "# print(\"\\n\\n\")\n",
    "# y_scaler_attrs = get_minmax_attrs(y_scaler)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "da61bf32",
   "metadata": {},
   "source": [
    "#### Store to hdf5 format"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "688a67e4",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:06.739945Z",
     "end_time": "2023-08-14T20:10:06.815944Z"
    }
   },
   "outputs": [],
   "source": [
    "# import h5py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4777ca40",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:06.758945Z",
     "end_time": "2023-08-14T20:10:06.821947Z"
    }
   },
   "outputs": [],
   "source": [
    "# class ModelSerializer:\n",
    "#     def __init__(self,\n",
    "#                  model_state,\n",
    "#                  x_scaler_state,\n",
    "#                  y_scaler_state,\n",
    "#                  save_path: str):\n",
    "#         self.model_state = model_state\n",
    "#         self.save_path = save_path\n",
    "#         self.x_scaler = x_scaler_state\n",
    "#         self.y_scaler = y_scaler_state\n",
    "#\n",
    "#     def save(self):\n",
    "#         np_weights = self.state_to_numpy(self.model_state)\n",
    "#         with h5py.File(self.save_path, \"w\") as f:\n",
    "#             group = f.create_group(\"model_weights\", track_order=True)\n",
    "#             for k, v in np_weights.items():\n",
    "#                 group[k] = v\n",
    "#             group = f.create_group(\"x_scaler\", track_order=True)\n",
    "#             for k, v in self.x_scaler.items():\n",
    "#                 group[k] = v\n",
    "#             group=f.create_group(\"y_scaler\", track_order=True)\n",
    "#             for k, v in self.y_scaler.items():\n",
    "#                 group[k] = v\n",
    "#\n",
    "#\n",
    "#     @staticmethod\n",
    "#     def state_to_numpy(model_state):\n",
    "#         assert type(model_state) in (dict, OrderedDict), \\\n",
    "#             f\"Model state must be of type dictionary. Received {type(model_state)}\"\n",
    "#         k = next(iter(model_state))\n",
    "#         assert type(model_state[k]) in (torch.tensor, torch.Tensor, np.ndarray), \\\n",
    "#             f\"Model weights must be of type torch.tensor or numpy.ndarray. Received {type(model_state[k])}\"\n",
    "#         if type(model_state[k]) == np.ndarray:\n",
    "#             return model_state\n",
    "#         np_ordered_dict = OrderedDict()\n",
    "#         for k, v in model_state.items():\n",
    "#             np_ordered_dict[k] = v.cpu().numpy().astype(np.float64)\n",
    "#         return np_ordered_dict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a195498d",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:06.770943Z",
     "end_time": "2023-08-14T20:10:06.821947Z"
    }
   },
   "outputs": [],
   "source": [
    "# serializer = ModelSerializer(global_model.state_dict(), x_scaler_attrs, y_scaler_attrs, save_path=\"model_checkpoint/demo_model.h5\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "51d8d597",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:06.785943Z",
     "end_time": "2023-08-14T20:10:06.821947Z"
    }
   },
   "outputs": [],
   "source": [
    "# serializer.save()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5c796b99",
   "metadata": {},
   "source": [
    "#### Load back"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7c58c933",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:06.803944Z",
     "end_time": "2023-08-14T20:10:06.821947Z"
    }
   },
   "outputs": [],
   "source": [
    "# class ModelLoader:\n",
    "#     def __init__(self, model_path: str,\n",
    "#                  to_torch: bool = True):\n",
    "#         self.model_path = model_path\n",
    "#         self.to_torch = to_torch\n",
    "#\n",
    "#     def load(self):\n",
    "#         state_dict = OrderedDict()\n",
    "#         x_scaler, y_scaler = OrderedDict(), OrderedDict()\n",
    "#         with h5py.File(self.model_path, \"r\") as f:\n",
    "#             keys = list(f.keys())\n",
    "#             assert \"model_weights\" in keys\n",
    "#             assert \"x_scaler\" in keys\n",
    "#             assert \"y_scaler\" in keys\n",
    "#             model_weights = f[\"model_weights\"]\n",
    "#             named_params = model_weights.keys()\n",
    "#             for k in named_params:\n",
    "#                 state_dict[k] = model_weights[k][:]\n",
    "#\n",
    "#             x_scaler_state = f[\"x_scaler\"]\n",
    "#             named_params = x_scaler_state.keys()\n",
    "#             for k in named_params:\n",
    "#                 print(k)\n",
    "#                 x_scaler[k] = x_scaler_state[k][:]\n",
    "#\n",
    "#             y_scaler_state = f[\"y_scaler\"]\n",
    "#             named_params = y_scaler_state.keys()\n",
    "#             for k in named_params:\n",
    "#                 y_scaler[k] = y_scaler_state[k][:]\n",
    "#\n",
    "#         if self.to_torch:\n",
    "#             state_dict = self.state_to_torch(state_dict)\n",
    "#         return state_dict, x_scaler, y_scaler\n",
    "#\n",
    "#     @staticmethod\n",
    "#     def state_to_torch(model_state):\n",
    "#         assert type(model_state) in (dict, OrderedDict), \\\n",
    "#             f\"Model state must be of type dictionary. Received {type(model_state)}\"\n",
    "#         k = next(iter(model_state))\n",
    "#         assert type(model_state[k]) in (torch.tensor, torch.Tensor, np.ndarray), \\\n",
    "#             f\"Model weights must be of type torch.tensor or numpy.ndarray. Received {type(model_state[k])}\"\n",
    "#         if type(model_state[k]) in (torch.tensor, torch.Tensor):\n",
    "#             return model_state\n",
    "#         torch_ordered_dict = OrderedDict()\n",
    "#         for k, v in model_state.items():\n",
    "#             torch_ordered_dict[k] = torch.tensor(v).float()\n",
    "#         return torch_ordered_dict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8f4dcae9",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:06.820952Z",
     "end_time": "2023-08-14T20:10:06.833944Z"
    }
   },
   "outputs": [],
   "source": [
    "# model_loader = ModelLoader(\"model_checkpoint/demo_model.h5\")\n",
    "# state_dict, x_scaler, y_scaler = model_loader.load()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5c979aa2",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:06.836948Z",
     "end_time": "2023-08-14T20:10:06.891943Z"
    }
   },
   "outputs": [],
   "source": [
    "# state_dict[next(iter(state_dict))]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f6ad1f87",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:06.850946Z",
     "end_time": "2023-08-14T20:10:06.892945Z"
    }
   },
   "outputs": [],
   "source": [
    "# x_scaler"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3fc5dd39",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:06.866944Z",
     "end_time": "2023-08-14T20:10:06.892945Z"
    }
   },
   "outputs": [],
   "source": [
    "# y_scaler"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6e69b5f3",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:06.882944Z",
     "end_time": "2023-08-14T20:10:06.927943Z"
    }
   },
   "outputs": [],
   "source": [
    "# from sklearn.preprocessing import MinMaxScaler"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1dc22101",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:06.898944Z",
     "end_time": "2023-08-14T20:10:06.928947Z"
    }
   },
   "outputs": [],
   "source": [
    "# def set_minmax_attrs(scaler, scaler_attrs):\n",
    "#     scaler.min_ = scaler_attrs[\"min_\"]\n",
    "#     scaler.scale_ = scaler_attrs[\"scale_\"]\n",
    "#     scaler.data_min_ = scaler_attrs[\"data_min_\"]\n",
    "#     scaler.data_max_ = scaler_attrs[\"data_max_\"]\n",
    "#     scaler.data_range_ = scaler_attrs[\"data_range_\"]\n",
    "#     scaler.n_features_in_ = scaler_attrs[\"n_features_in_\"][0]\n",
    "#     scaler.n_samples_seen_ = scaler_attrs[\"n_samples_seen_\"][0]\n",
    "#     scaler.feature_names_in_ = scaler_attrs[\"feature_names_in_\"]\n",
    "#\n",
    "#     return scaler"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1151374b",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:06.914944Z",
     "end_time": "2023-08-14T20:10:06.961946Z"
    }
   },
   "outputs": [],
   "source": [
    "# tmp_scaler = MinMaxScaler()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f609084b",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:06.930944Z",
     "end_time": "2023-08-14T20:10:06.963955Z"
    }
   },
   "outputs": [],
   "source": [
    "# tmp_scaler = set_minmax_attrs(tmp_scaler, y_scaler)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e29e7cd9",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:06.945947Z",
     "end_time": "2023-08-14T20:10:06.963955Z"
    }
   },
   "outputs": [],
   "source": [
    "# tmp_scaler"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4a3bc097",
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-08-14T20:10:06.963955Z",
     "end_time": "2023-08-14T20:10:06.976945Z"
    }
   },
   "outputs": [],
   "source": [
    "# tmp_scaler.inverse_transform(np.array(\n",
    "#     [[1., 1., 1., 1., 1.],\n",
    "#     [0.5, 0.5, 0.5, 0.5, 0.5]]\n",
    "# ))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.9.10 64-bit",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.10"
  },
  "vscode": {
   "interpreter": {
    "hash": "a39106e1a9d6d153b7400628e7589ff266b5caee5b0db427f0903be982155882"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
