{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Import"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import pandas as pd\n",
    "import torch.optim as optim\n",
    "import numpy as np\n",
    "import argparse\n",
    "from torch.utils.data import DataLoader\n",
    "from tqdm import tqdm\n",
    "from contextlib import nullcontext\n",
    "\n",
    "\n",
    "from utils import load_dataset, criterion_mape, set_seed\n",
    "from TSL_models import CRIB\n",
    "from TSL_models import DLinear, SegRNN, Transformer, iTransformer, PatchTST, TSMixer, WPMixer, PAttn, KANAD, MultiPatchFormer, FreTS  # these models expect input shaped [Batch, seq_len, Channels]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Initial Attributes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def parse_args(args=None):\n",
    "    \"\"\"Build the experiment argument parser and parse the given token list.\n",
    "\n",
    "    args: optional list of CLI-style tokens (handy in notebooks); None falls\n",
    "    back to sys.argv. Returns an argparse.Namespace with all hyperparameters.\n",
    "    \"\"\"\n",
    "    def str2bool(v):\n",
    "        # argparse's type=bool is a known trap: bool('False') is True because any\n",
    "        # non-empty string is truthy, so boolean flags could never be turned off\n",
    "        # from the command line. Parse textual booleans explicitly instead.\n",
    "        if isinstance(v, bool):\n",
    "            return v\n",
    "        if v.lower() in ('yes', 'true', 't', '1'):\n",
    "            return True\n",
    "        if v.lower() in ('no', 'false', 'f', '0'):\n",
    "            return False\n",
    "        raise argparse.ArgumentTypeError(f'Boolean value expected, got {v!r}')\n",
    "\n",
    "    parser = argparse.ArgumentParser(description='Hyperparameter settings for MyVar.')\n",
    "\n",
    "    # NOTE(review): duplicate \"PAttn\" entry removed from choices. \"STJTransformer\"\n",
    "    # is accepted here but has no branch in the model-loading cell — confirm.\n",
    "    parser.add_argument('--model', type=str, default='CRIB', choices=[\"CRIB\", \"STJTransformer\", \"DLinear\", \"PAttn\", \"SegRNN\", \"Transformer\", \"iTransformer\", \"PatchTST\", \"TSMixer\", \"WPMixer\"], help='Model name')\n",
    "\n",
    "    parser.add_argument('--dataset', type=str, default='PEMS', choices=['Elec', 'ETTh1', 'PEMS', 'PEMS08', 'Metr', 'BeijingAir', 'Elec_imputed', 'ETTh1_imputed', 'PEMS_imputed', 'Metr_imputed'], help='Dataset name')\n",
    "    \n",
    "    parser.add_argument('--missing_rate', type=float, default=0.2, help='Missing data rate')\n",
    "    parser.add_argument('--missing_pattern', type=str, default='point', choices=['point', 'block', 'col'], help='Missing data pattern')\n",
    "    \n",
    "    parser.add_argument('--loss_type', type=str, default='123', choices=['1', '2', '3', '12', '13', '23', '123'], help='Loss Type')\n",
    "    parser.add_argument('--IB_weight', type=float, default=1.0, help='IB weight')\n",
    "    parser.add_argument('--KL_weight', type=float, default=1e-6, help='KL weight')\n",
    "    parser.add_argument('--Consis_weight', type=float, default=1.0, help='Consistency weight')\n",
    "    \n",
    "    parser.add_argument('--missing_block_width', type=int, default=5, help='Missing block width for block pattern')\n",
    "    parser.add_argument('--missing_block_height', type=int, default=5, help='Missing block height for block pattern')\n",
    "    \n",
    "    parser.add_argument('--train_epochs', type=int, default=10, help='Number of training epochs')\n",
    "    parser.add_argument('--learning_rate', type=float, default=0.001, help='Learning rate')\n",
    "    parser.add_argument('--weight_decay', type=float, default=0, help='Weight decay')\n",
    "    parser.add_argument('--use_amp', type=str2bool, default=True, help='Use automatic mixed precision (AMP)')\n",
    "    parser.add_argument('--mask_rate', type=float, default=0.5, help='Mask rate')\n",
    "    parser.add_argument('--std_flag', type=str2bool, default=True, help='Standardization flag')\n",
    "    parser.add_argument('--batch_size', type=int, default=256, help='Batch size')\n",
    "    parser.add_argument('--shuffle', type=str2bool, default=True, help='Shuffle the dataset')\n",
    "    # NOTE(review): 80 dataloader workers is unusually high; confirm the host has the cores.\n",
    "    parser.add_argument('--num_workers', type=int, default=80, help='Number of workers for data loading')\n",
    "    parser.add_argument('--seq_len', type=int, default=24, help='Input sequence length')\n",
    "    parser.add_argument('--pred_len', type=int, default=24, help='Prediction sequence length')\n",
    "    parser.add_argument('--patch_len', type=int, default=8, help='Patch length')\n",
    "    parser.add_argument('--model_dim', type=int, default=128, help='Model dimension')\n",
    "    parser.add_argument('--dropout', type=float, default=0.1, help='Dropout rate')\n",
    "    parser.add_argument('--output_attention', type=str2bool, default=True, help='Output attention flag')\n",
    "    parser.add_argument('--activation', type=str, default=\"relu\", help='Activation function')\n",
    "    parser.add_argument('--heads_num', type=int, default=4, help='Number of attention heads')\n",
    "    parser.add_argument('--enc_num', type=int, default=3, help='Number of encoder layers')\n",
    "    parser.add_argument('--dec_num', type=int, default=1, help='Number of decoder layers')\n",
    "    parser.add_argument('--val_ratio', type=float, default=0.2, help='Validation set ratio')\n",
    "    parser.add_argument('--test_ratio', type=float, default=0.2, help='Test set ratio')\n",
    "    parser.add_argument('--seed', type=int, default=123, help='Random seed')\n",
    "    parser.add_argument('--iter', type=str, default='1', help='Iteration')\n",
    "    parser.add_argument('--csv_path', type=str, default='./result_1.csv', help='Result path')\n",
    "    parser.add_argument('--exp_type', type=str, default='Train', help='Exp result')\n",
    "    \n",
    "    # arguments for TSL models\n",
    "    parser.add_argument('--task_name', type=str, default='long_term_forecast', help='task name, options:[long_term_forecast, short_term_forecast, imputation, classification, anomaly_detection]')\n",
    "    parser.add_argument('--moving_avg', type=int, default=3, help='window size of moving average')\n",
    "    parser.add_argument('--seg_len', type=int, default=24, help='the length of segmen-wise iteration of SegRNN')\n",
    "    parser.add_argument('--embed', type=str, default='timeF', help='time features encoding, options:[timeF, fixed, learned]')\n",
    "    parser.add_argument('--freq', type=str, default='h', help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')\n",
    "    parser.add_argument('--factor', type=int, default=1, help='attn factor')\n",
    "    parser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn')\n",
    "    parser.add_argument('--label_len', type=int, default=12, help='start token length')\n",
    "    parser.add_argument('--features', type=str, default='M', help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')\n",
    "    parser.add_argument('--use_norm', type=int, default=1, help='whether to use normalize; True 1 False 0')\n",
    "    parser.add_argument('--channel_independence', type=int, default=0,help='0: channel dependence 1: channel independence for FreTS model')\n",
    "\n",
    "    \n",
    "    return parser.parse_args(args)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "args_list=[\n",
    "    '--dataset', \"ETTh1\",\n",
    "    '--model', \"CRIB\",\n",
    "    '--batch_size', \"32\",\n",
    "    '--model_dim', \"32\",\n",
    "    '--missing_pattern', \"point\",\n",
    "    '--missing_rate', \"0.2\",\n",
    "    '--seq_len', '24',\n",
    "    '--pred_len', '24',\n",
    "    '--train_epochs', \"10\",\n",
    "    '--iter', \"1\",\n",
    "    '--csv_path', \"./result_1.csv\",\n",
    "    '--exp_type', 'Train'\n",
    "]\n",
    "\n",
    "args=parse_args(args=args_list)\n",
    "\n",
    "if args.seq_len % args.patch_len != 0:\n",
    "    raise ValueError(f\"seq_len {args.seq_len} must be divisible by patch_len {args.patch_len}\")\n",
    "\n",
    "args.patch_num = args.seq_len // args.patch_len\n",
    "\n",
    "# Aliases under the attribute names the TSL model implementations expect.\n",
    "args.n_heads = args.heads_num\n",
    "args.d_model = args.model_dim\n",
    "args.e_layers = args.enc_num\n",
    "args.d_layers = args.dec_num\n",
    "\n",
    "criterion_mae=nn.L1Loss()\n",
    "criterion_mse=nn.MSELoss()\n",
    "\n",
    "if args.seed!=-1:\n",
    "    set_seed(args.seed)\n",
    "\n",
    "args.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "# Dataset substring -> number of variables/nodes, checked most-specific first.\n",
    "# BUG FIX: the original elif chain tested 'PEMS' before 'PEMS08', making the\n",
    "# 'PEMS08' branch unreachable, so PEMS08 datasets were assigned 325 variables\n",
    "# instead of 170.\n",
    "dataset_nodes = [\n",
    "    ('Metr', 207),\n",
    "    ('PEMS08', 170),\n",
    "    ('PEMS', 325),\n",
    "    ('ETTh1', 7),\n",
    "    ('Elec', 321),\n",
    "    ('BeijingAir_old', 36),\n",
    "    ('BeijingAir', 7),\n",
    "]\n",
    "for ds_name, n_nodes in dataset_nodes:\n",
    "    if ds_name in args.dataset:\n",
    "        node_number = n_nodes\n",
    "        args.var_num = n_nodes\n",
    "        args.enc_in = n_nodes\n",
    "        args.dec_in = n_nodes\n",
    "        args.c_out = n_nodes\n",
    "        break"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Train"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Load Dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the three splits; scaler=None lets load_dataset fit the data scaler itself.\n",
    "train_dataset, val_dataset, test_dataset, data_scaler=load_dataset(args=args, scaler=None)\n",
    "\n",
    "if 'imputed' in args.dataset:\n",
    "    # Pre-imputed datasets: reload the raw (non-imputed) counterpart and blend\n",
    "    # the two so imputed values fill one side of the mask and raw values the\n",
    "    # other; prediction targets are always taken from the raw datasets.\n",
    "    # NOTE(review): assumes mask_1 is 0/1-valued and that the [:, :3, ...]\n",
    "    # slice lines up with the layout of .data — confirm against load_dataset.\n",
    "    print(f\"\\n\\nLoading original raw data for {args.dataset.replace('_imputed','')} ...\\n\")\n",
    "    temp_args=parse_args()\n",
    "    temp_args.dataset=args.dataset.replace('_imputed','')\n",
    "\n",
    "    train_dataset_ori, val_dataset_ori, test_dataset_ori, _ = load_dataset(args=temp_args, scaler=None)\n",
    "\n",
    "    train_dataset.data=train_dataset.data*train_dataset.mask_1[:,:3,...]+train_dataset_ori.data*(1-train_dataset.mask_1[:,:3,...])\n",
    "    val_dataset.data=val_dataset.data*val_dataset.mask_1[:,:3,...]+val_dataset_ori.data*(1-val_dataset.mask_1[:,:3,...])\n",
    "    test_dataset.data=test_dataset.data*test_dataset.mask_1[:,:3,...]+test_dataset_ori.data*(1-test_dataset.mask_1[:,:3,...])\n",
    "\n",
    "    train_dataset.pred=train_dataset_ori.pred\n",
    "    val_dataset.pred=val_dataset_ori.pred\n",
    "    test_dataset.pred=test_dataset_ori.pred\n",
    "\n",
    "# NOTE(review): val/test loaders inherit args.shuffle (default True); harmless for\n",
    "# averaged metrics but unusual for evaluation — confirm this is intended.\n",
    "train_loader=DataLoader(dataset=train_dataset, batch_size=args.batch_size, shuffle=args.shuffle, num_workers=args.num_workers, drop_last=False)\n",
    "val_loader=DataLoader(dataset=val_dataset, batch_size=args.batch_size, shuffle=args.shuffle, num_workers=args.num_workers, drop_last=False)\n",
    "test_loader=DataLoader(dataset=test_dataset, batch_size=args.batch_size, shuffle=args.shuffle, num_workers=args.num_workers, drop_last=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Load Model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Model registry: name -> constructor. Every model takes the parsed args;\n",
    "# PatchTST additionally needs the patch length.\n",
    "model_builders = {\n",
    "    \"CRIB\": lambda: CRIB(args),\n",
    "    \"DLinear\": lambda: DLinear(args),\n",
    "    \"SegRNN\": lambda: SegRNN(args),\n",
    "    \"Transformer\": lambda: Transformer(args),\n",
    "    \"iTransformer\": lambda: iTransformer(args),\n",
    "    \"PatchTST\": lambda: PatchTST(args, patch_len=args.patch_len),\n",
    "    \"TSMixer\": lambda: TSMixer(args),\n",
    "    \"WPMixer\": lambda: WPMixer(args),\n",
    "    \"PAttn\": lambda: PAttn(args),\n",
    "    \"KANAD\": lambda: KANAD(args),\n",
    "    \"MultiPatchFormer\": lambda: MultiPatchFormer(args),\n",
    "    \"FreTS\": lambda: FreTS(args),\n",
    "}\n",
    "\n",
    "if args.model not in model_builders:\n",
    "    raise ValueError(f\"Model-{args.model} not found\")\n",
    "\n",
    "model = model_builders[args.model]().to(args.device)\n",
    "\n",
    "print(f\"Model-{args.model} is loaded\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Set optimizer and amp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Adam over all model parameters; learning rate and weight decay come from args.\n",
    "optimizer=optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if args.use_amp:\n",
    "    # Gradient scaler for mixed-precision training; used by the training loop.\n",
    "    # NOTE(review): torch.cuda.amp.GradScaler is deprecated in recent torch\n",
    "    # releases in favor of torch.amp.GradScaler('cuda') — consider migrating.\n",
    "    scaler = torch.cuda.amp.GradScaler()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Val Function"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def validation(args, model, val_loader, criterion):\n",
    "    \"\"\"Evaluate ``model`` on ``val_loader`` without gradient tracking.\n",
    "\n",
    "    Returns (avg_loss, (mae_mean, mae_var), (mse_mean, mse_var),\n",
    "    (mape_mean, mape_var)), averaged over batches. Leaves the model in\n",
    "    train() mode on exit, since the caller resumes training afterwards.\n",
    "    \"\"\"\n",
    "    total_loss = []\n",
    "    total_mae = []\n",
    "    total_mse = []\n",
    "    total_mape = []\n",
    "    model.eval()\n",
    "    with torch.no_grad():\n",
    "        for batch_x, batch_y, mask_1 in val_loader:\n",
    "            # NOTE(review): removed the optimizer.zero_grad() call that was here —\n",
    "            # no gradients are produced under no_grad(), and evaluation should not\n",
    "            # reach into the global optimizer.\n",
    "\n",
    "            batch_x=batch_x.float().to(args.device) # [batch_size, patch_num, var_num, patch_len]\n",
    "            batch_y=batch_y.to(args.device) # [batch_size, pred_len, var_num]\n",
    "            mask_1=mask_1.to(args.device) # [batch_size, patch_num, var_num, patch_len]\n",
    "\n",
    "            B, P, N, L = batch_x.shape\n",
    "\n",
    "            autocast_context = torch.cuda.amp.autocast() if args.use_amp else nullcontext()\n",
    "            \n",
    "            with autocast_context:\n",
    "                if args.model == \"CRIB\":\n",
    "                    # Zero out masked entries, then forward in test mode.\n",
    "                    batch_x = batch_x * mask_1[:, :P, ...]\n",
    "                    enc_out_1, enc_attns_1, enc_out_2, enc_attns_2, preds, py_z, kl = model(batch_x, x_mark=None, test_flag=True)\n",
    "                \n",
    "                else:  # TSL models (DLinear etc.) want input shaped [B, seq_len, C]\n",
    "                    batch_x = batch_x.permute(0, 2, 1, 3).reshape(B, N, P * L).permute(0, 2, 1)\n",
    "                    mask_1 = mask_1[:, :P, ...].permute(0, 2, 1, 3).reshape(B, N, P * L).permute(0, 2, 1)\n",
    "                    \n",
    "                    # Pre-imputed datasets are already filled, so no masking there.\n",
    "                    batch_x_new = batch_x * mask_1 if 'imputed' not in args.dataset else batch_x\n",
    "                    \n",
    "                    preds = model(x_enc=batch_x_new, x_mark_enc=None, x_dec=batch_x_new, x_mark_dec=None)\n",
    "                    \n",
    "            # Metrics are computed in the scaled space (inverse_transform was\n",
    "            # intentionally left disabled in the original).\n",
    "            metric=criterion(preds, batch_y)\n",
    "\n",
    "            mae, mse, mape = criterion_mae(preds, batch_y), criterion_mse(preds, batch_y), criterion_mape(preds, batch_y)\n",
    "\n",
    "            total_loss.append(metric.cpu())\n",
    "            total_mae.append(mae.cpu())\n",
    "            total_mse.append(mse.cpu())\n",
    "            total_mape.append(mape.cpu())\n",
    "\n",
    "    total_loss = np.average(total_loss)\n",
    "    # Report each metric as a (mean, variance) pair across batches.\n",
    "    mae=(np.average(total_mae), np.var(total_mae))\n",
    "    mse=(np.average(total_mse), np.var(total_mse))\n",
    "    mape=(np.average(total_mape), np.var(total_mape))\n",
    "    \n",
    "    model.train()\n",
    "    \n",
    "    return total_loss, mae, mse, mape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def run_validation_and_print(args, model, val_loader, test_loader, criterion):\n",
    "    \"\"\"Evaluate on the validation and test splits, print a one-line summary,\n",
    "    and return all metrics as (val_loss, val_mae, val_mse, val_mape,\n",
    "    test_loss, test_mae, test_mse, test_mape).\"\"\"\n",
    "    model.eval()\n",
    "    with torch.no_grad():\n",
    "        val_metrics = validation(args, model, val_loader, criterion)\n",
    "        test_metrics = validation(args, model, test_loader, criterion)\n",
    "\n",
    "    v_loss = val_metrics[0]\n",
    "    t_loss, t_mae, t_mse, t_mape = test_metrics\n",
    "    print(\n",
    "        f'val_loss: {v_loss:.4f}, test_loss: {t_loss:.4f}, '\n",
    "        f'test_mae: {t_mae[0].item():.4f}+{t_mae[1].item():.4f}, '\n",
    "        f'test_mse: {t_mse[0].item():.4f}+{t_mse[1].item():.4f}, '\n",
    "        f'test_mape: {t_mape[0].item():.4f}+{t_mape[1].item():.4f}'\n",
    "    )\n",
    "    model.train()\n",
    "    return (*val_metrics, *test_metrics)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Training"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(f'{args.exp_type}-{args.dataset}-{args.model}, seed:{args.seed}, missing_pattern:{args.missing_pattern}, missing_rate: {args.missing_rate}, loss_type: {args.loss_type}, seq: {args.seq_len}, pred: {args.pred_len}')\n",
    "\n",
    "# Metrics start at 0 so the tqdm postfix can render before the first validation pass.\n",
    "val_loss, val_mae, val_mse, val_mape, test_loss, test_mae, test_mse, test_mape = 0, 0, 0, 0, 0, 0, 0, 0\n",
    "\n",
    "autocast_context = torch.cuda.amp.autocast() if args.use_amp else nullcontext()\n",
    "\n",
    "for epoch in range(args.train_epochs):  # (removed unused iter_count from enumerate)\n",
    "    model.train()\n",
    "    train_loss_list=[]\n",
    "    \n",
    "    pbar = tqdm(enumerate(train_loader), total=len(train_loader), desc=f\"Epoch {epoch + 1}/{args.train_epochs}\")\n",
    "    for i, (batch_x, batch_y, mask_1) in pbar:\n",
    "        optimizer.zero_grad()\n",
    "\n",
    "        batch_x=batch_x.float().to(args.device) # [batch_size, patch_num, var_num, patch_len]\n",
    "        batch_y=batch_y.to(args.device) # [batch_size, pred_len, var_num]\n",
    "        mask_1=mask_1.to(args.device) # [batch_size, patch_num, var_num, patch_len]\n",
    "\n",
    "        B, P, N, L = batch_x.shape\n",
    "        \n",
    "        with autocast_context:\n",
    "            if args.model == \"CRIB\":\n",
    "                # Zero out masked entries before the forward pass.\n",
    "                batch_x = batch_x * mask_1[:, :P, ...]\n",
    "                enc_out_1, enc_attns_1, enc_out_2, enc_attns_2, preds, py_z, kl = model(batch_x, x_mark=None, test_flag=False) # [batch_size, patch_num, var_num, patch_len]\n",
    "            else:  # TSL models want input shaped [B, seq_len, C]\n",
    "                batch_x_flat = batch_x.permute(0, 2, 1, 3).reshape(B, N, P * L).permute(0, 2, 1)\n",
    "                mask_flat = mask_1[:, :P, ...].permute(0, 2, 1, 3).reshape(B, N, P * L).permute(0, 2, 1)\n",
    "                batch_x_new = batch_x_flat * mask_flat if 'imputed' not in args.dataset else batch_x_flat\n",
    "                preds = model(x_enc=batch_x_new, x_mark_enc=None, x_dec=batch_x_new, x_mark_dec=None, mask=None)\n",
    "\n",
    "            tra_metric = criterion_mae(preds, batch_y)\n",
    "\n",
    "            # NOTE(review): removed dead code that computed kl_norm and\n",
    "            # behavior_consistency_norm here — both were never used, and\n",
    "            # behavior_consistency was recomputed in the loss branch anyway.\n",
    "\n",
    "            # Compose the loss from the terms selected by --loss_type.\n",
    "            loss = 0\n",
    "            if '1' in args.loss_type:\n",
    "                loss += tra_metric\n",
    "            if args.model == \"CRIB\":\n",
    "                if '2' in args.loss_type:\n",
    "                    behavior_consistency = criterion_mse(enc_out_1, enc_out_2)\n",
    "                    loss += args.Consis_weight * behavior_consistency\n",
    "                if '3' in args.loss_type:\n",
    "                    loss += args.KL_weight * kl\n",
    "                    \n",
    "            train_loss_list.append(loss.item())\n",
    "\n",
    "        if args.use_amp:\n",
    "            scaler.scale(loss).backward()\n",
    "            scaler.step(optimizer)\n",
    "            scaler.update()\n",
    "        else:\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "        pbar.set_postfix(\n",
    "            train_loss=f'{tra_metric.item():.4f}', \n",
    "            val_loss=f'{val_loss:.4f}', \n",
    "            test_loss=f'{test_loss:.4f}', \n",
    "            epoch=f' {epoch + 1} / {args.train_epochs}'\n",
    "        )\n",
    "        # (removed) pbar.update(1): iterating a tqdm-wrapped iterable already\n",
    "        # advances the bar, so the explicit update double-counted progress.\n",
    "            \n",
    "    avg_train_loss = np.average(train_loss_list)\n",
    "    print(f\"Epoch {epoch + 1} Average Train Loss: {avg_train_loss:.4f}\")\n",
    "    \n",
    "    val_loss, val_mae, val_mse, val_mape, test_loss, test_mae, test_mse, test_mape = run_validation_and_print(args, model, val_loader, test_loader, criterion_mae)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Re-evaluate both splits after the final epoch (criterion is MAE).\n",
    "print(\"Final Validation Results:\")\n",
    "val_loss, val_mae, val_mse, val_mape, test_loss, test_mae, test_mse, test_mape = run_validation_and_print(args, model, val_loader, test_loader, criterion_mae)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# test_loss comes from run_validation_and_print with criterion_mae, i.e. it is the test MAE.\n",
    "print(f\"test MAE: {test_loss:.4f}\")\n",
    "print(f\"test MSE: {test_mse[0].item():.4f}\")\n",
    "\n",
    "# Count only parameters the optimizer will actually update; the previous count\n",
    "# included frozen (requires_grad=False) parameters despite the 'trainable' label.\n",
    "total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n",
    "print(f\"Total trainable parameters: {total_params}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Save Result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# res={\n",
    "#     'Setting': [f'{args.exp_type}-{args.dataset}-{args.model}-{args.missing_pattern}-missing{args.missing_rate}-loss{args.loss_type}-seq{args.seq_len}-pred{args.pred_len}-seed{args.seed}-iter{args.iter}'],\n",
    "#     'Exp_type': [args.exp_type],\n",
    "#     'Dataset': [args.dataset],\n",
    "#     'Model': [args.model],\n",
    "#     'Missing_pattern': [args.missing_pattern],\n",
    "#     'Missing_rate': [args.missing_rate],\n",
    "#     'Loss_type': [args.loss_type],\n",
    "#     'Seed': [args.seed],\n",
    "#     'Seq_len': [args.seq_len],\n",
    "#     'Pred_len': [args.pred_len],\n",
    "#     'Test_MAE': [test_mae[0].item()],\n",
    "#     'Test_MAE_var': [test_mae[1].item()],\n",
    "#     'Test_MSE': [test_mse[0].item()],\n",
    "#     'Test_MSE_var': [test_mse[1].item()],\n",
    "#     'Test_MAPE': [test_mape[0].item()],\n",
    "#     'Test_MAPE_var': [test_mape[1].item()],\n",
    "# }\n",
    "\n",
    "# df = pd.DataFrame(res)\n",
    "# csv_file = args.csv_path\n",
    "\n",
    "# try:\n",
    "#     df_existing = pd.read_csv(csv_file)\n",
    "#     df.to_csv(csv_file, mode='a', index=False, header=False)\n",
    "# except FileNotFoundError:\n",
    "#     df.to_csv(csv_file, mode='w', index=False)\n",
    "\n",
    "# print(\"Results have been appended to:\", csv_file)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "itrans",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
