{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "3e04475f-5b04-4f44-91e4-00583b958f78",
   "metadata": {},
   "source": [
    "## Import Libraries and Functions and Define the Model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "a978bf6c-0334-4ae9-8226-21a7b61bef9d",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-07-10T08:49:36.525492500Z",
     "start_time": "2024-07-10T08:49:32.461006500Z"
    }
   },
   "outputs": [],
   "source": [
    "# Physics Informed Neural Network with Taylor Series\n",
    "\n",
    "from utils import *\n",
    "import math\n",
    "import torch.nn as nn\n",
    "\n",
    "'''\n",
    "N_INPUT: The number of input bio-z dimensions for one heartbeat\n",
    "N_FEAT: The number of physiological features\n",
    "N_EXT: The number of features extracted by the CNN\n",
    "'''\n",
    "if torch.cuda.is_available():\n",
    "    device = torch.device('cuda')\n",
    "else:\n",
    "    device = torch.device('cpu')\n",
    "\n",
    "\n",
    "class Configs:\n",
    "    def __init__(self):\n",
    "        # General parameters\n",
     "        self.task_name = 'regression'  # one of: 'regression', 'short_term_forecast', 'imputation', 'anomaly_detection'\n",
    "        self.seq_len = 32  # Input sequence length\n",
    "        self.label_len = 48  # Label length for forecasting\n",
    "        self.pred_len = 24  # Prediction length\n",
    "        self.e_layers = 2  # Number of encoder layers\n",
    "        self.d_model = 512  # Dimension of the model\n",
    "        self.embed = 'timeF'  # Embedding type, e.g., 'timeF' for time features\n",
    "        self.freq = 's'  # Frequency of the data, e.g., 'h' for hourly data\n",
    "        self.dropout = 0.1  # Dropout rate\n",
    "        self.top_k = 1\n",
    "        self.d_ff = 512  # Dimension of the feedforward network model\n",
    "        self.num_kernels = 6 # Number of kernels in the Inception block\n",
    "        self.enc_in = 32  # Number of input features (for forecasting and imputation)\n",
    "        self.output_attention = False  # Whether to output attention weights\n",
    "        self.factor = 1 # 'attn factor'\n",
    "        self.n_heads = 8  # Number of heads in the multi-head attention\n",
    "        self.activation = 'gelu'  # Activation function\n",
    "\n",
    "\n",
    "class PITN(nn.Module):\n",
    "    def __init__(self, configs):\n",
    "        super(PITN, self).__init__()\n",
    "        self.configs = configs\n",
    "        self.task_name = configs.task_name\n",
    "        self.seq_len = configs.seq_len\n",
    "        self.model = nn.ModuleList([TemporalBlock(configs) for _ in range(configs.e_layers)])\n",
    "        self.enc_embedding = DataEmbedding(configs.enc_in, configs.d_model, configs.embed, configs.freq,\n",
    "                                           configs.dropout)\n",
    "        self.layer = configs.e_layers\n",
    "        self.layer_norm = nn.LayerNorm(configs.d_model)\n",
    "        # add layer_norm_adv for adversarial input\n",
    "        self.layer_norm_adv = nn.LayerNorm(configs.d_model)\n",
    "        self.act = F.gelu\n",
    "        self.dropout = nn.Dropout(configs.dropout)\n",
    "        self.projection = nn.Linear(512, 64)  # Adjust the output dimension for regression\n",
    "        self.decision = nn.Linear(67, 1)\n",
    "        \n",
    "    def regression(self, X, adv_flag=False):\n",
    "        # embedding\n",
    "        x_enc, feat_1, feat_2, feat_3 = X[:, :32], X[:, 32], X[:, 33], X[:, 34]\n",
    "\n",
    "        x_enc = x_enc.unsqueeze(1)\n",
    "        feat_1 = feat_1.unsqueeze(1)\n",
    "        feat_2 = feat_2.unsqueeze(1)\n",
    "        feat_3 = feat_3.unsqueeze(1)\n",
    "\n",
    "        enc_out = self.enc_embedding(x_enc)  # [B, T, C]\n",
    "        # TimesNet\n",
    "        for i in range(self.layer):\n",
    "            if adv_flag:\n",
    "                enc_out = self.layer_norm_adv(self.model[i](enc_out))\n",
    "            else:\n",
    "                enc_out = self.layer_norm(self.model[i](enc_out))\n",
    "        # Output\n",
    "        # the output transformer encoder/decoder embeddings don't include non-linearity\n",
    "        output = self.act(enc_out)\n",
    "        output = self.dropout(output)\n",
    "        # (batch_size, seq_length * d_model)\n",
    "        output = output.reshape(output.shape[0], -1)\n",
    "        hidden_feature = output\n",
    "        output = self.projection(output)  # (batch_size, 64)\n",
    "        output = torch.cat((output, feat_1, feat_2, feat_3), 1)\n",
    "        output = self.decision(output)\n",
    "        return output, hidden_feature\n",
    "\n",
    "    def regression_net(self, x_enc, feat_1, feat_2, feat_3):\n",
    "        # embedding\n",
    "        return self.regression(torch.cat((x_enc, feat_1, feat_2, feat_3), 1))\n",
    "\n",
     "    def Physics_net(self, feature, feat_1, feat_2, feat_3):\n",
     "        \"\"\"Physics-informed prediction via a first-order Taylor expansion.\n",
     "\n",
     "        Runs the regression net to get u, then uses autograd gradients of u\n",
     "        w.r.t. each physiological feature to extrapolate sample t's output to\n",
     "        sample t+1:  u(t) + sum_k du/dfeat_k * (feat_k(t+1) - feat_k(t)).\n",
     "        Assumes consecutive rows are consecutive in time (trial boundaries are\n",
     "        masked out by the caller's index set) and that the feat_* tensors were\n",
     "        created with requires_grad=True (done in forward()).\n",
     "        Returns (u, pred_physics); pred_physics has one fewer row than u.\n",
     "        \"\"\"\n",
     "        # No need to calculate the gradient of adversarial loss\n",
     "        u, _ = self.regression_net(feature, feat_1, feat_2, feat_3)\n",
     "        # du/dfeat_k; create_graph=True keeps the graph so the physics loss can backprop\n",
     "        u_feat_1 = torch.autograd.grad(u, feat_1,\n",
     "                                       grad_outputs=torch.ones_like(u), create_graph=True)[0]\n",
     "        u_feat_2 = torch.autograd.grad(u, feat_2,\n",
     "                                       grad_outputs=torch.ones_like(u), create_graph=True)[0]\n",
     "        u_feat_3 = torch.autograd.grad(u, feat_3,\n",
     "                                       grad_outputs=torch.ones_like(u), create_graph=True)[0]\n",
     "        # Taylor-series estimate of the next sample from the current one\n",
     "        pred_physics = (u[:-1, 0]\n",
     "                        + (u_feat_1[:-1, 0] * (feat_1[1:, 0] - feat_1[:-1, 0]))\n",
     "                        + (u_feat_2[:-1, 0] * (feat_2[1:, 0] - feat_2[:-1, 0]))\n",
     "                        + (u_feat_3[:-1, 0] * (feat_3[1:, 0] - feat_3[:-1, 0]))\n",
     "                        )\n",
     "        return u, pred_physics\n",
    "\n",
    "    def forward(self, X, flag, adv_flag):\n",
    "        # flag to show calculate physics loss or not\n",
    "        self.feature = X[:, :32].clone().detach().requires_grad_(True)\n",
    "        self.feat_1 = X[:, 32].clone().detach().requires_grad_(True).unsqueeze(1)\n",
    "        self.feat_2 = X[:, 33].clone().detach().requires_grad_(True).unsqueeze(1)\n",
    "        self.feat_3 = X[:, 34].clone().detach().requires_grad_(True).unsqueeze(1)\n",
    "\n",
    "        if flag:\n",
    "            u, pred_physics = self.Physics_net(self.feature, self.feat_1, self.feat_2, self.feat_3)\n",
    "            return u, pred_physics\n",
    "        else:\n",
    "            u, hidden_feature = self.regression(X)\n",
    "            return u, hidden_feature\n",
    "\n",
    "    def function(self, X, train_out):\n",
    "        u, _ = self.regression(X, adv_flag=True)\n",
    "        pred_loss = torch.mean(torch.square(u - train_out))\n",
    "        return pred_loss\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c3147e93-a796-4077-b31c-248be2d20914",
   "metadata": {},
   "source": [
     "### Import Demo Data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "199f70e4-06d2-4848-9ae3-57deed7785bf",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-07-10T08:49:36.930105400Z",
     "start_time": "2024-07-10T08:49:36.527486900Z"
    }
   },
   "outputs": [],
   "source": [
    "# load an example data for demo\n",
    "import pandas as pd\n",
    "df_demo_data = pd.read_pickle('data_demo.pkl')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a40a95ce-5e05-4499-a4da-27394b896c3f",
   "metadata": {},
   "source": [
    "### Preprocess and Prepare the Train/Test Datasets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "bb49b2b3-be56-4654-b4ec-0134c8486bf3",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-07-10T08:49:41.094136300Z",
     "start_time": "2024-07-10T08:49:38.873117500Z"
    }
   },
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from sklearn import preprocessing\n",
    "import random\n",
    "import torch.utils.data as Data\n",
    "import warnings\n",
    "# Initialize a SEED value to ensure that the random processes in the code can be reproduced.\n",
    "SEED = 123\n",
    "\n",
    "# Call the function with seed value\n",
    "warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n",
    "\n",
    "\n",
    "# The keys for the beat data (beat_key), the target (out_key), and the features (feat_keys) are defined\n",
    "beat_key = 'bioz_beats'\n",
    "out_key = 'DBP'\n",
    "shift = 2.0\n",
    "beta = 1\n",
    "feat_keys = ['phys_feat_1','phys_feat_2','phys_feat_3']\n",
    "\n",
    "# Data scaling of BP, input beats, and input features\n",
    "# This scaler standardizes by removing the mean and scaling to unit variance\n",
    "# This is done to ensure having the same scale, which can improve the performance of machine learning algorithms\n",
    "scaler_out = preprocessing.StandardScaler().fit(df_demo_data[out_key].to_numpy()[:, None])\n",
    "mean_value = scaler_out.mean_\n",
    "std_value = scaler_out.scale_\n",
    "\n",
    "contra_shift = (shift - mean_value) / (std_value ** 2)\n",
    "scaler_beats = preprocessing.StandardScaler().fit(np.concatenate(df_demo_data[beat_key].to_numpy())[:, None])\n",
    "scaler_X = [preprocessing.StandardScaler().fit(df_demo_data[a].to_numpy()[:, None]) for a in feat_keys]\n",
    "\n",
    "# Apply Scaling\n",
    "# The scaled versions of the BP, input beats, and input features are then added to the dataframe\n",
    "df_demo_data.loc[df_demo_data.index, beat_key + '_scaled'] = df_demo_data.apply(\n",
    "    lambda x: np.concatenate(scaler_beats.transform(x[beat_key][:, None])), axis=1).to_numpy()\n",
    "\n",
    "def scale_output(row):\n",
    "    output = np.array([row[out_key]])[:, None]\n",
    "    output = output.reshape(-1, 1)\n",
    "    scaled_output = scaler_out.transform(output)\n",
    "    return np.concatenate(scaled_output)[0]\n",
    "\n",
    "df_demo_data[out_key + '_scaled'] = df_demo_data.apply(scale_output, axis=1).to_numpy()\n",
    "\n",
    "# df_demo_data.loc[df_demo_data.index, out_key + '_scaled'] = df_demo_data.apply(\n",
    "#     lambda x: np.concatenate(scaler_out.transform(np.array([x[out_key]])[:, None]))[0], axis=1).to_numpy()\n",
    "\n",
    "def transform_scaler_X(x, tmp_key, tmp_count):\n",
    "    value = np.array([x[tmp_key]])[:, None]\n",
    "    value = value.reshape(-1, 1)\n",
    "    scaled_value = scaler_X[tmp_count].transform(value)\n",
    "    concatenated_value = np.concatenate(scaled_value)\n",
    "    return concatenated_value\n",
    "\n",
    "for tmp_key, tmp_count in zip(feat_keys, range(len(feat_keys))):\n",
    "    # df_demo_data.loc[df_demo_data.index, tmp_key + '_scaled'] = df_demo_data.apply(\n",
    "    #     lambda x: np.concatenate(scaler_X[tmp_count].transform(np.array([x[tmp_key]])[:, None])), axis=1).to_numpy()\n",
    "    df_demo_data[tmp_key + '_scaled'] = df_demo_data.apply(\n",
    "        lambda x, key=tmp_key, count=tmp_count: transform_scaler_X(x, key, count), axis=1).to_numpy()\n",
    "# Fetch scaled feature names\n",
    "X_keys = [a + '_scaled' for a in feat_keys]\n",
    "\n",
    "# Prepare train/test using minimal training the BP\n",
    "# Fetch data shapes\n",
    "length_seq_x = df_demo_data.apply(lambda x: len(x[beat_key + '_scaled']), axis=1).unique()[0]\n",
    "\n",
    "# Set the length of the target to 1\n",
    "length_seq_y = 1\n",
    "\n",
    "# Start with all points\n",
    "# Reshape the scaled beat data into a 2D array where each row corresponds to a sample and each column corresponds to a time point in the beat sequence\n",
    "# The same is done for the features and the target\n",
    "all_beats = np.reshape(np.concatenate(df_demo_data[beat_key + '_scaled'].values), (len(df_demo_data), length_seq_x))\n",
    "[all_feat1, all_feat2, all_feat3] = [df_demo_data[a].values[:, None] for a in X_keys]\n",
    "all_out = df_demo_data[out_key + '_scaled'].values[:, None]\n",
    "\n",
    "# Used only for plotting purposes\n",
    "out_max_rescaled = np.concatenate(scaler_out.inverse_transform(all_out[:, 0][:, None])).max()\n",
    "out_min_rescaled = np.concatenate(scaler_out.inverse_transform(all_out[:, 0][:, None])).min()\n",
     "# Given different trials have time gaps, ignore the first 3 instances of each trial to prevent discontinuity in training\n",
    "list_all_length = [0]\n",
    "for _, df_tmp in df_demo_data.groupby(['trial_id']):\n",
    "    list_all_length.append(len(df_tmp))\n",
    "ix_ignore_all = np.concatenate(np.array([np.arange(a, a + 3, 1) for a in list(np.cumsum(list_all_length)[:-1])]))\n",
    "# Update the final indices set\n",
    "ix_all = list(set(np.arange(len(df_demo_data))) - set(ix_ignore_all))\n",
    "\n",
    "# Separate train/test based on minimal training criterion\n",
    "random.seed(0)\n",
    "bp_dist = df_demo_data[out_key].values\n",
    "\n",
    "# Find indices for train and test datasets\n",
    "# The target values are sorted in ascending order, and the sorted indices are split into multiple subsets\n",
    "# For each subset, a random index is selected as a training index\n",
    "ix_split = np.split([a for a in np.argsort(bp_dist) if a not in set(ix_ignore_all)], np.cumsum(\n",
    "    np.histogram(bp_dist[ix_all], bins=np.arange(bp_dist[ix_all].min(), bp_dist[ix_all].max(), 1))[0]))\n",
    "ix_train = [random.Random(4).choice(a) if len(a) > 0 else -1 for a in ix_split]\n",
    "ix_train = list(set(ix_train) - set([-1]))\n",
    "# Test set is all remaining points not used for training\n",
    "ix_test = list(set(ix_all) - set(ix_train))\n",
    "\n",
     "# Build train and test datasets based on the indices\n",
    "train_beats = all_beats[ix_train, :]\n",
    "test_beats = all_beats[ix_test, :]\n",
    "[train_feat1, train_feat2, train_feat3] = [all_feat1[ix_train, :], all_feat2[ix_train, :], all_feat3[ix_train, :]]\n",
    "[test_feat1, test_feat2, test_feat3] = [all_feat1[ix_test, :], all_feat2[ix_test, :], all_feat3[ix_test, :]]\n",
    "train_out = all_out[ix_train, :]\n",
    "test_out = all_out[ix_test, :]\n",
    "train_out = [float(item) for item in train_out]\n",
    "test_out = [float(item) for item in test_out]\n",
    "train_out = torch.tensor(train_out, dtype=torch.float32)\n",
    "test_out = torch.tensor(test_out, dtype=torch.float32)\n",
    "\n",
    "train_feat1 = [float(item) for item in train_feat1]\n",
    "train_feat2 = [float(item) for item in train_feat2]\n",
    "train_feat3 = [float(item) for item in train_feat3]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "ab85496f-ad0e-4413-8ad5-9c84f38ba47e",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-07-10T08:49:45.626014300Z",
     "start_time": "2024-07-10T08:49:45.587116400Z"
    }
   },
   "outputs": [],
   "source": [
    "\n",
    "#### Define model input tensors\n",
     "# The training, testing, and all data are converted to PyTorch tensors\n",
    "# The tensors for the different datasets are grouped into lists \n",
    "model_inp = torch.tensor(train_beats, dtype=torch.float32)\n",
    "feat1_inp = torch.tensor(train_feat1, dtype=torch.float32)\n",
    "feat2_inp = torch.tensor(train_feat2, dtype=torch.float32)\n",
    "feat3_inp = torch.tensor(train_feat3, dtype=torch.float32)\n",
    "\n",
    "inp_feat1 = feat1_inp.unsqueeze(1)\n",
    "inp_feat2 = feat2_inp.unsqueeze(1)\n",
    "inp_feat3 = feat3_inp.unsqueeze(1)\n",
    "train_out = train_out.unsqueeze(1)\n",
    "test_out = test_out.unsqueeze(1)\n",
    "\n",
    "\n",
    "inp_comb = torch.cat((model_inp, inp_feat1, inp_feat2, inp_feat3), dim=1)\n",
    "input_comb_np = inp_comb.clone().detach().numpy()\n",
    "\n",
    "train_dataset = Data.TensorDataset(inp_comb, train_out)\n",
    "train_data_iter = Data.DataLoader(train_dataset, batch_size=64, shuffle=True, drop_last=True)\n",
    "\n",
    "test_feat1 = [float(item) for item in test_feat1]\n",
    "test_feat2 = [float(item) for item in test_feat2]\n",
    "test_feat3 = [float(item) for item in test_feat3]\n",
    "\n",
    "model_inp_test = torch.tensor(test_beats, dtype=torch.float32)\n",
    "feat1_inp_test = torch.tensor(test_feat1, dtype=torch.float32)\n",
    "feat2_inp_test = torch.tensor(test_feat2, dtype=torch.float32)\n",
    "feat3_inp_test = torch.tensor(test_feat3, dtype=torch.float32)\n",
    "\n",
    "feat1_inp_test = feat1_inp_test.unsqueeze(1)\n",
    "feat2_inp_test = feat2_inp_test.unsqueeze(1)\n",
    "feat3_inp_test = feat3_inp_test.unsqueeze(1)\n",
    "\n",
    "inp_comb_test = torch.cat((model_inp_test, feat1_inp_test, feat2_inp_test, feat3_inp_test), dim=1)\n",
    "\n",
    "all_feat1 = [float(item) for item in all_feat1]\n",
    "all_feat2 = [float(item) for item in all_feat2]\n",
    "all_feat3 = [float(item) for item in all_feat3]\n",
    "\n",
    "model_inp_all = torch.tensor(all_beats, dtype=torch.float32)\n",
    "feat1_inp_all = torch.tensor(all_feat1, dtype=torch.float32)\n",
    "feat2_inp_all = torch.tensor(all_feat2, dtype=torch.float32)\n",
    "feat3_inp_all = torch.tensor(all_feat3, dtype=torch.float32)\n",
    "\n",
    "feat1_inp_all = feat1_inp_all.unsqueeze(1)\n",
    "feat2_inp_all = feat2_inp_all.unsqueeze(1)\n",
    "feat3_inp_all = feat3_inp_all.unsqueeze(1)\n",
    "\n",
    "inp_comb_all = torch.cat((model_inp_all, feat1_inp_all, feat2_inp_all, feat3_inp_all), dim=1)\n",
    "ix_all = torch.tensor(ix_all)\n",
    "ix_train = torch.tensor(ix_train)\n",
    "ix_test = torch.tensor(ix_test)\n",
    "\n",
    "test_dataset = Data.TensorDataset(inp_comb_test, test_out)\n",
    "test_data_iter = Data.DataLoader(test_dataset, batch_size=len(inp_comb), drop_last=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1e868349-c5ed-4c6c-9b2a-666e84cd1439",
   "metadata": {},
   "source": [
    "### Train PITN model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7543d519-1fd8-47c3-bee8-2fd9d2afe632",
   "metadata": {},
   "outputs": [],
   "source": [
    "configs = Configs()\n",
    "model_PITN = PITN(configs)\n",
    "import time\n",
    "\n",
     "# A Deep Neural Network model is initialized with the dimension of the beats, the dimension of each feature, and the number of neurons in the first dense layer\n",
    "N0 = 200\n",
    "\n",
    "x_train, x_boundary, u_boundary = training_data_latin_hypercube(inp_comb, train_out, N_inner=N0)\n",
    "x_adv = np.array([]).reshape((0, 35))\n",
    "x_train = np.vstack([x_train, x_adv])\n",
    "# No need to retrain, directly generate adversarial samples\n",
    "x_adv = generate_attack_samples(model_PITN, device, x_train, N0, train_out)\n",
    "# using diffusion to generate adversarial samples\n",
    "foward_diffusion = diffusion_foward()\n",
    "x_diffusion = foward_diffusion.q_sample(x_train, torch.tensor([20]))\n",
    "\n",
    "# Two lists are initialized to keep track of the training and testing loss during each epoch\n",
    "loss_list_pinn = []\n",
    "test_loss_list_pinn = []\n",
    "loss_total_epoch = []\n",
    "loss_physics_epoch = []\n",
    "loss_dnn_epoch = []\n",
    "criterion = MultiPosConLoss()\n",
    "print(\"PITN model training started\")\n",
    "optimizer = optim.Adam(model_PITN.parameters(), lr=10e-4)\n",
    "\n",
    "best_loss = float(\"inf\")\n",
    "# Two lists are initialized to keep track of the training and testing loss during each epoch\n",
    "epochs = 2000\n",
    "inp_comb_adv = x_adv\n",
    "for epoch in range(epochs):\n",
    "    start = time.time()\n",
    "    train_l_sum = 0.0\n",
    "    optimizer.zero_grad()\n",
    "    # Traditional out\n",
    "    model_PITN.to(device=device)\n",
    "    # train on clean data\n",
    "    # if inp_comb and train_out are not tensor, then turn them into tensor\n",
    "    if not torch.is_tensor(inp_comb):\n",
    "        inp_comb = torch.tensor(inp_comb, dtype=torch.float32)\n",
    "    if not torch.is_tensor(train_out):\n",
    "        train_out = torch.tensor(train_out, dtype=torch.float32)\n",
    "    inp_comb, train_out = inp_comb.to(device=device), train_out.to(device=device)\n",
     "    # model_PITN arguments: input data, whether to compute the physics loss, whether to use the auxiliary (adversarial) normalization\n",
    "    output, feature_clean = model_PITN(inp_comb, False, False)\n",
    "    loss_dnn_clean = output - train_out\n",
    "    loss_dnn_clean = torch.mean(torch.square(loss_dnn_clean))\n",
    "\n",
    "    if not torch.is_tensor(inp_comb_adv):\n",
    "        inp_comb_adv = torch.tensor(inp_comb_adv, dtype=torch.float32)\n",
    "    inp_comb_adv = inp_comb_adv.to(device=device)\n",
    "    output, feature_adv = model_PITN(inp_comb_adv, False, True)\n",
    "    # concat train_out by twice, and then use it as labels\n",
    "    adv_labels = train_out\n",
    "    loss_dnn_adv = output - adv_labels\n",
    "    loss_dnn_adv = torch.mean(torch.square(loss_dnn_adv))\n",
    "\n",
    "    # using contrastive learning\n",
    "    bsz = len(feature_clean)\n",
    "    features = torch.cat((feature_clean, feature_adv), dim=0)\n",
    "    label = 0\n",
    "    labels = np.zeros([1, bsz])\n",
    "    mask = [True] * bsz\n",
    "    for i in range(bsz):\n",
    "        if mask[i]:  # if the sample is not used\n",
    "            if i == bsz - 1:\n",
    "                labels[0][i] = label\n",
    "                mask[i] = False\n",
    "                break\n",
    "            else:\n",
    "                labels[0][i] = label\n",
    "                for j in range(i + 1, bsz):\n",
    "                    if abs((train_out[i] - train_out[j]).item()) <= abs(contra_shift):\n",
    "                        labels[0][j] = label\n",
    "                        mask[j] = False\n",
    "            label += 1\n",
    "            mask[i] = False\n",
    "    # concat more if samples are used multiple times\n",
    "    labels = np.concatenate((labels, labels), axis=1)\n",
    "    labels = torch.tensor(labels, dtype=torch.int).to(device=device)\n",
    "    loss_con = criterion({'feats': features, 'labels': labels})\n",
    "\n",
    "    # train on adversarial data and contrastive learning\n",
    "    loss_dnn = loss_dnn_clean + loss_dnn_adv + loss_con\n",
    "\n",
    "    # train on adversarial data\n",
    "    loss_dnn = loss_dnn_clean + loss_dnn_adv\n",
    "\n",
    "    # train on only clean data\n",
    "    # loss_dnn = loss_dnn_clean\n",
    "\n",
    "    # Physics loss\n",
    "    inp_comb_all = inp_comb_all.to(device=device)\n",
    "    y, pred_physics = model_PITN(inp_comb_all, True, False)\n",
    "    physics_loss_ini = pred_physics - y[1:, 0]\n",
    "    physics_loss = torch.mean(torch.square(physics_loss_ini[ix_all[:-1].view(-1, 1)]))\n",
    "\n",
    "    # physics_loss = K.mean(K.square(tf.gather_nd(physics_loss_ini, indices=np.array(ix_all[:-1])[:, None])))\n",
    "    loss_total = loss_dnn + physics_loss * beta\n",
    "    loss_total_epoch.append(np.round(loss_total.cpu().detach().numpy(), 4))\n",
    "    loss_physics_epoch.append(np.round(physics_loss.cpu().detach().numpy(), 4))\n",
    "    loss_dnn_epoch.append(np.round(loss_dnn.cpu().detach().numpy(), 4))\n",
    "\n",
    "    loss_total.backward()\n",
    "    optimizer.step()\n",
    "    train_l_sum += loss_total.item()\n",
    "    loss_list_pinn.append(float(loss_dnn.item()))\n",
    "    loss_final = np.min(loss_list_pinn)\n",
    "\n",
    "    # model evaluation\n",
    "    inp_comb_test, test_out = inp_comb_test.to(device=device), test_out.to(device=device)\n",
    "    pred_out, _ = model_PITN(inp_comb_test, False, False)\n",
    "    test_loss = pred_out - test_out\n",
    "    test_loss = torch.mean(torch.square(test_loss))\n",
    "    test_loss_list_pinn.append(float(test_loss))\n",
    "    end = time.time()\n",
    "    # print every 100 epochs\n",
    "    if epoch % 100 == 0:\n",
    "        print(\n",
    "            \"epoch: {}, time: {}, loss_dnn:{}, loss_physics: {}, test_loss: {}\".format(epoch, round(end - start, 2),\n",
    "                                                                                       np.round(loss_final, 4),\n",
    "                                                                                       np.round(\n",
    "                                                                                           physics_loss.cpu().detach().numpy(),\n",
    "                                                                                           4), test_loss))\n",
    "\n",
    "    if test_loss < best_loss:\n",
    "        best_loss = test_loss\n",
    "\n",
    "        torch.save(model_PITN.state_dict(), \"best.pth\")\n",
    "\n",
    "    # if epoch == epochs - 1:\n",
    "    if (loss_final <= 0.01) | (epoch == epochs - 1):\n",
    "        # wandb.finish()\n",
    "        torch.save(model_PITN.state_dict(),\n",
    "                   \"last.pth\")\n",
    "\n",
    "        print(\"PITN model training Completed. Epoch %d/%d -- loss: %.4f\" % (\n",
    "            epoch, epochs, np.round(loss_total.cpu().detach().numpy(), 4)))\n",
    "        \n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "72c83887-d459-4504-9904-146bac14cfb3",
   "metadata": {},
   "source": [
     "### Evaluate the PITN model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7776218f-592d-4278-9ed6-da9326f2fd66",
   "metadata": {},
   "outputs": [],
   "source": [
    "#The trained model's predictions on the test dataset are computed\n",
    "pred_out = PITN(inp_comb_test)\n",
    "\n",
    "#The Pearson correlation coefficient and the Root Mean Square Error are calculated between the actual and predicted test outcomes\n",
    "corr_conv = np.corrcoef(np.concatenate(test_out)[:], np.concatenate(pred_out)[:])[0][1]\n",
    "rmse_conv = np.sqrt(np.mean(np.square\n",
    "                           (np.concatenate(scaler_out.inverse_transform(np.concatenate(test_out)[:][:, None]))-\n",
    "                            np.concatenate(scaler_out.inverse_transform(np.concatenate(pred_out)[:][:, None])))))\n",
    "\n",
    "pred_out = PITN(inp_comb_test)\n",
    "corr_pinn = np.corrcoef(np.concatenate(test_out)[:], np.concatenate(pred_out)[:])[0][1]\n",
    "rmse_pinn = np.sqrt(np.mean(np.square(\n",
    "    np.concatenate(scaler_out.inverse_transform(np.concatenate(test_out)[:][:, None]))-\n",
    "    np.concatenate(scaler_out.inverse_transform(np.concatenate(pred_out)[:][:, None])))))\n",
    "\n",
    "print('#### PITN Performance ####')\n",
    "print('Corr: %.2f,  RMSE: %.1f'%(corr_pinn, rmse_pinn))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.17"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
