{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Import"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch.utils.data import Dataset\n",
    "import torch.fft as fft\n",
    "import torch.nn.functional as F\n",
    "from torch import nn\n",
    "from torch.nn import TransformerEncoder, TransformerEncoderLayer\n",
    "\n",
    "from sklearn.metrics import roc_auc_score, classification_report, confusion_matrix, \\\n",
    "    average_precision_score, accuracy_score, precision_score,f1_score,recall_score\n",
    "from sklearn.metrics import cohen_kappa_score,accuracy_score\n",
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "\n",
    "import numpy as np\n",
    "from datetime import datetime\n",
    "import argparse\n",
    "import sys\n",
    "from tqdm import tqdm\n",
    "import os\n",
    "import random\n",
    "import pandas as pd\n",
    "import logging\n",
    "from shutil import copy\n",
    "from transformers import BertModel, BertTokenizer\n",
    "\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### config_files.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Configs(object):\n",
    "    \"\"\"Hyperparameter container for model, training, optimizer and data settings.\"\"\"\n",
    "    def __init__(self):\n",
    "        # model configs\n",
    "        self.input_channels = 1\n",
    "        self.kernel_size = 8\n",
    "        self.stride = 1\n",
    "        self.final_out_channels = 128  # encoder output channel width\n",
    "\n",
    "        self.num_classes = 4\n",
    "        self.dropout = 0.35\n",
    "        self.features_len = 18\n",
    "\n",
    "        # training configs\n",
    "        self.num_epoch = 1  # 40 # 100  # 1\n",
    "\n",
    "        # optimizer parameters (Adam betas and learning rate)\n",
    "        self.beta1 = 0.9\n",
    "        self.beta2 = 0.99\n",
    "        self.lr = 3e-6  # 3e-4\n",
    "\n",
    "        # data parameters\n",
    "        self.drop_last = True\n",
    "        self.batch_size = 128\n",
    "\n",
    "        # nested config objects\n",
    "        self.Context_Cont = Context_Cont_configs()\n",
    "        self.TC = TC()\n",
    "        self.augmentation = augmentations()\n",
    "\n",
    "        \"\"\"New hyperparameters\"\"\"\n",
    "        self.TSlength_aligned = 1500  # 1500 # 186\n",
    "        self.lr_f = self.lr  # fine-tuning LR follows the pre-training LR\n",
    "        self.target_batch_size = 41  # 82 # 41\n",
    "        self.increased_dim = 1\n",
    "        # (removed a duplicate re-assignment of final_out_channels; it is already set to 128 above)\n",
    "        self.num_classes_target = 3\n",
    "        self.features_len_f = self.features_len\n",
    "        self.CNNoutput_channel = 190  # 751\n",
    "\n",
    "\n",
    "class augmentations(object):\n",
    "    \"\"\"Noise levels used by the time-domain augmentations.\"\"\"\n",
    "    def __init__(self):\n",
    "        self.jitter_scale_ratio = 1.1  # sigma passed to scaling()\n",
    "        self.jitter_ratio = 0.8  # sigma passed to jitter()\n",
    "        self.max_seg = 8  # max number of segments for permutation()\n",
    "\n",
    "\n",
    "class Context_Cont_configs(object):\n",
    "    \"\"\"Settings for the contextual contrastive (NT-Xent) loss.\"\"\"\n",
    "    def __init__(self):\n",
    "        self.temperature = 0.2  # softmax temperature of the contrastive loss\n",
    "        self.use_cosine_similarity = True  # cosine vs. dot-product similarity\n",
    "\n",
    "\n",
    "class TC(object):\n",
    "    \"\"\"Settings for the temporal contrasting module.\"\"\"\n",
    "    def __init__(self):\n",
    "        self.hidden_dim = 100  # hidden size of the temporal contrasting module\n",
    "        self.timesteps = 6  # number of future timesteps to contrast"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### augmentations.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def one_hot_encoding(X):\n",
    "    X = [int(x) for x in X]\n",
    "    n_values = np.max(X) + 1\n",
    "    b = np.eye(n_values)[X]\n",
    "    return b\n",
    "\n",
    "\n",
    "def DataTransform(sample, config):\n",
    "    \"\"\"Weak and strong augmentations.\n",
    "\n",
    "    Weak view: random scaling. Strong view: segment permutation followed by jitter.\n",
    "    Returns (weak_aug, strong_aug).\n",
    "    \"\"\"\n",
    "    weak_aug = scaling(sample, config.augmentation.jitter_scale_ratio)\n",
    "    # weak_aug = permutation(sample, max_segments=config.augmentation.max_seg)\n",
    "    strong_aug = jitter(permutation(sample, max_segments=config.augmentation.max_seg), config.augmentation.jitter_ratio)\n",
    "\n",
    "    return weak_aug, strong_aug\n",
    "\n",
    "# def DataTransform_TD(sample, config):\n",
    "#     \"\"\"Weak and strong augmentations\"\"\"\n",
    "#     weak_aug = sample\n",
    "#     strong_aug = jitter(permutation(sample, max_segments=config.augmentation.max_seg),\n",
    "# config.augmentation.jitter_ratio) #masking(sample)\n",
    "#     return weak_aug, strong_aug\n",
    "#\n",
    "# def DataTransform_FD(sample, config):\n",
    "#     \"\"\"Weak and strong augmentations in Frequency domain \"\"\"\n",
    "#     # weak_aug =  remove_frequency(sample, 0.1)\n",
    "#     strong_aug = add_frequency(sample, 0.1)\n",
    "#     return weak_aug, strong_aug\n",
    "\n",
    "\n",
    "def DataTransform_TD(sample, config):\n",
    "    \"\"\"Simply use the jittering augmentation. Feel free to add more augmentations,\n",
    "    but we noticed that in the TF-C framework the augmentation has little impact on the final transfer performance.\"\"\"\n",
    "    aug = jitter(sample, config.augmentation.jitter_ratio)\n",
    "    return aug\n",
    "\n",
    "\n",
    "def DataTransform_TD_bank(sample, config):\n",
    "    \"\"\"Augmentation bank with four augmentations; for each sample one of them is\n",
    "    randomly selected as the positive view. May be used to replace DataTransform_TD.\n",
    "\n",
    "    Bug fix: the one-hot selector previously used column 0 for every augmentation,\n",
    "    so augmentations 2-4 could never be selected on their own.\n",
    "    \"\"\"\n",
    "    aug_1 = jitter(sample, config.augmentation.jitter_ratio)\n",
    "    aug_2 = scaling(sample, config.augmentation.jitter_scale_ratio)\n",
    "    # NOTE(review): masking() mutates `sample` in place -- confirm callers do not\n",
    "    # reuse the original tensor afterwards.\n",
    "    aug_3 = permutation(sample, max_segments=config.augmentation.max_seg)\n",
    "    aug_4 = masking(sample, keepratio=0.9)\n",
    "\n",
    "    # randomly pick one augmentation per sample via a one-hot selector\n",
    "    li = np.random.randint(0, 4, size=[sample.shape[0]])\n",
    "    # fixed 4-wide one-hot (one_hot_encoding would be too narrow if no sample drew label 3)\n",
    "    li_onehot = np.eye(4)[li]\n",
    "    aug_1 = aug_1 * li_onehot[:, 0][:, None, None]  # the rows that are not selected are set as zero.\n",
    "    aug_2 = aug_2 * li_onehot[:, 1][:, None, None]\n",
    "    aug_3 = aug_3 * li_onehot[:, 2][:, None, None]\n",
    "    aug_4 = aug_4 * li_onehot[:, 3][:, None, None]\n",
    "    aug_T = aug_1 + aug_2 + aug_3 + aug_4\n",
    "    return aug_T\n",
    "\n",
    "\n",
    "def DataTransform_FD(sample, config):\n",
    "    \"\"\"Weak and strong augmentations in Frequency domain.\n",
    "\n",
    "    Sums a frequency-removal view and a frequency-addition view of the input\n",
    "    spectrum into a single augmented spectrum.\n",
    "    \"\"\"\n",
    "    aug_1 = remove_frequency(sample, pertub_ratio=0.1)\n",
    "    aug_2 = add_frequency(sample, pertub_ratio=0.1)\n",
    "    aug_F = aug_1 + aug_2\n",
    "    return aug_F\n",
    "\n",
    "\n",
    "def remove_frequency(x, pertub_ratio=0.0):\n",
    "    mask = torch.FloatTensor(x.shape).uniform_() > pertub_ratio  # maskout_ratio are False\n",
    "    mask = mask.to(x.device)\n",
    "    return x*mask\n",
    "\n",
    "\n",
    "def add_frequency(x, pertub_ratio=0.0):\n",
    "\n",
    "    mask = torch.FloatTensor(x.shape).uniform_() > (1-pertub_ratio)  # only pertub_ratio of all values are True\n",
    "    mask = mask.to(x.device)\n",
    "    max_amplitude = x.max()\n",
    "    random_am = torch.rand(mask.shape)*(max_amplitude*0.1)\n",
    "    pertub_matrix = mask*random_am\n",
    "    return x+pertub_matrix\n",
    "\n",
    "\n",
    "def generate_binomial_mask(B, T, D, p=0.5):  # p is the ratio of not zero\n",
    "    \"\"\"Return a (B, T, D) boolean mask whose entries are True with probability p.\"\"\"\n",
    "    return torch.from_numpy(np.random.binomial(1, p, size=(B, T, D))).to(torch.bool)\n",
    "\n",
    "\n",
    "def masking(x, keepratio=0.9, mask='binomial'):\n",
    "    \"\"\"Randomly zero out entries of x, keeping each with probability `keepratio`.\n",
    "\n",
    "    NOTE: x is modified in place (NaN rows are zeroed before masking) and then\n",
    "    returned. Only the 'binomial' mask mode is implemented.\n",
    "    \"\"\"\n",
    "    # Bug fix: dropped the needless `global mask_id` -- the mask is purely local\n",
    "    # state, and the global could silently reuse a stale mask from a previous\n",
    "    # call if an unsupported `mask` argument were passed. Unsupported modes now\n",
    "    # fail loudly instead.\n",
    "    nan_mask = ~x.isnan().any(axis=-1)\n",
    "    x[~nan_mask] = 0\n",
    "\n",
    "    if mask == 'binomial':\n",
    "        mask_id = generate_binomial_mask(x.size(0), x.size(1), x.size(2), p=keepratio).to(x.device)\n",
    "    else:\n",
    "        raise ValueError(f\"unsupported mask mode: {mask!r}\")\n",
    "\n",
    "    x[~mask_id] = 0\n",
    "    return x\n",
    "\n",
    "\n",
    "def jitter(x, sigma=0.8):\n",
    "    \"\"\"Add i.i.d. Gaussian noise N(0, sigma^2) to x.\"\"\"\n",
    "    # https://arxiv.org/pdf/1706.00527.pdf\n",
    "    return x + np.random.normal(loc=0., scale=sigma, size=x.shape)\n",
    "\n",
    "\n",
    "def scaling(x, sigma=1.1):\n",
    "    # https://arxiv.org/pdf/1706.00527.pdf\n",
    "    factor = np.random.normal(loc=2., scale=sigma, size=(x.shape[0], x.shape[2]))\n",
    "    ai = []\n",
    "    for i in range(x.shape[1]):\n",
    "        xi = x[:, i, :]\n",
    "        ai.append(np.multiply(xi, factor[:, :])[:, np.newaxis, :])\n",
    "    return np.concatenate((ai), axis=1)\n",
    "\n",
    "\n",
    "def permutation(x, max_segments=5, seg_mode=\"random\"):\n",
    "    \"\"\"Split each series along time into random segments and shuffle the segments.\n",
    "\n",
    "    Args:\n",
    "        x: array/tensor of shape (batch, channels, time).\n",
    "        max_segments: exclusive upper bound on the number of segments per sample.\n",
    "        seg_mode: 'random' for random split points, otherwise equal-size splits.\n",
    "    Returns:\n",
    "        torch.Tensor with the permuted series.\n",
    "    \"\"\"\n",
    "    orig_steps = np.arange(x.shape[2])\n",
    "\n",
    "    # number of segments per sample, drawn from [1, max_segments)\n",
    "    num_segs = np.random.randint(1, max_segments, size=(x.shape[0]))\n",
    "\n",
    "    ret = np.zeros_like(x)\n",
    "    for i, pat in enumerate(x):\n",
    "        if num_segs[i] > 1:\n",
    "            if seg_mode == \"random\":\n",
    "                split_points = np.random.choice(x.shape[2] - 2, num_segs[i] - 1, replace=False)\n",
    "                split_points.sort()\n",
    "                splits = np.split(orig_steps, split_points)\n",
    "            else:\n",
    "                splits = np.array_split(orig_steps, num_segs[i])\n",
    "            warp = np.concatenate(np.random.permutation(splits)).ravel()\n",
    "            # NOTE(review): only channel 0 is indexed here -- looks single-channel\n",
    "            # specific; confirm behavior for multi-channel inputs.\n",
    "            ret[i] = pat[0, warp]\n",
    "        else:\n",
    "            ret[i] = pat\n",
    "    return torch.from_numpy(ret)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### dataloader.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_freq(dataset, config):\n",
    "    \"\"\"Shuffle a {'samples', 'labels'} dataset, align its channels/length, and\n",
    "    compute the frequency-domain view via FFT magnitude.\n",
    "\n",
    "    Returns (X_train, y_train, x_data_f).\n",
    "    \"\"\"\n",
    "    X_train = dataset[\"samples\"]\n",
    "    y_train = dataset['labels']\n",
    "    # shuffle\n",
    "    data = list(zip(X_train, y_train))\n",
    "    np.random.shuffle(data)\n",
    "    data = data[:10000]  # take a subset for testing.\n",
    "    X_train, y_train = zip(*data)\n",
    "    X_train, y_train = torch.stack(list(X_train), dim=0), torch.stack(list(y_train), dim=0)\n",
    "\n",
    "    if len(X_train.shape) < 3:\n",
    "        X_train = X_train.unsqueeze(2)\n",
    "\n",
    "    if X_train.shape.index(min(X_train.shape)) != 1:  # make sure the Channels in second dim\n",
    "        X_train = X_train.permute(0, 2, 1)\n",
    "\n",
    "    \"\"\"Align the TS length between source and target datasets\"\"\"\n",
    "    X_train = X_train[:, :1, :int(config.TSlength_aligned)]  # keep channel 0 and the first TSlength_aligned steps\n",
    "\n",
    "    # X_train is already a tensor after torch.stack above; this branch is a safety net\n",
    "    if isinstance(X_train, np.ndarray):\n",
    "        x_data = torch.from_numpy(X_train)\n",
    "    else:\n",
    "        x_data = X_train\n",
    "\n",
    "    \"\"\"Transfer x_data to Frequency Domain. If use fft.fft, the output has the same shape; if use fft.rfft,\n",
    "    the output shape is half of the time window.\"\"\"\n",
    "\n",
    "    x_data_f = fft.fft(x_data).abs()  # /(window_length) # rfft for real value inputs.\n",
    "    return (X_train, y_train, x_data_f)\n",
    "\n",
    "\n",
    "class Load_Dataset(Dataset):\n",
    "    \"\"\"Torch Dataset that shuffles, aligns and (optionally) augments a\n",
    "    {'samples', 'labels'} dict, exposing time- and frequency-domain views.\n",
    "\n",
    "    __getitem__ returns (x, y, aug_x, x_f, aug_x_f); outside 'pre_train' mode\n",
    "    the augmented slots simply repeat the originals.\n",
    "    \"\"\"\n",
    "    # Initialize your data, download, etc.\n",
    "    def __init__(self, dataset, config, training_mode, target_dataset_size=64, subset=False):\n",
    "        super(Load_Dataset, self).__init__()\n",
    "        self.training_mode = training_mode\n",
    "        X_train = dataset[\"samples\"]\n",
    "        y_train = dataset[\"labels\"]\n",
    "        # shuffle\n",
    "        data = list(zip(X_train, y_train))\n",
    "        np.random.shuffle(data)\n",
    "        X_train, y_train = zip(*data)\n",
    "        X_train, y_train = torch.stack(list(X_train), dim=0), torch.stack(list(y_train), dim=0)\n",
    "\n",
    "        if len(X_train.shape) < 3:\n",
    "            X_train = X_train.unsqueeze(2)\n",
    "\n",
    "        if X_train.shape.index(min(X_train.shape)) != 1:  # make sure the Channels in second dim\n",
    "            X_train = X_train.permute(0, 2, 1)\n",
    "\n",
    "        \"\"\"Align the TS length between source and target datasets\"\"\"\n",
    "        X_train = X_train[:, :1, :int(config.TSlength_aligned)]  # keep channel 0 and the first TSlength_aligned steps\n",
    "\n",
    "        \"\"\"Subset for debugging\"\"\"\n",
    "        if subset:\n",
    "            subset_size = target_dataset_size * 10  # 30 #7 # 60*1\n",
    "            \"\"\"if the dimension is larger than 178, take the first 178 dimensions.\n",
    "            If multiple channels, take the first channel\"\"\"\n",
    "            X_train = X_train[:subset_size]\n",
    "            y_train = y_train[:subset_size]\n",
    "            print('Using subset for debugging, the datasize is:', y_train.shape[0])\n",
    "\n",
    "        # X_train/y_train are already tensors after torch.stack; the ndarray branch is a safety net\n",
    "        if isinstance(X_train, np.ndarray):\n",
    "            self.x_data = torch.from_numpy(X_train)\n",
    "            self.y_data = torch.from_numpy(y_train).long()\n",
    "        else:\n",
    "            self.x_data = X_train\n",
    "            self.y_data = y_train\n",
    "\n",
    "        \"\"\"Transfer x_data to Frequency Domain. If use fft.fft, the output has the same shape; if use fft.rfft,\n",
    "        the output shape is half of the time window.\"\"\"\n",
    "\n",
    "        # window_length = self.x_data.shape[-1]\n",
    "        self.x_data_f = fft.fft(self.x_data).abs()  # /(window_length) # rfft for real value inputs.\n",
    "        self.len = X_train.shape[0]\n",
    "\n",
    "        \"\"\"Augmentation\"\"\"\n",
    "        if training_mode == \"pre_train\":  # no need to apply Augmentations in other modes\n",
    "            self.aug1 = DataTransform_TD(self.x_data, config)\n",
    "            self.aug1_f = DataTransform_FD(self.x_data_f, config)  # [7360, 1, 90]\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        # In pre-training, return the augmented views; otherwise echo the originals.\n",
    "        if self.training_mode == \"pre_train\":\n",
    "            return self.x_data[index], self.y_data[index], self.aug1[index],  \\\n",
    "                   self.x_data_f[index], self.aug1_f[index]\n",
    "        else:\n",
    "            return self.x_data[index], self.y_data[index], self.x_data[index], \\\n",
    "                   self.x_data_f[index], self.x_data_f[index]\n",
    "\n",
    "    def __len__(self):\n",
    "        return self.len\n",
    "\n",
    "\n",
    "def data_generator(sourcedata_path, targetdata_path, configs, training_mode, subset=True):\n",
    "    \"\"\"Build train / finetune / test DataLoaders from saved .pt dataset dicts.\n",
    "\n",
    "    Args:\n",
    "        sourcedata_path: directory containing the pre-training train.pt.\n",
    "        targetdata_path: directory containing the target train.pt and test.pt.\n",
    "        configs: Configs instance (batch sizes, drop_last, ...).\n",
    "        training_mode: e.g. 'pre_train'; forwarded to Load_Dataset.\n",
    "        subset: if True, train/finetune sets are reduced for debugging.\n",
    "    Returns:\n",
    "        (train_loader, finetune_loader, test_loader)\n",
    "    \"\"\"\n",
    "    train_dataset = torch.load(os.path.join(sourcedata_path, \"train.pt\"))\n",
    "    finetune_dataset = torch.load(os.path.join(targetdata_path, \"train.pt\"))  # train.pt\n",
    "    test_dataset = torch.load(os.path.join(targetdata_path, \"test.pt\"))  # test.pt\n",
    "    \"\"\"In pre-training:\n",
    "    train_dataset: [371055, 1, 178] from SleepEEG.\n",
    "    finetune_dataset: [60, 1, 178], test_dataset: [11420, 1, 178] from Epilepsy\"\"\"\n",
    "\n",
    "    # subset = True # if true, use a subset for debugging.\n",
    "    train_dataset = Load_Dataset(train_dataset,\n",
    "                                 configs,\n",
    "                                 training_mode,\n",
    "                                 target_dataset_size=configs.batch_size,\n",
    "                                 subset=subset)  # for self-supervised, the data are augmented here\n",
    "    finetune_dataset = Load_Dataset(finetune_dataset,\n",
    "                                    configs,\n",
    "                                    training_mode,\n",
    "                                    target_dataset_size=configs.target_batch_size,\n",
    "                                    subset=subset)\n",
    "    test_dataset = Load_Dataset(test_dataset, configs, training_mode,\n",
    "                                target_dataset_size=configs.target_batch_size, subset=False)\n",
    "\n",
    "    train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=configs.batch_size,\n",
    "                                               shuffle=True, drop_last=configs.drop_last,\n",
    "                                               num_workers=0)\n",
    "    finetune_loader = torch.utils.data.DataLoader(dataset=finetune_dataset, batch_size=configs.target_batch_size,\n",
    "                                                  shuffle=True, drop_last=configs.drop_last,\n",
    "                                                  num_workers=0)\n",
    "    test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=configs.target_batch_size,\n",
    "                                              shuffle=True, drop_last=False,\n",
    "                                              num_workers=0)\n",
    "\n",
    "    return train_loader, finetune_loader, test_loader\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### loss.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "class NTXentLoss(torch.nn.Module):\n",
    "    \"\"\"Normalized-temperature cross-entropy (NT-Xent) contrastive loss.\n",
    "\n",
    "    For a batch of N pairs (zis, zjs), each sample's positive is its counterpart\n",
    "    in the other view and the remaining 2N-2 samples act as negatives.\n",
    "    \"\"\"\n",
    "    def __init__(self, device, batch_size, temperature, use_cosine_similarity):\n",
    "        super(NTXentLoss, self).__init__()\n",
    "        self.batch_size = batch_size\n",
    "        self.temperature = temperature\n",
    "        self.device = device\n",
    "        self.softmax = torch.nn.Softmax(dim=-1)\n",
    "        self.mask_samples_from_same_repr = self._get_correlated_mask().type(torch.bool)\n",
    "        self.similarity_function = self._get_similarity_function(use_cosine_similarity)\n",
    "        self.criterion = torch.nn.CrossEntropyLoss(reduction=\"sum\")\n",
    "\n",
    "    def _get_similarity_function(self, use_cosine_similarity):\n",
    "        # select cosine or dot-product similarity\n",
    "        if use_cosine_similarity:\n",
    "            self._cosine_similarity = torch.nn.CosineSimilarity(dim=-1)\n",
    "            return self._cosine_simililarity\n",
    "        else:\n",
    "            return self._dot_simililarity\n",
    "\n",
    "    def _get_correlated_mask(self):\n",
    "        # boolean (2N, 2N) mask: False on the main diagonal and on the two\n",
    "        # positive-pair diagonals (+/- batch_size), True at negative positions\n",
    "        diag = np.eye(2 * self.batch_size)\n",
    "        l1 = np.eye((2 * self.batch_size), 2 * self.batch_size, k=-self.batch_size)\n",
    "        l2 = np.eye((2 * self.batch_size), 2 * self.batch_size, k=self.batch_size)\n",
    "        mask = torch.from_numpy((diag + l1 + l2))\n",
    "        mask = (1 - mask).type(torch.bool)\n",
    "        return mask.to(self.device)\n",
    "\n",
    "    @staticmethod\n",
    "    def _dot_simililarity(x, y):\n",
    "        v = torch.tensordot(x.unsqueeze(1), y.T.unsqueeze(0), dims=2)\n",
    "        # x shape: (N, 1, C)\n",
    "        # y shape: (1, C, 2N)\n",
    "        # v shape: (N, 2N)\n",
    "        return v\n",
    "\n",
    "    def _cosine_simililarity(self, x, y):\n",
    "        # x shape: (N, 1, C)\n",
    "        # y shape: (1, 2N, C)\n",
    "        # v shape: (N, 2N)\n",
    "        v = self._cosine_similarity(x.unsqueeze(1), y.unsqueeze(0))\n",
    "        return v\n",
    "\n",
    "    def forward(self, zis, zjs):\n",
    "        \"\"\"Compute the NT-Xent loss, averaged over the 2N views.\"\"\"\n",
    "        representations = torch.cat([zjs, zis], dim=0)\n",
    "\n",
    "        similarity_matrix = self.similarity_function(representations, representations)\n",
    "\n",
    "        # filter out the scores from the positive samples\n",
    "        l_pos = torch.diag(similarity_matrix, self.batch_size)\n",
    "        r_pos = torch.diag(similarity_matrix, -self.batch_size)\n",
    "        positives = torch.cat([l_pos, r_pos]).view(2 * self.batch_size, 1)\n",
    "\n",
    "        negatives = similarity_matrix[self.mask_samples_from_same_repr].view(2 * self.batch_size, -1)\n",
    "\n",
    "        # column 0 holds the positive logit, so the target label is always 0\n",
    "        logits = torch.cat((positives, negatives), dim=1)\n",
    "        logits /= self.temperature\n",
    "\n",
    "        \"\"\"Criterion has an internal one-hot function. Here, make all positives as 1 while all negatives as 0. \"\"\"\n",
    "        labels = torch.zeros(2 * self.batch_size).to(self.device).long()\n",
    "        loss = self.criterion(logits, labels)\n",
    "\n",
    "        return loss / (2 * self.batch_size)\n",
    "\n",
    "\n",
    "class NTXentLoss_poly(torch.nn.Module):\n",
    "    \"\"\"NT-Xent contrastive loss with an additional polynomial correction term\n",
    "    weighted by the batch size (see forward()).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, device, batch_size, temperature, use_cosine_similarity):\n",
    "        super(NTXentLoss_poly, self).__init__()\n",
    "        self.batch_size = batch_size\n",
    "        self.temperature = temperature\n",
    "        self.device = device\n",
    "        self.softmax = torch.nn.Softmax(dim=-1)\n",
    "        self.mask_samples_from_same_repr = self._get_correlated_mask().type(torch.bool)\n",
    "        self.similarity_function = self._get_similarity_function(use_cosine_similarity)\n",
    "        self.criterion = torch.nn.CrossEntropyLoss(reduction=\"sum\")\n",
    "\n",
    "    def _get_similarity_function(self, use_cosine_similarity):\n",
    "        # select cosine or dot-product similarity\n",
    "        if use_cosine_similarity:\n",
    "            self._cosine_similarity = torch.nn.CosineSimilarity(dim=-1)\n",
    "            return self._cosine_simililarity\n",
    "        else:\n",
    "            return self._dot_simililarity\n",
    "\n",
    "    def _get_correlated_mask(self):\n",
    "        # boolean (2N, 2N) mask: False on the main diagonal and on the two\n",
    "        # positive-pair diagonals (+/- batch_size), True at negative positions\n",
    "        diag = np.eye(2 * self.batch_size)\n",
    "        l1 = np.eye((2 * self.batch_size), 2 * self.batch_size, k=-self.batch_size)\n",
    "        l2 = np.eye((2 * self.batch_size), 2 * self.batch_size, k=self.batch_size)\n",
    "        mask = torch.from_numpy((diag + l1 + l2))\n",
    "        mask = (1 - mask).type(torch.bool)\n",
    "        return mask.to(self.device)\n",
    "\n",
    "    @staticmethod\n",
    "    def _dot_simililarity(x, y):\n",
    "        v = torch.tensordot(x.unsqueeze(1), y.T.unsqueeze(0), dims=2)\n",
    "        # x shape: (N, 1, C)\n",
    "        # y shape: (1, C, 2N)\n",
    "        # v shape: (N, 2N)\n",
    "        return v\n",
    "\n",
    "    def _cosine_simililarity(self, x, y):\n",
    "        # x shape: (N, 1, C)\n",
    "        # y shape: (1, 2N, C)\n",
    "        # v shape: (N, 2N)\n",
    "        v = self._cosine_similarity(x.unsqueeze(1), y.unsqueeze(0))\n",
    "        return v\n",
    "\n",
    "    def forward(self, zis, zjs):\n",
    "        \"\"\"NT-Xent loss plus an epsilon-weighted polynomial term based on the\n",
    "        mean softmax probability mass assigned to the positives.\n",
    "        \"\"\"\n",
    "        representations = torch.cat([zjs, zis], dim=0)\n",
    "\n",
    "        similarity_matrix = self.similarity_function(representations, representations)\n",
    "\n",
    "        # filter out the scores from the positive samples\n",
    "        l_pos = torch.diag(similarity_matrix, self.batch_size)\n",
    "        r_pos = torch.diag(similarity_matrix, -self.batch_size)\n",
    "        positives = torch.cat([l_pos, r_pos]).view(2 * self.batch_size, 1)\n",
    "\n",
    "        negatives = similarity_matrix[self.mask_samples_from_same_repr].view(2 * self.batch_size, -1)\n",
    "\n",
    "        # column 0 holds the positive logit, so the target label is always 0\n",
    "        logits = torch.cat((positives, negatives), dim=1)\n",
    "        logits /= self.temperature\n",
    "\n",
    "        \"\"\"Criterion has an internal one-hot function. Here, make all positives as 1 while all negatives as 0. \"\"\"\n",
    "        labels = torch.zeros(2 * self.batch_size).to(self.device).long()\n",
    "        CE = self.criterion(logits, labels)\n",
    "\n",
    "        # one-hot targets: 1 for the positive column, 0 for all negative columns\n",
    "        onehot_label = torch.cat((torch.ones(2 * self.batch_size, 1),\n",
    "                                 torch.zeros(2 * self.batch_size, negatives.shape[-1])),\n",
    "                                 dim=-1).to(self.device).long()\n",
    "        # Add poly loss\n",
    "        pt = torch.mean(onehot_label * torch.nn.functional.softmax(logits, dim=-1))\n",
    "\n",
    "        epsilon = self.batch_size\n",
    "        # loss = CE/ (2 * self.batch_size) + epsilon*(1-pt) # replace 1 by 1/self.batch_size\n",
    "        loss = CE / (2 * self.batch_size) + epsilon * (1/self.batch_size - pt)\n",
    "        # loss = CE / (2 * self.batch_size)\n",
    "\n",
    "        return loss\n",
    "\n",
    "\n",
    "class hierarchical_contrastive_loss(torch.nn.Module):\n",
    "    \"\"\"Hierarchical contrastive loss combining instance-wise and temporal terms,\n",
    "    computed at multiple time scales via repeated max-pooling along time.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, device):\n",
    "        super(hierarchical_contrastive_loss, self).__init__()\n",
    "        self.device = device\n",
    "\n",
    "    def instance_contrastive_loss(self, z1, z2):\n",
    "        \"\"\"Contrast samples within the batch at each timestep (B x T x C inputs).\"\"\"\n",
    "        B, T = z1.size(0), z1.size(1)\n",
    "        if B == 1:\n",
    "            return z1.new_tensor(0.)  # no in-batch negatives with a single sample\n",
    "        z = torch.cat([z1, z2], dim=0)  # 2B x T x C\n",
    "        z = z.transpose(0, 1)  # T x 2B x C\n",
    "        sim = torch.matmul(z, z.transpose(1, 2))  # T x 2B x 2B\n",
    "        logits = torch.tril(sim, diagonal=-1)[:, :, :-1]  # T x 2B x (2B-1)\n",
    "        logits += torch.triu(sim, diagonal=1)[:, :, 1:]\n",
    "        logits = -F.log_softmax(logits, dim=-1)\n",
    "\n",
    "        i = torch.arange(B)\n",
    "        loss = (logits[:, i, B + i - 1].mean() + logits[:, B + i, i].mean()) / 2\n",
    "        return loss\n",
    "\n",
    "    def temporal_contrastive_loss(self, z1, z2):\n",
    "        \"\"\"Contrast timesteps within each sample (B x T x C inputs).\"\"\"\n",
    "        B, T = z1.size(0), z1.size(1)\n",
    "        if T == 1:\n",
    "            return z1.new_tensor(0.)  # no temporal negatives with a single step\n",
    "        z = torch.cat([z1, z2], dim=1)  # B x 2T x C\n",
    "        sim = torch.matmul(z, z.transpose(1, 2))  # B x 2T x 2T\n",
    "        logits = torch.tril(sim, diagonal=-1)[:, :, :-1]  # B x 2T x (2T-1)\n",
    "        logits += torch.triu(sim, diagonal=1)[:, :, 1:]\n",
    "        logits = -F.log_softmax(logits, dim=-1)\n",
    "\n",
    "        t = torch.arange(T)\n",
    "        loss = (logits[:, t, T + t - 1].mean() + logits[:, T + t, t].mean()) / 2\n",
    "        return loss\n",
    "\n",
    "    def forward(self, z1, z2, alpha=0.5, temporal_unit=0):\n",
    "        \"\"\"Average alpha-weighted instance and (1-alpha)-weighted temporal losses\n",
    "        over successively max-pooled time scales.\n",
    "        \"\"\"\n",
    "        loss = torch.tensor(0., device=self.device)  # , device=z1.device\n",
    "        d = 0\n",
    "        while z1.size(1) > 1:\n",
    "            if alpha != 0:\n",
    "                loss += alpha * self.instance_contrastive_loss(z1, z2)\n",
    "            if d >= temporal_unit:\n",
    "                if 1 - alpha != 0:\n",
    "                    loss += (1 - alpha) * self.temporal_contrastive_loss(z1, z2)\n",
    "            d += 1\n",
    "            # halve the temporal resolution for the next scale\n",
    "            z1 = F.max_pool1d(z1.transpose(1, 2), kernel_size=2).transpose(1, 2)\n",
    "            z2 = F.max_pool1d(z2.transpose(1, 2), kernel_size=2).transpose(1, 2)\n",
    "        if z1.size(1) == 1:\n",
    "            if alpha != 0:\n",
    "                loss += alpha * self.instance_contrastive_loss(z1, z2)\n",
    "            d += 1\n",
    "        return loss / d"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### model.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# cache directory for the pretrained ClinicalBERT weights\n",
    "BERT_PRETRAIN_PATH = \"./BERT_pretrain/\"\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")  # single global device\n",
    "\n",
    "\n",
    "class TFC(nn.Module):\n",
    "    \"\"\"Time-Frequency Consistency encoder: two transformer branches (time and\n",
    "    frequency domain), each followed by a projector into a shared 128-d space.\n",
    "    \"\"\"\n",
    "    def __init__(self, configs):\n",
    "        super(TFC, self).__init__()\n",
    "\n",
    "        # pools any input length to exactly TSlength_aligned steps\n",
    "        self.adaptive_avgpool = nn.AdaptiveAvgPool1d(configs.TSlength_aligned)\n",
    "\n",
    "        encoder_layers_t = TransformerEncoderLayer(configs.TSlength_aligned,\n",
    "                                                   dim_feedforward=2*configs.TSlength_aligned,\n",
    "                                                   nhead=2, )\n",
    "        self.transformer_encoder_t = TransformerEncoder(encoder_layers_t, 2)\n",
    "\n",
    "        self.projector_t = nn.Sequential(\n",
    "            nn.Linear(configs.TSlength_aligned, 256),\n",
    "            nn.BatchNorm1d(256),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(256, 128)\n",
    "        )\n",
    "\n",
    "        encoder_layers_f = TransformerEncoderLayer(configs.TSlength_aligned,\n",
    "                                                   dim_feedforward=2*configs.TSlength_aligned,\n",
    "                                                   nhead=2,)\n",
    "        self.transformer_encoder_f = TransformerEncoder(encoder_layers_f, 2)\n",
    "\n",
    "        self.projector_f = nn.Sequential(\n",
    "            nn.Linear(configs.TSlength_aligned, 256),\n",
    "            nn.BatchNorm1d(256),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(256, 128)\n",
    "        )\n",
    "\n",
    "    def forward(self, x_in_t, x_in_f):\n",
    "        \"\"\"Encode time-domain and frequency-domain inputs.\n",
    "\n",
    "        Returns (h_time, z_time, h_freq, z_freq): the flattened encoder features\n",
    "        and their 128-d projections for both domains.\n",
    "        \"\"\"\n",
    "        # Adaptive average pooling\n",
    "        x_in_t = self.adaptive_avgpool(x_in_t)\n",
    "        x_in_f = self.adaptive_avgpool(x_in_f)\n",
    "\n",
    "        # Use Transformer\n",
    "        x = self.transformer_encoder_t(x_in_t)\n",
    "        h_time = x.reshape(x.shape[0], -1)\n",
    "\n",
    "        # Cross-space projector\n",
    "        z_time = self.projector_t(h_time)\n",
    "\n",
    "        # Frequency-based contrastive encoder\n",
    "        f = self.transformer_encoder_f(x_in_f)\n",
    "        h_freq = f.reshape(f.shape[0], -1)\n",
    "\n",
    "        # Cross-space projector\n",
    "        z_freq = self.projector_f(h_freq)\n",
    "\n",
    "        return h_time, z_time, h_freq, z_freq\n",
    "\n",
    "\n",
    "# Downstream classifier only used in finetuning\n",
    "# class target_classifier(nn.Module):\n",
    "#     def __init__(self, configs):\n",
    "#         super(target_classifier, self).__init__()\n",
    "#         self.logits = nn.Linear(2 * 128, 64)\n",
    "#         self.logits_simple = nn.Linear(64, configs.num_classes_target)\n",
    "\n",
    "#     def forward(self, emb):\n",
    "#         emb_flat = emb.reshape(emb.shape[0], -1)\n",
    "#         emb = torch.sigmoid(self.logits(emb_flat))\n",
    "#         pred = self.logits_simple(emb)\n",
    "#         return pred\n",
    "\n",
    "class FrozenLanguageModel(nn.Module):\n",
    "    \"\"\"\n",
    "    Description:\n",
    "        Bio_ClinicalBERT with all BERT weights frozen. Note that the linear\n",
    "        dimension_reducer (768 -> 256) on top remains trainable.\n",
    "    \"\"\"\n",
    "    def __init__(self):\n",
    "        super(FrozenLanguageModel, self).__init__()\n",
    "        self.language_model = BertModel.from_pretrained(\n",
    "            'emilyalsentzer/Bio_ClinicalBERT',\n",
    "            cache_dir=BERT_PRETRAIN_PATH\n",
    "        )\n",
    "        # freeze every BERT parameter; only dimension_reducer receives gradients\n",
    "        for param in self.language_model.parameters():\n",
    "            param.requires_grad = False\n",
    "        self.dimension_reducer = nn.Linear(768, 256)\n",
    "\n",
    "    def forward(self, input_ids, attention_mask) -> torch.Tensor:\n",
    "        \"\"\"\n",
    "        Description:\n",
    "            Encode tokenized text with the frozen BERT and project the first\n",
    "            ([CLS]) token of the last hidden state down to 256 dimensions.\n",
    "        Args:\n",
    "            input_ids: The input ids of the language model.\n",
    "            attention_mask: The attention mask of the language model.\n",
    "        Returns:\n",
    "            The 256-d reduced representation of the [CLS] token.\n",
    "        \"\"\"\n",
    "        outputs = self.language_model(input_ids=input_ids, attention_mask=attention_mask)\n",
    "        sentence_representation = outputs.last_hidden_state[:, 0, :]  # [CLS] token embedding\n",
    "        reduced_representation = self.dimension_reducer(sentence_representation)\n",
    "        return reduced_representation\n",
    "\n",
    "\n",
    "class TargetClassifier(nn.Module):\n",
    "    \"\"\"Downstream classifier head plus a frozen ClinicalBERT text encoder,\n",
    "    used e.g. for zero-shot matching against diagnostic label texts.\n",
    "    \"\"\"\n",
    "    def __init__(self, configs):\n",
    "        super(TargetClassifier, self).__init__()\n",
    "        self.logits = nn.Linear(2 * 128, 64)  # input: concatenated time+freq embeddings (2 x 128)\n",
    "        self.logits_simple = nn.Linear(64, configs.num_classes_target)\n",
    "        self.text_encoder = FrozenLanguageModel()\n",
    "        self.embedding_dim = self.text_encoder.language_model.config.hidden_size  # 768 for BERT-base\n",
    "        self.tokenizer = BertTokenizer.from_pretrained('emilyalsentzer/Bio_ClinicalBERT',\n",
    "                                                       cache_dir=BERT_PRETRAIN_PATH)\n",
    "\n",
    "    @staticmethod\n",
    "    def get_diagnostic_string(label: int):\n",
    "        class_names = {\n",
    "            0: \"Normal ECG\",  # \"Normal beat\"\n",
    "            1: \"Myocardial Infarction\",  # \"Supraventricular premature beat\"\n",
    "            2: \"ST/T change\",  # \"Premature ventricular contraction\"\n",
    "            3: \"Hypertrophy\",  # \"Fusion of ventricular and normal beat\"\n",
    "            4: \"Conducion Disturbance\"  # \"Unclassifiable beat\"\n",
    "        }\n",
    "\n",
    "        if label in class_names:\n",
    "            diagnostic_type = class_names[label]\n",
    "            return f\"The ECG of {diagnostic_type}, a type of diagnostic\"\n",
    "        else:\n",
    "            return \"Invalid label\"\n",
    "\n",
    "    def zero_shot_process_text(self, labels) -> torch.Tensor:\n",
    "        \"\"\"\n",
    "        Description:\n",
    "            Process the text data for zero-shot learning.\n",
    "\n",
    "        Args:\n",
    "            text_data: The text data to be processed.\n",
    "\n",
    "        Returns:\n",
    "            torch.Tensor: The processed text data.\n",
    "        \"\"\"\n",
    "        categories = [\n",
    "            \"Normal ECG\",\n",
    "            \"Myocardial Infarction\",\n",
    "            \"ST/T change\",\n",
    "            \"Hypertrophy\",\n",
    "            \"Conducion Disturbance\"\n",
    "        ]\n",
    "\n",
    "        prompts = [self.get_diagnostic_string(label.item()) for label in labels]\n",
    "        tokens = self.tokenizer(prompts, padding=True, truncation=True, return_tensors='pt', max_length=100)\n",
    "\n",
    "        input_ids, attention_mask = tokens['input_ids'], tokens['attention_mask']\n",
    "        text_representation = self.text_encoder(input_ids, attention_mask)\n",
    "\n",
    "        class_text_representation = {\n",
    "            label: feature for label, feature in zip(categories, text_representation)\n",
    "        }\n",
    "\n",
    "        class_text_rep_tensor = torch.stack(list(class_text_representation.values()))\n",
    "\n",
    "        return class_text_rep_tensor\n",
    "\n",
    "    def similarity_classify(self, fea_concat: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:\n",
    "        \"\"\"\n",
    "        ...\n",
    "        \"\"\"\n",
    "        # Get text embeddings from Language Model\n",
    "        class_text_rep_tensor = self.zero_shot_process_text(labels)\n",
    "\n",
    "        # Calculate cosine similarity between the concatenated features and the text representation\n",
    "        similarities = [F.cosine_similarity(elem.unsqueeze(0), class_text_rep_tensor) for elem in fea_concat]\n",
    "        similarities = torch.stack(similarities)\n",
    "\n",
    "        # probabilities = F.softmax(similarities, dim=1).cpu().detach().numpy()\n",
    "        # max_probability_class = np.argmax(probabilities, axis=1)\n",
    "        # max_probability_class = torch.tensor(max_probability_class).long()\n",
    "\n",
    "        # return max_probability_class\n",
    "\n",
    "        return similarities.to(device)\n",
    "\n",
    "    def forward(self, fea_concat: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:\n",
    "        \"\"\"\n",
    "        ...\n",
    "        \"\"\"\n",
    "        pred = self.similarity_classify(fea_concat, labels)\n",
    "        return pred\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### trainer.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def Trainer(model,  model_optimizer, classifier,\n",
    "            classifier_optimizer, train_dl, valid_dl,\n",
    "            test_dl, device, logger,\n",
    "            config, experiment_log_dir, training_mode):\n",
    "    \"\"\"\n",
    "    Description:\n",
    "        The main training function.\n",
    "        This function trains the model and the classifier.\n",
    "        Trainer is divided into three stages:\n",
    "            1) pretrain\n",
    "            2) finetune\n",
    "            3) test\n",
    "\n",
    "    Args:\n",
    "        model: The model used for training.\n",
    "        model_optimizer: The optimizer used for training.\n",
    "        classifier: The classifier used for training.\n",
    "        classifier_optimizer: The optimizer used for training.\n",
    "        train_dl: The training dataloader.\n",
    "        valid_dl: The validation dataloader.\n",
    "        test_dl: The test dataloader.\n",
    "        device: The device used for training.\n",
    "        logger: The logger used for logging.\n",
    "        config: The configuration dictionary.\n",
    "        experiment_log_dir: The directory where the experiment logs will be stored.\n",
    "        training_mode: The training mode.\n",
    "\n",
    "    Returns:\n",
    "        None\n",
    "    \"\"\"\n",
    "    # Start training\n",
    "    logger.debug(\"Training started ....\")\n",
    "\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    # Lowers the encoder LR when the fine-tuning validation loss plateaus\n",
    "    # (stepped with valid_loss inside the fine-tuning loop below).\n",
    "    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(model_optimizer, 'min')\n",
    "    if training_mode == 'pre_train':\n",
    "        print('Pretraining on source dataset')\n",
    "        for epoch in range(1, config.num_epoch + 1):\n",
    "            # Train and validate\n",
    "            \"\"\"Train. In fine-tuning, this part is also trained???\"\"\"\n",
    "            train_loss = model_pretrain(model, model_optimizer, criterion, train_dl, config, device, training_mode)\n",
    "            logger.debug(f\"\\nPre-training Epoch : {epoch}/{config.num_epoch}, Train Loss : {train_loss.item():.4f}\")\n",
    "\n",
    "        # Save pretrained model\n",
    "        os.makedirs(os.path.join(experiment_log_dir, \"saved_models\"), exist_ok=True)\n",
    "        chkpoint = {'model_state_dict': model.state_dict()}\n",
    "        torch.save(chkpoint, os.path.join(experiment_log_dir, \"saved_models\", \"ckp_last.pt\"))\n",
    "        print(f\"Pretrained model is stored at folder:{experiment_log_dir+'/saved_models'+'/ckp_last.pt'}\")\n",
    "\n",
    "    # Fine-tuning and Test\n",
    "    if training_mode != 'pre_train':\n",
    "        # fine-tune\n",
    "        print('Fine-tune on Fine-tuning set')\n",
    "        performance_list = []\n",
    "        total_f1 = []\n",
    "        KNN_f1 = []\n",
    "        # Declared global so embeddings/labels remain accessible after training.\n",
    "        global emb_finetune, label_finetune, emb_test, label_test\n",
    "\n",
    "        for epoch in tqdm(range(1, config.num_epoch + 1), desc=\"Epochs\"):\n",
    "            logger.debug(f'\\nEpoch : {epoch}')\n",
    "\n",
    "            valid_loss, emb_finetune, label_finetune, F1 = model_finetune(model, model_optimizer, valid_dl,\n",
    "                                                                          config, device, training_mode,\n",
    "                                                                          classifier=classifier,\n",
    "                                                                          classifier_optimizer=classifier_optimizer)\n",
    "            scheduler.step(valid_loss)\n",
    "\n",
    "            # save best fine-tuning model\n",
    "            global arch\n",
    "            arch = 'ecg2mit-bih'\n",
    "            # Checkpoint only when this epoch's F1 beats every previous epoch.\n",
    "            if len(total_f1) == 0 or F1 > max(total_f1):\n",
    "                print('update fine-tuned model')\n",
    "                os.makedirs('experiments_logs/finetunemodel/', exist_ok=True)\n",
    "                torch.save(model.state_dict(), 'experiments_logs/finetunemodel/' + arch + '_model.pt')\n",
    "                torch.save(classifier.state_dict(), 'experiments_logs/finetunemodel/' + arch + '_classifier.pt')\n",
    "            total_f1.append(F1)\n",
    "\n",
    "            # evaluate on the test set\n",
    "            # Testing set\n",
    "            logger.debug('Test on Target datasts test set')\n",
    "            # Reload the best-F1 checkpoint saved above before evaluating.\n",
    "            model.load_state_dict(torch.load('experiments_logs/finetunemodel/' + arch + '_model.pt'))\n",
    "            classifier.load_state_dict(torch.load('experiments_logs/finetunemodel/' + arch + '_classifier.pt'))\n",
    "            test_loss, test_acc, test_auc, test_prc, emb_test, label_test, performance = model_test(\n",
    "                model, test_dl, config, device, training_mode,\n",
    "                classifier=classifier, classifier_optimizer=classifier_optimizer\n",
    "            )\n",
    "            performance_list.append(performance)\n",
    "\n",
    "            # Use KNN as another classifier; it's an alternation of the MLP classifier in function model_test.\n",
    "            # Experiments show KNN and MLP may work differently in different settings, so here we provide both.\n",
    "            # train classifier: KNN\n",
    "            neigh = KNeighborsClassifier(n_neighbors=5)\n",
    "            neigh.fit(emb_finetune, label_finetune)\n",
    "            knn_acc_train = neigh.score(emb_finetune, label_finetune)\n",
    "            # print('KNN finetune acc:', knn_acc_train)\n",
    "            representation_test = emb_test.detach().cpu().numpy()\n",
    "\n",
    "            knn_result = neigh.predict(representation_test)\n",
    "            knn_result_score = neigh.predict_proba(representation_test)\n",
    "            one_hot_label_test = one_hot_encoding(label_test)\n",
    "            # print(classification_report(label_test, knn_result, digits=4))\n",
    "            # print(confusion_matrix(label_test, knn_result))\n",
    "            knn_acc = accuracy_score(label_test, knn_result)\n",
    "            precision = precision_score(label_test, knn_result, average='macro', )\n",
    "            recall = recall_score(label_test, knn_result, average='macro', )\n",
    "            F1 = f1_score(label_test, knn_result, average='macro')\n",
    "            auc = roc_auc_score(one_hot_label_test, knn_result_score, average=\"macro\", multi_class=\"ovr\")\n",
    "            prc = average_precision_score(one_hot_label_test, knn_result_score, average=\"macro\")\n",
    "            print('KNN Testing: Acc=%.4f| Precision = %.4f | Recall = %.4f | F1 = %.4f | AUROC= %.4f | AUPRC=%.4f' %\n",
    "                  (knn_acc, precision, recall, F1, auc, prc))\n",
    "            KNN_f1.append(F1)\n",
    "\n",
    "        logger.debug(\"\\n################## Best testing performance! #########################\")\n",
    "        performance_array = np.array(performance_list)\n",
    "        # Select the epoch whose test accuracy (performance column 0) is highest.\n",
    "        best_performance = performance_array[np.argmax(performance_array[:, 0], axis=0)]\n",
    "        # print('Best Testing Performance: Acc=%.4f| Precision = %.4f | Recall = %.4f | F1 = %.4f | AUROC= %.4f '\n",
    "        #       '| AUPRC=%.4f' % (best_performance[0], best_performance[1], best_performance[2], best_performance[3],\n",
    "        #                         best_performance[4], best_performance[5]))\n",
    "        # print('Best KNN F1', max(KNN_f1))\n",
    "\n",
    "        logger.debug('Best Testing Performance: Acc=%.4f | Precision = %.4f | Recall = %.4f | F1 = %.4f | AUROC= %.4f | AUPRC=%.4f' %\n",
    "                     (best_performance[0], best_performance[1], best_performance[2], best_performance[3],\n",
    "                      best_performance[4], best_performance[5]))\n",
    "\n",
    "        logger.debug('Best KNN F1: %.4f' % max(KNN_f1))\n",
    "\n",
    "    logger.debug(\"\\n################## Training is Done! #########################\")\n",
    "\n",
    "\n",
    "def model_pretrain(model, model_optimizer, criterion, train_loader, config, device, training_mode):\n",
    "    \"\"\"\n",
    "    Description:\n",
    "        Run one epoch of self-supervised pre-training with the TF-C\n",
    "        contrastive objective (time, frequency and cross-domain NT-Xent losses).\n",
    "\n",
    "    Args:\n",
    "        model: encoder producing (h_t, z_t, h_f, z_f) embeddings.\n",
    "        model_optimizer: optimizer updating the encoder.\n",
    "        criterion: unused here; kept for interface compatibility.\n",
    "        train_loader: yields (data, labels, aug1, data_f, aug1_f) batches.\n",
    "        config: configuration object (batch_size, Context_Cont.*).\n",
    "        device: torch device the batches are moved to.\n",
    "        training_mode: unused here; kept for interface compatibility.\n",
    "\n",
    "    Returns:\n",
    "        torch.Tensor: mean pre-training loss over the epoch.\n",
    "    \"\"\"\n",
    "    total_loss = []\n",
    "    model.train()\n",
    "    global loss, loss_t, loss_f, l_TF, loss_c, data_test, data_f_test\n",
    "\n",
    "    \"\"\"NTXentLoss: normalized temperature-scaled cross entropy loss. From SimCLR\"\"\"\n",
    "    # Built once: the criterion is stateless, so per-batch construction was wasted work.\n",
    "    nt_xent_criterion = NTXentLoss_poly(device, config.batch_size, config.Context_Cont.temperature,\n",
    "                                        config.Context_Cont.use_cosine_similarity)  # device, 128, 0.2, True\n",
    "\n",
    "    for batch_idx, (data, labels, aug1, data_f, aug1_f) in enumerate(tqdm(train_loader)):\n",
    "        data, labels = data.float().to(device), labels.long().to(device)  # data: [128, 1, 178], labels: [128]\n",
    "        aug1 = aug1.float().to(device)  # aug1 = aug2 : [128, 1, 178]\n",
    "        data_f, aug1_f = data_f.float().to(device), aug1_f.float().to(device)  # aug1 = aug2 : [128, 1, 178]\n",
    "\n",
    "        # Bug fix: gradients must be cleared for every batch. Previously\n",
    "        # zero_grad() was called once before the loop, so gradients\n",
    "        # accumulated across all batches of the epoch.\n",
    "        model_optimizer.zero_grad()\n",
    "\n",
    "        \"\"\"Produce embeddings\"\"\"\n",
    "        h_t, z_t, h_f, z_f = model(data, data_f)\n",
    "        h_t_aug, z_t_aug, h_f_aug, z_f_aug = model(aug1, aug1_f)\n",
    "\n",
    "        \"\"\"Compute Pre-train loss\"\"\"\n",
    "        loss_t = nt_xent_criterion(h_t, h_t_aug)\n",
    "        loss_f = nt_xent_criterion(h_f, h_f_aug)\n",
    "        l_TF = nt_xent_criterion(z_t, z_f)  # this is the initial version of TF loss\n",
    "\n",
    "        l_1 = nt_xent_criterion(z_t, z_f_aug)\n",
    "        l_2 = nt_xent_criterion(z_t_aug, z_f)\n",
    "        l_3 = nt_xent_criterion(z_t_aug, z_f_aug)\n",
    "        # Not part of the optimized loss; kept (and exported as a global) for inspection.\n",
    "        loss_c = (1 + l_TF - l_1) + (1 + l_TF - l_2) + (1 + l_TF - l_3)\n",
    "\n",
    "        lam = 0.2\n",
    "        loss = lam*(loss_t + loss_f) + l_TF\n",
    "\n",
    "        total_loss.append(loss.item())\n",
    "        loss.backward()\n",
    "        model_optimizer.step()\n",
    "\n",
    "    print(f\"Pretraining: overall loss: {loss:.4f}, l_t: {loss_t:.4f}, l_f: {loss_f:.4f}, l_TF: {l_TF:.4f}\")\n",
    "\n",
    "    ave_loss = torch.tensor(total_loss).mean()\n",
    "\n",
    "    return ave_loss\n",
    "\n",
    "\n",
    "def model_finetune(model, model_optimizer, val_dl,\n",
    "                   config, device, training_mode,\n",
    "                   classifier=None, classifier_optimizer=None):\n",
    "    \"\"\"\n",
    "    Description:\n",
    "        Fine-tune the pre-trained encoder jointly with the target classifier\n",
    "        for one epoch and report epoch-level metrics.\n",
    "\n",
    "    Args:\n",
    "        model: the pre-trained encoder being fine-tuned.\n",
    "        model_optimizer: optimizer for the encoder.\n",
    "        val_dl: fine-tuning dataloader yielding (data, labels, aug1, data_f, aug1_f).\n",
    "        config: configuration object (target_batch_size, Context_Cont.*).\n",
    "        device: torch device the batches are moved to.\n",
    "        training_mode: current training mode string.\n",
    "        classifier: classifier trained jointly with the encoder.\n",
    "        classifier_optimizer: optimizer for the classifier.\n",
    "\n",
    "    Returns:\n",
    "        (ave_loss, feas, trgs, F1): mean loss, learned embeddings,\n",
    "        true labels and macro-F1 over the whole epoch.\n",
    "    \"\"\"\n",
    "    global labels, pred_numpy, fea_concat_flat\n",
    "    model.train()\n",
    "    classifier.train()\n",
    "\n",
    "    total_loss = []\n",
    "    total_acc = []\n",
    "    total_auc = []  # accumulated over the whole epoch\n",
    "    total_prc = []\n",
    "\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    outs = np.array([])\n",
    "    trgs = np.array([])\n",
    "    feas = np.array([])\n",
    "\n",
    "    # Built once: the contrastive criterion is stateless across batches.\n",
    "    nt_xent_criterion = NTXentLoss_poly(device, config.target_batch_size, config.Context_Cont.temperature,\n",
    "                                        config.Context_Cont.use_cosine_similarity)\n",
    "\n",
    "    for data, labels, aug1, data_f, aug1_f in tqdm(val_dl):\n",
    "        data, labels = data.float().to(device), labels.long().to(device)\n",
    "        data_f = data_f.float().to(device)\n",
    "        aug1 = aug1.float().to(device)\n",
    "        aug1_f = aug1_f.float().to(device)\n",
    "\n",
    "        \"\"\"if random initialization:\"\"\"\n",
    "        model_optimizer.zero_grad()  # The gradients are zero, but the parameters are still randomly initialized.\n",
    "        classifier_optimizer.zero_grad()  # the classifier is newly added and randomly initialized\n",
    "\n",
    "        \"\"\"Produce embeddings\"\"\"\n",
    "        # Get time and frequency embeddings from the encoder\n",
    "        h_t, z_t, h_f, z_f = model(data, data_f)\n",
    "        h_t_aug, z_t_aug, h_f_aug, z_f_aug = model(aug1, aug1_f)\n",
    "\n",
    "        loss_t = nt_xent_criterion(h_t, h_t_aug)\n",
    "        loss_f = nt_xent_criterion(h_f, h_f_aug)\n",
    "        l_TF = nt_xent_criterion(z_t, z_f)\n",
    "\n",
    "        l_1 = nt_xent_criterion(z_t, z_f_aug)\n",
    "        l_2 = nt_xent_criterion(z_t_aug, z_f)\n",
    "        l_3 = nt_xent_criterion(z_t_aug, z_f_aug)\n",
    "        loss_c = (1 + l_TF - l_1) + (1 + l_TF - l_2) + (1 + l_TF - l_3)\n",
    "\n",
    "        \"\"\"Add supervised classifier: 1) it's unique to finetuning. 2) this classifier will also be used in test.\"\"\"\n",
    "        fea_concat = torch.cat((z_t, z_f), dim=1)\n",
    "        predictions = classifier(fea_concat, labels)\n",
    "        fea_concat_flat = fea_concat.reshape(fea_concat.shape[0], -1)\n",
    "        loss_p = criterion(predictions, labels)\n",
    "\n",
    "        lam = 0.1\n",
    "        loss = loss_p + l_TF + lam * (loss_t + loss_f)\n",
    "\n",
    "        acc_bs = labels.eq(predictions.detach().argmax(dim=1)).float().mean()\n",
    "        onehot_label = F.one_hot(labels, num_classes=5)\n",
    "        pred_numpy = predictions.detach().cpu().numpy()\n",
    "\n",
    "        try:\n",
    "            auc_bs = roc_auc_score(onehot_label.detach().cpu().numpy(), pred_numpy, average=\"macro\", multi_class=\"ovr\")\n",
    "        except ValueError:\n",
    "            auc_bs = float(0)\n",
    "\n",
    "        prc_bs = average_precision_score(onehot_label.detach().cpu().numpy(), pred_numpy)\n",
    "\n",
    "        total_acc.append(acc_bs)\n",
    "        total_auc.append(auc_bs)\n",
    "        total_prc.append(prc_bs)\n",
    "        total_loss.append(loss.item())\n",
    "        loss.backward()\n",
    "        model_optimizer.step()\n",
    "        classifier_optimizer.step()\n",
    "\n",
    "        if training_mode != \"pre_train\":\n",
    "            pred = predictions.max(1, keepdim=True)[1]  # get the index of the max log-probability\n",
    "            outs = np.append(outs, pred.cpu().numpy())\n",
    "            trgs = np.append(trgs, labels.data.cpu().numpy())\n",
    "            feas = np.append(feas, fea_concat_flat.data.cpu().numpy())\n",
    "\n",
    "    feas = feas.reshape([len(trgs), -1])  # produce the learned embeddings\n",
    "\n",
    "    # Bug fix: precision/recall/F1 are now computed over the whole epoch\n",
    "    # (accumulated outs/trgs); previously only the final batch's labels\n",
    "    # and predictions were used.\n",
    "    pred_numpy = np.argmax(pred_numpy, axis=1)  # keep the exported global in class-index form\n",
    "    precision = precision_score(trgs, outs, average='macro')\n",
    "    recall = recall_score(trgs, outs, average='macro')\n",
    "    F1 = f1_score(trgs, outs, average='macro')\n",
    "    ave_loss = torch.tensor(total_loss).mean()\n",
    "    ave_acc = torch.tensor(total_acc).mean()\n",
    "    ave_auc = torch.tensor(total_auc).mean()\n",
    "    ave_prc = torch.tensor(total_prc).mean()\n",
    "\n",
    "    print(' Finetune: loss = %.4f| Acc=%.4f | Precision = %.4f | Recall = %.4f | F1 = %.4f| AUROC=%.4f | AUPRC = %.4f'\n",
    "          % (ave_loss, ave_acc * 100, precision * 100, recall * 100, F1 * 100, ave_auc * 100, ave_prc * 100))\n",
    "\n",
    "    return ave_loss, feas, trgs, F1\n",
    "\n",
    "\n",
    "def model_test(model, test_dl, config,  device, training_mode, classifier=None, classifier_optimizer=None):\n",
    "    \"\"\"\n",
    "    Description:\n",
    "        This function is used for testing.\n",
    "        model_test is divided into two stages:\n",
    "            1) test\n",
    "            2) test_classifier\n",
    "\n",
    "    Args:\n",
    "        model: The model used for testing.\n",
    "        test_dl: The testing dataloader.\n",
    "        config: The configuration dictionary.\n",
    "        device: The device used for testing.\n",
    "        training_mode: The training mode.\n",
    "        classifier: The classifier whose predictions are evaluated.\n",
    "        classifier_optimizer: Unused here; kept for a uniform call signature.\n",
    "\n",
    "    Returns:\n",
    "        (total_loss, total_acc, total_auc, total_prc, emb_test_all, trgs, performance):\n",
    "        mean loss / accuracy / AUROC / AUPRC over batches, the concatenated\n",
    "        test embeddings, the true labels, and a list\n",
    "        [acc, precision, recall, F1, AUROC, AUPRC] in percent.\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    classifier.eval()\n",
    "\n",
    "    total_loss = []\n",
    "    total_acc = []\n",
    "    total_auc = []\n",
    "    total_prc = []\n",
    "\n",
    "    criterion = nn.CrossEntropyLoss()  # the loss for downstream classifier\n",
    "    outs = np.array([])\n",
    "    trgs = np.array([])\n",
    "    emb_test_all = []\n",
    "\n",
    "    with torch.no_grad():\n",
    "        # Dummy leading zero lets np.concatenate run on the first batch; stripped after the loop.\n",
    "        labels_numpy_all, pred_numpy_all = np.zeros(1), np.zeros(1)\n",
    "        for data, labels, _, data_f, _ in tqdm(test_dl):\n",
    "            data, labels = data.float().to(device), labels.long().to(device)\n",
    "            data_f = data_f.float().to(device)\n",
    "\n",
    "            \"\"\"Add supervised classifier: 1) it's unique to finetuning. 2) this classifier will also be used in test\"\"\"\n",
    "            h_t, z_t, h_f, z_f = model(data, data_f)\n",
    "            fea_concat = torch.cat((z_t, z_f), dim=1)\n",
    "            predictions_test = classifier(fea_concat, labels)\n",
    "            fea_concat_flat = fea_concat.reshape(fea_concat.shape[0], -1)\n",
    "            emb_test_all.append(fea_concat_flat)\n",
    "\n",
    "            loss = criterion(predictions_test, labels)\n",
    "            acc_bs = labels.eq(predictions_test.detach().argmax(dim=1)).float().mean()\n",
    "            onehot_label = F.one_hot(labels, num_classes=5)\n",
    "            pred_numpy = predictions_test.detach().cpu().numpy()\n",
    "            labels_numpy = labels.detach().cpu().numpy()\n",
    "            try:\n",
    "                auc_bs = roc_auc_score(onehot_label.detach().cpu().numpy(), pred_numpy,\n",
    "                                       average=\"macro\", multi_class=\"ovr\")\n",
    "            except ValueError:\n",
    "                # AUROC is undefined when a class is missing from the batch.\n",
    "                auc_bs = float(0)\n",
    "            prc_bs = average_precision_score(onehot_label.detach().cpu().numpy(), pred_numpy, average=\"macro\")\n",
    "            pred_numpy = np.argmax(pred_numpy, axis=1)\n",
    "\n",
    "            total_acc.append(acc_bs)\n",
    "            total_auc.append(auc_bs)\n",
    "            total_prc.append(prc_bs)\n",
    "\n",
    "            total_loss.append(loss.item())\n",
    "            pred = predictions_test.max(1, keepdim=True)[1]  # get the index of the max log-probability\n",
    "            outs = np.append(outs, pred.cpu().numpy())\n",
    "            trgs = np.append(trgs, labels.data.cpu().numpy())\n",
    "            labels_numpy_all = np.concatenate((labels_numpy_all, labels_numpy))\n",
    "            pred_numpy_all = np.concatenate((pred_numpy_all, pred_numpy))\n",
    "\n",
    "    # Drop the dummy leading element added before the loop.\n",
    "    labels_numpy_all = labels_numpy_all[1:]\n",
    "    pred_numpy_all = pred_numpy_all[1:]\n",
    "\n",
    "    # print('Test classification report', classification_report(labels_numpy_all, pred_numpy_all))\n",
    "    # print(confusion_matrix(labels_numpy_all, pred_numpy_all))\n",
    "    precision = precision_score(labels_numpy_all, pred_numpy_all, average='macro', )\n",
    "    recall = recall_score(labels_numpy_all, pred_numpy_all, average='macro', )\n",
    "    F1 = f1_score(labels_numpy_all, pred_numpy_all, average='macro', )\n",
    "    acc = accuracy_score(labels_numpy_all, pred_numpy_all, )\n",
    "\n",
    "    total_loss = torch.tensor(total_loss).mean()\n",
    "    total_acc = torch.tensor(total_acc).mean()\n",
    "    total_auc = torch.tensor(total_auc).mean()\n",
    "    total_prc = torch.tensor(total_prc).mean()\n",
    "\n",
    "    performance = [acc * 100, precision * 100, recall * 100, F1 * 100, total_auc * 100, total_prc * 100]\n",
    "    print('MLP Testing: Acc=%.4f| Precision = %.4f | Recall = %.4f | F1 = %.4f | AUROC= %.4f | AUPRC=%.4f'\n",
    "          % (acc * 100, precision * 100, recall * 100, F1 * 100, total_auc * 100, total_prc * 100))\n",
    "    emb_test_all = torch.concat(tuple(emb_test_all))\n",
    "    return total_loss, total_acc, total_auc, total_prc, emb_test_all, trgs, performance"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### utils.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def set_requires_grad(model, dict_, requires_grad=True):\n",
    "    for param in model.named_parameters():\n",
    "        if param[0] in dict_:\n",
    "            param[1].requires_grad = requires_grad\n",
    "\n",
    "\n",
    "def fix_randomness(SEED):\n",
    "    random.seed(SEED)\n",
    "    np.random.seed(SEED)\n",
    "    torch.manual_seed(SEED)\n",
    "    torch.cuda.manual_seed(SEED)\n",
    "    torch.backends.cudnn.deterministic = True\n",
    "\n",
    "\n",
    "def epoch_time(start_time, end_time):\n",
    "    elapsed_time = end_time - start_time\n",
    "    elapsed_mins = int(elapsed_time / 60)\n",
    "    elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n",
    "    return elapsed_mins, elapsed_secs\n",
    "\n",
    "\n",
    "def _calc_metrics(pred_labels, true_labels, log_dir, home_path):\n",
    "    \"\"\"Persist label arrays, a classification report (xlsx) and a confusion matrix.\"\"\"\n",
    "    pred_labels = np.array(pred_labels).astype(int)\n",
    "    true_labels = np.array(true_labels).astype(int)\n",
    "\n",
    "    # Persist the raw label arrays next to the run logs.\n",
    "    labels_dir = os.path.join(log_dir, \"labels\")\n",
    "    os.makedirs(labels_dir, exist_ok=True)\n",
    "    np.save(os.path.join(labels_dir, \"predicted_labels.npy\"), pred_labels)\n",
    "    np.save(os.path.join(labels_dir, \"true_labels.npy\"), true_labels)\n",
    "\n",
    "    report = classification_report(true_labels, pred_labels, digits=6, output_dict=True)\n",
    "    matrix = confusion_matrix(true_labels, pred_labels)\n",
    "    report_df = pd.DataFrame(report)\n",
    "    report_df[\"cohen\"] = cohen_kappa_score(true_labels, pred_labels)\n",
    "    report_df[\"accuracy\"] = accuracy_score(true_labels, pred_labels)\n",
    "    report_df = report_df * 100\n",
    "\n",
    "    # Derive run identifiers from the log directory layout.\n",
    "    exp_name = os.path.split(os.path.dirname(log_dir))[-1]\n",
    "    training_mode = os.path.basename(log_dir)\n",
    "\n",
    "    # Save the classification report as a spreadsheet.\n",
    "    report_path = os.path.join(home_path, log_dir, f\"{exp_name}_{training_mode}_classification_report.xlsx\")\n",
    "    report_df.to_excel(report_path)\n",
    "\n",
    "    # Save the confusion matrix via torch serialization.\n",
    "    matrix_path = os.path.join(home_path, log_dir, f\"{exp_name}_{training_mode}_confusion_matrix.torch\")\n",
    "    torch.save(matrix, matrix_path)\n",
    "\n",
    "\n",
    "def _logger(logger_name, level=logging.DEBUG):\n",
    "    \"\"\"\n",
    "    Method to return a custom logger with the given name and level\n",
    "    \"\"\"\n",
    "    logger = logging.getLogger(logger_name)\n",
    "    logger.setLevel(level)\n",
    "    # format_string = (\"%(asctime)s — %(name)s — %(levelname)s — %(funcName)s:\"\n",
    "    #                 \"%(lineno)d — %(message)s\")\n",
    "    format_string = \"%(message)s\"\n",
    "    log_format = logging.Formatter(format_string)\n",
    "    # Creating and adding the console handler\n",
    "    console_handler = logging.StreamHandler(sys.stdout)\n",
    "    console_handler.setFormatter(log_format)\n",
    "    logger.addHandler(console_handler)\n",
    "    # Creating and adding the file handler\n",
    "    file_handler = logging.FileHandler(logger_name, mode='a')\n",
    "    file_handler.setFormatter(log_format)\n",
    "    logger.addHandler(file_handler)\n",
    "    return logger\n",
    "\n",
    "\n",
    "def copy_Files(destination, data_type):\n",
    "    \"\"\"Snapshot the run's source files into <destination>/model_files.\"\"\"\n",
    "    # destination: 'experiments_logs/Exp1/run1'\n",
    "    destination_dir = os.path.join(destination, \"model_files\")\n",
    "    os.makedirs(destination_dir, exist_ok=True)\n",
    "    # (source path, target file name) pairs, copied in order.\n",
    "    files_to_copy = [\n",
    "        (\"main.py\", \"main.py\"),\n",
    "        (\"trainerfun/trainer.py\", \"trainerfun.py\"),\n",
    "        (f\"config_files/{data_type}_Configs.py\", f\"{data_type}_Configs.py\"),\n",
    "        (\"dataloader/augmentations.py\", \"augmentations.py\"),\n",
    "        (\"dataloader/dataloader.py\", \"dataloader.py\"),\n",
    "        (\"models/model.py\", \"model.py\"),\n",
    "        (\"models/loss.py\", \"loss.py\"),\n",
    "        (\"models/TC.py\", \"TC.py\"),\n",
    "    ]\n",
    "    for source_path, target_name in files_to_copy:\n",
    "        copy(source_path, os.path.join(destination_dir, target_name))\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### main.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "We are using cpu now.\n",
      "=============================================\n",
      "Pre-training Dataset: ECG\n",
      "Target (fine-tuning) Dataset: MIT-BIH\n",
      "Method:  TF-C\n",
      "Mode: fine_tune_test\n",
      "=============================================\n",
      "Using subset for debugging, the datasize is: 1280\n",
      "Using subset for debugging, the datasize is: 410\n",
      "Data loaded ...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/torch/nn/modules/transformer.py:306: UserWarning: enable_nested_tensor is True, but self.use_nested_tensor is False because encoder_layer.self_attn.batch_first was not True(use batch_first for better inference performance)\n",
      "  warnings.warn(f\"enable_nested_tensor is True, but self.use_nested_tensor is False because {why_not_sparsity_fast_path}\")\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The loading file path ./experiments_logs/ECG_2_MIT-BIH/run1/pre_train_seed_42_2layertransformer/saved_models\n",
      "Training started ....\n",
      "Fine-tune on Fine-tuning set\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epochs:   0%|          | 0/1 [00:00<?, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Epoch : 1\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "100%|██████████| 10/10 [00:10<00:00,  1.10s/it]\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_classification.py:1517: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.\n",
      "  _warn_prf(average, modifier, f\"{metric.capitalize()} is\", len(result))\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " Finetune: loss = 9.0177| Acc=20.0000 | Precision = 14.7826 | Recall = 11.3333 | F1 = 12.8302| AUROC=0.0000 | AUPRC = 22.7315\n",
      "update fine-tuned model\n",
      "Test on Target datasts test set\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n",
      "/home/vitalii/projects/ecg-analysis-with-self-supervised-learning-and-multimodality/.venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py:1030: UserWarning: No positive class found in y_true, recall is set to one for all thresholds.\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "# Args selections\n",
    "start_time = datetime.now()\n",
    "parser = argparse.ArgumentParser()\n",
    "\n",
    "# Model parameters\n",
    "home_dir = os.getcwd()\n",
    "\n",
    "\n",
    "# Set up command line arguments and create parser\n",
    "parser.add_argument('--run_description', default='run1', type=str,\n",
    "                    help='Experiment Description')\n",
    "parser.add_argument('--seed', default=42, type=int,\n",
    "                    help='seed value')\n",
    "parser.add_argument('--training_mode', default='fine_tune_test', type=str,\n",
    "                    help='pre_train, fine_tune_test')\n",
    "parser.add_argument('--pretrain_dataset', default='ECG', type=str,\n",
    "                    help='Dataset of choice: ECG')\n",
    "parser.add_argument('--target_dataset', default='MIT-BIH', type=str,\n",
    "                    help='Dataset of choice: EMG, MIT-BIH, PTB-XL-Superclass, PTB-XL-Form, PTB-XL-Rhythm')\n",
    "parser.add_argument('--logs_save_dir', default='./experiments_logs', type=str,\n",
    "                    help='saving directory')\n",
    "parser.add_argument('--device', default='cpu', type=str,\n",
    "                    help='cpu or cuda')\n",
    "parser.add_argument('--home_path', default=home_dir, type=str,\n",
    "                    help='Project home directory')\n",
    "\n",
    "args, unknown = parser.parse_known_args()\n",
    "\n",
    "# Set up device\n",
    "with_gpu = torch.cuda.is_available()\n",
    "if with_gpu:\n",
    "    device = torch.device(\"cuda\")\n",
    "else:\n",
    "    device = torch.device(\"cpu\")\n",
    "\n",
    "print(f\"We are using {device} now.\")\n",
    "\n",
    "# Set up paths, experiment description and loggers\n",
    "pretrain_dataset = args.pretrain_dataset\n",
    "target_data = args.target_dataset\n",
    "experiment_description = str(pretrain_dataset) + '_2_' + str(target_data)\n",
    "\n",
    "method = 'TF-C'\n",
    "training_mode = args.training_mode\n",
    "run_description = args.run_description\n",
    "logs_save_dir = args.logs_save_dir\n",
    "os.makedirs(logs_save_dir, exist_ok=True)\n",
    "\n",
    "# Use ECG_Configs\n",
    "# exec(f'from config_files.{pretrain_dataset}_Configs import Config as Configs')\n",
    "configs = Configs()\n",
    "\n",
    "# fix random seeds for reproducibility\n",
    "SEED = args.seed\n",
    "torch.manual_seed(SEED)\n",
    "torch.backends.cudnn.deterministic = False\n",
    "torch.backends.cudnn.benchmark = False\n",
    "np.random.seed(SEED)\n",
    "\n",
    "# Set up experiment log directory and initialize logger\n",
    "experiment_log_dir = os.path.join(logs_save_dir, experiment_description, run_description,\n",
    "                                  training_mode + f\"_seed_{SEED}_2layertransformer\")\n",
    "# 'experiments_logs/Exp1/run1/train_linear_seed_0'\n",
    "os.makedirs(experiment_log_dir, exist_ok=True)\n",
    "\n",
    "# loop through domains\n",
    "counter = 0\n",
    "src_counter = 0\n",
    "\n",
    "# Logging\n",
    "log_file_name = os.path.join(experiment_log_dir, f\"logs_{datetime.now().strftime('%d_%m_%Y_%H_%M_%S')}.log\")\n",
    "# 'experiments_logs/Exp1/run1/train_linear_seed_0/logs_14_04_2022_15_13_12.log'\n",
    "logger = _logger(log_file_name)\n",
    "logger.debug(\"=\" * 45)\n",
    "logger.debug('Pre-training Dataset: %s', pretrain_dataset)\n",
    "logger.debug('Target (fine-tuning) Dataset: %s', target_data)\n",
    "logger.debug('Method:  %s', method)\n",
    "logger.debug('Mode: %s', training_mode)\n",
    "logger.debug(\"=\" * 45)\n",
    "\n",
    "# Load datasets\n",
    "sourcedata_path = f\"./datasets/{pretrain_dataset}\"\n",
    "target_data_path = f\"./datasets/{target_data}\"\n",
    "subset = True  # if subset=True, use a subset for debugging.\n",
    "train_dl, valid_dl, test_dl = data_generator(sourcedata_path, target_data_path,\n",
    "                                             configs, training_mode, subset=subset)\n",
    "logger.debug(\"Data loaded ...\")\n",
    "\n",
    "# Load Model\n",
    "# Here are two models, one basemodel, another is temporal contrastive model\n",
    "TFC_model = TFC(configs).to(device)\n",
    "classifier = TargetClassifier(configs).to(device)\n",
    "temporal_contr_model = None\n",
    "\n",
    "if training_mode == \"fine_tune_test\":\n",
    "    # load saved model of this experiment\n",
    "    load_from = os.path.join(os.path.join(logs_save_dir, experiment_description, run_description,\n",
    "                             f\"pre_train_seed_{SEED}_2layertransformer\", \"saved_models\"))\n",
    "    print(\"The loading file path\", load_from)\n",
    "    chkpoint = torch.load(os.path.join(load_from, \"ckp_last.pt\"), map_location=device)\n",
    "    pretrained_dict = chkpoint[\"model_state_dict\"]\n",
    "    TFC_model.load_state_dict(pretrained_dict)\n",
    "\n",
    "model_optimizer = torch.optim.Adam(TFC_model.parameters(), lr=configs.lr,\n",
    "                                   betas=(configs.beta1, configs.beta2), weight_decay=3e-4)\n",
    "classifier_optimizer = torch.optim.Adam(classifier.parameters(), lr=configs.lr,\n",
    "                                        betas=(configs.beta1, configs.beta2), weight_decay=3e-4)\n",
    "\n",
    "# Trainer\n",
    "Trainer(TFC_model, model_optimizer, classifier,\n",
    "        classifier_optimizer, train_dl, valid_dl,\n",
    "        test_dl, device, logger,\n",
    "        configs, experiment_log_dir, training_mode)\n",
    "\n",
    "logger.debug(\"Training time is : %s\", datetime.now() - start_time)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
