{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Tutorial on training a HTS-AT model for audio classification on the ESC-50 Dataset\n",
    "\n",
    "Reference: \n",
    "\n",
    "[HTS-AT: A Hierarchical Token-Semantic Audio Transformer for Sound Classification and Detection, ICASSP 2022](https://arxiv.org/abs/2202.00874)\n",
    "\n",
    "Following the HTS-AT paper, in this tutorial, we will show how to use HTS-AT in training on the ESC-50 Dataset.\n",
    "\n",
    "The [ESC-50 dataset](https://github.com/karolpiczak/ESC-50) is a labeled collection of 2000 environmental audio recordings suitable for benchmarking methods of environmental sound classification. The dataset consists of 5-second-long recordings organized into 50 semantic classes (with 40 examples per class) loosely arranged into 5 major categories.\n",
    "\n",
    "Before running this tutorial, please make sure that you install the below packages by following steps:\n",
    "\n",
    "1. download [the codebase](https://github.com/RetroCirce/HTS-Audio-Transformer), and put this tutorial notebook inside the codebase folder.\n",
    "\n",
    "2. In the github code folder:\n",
    "\n",
    "    > pip install -r requirements.txt\n",
    "\n",
    "3. We do not include the installation of PyTorch in the requirement, since different machines require different versions of CUDA and Toolkits. So make sure you install PyTorch from [the official guidance](https://pytorch.org/).\n",
    "\n",
    "4. Install 'SOX' and 'ffmpeg'. We recommend that you run this code on Linux inside a Conda environment. In that case, you can install them by:\n",
    "\n",
    "    > sudo apt install sox\n",
    "    \n",
    "    > conda install -c conda-forge ffmpeg\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# import basic packages\n",
    "import os\n",
    "import numpy as np\n",
    "import wget\n",
    "import sys\n",
    "import gdown\n",
    "import zipfile\n",
    "import librosa\n",
    "# in the notebook, we can only use one GPU\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the workspace and download the needed files\n",
    "\n",
    "def create_path(path):\n",
    "    if not os.path.exists(path):\n",
    "        os.mkdir(path)\n",
    "\n",
    "workspace = \"./workspace\"\n",
    "dataset_path = os.path.join(workspace, \"esc-50\")\n",
    "checkpoint_path = os.path.join(workspace, \"ckpt\")\n",
    "esc_raw_path = os.path.join(dataset_path, 'raw')\n",
    "\n",
    "\n",
    "create_path(workspace)\n",
    "create_path(dataset_path)\n",
    "create_path(checkpoint_path)\n",
    "create_path(esc_raw_path)\n",
    "\n",
    "\n",
    "# download the esc-50 dataset\n",
    "\n",
    "# if not os.path.exists(os.path.join(dataset_path, 'ESC-50-master.zip')):\n",
    "#     print(\"-------------Downloading ESC-50 Dataset-------------\")\n",
    "#     wget.download('https://github.com/karoldvl/ESC-50/archive/master.zip', out=dataset_path)\n",
    "#     with zipfile.ZipFile(os.path.join(dataset_path, 'ESC-50-master.zip'), 'r') as zip_ref:\n",
    "#         zip_ref.extractall(esc_raw_path)\n",
    "#     print(\"-------------Success-------------\")\n",
    "\n",
    "if not os.path.exists(os.path.join(checkpoint_path,'htsat_audioset_pretrain.ckpt')):\n",
    "    gdown.download(id='1OK8a5XuMVLyeVKF117L8pfxeZYdfSDZv', output=os.path.join(checkpoint_path,'htsat_audioset_pretrain.ckpt'))\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "-------------Resample ESC-50-------------\n",
      "-------------Success-------------\n",
      "-------------Build Dataset-------------\n",
      "-------------Success-------------\n"
     ]
    }
   ],
   "source": [
    "# Process ESC-50 Dataset\n",
    "meta_path = os.path.join(esc_raw_path, 'ESC-50-master', 'meta', 'esc50.csv')\n",
    "audio_path = os.path.join(esc_raw_path, 'ESC-50-master', 'audio')\n",
    "resample_path = os.path.join(dataset_path, 'resample')\n",
    "savedata_path = os.path.join(dataset_path, 'esc-50-data.npy')\n",
    "create_path(resample_path)\n",
    "\n",
    "meta = np.loadtxt(meta_path , delimiter=',', dtype='str', skiprows=1)\n",
    "audio_list = os.listdir(audio_path)\n",
    "\n",
    "# resample\n",
    "print(\"-------------Resample ESC-50-------------\")\n",
    "for f in audio_list:\n",
    "    full_f = os.path.join(audio_path, f)\n",
    "    resample_f = os.path.join(resample_path, f)\n",
    "    if not os.path.exists(resample_f):\n",
    "        os.system('sox -V1 ' + full_f + ' -r 32000 ' + resample_f)\n",
    "print(\"-------------Success-------------\")\n",
    "\n",
    "print(\"-------------Build Dataset-------------\")\n",
    "output_dict = [[] for _ in range(5)]\n",
    "for label in meta:\n",
    "    name = label[0]\n",
    "    fold = label[1]\n",
    "    target = label[2]\n",
    "    y, sr = librosa.load(os.path.join(resample_path, name), sr = None)\n",
    "    output_dict[int(fold) - 1].append(\n",
    "        {\n",
    "            \"name\": name,\n",
    "            \"target\": int(target),\n",
    "            \"waveform\": y\n",
    "        }\n",
    "    )\n",
    "np.save(savedata_path, output_dict)\n",
    "print(\"-------------Success-------------\")\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the model package\n",
    "import torch\n",
    "from torch.utils.data import DataLoader\n",
    "from torch.utils.data.distributed import DistributedSampler\n",
    "import pytorch_lightning as pl\n",
    "from pytorch_lightning.callbacks import ModelCheckpoint\n",
    "import warnings\n",
    "\n",
    "from utils import create_folder, dump_config, process_idc\n",
    "import esc_config as config\n",
    "from sed_model import SEDWrapper, Ensemble_SEDWrapper\n",
    "from data_generator import ESC_Dataset\n",
    "from model.htsat import HTSAT_Swin_Transformer\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Data Preparation\n",
    "class data_prep(pl.LightningDataModule):\n",
    "    def __init__(self, train_dataset, eval_dataset, device_num):\n",
    "        super().__init__()\n",
    "        self.train_dataset = train_dataset\n",
    "        self.eval_dataset = eval_dataset\n",
    "        self.device_num = device_num\n",
    "\n",
    "    def train_dataloader(self):\n",
    "        train_sampler = DistributedSampler(self.train_dataset, shuffle = False) if self.device_num > 1 else None\n",
    "        train_loader = DataLoader(\n",
    "            dataset = self.train_dataset,\n",
    "            num_workers = config.num_workers,\n",
    "            batch_size = config.batch_size // self.device_num,\n",
    "            shuffle = False,\n",
    "            sampler = train_sampler\n",
    "        )\n",
    "        return train_loader\n",
    "    def val_dataloader(self):\n",
    "        eval_sampler = DistributedSampler(self.eval_dataset, shuffle = False) if self.device_num > 1 else None\n",
    "        eval_loader = DataLoader(\n",
    "            dataset = self.eval_dataset,\n",
    "            num_workers = config.num_workers,\n",
    "            batch_size = config.batch_size // self.device_num,\n",
    "            shuffle = False,\n",
    "            sampler = eval_sampler\n",
    "        )\n",
    "        return eval_loader\n",
    "    def test_dataloader(self):\n",
    "        test_sampler = DistributedSampler(self.eval_dataset, shuffle = False) if self.device_num > 1 else None\n",
    "        test_loader = DataLoader(\n",
    "            dataset = self.eval_dataset,\n",
    "            num_workers = config.num_workers,\n",
    "            batch_size = config.batch_size // self.device_num,\n",
    "            shuffle = False,\n",
    "            sampler = test_sampler\n",
    "        )\n",
    "        return test_loader\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "each batch size: 32\n",
      "Using ESC\n"
     ]
    }
   ],
   "source": [
    "# Set the workspace\n",
    "device_num = torch.cuda.device_count()\n",
    "print(\"each batch size:\", config.batch_size // device_num)\n",
    "\n",
    "full_dataset = np.load(os.path.join(config.dataset_path, \"esc-50-data.npy\"), allow_pickle = True)\n",
    "\n",
    "# set exp folder\n",
    "exp_dir = os.path.join(config.workspace, \"results\", config.exp_name)\n",
    "checkpoint_dir = os.path.join(config.workspace, \"results\", config.exp_name, \"checkpoint\")\n",
    "if not config.debug:\n",
    "    create_folder(os.path.join(config.workspace, \"results\"))\n",
    "    create_folder(exp_dir)\n",
    "    create_folder(checkpoint_dir)\n",
    "    dump_config(config, os.path.join(exp_dir, config.exp_name), False)\n",
    "\n",
    "print(\"Using ESC\")\n",
    "dataset = ESC_Dataset(\n",
    "    dataset = full_dataset,\n",
    "    config = config,\n",
    "    eval_mode = False\n",
    ")\n",
    "eval_dataset = ESC_Dataset(\n",
    "    dataset = full_dataset,\n",
    "    config = config,\n",
    "    eval_mode = True\n",
    ")\n",
    "\n",
    "audioset_data = data_prep(dataset, eval_dataset, device_num)\n",
    "checkpoint_callback = ModelCheckpoint(\n",
    "    monitor = \"acc\",\n",
    "    filename='l-{epoch:d}-{acc:.3f}',\n",
    "    save_top_k = 20, # keep only the top 20 best checkpoints\n",
    "    mode = \"max\"\n",
    ")\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "GPU available: True, used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "d:\\conda\\miniconda\\envs\\whisper\\lib\\site-packages\\torch\\functional.py:554: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\native\\TensorShape.cpp:4316.)\n",
      "  return _VF.meshgrid(tensors, **kwargs)  # type: ignore[attr-defined]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Load Checkpoint from  ./workspace/ckpt/l-epoch=18-acc=0.968.ckpt\n"
     ]
    }
   ],
   "source": [
    "# Set the Trainer\n",
    "trainer = pl.Trainer(\n",
    "    deterministic=False,\n",
    "    default_root_dir = checkpoint_dir,\n",
    "    gpus = device_num, \n",
    "    val_check_interval = 1.0,\n",
    "    max_epochs = config.max_epoch,\n",
    "    auto_lr_find = True,    \n",
    "    sync_batchnorm = True,\n",
    "    callbacks = [checkpoint_callback],\n",
    "    accelerator = \"ddp\" if device_num > 1 else None,\n",
    "    num_sanity_val_steps = 0,\n",
    "    resume_from_checkpoint = None, \n",
    "    replace_sampler_ddp = False,\n",
    "    gradient_clip_val=1.0\n",
    ")\n",
    "\n",
    "sed_model = HTSAT_Swin_Transformer(\n",
    "    spec_size=config.htsat_spec_size,\n",
    "    patch_size=config.htsat_patch_size,\n",
    "    in_chans=1,\n",
    "    num_classes=config.classes_num,\n",
    "    window_size=config.htsat_window_size,\n",
    "    config = config,\n",
    "    depths = config.htsat_depth,\n",
    "    embed_dim = config.htsat_dim,\n",
    "    patch_stride=config.htsat_stride,\n",
    "    num_heads=config.htsat_num_head\n",
    ")\n",
    "\n",
    "model = SEDWrapper(\n",
    "    sed_model = sed_model, \n",
    "    config = config,\n",
    "    dataset = dataset\n",
    ")\n",
    "\n",
    "if config.resume_checkpoint is not None:\n",
    "    print(\"Load Checkpoint from \", config.resume_checkpoint)\n",
    "    ckpt = torch.load(config.resume_checkpoint, map_location=\"cpu\")\n",
    "    ckpt[\"state_dict\"].pop(\"sed_model.head.weight\")\n",
    "    ckpt[\"state_dict\"].pop(\"sed_model.head.bias\")\n",
    "    # finetune on the esc and spv2 dataset\n",
    "    ckpt[\"state_dict\"].pop(\"sed_model.tscam_conv.weight\")\n",
    "    ckpt[\"state_dict\"].pop(\"sed_model.tscam_conv.bias\")\n",
    "    model.load_state_dict(ckpt[\"state_dict\"], strict=False)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n",
      "\n",
      "  | Name      | Type                   | Params\n",
      "-----------------------------------------------------\n",
      "0 | sed_model | HTSAT_Swin_Transformer | 28.9 M\n",
      "-----------------------------------------------------\n",
      "27.8 M    Trainable params\n",
      "1.1 M     Non-trainable params\n",
      "28.9 M    Total params\n",
      "115.404   Total estimated model params size (MB)\n",
      "d:\\conda\\miniconda\\envs\\whisper\\lib\\site-packages\\pytorch_lightning\\trainer\\data_loading.py:132: UserWarning: The dataloader, train_dataloader, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 20 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.\n",
      "  rank_zero_warn(\n",
      "d:\\conda\\miniconda\\envs\\whisper\\lib\\site-packages\\pytorch_lightning\\trainer\\data_loading.py:132: UserWarning: The dataloader, val_dataloader 0, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 20 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0:   0%|          | 0/63 [00:00<?, ?it/s] "
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "d:\\conda\\miniconda\\envs\\whisper\\lib\\site-packages\\pytorch_lightning\\utilities\\data.py:59: UserWarning: Trying to infer the `batch_size` from an ambiguous collection. The batch size we found is 15. To avoid any miscalculations, use `self.log(..., batch_size=batch_size)`.\n",
      "  warning_cache.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0:   2%|▏         | 1/63 [00:00<00:57,  1.09it/s, loss=3.91, v_num=8, loss_step=3.910]"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "d:\\conda\\miniconda\\envs\\whisper\\lib\\site-packages\\pytorch_lightning\\utilities\\data.py:59: UserWarning: Trying to infer the `batch_size` from an ambiguous collection. The batch size we found is 16. To avoid any miscalculations, use `self.log(..., batch_size=batch_size)`.\n",
      "  warning_cache.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0:   3%|▎         | 2/63 [00:01<00:45,  1.33it/s, loss=3.92, v_num=8, loss_step=3.930]"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "d:\\conda\\miniconda\\envs\\whisper\\lib\\site-packages\\pytorch_lightning\\utilities\\data.py:59: UserWarning: Trying to infer the `batch_size` from an ambiguous collection. The batch size we found is 17. To avoid any miscalculations, use `self.log(..., batch_size=batch_size)`.\n",
      "  warning_cache.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0:  98%|█████████▊| 62/63 [00:30<00:00,  2.02it/s, loss=3.76, v_num=8, loss_step=3.740]cuda:0 {'acc': 0.5075}\n",
      "Epoch 1:  98%|█████████▊| 62/63 [00:30<00:00,  2.05it/s, loss=2.63, v_num=8, loss_step=2.260, acc=0.507, loss_epoch=3.820]cuda:0 {'acc': 0.9125}\n",
      "Epoch 2: 100%|██████████| 63/63 [00:30<00:00,  2.08it/s, loss=0.716, v_num=8, loss_step=0.668, acc=0.912, loss_epoch=3.080]cuda:0 {'acc': 0.935}\n",
      "Epoch 3: 100%|██████████| 63/63 [00:30<00:00,  2.10it/s, loss=0.242, v_num=8, loss_step=0.183, acc=0.935, loss_epoch=1.170]cuda:0 {'acc': 0.92}\n",
      "Epoch 4: 100%|██████████| 63/63 [00:30<00:00,  2.07it/s, loss=0.161, v_num=8, loss_step=0.241, acc=0.920, loss_epoch=0.298] cuda:0 {'acc': 0.8925}\n",
      "Epoch 5: 100%|██████████| 63/63 [00:30<00:00,  2.06it/s, loss=0.0743, v_num=8, loss_step=0.0271, acc=0.892, loss_epoch=0.150]cuda:0 {'acc': 0.925}\n",
      "Epoch 6: 100%|██████████| 63/63 [00:30<00:00,  2.04it/s, loss=0.077, v_num=8, loss_step=0.0196, acc=0.925, loss_epoch=0.112] cuda:0 {'acc': 0.93}\n",
      "Epoch 7: 100%|██████████| 63/63 [00:31<00:00,  2.03it/s, loss=0.0409, v_num=8, loss_step=0.0487, acc=0.930, loss_epoch=0.0681] cuda:0 {'acc': 0.9125}\n",
      "Epoch 8: 100%|██████████| 63/63 [00:29<00:00,  2.13it/s, loss=0.0436, v_num=8, loss_step=0.0133, acc=0.912, loss_epoch=0.0485] cuda:0 {'acc': 0.9275}\n",
      "Epoch 9:   6%|▋         | 4/63 [00:02<00:32,  1.80it/s, loss=0.0495, v_num=8, loss_step=0.0135, acc=0.927, loss_epoch=0.0664] "
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "d:\\conda\\miniconda\\envs\\whisper\\lib\\site-packages\\pytorch_lightning\\utilities\\data.py:59: UserWarning: Trying to infer the `batch_size` from an ambiguous collection. The batch size we found is 14. To avoid any miscalculations, use `self.log(..., batch_size=batch_size)`.\n",
      "  warning_cache.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 9: 100%|██████████| 63/63 [00:29<00:00,  2.11it/s, loss=0.0411, v_num=8, loss_step=0.156, acc=0.927, loss_epoch=0.0664]  cuda:0 {'acc': 0.93}\n",
      "Epoch 10: 100%|██████████| 63/63 [00:29<00:00,  2.13it/s, loss=0.0321, v_num=8, loss_step=0.00835, acc=0.930, loss_epoch=0.044]cuda:0 {'acc': 0.935}\n",
      "Epoch 11: 100%|██████████| 63/63 [00:29<00:00,  2.14it/s, loss=0.00573, v_num=8, loss_step=0.00198, acc=0.935, loss_epoch=0.041]cuda:0 {'acc': 0.96}\n",
      "Epoch 12: 100%|██████████| 63/63 [00:29<00:00,  2.13it/s, loss=0.00919, v_num=8, loss_step=0.00246, acc=0.960, loss_epoch=0.0112]cuda:0 {'acc': 0.96}\n",
      "Epoch 13: 100%|██████████| 63/63 [00:29<00:00,  2.11it/s, loss=0.00602, v_num=8, loss_step=0.00287, acc=0.960, loss_epoch=0.0069]cuda:0 {'acc': 0.9575}\n",
      "Epoch 14: 100%|██████████| 63/63 [00:30<00:00,  2.10it/s, loss=0.015, v_num=8, loss_step=0.00242, acc=0.958, loss_epoch=0.0135]  cuda:0 {'acc': 0.9525}\n",
      "Epoch 15: 100%|██████████| 63/63 [00:29<00:00,  2.11it/s, loss=0.00318, v_num=8, loss_step=0.00184, acc=0.953, loss_epoch=0.0104]cuda:0 {'acc': 0.955}\n",
      "Epoch 16: 100%|██████████| 63/63 [00:29<00:00,  2.12it/s, loss=0.0101, v_num=8, loss_step=0.00218, acc=0.955, loss_epoch=0.0031] cuda:0 {'acc': 0.955}\n",
      "Epoch 17: 100%|██████████| 63/63 [00:29<00:00,  2.12it/s, loss=0.00467, v_num=8, loss_step=0.00178, acc=0.955, loss_epoch=0.00976]cuda:0 {'acc': 0.97}\n",
      "Epoch 18: 100%|██████████| 63/63 [00:29<00:00,  2.12it/s, loss=0.00525, v_num=8, loss_step=0.00209, acc=0.970, loss_epoch=0.00673] cuda:0 {'acc': 0.9675}\n",
      "Epoch 19: 100%|██████████| 63/63 [00:29<00:00,  2.13it/s, loss=0.00168, v_num=8, loss_step=0.00133, acc=0.968, loss_epoch=0.00343] cuda:0 {'acc': 0.9675}\n",
      "Epoch 20: 100%|██████████| 63/63 [00:29<00:00,  2.12it/s, loss=0.00153, v_num=8, loss_step=0.0012, acc=0.968, loss_epoch=0.00186]  cuda:0 {'acc': 0.965}\n",
      "Epoch 21: 100%|██████████| 63/63 [00:29<00:00,  2.11it/s, loss=0.00152, v_num=8, loss_step=0.00316, acc=0.965, loss_epoch=0.00364] cuda:0 {'acc': 0.9625}\n",
      "Epoch 22: 100%|██████████| 63/63 [00:29<00:00,  2.10it/s, loss=0.00135, v_num=8, loss_step=0.000882, acc=0.963, loss_epoch=0.00842]cuda:0 {'acc': 0.965}\n",
      "Epoch 23: 100%|██████████| 63/63 [00:29<00:00,  2.12it/s, loss=0.00203, v_num=8, loss_step=0.00116, acc=0.965, loss_epoch=0.00314] cuda:0 {'acc': 0.9725}\n",
      "Epoch 24: 100%|██████████| 63/63 [00:29<00:00,  2.12it/s, loss=0.00235, v_num=8, loss_step=0.00308, acc=0.973, loss_epoch=0.00158] cuda:0 {'acc': 0.97}\n",
      "Epoch 25: 100%|██████████| 63/63 [00:29<00:00,  2.12it/s, loss=0.00156, v_num=8, loss_step=0.00107, acc=0.970, loss_epoch=0.00162]  cuda:0 {'acc': 0.97}\n",
      "Epoch 26: 100%|██████████| 63/63 [00:29<00:00,  2.12it/s, loss=0.00111, v_num=8, loss_step=0.000886, acc=0.970, loss_epoch=0.00211]cuda:0 {'acc': 0.9675}\n",
      "Epoch 27: 100%|██████████| 63/63 [00:31<00:00,  2.03it/s, loss=0.00251, v_num=8, loss_step=0.0013, acc=0.968, loss_epoch=0.00271]  cuda:0 {'acc': 0.9675}\n",
      "Epoch 28: 100%|██████████| 63/63 [00:31<00:00,  2.01it/s, loss=0.00272, v_num=8, loss_step=0.000553, acc=0.968, loss_epoch=0.00347]cuda:0 {'acc': 0.965}\n",
      "Epoch 29: 100%|██████████| 63/63 [00:31<00:00,  2.01it/s, loss=0.00174, v_num=8, loss_step=0.000545, acc=0.965, loss_epoch=0.00234]cuda:0 {'acc': 0.97}\n",
      "Epoch 30: 100%|██████████| 63/63 [00:30<00:00,  2.06it/s, loss=0.00105, v_num=8, loss_step=0.000843, acc=0.970, loss_epoch=0.0031] cuda:0 {'acc': 0.975}\n",
      "Epoch 31: 100%|██████████| 63/63 [00:30<00:00,  2.04it/s, loss=0.00367, v_num=8, loss_step=0.0135, acc=0.975, loss_epoch=0.00115]  cuda:0 {'acc': 0.965}\n",
      "Epoch 32: 100%|██████████| 63/63 [00:30<00:00,  2.08it/s, loss=0.00144, v_num=8, loss_step=0.000708, acc=0.965, loss_epoch=0.00615]cuda:0 {'acc': 0.97}\n",
      "Epoch 33: 100%|██████████| 63/63 [00:30<00:00,  2.08it/s, loss=0.00082, v_num=8, loss_step=0.00049, acc=0.970, loss_epoch=0.00245]  cuda:0 {'acc': 0.9725}\n",
      "Epoch 34: 100%|██████████| 63/63 [00:30<00:00,  2.08it/s, loss=0.00429, v_num=8, loss_step=0.00178, acc=0.973, loss_epoch=0.00105]  cuda:0 {'acc': 0.9675}\n",
      "Epoch 35: 100%|██████████| 63/63 [00:30<00:00,  2.09it/s, loss=0.0125, v_num=8, loss_step=0.000731, acc=0.968, loss_epoch=0.0025] cuda:0 {'acc': 0.965}\n",
      "Epoch 36: 100%|██████████| 63/63 [00:30<00:00,  2.07it/s, loss=0.000775, v_num=8, loss_step=0.00184, acc=0.965, loss_epoch=0.00826] cuda:0 {'acc': 0.9725}\n",
      "Epoch 37: 100%|██████████| 63/63 [00:30<00:00,  2.05it/s, loss=0.00184, v_num=8, loss_step=0.000502, acc=0.973, loss_epoch=0.0014] cuda:0 {'acc': 0.9675}\n",
      "Epoch 38: 100%|██████████| 63/63 [00:30<00:00,  2.08it/s, loss=0.00105, v_num=8, loss_step=0.00127, acc=0.968, loss_epoch=0.00129]  cuda:0 {'acc': 0.955}\n",
      "Epoch 39: 100%|██████████| 63/63 [00:30<00:00,  2.06it/s, loss=0.000941, v_num=8, loss_step=0.000473, acc=0.955, loss_epoch=0.00311]cuda:0 {'acc': 0.955}\n",
      "Epoch 40: 100%|██████████| 63/63 [00:31<00:00,  1.97it/s, loss=0.00494, v_num=8, loss_step=0.000855, acc=0.955, loss_epoch=0.000919] cuda:0 {'acc': 0.96}\n",
      "Epoch 41: 100%|██████████| 63/63 [00:31<00:00,  1.98it/s, loss=0.0021, v_num=8, loss_step=0.0143, acc=0.960, loss_epoch=0.00227]    cuda:0 {'acc': 0.955}\n",
      "Epoch 42: 100%|██████████| 63/63 [00:31<00:00,  2.02it/s, loss=0.00151, v_num=8, loss_step=0.00106, acc=0.955, loss_epoch=0.00459] cuda:0 {'acc': 0.9475}\n",
      "Epoch 43: 100%|██████████| 63/63 [00:31<00:00,  2.02it/s, loss=0.000816, v_num=8, loss_step=0.000363, acc=0.948, loss_epoch=0.00317]cuda:0 {'acc': 0.9675}\n",
      "Epoch 44: 100%|██████████| 63/63 [00:31<00:00,  2.01it/s, loss=0.000589, v_num=8, loss_step=0.00051, acc=0.968, loss_epoch=0.00264] cuda:0 {'acc': 0.965}\n",
      "Epoch 45: 100%|██████████| 63/63 [00:31<00:00,  1.98it/s, loss=0.00613, v_num=8, loss_step=0.000374, acc=0.965, loss_epoch=0.00241]cuda:0 {'acc': 0.96}\n",
      "Epoch 46: 100%|██████████| 63/63 [00:31<00:00,  1.98it/s, loss=0.00765, v_num=8, loss_step=0.000459, acc=0.960, loss_epoch=0.0039] cuda:0 {'acc': 0.96}\n",
      "Epoch 47: 100%|██████████| 63/63 [00:32<00:00,  1.96it/s, loss=0.000812, v_num=8, loss_step=0.000608, acc=0.960, loss_epoch=0.00489]cuda:0 {'acc': 0.955}\n",
      "Epoch 48: 100%|██████████| 63/63 [00:31<00:00,  2.00it/s, loss=0.00794, v_num=8, loss_step=0.000278, acc=0.955, loss_epoch=0.00106] cuda:0 {'acc': 0.96}\n",
      "Epoch 49: 100%|██████████| 63/63 [00:31<00:00,  1.98it/s, loss=0.00818, v_num=8, loss_step=0.000888, acc=0.960, loss_epoch=0.0036] cuda:0 {'acc': 0.96}\n",
      "Epoch 50: 100%|██████████| 63/63 [00:32<00:00,  1.94it/s, loss=0.00265, v_num=8, loss_step=0.00166, acc=0.960, loss_epoch=0.00955]  cuda:0 {'acc': 0.96}\n",
      "Epoch 51: 100%|██████████| 63/63 [00:32<00:00,  1.97it/s, loss=0.000601, v_num=8, loss_step=0.000261, acc=0.960, loss_epoch=0.00448]cuda:0 {'acc': 0.9625}\n",
      "Epoch 52: 100%|██████████| 63/63 [00:32<00:00,  1.95it/s, loss=0.00055, v_num=8, loss_step=0.00025, acc=0.963, loss_epoch=0.00307]  cuda:0 {'acc': 0.9675}\n",
      "Epoch 53: 100%|██████████| 63/63 [00:31<00:00,  2.03it/s, loss=0.000308, v_num=8, loss_step=0.000349, acc=0.968, loss_epoch=0.00125]cuda:0 {'acc': 0.9675}\n",
      "Epoch 54: 100%|██████████| 63/63 [00:30<00:00,  2.05it/s, loss=0.000354, v_num=8, loss_step=0.000564, acc=0.968, loss_epoch=0.00267]cuda:0 {'acc': 0.9675}\n",
      "Epoch 55: 100%|██████████| 63/63 [00:30<00:00,  2.07it/s, loss=0.00172, v_num=8, loss_step=0.000288, acc=0.968, loss_epoch=0.000617] cuda:0 {'acc': 0.9675}\n",
      "Epoch 56: 100%|██████████| 63/63 [00:30<00:00,  2.07it/s, loss=0.000447, v_num=8, loss_step=0.000284, acc=0.968, loss_epoch=0.000954]cuda:0 {'acc': 0.96}\n",
      "Epoch 57: 100%|██████████| 63/63 [00:31<00:00,  2.03it/s, loss=0.00305, v_num=8, loss_step=0.00025, acc=0.960, loss_epoch=0.00652]   cuda:0 {'acc': 0.9625}\n",
      "Epoch 58: 100%|██████████| 63/63 [00:31<00:00,  2.00it/s, loss=0.0192, v_num=8, loss_step=0.000274, acc=0.963, loss_epoch=0.0024]  cuda:0 {'acc': 0.9625}\n",
      "Epoch 59: 100%|██████████| 63/63 [00:31<00:00,  2.01it/s, loss=0.0039, v_num=8, loss_step=0.000261, acc=0.963, loss_epoch=0.00998] cuda:0 {'acc': 0.95}\n",
      "Epoch 60: 100%|██████████| 63/63 [00:31<00:00,  2.00it/s, loss=0.000995, v_num=8, loss_step=0.00594, acc=0.950, loss_epoch=0.00337]cuda:0 {'acc': 0.9625}\n",
      "Epoch 61: 100%|██████████| 63/63 [00:31<00:00,  1.99it/s, loss=0.000891, v_num=8, loss_step=0.000546, acc=0.963, loss_epoch=0.00734]cuda:0 {'acc': 0.95}\n",
      "Epoch 62: 100%|██████████| 63/63 [00:31<00:00,  2.02it/s, loss=0.00201, v_num=8, loss_step=0.000187, acc=0.950, loss_epoch=0.00462] cuda:0 {'acc': 0.96}\n",
      "Epoch 63: 100%|██████████| 63/63 [00:31<00:00,  2.00it/s, loss=0.00652, v_num=8, loss_step=0.000159, acc=0.960, loss_epoch=0.00854]cuda:0 {'acc': 0.9625}\n",
      "Epoch 64: 100%|██████████| 63/63 [00:31<00:00,  2.03it/s, loss=0.0206, v_num=8, loss_step=0.000169, acc=0.963, loss_epoch=0.0048]  cuda:0 {'acc': 0.9575}\n",
      "Epoch 65: 100%|██████████| 63/63 [00:30<00:00,  2.04it/s, loss=0.000304, v_num=8, loss_step=0.000173, acc=0.958, loss_epoch=0.0148]cuda:0 {'acc': 0.9525}\n",
      "Epoch 66: 100%|██████████| 63/63 [00:30<00:00,  2.04it/s, loss=0.00729, v_num=8, loss_step=0.00249, acc=0.953, loss_epoch=0.00125]  cuda:0 {'acc': 0.9475}\n",
      "Epoch 67: 100%|██████████| 63/63 [00:31<00:00,  1.98it/s, loss=0.00106, v_num=8, loss_step=0.000136, acc=0.948, loss_epoch=0.00591] cuda:0 {'acc': 0.9575}\n",
      "Epoch 68: 100%|██████████| 63/63 [00:31<00:00,  2.01it/s, loss=0.000553, v_num=8, loss_step=0.000315, acc=0.958, loss_epoch=0.000717]cuda:0 {'acc': 0.96}\n",
      "Epoch 69: 100%|██████████| 63/63 [00:31<00:00,  2.02it/s, loss=0.000503, v_num=8, loss_step=0.00012, acc=0.960, loss_epoch=0.000522] cuda:0 {'acc': 0.96}\n",
      "Epoch 70: 100%|██████████| 63/63 [00:30<00:00,  2.04it/s, loss=0.0021, v_num=8, loss_step=0.000122, acc=0.960, loss_epoch=0.000918]  cuda:0 {'acc': 0.955}\n",
      "Epoch 71: 100%|██████████| 63/63 [00:30<00:00,  2.05it/s, loss=0.00364, v_num=8, loss_step=0.000189, acc=0.955, loss_epoch=0.00643] cuda:0 {'acc': 0.955}\n",
      "Epoch 72: 100%|██████████| 63/63 [00:30<00:00,  2.05it/s, loss=0.00282, v_num=8, loss_step=0.000555, acc=0.955, loss_epoch=0.00154] cuda:0 {'acc': 0.9575}\n",
      "Epoch 73: 100%|██████████| 63/63 [00:31<00:00,  1.99it/s, loss=0.00243, v_num=8, loss_step=0.000314, acc=0.958, loss_epoch=0.0015] cuda:0 {'acc': 0.955}\n",
      "Epoch 74: 100%|██████████| 63/63 [00:32<00:00,  1.97it/s, loss=0.00288, v_num=8, loss_step=0.000173, acc=0.955, loss_epoch=0.00172] cuda:0 {'acc': 0.9525}\n",
      "Epoch 75: 100%|██████████| 63/63 [00:31<00:00,  2.00it/s, loss=0.000384, v_num=8, loss_step=0.00024, acc=0.953, loss_epoch=0.00149] cuda:0 {'acc': 0.955}\n",
      "Epoch 76: 100%|██████████| 63/63 [00:31<00:00,  1.98it/s, loss=0.000808, v_num=8, loss_step=0.00683, acc=0.955, loss_epoch=0.000354] cuda:0 {'acc': 0.945}\n",
      "Epoch 77: 100%|██████████| 63/63 [00:31<00:00,  2.02it/s, loss=0.000899, v_num=8, loss_step=0.000139, acc=0.945, loss_epoch=0.00068]cuda:0 {'acc': 0.9475}\n",
      "Epoch 78: 100%|██████████| 63/63 [00:31<00:00,  2.00it/s, loss=0.000257, v_num=8, loss_step=0.000101, acc=0.948, loss_epoch=0.0129] cuda:0 {'acc': 0.955}\n",
      "Epoch 79: 100%|██████████| 63/63 [00:31<00:00,  1.99it/s, loss=0.000298, v_num=8, loss_step=0.000148, acc=0.955, loss_epoch=0.000219]cuda:0 {'acc': 0.9525}\n",
      "Epoch 80: 100%|██████████| 63/63 [00:32<00:00,  1.96it/s, loss=0.00541, v_num=8, loss_step=0.104, acc=0.953, loss_epoch=0.000383]    cuda:0 {'acc': 0.955}\n",
      "Epoch 81: 100%|██████████| 63/63 [00:32<00:00,  1.95it/s, loss=0.000367, v_num=8, loss_step=9.8e-5, acc=0.955, loss_epoch=0.00235]  cuda:0 {'acc': 0.9525}\n",
      "Epoch 82: 100%|██████████| 63/63 [00:32<00:00,  1.96it/s, loss=0.000177, v_num=8, loss_step=8.09e-5, acc=0.953, loss_epoch=0.000724] cuda:0 {'acc': 0.9625}\n",
      "Epoch 83: 100%|██████████| 63/63 [00:32<00:00,  1.96it/s, loss=0.00159, v_num=8, loss_step=0.0176, acc=0.963, loss_epoch=0.000503]   cuda:0 {'acc': 0.9625}\n",
      "Epoch 84: 100%|██████████| 63/63 [00:32<00:00,  1.95it/s, loss=0.00134, v_num=8, loss_step=0.000198, acc=0.963, loss_epoch=0.000736] cuda:0 {'acc': 0.955}\n",
      "Epoch 85: 100%|██████████| 63/63 [00:32<00:00,  1.95it/s, loss=0.000232, v_num=8, loss_step=0.000286, acc=0.955, loss_epoch=0.00118]cuda:0 {'acc': 0.945}\n",
      "Epoch 86: 100%|██████████| 63/63 [00:31<00:00,  1.99it/s, loss=0.021, v_num=8, loss_step=0.000169, acc=0.945, loss_epoch=0.000411]   cuda:0 {'acc': 0.96}\n",
      "Epoch 87: 100%|██████████| 63/63 [00:30<00:00,  2.04it/s, loss=0.000708, v_num=8, loss_step=9.97e-5, acc=0.960, loss_epoch=0.00905] cuda:0 {'acc': 0.95}\n",
      "Epoch 88: 100%|██████████| 63/63 [00:31<00:00,  2.03it/s, loss=0.000687, v_num=8, loss_step=0.000346, acc=0.950, loss_epoch=0.000857]cuda:0 {'acc': 0.9625}\n",
      "Epoch 89: 100%|██████████| 63/63 [00:31<00:00,  2.00it/s, loss=0.00463, v_num=8, loss_step=9.22e-5, acc=0.963, loss_epoch=0.00511]   cuda:0 {'acc': 0.955}\n",
      "Epoch 90: 100%|██████████| 63/63 [00:30<00:00,  2.05it/s, loss=0.000193, v_num=8, loss_step=0.00012, acc=0.955, loss_epoch=0.00203] cuda:0 {'acc': 0.955}\n",
      "Epoch 91: 100%|██████████| 63/63 [00:30<00:00,  2.04it/s, loss=0.000701, v_num=8, loss_step=0.000903, acc=0.955, loss_epoch=0.00123]cuda:0 {'acc': 0.9575}\n",
      "Epoch 92: 100%|██████████| 63/63 [00:30<00:00,  2.07it/s, loss=0.000785, v_num=8, loss_step=0.00013, acc=0.958, loss_epoch=0.00148] cuda:0 {'acc': 0.96}\n",
      "Epoch 93: 100%|██████████| 63/63 [00:30<00:00,  2.07it/s, loss=0.00256, v_num=8, loss_step=0.000194, acc=0.960, loss_epoch=0.000559] cuda:0 {'acc': 0.9525}\n",
      "Epoch 94: 100%|██████████| 63/63 [00:31<00:00,  2.02it/s, loss=0.0142, v_num=8, loss_step=8.01e-5, acc=0.953, loss_epoch=0.00215]   cuda:0 {'acc': 0.955}\n",
      "Epoch 95: 100%|██████████| 63/63 [00:32<00:00,  1.96it/s, loss=0.0157, v_num=8, loss_step=7.38e-5, acc=0.955, loss_epoch=0.00918]   cuda:0 {'acc': 0.95}\n",
      "Epoch 96: 100%|██████████| 63/63 [00:32<00:00,  1.96it/s, loss=0.000403, v_num=8, loss_step=0.000237, acc=0.950, loss_epoch=0.00984]cuda:0 {'acc': 0.9525}\n",
      "Epoch 97: 100%|██████████| 63/63 [00:32<00:00,  1.97it/s, loss=0.00565, v_num=8, loss_step=0.000103, acc=0.953, loss_epoch=0.00184] cuda:0 {'acc': 0.96}\n",
      "Epoch 98: 100%|██████████| 63/63 [00:31<00:00,  1.99it/s, loss=0.000602, v_num=8, loss_step=9.78e-5, acc=0.960, loss_epoch=0.00625] cuda:0 {'acc': 0.9525}\n",
      "Epoch 99: 100%|██████████| 63/63 [00:31<00:00,  1.99it/s, loss=0.000194, v_num=8, loss_step=9.18e-5, acc=0.953, loss_epoch=0.00455] cuda:0 {'acc': 0.95}\n",
      "Epoch 99: 100%|██████████| 63/63 [00:32<00:00,  1.97it/s, loss=0.000194, v_num=8, loss_step=9.18e-5, acc=0.950, loss_epoch=0.000146]\n"
     ]
    }
   ],
   "source": [
    "# Training the model\n",
    "# You can set different fold index by setting 'esc_fold' to any number from 0-4 in esc_config.py\n",
    "trainer.fit(model, audioset_data)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Now Let us Check the Result\n",
    "\n",
    "Find the path of your saved checkpoint and paste it in the below variable.\n",
    "Then you are able to follow the below code for checking the prediction result of any sample you like."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# infer the single data to check the result\n",
    "# get a model you saved\n",
    "model_path = './workspace/results/exp_htsat_esc_50/checkpoint/lightning_logs/version_8/checkpoints/l-epoch=30-acc=0.975.ckpt'\n",
    "\n",
    "# get the groundtruth\n",
    "meta = np.loadtxt(meta_path , delimiter=',', dtype='str', skiprows=1)\n",
    "gd = {}\n",
    "for label in meta:\n",
    "    name = label[0]\n",
    "    target = label[2]\n",
    "    gd[name] = target\n",
    "\n",
    "class Audio_Classification:\n",
    "    def __init__(self, model_path, config):\n",
    "        super().__init__()\n",
    "\n",
    "        self.device = torch.device('cuda')\n",
    "        self.sed_model = HTSAT_Swin_Transformer(\n",
    "            spec_size=config.htsat_spec_size,\n",
    "            patch_size=config.htsat_patch_size,\n",
    "            in_chans=1,\n",
    "            num_classes=config.classes_num,\n",
    "            window_size=config.htsat_window_size,\n",
    "            config = config,\n",
    "            depths = config.htsat_depth,\n",
    "            embed_dim = config.htsat_dim,\n",
    "            patch_stride=config.htsat_stride,\n",
    "            num_heads=config.htsat_num_head\n",
    "        )\n",
    "        ckpt = torch.load(model_path, map_location=\"cpu\")\n",
    "        temp_ckpt = {}\n",
    "        for key in ckpt[\"state_dict\"]:\n",
    "            temp_ckpt[key[10:]] = ckpt['state_dict'][key]\n",
    "        self.sed_model.load_state_dict(temp_ckpt)\n",
    "        self.sed_model.to(self.device)\n",
    "        self.sed_model.eval()\n",
    "\n",
    "\n",
    "    def predict(self, audiofile):\n",
    "\n",
    "        if audiofile:\n",
    "            waveform, sr = librosa.load(audiofile, sr=32000)\n",
    "\n",
    "            with torch.no_grad():\n",
    "                x = torch.from_numpy(waveform).float().to(self.device)\n",
    "                output_dict = self.sed_model(x[None, :], None, True)\n",
    "                pred = output_dict['clipwise_output']\n",
    "                pred_post = pred[0].detach().cpu().numpy()\n",
    "                pred_label = np.argmax(pred_post)\n",
    "                pred_prob = np.max(pred_post)\n",
    "            return pred_label, pred_prob\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Audiocls predict output:  13 11.332836 13\n"
     ]
    }
   ],
   "source": [
    "# Inference\n",
    "Audiocls = Audio_Classification(model_path, config)\n",
    "\n",
    "# pick any audio you like in the ESC-50 testing set (cross-validation)\n",
    "pred_label, pred_prob = Audiocls.predict(\"./workspace/esc-50/raw/ESC-50-master/audio/1-7456-A-13.wav\")\n",
    "\n",
    "print('Audiocls predict output: ', pred_label, pred_prob, gd[\"1-7456-A-13.wav\"])"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "whisper",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.21"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
