{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Assignment 2 Self-attention for Speaker Classification\n",
    "\n",
    "Task Description\n",
    "- Task: Classify the speakers of given features.\n",
    "- Main goal: Learn how to use transformer.\n",
    "- TODO:\n",
    "    - Run sample code and know how to use transformer.\n",
    "    - Know how to adjust parameters of transformer.\n",
    "    - Construct conformer which is a variety of transformer.\n",
    "    - Implement triplet loss for classification.\n",
    "\n",
    "Import Some Packages\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(472, 40)\n",
      "(397, 40)\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import json\n",
    "import math\n",
    "import random\n",
    "from pathlib import Path\n",
    "import numpy as np\n",
    "from tqdm import tqdm\n",
    "import csv\n",
    "\n",
    "import paddle\n",
    "from paddle.io import Dataset, DataLoader\n",
    "import paddle.nn as nn\n",
    "from paddle.optimizer import AdamW\n",
    "from paddle.optimizer.lr import LambdaDecay\n",
    "mel = np.load(os.path.join('work/Dataset_npy', \"uttr-fff438a358244d59a577e8d83b5f9da3.npy\"), allow_pickle=True)\n",
    "print(mel.shape)\n",
    "mel = np.load(os.path.join('work/Dataset_npy', \"uttr-0a0ae35b50c74f04ad88c5b3087ba58e.npy\"), allow_pickle=True)\n",
    "print(mel.shape)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Unzip Data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "vscode": {
     "languageId": "powershell"
    }
   },
   "outputs": [],
   "source": [
    "\n",
    "# Takes about 2m, please wait patiently\n",
    "# Please make sure the data is unzipped correctly, if something goes wrong, you can delete the dir and re-run this command.\n",
    "if not os.path.exists(\"./work/Dataset_npy\"):\n",
    "    !unzip -d ./work/ ./data/data177383/Dataset_npy.zip"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "powershell"
    }
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Dataset\n",
    "- Original dataset is Voxceleb2.\n",
    "- The license and complete version of Voxceleb2.\n",
    "- We randomly select 600 speakers from Voxceleb1.\n",
    "- Then preprocess the raw waveforms into mel-spectrograms.\n",
    "- Args:\n",
    "    - data_dir: The path to the data directory.\n",
    "    - metadata_path: The path to the metadata.\n",
    "    - segment_len: The length of audio segment for training.\n",
    "- The architecture of data directory\n",
    "    - data directory\n",
    "        - metadata.json\n",
    "        - testdata.json\n",
    "        - mapping.json\n",
    "        - uttr-{random string}.pt\n",
    "- The information in metadata\n",
    "    - \"n_mels\": The dimention of mel-spectrogram.\n",
    "    - \"speakers\": A dictionary.\n",
    "        - Key: speaker ids.\n",
    "        - value: \"feature_path\" and \"mel_len\"\n",
    "\n",
    "For efficiency, we segment the mel-spectrograms into segments in the traing step."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "vscode": {
     "languageId": "powershell"
    }
   },
   "outputs": [],
   "source": [
    "# split the dataset into train and validation\n",
    "# 90% train, 10% validation\n",
    "train_split = 0.9"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "class myDataset(Dataset):\n",
    "    def __init__(self, meta_dir, meta_file, data_dir, segment_len=128, is_train=True):\n",
    "        self.is_train = is_train\n",
    "        self.data_dir = data_dir\n",
    "        self.segment_len = segment_len\n",
    "\n",
    "        # Load the mapping from speaker neme to their corresponding id.\n",
    "        mapping_path = Path(meta_dir) / \"mapping.json\"\n",
    "        mapping = json.load(mapping_path.open())\n",
    "        self.speaker2id = mapping[\"speaker2id\"]\n",
    "\n",
    "        # Load metadata of training data.\n",
    "        metadata_path = Path(meta_dir) / meta_file\n",
    "        metadata = json.load(open(metadata_path))[\"speakers\"]\n",
    "\n",
    "        # Get the total number of speaker.\n",
    "        self.speaker_num = len(metadata.keys())\n",
    "        self.data = []\n",
    "        for speaker in metadata.keys():\n",
    "            if is_train:\n",
    "                all_utterances = metadata[speaker][: int(len(metadata[speaker]) * train_split)]\n",
    "            else:\n",
    "                all_utterances = metadata[speaker][int(len(metadata[speaker]) * train_split) :]\n",
    "            for utterances in all_utterances:\n",
    "                self.data.append(\n",
    "                    [\n",
    "                        utterances[\"feature_path\"].replace(\".pt\", \".npy\"),\n",
    "                        self.speaker2id[speaker],\n",
    "                    ]\n",
    "                )\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.data)\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        feat_path, speaker = self.data[index]\n",
    "        # Load preprocessed mel-spectrogram.\n",
    "        mel = np.load(os.path.join(self.data_dir, feat_path), allow_pickle=True)\n",
    "\n",
    "        # Segmemt mel-spectrogram into \"segment_len\" frames.\n",
    "        if len(mel) > self.segment_len:\n",
    "            # Randomly get the starting point of the segment.\n",
    "            start = random.randint(0, len(mel) - self.segment_len)\n",
    "            # Get a segment with \"segment_len\" frames.\n",
    "            mel = paddle.to_tensor(mel[start : start + self.segment_len])\n",
    "        else:\n",
    "            mel = paddle.to_tensor(mel)\n",
    "        # Turn the speaker id into long for computing loss later.\n",
    "        speaker = paddle.to_tensor([speaker])\n",
    "        return mel, speaker\n",
    "\n",
    "    def get_speaker_number(self):\n",
    "        return self.speaker_num"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "class myMetricLearningDataset(Dataset):\n",
    "    def __init__(self, meta_dir, meta_file, data_dir, segment_len=128, is_train=True):\n",
    "        self.is_train = is_train\n",
    "        self.data_dir = data_dir\n",
    "        self.segment_len = segment_len\n",
    "\n",
    "        # Load the mapping from speaker neme to their corresponding id.\n",
    "        mapping_path = Path(meta_dir) / \"mapping.json\"\n",
    "        mapping = json.load(mapping_path.open())\n",
    "        self.speaker2id = mapping[\"speaker2id\"]\n",
    "\n",
    "        # Load metadata of training data.\n",
    "        metadata_path = Path(meta_dir) / meta_file\n",
    "        metadata = json.load(open(metadata_path))[\"speakers\"]\n",
    "\n",
    "        # Get the total number of speaker.\n",
    "        self.speaker_num = len(metadata.keys())\n",
    "        self.speaker2feat = {}\n",
    "        for speaker in metadata.keys():\n",
    "            self.speaker2feat[self.speaker2id[speaker]] = []\n",
    "            if is_train:\n",
    "                all_utterances = metadata[speaker][\n",
    "                    : int(len(metadata[speaker]) * train_split)\n",
    "                ]\n",
    "            else:\n",
    "                all_utterances = metadata[speaker][\n",
    "                    int(len(metadata[speaker]) * train_split) :\n",
    "                ]\n",
    "            for utterances in all_utterances:\n",
    "                self.speaker2feat[self.speaker2id[speaker]].append(\n",
    "                    utterances[\"feature_path\"].replace(\".pt\", \".npy\")\n",
    "                )\n",
    "\n",
    "    def __len__(self):\n",
    "        return self.speaker_num\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        anchor_speaker = index\n",
    "        # Randomly choose negative speaker\n",
    "        negative_speaker = random.randint(0, self.speaker_num - 1)\n",
    "        while negative_speaker == anchor_speaker:\n",
    "            negative_speaker = random.randint(0, self.speaker_num - 1)\n",
    "\n",
    "        # Randomly choose 2 different utterances from the anchor speaker.\n",
    "        anchor_utterances = random.sample(self.speaker2feat[anchor_speaker], 2)\n",
    "        # Randomly choose 1 utterance from the negative speaker.\n",
    "        negative_utterance = random.choice(self.speaker2feat[negative_speaker])\n",
    "\n",
    "        # get mel spectrogram of all the utterances.\n",
    "        anchor_mel = self.get_mel_by_feat(anchor_utterances[0])\n",
    "        positive_mel = self.get_mel_by_feat(anchor_utterances[1])\n",
    "        negative_mel = self.get_mel_by_feat(negative_utterance)\n",
    "\n",
    "        # return: anchor_mel, positive_mel, negative_mel, anchor_speaker, positive_speaker, negative_speaker\n",
    "        # return: x1, x2, x3, y1, y2, y3\n",
    "        return (\n",
    "            anchor_mel,\n",
    "            positive_mel,\n",
    "            negative_mel,\n",
    "            paddle.to_tensor([anchor_speaker]),\n",
    "            paddle.to_tensor([anchor_speaker]),\n",
    "            paddle.to_tensor([negative_speaker]),\n",
    "        )\n",
    "\n",
    "    def get_speaker_number(self):\n",
    "        return self.speaker_num\n",
    "\n",
    "    def get_mel_by_feat(self, feat_path):\n",
    "        # Load preprocessed mel-spectrogram.\n",
    "        mel = np.load(os.path.join(self.data_dir, feat_path), allow_pickle=True)\n",
    "\n",
    "        # Segmemt mel-spectrogram into \"segment_len\" frames.\n",
    "        if len(mel) > self.segment_len:\n",
    "            # Randomly get the starting point of the segment.\n",
    "            start = random.randint(0, len(mel) - self.segment_len)\n",
    "            # Get a segment with \"segment_len\" frames.\n",
    "            mel = paddle.to_tensor(mel[start : start + self.segment_len])\n",
    "        else:\n",
    "            mel = paddle.to_tensor(mel)\n",
    "\n",
    "        return mel"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Dataloader\n",
    "- Create dataloader to iterate the data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def pad_sequence(sequences, padding_value=0):\n",
    "    max_size = sequences[0].shape\n",
    "    trailing_dims = max_size[1:]\n",
    "    max_len = max([s.shape[0] for s in sequences])\n",
    "    out_dims = (len(sequences), max_len) + tuple(trailing_dims)\n",
    "    out_tensor = paddle.empty(out_dims, dtype=\"float32\").fill_(padding_value)\n",
    "    for i, tensor in enumerate(sequences):\n",
    "        length = tensor.shape[0]\n",
    "        # use index notation to prevent duplicate references to the tensor\n",
    "        out_tensor[i, :length, ...] = tensor\n",
    "    return out_tensor\n",
    "\n",
    "\n",
    "def collate_batch(batch):\n",
    "    # Process features within a batch.\n",
    "    \"\"\"Collate a batch of data.\"\"\"\n",
    "    mel, speaker = zip(*batch)\n",
    "    # Because we train the model batch by batch, we need to pad the features in the same batch to make their lengths the same.\n",
    "    mel = pad_sequence(mel, padding_value=-20)\n",
    "    # mel: (batch size, length, 40)\n",
    "    return mel, paddle.to_tensor(speaker, dtype=\"int64\")\n",
    "\n",
    "\n",
    "def get_dataloader(meta_dir, meta_file, data_dir, batch_size):\n",
    "    \"\"\"Generate dataloader\"\"\"\n",
    "    trainset = myDataset(meta_dir, meta_file, data_dir)\n",
    "    validset = myDataset(meta_dir, meta_file, data_dir, is_train=False)\n",
    "    speaker_num = trainset.get_speaker_number()\n",
    "    train_loader = DataLoader(\n",
    "        trainset,\n",
    "        batch_size=batch_size,\n",
    "        shuffle=True,\n",
    "        collate_fn=collate_batch,\n",
    "    )\n",
    "    valid_loader = DataLoader(validset, batch_size=batch_size, collate_fn=collate_batch)\n",
    "    return train_loader, valid_loader, speaker_num\n",
    "\n",
    "\n",
    "def collate_metric_learning_batch(batch):\n",
    "    # Process features within a batch.\n",
    "    \"\"\"Collate a batch of data.\"\"\"\n",
    "    (\n",
    "        anchor_mel,\n",
    "        positive_mel,\n",
    "        negative_mel,\n",
    "        anchor_speaker,\n",
    "        positive_speaker,\n",
    "        negative_speaker,\n",
    "    ) = zip(*batch)\n",
    "    # Because we train the model batch by batch, we need to pad the features in the same batch to make their lengths the same.\n",
    "    anchor_mel = pad_sequence(anchor_mel, padding_value=-20)\n",
    "    positive_mel = pad_sequence(positive_mel, padding_value=-20)\n",
    "    negative_mel = pad_sequence(negative_mel, padding_value=-20)\n",
    "    # mel: (batch size, length, 40)\n",
    "    anchor_speaker = paddle.to_tensor(anchor_speaker, dtype=\"int64\")\n",
    "    positive_speaker = paddle.to_tensor(positive_speaker, dtype=\"int64\")\n",
    "    negative_speaker = paddle.to_tensor(negative_speaker, dtype=\"int64\")\n",
    "    return (\n",
    "        anchor_mel,\n",
    "        positive_mel,\n",
    "        negative_mel,\n",
    "        anchor_speaker,\n",
    "        positive_speaker,\n",
    "        negative_speaker,\n",
    "    )\n",
    "\n",
    "\n",
    "def get_metric_learning_dataloader(meta_dir, meta_file, data_dir, batch_size):\n",
    "    \"\"\"Generate dataloader\"\"\"\n",
    "    trainset = myMetricLearningDataset(meta_dir, meta_file, data_dir)\n",
    "    # use the same dataset for validation\n",
    "    validset = myDataset(meta_dir, meta_file, data_dir, is_train=False)\n",
    "    speaker_num = trainset.get_speaker_number()\n",
    "    train_loader = DataLoader(\n",
    "        trainset,\n",
    "        shuffle=True,\n",
    "        batch_size=batch_size,\n",
    "        collate_fn=collate_metric_learning_batch,\n",
    "    )\n",
    "    valid_loader = DataLoader(validset, batch_size=batch_size, collate_fn=collate_batch)\n",
    "    return train_loader, valid_loader, speaker_num"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Model\n",
    "- TransformerEncoderLayer:\n",
    "    - Base transformer encoder layer in Attention Is All You Need\n",
    "    - Parameters:\n",
    "        - d_model: the number of expected features of the input (required).\n",
    "        - nhead: the number of heads of the multiheadattention models (required).\n",
    "        - dim_feedforward: the dimension of the feedforward network model (default=2048).\n",
    "        - dropout: the dropout value (default=0.1).\n",
    "        - activation: the activation function of intermediate layer, relu or gelu (default=relu).\n",
    "\n",
    "- TransformerEncoder:\n",
    "    - TransformerEncoder is a stack of N transformer encoder layers\n",
    "    - Parameters:\n",
    "        - encoder_layer: an instance of the TransformerEncoderLayer() class (required).\n",
    "        - num_layers: the number of sub-encoder-layers in the encoder (required).\n",
    "        - norm: the layer normalization component (optional)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Classifier(nn.Layer):\n",
    "    def __init__(self, d_model=80, n_spks=600, dropout=0.1):\n",
    "        super().__init__()\n",
    "        # Project the dimension of features from that of input into d_model.\n",
    "        self.prenet = nn.Linear(40, d_model)\n",
    "\n",
    "        ###########################################################################\n",
    "        # TODO:\n",
    "        #   1. Run sample code.\n",
    "        #   2. Adjust parameters of transformer.\n",
    "        #   3. Change Transformer to [Conformer](https://arxiv.org/abs/2005.08100).\n",
    "        # HINT:\n",
    "        #   You can refer to the [Conformer Analysis](https://zhuanlan.zhihu.com/p/319881884) for more details.\n",
    "        ###########################################################################\n",
    "        self.encoder_layer = nn.TransformerEncoderLayer(\n",
    "            d_model=d_model, dim_feedforward=256, nhead=2\n",
    "        )\n",
    "        # self.encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=2)\n",
    "\n",
    "        # Project the the dimension of features from d_model into speaker nums.\n",
    "        self.pred_layer = nn.Sequential(\n",
    "            nn.Linear(d_model, d_model),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(d_model, n_spks),\n",
    "        )\n",
    "\n",
    "    def forward(self, mels):\n",
    "        \"\"\"\n",
    "        args:\n",
    "          mels: (batch size, length, 40)\n",
    "        return:\n",
    "          out: (batch size, n_spks)\n",
    "        \"\"\"\n",
    "        # out: (batch size, length, d_model)\n",
    "        out = self.prenet(mels)\n",
    "        # out: (length, batch size, d_model)\n",
    "        # The encoder layer expect features in the shape of (length, batch size, d_model).\n",
    "        out = self.encoder_layer(out)\n",
    "        # out: (batch size, length, d_model)\n",
    "        # mean pooling\n",
    "        stats = out.mean(axis=1)\n",
    "\n",
    "        # out: (batch, n_spks)\n",
    "        out = self.pred_layer(stats)\n",
    "        return out"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Learning Rate Schedule\n",
    "- For transformer architecture, the design of learning rate schedule is different from that of CNN.\n",
    "- Previous works show that the warmup of learning rate is useful for training models with transformer architectures.\n",
    "- The warmup schedule\n",
    "    - Set learning rate to 0 in the beginning.\n",
    "    - The learning rate increases linearly from 0 to initial learning rate during warmup period."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_cosine_schedule_with_warmup(\n",
    "    learning_rate,\n",
    "    num_warmup_steps: int,\n",
    "    num_training_steps: int,\n",
    "    num_cycles: float = 0.5,\n",
    "    last_epoch: int = -1,\n",
    "):\n",
    "    \"\"\"\n",
    "    Create a schedule with a learning rate that decreases following the values of the cosine function between the\n",
    "    initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the\n",
    "    initial lr set in the optimizer.\n",
    "\n",
    "    Args:\n",
    "      optimizer (:class:`~paddle.optim.Optimizer`):\n",
    "          The optimizer for which to schedule the learning rate.\n",
    "      num_warmup_steps (:obj:`int`):\n",
    "          The number of steps for the warmup phase.\n",
    "      num_training_steps (:obj:`int`):\n",
    "          The total number of training steps.\n",
    "      num_cycles (:obj:`float`, `optional`, defaults to 0.5):\n",
    "          The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0\n",
    "        following a half-cosine).\n",
    "      last_epoch (:obj:`int`, `optional`, defaults to -1):\n",
    "          The index of the last epoch when resuming training.\n",
    "\n",
    "    Return:\n",
    "      :obj:`paddle.optim.lr_scheduler.LambdaLR` with the appropriate schedule.\n",
    "    \"\"\"\n",
    "\n",
    "    def lr_lambda(current_step):\n",
    "        # Warmup\n",
    "        if current_step < num_warmup_steps:\n",
    "            return float(current_step) / float(max(1, num_warmup_steps))\n",
    "        # decadence\n",
    "        progress = float(current_step - num_warmup_steps) / float(\n",
    "            max(1, num_training_steps - num_warmup_steps)\n",
    "        )\n",
    "        return max(\n",
    "            0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))\n",
    "        )\n",
    "\n",
    "    return LambdaDecay(learning_rate, lr_lambda, last_epoch)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Model Function\n",
    "- Model forward function."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def model_fn(batch, model, criterion):\n",
    "    \"\"\"Forward a batch through the model.\"\"\"\n",
    "\n",
    "    mels, labels = batch\n",
    "    labels = paddle.squeeze(labels)\n",
    "\n",
    "    outs = model(mels)\n",
    "    loss = criterion(outs, labels)\n",
    "\n",
    "    return loss\n",
    "\n",
    "\n",
    "def model_fn_metric_learning(batch, model, distance_metric, margin=0.2):\n",
    "    \"\"\"Forward a batch through the model.\"\"\"\n",
    "\n",
    "    anchor_mel, positive_mel, negative_mel, anchor_speaker, positive_speaker, negative_speaker = batch\n",
    "    anchor_speaker = paddle.squeeze(anchor_speaker)\n",
    "    positive_speaker = paddle.squeeze(positive_speaker)\n",
    "    negative_speaker = paddle.squeeze(negative_speaker)\n",
    "\n",
    "    # forward pass\n",
    "    anchor_outs = model(anchor_mel)\n",
    "    positive_outs = model(positive_mel)\n",
    "    negative_outs = model(negative_mel)\n",
    "\n",
    "    ###########################################################################\n",
    "    # TODO:\n",
    "    #   Implement triplet loss for classification.\n",
    "    # HINT:\n",
    "    #   Triplet loss is used to learn a distance metric between samples with margin.\n",
    "    ###########################################################################"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Validate\n",
    "- Calculate accuracy on the validation set."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "def valid(\n",
    "    valid_loader,\n",
    "    model,\n",
    "):\n",
    "    \"\"\"Validate on validation set.\"\"\"\n",
    "\n",
    "    model.eval()\n",
    "    running_accuracy = 0.0\n",
    "    pbar = tqdm(total=len(valid_loader.dataset), ncols=0, desc=\"Valid\", unit=\" uttr\")\n",
    "\n",
    "    for i, batch in enumerate(valid_loader):\n",
    "        with paddle.no_grad():\n",
    "            mels, labels = batch\n",
    "            labels = paddle.squeeze(labels)\n",
    "            outs = model(mels)\n",
    "            preds = outs.argmax(1)\n",
    "            accuracy = paddle.mean(paddle.to_tensor((preds == labels), dtype=\"float32\"))\n",
    "            running_accuracy += accuracy.item()\n",
    "\n",
    "        pbar.update(valid_loader.batch_size)\n",
    "        pbar.set_postfix(accuracy=f\"{running_accuracy / (i+1):.2f}\")\n",
    "\n",
    "    pbar.close()\n",
    "    model.train()\n",
    "    return running_accuracy / len(valid_loader)\n",
    "\n",
    "def valid_metric_learning(\n",
    "        train_loader,\n",
    "        valid_loader,\n",
    "        model,\n",
    "):\n",
    "    \"\"\"Validate on validation set.\"\"\"\n",
    "\n",
    "    model.eval()\n",
    "    running_accuracy = 0.0\n",
    "\n",
    "    # generate all the embeddings for the training set\n",
    "    train_embeddings = []\n",
    "    train_labels = []\n",
    "    pbar = tqdm(total=len(train_loader.dataset), ncols=0, desc=\"Valid\", unit=\" uttr\")\n",
    "    for i, batch in enumerate(train_loader):\n",
    "        with paddle.no_grad():\n",
    "            mels, labels = batch\n",
    "            labels = paddle.squeeze(labels)\n",
    "            outs = model(mels)\n",
    "            train_embeddings.append(outs)\n",
    "            train_labels.append(labels)\n",
    "        pbar.update(train_loader.batch_size)\n",
    "    pbar.close()\n",
    "    train_embeddings = paddle.concat(train_embeddings, axis=0)\n",
    "    train_labels = paddle.concat(train_labels, axis=0)\n",
    "\n",
    "    # find the nearest neighbor for each embedding in the validation set\n",
    "    pbar = tqdm(total=len(valid_loader.dataset), ncols=0, desc=\"Valid\", unit=\" uttr\")\n",
    "    for i, batch in enumerate(valid_loader):\n",
    "        with paddle.no_grad():\n",
    "            mels, labels = batch\n",
    "            labels = paddle.squeeze(labels)\n",
    "            outs = model(mels)\n",
    "            # compute the distance between the embeddings and the training set\n",
    "            # distances: (batch size, train set size)\n",
    "            distances = paddle.cdist(outs, train_embeddings)\n",
    "            # find the nearest neighbor\n",
    "            nearest_idx = paddle.argmin(distances, axis=1)\n",
    "            # get the labels of the nearest neighbor as predictions\n",
    "            preds = train_labels[nearest_idx]\n",
    "            accuracy = paddle.mean(paddle.to_tensor((preds == labels), dtype=\"float32\"))   \n",
    "            running_accuracy += accuracy.item()\n",
    "\n",
    "        pbar.update(valid_loader.batch_size)\n",
    "        pbar.set_postfix(accuracy=f\"{running_accuracy / (i+1):.2f}\")\n",
    "\n",
    "    pbar.close()\n",
    "    model.train()\n",
    "    return running_accuracy / len(valid_loader)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Training and Validation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "vscode": {
     "languageId": "powershell"
    }
   },
   "outputs": [],
   "source": [
    "# whether to use metric learning or not\n",
    "do_metric_learning = False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[Info]: Finish loading data!\n",
      "[Info]: Finish creating model!\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Train: 100% 2000/2000 [02:48<00:00, 11.90 step/s, loss=3.17, step=2000]\n",
      "Valid:   0% 0/4817 [00:00<?, ? uttr/s]C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_17256\\3555821022.py:17: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach(), rather than paddle.to_tensor(sourceTensor).\n",
      "  accuracy = paddle.mean(paddle.to_tensor((preds == labels), dtype=\"float32\"))\n",
      "Valid: 4920 uttr [00:05, 910.69 uttr/s, accuracy=0.24]           \n",
      "Train: 100% 2000/2000 [02:52<00:00, 11.56 step/s, loss=2.28, step=4000]\n",
      "Valid: 4920 uttr [00:02, 1657.86 uttr/s, accuracy=0.32]           \n",
      "Train: 100% 2000/2000 [02:50<00:00, 11.72 step/s, loss=1.83, step=6000]\n",
      "Valid: 4920 uttr [00:03, 1610.37 uttr/s, accuracy=0.37]           \n",
      "Train: 100% 2000/2000 [02:50<00:00, 11.76 step/s, loss=2.02, step=8000]\n",
      "Valid: 4920 uttr [00:02, 1746.79 uttr/s, accuracy=0.39]           \n",
      "Train: 100% 2000/2000 [02:45<00:00, 12.06 step/s, loss=1.15, step=1e+4]\n",
      "Valid: 4920 uttr [00:02, 1683.54 uttr/s, accuracy=0.41]           \n",
      "Train:   0% 5/2000 [00:00<01:13, 27.14 step/s, loss=1.33, step=1e+4]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 10000, best model saved. (accuracy=0.4140)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Train: 100% 2000/2000 [02:51<00:00, 11.66 step/s, loss=1.32, step=12000]\n",
      "Valid: 4920 uttr [00:02, 1645.81 uttr/s, accuracy=0.43]           \n",
      "Train: 100% 2000/2000 [02:52<00:00, 11.58 step/s, loss=1.11, step=14000]\n",
      "Valid: 4920 uttr [00:02, 1653.51 uttr/s, accuracy=0.45]           \n",
      "Train: 100% 2000/2000 [02:52<00:00, 11.57 step/s, loss=0.74, step=16000]\n",
      "Valid: 4920 uttr [00:03, 1592.23 uttr/s, accuracy=0.46]           \n",
      "Train: 100% 2000/2000 [03:19<00:00, 10.04 step/s, loss=0.88, step=18000]\n",
      "Valid: 4920 uttr [00:04, 1079.20 uttr/s, accuracy=0.47]           \n",
      "Train: 100% 2000/2000 [04:08<00:00,  8.05 step/s, loss=0.88, step=2e+4] \n",
      "Valid: 4920 uttr [00:04, 1060.73 uttr/s, accuracy=0.46]           \n",
      "Train:   0% 0/2000 [00:00<?, ? step/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 20000, best model saved. (accuracy=0.4704)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# parameters\n",
    "meta_dir = \"./work\"\n",
    "meta_file = \"metadata_train.json\"\n",
    "data_dir = \"./work/Dataset_npy\"\n",
    "save_path = \"model.ckpt\"\n",
    "batch_size = 120\n",
    "valid_steps = 2000\n",
    "warmup_steps = 1000\n",
    "save_steps = 10000\n",
    "total_steps = 20000\n",
    "\n",
    "# 设置设备\n",
    "try:\n",
    "    paddle.device.set_device('gpu:0' if paddle.device.cuda.device_count() != 0 else 'cpu')\n",
    "except:\n",
    "    print(\"GPU不可用，使用CPU进行训练\")\n",
    "    paddle.device.set_device('cpu')\n",
    "\n",
    "train_loader, valid_loader, speaker_num = get_dataloader(\n",
    "    meta_dir, meta_file, data_dir, batch_size\n",
    ")\n",
    "train_loader_metric_learning, _, _ = get_metric_learning_dataloader(\n",
    "    meta_dir, meta_file, data_dir, batch_size\n",
    ")\n",
    "# Pick the training iterator: the metric-learning loader yields batches for\n",
    "# the triplet-style objective; otherwise the plain classification loader is used.\n",
    "if do_metric_learning:\n",
    "    train_iterator = iter(train_loader_metric_learning)\n",
    "else:\n",
    "    train_iterator = iter(train_loader)\n",
    "print(\"[Info]: Finish loading data!\", flush=True)\n",
    "\n",
    "# Model, losses, and optimizer.\n",
    "# NOTE(review): in Paddle the LR scheduler object is passed directly as the\n",
    "# optimizer's learning_rate, so scheduler.step() below drives the LR schedule.\n",
    "model = Classifier(n_spks=speaker_num)\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "distance_metric = nn.PairwiseDistance(p=2)\n",
    "scheduler = get_cosine_schedule_with_warmup(1e-3, warmup_steps, total_steps)\n",
    "optimizer = AdamW(parameters=model.parameters(), learning_rate=scheduler)\n",
    "print(\"[Info]: Finish creating model!\", flush=True)\n",
    "\n",
    "best_accuracy = -1.0  # best validation accuracy seen so far\n",
    "best_state_dict = None  # model weights corresponding to best_accuracy\n",
    "\n",
    "pbar = tqdm(total=valid_steps, ncols=0, desc=\"Train\", unit=\" step\")\n",
    "\n",
    "for step in range(total_steps):\n",
    "    # Get data; restart the iterator when the loader is exhausted.\n",
    "    try:\n",
    "        batch = next(train_iterator)\n",
    "    except StopIteration:\n",
    "        if do_metric_learning:\n",
    "            train_iterator = iter(train_loader_metric_learning)\n",
    "        else:\n",
    "            train_iterator = iter(train_loader)\n",
    "        batch = next(train_iterator)\n",
    "\n",
    "    # Compute loss (metric-learning objective vs. plain cross-entropy).\n",
    "    if do_metric_learning:\n",
    "        loss = model_fn_metric_learning(batch, model, distance_metric)\n",
    "    else:\n",
    "        loss = model_fn(batch, model, criterion)\n",
    "    batch_loss = loss.item()\n",
    "\n",
    "    # Update the model, step the LR scheduler, then clear gradients.\n",
    "    loss.backward()\n",
    "    optimizer.step()\n",
    "    scheduler.step()\n",
    "    optimizer.clear_grad()\n",
    "\n",
    "    # Log progress on the progress bar.\n",
    "    pbar.update()\n",
    "    pbar.set_postfix(\n",
    "        loss=f\"{batch_loss:.2f}\",\n",
    "        step=step + 1,\n",
    "    )\n",
    "\n",
    "    # Do validation every valid_steps steps.\n",
    "    if (step + 1) % valid_steps == 0:\n",
    "        pbar.close()\n",
    "\n",
    "        if do_metric_learning:\n",
    "            valid_accuracy = valid_metric_learning(train_loader, valid_loader, model)\n",
    "        else:\n",
    "            valid_accuracy = valid(valid_loader, model)\n",
    "\n",
    "        # Keep the weights of the best model seen so far (saved below).\n",
    "        if valid_accuracy > best_accuracy:\n",
    "            best_accuracy = valid_accuracy\n",
    "            best_state_dict = model.state_dict()\n",
    "\n",
    "        pbar = tqdm(total=valid_steps, ncols=0, desc=\"Train\", unit=\" step\")\n",
    "\n",
    "    # Save the best model so far (checkpointing cadence is save_steps).\n",
    "    if (step + 1) % save_steps == 0 and best_state_dict is not None:\n",
    "        paddle.save(best_state_dict, save_path)\n",
    "        pbar.write(f\"Step {step + 1}, best model saved. (accuracy={best_accuracy:.4f})\")\n",
    "\n",
    "pbar.close()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Test Dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "\n",
    "class InferenceDataset(Dataset):\n",
    "    \"\"\"Dataset over the test-set utterances listed in a metadata JSON file.\n",
    "\n",
    "    Each item is a (feature_path, mel) pair, where mel is the utterance's\n",
    "    feature matrix loaded from a .npy file under data_dir.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, meta_file, data_dir):\n",
    "        testdata_path = Path(meta_file)\n",
    "        metadata = json.load(testdata_path.open())\n",
    "        self.data_dir = data_dir\n",
    "        self.data = metadata[\"utterances\"]\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.data)\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        utterance = self.data[index]\n",
    "        # Metadata still references the original .pt files; the features were\n",
    "        # converted to .npy, so rewrite the extension before loading.\n",
    "        feat_path = utterance[\"feature_path\"].replace(\".pt\", \".npy\")\n",
    "        mel = paddle.to_tensor(\n",
    "            np.load(os.path.join(self.data_dir, feat_path), allow_pickle=True)\n",
    "        )\n",
    "        return feat_path, mel\n",
    "\n",
    "\n",
    "def inference_collate_batch(batch):\n",
    "    \"\"\"Collate a batch of (feat_path, mel) pairs into (paths, stacked mels).\n",
    "\n",
    "    NOTE(review): paddle.stack requires every mel in the batch to have the\n",
    "    same length; the inference loaders below use batch_size=1, so this holds.\n",
    "    \"\"\"\n",
    "    feat_paths, mels = zip(*batch)\n",
    "    return feat_paths, paddle.stack(mels)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Public Testing\n",
    "Notice that you should write the result of the public test in the report."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[Info]: Finish loading data!\n",
      "[Info]: Finish creating model!\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\ProgramData\\anaconda3\\envs\\paddle_cuda\\lib\\site-packages\\paddle\\io\\reader.py:493: UserWarning: DataLoader with multi-process mode is not supported on MacOs and Windows currently. Please use single-process mode with num_workers = 0 instead\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "# parameters\n",
    "meta_file = \"work/metadata_public_test_with_answer.json\"\n",
    "data_dir = \"work/Dataset_npy\"\n",
    "model_path = \"model.ckpt\"\n",
    "output_path = \"output_public_test.csv\"\n",
    "\n",
    "\n",
    "# id2speaker maps predicted class indices back to speaker names.\n",
    "mapping_path = Path(\"work/mapping.json\")\n",
    "mapping = json.load(mapping_path.open())\n",
    "\n",
    "# NOTE(review): multi-process loading (num_workers=8) is not supported on\n",
    "# Windows/MacOS and falls back to single-process (see warning in the output).\n",
    "public_test_loader = DataLoader(\n",
    "    InferenceDataset(meta_file, data_dir),\n",
    "    batch_size=1,\n",
    "    shuffle=False,\n",
    "    drop_last=False,\n",
    "    num_workers=8,\n",
    "    collate_fn=inference_collate_batch,\n",
    ")\n",
    "print(\"[Info]: Finish loading data!\", flush=True)\n",
    "\n",
    "# Rebuild the classifier and load the best checkpoint saved during training.\n",
    "speaker_num = len(mapping[\"id2speaker\"])\n",
    "model = Classifier(n_spks=speaker_num)\n",
    "model.set_state_dict(paddle.load(model_path))\n",
    "model.eval()\n",
    "print(\"[Info]: Finish creating model!\", flush=True)\n",
    "\n",
    "if do_metric_learning:\n",
    "    # generate all the embeddings for the training set\n",
    "    # NOTE(review): train_embeddings / train_labels built here are also read\n",
    "    # by the private-test cell below, so that cell depends on this one having\n",
    "    # run first when do_metric_learning is True.\n",
    "    train_embeddings = []\n",
    "    train_labels = []\n",
    "    pbar = tqdm(total=len(train_loader.dataset), ncols=0, desc=\"Test\", unit=\" uttr\")\n",
    "    for i, batch in enumerate(train_loader):\n",
    "        with paddle.no_grad():\n",
    "            mels, labels = batch\n",
    "            labels = paddle.squeeze(labels)\n",
    "            outs = model(mels)\n",
    "            train_embeddings.append(outs)\n",
    "            train_labels.append(labels)\n",
    "        pbar.update(train_loader.batch_size)\n",
    "    pbar.close()\n",
    "    train_embeddings = paddle.concat(train_embeddings, axis=0)\n",
    "    train_labels = paddle.concat(train_labels, axis=0)\n",
    "\n",
    "results = [[\"Id\", \"Category\"]]\n",
    "for feat_paths, mels in public_test_loader:\n",
    "    with paddle.no_grad():\n",
    "        outs = model(mels)\n",
    "        if do_metric_learning:\n",
    "            # compute the distance between the embeddings and the training set\n",
    "            out_expand = paddle.unsqueeze(outs, axis=1)\n",
    "            train_embeddings_expand = paddle.unsqueeze(train_embeddings, axis=0)\n",
    "            distances = paddle.linalg.norm(out_expand - train_embeddings_expand, p=2, axis=2)\n",
    "            # distances: (batch size, train set size)\n",
    "            # find the nearest neighbor\n",
    "            nearest_idx = paddle.argmin(distances, axis=1)\n",
    "            # get the labels of the nearest neighbor as predictions\n",
    "            preds = train_labels[nearest_idx].cpu().numpy()\n",
    "        else:\n",
    "            preds = outs.argmax(1).cpu().numpy()\n",
    "        for feat_path, pred in zip(feat_paths, preds):\n",
    "            results.append([feat_path, mapping[\"id2speaker\"][str(pred)]])\n",
    "\n",
    "# Write predictions as a two-column CSV: feature path -> speaker name.\n",
    "with open(output_path, \"w\", newline=\"\") as csvfile:\n",
    "    writer = csv.writer(csvfile)\n",
    "    writer.writerows(results)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "acc@1: 0.5531952247191011 3151 5696\n"
     ]
    }
   ],
   "source": [
    "# Compute top-1 accuracy of the public-test predictions against the answers.\n",
    "num_correct = 0\n",
    "num_wrong = 0\n",
    "\n",
    "# Build a feature_path -> speaker-id lookup from the answer metadata.\n",
    "with open(\"work/metadata_public_test_with_answer.json\") as file:\n",
    "    answer = json.load(file)\n",
    "query_map = {}\n",
    "for meta in answer[\"utterances\"]:\n",
    "    query_map[meta[\"feature_path\"]] = meta[\"id\"]\n",
    "\n",
    "# Use csv.reader so quoting is handled consistently with the csv.writer that\n",
    "# produced the file (a naive line.split(\",\") breaks on any quoted field), and\n",
    "# skip the header row explicitly instead of string-matching it.\n",
    "with open(\"output_public_test.csv\", newline=\"\") as f:\n",
    "    reader = csv.reader(f)\n",
    "    next(reader, None)  # skip the \"Id,Category\" header\n",
    "    for feat_id, category in reader:\n",
    "        # Predictions use .npy paths while the metadata lists .pt paths.\n",
    "        feat_id = feat_id.replace(\".npy\", \".pt\")\n",
    "        if query_map[feat_id] == category:\n",
    "            num_correct += 1\n",
    "        else:\n",
    "            num_wrong += 1\n",
    "\n",
    "print(\n",
    "    \"acc@1:\",\n",
    "    num_correct / float(num_correct + num_wrong),\n",
    "    num_correct,\n",
    "    num_correct + num_wrong,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Private Testing\n",
    "Notice that you should submit the csv file generated here for private test."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[Info]: Finish loading data!\n",
      "[Info]: Finish creating model!\n"
     ]
    }
   ],
   "source": [
    "# parameters\n",
    "meta_file = \"work/metadata_private_test_no_answer.json\"\n",
    "data_dir = \"./work/Dataset_npy\"\n",
    "model_path = \"model.ckpt\"\n",
    "output_path = \"output.csv\"\n",
    "\n",
    "\n",
    "# id2speaker maps predicted class indices back to speaker names.\n",
    "mapping_path = Path(\"work/mapping.json\")\n",
    "mapping = json.load(mapping_path.open())\n",
    "\n",
    "private_test_loader = DataLoader(\n",
    "    InferenceDataset(meta_file, data_dir),\n",
    "    batch_size=1,\n",
    "    shuffle=False,\n",
    "    drop_last=False,\n",
    "    num_workers=8,\n",
    "    collate_fn=inference_collate_batch,\n",
    ")\n",
    "print(\"[Info]: Finish loading data!\", flush=True)\n",
    "\n",
    "# Rebuild the classifier and load the best checkpoint saved during training.\n",
    "speaker_num = len(mapping[\"id2speaker\"])\n",
    "model = Classifier(n_spks=speaker_num)\n",
    "model.set_state_dict(paddle.load(model_path))\n",
    "model.eval()\n",
    "print(\"[Info]: Finish creating model!\", flush=True)\n",
    "\n",
    "results = [[\"Id\", \"Category\"]]\n",
    "for feat_paths, mels in private_test_loader:\n",
    "    with paddle.no_grad():\n",
    "        outs = model(mels)\n",
    "        if do_metric_learning:\n",
    "            # Nearest-neighbor classification in embedding space.\n",
    "            # NOTE(review): train_embeddings / train_labels come from the\n",
    "            # public-test cell above, which must have run first.\n",
    "            out_expand = paddle.unsqueeze(outs, axis=1)\n",
    "            train_embeddings_expand = paddle.unsqueeze(train_embeddings, axis=0)\n",
    "            # Use paddle.linalg.norm (paddle.norm is a deprecated alias) for\n",
    "            # consistency with the public-test cell.\n",
    "            distances = paddle.linalg.norm(out_expand - train_embeddings_expand, p=2, axis=2)\n",
    "            # distances: (batch size, train set size)\n",
    "            nearest_idx = paddle.argmin(distances, axis=1)\n",
    "            # Labels of the nearest training embeddings become the predictions.\n",
    "            preds = train_labels[nearest_idx].cpu().numpy()\n",
    "        else:\n",
    "            preds = outs.argmax(1).cpu().numpy()\n",
    "        for feat_path, pred in zip(feat_paths, preds):\n",
    "            results.append([feat_path, mapping[\"id2speaker\"][str(pred)]])\n",
    "\n",
    "# Write predictions as a two-column CSV for submission.\n",
    "with open(output_path, \"w\", newline=\"\") as csvfile:\n",
    "    writer = csv.writer(csvfile)\n",
    "    writer.writerows(results)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "paddle_cuda",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
