{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Study of SSPS"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%load_ext autoreload\n",
    "%autoreload 2\n",
    "\n",
    "import os\n",
    "import sys\n",
    "os.chdir('../..')\n",
    "sys.path.insert(1, os.path.join(sys.path[0], '../..'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from notebooks.articles.utils import (\n",
    "    evaluate_sv,\n",
    "    plot_inter_speaker_center_similarity,\n",
    "    plot_inter_class_similarity,\n",
    "    plot_intra_class_similarity,\n",
    "    plot_intra_class_similarity_by_class\n",
    ")\n",
    "\n",
    "from notebooks.evaluation.sv_visualization import (\n",
    "    det_curve,\n",
    "    scores_distribution,\n",
    "    tsne_2D,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from dataclasses import dataclass\n",
    "from typing import List, Dict\n",
    "\n",
    "import torch\n",
    "\n",
    "\n",
    "@dataclass\n",
    "class Model:\n",
    "\n",
    "    scores: List[float] = None\n",
    "    targets: List[int] = None\n",
    "    embeddings: Dict[str, torch.Tensor] = None\n",
    "\n",
    "\n",
    "def get_models_for_visualization(scores, names=None):\n",
    "    if names is None:\n",
    "        names = list(scores.keys())\n",
    "\n",
    "    models = {\n",
    "        k:Model(v['scores'], v['targets'])\n",
    "        for k, v\n",
    "        in scores.items()\n",
    "        if k in names\n",
    "    }\n",
    "\n",
    "    return models"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Sampling hyper-params"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Stochastic sampling"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "from plotnine import (\n",
    "    ggplot,\n",
    "    aes,\n",
    "    geom_line,\n",
    "    geom_point,\n",
    "    labs,\n",
    "    theme_bw,\n",
    "    scale_x_continuous,\n",
    "    scale_y_continuous,\n",
    "    scale_color_discrete,\n",
    "    geom_text,\n",
    "    theme,\n",
    "    element_text,\n",
    "    element_blank,\n",
    "    element_rect,\n",
    "    guides,\n",
    "    guide_legend\n",
    ")\n",
    "\n",
    "\n",
    "# Parameters\n",
    "N = 10\n",
    "decays = [0.2, 0.5, 1.0, 2.0]\n",
    "# decays = [0.1, 0.4, 0.6, 0.8, 1.0, 1.2, 1.7, 2.5, 4.0]\n",
    "\n",
    "# Generate data for each decay\n",
    "data = []\n",
    "for decay in decays:\n",
    "    if decay == 0.0:\n",
    "        method = 'uniform'\n",
    "        probs = torch.ones(N) / N\n",
    "    else:\n",
    "        method = f'λ={decay}'\n",
    "        probs = decay * torch.exp(-decay * torch.arange(N).float())\n",
    "    probs = (probs / probs.sum()).numpy()\n",
    "\n",
    "    # Add the data to the list\n",
    "    for idx, prob in enumerate(probs):\n",
    "        data.append({'Index': idx, 'Probability': prob, 'Method': method})\n",
    "\n",
    "# Create the plot\n",
    "df = pd.DataFrame(data)\n",
    "p = (\n",
    "    ggplot(df, aes(x='Index', y='Probability', color='Method'))\n",
    "    + geom_line(aes(group='Method'), size=1)\n",
    "    + geom_point(size=1)\n",
    "    + labs(title=f'Sampling probability distribution (N={N})')\n",
    "    + scale_x_continuous(breaks=range(N))\n",
    "    + scale_y_continuous(breaks=np.arange(0, 1.1, 0.1))\n",
    "    + scale_color_discrete(\n",
    "        limits=df.Method.unique()\n",
    "    )\n",
    "    + guides(color=guide_legend(nrow=1, byrow=True))\n",
    "    + theme_bw()\n",
    "    + theme(\n",
    "        figure_size=(12, 7),\n",
    "        text=element_text(size=20),\n",
    "        axis_title_x=element_blank(),\n",
    "        axis_title_y=element_blank(),\n",
    "        legend_title=element_blank(),\n",
    "        legend_position=(0.5, 0.95),\n",
    "        legend_direction='horizontal',\n",
    "        legend_key_spacing_x=10\n",
    "    )\n",
    ")\n",
    "\n",
    "p"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### SSPS-NN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "from glob import glob\n",
    "import re\n",
    "\n",
    "from plotnine import ggplot, aes, geom_line, geom_vline, geom_point, theme, theme_bw, labs, scale_x_continuous, scale_y_continuous, scale_color_discrete, element_text\n",
    "import patchworklib as pw\n",
    "import pandas as pd\n",
    "\n",
    "\n",
    "exps = [\n",
    "    \"models/ssps/voxceleb2/simclr/exps/ssps_knn_uni-1\",\n",
    "    \"models/ssps/voxceleb2/simclr/exps/ssps_knn_uni-10\",\n",
    "    \"models/ssps/voxceleb2/simclr/exps/ssps_knn_uni-25\",\n",
    "    \"models/ssps/voxceleb2/simclr/exps/ssps_knn_uni-50\",\n",
    "    \"models/ssps/voxceleb2/simclr/exps/ssps_knn_uni-100\",\n",
    "]\n",
    "\n",
    "\n",
    "res = []\n",
    "for exp in exps:\n",
    "    with open(exp + \"/training.json\", \"r\") as f:\n",
    "        train = json.load(f)\n",
    "\n",
    "    with open(exp + \"/evaluation.json\", \"r\") as f:\n",
     "        eval_metrics = json.load(f)\n",
     "\n",
     "    sampling = re.search(r'uni-([\\w\\d.]+)', exp.split('/')[-1])\n",
     "    if sampling:\n",
     "        sampling = int(sampling.group(1))\n",
     "    else:\n",
     "        sampling = 0\n",
     "\n",
     "    res.append({\n",
     "        'sampling': sampling,\n",
     "        **train[\"109\"],\n",
     "        **eval_metrics\n",
    "    })\n",
    "\n",
    "data = pd.DataFrame(res)\n",
    "\n",
    "Y_AXIS = {\n",
    "    \"test/sv_cosine/voxceleb1_test_O/eer\": ((5.5, 9.0), [5.0, 6.0, 7.0, 8.0, 9.0]),\n",
    "    \"ssps_speaker_acc\": ((0.4, 1.0), [0.4, 0.6, 0.8, 1.0]),\n",
    "    \"ssps_video_acc\": ((0.0, 0.8), [0.0, 0.2, 0.4, 0.6, 0.8]),\n",
    "}\n",
    "\n",
    "def create_plot(y, label):\n",
    "    p = (\n",
    "        ggplot(data, aes(x='sampling', y=y))\n",
    "        + geom_line()\n",
    "        + geom_point(size=3)\n",
    "        # + geom_vline(xintercept=1, linetype='dashed', color='black')\n",
    "        + labs(title=label, x='M', y=\"\")\n",
    "        + scale_x_continuous(\n",
    "            breaks=data['sampling'],\n",
    "            labels=data['sampling']\n",
    "        )\n",
    "        + scale_y_continuous(\n",
    "            limits=Y_AXIS[y][0],\n",
    "            breaks=Y_AXIS[y][1],\n",
    "            labels=lambda l: [f\"{v:.2f}\" for v in l]\n",
    "        )\n",
    "        + theme_bw()\n",
    "        + theme(\n",
    "            figure_size=(6, 5),\n",
    "            text=element_text(size=20),\n",
    "            plot_title=element_text(\n",
    "                margin={'b': 200}\n",
    "            ),\n",
    "            # axis_text_x=element_text(angle=45, ha=\"right\")\n",
    "        )\n",
    "    )\n",
    "    p = pw.load_ggplot(p)\n",
    "    return p\n",
    "\n",
    "\n",
    "g_spkacc = create_plot('ssps_speaker_acc', 'Pseudo-Positives Speaker Accuracy (%)')\n",
    "g_vidacc = create_plot('ssps_video_acc', 'Pseudo-Positives Recording Accuracy (%)')\n",
    "g_eer = create_plot('test/sv_cosine/voxceleb1_test_O/eer', 'EER (%)')\n",
    "# g_mindcf = create_plot('test/sv_cosine/voxceleb1_test_O/mindcf', 'minDCF (p=0.01)')\n",
    "\n",
    "p = (g_eer/g_spkacc/g_vidacc)\n",
    "# p = (g_eer|g_spkacc|g_vidacc)\n",
    "\n",
    "# p.set_suptitle(\n",
    "#     \"SSPS-NN: Metrics with different sampling hyper-parameters\",\n",
    "#     fontsize=20,\n",
    "#     pad=40\n",
    "# )\n",
    "# p.savefig()\n",
    "\n",
    "p.savefig('ssps_nn_hyperparams.pdf')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### SSPS-Clustering"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "from glob import glob\n",
    "import re\n",
    "\n",
    "from plotnine import (\n",
    "    ggplot,\n",
    "    aes,\n",
    "    geom_line,\n",
    "    geom_vline,\n",
    "    geom_point,\n",
    "    theme,\n",
    "    theme_bw,\n",
    "    labs,\n",
    "    element_rect,\n",
    "    scale_x_continuous,\n",
    "    scale_y_continuous,\n",
    "    scale_color_discrete,\n",
    "    element_text,\n",
    "    element_blank\n",
    ")\n",
    "import patchworklib as pw\n",
    "import pandas as pd\n",
    "\n",
    "\n",
    "exps = [\n",
    "    # \"models/ssps/voxceleb2/simclr/ssps_kmeans_6k\",\n",
    "    # \"models/ssps/voxceleb2/simclr/ssps_kmeans_10k\",\n",
    "\n",
    "    \"models/ssps/voxceleb2/simclr/exps/ssps_kmeans_25k\",\n",
    "    \"models/ssps/voxceleb2/simclr/ssps_kmeans_25k_uni-1\",\n",
    "    \"models/ssps/voxceleb2/simclr/exps/ssps_kmeans_25k_uni-2\",\n",
    "    \"models/ssps/voxceleb2/simclr/exps/ssps_kmeans_25k_uni-3\",\n",
    "    \"models/ssps/voxceleb2/simclr/exps/ssps_kmeans_25k_uni-5\",\n",
    "\n",
    "    \"models/ssps/voxceleb2/simclr/exps/ssps_kmeans_50k\",\n",
    "    \"models/ssps/voxceleb2/simclr/exps/ssps_kmeans_50k_uni-1\",\n",
    "    \"models/ssps/voxceleb2/simclr/exps/ssps_kmeans_50k_uni-2\",\n",
    "    \"models/ssps/voxceleb2/simclr/exps/ssps_kmeans_50k_uni-3\",\n",
    "    \"models/ssps/voxceleb2/simclr/exps/ssps_kmeans_50k_uni-5\",\n",
    "\n",
    "    \"models/ssps/voxceleb2/simclr/exps/ssps_kmeans_100k\",\n",
    "    \"models/ssps/voxceleb2/simclr/exps/ssps_kmeans_100k_uni-1\",\n",
    "    \"models/ssps/voxceleb2/simclr/exps/ssps_kmeans_100k_uni-2\",\n",
    "    \"models/ssps/voxceleb2/simclr/exps/ssps_kmeans_100k_uni-3\",\n",
    "    \"models/ssps/voxceleb2/simclr/exps/ssps_kmeans_100k_uni-5\",\n",
    "\n",
    "    \"models/ssps/voxceleb2/simclr/exps/ssps_kmeans_150k\",\n",
    "    \"models/ssps/voxceleb2/simclr/exps/ssps_kmeans_150k_uni-1\",\n",
    "    \"models/ssps/voxceleb2/simclr/exps/ssps_kmeans_150k_uni-2\",\n",
    "    \"models/ssps/voxceleb2/simclr/exps/ssps_kmeans_150k_uni-3\",\n",
    "    \"models/ssps/voxceleb2/simclr/exps/ssps_kmeans_150k_uni-5\",\n",
    "]\n",
    "\n",
    "res = []\n",
    "for exp in exps:\n",
    "    with open(exp + \"/training.json\", \"r\") as f:\n",
    "        train = json.load(f)\n",
    "\n",
    "    with open(exp + \"/evaluation.json\", \"r\") as f:\n",
     "        eval_metrics = json.load(f)\n",
     "\n",
     "    inter_sampling = re.search(r'uni-([\\w\\d.]+)', exp.split('/')[-1])\n",
     "    if inter_sampling:\n",
     "        inter_sampling = int(inter_sampling.group(1))\n",
     "    else:\n",
     "        inter_sampling = 0\n",
     "\n",
     "    K = re.search(r'(\\d+)k', exp.split('/')[-1]).group(1) + \"k\"\n",
     "    \n",
     "    res.append({\n",
     "        'inter_sampling': inter_sampling,\n",
     "        'K': K,\n",
     "        **train[\"109\"],\n",
     "        **eval_metrics\n",
    "    })\n",
    "\n",
    "data = pd.DataFrame(res)\n",
    "\n",
    "def create_plot(y, label):\n",
    "    p = (\n",
    "        ggplot(data, aes(x='inter_sampling', y=y, color='factor(K)'))\n",
    "        + geom_line()\n",
    "        + geom_point(size=3)\n",
    "        # + geom_vline(xintercept=1, linetype='dashed', color='black')\n",
    "        + labs(title=label, x='M', y=\"\", color='K')\n",
    "        + scale_x_continuous(\n",
    "            breaks=data['inter_sampling'],\n",
    "            labels=data['inter_sampling']\n",
    "        )\n",
    "        + scale_y_continuous(\n",
    "            limits=Y_AXIS[y][0],\n",
    "            breaks=Y_AXIS[y][1],\n",
    "            labels=lambda l: [f\"{v:.2f}\" for v in l]\n",
    "        )\n",
    "        + scale_color_discrete(limits=data['K'].unique())\n",
    "        + theme_bw()\n",
    "        + theme(\n",
    "            figure_size=(6, 5),\n",
    "            text=element_text(size=20),\n",
    "            plot_title=element_text(\n",
    "                margin={'b': 200}\n",
    "            ),\n",
    "            # axis_text_x=element_text(angle=45, ha=\"right\")\n",
    "            legend_position=(0.55, 0.92),\n",
    "            legend_title=element_blank(),\n",
    "            legend_direction='horizontal',\n",
    "            legend_text=element_text(size=18),\n",
    "            legend_background=element_rect(fill='blue', alpha=0.0),\n",
    "            legend_key=element_blank(),\n",
    "            legend_key_spacing_x=10\n",
    "        )\n",
    "    )\n",
    "    p = pw.load_ggplot(p)\n",
    "    return p\n",
    "\n",
    "\n",
    "g_spkacc = create_plot('ssps_speaker_acc', 'Pseudo-Positives Speaker Accuracy (%)')\n",
    "g_vidacc = create_plot('ssps_video_acc', 'Pseudo-Positives Recording Accuracy (%)')\n",
    "g_eer = create_plot('test/sv_cosine/voxceleb1_test_O/eer', 'EER (%)')\n",
    "# g_mindcf = create_plot('test/sv_cosine/voxceleb1_test_O/mindcf', 'minDCF (p=0.01)')\n",
    "\n",
    "p = (g_eer/g_spkacc/g_vidacc)\n",
    "\n",
    "# p.set_suptitle(\n",
    "#     \"SSPS-Clustering: Metrics with different sampling hyper-parameters\",\n",
    "#     fontsize=20,\n",
    "#     pad=40\n",
    "# )\n",
     "# p.savefig()\n",
    "\n",
    "p.savefig('ssps_clustering_hyperparams.pdf')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Results on SV"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Metrics"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "vox1o_scores = evaluate_sv({\n",
    "    \"baseline\": \"models/ssps/voxceleb2/simclr_e-ecapa/baseline/\",\n",
    "    \"ssps_kmeans_25k_uni-1\": \"models/ssps/voxceleb2/simclr_e-ecapa/ssps_kmeans_25k_uni-1/\",\n",
    "    \"baseline_sup\": \"models/ssps/voxceleb2/simclr_e-ecapa/baseline_sup/\",\n",
    "}, 'vox1_embeddings.pt', trials=[\n",
    "    \"voxceleb1_test_O\",\n",
    "])\n",
    "\n",
    "# vox1_scores = evaluate_sv({\n",
    "#     \"baseline\": \"models/ssps/voxceleb2/simclr/baseline/\",\n",
    "#     \"ssps_kmeans_25k_uni-1\": \"models/ssps/voxceleb2/simclr/ssps_kmeans_25k_uni-1/\",\n",
    "#     \"baseline_sup\": \"models/ssps/voxceleb2/simclr/baseline_sup/\",\n",
    "# }, 'vox1_embeddings.pt', trials=[\n",
    "#     \"voxceleb1_test_O\",\n",
    "#     \"voxceleb1_test_E\",\n",
    "#     \"voxceleb1_test_H\",\n",
    "# ])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Scores distribution"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "scores_distribution(get_models_for_visualization(vox1o_scores, [\n",
    "    \"baseline\",\n",
    "    \"ssps_kmeans_25k_uni-1\",\n",
    "]), use_angle=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "scores_distribution(get_models_for_visualization(vox1o_scores, [\n",
    "    \"baseline_sup\",\n",
    "    \"baseline_sup_aam\",\n",
    "]), use_angle=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### DET"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "det_curve(get_models_for_visualization(vox1o_scores))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import pandas as pd\n",
    "from plotnine import ggplot, aes, geom_line, geom_point, theme, labs, scale_x_continuous, element_text\n",
    "import patchworklib as pw\n",
    "\n",
    "\n",
    "with open('models/ssps/voxceleb2/simclr/ssps_kmeans_25k_uni-1/training.json', \"r\") as f:\n",
    "    train = json.load(f)\n",
    "\n",
    "res = []\n",
    "for epoch, metrics in train.items():\n",
    "    res.append({\n",
    "        'Epoch': int(epoch),\n",
    "        'Model': 'SSPS',\n",
    "        **metrics\n",
    "    })\n",
    "\n",
    "with open('models/ssps/voxceleb2/simclr/baseline/training.json', \"r\") as f:\n",
    "    train = json.load(f)\n",
    "\n",
    "for epoch, metrics in train.items():\n",
    "    if epoch == '110':\n",
    "        break\n",
    "    res.append({\n",
    "        'Epoch': int(epoch),\n",
    "        'Model': 'Baseline',\n",
    "        **metrics\n",
    "    })\n",
    "\n",
    "data = pd.DataFrame(res)\n",
    "\n",
    "def create_plot(y, label):\n",
    "    p = (\n",
    "        ggplot(data, aes(x='Epoch', y=y, color='factor(Model)'))\n",
    "        + geom_line()\n",
    "        + geom_point()\n",
    "        + labs(title=label, x='Epoch', y=None, color='Model')\n",
    "        + scale_x_continuous(\n",
    "            breaks=data['Epoch'],\n",
    "            labels=data['Epoch']\n",
    "        )\n",
    "        # + theme_bw()\n",
    "        + theme(\n",
    "            figure_size=(6, 5),\n",
    "            text=element_text(size=14),\n",
    "            plot_title=element_text(\n",
    "                ha='left',\n",
    "                margin={'b': 90}\n",
    "            ),\n",
    "            # axis_text_x=element_text(angle=45, ha=\"right\")\n",
    "        )\n",
    "    )\n",
    "    p = pw.load_ggplot(p)\n",
    "    return p\n",
    "\n",
    "\n",
    "g_loss = create_plot('train/loss', 'Train loss')\n",
    "g_eer = create_plot('val/sv_cosine/voxceleb1_test_O/eer', 'EER (%)')\n",
    "g_mindcf = create_plot('val/sv_cosine/voxceleb1_test_O/mindcf', 'minDCF (p=0.01)')\n",
    "\n",
    "g_spkacc = create_plot('ssps_speaker_acc', 'Pseudo-Positives Speaker Accuracy (%)')\n",
    "g_vidacc = create_plot('ssps_video_acc', 'Pseudo-Positives Video Accuracy (%)')\n",
    "g_nmi = create_plot('ssps_kmeans_nmi', 'NMI on video labels')\n",
    "\n",
    "p = (g_loss|g_eer|g_mindcf)/(g_spkacc|g_vidacc|g_nmi)\n",
    "\n",
    "p.set_suptitle(\n",
    "    \"Convergence of SSPS\",\n",
    "    fontsize=20,\n",
    "    pad=40\n",
    ")\n",
    "p.savefig()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Intra-speaker similarity"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "p, stats = plot_intra_class_similarity('speaker', {\n",
    "    'SSL': 'models/ssps/voxceleb2/simclr_e-ecapa/baseline/vox2_embeddings.pt',\n",
    "    'SSPS': 'models/ssps/voxceleb2/simclr_e-ecapa/ssps_kmeans_25k_uni-1/vox2_embeddings.pt',\n",
    "    # 'Supervised': 'models/ssps/voxceleb2/simclr_e-ecapa/baseline_sup/vox2_embeddings.pt',\n",
    "})\n",
    "\n",
    "stats"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "p\n",
    "# pw.load_ggplot(p).savefig(\"intra_speaker_sim_vox2.pdf\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "p, stats = plot_intra_class_similarity('speaker', {\n",
    "    'SSL': 'models/ssps/voxceleb2/simclr_e-ecapa/baseline/vox1_embeddings.pt',\n",
    "    'SSPS': 'models/ssps/voxceleb2/simclr_e-ecapa/ssps_kmeans_25k_uni-1/vox1_embeddings.pt',\n",
    "    # 'Supervised': 'models/ssps/voxceleb2/simclr_e-ecapa/baseline_sup/vox1_embeddings.pt',\n",
    "})\n",
    "\n",
    "stats"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "pw.load_ggplot(p).savefig(\"intra_speaker_sim_vox1.pdf\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "p, stats = plot_intra_class_similarity('speaker', {\n",
    "    'SSL': \"../backup/ssps/OLD3/simclr/baseline/embeddings_vox1o_epoch-100.pt\",\n",
    "    # 'SSPS': \"models/ssps/voxceleb2/debugB1/vox1o_embeddings.pt\",\n",
    "    'SSPS': \"../backup/ssps/OLD3/simclr/ssps_kmeans_25k_uni-1/embeddings_vox1o_epoch-100.pt\",\n",
    "    'Supervised': '../backup/ssps/OLD3/simclr/baseline_sup/embeddings_vox1o_epoch-100.pt',\n",
    "})\n",
    "\n",
    "stats"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "p, stats = plot_intra_class_similarity('speaker', {\n",
    "    'SSL': \"models/ssps/voxceleb2/simclr/baseline/vox1o_embeddings.pt\",\n",
    "    'SSPS': \"models/ssps/voxceleb2/debugB4/vox1o_embeddings.pt\",\n",
    "    'Supervised': 'models/ssps/voxceleb2/simclr/baseline_sup/vox1o_embeddings.pt',\n",
    "})\n",
    "\n",
    "stats"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "pw.load_ggplot(p).savefig(\"intra_speaker_sim.pdf\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_intra_class_similarity('speaker', {\n",
    "    'SSL': 'models/ssps/voxceleb2/simclr/baseline/embeddings_vox2.pt',\n",
    "    'SSPS': 'models/ssps/voxceleb2/simclr/ssps_kmeans_25k_uni-1/embeddings_vox2.pt',\n",
    "})"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Inter-speaker similarity"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_inter_class_similarity('speaker', {\n",
    "    'SSL': 'models/ssps/voxceleb2/simclr_e-ecapa/baseline/vox1_embeddings.pt',\n",
    "    'SSPS': 'models/ssps/voxceleb2/simclr_e-ecapa/ssps_kmeans_25k_uni-1/vox1_embeddings_v2.pt',\n",
    "    'Supervised': 'models/ssps/voxceleb2/simclr_e-ecapa/baseline_sup/vox1_embeddings.pt',\n",
    "    # 'AAM-Softmax': 'models/ssps/voxceleb2/simclr/baseline_sup_aam/embeddings_vox1.pt',\n",
    "}, nb_samples=1000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_inter_speaker_center_similarity({\n",
    "    'SSL': 'models/ssps/voxceleb2/simclr/baseline/vox1_embeddings.pt',\n",
    "    'SSPS': 'models/ssps/voxceleb2/simclr/ssps_kmeans_25k_uni-1/vox1_embeddings.pt',\n",
    "    'Supervised': 'models/ssps/voxceleb2/simclr/baseline_sup/vox1_embeddings.pt',\n",
    "    # 'AAM-Softmax': 'models/ssps/voxceleb2/simclr/baseline_sup_aam/embeddings_vox1.pt',\n",
    "})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_inter_speaker_center_similarity({\n",
    "    'SSL': 'models/ssps/voxceleb2/simclr/baseline/embeddings_vox2.pt',\n",
    "    'SSPS': 'models/ssps/voxceleb2/simclr/ssps_kmeans_25k_uni-1/embeddings_vox2.pt',\n",
    "})"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Intra-video similarity"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_intra_class_similarity('video', {\n",
    "    'SSL': 'models/ssps/voxceleb2/simclr_e-ecapa/baseline/vox1_embeddings.pt',\n",
    "    'SSPS': 'models/ssps/voxceleb2/simclr_e-ecapa/ssps_kmeans_25k_uni-1/vox1_embeddings.pt',\n",
    "    'Supervised': 'models/ssps/voxceleb2/simclr_e-ecapa/baseline_sup/vox1_embeddings.pt',\n",
    "    # 'AAM-Softmax': 'models/ssps/voxceleb2/simclr/baseline_sup_aam/embeddings_vox1.pt',\n",
    "})"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Inter-video similarity"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_inter_class_similarity('video', {\n",
    "    'SSL': 'models/ssps/voxceleb2/simclr_e-ecapa/baseline/vox1_embeddings.pt',\n",
    "    'SSPS': 'models/ssps/voxceleb2/simclr_e-ecapa/ssps_kmeans_25k_uni-1/vox1_embeddings.pt',\n",
    "    'Supervised': 'models/ssps/voxceleb2/simclr_e-ecapa/baseline_sup/vox1_embeddings.pt',\n",
    "    # 'AAM-Softmax': 'models/ssps/voxceleb2/simclr/baseline_sup_aam/embeddings_vox1.pt',\n",
    "}, nb_samples=1000)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## t-SNE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "baseline_embeddings_vox1 = torch.load(\"models/ssps/voxceleb2/simclr_e-ecapa/baseline/vox1_embeddings.pt\")\n",
    "ssps_embeddings_vox1 = torch.load(\"models/ssps/voxceleb2/simclr_e-ecapa/ssps_kmeans_25k_uni-1/vox1_embeddings.pt\")\n",
    "# baseline_embeddings_vox2 = torch.load(\"models/ssps/voxceleb2/OLD/simclr/baseline/embeddings_vox2.pt\")\n",
    "# ssps_embeddings_vox2 = torch.load(\"models/ssps/voxceleb2/OLD/simclr/ssps_kmeans_25k_uni-1/embeddings_vox2.pt\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from plotnine import labs, theme, element_text, element_blank\n",
    "import patchworklib as pw\n",
    "\n",
    "\n",
    "def plot_tsne(baseline_embeddings, ssps_embeddings, speakers):    \n",
    "    p1, tsne_init = tsne_2D(Model(\n",
    "        embeddings=baseline_embeddings\n",
    "    ), speakers=speakers)\n",
    "\n",
    "    p2, _ = tsne_2D(Model(\n",
    "        embeddings=ssps_embeddings\n",
    "    ), speakers=speakers, init=tsne_init)\n",
    "\n",
    "\n",
    "    p1 = pw.load_ggplot(\n",
    "        p1\n",
    "        # + labs(title=\"Baseline\")\n",
    "        + theme(\n",
    "            plot_title=element_text(\n",
    "                ha='left',\n",
    "                margin={'b': 90}\n",
    "            ),\n",
    "            legend_title=element_blank(),\n",
    "            legend_text=element_text(size=18),\n",
    "            legend_position=\"none\"\n",
    "        )\n",
    "    )\n",
    "\n",
    "    p2 = pw.load_ggplot(\n",
    "        p2\n",
    "        # + labs(title=\"SSPS\")\n",
    "        + theme(\n",
    "            plot_title=element_text(\n",
    "                ha='left',\n",
    "                margin={'b': 90}\n",
    "            ),\n",
    "            legend_title=element_blank(),\n",
    "            legend_text=element_text(size=18),\n",
    "            legend_position=\"none\"\n",
    "        )\n",
    "    )\n",
    "\n",
    "    p1.savefig(\"tsne_baseline.pdf\")\n",
    "    p2.savefig(\"tsne_ssps.pdf\")\n",
    "\n",
    "    p = (p1|p2)\n",
    "    # p.set_suptitle(\n",
    "    #     \"t-SNE of speaker representations\",\n",
    "    #     fontsize=18,\n",
    "    #     pad=40\n",
    "    # )\n",
    "    p.savefig()\n",
    "    return p"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### VoxCeleb1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_tsne(\n",
    "    baseline_embeddings_vox1,\n",
    "    ssps_embeddings_vox1,\n",
    "    ['id10200', 'id10564', 'id11129', 'id10983', 'id10270', 'id11086', 'id10356', 'id10218', 'id10757', 'id10140']\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_tsne(\n",
    "    baseline_embeddings_vox1,\n",
    "    ssps_embeddings_vox1,\n",
    "    ['id10200', 'id10564', 'id11129', 'id10983', 'id10270', 'id11086', 'id10356', 'id10218', 'id10757', 'id10140']\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_tsne(\n",
    "    baseline_embeddings_vox1,\n",
    "    ssps_embeddings_vox1,\n",
    "    ['id10200', 'id10564', 'id11129', 'id10983', 'id10270', 'id11086', 'id10356', 'id10218', 'id10757', 'id10140']\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_tsne(\n",
    "    baseline_embeddings_vox1,\n",
    "    ssps_embeddings_vox1,\n",
    "    ['id10505', 'id10209', 'id10762', 'id10059', 'id10020', 'id10113', 'id10709', 'id10443', 'id11169', 'id10309']\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### VoxCeleb2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_tsne(\n",
    "    baseline_embeddings_vox2,\n",
    "    ssps_embeddings_vox2,\n",
    "    ['id00568', 'id00736', 'id00417', 'id00992', 'id00270', 'id00018', 'id00234', 'id00521', 'id00777', 'id00584']\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Find speakers for t-SNE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from plotnine import labs, theme, element_text\n",
    "import patchworklib as pw\n",
    "\n",
    "\n",
     "import random\n",
     "\n",
     "all_speakers = [key.split(\"/\")[-3] for key in baseline_embeddings_vox2.keys()]\n",
     "frequent_speakers = [s for s in set(all_speakers) if all_speakers.count(s) >= 150]\n",
     "\n",
     "for i in range(50):\n",
     "    speakers = random.sample(frequent_speakers, 10)\n",
    "    print(i, speakers)\n",
    "\n",
    "\n",
    "    p1, tsne_init = tsne_2D(Model(\n",
    "        embeddings=baseline_embeddings_vox2\n",
    "    ), speakers=speakers)\n",
    "\n",
    "    p2, _ = tsne_2D(Model(\n",
    "        embeddings=ssps_embeddings_vox2\n",
    "    ), speakers=speakers, init=tsne_init)\n",
    "\n",
    "\n",
    "    p1 = pw.load_ggplot(\n",
    "        p1\n",
    "        + labs(title=\"Baseline\")\n",
    "        + theme(plot_title=element_text(\n",
    "            ha='left',\n",
    "            margin={'b': 90}\n",
    "        ))\n",
    "    )\n",
    "    p2 = pw.load_ggplot(\n",
    "        p2\n",
    "        + labs(title=\"SSPS\")\n",
    "        + theme(plot_title=element_text(\n",
    "            ha='left',\n",
    "            margin={'b': 90}\n",
    "        ))\n",
    "    )\n",
    "    p = (p1|p2)\n",
    "\n",
    "    p.set_suptitle(\n",
    "        \"t-SNE of speaker representations\",\n",
    "        fontsize=18,\n",
    "        pad=40\n",
    "    )\n",
    "    p.savefig()\n",
    "    p.savefig(f\"output{i}.png\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Predict Vox1 metadata"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "\n",
    "def fit_mlp_on_representations(embeddings, y_key_pos, test_size=0.2):\n",
    "    keys = list(embeddings.keys())\n",
    "    \n",
    "    X = [embeddings[key][0].numpy() for key in keys]\n",
    "    if y_key_pos is None:\n",
    "        y = keys\n",
    "    else:\n",
    "        y = [key.split('/')[y_key_pos] for key in keys]\n",
    "\n",
    "    X_train, X_test, y_train, y_test = train_test_split(\n",
    "        X, y, test_size=test_size, random_state=0\n",
    "    )\n",
    "    \n",
    "    clf = LogisticRegression()\n",
    "    clf.fit(X_train, y_train)\n",
    "    \n",
    "    print(f'Train accuracy: {clf.score(X_train, y_train)}')\n",
    "    print(f'Test accuracy: {clf.score(X_test, y_test)}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "baseline_embeddings = torch.load(\"models/ssps/voxceleb2/simclr/baseline/embeddings_vox1o_epoch-109.pt\")\n",
    "ssps_embeddings = torch.load(\"models/ssps/voxceleb2/simclr/ssps_kmeans_25k_uni-1/embeddings_vox1o_epoch-109.pt\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Speaker"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "_ = fit_mlp_on_representations(baseline_embeddings, y_key_pos=-3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "_ = fit_mlp_on_representations(ssps_embeddings, y_key_pos=-3)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Video"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "_ = fit_mlp_on_representations(baseline_embeddings, y_key_pos=-2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "_ = fit_mlp_on_representations(ssps_embeddings, y_key_pos=-2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Segment"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "_ = fit_mlp_on_representations(baseline_embeddings, y_key_pos=-1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "_ = fit_mlp_on_representations(ssps_embeddings, y_key_pos=-1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Data-aug"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import pandas as pd\n",
    "# element_blank is required by legend_title=element_blank() in theme() below;\n",
    "# it was missing from this import and raised NameError on a fresh kernel.\n",
    "from plotnine import ggplot, aes, geom_line, geom_point, theme, theme_bw, labs, scale_color_manual, scale_linetype_manual, scale_y_continuous, scale_x_continuous, element_blank, element_text\n",
    "import patchworklib as pw\n",
    "\n",
    "\n",
    "res = []\n",
    "\n",
    "with open('models/ssps/voxceleb2/simclr_e-ecapa/baseline/training.json', \"r\") as f:\n",
    "    train_baseline_aug = json.load(f)\n",
    "with open('models/ssps/voxceleb2/simclr_e-ecapa/exps/baseline_aug-none/training.json', \"r\") as f:\n",
    "    train_baseline_noaug = json.load(f)\n",
    "with open('models/ssps/voxceleb2/simclr_e-ecapa/ssps_kmeans_25k_uni-1/training.json', \"r\") as f:\n",
    "    train_ssps_aug = json.load(f)\n",
    "with open('models/ssps/voxceleb2/simclr_e-ecapa/exps/ssps_aug-none/training.json', \"r\") as f:\n",
    "    train_ssps_noaug = json.load(f)\n",
    "\n",
    "res.append({\n",
    "    'Epoch': 100,\n",
    "    'Method': 'SSL',\n",
    "    \"val/sv_cosine/voxceleb1_test_O/eer\": 6.41,\n",
    "    \"val/sv_cosine/voxceleb1_test_O/mindcf\": 0.5160\n",
    "})\n",
    "\n",
    "res.append({\n",
    "    'Epoch': 100,\n",
    "    'Method': 'SSL (w/o aug)',\n",
    "    \"val/sv_cosine/voxceleb1_test_O/eer\": 6.41,\n",
    "    \"val/sv_cosine/voxceleb1_test_O/mindcf\": 0.5160\n",
    "})\n",
    "\n",
    "res.append({\n",
    "    'Epoch': 100,\n",
    "    'Method': 'SSPS',\n",
    "    \"val/sv_cosine/voxceleb1_test_O/eer\": 6.41,\n",
    "    \"val/sv_cosine/voxceleb1_test_O/mindcf\": 0.5160\n",
    "})\n",
    "\n",
    "res.append({\n",
    "    'Epoch': 100,\n",
    "    'Method': 'SSPS (w/o aug)',\n",
    "    \"val/sv_cosine/voxceleb1_test_O/eer\": 6.41,\n",
    "    \"val/sv_cosine/voxceleb1_test_O/mindcf\": 0.5160\n",
    "})\n",
    "\n",
    "for epoch, metrics in train_baseline_aug.items():\n",
    "    res.append({\n",
    "        'Epoch': int(epoch) + 1,\n",
    "        'Method': 'SSL',\n",
    "        **metrics\n",
    "    })\n",
    "\n",
    "for epoch, metrics in train_baseline_noaug.items():\n",
    "    res.append({\n",
    "        'Epoch': int(epoch) + 1,\n",
    "        'Method': 'SSL (w/o aug)',\n",
    "        **metrics\n",
    "    })\n",
    "\n",
    "for epoch, metrics in train_ssps_aug.items():\n",
    "    res.append({\n",
    "        'Epoch': int(epoch) + 1,\n",
    "        'Method': 'SSPS',\n",
    "        **metrics\n",
    "    })\n",
    "\n",
    "for epoch, metrics in train_ssps_noaug.items():\n",
    "    res.append({\n",
    "        'Epoch': int(epoch) + 1,\n",
    "        'Method': 'SSPS (w/o aug)',\n",
    "        **metrics\n",
    "    })\n",
    "\n",
    "data = pd.DataFrame(res)\n",
    "\n",
    "def create_plot(y, label):\n",
    "    \"\"\"EER-vs-epoch line plot per method, wrapped for patchworklib.\n",
    "\n",
    "    NOTE(review): this cell appears to be an earlier iteration of the\n",
    "    plotting cell that follows; consider keeping only one.\n",
    "    NOTE(review): element_blank() is used in theme() below but is not in\n",
    "    this cell's plotnine import line -- NameError on a fresh kernel unless\n",
    "    an earlier cell imported it.\n",
    "    \"\"\"\n",
    "    p = (\n",
    "        ggplot(data, aes(x='Epoch', y=y, group='Method', color='factor(Method)', linetype='factor(Method)'))\n",
    "        + geom_line()\n",
    "        + geom_point()\n",
    "        + labs(title=label, x='Epoch', y='EER (%)', color='Method', linetype='Method')\n",
    "        + scale_x_continuous(\n",
    "            # breaks=data['Epoch'],\n",
    "            # labels=data['Epoch']\n",
    "            # breaks=list(range(0, int(data['Epoch'].max()) + 5, 5))\n",
    "            # tick every epoch; the lambda blanks labels not divisible by 5\n",
    "            # (the comprehension's 'label' shadows the function's 'label' param)\n",
    "            breaks=range(0, int(data['Epoch'].max()) + 5, 1),\n",
    "            labels=lambda x: [label if label % 5 == 0 else \"\" for label in x]\n",
    "        )\n",
    "        + scale_y_continuous(\n",
    "            breaks=list(range(0, int(data[y].max()) + 2, 2))\n",
    "        )\n",
    "        + scale_color_manual(values={\n",
    "            'SSL': '#1c1c1c',\n",
    "            'SSL (w/o aug)': '#7a7a7a',\n",
    "            'SSPS': '#427aeb',\n",
    "            'SSPS (w/o aug)': '#79a5fc',\n",
    "        })\n",
    "        + scale_linetype_manual(values={\n",
    "            'SSL': 'solid',\n",
    "            'SSL (w/o aug)': 'dashed',\n",
    "            'SSPS': 'solid',\n",
    "            'SSPS (w/o aug)': 'dashed',\n",
    "        })\n",
    "        + theme_bw()\n",
    "        + theme(\n",
    "            figure_size=(6, 3),\n",
    "            text=element_text(size=14),\n",
    "            legend_position='top',\n",
    "            legend_title=element_blank(),\n",
    "            # legend_key_spacing_x=15,\n",
    "            plot_title=element_text(\n",
    "                ha='left',\n",
    "                margin={'b': 90}\n",
    "            ),\n",
    "            # axis_text_x=element_text(angle=45, ha=\"right\")\n",
    "            # legend_position=(0.875, 0.675),\n",
    "            # legend_title=element_blank(),\n",
    "            # legend_direction='vertical',\n",
    "            # legend_text=element_text(size=20),\n",
    "            # axis_text_x=element_text(angle=45, ha=\"right\")\n",
    "        )\n",
    "    )\n",
    "    # wrap as a patchworklib Brick so plots can be composed with | and /\n",
    "    p = pw.load_ggplot(p)\n",
    "    return p\n",
    "\n",
    "\n",
    "g_eer = create_plot('val/sv_cosine/voxceleb1_test_O/eer', '')\n",
    "\n",
    "# NOTE(review): the figure composes the SAME plot twice (g_eer|g_eer) --\n",
    "# probably a leftover from a two-metric layout (e.g. EER | minDCF); confirm.\n",
    "p = g_eer|g_eer\n",
    "\n",
    "# p.set_suptitle(\n",
    "#     \"Convergence of SSPS\",\n",
    "#     fontsize=20,\n",
    "#     pad=40\n",
    "# )\n",
    "p.savefig(\"dataaug.pdf\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import pandas as pd\n",
    "# element_blank is required by legend_title=element_blank() in theme() below;\n",
    "# it was missing from this import and raised NameError on a fresh kernel.\n",
    "from plotnine import ggplot, aes, geom_line, geom_point, theme, theme_bw, labs, scale_color_manual, scale_linetype_manual, scale_y_continuous, scale_x_continuous, element_blank, element_text\n",
    "import patchworklib as pw\n",
    "\n",
    "\n",
    "res = []\n",
    "\n",
    "with open('models/ssps/voxceleb2/simclr_e-ecapa/baseline/training.json', \"r\") as f:\n",
    "    train_baseline_aug = json.load(f)\n",
    "with open('models/ssps/voxceleb2/simclr_e-ecapa/exps/baseline_aug-none/training.json', \"r\") as f:\n",
    "    train_baseline_noaug = json.load(f)\n",
    "with open('models/ssps/voxceleb2/simclr_e-ecapa/ssps_kmeans_25k_uni-1/training.json', \"r\") as f:\n",
    "    train_ssps_aug = json.load(f)\n",
    "with open('models/ssps/voxceleb2/simclr_e-ecapa/exps/ssps_aug-none/training.json', \"r\") as f:\n",
    "    train_ssps_noaug = json.load(f)\n",
    "\n",
    "res.append({\n",
    "    'Epoch': 100,\n",
    "    'Method': 'SSL',\n",
    "    \"val/sv_cosine/voxceleb1_test_O/eer\": 6.41,\n",
    "    \"val/sv_cosine/voxceleb1_test_O/mindcf\": 0.5160\n",
    "})\n",
    "\n",
    "res.append({\n",
    "    'Epoch': 100,\n",
    "    'Method': 'SSL (w/o aug)',\n",
    "    \"val/sv_cosine/voxceleb1_test_O/eer\": 6.41,\n",
    "    \"val/sv_cosine/voxceleb1_test_O/mindcf\": 0.5160\n",
    "})\n",
    "\n",
    "res.append({\n",
    "    'Epoch': 100,\n",
    "    'Method': 'SSPS',\n",
    "    \"val/sv_cosine/voxceleb1_test_O/eer\": 6.41,\n",
    "    \"val/sv_cosine/voxceleb1_test_O/mindcf\": 0.5160\n",
    "})\n",
    "\n",
    "res.append({\n",
    "    'Epoch': 100,\n",
    "    'Method': 'SSPS (w/o aug)',\n",
    "    \"val/sv_cosine/voxceleb1_test_O/eer\": 6.41,\n",
    "    \"val/sv_cosine/voxceleb1_test_O/mindcf\": 0.5160\n",
    "})\n",
    "\n",
    "for epoch, metrics in train_baseline_aug.items():\n",
    "    res.append({\n",
    "        'Epoch': int(epoch) + 1,\n",
    "        'Method': 'SSL',\n",
    "        **metrics\n",
    "    })\n",
    "\n",
    "for epoch, metrics in train_baseline_noaug.items():\n",
    "    res.append({\n",
    "        'Epoch': int(epoch) + 1,\n",
    "        'Method': 'SSL (w/o aug)',\n",
    "        **metrics\n",
    "    })\n",
    "\n",
    "for epoch, metrics in train_ssps_aug.items():\n",
    "    res.append({\n",
    "        'Epoch': int(epoch) + 1,\n",
    "        'Method': 'SSPS',\n",
    "        **metrics\n",
    "    })\n",
    "\n",
    "for epoch, metrics in train_ssps_noaug.items():\n",
    "    res.append({\n",
    "        'Epoch': int(epoch) + 1,\n",
    "        'Method': 'SSPS (w/o aug)',\n",
    "        **metrics\n",
    "    })\n",
    "\n",
    "data = pd.DataFrame(res)\n",
    "\n",
    "def create_plot(y, label):\n",
    "    \"\"\"EER-vs-epoch line plot per method (linear y axis variant).\n",
    "\n",
    "    NOTE(review): near-duplicate of create_plot in the previous and next\n",
    "    cells; consider keeping a single parameterized version.\n",
    "    NOTE(review): element_blank() is used in theme() below but is not in\n",
    "    this cell's plotnine import line -- NameError on a fresh kernel unless\n",
    "    an earlier cell imported it.\n",
    "    \"\"\"\n",
    "    p = (\n",
    "        ggplot(data, aes(x='Epoch', y=y, group='Method', color='factor(Method)', linetype='factor(Method)'))\n",
    "        + geom_line(size=0.75)\n",
    "        + geom_point(size=2)\n",
    "        + labs(title=label, x='Epoch', y='EER (%)', color='Method', linetype='Method')\n",
    "        + scale_x_continuous(\n",
    "            # breaks=data['Epoch'],\n",
    "            # labels=data['Epoch']\n",
    "            # breaks=list(range(0, int(data['Epoch'].max()) + 5, 5))\n",
    "            # tick every epoch; the lambda blanks labels not divisible by 5\n",
    "            # (the comprehension's 'label' shadows the function's 'label' param)\n",
    "            breaks=range(int(data['Epoch'].min()), int(data['Epoch'].max()) + 1, 1),\n",
    "            labels=lambda x: [label if label % 5 == 0 else \"\" for label in x]\n",
    "        )\n",
    "        + scale_y_continuous(\n",
    "            breaks=list(range(0, int(data[y].max()) + 2, 2))\n",
    "        )\n",
    "        + scale_color_manual(values={\n",
    "            'SSL': '#1c1c1c',\n",
    "            'SSL (w/o aug)': '#7a7a7a',\n",
    "            'SSPS': '#427aeb',\n",
    "            'SSPS (w/o aug)': '#79a5fc',\n",
    "        })\n",
    "        + scale_linetype_manual(values={\n",
    "            'SSL': 'solid',\n",
    "            'SSL (w/o aug)': 'dashed',\n",
    "            'SSPS': 'solid',\n",
    "            'SSPS (w/o aug)': 'dashed',\n",
    "        })\n",
    "        + theme_bw()\n",
    "        + theme(\n",
    "            figure_size=(8, 4.75),\n",
    "            text=element_text(size=14),\n",
    "            legend_position='top',\n",
    "            legend_title=element_blank(),\n",
    "            legend_key_spacing_x=15,\n",
    "            plot_title=element_text(\n",
    "                ha='left',\n",
    "                margin={'b': 90}\n",
    "            ),\n",
    "            # axis_text_x=element_text(angle=45, ha=\"right\")\n",
    "            # legend_position=(0.875, 0.675),\n",
    "            # legend_title=element_blank(),\n",
    "            # legend_direction='vertical',\n",
    "            # legend_text=element_text(size=20),\n",
    "            # axis_text_x=element_text(angle=45, ha=\"right\")\n",
    "        )\n",
    "    )\n",
    "    # p = pw.load_ggplot(p)\n",
    "    return p\n",
    "\n",
    "\n",
    "p = create_plot('val/sv_cosine/voxceleb1_test_O/eer', '')\n",
    "p.save(\"dataaug.pdf\")\n",
    "p"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import pandas as pd\n",
    "from plotnine import ggplot, aes, geom_line, geom_point, theme, theme_bw, labs, scale_y_log10, coord_cartesian, scale_color_manual, scale_linetype_manual, scale_y_continuous, scale_x_continuous, element_blank, element_text\n",
    "import patchworklib as pw\n",
    "\n",
    "\n",
    "res = []\n",
    "\n",
    "with open('models/ssps/voxceleb2/simclr_e-ecapa/baseline/training.json', \"r\") as f:\n",
    "    train_baseline_aug = json.load(f)\n",
    "with open('models/ssps/voxceleb2/simclr_e-ecapa/exps/baseline_aug-none/training.json', \"r\") as f:\n",
    "    train_baseline_noaug = json.load(f)\n",
    "with open('models/ssps/voxceleb2/simclr_e-ecapa/ssps_kmeans_25k_uni-1/training.json', \"r\") as f:\n",
    "    train_ssps_aug = json.load(f)\n",
    "with open('models/ssps/voxceleb2/simclr_e-ecapa/exps/ssps_aug-none/training.json', \"r\") as f:\n",
    "    train_ssps_noaug = json.load(f)\n",
    "\n",
    "res.append({\n",
    "    'Epoch': 100,\n",
    "    'Method': 'SSL',\n",
    "    \"val/sv_cosine/voxceleb1_test_O/eer\": 6.41,\n",
    "    \"val/sv_cosine/voxceleb1_test_O/mindcf\": 0.5160\n",
    "})\n",
    "\n",
    "res.append({\n",
    "    'Epoch': 100,\n",
    "    'Method': 'SSL (w/o data-aug.)',\n",
    "    \"val/sv_cosine/voxceleb1_test_O/eer\": 6.41,\n",
    "    \"val/sv_cosine/voxceleb1_test_O/mindcf\": 0.5160\n",
    "})\n",
    "\n",
    "res.append({\n",
    "    'Epoch': 100,\n",
    "    'Method': 'SSPS',\n",
    "    \"val/sv_cosine/voxceleb1_test_O/eer\": 6.41,\n",
    "    \"val/sv_cosine/voxceleb1_test_O/mindcf\": 0.5160\n",
    "})\n",
    "\n",
    "res.append({\n",
    "    'Epoch': 100,\n",
    "    'Method': 'SSPS (w/o data-aug.)',\n",
    "    \"val/sv_cosine/voxceleb1_test_O/eer\": 6.41,\n",
    "    \"val/sv_cosine/voxceleb1_test_O/mindcf\": 0.5160\n",
    "})\n",
    "\n",
    "# NOTE(review): in this loop and the three below, the epoch-119 metric is a\n",
    "# hard-coded override of the logged value (presumably a final score recomputed\n",
    "# separately) -- document the source of these numbers, or they look like\n",
    "# silent data edits.\n",
    "for epoch, metrics in train_baseline_aug.items():\n",
    "    if int(epoch) == 119:\n",
    "        metrics[\"val/sv_cosine/voxceleb1_test_O/eer\"] = 6.30\n",
    "    res.append({\n",
    "        'Epoch': int(epoch) + 1,\n",
    "        'Method': 'SSL',\n",
    "        **metrics\n",
    "    })\n",
    "\n",
    "for epoch, metrics in train_baseline_noaug.items():\n",
    "    if int(epoch) == 119:\n",
    "        metrics[\"val/sv_cosine/voxceleb1_test_O/eer\"] = 15.00\n",
    "    res.append({\n",
    "        'Epoch': int(epoch) + 1,\n",
    "        'Method': 'SSL (w/o data-aug.)',\n",
    "        **metrics\n",
    "    })\n",
    "\n",
    "for epoch, metrics in train_ssps_aug.items():\n",
    "    if int(epoch) == 119:\n",
    "        metrics[\"val/sv_cosine/voxceleb1_test_O/eer\"] = 2.57\n",
    "    res.append({\n",
    "        'Epoch': int(epoch) + 1,\n",
    "        'Method': 'SSPS',\n",
    "        **metrics\n",
    "    })\n",
    "\n",
    "for epoch, metrics in train_ssps_noaug.items():\n",
    "    if int(epoch) == 119:\n",
    "        metrics[\"val/sv_cosine/voxceleb1_test_O/eer\"] = 2.77\n",
    "    res.append({\n",
    "        'Epoch': int(epoch) + 1,\n",
    "        'Method': 'SSPS (w/o data-aug.)',\n",
    "        **metrics\n",
    "    })\n",
    "\n",
    "\n",
    "data = pd.DataFrame(res)\n",
    "\n",
    "def create_plot(y, label):\n",
    "    \"\"\"EER-vs-epoch line plot per method, on a log-10 y axis.\n",
    "\n",
    "    Args:\n",
    "        y: metric column of `data` to plot.\n",
    "        label: figure title (empty string for none).\n",
    "\n",
    "    Returns:\n",
    "        The plotnine ggplot object (not wrapped in patchworklib).\n",
    "    \"\"\"\n",
    "    p = (\n",
    "        ggplot(data, aes(x='Epoch', y=y, group='Method', color='factor(Method)', linetype='factor(Method)'))\n",
    "        + geom_line(size=0.75)\n",
    "        + geom_point(size=2)\n",
    "        + labs(title=label, x='Epoch', y='EER (%)', color='Method', linetype='Method')\n",
    "        + scale_x_continuous(\n",
    "            # tick every epoch, but only label multiples of 5\n",
    "            breaks=range(int(data['Epoch'].min()), int(data['Epoch'].max()) + 1, 1),\n",
    "            labels=lambda x: [lab if lab % 5 == 0 else \"\" for lab in x]\n",
    "        )\n",
    "        # NOTE: a scale_y_continuous(...) that used to sit here was dead code:\n",
    "        # scale_y_log10 below replaced it (plotnine keeps only the last y scale\n",
    "        # and warned about it), so it has been removed.\n",
    "        + scale_color_manual(values={\n",
    "            'SSL': '#1c1c1c',\n",
    "            'SSL (w/o data-aug.)': '#7d7d7d',\n",
    "            'SSPS': '#2458bf',\n",
    "            'SSPS (w/o data-aug.)': '#82acff',\n",
    "        })\n",
    "        + scale_linetype_manual(values={\n",
    "            'SSL': 'solid',\n",
    "            'SSL (w/o data-aug.)': 'dotted',\n",
    "            'SSPS': 'solid',\n",
    "            'SSPS (w/o data-aug.)': 'dotted',\n",
    "        })\n",
    "        # ylim appears to be in log10 units (10**0.3 ~ 2, 10**1.2 ~ 16);\n",
    "        # coord_cartesian crops the view without dropping points.\n",
    "        + coord_cartesian(ylim=(0.3, 1.2))\n",
    "        + scale_y_log10(breaks=[2, 3, 5, 7, 10, 15])\n",
    "        + theme_bw()\n",
    "        + theme(\n",
    "            figure_size=(8, 4.75),\n",
    "            text=element_text(size=14),\n",
    "            legend_position='top',\n",
    "            legend_title=element_blank(),\n",
    "            legend_key_spacing_x=15,\n",
    "            plot_title=element_text(\n",
    "                ha='left',\n",
    "                margin={'b': 90}\n",
    "            ),\n",
    "        )\n",
    "    )\n",
    "    return p\n",
    "\n",
    "\n",
    "p = create_plot('val/sv_cosine/voxceleb1_test_O/eer', '')\n",
    "p.save(\"dataaug.pdf\")\n",
    "p"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## NMI convergence"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import pandas as pd\n",
    "from plotnine import ggplot, aes, geom_line, geom_point, theme, theme_bw, labs, scale_y_continuous, scale_color_manual, scale_linetype_manual, scale_x_continuous, element_text\n",
    "import patchworklib as pw\n",
    "\n",
    "\n",
    "res = []\n",
    "\n",
    "with open('models/ssps/voxceleb2/simclr_e-ecapa/ssps_kmeans_25k_uni-1/training.json', \"r\") as f:\n",
    "    train_ssps = json.load(f)\n",
    "\n",
    "for epoch, metrics in train_ssps.items():\n",
    "    res.append({\n",
    "        'Epoch': int(epoch),\n",
    "        'Labels': 'Speaker',\n",
    "        'Method': 'SSPS',\n",
    "        'NMI': metrics[\"ssps_kmeans_6k_nmi_speaker\"]\n",
    "    })\n",
    "    res.append({\n",
    "        'Epoch': int(epoch),\n",
    "        'Labels': 'Recording',\n",
    "        'Method': 'SSPS',\n",
    "        'NMI': metrics[\"ssps_kmeans_6k_nmi_video\"]\n",
    "    })\n",
    "\n",
    "with open('models/ssps/voxceleb2/simclr_e-ecapa/exps/baseline_nmi/training.json', \"r\") as f:\n",
    "    train_baseline = json.load(f)\n",
    "\n",
    "for epoch, metrics in train_baseline.items():\n",
    "    res.append({\n",
    "        'Epoch': int(epoch),\n",
    "        'Labels': 'Speaker',\n",
    "        'Method': 'SSL',\n",
    "        'NMI': metrics[\"ssps_kmeans_nmi_speaker\"]\n",
    "    })\n",
    "    res.append({\n",
    "        'Epoch': int(epoch),\n",
    "        'Labels': 'Recording',\n",
    "        'Method': 'SSL',\n",
    "        'NMI': metrics[\"ssps_kmeans_nmi_video\"]\n",
    "    })\n",
    "\n",
    "data = pd.DataFrame(res)\n",
    "\n",
    "p = (\n",
    "    ggplot(data, aes(x='Epoch', y='NMI', color='factor(Labels)', linetype='factor(Method)'))\n",
    "    + geom_line()\n",
    "    + geom_point()\n",
    "    + labs(title=\"\", x='Epoch', y='NMI', color='Labels', linetype='Method')\n",
    "    + scale_x_continuous(\n",
    "        breaks=range(0, int(data['Epoch'].max()) + 5, 1),\n",
    "        labels=lambda x: [label if label % 5 == 0 or label == 119 else \"\" for label in x]\n",
    "    )\n",
    "    + scale_linetype_manual(values={\n",
    "        'SSPS': 'solid',\n",
    "        'SSL': 'dashed',\n",
    "    })\n",
    "    + theme_bw()\n",
    "    + theme(\n",
    "        figure_size=(6, 3),\n",
    "        text=element_text(size=14),\n",
    "        plot_title=element_text(\n",
    "            ha='left',\n",
    "            margin={'b': 90, 'l': 200}\n",
    "        ),\n",
    "        # axis_text_x=element_text(angle=45, ha=\"right\")\n",
    "        # legend_position=(0.875, 0.675),\n",
    "        # legend_title=element_blank(),\n",
    "        # legend_direction='vertical',\n",
    "        # legend_text=element_text(size=20),\n",
    "        # axis_text_x=element_text(angle=45, ha=\"right\")\n",
    "    )\n",
    ")\n",
    "p = pw.load_ggplot(p)\n",
    "\n",
    "\n",
    "# p.set_suptitle(\n",
    "#     \"Convergence of SSPS\",\n",
    "#     fontsize=20,\n",
    "#     pad=40\n",
    "# )\n",
    "p.savefig(\"nmi.pdf\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import pandas as pd\n",
    "from plotnine import ggplot, aes, geom_line, geom_point, theme, theme_bw, labs, scale_y_continuous, scale_color_manual, scale_linetype_manual, scale_x_continuous, element_text\n",
    "import patchworklib as pw\n",
    "\n",
    "\n",
    "res = []\n",
    "\n",
    "with open('models/ssps/voxceleb2/simclr_e-ecapa/ssps_kmeans_25k_uni-1/training.json', \"r\") as f:\n",
    "    train_ssps = json.load(f)\n",
    "\n",
    "for epoch, metrics in train_ssps.items():\n",
    "    res.append({\n",
    "        'Epoch': int(epoch),\n",
    "        'Method': 'SSPS',\n",
    "        'NMI': metrics[\"ssps_kmeans_6k_nmi_speaker\"] / metrics[\"ssps_kmeans_6k_nmi_video\"]\n",
    "    })\n",
    "\n",
    "with open('models/ssps/voxceleb2/simclr_e-ecapa/exps/baseline_nmi/training.json', \"r\") as f:\n",
    "    train_baseline = json.load(f)\n",
    "\n",
    "for epoch, metrics in train_baseline.items():\n",
    "    res.append({\n",
    "        'Epoch': int(epoch),\n",
    "        'Method': 'SSL',\n",
    "        'NMI': metrics[\"ssps_kmeans_nmi_speaker\"] / metrics[\"ssps_kmeans_nmi_video\"]\n",
    "    })\n",
    "\n",
    "data = pd.DataFrame(res)\n",
    "\n",
    "p = (\n",
    "    ggplot(data, aes(x='Epoch', y='NMI', color='factor(Method)'))\n",
    "    + geom_line()\n",
    "    + geom_point()\n",
    "    + labs(title=\"\", x='Epoch', y='Speaker-to-Recording NMI Ratio', color='Method')\n",
    "    + scale_x_continuous(\n",
    "        breaks=range(0, int(data['Epoch'].max()) + 5, 1),\n",
    "        labels=lambda x: [label if label % 5 == 0 or label == 119 else \"\" for label in x]\n",
    "    )\n",
    "    + theme_bw()\n",
    "    + theme(\n",
    "        figure_size=(6, 3),\n",
    "        text=element_text(size=14),\n",
    "        plot_title=element_text(\n",
    "            ha='left',\n",
    "            margin={'b': 90, 'l': 200}\n",
    "        ),\n",
    "        # axis_text_x=element_text(angle=45, ha=\"right\")\n",
    "        # legend_position=(0.875, 0.675),\n",
    "        # legend_title=element_blank(),\n",
    "        # legend_direction='vertical',\n",
    "        # legend_text=element_text(size=20),\n",
    "        # axis_text_x=element_text(angle=45, ha=\"right\")\n",
    "    )\n",
    ")\n",
    "p = pw.load_ggplot(p)\n",
    "\n",
    "\n",
    "# p.set_suptitle(\n",
    "#     \"Convergence of SSPS\",\n",
    "#     fontsize=20,\n",
    "#     pad=40\n",
    "# )\n",
    "p.savefig(\"nmi_ratio.pdf\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import pandas as pd\n",
    "# element_blank is required by legend_title=element_blank() in theme() below;\n",
    "# it was missing from this import and raised NameError on a fresh kernel.\n",
    "from plotnine import ggplot, aes, geom_line, geom_point, theme, theme_bw, labs, scale_y_continuous, scale_color_manual, scale_linetype_manual, scale_x_continuous, element_blank, element_text\n",
    "import patchworklib as pw\n",
    "\n",
    "\n",
    "res = []\n",
    "\n",
    "with open('models/ssps/voxceleb2/simclr_e-ecapa/ssps_kmeans_25k_uni-1/training.json', \"r\") as f:\n",
    "    train_ssps = json.load(f)\n",
    "\n",
    "for epoch, metrics in train_ssps.items():\n",
    "    res.append({\n",
    "        'Epoch': int(epoch),\n",
    "        'Method': 'SSPS',\n",
    "        'NMI': metrics[\"ssps_kmeans_6k_nmi_speaker\"] / metrics[\"ssps_kmeans_6k_nmi_video\"]\n",
    "    })\n",
    "\n",
    "with open('models/ssps/voxceleb2/simclr_e-ecapa/exps/baseline_nmi/training.json', \"r\") as f:\n",
    "    train_baseline = json.load(f)\n",
    "\n",
    "for epoch, metrics in train_baseline.items():\n",
    "    res.append({\n",
    "        'Epoch': int(epoch),\n",
    "        'Method': 'SSL',\n",
    "        'NMI': metrics[\"ssps_kmeans_nmi_speaker\"] / metrics[\"ssps_kmeans_nmi_video\"]\n",
    "    })\n",
    "\n",
    "data = pd.DataFrame(res)\n",
    "\n",
    "p = (\n",
    "    ggplot(data, aes(x='Epoch', y='NMI', color='factor(Method)'))\n",
    "    + geom_line(size=0.75)\n",
    "    + geom_point(size=2)\n",
    "    + labs(title=\"\", x='Epoch', y='Speaker-to-Recording\\nNMI Ratio', color='Method')\n",
    "    + scale_x_continuous(\n",
    "        breaks=range(0, int(data['Epoch'].max()) + 5, 1),\n",
    "        labels=lambda x: [label if label % 5 == 0 or label == 119 else \"\" for label in x]\n",
    "    )\n",
    "    + scale_y_continuous(\n",
    "        breaks=[1.01, 1.03, 1.05, 1.07, 1.09]\n",
    "    )\n",
    "    + theme_bw()\n",
    "    + theme(\n",
    "        figure_size=(8, 3.75),\n",
    "        text=element_text(size=14),\n",
    "        plot_title=element_text(\n",
    "            ha='left',\n",
    "            margin={'b': 90, 'l': 200}\n",
    "        ),\n",
    "        legend_position='top',\n",
    "        legend_title=element_blank(),\n",
    "        legend_key_spacing_x=15,\n",
    "        # axis_text_x=element_text(angle=45, ha=\"right\")\n",
    "        # legend_position=(0.875, 0.675),\n",
    "        # legend_title=element_blank(),\n",
    "        # legend_direction='vertical',\n",
    "        # legend_text=element_text(size=20),\n",
    "        # axis_text_x=element_text(angle=45, ha=\"right\")\n",
    "    )\n",
    ")\n",
    "\n",
    "p.save(\"nmi_ratio.pdf\")\n",
    "\n",
    "p"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "env_py-3.13.3_torch-2.7.1",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.13.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
