{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "FBQd2mLgScts"
      },
      "outputs": [],
      "source": [
        "# Copyright 2022 Google LLC\n",
        "\n",
        "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
        "# you may not use this file except in compliance with the License.\n",
        "# You may obtain a copy of the License at\n",
        "\n",
        "#     https://www.apache.org/licenses/LICENSE-2.0\n",
        "\n",
        "# Unless required by applicable law or agreed to in writing, software\n",
        "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
        "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
        "# See the License for the specific language governing permissions and\n",
        "# limitations under the License.\n",
        "\n",
        "from colabtools import adhoc_import\n",
        "\n",
        "import functools\n",
        "import itertools\n",
        "import logging  # used by get_learning_rate below; was missing (NameError)\n",
        "import pickle\n",
        "import time\n",
        "from collections import defaultdict\n",
        "from typing import Any, Optional, Sequence\n",
        "\n",
        "import jax\n",
        "import jax.numpy as jnp\n",
        "import matplotlib.pyplot as plt\n",
        "import ml_collections\n",
        "import numpy as np\n",
        "import optax\n",
        "import tensorflow as tf\n",
        "import tensorflow_datasets as tfds\n",
        "from scipy.stats import mode\n",
        "from sklearn.cluster import AgglomerativeClustering, KMeans, MiniBatchKMeans\n",
        "from sklearn.decomposition import PCA\n",
        "from sklearn.metrics import adjusted_mutual_info_score\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "9E_DqSUcSr3H"
      },
      "outputs": [],
      "source": [
        "def predict(model, state, batch):\n",
        "  \"\"\"Get intermediate representations from a model.\"\"\"\n",
        "  variables = {\n",
        "      \"params\": state.ema_params,\n",
        "      \"batch_stats\": state.batch_stats\n",
        "  }\n",
        "  _, state = model.apply(variables, batch['image'], capture_intermediates=True, mutable=[\"intermediates\"], train=False)\n",
        "  intermediates = state['intermediates']#['stage4']['__call__'][0]\n",
        "  return intermediates\n",
        "\n",
        "\n",
        "def compute_purity(clusters, classes):\n",
        "  \"\"\"Compute purity of the cluster.\"\"\"\n",
        "  n_cluster_points = 0\n",
        "  for cluster_idx in set(clusters):\n",
        "    instance_idx = np.where(clusters == cluster_idx)[0]\n",
        "    subclass_labels = classes[instance_idx]\n",
        "    mode_stats = mode(subclass_labels)\n",
        "    n_cluster_points += mode_stats[1][0]\n",
        "  purity = n_cluster_points / len(clusters)\n",
        "  return purity\n",
        "\n",
        "def get_learning_rate(step: int,\n",
        "                      *,\n",
        "                      base_learning_rate: float,\n",
        "                      steps_per_epoch: int,\n",
        "                      num_epochs: int,\n",
        "                      warmup_epochs: int = 5):\n",
        "  \"\"\"Cosine learning rate schedule.\"\"\"\n",
        "  logging.info(\n",
        "      \"get_learning_rate(step=%s, base_learning_rate=%s, steps_per_epoch=%s, num_epochs=%s\",\n",
        "      step, base_learning_rate, steps_per_epoch, num_epochs)\n",
        "  if steps_per_epoch \u003c= 0:\n",
        "    raise ValueError(f\"steps_per_epoch should be a positive integer but was \"\n",
        "                     f\"{steps_per_epoch}.\")\n",
        "  if warmup_epochs \u003e= num_epochs:\n",
        "    raise ValueError(f\"warmup_epochs should be smaller than num_epochs. \"\n",
        "                     f\"Currently warmup_epochs is {warmup_epochs}, \"\n",
        "                     f\"and num_epochs is {num_epochs}.\")\n",
        "  epoch = step / steps_per_epoch\n",
        "  lr = cosine_decay(base_learning_rate, epoch - warmup_epochs,\n",
        "                    num_epochs - warmup_epochs)\n",
        "  warmup = jnp.minimum(1., epoch / warmup_epochs)\n",
        "  return lr * warmup\n",
        "\n",
        "config = get_config()\n",
        "learning_rate_fn = functools.partial(\n",
        "      get_learning_rate,\n",
        "      base_learning_rate=0.1,\n",
        "      steps_per_epoch=40,\n",
        "      num_epochs=config.num_epochs,\n",
        "      warmup_epochs=config.warmup_epochs)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "1QxEaQEWvqOQ"
      },
      "source": [
        "## Load dataset"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "VMCAYvNmi7d_"
      },
      "outputs": [],
      "source": [
        "ret = make_breeds_dataset(\"living17\", BREEDS_INFO_DIR, BREEDS_INFO_DIR, \n",
        "                          split=None)\n",
        "superclasses, subclass_split, label_map = ret\n",
        "train_subclasses = subclass_split[0]\n",
        "num_classes = len(train_subclasses)\n",
        "print(train_subclasses)\n",
        "print(\"Num_classes:\", num_classes)\n",
        "print(label_map)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "qvkNcDdvShw-"
      },
      "outputs": [],
      "source": [
        "DATASET = 'imagenet'\n",
        "if DATASET == 'imagenet':\n",
        "  all_subclasses = list(itertools.chain(*train_subclasses))\n",
        "  new_label_map = {}\n",
        "  for subclass_idx, sub in enumerate(all_subclasses):\n",
        "    new_label_map.update({sub: subclass_idx})\n",
        "  print(new_label_map)\n",
        "  lookup_table = tf.lookup.StaticHashTable(\n",
        "      initializer=tf.lookup.KeyValueTensorInitializer(\n",
        "          keys=tf.constant(list(new_label_map.keys()), dtype=tf.int64),\n",
        "          values=tf.constant(list(new_label_map.values()), dtype=tf.int64),\n",
        "      ),\n",
        "      default_value=tf.constant(-1, dtype=tf.int64))\n",
        "\n",
        "  dataset_builder = tfds.builder(\"imagenet2012\", try_gcs=True)\n",
        "  eval_preprocess = preprocess_spec.PreprocessFn([\n",
        "      RescaleValues(),\n",
        "      ResizeSmall(256),\n",
        "      CentralCrop(224),\n",
        "      #LabelMappingOp(lookup_table=lookup_table)\n",
        "      ], only_jax_types=True)\n",
        "else:\n",
        "  dataset_builder = tfds.builder(\"celeb_a\", try_gcs=True)\n",
        "  eval_preprocess = preprocess_spec.PreprocessFn([\n",
        "      RescaleValues(),\n",
        "      ResizeSmall(256),\n",
        "      CentralCrop(224),\n",
        "      LabelMapping()\n",
        "      ], only_jax_types=True)\n",
        "\n",
        "\n",
        "dataset_options = tf.data.Options()\n",
        "dataset_options.experimental_optimization.map_parallelization = True\n",
        "dataset_options.experimental_threading.private_threadpool_size = 48\n",
        "dataset_options.experimental_threading.max_intra_op_parallelism = 1\n",
        "\n",
        "read_config = tfds.ReadConfig(shuffle_seed=None, options=dataset_options)\n",
        "eval_ds = dataset_builder.as_dataset(\n",
        "    split=tfds.Split.VALIDATION,\n",
        "    shuffle_files=False,\n",
        "    read_config=read_config,\n",
        "    decoders=None)\n",
        "\n",
        "batch_size = 64\n",
        "if DATASET == 'imagenet':\n",
        "  eval_ds = eval_ds.filter(functools.partial(predicate, all_subclasses=all_subclasses))\n",
        "eval_ds = eval_ds.cache()\n",
        "eval_ds = eval_ds.map(eval_preprocess, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n",
        "eval_ds = eval_ds.batch(batch_size, drop_remainder=False)\n",
        "eval_ds = eval_ds.prefetch(tf.data.experimental.AUTOTUNE)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "LRoDEUIU3uGN"
      },
      "source": [
        "## Compute purity of clusters using layer_name "
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "vp9ALYoF3qKi"
      },
      "outputs": [],
      "source": [
        "model_dir = os.path.join(BASE_DIR, 'breeds/living17_400_epochs_ema_0.99_bn_0.99_squared_loss_hyperparam_tuned/')\n",
        "checkpoint_path = os.path.join(model_dir, 'checkpoints-0/ckpt-41.flax')\n",
        "model, state = create_train_state(config, jax.random.PRNGKey(0), input_shape=(8, 224, 224, 3), num_classes=num_classes, learning_rate_fn=learning_rate_fn)\n",
        "state = checkpoints.restore_checkpoint(checkpoint_path, state)\n",
        "print(\"step:\", state.step)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "iBBg848sprUl"
      },
      "outputs": [],
      "source": [
        "overcluster_factors = [1, 2, 3, 4, 5]\n",
        "metric = 'purity'\n",
        "\n",
        "for stage_prefix in ['stage4', 'stage2', 'stage3', 'stage1']:\n",
        "  all_layer_intermediates = {}\n",
        "  all_subclass_labels = []\n",
        "  all_filenames = []\n",
        "  all_images = []\n",
        "  for step, batch in enumerate(eval_ds):\n",
        "    if step % 20 == 0:\n",
        "      print(step)\n",
        "    intermediates = predict(model, state, batch)\n",
        "    labels = batch['label'].numpy()\n",
        "    bs = labels.shape[0]\n",
        "    all_subclass_labels.append(labels)\n",
        "    all_images.append(batch['image'].numpy())\n",
        "    if 'file_name' in batch:\n",
        "      all_filenames.append(batch['file_name'].numpy())\n",
        "    \n",
        "    count = 0\n",
        "    for stage in sorted(intermediates.keys()):\n",
        "      if not stage.startswith(stage_prefix):\n",
        "        continue\n",
        "      for block in sorted(intermediates[stage].keys()):\n",
        "        if not block.startswith('block'):\n",
        "          continue\n",
        "        key = '_'.join([stage, block])\n",
        "        if key not in all_layer_intermediates:\n",
        "          all_layer_intermediates[key] = []\n",
        "        all_layer_intermediates[key].append(intermediates[stage][block]['__call__'][0].reshape(bs, -1))\n",
        "  print(all_layer_intermediates.keys())\n",
        "\n",
        "  all_subclass_labels = np.hstack(all_subclass_labels)\n",
        "  all_images = np.vstack(all_images)\n",
        "  if len(all_filenames) \u003e 0:\n",
        "    all_filenames = np.hstack(all_filenames)\n",
        "    all_filenames = [f.decode(\"utf-8\") for f in all_filenames]\n",
        "  print(all_subclass_labels.shape)\n",
        "\n",
        "  for key, all_intermediates in all_layer_intermediates.items():\n",
        "      n_subclasses = len(train_subclasses[0])\n",
        "      all_intermediates = np.vstack(all_intermediates)\n",
        "      print(all_intermediates.shape)\n",
        "      result_dict = {}\n",
        "\n",
        "      for overcluster_factor in overcluster_factors:\n",
        "        all_clfs = []\n",
        "\n",
        "        for subclasses in train_subclasses:\n",
        "          subclass_idx = np.array([i for i in range(len(all_subclass_labels)) if all_subclass_labels[i] in subclasses])\n",
        "          hier_clustering = AgglomerativeClustering(n_clusters=len(subclasses)*overcluster_factor,\n",
        "                                                linkage='ward').fit(all_intermediates[subclass_idx])\n",
        "          all_clfs.append(hier_clustering)\n",
        "\n",
        "\n",
        "        metric_list = []\n",
        "        all_clf_labels = []\n",
        "        for i, clf in enumerate(all_clfs):\n",
        "          all_clf_labels.append(clf.labels_)\n",
        "          subclasses = train_subclasses[i]\n",
        "          subclass_idx = np.array([\n",
        "              i for i in range(len(all_subclass_labels))\n",
        "              if all_subclass_labels[i] in subclasses\n",
        "          ])\n",
        "          subclass_labels = all_subclass_labels[subclass_idx]\n",
        "          # Store the score in a separate variable: assigning it to `metric`\n",
        "          # clobbered the 'purity'/'ami' selector string after the first\n",
        "          # iteration, so every later `metric == ...` check silently failed\n",
        "          # and no results were written.\n",
        "          if metric == 'purity':\n",
        "            metric_value = compute_purity(clf.labels_, subclass_labels)\n",
        "          elif metric == 'ami':\n",
        "            metric_value = adjusted_mutual_info_score(subclass_labels, clf.labels_)\n",
        "          metric_list.append(metric_value)\n",
        "\n",
        "        result_dict[overcluster_factor] = metric_list\n",
        "\n",
        "      print(result_dict)\n",
        "      if metric == 'purity':\n",
        "        with gfile.Open(os.path.join(model_dir, f'class_purity_ckpt_{key}.pkl'), 'wb') as f:\n",
        "          pickle.dump(metric_list, f)\n",
        "        with gfile.Open(os.path.join(model_dir, f'clf_labels_ckpt_{key}.pkl'), 'wb') as f:\n",
        "          pickle.dump(all_clf_labels, f)\n",
        "      elif metric == 'ami':\n",
        "        with gfile.Open(os.path.join(model_dir, f'adjusted_mutual_info_ckpt_{key}.pkl'), 'wb') as f:\n",
        "          pickle.dump(result_dict, f)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "ukYZ3L1PmxwP"
      },
      "source": [
        "## Compute purity of clusters over time using second-to-last layer representations"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Hs4VutTYMJKd"
      },
      "outputs": [],
      "source": [
        "model_dir = os.path.join(BASE_DIR, 'breeds/living17_400_epochs_ema_0.99_bn_0.99/')\n",
        "ckpt_list = list(range(11, 172, 10)) \n",
        "overcluster_factors = [1, 2, 3, 4, 5]\n",
        "# Selector for the cluster-quality metric: 'purity' or 'ami'. Previously this\n",
        "# cell relied on `metric` leaking from the earlier cell (hidden state), which\n",
        "# breaks under Restart & Run All of this cell alone.\n",
        "metric = 'purity'\n",
        "n_subclasses = len(train_subclasses[0])\n",
        "for ckpt_number in ckpt_list:\n",
        "  checkpoint_path = os.path.join(model_dir, f'checkpoints-0/ckpt-{ckpt_number}.flax')\n",
        "  model, state = create_train_state(config, jax.random.PRNGKey(0), input_shape=(8, 224, 224, 3), \n",
        "                                    num_classes=num_classes, learning_rate_fn=learning_rate_fn)\n",
        "  state = checkpoints.restore_checkpoint(checkpoint_path, state)\n",
        "  print(\"Ckpt number\", ckpt_number, \"Ckpt step:\", state.step)\n",
        "\n",
        "  result_dict = {}\n",
        "  all_intermediates = []\n",
        "  all_subclass_labels = []\n",
        "  all_filenames = []\n",
        "  all_images = []\n",
        "  for step, batch in enumerate(eval_ds):\n",
        "    if step % 20 == 0:\n",
        "      print(step)\n",
        "    intermediates = predict(model, state, batch)\n",
        "    labels = batch['label'].numpy()\n",
        "    bs = labels.shape[0]\n",
        "    all_subclass_labels.append(labels)\n",
        "    all_images.append(batch['image'].numpy())\n",
        "    if 'file_name' in batch:\n",
        "      all_filenames.append(batch['file_name'].numpy())\n",
        "    all_intermediates.append(np.mean(intermediates['stage4']['__call__'][0], axis=(1,2)).reshape(bs, -1))\n",
        "\n",
        "  all_intermediates = np.vstack(all_intermediates)\n",
        "  all_subclass_labels = np.hstack(all_subclass_labels)\n",
        "  all_images = np.vstack(all_images)\n",
        "  if len(all_filenames) \u003e 0:\n",
        "    all_filenames = np.hstack(all_filenames)\n",
        "    all_filenames = [f.decode(\"utf-8\") for f in all_filenames]\n",
        "  print(all_intermediates.shape)\n",
        "  print(all_subclass_labels.shape)\n",
        "\n",
        "  for overcluster_factor in overcluster_factors:\n",
        "    all_clfs = []\n",
        "\n",
        "    for subclasses in train_subclasses:\n",
        "      subclass_idx = np.array([i for i in range(len(all_subclass_labels)) if all_subclass_labels[i] in subclasses])\n",
        "      hier_clustering = AgglomerativeClustering(n_clusters=len(subclasses)*overcluster_factor,\n",
        "                                            linkage='ward').fit(all_intermediates[subclass_idx])\n",
        "      all_clfs.append(hier_clustering)\n",
        "\n",
        "\n",
        "    metric_list = []\n",
        "    all_clf_labels = []\n",
        "    for i, clf in enumerate(all_clfs):\n",
        "      all_clf_labels.append(clf.labels_)\n",
        "      subclasses = train_subclasses[i]\n",
        "      subclass_idx = np.array([\n",
        "          i for i in range(len(all_subclass_labels))\n",
        "          if all_subclass_labels[i] in subclasses\n",
        "      ])\n",
        "      subclass_labels = all_subclass_labels[subclass_idx]\n",
        "      # Store the score in a separate variable: assigning it to `metric`\n",
        "      # clobbered the 'purity'/'ami' selector string after the first\n",
        "      # iteration, so every later `metric == ...` check silently failed.\n",
        "      if metric == 'purity':\n",
        "        metric_value = compute_purity(clf.labels_, subclass_labels)\n",
        "      elif metric == 'ami':\n",
        "        metric_value = adjusted_mutual_info_score(subclass_labels, clf.labels_)\n",
        "      metric_list.append(metric_value)\n",
        "\n",
        "    result_dict[overcluster_factor] = metric_list\n",
        "\n",
        "  print(result_dict)\n",
        "  if metric == 'purity':\n",
        "    with gfile.Open(os.path.join(model_dir, f'class_purity_ckpt_{ckpt_number}.pkl'), 'wb') as f:\n",
        "      pickle.dump(metric_list, f)\n",
        "    # NOTE: was `{ckpt_number}}` — the doubled `}}` is an escaped literal\n",
        "    # brace in an f-string, yielding filenames like 'clf_labels_ckpt_41}.pkl'.\n",
        "    with gfile.Open(os.path.join(model_dir, f'clf_labels_ckpt_{ckpt_number}.pkl'), 'wb') as f:\n",
        "      pickle.dump(all_clf_labels, f)\n",
        "  elif metric == 'ami':\n",
        "    with gfile.Open(os.path.join(model_dir, f'adjusted_mutual_info_ckpt_{ckpt_number}.pkl'), 'wb') as f:\n",
        "      pickle.dump(result_dict, f)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "v1FS57CS0-pm"
      },
      "source": [
        "### CelebA"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "4I9UTqJaALSl"
      },
      "outputs": [],
      "source": [
        "model_dir = os.path.join(BASE_DIR, 'celebA/lr_0.0001_reg_0.0001_ema_0.99/')\n",
        "ckpt_list = list(range(2, 22, 2))\n",
        "overcluster_factor = 10\n",
        "num_classes = 2\n",
        "for ckpt_number in ckpt_list:\n",
        "  checkpoint_path = os.path.join(model_dir, f'checkpoints-0/ckpt-{ckpt_number}.flax')\n",
        "  model, state = create_train_state(config, jax.random.PRNGKey(0), input_shape=(8, 224, 224, 3), \n",
        "                                    num_classes=2, learning_rate_fn=learning_rate_fn)\n",
        "  state = checkpoints.restore_checkpoint(checkpoint_path, state)\n",
        "  print(\"Ckpt step:\", state.step)\n",
        "\n",
        "  all_intermediates = []\n",
        "  all_labels = []\n",
        "  all_filenames = []\n",
        "  all_images = []\n",
        "  for step, batch in enumerate(eval_ds):\n",
        "    if step % 50 == 0:\n",
        "      print(step)\n",
        "    intermediates = predict(model, state, batch)\n",
        "    labels = batch['label'].numpy()\n",
        "    bs = labels.shape[0]\n",
        "    all_labels.append(labels)\n",
        "    all_images.append(batch['image'].numpy())\n",
        "    if 'file_name' in batch:\n",
        "      all_filenames.append(batch['file_name'].numpy())\n",
        "    all_intermediates.append(np.mean(intermediates['stage4']['__call__'][0], axis=(1,2)).reshape(bs, -1))\n",
        "\n",
        "  all_intermediates = np.vstack(all_intermediates)\n",
        "  all_labels = np.hstack(all_labels)\n",
        "  all_images = np.vstack(all_images)\n",
        "  if len(all_filenames) \u003e 0:\n",
        "    all_filenames = np.hstack(all_filenames)\n",
        "    all_filenames = [f.decode(\"utf-8\") for f in all_filenames]\n",
        "  print(all_intermediates.shape)\n",
        "  print(all_labels.shape)\n",
        "\n",
        "  all_clfs = []\n",
        "\n",
        "  for subclass in range(num_classes):\n",
        "    subclass_idx = np.array([i for i in range(len(all_labels)) if all_labels[i] == subclass])\n",
        "    #print(len(subclass_idx))\n",
        "    hier_clustering = AgglomerativeClustering(n_clusters=overcluster_factor,\n",
        "                                          linkage='ward').fit(all_intermediates[subclass_idx])\n",
        "    all_clfs.append(hier_clustering)\n",
        "\n",
        "\n",
        "  purity_list = []\n",
        "  all_clf_labels = []\n",
        "  for i, clf in enumerate(all_clfs):\n",
        "    all_clf_labels.append(clf.labels_)\n",
        "    subclass = list(range(num_classes))[i]\n",
        "    subclass_idx = np.array([\n",
        "        i for i in range(len(all_labels))\n",
        "        if all_labels[i] == subclass\n",
        "    ])\n",
        "    subclass_labels = all_labels[subclass_idx]\n",
        "    purity = compute_purity(clf.labels_, subclass_labels)\n",
        "    purity_list.append(purity)\n",
        "\n",
        "  with gfile.Open(os.path.join(model_dir, f'class_purity_ckpt_{ckpt_number}.pkl'), 'wb') as f:\n",
        "    pickle.dump(purity_list, f)\n",
        "  with gfile.Open(os.path.join(model_dir, f'clf_labels_ckpt_{ckpt_number}.pkl'), 'wb') as f:\n",
        "    pickle.dump(all_clf_labels, f)\n",
        "  print(ckpt_number)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "GINPXcupmmPR"
      },
      "source": [
        "## Concat second-to-last representations from 2 checkpoints"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "x5TFZP1vAGhU"
      },
      "outputs": [],
      "source": [
        "model_dir = os.path.join(BASE_DIR, 'breeds/breeds_training_level_3_nsubclass_20_400_epochs_ema_0.99_seed_2/')\n",
        "concat_intermediates = []\n",
        "for ckpt_number in [151, 161]:\n",
        "  # Join with model_dir — every other cell does so (see the cells above);\n",
        "  # the bare relative path would resolve against the CWD instead.\n",
        "  checkpoint_path = os.path.join(model_dir, f'checkpoints-0/ckpt-{ckpt_number}.flax')\n",
        "  model, state = create_train_state(config, jax.random.PRNGKey(0), input_shape=(8, 224, 224, 3), \n",
        "                                    num_classes=num_classes, learning_rate_fn=learning_rate_fn)\n",
        "  state = checkpoints.restore_checkpoint(checkpoint_path, state)\n",
        "  print(\"Ckpt step:\", state.step)\n",
        "\n",
        "  all_intermediates = []\n",
        "  all_subclass_labels = []\n",
        "  all_filenames = []\n",
        "  all_images = []\n",
        "  for step, batch in enumerate(eval_ds):\n",
        "    if step % 20 == 0:\n",
        "      print(step)\n",
        "    intermediates = predict(model, state, batch)\n",
        "    labels = batch['label'].numpy()\n",
        "    bs = labels.shape[0]\n",
        "    all_subclass_labels.append(labels)\n",
        "    all_images.append(batch['image'].numpy())\n",
        "    if 'file_name' in batch:\n",
        "      all_filenames.append(batch['file_name'].numpy())\n",
        "    all_intermediates.append(np.mean(intermediates['stage4']['__call__'][0], axis=(1,2)).reshape(bs, -1))\n",
        "\n",
        "\n",
        "  overcluster_factor = 5\n",
        "  n_subclasses = len(train_subclasses[0])\n",
        "  all_intermediates = np.vstack(all_intermediates)\n",
        "  concat_intermediates.append(all_intermediates)\n",
        "  all_subclass_labels = np.hstack(all_subclass_labels)\n",
        "  all_images = np.vstack(all_images)\n",
        "  if len(all_filenames) \u003e 0:\n",
        "    all_filenames = np.hstack(all_filenames)\n",
        "    all_filenames = [f.decode(\"utf-8\") for f in all_filenames]\n",
        "  print(all_intermediates.shape)\n",
        "  print(all_subclass_labels.shape)\n",
        "\n",
        "all_clfs = []\n",
        "concat_intermediates = np.hstack(concat_intermediates)\n",
        "for subclasses in train_subclasses:\n",
        "  subclass_idx = np.array([i for i in range(len(all_subclass_labels)) if all_subclass_labels[i] in subclasses])\n",
        "  #print(len(subclass_idx))\n",
        "  hier_clustering = AgglomerativeClustering(n_clusters=len(subclasses)*overcluster_factor,\n",
        "                                        linkage='ward').fit(concat_intermediates[subclass_idx])\n",
        "  all_clfs.append(hier_clustering)\n",
        "\n",
        "\n",
        "purity_list = []\n",
        "all_clf_labels = []\n",
        "for i, clf in enumerate(all_clfs):\n",
        "  all_clf_labels.append(clf.labels_)\n",
        "  subclasses = train_subclasses[i]\n",
        "  subclass_idx = np.array([\n",
        "      i for i in range(len(all_subclass_labels))\n",
        "      if all_subclass_labels[i] in subclasses\n",
        "  ])\n",
        "  subclass_labels = all_subclass_labels[subclass_idx]\n",
        "  purity = compute_purity(clf.labels_, subclass_labels)\n",
        "  purity_list.append(purity)\n",
        "\n",
        "print(ckpt_number)\n",
        "print(purity_list)"
      ]
    }
  ],
  "metadata": {
    "colab": {
      "collapsed_sections": [],
      "last_runtime": {
        "build_target": "//learning/deepmind/dm_python:dm_notebook3",
        "kind": "private"
      },
      "private_outputs": true,
      "provenance": [
        {
          "file_id": "1G2dgBquSTGNiNSFSs-5Jo7VQj23g9-Xr",
          "timestamp": 1664583252985
        }
      ]
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
