{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {},
        "colab_type": "code",
        "id": "iuV1r5o4WBcc"
      },
      "outputs": [],
      "source": [
        "##### Copyright 2020 Google LLC. All Rights Reserved.\n",
        "\n",
        "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
        "# you may not use this file except in compliance with the License.\n",
        "# You may obtain a copy of the License at\n",
        "#\n",
        "# https://www.apache.org/licenses/LICENSE-2.0\n",
        "#\n",
        "# Unless required by applicable law or agreed to in writing, software\n",
        "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
        "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
        "# See the License for the specific language governing permissions and\n",
        "# limitations under the License."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "both",
        "colab": {},
        "colab_type": "code",
        "id": "OAPlCvmmWRJt"
      },
      "outputs": [],
      "source": [
        "# A fast numpy reference implementation of GHT, as per\n",
        "# \"A Generalization of Otsu's Method and Minimum Error Thresholding\"\n",
        "# Jonathan T. Barron, ECCV, 2020\n",
        "\n",
        "\n",
        "import numpy as np\n",
        "\n",
        "# Prefix sums of z over every split point (all but the last entry).\n",
        "csum = lambda z: np.cumsum(z)[:-1]\n",
        "# Suffix sums of z, aligned elementwise with csum's output.\n",
        "dsum = lambda z: np.cumsum(z[::-1])[-2::-1]\n",
        "argmax = lambda x, f: np.mean(x[:-1][f == np.max(f)])  # Use the mean for ties.\n",
        "# Lower-bound away from zero so divisions and logs stay finite.\n",
        "clip = lambda z: np.maximum(1e-30, z)\n",
        "\n",
        "def preliminaries(n, x):\n",
        "  \"\"\"Some math that is shared across multiple algorithms.\n",
        "\n",
        "  Args:\n",
        "    n: histogram counts, all non-negative.\n",
        "    x: sorted histogram bin locations, or None for arange(len(n)).\n",
        "\n",
        "  Returns:\n",
        "    (x, w0, w1, p0, p1, mu0, mu1, d0, d1), one value per split point.\n",
        "  \"\"\"\n",
        "  assert np.all(n \u003e= 0)\n",
        "  x = np.arange(len(n), dtype=n.dtype) if x is None else x\n",
        "  assert np.all(x[1:] \u003e= x[:-1])\n",
        "  # w0/w1: total mass on each side of every split, clipped to avoid /0.\n",
        "  w0 = clip(csum(n))\n",
        "  w1 = clip(dsum(n))\n",
        "  # p0/p1: fraction of the total mass on each side of the split.\n",
        "  p0 = w0 / (w0 + w1)\n",
        "  p1 = w1 / (w0 + w1)\n",
        "  # mu0/mu1: per-side means; d0/d1: per-side unnormalized variances.\n",
        "  mu0 = csum(n * x) / w0\n",
        "  mu1 = dsum(n * x) / w1\n",
        "  d0 = csum(n * x**2) - w0 * mu0**2\n",
        "  d1 = dsum(n * x**2) - w1 * mu1**2\n",
        "  return x, w0, w1, p0, p1, mu0, mu1, d0, d1\n",
        "\n",
        "def GHT(n, x=None, nu=0, tau=0, kappa=0, omega=0.5):\n",
        "  \"\"\"Generalized Histogram Thresholding.\n",
        "\n",
        "  Args:\n",
        "    n: histogram counts, all non-negative.\n",
        "    x: sorted histogram bin locations, or None for arange(len(n)).\n",
        "    nu: strength of the variance prior (see sichi2_var / GHT_prob).\n",
        "    tau: scale of the variance prior.\n",
        "    kappa: concentration of the Beta prior on the mixture weight.\n",
        "    omega: mode-controlling parameter of that Beta prior, in [0, 1].\n",
        "\n",
        "  Returns:\n",
        "    (threshold, scores): the score-maximizing split location (mean over\n",
        "    ties) and the per-split scores themselves.\n",
        "  \"\"\"\n",
        "  assert nu \u003e= 0\n",
        "  assert tau \u003e= 0\n",
        "  assert kappa \u003e= 0\n",
        "  assert omega \u003e= 0 and omega \u003c= 1\n",
        "  x, w0, w1, p0, p1, _, _, d0, d1 = preliminaries(n, x)\n",
        "  # Posterior variance estimates for each side of the split.\n",
        "  v0 = clip((p0 * nu * tau**2 + d0) / (p0 * nu + w0))\n",
        "  v1 = clip((p1 * nu * tau**2 + d1) / (p1 * nu + w1))\n",
        "  # Per-side scores; their sum is maximized over splits.\n",
        "  f0 = -d0 / v0 - w0 * np.log(v0) + 2 * (w0 + kappa *      omega)  * np.log(w0)\n",
        "  f1 = -d1 / v1 - w1 * np.log(v1) + 2 * (w1 + kappa * (1 - omega)) * np.log(w1)\n",
        "  return argmax(x, f0 + f1), f0 + f1"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {},
        "colab_type": "code",
        "id": "1AVU-8lrr7rV"
      },
      "outputs": [],
      "source": [
        "# Implementations of the existing algorithms that are special cases of GHT().\n",
        "\n",
        "def Otsu(n, x=None):\n",
        "  \"\"\"Otsu's method: maximizes the between-class variance over splits.\"\"\"\n",
        "  x, w0, w1, _, _, mu0, mu1, _, _ = preliminaries(n, x)\n",
        "  # Between-class variance at each split.\n",
        "  o = w0 * w1 * (mu0 - mu1)**2\n",
        "  return argmax(x, o), o\n",
        "\n",
        "def Otsu_equivalent(n, x=None):\n",
        "  \"\"\"Equivalent to Otsu's method, phrased via within-class variances.\"\"\"\n",
        "  x, _, _, _, _, _, _, d0, d1 = preliminaries(n, x)\n",
        "  # Total second moment minus the within-class variances (d0 + d1);\n",
        "  # matches Otsu()'s scores (see testOtsuImplementationsMatch).\n",
        "  o = np.sum(n) * np.sum(n * x**2) - np.sum(n * x)**2 - np.sum(n) * (d0 + d1)\n",
        "  return argmax(x, o), o\n",
        "\n",
        "def MET(n, x=None):\n",
        "  \"\"\"Minimum Error Thresholding.\"\"\"\n",
        "  x, w0, w1, _, _, _, _, d0, d1 = preliminaries(n, x)\n",
        "  # The criterion is a loss (lower is better), so the threshold is its\n",
        "  # argmin, implemented as the argmax of the negated scores.\n",
        "  ell = (1 + w0 * np.log(clip(d0 / w0)) + w1 * np.log(clip(d1 / w1))\n",
        "      - 2 * (w0 * np.log(clip(w0))      + w1 * np.log(clip(w1))))\n",
        "  return argmax(x, -ell), ell  # argmin()\n",
        "\n",
        "def wprctile(n, x=None, omega=0.5):\n",
        "  \"\"\"Weighted percentile, with weighted median as default.\"\"\"\n",
        "  assert omega \u003e= 0 and omega \u003c= 1\n",
        "  x, _, _, p0, p1, _, _, _, _ = preliminaries(n, x)\n",
        "  # Cross-entropy between (omega, 1 - omega) and the per-split mass\n",
        "  # fractions; minimized where ~omega of the mass lies below the split.\n",
        "  h = -omega * np.log(clip(p0)) - (1. - omega) * np.log(clip(p1))\n",
        "  return argmax(x, -h), h  # argmin()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "code",
        "colab": {},
        "colab_type": "code",
        "id": "HN2ry8wCrC9p"
      },
      "outputs": [],
      "source": [
        "# An additional equivalent implementation of GHT that uses an explicit for-loop\n",
        "# over splits of the histogram instead of the cumulative sum approach used by\n",
        "# the paper. This additional implementation is intended to allow for easier\n",
        "# comparisons with similar implementations of MET or Otsu's method, and to\n",
        "# allow existing implementations of MET or Otsu's method to be easily\n",
        "# generalized into implementations of GHT.\n",
        "\n",
        "def GHT_forloop(n, x=None, nu=0, tau=0, kappa=0, omega=0.5):\n",
        "  \"\"\"An implementation of GHT() written using for loops.\"\"\"\n",
        "  assert np.all(n \u003e= 0)\n",
        "  x = np.arange(len(n), dtype=n.dtype) if x is None else x\n",
        "  assert np.all(x[1:] \u003e= x[:-1])\n",
        "  assert nu \u003e= 0\n",
        "  assert tau \u003e= 0\n",
        "  assert kappa \u003e= 0\n",
        "  assert omega \u003e= 0 and omega \u003c= 1\n",
        "\n",
        "  # Totals over the full histogram; the per-split right-side sums below\n",
        "  # are obtained by subtracting the running left-side accumulators.\n",
        "  n_sum = np.sum(n)\n",
        "  nx_sum = np.sum(n * x)\n",
        "  nxx_sum = np.sum(n * x**2)\n",
        "\n",
        "  max_score, n_c, nx_c, nxx_c = -np.inf, 0, 0, 0\n",
        "  for i in range(len(n) - 1):\n",
        "    # Accumulate zeroth/first/second moments of the left side.\n",
        "    n_c += n[i]\n",
        "    nx_c += n[i] * x[i]\n",
        "    nxx_c += n[i] * x[i]**2\n",
        "    w0 = clip(n_c)\n",
        "    w1 = clip(n_sum - n_c)\n",
        "    p0 = w0 / n_sum\n",
        "    p1 = w1 / n_sum\n",
        "    # Unnormalized variances, clamped at 0 against floating-point error.\n",
        "    d0 = np.maximum(0, nxx_c - nx_c**2 / w0)\n",
        "    d1 = np.maximum(0, (nxx_sum - nxx_c) - (nx_sum - nx_c)**2 / w1)\n",
        "    # Same per-side scores as in GHT().\n",
        "    v0 = clip((p0 * nu * tau**2 + d0) / (p0 * nu + w0))\n",
        "    v1 = clip((p1 * nu * tau**2 + d1) / (p1 * nu + w1))\n",
        "    f0 = -d0 / v0 - w0 * np.log(v0) + 2 * (w0 + kappa *      omega)  * np.log(w0)\n",
        "    f1 = -d1 / v1 - w1 * np.log(v1) + 2 * (w1 + kappa * (1 - omega)) * np.log(w1)\n",
        "    score = f0 + f1\n",
        "\n",
        "    # Argmax where the mean() is used for ties.\n",
        "    if score \u003e max_score:\n",
        "      max_score, t_numer, t_denom = score, 0, 0\n",
        "    if score == max_score:\n",
        "      t_numer += x[i]\n",
        "      t_denom += 1\n",
        "  return t_numer / t_denom"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {},
        "colab_type": "code",
        "id": "3ivNtyEjrOh3"
      },
      "outputs": [],
      "source": [
        "# An implementation of GHT in terms of the actual underlying ECLL maximization\n",
        "# that reduces to GHT() above. This implementation is highly inefficient, but\n",
        "# can be used to verify the correctness of GHT in terms of its Bayesian\n",
        "# motivation, and may be useful in deriving further probabilistic extensions.\n",
        "\n",
        "from tensorflow_probability import distributions as tfd\n",
        "\n",
        "def sichi2_var(n, resid, nu, tau):\n",
        "  \"\"\"Posterior estimate of variance for a scaled inverse chi-squared.\"\"\"\n",
        "  # Prior strength nu at scale tau, blended with the weighted second\n",
        "  # moment of the residuals.\n",
        "  return (nu * tau**2 + np.sum(n * resid**2)) / (nu + np.sum(n))\n",
        "\n",
        "def GHT_prob(n, x=None, nu=0, tau=0, kappa=0, omega=0.5):\n",
        "  \"\"\"An implementation of GHT() using probability distributions.\"\"\"\n",
        "  assert np.all(n \u003e= 0)\n",
        "  x = np.arange(len(n), dtype=n.dtype) if x is None else x\n",
        "  assert np.all(x[1:] \u003e= x[:-1])\n",
        "  assert nu \u003e= 0\n",
        "  assert tau \u003e= 0\n",
        "  assert kappa \u003e= 0\n",
        "  assert omega \u003e= 0 and omega \u003c= 1\n",
        "\n",
        "  n_sum = np.sum(n)\n",
        "  lls = np.zeros(len(n) - 1)\n",
        "  for i in range(len(lls)):\n",
        "    # Split the histogram: bins at-or-below i vs. bins above i.\n",
        "    n0, n1 = n[:(i+1)], n[(i+1):]\n",
        "    x0, x1 = x[:(i+1)], x[(i+1):]\n",
        "    w0 = clip(np.sum(n0))\n",
        "    w1 = clip(np.sum(n1))\n",
        "    p0 = clip(w0 / n_sum)\n",
        "    p1 = clip(w1 / n_sum)\n",
        "    mu0 = np.sum(n0 * x0) / w0\n",
        "    mu1 = np.sum(n1 * x1) / w1\n",
        "    # Posterior variances, with the prior strength nu partitioned between\n",
        "    # the two sides according to p0/p1.\n",
        "    var0 = sichi2_var(n0, x0 - mu0, p0 * nu, tau)\n",
        "    var1 = sichi2_var(n1, x1 - mu1, p1 * nu, tau)\n",
        "    # Expected complete log-likelihood of the two-Normal mixture plus a\n",
        "    # Beta prior on the mixture weight (p0 nudged below 1 for stability).\n",
        "    lls[i] = ((np.sum(n0 * (np.log(p0) + tfd.Normal(mu0, np.sqrt(var0)).log_prob(x0)))\n",
        "             + np.sum(n1 * (np.log(p1) + tfd.Normal(mu1, np.sqrt(var1)).log_prob(x1))))\n",
        "             + tfd.Beta(kappa * omega + 1, kappa * (1 - omega) + 1).log_prob(np.minimum(p0, 1-1e-15)))\n",
        "  return np.mean(x[:-1][lls == np.max(lls)]), lls"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {},
        "colab_type": "code",
        "id": "vofQsGEZsbKC"
      },
      "outputs": [],
      "source": [
        "# Unit tests for verifying that the math in the paper is right.\n",
        "\n",
        "import numpy as np\n",
        "import scipy.special\n",
        "import tensorflow as tf\n",
        "from tensorflow_probability import distributions as tfd\n",
        "\n",
        "\n",
        "def run_tests():\n",
        "  \"\"\"Defines and runs all unit tests for the thresholding functions above.\"\"\"\n",
        "  def fit_affine(x, y):\n",
        "    # Least-squares fit of y ~ aff[0] * x + aff[1]; also returns residuals.\n",
        "    aff = np.linalg.lstsq(np.stack([x, np.ones_like(x)]).T, y)[0]\n",
        "    y_ = x * aff[0] + aff[1]\n",
        "    resid = y - y_\n",
        "    return aff, resid\n",
        "\n",
        "\n",
        "  class GhtTest(tf.test.TestCase):\n",
        "\n",
        "    def affine_equivalence(self, x, y, atol=1e-4, rtol=1e-4):\n",
        "      # Asserts that x and y agree up to a positive affine transformation.\n",
        "\n",
        "      def center(z):\n",
        "        z = z - np.mean(z)\n",
        "        z = z / np.maximum(1e-15, np.max(np.abs(z)))\n",
        "        return z\n",
        "\n",
        "      x = center(x)\n",
        "      y = center(y)\n",
        "      aff = fit_affine(x, y)[0]\n",
        "      self.assertGreater(aff[0], 0)\n",
        "      y_ = x * aff[0] + aff[1]\n",
        "      x_ = (y - aff[1]) / aff[0]\n",
        "      x_max = np.abs(np.maximum(np.max(x), np.max(x_)))\n",
        "      y_max = np.abs(np.maximum(np.max(y), np.max(y_)))\n",
        "      self.assertAllClose(x / x_max, x_ / x_max, atol=atol, rtol=rtol)\n",
        "      self.assertAllClose(y / y_max, y_ / y_max, atol=atol, rtol=rtol)\n",
        "\n",
        "    def setUp(self):\n",
        "      super(GhtTest, self).setUp()\n",
        "      np.random.seed(0)\n",
        "      self.data = []\n",
        "      # Random histograms (with some empty bins) over sorted, shifted bins.\n",
        "      for _ in range(1000):\n",
        "        n = np.maximum(0., np.random.uniform(low=-1., high=5., size=100))\n",
        "        dx = np.random.uniform(low=0.1, high=1., size=len(n))\n",
        "        x = np.cumsum(dx)\n",
        "        x = x - np.random.uniform() * x[-1]\n",
        "        self.data.append((n, x))\n",
        "\n",
        "    def testOtsuImplementationsMatch(self):\n",
        "      \"\"\"Otsu() and Otsu_equivalent() agree on thresholds and scores.\"\"\"\n",
        "      for (n, x) in self.data:\n",
        "        t, scores = Otsu(n, x)\n",
        "        t_, scores_ = Otsu_equivalent(n, x)\n",
        "        self.assertAllClose(t, t_)\n",
        "        self.assertAllClose(scores, scores_)\n",
        "\n",
        "    def testMetLlMatchesScore(self):\n",
        "      \"\"\"Sanity check an equation.\"\"\"\n",
        "      for (n, x) in self.data:\n",
        "        _, score = GHT(n, x)\n",
        "        _, ell = MET(n, x)\n",
        "        self.assertAllClose(\n",
        "            score[5:-5], 1 - np.sum(n) - ell[5:-5], atol=1e-2, rtol=1e-2)\n",
        "\n",
        "    def testGhtApproachesOtsu(self):\n",
        "      \"\"\"Ght with nu \u003e\u003e 0 and tau near 0 is equivalent to otsu.\"\"\"\n",
        "      nu = 1e30\n",
        "      for (n, x) in self.data:\n",
        "        x, w0, w1, _, _, _, _, d0, d1 = preliminaries(n, x)\n",
        "\n",
        "        tau = 7.\n",
        "        _, thresh_scores = GHT(n, x, nu=nu, tau=tau)\n",
        "        ref_scores = (-(d0 + d1) / tau**2 + 2 * w0 * np.log(w0 / tau) +\n",
        "                      2 * w1 * np.log(w1 / tau))\n",
        "        self.assertAllClose(thresh_scores, ref_scores)\n",
        "\n",
        "        tau = 1e-6\n",
        "        t, thresh_scores = GHT(n, x, nu=nu, tau=tau)\n",
        "        ref_scores = -(d0 + d1) / tau**2\n",
        "        self.assertAllClose(thresh_scores, ref_scores)\n",
        "\n",
        "        t_, _ = Otsu(n, x)\n",
        "        self.assertAllClose(t, t_)\n",
        "\n",
        "    def testGhtApproachesEntropy(self):\n",
        "      \"\"\"Ght with nu \u003e\u003e 0 and tau \u003e\u003e 0 is equivalent to stupid entropy.\"\"\"\n",
        "      nu = 1e30\n",
        "      tau = 1e15\n",
        "      for (n, x) in self.data:\n",
        "        _, scores = GHT(n, x, nu=nu, tau=tau)\n",
        "\n",
        "        x, _, _, p0, p1, _, _, _, _ = preliminaries(n, x)\n",
        "\n",
        "        n_sum = np.sum(n)\n",
        "        neg_h = p0 * np.log(p0) + p1 * np.log(p1)\n",
        "        self.assertAllClose(neg_h,\n",
        "                            scores / (2 * n_sum) - np.log(n_sum) + np.log(tau))\n",
        "\n",
        "    def testGhtApproachesMet(self):\n",
        "      \"\"\"Ght with default params is ~MET.\"\"\"\n",
        "      err_count = 0\n",
        "      for (n, x) in self.data:\n",
        "        t, eclls = GHT(n, x)\n",
        "        t_, eclls_ = MET(n, x)\n",
        "        self.affine_equivalence(-eclls[5:-5], eclls_[5:-5], atol=1e-2, rtol=1e-2)\n",
        "        if t != t_:\n",
        "          err_count += 1\n",
        "      print('err_count = {}/{}'.format(err_count, len(self.data)))\n",
        "      self.assertLess(err_count, (len(self.data) // 10))\n",
        "\n",
        "    def testGhtDefaultWorks(self):\n",
        "      \"\"\"Ght with no x argument works.\"\"\"\n",
        "      for (n, _) in self.data:\n",
        "        x = np.arange(len(n))\n",
        "        t, scores = GHT(n, x)\n",
        "        t_, scores_ = GHT(n)\n",
        "        self.assertAllClose(t, t_)\n",
        "        self.affine_equivalence(scores, scores_)\n",
        "\n",
        "    def testGhtApproachesPercentile(self):\n",
        "      # Cranking up the regularizer reproduces a percentile.\n",
        "      for (n, x) in self.data:\n",
        "        omega = np.random.uniform(0.01, 0.99)\n",
        "        t, _ = GHT(n, x, kappa=1e30, omega=omega)\n",
        "        t_, _ = wprctile(n, x, omega)\n",
        "        self.assertAllClose(t, t_, atol=1e-3, rtol=1e-3)\n",
        "\n",
        "    def testOtsuIsScaleInvariant(self):\n",
        "      \"\"\"Scaling the histogram counts does not change Otsu's threshold.\"\"\"\n",
        "      for (n, x) in self.data:\n",
        "        n_mult = np.exp(np.random.normal())\n",
        "        t, _ = Otsu(n, x)\n",
        "        t_, _ = Otsu(n_mult * n, x)\n",
        "        self.assertAllClose(t, t_)\n",
        "\n",
        "    def testGhtIsScaleInvariantWrtN(self):\n",
        "      \"\"\"Scaling n, nu, and kappa together does not change the threshold.\"\"\"\n",
        "      for (n, x) in self.data:\n",
        "        nu = 2.**np.random.normal(scale=6)\n",
        "        tau = 2.**np.random.normal(scale=3)\n",
        "        kappa = 2.**np.random.normal(scale=6)\n",
        "        omega = np.random.uniform(low=0.001, high=0.999)\n",
        "        mult = np.exp(np.random.normal())\n",
        "        t, _ = GHT(n, x, nu, tau, kappa, omega)\n",
        "        t_, _ = GHT(mult * n, x, mult * nu, tau, mult * kappa, omega)\n",
        "        self.assertAllClose(t, t_)\n",
        "\n",
        "    def testGhtWithLargeKappaMatchesApproximation(self):\n",
        "      for (n, x) in self.data:\n",
        "        kappa = 1e30\n",
        "        omega = np.random.uniform(low=0.01, high=0.99)\n",
        "        t, scores = GHT(n, x, 0, 0, kappa, omega)\n",
        "        _, _, _, p0, p1, _, _, _, _ = preliminaries(n, x)\n",
        "        n_sum = np.sum(n)\n",
        "        scores_ = 2 * (\n",
        "            np.log(n_sum) * (n_sum + kappa) + kappa * (omega * np.log(p0) +\n",
        "                                                      (1 - omega) * np.log(p1)))\n",
        "        t_ = argmax(x, scores_)\n",
        "        self.assertAllClose(t, t_)\n",
        "        self.assertAllClose(scores / kappa, scores_ / kappa)\n",
        "\n",
        "    def testGhtWithLargeKappaMatchesBetaDistribution(self):\n",
        "      for (n, x) in self.data:\n",
        "        kappa = 1e30\n",
        "        omega = np.random.uniform(low=0.01, high=0.99)\n",
        "        _, scores = GHT(n, x, 0, 0, kappa, omega)\n",
        "        _, _, _, p0, _, _, _, _, _ = preliminaries(n, x)\n",
        "        n_sum = np.sum(n)\n",
        "        alpha = omega * kappa + 1\n",
        "        beta = (1 - omega) * kappa + 1\n",
        "        scores_ = 2 * (\n",
        "            np.log(n_sum) *\n",
        "            (n_sum + kappa) + tfd.Beta(alpha, beta).log_prob(p0) +\n",
        "            scipy.special.betaln(alpha, beta))\n",
        "        mask = np.isfinite(scores_)\n",
        "        assert np.mean(mask) \u003e 0.9\n",
        "        self.assertAllClose(\n",
        "            scores[mask] / kappa, scores_[mask] / kappa, atol=1e-3, rtol=1e-3)\n",
        "\n",
        "    def testMoGIsScaleInvariant(self):\n",
        "      err_count = 0\n",
        "      for (n, x) in self.data:\n",
        "        n_mult = np.exp(np.random.normal())\n",
        "        t, _ = MET(n, x)\n",
        "        t_, _ = MET(n_mult * n, x)\n",
        "        if t != t_:\n",
        "          err_count += 1\n",
        "      # This test is flaky because of numerical weirdness when variances are low.\n",
        "      print('err_count = {}/{}'.format(err_count, len(self.data)))\n",
        "      self.assertLess(err_count, (len(self.data) // 5))\n",
        "\n",
        "    def testPercentileIsScaleInvariant(self):\n",
        "      for (n, x) in self.data:\n",
        "        omega = np.random.uniform(low=0.001, high=0.999)\n",
        "        n_mult = np.exp(np.random.normal())\n",
        "        t, _ = wprctile(n, x, omega)\n",
        "        t_, _ = wprctile(n_mult * n, x, omega)\n",
        "        self.assertAllClose(t, t_)\n",
        "\n",
        "    def testDecsumCanBeImplementedInASweep(self):\n",
        "      \"\"\"dsum(n) equals sum(n) minus csum(n), so one cumsum suffices.\"\"\"\n",
        "      for (n, _) in self.data:\n",
        "        n_cum = csum(n)\n",
        "        n_dec = dsum(n)\n",
        "        n_dec_ = np.sum(n) - n_cum\n",
        "        self.assertAllClose(n_dec, n_dec_)\n",
        "\n",
        "    def testGhtIsAffineInvariantWrtX(self):\n",
        "      for (n, x) in self.data:\n",
        "        nu = 2.**np.random.normal(scale=6)\n",
        "        tau = 2.**np.random.normal(scale=3)\n",
        "        kappa = 2.**np.random.normal(scale=6)\n",
        "        omega = np.random.uniform(low=0.001, high=0.999)\n",
        "        shift = np.random.normal()\n",
        "        t, scores = GHT(n, x, nu, tau, kappa, omega)\n",
        "\n",
        "        mult = np.exp(np.random.normal())\n",
        "        shift = np.random.normal()\n",
        "\n",
        "        f = lambda z: mult * z + shift\n",
        "        t_, scores_ = GHT(n, f(x), nu, mult * tau, kappa, omega)\n",
        "\n",
        "        self.assertAllClose(f(t), t_)\n",
        "\n",
        "        # `shift` has no effect on scores, `mult` introduces a global shift.\n",
        "        shift = np.mean(scores_ - scores)\n",
        "        self.assertAllClose(scores + shift, scores_)\n",
        "\n",
        "    def testGhtMatchesGhtForLoop(self):\n",
        "      err_count = 0\n",
        "      for (n, x) in self.data:\n",
        "        nu = 2.**np.random.normal(scale=6)\n",
        "        tau = 2.**np.random.normal(scale=3)\n",
        "        kappa = 2.**np.random.normal(scale=6)\n",
        "        omega = np.random.uniform(low=0.001, high=0.999)\n",
        "        t = GHT(n, x, nu, tau, kappa, omega)[0]\n",
        "        t_ = GHT_forloop(n, x, nu, tau, kappa, omega)\n",
        "        if t != t_:\n",
        "          err_count += 1\n",
        "      print('err_count = {}/{}'.format(err_count, len(self.data)))\n",
        "      self.assertLess(err_count, (len(self.data) // 500))\n",
        "\n",
        "    def testGhtMatchesGhtProb(self):\n",
        "      # Subsampling the data, because this test is slow.\n",
        "      self.data = self.data[0:30]\n",
        "      err_count = 0\n",
        "      for (n, x) in self.data:\n",
        "        nu = 2.**np.random.normal(scale=6)\n",
        "        tau = 2.**np.random.normal(scale=3)\n",
        "        kappa = np.float64(2.**np.random.normal(scale=6))\n",
        "        omega = np.float64(np.random.uniform(low=0.001, high=0.999))\n",
        "        t, scores = GHT(n, x, nu, tau, kappa, omega)\n",
        "        t_, lls = GHT_prob(n, x, nu, tau, kappa, omega)\n",
        "        if t != t_:\n",
        "          err_count += 1\n",
        "        # There's some flaky floating point math at the edges.\n",
        "        self.affine_equivalence(scores[5:-5], lls[5:-5])\n",
        "      print('err_count = {}/{}'.format(err_count, len(self.data)))\n",
        "      self.assertLess(err_count, (len(self.data) // 3))\n",
        "\n",
        "  # There is almost certainly a better way to do this...\n",
        "  ght_test = GhtTest()\n",
        "  ght_test.setUp()\n",
        "  ght_test.testOtsuImplementationsMatch()\n",
        "  ght_test.testMetLlMatchesScore()\n",
        "  ght_test.testGhtApproachesOtsu()\n",
        "  ght_test.testGhtApproachesEntropy()\n",
        "  ght_test.testGhtApproachesMet()\n",
        "  ght_test.testGhtDefaultWorks()\n",
        "  ght_test.testGhtApproachesPercentile()\n",
        "  ght_test.testOtsuIsScaleInvariant()\n",
        "  ght_test.testGhtIsScaleInvariantWrtN()\n",
        "  ght_test.testGhtWithLargeKappaMatchesApproximation()\n",
        "  ght_test.testGhtWithLargeKappaMatchesBetaDistribution()\n",
        "  ght_test.testMoGIsScaleInvariant()\n",
        "  ght_test.testPercentileIsScaleInvariant()\n",
        "  ght_test.testDecsumCanBeImplementedInASweep()\n",
        "  ght_test.testGhtIsAffineInvariantWrtX()\n",
        "  ght_test.testGhtMatchesGhtForLoop()\n",
        "  ght_test.testGhtMatchesGhtProb()\n",
        "\n",
        "run_tests()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "both",
        "colab": {},
        "colab_type": "code",
        "id": "FJZmr1ChFTb1"
      },
      "outputs": [],
      "source": [
        "# Code for reproducing Table 1 in the paper.\n",
        "\n",
        "import PIL\n",
        "import sklearn.metrics\n",
        "\n",
        "FOLDER = '/path/to/HDIBCO2016/'\n",
        "\n",
        "def process(image, gt):\n",
        "  \"\"\"Converts an image and its ground truth into evaluation inputs.\"\"\"\n",
        "  # Collapse color channels with a max, then histogram intensities into\n",
        "  # 256 unit-width bins centered on the integer values 0..255.\n",
        "  image = np.amax(image, 2)\n",
        "  hist_n, hist_edge = np.histogram(image, np.arange(-0.5, 256))\n",
        "  hist_x = (hist_edge[1:] + hist_edge[:-1]) / 2.\n",
        "  return (image, hist_x, hist_n), gt \u003e 0\n",
        "\n",
        "\n",
        "def compute_psnr(x, y, max_val):\n",
        "  \"\"\"Peak signal-to-noise ratio between x and y, in dB.\"\"\"\n",
        "  mse = np.mean((np.float32(x) - np.float32(y))**2)\n",
        "  return 10 * np.log10(max_val**2 / mse)\n",
        "\n",
        "\n",
        "def compute_drd(gt_flipped, pred_flipped, n=2, b=8):\n",
        "  \"\"\"Distance Reciprocal Distortion metric between two boolean masks.\n",
        "\n",
        "  Both inputs are logically inverted before scoring. `n` is the half-width\n",
        "  of the (2n+1) x (2n+1) weight window, and `b` is the block size used to\n",
        "  count non-uniform blocks (NUBN) for normalization.\n",
        "  \"\"\"\n",
        "  pred = ~pred_flipped\n",
        "  gt = ~gt_flipped\n",
        "\n",
        "  m = 2 * n + 1\n",
        "\n",
        "  # Inverse-distance weights over the window, zeroed at the center and\n",
        "  # normalized to sum to one.\n",
        "  ns = np.arange(-n, n + 1)\n",
        "  ij = np.stack(np.meshgrid(ns, ns, indexing='ij'), -1)\n",
        "  weight = 1. / np.maximum(1, np.sqrt(np.sum(ij**2, -1)))\n",
        "  weight[n, n] = 0\n",
        "  weight /= np.sum(weight)\n",
        "\n",
        "  import tensorflow as tf  # Because numpy doesn't have im2col\n",
        "\n",
        "  # NaN-pad so out-of-image neighbors drop out via nan_to_num below.\n",
        "  gt_pad = np.pad(np.float32(gt), [(n,n), (n,n)], 'constant', constant_values=np.nan)\n",
        "  gt_blocks = tf.image.extract_patches(gt_pad[None,:,:,None],\n",
        "                [1, m, m, 1], strides=[1, 1, 1, 1], rates=[1, 1, 1, 1], padding='VALID')[0].numpy()\n",
        "  diff = np.nan_to_num((pred[..., None] -  gt_blocks) * np.reshape(weight,  [1, 1, -1]) * (gt != pred)[...,None], 0)\n",
        "  drdk = np.sum(np.abs(np.reshape(diff, [-1])))\n",
        "\n",
        "  # NUBN: the number of b x b blocks of gt that are not uniformly 0 or 1.\n",
        "  pad = np.int32(np.ceil(np.array(gt.shape) / b) * b) - np.array(gt.shape)\n",
        "  gt_pad = np.pad(gt, [(0, pad[0]), (0, pad[1])], 'constant')\n",
        "  gt_blocks = tf.image.extract_patches(np.int32(gt_pad)[None,:,:,None],\n",
        "                [1, b, b, 1], strides=[1, b, b, 1], rates=[1, 1, 1, 1], padding='VALID')[0].numpy()\n",
        "  nubn = np.sum(np.any(gt_blocks[..., 0:1] != gt_blocks, -1))\n",
        "\n",
        "  drd = drdk / nubn\n",
        "  return drd\n",
        "\n",
        "\n",
        "def eval_dataset(data, eval_fun):\n",
        "  \"\"\"Scores a thresholding function over a dataset.\n",
        "\n",
        "  Args:\n",
        "    data: iterable of ((image, hist_x, hist_n), gt) tuples from process().\n",
        "    eval_fun: callable mapping (hist_n, hist_x) to a scalar threshold.\n",
        "\n",
        "  Returns:\n",
        "    (metrics, levels): per-image rows of [F1%, PSNR, DRD], and the\n",
        "    per-image thresholds.\n",
        "  \"\"\"\n",
        "  metrics = []\n",
        "  levels = []\n",
        "  for (image, hist_x, hist_n), gt in data:\n",
        "    t = eval_fun(hist_n, hist_x)\n",
        "    pred = image \u003e t\n",
        "    metrics.append((\n",
        "        100. * sklearn.metrics.f1_score(\n",
        "            np.reshape(~gt, [-1]), np.reshape(~pred, [-1])),\n",
        "        compute_psnr(gt, pred, 1.),\n",
        "        compute_drd(gt, pred),\n",
        "    ))\n",
        "    levels.append(t)\n",
        "  return np.array(metrics), np.array(levels)\n",
        "\n",
        "\n",
        "def stat_fun(z):\n",
        "  \"\"\"Interleaves per-column means and standard deviations into a flat list.\"\"\"\n",
        "  return list(np.reshape(np.stack([np.mean(z, 0), np.std(z, 0)], 1), [-1]))\n",
        "\n",
        "\n",
        "data = []\n",
        "# Load the 10 dataset images and their ground-truth masks from FOLDER.\n",
        "for i in range(10):\n",
        "  image = np.array(PIL.Image.open(FOLDER + 'images/' + str(i) + '.png'))\n",
        "  gt = np.array(PIL.Image.open(FOLDER + 'gt/' + str(i) + '.png'))\n",
        "  data.append(process(image, gt))\n",
        "\n",
        "# Fixed per-image thresholds used as the 'Oracle Global Threshold'\n",
        "# baseline in the table below (presumably tuned per image offline).\n",
        "oracle_levels = [104, 155, 123, 151, 127, 145, 202, 179, 177, 129]\n",
        "oracle_scores = []\n",
        "for datum, level in zip(data, oracle_levels):\n",
        "  oracle_scores.append(eval_dataset([datum], lambda x, n: level)[0])\n",
        "oracle_scores = np.concatenate(oracle_scores, 0)\n",
        "\n",
        "# Taken from \\cite{dibco2016} \n",
        "raw_table = [\n",
        "  ['Kligler \\cite{Kligler2017,Katz2007,Howe2013}', 87.61, 6.99, 18.11, 4.27, 5.21, 5.28],\n",
        "  ['Hassa{\\\\\"i}ne \\etal - 3 \\cite{Hassane2011,Hassane2012,dibco2016}', 88.72, 4.68, 18.45, 3.41, 3.86, 1.57],\n",
        "  ['Hassa{\\\\\"i}ne \\etal - 2 \\cite{Hassane2012,dibco2016}', 88.47, 4.45, 18.29, 3.35, 3.93, 1.37],\n",
        "  ['Roe \\\u0026 de Mello \\cite{dibco2016}', 87.97, 5.17, 18.00, 3.68, 4.49, 2.65],\n",
        "  ['Hassa{\\\\\"i}ne \\etal- 1 \\cite{Hassane2011,dibco2016}', 88.22, 4.80, 18.22, 3.41, 4.01, 1.49 ],\n",
        "  ['Nafchi \\etal - 2 \\cite{dibco2016,Nafchi}', 88.11, 4.63, 18.00, 3.41, 4.38, 1.65],\n",
        "  ['Nafchi \\etal - 1 \\cite{dibco2016,Nafchi}', 87.60, 4.85, 17.86, 3.51, 4.51, 1.62],\n",
        "  ['Tensmeyer \\\u0026 Martinez \\cite{tensmeyer2017,Wolf2002TextLE,FCN}', 85.57, 6.75, 17.50, 3.43, 5.00, 2.60],\n",
        "  ['de Almeida \\\u0026 de Mello \\cite{dibco2016}', 86.24, 5.79, 17.52, 3.42, 5.25, 2.88],\n",
        "  ['Khan \\\u0026 Mollah \\cite{dibco2016}', 84.32, 6.81, 16.59, 2.99, 6.94, 3.33],\n",
        "  ['Raza \\cite{dibco2016}', 76.28, 9.71, 14.21, 2.21, 15.14, 9.42],\n",
        "  ['Kefali \\etal \\cite{Sari2014TextEF,dibco2016}', 76.10, 13.81, 15.35, 3.19, 9.16, 4.87],\n",
        "  ['Otsu\\'s Method \\cite{dibco2016,Otsu}', 86.61, 7.26, 17.80, 4.51, 5.56, 4.44],\n",
        "  ['Sauvola \\cite{dibco2016,Sauvola2000AdaptiveDI}', 82.52, 9.65, 16.42, 2.87, 7.49, 3.97]]\n",
        "\n",
        "experiments = [\n",
        "  ('Otsu\\'s Method (Our Impl.) \\\\cite{Otsu}', Otsu, None),\n",
        "  ('{\\\\bf GHT (Otsu Case)}', GHT, (1e60, 1e-15, 0, 0)),\n",
        "  ('{\\\\bf GHT (MET Case)}', GHT, (0, 0, 0, 0)),\n",
        "  ('{\\\\bf GHT (wprctile Case)}', GHT, (0, 0, 1e60, 2**-3.75)),\n",
        "  ('{\\\\bf GHT (No wprctile)}', GHT, (2.**50.5, 2**0.125, 0, 0)),\n",
        "  ('{\\\\bf GHT}', GHT, (2.**29.5, 2**3.125, 2.**22.25, 2.**-3.25)),\n",
        "]\n",
        "\n",
        "raw_table = [(name + ' \u0026\u0026\u0026\u0026 ', a, b, c, d, e, f) for name, a, b, c, d, e, f in raw_table]\n",
        "\n",
        "experiment_results = []\n",
        "for name, algo, params in experiments:\n",
        "  if params is None:\n",
        "    name += ' \u0026\u0026\u0026\u0026'\n",
        "    eval_fun = lambda x, n: algo(x, n)[0]\n",
        "  else:\n",
        "    for p in params:\n",
        "      if p == 0:\n",
        "        name += ' \u0026 - '\n",
        "      else:\n",
        "        if p \u003e= 1e25 or p \u003c= 1e-14:\n",
        "          name += ' \u0026 $10^{{{:g}}}$ '.format(np.log10(p))\n",
        "        else:\n",
        "          name += ' \u0026 $2^{{{:g}}}$ '.format(np.log2(p))\n",
        "    eval_fun = lambda x, n: algo(x, n, *params)[0]\n",
        "  metrics, levels = eval_dataset(data, eval_fun)\n",
        "  raw_table += [[name] + stat_fun(metrics)]\n",
        "  experiment_results.append(levels)\n",
        "\n",
        "sorted1 = np.unique([z[1] for z in raw_table])[::-1]\n",
        "sorted2 = np.unique([z[3] for z in raw_table])[::-1]\n",
        "sorted3 = np.unique([z[5] for z in raw_table])\n",
        "for i in np.argsort([z[1] for z in raw_table]):\n",
        "  row = raw_table[i]\n",
        "  if sorted3[0] == row[5]:\n",
        "    tag3 = '\\\\cellcolor{red}'\n",
        "  elif sorted3[1] == row[5]:\n",
        "    tag3 = '\\\\cellcolor{orange}'\n",
        "  elif sorted3[2] == row[5]:\n",
        "    tag3 = '\\\\cellcolor{yellow}'\n",
        "  else:\n",
        "    tag3 = ''\n",
        "  if sorted2[0] == row[3]:\n",
        "    tag2 = '\\\\cellcolor{red}'\n",
        "  elif sorted2[1] == row[3]:\n",
        "    tag2 = '\\\\cellcolor{orange}'\n",
        "  elif sorted2[2] == row[3]:\n",
        "    tag2 = '\\\\cellcolor{yellow}'\n",
        "  else:\n",
        "    tag2 = ''\n",
        "  if sorted1[0] == row[1]:\n",
        "    tag1 = '\\\\cellcolor{red}'\n",
        "  elif sorted1[1] == row[1]:\n",
        "    tag1 = '\\\\cellcolor{orange}'\n",
        "  elif sorted1[2] == row[1]:\n",
        "    tag1 = '\\\\cellcolor{yellow}'\n",
        "  else:\n",
        "    tag1 = ''\n",
        "  print(\n",
        "      '{:90} \u0026 {:18} ${:0.2f} \\\\pm {:0.2f}$ \u0026 {:18} ${:0.2f} \\\\pm {:0.2f}$ \u0026 {:18} ${:0.2f} \\\\pm {:0.2f}$ \\\\\\\\'\n",
        "      .format(row[0], tag1, row[1], row[2], tag2, row[3], row[4], tag3, row[5], row[6]))\n",
        "\n",
        "print('\\\\hline')\n",
        "print('{:90} \u0026 ${:0.2f} \\\\pm {:0.2f}$ \u0026 ${:0.2f} \\\\pm {:0.2f}$ \u0026 ${:0.2f} \\\\pm {:0.2f}$'.format(\n",
        "    *tuple(['\\\\rowcolor{lightgray} Oracle Global Threshold \u0026\u0026\u0026\u0026 '] +\n",
        "            list(stat_fun(oracle_scores)))))\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {},
        "colab_type": "code",
        "id": "e-LrclVL6q0i"
      },
      "outputs": [],
      "source": [
        "# Let's make some figures.\n",
        "\n",
        "import matplotlib.pyplot as plt\n",
        "from matplotlib import rc\n",
        "\n",
        "plt.rc('font', family='serif')\n",
        "rc('text', usetex=True)\n",
        "plt.rcParams['text.latex.preamble']=[r\"\\usepackage{amsmath}\"]\n",
        "\n",
        "colors = [(0.8680, 0.4061, 0.2064), (0.9236, 0.6709, 0.0588),\n",
        "          (0.4791, 0.6820, 0.2079), (0.0643, 0.6587, 0.9103),\n",
        "          (0.1465, 0.5280, 0.7790), (0.5217, 0.2286, 0.5803),\n",
        "          (0.7158, 0.2820, 0.3646)]\n",
        "\n",
        "SAVE_FIGURES = False"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {},
        "colab_type": "code",
        "id": "3vDJxU5RncV4"
      },
      "outputs": [],
      "source": [
        "# Make all result figures.\n",
        "\n",
        "all_names = [e[0][5:-1] for e in experiments[1:]]\n",
        "all_names[0] = 'Otsu\\'s Method'\n",
        "all_names[1] = 'MET'\n",
        "all_names[2] = r'wprctile ($\\omega={0.0743}$)'\n",
        "all_levels = experiment_results[1:]\n",
        "all_names.insert(0, 'Oracle Threshold')\n",
        "all_levels.insert(0, oracle_levels)\n",
        "\n",
        "del all_names[3:5]\n",
        "del all_levels[3:5]\n",
        "\n",
        "for i_data in range(10):\n",
        "\n",
        "  (im, x, n), gt = data[i_data]\n",
        "  ts = [r[i_data] for r in all_levels]\n",
        "\n",
        "  plt.figure(i_data*2, figsize=(16,10))\n",
        "\n",
        "  plt.subplot(3, 3, 1)\n",
        "  plt.imshow(im, cmap='gray')\n",
        "  PIL.Image.fromarray(im).save(f'/tmp/results_{i_data}_im.png')\n",
        "  plt.axis('off')\n",
        "  plt.title('Input Image')\n",
        "\n",
        "  plt.subplot(3, 3, 2)\n",
        "  vis = 255*np.uint8(gt)\n",
        "  PIL.Image.fromarray(vis).save(f'/tmp/results_{i_data}_gt.png')\n",
        "  plt.imshow(vis, cmap='gray')\n",
        "  plt.axis('off')\n",
        "  plt.title('Ground Truth Mask');\n",
        "\n",
        "  for i, (t, name) in enumerate(zip(ts, all_names)):\n",
        "    plt.subplot(3, 3, i+3)\n",
        "    vis = 255*np.uint8(np.stack([im \u003e t, gt \u0026 (im \u003e t), gt], -1))\n",
        "    PIL.Image.fromarray(vis).save(f'/tmp/results_{i_data}_{name[:3]}_output.png')\n",
        "    plt.imshow(vis)\n",
        "    plt.axis('off')\n",
        "    plt.title(name)\n",
        "\n",
        "  n = np.histogram(im, np.arange(-0.5, 256))[0]\n",
        "  n1 = np.histogram(im[~gt], np.arange(-0.5, 256))[0]\n",
        "  top = np.max(n)*1.05\n",
        "\n",
        "  plt.figure(i_data*2 + 1, figsize=(12,3))\n",
        "  plt.bar(x, n, width=x[1] - x[0], color=[0.7, 0.7, 0.7], edgecolor=None, zorder=1)\n",
        "  plt.bar(x, n1, width=x[1] - x[0], color=[0.4, 0.4, 0.4], edgecolor=None, zorder=3)\n",
        "  plt.gca().set_xlim([0, 256])\n",
        "  plt.gca().set_xlabel(r'$\\boldsymbol{x}$', fontsize=15)\n",
        "  plt.gca().set_ylabel(r'$\\boldsymbol{n}$', fontsize=15)\n",
        "  plt.gca().set_ylim([0, top])\n",
        "\n",
        "  cols = [(0,0,0), colors[0], colors[2], colors[4]]\n",
        "\n",
        "  for i, (t, name) in enumerate(zip(ts, all_names)):\n",
        "    plt.plot([t,t], [0, top], label=name, color=cols[i], linewidth=2, zorder=2, linestyle='dotted' if i == 0 else None)\n",
        "  plt.legend(loc='upper left')\n",
        "\n",
        "  if SAVE_FIGURES:\n",
        "    plt.savefig(f'/tmp/results_{i_data}_bar.png', dpi=300, facecolor='w', edgecolor='w',\n",
        "            orientation='portrait', papertype=None, format=None,\n",
        "            transparent=False, bbox_inches='tight', pad_inches=0.0,\n",
        "            frameon=None, metadata=None)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {},
        "colab_type": "code",
        "id": "im1wjpbRIsIr"
      },
      "outputs": [],
      "source": [
        "x = np.linspace(0, 1, 64)\n",
        "n = np.zeros_like(x)\n",
        "n[4:-9] = 1.\n",
        "n[-5:-2] = 1\n",
        "n = n[::-1]\n",
        "\n",
        "np.random.seed(0)\n",
        "n *= np.random.uniform(low=0.97, high=1.03, size=len(n))\n",
        "\n",
        "tau = 0.01\n",
        "\n",
        "plt.figure(-1, figsize=(10, 5))\n",
        "plt.Axes(plt.gcf(), [0., 0., 1., 1.])\n",
        "plt.subplots_adjust(wspace=0.1)\n",
        "plt.subplot(1,2,1)\n",
        "\n",
        "log_nus = np.linspace(-2, 16, 10000)\n",
        "marker_size=8\n",
        "\n",
        "shift = (x[1] - x[0]) / 2\n",
        "\n",
        "levels = [GHT(n, x, nu=1e60, tau=1e-30)[0]]\n",
        "plt.scatter(log_nus, shift + levels * np.ones_like(log_nus), marker_size, colors[6], label=r'$\\operatorname{Otsu}$')\n",
        "\n",
        "level, scores = GHT(n, x)\n",
        "plt.scatter(log_nus, [shift + level] * np.ones_like(log_nus), marker_size, colors[4], label=r'$\\operatorname{MET}$')\n",
        "\n",
        "levels = []\n",
        "for log_nu in log_nus:\n",
        "  nu = 2**log_nu\n",
        "  level, scores = GHT(n, x, nu=nu, tau=tau)\n",
        "  levels.append(level)\n",
        "plt.scatter(log_nus, shift + levels, marker_size, [0.25]*3, label=r'$\\operatorname{GHT}$')\n",
        "\n",
        "plt.xlim(-1, 15)\n",
        "plt.ylim(0, 1.)\n",
        "plt.gca().legend(loc='upper left')\n",
        "plt.gca().set_xlabel(r'$\\nu$', fontsize=15)\n",
        "plt.grid(True)\n",
        "plt.gca().yaxis.set_label_position(\"right\")\n",
        "plt.gca().yaxis.tick_right()\n",
        "z = plt.gca().set_ylabel(r'$\\boldsymbol{x}$', fontsize=15)\n",
        "z.set_rotation(0)\n",
        "plt.gca().yaxis.set_label_coords(1.05, 0.53)\n",
        "plt.yticks([0., 0.2, 0.4, 0.6, 0.8, 1.], ['']*6)\n",
        "xtics = plt.xticks()[0][1:-1]\n",
        "plt.xticks(xtics, ['$2^{{{:g}}}$'.format(x) for x in xtics])\n",
        "\n",
        "plt.subplot(1,2,2)\n",
        "plt.ylim(0, 1.)\n",
        "plt.barh(x, n, height=x[1] - x[0], color=[0.7, 0.7, 0.7], edgecolor=[0.4, 0.4, 0.4])\n",
        "plt.gca().set_xlabel(r'$\\boldsymbol{n}$', fontsize=15)\n",
        "\n",
        "if SAVE_FIGURES:\n",
        "  plt.savefig('/tmp/nu_sweep.png', dpi=300, facecolor='w', edgecolor='w',\n",
        "          orientation='portrait', papertype=None, format=None,\n",
        "          transparent=False, bbox_inches='tight', pad_inches=0.0,\n",
        "          frameon=None, metadata=None)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {},
        "colab_type": "code",
        "id": "3I4tDUthxGzR"
      },
      "outputs": [],
      "source": [
        "np.random.seed(3)\n",
        "\n",
        "span = 0.2\n",
        "count = 3\n",
        "data = []\n",
        "for i in range(count):\n",
        "  mu = (i+0.5)/count\n",
        "  scale = span/6\n",
        "  data.append(mu + np.clip(np.random.normal(0, 1, 10000000), -4, 4) * scale)\n",
        "data = np.concatenate(data)\n",
        "n, hist_edge = np.histogram(data, np.linspace(0, 1, 64))\n",
        "n = n / np.sum(n)\n",
        "x = (hist_edge[1:] + hist_edge[:-1]) / 2\n",
        "\n",
        "plt.figure(-1, figsize=(10, 5))\n",
        "plt.Axes(plt.gcf(), [0., 0., 1., 1.])\n",
        "plt.subplots_adjust(wspace=0.1)\n",
        "plt.subplot(1,2,1)\n",
        "\n",
        "shift = (x[1] - x[0]) / 2\n",
        "\n",
        "omegas = np.linspace(1e-16, 1 - 1e-16, 10000)\n",
        "marker_size=8\n",
        "\n",
        "levels = [GHT(n, x, nu=1e60, tau=1e-30)[0]]\n",
        "plt.scatter(omegas, shift + levels * np.ones_like(omegas), marker_size, colors[6], label=r'$\\operatorname{Otsu}$')\n",
        "\n",
        "level, scores = GHT(n, x)\n",
        "plt.scatter(omegas, [shift + level] * np.ones_like(omegas), marker_size, colors[4], label=r'$\\operatorname{MET}$')\n",
        "\n",
        "tau = 0.01\n",
        "kappa = 1e80\n",
        "levels = []\n",
        "for omega in omegas:\n",
        "  levels.append(GHT(n, x, nu=nu, tau=tau, kappa=kappa, omega=omega)[0])\n",
        "plt.scatter(omegas, shift + levels, marker_size, colors[1], label=r'$\\operatorname{wprctile}$')\n",
        "\n",
        "nu = 200\n",
        "kappa = .1\n",
        "tau = 0.01\n",
        "levels = []\n",
        "for omega in omegas:\n",
        "  levels.append(GHT(n, x, nu=nu, tau=tau, kappa=kappa, omega=omega)[0])\n",
        "plt.scatter(omegas, shift + levels, marker_size, [0.25]*3, label=r'$\\operatorname{GHT}$')\n",
        "\n",
        "\n",
        "plt.ylim(0, 1.)\n",
        "plt.xlim(0, 1.)\n",
        "plt.gca().legend()\n",
        "plt.gca().yaxis.set_label_position(\"right\")\n",
        "plt.gca().yaxis.tick_right()\n",
        "z.set_rotation(0)\n",
        "plt.gca().yaxis.set_label_coords(1.05, 0.53)\n",
        "plt.gca().grid(True)\n",
        "z = plt.gca().set_ylabel(r'$\\boldsymbol{x}$', fontsize=15)\n",
        "z.set_rotation(0)\n",
        "plt.gca().yaxis.set_label_coords(1.05, 0.53)\n",
        "plt.xticks(np.linspace(0, 1, 7), ['$0$', '$1/6$', '$1/3$', '$1/2$', '$2/3$', '$5/6$', '$1$'])\n",
        "plt.yticks(np.linspace(0, 1, 7), [''] * 7)\n",
        "plt.gca().set_xlabel(r'$\\omega$', fontsize=15)\n",
        "\n",
        "plt.subplot(1,2,2)\n",
        "plt.barh(x, n, height=x[1] - x[0], color=[0.7, 0.7, 0.7], edgecolor=[0.4, 0.4, 0.4])\n",
        "plt.ylim(0, 1.)\n",
        "plt.yticks(np.linspace(0, 1, 7), ['$0$', '$1/6$', '$1/3$', ' ', '$2/3$', '$5/6$', '$1$'])\n",
        "plt.gca().set_xlabel(r'$\\boldsymbol{n}$', fontsize=15)\n",
        "\n",
        "if SAVE_FIGURES:\n",
        "  plt.savefig('/tmp/omega_sweep.png', dpi=300, facecolor='w', edgecolor='w',\n",
        "          orientation='portrait', papertype=None, format=None,\n",
        "          transparent=False, bbox_inches=None, pad_inches=0.0,\n",
        "          frameon=None, metadata=None)"
      ]
    }
  ],
  "metadata": {
    "colab": {
      "collapsed_sections": [],
      "last_runtime": {
        "build_target": "",
        "kind": "local"
      },
      "name": "GHT.ipynb",
      "provenance": [
        {
          "file_id": "1rxkAaSu7g2gItgzWINJygEOZX8GVYpvX",
          "timestamp": 1594164409706
        },
        {
          "file_id": "1NB-4F3bLMRerL8-_RSEYHwDwQcsWMqZd",
          "timestamp": 1576085049758
        }
      ]
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
