{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "%matplotlib inline"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "\n# Map data to a normal distribution\n\n\n.. currentmodule:: sklearn.preprocessing\n\nThis example demonstrates the use of the Box-Cox and Yeo-Johnson transforms\nthrough :class:`~PowerTransformer` to map data from various\ndistributions to a normal distribution.\n\nThe power transform is useful as a transformation in modeling problems where\nhomoscedasticity and normality are desired. Below are examples of Box-Cox and\nYeo-Johnwon applied to six different probability distributions: Lognormal,\nChi-squared, Weibull, Gaussian, Uniform, and Bimodal.\n\nNote that the transformations successfully map the data to a normal\ndistribution when applied to certain datasets, but are ineffective with others.\nThis highlights the importance of visualizing the data before and after\ntransformation.\n\nAlso note that even though Box-Cox seems to perform better than Yeo-Johnson for\nlognormal and chi-squared distributions, keep in mind that Box-Cox does not\nsupport inputs with negative values.\n\nFor comparison, we also add the output from\n:class:`~QuantileTransformer`. It can force any arbitrary\ndistribution into a gaussian, provided that there are enough training samples\n(thousands). Because it is a non-parametric method, it is harder to interpret\nthan the parametric ones (Box-Cox and Yeo-Johnson).\n\nOn \"small\" datasets (less than a few hundred points), the quantile transformer\nis prone to overfitting. The use of the power transform is then recommended.\n\n"
      ]
    },
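    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "For reference, the two parametric transforms use the standard definitions below (the $\\lambda$ shown in each panel title is estimated from the training data by maximum likelihood). Box-Cox is defined only for strictly positive $x$:\n\n$$x^{(\\lambda)} = \\begin{cases} (x^\\lambda - 1)/\\lambda & \\text{if } \\lambda \\neq 0,\\\\ \\ln(x) & \\text{if } \\lambda = 0, \\end{cases}$$\n\nwhile Yeo-Johnson is defined for all real $x$:\n\n$$x^{(\\lambda)} = \\begin{cases} [(x + 1)^\\lambda - 1]/\\lambda & \\text{if } \\lambda \\neq 0,\\ x \\geq 0,\\\\ \\ln(x + 1) & \\text{if } \\lambda = 0,\\ x \\geq 0,\\\\ -[(-x + 1)^{2 - \\lambda} - 1]/(2 - \\lambda) & \\text{if } \\lambda \\neq 2,\\ x < 0,\\\\ -\\ln(-x + 1) & \\text{if } \\lambda = 2,\\ x < 0. \\end{cases}$$\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "The cases above show why Box-Cox cannot accept non-positive inputs. As a minimal sketch (an illustrative addition, not part of the original example), the next cell fits both power transforms on data containing negative values: Box-Cox raises a `ValueError`, while Yeo-Johnson transforms the same input without complaint.\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "# Sketch (illustrative addition, not part of the original example):\n# Box-Cox requires strictly positive inputs, while Yeo-Johnson accepts\n# any real-valued data.\nimport numpy as np\n\nfrom sklearn.preprocessing import PowerTransformer\n\nrng_demo = np.random.RandomState(0)\nX_signed = rng_demo.normal(loc=0, size=(100, 1))  # contains negative values\n\ntry:\n    PowerTransformer(method='box-cox').fit(X_signed)\nexcept ValueError as exc:\n    print('Box-Cox rejected the data: {}'.format(exc))\n\n# Yeo-Johnson handles the same data without error\nX_yj = PowerTransformer(method='yeo-johnson').fit_transform(X_signed)\nprint('Yeo-Johnson output: mean = {:.2f}, std = {:.2f}'.format(\n    X_yj.mean(), X_yj.std()))"
      ]
    },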
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "# Author: Eric Chang <ericchang2017@u.northwestern.edu>\n#         Nicolas Hug <contact@nicolas-hug.com>\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.preprocessing import PowerTransformer\nfrom sklearn.preprocessing import QuantileTransformer\nfrom sklearn.model_selection import train_test_split\n\nprint(__doc__)\n\n\nN_SAMPLES = 1000\nFONT_SIZE = 6\nBINS = 30\n\n\nrng = np.random.RandomState(304)\nbc = PowerTransformer(method='box-cox')\nyj = PowerTransformer(method='yeo-johnson')\n# n_quantiles is set to the training set size rather than the default value\n# to avoid a warning being raised by this example\nqt = QuantileTransformer(n_quantiles=500, output_distribution='normal',\n                         random_state=rng)\nsize = (N_SAMPLES, 1)\n\n\n# lognormal distribution\nX_lognormal = rng.lognormal(size=size)\n\n# chi-squared distribution\ndf = 3\nX_chisq = rng.chisquare(df=df, size=size)\n\n# weibull distribution\na = 50\nX_weibull = rng.weibull(a=a, size=size)\n\n# gaussian distribution\nloc = 100\nX_gaussian = rng.normal(loc=loc, size=size)\n\n# uniform distribution\nX_uniform = rng.uniform(low=0, high=1, size=size)\n\n# bimodal distribution\nloc_a, loc_b = 100, 105\nX_a, X_b = rng.normal(loc=loc_a, size=size), rng.normal(loc=loc_b, size=size)\nX_bimodal = np.concatenate([X_a, X_b], axis=0)\n\n\n# create plots\ndistributions = [\n    ('Lognormal', X_lognormal),\n    ('Chi-squared', X_chisq),\n    ('Weibull', X_weibull),\n    ('Gaussian', X_gaussian),\n    ('Uniform', X_uniform),\n    ('Bimodal', X_bimodal)\n]\n\ncolors = ['#D81B60', '#0188FF', '#FFC107',\n          '#B7A2FF', '#000000', '#2EC5AC']\n\nfig, axes = plt.subplots(nrows=8, ncols=3, figsize=plt.figaspect(2))\naxes = axes.flatten()\naxes_idxs = [(0, 3, 6, 9), (1, 4, 7, 10), (2, 5, 8, 11), (12, 15, 18, 21),\n             (13, 16, 19, 22), (14, 17, 20, 23)]\naxes_list = [(axes[i], axes[j], axes[k], axes[l])\n             for (i, j, k, l) in axes_idxs]\n\n\nfor distribution, color, axes in zip(distributions, colors, axes_list):\n    name, X = distribution\n    X_train, X_test = train_test_split(X, test_size=.5)\n\n    # perform power transforms and quantile transform\n    X_trans_bc = bc.fit(X_train).transform(X_test)\n    lmbda_bc = round(bc.lambdas_[0], 2)\n    X_trans_yj = yj.fit(X_train).transform(X_test)\n    lmbda_yj = round(yj.lambdas_[0], 2)\n    X_trans_qt = qt.fit(X_train).transform(X_test)\n\n    ax_original, ax_bc, ax_yj, ax_qt = axes\n\n    ax_original.hist(X_train, color=color, bins=BINS)\n    ax_original.set_title(name, fontsize=FONT_SIZE)\n    ax_original.tick_params(axis='both', which='major', labelsize=FONT_SIZE)\n\n    for ax, X_trans, meth_name, lmbda in zip(\n            (ax_bc, ax_yj, ax_qt),\n            (X_trans_bc, X_trans_yj, X_trans_qt),\n            ('Box-Cox', 'Yeo-Johnson', 'Quantile transform'),\n            (lmbda_bc, lmbda_yj, None)):\n        ax.hist(X_trans, color=color, bins=BINS)\n        title = 'After {}'.format(meth_name)\n        if lmbda is not None:\n            title += r'\\n$\\lambda$ = {}'.format(lmbda)\n        ax.set_title(title, fontsize=FONT_SIZE)\n        ax.tick_params(axis='both', which='major', labelsize=FONT_SIZE)\n        ax.set_xlim([-3.5, 3.5])\n\n\nplt.tight_layout()\nplt.show()"
      ]
    }
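    ,
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "As a rough illustration of the overfitting note above (again an illustrative addition, not part of the original example), the next cell fits the quantile transform on a small and on a large lognormal training set, then applies scipy's `normaltest` to the same held-out data after transformation. The mapping learned from only 50 samples typically yields a markedly larger (less normal) test statistic.\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "# Sketch (illustrative addition, not part of the original example):\n# the quantile transform is estimated from the empirical CDF of the\n# training data, so a mapping learned from few samples is coarse and\n# tends to generalize poorly to held-out data.\nimport numpy as np\nfrom scipy import stats\n\nfrom sklearn.preprocessing import QuantileTransformer\n\nrng_demo = np.random.RandomState(42)\nX_held_out = rng_demo.lognormal(size=(1000, 1))\n\nfor n_train in (50, 5000):\n    X_fit = rng_demo.lognormal(size=(n_train, 1))\n    qt_demo = QuantileTransformer(n_quantiles=min(n_train, 1000),\n                                  output_distribution='normal',\n                                  random_state=0)\n    X_out = qt_demo.fit(X_fit).transform(X_held_out)\n    # D'Agostino-Pearson normality test; a smaller statistic means the\n    # transformed held-out data is closer to a normal distribution\n    stat, _ = stats.normaltest(X_out.ravel())\n    print('n_train = {:4d}: normality statistic = {:.1f}'.format(n_train, stat))"
      ]
    }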
  ],
  "metadata": {
    "kernelspec": {
      "display_name": "Python 3",
      "language": "python",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.6.9"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}