{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%run NB01-Load.ipynb"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Current Data-Frame\n",
    "df_current = 'application_train'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Master Data-Frame\n",
    "df_master = 'application_train'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Count of Rows\n",
    "# Count of Columns\n",
    "df_row_count, df_column_count = df.shape\n",
    "df_row_count, df_column_count"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Total Values Count\n",
    "df_values_count_total = df_row_count * df_column_count\n",
    "df_values_count_total"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Column Names\n",
    "df_column_names = sorted(df.columns.tolist())\n",
    "df_column_names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Column Data Types\n",
    "df_column_dtypes = df.dtypes\n",
    "df_column_dtypes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "# Column Data Types Groups\n",
    "#df_column_dtype_groups = df.columns.to_series().groupby(df.dtypes).groups\n",
    "#df_column_dtype_groups\n",
    "\n",
    "#> TypeError: data type not understood\n",
    "df_column_dtype_groups = df.columns.groupby(df.dtypes)\n",
    "df_column_dtype_groups"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Any Missing Values\n",
    "df_missing_values_flag = df.isnull().values.any()\n",
    "df_missing_values_flag"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Total Missing Values Count\n",
    "df_missing_values_count_total = df.isnull().sum().sum()\n",
    "df_missing_values_count_total"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Total Missing Values Percentage\n",
    "df_missing_values_percentage_total = df_missing_values_count_total / df_values_count_total\n",
    "df_missing_values_percentage_total"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Count of Columns with/without Missing Values\n",
    "#df_missing_values_column_count"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Count of Rows with/without Missing Values\n",
    "#df_missing_values_row_count"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Percentage of Columns with/without Missing Values\n",
    "#df_missing_values_column_percentage"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Percentage of Rows with/without Missing Values\n",
    "#df_missing_values_row_percentage"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Count of Rows Per Column\n",
    "df_columns_row_count = {column_name:df_row_count for column_name in df_column_names}\n",
    "df_columns_row_count"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Count of Unique Values Per Column\n",
    "df_columns_number_of_unique_values = {column_name:None for column_name in df_column_names}\n",
    "for column_name in df_columns_number_of_unique_values:\n",
    "    df_columns_number_of_unique_values[column_name] = df[column_name].nunique()\n",
    "df_columns_number_of_unique_values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Percentage of Unique Values Per Column\n",
    "df_columns_percentage_of_unique_values = {column_name:None for column_name in df_column_names}\n",
    "for column_name in df_columns_percentage_of_unique_values:\n",
    "    df_columns_percentage_of_unique_values[column_name] = df_columns_number_of_unique_values[column_name]/df_row_count\n",
    "df_columns_percentage_of_unique_values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Any Missing Values Per Column\n",
    "df_columns_missing_values_flag = {column_name:None for column_name in df_column_names}\n",
    "for column_name in df_columns_missing_values_flag:\n",
    "    df_columns_missing_values_flag[column_name] = df[column_name].isnull().any()\n",
    "\n",
    "for key, value in df_columns_missing_values_flag.items():\n",
    "    if value:\n",
    "        print(key)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Count of Missing Values Per Column\n",
    "df_columns_missing_values_count = {column_name:None for column_name in df_column_names}\n",
    "for column_name in df_columns_missing_values_count:\n",
    "    df_columns_missing_values_count[column_name] = df[column_name].isnull().sum()\n",
    "df_columns_missing_values_count"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Percentage of Missing Values Per Column\n",
    "df_columns_missing_values_percentage = {column_name:None for column_name in df_column_names}\n",
    "for column_name in df_columns_missing_values_percentage:\n",
    "    df_columns_missing_values_percentage[column_name] = df_columns_missing_values_count[column_name]/df_row_count\n",
    "df_columns_missing_values_percentage"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Any Missing Values Per Row\n",
    "#df_rows_missing_values_flag"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Count of Missing Values Per Row\n",
    "#df_rows_missing_values_count"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Percentage of Missing Values Per Row\n",
    "#df_rows_missing_values_percentage"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#df['SK_ID_CURR'].str.isdigit().all()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#df['SK_ID_CURR'].str.isalpha().all()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_dtype_boolean_column_names = sorted(list(df.select_dtypes(include=['bool']).columns))\n",
    "df_dtype_boolean_column_names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_dtype_float64_column_names = sorted(list(df.select_dtypes(include=['float64']).columns))\n",
    "df_dtype_float64_column_names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_dtype_int64_column_names = sorted(list(df.select_dtypes(include=['int64']).columns))\n",
    "df_dtype_int64_column_names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_dtype_object_column_names = sorted(list(df.select_dtypes(include=['O']).columns))\n",
    "df_dtype_object_column_names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_dtype_category_column_names = sorted(list(df.select_dtypes(include=['category']).columns))\n",
    "df_dtype_category_column_names"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Test if values are letters or numbers\n",
    "# Then, test if numbers are integers or decimals\n",
    "\n",
    "```python\n",
    "# pandas.Series.str.isnumeric\n",
    "# Series.str.isnumeric()\n",
    "# Check whether all characters in each string in the Series/Index are numeric. Equivalent to str.isnumeric().\n",
    "# Returns:\tis : Series/array of boolean values\n",
    "\n",
    "string.isdecimal()  \n",
    "string.isdigit()\n",
    "string.isnumeric()\n",
    "```\n",
    "\n",
    "...\\pandas\\core\\strings.py  \n",
    "```python\n",
    "from pandas.core.dtypes.common import (\n",
    "    is_bool_dtype,\n",
    "    is_categorical_dtype,\n",
    "    is_object_dtype,\n",
    "    is_string_like,\n",
    "    is_list_like,\n",
    "    is_scalar,\n",
    "    is_integer,\n",
    "    is_re\n",
    ")\n",
    "isalnum\n",
    "isalpha\n",
    "isdecimal\n",
    "isdigit\n",
    "isnumeric\n",
    "islower\n",
    "isspace\n",
    "isupper\n",
    "istitle\n",
    "```\n",
    "\n",
    "...\\string.py\n",
    "```python\n",
    "# Some strings for ctype-style character classification\n",
    "whitespace = ' \\t\\n\\r\\v\\f'\n",
    "ascii_lowercase = 'abcdefghijklmnopqrstuvwxyz'\n",
    "ascii_uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n",
    "ascii_letters = ascii_lowercase + ascii_uppercase\n",
    "digits = '0123456789'\n",
    "hexdigits = digits + 'abcdef' + 'ABCDEF'\n",
    "octdigits = '01234567'\n",
    "punctuation = r\"\"\"!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~\"\"\"\n",
    "printable = digits + ascii_letters + punctuation + whitespace\n",
    "```\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "string = \"wallawalla 101\"\n",
    "\n",
    "try:\n",
    "    i = float(string)\n",
    "except (ValueError, TypeError):\n",
    "    print('\\nNot numeric')\n",
    "\n",
    "print(string.isdecimal())\n",
    "\n",
    "def isNumeric(s):\n",
    "    return s.isnumeric()\n",
    "\n",
    "print(isNumeric(\"1234124\"))\n",
    "\n",
    "if string.isdigit():\n",
    "    print(\"Your message includes numbers only.\")\n",
    "else:\n",
    "    print(\"Your message does not include numbers.\")\n",
    "\n",
    "# Iterating the string and checking for numeric characters\n",
    "# Incrementing the counter if a numeric character is found\n",
    "# And adding the character to new string if not numeric\n",
    "# Finally printing the count and the newstring\n",
    "count = int()\n",
    "newstring1 = str()\n",
    "for a in string:\n",
    "    if (a.isnumeric()) == True:\n",
    "        count += 1\n",
    "    else:\n",
    "        newstring1 += a\n",
    "print(count)\n",
    "print(newstring1)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# column_name = 'ORGANIZATION_TYPE'\n",
    "# float(df[column_name])\n",
    "# #> TypeError: cannot convert the series to <class 'float'>\n",
    "\n",
    "column_name = 'ORGANIZATION_TYPE'\n",
    "try:\n",
    "    float(df[column_name])\n",
    "except (TypeError):\n",
    "    print('Non-Numeric: ' + column_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def is_numeric_scalar(scalar_value):\n",
    "    # Iterating the string and checking for numeric characters\n",
    "    # Incrementing the counter if a numeric character is found\n",
    "    # And adding the character to new string if not numeric\n",
    "    # NOTE: iteration over a string is actually iteration over the individual characters\n",
    "    \n",
    "    scalar_value_is_numeric_scalar = None\n",
    "    \n",
    "    for single_character in scalar_value:\n",
    "        #print(scalar_value)\n",
    "        #print(single_character)\n",
    "        if (single_character.isnumeric()) == True:\n",
    "            scalar_value_is_numeric_scalar = True\n",
    "            #print(True)\n",
    "        else:\n",
    "            scalar_value_is_numeric_scalar = False\n",
    "            #print(False)\n",
    "            #continue\n",
    "            break\n",
    "        \n",
    "        #if scalar_value_is_numeric_scalar == False:\n",
    "        #    continue\n",
    "    \n",
    "    return scalar_value_is_numeric_scalar\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# column_name = 'ORGANIZATION_TYPE'\n",
    "# df[column_name].apply(is_numeric_each_character)\n",
    "#> Business Entity Type 3\n",
    "#> B\n",
    "#> False\n",
    "#> Business Entity Type 3\n",
    "#> u\n",
    "#> False\n",
    "#> Business Entity Type 3\n",
    "#> s\n",
    "#> False\n",
    "#> Business Entity Type 3\n",
    "#> i\n",
    "#> False\n",
    "#> Business Entity Type 3\n",
    "#> n\n",
    "#> False\n",
    "#> Business Entity Type 3\n",
    "#> e\n",
    "#> False\n",
    "#> Business Entity Type 3\n",
    "#> s\n",
    "#> False\n",
    "#> Business Entity Type 3\n",
    "#> s\n",
    "#> False\n",
    "#> Business Entity Type 3\n",
    "#>  \n",
    "#> False\n",
    "#> Business Entity Type 3\n",
    "#> E\n",
    "#> False\n",
    "#> Business Entity Type 3\n",
    "#> n\n",
    "#> False\n",
    "#> Business Entity Type 3\n",
    "#> t\n",
    "#> False\n",
    "#> Business Entity Type 3\n",
    "#> i\n",
    "#> False\n",
    "#> Business Entity Type 3\n",
    "#> t\n",
    "#> False\n",
    "#> Business Entity Type 3\n",
    "#> y\n",
    "#> False\n",
    "#> Business Entity Type 3\n",
    "#>  \n",
    "#> False\n",
    "#> Business Entity Type 3\n",
    "#> T\n",
    "#> False\n",
    "#> Business Entity Type 3\n",
    "#> y\n",
    "#> False\n",
    "#> Business Entity Type 3\n",
    "#> p\n",
    "#> False\n",
    "#> Business Entity Type 3\n",
    "#> e\n",
    "#> False\n",
    "#> Business Entity Type 3\n",
    "#>  \n",
    "#> False\n",
    "#> Business Entity Type 3\n",
    "#> 3\n",
    "#> True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# column_name = 'ORGANIZATION_TYPE'\n",
    "# df[column_name].apply(is_numeric_scalar)\n",
    "#> 0          True\n",
    "#> 1         False\n",
    "#> 2         False\n",
    "#> 3          True\n",
    "#> 4         False\n",
    "#> 5         False\n",
    "#> 6          True\n",
    "#> 7         False\n",
    "#> 8         False\n",
    "#> 9         False\n",
    "#> 10        False\n",
    "#> 11        False\n",
    "#> 12         True\n",
    "#> 13        False\n",
    "#> 14         True\n",
    "#> 15         True\n",
    "#> 16        False\n",
    "#> 17        False\n",
    "#> 18        False\n",
    "#> 19        False\n",
    "#> 20        False\n",
    "#> 21         True\n",
    "#> 22        False\n",
    "#> 23        False\n",
    "#> 24         True\n",
    "#> 25         True\n",
    "#> 26         True\n",
    "#> 27         True\n",
    "#> 28        False\n",
    "#> 29         True\n",
    "#>           ...  \n",
    "#> 307481     True\n",
    "#> 307482    False\n",
    "#> 307483    False\n",
    "#> 307484     True\n",
    "#> 307485     True\n",
    "#> 307486     True\n",
    "#> 307487    False\n",
    "#> 307488    False\n",
    "#> 307489     True\n",
    "#> 307490     True\n",
    "#> 307491    False\n",
    "#> 307492    False\n",
    "#> 307493    False\n",
    "#> 307494    False\n",
    "#> 307495     True\n",
    "#> 307496    False\n",
    "#> 307497     True\n",
    "#> 307498    False\n",
    "#> 307499    False\n",
    "#> 307500     True\n",
    "#> 307501     True\n",
    "#> 307502    False\n",
    "#> 307503    False\n",
    "#> 307504    False\n",
    "#> 307505    False\n",
    "#> 307506    False\n",
    "#> 307507    False\n",
    "#> 307508    False\n",
    "#> 307509     True\n",
    "#> 307510     True\n",
    "#> Name: ORGANIZATION_TYPE, Length: 307511, dtype: bool"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def is_numeric_series(pandas_series):\n",
    "    # Iterating the string and checking for numeric characters\n",
    "    # Incrementing the counter if a numeric character is found\n",
    "    # And adding the character to new string if not numeric\n",
    "    # NOTE: iteration over a string is actually iteration over the individual characters\n",
    "    \n",
    "    pandas_series_is_numeric_series = None\n",
    "    \n",
    "    #TODO(JamesBalcomb): decide on early-exit when dtype is bool, float, int, etc. (.:. TypeError: 'numpy.float64' object is not iterable)\n",
    "    #if pandas_series.dtype == numpy.number:\n",
    "    #if pandas.api.types.is_numeric_dtype(pandas_series):\n",
    "    \n",
    "    if pandas.api.types.is_numeric_dtype(pandas_series):\n",
    "        pandas_series_is_numeric_series = True\n",
    "    else:\n",
    "        for index_number in pandas_series.index:\n",
    "            pandas_series_value = pandas_series.loc[index_number]\n",
    "            #print(pandas_series_value)\n",
    "            if is_numeric_scalar(pandas_series_value):\n",
    "                pandas_series_is_numeric_series = True\n",
    "                #print(True)\n",
    "            else:\n",
    "                pandas_series_is_numeric_series = False\n",
    "                #print(False)\n",
    "                break\n",
    "    \n",
    "    return pandas_series_is_numeric_series\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# column_name = 'ORGANIZATION_TYPE'\n",
    "# pandas_series = df[column_name]\n",
    "\n",
    "# type(pandas_series)\n",
    "#> <class 'pandas.core.series.Series'>\n",
    "# pandas_series.iloc[0]\n",
    "#> 'Business Entity Type 3'\n",
    "# pandas_series.iloc[1]\n",
    "#> 'School'\n",
    "# pandas_series.iloc[0].iat[0]\n",
    "#> AttributeError: 'str' object has no attribute 'iat'\n",
    "\n",
    "# pandas.Series.iloc\n",
    "# Series.iloc\n",
    "# Purely integer-location based indexing for selection by position.\n",
    "# .iloc[] is primarily integer position based (from 0 to length-1 of the axis), but may also be used with a boolean array.\n",
    "\n",
    "# len(pandas_series)\n",
    "#> 307511\n",
    "\n",
    "# pandas_series.index\n",
    "#> RangeIndex(start=0, stop=307511, step=1)\n",
    "\n",
    "# for index_id in pandas_series.index:\n",
    "#     print(pandas_series.loc[index_id])\n",
    "#> Business Entity Type 3\n",
    "#> School\n",
    "#> Government\n",
    "#> Business Entity Type 3\n",
    "#> Religion\n",
    "#> ...\n",
    "#> Services\n",
    "#> XNA\n",
    "#> School\n",
    "#> Business Entity Type 1\n",
    "#> Business Entity Type 3\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# column_name = 'ORGANIZATION_TYPE'\n",
    "# pandas_series = df[column_name]\n",
    "# is_numeric_series(pandas_series)\n",
    "#> False\n",
    "\n",
    "# column_name = 'AMT_INCOME_TOTAL'\n",
    "# pandas_series = df[column_name]\n",
    "# is_numeric_series(pandas_series)\n",
    "#> TypeError                                 Traceback (most recent call last)\n",
    "#> <ipython-input-49-ff847204bea6> in <module>()\n",
    "#> ---> 10 is_numeric_series(pandas_series)\n",
    "#> <ipython-input-46-45f2861e88f6> in is_numeric_series(pandas_series)\n",
    "#> ---> 12         if is_numeric_scalar(pandas_series_value):\n",
    "#> <ipython-input-43-33965b580f01> in is_numeric_scalar(scalar_value)\n",
    "#> ----> 9     for single_character in scalar_value:\n",
    "#> TypeError: 'numpy.float64' object is not iterable\n",
    "\n",
    "# column_name = 'ORGANIZATION_TYPE'\n",
    "# pandas_series = df[column_name]\n",
    "##import numpy\n",
    "# pandas_series.dtype == numpy.number\n",
    "#> False\n",
    "##import pandas\n",
    "####from pandas.api.types import is_string_dtype\n",
    "####from pandas.api.types import is_numeric_dtype\n",
    "# pandas.api.types.is_numeric_dtype(pandas_series)\n",
    "#> False\n",
    "\n",
    "# column_name = 'AMT_INCOME_TOTAL'\n",
    "# pandas_series = df[column_name]\n",
    "# pandas_series.dtype == numpy.number\n",
    "#> True\n",
    "# pandas.api.types.is_numeric_dtype(pandas_series)\n",
    "#> True\n",
    "\n",
    "# column_name = 'ORGANIZATION_TYPE'\n",
    "# pandas_series = df[column_name]\n",
    "# is_numeric_series(pandas_series)\n",
    "#> False\n",
    "\n",
    "# column_name = 'AMT_INCOME_TOTAL'\n",
    "# pandas_series = df[column_name]\n",
    "# is_numeric_series(pandas_series)\n",
    "#> True\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# EDA - Meta-Data - Manual\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_target_column_name = ['TARGET']\n",
    "df_target_column_name"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_record_id_column_name = ['SK_ID_CURR']\n",
    "df_record_id_column_name"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_record_key_column_name = ['SK_ID_CURR']\n",
    "df_record_key_column_name"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_nondata_column_names = ['SK_ID_CURR']\n",
    "df_nondata_column_names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_useless_column_names = ['FLAG_MOBIL']\n",
    "df_useless_column_names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_nonmodel_column_names = []\n",
    "df_nonmodel_column_names = sorted(list(set(df_target_column_name + df_record_id_column_name + df_record_key_column_name + df_nondata_column_names + df_useless_column_names)))\n",
    "df_nonmodel_column_names"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Lists of Column Names\n",
    "df_nondata_column_names  \n",
    "df_useless_column_names  \n",
    "df_nonmodel_column_names  \n",
    "df_int_column_names  \n",
    "df_float_column_names  \n",
    "df_datetime_column_names  \n",
    "df_date_column_names  \n",
    "df_object_column_names  \n",
    "df_boolean_column_names  \n",
    "df_categorical_column_names  \n",
    "df_numerical_column_names  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_int_column_names = []\n",
    "df_int_column_names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_float_column_names = []\n",
    "df_float_column_names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_currency_column_names = []\n",
    "df_currency_column_names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_timestamp_column_names = []\n",
    "df_timestamp_column_names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_datetime_column_names = []\n",
    "df_datetime_column_names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_date_column_names = []\n",
    "df_date_column_names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_object_column_names = [\n",
    "'CODE_GENDER',\n",
    "'EMERGENCYSTATE_MODE',\n",
    "'FONDKAPREMONT_MODE',\n",
    "'HOUSETYPE_MODE',\n",
    "'NAME_CONTRACT_TYPE',\n",
    "'NAME_TYPE_SUITE',\n",
    "'NAME_INCOME_TYPE',\n",
    "'NAME_EDUCATION_TYPE',\n",
    "'NAME_FAMILY_STATUS',\n",
    "'NAME_HOUSING_TYPE',\n",
    "'OCCUPATION_TYPE',\n",
    "'ORGANIZATION_TYPE',\n",
    "'WALLSMATERIAL_MODE',\n",
    "'WEEKDAY_APPR_PROCESS_START',\n",
    "]\n",
    "df_object_column_names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_boolean_column_names = [\n",
    "'FLAG_CONT_MOBILE',\n",
    "'FLAG_DOCUMENT_2',\n",
    "'FLAG_DOCUMENT_3',\n",
    "'FLAG_DOCUMENT_4',\n",
    "'FLAG_DOCUMENT_5',\n",
    "'FLAG_DOCUMENT_6',\n",
    "'FLAG_DOCUMENT_7',\n",
    "'FLAG_DOCUMENT_8',\n",
    "'FLAG_DOCUMENT_9',\n",
    "'FLAG_DOCUMENT_10',\n",
    "'FLAG_DOCUMENT_11',\n",
    "'FLAG_DOCUMENT_12',\n",
    "'FLAG_DOCUMENT_13',\n",
    "'FLAG_DOCUMENT_14',\n",
    "'FLAG_DOCUMENT_15',\n",
    "'FLAG_DOCUMENT_16',\n",
    "'FLAG_DOCUMENT_17',\n",
    "'FLAG_DOCUMENT_18',\n",
    "'FLAG_DOCUMENT_19',\n",
    "'FLAG_DOCUMENT_20',\n",
    "'FLAG_DOCUMENT_21',\n",
    "'FLAG_EMAIL',\n",
    "'FLAG_EMP_PHONE',\n",
    "'FLAG_MOBIL',\n",
    "'FLAG_OWN_CAR',\n",
    "'FLAG_OWN_REALTY',\n",
    "'FLAG_PHONE',\n",
    "'FLAG_WORK_PHONE',\n",
    "'LIVE_CITY_NOT_WORK_CITY',\n",
    "'LIVE_REGION_NOT_WORK_REGION',\n",
    "'REG_CITY_NOT_LIVE_CITY',\n",
    "'REG_CITY_NOT_WORK_CITY',\n",
    "'REG_REGION_NOT_LIVE_REGION',\n",
    "'REG_REGION_NOT_WORK_REGION'\n",
    "]\n",
    "df_boolean_column_names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_categorical_column_names = [\n",
    "'CODE_GENDER',\n",
    "'EMERGENCYSTATE_MODE',\n",
    "'FLAG_CONT_MOBILE',\n",
    "'FLAG_DOCUMENT_2',\n",
    "'FLAG_DOCUMENT_3',\n",
    "'FLAG_DOCUMENT_4',\n",
    "'FLAG_DOCUMENT_5',\n",
    "'FLAG_DOCUMENT_6',\n",
    "'FLAG_DOCUMENT_7',\n",
    "'FLAG_DOCUMENT_8',\n",
    "'FLAG_DOCUMENT_9',\n",
    "'FLAG_DOCUMENT_10',\n",
    "'FLAG_DOCUMENT_11',\n",
    "'FLAG_DOCUMENT_12',\n",
    "'FLAG_DOCUMENT_13',\n",
    "'FLAG_DOCUMENT_14',\n",
    "'FLAG_DOCUMENT_15',\n",
    "'FLAG_DOCUMENT_16',\n",
    "'FLAG_DOCUMENT_17',\n",
    "'FLAG_DOCUMENT_18',\n",
    "'FLAG_DOCUMENT_19',\n",
    "'FLAG_DOCUMENT_20',\n",
    "'FLAG_DOCUMENT_21',\n",
    "'FLAG_EMAIL',\n",
    "'FLAG_EMP_PHONE',\n",
    "'FLAG_MOBIL',\n",
    "'FLAG_OWN_CAR',\n",
    "'FLAG_OWN_REALTY',\n",
    "'FLAG_PHONE',\n",
    "'FLAG_WORK_PHONE',\n",
    "'FONDKAPREMONT_MODE',\n",
    "'HOUSETYPE_MODE',\n",
    "'HOUR_APPR_PROCESS_START',\n",
    "'LIVE_CITY_NOT_WORK_CITY',\n",
    "'LIVE_REGION_NOT_WORK_REGION',\n",
    "'NAME_CONTRACT_TYPE',\n",
    "'NAME_EDUCATION_TYPE',\n",
    "'NAME_FAMILY_STATUS',\n",
    "'NAME_HOUSING_TYPE',\n",
    "'NAME_INCOME_TYPE',\n",
    "'NAME_TYPE_SUITE',\n",
    "'OCCUPATION_TYPE',\n",
    "'ORGANIZATION_TYPE',\n",
    "'REG_CITY_NOT_LIVE_CITY',\n",
    "'REG_CITY_NOT_WORK_CITY',\n",
    "'REG_REGION_NOT_LIVE_REGION',\n",
    "'REG_REGION_NOT_WORK_REGION',\n",
    "'WALLSMATERIAL_MODE',\n",
    "'WEEKDAY_APPR_PROCESS_START'\n",
    "]\n",
    "df_categorical_column_names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_numerical_column_names = [\n",
    "'AMT_ANNUITY',\n",
    "'AMT_CREDIT',\n",
    "'AMT_GOODS_PRICE',\n",
    "'AMT_INCOME_TOTAL',\n",
    "'AMT_REQ_CREDIT_BUREAU_DAY',\n",
    "'AMT_REQ_CREDIT_BUREAU_HOUR',\n",
    "'AMT_REQ_CREDIT_BUREAU_MON',\n",
    "'AMT_REQ_CREDIT_BUREAU_QRT',\n",
    "'AMT_REQ_CREDIT_BUREAU_WEEK',\n",
    "'AMT_REQ_CREDIT_BUREAU_YEAR',\n",
    "'APARTMENTS_AVG',\n",
    "'APARTMENTS_MEDI',\n",
    "'APARTMENTS_MODE',\n",
    "'BASEMENTAREA_AVG',\n",
    "'BASEMENTAREA_MEDI',\n",
    "'BASEMENTAREA_MODE',\n",
    "'CNT_CHILDREN',\n",
    "'CNT_FAM_MEMBERS',\n",
    "'COMMONAREA_AVG',\n",
    "'COMMONAREA_MEDI',\n",
    "'COMMONAREA_MODE',\n",
    "'DAYS_BIRTH',\n",
    "'DAYS_EMPLOYED',\n",
    "'DAYS_ID_PUBLISH',\n",
    "'DAYS_LAST_PHONE_CHANGE',\n",
    "'DAYS_REGISTRATION',\n",
    "'DEF_30_CNT_SOCIAL_CIRCLE',\n",
    "'DEF_60_CNT_SOCIAL_CIRCLE',\n",
    "'ELEVATORS_AVG',\n",
    "'ELEVATORS_MEDI',\n",
    "'ELEVATORS_MODE',\n",
    "'ENTRANCES_AVG',\n",
    "'ENTRANCES_MEDI',\n",
    "'ENTRANCES_MODE',\n",
    "'EXT_SOURCE_1',\n",
    "'EXT_SOURCE_2',\n",
    "'EXT_SOURCE_3',\n",
    "'FLOORSMAX_AVG',\n",
    "'FLOORSMAX_MEDI',\n",
    "'FLOORSMAX_MODE',\n",
    "'FLOORSMIN_AVG',\n",
    "'FLOORSMIN_MEDI',\n",
    "'FLOORSMIN_MODE',\n",
    "'HOUR_APPR_PROCESS_START',\n",
    "'LANDAREA_AVG',\n",
    "'LANDAREA_MEDI',\n",
    "'LANDAREA_MODE',\n",
    "'LIVINGAPARTMENTS_AVG',\n",
    "'LIVINGAPARTMENTS_MEDI',\n",
    "'LIVINGAPARTMENTS_MODE',\n",
    "'LIVINGAREA_AVG',\n",
    "'LIVINGAREA_MEDI',\n",
    "'LIVINGAREA_MODE',\n",
    "'NONLIVINGAPARTMENTS_AVG',\n",
    "'NONLIVINGAPARTMENTS_MEDI',\n",
    "'NONLIVINGAPARTMENTS_MODE',\n",
    "'NONLIVINGAREA_AVG',\n",
    "'NONLIVINGAREA_MEDI',\n",
    "'NONLIVINGAREA_MODE',\n",
    "'OBS_30_CNT_SOCIAL_CIRCLE',\n",
    "'OBS_60_CNT_SOCIAL_CIRCLE',\n",
    "'OWN_CAR_AGE',\n",
    "'REGION_POPULATION_RELATIVE',\n",
    "'REGION_RATING_CLIENT',\n",
    "'REGION_RATING_CLIENT_W_CITY',\n",
    "'TOTALAREA_MODE',\n",
    "'YEARS_BEGINEXPLUATATION_AVG',\n",
    "'YEARS_BEGINEXPLUATATION_MEDI',\n",
    "'YEARS_BEGINEXPLUATATION_MODE',\n",
    "'YEARS_BUILD_AVG',\n",
    "'YEARS_BUILD_MEDI',\n",
    "'YEARS_BUILD_MODE'\n",
    "]\n",
    "df_numerical_column_names"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "1) Always Positive / Never Negative  \n",
    "2) Always Negative / Never Positive  \n",
    "3) Always Between 1 and 0  \n",
    "4) Has Zero  \n",
    "5) Never Zero  \n",
    "6) Has Mean of 0  \n",
    "7) Has Standard Deviation of 1  \n",
    "8) Is Mean Centered  \n",
    "9) Is Scaled  \n",
    "10) Is Z-Score (AKA Standardized, Normalized, Centered and Scaled)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "```python\n",
    "# Test if column has dtype of category\n",
    "if df[column_name].dtype.name == 'category':\n",
    "# Or\n",
    "if isinstance(df[column_name].dtype, pd.core.common.CategoricalDtype):\n",
    "# Or\n",
    "if pd.core.common.is_categorical_dtype(df[column_name]):\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Always Positive / Never Negative\n",
    "# NaN compares False against any number, so drop missing values first --\n",
    "# otherwise every column containing NaN would be reported as not-positive.\n",
    "df_columns_always_positive_flag = {column_name: None for column_name in df_column_names}\n",
    "for column_name in df_columns_always_positive_flag:\n",
    "    # Sign tests are meaningless for non-numeric columns; leave the flag as None.\n",
    "    if df[column_name].dtype.name in ('object', 'category'):\n",
    "        df_columns_always_positive_flag[column_name] = None\n",
    "    else:\n",
    "        df_columns_always_positive_flag[column_name] = all(df[column_name].dropna() > 0)\n",
    "df_columns_always_positive_flag"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Always Negative / Never Positive\n",
    "# NaN compares False against any number, so drop missing values first --\n",
    "# otherwise every column containing NaN would be reported as not-negative.\n",
    "df_columns_always_negative_flag = {column_name: None for column_name in df_column_names}\n",
    "for column_name in df_columns_always_negative_flag:\n",
    "    # Sign tests are meaningless for non-numeric columns; leave the flag as None.\n",
    "    if df[column_name].dtype.name in ('object', 'category'):\n",
    "        df_columns_always_negative_flag[column_name] = None\n",
    "    else:\n",
    "        df_columns_always_negative_flag[column_name] = all(df[column_name].dropna() < 0)\n",
    "df_columns_always_negative_flag"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Always Between 0 and 1 (inclusive)\n",
    "# BUG FIX: the previous version computed all(df[(cond1) & (cond2)]).\n",
    "# Iterating a DataFrame yields its *column labels*, and non-empty label\n",
    "# strings are always truthy, so the flag came out True for every column.\n",
    "# Test the column's own values instead, ignoring NaN (which compares False).\n",
    "df_columns_always_between_one_and_zero_flag = {column_name: None for column_name in df_column_names}\n",
    "for column_name in df_columns_always_between_one_and_zero_flag:\n",
    "    if df[column_name].dtype.name in ('object', 'category'):\n",
    "        df_columns_always_between_one_and_zero_flag[column_name] = None\n",
    "    else:\n",
    "        df_columns_always_between_one_and_zero_flag[column_name] = bool(df[column_name].dropna().between(0, 1).all())\n",
    "df_columns_always_between_one_and_zero_flag"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Has Zero\n",
    "df_columns_has_zero_flag = {}\n",
    "for column_name in df_column_names:\n",
    "    if df[column_name].dtype.name in ('object', 'category'):\n",
    "        # Comparing text/categorical values to 0 is meaningless; record None.\n",
    "        df_columns_has_zero_flag[column_name] = None\n",
    "    else:\n",
    "        # NaN == 0 evaluates False, so missing values never count as zeros.\n",
    "        df_columns_has_zero_flag[column_name] = any(df[column_name] == 0)\n",
    "df_columns_has_zero_flag"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Never Zero\n",
    "df_columns_never_zero_flag = {}\n",
    "for column_name in df_column_names:\n",
    "    if df[column_name].dtype.name in ('object', 'category'):\n",
    "        # Comparing text/categorical values to 0 is meaningless; record None.\n",
    "        df_columns_never_zero_flag[column_name] = None\n",
    "    else:\n",
    "        # NaN != 0 evaluates True, so missing values count as non-zero.\n",
    "        df_columns_never_zero_flag[column_name] = all(df[column_name] != 0)\n",
    "df_columns_never_zero_flag"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Has Mean of 0\n",
    "# Exact float equality (mean() == 0) misses columns that were genuinely\n",
    "# centered but carry rounding error (mean ~1e-17); compare with a tolerance.\n",
    "df_columns_has_mean_of_zero_flag = {column_name: None for column_name in df_column_names}\n",
    "for column_name in df_columns_has_mean_of_zero_flag:\n",
    "    if df[column_name].dtype.name in ('object', 'category'):\n",
    "        df_columns_has_mean_of_zero_flag[column_name] = None\n",
    "    else:\n",
    "        # Series.mean() skips NaN by default.\n",
    "        df_columns_has_mean_of_zero_flag[column_name] = bool(np.isclose(df[column_name].mean(), 0.0))\n",
    "df_columns_has_mean_of_zero_flag"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Has Standard Deviation of 1\n",
    "# Exact float equality (std() == 1) misses columns that were genuinely\n",
    "# scaled but carry rounding error; compare with a tolerance instead.\n",
    "df_columns_has_standard_deviation_of_one_flag = {column_name: None for column_name in df_column_names}\n",
    "for column_name in df_columns_has_standard_deviation_of_one_flag:\n",
    "    if df[column_name].dtype.name in ('object', 'category'):\n",
    "        df_columns_has_standard_deviation_of_one_flag[column_name] = None\n",
    "    else:\n",
    "        # Series.std() skips NaN and uses ddof=1 (sample std) by default.\n",
    "        df_columns_has_standard_deviation_of_one_flag[column_name] = bool(np.isclose(df[column_name].std(), 1.0))\n",
    "df_columns_has_standard_deviation_of_one_flag"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Is Mean Centered (i.e., the mean of all values has been subtracted from each value)\n",
    "# A centered column has mean 0 only up to floating-point rounding\n",
    "# (typically ~1e-17), so exact equality gives false negatives; use a tolerance.\n",
    "df_columns_is_mean_centered_flag = {column_name: None for column_name in df_column_names}\n",
    "for column_name in df_columns_is_mean_centered_flag:\n",
    "    if df[column_name].dtype.name in ('object', 'category'):\n",
    "        df_columns_is_mean_centered_flag[column_name] = None\n",
    "    else:\n",
    "        df_columns_is_mean_centered_flag[column_name] = bool(np.isclose(df[column_name].mean(), 0.0))\n",
    "df_columns_is_mean_centered_flag"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Is Scaled\n",
    "# A scaled column has std 1 only up to floating-point rounding, so exact\n",
    "# equality gives false negatives; use a tolerance instead.\n",
    "df_columns_is_scaled_flag = {column_name: None for column_name in df_column_names}\n",
    "for column_name in df_columns_is_scaled_flag:\n",
    "    if df[column_name].dtype.name in ('object', 'category'):\n",
    "        df_columns_is_scaled_flag[column_name] = None\n",
    "    else:\n",
    "        df_columns_is_scaled_flag[column_name] = bool(np.isclose(df[column_name].std(), 1.0))\n",
    "df_columns_is_scaled_flag"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Is Z-Score (AKA Standardized, Normalized, Centered and Scaled)\n",
    "# A genuinely standardized column has mean ~1e-17 and std 1 +/- eps, so\n",
    "# exact float equality almost never holds; compare with a tolerance.\n",
    "df_columns_is_z_score_flag = {column_name: None for column_name in df_column_names}\n",
    "for column_name in df_columns_is_z_score_flag:\n",
    "    if df[column_name].dtype.name in ('object', 'category'):\n",
    "        df_columns_is_z_score_flag[column_name] = None\n",
    "    else:\n",
    "        df_columns_is_z_score_flag[column_name] = bool(np.isclose(df[column_name].mean(), 0.0) and np.isclose(df[column_name].std(), 1.0))\n",
    "df_columns_is_z_score_flag"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Skew & Kurtosis"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Per-column skewness (scipy, NaNs omitted).\n",
    "df_columns_skew_value = {}\n",
    "for column_name in df_column_names:\n",
    "    if df[column_name].dtype.name in ('object', 'category'):\n",
    "        # Skew is undefined for non-numeric data; store 0 rather than None\n",
    "        # so the value-based sort in the next cell does not raise TypeError.\n",
    "        df_columns_skew_value[column_name] = 0\n",
    "    else:\n",
    "        df_columns_skew_value[column_name] = sp.stats.skew(df[column_name], nan_policy='omit')\n",
    "df_columns_skew_value"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Columns listed in ascending skewness order; sorting by value works\n",
    "# because the previous cell stored 0 (not None) for non-numeric columns.\n",
    "for key, value in sorted(df_columns_skew_value.items(), key=lambda item: item[1]):\n",
    "    print(\"{key}: {value}\".format(key=key, value=value))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Per-column kurtosis (scipy, NaNs omitted).\n",
    "df_columns_kurtosis_value = {}\n",
    "for column_name in df_column_names:\n",
    "    if df[column_name].dtype.name in ('object', 'category'):\n",
    "        # Kurtosis is undefined for non-numeric data; store 0 rather than\n",
    "        # None so the value-based sort in the next cell does not raise.\n",
    "        df_columns_kurtosis_value[column_name] = 0\n",
    "    else:\n",
    "        df_columns_kurtosis_value[column_name] = sp.stats.kurtosis(df[column_name], nan_policy='omit')\n",
    "df_columns_kurtosis_value"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Columns listed in ascending kurtosis order; sorting by value works\n",
    "# because the previous cell stored 0 (not None) for non-numeric columns.\n",
    "for key, value in sorted(df_columns_kurtosis_value.items(), key=lambda item: item[1]):\n",
    "    print(\"{key}: {value}\".format(key=key, value=value))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Outliers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Spread of AMT_INCOME_TOTAL: population std (np.std uses ddof=0) and\n",
    "# the corresponding 3-sigma outlier distance.\n",
    "income_std = np.std(df['AMT_INCOME_TOTAL'])\n",
    "print(income_std)\n",
    "print(income_std * 3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inter-quartile range of AMT_INCOME_TOTAL and the 1.5*IQR Tukey fence margin.\n",
    "income_iqr = sp.stats.iqr(df['AMT_INCOME_TOTAL'])\n",
    "print(income_iqr)\n",
    "print(income_iqr * 1.5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tukey fence demonstration for a single column: values outside\n",
    "# [Q1 - 1.5*IQR, Q3 + 1.5*IQR] are flagged as outliers.\n",
    "column_name = 'AMT_INCOME_TOTAL'\n",
    "q1 = df[column_name].quantile(0.25)\n",
    "q3 = df[column_name].quantile(0.75)\n",
    "fence_margin = (q3 - q1) * 1.50\n",
    "lower_fence = q1 - fence_margin\n",
    "upper_fence = q3 + fence_margin\n",
    "((df[column_name] < lower_fence) | (df[column_name] > upper_fence))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Per-column Tukey-fence (1.5 * IQR) outlier masks.\n",
    "# NOTE(review): numeric columns end up holding a full boolean Series while\n",
    "# object/category columns hold the scalar 0 -- the mixed value types make\n",
    "# this dict awkward to display or sort; consider mask.any() or mask.sum().\n",
    "df_columns_outlier_flag = {}\n",
    "for column_name in df_column_names:\n",
    "    if df[column_name].dtype.name in ('object', 'category'):\n",
    "        df_columns_outlier_flag[column_name] = 0\n",
    "    else:\n",
    "        q1 = df[column_name].quantile(0.25)\n",
    "        q3 = df[column_name].quantile(0.75)\n",
    "        fence_margin = (q3 - q1) * 1.50\n",
    "        lower_fence = q1 - fence_margin\n",
    "        upper_fence = q3 + fence_margin\n",
    "        df_columns_outlier_flag[column_name] = ((df[column_name] < lower_fence) | (df[column_name] > upper_fence))\n",
    "df_columns_outlier_flag"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
