{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "view-in-github"
   },
   "source": [
    "<a href=\"https://colab.research.google.com/github/mikexcohen/Statistics_book/blob/main/stats_ch13_confidenceIntervals.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
   ]
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Modern statistics: Intuition, Math, Python, R\n",
    "## Mike X Cohen (sincxpress.com)\n",
    "### https://www.amazon.com/dp/B0CQRGWGLY\n",
     "#### Code for chapter 13 (confidence intervals)\n",
    "\n",
    "---\n",
    "\n",
    "# About this code file:\n",
    "\n",
    "### This notebook will reproduce most of the figures in this chapter (some figures were made in Inkscape), and illustrate the statistical concepts explained in the text. The point of providing the code is not just for you to recreate the figures, but for you to modify, adapt, explore, and experiment with the code.\n",
    "\n",
    "### Solutions to all exercises are at the bottom of the notebook.\n",
    "\n",
    "#### This code was written in google-colab. The notebook may require some modifications if you use a different IDE."
   ],
   "metadata": {
    "id": "mzfXc9E3Xq7k"
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "dFT1qwVEyxTW"
   },
   "outputs": [],
   "source": [
    "# import libraries and define global settings\n",
    "import numpy as np\n",
    "import scipy.stats as stats\n",
    "import pandas as pd\n",
    "\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "# define global figure properties used for publication\n",
    "import matplotlib_inline.backend_inline\n",
    "matplotlib_inline.backend_inline.set_matplotlib_formats('svg') # display figures in vector format\n",
    "plt.rcParams.update({'font.size':14,             # font size\n",
    "                     'savefig.dpi':300,          # output resolution\n",
    "                     'axes.titlelocation':'left',# title location\n",
    "                     'axes.spines.right':False,  # remove axis bounding box\n",
    "                     'axes.spines.top':False,    # remove axis bounding box\n",
    "                     })"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "o2L4hFGNKrkW"
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Figure 13.1: Visualization of confidence intervals"
   ],
   "metadata": {
    "id": "3mzhioCcFfPc"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "### Note about the code in this cell: This is to illustrate the concept of confidence\n",
    "#   intervals. You will learn all of the code here later in this chapter, so don't worry\n",
    "#   if it doesn't make sense now. You can come back after reading the chapter and you'll\n",
    "#   understand all of it!\n",
    "\n",
    "\n",
    "\n",
    "# a population and its mean\n",
    "popdata = np.random.randn(100000) + 2\n",
    "popmean = np.mean(popdata)\n",
    "\n",
    "# a bunch of samples and their confidence intervals\n",
    "nSamples   = 20\n",
    "sampleSize = 50\n",
    "\n",
    "\n",
    "# setup the figure\n",
    "fig = plt.figure(figsize=(8,5))\n",
    "gs = plt.GridSpec(5,1)\n",
    "ax1 = fig.add_subplot(gs[0])\n",
    "ax2 = fig.add_subplot(gs[1:])\n",
    "\n",
    "# draw the population distribution and its mean\n",
    "ax1.hist(popdata,bins='fd',color='gray')\n",
    "ax1.axvline(x=popmean,linestyle='--',color=(.7,.7,.7))\n",
    "ax2.axvline(x=popmean,linestyle='--',color=(.7,.7,.7))\n",
    "\n",
    "# run the experiment\n",
    "for i in range(nSamples):\n",
    "\n",
    "  # draw a sample\n",
    "  sample = np.random.choice(popdata,sampleSize,replace=False)\n",
    "\n",
    "  # compute its mean and stdev\n",
    "  mean = np.mean(sample)\n",
    "  sem  = np.std(sample,ddof=1) / np.sqrt(sampleSize)\n",
    "\n",
    "  # confidence interval from scipy\n",
    "  CI = stats.t.interval(.95,sampleSize-1,loc=mean,scale=sem)\n",
    "\n",
    "  # plot it\n",
    "  if popmean>CI[0] and popmean<CI[1]:\n",
    "    c,s = 'k','s'\n",
    "  else:\n",
    "    c,s = 'w','o'\n",
    "  ax2.errorbar(mean,i,xerr=mean-CI[0],color='k',marker=s,markerfacecolor=c,markersize=8)\n",
    "\n",
    "\n",
    "\n",
    "ax1.set_xlim([0,4])\n",
    "ax1.axis('off')\n",
    "\n",
    "ax2.set_ylabel('Samples')\n",
    "ax2.set_yticks([])\n",
    "ax2.set_xlim([0,4])\n",
    "ax2.set_xlabel('Data value')\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('confint_whatIsCI.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "FqfBPaLEFeQV"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "PaHhsBWKFeFD"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Figure 13.2: CI vs std"
   ],
   "metadata": {
    "id": "qMTOSM8Yk4X4"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "sampleSizes = [100,1000]\n",
    "confLevel = .95\n",
    "\n",
    "_,axs = plt.subplots(2,1,figsize=(8,5))\n",
    "\n",
    "\n",
    "for ax,N,t in zip(axs,sampleSizes,['A','B']):\n",
    "\n",
    "  # Generate a random sample of size N\n",
    "  data = np.random.randn(N)*2\n",
    "  # force the mean to be zero\n",
    "  data -= np.mean(data)\n",
    "\n",
    "  # mean and standard deviation\n",
    "  mean = np.mean(data)\n",
    "  stdev = np.std(data,ddof=1)\n",
    "\n",
    "  # Calculate 95% confidence interval\n",
    "  stderr = stdev / np.sqrt(len(data))\n",
    "  conf_interval = stats.t.interval(confLevel, N-1, loc=mean, scale=stderr)\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "  # Plot the histogram\n",
    "  ax.hist(data,bins='fd',color=(.9,.9,.9))\n",
    "\n",
    "  # Plot the mean\n",
    "  ax.axvline(mean,color='k',linewidth=3,label='Mean')\n",
    "\n",
    "  # one standard deviation of the mean\n",
    "  ax.axvline(mean-stdev,color=(.6,.6,.6),linewidth=2,linestyle='--',label='1 std')\n",
    "  ax.axvline(mean+stdev,color=(.6,.6,.6),linewidth=2,linestyle='--')\n",
    "\n",
    "  # Plot the confidence interval\n",
    "  ax.axvline(conf_interval[0],color=(.3,.3,.3),linewidth=2,linestyle=':', label='95% CI')\n",
    "  ax.axvline(conf_interval[1],color=(.3,.3,.3),linewidth=2,linestyle=':')\n",
    "\n",
    "  ax.set(xlim=[-6,6],ylabel='Count')\n",
    "  ax.set_title(rf'$\\bf{{{t}}}$)  Sample size = {N}')\n",
    "  ax.legend()\n",
    "\n",
    "\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('confint_stdVsCI.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "Rlv1MqiNk4Ux"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "8SWPi_wEEHNA"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Analytic confidence interval"
   ],
   "metadata": {
    "id": "g6iaC9IdEHKQ"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "conflevel = .95\n",
    "n = 20\n",
    "tStar = stats.t.isf((1-conflevel)/2,n-1)\n",
    "print(tStar)"
   ],
   "metadata": {
    "id": "XDJKx3myEJPd"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# simulation parameters\n",
    "mean = 2.3\n",
    "stdev = 3.2\n",
    "N = 48\n",
    "conflevel = .95\n",
    "\n",
    "# confidence interval from formula\n",
    "tStar = stats.t.isf((1-conflevel)/2,N-1)\n",
    "conf_int_me = [ mean - tStar*(stdev/np.sqrt(N)), \\\n",
    "                mean + tStar*(stdev/np.sqrt(N)) ]\n",
    "\n",
    "# confidence interval from scipy\n",
    "conf_int_sp = stats.t.interval(confLevel,N-1,\n",
    "                               loc=mean,scale=stdev/np.sqrt(N))\n",
    "\n",
    "print(conf_int_me)\n",
    "print(conf_int_sp)"
   ],
   "metadata": {
    "id": "miXnf5UaEHHq"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "uMHJ3nC3EHFC"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Bootstrapping"
   ],
   "metadata": {
    "id": "jYtMuxWhk4O7"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "S = [1,2,3,4]\n",
    "S = [2,2,3,3]\n",
    "\n",
    "print('    Sample    |  Mean')\n",
    "print('----------------------')\n",
    "print(f'{S}  |  {np.mean(S):.2f}')\n",
    "\n",
    "for i in range(5):\n",
    "\n",
    "  # bootstrap a random sample\n",
    "  b = np.random.choice(S,len(S),replace=True)\n",
    "  # note: replace=True is the default setting; I set it here to emphasize its importance.\n",
    "\n",
    "  # and print it and its mean\n",
    "  print(f'{list(np.sort(b))}  |  {np.mean(b):.2f}')"
   ],
   "metadata": {
    "id": "aO8P6rZRk6af"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "kpKN25NCk6d6"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# FYI, function to compute nChooseK\n",
    "from scipy.special import comb\n",
    "comb(3,3,repetition=True) # annoying, the \"replace\" parameter is called \"repetition\" and defaults to False \u00af\\_(\u30c4)_/\u00af"
   ],
   "metadata": {
    "id": "Pjvq4m8SKuyG"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "pjd4LRIIk6hI"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
     "# [Don't peek!] How many boots?"
   ],
   "metadata": {
    "id": "7U90LQw8Q6MR"
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "### Note about the code below: This code includes the solution to Exercise 6 (empirical confidence intervals),\n",
    "### which produces Figure 13.3.\n",
    "### If you want to challenge yourself on Exercise 6, don't look at the code here :P"
   ],
   "metadata": {
    "id": "FWSbW6HzRFIz"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# parameters\n",
    "samplesize = 50\n",
    "\n",
    "# draw a random sample from the population\n",
    "dataSample = np.random.randn(samplesize)**2\n",
    "dataSample -= np.mean(dataSample)\n",
    "\n",
    "numboots = np.arange(50,5051,step=200)\n",
    "CIs = np.zeros((len(numboots),2))\n",
    "bmm = np.zeros(len(numboots))\n",
    "\n",
    "for i,nb in enumerate(numboots):\n",
    "  bootmeans = [np.mean(np.random.choice(dataSample,samplesize)) for booti in range(nb)]\n",
    "  CIs[i,:] = np.percentile(bootmeans,[2.5,97.5])\n",
    "  bmm[i] = np.mean(bootmeans)\n",
    "\n",
    "\n",
    "# and plot\n",
    "plt.figure(figsize=(4,5))\n",
    "plt.errorbar(bmm, numboots, xerr=[bmm-CIs[:,0],CIs[:,1]-bmm],\n",
    "             marker='s', color='k', markerfacecolor='w', linestyle='None')\n",
    "plt.xlim([-.5,.5])\n",
    "\n",
    "plt.ylabel('Number of bootstrap samples')\n",
    "plt.xlabel('Data value')\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('confint_nBoots.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "D7xYO8IOk4MW"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "np.percentile(bootmeans,[2.5,97.5])"
   ],
   "metadata": {
    "id": "eBdzaVuyk4Jp"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "lWhqpSeFk4Gw"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# CI for hypothesis testing"
   ],
   "metadata": {
    "id": "YqLlw6NG45-U"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# simulation parameters\n",
    "mean = 1.3\n",
    "stdev = 5.2\n",
    "N = 48\n",
    "conflevel = .95\n",
    "\n",
    "# confidence interval from scipy\n",
    "confint = stats.t.interval(confLevel,N-1,loc=mean,scale=stdev/np.sqrt(N))\n",
    "\n",
    "print(confint)"
   ],
   "metadata": {
    "id": "ZTaYU6jU47zJ"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "OnekMXPH45th"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Figure 13.4: Qualitative interpretation of confidence intervals"
   ],
   "metadata": {
    "id": "zehwfB9p2ync"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "eSizes = [6,1,6,1]\n",
    "means = [6.6,7,5,0]\n",
    "\n",
    "plt.figure(figsize=(3,3))\n",
    "plt.errorbar(range(4),means,eSizes,marker='s',color='k', markerfacecolor=(.8,.8,.8),capsize=5,linestyle='None')\n",
    "plt.axhline(y=0,color=(.7,.7,.7),linestyle='--',zorder=-1)\n",
    "plt.xticks(range(4),labels=['A','B','C','D'])\n",
    "plt.xlim([-.5,3.5])\n",
    "\n",
    "plt.savefig('confint_qualitative.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "I9tNRGDD23ns"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "osSa1slV45rI"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 1"
   ],
   "metadata": {
    "id": "qCk7ecFrk4Dy"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# parameters\n",
    "samplesizes = np.arange(50,1001,step=50)\n",
    "stdevs = np.linspace(.1,7,41)\n",
    "\n",
    "# initialization\n",
    "CIs = np.zeros((len(samplesizes),len(stdevs)))\n",
    "\n",
    "# experiment\n",
    "for ni,N in enumerate(samplesizes):\n",
    "  for si,s in enumerate(stdevs):\n",
    "\n",
    "    # SEM\n",
    "    sem = s/np.sqrt(N)\n",
    "\n",
    "    # When mean=0, the CI is symmetric, so its width is double the positive side\n",
    "    CIs[ni,si] = 2*stats.t.interval(.95,N-1,loc=0,scale=sem)[1]\n",
    "\n",
    "\n",
    "# draw the figure\n",
    "fig,ax = plt.subplots(1,figsize=(8,5))\n",
    "cax = ax.imshow(CIs,origin='lower',aspect='auto',vmin=0,vmax=1,cmap='gray_r',\n",
    "          extent=[stdevs[0],stdevs[-1],samplesizes[0],samplesizes[-1]])\n",
    "ax.set(xlabel='Standard deviation',ylabel='Sample size')\n",
    "\n",
    "# colorbar\n",
    "cbar_ax = fig.add_axes([.91,.11,.015,.77])\n",
    "cbar = plt.colorbar(cax,cax=cbar_ax,label='C.I. width')\n",
    "\n",
    "plt.savefig('confint_ex1.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "uuroPRKwhqlz"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "hcVrQ7F7k3-i"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 2"
   ],
   "metadata": {
    "id": "obJsyt5St46z"
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "ajqMA-7lKrkX"
   },
   "outputs": [],
   "source": [
    "## simulate data\n",
    "\n",
    "popN = int(1e7)  # lots and LOTS of data!!\n",
    "\n",
    "# the data\n",
    "population = (4*np.random.randn(popN))**1 # the \"1\" here is for exercise 2-4; change to \"2\" for exercise 5\n",
    "\n",
    "# we can calculate the exact population mean\n",
    "popMean = np.mean(population)\n",
    "\n",
    "# let's see it\n",
    "fig,ax = plt.subplots(2,1,figsize=(6,4))\n",
    "\n",
    "# TIP: plot only every 1000th sample\n",
    "ax[0].plot(population[::1000],'k.')\n",
    "ax[0].set_xlabel('Data index')\n",
    "ax[0].set_ylabel('Data value')\n",
    "\n",
    "ax[1].hist(population,bins='fd')\n",
    "ax[1].set_ylabel('Count')\n",
    "ax[1].set_xlabel('Data value')\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "_JubFsgRKrkY"
   },
   "outputs": [],
   "source": [
    "## draw a random sample\n",
    "\n",
    "# parameters\n",
    "samplesize = 500\n",
    "confidence =  95 # in percent\n",
    "\n",
    "# compute sample mean\n",
    "dataSample = np.random.choice(population,samplesize)\n",
    "samplemean = np.mean(dataSample)\n",
    "samplestd  = np.std(dataSample,ddof=1)\n",
    "\n",
    "# compute confidence intervals\n",
    "confint = stats.t.interval(confidence/100,samplesize-1,\n",
    "                           loc=samplemean,scale=samplestd/np.sqrt(samplesize))\n",
    "\n",
    "\n",
    "# graph everything\n",
    "fig,ax = plt.subplots(1,figsize=(8,4))\n",
    "\n",
    "# the histogram\n",
    "h = ax.hist(dataSample,bins='fd',color='k',alpha=.1,label='Sample histogram')\n",
    "ytop = np.max(h[0]) # convenient variable for histogram peak value\n",
    "\n",
    "# confidence interval area\n",
    "ax.fill_between([confint[0],confint[1]],[0,0],[ytop,ytop],color='k',alpha=.4,label=f'{confidence}% CI region')\n",
    "\n",
     "# now add the lines indicating population means\n",
    "ax.plot([popMean,popMean],[0,ytop*1.1],'k:',linewidth=2,label='Pop. mean')\n",
    "ax.plot([samplemean,samplemean],[0,ytop],'k--',linewidth=2,label='Sample mean')\n",
    "\n",
    "# uncomment to zoom in\n",
    "# ax.set_xlim([confint[0]-np.diff(confint),confint[1]+np.diff(confint)])\n",
    "\n",
    "# some more adjustments\n",
    "ax.legend()\n",
    "ax.set(yticks=[],xlabel='Data values',ylabel='Count (a.u.)')\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('confint_ex2.png')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "6wJq8k0TuZvm"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 3"
   ],
   "metadata": {
    "id": "XGFOwLV8uZtD"
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "XRWdmwmfKrkZ"
   },
   "outputs": [],
   "source": [
    "## Test whether more samples have the population mean inside their CI\n",
    "\n",
    "# parameters\n",
    "numExperiments = 5000\n",
    "samplesize = 500\n",
    "\n",
    "# initialize the matrix of whether the population mean is inside the CI\n",
    "withinCI = np.zeros(numExperiments)\n",
    "\n",
    "\n",
    "# run the experiment\n",
    "for expi in range(numExperiments):\n",
    "\n",
    "  # compute sample mean and CI\n",
    "  dataSample = np.random.choice(population,samplesize)\n",
    "  samplemean = np.mean(dataSample)\n",
    "  samplestd  = np.std(dataSample,ddof=1)\n",
    "  confint    = stats.t.interval(confidence/100,samplesize-1,\n",
    "                           loc=samplemean,scale=samplestd/np.sqrt(samplesize))\n",
    "\n",
    "  # determine whether the true mean is inside this CI\n",
    "  if popMean>confint[0] and popMean<confint[1]:\n",
    "    withinCI[expi] = 1\n",
    "\n",
    "\n",
    "print('%g%% of sample C.I.''s contained the true population mean.'%(100*np.mean(withinCI)))"
   ]
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "jrfnASOqHQle"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 4"
   ],
   "metadata": {
    "id": "49S9Ra2QHRCA"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# print out some sample confint's for different sample sizes:\n",
    "confint"
   ],
   "metadata": {
    "id": "tCKAzD-DvBx9"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "hgDrwjPIvo27"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 5"
   ],
   "metadata": {
    "id": "LvFPkxnIxZbM"
   }
  },
  {
   "cell_type": "code",
   "source": [
     "# The discrepancy here is due to the assumptions of the analytic formula for computing confidence intervals.\n",
    "#\n",
    "# In Exercises 2-4, the normality assumption was met. Even when the sample size was tiny, the purely random\n",
    "# sampling in combination with the purely Gaussian distribution meant that the assumptions underlying the\n",
    "# confidence intervals were still met (although with a small sample size, the confidence intervals were so huge\n",
    "# as to be completely useless from a practical perspective, but still valid mathematically).\n",
    "#\n",
     "# In contrast, Exercise 5 violated the normality assumption. Now, with large samples, the CLT kicked in and still\n",
    "# gave us a good result. But the small sample sizes stretched the CLT to its limits, meaning it was no longer applicable.\n",
    "# And that in turn meant that the confidence intervals were junk and not reliable.\n",
    "#"
   ],
   "metadata": {
    "id": "4-ded51wxauD"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "bCWrtlZnvoxA"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 6"
   ],
   "metadata": {
    "id": "IvZ0N-4XyX4m"
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "_kTOHEuoV9KQ"
   },
   "outputs": [],
   "source": [
    "# parameters\n",
    "samplesize = 500\n",
    "numBoots   = 1000\n",
    "\n",
    "# draw a random sample from the population\n",
    "dataSample = np.random.choice(population,samplesize)\n",
    "\n",
    "# we'll need these statistics later\n",
    "samplemean = np.mean(dataSample)\n",
    "samplestd  = np.std(dataSample,ddof=1)\n",
    "\n",
    "\n",
    "# initialize a vector to store the bootstrapped means\n",
    "bootmeans = np.zeros(numBoots)\n",
    "\n",
    "## now for bootstrapping\n",
    "for booti in range(numBoots):\n",
    "\n",
    "  # create a bootstrap sample\n",
    "  bootsample = np.random.choice(dataSample,samplesize)\n",
    "\n",
    "  # and compute its mean\n",
    "  bootmeans[booti] = np.mean(bootsample)\n",
    "\n",
    "\n",
    "# Coding note: I used a multi-line for-loop above for procedural clarity. A list-comprehension is more compact:\n",
    "#bootmeans = [np.mean(np.random.choice(dataSample,samplesize)) for booti in range(numBoots)]\n",
    "\n",
    "\n",
    "# find confidence intervals (hard-coded to 95%!)\n",
    "confintB = np.percentile(bootmeans,[2.5,97.5]) # B for bootstrap\n",
    "confintB"
   ]
  },
  {
   "cell_type": "code",
   "source": [
    "# graph everything\n",
    "fig,ax = plt.subplots(1,figsize=(8,4))\n",
    "\n",
    "# the histogram\n",
    "h = ax.hist(dataSample,bins='fd',color='k',alpha=.1,label='Data histogram')\n",
    "ax.hist(bootmeans,bins='fd',color='k',alpha=.5,label='Bootstrap means')\n",
    "ytop = np.max(h[0]) # convenient variable for histogram peak value\n",
    "\n",
    "# confidence interval area\n",
    "ax.fill_between([confintB[0],confintB[1]],[0,0],[ytop,ytop],color='k',alpha=.4,label=f'{confidence}% CI region')\n",
    "\n",
    "# lines indicating population means\n",
    "ax.plot([popMean,popMean],[0,ytop*1.1],'k:',linewidth=2,label='Pop. mean')\n",
    "ax.plot([samplemean,samplemean],[0,ytop],'k--',linewidth=2,label='Sample mean')\n",
    "\n",
    "# uncomment to zoom in\n",
    "ax.set_xlim([confintB[0]-np.diff(confintB),confintB[1]+np.diff(confintB)])\n",
    "\n",
    "# some more adjustments\n",
    "ax.legend()\n",
    "ax.set(yticks=[],xlabel='Data values',ylabel='Count (a.u.)')\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('confint_ex6.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "n_EuZbhY3RPC"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "DaCiRbFCV9KR"
   },
   "outputs": [],
   "source": [
    "## compare against the analytic confidence interval\n",
    "\n",
    "# compute confidence intervals (again, hard-coding to 95%)\n",
    "confintA = stats.t.interval(.975,samplesize-1,\n",
    "                           loc=samplemean,scale=samplestd/np.sqrt(samplesize))\n",
    "\n",
    "print(f'Empirical CI(95%) = ({confintB[0]:.3f},{confintB[1]:.3f})')\n",
    "print(f'Analytic  CI(95%) = ({confintA[0]:.3f},{confintA[1]:.3f})')"
   ]
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "rO4F0C0v6ijb"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 7"
   ],
   "metadata": {
    "id": "PS9wLjbM6igj"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# simulation params\n",
    "samplesize = 100\n",
    "trueR = .3 # true population correlation\n",
    "\n",
    "# generate the data\n",
    "X = np.random.randn(samplesize,2)\n",
    "X[:,1] = X[:,0]*trueR + X[:,1]*np.sqrt(1-trueR**2)\n",
    "\n",
    "# confirmation\n",
    "np.corrcoef(X.T)"
   ],
   "metadata": {
    "id": "VDthgTdvCkE4"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "### the python function (note: hard-coded to 95% confidence level)\n",
    "def corr_CI(X,nBoots=1000):\n",
    "\n",
    "  # initialize bootstrap sample-mean differences\n",
    "  bootstrap_r = np.zeros(nBoots)\n",
    "\n",
    "  # empirical sample size\n",
    "  samplesize = X.shape[0]\n",
    "\n",
    "  # generate bootstrap samples and correlate\n",
    "  for i in range(nBoots):\n",
    "    boot_idx = np.random.choice(range(samplesize), size=samplesize)\n",
    "    bootstrap_r[i] = np.corrcoef(X[boot_idx,:].T)[0,1]\n",
    "    # I prefer numpy's matrix input here for dealing with sampled data\n",
    "\n",
    "\n",
    "  # Compute the percentiles for the bootstrapped coefficients distribution\n",
    "  return np.percentile(bootstrap_r,[2.5,97.5]),bootstrap_r\n",
    "### end function definition\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "# observed correlation coefficient (here using scipy to get a p-value)\n",
    "obs_r,obs_p = stats.pearsonr(X[:,0],X[:,1])\n",
    "\n",
    "# get empirical confidence intervals (using default of 1000 bootstraps)\n",
    "CI,bootstrap_r = corr_CI(X)\n",
    "\n",
    "\n",
    "# choose a color for the CI area based on significance\n",
    "areacolor = 'gray' if np.sign(CI[0])==np.sign(CI[1]) else 'red'\n",
    "\n",
    "\n",
    "### plotting\n",
    "plt.figure(figsize=(8,5))\n",
    "\n",
    "# histogram of the bootstrapped coefficients\n",
    "plt.hist(bootstrap_r, bins=30, edgecolor='k', color='gray', alpha=.6,label='Bootstrap correl. hist.')\n",
    "\n",
    "# area for confidence interval (using fill_betweenx here for some variety :P )\n",
    "plt.fill_betweenx([0, plt.gca().get_ylim()[1]], CI[0], CI[1], color=areacolor, alpha=.5, label='95% CI')\n",
    "\n",
    "# lines indicating coefficients\n",
    "plt.axvline(obs_r,color='k',linestyle='--',linewidth=3, label='Sample correlation')\n",
    "plt.axvline(trueR,color='k',linestyle=':',linewidth=3, label='Pop. correlation')\n",
    "\n",
    "plt.legend()\n",
    "plt.xlim([-1,1])\n",
    "plt.xlabel('Correlation coefficient')\n",
    "plt.ylabel('Count')\n",
    "plt.title(f'Bootstrap distribution of correlation coefficients\\nPearson r = {obs_r:.2f}, p = {obs_p:.3f}',loc='center')\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('confint_ex7.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "PtaaOpucCkH2"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "bdo6WuRZCkKw"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 8"
   ],
   "metadata": {
    "id": "mEXfo492CkNf"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# simulation params\n",
    "samplesizes = np.arange(10,3011,step=100)\n",
    "\n",
     "# matrices to store the confidence intervals and observed correlations\n",
    "bootCI = np.zeros((len(samplesizes),2))\n",
    "obs_r = np.zeros(len(samplesizes))\n",
    "\n",
    "# reduce the number of bootstraps\n",
    "nBoots = 500 # number of samples\n",
    "\n",
    "# run the experiment!\n",
    "for idx,N in enumerate(samplesizes):\n",
    "\n",
    "  # generate the data\n",
    "  X = np.random.randn(N,2)\n",
    "  X[:,1] = X[:,0]*trueR + X[:,1]*np.sqrt(1-trueR**2)\n",
    "\n",
    "  # observed correlation coefficient\n",
    "  obs_r[idx] = np.corrcoef(X.T)[0,1]\n",
    "\n",
    "  # get confidence intervals\n",
    "  bootCI[idx,:] = corr_CI(X,nBoots)[0]\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "## the plot\n",
    "_,axs = plt.subplots(1,2,figsize=(10,3))\n",
    "axs[0].errorbar(samplesizes, obs_r, yerr=[obs_r-bootCI[:,0],bootCI[:,1]-obs_r],\n",
    "                marker='.',color='k',linestyle='None')\n",
    "axs[0].axhline(y=trueR,color=(.8,.8,.8),linestyle='--',zorder=-10)\n",
    "axs[0].set(xlabel='Sample size',ylabel='Correlation coef.')\n",
    "axs[0].set_title(r'$\\bf{A}$)  Correlations with 95% C.I.')\n",
    "\n",
    "axs[1].plot(samplesizes,np.diff(bootCI,axis=1),'ks',markerfacecolor=(.7,.7,.7))\n",
    "axs[1].set(xlabel='Sample size',ylabel='C.I. range')\n",
    "axs[1].set_title(r'$\\bf{B}$)  C.I. ranges')\n",
    "\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('confint_ex8.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "TkoRKvu_EKji"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "omdnC8upEKgR"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 9"
   ],
   "metadata": {
    "id": "OVCZ3qjCEKds"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# simulation params\n",
    "samplesize = 50\n",
    "\n",
    "# range of correlations\n",
    "coefs = np.linspace(0,.99,42)\n",
    "\n",
     "# matrices to store the confidence intervals and observed correlations\n",
    "bootCI = np.zeros((len(coefs),2))\n",
    "obs_r = np.zeros(len(coefs))\n",
    "\n",
    "\n",
    "# run the experiment!\n",
    "for idx,r in enumerate(coefs):\n",
    "\n",
    "  # generate the data\n",
    "  X = np.random.randn(samplesize,2)\n",
    "  X[:,1] = X[:,0]*r + X[:,1]*np.sqrt(1-r**2)\n",
    "\n",
    "  # observed correlation coefficient\n",
    "  obs_r[idx] = np.corrcoef(X.T)[0,1]\n",
    "\n",
    "  # confidence intervals\n",
    "  bootCI[idx,:] = corr_CI(X,nBoots)[0]\n",
    "\n",
    "\n",
    "\n",
    "## the plot\n",
    "_,axs = plt.subplots(1,2,figsize=(10,3))\n",
    "axs[0].errorbar(coefs, obs_r, yerr=[obs_r-bootCI[:,0],bootCI[:,1]-obs_r],\n",
    "                marker='.',color='k',linestyle='None')\n",
    "axs[0].plot(coefs,coefs,color=(.8,.8,.8),linestyle='--',zorder=-10)\n",
    "axs[0].set(xlabel='Population correlation',ylabel='Correlation coef.')\n",
    "axs[0].set_title(r'$\\bf{A}$)  Correlations with 95% C.I.')\n",
    "\n",
    "axs[1].plot(coefs,np.diff(bootCI,axis=1),'ks',markerfacecolor=(.7,.7,.7))\n",
    "axs[1].set(xlabel='Population correlation',ylabel='C.I. range')\n",
    "axs[1].set_title(r'$\\bf{B}$)  C.I. ranges')\n",
    "\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('confint_ex9.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "Qbjq4Y9VIyoO"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "5l19KcKOEKa0"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 10"
   ],
   "metadata": {
    "id": "WIVrjE6DIbIi"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# simulation parameters\n",
    "means = np.linspace(0,2.5,41)\n",
    "stds = np.linspace(.5,5,51)\n",
    "sampsize = 30\n",
    "\n",
    "# initialize output matrix\n",
    "statsmatrix = np.zeros((len(means),len(stds)))\n",
    "\n",
    "\n",
    "# run the experiment!\n",
    "for mi in range(len(means)):\n",
    "  for si in range(len(stds)):\n",
    "\n",
    "    # SEM\n",
    "    sem = stds[si]/np.sqrt(sampsize)\n",
    "\n",
    "    # confidence interval\n",
    "    CI = stats.t.interval(.95,sampsize-1,loc=means[mi],scale=sem)\n",
    "\n",
    "    # t/p values\n",
    "    tval = means[mi] / sem\n",
    "    pval = 2*stats.t.sf(tval,sampsize-1)\n",
    "\n",
    "    # build up the stats matrix according to significances\n",
    "    statsmatrix[mi,si] += int(CI[0]>0) # only positive means, so we need only test whether the lower bound is negative\n",
    "    statsmatrix[mi,si] += int(pval<.05)\n",
    "\n",
    "\n",
    "# customized colormap\n",
    "import matplotlib.colors as colors\n",
    "cmap = plt.get_cmap('gray',3)\n",
    "\n",
    "plt.figure(figsize=(8,6))\n",
    "plt.imshow(statsmatrix,vmin=0,vmax=2,cmap=cmap,origin='lower',aspect='auto',\n",
    "           extent=[stds[0],stds[-1],means[0],means[-1]])\n",
    "\n",
    "plt.xlabel('Standard deviations')\n",
    "plt.ylabel('Means')\n",
    "plt.gca().spines[['right','top']].set_visible(True)\n",
    "plt.colorbar(ticks=[0,1,2])\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('confint_ex10.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "c3NGgmUUIbEe"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# Confirm that no pixels are 1\n",
    "np.unique(statsmatrix)"
   ],
   "metadata": {
    "id": "hRu6SuSFQElo"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "7gCNqOrwIa0D"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 11"
   ],
   "metadata": {
    "id": "sDnC-TR_6iqN"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# generate the data\n",
    "sampsize = 30\n",
    "\n",
    "sample1 = np.random.randn(sampsize)\n",
    "sample2 = np.random.randn(sampsize)**2\n",
    "sample2 = sample2-np.mean(sample2) + .5 # mean-center then mean-shift\n",
    "\n",
    "# compute the ttest\n",
    "tres = stats.ttest_ind(sample1,sample2)"
   ],
   "metadata": {
    "id": "RIqGfcAJ6k9l"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# CI parameters\n",
    "nBoots = 1000 # number of samples\n",
    "\n",
    "# observed difference in means\n",
    "obs_diff = np.mean(sample1) - np.mean(sample2)\n",
    "\n",
    "# initialize bootstrap sample-mean differences\n",
    "bootstrap_diffs = np.zeros(nBoots)\n",
    "\n",
    "# Generate bootstrap samples.\n",
    "# Note that each sample is independently resampled, and then the difference of means is calculated.\n",
    "for i in range(nBoots):\n",
    "  boot_sample1 = np.random.choice(sample1, size=sampsize)\n",
    "  boot_sample2 = np.random.choice(sample2, size=sampsize)\n",
    "  bootstrap_diffs[i] = np.mean(boot_sample1) - np.mean(boot_sample2)\n",
    "\n",
    "# empirical confidence intervals (hard-coded to 95%)\n",
    "CI_B = np.percentile(bootstrap_diffs,[2.5,97.5])\n",
    "\n",
    "# choose a color for the CI area based on significance\n",
    "areacolor = 'gray' if np.sign(CI_B[0])==np.sign(CI_B[1]) else 'red'\n",
    "\n",
    "\n",
    "### plotting\n",
    "_,axs = plt.subplots(1,2,figsize=(10,5))\n",
    "\n",
    "# data distributions\n",
    "axs[0].hist(sample1, bins='fd', color='k', edgecolor='k', alpha=.8, label=r'$S_1$')\n",
    "axs[0].hist(sample2, bins='fd', color='k', edgecolor='k', alpha=.3, label=r'$S_2$')\n",
    "axs[0].set(xlabel='Data value',ylabel='Count')\n",
    "axs[0].legend()\n",
    "axs[0].set_title(f'Data histograms (t={tres.statistic:.02f}, p={tres.pvalue:.03f})')\n",
    "\n",
    "# bootstrapping distribution\n",
    "axs[1].hist(bootstrap_diffs, bins=30, edgecolor='k',color='gray', alpha=.6)\n",
    "axs[1].axvline(obs_diff,color='k',linestyle='--',linewidth=3, label=r'$S_1-S_2$')\n",
    "axs[1].fill_betweenx([0, plt.gca().get_ylim()[1]], CI_B[0], CI_B[1], color=areacolor, alpha=.3, label='95% CI')\n",
    "axs[1].legend()\n",
    "axs[1].set(xlim=[-2,1],xlabel='Difference in Means',ylabel='Count')\n",
    "axs[1].set_title(f'Bootstrap mean difference histogram',loc='center')\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('confint_ex11.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "xFKECo-Y6lDc"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "dw1nzdMz7-y2"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 12"
   ],
   "metadata": {
    "id": "xH03Xy6v7-vT"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# colored marble counts\n",
    "blue   = 40\n",
    "yellow = 30\n",
    "orange = 20\n",
    "totalMarbs = blue + yellow + orange\n",
    "\n",
    "# put them all in a jar\n",
    "jar = np.hstack((1*np.ones(blue),2*np.ones(yellow),3*np.ones(orange)))\n",
    "\n",
    "# now we draw a sample of 500 marbles (with replacement)\n",
    "numDraws = 500\n",
    "marbSample = np.random.choice(jar,size=numDraws)\n",
    "\n",
    "\n",
    "# bootstrapping for empirical confidence intervals\n",
    "nBoots = 1000\n",
    "bootProps = np.zeros((3,nBoots))\n",
    "for i in range(nBoots):\n",
    "\n",
    "  # bootstrap sample\n",
    "  bootmarbs = np.random.choice(marbSample,size=numDraws)\n",
    "\n",
    "  # empirical proportions of this sample\n",
    "  for j in range(1,4):\n",
    "    bootProps[j-1,i] = sum(bootmarbs==j)/numDraws\n",
    "\n",
    "# confidence intervals\n",
    "CI = np.array([ np.percentile(bootProps[0,:],[2.5,97.5]),\n",
    "                np.percentile(bootProps[1,:],[2.5,97.5]),\n",
    "                np.percentile(bootProps[2,:],[2.5,97.5]) ])\n",
    "\n",
    "\n",
    "# empirical proportions of colors drawn\n",
    "props = np.array([ sum(marbSample==1) / numDraws,\n",
    "                   sum(marbSample==2) / numDraws,\n",
    "                   sum(marbSample==3) / numDraws ] )\n",
    "\n",
    "\n",
    "# plot those against the theoretical probability\n",
    "plt.figure(figsize=(8,4))\n",
    "plt.bar([1,2,3],props,label='Proportion',color=(.7,.7,.7))\n",
    "plt.plot([0.5, 1.5],[blue/totalMarbs, blue/totalMarbs],'k',linewidth=3,label='Probability')\n",
    "plt.plot([1.5, 2.5],[yellow/totalMarbs,yellow/totalMarbs],'k',linewidth=3)\n",
    "plt.plot([2.5, 3.5],[orange/totalMarbs,orange/totalMarbs],'k',linewidth=3)\n",
    "\n",
    "\n",
    "plt.errorbar([1,2,3],props,yerr=[props-CI[:,0],CI[:,1]-props],\n",
    "                marker='.',color=(.3,.3,.3),linestyle='None',label='95% C.I')\n",
    "\n",
    "plt.xticks([1,2,3],labels=('Blue','Yellow','Orange'))\n",
    "plt.xlabel('Marble color')\n",
    "plt.ylabel('Proportion/probability')\n",
    "plt.legend()\n",
    "\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('confint_ex12.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "_B8s6une7-sS"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "DU1mYzlaCkTb"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 13"
   ],
   "metadata": {
    "id": "VQTGjJbSCkWT"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# import data\n",
    "df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/arrhythmia/arrhythmia.data',\n",
    "                 usecols = np.arange(9),\n",
    "                 names   = ['age','sex','height','weight','qrs','p-r','q-t','t','p'])\n",
    "\n",
    "# make a copy of the original data matrix\n",
    "df_z = df.copy()\n",
    "cols2zscore = []\n",
    "for col in df_z.columns:\n",
    "  if not (col=='sex'):\n",
    "    df_z[col] = (df[col] - df[col].mean()) / df[col].std(ddof=1)\n",
    "\n",
    "zThresh = 3.29 # p<.001\n",
    "df_clean = df.copy()\n",
    "df_clean[np.abs(df_z)>zThresh] = np.nan  # both tails"
   ],
   "metadata": {
    "id": "lqKxH3eLWjWr"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# Note that the methods mean(), std(), and count() in Pandas exclude NaN's.\n",
    "\n",
    "for col in df.columns:\n",
    "\n",
    "  # original data\n",
    "  mean = df[col].mean()\n",
    "  std  = df[col].std(ddof=1)\n",
    "  n    = df[col].count()\n",
    "  ci   = stats.t.ppf(.975,n-1) * std/np.sqrt(n)\n",
    "  print(f'{col:>6} initial: {mean:6.2f} +/- {ci:.2f}')\n",
    "\n",
    "  # repeat for cleaned\n",
    "  mean = df_clean[col].mean()\n",
    "  std  = df_clean[col].std(ddof=1)\n",
    "  n    = df_clean[col].count()\n",
    "  ci   = stats.t.ppf(.975,n-1) * std/np.sqrt(n)\n",
    "  print(f'{col:>6} cleaned: {mean:6.2f} +/- {ci:.2f}\\n')"
   ],
   "metadata": {
    "id": "KV27q8yv6idM"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "ox7d0oThXBQl"
   },
   "execution_count": null,
   "outputs": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  },
  "colab": {
   "provenance": [],
   "private_outputs": true
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}