{
 "nbformat": 4,
 "nbformat_minor": 0,
 "metadata": {
  "colab": {
   "private_outputs": true,
   "provenance": []
  },
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3"
  },
  "language_info": {
   "name": "python"
  }
 },
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "view-in-github"
   },
   "source": [
    "<a href=\"https://colab.research.google.com/github/mikexcohen/Statistics_book/blob/main/stats_ch13_regression.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
   ]
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Modern statistics: Intuition, Math, Python, R\n",
    "## Mike X Cohen (sincxpress.com)\n",
    "### https://www.amazon.com/dp/B0CQRGWGLY\n",
    "#### Code for Chapter 15 (regression)\n",
    "\n",
    "---\n",
    "\n",
    "# About this code file:\n",
    "\n",
    "### This notebook will reproduce most of the figures in this chapter (some figures were made in Inkscape), and illustrate the statistical concepts explained in the text. The point of providing the code is not just for you to recreate the figures, but for you to modify, adapt, explore, and experiment with the code.\n",
    "\n",
    "### Solutions to all exercises are at the bottom of the notebook.\n",
    "\n",
    "#### This code was written in google-colab. The notebook may require some modifications if you use a different IDE."
   ],
   "metadata": {
    "id": "yeVh6hm2ezCO"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# import libraries and define global settings\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import seaborn as sns\n",
    "import scipy.stats as stats\n",
    "\n",
    "# new: for running regression models\n",
    "import statsmodels.api as sm\n",
    "\n",
    "\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "# define global figure properties used for publication\n",
    "import matplotlib_inline.backend_inline\n",
    "matplotlib_inline.backend_inline.set_matplotlib_formats('svg') # display figures in vector format\n",
    "plt.rcParams.update({'font.size':14,             # font size\n",
    "                     'savefig.dpi':300,          # output resolution\n",
    "                     'axes.titlelocation':'left',# title location\n",
    "                     'axes.spines.right':False,  # remove axis bounding box\n",
    "                     'axes.spines.top':False,    # remove axis bounding box\n",
    "                     })"
   ],
   "metadata": {
    "id": "Bcz2Oz9IAG2T"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "7y1QkCW9zEeh"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Figure 15.1: Geometric view of regression"
   ],
   "metadata": {
    "id": "YxClF-nl2dFC"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# some data\n",
    "N = 9\n",
    "x = np.linspace(-1,4,N)\n",
    "y = 1 + x + np.random.randn(N)\n",
    "\n",
    "# get GLM predictions\n",
    "mdl = sm.OLS(y,sm.add_constant(x)).fit()\n",
    "yHat = mdl.predict(sm.add_constant(x))\n",
    "\n",
    "\n",
    "# plt\n",
    "_,axs = plt.subplots(1,1,figsize=(8,5))\n",
    "\n",
    "# plot the data\n",
    "axs.plot(x,y,'ko',markerfacecolor=(.8,.8,.8),markersize=18,zorder=10,alpha=.5,label='Observations ($y$)')\n",
    "axs.set(xlabel='X (regressor)',ylabel='Y (DV)')\n",
    "\n",
    "# plot the regression line\n",
    "axs.plot(x,yHat,'k-',linewidth=3,label='Regression line')\n",
    "\n",
    "# plot the intercept\n",
    "intpnt = mdl.predict([1,0])\n",
    "axs.plot(0,intpnt,'kv',markersize=10,markerfacecolor='w',label='Intercept',zorder=10)\n",
    "\n",
    "\n",
    "# data-point-specific projection lines\n",
    "for i in range(N):\n",
    "  axs.plot([x[i],x[i]],[y[i],yHat[i]],'--.',color='gray',zorder=-4)\n",
    "  axs.plot([x[i],x[i]],[yHat[i],yHat[i]],'ks',markersize=10,markerfacecolor=(.4,.4,.4),label=r'Predicted ($\\hat{y}$)')\n",
    "\n",
    "\n",
    "# final adjustments\n",
    "labh,labels = axs.get_legend_handles_labels() # to prevent redundant 'Predicted' labels\n",
    "axs.legend(labh[:4],labels[:4]) # only the first four (unique) legends\n",
    "axs.grid()\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_picOfReg.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "Y-AvTpc1Mcwd"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "Fb5Sb32TMcq8"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Figure 15.2: Regression vs. PCA"
   ],
   "metadata": {
    "id": "bDEi95ChMdFN"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# some data\n",
    "N = 10\n",
    "x = np.linspace(-1.5,1.5,N)\n",
    "y = x + np.random.randn(N)\n",
    "\n",
    "# mean-center variables\n",
    "y -= np.mean(y)\n",
    "maxval = np.max(np.abs(y))*1.1 # for axis scaling; PCA projections look orthogonal in square axes\n",
    "\n",
    "# get GLM predictions\n",
    "mdl = sm.OLS(y,sm.add_constant(x)).fit()\n",
    "yHat = mdl.predict(sm.add_constant(x))\n",
    "\n",
    "# compute PCA\n",
    "data = np.vstack((x,y)).T\n",
    "C = np.cov(data.T)\n",
    "evals,evecs = np.linalg.eig(C)\n",
    "PC = evecs[:,np.argmax(evals)]\n",
    "# projection points\n",
    "pcaX = np.zeros(N)\n",
    "pcaY = np.zeros(N)\n",
    "\n",
    "\n",
    "## plot\n",
    "_,axs = plt.subplots(1,2,figsize=(10,5))\n",
    "\n",
    "# plot the data\n",
    "for a in axs:\n",
    "  a.plot(x,y,'ko',markerfacecolor=(.8,.8,.8),markersize=18,zorder=10,alpha=.5)\n",
    "  a.set(xlabel='X',ylabel='Y',xlim=[-maxval,maxval],ylim=[-maxval,maxval])\n",
    "\n",
    "# plot the regression line\n",
    "axs[0].plot(x,yHat,'ks-',linewidth=3,markersize=10,markerfacecolor=(.4,.4,.4))\n",
    "\n",
    "\n",
    "# data-point-specific projection lines\n",
    "for i in range(N):\n",
    "\n",
    "  # regression is the projection onto the best fit line, holding 'x' constant\n",
    "  axs[0].plot([x[i],x[i]],[y[i],yHat[i]],'--.',color='gray',zorder=-4)\n",
    "\n",
    "  # compute and plot the PCA projection lines\n",
    "  pcaX[i],pcaY[i] = data[i,:]@PC*PC\n",
    "  axs[1].plot([x[i],pcaX[i]],[y[i],pcaY[i]],'--.',color='gray',zorder=-4)\n",
    "\n",
    "\n",
    "# now plot the PCA line\n",
    "axs[1].plot(pcaX,pcaY,'ks-',linewidth=3,markersize=10,markerfacecolor=(.4,.4,.4))\n",
    "\n",
    "\n",
    "# final adjustments\n",
    "axs[0].set_title(r'$\\bf{A}$)  Errors to minimize in regression')\n",
    "axs[0].legend(['Observations','Predictions','Errors'])\n",
    "\n",
    "axs[1].set_title(r'$\\bf{B}$)  Errors to minimize in PCA')\n",
    "\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_regVpca.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "fvjeVFrs2crp"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# All in one plot (a bit confusing to look at...)\n",
    "\n",
    "_,axs = plt.subplots(1,1,figsize=(6,5))\n",
    "\n",
    "# plot the data\n",
    "axs.plot(x,y,'ko',markerfacecolor=(.8,.8,.8),markersize=18,zorder=10,alpha=.5,label='Observations')\n",
    "axs.set(xlabel='X',ylabel='Y',xlim=[-maxval,maxval],ylim=[-maxval,maxval])\n",
    "\n",
    "# plot the regression line\n",
    "axs.plot(x,yHat,'ks-',linewidth=3,markersize=10,markerfacecolor=(.4,.4,.4),label='Regression line')\n",
    "\n",
    "\n",
    "# data-point-specific projection lines\n",
    "for i in range(N):\n",
    "\n",
    "  # regression is the projection onto the best fit line, holding 'x' constant\n",
    "  axs.plot([x[i],x[i]],[y[i],yHat[i]],'--.',color='gray',zorder=-4)\n",
    "\n",
    "  # compute and plot the PCA projection lines\n",
    "  pcaX[i],pcaY[i] = data[i,:]@PC*PC\n",
    "  axs.plot([x[i],pcaX[i]],[y[i],pcaY[i]],'--.',color='green',zorder=-4)\n",
    "\n",
    "\n",
    "# now plot the PCA line\n",
    "axs.plot(pcaX,pcaY,'gs-',linewidth=3,markersize=10,label='PCA line')\n",
    "\n",
    "\n",
    "# final adjustments\n",
    "axs.legend()\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "Dk2amUwl7mvj"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "SRu0Iz9-zEZt"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Figure 15.3: Joy and ice cream"
   ],
   "metadata": {
    "id": "ZpjWt_I2Xvxk"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# the data\n",
    "icecream  = np.array([ 1, 2, 4, 5, 7 ])\n",
    "happiness = np.array([ 5, 6.5, 6, 8, 9 ])\n",
    "\n",
    "# the plot\n",
    "plt.figure(figsize=(4,5))\n",
    "plt.plot(icecream,happiness,'ko',markerfacecolor='gray',markersize=18)\n",
    "plt.xlabel('Ice cream cones eaten')\n",
    "plt.ylabel('Overall life happiness (1-10)')\n",
    "plt.yticks(range(5,10))\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_icecreamjoy.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "-MVUSZznXoic"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "### run the regression\n",
    "\n",
    "# organize the IVs into a design matrix\n",
    "designMatrix = np.vstack((\n",
    "    np.ones(5,),  # intercept\n",
    "    icecream      # number of ice cream cones eaten\n",
    "    )).T\n",
    "\n",
    "# list of labels for model output\n",
    "IVnames = ['Intercept','Ice Cream']\n",
    "\n",
    "# evaluate the regression model (endog=DV, exog=IVs)\n",
    "regResults = sm.OLS(happiness,designMatrix).fit()\n",
    "\n",
    "# and print a summary of the results\n",
    "t = regResults.summary(xname=IVnames,yname='Happiness')\n",
     "t.as_text()"
   ],
   "metadata": {
    "id": "j4HawK1dXof_"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "gDW_AliT62YH"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Figure 15.4: Reminder of the geometry of regression"
   ],
   "metadata": {
    "id": "8vmG9_22625Q"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# some data\n",
    "N = 9\n",
    "x = np.linspace(-1,4,N)\n",
    "y = 1 + x + np.random.randn(N)\n",
    "\n",
    "# get GLM predictions\n",
    "mdl = sm.OLS(y,sm.add_constant(x)).fit()\n",
    "yHat = mdl.predict(sm.add_constant(x))\n",
    "\n",
    "\n",
    "# plt\n",
    "_,axs = plt.subplots(1,1,figsize=(4,6))\n",
    "\n",
    "# plot the data\n",
    "axs.plot(x,y,'ko',markerfacecolor=(.8,.8,.8),markersize=18,zorder=10,alpha=.5,label='Observations ($y$)')\n",
    "axs.set(xlabel='X (regressor)',ylabel='Y (DV)')\n",
    "\n",
    "# plot the regression line\n",
    "axs.plot(x,yHat,'k-',linewidth=3,label='Regression line')\n",
    "\n",
    "# plot the intercept\n",
    "intpnt = mdl.predict([1,0])\n",
    "axs.plot(0,intpnt,'kv',markersize=10,markerfacecolor='w',label='Intercept',zorder=10)\n",
    "\n",
    "\n",
    "# data-point-specific projection lines\n",
    "for i in range(N):\n",
    "  axs.plot([x[i],x[i]],[y[i],yHat[i]],'--.',color='gray',zorder=-4)\n",
    "  axs.plot([x[i],x[i]],[yHat[i],yHat[i]],'ks',markersize=10,markerfacecolor=(.4,.4,.4),label=r'Predicted ($\\hat{y}$)')\n",
    "\n",
    "\n",
    "# final adjustments\n",
    "labh,labels = axs.get_legend_handles_labels() # to prevent redundant 'Predicted' labels\n",
    "axs.legend(labh[:4],labels[:4],fontsize=12) # only the first four (unique) legends\n",
    "axs.grid()\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_picOfReg_redux.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "RLfS_oEF62TK"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "YECp_SES2cuR"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Figure 15.6: Simulating regression data: example 1"
   ],
   "metadata": {
    "id": "gyQ7NbgYzEWz"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# coefficients for linking the IV to the DV\n",
    "B0 = 50  # intercept in cm\n",
    "B1 =  6  # coefficient for change in age, also in cm\n",
    "\n",
    "# number of observations\n",
    "N = 135\n",
    "\n",
    "# the independent variable\n",
    "age = np.random.uniform(0,20,N)\n",
    "\n",
    "# and the noise\n",
    "noise = np.random.normal(0,15,N)\n",
    "\n",
    "# and now put it together to simulate the data\n",
    "height = B0 + B1*age + noise\n",
    "\n",
    "\n",
    "# visualization\n",
    "plt.figure(figsize=(4,5))\n",
    "\n",
    "plt.plot(age,height,'ko',markerfacecolor=(.9,.9,.9),markersize=10)\n",
    "plt.xlabel('Age (years)')\n",
    "plt.ylabel('Height (cm)')\n",
    "plt.title('Scatter plot of height by age',loc='center')\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_example1data.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "EW3nAF47zLGb"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "### run the regression\n",
    "\n",
    "# organize the IVs into a design matrix\n",
    "designMatrix = np.vstack((\n",
    "    np.ones(N,),  # intercept\n",
    "    age           # age IV\n",
    "    )).T\n",
    "\n",
    "# list of labels for model output\n",
    "IVnames = ['Intercept','Age']\n",
    "\n",
    "# evaluate the regression model (endog=DV, exog=IVs)\n",
    "regResults = sm.OLS(endog=height, exog=designMatrix).fit()\n",
    "\n",
    "# and print a summary of the results\n",
    "t = regResults.summary(xname=IVnames, yname='Height')\n",
     "t.as_text()"
   ],
   "metadata": {
    "id": "5X4y0XO8wuuo"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "4tlBpXUx24nT"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Figure 15.7: Visualizing the regression data"
   ],
   "metadata": {
    "id": "IaPSXECr244y"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# plot the predicted data\n",
    "yHat = regResults.predict()\n",
    "resid = regResults.resid\n",
    "\n",
    "\n",
    "_,axs = plt.subplots(2,2,figsize=(11,8))\n",
    "\n",
    "axs[0,0].plot(age,height,'k^',markerfacecolor=(.3,.3,.3),alpha=.6,markersize=10,label='Observed')\n",
    "axs[0,0].plot(age,yHat,'ks',markerfacecolor='w',alpha=.8,markersize=8,label='Predicted')\n",
    "axs[0,0].set(xlabel='Age (years)',ylabel='Height (cm)')\n",
    "axs[0,0].set_title(fr'$\\bf{{A}}$)  Observed and predicted data')\n",
    "axs[0,0].legend()\n",
    "\n",
    "axs[0,1].plot(height,yHat,'ko',markerfacecolor=(.3,.3,.3),alpha=.6,markersize=10)\n",
    "axs[0,1].set(xlabel='Observed height',ylabel='Predicted height')\n",
    "axs[0,1].set_title(fr'$\\bf{{B}}$)  Observed vs. predicted: r={np.corrcoef(height,yHat)[0,1]:.2f}')\n",
    "\n",
    "axs[1,0].plot(resid,yHat,'ks',markerfacecolor=(.5,.5,.5),alpha=.6,markersize=8)\n",
    "axs[1,0].set(xlabel='Residuals',ylabel='Predicted height')\n",
    "axs[1,0].set_title(fr'$\\bf{{C}}$)  Resid vs pred: r={np.corrcoef(resid,yHat)[0,1]:.2f}')\n",
    "\n",
    "axs[1,1].hist(resid,bins='fd',edgecolor='k',facecolor=(.7,.7,.7))\n",
    "axs[1,1].set(xlabel='Residuals',ylabel='Counts')\n",
    "axs[1,1].set_title(fr'$\\bf{{D}}$)  Distribution of residuals')\n",
    "\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_example1res.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "lf6cQJUXzERA"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "GZUg9ybfRZe_"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Figure 15.8: Simulating regression data: example 2"
   ],
   "metadata": {
    "id": "wyWRMY4daiYx"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# create coefficients for linking the IV to the DV\n",
    "B0 = 600  # intercept\n",
    "B1 = -2   # coefficient for brightness manipulation\n",
    "B2 = 60   # coefficient for experiment condition\n",
    "B3 = -2.5 # coefficient for interaction term\n",
    "\n",
    "\n",
    "# number of observations\n",
    "N = 100\n",
    "\n",
    "# generate independent variables\n",
    "brightness = np.random.uniform(10,100,N) # continuous IV\n",
    "category = (np.linspace(0,1,N)>.5)+0 # binary IV\n",
    "\n",
    "# noise\n",
    "noise = np.random.normal(0,50,N)\n",
    "\n",
    "\n",
    "# generate the data according to the model\n",
    "RT = B0 + B1*brightness + B2*category + B3*(brightness*category) + noise\n",
    "\n",
    "\n",
    "# visualization\n",
    "plt.figure(figsize=(4,5))\n",
    "\n",
    "plt.plot(brightness[category==0],RT[category==0],'ko',markerfacecolor=(.9,.9,.9),alpha=.7,markersize=10,label='Carrots')\n",
    "plt.plot(brightness[category==1],RT[category==1],'ks',markerfacecolor=(.5,.5,.5),alpha=.7,markersize=10,label='Chocolate')\n",
    "plt.xlabel('Picture brightness (% max)')\n",
    "plt.ylabel('Reaction time (ms)')\n",
    "plt.legend()\n",
    "plt.title(f'RT by brightness,\\ngrouped by category',loc='center')\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_example2data.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "ZeKOmDvYLbLk"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# regression model using a dataframe\n",
    "\n",
    "# construct the design matrix as a dataframe\n",
    "df = pd.DataFrame({\n",
    "    'Brightness' : brightness,\n",
    "    'Category'   : category,\n",
    "    #'Interaction': brightness * category # uncomment to include interaction term\n",
    "})\n",
    "\n",
    "# add an intercept term (sm calls it \"constant\")\n",
    "X = sm.add_constant(df)\n",
    "\n",
    "# inspect the design matrix:\n",
    "X # (const = intercept)"
   ],
   "metadata": {
    "id": "mhaOodzkNAlg"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# fit the model (note the default positions of endog= and exog=)\n",
    "model = sm.OLS(RT,X).fit()\n",
    "\n",
    "# show the regression summary (using rich-formatted text instead of plain text as in the previous example)\n",
    "model.summary()"
   ],
   "metadata": {
    "id": "uYS3UkOEco3Z"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "nsSIdOzn3Dp1"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Figure 15.9/10: Visualizing example 2"
   ],
   "metadata": {
    "id": "qEoOjy5b3D6q"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# generate predicted RT and residuals\n",
    "df['Predicted RT'] = model.predict(X)\n",
     "df['Residuals'] = RT - df['Predicted RT'] # residual = observed - predicted\n",
    "\n",
    "# change the values of the dummy-coded variables\n",
    "df['Food'] = df['Category'].map({0:'Carrots', 1:'Chocolate'})\n",
    "colorPalette = {'Carrots':(.7,.7,.7),'Chocolate':(.2,.2,.2)} # color mapping for visualization\n",
    "\n",
    "\n",
    "\n",
    "### now for the visualizations\n",
    "fig,axs = plt.subplots(1,3,figsize=(12,4))\n",
    "\n",
    "# scatter plot of observed data\n",
    "sns.scatterplot(x='Brightness',y=RT,hue='Food',data=df,\n",
    "                ax=axs[0],s=80,palette=colorPalette)\n",
    "\n",
    "# line plot of model predictions\n",
    "sns.lineplot(x='Brightness',y='Predicted RT',hue='Food',data=df,legend=False,\n",
    "             ax=axs[0],palette=colorPalette,linewidth=3)\n",
    "axs[0].set_title(r'$\\bf{A}$)  Data and predictions')\n",
    "\n",
    "\n",
    "# residuals plot\n",
    "sns.scatterplot(x='Predicted RT',y='Residuals',hue='Food',data=df,\n",
    "                ax=axs[1],s=80,palette=colorPalette)\n",
    "axs[1].set_title(r'$\\bf{B}$)  Residuals Plot')\n",
    "\n",
    "\n",
    "# histograms of residuals separated by category\n",
    "sns.histplot(data=df,x='Residuals',hue='Food',\n",
    "             palette=colorPalette,ax=axs[2])\n",
    "axs[2].set(xlabel='Residuals',ylabel='Count')\n",
    "axs[2].set_title(r'$\\bf{C}$)  Residuals histograms')\n",
    "\n",
    "# shrink down the legend font sizes\n",
    "for a in axs[:2]: a.legend(fontsize='x-small')\n",
    "\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_example2res2.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "00YJldtCNLHd"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# Correlations between predicted data and residuals\n",
    "R = np.corrcoef(df['Predicted RT'],df['Residuals'])[0,1]\n",
    "print(f'Overall correlation: r={R:.3f}')\n",
    "\n",
    "print('')\n",
    "print('Correlations grouped by food type:')\n",
     "df.groupby('Food')[['Predicted RT','Residuals']].apply(lambda group: group['Predicted RT'].corr(group['Residuals']))"
   ],
   "metadata": {
    "id": "6TsXXTOam1AA"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "P_nUpX7asYOb"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Figure 15.13: Regression example 3"
   ],
   "metadata": {
    "id": "dudNNd6SsYLa"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "### create the data\n",
    "exam_scores = []\n",
    "for ei in range(5):\n",
    "  exam_scores = np.hstack((exam_scores,70*np.ones(6)+np.linspace(-1,5,6)*ei))\n",
    "\n",
    "hours_studied = np.tile(np.linspace(2,8,6),5)\n",
    "ave_sleep_hrs = np.linspace(4,8,30)\n",
    "\n",
    "\n",
    "\n",
    "## plot the data\n",
    "_,axs = plt.subplots(1,2,figsize=(12,4))\n",
    "\n",
    "### stratify by hours studied\n",
    "\n",
    "# fewer than 4 hours studied\n",
    "plotidx = hours_studied<4.1\n",
    "axs[0].plot(ave_sleep_hrs[plotidx],exam_scores[plotidx],'ko',\n",
    "            markerfacecolor=(.9,.9,.9),markersize=12,label='<4 hours studied')\n",
    "\n",
    "# 5-6 hours studied\n",
    "plotidx = np.logical_and(hours_studied>4.9, hours_studied<6.1)\n",
    "axs[0].plot(ave_sleep_hrs[plotidx],exam_scores[plotidx],'k^',\n",
    "            markerfacecolor=(.6,.6,.6),markersize=12,label='5-6 hours studied')\n",
    "\n",
    "# more than 6 hours\n",
    "plotidx = hours_studied>6\n",
    "axs[0].plot(ave_sleep_hrs[plotidx],exam_scores[plotidx],'ks',\n",
     "            markerfacecolor=(.3,.3,.3),markersize=12,label='>6 hours studied')\n",
    "\n",
    "axs[0].set(xlabel='Hours of sleep',ylabel='Exam score')\n",
    "axs[0].legend()\n",
    "axs[0].set_title(r'$\\bf{A}$)  Visualization by grouping')\n",
    "\n",
    "\n",
    "h = axs[1].scatter(ave_sleep_hrs,exam_scores,s=100,c=hours_studied,cmap='turbo')\n",
    "cbar = plt.colorbar(h,ax=axs[1]) # colorbar\n",
    "cbar.set_label('Hours Studied',rotation=270,labelpad=15)\n",
    "axs[1].set(xlabel='Hours of sleep',ylabel='Exam score')\n",
    "axs[1].set_title(r'$\\bf{B}$)  Visualization by color')\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_example3data2d.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "iUsCTXs8sYIu"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "uRxqjj4g3feb"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Figure 15.12: Multidimensional data in a multidimensional space"
   ],
   "metadata": {
    "id": "H-LGgheK3gJY"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# A 3D visualization of the data (looks neat, but not really practical)\n",
    "from mpl_toolkits.mplot3d import Axes3D\n",
    "\n",
    "fig = plt.figure(figsize=(6,5))\n",
    "ax = fig.add_subplot(111, projection='3d')\n",
     "ax.scatter(hours_studied,ave_sleep_hrs,exam_scores,marker='o',s=100,facecolors=(.8,.8,.8),edgecolors='k')\n",
    "\n",
    "ax.set(xlabel='Hours Studied',ylabel='Average Sleep Hours',zlabel='Exam Scores')\n",
    "ax.set_box_aspect(aspect=None, zoom=.84)\n",
    "\n",
    "# you can try changing the view angle to make the data more interpretable, but the fact is that\n",
    "# 2D representations are nearly always more informative than 3D representations.\n",
    "ax.view_init(elev=20,azim=30)\n",
    "\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_example3data3d.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "O5x_VBEP3dEb"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# put all the data (IVs and DV) into one df\n",
    "\n",
    "# construct the design matrix as a dataframe\n",
    "df = pd.DataFrame({\n",
    "    'ExamScores' : exam_scores,\n",
    "    'Intercept'  : np.ones(len(exam_scores)),\n",
    "    'StudyHours' : hours_studied,\n",
    "    'SleepHours' : ave_sleep_hrs,\n",
    "    'Interaction': hours_studied * ave_sleep_hrs\n",
    "})\n",
    "\n",
    "# let's have a look at the dataframe\n",
    "df"
   ],
   "metadata": {
    "id": "_Og6nZcYsYF0"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "Ags7J4HlsYC0"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Figure 15.14: Piecewise regression"
   ],
   "metadata": {
    "id": "8HaK9tu3sYAB"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "## create the data\n",
    "N = 100\n",
    "x = np.linspace(0,10,N)\n",
    "bp = N//3 # bp = break point (one-third of the way through)\n",
    "\n",
    "# two different linear relationships\n",
    "y1 = 1.2*x[:bp]\n",
    "y2 =  .4*x[bp:]\n",
    "y2 = y2-y2[0]+y1[-1] # shift y2 to follow y1\n",
    "\n",
    "# combine the two parts with noise\n",
    "y = np.concatenate([y1,y2]) + np.random.normal(0,.3,size=N)\n",
    "\n",
    "\n",
    "\n",
    "### split the data\n",
    "# (here we know exactly where to split; in Exercise 6 you'll write an algorithm to find the best split)\n",
    "x1, y1 = x[x <= x[bp]], y[x <= x[bp]]\n",
    "x2, y2 = x[x >  x[bp]], y[x >  x[bp]]\n",
    "\n",
    "\n",
    "# fit separate linear regressions\n",
    "reg1 = sm.OLS(y1,sm.add_constant(x1)).fit()\n",
    "reg2 = sm.OLS(y2,sm.add_constant(x2)).fit()\n",
    "\n",
    "# predictions\n",
    "yHat1 = reg1.predict(sm.add_constant(x1))\n",
    "yHat2 = reg2.predict(sm.add_constant(x2))\n",
    "\n",
    "\n",
    "## plotting\n",
    "plt.figure(figsize=(4,4))\n",
    "plt.plot(x,y,'ko',markerfacecolor=(.95,.95,.95),markersize=10,alpha=.6)\n",
    "plt.plot(x1,yHat1,'k',linewidth=2)\n",
    "plt.plot(x2,yHat2,'k',linewidth=2)\n",
    "plt.axvline(x=x[bp],linestyle='--',color=(.5,.5,.5),zorder=-10,linewidth=.8)\n",
    "plt.xlabel('x')\n",
    "plt.ylabel('y')\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_piecewise.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "IS3Vhj4gdPIN"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "LRovzFU_cSni"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Figure 15.15: Polynomial design matrix"
   ],
   "metadata": {
    "id": "nNIf0NOqyn7i"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "x = np.linspace(-2,2,101)\n",
    "\n",
    "maxorder = 3\n",
    "\n",
    "\n",
    "plt.figure(figsize=(4,5))\n",
    "\n",
    "for i in range(maxorder+1):\n",
    "\n",
    "  # this regressor\n",
    "  xx = x**i\n",
    "\n",
    "  # plot it\n",
    "  c = i/(maxorder+1)\n",
    "  plt.plot(x,xx,color=(c,c,c),linewidth=3,label=fr'$x_{i}=x^{i}$')\n",
    "\n",
    "\n",
    "plt.xlim(x[[0,-1]])\n",
    "plt.ylim([-5,5])\n",
    "plt.xlabel('x')\n",
    "plt.ylabel('x$_k$')\n",
    "plt.legend()\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_polydesmat.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "mXecrmW8yncX"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "ePkM3-yKyltn"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Figure 15.16: Polynomial regression"
   ],
   "metadata": {
    "id": "6a3G80u0cSsJ"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "n = 30\n",
    "x = np.linspace(-2,3,n)\n",
    "\n",
    "_,axs = plt.subplots(2,1,figsize=(4,6))\n",
    "\n",
    "# generate data\n",
    "y = x**2 + np.random.randn(n)\n",
    "\n",
    "# beta coefficients (need only the coefficients, not a full model evaluation)\n",
    "polycoefs = np.polyfit(x,y,2)\n",
    "\n",
    "# predictions\n",
    "yHat = np.polyval(polycoefs,x)\n",
    "\n",
    "# and plot\n",
    "axs[0].plot(x,y,'ko',markersize=10,markerfacecolor=(.9,.9,.9))\n",
    "axs[0].set(xlim=[-2.2,3.2],ylim=[np.min(y)*1.3,np.max(y)*1.1],xticks=[],yticks=[])\n",
    "axs[0].plot(x,yHat,color='k')\n",
    "axs[0].set_title(r'$\\bf{A}$)  2$^{nd}$ order polynomial')\n",
    "\n",
    "\n",
    "# repeat for 3rd order polynomial\n",
    "y = x**2 - .4*x**3 + np.random.randn(len(x))*.8\n",
    "axs[1].plot(x,y,'ko',markersize=10,markerfacecolor=(.9,.9,.9))\n",
    "axs[1].set(xlim=[-2.2,3.2],ylim=[np.min(y)*1.3,np.max(y)*1.1],xticks=[],yticks=[])\n",
    "axs[1].plot(x,np.polyval(np.polyfit(x,y,3),x),color='k')\n",
    "axs[1].set_title(r'$\\bf{B}$)  3$^{rd}$ order polynomial')\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_polyExample23.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "_02oki8MjSgD"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "xWkM384LkX0F"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Figure 15.17: Polynomial order and overfitting"
   ],
   "metadata": {
    "id": "fivGAvhskXrg"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# plotting various orders\n",
    "n = 30\n",
    "x = np.linspace(-2,3,n)\n",
    "y = x**2 - .4*x**3 + np.random.randn(n)*.8\n",
    "\n",
    "_,axs = plt.subplots(2,3,figsize=(8,5))\n",
    "\n",
    "for oi,ax in enumerate(axs.flatten()):\n",
    "\n",
    "  # order number\n",
    "  order = oi*3+1\n",
    "\n",
    "  ax.plot(x,y,'ko',markersize=10,markerfacecolor=(.9,.9,.9))\n",
    "  ax.set(xlim=[-2.2,4.2],ylim=[np.min(y)*1.3,np.max(y)*1.1],xticks=[],yticks=[])\n",
    "  ax.plot(x,np.polyval(np.polyfit(x,y,order),x),color='k')\n",
    "  ax.set_title(f'Order = {order}',loc='center')\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_polyManyOrders.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "P3T6EqFlj4gP"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "rWIZFuC8nkGH"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Figure 15.18: Bayes Information Criteria (BIC)"
   ],
   "metadata": {
    "id": "gFch26uSojaF"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# initialize\n",
    "maxorder = 17\n",
    "bic = np.zeros(maxorder)\n",
    "\n",
    "for i in range(maxorder):\n",
    "  yhat = np.polyval(np.polyfit(x,y,i),x)\n",
    "  bic[i] = n*np.log( np.sum((yhat-y)**2) ) + i*np.log(n)\n",
    "\n",
    "\n",
    "plt.figure(figsize=(6,4))\n",
    "plt.plot(range(maxorder),bic,'ko-',markersize=12,markerfacecolor=(.9,.9,.9),linewidth=.5)\n",
    "plt.xlabel('Polynomial model order')\n",
    "plt.xticks(range(0,maxorder,3))\n",
    "plt.ylabel('BIC')\n",
    "\n",
    "# draw an arrow to the best BIC\n",
    "bestK = np.argmin(bic)\n",
    "plt.annotate('',xy=(bestK+.3,bic[bestK]+3),xytext=(bestK+2,bic[bestK]*1.2),\n",
    "            arrowprops={'color':(.3,.3,.3)})\n",
    "\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_polyBIC.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "n7opLsFUmATs"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "6d_wb0O9aiIs"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Figure 15.19: Log of probabilities"
   ],
   "metadata": {
    "id": "jP7ImmaTjq44"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "p = np.linspace(.0001,.3,156)\n",
    "\n",
    "plt.figure(figsize=(4,5))\n",
    "plt.plot(p,p/(1-p),color=(.8,.8,.8),linewidth=4,label='y = p/(1-p)')\n",
    "plt.plot(p,np.log(p/(1-p)),color=(.3,.3,.3),linewidth=4,label='y = ln[p/(1-p)]')\n",
    "\n",
    "plt.ylim([-9,1])\n",
    "plt.xlim(p[[0,-1]])\n",
    "plt.xlabel('p')\n",
    "plt.ylabel('Probability ratio (raw or log)')\n",
    "plt.legend()\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_logOdds.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "81b3l6yAjp8G"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "dW-1TCmljp5h"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Logistic regression example"
   ],
   "metadata": {
    "id": "CukJ_0QL4x1F"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# Generate data\n",
    "N = 100\n",
    "studyHours = np.random.uniform(0,10,N)\n",
    "\n",
    "# the generating equation\n",
    "pass_prob = 1 / (1 + np.exp(-(studyHours-5)))\n",
    "\n",
    "# randomize pass/fail according to probability function\n",
    "passed_exam = np.random.rand(N)<pass_prob"
   ],
   "metadata": {
    "id": "b8evQonC44R1"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# build design matrix\n",
    "X = np.vstack((np.ones(N),studyHours)).T\n",
    "\n",
    "# test the model\n",
    "model = sm.Logit(passed_exam,X).fit()\n",
    "print(model.summary())"
   ],
   "metadata": {
    "id": "bn6izUEy44Ot"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "MixnUV_K4G7F"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Figure 15.20: Visualization of logistic regression"
   ],
   "metadata": {
    "id": "iYoYJE7O4Hbm"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# interpolated values for study times\n",
    "xx = np.linspace(0,10,N)\n",
    "\n",
    "# predicted probabilities\n",
    "yHat = model.predict(sm.add_constant(xx))\n",
    "\n",
    "# and plot\n",
    "plt.figure(figsize=(8,5))\n",
    "plt.plot(studyHours,passed_exam,'ko',markersize=12,markerfacecolor=(.4,.4,.4),alpha=.5,label='Observed Data')\n",
    "plt.plot(xx,yHat,'k',linewidth=3,label='Logistic curve')\n",
    "plt.axhline(0,color='gray',zorder=-10)\n",
    "plt.axhline(1,color='gray',zorder=-10)\n",
    "plt.axhline(.5,color='gray',linestyle='--',zorder=-10,linewidth=.5)\n",
    "plt.plot([5,5],[0,1],'--',color='gray',zorder=-3,linewidth=.5)\n",
    "\n",
    "plt.xlabel('Hours Studied')\n",
    "plt.ylabel('Probability of Passing Exam')\n",
    "plt.ylim([-.1,1.1])\n",
    "plt.legend()\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_logistic.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "_Vukc37R44MD"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "rSmlANZl44JZ"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 1"
   ],
   "metadata": {
    "id": "7cG8Q6YRrnxX"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# the data (copied from the top of the code file)\n",
    "icecream  = np.array([ 1, 2, 4, 5, 7 ])\n",
    "happiness = np.array([ 5, 6.5, 6, 8, 9 ])\n",
    "\n",
    "# construct a design matrix\n",
    "X = np.vstack((icecream,np.ones(len(icecream)))).T\n",
    "\n",
    "# compute the left inverse\n",
    "leftInv = np.linalg.inv(X.T@X)@X.T\n",
    "\n",
    "# compute the regression coefficients\n",
    "betas = leftInv@happiness\n",
    "betas # compare to output of sm.OLS: [0.6096, 4.5833]"
   ],
   "metadata": {
    "id": "60DsJNq-wJ2D"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "I96aT_Flq1tZ"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 2"
   ],
   "metadata": {
    "id": "jSaZWSyHSne-"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# sample size\n",
    "N = 100\n",
    "\n",
    "# create data and design matrix\n",
    "DV = np.random.randn(N)\n",
    "DM = np.random.randn(N,1) # change 1 to 37\n",
    "\n",
    "# fit the model (including intercept)\n",
    "model = sm.OLS(DV,sm.add_constant(DM)).fit()\n",
    "\n",
    "# print the r-squared terms\n",
    "print(f'    R-squared: {model.rsquared:.3f}')\n",
    "print(f'adj.R-squared: {model.rsquared_adj:.3f}')"
   ],
   "metadata": {
    "id": "ne_zWdXCjES6"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# not part of the instructions, but FYI: demo that R2 is literally r(y,yHat)^2\n",
    "print( np.corrcoef(model.predict(sm.add_constant(DM)),DV)[0,1]**2 )\n",
    "print( model.rsquared )"
   ],
   "metadata": {
    "id": "7cQwpyW8jbr8"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# initializations\n",
    "nIVs = np.arange(1,N,3)\n",
    "results = np.zeros((len(nIVs),2))\n",
    "\n",
    "\n",
    "# the experiment\n",
    "for idx,M in enumerate(nIVs):\n",
    "\n",
    "  # loop over repetitions with new random numbers\n",
    "  for expi in range(50):\n",
    "\n",
    "    # create data and design matrix\n",
    "    DV = np.random.randn(N)\n",
    "    DM = np.random.randn(N,M)\n",
    "\n",
    "    # fit the model (including intercept)\n",
    "    model = sm.OLS(DV,sm.add_constant(DM)).fit()\n",
    "\n",
    "    # get the r-squared terms\n",
    "    results[idx,0] += 100*model.rsquared\n",
    "    results[idx,1] += 100*model.rsquared_adj\n",
    "\n",
    "# divide for the average\n",
    "results /= (expi+1)\n",
    "\n",
    "\n",
    "# now plot\n",
    "plt.figure(figsize=(8,4))\n",
    "plt.plot(nIVs/N,results[:,0],'ks',markersize=10,markerfacecolor=(.3,.3,.3),label='R-squared')\n",
    "plt.plot(nIVs/N,results[:,1],'ko',markersize=10,markerfacecolor=(.7,.7,.7),label='Adjusted R^2')\n",
    "plt.axhline(y=0,color='gray',linestyle='--',zorder=-4)\n",
    "plt.xlabel('Number of IVs (fraction of data points)')\n",
    "plt.ylabel('Variance explained (%)')\n",
    "plt.legend()\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_ex2.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "asRLmU10SncG"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# testing the model on new data drawn from the same population\n",
    "print('R2 for these data:')\n",
    "print(f'  {np.corrcoef(model.predict(sm.add_constant(DM)),DV)[0,1]**2:.3f}')\n",
    "\n",
    "print('')\n",
    "print('R2 for new data from the same population:')\n",
    "print(f'  {np.corrcoef(model.predict(sm.add_constant(DM)),np.random.randn(N))[0,1]**2:.3f}')"
   ],
   "metadata": {
    "id": "fvEE3pb4SnZP"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "ZSyh85bdSnV4"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 3"
   ],
   "metadata": {
    "id": "litynwuXYhMy"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# Important: The data for this exercise come from the code that created Figure 15.13.\n",
    "#            Run that code before running this code.\n",
    "\n",
    "# be careful with excluding the DV from the df!\n",
    "desmat = df.drop('ExamScores',axis=1) # design matrix\n",
    "DV = df['ExamScores']\n",
    "\n",
    "# fit the model\n",
    "model = sm.OLS(DV,desmat).fit()\n",
    "\n",
    "# show the regression summary\n",
    "model.summary()"
   ],
   "metadata": {
    "id": "1ZWxrctmYkGU"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# and visualizations\n",
    "\n",
    "# generate predicted RT and residuals\n",
    "df['PredictedScores'] = model.predict(desmat)\n",
    "df['Residuals'] = df['PredictedScores'] - DV\n",
    "\n",
    "\n",
    "### now for the visualizations\n",
    "fig,axs = plt.subplots(1,3,figsize=(12,4))\n",
    "\n",
    "# observed by predicted data\n",
    "sns.scatterplot(data=df,x='ExamScores', y='PredictedScores',s=100,ax=axs[0],color='k')\n",
    "axs[0].set_title(r'$\\bf{A}$)  Observed vs. predicted')\n",
    "\n",
    "# Residuals plot\n",
    "sns.scatterplot(x='PredictedScores',y='Residuals',data=df,color='k',ax=axs[1],s=80)\n",
    "axs[1].set_title(r'$\\bf{B}$)  Residuals Plot')\n",
    "\n",
    "\n",
    "# Plot histograms of residuals separated by category\n",
    "sns.histplot(data=df,x='Residuals',ax=axs[2],color='k')\n",
    "axs[2].set(xlabel='Residuals',ylabel='Count')\n",
    "axs[2].set_title(r'$\\bf{C}$)  Residuals histograms')\n",
    "\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "xNxC34d0YkI1"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "### Some observations:\n",
    "#\n",
    "# - The model predicts the data extremely well (panel A), which is no surprise:\n",
    "#   there was no added noise and the simulation was linear.\n",
    "#\n",
    "# - The residuals plot looks... strange. There is no linear correlation, but there is clearly structure in there.\n",
    "#   Examination of the regression summary table reveals strong skew and a significant non-normal distribution.\n",
    "#\n",
    "# - The residuals are strongly non-Gaussian distributed, probably due to the issues identified above.\n",
    "#\n",
    "# - Overall, the model diagnostics reveal some deep issues with these data, and question whether a regression is\n",
    "#   appropriate in this case. On the other hand, the patterns in the data are so clear that these violations can be\n",
    "#   tolerated in the interest of quantifying the effects of the IVs (sleep/study, and their interaction).\n",
    "#"
   ],
   "metadata": {
    "id": "T9Sk1Fm-w6A_"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "JVvMFrZzwKDt"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 4"
   ],
   "metadata": {
    "id": "pTepB-T085Nv"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "import statsmodels.formula.api as smf\n",
    "\n",
    "# define the formula\n",
    "formula = 'ExamScores ~ StudyHours + SleepHours + StudyHours*SleepHours'\n",
    "\n",
    "# fit and check the results\n",
    "result = smf.ols(formula, data=df).fit()\n",
    "result.summary()"
   ],
   "metadata": {
    "id": "xwiPUHOB85KW"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "5mCUUqjW85Gu"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 5"
   ],
   "metadata": {
    "id": "cfLYXGp3wKGb"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "from sklearn.linear_model import LinearRegression\n",
    "\n",
    "# extract relevant columns from the df\n",
    "X = df[['StudyHours','SleepHours','Interaction']]\n",
    "y = df['ExamScores']\n",
    "\n",
    "# create the model (SK=scikit, to avoid overwriting 'model' from sm)\n",
    "modelSK = LinearRegression()\n",
    "\n",
    "# and fit the model\n",
    "modelSK.fit(X,y)\n",
    "\n",
    "# print the coefficients\n",
    "print(f'Intercept: {modelSK.intercept_}')\n",
    "print(f'Coefficients: {modelSK.coef_}')"
   ],
   "metadata": {
    "id": "N4XfDsPSAy00"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "CFS8Vu7nxmtd"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 6"
   ],
   "metadata": {
    "id": "dQplY229xmc0"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "## create the simulated data\n",
    "N = 100\n",
    "x = np.linspace(0,10,N)\n",
    "bp = N//3 # bp = break point (one-third of the way through)\n",
    "\n",
    "# two different linear relationships\n",
    "y1 = 1.2*x[:bp]\n",
    "y2 =  .4*x[bp:]\n",
    "y2 = y2-y2[0]+y1[-1] # shift y2 to follow y1\n",
    "\n",
    "# combine the two parts with noise\n",
    "y = np.concatenate([y1,y2]) + np.random.normal(0,.1,size=N)\n",
    "\n",
    "\n",
    "\n",
    "### run the experiment\n",
    "# define breakpoints to evaluate, and initialize results\n",
    "breakPoints2test = np.linspace(2,8,37)\n",
    "BIC = np.zeros(len(breakPoints2test))\n",
    "\n",
    "# now for the experiment\n",
    "for idx,breakx in enumerate(breakPoints2test):\n",
    "\n",
    "  # split the data\n",
    "  x1, y1 = x[x <= breakx], y[x <= breakx]\n",
    "  x2, y2 = x[x >  breakx], y[x >  breakx]\n",
    "\n",
    "  # fit the regressions\n",
    "  reg1 = sm.OLS(y1,sm.add_constant(x1)).fit()\n",
    "  reg2 = sm.OLS(y2,sm.add_constant(x2)).fit()\n",
    "\n",
    "  # take the average BICs from the two pieces\n",
    "  BIC[idx] = (reg1.bic+reg2.bic)/2\n",
    "\n",
    "# find and report the \"optimal\" breakpoint\n",
    "bestBP = breakPoints2test[np.argmin(BIC)]\n",
    "\n",
    "print(f'Empirical best breakpoint: x = {bestBP:.2f}')\n",
    "print(f'Ground truth breakpoint:   x = {x[bp]:.2f}')"
   ],
   "metadata": {
    "id": "8H7P0Ml5J5GA"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "## now to run and visualize the model\n",
    "\n",
    "# Split the data again\n",
    "x1, y1 = x[x <= bestBP], y[x <= bestBP]\n",
    "x2, y2 = x[x >  bestBP], y[x >  bestBP]\n",
    "\n",
    "# linear regressions (again)\n",
    "reg1 = sm.OLS(y1,sm.add_constant(x1)).fit()\n",
    "reg2 = sm.OLS(y2,sm.add_constant(x2)).fit()\n",
    "\n",
    "# predictions\n",
    "yHat1 = reg1.predict(sm.add_constant(x1))\n",
    "yHat2 = reg2.predict(sm.add_constant(x2))\n",
    "\n",
    "\n",
    "\n",
    "### plotting\n",
    "_,axs = plt.subplots(1,2,figsize=(10,4))\n",
    "\n",
    "axs[0].plot(breakPoints2test,BIC,'ks',markersize=10,alpha=.6,markerfacecolor='w')\n",
    "axs[0].axvline(bestBP,linestyle='--',zorder=-1,color='gray',label='Minimum BIC')\n",
    "axs[0].set(xlabel='Breakpoint (x)',ylabel='BIC')\n",
    "axs[0].legend()\n",
    "axs[0].set_title(r'$\\bf{A}$)  BIC by breakpoint')\n",
    "\n",
    "axs[1].plot(x,y,'ko',markerfacecolor=(.95,.95,.95),markersize=10,alpha=.6)\n",
    "axs[1].plot(x1,yHat1,'k',linewidth=2)\n",
    "axs[1].plot(x2,yHat2,'k',linewidth=2)\n",
    "axs[1].axvline(x=x[bp],color='k',zorder=-10,linewidth=.8,label='True bp')\n",
    "axs[1].axvline(x=bestBP,linestyle='--',zorder=-1,color='gray',label='Est. bp')\n",
    "axs[1].set(xlabel='x',ylabel='y')\n",
    "axs[1].legend()\n",
    "axs[1].set_title(r'$\\bf{B}$)  Data and predictions')\n",
    "\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_ex6.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "ARjt-Luy-1l6"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "JG_BZFbKJ49Z"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 7"
   ],
   "metadata": {
    "id": "66QhvdLiD0yH"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "### simulate the data\n",
    "\n",
    "# sample size\n",
    "N = 40\n",
    "\n",
    "# design matrix with intercept\n",
    "X = sm.add_constant(np.linspace(0,5,N))\n",
    "\n",
    "# create the DV\n",
    "slope = np.exp(1)\n",
    "y = slope*X[:,1] + np.pi + np.random.randn(N)\n",
    "\n",
    "# fit the model\n",
    "orig_model = sm.OLS(y,X).fit()\n",
    "\n",
    "\n",
    "\n",
    "### experiment\n",
    "betas = np.zeros((N,2))\n",
    "\n",
    "\n",
    "for i in range(N):\n",
    "\n",
    "  # make a copy of the data with an outlier\n",
    "  yc = y.copy()\n",
    "  yc[i] += 10\n",
    "\n",
    "  # fit the model and get its slope (don't need to store the model)\n",
    "  betas[i,0] = sm.OLS(yc,X).fit().params[1]\n",
    "  betas[i,1] = sm.RLM(yc,X).fit().params[1]\n",
    "\n",
    "\n",
    "\n",
    "### plotting\n",
    "_,axs = plt.subplots(2,2,figsize=(10,8))\n",
    "\n",
    "axs[0,0].plot(X[:,1],y,'ks',markerfacecolor=(.9,.9,.9),markersize=10,alpha=.5)\n",
    "axs[0,0].plot(X[:,1],orig_model.predict(X),'k')\n",
    "axs[0,0].set(xlabel='x',ylabel='y')\n",
    "axs[0,0].set_title(r'$\\bf{A}$)  Original data')\n",
    "\n",
    "axs[0,1].plot(X[:,1],betas[:,0],'ko',markerfacecolor=(.9,.9,.9),markersize=8,alpha=.5,label='OLS')\n",
    "axs[0,1].plot(X[:,1],betas[:,1],'ks',markerfacecolor=(.5,.5,.5),markersize=8,alpha=.5,label='RLM')\n",
    "axs[0,1].axhline(y=slope,linestyle='--',color=(.4,.4,.4),label='GrTr')\n",
    "axs[0,1].axhline(y=orig_model.params[1],linestyle=':',linewidth=3,color=(.8,.8,.8),label='NoO')\n",
    "axs[0,1].legend(ncol=2)\n",
    "axs[0,1].set(xlabel='x value with outlier',ylabel=r'$\\beta$ coefficient')\n",
    "axs[0,1].set_title(r'$\\bf{B}$)  Slopes by outlier position')\n",
    "\n",
    "\n",
    "# drawing panels C and D\n",
    "for i in range(2):\n",
    "\n",
    "  # impose the outlier\n",
    "  yc = y.copy()\n",
    "  yc[-i] += 10\n",
    "\n",
    "  # plot the data\n",
    "  axs[1,i].plot(X[:,1],yc,'ks',markerfacecolor=(.9,.9,.9),markersize=10,alpha=.5)\n",
    "  axs[1,i].plot(X[-i,1],yc[-i],'kX',markersize=10)\n",
    "\n",
    "  # plot the model predictions\n",
    "  axs[1,i].plot(X[:,1],sm.OLS(yc,X).fit().predict(X),'k')\n",
    "  axs[1,i].set(xlabel='x',ylabel='y')\n",
    "\n",
    "# finalizing titles\n",
    "axs[1,0].set_title(r'$\\bf{C}$)  Outlier at the beginning')\n",
    "axs[1,1].set_title(r'$\\bf{D}$)  Outlier at the end')\n",
    "\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_ex7.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "IcaOS55_D2K1"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "t1s2JsegD0vF"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 8"
   ],
   "metadata": {
    "id": "OgpGa_MbLIMX"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# simulation parameters\n",
    "N = 135\n",
    "x = np.linspace(0,7,N)\n",
    "\n",
    "# generate the data\n",
    "y = 1*x + x*np.random.randn(N)\n",
    "\n",
    "# fit the model\n",
    "mdl = sm.OLS(y,sm.add_constant(x)).fit()\n",
    "\n",
    "# get the predicted data and residuals\n",
    "yHat = mdl.predict()\n",
    "resid = mdl.resid\n",
    "\n",
    "\n",
    "### plotting\n",
    "_,axs = plt.subplots(1,3,figsize=(12,4))\n",
    "\n",
    "axs[0].plot(x,y,'k^',markerfacecolor=(.3,.3,.3),alpha=.6,markersize=10,label='Observed')\n",
    "axs[0].plot(x,yHat,'k',markerfacecolor='w',alpha=.8,markersize=8,label='Predicted')\n",
    "axs[0].set(xlabel='x',ylabel='y')\n",
    "axs[0].set_title(fr'$\\bf{{A}}$)  Data and prediction')\n",
    "axs[0].legend(fontsize='small')\n",
    "\n",
    "axs[1].plot(yHat,resid,'ks',markerfacecolor=(.5,.5,.5),alpha=.6,markersize=8)\n",
    "axs[1].set(ylabel='Residuals',xlabel='Predicted')\n",
    "axs[1].set_title(fr'$\\bf{{B}}$)  Resid vs pred: r={np.corrcoef(resid,yHat)[0,1]:.2f}')\n",
    "\n",
    "axs[2].hist(resid,bins='fd',edgecolor='k',facecolor=(.7,.7,.7))\n",
    "axs[2].set(xlabel='Residuals',ylabel='Counts')\n",
    "axs[2].set_title(fr'$\\bf{{C}}$)  Distribution of residuals')\n",
    "\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_ex8a.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "HgK8nG_iD0lJ"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# print the true and estimated betas\n",
    "print( 'Ground-truth beta: 1')\n",
    "print(f'Estimated beta   : {mdl.params[1]:.3f}')"
   ],
   "metadata": {
    "id": "S5vPluElaQ7f"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "BF217dEp180x"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# experiment parameters\n",
    "m = 2        # slope (fixed for now)\n",
    "numreps = 20 # number of repetitions in the experiment\n",
    "\n",
    "# range of maximum heteroscedasticity values\n",
    "badness = np.linspace(1,10,15)\n",
    "\n",
    "# results matrix\n",
    "mismatch = np.zeros((len(badness),2))\n",
    "\n",
    "\n",
    "# how to simulate the noise; choose 1, 2, or 3\n",
    "noiseSimulation = 1\n",
    "\n",
    "\n",
    "# start the experiment!\n",
    "for idx,maxbad in enumerate(badness):\n",
    "\n",
    "  # repeat the experiment multiple times\n",
    "  for _ in range(numreps):\n",
    "\n",
    "    # generate the noise\n",
    "    if noiseSimulation==1: # as initially specified in the exercise (heterogeneity, but also total std, increase)\n",
    "      noise = np.random.randn(N)*np.linspace(1,maxbad,N)\n",
    "\n",
    "    elif noiseSimulation==2: # only manipulate overall noise levels (homogeneity)\n",
    "      noise = np.random.randn(N)*np.linspace(maxbad,maxbad,N)\n",
    "\n",
    "    elif noiseSimulation==3: # normalize the total noise so that only heterogeneity is manipulated\n",
    "      noise = np.random.randn(N)*np.linspace(1,maxbad,N)\n",
    "      noise /= np.std(noise,ddof=1) # global normalization\n",
    "\n",
    "\n",
    "    # generate the data\n",
    "    y = m*x + noise\n",
    "\n",
    "\n",
    "    # fit the model\n",
    "    mdl = sm.OLS(y,sm.add_constant(x)).fit()\n",
    "    mdl.summary() # need to call .summary() to create the diagn dictionary\n",
    "\n",
    "    # store results (beta error and -ln(p))\n",
    "    mismatch[idx,0] += np.abs(100*(mdl.params[1]-m)/m)\n",
    "    mismatch[idx,1] += -np.log(mdl.diagn['omnipv'])\n",
    "\n",
    "\n",
    "# divide to average\n",
    "mismatch /= numreps\n",
    "\n",
    "\n",
    "### plotting\n",
    "_,axs = plt.subplots(1,2,figsize=(11,4))\n",
    "\n",
    "axs[0].plot(badness,mismatch[:,0],'ks',markerfacecolor=(.7,.7,.7),markersize=10)\n",
    "axs[0].set(xlabel='Max heteroscedasticity',ylabel=r'Percent error in $\\beta$')\n",
    "axs[0].set_title(r'$\\bf{A}$)  Error in coefficient estimate')\n",
    "\n",
    "axs[1].plot(badness,mismatch[:,1],'ko',markerfacecolor=(.7,.7,.7),markersize=10)\n",
    "axs[1].axhline(y=-np.log(.05),color='gray',linestyle='--',label='p<.05')\n",
    "axs[1].legend()\n",
    "axs[1].set(xlabel='Max heteroscedasticity',ylabel='-ln(p) from Omnibus test')\n",
    "axs[1].set_title(r'$\\bf{B}$)  Residual nonnormality significance')\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_ex8c.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "l8jl-eC2D0n-"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "cOUdpFM7u4gz"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 9"
   ],
   "metadata": {
    "id": "WFFokrTAAbGf"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# base sample size (will be multiplied by 10 the way I implemented the simulation)\n",
    "N = 25\n",
    "\n",
    "# the linear term (temp in C)\n",
    "temp = np.tile( np.linspace(10,35,N) ,10)\n",
    "b_temp = 2\n",
    "\n",
    "# the nonlinear term (price)\n",
    "price = np.ceil(np.linspace(.01,8,N*10))\n",
    "b_price = -3\n",
    "\n",
    "# some noise\n",
    "noise = 4*np.random.randn(N*10)\n",
    "\n",
    "\n",
    "# the DV\n",
    "sales = 50 + b_temp*temp + b_price*(price-4)**2 + noise\n",
    "\n",
    "\n",
    "# plotting\n",
    "_,ax = plt.subplots(1,1,figsize=(6,4))\n",
    "h = ax.scatter(temp,sales,s=100,c=price,cmap='bwr',edgecolor='k')\n",
    "cbar = plt.colorbar(h,ax=ax) # colorbar\n",
    "cbar.set_label('Price per cone (\u20ac)',rotation=270,labelpad=15)\n",
    "ax.set(xlabel='Temperature (C)',ylabel='Ice cream sales')\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_ex9a.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "C9qS0lHYAa-V"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# build the regression model\n",
    "\n",
    "# construct the design matrix as a dataframe\n",
    "X = pd.DataFrame({\n",
    "    'Sales'      : sales,\n",
    "    'Intercept'  : np.ones(len(sales)),\n",
    "    'Temperature': temp,\n",
    "    'Price'      : (price-4)**2,\n",
    "    'Interaction': temp * price\n",
    "})\n",
    "\n",
    "# let's have a look at the dataframe\n",
    "X"
   ],
   "metadata": {
    "id": "U3WY707CAa7X"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# fit the model\n",
    "model = sm.OLS(X['Sales'],X.drop('Sales',axis=1)).fit()\n",
    "\n",
    "# show the regression summary\n",
    "model.summary()"
   ],
   "metadata": {
    "id": "iKivABFmAa4v"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "### make a prediction\n",
    "\n",
    "# predicted values of the IVs\n",
    "predicted_temp = 25\n",
    "predicted_price = 6.5\n",
    "\n",
    "# use the sm.OLS object to make a prediction (the 4 input values correspond to the IVs in the design matrix)\n",
    "yHat = model.predict(exog=(1,predicted_temp,(predicted_price-4)**2,predicted_temp*predicted_price))\n",
    "\n",
    "\n",
    "\n",
    "# plotting\n",
    "_,ax = plt.subplots(1,1,figsize=(6.5,4.5))\n",
    "h = ax.scatter(temp,sales,s=100,c=price,cmap='bwr',edgecolor='k')\n",
    "cbar = plt.colorbar(h,ax=ax) # colorbar\n",
    "cbar.set_label('Price per cone (\u20ac)',rotation=270,labelpad=15)\n",
    "\n",
    "# plot the prediction\n",
    "ax.plot(predicted_temp,yHat,'ks',markerfacecolor=(.4,1,.3),markersize=14)\n",
    "ax.plot(predicted_temp,yHat,'kX',markersize=10)\n",
    "\n",
    "ax.set(xlabel='Temperature (C)',ylabel='Ice cream sales')\n",
    "ax.set_title(f'Predicted sales for {predicted_temp} \u00b0C and \u20ac{predicted_price}',loc='center')\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_ex9b.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "mjOEhsW8Aa1_"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "uaw05Ez1AazQ"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 10"
   ],
   "metadata": {
    "id": "GIbfnIepfQTv"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# dataset website ref\n",
    "# https://archive.ics.uci.edu/dataset/437/residential+building+data+set\n",
    "\n",
    "\n",
    "# download the zip file\n",
    "!wget https://archive.ics.uci.edu/static/public/437/residential+building+data+set.zip -O z.zip\n",
    "\n",
    "# unpack it locally\n",
    "import zipfile\n",
    "with zipfile.ZipFile('z.zip','r') as zz:\n",
    "  zz.extractall('./')\n",
    "\n",
    "# import into pandas\n",
    "data = pd.read_excel('/content/Residential-Building-Data-Set.xlsx',skiprows=1,usecols='F,V,AA,DD')\n",
    "data.columns = ['FloorArea','Interest','CPI','Price']\n",
    "data"
   ],
   "metadata": {
    "id": "qy4v1onSD0iX"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# pairplot\n",
    "sns.pairplot(data,height=2,aspect=1.5,\n",
    "             plot_kws={'color':'black'},diag_kws={'color': 'black'})\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_ex10b.png')\n",
    "plt.show()\n",
    "\n",
    "# decisions based on visual inspection:\n",
    "# 1) log-transform FloorArea and Price\n",
    "# 2) Binarize Interest"
   ],
   "metadata": {
    "id": "rPvFRNMQD0fP"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# transformations\n",
    "data['log-Price']     = np.log(data['Price'])\n",
    "data['log-FloorArea'] = np.log(data['FloorArea'])\n",
    "data['bin-Interest']  = (data['Interest']>14.5) + 0\n",
    "\n",
    "data"
   ],
   "metadata": {
    "id": "W4-iJIPeYbhx"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# redo the pairplot with the new variables\n",
    "sns.pairplot(data,vars=['log-Price','log-FloorArea','CPI'],height=2,aspect=1.5,\n",
    "             plot_kws={'color':'black'},diag_kws={'color': 'black'})\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_ex10c.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "Lt_ZxqzcpJ6Q"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# pick a threshold for outliers\n",
    "zThresh = 3 # p<.001 (not exactly .001, but z=3 is also a typical threshold, I guess because people like integers)\n",
    "\n",
    "# create a copy of the data and z-transform\n",
    "data_z = data[['log-Price','log-FloorArea','CPI']].copy()\n",
    "for col in data_z.columns:\n",
    "  data_z[col] = (data[col] - data[col].mean()) / data[col].std(ddof=1)\n",
    "\n",
    "# box plots of z-scored data\n",
    "plt.figure(figsize=(4,5))\n",
    "sns.boxplot(data=data_z).set(xlabel='Data feature',ylabel='Data value (z)')\n",
    "plt.axhline(y=zThresh,color='gray',linestyle='--')\n",
    "plt.axhline(y=-zThresh,color='gray',linestyle='--')\n",
    "plt.gca().set_xticklabels(plt.gca().get_xticklabels(), fontsize=11)\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_ex10z.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "FI87DW5U7ccQ"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# remove the outliers from the original data\n",
    "\n",
    "print(f'Pre-cleaned dataset has {len(data)} rows.')\n",
    "\n",
    "# actually remove (note that I'm using data_z to remove rows from data, and that data_z.abs() makes it two-tailed)\n",
    "data = data[(data_z.abs() <= zThresh).all(axis=1)].copy()\n",
    "\n",
    "print(f'Post-cleaned dataset has {len(data)} rows.')\n",
    "\n",
    "# tip: try re-running the previous cell to recreate the boxplot"
   ],
   "metadata": {
    "id": "fKjsKZ5j-Gfr"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "## Correlation matrix\n",
    "\n",
     "R = data.drop(['Interest','bin-Interest'],axis=1).corr() # try method='spearman' for rank correlations\n",
    "\n",
    "plt.figure(figsize=(8,6))\n",
    "sns.heatmap(R, annot=True, cmap='coolwarm',vmin=-1,\n",
    "            xticklabels=R.columns,yticklabels=R.columns)\n",
    "\n",
    "plt.title('Correlation matrix',loc='center',weight='bold')\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_ex10d.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "bGpZbPMUpJsQ"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "nu_sYINrpJny"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 11"
   ],
   "metadata": {
    "id": "g7JfeqsDr0dJ"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# add an intercept term\n",
    "data['Intercept'] = np.ones(len(data))\n",
    "\n",
    "# add an interaction\n",
    "data['Int X CPI'] = data['bin-Interest']*data['CPI']\n",
    "\n",
    "# fit the model\n",
    "desmat = data.drop(['log-Price','Price','FloorArea','Interest'],axis=1)\n",
    "model = sm.OLS(data['log-Price'],desmat).fit()\n",
    "\n",
    "# show the regression summary\n",
     "model.summary().as_text()"
   ],
   "metadata": {
    "id": "dNYKp1gVpJkq"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# plot with predicted data and residuals\n",
    "\n",
    "\n",
    "# and visualizations\n",
    "colorPalette = {0:(.7,.7,.7),1:(.2,.2,.2)} # color mapping for visualization\n",
    "\n",
     "# generate predicted log-price values and residuals\n",
     "data['Predicted'] = model.predict(desmat)\n",
     "# note: residuals here are predicted-minus-observed (sign-flipped relative to the usual observed-minus-predicted convention)\n",
     "data['Residuals'] = data['Predicted'] - data['log-Price']\n",
    "\n",
    "\n",
    "\n",
    "### now for the visualizations\n",
    "fig,axs = plt.subplots(2,2,figsize=(12,8))\n",
    "\n",
    "# scatter plot of observed data\n",
    "sns.scatterplot(x='CPI',y='log-Price',hue='bin-Interest',data=data,\n",
    "                palette=colorPalette,ax=axs[0,0],s=80)\n",
    "\n",
    "# line plot of model predictions\n",
    "sns.lineplot(x='CPI',y='Predicted',hue='bin-Interest',data=data,legend=False,\n",
    "             ax=axs[0,0],linewidth=3,palette=colorPalette)\n",
    "axs[0,0].set_title(r'$\\bf{A}$)  Data and predictions')\n",
    "\n",
    "# predicted by observed\n",
    "sns.scatterplot(x='log-Price',y='Predicted',hue='bin-Interest',data=data,\n",
    "                palette=colorPalette,ax=axs[0,1],s=80)\n",
    "axs[0,1].set_title(r'$\\bf{B}$)  Data by predictions')\n",
    "\n",
    "# residuals plot\n",
    "sns.scatterplot(x='Predicted',y='Residuals',hue='bin-Interest',data=data,\n",
    "                ax=axs[1,0],s=80,palette=colorPalette)\n",
    "axs[1,0].set_title(r'$\\bf{C}$)  Residuals Plot')\n",
    "\n",
    "\n",
    "# histograms of residuals separated by category\n",
    "sns.histplot(data=data,x='Residuals',hue='bin-Interest',\n",
    "             palette=colorPalette,ax=axs[1,1])\n",
    "axs[1,1].set(xlabel='Residuals',ylabel='Count')\n",
    "axs[1,1].set_title(r'$\\bf{D}$)  Residuals histograms')\n",
    "\n",
    "# shrink down the legend font sizes\n",
    "for a in axs.flatten(): a.legend(fontsize='x-small')\n",
    "\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('reg_ex11b.png')\n",
    "plt.show()"
   ],
   "metadata": {
    "id": "JzXWrSu_pJhc"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "jtaXi5O1pJep"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Exercise 12"
   ],
   "metadata": {
    "id": "GhSQAGAPkUzP"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "### standardizing the data\n",
    "\n",
    "# the columns that need to be standardized (not the intercept!)\n",
    "cols2zscore = [ 'CPI','log-FloorArea','log-Price','bin-Interest','Int X CPI' ]\n",
    "\n",
    "# standardize into a new copy\n",
    "dataStd = data.copy()\n",
    "dataStd[cols2zscore] = (data[cols2zscore] - data[cols2zscore].mean()) / data[cols2zscore].std(ddof=1)\n",
    "\n",
    "# fit the model\n",
    "desmatStd = dataStd.drop(['log-Price','Price','FloorArea','Interest','Predicted','Residuals'],axis=1)\n",
    "modelStd = sm.OLS(dataStd['log-Price'],desmatStd).fit()\n",
    "\n",
    "# show the regression summary\n",
     "modelStd.summary().as_text()"
   ],
   "metadata": {
    "id": "IEZ97flRm7hr"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
     "### standardizing the betas\n",
    "\n",
    "# standard deviations of the data columns\n",
    "stds = data.std(ddof=1)\n",
    "\n",
    "\n",
    "# print top row of table\n",
    "print('     Variable:  Unstd  | Beta-std | Data-std')\n",
    "print('-------------:---------|----------|---------')\n",
    "\n",
    "# loop through the variable names\n",
    "for (name,beta),betaDataStd in zip(model.params.items(),modelStd.params):\n",
    "\n",
    "  # compute the standardized beta from the variable stds\n",
    "  betaStd = beta * stds[name]/stds['log-Price']\n",
    "\n",
    "  # print everything!\n",
    "  print(f'{name:>13}: {beta:7.4f} | {betaStd:7.4f}  | {betaDataStd:7.4f}')"
   ],
   "metadata": {
    "id": "snRQWDbt2E4V"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "# report the condition numbers\n",
    "print(f'Condition number of the unstandardized design matrix: {np.linalg.cond(desmat):8.2f}')\n",
    "print(f'Condition number of the standardized design matrix  : {np.linalg.cond(desmatStd):8.2f}')"
   ],
   "metadata": {
    "id": "CA_qIkGd5Jqk"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [],
   "metadata": {
    "id": "yXQ7SCUG6vUv"
   },
   "execution_count": null,
   "outputs": []
  }
 ]
}