{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Applied Questions _ Chapter_8"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-09-19T12:24:03.143559Z",
     "start_time": "2023-09-19T12:24:02.482558Z"
    }
   },
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "\n",
    "from sklearn.linear_model import Ridge,Lasso,LogisticRegression\n",
    "# `load_boston` was removed in scikit-learn 1.2; the Boston data is fetched manually below\n",
    "from sklearn.model_selection import train_test_split,cross_val_score\n",
    "from sklearn.ensemble import RandomForestRegressor,BaggingRegressor\n",
    "from sklearn.metrics import mean_squared_error,accuracy_score,confusion_matrix\n",
    "from sklearn.tree import DecisionTreeRegressor,DecisionTreeClassifier\n",
    "from sklearn import tree\n",
    "from xgboost import XGBRegressor,XGBClassifier\n",
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "\n",
    "\n",
    "from tqdm import tqdm\n",
    "import graphviz\n",
    "from IPython.display import HTML\n",
    "\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 7. In the lab, we applied random forests to the Boston data using mtry=6 and using ntree=25 and ntree=500. Create a plot displaying the test error resulting from random forests on this data set for a more comprehensive range of values for mtry and ntree. You can model your plot after Figure 8.10. Describe the results obtained\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-09-19T12:24:03.149884Z",
     "start_time": "2023-09-19T12:24:03.144843Z"
    }
   },
   "outputs": [],
   "source": [
    "# Fetch the Boston housing data from the original source\n",
    "# (sklearn's load_boston was removed in version 1.2; this is the replacement it recommends)\n",
    "data_url = \"http://lib.stat.cmu.edu/datasets/boston\"\n",
    "raw_df = pd.read_csv(data_url, sep=\"\\s+\", skiprows=22, header=None)\n",
    "boston = pd.DataFrame(np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]]),\n",
    "                      columns=['CRIM','ZN','INDUS','CHAS','NOX','RM','AGE','DIS','RAD','TAX','PTRATIO','B','LSTAT'])\n",
    "boston['MEDV'] = raw_df.values[1::2, 2]\n",
    "boston.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Before going any further, I consider it good practice to go through the data once and see what the columns describe.\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "- CRIM - per capita crime rate by town\n",
    "- ZN - proportion of residential land zoned for lots over 25,000 sq.ft.\n",
    "- INDUS - proportion of non-retail business acres per town.\n",
    "- CHAS - Charles River dummy variable (1 if tract bounds river; 0 otherwise)\n",
    "- NOX - nitric oxides concentration (parts per 10 million)\n",
    "- RM - average number of rooms per dwelling\n",
    "- AGE - proportion of owner-occupied units built prior to 1940\n",
    "- DIS - weighted distances to five Boston employment centres\n",
    "- RAD - index of accessibility to radial highways\n",
    "- TAX - full-value property-tax rate per $10,000\n",
    "\n",
    "- PTRATIO - pupil-teacher ratio by town\n",
    "- B - 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town\n",
    "- LSTAT - % lower status of the population\n",
    "- MEDV - Median value of owner-occupied homes in $1000's"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.146658Z"
    }
   },
   "outputs": [],
   "source": [
    "#splitting the data into train and test \n",
    "X_train,X_test,y_train,y_test = train_test_split(boston.drop('MEDV',axis=1),boston['MEDV'],test_size = 0.5,)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.147974Z"
    }
   },
   "outputs": [],
   "source": [
    "p = boston.drop('MEDV',axis = 1).shape[1] # total number of features\n",
    "max_features_dict = {'p':p,'p/2':int(p/2),'sqrt(p)':int(np.sqrt(p))} # or we can also use keywords like 'auto','sqrt'\n",
    "scores_dict = {}"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "randomforestregressor - https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html\n",
    "We will be focusing on two parameters - n_estimators and max_features"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.149718Z"
    }
   },
   "outputs": [],
   "source": [
    "scores_dict = {}\n",
    "for max_feature_value in ['p','p/2','sqrt(p)']:\n",
    "    max_feat = max_features_dict[max_feature_value]\n",
    "    scores = []\n",
    "    for n_estimator in tqdm(range(1,501,10)):\n",
    "        rf = RandomForestRegressor(n_estimators=n_estimator,max_features=max_feat)\n",
    "        rf.fit(X_train,y_train)\n",
    "        scores.append(mean_squared_error(y_test,rf.predict(X_test)))\n",
    "    scores_dict[max_feature_value] = scores\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.151916Z"
    }
   },
   "outputs": [],
   "source": [
    "scores_dict.keys()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-09-19T12:24:03.181573Z",
     "start_time": "2023-09-19T12:24:03.154568Z"
    }
   },
   "outputs": [],
   "source": [
    "df = pd.DataFrame(scores_dict)\n",
    "df.index = np.arange(1,501,10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.156797Z"
    }
   },
   "outputs": [],
   "source": [
    "df.plot(figsize = (10,6))\n",
    "plt.ylim(10,25)\n",
    "plt.xlabel('Number of Trees')\n",
    "plt.ylabel('Test MSE')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 8. In the lab, a classification tree was applied to the Carseats data set after converting Sales into a qualitative response variable. Now we will seek to predict Sales using regression trees and related approaches, treating the response as a quantitative variable\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.158986Z"
    }
   },
   "outputs": [],
   "source": [
    "carseats = pd.read_csv('E:\\programming\\dataset\\Into_to_statstical_learning\\Carseats.csv')\n",
    "print(carseats.shape)\n",
    "carseats.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "( Optional )\n",
    "Going through the data columns - \n",
    "- Sales: unit sales in thousands\n",
    "- CompPrice: price charged by competitor at each location\n",
    "- Income: community income level in 1000s of dollars\n",
    "- Advertising: local ad budget at each location in 1000s of dollars\n",
    "- Population: regional pop in thousands\n",
    "- Price: price for car seats at each site\n",
    "- ShelveLoc: Bad, Good or Medium indicates quality of shelving location\n",
    "- Age: age level of the population\n",
    "- Education: ed level at location\n",
    "- Urban: Yes/No\n",
    "- US: Yes/No"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.160854Z"
    }
   },
   "outputs": [],
   "source": [
    "data = carseats.copy()\n",
    "# as the labels are ordinal, we would only be using label encoder, and since the values are small in number, we will map them\n",
    "yes_no_dict = {'Yes':1,'No':0}\n",
    "data['Urban'] = data['Urban'].map(yes_no_dict)\n",
    "data['US'] = data['US'].map(yes_no_dict)\n",
    "data['ShelveLoc'] = data['ShelveLoc'].map({'Bad':0,'Medium':1,'Good':2})  # ordinal encoding: Bad < Medium < Good"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.162322Z"
    }
   },
   "outputs": [],
   "source": [
    "data.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (a) Split the data set into a training set and a test set."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.163877Z"
    }
   },
   "outputs": [],
   "source": [
    "X_train,X_test,y_train,y_test = train_test_split(data.drop('Sales',axis= 1),data['Sales'],test_size = 0.5,random_state = 1)\n",
    "print(X_train.shape,X_test.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (b) Fit a regression tree to the training set. Plot the tree, and interpret the results. What test MSE do you obtain?"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.165187Z"
    }
   },
   "outputs": [],
   "source": [
    "model = DecisionTreeRegressor()\n",
    "model.fit(X_train,y_train)\n",
    "print('Training MSE is ',mean_squared_error(y_train,model.predict(X_train))) \n",
    "## a training MSE near zero means the unpruned tree is surely overfitting; next, plot the tree and calculate the test MSE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false,
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.166386Z"
    }
   },
   "outputs": [],
   "source": [
    "#plotting the tree\n",
    "\n",
    "dot_data = tree.export_graphviz(model, out_file=None, \n",
    "                                feature_names=X_train.columns,  \n",
    "                                filled=True)\n",
    "# Draw graph\n",
    "graph = graphviz.Source(dot_data) \n",
    "display(HTML(graph._repr_svg_()))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The above tree is very complex because no max_depth was specified. Normally the optimal max_depth ranges from 3-6, so we will tune it next."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.167474Z"
    }
   },
   "outputs": [],
   "source": [
    "#test mse \n",
    "print('Test MSE is ',mean_squared_error(y_test,model.predict(X_test)))\n",
    "print('Test R2 score is ',model.score(X_test,y_test)) # it is pretty poor"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (c) Use cross-validation in order to determine the optimal level of tree complexity. Does pruning the tree improve the test MSE?\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.168466Z"
    }
   },
   "outputs": [],
   "source": [
    "scores_dict = {}\n",
    "for max_depth in range(1,10):\n",
    "    model = DecisionTreeRegressor(max_depth=max_depth)\n",
    "    scores_dict[max_depth] = -np.mean(cross_val_score(model,X_train,y_train,scoring = 'neg_mean_squared_error',cv=10))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.170306Z"
    }
   },
   "outputs": [],
   "source": [
    "plt.figure(figsize = (10,6))\n",
    "plt.plot(list(scores_dict.keys()),list(scores_dict.values()),marker = 'o')\n",
    "plt.plot(min(scores_dict, key=scores_dict.get),min(list(scores_dict.values())),marker = 'o',markersize = 8,c='r',label = 'min_error')\n",
    "plt.legend()\n",
    "plt.xlabel('Max Depth')\n",
    "plt.ylabel('10 CV Error')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.172364Z"
    }
   },
   "outputs": [],
   "source": [
    "# from the above graph, we get 4 as the best value of max_depth\n",
    "best_model = DecisionTreeRegressor(max_depth = 4)\n",
    "best_model.fit(X_train,y_train)\n",
    "print('Training MSE is ',mean_squared_error(y_train,best_model.predict(X_train)))\n",
    "print('Test MSE is ',mean_squared_error(y_test,best_model.predict(X_test)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Test MSE has decreased from 5.41834 to 4.82. That's great!"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Lets analyze this tree"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.173934Z"
    }
   },
   "outputs": [],
   "source": [
    "#plotting the tree\n",
    "\n",
    "dot_data = tree.export_graphviz(best_model, out_file=None, \n",
    "                                feature_names=X_train.columns,  \n",
    "                                filled=True)\n",
    "# Draw graph\n",
    "graph = graphviz.Source(dot_data) \n",
    "display(HTML(graph._repr_svg_()))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.175518Z"
    }
   },
   "outputs": [],
   "source": [
    "# feature importance for best model\n",
    "pd.Series(best_model.feature_importances_,index = X_train.columns).sort_values(ascending=False).plot.bar()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "From the above graph it is clear that Shelveloc and Price are the most important features to detect the price. So, if someone is starting a new store, just make sure the shelveLoc is Good!"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (d) Use the bagging approach in order to analyze this data. What test MSE do you obtain? Use the importance() function to determine which variables are most important.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.176881Z"
    }
   },
   "outputs": [],
   "source": [
    "base_reg = DecisionTreeRegressor()\n",
    "bagging = BaggingRegressor(base_reg)\n",
    "bagging.fit(X_train,y_train)\n",
    "print('Training MSE ',mean_squared_error(y_train,bagging.predict(X_train)))\n",
    "print('Test MSE ',mean_squared_error(y_test,bagging.predict(X_test)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Using bagging we are getting a significant decrease in MSE to 1.15"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.178191Z"
    }
   },
   "outputs": [],
   "source": [
    "# going through each of the estimator in bagging model to calculate feature importance\n",
    "feature_importances = np.mean([tree.feature_importances_ for tree in bagging.estimators_], axis=0)\n",
    "pd.Series(feature_importances,index = X_train.columns).sort_values(ascending=False).plot.bar()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Unlike the decision tree regressor, the bagging model gives similar importance to ShelveLoc and Price; in fact, here the importance of Price is slightly greater than ShelveLoc. So, if someone is starting a new store, we would suggest - \"hey buddy, keep the condition of ShelveLoc good, but keep the price reasonable too\" :D"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (e) Use random forests to analyze this data. What test MSE do you obtain? Use the importance() function to determine which variables are most important. Describe the effect of m, the number of variables considered at each split, on the error rate obtained.\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.180148Z"
    }
   },
   "outputs": [],
   "source": [
    "rf = RandomForestRegressor()\n",
    "rf.fit(X_train,y_train)\n",
    "print('Training MSE is ',mean_squared_error(y_train,rf.predict(X_train)))\n",
    "print('Test MSE is ',mean_squared_error(y_test,rf.predict(X_test)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Woah, random forest further decreases MSE to 2.968"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-09-19T12:24:03.355234Z",
     "start_time": "2023-09-19T12:24:03.181819Z"
    }
   },
   "outputs": [],
   "source": [
    "#feature importance\n",
    "pd.Series(rf.feature_importances_,index = X_train.columns).sort_values(ascending=False).plot.bar()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We can see that the feature importance plot is quite similar to the one we got from bagging model. "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Effect of max_features (m)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.183840Z"
    }
   },
   "outputs": [],
   "source": [
    "scores_dict = {}\n",
    "for max_feature_value in range(1,11):\n",
    "    rf = RandomForestRegressor(max_features=max_feature_value)\n",
    "    rf.fit(X_train,y_train)\n",
    "    scores_dict[max_feature_value] = mean_squared_error(y_test,rf.predict(X_test))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.186147Z"
    }
   },
   "outputs": [],
   "source": [
    "plt.figure(figsize = (10,6))\n",
    "plt.plot(list(scores_dict.keys()),list(scores_dict.values()),marker = 'o')\n",
    "plt.plot(min(scores_dict, key=scores_dict.get),min(list(scores_dict.values())),marker = 'o',markersize = 8,c='r',label = 'min_error')\n",
    "plt.legend()\n",
    "plt.xlabel('Number of features considered for each split')\n",
    "plt.ylabel('Test Error')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We are getting best value when the value of the parameter is 7."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 9. This problem involves the OJ data set which is part of the ISLR package.\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.188205Z"
    }
   },
   "outputs": [],
   "source": [
    "data = pd.read_csv('E:\\programming\\dataset\\Into_to_statstical_learning\\OJ.csv',index_col=0)\n",
    "print(data.shape)\n",
    "data.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Lets go through the dataset, and see what it describes\n",
    "- Purchase-A factor with levels CH and MM indicating whether the customer purchased Citrus Hill or Minute Maid Orange Juice\n",
    "\n",
    "- WeekofPurchase - Week of purchase\n",
    "- StoreID-Store ID\n",
    "- PriceCH-Price charged for CH\n",
    "- PriceMM-Price charged for MM\n",
    "- DiscCH-Discount offered for CH\n",
    "- DiscMM-Discount offered for MM\n",
    "- SpecialCH-Indicator of special on CH\n",
    "- SpecialMM-Indicator of special on MM\n",
    "- LoyalCH-Customer brand loyalty for CH\n",
    "- SalePriceMM-Sale price for MM\n",
    "- SalePriceCH-Sale price for CH\n",
    "- PriceDiff-Sale price of MM less sale price of CH\n",
    "- Store7-A factor with levels No and Yes indicating whether the sale is at Store 7\n",
    "- PctDiscMM-Percentage discount for MM\n",
    "- PctDiscCH-Percentage discount for CH\n",
    "- ListPriceDiff-List price of MM less list price of CH\n",
    "- STORE-Which of 5 possible stores the sale occured at"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.189593Z"
    }
   },
   "outputs": [],
   "source": [
    "data['Purchase'].value_counts()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We can guess from the dataset, that the problem we are having involves a number of predictors, and we want to predict whether a customer will go for CH(citrus hill), or mm(minute maid)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.191189Z"
    }
   },
   "outputs": [],
   "source": [
    "#encoding the categorical columns\n",
    "data['Purchase'] = data['Purchase'].map({'CH':0,'MM':1})\n",
    "data['Store7'] = data['Store7'].map({'Yes':1,'No':0})\n",
    "data.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (a) Create a training set containing a random sample of 800 observations, and a test set containing the remaining observations."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.193281Z"
    }
   },
   "outputs": [],
   "source": [
    "# train_size=800 gives exactly 800 training observations; the remaining 270 form the test set\n",
    "X_train,X_test,y_train,y_test = train_test_split(data.drop('Purchase',axis=1),data['Purchase'],train_size = 800,random_state=1)\n",
    "print(X_train.shape,X_test.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (b) Fit a tree to the training data, with Purchase as the response and the other variables except for Buy as predictors. Use the summary() function to produce summary statistics about the tree, and describe the results obtained. What is the training error rate? How many terminal nodes does the tree have?\n",
    "\n",
    "\n",
    " \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.195168Z"
    }
   },
   "outputs": [],
   "source": [
    "model = DecisionTreeClassifier()\n",
    "model.fit(X_train,y_train)\n",
    "print('Training Error rate ',1 - model.score(X_train,y_train))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "One thing I have observed is that if we don't specify a max_depth, or any other parameter that acts as a stopping condition, the tree is very likely to overfit; looking at the training error here, that seems to be the case."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (c) Type in the name of the tree object in order to get a detailed text output. Pick one of the terminal nodes, and interpret the information displayed.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.197063Z"
    }
   },
   "outputs": [],
   "source": [
    "# this is the closest i can find\n",
    "print(tree.export_text(model))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (d) Create a plot of the tree, and interpret the results."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.198866Z"
    }
   },
   "outputs": [],
   "source": [
    "#plotting the tree\n",
    "\n",
    "dot_data = tree.export_graphviz(model, out_file=None, \n",
    "                                feature_names=X_train.columns,  \n",
    "                                filled=True)\n",
    "# Draw graph\n",
    "graph = graphviz.Source(dot_data) \n",
    "display(HTML(graph._repr_svg_()))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "One thing i can surely say is that this tree is too complex, and we would favour simpler tree than that (OCCAM's RAZOR)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (e) Predict the response on the test data, and produce a confusion matrix comparing the test labels to the predicted test labels. What is the test error rate?\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.201483Z"
    }
   },
   "outputs": [],
   "source": [
    "y_preds = model.predict(X_test)\n",
    "print('Test error is ',1 - accuracy_score(y_test,y_preds))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.203804Z"
    }
   },
   "outputs": [],
   "source": [
    "#confusion matrix \n",
    "cm = confusion_matrix(y_test,y_preds)\n",
    "pd.DataFrame(cm,columns = ['Pred_0','Pred_1'],index = ['True_0','True_1'])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (g) Produce a plot with tree size on the x-axis and cross-validated classification error rate on the y-axis.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.206203Z"
    }
   },
   "outputs": [],
   "source": [
    "cv_scores_dict = {}\n",
    "for max_depth in range(1,15):\n",
    "    model = DecisionTreeClassifier(max_depth=max_depth)\n",
    "    cv_scores_dict[max_depth] = 1 - np.mean(cross_val_score(model,X_train,y_train,cv = 10))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.209060Z"
    }
   },
   "outputs": [],
   "source": [
     "# (removed a stray, incomplete `plt.plot` reference; the CV-error plot is produced in the next cell)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.211311Z"
    }
   },
   "outputs": [],
   "source": [
    "plt.figure(figsize = (10,6))\n",
    "plt.plot(list(cv_scores_dict.keys()),list(cv_scores_dict.values()),marker = 'o')\n",
    "plt.plot(min(cv_scores_dict, key=cv_scores_dict.get),min(list(cv_scores_dict.values())),marker = 'o',markersize = 8,c='r',label = 'min_error')\n",
    "plt.legend()\n",
    "plt.xlabel('Max Depth')\n",
    "plt.ylabel('10 CV Error')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (h) Which tree size corresponds to the lowest cross-validated classification error rate?"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "The tree with max_depth = 4 gives the minimum CV error."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.214866Z"
    }
   },
   "outputs": [],
   "source": [
    "best_model = DecisionTreeClassifier(max_depth = 4)\n",
    "best_model.fit(X_train,y_train)\n",
    "dot_data = tree.export_graphviz(best_model, out_file=None, \n",
    "                                feature_names=X_train.columns,  \n",
    "                                filled=True)\n",
    "# Draw graph\n",
    "graph = graphviz.Source(dot_data) \n",
    "display(HTML(graph._repr_svg_()))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "This looks simple and elegant."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (i) Produce a pruned tree corresponding to the optimal tree size obtained using cross-validation. If cross-validation does not lead to selection of a pruned tree, then create a pruned tree with five terminal nodes.\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.217157Z"
    }
   },
   "outputs": [],
   "source": [
    "# pruned tree with 5 terminal nodes\n",
    "pruned_tree = DecisionTreeClassifier(max_leaf_nodes=5)\n",
    "pruned_tree.fit(X_train,y_train)\n",
    "dot_data = tree.export_graphviz(pruned_tree, out_file=None, \n",
    "                                feature_names=X_train.columns,  \n",
    "                                filled=True)\n",
    "# Draw graph\n",
    "graph = graphviz.Source(dot_data) \n",
    "display(HTML(graph._repr_svg_()))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.218743Z"
    }
   },
   "outputs": [],
   "source": [
    "unpruned_tree = DecisionTreeClassifier()\n",
    "unpruned_tree.fit(X_train,y_train)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (j) Compare the training error rates between the pruned and unpruned trees. Which is higher?"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.220370Z"
    }
   },
   "outputs": [],
   "source": [
    "print('Training error for pruned ',np.round(1 - (pruned_tree.score(X_train,y_train)),4))\n",
    "print('Training error for unpruned ',np.round(1 - (unpruned_tree.score(X_train,y_train)),4))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (k) Compare the test error rates between the pruned and unpruned trees. Which is higher?\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.222002Z"
    }
   },
   "outputs": [],
   "source": [
    "print('Test error for pruned ',np.round(1 - (pruned_tree.score(X_test,y_test)),4))\n",
    "print('Test error for unpruned ',np.round(1 - (unpruned_tree.score(X_test,y_test)),4))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "From the above results, it can be seen that the unpruned tree is overfitting heavily."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 10. We now use boosting to predict Salary in the Hitters data set."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.223383Z"
    }
   },
   "outputs": [],
   "source": [
    "data = pd.read_csv(r'E:\\programming\\dataset\\Into_to_statstical_learning\\Hitters.csv')\n",
    "data = data.iloc[:,1:] #removing the first col, which contains names\n",
    "print(data.shape)\n",
    "data.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.225300Z"
    }
   },
   "outputs": [],
   "source": [
    "#lets check for cat columns\n",
    "cat = [col for col in data.columns if data[col].dtype == 'O']\n",
    "for col in cat:\n",
    "    print(col)\n",
    "    print(data[col].value_counts())\n",
    "    print()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.226970Z"
    }
   },
   "outputs": [],
   "source": [
    "# encoding the cat cols \n",
    "data['League'] = data['League'].map({'A':0,'N':1})\n",
    "data['Division'] = data['Division'].map({'W':0,'E':1})\n",
    "data['NewLeague'] =  data['NewLeague'].map({'A':0,'N':1})"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "Now all the columns are numeric."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (a) Remove the observations for whom the salary information is unknown, and then log-transform the salaries\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.228205Z"
    }
   },
   "outputs": [],
   "source": [
    "# removing the null values (only the salary col has null values)\n",
    "data.dropna(inplace = True)\n",
    "\n",
    "#log_transforming the salaries \n",
    "data['Salary_log'] = np.log(data['Salary'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.231242Z"
    }
   },
   "outputs": [],
   "source": [
    "# how does the data looks like now!!\n",
    "print(data.shape)\n",
    "data.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "It may be tiring to hear this again, but I encourage you to go through the whole dataset: what it describes, what each of the columns means, and how it could be useful. (This is what matters in the end.)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (b) Create a training set consisting of the first 200 observations, and a test set consisting of the remaining observations.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.231988Z"
    }
   },
   "outputs": [],
   "source": [
    "X_train,X_test,y_train,y_test = train_test_split(data.drop(['Salary','Salary_log'],axis = 1),data['Salary_log'],\n",
    "                                                    test_size = 0.239,random_state = 1)\n",
    "#test_size = 63 / 253\n",
    "print(X_train.shape,X_test.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (c) Perform boosting on the training set with 1,000 trees for a range of values of the shrinkage parameter λ. Produce a plot with different shrinkage values on the x-axis and the corresponding training set MSE on the y-axis.\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.233326Z"
    }
   },
   "outputs": [],
   "source": [
     "# we are going to use XGBoost here; we saw in the labs that XGBoost performs better than AdaBoost\n",
     "# NOTE(review): the shrinkage parameter in XGBoost is `learning_rate`, not `gamma` (gamma is the\n",
     "# minimum split-loss regulariser), so the loop below actually varies gamma and the plots that\n",
     "# follow are over gamma values; to answer the question as stated, vary learning_rate instead.\n",
    "scores_dict_train = {}\n",
    "scores_dict_test = {}\n",
    "gamma_values = np.linspace(0,1,50)\n",
    "for gamma in (gamma_values):\n",
    "    xgb = XGBRegressor(n_estimators=1000,gamma=gamma)\n",
    "    xgb.fit(X_train,y_train)\n",
    "    scores_dict_train[gamma] = mean_squared_error(y_train,xgb.predict(X_train))\n",
    "    scores_dict_test[gamma] = mean_squared_error(y_test,xgb.predict(X_test))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.233704Z"
    }
   },
   "outputs": [],
   "source": [
    "plt.figure(figsize = (10,6))\n",
    "plt.plot(list(scores_dict_train.keys()),list(scores_dict_train.values()))\n",
    "plt.plot(min(scores_dict_train, key=scores_dict_train.get),min(list(scores_dict_train.values())),marker = 'o',markersize = 8,c='r',label = 'min_error')\n",
    "plt.legend()\n",
    "plt.xlabel('Value of Shrinkage parameter')\n",
    "plt.ylabel('Training Error')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (d) Produce a plot with different shrinkage values on the x-axis and the corresponding test set MSE on the y-axis.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.234337Z"
    }
   },
   "outputs": [],
   "source": [
    "plt.figure(figsize = (10,6)) \n",
    "plt.plot(list(scores_dict_test.keys()),list(scores_dict_test.values()))\n",
    "plt.plot(min(scores_dict_test, key=scores_dict_test.get),min(list(scores_dict_test.values())),marker = 'o',markersize = 8,c='r',label = 'min_error')\n",
    "plt.legend()\n",
    "plt.xlabel('Value of Shrinkage parameter')\n",
    "plt.ylabel('Test Error')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "The test MSE is at its minimum when the value of the shrinkage parameter is 0.326."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (e) Compare the test MSE of boosting to the test MSE that results from applying two of the regression approaches seen in Chapters 3 and 6.\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Ridge\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.235878Z"
    }
   },
   "outputs": [],
   "source": [
    "scores_dict_ridge = {}\n",
    "list_alpha = 10**np.linspace(-2,5,100)\n",
    "for alpha in list_alpha:\n",
    "    model = Ridge(alpha=alpha) \n",
    "    model.fit(X_train,y_train)\n",
    "    scores_dict_ridge[alpha] = mean_squared_error(y_test,model.predict(X_test))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Lasso"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.237128Z"
    }
   },
   "outputs": [],
   "source": [
    "scores_dict_lasso = {}\n",
    "list_alpha = 10**np.linspace(-2,5,100)\n",
    "for alpha in list_alpha:\n",
    "    model = Lasso(alpha=alpha) \n",
    "    model.fit(X_train,y_train)\n",
    "    scores_dict_lasso[alpha] = mean_squared_error(y_test,model.predict(X_test))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.238437Z"
    }
   },
   "outputs": [],
   "source": [
    "plt.figure(figsize = (10,6)) \n",
    "plt.plot(list(scores_dict_ridge.keys()),list(scores_dict_ridge.values()),label = 'Ridge')\n",
    "plt.plot(list(scores_dict_lasso.keys()),list(scores_dict_lasso.values()),label = 'Lasso')\n",
    "plt.legend()\n",
    "plt.xlabel('Value of Shrinkage parameter')\n",
    "plt.ylabel('Test Error')\n",
    "plt.xscale('log')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.239488Z"
    }
   },
   "outputs": [],
   "source": [
    "# lets find out the best test mse value for both the cases \n",
     "print('Best test MSE for Ridge is ',min(scores_dict_ridge.values()))\n",
    "print('Best test MSE for LASSO is ',min(scores_dict_lasso.values()))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "Hmm, very close to each other."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.240654Z"
    }
   },
   "outputs": [],
   "source": [
    "# what was the best test MSE for boosting \n",
    "print('Best TEST MSE for Boosting is ',min(scores_dict_test.values()))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "We can see that boosting outperforms the other methods by a wide margin."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (f) Which variables appear to be the most important predictors in the boosted model?\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.241699Z"
    }
   },
   "outputs": [],
   "source": [
    "best_boosting_model = XGBRegressor(n_estimators=1000,gamma=0.326)\n",
    "best_boosting_model.fit(X_train,y_train)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.243032Z"
    }
   },
   "outputs": [],
   "source": [
    "# feature importance\n",
    "pd.Series(best_boosting_model.feature_importances_,index = X_train.columns).sort_values(ascending=False).plot.bar(figsize = (10,6))\n",
    "plt.title('Feature Importance')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "The three features CAtBat, CHits, and CRuns are the most important for determining Salary."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (g) Now apply bagging to the training set. What is the test set MSE for this approach?\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.243594Z"
    }
   },
   "outputs": [],
   "source": [
    "base_reg = DecisionTreeRegressor()\n",
    "bagging = BaggingRegressor(base_reg)\n",
    "bagging.fit(X_train,y_train)\n",
    "print('Training MSE ',mean_squared_error(y_train,bagging.predict(X_train)))\n",
    "print('Test MSE ',mean_squared_error(y_test,bagging.predict(X_test)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "It gives Test MSE of 0.28, which is close but not better than the MSE we got from Boosting, which was 0.22"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 11. This question uses the Caravan data set."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.244777Z"
    }
   },
   "outputs": [],
   "source": [
     "data = pd.read_csv(r'E:\\programming\\dataset\\Into_to_statstical_learning\\Caravan.csv')\n",
    "print(data.shape)\n",
    "data = data.iloc[:,1:] #removing the first col\n",
    "data.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.245937Z"
    }
   },
   "outputs": [],
   "source": [
    "# lets check for any cat values\n",
    "[col for col in data.columns if data[col].dtype == 'O']"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Only the response value is categorical, which is Yes or No, lets encode it then"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.246646Z"
    }
   },
   "outputs": [],
   "source": [
    "data['Purchase'] = data['Purchase'].map({'Yes':1,'No':0})"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (a) Create a training set consisting of the first 1,000 observations, and a test set consisting of the remaining observations.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.247401Z"
    }
   },
   "outputs": [],
   "source": [
    "train_data = data.iloc[:1000,:]\n",
    "X_train = train_data.drop('Purchase',axis = 1)\n",
    "y_train = train_data['Purchase']\n",
    "\n",
    "test_data = data.iloc[1000:,:]\n",
    "X_test = test_data.drop('Purchase',axis = 1)\n",
    "y_test = test_data['Purchase']\n",
    "\n",
    "print(X_train.shape,X_test.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (b) Fit a boosting model to the training set with Purchase as the response and the other variables as predictors. Use 1,000 trees, and a shrinkage value of 0.01. Which predictors appear to be the most important?\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.248031Z"
    }
   },
   "outputs": [],
   "source": [
     "# shrinkage in XGBoost is the learning_rate parameter (gamma is a split-loss regulariser)\n",
     "model = XGBClassifier(n_estimators=1000,learning_rate=0.01)\n",
    "model.fit(X_train,y_train)\n",
    "print('Training score ',model.score(X_train,y_train))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.248845Z"
    }
   },
   "outputs": [],
   "source": [
    "# feature importance\n",
    "pd.Series(model.feature_importances_,index = X_train.columns).sort_values(ascending=False)[:10].plot.bar(figsize = (10,6))\n",
    "plt.title('Top 10 important features')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Above are the 10 most important features"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### (c) Use the boosting model to predict the response on the test data. Predict that a person will make a purchase if the estimated probability of purchase is greater than 20 %. Form a confusion matrix. What fraction of the people predicted to make a purchase do in fact make one? How does this compare with the results obtained from applying KNN or logistic regression to this data set?\n",
    "\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.249729Z"
    }
   },
   "outputs": [],
   "source": [
    "print('Test Accuracy is ', model.score(X_test,y_test))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "By default, the model uses a 50% probability threshold; the question asks us to predict a purchase when the estimated probability of purchase is greater than 20%, which we apply below."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.250737Z"
    }
   },
   "outputs": [],
   "source": [
     "# column 1 of predict_proba holds P(Purchase = 1); column 0 would give P(no purchase)\n",
     "pred_probs = model.predict_proba(X_test)[:,1]\n",
     "pred_probs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.252045Z"
    }
   },
   "outputs": [],
   "source": [
    "# if predicted prob > 0.2, then class 1, otherwise 0\n",
    "pred_class = np.where(pred_probs>0.2,1,0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.252979Z"
    }
   },
   "outputs": [],
   "source": [
    "print('Accuracy is ',accuracy_score(y_test,pred_class))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.254022Z"
    }
   },
   "outputs": [],
   "source": [
    "#confusion matrix \n",
    "cm = confusion_matrix(y_test,pred_class)\n",
    "pd.DataFrame(cm,columns = ['Pred_0','Pred_1'],index = ['True_0','True_1'])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "So only about 5% of the people the model predicts as purchasers actually make a purchase — the model is being too optimistic here."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### KNN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.255682Z"
    }
   },
   "outputs": [],
   "source": [
    "knn = KNeighborsClassifier(n_neighbors=6)\n",
    "knn.fit(X_train,y_train)\n",
    "pred_probas = knn.predict_proba(X_test)\n",
    "\n",
     "# threshold P(Purchase = 1), i.e. column 1 of predict_proba, at 0.2\n",
     "pred_classes = np.where(pred_probas[:,1]>0.2,1,0)\n",
    "print('Score for KNN ',accuracy_score(y_test,pred_classes))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Logistic Regression"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2023-09-19T12:24:03.256410Z"
    }
   },
   "outputs": [],
   "source": [
    "lr = LogisticRegression()\n",
    "lr.fit(X_train,y_train)\n",
    "\n",
    "pred_probas = lr.predict_proba(X_test)\n",
    "\n",
     "# threshold P(Purchase = 1), i.e. column 1 of predict_proba, at 0.2\n",
     "pred_classes = np.where(pred_probas[:,1]>0.2,1,0)\n",
    "print('Score for Logistic Regression is ',accuracy_score(y_test,pred_classes))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "### Although none of the models looks good, boosting performs the best amongst them"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
