{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "c56e4265",
   "metadata": {},
   "source": [
    "## 易错点\n",
    "1. df_sample['Date'].dt.year 隔段时间完全忘了\n",
    "2. factorize前有pd.\n",
    "3. Primary Type要改名，改完名后删除原列\n",
    "4. scoring : str or callable, default=None \n",
    "    scoring=make_scorer(f1_score)\n",
    "    scoring='accuracy'\n",
    "5. voting写法\n",
    "6. df_sample不是df\n",
    "7.VotingClassifier 后面for不用fit\n",
    "8.scores = cross_val_score(clf,X_train,y_train,cv=5,scoring='accuracy') scoring='accuracy'忘了"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "f77d6a66",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "b'Skipping line 1513591: expected 23 fields, saw 24\\n'\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(19235, 23)"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Imports and data loading\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import time\n",
    "import seaborn as sns\n",
    "import matplotlib.pyplot as plt\n",
    "from subprocess import check_output\n",
    "import lightgbm as lgb\n",
    "from sklearn.ensemble import GradientBoostingClassifier\n",
    "from tqdm import tqdm_notebook as tqdm\n",
    "from sklearn.preprocessing import MinMaxScaler\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import confusion_matrix, log_loss\n",
    "%matplotlib inline\n",
    "# Read the data; error_bad_lines=False skips malformed rows (deprecated in\n",
    "# pandas>=1.3 in favour of on_bad_lines='skip' -- keep if pandas is older).\n",
    "df = pd.read_csv('./data/Chicago_Crimes.csv', error_bad_lines=False)\n",
    "# Randomly draw 1% of the rows; random_state makes the sample reproducible\n",
    "# across kernel restarts.\n",
    "df_sample = df.sample(frac=0.01, random_state=42)\n",
    "df_sample.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "336feab4",
   "metadata": {},
   "outputs": [],
   "source": [
    "# IUCR, FBI Code, Case Number and ID are alternative encodings of the primary\n",
    "# type itself and would leak the target, giving unrealistically good\n",
    "# predictions; drop them together with the other unused columns.\n",
    "drop_cols = ['IUCR', 'Case Number', 'ID', 'FBI Code',\n",
    "             'Updated On', 'Arrest', 'Domestic', 'Unnamed: 0']\n",
    "df_sample = df_sample.drop(columns=drop_cols)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "424228b8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Confirm the new shape after dropping the leaky/unused columns\n",
    "df_sample.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c427fc7f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inspect the data: dtypes/non-null counts, missing values, numeric and\n",
    "# categorical (object) summaries, then the per-column missing rate before\n",
    "# dropping rows with NAs.\n",
    "df_sample.info()  # was `df_sample.info` -- missing parens, printed nothing\n",
    "df_sample.isnull().sum()\n",
    "df_sample.describe()\n",
    "df_sample.describe(include='O')\n",
    "# Missing rate of the *sample*: divide by the sample size, not len(df).\n",
    "df_na = pd.DataFrame(data=df_sample.isnull().sum() / df_sample.shape[0],\n",
    "                     columns=['miss_rate']).sort_values(by='miss_rate', ascending=False)\n",
    "df_sample.dropna(inplace=True)\n",
    "df_sample.isnull().sum()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f74c93d2",
   "metadata": {},
   "source": [
    "1.先将字段Date转为datetime型，再扩展字段，提取年，月，周，日，小时信息。同时删除Date字段。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "dbae1a77",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Parse Date and expand it into calendar features, then drop the raw column.\n",
    "# pd.to_datetime is the robust parser; astype(np.datetime64) is brittle and\n",
    "# deprecated for format handling.\n",
    "df_sample['Date'] = pd.to_datetime(df_sample['Date'])\n",
    "df_sample['year'] = df_sample['Date'].dt.year\n",
    "df_sample['month'] = df_sample['Date'].dt.month\n",
    "df_sample['weekday'] = df_sample['Date'].dt.weekday\n",
    "df_sample['day'] = df_sample['Date'].dt.day\n",
    "df_sample['hour'] = df_sample['Date'].dt.hour\n",
    "del df_sample['Date']\n",
    "# The .dt accessor exposes year/month/weekday/day/hour of a datetime Series."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b2f53d5f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Verify dtypes of the expanded calendar features\n",
    "df_sample.info()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "50a97dd4",
   "metadata": {},
   "source": [
    "2.字符串类型字段 \"Block\"、\"Primary Type\"、\"Description\"、\"Location Description\"、\"Location\"，在进行数据分析之前需要数值化，提高运行效率。factorize函数可以将字符串类型数据映射为一组数字，相同的字符串类型映射为相同的数字。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "cc05cab1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Integer-encode the string columns: factorize maps each distinct string to a\n",
    "# distinct integer code (identical strings get identical codes).\n",
    "for column in ['Block', 'Primary Type', 'Description', 'Location Description', 'Location']:\n",
    "    codes, _ = pd.factorize(df_sample[column])\n",
    "    df_sample[column] = codes\n",
    "# Rename the target column: pop removes 'Primary Type' and re-inserts its\n",
    "# values at the end under the new name, matching the original assign-then-del.\n",
    "df_sample['Primary_Type'] = df_sample.pop('Primary Type')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "842c48d7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Numeric feature handling: min-max normalise the coordinate columns.\n",
    "# MinMaxScaler scales each column independently, so fitting both at once is\n",
    "# equivalent to fitting them one by one.\n",
    "from sklearn.preprocessing import MinMaxScaler\n",
    "coord_cols = ['X Coordinate', 'Y Coordinate']\n",
    "df_sample[coord_cols] = df_sample[coord_cols].astype(float)\n",
    "df_sample[coord_cols] = MinMaxScaler().fit_transform(df_sample[coord_cols])\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "# Hold out 30% for testing. NOTE(review): the scaler was fit on the full data\n",
    "# before the split, which leaks test-set statistics into training -- confirm\n",
    "# this is acceptable for this exercise.\n",
    "features = df_sample.loc[:, df_sample.columns != 'Primary_Type']\n",
    "target = df_sample['Primary_Type']\n",
    "X_train, X_test, y_train, y_test = train_test_split(\n",
    "    features, target, test_size=0.3, random_state=42)\n",
    "X_train.info()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ec030f5e",
   "metadata": {},
   "source": [
    "3.使用GradientBoostingClassifier分类器进行训练模型model_gbdt."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "5eba876e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "fl_score为0.8317320889376887\n"
     ]
    }
   ],
   "source": [
    "from sklearn.ensemble import GradientBoostingClassifier\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "from sklearn.metrics import f1_score\n",
    "#考生完成\n",
    "model_gbdt = GradientBoostingClassifier(n_estimators=8)\n",
    "model_gbdt.fit(X_train,y_train)\n",
    "#考生完成\n",
    "y_prel = model_gbdt.predict(X_test)\n",
    "f1_scorel = f1_score(y_test, y_prel, average='micro')\n",
    "print('fl_score为{}'.format(f1_scorel))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b0672faf",
   "metadata": {},
   "source": [
    "4.使用网格搜索交叉验证对模型model_gbdt进行优化，调整参数learning_rate建议值为[0.1,0.2,0.3,0.4,0.5]，cv采用5折进行模型训练，得到最优模型、最优参数和最优评分"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4a5b6ae1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Grid-search learning_rate with 5-fold cross-validation.\n",
    "from sklearn.model_selection import GridSearchCV\n",
    "from sklearn.metrics import make_scorer\n",
    "param_grids = {'learning_rate': [0.1, 0.2, 0.3, 0.4, 0.5]}\n",
    "# The target is multiclass, so f1_score needs an explicit average; the default\n",
    "# average='binary' would raise at scoring time.\n",
    "model_gs = GridSearchCV(estimator=model_gbdt, param_grid=param_grids, cv=5,\n",
    "                        scoring=make_scorer(f1_score, average='micro'))\n",
    "model_gs.fit(X_train, y_train)\n",
    "\n",
    "# Only the last bare expression of a cell is displayed, so print each result.\n",
    "print('best estimator:', model_gs.best_estimator_)\n",
    "print('best params:', model_gs.best_params_)\n",
    "print('best score:', model_gs.best_score_)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9aafaae4",
   "metadata": {},
   "source": [
    "5.使用VotingClassifier聚合多个基础模型的预测结果。通过硬投票、软投票和自定义权重的软投票三种方式进行比较，确定最后的结果。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "8e6f5c0b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Imports for the voting-ensemble comparison.\n",
    "# NOTE(review): LinearRegression, DecisionTreeClassifier, AdaBoostClassifier\n",
    "# and cross_validate are imported but never used in the visible cells.\n",
    "from sklearn.model_selection import cross_val_score\n",
    "from sklearn.model_selection import cross_validate\n",
    "from sklearn.linear_model import LinearRegression\n",
    "from sklearn.tree import DecisionTreeClassifier\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "from sklearn.ensemble import AdaBoostClassifier\n",
    "from sklearn.svm import SVC\n",
    "from xgboost import XGBClassifier\n",
    "from sklearn.ensemble import  VotingClassifier\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "7aa66458",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Base learners. NOTE(review): objective='binary:logistic' with a multiclass\n",
    "# target -- sklearn's XGBClassifier normally overrides the objective for\n",
    "# multiclass, but confirm the intended setting.\n",
    "clf1 = XGBClassifier(learning_rate=0.1, n_estimators=150, max_depth=3,\n",
    "                     min_child_weight=2, subsample=0.7, colsample_bytree=0.6,\n",
    "                     objective='binary:logistic')\n",
    "clf2 = RandomForestClassifier(n_estimators=50, max_depth=1, min_samples_split=4,\n",
    "                              min_samples_leaf=63, oob_score=True)\n",
    "clf3 = SVC(C=0.1, probability=True)\n",
    "\n",
    "def report_cv_accuracy(classifiers, labels):\n",
    "    \"\"\"Print 5-fold CV accuracy (mean +/- std) for each classifier/label pair.\"\"\"\n",
    "    for clf, label in zip(classifiers, labels):\n",
    "        scores = cross_val_score(clf, X_train, y_train, cv=5, scoring='accuracy')\n",
    "        print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label))\n",
    "\n",
    "# Hard voting: majority class vote\n",
    "eclf = VotingClassifier(estimators=[('xgb', clf1), ('rf', clf2), ('svc', clf3)], voting='hard')\n",
    "report_cv_accuracy([clf1, clf2, clf3, eclf], ['XGBoosting', 'Random Forest', 'SVC', 'voting'])\n",
    "# Soft voting: average the predicted class probabilities\n",
    "eclf = VotingClassifier(estimators=[('xgb', clf1), ('rf', clf2), ('svc', clf3)], voting='soft')\n",
    "report_cv_accuracy([clf1, clf2, clf3, eclf], ['XGBoosting', 'Random Forest', 'SVC', 'voting'])\n",
    "# Soft voting with custom per-learner weights\n",
    "eclf = VotingClassifier(estimators=[('xgb', clf1), ('rf', clf2), ('svc', clf3)], voting='soft', weights=[10, 1, 9])\n",
    "report_cv_accuracy([clf1, clf2, clf3, eclf], ['XGBoost', 'Random Forest', 'SVC', 'voting'])"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "PySpark-2.4.5",
   "language": "python",
   "name": "pyspark-2.4.5"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
