{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2025-01-11T09:53:57.710365Z",
     "start_time": "2025-01-11T09:53:55.602758Z"
    }
   },
   "source": [
    "import time\n",
    "from sklearn.datasets import load_iris, fetch_20newsgroups, fetch_california_housing\n",
    "from sklearn.model_selection import train_test_split, GridSearchCV\n",
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn.naive_bayes import MultinomialNB\n",
    "from sklearn.metrics import classification_report\n",
    "from sklearn.feature_extraction import DictVectorizer\n",
    "from sklearn.tree import DecisionTreeClassifier, export_graphviz\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from sklearn.metrics import roc_auc_score"
   ],
   "outputs": [],
   "execution_count": 1
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
     "## load直接加载到内存，数据集比较小，并不会保存到本地磁盘\n",
    "## fetch数据集比较大，下载下来后会存在本地磁盘，下一次就不会再连接sklearn的服务器"
   ],
   "id": "e3a664049c8b9003"
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "练习1：鸢尾花数据集 小数据集",
   "id": "3ff34b76920138d6"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:53:57.728017Z",
     "start_time": "2025-01-11T09:53:57.711369Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 鸢尾花数据集，查看特征，目标，样本量 \n",
    "\n",
    "li = load_iris()  # 加载鸢尾花数据集\n",
    "\n",
    "# feature_names 是特征的名字\n",
    "print(li.feature_names)  # 重点,特征名字 \n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(type(li.data))  # numpy.ndarray\n",
    "print(\"-\" * 50)\n",
    "print(li.data.shape)  # 150个样本，4个特征,一般看shape\n",
    "print(\"-\" * 50)\n",
    "li.data"
   ],
   "id": "3f737134f92c9f5f",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)']\n",
      "--------------------------------------------------\n",
      "<class 'numpy.ndarray'>\n",
      "--------------------------------------------------\n",
      "(150, 4)\n",
      "--------------------------------------------------\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "array([[5.1, 3.5, 1.4, 0.2],\n",
       "       [4.9, 3. , 1.4, 0.2],\n",
       "       [4.7, 3.2, 1.3, 0.2],\n",
       "       [4.6, 3.1, 1.5, 0.2],\n",
       "       [5. , 3.6, 1.4, 0.2],\n",
       "       [5.4, 3.9, 1.7, 0.4],\n",
       "       [4.6, 3.4, 1.4, 0.3],\n",
       "       [5. , 3.4, 1.5, 0.2],\n",
       "       [4.4, 2.9, 1.4, 0.2],\n",
       "       [4.9, 3.1, 1.5, 0.1],\n",
       "       [5.4, 3.7, 1.5, 0.2],\n",
       "       [4.8, 3.4, 1.6, 0.2],\n",
       "       [4.8, 3. , 1.4, 0.1],\n",
       "       [4.3, 3. , 1.1, 0.1],\n",
       "       [5.8, 4. , 1.2, 0.2],\n",
       "       [5.7, 4.4, 1.5, 0.4],\n",
       "       [5.4, 3.9, 1.3, 0.4],\n",
       "       [5.1, 3.5, 1.4, 0.3],\n",
       "       [5.7, 3.8, 1.7, 0.3],\n",
       "       [5.1, 3.8, 1.5, 0.3],\n",
       "       [5.4, 3.4, 1.7, 0.2],\n",
       "       [5.1, 3.7, 1.5, 0.4],\n",
       "       [4.6, 3.6, 1. , 0.2],\n",
       "       [5.1, 3.3, 1.7, 0.5],\n",
       "       [4.8, 3.4, 1.9, 0.2],\n",
       "       [5. , 3. , 1.6, 0.2],\n",
       "       [5. , 3.4, 1.6, 0.4],\n",
       "       [5.2, 3.5, 1.5, 0.2],\n",
       "       [5.2, 3.4, 1.4, 0.2],\n",
       "       [4.7, 3.2, 1.6, 0.2],\n",
       "       [4.8, 3.1, 1.6, 0.2],\n",
       "       [5.4, 3.4, 1.5, 0.4],\n",
       "       [5.2, 4.1, 1.5, 0.1],\n",
       "       [5.5, 4.2, 1.4, 0.2],\n",
       "       [4.9, 3.1, 1.5, 0.2],\n",
       "       [5. , 3.2, 1.2, 0.2],\n",
       "       [5.5, 3.5, 1.3, 0.2],\n",
       "       [4.9, 3.6, 1.4, 0.1],\n",
       "       [4.4, 3. , 1.3, 0.2],\n",
       "       [5.1, 3.4, 1.5, 0.2],\n",
       "       [5. , 3.5, 1.3, 0.3],\n",
       "       [4.5, 2.3, 1.3, 0.3],\n",
       "       [4.4, 3.2, 1.3, 0.2],\n",
       "       [5. , 3.5, 1.6, 0.6],\n",
       "       [5.1, 3.8, 1.9, 0.4],\n",
       "       [4.8, 3. , 1.4, 0.3],\n",
       "       [5.1, 3.8, 1.6, 0.2],\n",
       "       [4.6, 3.2, 1.4, 0.2],\n",
       "       [5.3, 3.7, 1.5, 0.2],\n",
       "       [5. , 3.3, 1.4, 0.2],\n",
       "       [7. , 3.2, 4.7, 1.4],\n",
       "       [6.4, 3.2, 4.5, 1.5],\n",
       "       [6.9, 3.1, 4.9, 1.5],\n",
       "       [5.5, 2.3, 4. , 1.3],\n",
       "       [6.5, 2.8, 4.6, 1.5],\n",
       "       [5.7, 2.8, 4.5, 1.3],\n",
       "       [6.3, 3.3, 4.7, 1.6],\n",
       "       [4.9, 2.4, 3.3, 1. ],\n",
       "       [6.6, 2.9, 4.6, 1.3],\n",
       "       [5.2, 2.7, 3.9, 1.4],\n",
       "       [5. , 2. , 3.5, 1. ],\n",
       "       [5.9, 3. , 4.2, 1.5],\n",
       "       [6. , 2.2, 4. , 1. ],\n",
       "       [6.1, 2.9, 4.7, 1.4],\n",
       "       [5.6, 2.9, 3.6, 1.3],\n",
       "       [6.7, 3.1, 4.4, 1.4],\n",
       "       [5.6, 3. , 4.5, 1.5],\n",
       "       [5.8, 2.7, 4.1, 1. ],\n",
       "       [6.2, 2.2, 4.5, 1.5],\n",
       "       [5.6, 2.5, 3.9, 1.1],\n",
       "       [5.9, 3.2, 4.8, 1.8],\n",
       "       [6.1, 2.8, 4. , 1.3],\n",
       "       [6.3, 2.5, 4.9, 1.5],\n",
       "       [6.1, 2.8, 4.7, 1.2],\n",
       "       [6.4, 2.9, 4.3, 1.3],\n",
       "       [6.6, 3. , 4.4, 1.4],\n",
       "       [6.8, 2.8, 4.8, 1.4],\n",
       "       [6.7, 3. , 5. , 1.7],\n",
       "       [6. , 2.9, 4.5, 1.5],\n",
       "       [5.7, 2.6, 3.5, 1. ],\n",
       "       [5.5, 2.4, 3.8, 1.1],\n",
       "       [5.5, 2.4, 3.7, 1. ],\n",
       "       [5.8, 2.7, 3.9, 1.2],\n",
       "       [6. , 2.7, 5.1, 1.6],\n",
       "       [5.4, 3. , 4.5, 1.5],\n",
       "       [6. , 3.4, 4.5, 1.6],\n",
       "       [6.7, 3.1, 4.7, 1.5],\n",
       "       [6.3, 2.3, 4.4, 1.3],\n",
       "       [5.6, 3. , 4.1, 1.3],\n",
       "       [5.5, 2.5, 4. , 1.3],\n",
       "       [5.5, 2.6, 4.4, 1.2],\n",
       "       [6.1, 3. , 4.6, 1.4],\n",
       "       [5.8, 2.6, 4. , 1.2],\n",
       "       [5. , 2.3, 3.3, 1. ],\n",
       "       [5.6, 2.7, 4.2, 1.3],\n",
       "       [5.7, 3. , 4.2, 1.2],\n",
       "       [5.7, 2.9, 4.2, 1.3],\n",
       "       [6.2, 2.9, 4.3, 1.3],\n",
       "       [5.1, 2.5, 3. , 1.1],\n",
       "       [5.7, 2.8, 4.1, 1.3],\n",
       "       [6.3, 3.3, 6. , 2.5],\n",
       "       [5.8, 2.7, 5.1, 1.9],\n",
       "       [7.1, 3. , 5.9, 2.1],\n",
       "       [6.3, 2.9, 5.6, 1.8],\n",
       "       [6.5, 3. , 5.8, 2.2],\n",
       "       [7.6, 3. , 6.6, 2.1],\n",
       "       [4.9, 2.5, 4.5, 1.7],\n",
       "       [7.3, 2.9, 6.3, 1.8],\n",
       "       [6.7, 2.5, 5.8, 1.8],\n",
       "       [7.2, 3.6, 6.1, 2.5],\n",
       "       [6.5, 3.2, 5.1, 2. ],\n",
       "       [6.4, 2.7, 5.3, 1.9],\n",
       "       [6.8, 3. , 5.5, 2.1],\n",
       "       [5.7, 2.5, 5. , 2. ],\n",
       "       [5.8, 2.8, 5.1, 2.4],\n",
       "       [6.4, 3.2, 5.3, 2.3],\n",
       "       [6.5, 3. , 5.5, 1.8],\n",
       "       [7.7, 3.8, 6.7, 2.2],\n",
       "       [7.7, 2.6, 6.9, 2.3],\n",
       "       [6. , 2.2, 5. , 1.5],\n",
       "       [6.9, 3.2, 5.7, 2.3],\n",
       "       [5.6, 2.8, 4.9, 2. ],\n",
       "       [7.7, 2.8, 6.7, 2. ],\n",
       "       [6.3, 2.7, 4.9, 1.8],\n",
       "       [6.7, 3.3, 5.7, 2.1],\n",
       "       [7.2, 3.2, 6. , 1.8],\n",
       "       [6.2, 2.8, 4.8, 1.8],\n",
       "       [6.1, 3. , 4.9, 1.8],\n",
       "       [6.4, 2.8, 5.6, 2.1],\n",
       "       [7.2, 3. , 5.8, 1.6],\n",
       "       [7.4, 2.8, 6.1, 1.9],\n",
       "       [7.9, 3.8, 6.4, 2. ],\n",
       "       [6.4, 2.8, 5.6, 2.2],\n",
       "       [6.3, 2.8, 5.1, 1.5],\n",
       "       [6.1, 2.6, 5.6, 1.4],\n",
       "       [7.7, 3. , 6.1, 2.3],\n",
       "       [6.3, 3.4, 5.6, 2.4],\n",
       "       [6.4, 3.1, 5.5, 1.8],\n",
       "       [6. , 3. , 4.8, 1.8],\n",
       "       [6.9, 3.1, 5.4, 2.1],\n",
       "       [6.7, 3.1, 5.6, 2.4],\n",
       "       [6.9, 3.1, 5.1, 2.3],\n",
       "       [5.8, 2.7, 5.1, 1.9],\n",
       "       [6.8, 3.2, 5.9, 2.3],\n",
       "       [6.7, 3.3, 5.7, 2.5],\n",
       "       [6.7, 3. , 5.2, 2.3],\n",
       "       [6.3, 2.5, 5. , 1.9],\n",
       "       [6.5, 3. , 5.2, 2. ],\n",
       "       [6.2, 3.4, 5.4, 2.3],\n",
       "       [5.9, 3. , 5.1, 1.8]])"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:53:57.734184Z",
     "start_time": "2025-01-11T09:53:57.729020Z"
    }
   },
   "cell_type": "code",
   "source": [
    "print(\"目标值\")\n",
    "# target 的作用是什么？\n",
    "# 目标值是用来区分样本的，\n",
    "# 比如鸢尾花数据集，目标值就是花的种类，0,1,2分别表示山鸢尾，变色鸢尾，维吉尼亚鸢尾\n",
    "print(li.target)  # 目标值，0,1,2\n",
    "print(\"-\" * 50)\n",
    "\n",
    "# target_names 是目标值的名字\n",
    "print(li.target_names)\n",
    "print(\"-\" * 50)\n",
    "\n",
     "print(li.target.shape)  # (150,) 一维数组，150个样本各自的目标值\n",
    "print(\"-\" * 50)\n",
    "\n",
    "# DESCR 是一个描述性的字符串，描述了数据集的一些信息\n",
    "print(li.DESCR)\n"
   ],
   "id": "6ed790a1b81df23e",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "目标值\n",
      "[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      " 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n",
      " 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2\n",
      " 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2\n",
      " 2 2]\n",
      "--------------------------------------------------\n",
      "['setosa' 'versicolor' 'virginica']\n",
      "--------------------------------------------------\n",
      "(150,)\n",
      "--------------------------------------------------\n",
      ".. _iris_dataset:\n",
      "\n",
      "Iris plants dataset\n",
      "--------------------\n",
      "\n",
      "**Data Set Characteristics:**\n",
      "\n",
      ":Number of Instances: 150 (50 in each of three classes)\n",
      ":Number of Attributes: 4 numeric, predictive attributes and the class\n",
      ":Attribute Information:\n",
      "    - sepal length in cm\n",
      "    - sepal width in cm\n",
      "    - petal length in cm\n",
      "    - petal width in cm\n",
      "    - class:\n",
      "            - Iris-Setosa\n",
      "            - Iris-Versicolour\n",
      "            - Iris-Virginica\n",
      "\n",
      ":Summary Statistics:\n",
      "\n",
      "============== ==== ==== ======= ===== ====================\n",
      "                Min  Max   Mean    SD   Class Correlation\n",
      "============== ==== ==== ======= ===== ====================\n",
      "sepal length:   4.3  7.9   5.84   0.83    0.7826\n",
      "sepal width:    2.0  4.4   3.05   0.43   -0.4194\n",
      "petal length:   1.0  6.9   3.76   1.76    0.9490  (high!)\n",
      "petal width:    0.1  2.5   1.20   0.76    0.9565  (high!)\n",
      "============== ==== ==== ======= ===== ====================\n",
      "\n",
      ":Missing Attribute Values: None\n",
      ":Class Distribution: 33.3% for each of 3 classes.\n",
      ":Creator: R.A. Fisher\n",
      ":Donor: Michael Marshall (MARSHALL%PLU@io.arc.nasa.gov)\n",
      ":Date: July, 1988\n",
      "\n",
      "The famous Iris database, first used by Sir R.A. Fisher. The dataset is taken\n",
      "from Fisher's paper. Note that it's the same as in R, but not as in the UCI\n",
      "Machine Learning Repository, which has two wrong data points.\n",
      "\n",
      "This is perhaps the best known database to be found in the\n",
      "pattern recognition literature.  Fisher's paper is a classic in the field and\n",
      "is referenced frequently to this day.  (See Duda & Hart, for example.)  The\n",
      "data set contains 3 classes of 50 instances each, where each class refers to a\n",
      "type of iris plant.  One class is linearly separable from the other 2; the\n",
      "latter are NOT linearly separable from each other.\n",
      "\n",
      ".. dropdown:: References\n",
      "\n",
      "  - Fisher, R.A. \"The use of multiple measurements in taxonomic problems\"\n",
      "    Annual Eugenics, 7, Part II, 179-188 (1936); also in \"Contributions to\n",
      "    Mathematical Statistics\" (John Wiley, NY, 1950).\n",
      "  - Duda, R.O., & Hart, P.E. (1973) Pattern Classification and Scene Analysis.\n",
      "    (Q327.D83) John Wiley & Sons.  ISBN 0-471-22361-1.  See page 218.\n",
      "  - Dasarathy, B.V. (1980) \"Nosing Around the Neighborhood: A New System\n",
      "    Structure and Classification Rule for Recognition in Partially Exposed\n",
      "    Environments\".  IEEE Transactions on Pattern Analysis and Machine\n",
      "    Intelligence, Vol. PAMI-2, No. 1, 67-71.\n",
      "  - Gates, G.W. (1972) \"The Reduced Nearest Neighbor Rule\".  IEEE Transactions\n",
      "    on Information Theory, May 1972, 431-433.\n",
      "  - See also: 1988 MLC Proceedings, 54-64.  Cheeseman et al\"s AUTOCLASS II\n",
      "    conceptual clustering system finds 3 classes in the data.\n",
      "  - Many, many more ...\n",
      "\n"
     ]
    }
   ],
   "execution_count": 3
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:53:57.741699Z",
     "start_time": "2025-01-11T09:53:57.736189Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 注意返回值,顺序不要错\n",
    "# 训练集 train  x_train, y_train \n",
    "# 测试集  test   x_test, y_test\n",
    "\n",
    "# train_test_split 划分训练集和测试集，默认是75%训练集，25%测试集\n",
    "# random_state 随机种子，保证每次划分的结果一样\n",
    "x_train, x_test, y_train, y_test = (\n",
    "    train_test_split(li.data, li.target,\n",
    "                     test_size=0.25, random_state=1))\n",
    "\n",
    "# print(\"训练集特征值和目标值：\", x_train, y_train)\n",
    "print(\"训练集特征值shape\", x_train.shape)  # (112, 4)\n",
    "print('-' * 50)\n",
    "\n",
    "# print(\"测试集特征值和目标值：\", x_test, y_test)\n",
    "print(\"测试集特征值shape\", x_test.shape)  # (38, 4)\n",
    "print('-' * 50)"
   ],
   "id": "9fdea2a83bec0696",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "训练集特征值shape (112, 4)\n",
      "--------------------------------------------------\n",
      "测试集特征值shape (38, 4)\n",
      "--------------------------------------------------\n"
     ]
    }
   ],
   "execution_count": 4
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "练习2：20类新闻组数据集 大数据集",
   "id": "df604954f55a931b"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:53:57.968185Z",
     "start_time": "2025-01-11T09:53:57.743704Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# subset代表下载的数据集类型，默认是train，只有训练集，\n",
    "# 如果下载全部数据，需要设置subset='all'\n",
    "# data_home代表数据集的保存路径，默认是~/scikit_learn_data\n",
    "news = fetch_20newsgroups(subset='all', data_home='./data')  # 加载20类新闻组数据集\n",
    "\n",
     "# # 新闻数据集没有 feature_names 属性：原始数据是未结构化的文本，没有特征名\n",
    "# print(news.feature_names)\n",
    "# print(\"-\" * 50)\n",
    "print(news.target_names)  # 目标值名字\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(news.target.shape)  # (18846,)\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(len(news.target_names))  # 20\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(type(news.data))  # <class 'list'>\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(news.target[0:10])\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(min(news.target), max(news.target))  # 0 19\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(news.data[0])  # 第一个样本的文本内容"
   ],
   "id": "9857571e0115ae3d",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['alt.atheism', 'comp.graphics', 'comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware', 'comp.windows.x', 'misc.forsale', 'rec.autos', 'rec.motorcycles', 'rec.sport.baseball', 'rec.sport.hockey', 'sci.crypt', 'sci.electronics', 'sci.med', 'sci.space', 'soc.religion.christian', 'talk.politics.guns', 'talk.politics.mideast', 'talk.politics.misc', 'talk.religion.misc']\n",
      "--------------------------------------------------\n",
      "(18846,)\n",
      "--------------------------------------------------\n",
      "20\n",
      "--------------------------------------------------\n",
      "<class 'list'>\n",
      "--------------------------------------------------\n",
      "[10  3 17  3  4 12  4 10 10 19]\n",
      "--------------------------------------------------\n",
      "0 19\n",
      "--------------------------------------------------\n",
      "From: Mamatha Devineni Ratnam <mr47+@andrew.cmu.edu>\n",
      "Subject: Pens fans reactions\n",
      "Organization: Post Office, Carnegie Mellon, Pittsburgh, PA\n",
      "Lines: 12\n",
      "NNTP-Posting-Host: po4.andrew.cmu.edu\n",
      "\n",
      "\n",
      "\n",
      "I am sure some bashers of Pens fans are pretty confused about the lack\n",
      "of any kind of posts about the recent Pens massacre of the Devils. Actually,\n",
      "I am  bit puzzled too and a bit relieved. However, I am going to put an end\n",
      "to non-PIttsburghers' relief with a bit of praise for the Pens. Man, they\n",
      "are killing those Devils worse than I thought. Jagr just showed you why\n",
      "he is much better than his regular season stats. He is also a lot\n",
      "fo fun to watch in the playoffs. Bowman should let JAgr have a lot of\n",
      "fun in the next couple of games since the Pens are going to beat the pulp out of Jersey anyway. I was very disappointed not to see the Islanders lose the final\n",
      "regular season game.          PENS RULE!!!\n",
      "\n",
      "\n"
     ]
    }
   ],
   "execution_count": 5
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "练习3：加州房价数据集 大数据集",
   "id": "b210f65df6c8cedc"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:53:57.996237Z",
     "start_time": "2025-01-11T09:53:57.969190Z"
    }
   },
   "cell_type": "code",
   "source": [
    "house = fetch_california_housing(data_home='./data')  # 加载加州房价数据集\n",
    "\n",
    "print(house.data.shape)  # (20640, 8)\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(house.feature_names)\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(house.target_names)\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(house.target[0:10])\n",
    "print(\"-\" * 50)\n"
   ],
   "id": "2b46ccf42ce23169",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(20640, 8)\n",
      "--------------------------------------------------\n",
      "['MedInc', 'HouseAge', 'AveRooms', 'AveBedrms', 'Population', 'AveOccup', 'Latitude', 'Longitude']\n",
      "--------------------------------------------------\n",
      "['MedHouseVal']\n",
      "--------------------------------------------------\n",
      "[4.526 3.585 3.521 3.413 3.422 2.697 2.992 2.414 2.267 2.611]\n",
      "--------------------------------------------------\n"
     ]
    }
   ],
   "execution_count": 6
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "# 分类估计器",
   "id": "946d098aee29e270"
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## 1.k近邻分类器",
   "id": "8004e625fb8a870a"
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "（1） 数据预处理",
   "id": "2d66cf33ffbdcf72"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:09.364426Z",
     "start_time": "2025-01-11T09:53:57.998243Z"
    }
   },
   "cell_type": "code",
   "source": [
    "\"\"\"\n",
    "K-近邻预测用户签到位置\n",
    "\"\"\"\n",
    "\n",
    "data = pd.read_csv(\"./data/FBlocation/train.csv\")\n",
    "\n",
    "print(type(data))  # <class 'pandas.core.frame.DataFrame'>\n",
    "print(\"-\" * 50)\n",
    "print(data.head(5))\n",
    "print(\"-\" * 50)\n",
    "print(data.shape)  # (29118021, 6)  29118021条数据，6个特征\n",
    "print(\"-\" * 50)\n",
    "print(data.info())  # 属性数据类型\n",
    "print(\"-\" * 50)\n"
   ],
   "id": "9fd7a5a782f7df03",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'pandas.core.frame.DataFrame'>\n",
      "--------------------------------------------------\n",
      "   row_id       x       y  accuracy    time    place_id\n",
      "0       0  0.7941  9.0809        54  470702  8523065625\n",
      "1       1  5.9567  4.7968        13  186555  1757726713\n",
      "2       2  8.3078  7.0407        74  322648  1137537235\n",
      "3       3  7.3665  2.5165        65  704587  6567393236\n",
      "4       4  4.0961  1.1307        31  472130  7440663949\n",
      "--------------------------------------------------\n",
      "(29118021, 6)\n",
      "--------------------------------------------------\n",
      "<class 'pandas.core.frame.DataFrame'>\n",
      "RangeIndex: 29118021 entries, 0 to 29118020\n",
      "Data columns (total 6 columns):\n",
      " #   Column    Dtype  \n",
      "---  ------    -----  \n",
      " 0   row_id    int64  \n",
      " 1   x         float64\n",
      " 2   y         float64\n",
      " 3   accuracy  int64  \n",
      " 4   time      int64  \n",
      " 5   place_id  int64  \n",
      "dtypes: float64(2), int64(4)\n",
      "memory usage: 1.3 GB\n",
      "None\n",
      "--------------------------------------------------\n"
     ]
    }
   ],
   "execution_count": 7
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:09.959121Z",
     "start_time": "2025-01-11T09:54:09.366431Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 处理数据\n",
    "# 1.缩小数据,查询数据,为了减少计算时间\n",
    "data = data.query(\"x >1.0 & x<1.25 & y >2.5 & y<2.75\")\n",
    "\n",
    "print(data.shape)  # (17710, 6)  17710条数据，6个特征\n",
    "print(\"-\" * 50)"
   ],
   "id": "b1cef6a3787a0983",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(17710, 6)\n",
      "--------------------------------------------------\n"
     ]
    }
   ],
   "execution_count": 8
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:09.976128Z",
     "start_time": "2025-01-11T09:54:09.961137Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 2.处理时间的数据，unit是秒，把秒转换成日期格式\n",
    "time_data = pd.to_datetime(data['time'], unit='s')\n",
    "\n",
     "print(type(time_data))  # <class 'pandas.core.series.Series'>\n",
    "print(\"-\" * 50)\n",
    "# print(time_data.head(5))\n",
    "\n",
     "# DatetimeIndex 格式化时间数据\n",
     "# 把日期格式转换成 DatetimeIndex，可以方便地按年、月、日、时、分、秒等属性访问\n",
    "time_data = pd.DatetimeIndex(time_data)  #<class 'pandas.core.indexes.datetimes.DatetimeIndex'>\n",
    "print(type(time_data))\n",
    "print(\"-\" * 50)\n",
    "print(time_data[0:5])\n"
   ],
   "id": "c14f8dbba7b5834c",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'pandas.core.series.Series'>\n",
      "--------------------------------------------------\n",
      "<class 'pandas.core.indexes.datetimes.DatetimeIndex'>\n",
      "--------------------------------------------------\n",
      "DatetimeIndex(['1970-01-01 18:09:40', '1970-01-10 02:11:10',\n",
      "               '1970-01-05 15:08:02', '1970-01-06 23:03:03',\n",
      "               '1970-01-09 11:26:50'],\n",
      "              dtype='datetime64[ns]', name='time', freq=None)\n"
     ]
    }
   ],
   "execution_count": 9
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:09.999853Z",
     "start_time": "2025-01-11T09:54:09.979134Z"
    }
   },
   "cell_type": "code",
   "source": [
    "#日期，是否是周末，小时对于个人行为的影响是较大的(例如吃饭时间去饭店，看电影时间去电影院等),所以才做下面的处理\n",
     "# data.shape[1] 是当前列数，即把新列插入到最后的位置\n",
    "data.insert(data.shape[1], \"day\", time_data.day)\n",
    "data.insert(data.shape[1], 'hour', time_data.hour)\n",
    "data.insert(data.shape[1], 'weekday', time_data.weekday)\n",
    "\n",
    "# 这3句代码等价于上面3句代码，但不建议使用，不规范\n",
     "# data['day'] = time_data.day\n",
     "# data['hour'] = time_data.hour\n",
     "# data['weekday'] = time_data.weekday\n",
    "\n",
    "# 把样本的时间戳属性删除\n",
    "data = data.drop([\"time\"], axis=1)\n",
    "print(data.head(5))\n",
    "print(\"-\" * 50)"
   ],
   "id": "4ae1d8bcee09bcff",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "      row_id       x       y  accuracy    place_id  day  hour  weekday\n",
      "600      600  1.2214  2.7023        17  6683426742    1    18        3\n",
      "957      957  1.1832  2.6891        58  6683426742   10     2        5\n",
      "4345    4345  1.1935  2.6550        11  6889790653    5    15        0\n",
      "4735    4735  1.1452  2.6074        49  6822359752    6    23        1\n",
      "5580    5580  1.0089  2.7287        19  1527921905    9    11        4\n",
      "--------------------------------------------------\n"
     ]
    }
   ],
   "execution_count": 10
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:10.044559Z",
     "start_time": "2025-01-11T09:54:10.002859Z"
    }
   },
   "cell_type": "code",
   "source": "data.describe()  # 数据集的统计信息",
   "id": "128ffda2ebcb64ad",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "             row_id             x             y      accuracy      place_id  \\\n",
       "count  1.771000e+04  17710.000000  17710.000000  17710.000000  1.771000e+04   \n",
       "mean   1.450569e+07      1.122538      2.632309     82.482101  5.129895e+09   \n",
       "std    8.353805e+06      0.077086      0.070144    113.613227  2.357399e+09   \n",
       "min    6.000000e+02      1.000100      2.500100      1.000000  1.012024e+09   \n",
       "25%    7.327816e+06      1.049200      2.573800     25.000000  3.312464e+09   \n",
       "50%    1.443071e+07      1.123300      2.642300     62.000000  5.261906e+09   \n",
       "75%    2.163463e+07      1.190500      2.687800     75.000000  6.766325e+09   \n",
       "max    2.911215e+07      1.249900      2.749900   1004.000000  9.980711e+09   \n",
       "\n",
       "                day          hour       weekday  \n",
       "count  17710.000000  17710.000000  17710.000000  \n",
       "mean       5.101863     11.485545      3.092377  \n",
       "std        2.709287      6.932195      1.680218  \n",
       "min        1.000000      0.000000      0.000000  \n",
       "25%        3.000000      6.000000      2.000000  \n",
       "50%        5.000000     12.000000      3.000000  \n",
       "75%        7.000000     17.000000      4.000000  \n",
       "max       10.000000     23.000000      6.000000  "
      ],
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>row_id</th>\n",
       "      <th>x</th>\n",
       "      <th>y</th>\n",
       "      <th>accuracy</th>\n",
       "      <th>place_id</th>\n",
       "      <th>day</th>\n",
       "      <th>hour</th>\n",
       "      <th>weekday</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>count</th>\n",
       "      <td>1.771000e+04</td>\n",
       "      <td>17710.000000</td>\n",
       "      <td>17710.000000</td>\n",
       "      <td>17710.000000</td>\n",
       "      <td>1.771000e+04</td>\n",
       "      <td>17710.000000</td>\n",
       "      <td>17710.000000</td>\n",
       "      <td>17710.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>mean</th>\n",
       "      <td>1.450569e+07</td>\n",
       "      <td>1.122538</td>\n",
       "      <td>2.632309</td>\n",
       "      <td>82.482101</td>\n",
       "      <td>5.129895e+09</td>\n",
       "      <td>5.101863</td>\n",
       "      <td>11.485545</td>\n",
       "      <td>3.092377</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>std</th>\n",
       "      <td>8.353805e+06</td>\n",
       "      <td>0.077086</td>\n",
       "      <td>0.070144</td>\n",
       "      <td>113.613227</td>\n",
       "      <td>2.357399e+09</td>\n",
       "      <td>2.709287</td>\n",
       "      <td>6.932195</td>\n",
       "      <td>1.680218</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>min</th>\n",
       "      <td>6.000000e+02</td>\n",
       "      <td>1.000100</td>\n",
       "      <td>2.500100</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.012024e+09</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>25%</th>\n",
       "      <td>7.327816e+06</td>\n",
       "      <td>1.049200</td>\n",
       "      <td>2.573800</td>\n",
       "      <td>25.000000</td>\n",
       "      <td>3.312464e+09</td>\n",
       "      <td>3.000000</td>\n",
       "      <td>6.000000</td>\n",
       "      <td>2.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>50%</th>\n",
       "      <td>1.443071e+07</td>\n",
       "      <td>1.123300</td>\n",
       "      <td>2.642300</td>\n",
       "      <td>62.000000</td>\n",
       "      <td>5.261906e+09</td>\n",
       "      <td>5.000000</td>\n",
       "      <td>12.000000</td>\n",
       "      <td>3.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>75%</th>\n",
       "      <td>2.163463e+07</td>\n",
       "      <td>1.190500</td>\n",
       "      <td>2.687800</td>\n",
       "      <td>75.000000</td>\n",
       "      <td>6.766325e+09</td>\n",
       "      <td>7.000000</td>\n",
       "      <td>17.000000</td>\n",
       "      <td>4.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>max</th>\n",
       "      <td>2.911215e+07</td>\n",
       "      <td>1.249900</td>\n",
       "      <td>2.749900</td>\n",
       "      <td>1004.000000</td>\n",
       "      <td>9.980711e+09</td>\n",
       "      <td>10.000000</td>\n",
       "      <td>23.000000</td>\n",
       "      <td>6.000000</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 11
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:10.062133Z",
     "start_time": "2025-01-11T09:54:10.046563Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 把签到数量少于n个目标位置删除，place_id是标签，即目标值\n",
    "# groupby是按照place_id分组，count是统计每组的数量,\n",
    "# 即有同一个place_id的样本个数\n",
    "place_count = data.groupby(\"place_id\").count()\n",
    "print(type(place_count))  # <class 'pandas.core.frame.DataFrame'>\n",
    "print(\"-\" * 50)\n",
    "print(place_count.shape)  # (805, 7)\n",
    "place_count"
   ],
   "id": "f4a2c58f6c86291b",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'pandas.core.frame.DataFrame'>\n",
      "--------------------------------------------------\n",
      "(805, 7)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "            row_id     x     y  accuracy   day  hour  weekday\n",
       "place_id                                                     \n",
       "1012023972       1     1     1         1     1     1        1\n",
       "1057182134       1     1     1         1     1     1        1\n",
       "1059958036       3     3     3         3     3     3        3\n",
       "1085266789       1     1     1         1     1     1        1\n",
       "1097200869    1044  1044  1044      1044  1044  1044     1044\n",
       "...            ...   ...   ...       ...   ...   ...      ...\n",
       "9904182060       1     1     1         1     1     1        1\n",
       "9915093501       1     1     1         1     1     1        1\n",
       "9946198589       1     1     1         1     1     1        1\n",
       "9950190890       1     1     1         1     1     1        1\n",
       "9980711012       5     5     5         5     5     5        5\n",
       "\n",
       "[805 rows x 7 columns]"
      ],
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>row_id</th>\n",
       "      <th>x</th>\n",
       "      <th>y</th>\n",
       "      <th>accuracy</th>\n",
       "      <th>day</th>\n",
       "      <th>hour</th>\n",
       "      <th>weekday</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>place_id</th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>1012023972</th>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1057182134</th>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1059958036</th>\n",
       "      <td>3</td>\n",
       "      <td>3</td>\n",
       "      <td>3</td>\n",
       "      <td>3</td>\n",
       "      <td>3</td>\n",
       "      <td>3</td>\n",
       "      <td>3</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1085266789</th>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1097200869</th>\n",
       "      <td>1044</td>\n",
       "      <td>1044</td>\n",
       "      <td>1044</td>\n",
       "      <td>1044</td>\n",
       "      <td>1044</td>\n",
       "      <td>1044</td>\n",
       "      <td>1044</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9904182060</th>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9915093501</th>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9946198589</th>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9950190890</th>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9980711012</th>\n",
       "      <td>5</td>\n",
       "      <td>5</td>\n",
       "      <td>5</td>\n",
       "      <td>5</td>\n",
       "      <td>5</td>\n",
       "      <td>5</td>\n",
       "      <td>5</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>805 rows × 7 columns</p>\n",
       "</div>"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 12
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:10.090330Z",
     "start_time": "2025-01-11T09:54:10.064139Z"
    }
   },
   "cell_type": "code",
   "source": "place_count.describe()",
   "id": "5114a20445a8e2d6",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "            row_id            x            y     accuracy          day  \\\n",
       "count   805.000000   805.000000   805.000000   805.000000   805.000000   \n",
       "mean     22.000000    22.000000    22.000000    22.000000    22.000000   \n",
       "std      88.955632    88.955632    88.955632    88.955632    88.955632   \n",
       "min       1.000000     1.000000     1.000000     1.000000     1.000000   \n",
       "25%       1.000000     1.000000     1.000000     1.000000     1.000000   \n",
       "50%       2.000000     2.000000     2.000000     2.000000     2.000000   \n",
       "75%       5.000000     5.000000     5.000000     5.000000     5.000000   \n",
       "max    1044.000000  1044.000000  1044.000000  1044.000000  1044.000000   \n",
       "\n",
       "              hour      weekday  \n",
       "count   805.000000   805.000000  \n",
       "mean     22.000000    22.000000  \n",
       "std      88.955632    88.955632  \n",
       "min       1.000000     1.000000  \n",
       "25%       1.000000     1.000000  \n",
       "50%       2.000000     2.000000  \n",
       "75%       5.000000     5.000000  \n",
       "max    1044.000000  1044.000000  "
      ],
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>row_id</th>\n",
       "      <th>x</th>\n",
       "      <th>y</th>\n",
       "      <th>accuracy</th>\n",
       "      <th>day</th>\n",
       "      <th>hour</th>\n",
       "      <th>weekday</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>count</th>\n",
       "      <td>805.000000</td>\n",
       "      <td>805.000000</td>\n",
       "      <td>805.000000</td>\n",
       "      <td>805.000000</td>\n",
       "      <td>805.000000</td>\n",
       "      <td>805.000000</td>\n",
       "      <td>805.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>mean</th>\n",
       "      <td>22.000000</td>\n",
       "      <td>22.000000</td>\n",
       "      <td>22.000000</td>\n",
       "      <td>22.000000</td>\n",
       "      <td>22.000000</td>\n",
       "      <td>22.000000</td>\n",
       "      <td>22.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>std</th>\n",
       "      <td>88.955632</td>\n",
       "      <td>88.955632</td>\n",
       "      <td>88.955632</td>\n",
       "      <td>88.955632</td>\n",
       "      <td>88.955632</td>\n",
       "      <td>88.955632</td>\n",
       "      <td>88.955632</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>min</th>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>25%</th>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>50%</th>\n",
       "      <td>2.000000</td>\n",
       "      <td>2.000000</td>\n",
       "      <td>2.000000</td>\n",
       "      <td>2.000000</td>\n",
       "      <td>2.000000</td>\n",
       "      <td>2.000000</td>\n",
       "      <td>2.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>75%</th>\n",
       "      <td>5.000000</td>\n",
       "      <td>5.000000</td>\n",
       "      <td>5.000000</td>\n",
       "      <td>5.000000</td>\n",
       "      <td>5.000000</td>\n",
       "      <td>5.000000</td>\n",
       "      <td>5.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>max</th>\n",
       "      <td>1044.000000</td>\n",
       "      <td>1044.000000</td>\n",
       "      <td>1044.000000</td>\n",
       "      <td>1044.000000</td>\n",
       "      <td>1044.000000</td>\n",
       "      <td>1044.000000</td>\n",
       "      <td>1044.000000</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 13
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:10.107669Z",
     "start_time": "2025-01-11T09:54:10.094337Z"
    }
   },
   "cell_type": "code",
   "source": [
    "#只选择去的人大于3的数据，认为1,2,3的是噪音，这个地方去的人很少，不用推荐给其他人\n",
    "# reset_index()是把原来的index变为0,1,2,3,4,5,6这种效果，从零开始排\n",
    "tf = place_count[place_count.row_id > 3].reset_index()\n",
    "tf"
   ],
   "id": "c336582105073719",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "       place_id  row_id     x     y  accuracy   day  hour  weekday\n",
       "0    1097200869    1044  1044  1044      1044  1044  1044     1044\n",
       "1    1228935308     120   120   120       120   120   120      120\n",
       "2    1267801529      58    58    58        58    58    58       58\n",
       "3    1278040507      15    15    15        15    15    15       15\n",
       "4    1285051622      21    21    21        21    21    21       21\n",
       "..          ...     ...   ...   ...       ...   ...   ...      ...\n",
       "234  9741307878       5     5     5         5     5     5        5\n",
       "235  9753855529      21    21    21        21    21    21       21\n",
       "236  9806043737       6     6     6         6     6     6        6\n",
       "237  9809476069      23    23    23        23    23    23       23\n",
       "238  9980711012       5     5     5         5     5     5        5\n",
       "\n",
       "[239 rows x 8 columns]"
      ],
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>place_id</th>\n",
       "      <th>row_id</th>\n",
       "      <th>x</th>\n",
       "      <th>y</th>\n",
       "      <th>accuracy</th>\n",
       "      <th>day</th>\n",
       "      <th>hour</th>\n",
       "      <th>weekday</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>1097200869</td>\n",
       "      <td>1044</td>\n",
       "      <td>1044</td>\n",
       "      <td>1044</td>\n",
       "      <td>1044</td>\n",
       "      <td>1044</td>\n",
       "      <td>1044</td>\n",
       "      <td>1044</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1228935308</td>\n",
       "      <td>120</td>\n",
       "      <td>120</td>\n",
       "      <td>120</td>\n",
       "      <td>120</td>\n",
       "      <td>120</td>\n",
       "      <td>120</td>\n",
       "      <td>120</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>1267801529</td>\n",
       "      <td>58</td>\n",
       "      <td>58</td>\n",
       "      <td>58</td>\n",
       "      <td>58</td>\n",
       "      <td>58</td>\n",
       "      <td>58</td>\n",
       "      <td>58</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>1278040507</td>\n",
       "      <td>15</td>\n",
       "      <td>15</td>\n",
       "      <td>15</td>\n",
       "      <td>15</td>\n",
       "      <td>15</td>\n",
       "      <td>15</td>\n",
       "      <td>15</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>1285051622</td>\n",
       "      <td>21</td>\n",
       "      <td>21</td>\n",
       "      <td>21</td>\n",
       "      <td>21</td>\n",
       "      <td>21</td>\n",
       "      <td>21</td>\n",
       "      <td>21</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>234</th>\n",
       "      <td>9741307878</td>\n",
       "      <td>5</td>\n",
       "      <td>5</td>\n",
       "      <td>5</td>\n",
       "      <td>5</td>\n",
       "      <td>5</td>\n",
       "      <td>5</td>\n",
       "      <td>5</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>235</th>\n",
       "      <td>9753855529</td>\n",
       "      <td>21</td>\n",
       "      <td>21</td>\n",
       "      <td>21</td>\n",
       "      <td>21</td>\n",
       "      <td>21</td>\n",
       "      <td>21</td>\n",
       "      <td>21</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>236</th>\n",
       "      <td>9806043737</td>\n",
       "      <td>6</td>\n",
       "      <td>6</td>\n",
       "      <td>6</td>\n",
       "      <td>6</td>\n",
       "      <td>6</td>\n",
       "      <td>6</td>\n",
       "      <td>6</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>237</th>\n",
       "      <td>9809476069</td>\n",
       "      <td>23</td>\n",
       "      <td>23</td>\n",
       "      <td>23</td>\n",
       "      <td>23</td>\n",
       "      <td>23</td>\n",
       "      <td>23</td>\n",
       "      <td>23</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>238</th>\n",
       "      <td>9980711012</td>\n",
       "      <td>5</td>\n",
       "      <td>5</td>\n",
       "      <td>5</td>\n",
       "      <td>5</td>\n",
       "      <td>5</td>\n",
       "      <td>5</td>\n",
       "      <td>5</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>239 rows × 8 columns</p>\n",
       "</div>"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 14
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:10.116600Z",
     "start_time": "2025-01-11T09:54:10.109679Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# isin()是判断某一列的值是否在另一列的值中，即判断place_id是否在tf的place_id\n",
    "data = data[data[\"place_id\"].isin(tf[\"place_id\"])]\n",
    "print(data.shape)  # (16918, 8)"
   ],
   "id": "c90a21af81c638a7",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(16918, 8)\n"
     ]
    }
   ],
   "execution_count": 15
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:10.128436Z",
     "start_time": "2025-01-11T09:54:10.118608Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 目标值\n",
    "y = data[\"place_id\"]\n",
    "\n",
    "# 删除目标值，保留特征值\n",
    "x = data.drop([\"place_id\"], axis=1)\n",
    "# 删除无用的特征值，这里就是row_id，因为每条数据都有row_id\n",
    "x = x.drop([\"row_id\"], axis=1)\n",
    "\n",
    "print(x.shape)  # (16918, 6)\n",
    "print(x.columns)"
   ],
   "id": "5c62a1b03d27e256",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(16918, 6)\n",
      "Index(['x', 'y', 'accuracy', 'day', 'hour', 'weekday'], dtype='object')\n"
     ]
    }
   ],
   "execution_count": 16
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "(2) 划分数据集，数据特征处理",
   "id": "2b3e8b66ef45bd9f"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:10.148996Z",
     "start_time": "2025-01-11T09:54:10.130442Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# y=f(x),x是特征值，y是目标值，f是分类器\n",
    "# 进行数据的分割训练集合测试集\n",
    "x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=1)\n",
    "\n",
    "# 标准化数据\n",
    "std = StandardScaler()\n",
    "\n",
    "# 对测试集和训练集的特征值进行标准化,服务于knn fit\n",
    "x_train = std.fit_transform(x_train)\n",
    "print(std.mean_)\n",
    "print(std.var_)\n",
    "print(\"-\" * 50)\n",
    "\n",
    "#transfrom不再进行均值和方差的计算，是在原有的基础上去标准化\n",
    "x_test = std.transform(x_test)\n",
    "print(std.mean_)\n",
    "print(std.var_)"
   ],
   "id": "5f4043d5a25efa3b",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[ 1.12295735  2.63237278 81.34938525  5.10064628 11.44293821  3.10135561]\n",
      "[5.98489138e-03 4.86857391e-03 1.19597480e+04 7.32837915e+00\n",
      " 4.83742660e+01 2.81838404e+00]\n",
      "--------------------------------------------------\n",
      "[ 1.12295735  2.63237278 81.34938525  5.10064628 11.44293821  3.10135561]\n",
      "[5.98489138e-03 4.86857391e-03 1.19597480e+04 7.32837915e+00\n",
      " 4.83742660e+01 2.81838404e+00]\n"
     ]
    }
   ],
   "execution_count": 17
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "（3）算法流程",
   "id": "b485b6a7c8acde58"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:10.597048Z",
     "start_time": "2025-01-11T09:54:10.151004Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# KNeighborsClassifier 算法流程\n",
    "# 1. 计算样本之间的距离，距离度量有多种，默认是欧式距离，可选曼哈顿距离，余弦距离等\n",
    "# 2. 确定k值，即选择最近的k个样本，默认是5\n",
    "# 3. 确定分类，根据k个样本的标签，决定新样本的标签\n",
    "# 4. 计算准确率，召回率，F1值等\n",
    "# 5. 调参，调整k值，距离度量，算法类型等，提高准确率\n",
    "\n",
    "knn = KNeighborsClassifier(n_neighbors=7)\n",
    "\n",
    "# fit 训练模型\n",
    "# x_train 训练集特征值\n",
    "# y_train 训练集目标值\n",
    "# knn的fit是不训练的，只是把训练集的特征值和目标值放入到内存中\n",
    "knn.fit(x_train, y_train)\n",
    "\n",
    "# predict 得出预测数据\n",
    "y_predict = knn.predict(x_test)\n",
    "\n",
    "# score 计算准确率\n",
    "print(\"预测的准确率\")\n",
    "print(knn.score(x_test, y_test))\n",
    "# n_neighbors=10 准确率为0.47966903073286055\n",
    "# n_neighbors=9 准确率为0.4808510638297872\n",
    "# n_neighbors=8 准确率为0.48156028368794324\n",
    "# n_neighbors=7 准确率为0.48439716312056735\n",
    "# n_neighbors=6 准确率为0.484160756501182\n",
    "# n_neighbors=5 准确率为0.4806146572104019\n",
    "# n_neighbors=4 准确率为0.47375886524822697\n",
    "# n_neighbors=3 准确率为0.46430260047281324\n",
    "\n",
    "print(\"-\" * 50)\n",
    "print(\"预测要到的位置\")\n",
    "print(y_predict[0:10])\n",
    "print(\"实际要到的位置\")\n",
    "print(y_test[0:10])\n"
   ],
   "id": "6a1dc26a9f109a0",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "预测的准确率\n",
      "0.48439716312056735\n",
      "--------------------------------------------------\n",
      "预测要到的位置\n",
      "[1913341282 1097200869 6097504486 9632980559 6424972551 4022692381\n",
      " 8048985799 6683426742 1435128522 3312463746]\n",
      "实际要到的位置\n",
      "16751286    1893548673\n",
      "12423167    1097200869\n",
      "7517023     6097504486\n",
      "4400015     9632980559\n",
      "26212472    6424972551\n",
      "7089828     4022692381\n",
      "10935607    2327054745\n",
      "25025511    3533177779\n",
      "27755137    1435128522\n",
      "19678934    3312463746\n",
      "Name: place_id, dtype: int64\n"
     ]
    }
   ],
   "execution_count": 18
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## (4) 调超参的方法，网格搜索",
   "id": "bb4466e870b5b27"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:17.086138Z",
     "start_time": "2025-01-11T09:54:10.598055Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# weights 权重，uniform是所有样本权重相同，distance是根据样本到新样本的距离来计算权重\n",
    "param = {\"n_neighbors\": [3, 5, 10, 12, 15, 20], \"weights\": [\"uniform\", \"distance\"]}\n",
    "# 验证组数： a * b,共12组\n",
    "\n",
    "# 进行网格搜索，cv=3是3折交叉验证，用其中2折训练，1折验证\n",
    "gc = GridSearchCV(knn, param_grid=param, cv=3)\n",
    "\n",
    "# 你给它的x_train，它又分为训练集，验证集,然后用训练集训练模型，用验证集验证模型的准确率，\n",
    "gc.fit(x_train, y_train)\n",
    "\n",
    "print(\"在测试集上的准确率\")\n",
    "print(gc.score(x_test, y_test))\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(\"在交叉验证当中最好的结果：\")\n",
    "print(gc.best_score_)\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(\"选择最好的模型是：\")\n",
    "print(gc.best_estimator_)  # 最好的模型,告诉你用了哪些参数\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(\"每个超参数每次交叉验证的结果：\")\n",
    "gc.cv_results_"
   ],
   "id": "7a9bc24bbfc2ebb",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Program Files\\Python312\\Lib\\site-packages\\sklearn\\model_selection\\_split.py:805: UserWarning: The least populated class in y has only 1 members, which is less than n_splits=3.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "在测试集上的准确率\n",
      "0.49763593380614657\n",
      "--------------------------------------------------\n",
      "在交叉验证当中最好的结果：\n",
      "0.4816362349278435\n",
      "--------------------------------------------------\n",
      "选择最好的模型是：\n",
      "KNeighborsClassifier(n_neighbors=12, weights='distance')\n",
      "--------------------------------------------------\n",
      "每个超参数每次交叉验证的结果：\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'mean_fit_time': array([0.0133365 , 0.01300343, 0.0123349 , 0.01333777, 0.01434104,\n",
       "        0.01233125, 0.0123349 , 0.01300279, 0.01900578, 0.01867143,\n",
       "        0.01266726, 0.01300232]),\n",
       " 'std_fit_time': array([4.70978200e-04, 1.18944085e-06, 4.71184217e-04, 4.70416244e-04,\n",
       "        1.88301614e-03, 4.78623185e-04, 4.72681387e-04, 1.29616312e-06,\n",
       "        2.82923461e-03, 4.03036324e-03, 4.73788463e-04, 1.83992972e-06]),\n",
       " 'mean_score_time': array([0.17937239, 0.0573469 , 0.17637356, 0.07168619, 0.19771107,\n",
       "        0.08702477, 0.19704088, 0.09968861, 0.32140891, 0.21138   ,\n",
       "        0.21738227, 0.12936346]),\n",
       " 'std_score_time': array([0.01065943, 0.00205371, 0.00250295, 0.0047846 , 0.0053028 ,\n",
       "        0.00081517, 0.00216425, 0.00330105, 0.02191759, 0.07761421,\n",
       "        0.00047081, 0.00169975]),\n",
       " 'param_n_neighbors': masked_array(data=[3, 3, 5, 5, 10, 10, 12, 12, 15, 15, 20, 20],\n",
       "              mask=[False, False, False, False, False, False, False, False,\n",
       "                    False, False, False, False],\n",
       "        fill_value=999999),\n",
       " 'param_weights': masked_array(data=['uniform', 'distance', 'uniform', 'distance',\n",
       "                    'uniform', 'distance', 'uniform', 'distance',\n",
       "                    'uniform', 'distance', 'uniform', 'distance'],\n",
       "              mask=[False, False, False, False, False, False, False, False,\n",
       "                    False, False, False, False],\n",
       "        fill_value=np.str_('?'),\n",
       "             dtype=object),\n",
       " 'params': [{'n_neighbors': 3, 'weights': 'uniform'},\n",
       "  {'n_neighbors': 3, 'weights': 'distance'},\n",
       "  {'n_neighbors': 5, 'weights': 'uniform'},\n",
       "  {'n_neighbors': 5, 'weights': 'distance'},\n",
       "  {'n_neighbors': 10, 'weights': 'uniform'},\n",
       "  {'n_neighbors': 10, 'weights': 'distance'},\n",
       "  {'n_neighbors': 12, 'weights': 'uniform'},\n",
       "  {'n_neighbors': 12, 'weights': 'distance'},\n",
       "  {'n_neighbors': 15, 'weights': 'uniform'},\n",
       "  {'n_neighbors': 15, 'weights': 'distance'},\n",
       "  {'n_neighbors': 20, 'weights': 'uniform'},\n",
       "  {'n_neighbors': 20, 'weights': 'distance'}],\n",
       " 'split0_test_score': array([0.44468085, 0.4534279 , 0.4607565 , 0.47399527, 0.46170213,\n",
       "        0.48014184, 0.45650118, 0.48108747, 0.45508274, 0.47895981,\n",
       "        0.44397163, 0.4749409 ]),\n",
       " 'split1_test_score': array([0.43390873, 0.4542445 , 0.45660913, 0.47528967, 0.45542681,\n",
       "        0.48238354, 0.45329865, 0.48049184, 0.44809648, 0.47623552,\n",
       "        0.43934736, 0.46890518]),\n",
       " 'split2_test_score': array([0.43982029, 0.4561362 , 0.45684559, 0.47221565, 0.4618113 ,\n",
       "        0.48191062, 0.45897375, 0.48332939, 0.46062899, 0.48049184,\n",
       "        0.45282573, 0.48238354]),\n",
       " 'mean_test_score': array([0.43946996, 0.45460287, 0.45807041, 0.47383353, 0.45964675,\n",
       "        0.48147867, 0.45625786, 0.48163623, 0.45460274, 0.47856239,\n",
       "        0.44538157, 0.47540987]),\n",
       " 'std_test_score': array([0.00440467, 0.00113433, 0.00190181, 0.00126016, 0.00298428,\n",
       "        0.00096479, 0.00232323, 0.00122169, 0.00512762, 0.00176021,\n",
       "        0.00559211, 0.0055125 ]),\n",
       " 'rank_test_score': array([12,  9,  7,  5,  6,  2,  8,  1, 10,  3, 11,  4], dtype=int32)}"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 19
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## 2.朴素贝叶斯分类器",
   "id": "5e820b47a49f9cb9"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:17.300619Z",
     "start_time": "2025-01-11T09:54:17.087142Z"
    }
   },
   "cell_type": "code",
   "source": [
    "news = fetch_20newsgroups(subset='all', data_home='./data')  # 加载20类新闻组数据集\n",
    "\n",
    "print(type(news))  # <class 'sklearn.utils.Bunch'>\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(type(news.data))  # <class 'list'>\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(type(news.target))  # <class 'numpy.ndarray'>\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(news.target.shape)  # (18846,)\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(len(news.data))  # 18846个样本\n",
    "print(\"-\" * 50)\n",
    "\n",
    "# 标签的名字\n",
    "print(news.target_names)\n",
    "print(\"-\" * 50)\n",
    "\n",
    "# 标签的类别\n",
    "print(np.unique(news.target))\n",
    "print(\"-\" * 50)\n",
    "\n",
    "news.target[0:10]"
   ],
   "id": "31027a5c7cff0e17",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'sklearn.utils._bunch.Bunch'>\n",
      "--------------------------------------------------\n",
      "<class 'list'>\n",
      "--------------------------------------------------\n",
      "<class 'numpy.ndarray'>\n",
      "--------------------------------------------------\n",
      "(18846,)\n",
      "--------------------------------------------------\n",
      "18846\n",
      "--------------------------------------------------\n",
      "['alt.atheism', 'comp.graphics', 'comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware', 'comp.windows.x', 'misc.forsale', 'rec.autos', 'rec.motorcycles', 'rec.sport.baseball', 'rec.sport.hockey', 'sci.crypt', 'sci.electronics', 'sci.med', 'sci.space', 'soc.religion.christian', 'talk.politics.guns', 'talk.politics.mideast', 'talk.politics.misc', 'talk.religion.misc']\n",
      "--------------------------------------------------\n",
      "[ 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19]\n",
      "--------------------------------------------------\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "array([10,  3, 17,  3,  4, 12,  4, 10, 10, 19], dtype=int32)"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 20
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:23.645622Z",
     "start_time": "2025-01-11T09:54:17.301627Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 数据分割\n",
    "x_train, x_test, y_train, y_test = train_test_split(news.data, news.target, test_size=0.25, random_state=1)\n",
    "\n",
    "# 对数据集进行特征抽取\n",
    "tf = TfidfVectorizer()\n",
    "\n",
    "# 以训练集当中的词的列表进行每篇文章重要性统计['a','b','c','d']\n",
    "# 统计每个词出现的次数，然后计算每个词的tf-idf值，tf-idf值越大，代表这个词越重要\n",
    "x_train = tf.fit_transform(x_train)\n",
    "\n",
    "#针对特征内容，可以自行打印，下面的打印可以得到特征数目，总计有15万特征\n",
    "print(len(tf.get_feature_names_out()))  # 153196\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(tf.get_feature_names_out()[0:10])\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(x_train.toarray().shape)  # (14134, 153196)"
   ],
   "id": "2c59714853ef1326",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "153196\n",
      "--------------------------------------------------\n",
      "['00' '000' '0000' '00000' '0000000004' '0000000005' '0000000667'\n",
      " '0000001200' '000003' '000005102000']\n",
      "--------------------------------------------------\n",
      "(14134, 153196)\n"
     ]
    }
   ],
   "execution_count": 21
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:23.861513Z",
     "start_time": "2025-01-11T09:54:23.646626Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 进行朴素贝叶斯算法的预测,alpha是拉普拉斯平滑系数，分子和分母加上一个系数，分母加alpha*特征词数目\n",
    "mlt = MultinomialNB(alpha=1.0)\n",
    "\n",
    "start = time.time()\n",
    "mlt.fit(x_train, y_train)  # 训练模型\n",
    "end = time.time()\n",
    "print(\"训练模型的时间：\", end - start)\n",
    "\n"
   ],
   "id": "3c8571b627f2bbac",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "训练模型的时间： 0.20903277397155762\n"
     ]
    }
   ],
   "execution_count": 22
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:24.753031Z",
     "start_time": "2025-01-11T09:54:23.863523Z"
    }
   },
   "cell_type": "code",
   "source": [
    "x_transform_test = tf.transform(x_test)  #特征数目不发生改变\n",
    "print(x_transform_test.shape)  # (4712, 153196)"
   ],
   "id": "12999a5601bccd38",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(4712, 153196)\n"
     ]
    }
   ],
   "execution_count": 23
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:24.815209Z",
     "start_time": "2025-01-11T09:54:24.754034Z"
    }
   },
   "cell_type": "code",
   "source": [
    "y_predict = mlt.predict(x_transform_test)  # 预测\n",
    "\n",
    "print(\"预测的前面10篇文章类别为：\")\n",
    "print(y_predict[0:10])\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(\"实际的前面10篇文章类别为：\")\n",
    "print(y_test[0:10])\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(\"准确率：\")\n",
    "print(mlt.score(x_transform_test, y_test))  # 准确率\n"
   ],
   "id": "54f724d7b11e17c6",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "预测的前面10篇文章类别为：\n",
      "[16 19 18  1  9 15  1  2 16 13]\n",
      "--------------------------------------------------\n",
      "实际的前面10篇文章类别为：\n",
      "[16  3 12  7 16 16  1 17  1  9]\n",
      "--------------------------------------------------\n",
      "准确率：\n",
      "0.8518675721561969\n"
     ]
    }
   ],
   "execution_count": 24
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## 分类模型的评估",
   "id": "eff34c9ad251144f"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:24.828191Z",
     "start_time": "2025-01-11T09:54:24.817216Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 精确率(precision)：预测为正的样本中，真实为正的样本的比例 = TP / (TP + FP)\n",
    "# 召回率(recall)：真实为正的样本中，预测为正的样本的比例 = TP / (TP + FN)\n",
    "# F1值(f1-score)： 精确率和召回率的调和平均值，\n",
    "#       f1-score = 2 * (precision * recall) / (precision + recall)\n",
    "# 准确率(accuracy)：预测正确的样本的比例 = (TP + TN) / (TP + TN + FP + FN)\n",
    "\n",
    "# 目前这个场景我们不需要召回率，support是真实的为那个类别的有多少个样本\n",
    "# classification_report 打印出了precision, recall, f1-score, support\n",
    "print(classification_report(y_test, y_predict,\n",
    "                            target_names=news.target_names))\n",
    "\n"
   ],
   "id": "ea07856160fe2597",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                          precision    recall  f1-score   support\n",
      "\n",
      "             alt.atheism       0.91      0.77      0.83       199\n",
      "           comp.graphics       0.83      0.79      0.81       242\n",
      " comp.os.ms-windows.misc       0.89      0.83      0.86       263\n",
      "comp.sys.ibm.pc.hardware       0.80      0.83      0.81       262\n",
      "   comp.sys.mac.hardware       0.90      0.88      0.89       234\n",
      "          comp.windows.x       0.92      0.85      0.88       230\n",
      "            misc.forsale       0.96      0.67      0.79       257\n",
      "               rec.autos       0.90      0.87      0.88       265\n",
      "         rec.motorcycles       0.90      0.95      0.92       251\n",
      "      rec.sport.baseball       0.89      0.96      0.93       226\n",
      "        rec.sport.hockey       0.95      0.98      0.96       262\n",
      "               sci.crypt       0.76      0.97      0.85       257\n",
      "         sci.electronics       0.84      0.80      0.82       229\n",
      "                 sci.med       0.97      0.86      0.91       249\n",
      "               sci.space       0.92      0.96      0.94       256\n",
      "  soc.religion.christian       0.55      0.98      0.70       243\n",
      "      talk.politics.guns       0.76      0.96      0.85       234\n",
      "   talk.politics.mideast       0.93      0.99      0.96       224\n",
      "      talk.politics.misc       0.98      0.56      0.72       197\n",
      "      talk.religion.misc       0.97      0.26      0.41       132\n",
      "\n",
      "                accuracy                           0.85      4712\n",
      "               macro avg       0.88      0.84      0.84      4712\n",
      "            weighted avg       0.87      0.85      0.85      4712\n",
      "\n"
     ]
    }
   ],
   "execution_count": 25
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:24.837801Z",
     "start_time": "2025-01-11T09:54:24.829196Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# np.where(condition, x, y) 按条件选择元素：\n",
    "# 第一个参数是条件，第二个参数是条件为真时取的值，第三个参数是条件为假时取的值\n",
    "# 这里把label为0的样本标签改为1(正例)，其余label(1~19)的样本标签改为0，转为二分类问题\n",
    "y_test1 = np.where(y_test == 0, 1, 0)\n",
    "print(y_test1.sum())  #测试集中label为0的样本数 199\n",
    "print(\"-\" * 50)\n",
    "\n",
    "y_predict1 = np.where(y_predict == 0, 1, 0)\n",
    "print(y_predict1.sum())  #预测集中label为0的样本数 168\n",
    "print(\"-\" * 50)\n",
    "\n",
    "# 168个预测为正例中，有153预测正确了\n",
    "# 两列相乘，对应位置都为1的位置才为1，即预测正确的个数\n",
    "print((y_test1 * y_predict1).sum())  # 153\n",
    "print(\"-\" * 50)\n",
    "\n",
    "# 精确率(precision) = TP / (TP + FP)\n",
    "print(153 / 168)\n",
    "\n",
    "# 召回率\n",
    "print(153 / 199)\n",
    "\n",
    "print(\"-\" * 50)\n",
    "\n",
    "# label为0的AUC值\n",
    "print(\"AUC指标：\")\n",
    "print(roc_auc_score(y_test1, y_predict1))"
   ],
   "id": "d14629132fbf9873",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "199\n",
      "--------------------------------------------------\n",
      "168\n",
      "--------------------------------------------------\n",
      "153\n",
      "--------------------------------------------------\n",
      "0.9107142857142857\n",
      "0.7688442211055276\n",
      "--------------------------------------------------\n",
      "AUC指标：\n",
      "0.8827602448315142\n"
     ]
    }
   ],
   "execution_count": 26
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:24.845713Z",
     "start_time": "2025-01-11T09:54:24.838806Z"
    }
   },
   "cell_type": "code",
   "source": [
    "#手动计算二分类(以label为0作为正例)的精确率，召回率，F1-score\n",
    "FP = np.where((np.array(y_test1) - np.array(y_predict1)) == -1, 1, 0).sum()\n",
    "\n",
    "TP = y_predict1.sum() - FP  #TP是153\n",
    "print(TP)\n",
    "\n",
    "FN = np.where((np.array(y_test1) - np.array(y_predict1)) == 1, 1, 0).sum()  #FN是46\n",
    "\n",
    "print(FN)  #FN是46\n",
    "TN = np.where(y_test1 == 0, 1, 0).sum() - FP  #TN是4498\n",
    "print(TN)\n",
    "print(\"-\" * 50)\n",
    "\n",
    "# 精确率\n",
    "print(TP / (TP + FP))\n",
    "# 召回率\n",
    "print(TP / (TP + FN))\n",
    "# F1-score\n",
    "print(2 * TP / (2 * TP + FP + FN))\n"
   ],
   "id": "a317742fd61d9d92",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "153\n",
      "46\n",
      "4498\n",
      "--------------------------------------------------\n",
      "0.9107142857142857\n",
      "0.7688442211055276\n",
      "0.8337874659400545\n"
     ]
    }
   ],
   "execution_count": 27
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## 3.决策树",
   "id": "356578eaec1eb754"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:24.875260Z",
     "start_time": "2025-01-11T09:54:24.846720Z"
    }
   },
   "cell_type": "code",
   "source": [
    "titanic = pd.read_csv(\"./data/titanic.txt\")\n",
    "\n",
    "print(type(titanic))  # <class 'pandas.core.frame.DataFrame'>\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(titanic.shape)  # (1313, 11)\n",
    "print(\"-\" * 50)\n",
    "\n",
    "titanic.info()"
   ],
   "id": "ce76d41c4e795c9a",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'pandas.core.frame.DataFrame'>\n",
      "--------------------------------------------------\n",
      "(1313, 11)\n",
      "--------------------------------------------------\n",
      "<class 'pandas.core.frame.DataFrame'>\n",
      "RangeIndex: 1313 entries, 0 to 1312\n",
      "Data columns (total 11 columns):\n",
      " #   Column     Non-Null Count  Dtype  \n",
      "---  ------     --------------  -----  \n",
      " 0   row.names  1313 non-null   int64  \n",
      " 1   pclass     1313 non-null   object \n",
      " 2   survived   1313 non-null   int64  \n",
      " 3   name       1313 non-null   object \n",
      " 4   age        633 non-null    float64\n",
      " 5   embarked   821 non-null    object \n",
      " 6   home.dest  754 non-null    object \n",
      " 7   room       77 non-null     object \n",
      " 8   ticket     69 non-null     object \n",
      " 9   boat       347 non-null    object \n",
      " 10  sex        1313 non-null   object \n",
      "dtypes: float64(1), int64(2), object(8)\n",
      "memory usage: 113.0+ KB\n"
     ]
    }
   ],
   "execution_count": 28
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:24.886267Z",
     "start_time": "2025-01-11T09:54:24.876265Z"
    }
   },
   "cell_type": "code",
   "source": "titanic",
   "id": "4902d2db3ec9ca2",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "      row.names pclass  survived  \\\n",
       "0             1    1st         1   \n",
       "1             2    1st         0   \n",
       "2             3    1st         0   \n",
       "3             4    1st         0   \n",
       "4             5    1st         1   \n",
       "...         ...    ...       ...   \n",
       "1308       1309    3rd         0   \n",
       "1309       1310    3rd         0   \n",
       "1310       1311    3rd         0   \n",
       "1311       1312    3rd         0   \n",
       "1312       1313    3rd         0   \n",
       "\n",
       "                                                 name      age     embarked  \\\n",
       "0                        Allen, Miss Elisabeth Walton  29.0000  Southampton   \n",
       "1                         Allison, Miss Helen Loraine   2.0000  Southampton   \n",
       "2                 Allison, Mr Hudson Joshua Creighton  30.0000  Southampton   \n",
       "3     Allison, Mrs Hudson J.C. (Bessie Waldo Daniels)  25.0000  Southampton   \n",
       "4                       Allison, Master Hudson Trevor   0.9167  Southampton   \n",
       "...                                               ...      ...          ...   \n",
       "1308                               Zakarian, Mr Artun      NaN          NaN   \n",
       "1309                           Zakarian, Mr Maprieder      NaN          NaN   \n",
       "1310                                  Zenn, Mr Philip      NaN          NaN   \n",
       "1311                                    Zievens, Rene      NaN          NaN   \n",
       "1312                                   Zimmerman, Leo      NaN          NaN   \n",
       "\n",
       "                            home.dest room      ticket   boat     sex  \n",
       "0                        St Louis, MO  B-5  24160 L221      2  female  \n",
       "1     Montreal, PQ / Chesterville, ON  C26         NaN    NaN  female  \n",
       "2     Montreal, PQ / Chesterville, ON  C26         NaN  (135)    male  \n",
       "3     Montreal, PQ / Chesterville, ON  C26         NaN    NaN  female  \n",
       "4     Montreal, PQ / Chesterville, ON  C22         NaN     11    male  \n",
       "...                               ...  ...         ...    ...     ...  \n",
       "1308                              NaN  NaN         NaN    NaN    male  \n",
       "1309                              NaN  NaN         NaN    NaN    male  \n",
       "1310                              NaN  NaN         NaN    NaN    male  \n",
       "1311                              NaN  NaN         NaN    NaN  female  \n",
       "1312                              NaN  NaN         NaN    NaN    male  \n",
       "\n",
       "[1313 rows x 11 columns]"
      ],
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>row.names</th>\n",
       "      <th>pclass</th>\n",
       "      <th>survived</th>\n",
       "      <th>name</th>\n",
       "      <th>age</th>\n",
       "      <th>embarked</th>\n",
       "      <th>home.dest</th>\n",
       "      <th>room</th>\n",
       "      <th>ticket</th>\n",
       "      <th>boat</th>\n",
       "      <th>sex</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>1</td>\n",
       "      <td>1st</td>\n",
       "      <td>1</td>\n",
       "      <td>Allen, Miss Elisabeth Walton</td>\n",
       "      <td>29.0000</td>\n",
       "      <td>Southampton</td>\n",
       "      <td>St Louis, MO</td>\n",
       "      <td>B-5</td>\n",
       "      <td>24160 L221</td>\n",
       "      <td>2</td>\n",
       "      <td>female</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>2</td>\n",
       "      <td>1st</td>\n",
       "      <td>0</td>\n",
       "      <td>Allison, Miss Helen Loraine</td>\n",
       "      <td>2.0000</td>\n",
       "      <td>Southampton</td>\n",
       "      <td>Montreal, PQ / Chesterville, ON</td>\n",
       "      <td>C26</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>female</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>3</td>\n",
       "      <td>1st</td>\n",
       "      <td>0</td>\n",
       "      <td>Allison, Mr Hudson Joshua Creighton</td>\n",
       "      <td>30.0000</td>\n",
       "      <td>Southampton</td>\n",
       "      <td>Montreal, PQ / Chesterville, ON</td>\n",
       "      <td>C26</td>\n",
       "      <td>NaN</td>\n",
       "      <td>(135)</td>\n",
       "      <td>male</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>4</td>\n",
       "      <td>1st</td>\n",
       "      <td>0</td>\n",
       "      <td>Allison, Mrs Hudson J.C. (Bessie Waldo Daniels)</td>\n",
       "      <td>25.0000</td>\n",
       "      <td>Southampton</td>\n",
       "      <td>Montreal, PQ / Chesterville, ON</td>\n",
       "      <td>C26</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>female</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>5</td>\n",
       "      <td>1st</td>\n",
       "      <td>1</td>\n",
       "      <td>Allison, Master Hudson Trevor</td>\n",
       "      <td>0.9167</td>\n",
       "      <td>Southampton</td>\n",
       "      <td>Montreal, PQ / Chesterville, ON</td>\n",
       "      <td>C22</td>\n",
       "      <td>NaN</td>\n",
       "      <td>11</td>\n",
       "      <td>male</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1308</th>\n",
       "      <td>1309</td>\n",
       "      <td>3rd</td>\n",
       "      <td>0</td>\n",
       "      <td>Zakarian, Mr Artun</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>male</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1309</th>\n",
       "      <td>1310</td>\n",
       "      <td>3rd</td>\n",
       "      <td>0</td>\n",
       "      <td>Zakarian, Mr Maprieder</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>male</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1310</th>\n",
       "      <td>1311</td>\n",
       "      <td>3rd</td>\n",
       "      <td>0</td>\n",
       "      <td>Zenn, Mr Philip</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>male</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1311</th>\n",
       "      <td>1312</td>\n",
       "      <td>3rd</td>\n",
       "      <td>0</td>\n",
       "      <td>Zievens, Rene</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>female</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1312</th>\n",
       "      <td>1313</td>\n",
       "      <td>3rd</td>\n",
       "      <td>0</td>\n",
       "      <td>Zimmerman, Leo</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>male</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>1313 rows × 11 columns</p>\n",
       "</div>"
      ]
     },
     "execution_count": 29,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 29
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:24.892905Z",
     "start_time": "2025-01-11T09:54:24.887271Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 样本特征值提取\n",
    "data = titanic.loc[:, [\"pclass\", \"age\", \"sex\"]]\n",
    "\n",
    "# 样本目标值提取\n",
    "target = titanic.loc[:, \"survived\"]\n"
   ],
   "id": "993555404ffda270",
   "outputs": [],
   "execution_count": 30
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:24.902968Z",
     "start_time": "2025-01-11T09:54:24.893914Z"
    }
   },
   "cell_type": "code",
   "source": "data",
   "id": "a2f87a497577038b",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "     pclass      age     sex\n",
       "0       1st  29.0000  female\n",
       "1       1st   2.0000  female\n",
       "2       1st  30.0000    male\n",
       "3       1st  25.0000  female\n",
       "4       1st   0.9167    male\n",
       "...     ...      ...     ...\n",
       "1308    3rd      NaN    male\n",
       "1309    3rd      NaN    male\n",
       "1310    3rd      NaN    male\n",
       "1311    3rd      NaN  female\n",
       "1312    3rd      NaN    male\n",
       "\n",
       "[1313 rows x 3 columns]"
      ],
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>pclass</th>\n",
       "      <th>age</th>\n",
       "      <th>sex</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>1st</td>\n",
       "      <td>29.0000</td>\n",
       "      <td>female</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1st</td>\n",
       "      <td>2.0000</td>\n",
       "      <td>female</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>1st</td>\n",
       "      <td>30.0000</td>\n",
       "      <td>male</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>1st</td>\n",
       "      <td>25.0000</td>\n",
       "      <td>female</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>1st</td>\n",
       "      <td>0.9167</td>\n",
       "      <td>male</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1308</th>\n",
       "      <td>3rd</td>\n",
       "      <td>NaN</td>\n",
       "      <td>male</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1309</th>\n",
       "      <td>3rd</td>\n",
       "      <td>NaN</td>\n",
       "      <td>male</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1310</th>\n",
       "      <td>3rd</td>\n",
       "      <td>NaN</td>\n",
       "      <td>male</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1311</th>\n",
       "      <td>3rd</td>\n",
       "      <td>NaN</td>\n",
       "      <td>female</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1312</th>\n",
       "      <td>3rd</td>\n",
       "      <td>NaN</td>\n",
       "      <td>male</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>1313 rows × 3 columns</p>\n",
       "</div>"
      ]
     },
     "execution_count": 31,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 31
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:24.910429Z",
     "start_time": "2025-01-11T09:54:24.904977Z"
    }
   },
   "cell_type": "code",
   "source": "target",
   "id": "b2c91f45176ddf2",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0       1\n",
       "1       0\n",
       "2       0\n",
       "3       0\n",
       "4       1\n",
       "       ..\n",
       "1308    0\n",
       "1309    0\n",
       "1310    0\n",
       "1311    0\n",
       "1312    0\n",
       "Name: survived, Length: 1313, dtype: int64"
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 32
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:24.923718Z",
     "start_time": "2025-01-11T09:54:24.911434Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# include='all' 包含所有列，包括object类型\n",
    "data.describe(include='all')"
   ],
   "id": "80fe9f0592b496fd",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "       pclass         age   sex\n",
       "count    1313  633.000000  1313\n",
       "unique      3         NaN     2\n",
       "top       3rd         NaN  male\n",
       "freq      711         NaN   850\n",
       "mean      NaN   31.194181   NaN\n",
       "std       NaN   14.747525   NaN\n",
       "min       NaN    0.166700   NaN\n",
       "25%       NaN   21.000000   NaN\n",
       "50%       NaN   30.000000   NaN\n",
       "75%       NaN   41.000000   NaN\n",
       "max       NaN   71.000000   NaN"
      ],
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>pclass</th>\n",
       "      <th>age</th>\n",
       "      <th>sex</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>count</th>\n",
       "      <td>1313</td>\n",
       "      <td>633.000000</td>\n",
       "      <td>1313</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>unique</th>\n",
       "      <td>3</td>\n",
       "      <td>NaN</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>top</th>\n",
       "      <td>3rd</td>\n",
       "      <td>NaN</td>\n",
       "      <td>male</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>freq</th>\n",
       "      <td>711</td>\n",
       "      <td>NaN</td>\n",
       "      <td>850</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>mean</th>\n",
       "      <td>NaN</td>\n",
       "      <td>31.194181</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>std</th>\n",
       "      <td>NaN</td>\n",
       "      <td>14.747525</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>min</th>\n",
       "      <td>NaN</td>\n",
       "      <td>0.166700</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>25%</th>\n",
       "      <td>NaN</td>\n",
       "      <td>21.000000</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>50%</th>\n",
       "      <td>NaN</td>\n",
       "      <td>30.000000</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>75%</th>\n",
       "      <td>NaN</td>\n",
       "      <td>41.000000</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>max</th>\n",
       "      <td>NaN</td>\n",
       "      <td>71.000000</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ]
     },
     "execution_count": 33,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 33
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:24.934760Z",
     "start_time": "2025-01-11T09:54:24.924724Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 缺失值处理,这里选择用平均值来填充\n",
    "mean = data[\"age\"].mean()\n",
    "\n",
    "data.loc[:, \"age\"] = data.loc[:, \"age\"].fillna(mean)\n",
    "\n",
    "data"
   ],
   "id": "627d8953cc4ee4e5",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "     pclass        age     sex\n",
       "0       1st  29.000000  female\n",
       "1       1st   2.000000  female\n",
       "2       1st  30.000000    male\n",
       "3       1st  25.000000  female\n",
       "4       1st   0.916700    male\n",
       "...     ...        ...     ...\n",
       "1308    3rd  31.194181    male\n",
       "1309    3rd  31.194181    male\n",
       "1310    3rd  31.194181    male\n",
       "1311    3rd  31.194181  female\n",
       "1312    3rd  31.194181    male\n",
       "\n",
       "[1313 rows x 3 columns]"
      ],
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>pclass</th>\n",
       "      <th>age</th>\n",
       "      <th>sex</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>1st</td>\n",
       "      <td>29.000000</td>\n",
       "      <td>female</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1st</td>\n",
       "      <td>2.000000</td>\n",
       "      <td>female</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>1st</td>\n",
       "      <td>30.000000</td>\n",
       "      <td>male</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>1st</td>\n",
       "      <td>25.000000</td>\n",
       "      <td>female</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>1st</td>\n",
       "      <td>0.916700</td>\n",
       "      <td>male</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1308</th>\n",
       "      <td>3rd</td>\n",
       "      <td>31.194181</td>\n",
       "      <td>male</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1309</th>\n",
       "      <td>3rd</td>\n",
       "      <td>31.194181</td>\n",
       "      <td>male</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1310</th>\n",
       "      <td>3rd</td>\n",
       "      <td>31.194181</td>\n",
       "      <td>male</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1311</th>\n",
       "      <td>3rd</td>\n",
       "      <td>31.194181</td>\n",
       "      <td>female</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1312</th>\n",
       "      <td>3rd</td>\n",
       "      <td>31.194181</td>\n",
       "      <td>male</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>1313 rows × 3 columns</p>\n",
       "</div>"
      ]
     },
     "execution_count": 34,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 34
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:24.946317Z",
     "start_time": "2025-01-11T09:54:24.935765Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 分割数据集到训练集合测试集\n",
    "# random_state是随机数种子，保证每次随机分割数据集的结果相同\n",
    "x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.25, random_state=4)\n",
    "\n",
    "print(x_train.shape)  # (984, 3)  \n",
    "print(\"-\" * 50)\n",
    "print(x_test.shape)  # (329, 3)\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(x_train.head())"
   ],
   "id": "ff4c426c82b6c2d8",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(984, 3)\n",
      "--------------------------------------------------\n",
      "(329, 3)\n",
      "--------------------------------------------------\n",
      "    pclass        age     sex\n",
      "598    2nd  30.000000    male\n",
      "246    1st  62.000000    male\n",
      "905    3rd  31.194181  female\n",
      "300    1st  31.194181  female\n",
      "509    2nd  64.000000    male\n"
     ]
    }
   ],
   "execution_count": 35
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:54:24.968949Z",
     "start_time": "2025-01-11T09:54:24.947323Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# # Convert the DataFrame to a list of dicts: each row becomes one dict,\n",
    "# # column names become keys, cell values become the feature values\n",
    "# x_train.to_dict(orient='records')\n",
    "\n",
    "# Feature engineering: categorical features -> one-hot encoding.\n",
    "# NOTE: renamed from `dict` to avoid shadowing the built-in `dict` type\n",
    "vectorizer = DictVectorizer(sparse=False)\n",
    "\n",
    "# Fit on the training rows; to_dict(orient='records') turns the df into row dicts\n",
    "x_train = vectorizer.fit_transform(x_train.to_dict(orient='records'))\n",
    "\n",
    "print(type(x_train))  # <class 'numpy.ndarray'>\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(vectorizer.get_feature_names_out())\n",
    "# ['age' 'pclass=1st' 'pclass=2nd' 'pclass=3rd' 'sex=female' 'sex=male']\n",
    "print(\"-\" * 50)\n",
    "\n",
    "# transform only (no fit) on the test set, so it reuses the training vocabulary\n",
    "x_test = vectorizer.transform(x_test.to_dict(orient='records'))\n",
    "\n",
    "x_train"
   ],
   "id": "5036661429e74352",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'numpy.ndarray'>\n",
      "--------------------------------------------------\n",
      "['age' 'pclass=1st' 'pclass=2nd' 'pclass=3rd' 'sex=female' 'sex=male']\n",
      "--------------------------------------------------\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "array([[30.        ,  0.        ,  1.        ,  0.        ,  0.        ,\n",
       "         1.        ],\n",
       "       [62.        ,  1.        ,  0.        ,  0.        ,  0.        ,\n",
       "         1.        ],\n",
       "       [31.19418104,  0.        ,  0.        ,  1.        ,  1.        ,\n",
       "         0.        ],\n",
       "       ...,\n",
       "       [34.        ,  0.        ,  1.        ,  0.        ,  0.        ,\n",
       "         1.        ],\n",
       "       [46.        ,  1.        ,  0.        ,  0.        ,  0.        ,\n",
       "         1.        ],\n",
       "       [31.19418104,  0.        ,  0.        ,  1.        ,  0.        ,\n",
       "         1.        ]], shape=(984, 6))"
      ]
     },
     "execution_count": 36,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 36
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T09:58:10.977039Z",
     "start_time": "2025-01-11T09:58:10.946237Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Predict with a decision tree; try changing max_depth or criterion='entropy'.\n",
    "# An overly complex tree overfits the training data.\n",
    "dec = DecisionTreeClassifier()\n",
    "\n",
    "# Train\n",
    "dec.fit(x_train, y_train)\n",
    "\n",
    "# Accuracy on the held-out test set\n",
    "print(\"预测的准确率：\")\n",
    "print(dec.score(x_test, y_test))\n",
    "print(\"-\" * 50)\n",
    "\n",
    "# Export the tree structure for graphviz visualization.\n",
    "# feature_names must match the DictVectorizer column order exactly —\n",
    "# these are the names reported by get_feature_names_out() above\n",
    "# (previously the last two were abbreviated to 'female'/'male').\n",
    "export_graphviz(dec, out_file='tree.dot',\n",
    "                feature_names=['age', 'pclass=1st', 'pclass=2nd', 'pclass=3rd', 'sex=female', 'sex=male'])\n"
   ],
   "id": "9a4f3194365795bf",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "预测的准确率：\n",
      "0.8085106382978723\n",
      "--------------------------------------------------\n"
     ]
    }
   ],
   "execution_count": 37
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## 对决策树进行参数调优",
   "id": "762e659c4b9d5f28"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T10:04:25.690996Z",
     "start_time": "2025-01-11T10:04:25.672023Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Split the dataset into training and test sets\n",
    "# random_state is the RNG seed, so the split is reproducible across runs\n",
    "x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.25, random_state=4)\n",
    "\n",
    "# Feature engineering: categorical features -> one-hot encoding.\n",
    "# NOTE: renamed from `dict` to avoid shadowing the built-in `dict` type\n",
    "vectorizer = DictVectorizer(sparse=False)\n",
    "\n",
    "# Fit on train, transform-only on test, so both share one vocabulary\n",
    "x_train = vectorizer.fit_transform(x_train.to_dict(orient='records'))\n",
    "x_test = vectorizer.transform(x_test.to_dict(orient='records'))\n",
    "\n",
    "# Hyper-parameters:\n",
    "#   max_depth — depth limit (deep trees tend to overfit)\n",
    "#   min_impurity_decrease — minimum impurity decrease required for a split\n",
    "#   min_samples_split — minimum number of samples required to split a node\n",
    "dec = DecisionTreeClassifier(max_depth=7,\n",
    "                             min_impurity_decrease=0.01,\n",
    "                             min_samples_split=20)\n",
    "\n",
    "# Train\n",
    "dec.fit(x_train, y_train)\n",
    "\n",
    "# Accuracy on the held-out test set\n",
    "print(\"预测的准确率：\")\n",
    "print(dec.score(x_test, y_test))\n",
    "\n",
    "# feature_names come straight from the fitted vectorizer, so the labels\n",
    "# in the exported tree always match the encoded columns\n",
    "export_graphviz(dec, out_file='tree_2.dot',\n",
    "                feature_names=vectorizer.get_feature_names_out())\n",
    "\n",
    "# With regularized hyper-parameters the tree is simpler, overfits less,\n",
    "# and test accuracy improves"
   ],
   "id": "8b5ee2063ee2d43d",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "预测的准确率：\n",
      "0.8206686930091185\n"
     ]
    }
   ],
   "execution_count": 38
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T10:25:52.708948Z",
     "start_time": "2025-01-11T10:25:52.696931Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Re-split so the random-forest section starts from a clean train/test split\n",
    "x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.25, random_state=4)\n",
    "\n",
    "# Feature engineering: categorical features -> one-hot encoding.\n",
    "# NOTE: renamed from `dict` to avoid shadowing the built-in `dict` type\n",
    "vectorizer = DictVectorizer(sparse=False)\n",
    "\n",
    "# Fit on train, transform-only on test, so both share one vocabulary\n",
    "x_train = vectorizer.fit_transform(x_train.to_dict(orient='records'))\n",
    "x_test = vectorizer.transform(x_test.to_dict(orient='records'))"
   ],
   "id": "458aa573067c66c8",
   "outputs": [],
   "execution_count": 39
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## 随机森林",
   "id": "58b5d402b42efb1"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T10:32:16.422916Z",
     "start_time": "2025-01-11T10:29:48.286178Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Random-forest prediction with hyper-parameter search.\n",
    "# n_jobs=-1 uses every available CPU core.\n",
    "# A random forest is an ensemble method: many trees vote on the prediction,\n",
    "# which is an effective guard against overfitting.\n",
    "# Strength: handles multi-class problems well; weakness: heavy compute, slow.\n",
    "# GridSearchCV walks every parameter combination and keeps the best one.\n",
    "forest = RandomForestClassifier(n_jobs=-1)\n",
    "\n",
    "# Search space: n_estimators = number of trees, max_depth = depth limit\n",
    "param_grid = {\"n_estimators\": [1500, 2000, 2500], \"max_depth\": [2, 3, 5, 8, 15, 25]}\n",
    "\n",
    "# Grid search combined with 3-fold cross-validation\n",
    "gc = GridSearchCV(forest, param_grid=param_grid, cv=3)\n",
    "\n",
    "gc.fit(x_train, y_train)\n",
    "\n",
    "print(\"测试集准确率\")\n",
    "print(gc.score(x_test, y_test))\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(\"最优参数\")\n",
    "print(gc.best_params_)\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(\"最优模型\")\n",
    "print(gc.best_estimator_)\n",
    "print(\"-\" * 50)\n",
    "\n"
   ],
   "id": "9f4abb7fd165415d",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "测试集准确率\n",
      "0.8328267477203647\n",
      "--------------------------------------------------\n",
      "最优参数\n",
      "{'max_depth': 3, 'n_estimators': 1500}\n",
      "--------------------------------------------------\n",
      "最优模型\n",
      "RandomForestClassifier(max_depth=3, n_estimators=1500, n_jobs=-1)\n",
      "--------------------------------------------------\n"
     ]
    }
   ],
   "execution_count": 40
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-11T10:32:56.906402Z",
     "start_time": "2025-01-11T10:32:56.900004Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Show the per-fold cross-validation metrics for every parameter combination\n",
    "print(\"每个超参数每次交叉验证的结果：\")\n",
    "print(gc.cv_results_)"
   ],
   "id": "3b654f0eb7075599",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "每个超参数每次交叉验证的结果：\n",
      "{'mean_fit_time': array([1.81977979, 2.33722814, 3.1376462 , 1.93946973, 2.60534279,\n",
      "       3.00419569, 1.81094543, 2.59332999, 3.03834589, 1.9137973 ,\n",
      "       2.44639738, 3.00160209, 1.79601073, 2.37708203, 3.01585491,\n",
      "       1.84363675, 2.43284345, 3.00678682]), 'std_fit_time': array([0.01470408, 0.00754286, 0.12815338, 0.18744379, 0.24086891,\n",
      "       0.00701659, 0.01433927, 0.25605413, 0.04846916, 0.07718796,\n",
      "       0.01958094, 0.01952822, 0.00640875, 0.01005907, 0.03809258,\n",
      "       0.02115976, 0.02286731, 0.02423167]), 'mean_score_time': array([0.16970778, 0.22480599, 0.30750378, 0.19383359, 0.2336123 ,\n",
      "       0.29815618, 0.18970855, 0.30181193, 0.2993412 , 0.20176363,\n",
      "       0.25710956, 0.33037265, 0.19469309, 0.25237695, 0.32602914,\n",
      "       0.19744468, 0.25677824, 0.31610227]), 'std_score_time': array([0.00464891, 0.00231739, 0.03860339, 0.03020706, 0.01040157,\n",
      "       0.02854189, 0.01778679, 0.07158448, 0.01876445, 0.00468867,\n",
      "       0.01057976, 0.00480627, 0.00066674, 0.00576945, 0.00985023,\n",
      "       0.00471375, 0.00476503, 0.00047141]), 'param_max_depth': masked_array(data=[2, 2, 2, 3, 3, 3, 5, 5, 5, 8, 8, 8, 15, 15, 15, 25, 25,\n",
      "                   25],\n",
      "             mask=[False, False, False, False, False, False, False, False,\n",
      "                   False, False, False, False, False, False, False, False,\n",
      "                   False, False],\n",
      "       fill_value=999999), 'param_n_estimators': masked_array(data=[1500, 2000, 2500, 1500, 2000, 2500, 1500, 2000, 2500,\n",
      "                   1500, 2000, 2500, 1500, 2000, 2500, 1500, 2000, 2500],\n",
      "             mask=[False, False, False, False, False, False, False, False,\n",
      "                   False, False, False, False, False, False, False, False,\n",
      "                   False, False],\n",
      "       fill_value=999999), 'params': [{'max_depth': 2, 'n_estimators': 1500}, {'max_depth': 2, 'n_estimators': 2000}, {'max_depth': 2, 'n_estimators': 2500}, {'max_depth': 3, 'n_estimators': 1500}, {'max_depth': 3, 'n_estimators': 2000}, {'max_depth': 3, 'n_estimators': 2500}, {'max_depth': 5, 'n_estimators': 1500}, {'max_depth': 5, 'n_estimators': 2000}, {'max_depth': 5, 'n_estimators': 2500}, {'max_depth': 8, 'n_estimators': 1500}, {'max_depth': 8, 'n_estimators': 2000}, {'max_depth': 8, 'n_estimators': 2500}, {'max_depth': 15, 'n_estimators': 1500}, {'max_depth': 15, 'n_estimators': 2000}, {'max_depth': 15, 'n_estimators': 2500}, {'max_depth': 25, 'n_estimators': 1500}, {'max_depth': 25, 'n_estimators': 2000}, {'max_depth': 25, 'n_estimators': 2500}], 'split0_test_score': array([0.73780488, 0.73780488, 0.73780488, 0.80182927, 0.80182927,\n",
      "       0.80182927, 0.81097561, 0.81097561, 0.81097561, 0.82012195,\n",
      "       0.82012195, 0.82012195, 0.82012195, 0.82012195, 0.81402439,\n",
      "       0.82012195, 0.82012195, 0.82012195]), 'split1_test_score': array([0.82621951, 0.82621951, 0.82317073, 0.82317073, 0.82317073,\n",
      "       0.82317073, 0.81402439, 0.81402439, 0.81402439, 0.81097561,\n",
      "       0.81097561, 0.81097561, 0.81707317, 0.81402439, 0.81707317,\n",
      "       0.81097561, 0.81097561, 0.81707317]), 'split2_test_score': array([0.81707317, 0.81707317, 0.81707317, 0.82926829, 0.82926829,\n",
      "       0.82926829, 0.82317073, 0.82317073, 0.82317073, 0.79268293,\n",
      "       0.79268293, 0.79268293, 0.79573171, 0.79573171, 0.79573171,\n",
      "       0.79573171, 0.79573171, 0.79573171]), 'mean_test_score': array([0.79369919, 0.79369919, 0.79268293, 0.81808943, 0.81808943,\n",
      "       0.81808943, 0.81605691, 0.81605691, 0.81605691, 0.80792683,\n",
      "       0.80792683, 0.80792683, 0.81097561, 0.80995935, 0.80894309,\n",
      "       0.80894309, 0.80894309, 0.81097561]), 'std_test_score': array([0.03969924, 0.03969924, 0.0388844 , 0.01176406, 0.01176406,\n",
      "       0.01176406, 0.00518193, 0.00518193, 0.00518193, 0.01140749,\n",
      "       0.01140749, 0.01140749, 0.01085069, 0.01036386, 0.00942441,\n",
      "       0.01006046, 0.01006046, 0.01085069]), 'rank_test_score': array([16, 16, 18,  1,  1,  1,  4,  4,  4, 13, 13, 13,  7,  9, 10, 10, 10,\n",
      "        7], dtype=int32)}\n"
     ]
    }
   ],
   "execution_count": 41
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
