{
 "cells": [
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-13T10:59:50.898474Z",
     "start_time": "2025-01-13T10:59:50.893987Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import os\n",
    "\n",
    "from sklearn.datasets import fetch_california_housing\n",
    "from sklearn.linear_model import LinearRegression, SGDRegressor, Ridge, LogisticRegression, Lasso\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "from sklearn.metrics import mean_squared_error, classification_report, roc_auc_score\n",
    "import joblib\n",
    "import pandas as pd\n",
    "import numpy as np"
   ],
   "id": "459a15012d5659a",
   "outputs": [],
   "execution_count": 27
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "# 线性回归",
   "id": "d63634fbc73d186f"
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## 1.正规方程   线性回归模型LinearRegression",
   "id": "eb4db157b2f0236"
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "正规方程是让导数直接为0",
   "id": "55237a1534f20ca5"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-13T10:59:50.963998Z",
     "start_time": "2025-01-13T10:59:50.951488Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 获取数据\n",
    "fe_cal = fetch_california_housing(data_home='./data')\n",
    "\n",
    "print(fe_cal.data.shape)  # (20640, 8) 20640条数据，8个特征\n",
    "print(\"-\" * 50)\n",
    "print(fe_cal.target.shape)  # (20640,) 20640条数据，目标值\n",
    "print(\"-\" * 50)\n",
    "# 第一个样本特征\n",
    "print(fe_cal.data[0])\n",
    "print(\"-\" * 50)\n",
    "# 样本目标值\n",
    "print(fe_cal.target)\n",
    "print(\"-\" * 50)\n",
    "print(fe_cal.feature_names)  # 特征名称"
   ],
   "id": "f5dff5ae25222c2d",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(20640, 8)\n",
      "--------------------------------------------------\n",
      "(20640,)\n",
      "--------------------------------------------------\n",
      "[   8.3252       41.            6.98412698    1.02380952  322.\n",
      "    2.55555556   37.88       -122.23      ]\n",
      "--------------------------------------------------\n",
      "[4.526 3.585 3.521 ... 0.923 0.847 0.894]\n",
      "--------------------------------------------------\n",
      "['MedInc', 'HouseAge', 'AveRooms', 'AveBedrms', 'Population', 'AveOccup', 'Latitude', 'Longitude']\n"
     ]
    }
   ],
   "execution_count": 28
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-13T10:59:51.011551Z",
     "start_time": "2025-01-13T10:59:51.001012Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 假设这里不对目标结果进行标准化，和后面进行标准化的结果进行比较\n",
    "\n",
    "# 分割数据集为训练集和测试集，数据预处理\n",
    "x_train, x_test, y_train, y_test = train_test_split(fe_cal.data, fe_cal.target, test_size=0.25, random_state=1)\n",
    "\n",
    "print(x_train.shape)  # (15480, 8)\n",
    "\n",
    "# 标准化数据\n",
    "std_x = StandardScaler()\n",
    "# 训练集和测试机标准化\n",
    "x_train = std_x.fit_transform(x_train)\n",
    "x_test = std_x.transform(x_test)\n"
   ],
   "id": "237077127c801325",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(15480, 8)\n"
     ]
    }
   ],
   "execution_count": 29
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-13T10:59:51.037954Z",
     "start_time": "2025-01-13T10:59:51.026557Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 正规方程求解方式预测结果，正规方程进行线性回归\n",
    "lr = LinearRegression()\n",
    "\n",
    "# 训练模型\n",
    "# 此时的fit是耗时的\n",
    "lr.fit(x_train, y_train)\n",
    "\n",
    "# 回归系数可以看特征与目标之间的相关性\n",
    "print(\"回归系数\")\n",
    "print(lr.coef_)\n",
    "print(\"-\" * 50)\n",
    "\n",
    "y_predict = lr.predict(x_test)\n",
    "\n",
    "# 删除之前的模型文件\n",
    "os.unlink(\"./tmp/test.pkl\")\n",
    "# 重新保存模型\n",
    "joblib.dump(lr, \"./tmp/test.pkl\")\n",
    "\n",
    "print(\"正规方程测试集里面前10个房子的预测价格：\")\n",
    "print(y_predict[0:10])\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(\"正规方程测试集的均方误差：\")\n",
    "# 回归不能用score评估，因为评估结果是0，而是应该用均方误差评估\n",
    "print(mean_squared_error(y_test, y_predict))\n",
    "# 0.5356532845422556"
   ],
   "id": "dc49ee74154f8bb9",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "回归系数\n",
      "[ 0.83167028  0.12159502 -0.26758589  0.30983997 -0.00518054 -0.04040421\n",
      " -0.90736902 -0.88212727]\n",
      "--------------------------------------------------\n",
      "正规方程测试集里面前10个房子的预测价格：\n",
      "[2.12391852 0.93825754 2.7088455  1.70873764 2.82954754 3.50376456\n",
      " 3.0147162  1.62781292 1.74317518 2.01897806]\n",
      "--------------------------------------------------\n",
      "正规方程测试集的均方误差：\n",
      "0.5356532845422556\n"
     ]
    }
   ],
   "execution_count": 30
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-13T10:59:51.056374Z",
     "start_time": "2025-01-13T10:59:51.038958Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 假设这里对目标结果进行标准化，和前面不进行标准化的结果进行比较\n",
    "\n",
    "# 分割数据集为训练集和测试集，数据预处理\n",
    "x_train, x_test, y_train, y_test = train_test_split(fe_cal.data, fe_cal.target, test_size=0.25, random_state=1)\n",
    "\n",
    "# 标准化数据\n",
    "std_x = StandardScaler()\n",
    "# 训练集和测试机标准化\n",
    "x_train = std_x.fit_transform(x_train)\n",
    "x_test = std_x.transform(x_test)\n",
    "\n",
    "# 标准化目标值\n",
    "std_y = StandardScaler()\n",
    "# -1代表把剩余的元素都堆到哪一维\n",
    "# 目标值是一维的，这里需要传进去2维的\n",
    "y_train = std_y.fit_transform(y_train.reshape(-1, 1))\n",
    "y_test = std_y.transform(y_test.reshape(-1, 1))\n",
    "\n",
    "# 正规方程求解方式预测结果，正规方程进行线性回归\n",
    "lr = LinearRegression()\n",
    "\n",
    "# 训练模型\n",
    "# 此时的fit是耗时的\n",
    "lr.fit(x_train, y_train)\n",
    "\n",
    "# 回归系数可以看特征与目标之间的相关性\n",
    "print(\"回归系数\")\n",
    "print(lr.coef_)\n",
    "print(\"-\" * 50)\n",
    "\n",
    "y_predict = lr.predict(x_test)\n",
    "# 通过inverse得到真正的房子价格\n",
    "y_lr_predict = std_y.inverse_transform(y_predict)\n",
    "\n",
    "# 删除之前的模型文件\n",
    "os.unlink(\"./tmp/test.pkl\")\n",
    "# 重新保存模型\n",
    "joblib.dump(lr, \"./tmp/test.pkl\")\n",
    "\n",
    "print(\"正规方程测试集里面前10个房子的预测价格：\")\n",
    "print(y_lr_predict[0:10])\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(\"正规方程测试集的均方误差：\")\n",
    "# 回归不能用score评估，因为评估结果是0，而是应该用均方误差评估\n",
    "print(mean_squared_error(y_test, y_predict))\n",
    "# 0.40082431136214186 比较得，标准化后效果更好"
   ],
   "id": "7c06cbc4261f9844",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "回归系数\n",
      "[[ 0.71942632  0.10518431 -0.23147194  0.26802332 -0.00448136 -0.03495117\n",
      "  -0.7849086  -0.76307353]]\n",
      "--------------------------------------------------\n",
      "正规方程测试集里面前10个房子的预测价格：\n",
      "[[2.12391852]\n",
      " [0.93825754]\n",
      " [2.7088455 ]\n",
      " [1.70873764]\n",
      " [2.82954754]\n",
      " [3.50376456]\n",
      " [3.0147162 ]\n",
      " [1.62781292]\n",
      " [1.74317518]\n",
      " [2.01897806]]\n",
      "--------------------------------------------------\n",
      "正规方程测试集的均方误差：\n",
      "0.40082431136214186\n"
     ]
    }
   ],
   "execution_count": 31
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## 加载保存的模型",
   "id": "d9ea838e0a15895b"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-13T10:59:51.064287Z",
     "start_time": "2025-01-13T10:59:51.057378Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 模拟上线时加载模型,之前的训练结果保存在模型文件中\n",
    "model = joblib.load(\"./tmp/test.pkl\")\n",
    "\n",
    "y_predict = model.predict(x_test)\n",
    "# 通过inverse得到真正的房子价格\n",
    "y_lr_predict = std_y.inverse_transform(y_predict)\n",
    "\n",
    "print(\"正规方程测试集里面前10个房子的预测价格：\")\n",
    "print(y_lr_predict[0:10])\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(\"正规方程测试集的均方误差：\")\n",
    "# 回归不能用score评估，因为评估结果是0，而是应该用均方误差评估\n",
    "print(mean_squared_error(y_test, y_predict))\n"
   ],
   "id": "6fe67e776b5e7b19",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "正规方程测试集里面前10个房子的预测价格：\n",
      "[[2.12391852]\n",
      " [0.93825754]\n",
      " [2.7088455 ]\n",
      " [1.70873764]\n",
      " [2.82954754]\n",
      " [3.50376456]\n",
      " [3.0147162 ]\n",
      " [1.62781292]\n",
      " [1.74317518]\n",
      " [2.01897806]]\n",
      "--------------------------------------------------\n",
      "正规方程测试集的均方误差：\n",
      "0.40082431136214186\n"
     ]
    }
   ],
   "execution_count": 32
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-13T10:59:51.069808Z",
     "start_time": "2025-01-13T10:59:51.065293Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# mean_squared_error计算方法：\n",
    "y_true = [3, -0.5, 2, 7]\n",
    "y_pred = [2.5, 0.0, 2, 8]\n",
    "print(mean_squared_error(y_true, y_pred))\n",
    "\n",
    "print((np.square(3 - 2.5) + np.square(-0.5 - 0.0) + np.square(2 - 2) + np.square(7 - 8)) / 4)"
   ],
   "id": "c39bdd35908bc499",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.375\n",
      "0.375\n"
     ]
    }
   ],
   "execution_count": 33
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## 2.梯度下降   梯度下降估计SGDRegressor",
   "id": "f0b62b47f3e49fef"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-13T11:27:44.116724Z",
     "start_time": "2025-01-13T11:27:44.093621Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# SGDRegressor是随机梯度下降法，可以解决线性回归问题\n",
    "# 随机梯度下降法是一种迭代优化算法，它利用了随机梯度下降的思想，每次迭代只用一部分数据来计算梯度，并根据梯度的方向更新参数，从而逐渐逼近最优解。\n",
    "# 随机梯度下降法的优点是可以处理大数据集，缺点是收敛速度慢。\n",
    "\n",
    "# 通过使用SGD最小化正则经验损失来拟合线性模型\n",
    "# SGD代表随机梯度下降：一次估计每个样本的损失梯度，并随着强度进度表（即学习率）的降低而更新模型。\n",
    "# SGDRegressor的学习率参数alpha控制了学习率的衰减速度，可以防止模型陷入局部最小值。\n",
    "# 正则化器是对损失函数的一种惩罚，它使用平方的欧几里德范数L2或绝对范数L1或两者的组合（弹性网）将模型参数向零矢量收缩。\n",
    "# 正则化的目标是使模型的复杂度最小化，以便在训练数据上获得更好的泛化性能。\n",
    "# 如果由于正则化而使参数更新超过0.0值，则更新将被截断为0.0以允许学习稀疏模型并实现在线特征选择。\n",
    "\n",
    "\n",
    "# eta0是学习率的初始值，max_iter是最大迭代次数，\n",
    "# learning_rate:学习率调整的方式\n",
    "# penalty是正则化方法，l1是Lasso，l2是Ridge\n",
    "# L1正则化产生稀疏的权值, L2正则化产生平滑的权值\n",
    "# L1正则化偏向于稀疏，它会自动进行特征选择，去掉一些没用的特征，也就是将这些特征对应的权重置为0\n",
    "# L2主要功能是为了防止过拟合，当要求参数越小时，说明模型越简单，而模型越简单则，越趋向于平滑，从而防止过拟合。\n",
    "# alpha是正则化系数，值越高，正则化力度越强\n",
    "sgd = SGDRegressor(eta0=0.01, penalty='l2', max_iter=1000)\n",
    "\n",
    "# 训练\n",
    "sgd.fit(x_train, y_train)\n",
    "\n",
    "print(\"梯度下降的回归系数\")\n",
    "print(sgd.coef_)\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(\"梯度下降的均方误差：\")\n",
    "print(mean_squared_error(y_test, sgd.predict(x_test)))\n",
    "# 0.39962450198092375"
   ],
   "id": "c1cbd8e68731ebd3",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "梯度下降的回归系数\n",
      "[ 0.71500757  0.10131499 -0.24975412  0.26258784 -0.00281587 -0.1859521\n",
      " -0.7879317  -0.75911034]\n",
      "--------------------------------------------------\n",
      "梯度下降的均方误差：\n",
      "0.3973014396545884\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Program Files\\Python312\\Lib\\site-packages\\sklearn\\utils\\validation.py:1408: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n",
      "  y = column_or_1d(y, warn=True)\n"
     ]
    }
   ],
   "execution_count": 94
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-13T11:47:22.798397Z",
     "start_time": "2025-01-13T11:47:22.793789Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 梯度下降的计算方法\n",
    "w = 1  # 初始值\n",
    "alpha = 0.1  # 正则化系数\n",
    "\n",
    "\n",
    "# 正则化系数会影响到学习率的衰减速度，如果正则化系数过大，则学习率会很快衰减，导致模型无法收敛；如果正则化系数过小，则学习率会很慢，导致模型收敛速度慢。\n",
    "# eg: 理想的w为 -0.3333333\n",
    "# alpha=0.01时，最终的w为 -0.12499252511111358\n",
    "# alpha=0.7时，最终的w为 正负振荡，不收敛\n",
    "# alpha=0.1时。最终的w为 -0.3333333333317961 合适的正则化系数\n",
    "\n",
    "def loss(w):\n",
    "    return 3 * w ** 2 + 2 * w + 2\n",
    "\n",
    "\n",
    "def dao_shu(w):\n",
    "    return 6 * w + 2\n",
    "\n",
    "\n",
    "for i in range(30):\n",
    "    w = w - alpha * dao_shu(w)\n",
    "    print(f'w {w} 损失{loss(w)}')"
   ],
   "id": "60fc11eca3bd524d",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "w 0.19999999999999996 损失2.5199999999999996\n",
      "w -0.12000000000000005 损失1.8032\n",
      "w -0.24800000000000003 损失1.688512\n",
      "w -0.2992 损失1.67016192\n",
      "w -0.31968 损失1.6672259072\n",
      "w -0.327872 损失1.666756145152\n",
      "w -0.33114879999999997 损失1.6666809832243201\n",
      "w -0.33245952 损失1.6666689573158913\n",
      "w -0.332983808 损失1.6666670331705427\n",
      "w -0.3331935232 损失1.6666667253072869\n",
      "w -0.33327740928 损失1.666666676049166\n",
      "w -0.333310963712 损失1.6666666681678666\n",
      "w -0.3333243854848 损失1.6666666669068586\n",
      "w -0.33332975419392 损失1.6666666667050973\n",
      "w -0.333331901677568 损失1.6666666666728156\n",
      "w -0.3333327606710272 损失1.6666666666676506\n",
      "w -0.3333331042684109 损失1.6666666666668242\n",
      "w -0.33333324170736434 损失1.6666666666666918\n",
      "w -0.33333329668294576 损失1.6666666666666707\n",
      "w -0.3333333186731783 损失1.6666666666666674\n",
      "w -0.3333333274692713 损失1.6666666666666667\n",
      "w -0.3333333309877085 损失1.6666666666666667\n",
      "w -0.3333333323950834 损失1.6666666666666665\n",
      "w -0.33333333295803336 损失1.6666666666666667\n",
      "w -0.33333333318321334 损失1.6666666666666667\n",
      "w -0.33333333327328535 损失1.6666666666666667\n",
      "w -0.33333333330931414 损失1.6666666666666667\n",
      "w -0.33333333332372567 损失1.6666666666666667\n",
      "w -0.3333333333294903 损失1.6666666666666665\n",
      "w -0.3333333333317961 损失1.6666666666666665\n"
     ]
    }
   ],
   "execution_count": 122
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## 3.Lasso回归    （L1正则化的一种） ",
   "id": "9ed2fe4c1b4f91b"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-13T11:47:44.962131Z",
     "start_time": "2025-01-13T11:47:44.836716Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Lasso 是拟合稀疏系数的线性模型。 \n",
    "# 它在一些情况下是有用的，因为它倾向于使用具有较少参数值的情况，有效地减少给定解决方案所依赖变量的数量。\n",
    "# Lasso回归和线性回归的区别\n",
    "# (1) 目标函数不同：Lasso回归使用L1正则化项（即lasso惩罚），而线性回归没有正则化。\n",
    "# (2) 模型复杂度不同：Lasso回归通过L1正则化可以缩小特征系数，从而减少模型复杂度，因此可以用于特征选择。但线性回归不具备这个功能，因为它对所有特征赋予非零系数。\n",
    "# (3) Lasso回归对异常值比较敏感，而线性回归相对鲁棒。\n",
    "# (4) 解法不同：Lasso回归需要使用优化算法来求解，例如坐标下降法或最小角回归；而线性回归可以直接使用矩阵求逆公式来求解闭合形式解。\n",
    "\n",
    "# alpha就是补偿的系数\n",
    "ls = Lasso(alpha=0.005)\n",
    "\n",
    "ls.fit(x_train, y_train)\n",
    "\n",
    "# Lasso的回归系数\n",
    "print(\"Lasso回归系数\")\n",
    "print(ls.coef_)\n",
    "# 可以看到，部分的回归系数是0，表示这些特征没有被使用\n",
    "print(\"-\" * 50)\n",
    "\n",
    "y_predict = ls.predict(x_test)\n",
    "\n",
    "print(\"Lasso回归的均方误差：\")\n",
    "print(mean_squared_error(y_test, y_predict))\n",
    "# alpha=0.1 0.5087627130794923\n",
    "# alpha=0.05 0.4720295203622488\n",
    "# alpha=0.01 0.40511068287093405\n",
    "# alpha=0.001 0.40081302952916914"
   ],
   "id": "6e87b226d3f7c7b7",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Lasso回归系数\n",
      "[ 0.69355682  0.10911911 -0.16181473  0.19834603 -0.         -0.0301411\n",
      " -0.73164807 -0.70645189]\n",
      "--------------------------------------------------\n",
      "Lasso回归的均方误差：\n",
      "0.40175895322423766\n"
     ]
    }
   ],
   "execution_count": 125
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## 4.岭(Ridge)回归      （L2正则化的一种） ",
   "id": "8aa20c408417afae"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-13T12:11:35.263986Z",
     "start_time": "2025-01-13T12:11:35.256079Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# L2正则化是一种用于解决过拟合问题的技术。\n",
    "# 作用：可以使得W的每个元素都很小，都接近于0\n",
    "# 优点：越小的参数说明模型.越简单，越简单的模型则越不容易产生过拟合现象\n",
    "\n",
    "# 岭回归是对线性回归加入L2正则化，L2正则化是对系数的平方和进行惩罚\n",
    "# alpha就是补偿的系数\n",
    "# 正规方程求解，加补偿就可以让正规方程可逆\n",
    "rd = Ridge(alpha=0.01)\n",
    "\n",
    "rd.fit(x_train, y_train)\n",
    "\n",
    "print(\"岭回归系数\")\n",
    "print(rd.coef_)\n",
    "# 岭回归得到的回归系数更符合实际，更可靠。另外，能让估计参数的波动范围变小，变的更稳定。在存在病态数据偏多的研究中有较大的实用价值。\n",
    "\n",
    "print(\"-\" * 50)\n",
    "\n",
    "print(\"岭回归的均方误差：\")\n",
    "print(mean_squared_error(y_test, rd.predict(x_test)))\n",
    "# alpha=0.02 0.40082418668554404\n",
    "# alpha=0.01 0.4008242490154845"
   ],
   "id": "262481692a13804f",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "岭回归系数\n",
      "[ 0.71942604  0.10518508 -0.23147041  0.26802132 -0.0044811  -0.03495122\n",
      " -0.7849013  -0.76306617]\n",
      "--------------------------------------------------\n",
      "岭回归的均方误差：\n",
      "0.4008242490154845\n"
     ]
    }
   ],
   "execution_count": 129
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "# 逻辑回归",
   "id": "ca26e209a82ca862"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-13T12:51:30.582014Z",
     "start_time": "2025-01-13T12:51:30.554496Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 逻辑回归是一种分类算法，它可以用来预测某一事件发生的概率。\n",
    "# 逻辑回归模型的输出是一个概率值，它表示事件发生的可能性。\n",
    "# 逻辑回归模型可以用于二分类问题，也可以用于多分类问题。\n",
    "# 逻辑回归模型的损失函数是logistic损失函数，它是sigmoid函数的负对数似然函数。\n",
    "# 逻辑回归模型的目标是找到一个最优的模型参数，使得模型的预测值与真实值之间的差距最小。\n",
    "\n",
    "# 构造列标签名字\n",
    "column = ['Sample code number', 'Clump Thickness', 'Uniformity of Cell Size',\n",
    "          'Uniformity of CellShape',\n",
    "          'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli',\n",
    "          'Mitoses', 'Class']\n",
    "\n",
    "# 读取数据 \n",
    "# names参数指定了列标签名字\n",
    "data = pd.read_csv(\"./data/breast-cancer-wisconsin.csv\", names=column)\n",
    "\n",
    "print(data.shape)\n",
    "print(\"-\" * 50)\n",
    "print(data.info())\n",
    "print(\"-\" * 50)\n",
    "data.describe(include='all')"
   ],
   "id": "ab5be329c6b37e76",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(699, 11)\n",
      "--------------------------------------------------\n",
      "<class 'pandas.core.frame.DataFrame'>\n",
      "RangeIndex: 699 entries, 0 to 698\n",
      "Data columns (total 11 columns):\n",
      " #   Column                       Non-Null Count  Dtype \n",
      "---  ------                       --------------  ----- \n",
      " 0   Sample code number           699 non-null    int64 \n",
      " 1   Clump Thickness              699 non-null    int64 \n",
      " 2   Uniformity of Cell Size      699 non-null    int64 \n",
      " 3   Uniformity of CellShape      699 non-null    int64 \n",
      " 4   Marginal Adhesion            699 non-null    int64 \n",
      " 5   Single Epithelial Cell Size  699 non-null    int64 \n",
      " 6   Bare Nuclei                  699 non-null    object\n",
      " 7   Bland Chromatin              699 non-null    int64 \n",
      " 8   Normal Nucleoli              699 non-null    int64 \n",
      " 9   Mitoses                      699 non-null    int64 \n",
      " 10  Class                        699 non-null    int64 \n",
      "dtypes: int64(10), object(1)\n",
      "memory usage: 60.2+ KB\n",
      "None\n",
      "--------------------------------------------------\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "        Sample code number  Clump Thickness  Uniformity of Cell Size  \\\n",
       "count         6.990000e+02       699.000000               699.000000   \n",
       "unique                 NaN              NaN                      NaN   \n",
       "top                    NaN              NaN                      NaN   \n",
       "freq                   NaN              NaN                      NaN   \n",
       "mean          1.071704e+06         4.417740                 3.134478   \n",
       "std           6.170957e+05         2.815741                 3.051459   \n",
       "min           6.163400e+04         1.000000                 1.000000   \n",
       "25%           8.706885e+05         2.000000                 1.000000   \n",
       "50%           1.171710e+06         4.000000                 1.000000   \n",
       "75%           1.238298e+06         6.000000                 5.000000   \n",
       "max           1.345435e+07        10.000000                10.000000   \n",
       "\n",
       "        Uniformity of CellShape  Marginal Adhesion  \\\n",
       "count                699.000000         699.000000   \n",
       "unique                      NaN                NaN   \n",
       "top                         NaN                NaN   \n",
       "freq                        NaN                NaN   \n",
       "mean                   3.207439           2.806867   \n",
       "std                    2.971913           2.855379   \n",
       "min                    1.000000           1.000000   \n",
       "25%                    1.000000           1.000000   \n",
       "50%                    1.000000           1.000000   \n",
       "75%                    5.000000           4.000000   \n",
       "max                   10.000000          10.000000   \n",
       "\n",
       "        Single Epithelial Cell Size Bare Nuclei  Bland Chromatin  \\\n",
       "count                    699.000000         699       699.000000   \n",
       "unique                          NaN          11              NaN   \n",
       "top                             NaN           1              NaN   \n",
       "freq                            NaN         402              NaN   \n",
       "mean                       3.216023         NaN         3.437768   \n",
       "std                        2.214300         NaN         2.438364   \n",
       "min                        1.000000         NaN         1.000000   \n",
       "25%                        2.000000         NaN         2.000000   \n",
       "50%                        2.000000         NaN         3.000000   \n",
       "75%                        4.000000         NaN         5.000000   \n",
       "max                       10.000000         NaN        10.000000   \n",
       "\n",
       "        Normal Nucleoli     Mitoses       Class  \n",
       "count        699.000000  699.000000  699.000000  \n",
       "unique              NaN         NaN         NaN  \n",
       "top                 NaN         NaN         NaN  \n",
       "freq                NaN         NaN         NaN  \n",
       "mean           2.866953    1.589413    2.689557  \n",
       "std            3.053634    1.715078    0.951273  \n",
       "min            1.000000    1.000000    2.000000  \n",
       "25%            1.000000    1.000000    2.000000  \n",
       "50%            1.000000    1.000000    2.000000  \n",
       "75%            4.000000    1.000000    4.000000  \n",
       "max           10.000000   10.000000    4.000000  "
      ],
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>Sample code number</th>\n",
       "      <th>Clump Thickness</th>\n",
       "      <th>Uniformity of Cell Size</th>\n",
       "      <th>Uniformity of CellShape</th>\n",
       "      <th>Marginal Adhesion</th>\n",
       "      <th>Single Epithelial Cell Size</th>\n",
       "      <th>Bare Nuclei</th>\n",
       "      <th>Bland Chromatin</th>\n",
       "      <th>Normal Nucleoli</th>\n",
       "      <th>Mitoses</th>\n",
       "      <th>Class</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>count</th>\n",
       "      <td>6.990000e+02</td>\n",
       "      <td>699.000000</td>\n",
       "      <td>699.000000</td>\n",
       "      <td>699.000000</td>\n",
       "      <td>699.000000</td>\n",
       "      <td>699.000000</td>\n",
       "      <td>699</td>\n",
       "      <td>699.000000</td>\n",
       "      <td>699.000000</td>\n",
       "      <td>699.000000</td>\n",
       "      <td>699.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>unique</th>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>11</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>top</th>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>1</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>freq</th>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>402</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>mean</th>\n",
       "      <td>1.071704e+06</td>\n",
       "      <td>4.417740</td>\n",
       "      <td>3.134478</td>\n",
       "      <td>3.207439</td>\n",
       "      <td>2.806867</td>\n",
       "      <td>3.216023</td>\n",
       "      <td>NaN</td>\n",
       "      <td>3.437768</td>\n",
       "      <td>2.866953</td>\n",
       "      <td>1.589413</td>\n",
       "      <td>2.689557</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>std</th>\n",
       "      <td>6.170957e+05</td>\n",
       "      <td>2.815741</td>\n",
       "      <td>3.051459</td>\n",
       "      <td>2.971913</td>\n",
       "      <td>2.855379</td>\n",
       "      <td>2.214300</td>\n",
       "      <td>NaN</td>\n",
       "      <td>2.438364</td>\n",
       "      <td>3.053634</td>\n",
       "      <td>1.715078</td>\n",
       "      <td>0.951273</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>min</th>\n",
       "      <td>6.163400e+04</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>NaN</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>2.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>25%</th>\n",
       "      <td>8.706885e+05</td>\n",
       "      <td>2.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>2.000000</td>\n",
       "      <td>NaN</td>\n",
       "      <td>2.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>2.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>50%</th>\n",
       "      <td>1.171710e+06</td>\n",
       "      <td>4.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>2.000000</td>\n",
       "      <td>NaN</td>\n",
       "      <td>3.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>2.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>75%</th>\n",
       "      <td>1.238298e+06</td>\n",
       "      <td>6.000000</td>\n",
       "      <td>5.000000</td>\n",
       "      <td>5.000000</td>\n",
       "      <td>4.000000</td>\n",
       "      <td>4.000000</td>\n",
       "      <td>NaN</td>\n",
       "      <td>5.000000</td>\n",
       "      <td>4.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>4.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>max</th>\n",
       "      <td>1.345435e+07</td>\n",
       "      <td>10.000000</td>\n",
       "      <td>10.000000</td>\n",
       "      <td>10.000000</td>\n",
       "      <td>10.000000</td>\n",
       "      <td>10.000000</td>\n",
       "      <td>NaN</td>\n",
       "      <td>10.000000</td>\n",
       "      <td>10.000000</td>\n",
       "      <td>10.000000</td>\n",
       "      <td>4.000000</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ]
     },
     "execution_count": 135,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 135
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-13T12:51:36.430951Z",
     "start_time": "2025-01-13T12:51:36.426306Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 因为上面发现它是object类型，所以查看所包含的数据种类\n",
    "# 发现有一些数据是字符类型，需要进行转换\n",
    "data[\"Bare Nuclei\"].unique()"
   ],
   "id": "41a9ed08cdac73ed",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array(['1', '10', '2', '4', '3', '9', '7', '?', '5', '8', '6'],\n",
       "      dtype=object)"
      ]
     },
     "execution_count": 136,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 136
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-13T12:55:15.151221Z",
     "start_time": "2025-01-13T12:55:15.142731Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 缺失值处理\n",
    "data = data.replace(to_replace=\"?\", value=np.nan)\n",
    "\n",
    "# 直接删除，哪一行有空值，就删除对应的样本\n",
    "data = data.dropna()\n",
    "\n",
    "print(data.shape)  # (683, 11)\n",
    "print(\"-\" * 50)\n",
    "\n",
    "# 分类的类别是2和4,对应 \"良性\", \"恶性\"\n",
    "data[\"Class\"].unique()\n",
    "\n",
    "# 把第6列的字符串转化为数字类型\n",
    "data[\"Bare Nuclei\"] = data[\"Bare Nuclei\"].astype(\"int16\")\n",
    "\n",
    "data.info()"
   ],
   "id": "fd5d0764bb552a34",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(683, 11)\n",
      "--------------------------------------------------\n",
      "<class 'pandas.core.frame.DataFrame'>\n",
      "Index: 683 entries, 0 to 698\n",
      "Data columns (total 11 columns):\n",
      " #   Column                       Non-Null Count  Dtype\n",
      "---  ------                       --------------  -----\n",
      " 0   Sample code number           683 non-null    int64\n",
      " 1   Clump Thickness              683 non-null    int64\n",
      " 2   Uniformity of Cell Size      683 non-null    int64\n",
      " 3   Uniformity of CellShape      683 non-null    int64\n",
      " 4   Marginal Adhesion            683 non-null    int64\n",
      " 5   Single Epithelial Cell Size  683 non-null    int64\n",
      " 6   Bare Nuclei                  683 non-null    int16\n",
      " 7   Bland Chromatin              683 non-null    int64\n",
      " 8   Normal Nucleoli              683 non-null    int64\n",
      " 9   Mitoses                      683 non-null    int64\n",
      " 10  Class                        683 non-null    int64\n",
      "dtypes: int16(1), int64(10)\n",
      "memory usage: 60.0 KB\n"
     ]
    }
   ],
   "execution_count": 142
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-13T12:59:00.560683Z",
     "start_time": "2025-01-13T12:59:00.551577Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Split the data. Column 0 is the sample code number (an ID, not a feature);\n",
    "# columns 1-9 are the features and column 10 is the label.\n",
    "features = data.iloc[:, 1:10]\n",
    "labels = data.iloc[:, 10]\n",
    "x_train, x_test, y_train, y_test = train_test_split(\n",
    "    features, labels, test_size=0.25, random_state=1)\n",
    "\n",
    "# Standardize: fit the scaler on the training split only,\n",
    "# then apply that same transform to the test split.\n",
    "scaler = StandardScaler()\n",
    "x_train = scaler.fit_transform(x_train)\n",
    "x_test = scaler.transform(x_test)\n"
   ],
   "id": "71ea0b50277f8827",
   "outputs": [],
   "execution_count": 144
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-13T13:21:22.061744Z",
     "start_time": "2025-01-13T13:21:22.051282Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Logistic regression prediction\n",
    "# C: inverse of regularization strength — smaller C means stronger regularization.\n",
    "# penalty: type of regularization term (L1 or L2).\n",
    "# solver: the optimization algorithm used to fit the coefficients. The code below\n",
    "#   uses 'lbfgs' — NOTE(review): the original comment described 'liblinear' and\n",
    "#   called the solver a learning-rate schedule; neither matches this code.\n",
    "# max_iter: maximum number of solver iterations.\n",
    "lg = LogisticRegression(C=1, solver='lbfgs')\n",
    "\n",
    "lg.fit(x_train, y_train)\n",
    "\n",
    "print(\"逻辑回归系数：\")\n",
    "print(lg.coef_)\n",
    "print(\"-\" * 50)\n",
    "\n",
    "y_predict = lg.predict(x_test)\n",
    "\n",
    "print(\"逻辑回归的准确率：\")\n",
    "print(lg.score(x_test, y_test))\n",
    "print(\"-\" * 50)\n",
    "\n",
    "# predict_proba() returns per-class predicted probabilities; show the first 5 samples\n",
    "print(lg.predict_proba(x_test)[0:5])\n",
    "# Accuracy observed at other values of C (unchanged here):\n",
    "# C=0.75 0.9824561403508771\n",
    "# C=0.5 0.9824561403508771\n",
    "# C=0.25 0.9824561403508771\n"
   ],
   "id": "f741228d3d899bdd",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "逻辑回归系数：\n",
      "[[1.21235722 0.11615948 0.88555005 0.66842352 0.01706111 1.17999801\n",
      "  0.87091028 0.6886091  0.77883141]]\n",
      "--------------------------------------------------\n",
      "逻辑回归的准确率：\n",
      "0.9824561403508771\n",
      "--------------------------------------------------\n",
      "[[0.95272902 0.04727098]\n",
      " [0.99609421 0.00390579]\n",
      " [0.98596781 0.01403219]\n",
      " [0.02453657 0.97546343]\n",
      " [0.99808408 0.00191592]]\n"
     ]
    }
   ],
   "execution_count": 151
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-13T13:02:44.337685Z",
     "start_time": "2025-01-13T13:02:44.325214Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# labels和target_names对应\n",
    "# macro avg 平均值  weighted avg 加权平均值\n",
    "print(classification_report(y_test, y_predict,\n",
    "      labels=[2, 4], target_names=[\"良性\", \"恶性\"]))\n",
    "print(\"-\"*50) \n",
    "\n",
    "print(\"AUC值：\")\n",
    "print(roc_auc_score(y_test,y_predict))\n"
   ],
   "id": "7f5439cfe71db003",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              precision    recall  f1-score   support\n",
      "\n",
      "          良性       0.97      1.00      0.99       111\n",
      "          恶性       1.00      0.95      0.97        60\n",
      "\n",
      "    accuracy                           0.98       171\n",
      "   macro avg       0.99      0.97      0.98       171\n",
      "weighted avg       0.98      0.98      0.98       171\n",
      "\n",
      "--------------------------------------------------\n",
      "AUC值：\n",
      "0.975\n"
     ]
    }
   ],
   "execution_count": 147
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
