{
 "cells": [
  {
   "cell_type": "markdown",
   "source": [
    "# 第6章.管道\n",
    "### 6.1.管道介绍\n",
    "### 6.1.1.评估器\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "None\n",
      "30\n"
     ]
    }
   ],
   "source": [
    "from sklearn.tree import DecisionTreeClassifier\n",
    "from sklearn.datasets import load_breast_cancer\n",
    "\n",
    "X, y = load_breast_cancer(return_X_y=True)\n",
    "# The estimator: a decision-tree classifier\n",
    "modelTree = DecisionTreeClassifier()\n",
    "# Before fitting, max_features is just a hyper-parameter; it is still None (its default)\n",
    "print(modelTree.max_features)\n",
    "modelTree.fit(X, y)\n",
    "# After fitting, the learned attribute max_features_ (note the trailing underscore)\n",
    "# reports the number of features the trained model considered\n",
    "print(modelTree.max_features_)"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-09-11T12:38:39.643839400Z",
     "start_time": "2023-09-11T12:38:29.065028300Z"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[-1.  1.]\n",
      " [ 1. -1.]]\n"
     ]
    }
   ],
   "source": [
    "### 6.1.2 Transformers\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "# Data standardisation:\n",
    "# create a transformer stdScaler, a standard (z-score) scaler\n",
    "stdScaler = StandardScaler()\n",
    "# The raw data to standardise\n",
    "X0 = [[0, 15], [1, -10]]\n",
    "# Fit the scaler on X0, then transform that same data\n",
    "fitted_scaler = stdScaler.fit(X0)\n",
    "X1 = fitted_scaler.transform(X0)\n",
    "print(X1)"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-09-11T12:50:17.691608700Z",
     "start_time": "2023-09-11T12:50:17.643618500Z"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "### 6.1.3 管道"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Pipeline(steps=[('reduce_dim', PCA()), ('clf', SVC())])\n"
     ]
    }
   ],
   "source": [
    "from sklearn.pipeline import Pipeline\n",
    "from sklearn.svm import SVC\n",
    "from sklearn.decomposition import PCA\n",
    "\n",
    "# A Pipeline chains transformers with a final estimator;\n",
    "# each step is a (name, estimator) pair\n",
    "pipe = Pipeline([\n",
    "    ('reduce_dim', PCA()),\n",
    "    ('clf', SVC()),\n",
    "])\n",
    "print(pipe)"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-09-11T12:55:26.219769900Z",
     "start_time": "2023-09-11T12:55:26.169766900Z"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 6.2.管道机制概述"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.7267326732673267\n",
      "-------------------------------------\n",
      "[False  True  True False False False  True  True False False False  True\n",
      "  True False  True  True  True False  True False]\n"
     ]
    }
   ],
   "source": [
    "from sklearn.datasets import make_classification\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.feature_selection import SelectKBest\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.feature_selection import f_classif  # ANOVA F-test (univariate feature selection)\n",
    "from sklearn.pipeline import Pipeline\n",
    "\n",
    "# 0. Generate a random 3-class data set\n",
    "# make_classification() keeps the default n_features of 20\n",
    "X, y = make_classification(n_samples=50500, n_classes=3, n_informative=5, random_state=42)\n",
    "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\n",
    "# 1. Feature selection -> standardisation -> logistic regression (classification)\n",
    "out_filter = SelectKBest(f_classif, k=5)\n",
    "std_Scale = StandardScaler()\n",
    "clf_log = LogisticRegression()\n",
    "# 2. Build the Pipeline object\n",
    "clfPipeline = Pipeline([('anova',out_filter),(\"scale\",std_Scale),('logReg',clf_log)])\n",
    "# Set nested step parameters via the stepname__param syntax\n",
    "clfPipeline.set_params(anova__k=10,logReg__solver='liblinear',logReg__penalty='l1',logReg__max_iter=500)\n",
    "# Train the whole pipeline\n",
    "clfPipeline.fit(X_train,y_train)\n",
    "# Predict with the pipeline's predict() on the held-out test set\n",
    "# (previously this predicted on X_train and the result was never used,\n",
    "#  while score() below evaluates on the test set)\n",
    "prediction = clfPipeline.predict(X_test)\n",
    "# Compute the test-set accuracy with the pipeline's score()\n",
    "tmpScore = clfPipeline.score(X_test,y_test)\n",
    "print(tmpScore)\n",
    "print('-'*37)\n",
    "# Boolean mask of the features kept by the 'anova' selection step\n",
    "aFeatures = clfPipeline['anova'].get_support()\n",
    "print(aFeatures)"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-09-11T14:11:01.034608200Z",
     "start_time": "2023-09-11T14:10:57.870610600Z"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 6.4特征聚合转换器\n",
    "<p>sklearn提供了特征聚合转换器FeatureUnion,能够把多个转换器的输出结果连接起来,形成一个新的特征空间,作为其他评估器或转换器的输入</p>\n",
    "<p>值得注意的是,FeatureUnion转换器只是把输出结果连接在一起,也就是说,它没有去重功能</p>"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "聚合后的特征变量空间有3个特征变量\n",
      "-------------------------------------\n",
      "预测类别值为:\n",
      " [0 1 1 1 0 1 2 2 2 2 1 2 1 1 0 0 0 1 0 1 2 1 1 1 2 1 0 1 0 2 2 2 0 0 0 0 2\n",
      " 1]\n",
      "真实类别值为:\n",
      " [0 1 1 1 0 1 2 2 2 2 2 2 1 1 0 0 0 1 0 1 2 1 2 1 2 1 0 2 0 1 2 2 0 0 0 0 2\n",
      " 1]\n",
      "精度:0.895\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "from sklearn.pipeline import FeatureUnion\n",
    "from sklearn.datasets import load_iris\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.decomposition import PCA\n",
    "from sklearn.feature_selection import SelectKBest\n",
    "from sklearn.svm import SVC\n",
    "\n",
    "# Load the iris data set and split it, stratifying on the labels\n",
    "X, y = load_iris(return_X_y=True)\n",
    "X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, stratify=y)\n",
    "\n",
    "# FeatureUnion concatenates the outputs of several transformers:\n",
    "# here 2 PCA components plus the single best univariate feature (k=1)\n",
    "pca = PCA(n_components=2)\n",
    "selection = SelectKBest(k=1)\n",
    "combined_features = FeatureUnion([('pca', pca), (\"selected\", selection)])\n",
    "\n",
    "# Fit the union on the training data, then build the aggregated\n",
    "# training matrix that downstream models will consume\n",
    "cmbnd = combined_features.fit(X_train, y_train)\n",
    "X_train_fu = cmbnd.transform(X_train)\n",
    "print(f\"聚合后的特征变量空间有{X_train_fu.shape[1]}个特征变量\")\n",
    "print(\"-\"*37)\n",
    "\n",
    "# Train a linear SVM on the aggregated feature space\n",
    "svm = SVC(kernel='linear')\n",
    "svm.fit(X_train_fu, y_train)\n",
    "# Aggregate the test data with the already-fitted union, then predict\n",
    "X_fu_test = cmbnd.transform(X_test)\n",
    "result = svm.predict(X_fu_test)\n",
    "print(\"预测类别值为:\\n\", result)\n",
    "# Show the ground-truth labels for comparison\n",
    "print(\"真实类别值为:\\n\", y_test)\n",
    "print(f'精度:{round(svm.score(X_fu_test,y_test),3)}')"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-09-12T11:01:24.228763500Z",
     "start_time": "2023-09-12T11:01:24.203764Z"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 6.5.列转换机制\n",
    "<p>如果需要对数据集中的每个特征做不同的转换,则需要用到列转换器。sklearn中提供了列转换器sklearn.compose.ColumnTransformer,能够对不同的特征数据做预处理变换</p>"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "转换后的特征名称:\n",
      " ['categories__x0_London', 'categories__x0_Paris', 'categories__x0_Sallisaw', 'title_now__feast', 'title_now__grapes', 'title_now__his', 'title_now__how', 'title_now__last', 'title_now__learned', 'title_now__moveable', 'title_now__of', 'title_now__row', 'title_now__the', 'title_now__trick', 'title_now__warth', 'title_now__watson'] \n",
      "\n",
      "[[1 0 0 0 0 1 0 1 0 0 0 1 0 0 0 0]\n",
      " [1 0 0 0 0 0 1 0 1 0 0 0 1 1 0 1]\n",
      " [0 1 0 1 0 0 0 0 0 1 0 0 0 0 0 0]\n",
      " [0 0 1 0 1 0 0 0 0 0 1 0 1 0 1 0]]\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "from sklearn.compose import ColumnTransformer\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "from sklearn.feature_extraction.text import CountVectorizer\n",
    "from sklearn.preprocessing import OneHotEncoder\n",
    "\n",
    "# Example data mixing string features and numeric features\n",
    "X = pd.DataFrame({\n",
    "    'city':['London','London','Paris','Sallisaw'],\n",
    "    'title':['His Last Row','How Watson Learned the Trick','A Moveable Feast','The Grapes of Warth'],\n",
    "    'expert_rating':[5,3,4,5],\n",
    "    'user_rating':[4,5,4,3]\n",
    "})\n",
    "# 1. One-hot encode 'city', bag-of-words encode 'title'; drop the remaining columns\n",
    "column_trans1 = ColumnTransformer([('categories',OneHotEncoder(dtype='int'),['city']),\n",
    "                                   ('title_now',CountVectorizer(),'title'),\n",
    "                                   ],remainder='drop')\n",
    "# Fit the ColumnTransformer\n",
    "column_trans1.fit(X)\n",
    "# get_feature_names() was removed in scikit-learn 1.2;\n",
    "# get_feature_names_out() is the supported replacement\n",
    "out_col_names = column_trans1.get_feature_names_out()\n",
    "print(\"转换后的特征名称:\\n\",out_col_names,'\\n')\n",
    "# Convert the sparse result to a dense array for easier inspection\n",
    "X_new = column_trans1.transform(X)\n",
    "print(X_new.toarray())"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-09-12T12:30:21.936166800Z",
     "start_time": "2023-09-12T12:30:21.906130600Z"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[ 0.90453403  0.          1.          0.          0.        ]\n",
      " [-1.50755672  1.41421356  1.          0.          0.        ]\n",
      " [-0.30151134  0.          0.          1.          0.        ]\n",
      " [ 0.90453403 -1.41421356  0.          0.          1.        ]]\n"
     ]
    }
   ],
   "source": [
    "# 2. Select columns by dtype / name pattern with make_column_selector()\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "from sklearn.compose import make_column_selector\n",
    "\n",
    "# remainder defaults to 'drop'\n",
    "column_trans2 = ColumnTransformer([\n",
    "    (\"scaler\",StandardScaler(),make_column_selector(dtype_include=np.number)),\n",
    "    ('onehot',OneHotEncoder(),make_column_selector(pattern='city',dtype_include=object))\n",
    "])\n",
    "# A single fit_transform() fits and transforms in one pass;\n",
    "# the separate fit(X) call that preceded it was redundant\n",
    "X_new = column_trans2.fit_transform(X)\n",
    "print(X_new)"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-09-12T12:45:29.769740Z",
     "start_time": "2023-09-12T12:45:29.734742500Z"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "转化后的特征变量名称:\n",
      " ['city_category__x0_London', 'city_category__x0_Paris', 'city_category__x0_Sallisaw', 'title_bow__feast', 'title_bow__grapes', 'title_bow__his', 'title_bow__how', 'title_bow__last', 'title_bow__learned', 'title_bow__moveable', 'title_bow__of', 'title_bow__row', 'title_bow__the', 'title_bow__trick', 'title_bow__warth', 'title_bow__watson', 'expert_rating', 'user_rating'] \n",
      "\n",
      "[[1 0 0 0 0 1 0 1 0 0 0 1 0 0 0 0 5 4]\n",
      " [1 0 0 0 0 0 1 0 1 0 0 0 1 1 0 1 3 5]\n",
      " [0 1 0 1 0 0 0 0 0 1 0 0 0 0 0 0 4 4]\n",
      " [0 0 1 0 1 0 0 0 0 0 1 0 1 0 1 0 5 3]]\n"
     ]
    }
   ],
   "source": [
    "# 3. remainder='passthrough' appends the untouched columns to the output\n",
    "column_trans3 =ColumnTransformer(\n",
    "    [('city_category',OneHotEncoder(dtype='int'),['city']),\n",
    "     ('title_bow',CountVectorizer(),'title')\n",
    "     ],remainder='passthrough'\n",
    ")\n",
    "# A single fit_transform() replaces the redundant fit() + fit_transform() pair\n",
    "X_new = column_trans3.fit_transform(X)\n",
    "# get_feature_names() was removed in scikit-learn 1.2;\n",
    "# get_feature_names_out() is the supported replacement\n",
    "out_col_names = column_trans3.get_feature_names_out()\n",
    "print(\"转化后的特征变量名称:\\n\",out_col_names,'\\n')\n",
    "print(X_new)"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-09-12T12:39:03.736380200Z",
     "start_time": "2023-09-12T12:39:03.678389Z"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[1.  0.  0.  0.  0.  1.  0.  1.  0.  0.  0.  1.  0.  0.  0.  0.  1.  0.5]\n",
      " [1.  0.  0.  0.  0.  0.  1.  0.  1.  0.  0.  0.  1.  1.  0.  1.  0.  1. ]\n",
      " [0.  1.  0.  1.  0.  0.  0.  0.  0.  1.  0.  0.  0.  0.  0.  0.  0.5 0.5]\n",
      " [0.  0.  1.  0.  1.  0.  0.  0.  0.  0.  1.  0.  1.  0.  1.  0.  1.  0. ]]\n"
     ]
    }
   ],
   "source": [
    "# 4. remainder can itself be an estimator:\n",
    "#    min-max scale the leftover numeric columns instead of dropping them\n",
    "from sklearn.preprocessing import MinMaxScaler\n",
    "\n",
    "column_trans4 = ColumnTransformer(\n",
    "    [('city_category',OneHotEncoder(),['city']),\n",
    "     ('title_bow',CountVectorizer(),'title')\n",
    "     ],remainder=MinMaxScaler()\n",
    ")\n",
    "# A single fit_transform() replaces the redundant fit() + fit_transform() pair\n",
    "X_new = column_trans4.fit_transform(X)\n",
    "print(X_new)"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-09-12T12:43:54.930412400Z",
     "start_time": "2023-09-12T12:43:54.911840700Z"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 6.6.模型选择"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Fitting 5 folds for each of 144 candidates, totalling 720 fits\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[Parallel(n_jobs=1)]: Using backend SequentialBackend with 1 concurrent workers.\n",
      "[Parallel(n_jobs=1)]: Done 720 out of 720 | elapsed:    3.2s finished\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "基于训练数据集,搜索到的最佳参数组合是:\n",
      "{'reduce_dim__n_components': 6, 'regressor__alpha': 11}\n",
      "测试集精度:0.73\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "from sklearn.datasets import fetch_california_housing\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.pipeline import Pipeline\n",
    "\n",
    "# 1. Load a regression data set.\n",
    "# load_boston() was removed in scikit-learn 1.2; the California-housing\n",
    "# data set is the replacement recommended by the sklearn docs.\n",
    "X, y = fetch_california_housing(return_X_y=True)\n",
    "# random_state makes the split (and thus the reported score) reproducible\n",
    "X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)\n",
    "\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "from sklearn.decomposition import PCA\n",
    "from sklearn.linear_model import Ridge\n",
    "\n",
    "scaler = StandardScaler()\n",
    "pca = PCA()\n",
    "ridge = Ridge()\n",
    "\n",
    "pipe = Pipeline([\n",
    "    ('scaler',scaler),\n",
    "    ('reduce_dim',pca),\n",
    "    ('regressor',ridge)\n",
    "])\n",
    "# GridSearchCV parameter grid. Ridge requires alpha >= 0, so the previous\n",
    "# np.arange(-12, 12) range (negative alphas) is rejected by modern sklearn.\n",
    "n_features_to_test = np.arange(1,7)\n",
    "alpha_to_test = np.arange(0,12)\n",
    "params = {'reduce_dim__n_components':n_features_to_test,'regressor__alpha':alpha_to_test}\n",
    "# Exhaustive grid search over the pipeline's nested parameters\n",
    "from sklearn.model_selection import GridSearchCV\n",
    "gridsearch = GridSearchCV(pipe,params,verbose=1)\n",
    "gridsearch.fit(X_train,y_train)\n",
    "print(\"\\n基于训练数据集,搜索到的最佳参数组合是:\")\n",
    "print(gridsearch.best_params_)\n",
    "print(f\"测试集精度:{round(gridsearch.score(X_test,y_test),3)}\")"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-09-12T13:01:14.960988800Z",
     "start_time": "2023-09-12T13:01:11.270627500Z"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [],
   "metadata": {
    "collapsed": false
   }
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
