{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2025-01-10T09:13:10.673332Z",
     "start_time": "2025-01-10T09:13:10.668505Z"
    }
   },
   "source": [
    "from sklearn.feature_extraction import DictVectorizer\n",
    "from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n",
    "from sklearn.preprocessing import MinMaxScaler, StandardScaler\n",
    "from sklearn.feature_selection import VarianceThreshold\n",
    "from sklearn.decomposition import PCA\n",
    "import jieba\n",
    "import numpy as np\n",
    "from sklearn.impute import SimpleImputer\n",
    "from sklearn.datasets import load_iris, fetch_20newsgroups, fetch_california_housing\n",
    "from sklearn.model_selection import train_test_split, GridSearchCV"
   ],
   "outputs": [],
   "execution_count": 16
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "DictVectorizer:是一个类，用于对字典数据进行特征值化",
   "id": "2b1c49c743a09cae"
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "",
   "id": "5bc60816a9918f3"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-10T09:13:10.800010Z",
     "start_time": "2025-01-10T09:13:10.792316Z"
    }
   },
   "cell_type": "code",
   "source": [
     "dv1=DictVectorizer(sparse=True)# Instantiate a vectorizer; sparse=True is the default.\n",
     "dv2=DictVectorizer(sparse=False)\n",
     "my_dict=[{'city':'北京','temperature':100},{'city':'上海','temperature':60},{'city':'广州','temperature':30}]\n",
     "data1=dv1.fit_transform(my_dict)# Fit and transform; returns a sparse matrix (only non-zero entries are stored) because dv1 was built with sparse=True.\n",
     "data2=dv2.fit_transform(my_dict)# fit_transform here returns a dense array (every entry is stored).\n",
     "print(data1)\n",
     "print(data1.toarray())# Convert the sparse matrix to a dense array (same values as data2).\n",
     "print(data2)\n",
     "print(dv1.get_feature_names_out())# Print the generated feature names.\n",
     "print(dv1.inverse_transform(data1))# Map the encoded rows back to dicts to see what each feature means."
   ],
   "id": "29db42dbb923f445",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<Compressed Sparse Row sparse matrix of dtype 'float64'\n",
      "\twith 6 stored elements and shape (3, 4)>\n",
      "  Coords\tValues\n",
      "  (0, 1)\t1.0\n",
      "  (0, 3)\t100.0\n",
      "  (1, 0)\t1.0\n",
      "  (1, 3)\t60.0\n",
      "  (2, 2)\t1.0\n",
      "  (2, 3)\t30.0\n",
      "[[  0.   1.   0. 100.]\n",
      " [  1.   0.   0.  60.]\n",
      " [  0.   0.   1.  30.]]\n",
      "[[  0.   1.   0. 100.]\n",
      " [  1.   0.   0.  60.]\n",
      " [  0.   0.   1.  30.]]\n",
      "['city=上海' 'city=北京' 'city=广州' 'temperature']\n",
      "[{'city=北京': np.float64(1.0), 'temperature': np.float64(100.0)}, {'city=上海': np.float64(1.0), 'temperature': np.float64(60.0)}, {'city=广州': np.float64(1.0), 'temperature': np.float64(30.0)}]\n"
     ]
    }
   ],
   "execution_count": 17
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-10T09:13:10.815630Z",
     "start_time": "2025-01-10T09:13:10.813233Z"
    }
   },
   "cell_type": "code",
   "source": "",
   "id": "75a73439aa9675cc",
   "outputs": [],
   "execution_count": 17
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "CountVectorizer：是一个类，用于对文本数据进行词频统计(输入样本是字符串或字符串列表（多个样本时）)，并将统计结果转换为特征值。",
   "id": "483e07cd6b71bafc"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-10T09:13:10.846633Z",
     "start_time": "2025-01-10T09:13:10.841231Z"
    }
   },
   "cell_type": "code",
   "source": [
     "# English text: CountVectorizer tokenizes on whitespace/punctuation out of the box.\n",
     "cv1=CountVectorizer()\n",
     "text=[\"life is  short,i like python life\",\"life is too long,i dislike python\",\"life is short\"]\n",
     "data=cv1.fit_transform(text)# Fit and transform: splits on whitespace/punctuation, uses each distinct word as a feature and counts its occurrences per sample. Single-character tokens are dropped by default (assumed uninformative).\n",
     "print(data)\n",
     "print(data.toarray())# Dense view; read each row against feature_names to see per-sample word counts.\n",
     "print(cv1.get_feature_names_out())# Print the feature (vocabulary) names.\n",
     "\n"
   ],
   "id": "5fa9db5edd28a93b",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<Compressed Sparse Row sparse matrix of dtype 'int64'\n",
      "\twith 14 stored elements and shape (3, 8)>\n",
      "  Coords\tValues\n",
      "  (0, 2)\t2\n",
      "  (0, 1)\t1\n",
      "  (0, 6)\t1\n",
      "  (0, 3)\t1\n",
      "  (0, 5)\t1\n",
      "  (1, 2)\t1\n",
      "  (1, 1)\t1\n",
      "  (1, 5)\t1\n",
      "  (1, 7)\t1\n",
      "  (1, 4)\t1\n",
      "  (1, 0)\t1\n",
      "  (2, 2)\t1\n",
      "  (2, 1)\t1\n",
      "  (2, 6)\t1\n",
      "[[0 1 2 1 0 1 1 0]\n",
      " [1 1 1 0 1 1 0 1]\n",
      " [0 1 1 0 0 0 1 0]]\n",
      "['dislike' 'is' 'life' 'like' 'long' 'python' 'short' 'too']\n"
     ]
    }
   ],
   "execution_count": 18
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-10T09:13:10.856199Z",
     "start_time": "2025-01-10T09:13:10.847638Z"
    }
   },
   "cell_type": "code",
   "source": [
     "# Chinese text must be segmented first (with jieba): CountVectorizer splits on whitespace/punctuation, which does not suit Chinese.\n",
     "text1=jieba.cut(\"今天很残酷，明天更残酷，后天很美好，但绝对大部分是死在明天晚上，所以每个人不要放弃今天。\")# Returns a generator yielding the segmented words.\n",
     "text2=jieba.cut(\"我们看到的从很远星系来的光是在几百万年之前发出的，这样当我们看到宇宙时，我们是在看它的过去。\")\n",
     "text3=jieba.cut(\"如果只用一种方式了解某样事物，你就不会真正了解它。了解事物真正含义的秘密取决于如何将其与我们所了解的事物相联系。\")\n",
     "content1 = list(text1)# Materialize the generator into a list.\n",
     "content2 = list(text2)\n",
     "content3 = list(text3)\n",
     "c1 = ' '.join(content1)# Join the tokens into one space-separated string.\n",
     "c2 = ' '.join(content2)\n",
     "c3 = ' '.join(content3)\n",
     "text=[c1,c2,c3]# Samples are now in a form CountVectorizer can consume.\n",
     "cv2=CountVectorizer(min_df=1)# min_df/max_df: min_df=2 would keep only words appearing in at least 2 documents; max_df=0.5 would drop words appearing in over 50% of documents (ints are absolute counts, floats in (0,1) are proportions).\n",
     "data=cv2.fit_transform(text)\n",
     "print(data.toarray())\n",
     "print(cv2.get_feature_names_out())"
   ],
   "id": "94ac75b173bccf2e",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[0 0 1 0 0 0 2 0 0 0 0 0 1 0 1 0 0 0 0 1 1 0 2 0 1 0 2 1 0 0 0 1 1 0 0 0]\n",
      " [0 0 0 1 0 0 0 1 1 1 0 0 0 0 0 0 0 1 3 0 0 0 0 1 0 0 0 0 2 0 0 0 0 0 1 1]\n",
      " [1 1 0 0 4 3 0 0 0 0 1 1 0 1 0 1 1 0 1 0 0 1 0 0 0 1 0 0 0 2 1 0 0 1 0 0]]\n",
      "['一种' '不会' '不要' '之前' '了解' '事物' '今天' '光是在' '几百万年' '发出' '取决于' '只用' '后天' '含义'\n",
      " '大部分' '如何' '如果' '宇宙' '我们' '所以' '放弃' '方式' '明天' '星系' '晚上' '某样' '残酷' '每个'\n",
      " '看到' '真正' '秘密' '绝对' '美好' '联系' '过去' '这样']\n"
     ]
    }
   ],
   "execution_count": 19
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "TfidfVectorizer：是一个类，用于对文本数据进行词频统计，并将统计结果转换为tf-idf值(中文特征值化)，即每个词的tf-idf权重，用以评估一字词对于一个文件集或一个语料库中的其中一份文件的重要程度。",
   "id": "9c53a231f8e7794b"
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "tf：词频。idf：逆文档频率（log(总文档数/包含该词的文档数)）。",
   "id": "bbd24a5ada6f176d"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-10T09:13:10.867174Z",
     "start_time": "2025-01-10T09:13:10.860603Z"
    }
   },
   "cell_type": "code",
   "source": [
     "tf=TfidfVectorizer(smooth_idf=True)# smooth_idf (default True) adds one to document frequencies, preventing division by zero in the idf term.\n",
     "data=tf.fit_transform(text)# Reuses the jieba-segmented `text` built in the CountVectorizer cell above.\n",
     "print(data.toarray())\n",
     "print(tf.get_feature_names_out())"
   ],
   "id": "166c0cf3cdf31536",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[0.         0.         0.21821789 0.         0.         0.\n",
      "  0.43643578 0.         0.         0.         0.         0.\n",
      "  0.21821789 0.         0.21821789 0.         0.         0.\n",
      "  0.         0.21821789 0.21821789 0.         0.43643578 0.\n",
      "  0.21821789 0.         0.43643578 0.21821789 0.         0.\n",
      "  0.         0.21821789 0.21821789 0.         0.         0.        ]\n",
      " [0.         0.         0.         0.2410822  0.         0.\n",
      "  0.         0.2410822  0.2410822  0.2410822  0.         0.\n",
      "  0.         0.         0.         0.         0.         0.2410822\n",
      "  0.55004769 0.         0.         0.         0.         0.2410822\n",
      "  0.         0.         0.         0.         0.48216441 0.\n",
      "  0.         0.         0.         0.         0.2410822  0.2410822 ]\n",
      " [0.15698297 0.15698297 0.         0.         0.62793188 0.47094891\n",
      "  0.         0.         0.         0.         0.15698297 0.15698297\n",
      "  0.         0.15698297 0.         0.15698297 0.15698297 0.\n",
      "  0.1193896  0.         0.         0.15698297 0.         0.\n",
      "  0.         0.15698297 0.         0.         0.         0.31396594\n",
      "  0.15698297 0.         0.         0.15698297 0.         0.        ]]\n",
      "['一种' '不会' '不要' '之前' '了解' '事物' '今天' '光是在' '几百万年' '发出' '取决于' '只用' '后天' '含义'\n",
      " '大部分' '如何' '如果' '宇宙' '我们' '所以' '放弃' '方式' '明天' '星系' '晚上' '某样' '残酷' '每个'\n",
      " '看到' '真正' '秘密' '绝对' '美好' '联系' '过去' '这样']\n"
     ]
    }
   ],
   "execution_count": 20
  },
  {
   "metadata": {},
   "cell_type": "markdown",
    "source": "数值型数据的特征处理（把不同的特征拉到同一个量纲：转化为算法要求的数据）：1、归一化 2、标准化",
   "id": "627147a43ccc5261"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-10T09:13:10.908718Z",
     "start_time": "2025-01-10T09:13:10.903105Z"
    }
   },
   "cell_type": "code",
   "source": [
     "# Normalization with MinMaxScaler.\n",
     "mm=MinMaxScaler(feature_range=(0, 1))# feature_range is the target value range; usually (0,1) (the default) or (-1,1).\n",
     "data=mm.fit_transform([[90, 2, 10, 40], [60, 4, 15, 45], [75, 3, 13, 46]])\n",
     "print(data)\n",
     "out = mm.transform([[1, 2, 3, 4], [6, 5, 8, 7]])# transform is for test data: it reuses the min/max (or mean/variance) statistics learned from the training data instead of recomputing them.\n",
     "print(out)"
   ],
   "id": "557468a4e2d3eb1f",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[1.         0.         0.         0.        ]\n",
      " [0.         1.         1.         0.83333333]\n",
      " [0.5        0.5        0.6        1.        ]]\n",
      "[[-1.96666667  0.         -1.4        -6.        ]\n",
      " [-1.8         1.5        -0.4        -5.5       ]]\n"
     ]
    }
   ],
   "execution_count": 21
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-10T09:13:10.936775Z",
     "start_time": "2025-01-10T09:13:10.931216Z"
    }
   },
   "cell_type": "code",
   "source": [
     "# Standardization with StandardScaler.\n",
     "std = StandardScaler()# Scales each feature to zero mean and unit variance.\n",
     "data = std.fit_transform([[1., -1., 3.],[2., 4., 2.],[4., 6., -1.]])\n",
     "print(data)\n",
     "print(std.n_samples_seen_)# Number of samples seen during fit."
   ],
   "id": "b90c31445afc5ad",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[-1.06904497 -1.35873244  0.98058068]\n",
      " [-0.26726124  0.33968311  0.39223227]\n",
      " [ 1.33630621  1.01904933 -1.37281295]]\n",
      "3\n"
     ]
    }
   ],
   "execution_count": 22
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "SimpleImputer：是一个类，用于对缺失值进行填充，常见的有用法是用均值、众数、中位数等填充。",
   "id": "8c271d886508ede9"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-10T09:13:10.985307Z",
     "start_time": "2025-01-10T09:13:10.979518Z"
    }
   },
   "cell_type": "code",
   "source": [
     "im = SimpleImputer(missing_values=np.nan, strategy='mean')# missing_values: the placeholder for missing entries (default np.nan); strategy: how to fill them (column mean here).\n",
     "data=im.fit_transform([[1, 2], [np.nan, 3], [7, 6], [3, 2]])\n",
     "print(data)"
   ],
   "id": "57a32d5f90610ec2",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[1.         2.        ]\n",
      " [3.66666667 3.        ]\n",
      " [7.         6.        ]\n",
      " [3.         2.        ]]\n"
     ]
    }
   ],
   "execution_count": 23
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "VarianceThreshold：是一个类，通过删除低方差的特征，实现降维（特征数变少）。",
   "id": "632fd6c5b1f802d8"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-10T09:13:11.000030Z",
     "start_time": "2025-01-10T09:13:10.994764Z"
    }
   },
   "cell_type": "code",
   "source": [
     "var = VarianceThreshold(threshold=0.1)# threshold: variance cutoff (default 0). Here features with variance <= 0.1 are removed.\n",
     "data = var.fit_transform([[0, 2, 0, 3],[0, 1, 4, 3],[0, 1, 1, 3]])\n",
     "print(data)\n",
     "print(var.get_support(True))# Column indices of the retained features."
   ],
   "id": "8ac3681d070b3742",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[2 0]\n",
      " [1 4]\n",
      " [1 1]]\n",
      "[1 2]\n"
     ]
    }
   ],
   "execution_count": 24
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "PCA（主成分分析）：是一个类，尽可能降低原数据的维数，并且损失少量信息，实现降维",
   "id": "f96e3e43766581f7"
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "工作流程：1、PCA 计算所有主成分及其解释的方差比例。2、按方差比例从大到小排序。3、选择前 k 个主成分，使它们的累计方差比例达到或超过阈值。",
   "id": "b347c84a51376d3"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-10T09:13:11.014458Z",
     "start_time": "2025-01-10T09:13:11.008825Z"
    }
   },
   "cell_type": "code",
   "source": [
     "pca=PCA(n_components=0.95)# A float in (0,1) means: keep enough principal components to explain that fraction of the variance (90-95% is a common industry choice).\n",
     "original_data=np.array([[2, 8, 4, 5],[6, 3, 0, 8],[5, 4, 9, 1]])\n",
     "data = pca.fit_transform(original_data)\n",
     "print(data)\n",
     "print(type(data))\n",
     "print(pca.explained_variance_ratio_.sum())# Fraction of the total variance explained by the kept components."
   ],
   "id": "3accd64ee5f0beef",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[-1.28620952e-15  3.82970843e+00]\n",
      " [-5.74456265e+00 -1.91485422e+00]\n",
      " [ 5.74456265e+00 -1.91485422e+00]]\n",
      "<class 'numpy.ndarray'>\n",
      "1.0\n"
     ]
    }
   ],
   "execution_count": 25
  },
  {
   "metadata": {},
   "cell_type": "markdown",
    "source": "load及fetch数据加载：load直接加载到内存，数据集比较小，并不会保存到本地磁盘；fetch数据集比较大，下载下来后会保存在本地磁盘，下一次就不会再连接sklearn的服务器",
   "id": "9e3e7f33b1811d9e"
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "load",
   "id": "93d340df91c165a7"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-10T09:13:11.043705Z",
     "start_time": "2025-01-10T09:13:11.037873Z"
    }
   },
   "cell_type": "code",
   "source": [
     "li=load_iris()# Load the iris dataset (small; shipped with sklearn, loaded straight into memory).\n",
     "print(li.DESCR)# Dataset description (details useful for spotting missing values, outliers, etc.)."
   ],
   "id": "a923b6963ee7d43d",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      ".. _iris_dataset:\n",
      "\n",
      "Iris plants dataset\n",
      "--------------------\n",
      "\n",
      "**Data Set Characteristics:**\n",
      "\n",
      ":Number of Instances: 150 (50 in each of three classes)\n",
      ":Number of Attributes: 4 numeric, predictive attributes and the class\n",
      ":Attribute Information:\n",
      "    - sepal length in cm\n",
      "    - sepal width in cm\n",
      "    - petal length in cm\n",
      "    - petal width in cm\n",
      "    - class:\n",
      "            - Iris-Setosa\n",
      "            - Iris-Versicolour\n",
      "            - Iris-Virginica\n",
      "\n",
      ":Summary Statistics:\n",
      "\n",
      "============== ==== ==== ======= ===== ====================\n",
      "                Min  Max   Mean    SD   Class Correlation\n",
      "============== ==== ==== ======= ===== ====================\n",
      "sepal length:   4.3  7.9   5.84   0.83    0.7826\n",
      "sepal width:    2.0  4.4   3.05   0.43   -0.4194\n",
      "petal length:   1.0  6.9   3.76   1.76    0.9490  (high!)\n",
      "petal width:    0.1  2.5   1.20   0.76    0.9565  (high!)\n",
      "============== ==== ==== ======= ===== ====================\n",
      "\n",
      ":Missing Attribute Values: None\n",
      ":Class Distribution: 33.3% for each of 3 classes.\n",
      ":Creator: R.A. Fisher\n",
      ":Donor: Michael Marshall (MARSHALL%PLU@io.arc.nasa.gov)\n",
      ":Date: July, 1988\n",
      "\n",
      "The famous Iris database, first used by Sir R.A. Fisher. The dataset is taken\n",
      "from Fisher's paper. Note that it's the same as in R, but not as in the UCI\n",
      "Machine Learning Repository, which has two wrong data points.\n",
      "\n",
      "This is perhaps the best known database to be found in the\n",
      "pattern recognition literature.  Fisher's paper is a classic in the field and\n",
      "is referenced frequently to this day.  (See Duda & Hart, for example.)  The\n",
      "data set contains 3 classes of 50 instances each, where each class refers to a\n",
      "type of iris plant.  One class is linearly separable from the other 2; the\n",
      "latter are NOT linearly separable from each other.\n",
      "\n",
      ".. dropdown:: References\n",
      "\n",
      "  - Fisher, R.A. \"The use of multiple measurements in taxonomic problems\"\n",
      "    Annual Eugenics, 7, Part II, 179-188 (1936); also in \"Contributions to\n",
      "    Mathematical Statistics\" (John Wiley, NY, 1950).\n",
      "  - Duda, R.O., & Hart, P.E. (1973) Pattern Classification and Scene Analysis.\n",
      "    (Q327.D83) John Wiley & Sons.  ISBN 0-471-22361-1.  See page 218.\n",
      "  - Dasarathy, B.V. (1980) \"Nosing Around the Neighborhood: A New System\n",
      "    Structure and Classification Rule for Recognition in Partially Exposed\n",
      "    Environments\".  IEEE Transactions on Pattern Analysis and Machine\n",
      "    Intelligence, Vol. PAMI-2, No. 1, 67-71.\n",
      "  - Gates, G.W. (1972) \"The Reduced Nearest Neighbor Rule\".  IEEE Transactions\n",
      "    on Information Theory, May 1972, 431-433.\n",
      "  - See also: 1988 MLC Proceedings, 54-64.  Cheeseman et al\"s AUTOCLASS II\n",
      "    conceptual clustering system finds 3 classes in the data.\n",
      "  - Many, many more ...\n",
      "\n"
     ]
    }
   ],
   "execution_count": 26
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-10T09:13:11.062054Z",
     "start_time": "2025-01-10T09:13:11.053842Z"
    }
   },
   "cell_type": "code",
   "source": [
     "print(li.data.shape)# Feature matrix shape: (150, 4) -> 150 samples, 4 features each.\n",
     "print(li.feature_names)# Feature names (shows how many features there are).\n",
     "print(li.data)# Feature values of every sample. NOTE(review): prints the full array; li.data[:5] would keep the output compact."
   ],
   "id": "f2f3b5aa6c98ddec",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(150, 4)\n",
      "['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)']\n",
      "[[5.1 3.5 1.4 0.2]\n",
      " [4.9 3.  1.4 0.2]\n",
      " [4.7 3.2 1.3 0.2]\n",
      " [4.6 3.1 1.5 0.2]\n",
      " [5.  3.6 1.4 0.2]\n",
      " [5.4 3.9 1.7 0.4]\n",
      " [4.6 3.4 1.4 0.3]\n",
      " [5.  3.4 1.5 0.2]\n",
      " [4.4 2.9 1.4 0.2]\n",
      " [4.9 3.1 1.5 0.1]\n",
      " [5.4 3.7 1.5 0.2]\n",
      " [4.8 3.4 1.6 0.2]\n",
      " [4.8 3.  1.4 0.1]\n",
      " [4.3 3.  1.1 0.1]\n",
      " [5.8 4.  1.2 0.2]\n",
      " [5.7 4.4 1.5 0.4]\n",
      " [5.4 3.9 1.3 0.4]\n",
      " [5.1 3.5 1.4 0.3]\n",
      " [5.7 3.8 1.7 0.3]\n",
      " [5.1 3.8 1.5 0.3]\n",
      " [5.4 3.4 1.7 0.2]\n",
      " [5.1 3.7 1.5 0.4]\n",
      " [4.6 3.6 1.  0.2]\n",
      " [5.1 3.3 1.7 0.5]\n",
      " [4.8 3.4 1.9 0.2]\n",
      " [5.  3.  1.6 0.2]\n",
      " [5.  3.4 1.6 0.4]\n",
      " [5.2 3.5 1.5 0.2]\n",
      " [5.2 3.4 1.4 0.2]\n",
      " [4.7 3.2 1.6 0.2]\n",
      " [4.8 3.1 1.6 0.2]\n",
      " [5.4 3.4 1.5 0.4]\n",
      " [5.2 4.1 1.5 0.1]\n",
      " [5.5 4.2 1.4 0.2]\n",
      " [4.9 3.1 1.5 0.2]\n",
      " [5.  3.2 1.2 0.2]\n",
      " [5.5 3.5 1.3 0.2]\n",
      " [4.9 3.6 1.4 0.1]\n",
      " [4.4 3.  1.3 0.2]\n",
      " [5.1 3.4 1.5 0.2]\n",
      " [5.  3.5 1.3 0.3]\n",
      " [4.5 2.3 1.3 0.3]\n",
      " [4.4 3.2 1.3 0.2]\n",
      " [5.  3.5 1.6 0.6]\n",
      " [5.1 3.8 1.9 0.4]\n",
      " [4.8 3.  1.4 0.3]\n",
      " [5.1 3.8 1.6 0.2]\n",
      " [4.6 3.2 1.4 0.2]\n",
      " [5.3 3.7 1.5 0.2]\n",
      " [5.  3.3 1.4 0.2]\n",
      " [7.  3.2 4.7 1.4]\n",
      " [6.4 3.2 4.5 1.5]\n",
      " [6.9 3.1 4.9 1.5]\n",
      " [5.5 2.3 4.  1.3]\n",
      " [6.5 2.8 4.6 1.5]\n",
      " [5.7 2.8 4.5 1.3]\n",
      " [6.3 3.3 4.7 1.6]\n",
      " [4.9 2.4 3.3 1. ]\n",
      " [6.6 2.9 4.6 1.3]\n",
      " [5.2 2.7 3.9 1.4]\n",
      " [5.  2.  3.5 1. ]\n",
      " [5.9 3.  4.2 1.5]\n",
      " [6.  2.2 4.  1. ]\n",
      " [6.1 2.9 4.7 1.4]\n",
      " [5.6 2.9 3.6 1.3]\n",
      " [6.7 3.1 4.4 1.4]\n",
      " [5.6 3.  4.5 1.5]\n",
      " [5.8 2.7 4.1 1. ]\n",
      " [6.2 2.2 4.5 1.5]\n",
      " [5.6 2.5 3.9 1.1]\n",
      " [5.9 3.2 4.8 1.8]\n",
      " [6.1 2.8 4.  1.3]\n",
      " [6.3 2.5 4.9 1.5]\n",
      " [6.1 2.8 4.7 1.2]\n",
      " [6.4 2.9 4.3 1.3]\n",
      " [6.6 3.  4.4 1.4]\n",
      " [6.8 2.8 4.8 1.4]\n",
      " [6.7 3.  5.  1.7]\n",
      " [6.  2.9 4.5 1.5]\n",
      " [5.7 2.6 3.5 1. ]\n",
      " [5.5 2.4 3.8 1.1]\n",
      " [5.5 2.4 3.7 1. ]\n",
      " [5.8 2.7 3.9 1.2]\n",
      " [6.  2.7 5.1 1.6]\n",
      " [5.4 3.  4.5 1.5]\n",
      " [6.  3.4 4.5 1.6]\n",
      " [6.7 3.1 4.7 1.5]\n",
      " [6.3 2.3 4.4 1.3]\n",
      " [5.6 3.  4.1 1.3]\n",
      " [5.5 2.5 4.  1.3]\n",
      " [5.5 2.6 4.4 1.2]\n",
      " [6.1 3.  4.6 1.4]\n",
      " [5.8 2.6 4.  1.2]\n",
      " [5.  2.3 3.3 1. ]\n",
      " [5.6 2.7 4.2 1.3]\n",
      " [5.7 3.  4.2 1.2]\n",
      " [5.7 2.9 4.2 1.3]\n",
      " [6.2 2.9 4.3 1.3]\n",
      " [5.1 2.5 3.  1.1]\n",
      " [5.7 2.8 4.1 1.3]\n",
      " [6.3 3.3 6.  2.5]\n",
      " [5.8 2.7 5.1 1.9]\n",
      " [7.1 3.  5.9 2.1]\n",
      " [6.3 2.9 5.6 1.8]\n",
      " [6.5 3.  5.8 2.2]\n",
      " [7.6 3.  6.6 2.1]\n",
      " [4.9 2.5 4.5 1.7]\n",
      " [7.3 2.9 6.3 1.8]\n",
      " [6.7 2.5 5.8 1.8]\n",
      " [7.2 3.6 6.1 2.5]\n",
      " [6.5 3.2 5.1 2. ]\n",
      " [6.4 2.7 5.3 1.9]\n",
      " [6.8 3.  5.5 2.1]\n",
      " [5.7 2.5 5.  2. ]\n",
      " [5.8 2.8 5.1 2.4]\n",
      " [6.4 3.2 5.3 2.3]\n",
      " [6.5 3.  5.5 1.8]\n",
      " [7.7 3.8 6.7 2.2]\n",
      " [7.7 2.6 6.9 2.3]\n",
      " [6.  2.2 5.  1.5]\n",
      " [6.9 3.2 5.7 2.3]\n",
      " [5.6 2.8 4.9 2. ]\n",
      " [7.7 2.8 6.7 2. ]\n",
      " [6.3 2.7 4.9 1.8]\n",
      " [6.7 3.3 5.7 2.1]\n",
      " [7.2 3.2 6.  1.8]\n",
      " [6.2 2.8 4.8 1.8]\n",
      " [6.1 3.  4.9 1.8]\n",
      " [6.4 2.8 5.6 2.1]\n",
      " [7.2 3.  5.8 1.6]\n",
      " [7.4 2.8 6.1 1.9]\n",
      " [7.9 3.8 6.4 2. ]\n",
      " [6.4 2.8 5.6 2.2]\n",
      " [6.3 2.8 5.1 1.5]\n",
      " [6.1 2.6 5.6 1.4]\n",
      " [7.7 3.  6.1 2.3]\n",
      " [6.3 3.4 5.6 2.4]\n",
      " [6.4 3.1 5.5 1.8]\n",
      " [6.  3.  4.8 1.8]\n",
      " [6.9 3.1 5.4 2.1]\n",
      " [6.7 3.1 5.6 2.4]\n",
      " [6.9 3.1 5.1 2.3]\n",
      " [5.8 2.7 5.1 1.9]\n",
      " [6.8 3.2 5.9 2.3]\n",
      " [6.7 3.3 5.7 2.5]\n",
      " [6.7 3.  5.2 2.3]\n",
      " [6.3 2.5 5.  1.9]\n",
      " [6.5 3.  5.2 2. ]\n",
      " [6.2 3.4 5.4 2.3]\n",
      " [5.9 3.  5.1 1.8]]\n"
     ]
    }
   ],
   "execution_count": 27
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-10T09:13:11.102022Z",
     "start_time": "2025-01-10T09:13:11.097918Z"
    }
   },
   "cell_type": "code",
   "source": [
     "print(li.target.shape)# Label array shape: (150,).\n",
     "print(li.target_names)# Label (class) names; shows how many classes there are.\n",
     "print(li.target)# Label value of every sample."
   ],
   "id": "bfc870d419d8a6e9",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(150,)\n",
      "['setosa' 'versicolor' 'virginica']\n",
      "[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      " 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n",
      " 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2\n",
      " 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2\n",
      " 2 2]\n"
     ]
    }
   ],
   "execution_count": 28
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "train_test_split样本切分:以鸢尾花数据集为例",
   "id": "b3bcbf3f013e3408"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-10T09:13:11.108609Z",
     "start_time": "2025-01-10T09:13:11.103032Z"
    }
   },
   "cell_type": "code",
   "source": [
     "\"\"\"1. x_train, x_test, y_train, y_test are the training features, test features, training labels and test labels; the order of the returned values must be kept.\n",
     "2. test_size: fraction of samples assigned to the test split (0.25 here).\n",
     "3. Splits would otherwise vary between runs; random_state fixes the shuffling so the same train/test split is produced every time.\n",
     "\"\"\"\n",
     "x_train, x_test, y_train, y_test = train_test_split(li.data, li.target, test_size=0.25, random_state=1)\n",
     "print(\"训练集特征值shape\", x_train.shape)# Shows how many samples ended up in the training set.\n",
     "print(\"测试集特征值shape\", x_test.shape)"
   ],
   "id": "84663b9691dc3eb7",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "训练集特征值shape (112, 4)\n",
      "测试集特征值shape (38, 4)\n"
     ]
    }
   ],
   "execution_count": 29
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "fetch",
   "id": "5ec0148c3c47bbbb"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-10T09:13:11.390103Z",
     "start_time": "2025-01-10T09:13:11.122146Z"
    }
   },
   "cell_type": "code",
   "source": [
     "news=fetch_20newsgroups(subset='all', data_home='..\\\\data')# subset selects which split to fetch (default 'train'); 'all' fetches train+test. data_home is the local cache directory: if the data is already cached there, nothing is downloaded. NOTE(review): the backslash path is Windows-specific; '../data' would be portable.\n",
     "print(news.data[0])# Inspect the first sample. This dataset has no precomputed features: each sample is raw text.\n",
     "print(news.target_names)# Label names.\n",
     "print(len(news.target_names))# Number of distinct labels.\n",
     "print(news.target)# Label value of every sample (long arrays are printed with the middle elided)."
   ],
   "id": "6fe40c7a6335a602",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "From: Mamatha Devineni Ratnam <mr47+@andrew.cmu.edu>\n",
      "Subject: Pens fans reactions\n",
      "Organization: Post Office, Carnegie Mellon, Pittsburgh, PA\n",
      "Lines: 12\n",
      "NNTP-Posting-Host: po4.andrew.cmu.edu\n",
      "\n",
      "\n",
      "\n",
      "I am sure some bashers of Pens fans are pretty confused about the lack\n",
      "of any kind of posts about the recent Pens massacre of the Devils. Actually,\n",
      "I am  bit puzzled too and a bit relieved. However, I am going to put an end\n",
      "to non-PIttsburghers' relief with a bit of praise for the Pens. Man, they\n",
      "are killing those Devils worse than I thought. Jagr just showed you why\n",
      "he is much better than his regular season stats. He is also a lot\n",
      "fo fun to watch in the playoffs. Bowman should let JAgr have a lot of\n",
      "fun in the next couple of games since the Pens are going to beat the pulp out of Jersey anyway. I was very disappointed not to see the Islanders lose the final\n",
      "regular season game.          PENS RULE!!!\n",
      "\n",
      "\n",
      "['alt.atheism', 'comp.graphics', 'comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware', 'comp.windows.x', 'misc.forsale', 'rec.autos', 'rec.motorcycles', 'rec.sport.baseball', 'rec.sport.hockey', 'sci.crypt', 'sci.electronics', 'sci.med', 'sci.space', 'soc.religion.christian', 'talk.politics.guns', 'talk.politics.mideast', 'talk.politics.misc', 'talk.religion.misc']\n",
      "20\n",
      "[10  3 17 ...  3  1  7]\n"
     ]
    }
   ],
   "execution_count": 30
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
