{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.feature_extraction import DictVectorizer\n",
    "from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n",
    "import jieba"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 字典特征提取"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def dict_demo():\n",
    "    \"\"\"\n",
    "    字典提取\n",
    "    \"\"\"\n",
    "    # 获取数据\n",
    "    data = [{'city': '北京','temperature':100},\n",
    "            {'city': '上海','temperature':60},\n",
    "            {'city': '深圳','temperature':30}]\n",
    "    \n",
    "    # 实例化转换器 \n",
    "    transfer = DictVectorizer(sparse=True)  # sparse默认是True，表示使用三元组进行存储，\n",
    "    # sparse矩阵的特点：节省内存储空间，读取效率高\n",
    "    # 标准化\n",
    "    data = transfer.fit_transform(data)\n",
    "    \n",
    "    print(\"特征名字是:\\n\", transfer.get_feature_names())\n",
    "    print(data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "特征名字是:\n",
      " ['city=上海', 'city=北京', 'city=深圳', 'temperature']\n",
      "  (0, 1)\t1.0\n",
      "  (0, 3)\t100.0\n",
      "  (1, 0)\t1.0\n",
      "  (1, 3)\t60.0\n",
      "  (2, 2)\t1.0\n",
      "  (2, 3)\t30.0\n"
     ]
    }
   ],
   "source": [
     "# Run the dictionary feature-extraction demo defined above.\n",
     "dict_demo()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 文本特征提取（英文）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "def english_text_demo():\n",
    "    \"\"\"\n",
    "    文本特征提取（英文）\n",
    "    注意：只统计单词，不统计单个字母和字符\n",
    "    \"\"\"\n",
    "    data = [\"life is short,i like python\", \"life is too long,i dislike python\"]\n",
    "    \n",
    "    # 1. 创建转换器\n",
    "    # 注意这个转换器没有sparse属性，如果要查看矩阵存储则要用toarray()方法\n",
    "    transfer = CountVectorizer(stop_words=['dislike'])  # stop_words表示不统计的单词\n",
    "    \n",
    "    # 2. 标准化\n",
    "    transfer_data = transfer.fit_transform(data)\n",
    "    \n",
    "    print(\"特征名字是:\\n\", transfer.get_feature_names())\n",
    "    print(\"存储矩阵是:\\n\", transfer_data.toarray())\n",
    "    print(transfer_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "特征名字是:\n",
      " ['dislike', 'is', 'life', 'like', 'long', 'python', 'short', 'too']\n",
      "存储矩阵是:\n",
      " [[0 1 1 1 0 1 1 0]\n",
      " [1 1 1 0 1 1 0 1]]\n",
      "  (0, 2)\t1\n",
      "  (0, 1)\t1\n",
      "  (0, 6)\t1\n",
      "  (0, 3)\t1\n",
      "  (0, 5)\t1\n",
      "  (1, 2)\t1\n",
      "  (1, 1)\t1\n",
      "  (1, 5)\t1\n",
      "  (1, 7)\t1\n",
      "  (1, 4)\t1\n",
      "  (1, 0)\t1\n"
     ]
    }
   ],
   "source": [
     "# Run the English bag-of-words demo defined above.\n",
     "english_text_demo()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 文本特征提取（中文）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [],
   "source": [
     "\"\"\"\n",
     "按照外国人的习惯，如果要分词是按空格为标准进行划分，单个字母或字符不参与划分\n",
     "\"\"\"\n",
     "# (The bare string above notes: as with English text, tokenisation splits\n",
     "# on spaces, and single letters/characters are not counted as tokens.)\n",
     "# jieba word segmentation helper\n",
    "def jieba_demo(sen):\n",
    "    \"\"\"\n",
    "    jieba.cut(要分割的中文字符串),返回的是一个对象\n",
    "    \"\"\"\n",
    "#     print(\" \".join(list(jieba.cut(sen))))\n",
    "    return \" \".join(list(jieba.cut(sen)))\n",
    "\n",
    "\n",
    "def chinese_text_demo():\n",
    "    \"\"\"\n",
    "    文本特征提取（中文）\n",
    "    \"\"\"\n",
    "    data = [\"一种还是一种今天很残酷，明天更残酷，后天很美好，但绝对大部分是死在明天晚上，所以每个人不要放弃今天。\",\n",
    "            \"我们看到的从很远星系来的光是在几百万年之前发出的，这样当我们看到宇宙时，我们是在看它的过去。\",\n",
    "            \"如果只用一种方式了解某样事物，你就不会真正了解它。了解事物真正含义的秘密取决于如何将其与我们所了解的事物相联系。\"]\n",
    "    \n",
    "    # 划分数据\n",
    "    new_list = []\n",
    "    for temp in data:\n",
    "        new_list.append(jieba_demo(temp))\n",
    "    print(new_list)\n",
    "    \n",
    "    # 创建转换器\n",
    "    transfer = CountVectorizer()  # 可以通过stop_word设置屏蔽词\n",
    "    \n",
    "    transfer_data = transfer.fit_transform(new_list)\n",
    "    print(\"特征名字是:\\n\", transfer.get_feature_names())\n",
    "    print(\"文本特征抽取的结果为:\\n\", transfer_data.toarray())\n",
    "    print(transfer_data)\n",
    "    \n",
    "\n",
    "# tfidf的使用\n",
    "def tfidf_demo():\n",
    "    \"\"\"\n",
    "    对中文进行特征抽取\n",
    "    \"\"\"\n",
    "    data = [\"一种还是一种今天很残酷，明天更残酷，后天很美好，但绝对大部分是死在明天晚上，所以每个人不要放弃今天。\",\n",
    "            \"我们看到的从很远星系来的光是在几百万年之前发出的，这样当我们看到宇宙时，我们是在看它的过去。\",\n",
    "            \"如果只用一种方式了解某样事物，你就不会真正了解它。了解事物真正含义的秘密取决于如何将其与我们所了解的事物相联系。\"]\n",
    "    \n",
    "    new_list = []\n",
    "    for temp in data:\n",
    "        new_list.append(jieba_demo(temp))\n",
    "    \n",
    "    transfer = TfidfVectorizer()\n",
    "    transfer_data = transfer.fit_transform(new_list)\n",
    "    \n",
    "    print(\"特征名字为:\\n\", transfer.get_feature_names())\n",
    "    print(\"特征抽取的结果为:\\n\", transfer_data.toarray())  # 这里的结果先带过，不用纠结\n",
    "    print(transfer_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "特征名字为:\n",
      " ['一种', '不会', '不要', '之前', '了解', '事物', '今天', '光是在', '几百万年', '发出', '取决于', '只用', '后天', '含义', '大部分', '如何', '如果', '宇宙', '我们', '所以', '放弃', '方式', '明天', '星系', '晚上', '某样', '残酷', '每个', '看到', '真正', '秘密', '绝对', '美好', '联系', '过去', '还是', '这样']\n",
      "特征抽取的结果为:\n",
      " [[0.30847454 0.         0.20280347 0.         0.         0.\n",
      "  0.40560694 0.         0.         0.         0.         0.\n",
      "  0.20280347 0.         0.20280347 0.         0.         0.\n",
      "  0.         0.20280347 0.20280347 0.         0.40560694 0.\n",
      "  0.20280347 0.         0.40560694 0.20280347 0.         0.\n",
      "  0.         0.20280347 0.20280347 0.         0.         0.20280347\n",
      "  0.        ]\n",
      " [0.         0.         0.         0.2410822  0.         0.\n",
      "  0.         0.2410822  0.2410822  0.2410822  0.         0.\n",
      "  0.         0.         0.         0.         0.         0.2410822\n",
      "  0.55004769 0.         0.         0.         0.         0.2410822\n",
      "  0.         0.         0.         0.         0.48216441 0.\n",
      "  0.         0.         0.         0.         0.2410822  0.\n",
      "  0.2410822 ]\n",
      " [0.12001469 0.15780489 0.         0.         0.63121956 0.47341467\n",
      "  0.         0.         0.         0.         0.15780489 0.15780489\n",
      "  0.         0.15780489 0.         0.15780489 0.15780489 0.\n",
      "  0.12001469 0.         0.         0.15780489 0.         0.\n",
      "  0.         0.15780489 0.         0.         0.         0.31560978\n",
      "  0.15780489 0.         0.         0.15780489 0.         0.\n",
      "  0.        ]]\n",
      "  (0, 20)\t0.20280347192512724\n",
      "  (0, 2)\t0.20280347192512724\n",
      "  (0, 27)\t0.20280347192512724\n",
      "  (0, 19)\t0.20280347192512724\n",
      "  (0, 24)\t0.20280347192512724\n",
      "  (0, 14)\t0.20280347192512724\n",
      "  (0, 31)\t0.20280347192512724\n",
      "  (0, 32)\t0.20280347192512724\n",
      "  (0, 12)\t0.20280347192512724\n",
      "  (0, 22)\t0.4056069438502545\n",
      "  (0, 26)\t0.4056069438502545\n",
      "  (0, 6)\t0.4056069438502545\n",
      "  (0, 35)\t0.20280347192512724\n",
      "  (0, 0)\t0.3084745355009243\n",
      "  (1, 34)\t0.24108220270067757\n",
      "  (1, 17)\t0.24108220270067757\n",
      "  (1, 36)\t0.24108220270067757\n",
      "  (1, 9)\t0.24108220270067757\n",
      "  (1, 3)\t0.24108220270067757\n",
      "  (1, 8)\t0.24108220270067757\n",
      "  (1, 7)\t0.24108220270067757\n",
      "  (1, 23)\t0.24108220270067757\n",
      "  (1, 28)\t0.48216440540135513\n",
      "  (1, 18)\t0.5500476874707075\n",
      "  (2, 33)\t0.15780489008821472\n",
      "  (2, 15)\t0.15780489008821472\n",
      "  (2, 10)\t0.15780489008821472\n",
      "  (2, 30)\t0.15780489008821472\n",
      "  (2, 13)\t0.15780489008821472\n",
      "  (2, 29)\t0.31560978017642943\n",
      "  (2, 1)\t0.15780489008821472\n",
      "  (2, 5)\t0.4734146702646441\n",
      "  (2, 25)\t0.15780489008821472\n",
      "  (2, 4)\t0.6312195603528589\n",
      "  (2, 21)\t0.15780489008821472\n",
      "  (2, 11)\t0.15780489008821472\n",
      "  (2, 16)\t0.15780489008821472\n",
      "  (2, 18)\t0.1200146864046492\n",
      "  (2, 0)\t0.1200146864046492\n"
     ]
    }
   ],
   "source": [
     "# Alternative demos (uncomment to run):\n",
     "# jieba_demo('我爱你中国')\n",
     "# chinese_text_demo()\n",
     "tfidf_demo()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['一种 还是 一种 今天 很 残酷 ， 明天 更 残酷 ， 后天 很 美好 ， 但 绝对 大部分 是 死 在 明天 晚上 ， 所以 每个 人 不要 放弃 今天 。', '我们 看到 的 从 很 远 星系 来 的 光是在 几百万年 之前 发出 的 ， 这样 当 我们 看到 宇宙 时 ， 我们 是 在 看 它 的 过去 。', '如果 只用 一种 方式 了解 某样 事物 ， 你 就 不会 真正 了解 它 。 了解 事物 真正 含义 的 秘密 取决于 如何 将 其 与 我们 所 了解 的 事物 相 联系 。']\n",
      "特征名字是:\n",
      " ['一种', '不会', '不要', '之前', '了解', '事物', '今天', '光是在', '几百万年', '发出', '取决于', '只用', '后天', '含义', '大部分', '如何', '如果', '宇宙', '我们', '所以', '放弃', '方式', '明天', '星系', '晚上', '某样', '残酷', '每个', '看到', '真正', '秘密', '绝对', '美好', '联系', '过去', '还是', '这样']\n",
      "文本特征抽取的结果为:\n",
      " [[2 0 1 0 0 0 2 0 0 0 0 0 1 0 1 0 0 0 0 1 1 0 2 0 1 0 2 1 0 0 0 1 1 0 0 1\n",
      "  0]\n",
      " [0 0 0 1 0 0 0 1 1 1 0 0 0 0 0 0 0 1 3 0 0 0 0 1 0 0 0 0 2 0 0 0 0 0 1 0\n",
      "  1]\n",
      " [1 1 0 0 4 3 0 0 0 0 1 1 0 1 0 1 1 0 1 0 0 1 0 0 0 1 0 0 0 2 1 0 0 1 0 0\n",
      "  0]]\n",
      "  (0, 0)\t2\n",
      "  (0, 35)\t1\n",
      "  (0, 6)\t2\n",
      "  (0, 26)\t2\n",
      "  (0, 22)\t2\n",
      "  (0, 12)\t1\n",
      "  (0, 32)\t1\n",
      "  (0, 31)\t1\n",
      "  (0, 14)\t1\n",
      "  (0, 24)\t1\n",
      "  (0, 19)\t1\n",
      "  (0, 27)\t1\n",
      "  (0, 2)\t1\n",
      "  (0, 20)\t1\n",
      "  (1, 18)\t3\n",
      "  (1, 28)\t2\n",
      "  (1, 23)\t1\n",
      "  (1, 7)\t1\n",
      "  (1, 8)\t1\n",
      "  (1, 3)\t1\n",
      "  (1, 9)\t1\n",
      "  (1, 36)\t1\n",
      "  (1, 17)\t1\n",
      "  (1, 34)\t1\n",
      "  (2, 0)\t1\n",
      "  (2, 18)\t1\n",
      "  (2, 16)\t1\n",
      "  (2, 11)\t1\n",
      "  (2, 21)\t1\n",
      "  (2, 4)\t4\n",
      "  (2, 25)\t1\n",
      "  (2, 5)\t3\n",
      "  (2, 1)\t1\n",
      "  (2, 29)\t2\n",
      "  (2, 13)\t1\n",
      "  (2, 30)\t1\n",
      "  (2, 10)\t1\n",
      "  (2, 15)\t1\n",
      "  (2, 33)\t1\n"
     ]
    }
   ],
   "source": [
     "# Run the Chinese bag-of-words demo defined above.\n",
     "chinese_text_demo()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.7"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
