{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
        "<h3>文本特征抽取TfidfVectorizer</h3>\n",
        "<span>\n",
        "TF-idf<br>&nbsp;&nbsp;\n",
        "主要思想:如果某个词或短语在一篇文章中出现概率比较高,并且在其他文章中出现比较少,则认为此词或短语具有很好的类别区分能力,适合用来分类.<br>&nbsp;&nbsp;\n",
        "<strong>主要作用:用以评估一个词对于一个文件集或一个语料库中的其中一份文件的重要程度.</strong><br>\n",
        "词频( term frequency tf )<br>&nbsp;&nbsp;\n",
        "某一个特定词在该文件中出现的频率.<br>\n",
        "逆向文档频率<br>&nbsp;&nbsp;\n",
        "<strong>一个特定词的权重,某一特定词的idf,可以由总文件数目除以包含该词语之文件的数目,再将得到的商取以10为底的对数得到.</strong>\n",
       "</span>\n",
       "<img src = \"1-04.png\">\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "%%html\n",
     "<h3>文本特征抽取TfidfVectorizer</h3>\n",
     "<span>\n",
     "TF-idf<br>&nbsp;&nbsp;\n",
     "主要思想:如果某个词或短语在一篇文章中出现概率比较高,并且在其他文章中出现比较少,则认为此词或短语具有很好的类别区分能力,适合用来分类.<br>&nbsp;&nbsp;\n",
     "<strong>主要作用:用以评估一个词对于一个文件集或一个语料库中的其中一份文件的重要程度.</strong><br>\n",
     "词频( term frequency tf )<br>&nbsp;&nbsp;\n",
     "某一个特定词在该文件中出现的频率.<br>\n",
     "逆向文档频率<br>&nbsp;&nbsp;\n",
     "<strong>一个特定词的权重,某一特定词的idf,可以由总文件数目除以包含该词语之文件的数目,再将得到的商取以10为底的对数得到.</strong>\n",
    "</span>\n",
    "<img src = \"1-04.png\">"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "import jieba \n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "data = [\n",
    "    \"今天很残酷,明天更残酷,后天很美好,但绝大部分是死在明天的晚上,所以每个人不要放弃.\",\n",
    "    \"我们看到的从很远星系来的光时在几百万年之前发出的,这样当我们看到于轴时,我们是在看它的过去.\",\n",
    "    \"如果只用一种方式了解某样事物,你就不会真正了解它.了解事物真正的含义的秘密取决于如何将其与我们所了解的事物相联系.\"\n",
    "]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def cut_word( text ):\n",
    "    \"\"\"jieba分词\"\"\"\n",
    "    return ( \" \".join(list( jieba.cut( text ))))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "data_new = []\n",
    "for sent in data:\n",
    "    result = cut_word( sent )\n",
    "    data_new.append( result )\n",
    "print(\"data_new\",data_new)\n",
    "transfer = TfidfVectorizer(stop_words=[\".\",\",\"])\n",
    "data_result = transfer.fit_transform( data_new )\n",
    "print(\"data_result\\n\",data_result.toarray())\n",
    "print(\"特征名字:\\n\",transfer.get_feature_names())"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
