{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "ename": "FileNotFoundError",
     "evalue": "[Errno 2] No such file or directory: '根据原始数据获得的A1-D4//第二种情况_0819//mi.txt'",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mFileNotFoundError\u001b[0m                         Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-1-79d501d1494d>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m     79\u001b[0m     \u001b[1;31m# print(tf_idf_mat.toarray())\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     80\u001b[0m     \u001b[0mtf_idf_array\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtf_idf_mat\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtoarray\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 81\u001b[1;33m     \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msavetxt\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'根据原始数据获得的A1-D4//第二种情况_0819//{}.txt'\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfileName\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtf_idf_array\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfmt\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'%.5f'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdelimiter\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'\\t'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     82\u001b[0m     \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"{}构造并保存完成！\"\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfileName\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Anaconda\\lib\\site-packages\\numpy\\lib\\npyio.py\u001b[0m in \u001b[0;36msavetxt\u001b[1;34m(fname, X, fmt, delimiter, newline, header, footer, comments, encoding)\u001b[0m\n\u001b[0;32m   1357\u001b[0m     \u001b[1;32mif\u001b[0m \u001b[0m_is_string_like\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1358\u001b[0m         \u001b[1;31m# datasource doesn't support creating a new file ...\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1359\u001b[1;33m         \u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'wt'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mclose\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1360\u001b[0m         \u001b[0mfh\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlib\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_datasource\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'wt'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mencoding\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mencoding\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1361\u001b[0m         \u001b[0mown_fh\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mTrue\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: '根据原始数据获得的A1-D4//第二种情况_0819//mi.txt'"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn.feature_extraction.text import CountVectorizer\n",
    "\n",
    "\"\"\"\n",
    "#语料\n",
    "corpus = [\n",
    "    'This is the first document.',\n",
    "    'This is the second second document.',\n",
    "    'And the third one.',\n",
    "    'Is this the first document?',\n",
    "]\n",
    "\n",
    "vectorizer = TfidfVectorizer()\n",
    "# 用X_train数据来fit\n",
    "vectorizer.fit(corpus)\n",
    "# 得到tfidf的矩阵\n",
    "tfidf_train = vectorizer.transform(corpus)\n",
    "\n",
    "tfidf_train.toarray()\n",
    "\n",
    "print(vectorizer.get_feature_names())\n",
    "print(tfidf_train.toarray())\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "def loadData(root, files):\n",
    "    \"\"\"Read every file in ``files`` (relative to ``root``) as UTF-8 text.\n",
    "\n",
    "    Returns a list of document strings (the corpus). Files that cannot be\n",
    "    read are reported and skipped so one bad file does not abort the load.\n",
    "    \"\"\"\n",
    "    data = []  # collected document texts\n",
    "    for fileName in files:\n",
    "        # os.path.join is portable, unlike the old root + '\\\\' + fileName concat.\n",
    "        path = os.path.join(root, fileName)\n",
    "        try:\n",
    "            # Context manager guarantees the handle is closed even if read() fails.\n",
    "            with open(path, 'r', encoding='utf-8') as file:\n",
    "                data.append(file.read())\n",
    "        except Exception as e:\n",
    "            # Report the offending file but keep loading the rest.\n",
    "            print(path)\n",
    "            print(e)\n",
    "    return data\n",
    "\n",
    "\"\"\"\n",
    "A1comp.os.ms-windows.misc  A2comp.sys.ibm.pc.hardware  A3rec.autos  A4rec.sport.baseball\n",
    "B1sci.crypt  B2sci.space  B3talk.politics.guns  B4talk.politics.misc\n",
    "C1comp.windows.x  C2comp.sys.mac.hardware  C3rec.motorcycles  C4rec.sport.hockey\n",
    "D1sci.electronics  D2sci.med  D3talk.politics.mideast  D4talk.religion.misc\n",
    "\"\"\"\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    # Build the path portably; the old literal \"mini_newsgroups\\D4...\" relied on\n",
    "    # the invalid escape \\D surviving as a literal backslash.\n",
    "    file_dir = os.path.join(\"mini_newsgroups\", \"D4talk.religion.misc\")\n",
    "    # Category label = first two chars of the folder name (e.g. \"D4\").\n",
    "    # The old hard-coded slice file_dir[32:34] silently produced \"mi\" (see the\n",
    "    # recorded traceback: '...//mi.txt').\n",
    "    fileName = os.path.basename(file_dir)[:2]\n",
    "    for root, dirs, files in os.walk(file_dir):\n",
    "        # root: current directory path; dirs: its sub-directories;\n",
    "        # files: its non-directory entries.\n",
    "        # NOTE(review): corpus is overwritten on each iteration, so only the\n",
    "        # last walked directory survives -- fine while file_dir has no\n",
    "        # sub-directories, but worth confirming for other datasets.\n",
    "        corpus = loadData(root, files)  # build the corpus for this directory\n",
    "\n",
    "    # Build the document-term TF-IDF matrix.\n",
    "    vectorizer = TfidfVectorizer(stop_words='english',\n",
    "                                 analyzer='word',\n",
    "                                 token_pattern=r'[a-zA-Z]{3,}',  # words of 3+ ASCII letters\n",
    "                                 max_features=334,  # keep top 334 terms (old comment wrongly said 1000)\n",
    "                                 lowercase=True,\n",
    "                                 smooth_idf=True)\n",
    "    tf_idf_mat = vectorizer.fit_transform(corpus)\n",
    "    tf_idf_array = tf_idf_mat.toarray()\n",
    "\n",
    "    # Create the output directory first: np.savetxt cannot create missing\n",
    "    # parent folders and raised FileNotFoundError here before this fix.\n",
    "    out_dir = os.path.join('根据原始数据获得的A1-D4', '第二种情况_0819')\n",
    "    os.makedirs(out_dir, exist_ok=True)\n",
    "    np.savetxt(os.path.join(out_dir, '{}.txt'.format(fileName)), tf_idf_array, fmt='%.5f', delimiter='\\t')\n",
    "    print(\"{}构造并保存完成！\".format(fileName))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[0.        , 0.        , 0.        , ..., 0.        , 0.        ,\n",
       "        0.        ],\n",
       "       [0.        , 0.        , 0.        , ..., 0.        , 0.        ,\n",
       "        0.04296371],\n",
       "       [0.11846653, 0.        , 0.        , ..., 0.        , 0.07180087,\n",
       "        0.        ],\n",
       "       ...,\n",
       "       [0.        , 0.        , 0.        , ..., 0.        , 0.        ,\n",
       "        0.04803483],\n",
       "       [0.        , 0.        , 0.        , ..., 0.        , 0.10637432,\n",
       "        0.03210709],\n",
       "       [0.        , 0.        , 0.        , ..., 0.        , 0.        ,\n",
       "        0.05560367]])"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tf_idf_array  # rich display of the TF-IDF matrix; depends on the previous cell having run"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
