{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2025-01-09T18:58:43.607474Z",
     "start_time": "2025-01-09T18:58:43.161798Z"
    }
   },
   "source": [
    "from sklearn.feature_extraction.text import CountVectorizer\n",
    "import pandas as pd\n",
    "from sklearn.tree import DecisionTreeClassifier, export_graphviz\n",
    "import graphviz\n",
    "\n",
    "# Sample text corpus with one class label per document.\n",
    "texts = [\n",
    "    \"This is the first document.\",\n",
    "    \"This document is the second document.\",\n",
    "    \"And this is the third one.\",\n",
    "    \"Is this the first document?\"\n",
    "]\n",
    "labels = [\"A\",\"B\",\"C\",\"A\"]\n",
    "\n",
    "# binary=True produces presence/absence (0/1) features rather than raw counts.\n",
    "vectorizer = CountVectorizer(binary=True)\n",
    "\n",
    "# Fit the vocabulary and build the document-term matrix.\n",
    "X = vectorizer.fit_transform(texts)\n",
    "\n",
    "# Fit a shallow decision tree on the binary term matrix.\n",
    "clf = DecisionTreeClassifier(random_state=1234, max_depth=5)\n",
    "clf.fit(X, labels)\n",
    "\n",
    "# class_names must follow clf.classes_ (the sorted unique classes), not the\n",
    "# raw per-sample label list, which has duplicates and the wrong length.\n",
    "dot_data = export_graphviz(clf, out_file=None,\n",
    "                           feature_names=vectorizer.get_feature_names_out(),\n",
    "                           class_names=clf.classes_,\n",
    "                           filled=True, rounded=True,\n",
    "                           special_characters=True)\n",
    "\n",
    "# Show the document-term matrix as a readable DataFrame.\n",
    "df = pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names_out())\n",
    "\n",
    "print(\"Feature names (vocabulary):\")\n",
    "print(vectorizer.get_feature_names_out())\n",
    "print(\"\\nTerm Frequency Matrix:\")\n",
    "print(df)\n",
    "\n",
    "graphviz.Source(dot_data)\n"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Feature names (vocabulary):\n",
      "['and' 'document' 'first' 'is' 'one' 'second' 'the' 'third' 'this']\n",
      "\n",
      "Term Frequency Matrix:\n",
      "   and  document  first  is  one  second  the  third  this\n",
      "0    0         1      1   1    0       0    1      0     1\n",
      "1    0         1      0   1    0       1    1      0     1\n",
      "2    1         0      0   1    1       0    1      1     1\n",
      "3    0         1      1   1    0       0    1      0     1\n"
     ]
    },
    {
     "data": {
      "image/svg+xml": "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.1//EN\"\n \"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd\">\n<!-- Generated by graphviz version 12.2.1 (20241206.2353)\n -->\n<!-- Title: Tree Pages: 1 -->\n<svg width=\"307pt\" height=\"325pt\"\n viewBox=\"0.00 0.00 307.00 324.50\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n<g id=\"graph0\" class=\"graph\" transform=\"scale(1 1) rotate(0) translate(4 320.5)\">\n<title>Tree</title>\n<polygon fill=\"white\" stroke=\"none\" points=\"-4,4 -4,-320.5 303,-320.5 303,4 -4,4\"/>\n<!-- 0 -->\n<g id=\"node1\" class=\"node\">\n<title>0</title>\n<path fill=\"#f6d5bd\" stroke=\"black\" d=\"M223,-316.5C223,-316.5 138,-316.5 138,-316.5 132,-316.5 126,-310.5 126,-304.5 126,-304.5 126,-241.75 126,-241.75 126,-235.75 132,-229.75 138,-229.75 138,-229.75 223,-229.75 223,-229.75 229,-229.75 235,-235.75 235,-241.75 235,-241.75 235,-304.5 235,-304.5 235,-310.5 229,-316.5 223,-316.5\"/>\n<text text-anchor=\"start\" x=\"152.75\" y=\"-299.2\" font-family=\"Helvetica,sans-Serif\" font-size=\"14.00\">first ≤ 0.5</text>\n<text text-anchor=\"start\" x=\"145.25\" y=\"-283.45\" font-family=\"Helvetica,sans-Serif\" font-size=\"14.00\">gini = 0.625</text>\n<text text-anchor=\"start\" x=\"143.38\" y=\"-267.7\" font-family=\"Helvetica,sans-Serif\" font-size=\"14.00\">samples = 4</text>\n<text text-anchor=\"start\" x=\"134\" y=\"-251.95\" font-family=\"Helvetica,sans-Serif\" font-size=\"14.00\">value = [2, 1, 1]</text>\n<text text-anchor=\"start\" x=\"153.12\" y=\"-236.2\" font-family=\"Helvetica,sans-Serif\" font-size=\"14.00\">class = A</text>\n</g>\n<!-- 1 -->\n<g id=\"node2\" class=\"node\">\n<title>1</title>\n<path fill=\"#ffffff\" stroke=\"black\" d=\"M160,-193.75C160,-193.75 75,-193.75 75,-193.75 69,-193.75 63,-187.75 63,-181.75 63,-181.75 63,-119 63,-119 63,-113 69,-107 75,-107 75,-107 160,-107 160,-107 166,-107 172,-113 
172,-119 172,-119 172,-181.75 172,-181.75 172,-187.75 166,-193.75 160,-193.75\"/>\n<text text-anchor=\"start\" x=\"78.88\" y=\"-176.45\" font-family=\"Helvetica,sans-Serif\" font-size=\"14.00\">second ≤ 0.5</text>\n<text text-anchor=\"start\" x=\"89.75\" y=\"-160.7\" font-family=\"Helvetica,sans-Serif\" font-size=\"14.00\">gini = 0.5</text>\n<text text-anchor=\"start\" x=\"80.38\" y=\"-144.95\" font-family=\"Helvetica,sans-Serif\" font-size=\"14.00\">samples = 2</text>\n<text text-anchor=\"start\" x=\"71\" y=\"-129.2\" font-family=\"Helvetica,sans-Serif\" font-size=\"14.00\">value = [0, 1, 1]</text>\n<text text-anchor=\"start\" x=\"89.75\" y=\"-113.45\" font-family=\"Helvetica,sans-Serif\" font-size=\"14.00\">class = B</text>\n</g>\n<!-- 0&#45;&gt;1 -->\n<g id=\"edge1\" class=\"edge\">\n<title>0&#45;&gt;1</title>\n<path fill=\"none\" stroke=\"black\" d=\"M158.29,-229.55C154.01,-221.35 149.47,-212.66 145.05,-204.17\"/>\n<polygon fill=\"black\" stroke=\"black\" points=\"148.15,-202.56 140.42,-195.31 141.95,-205.8 148.15,-202.56\"/>\n<text text-anchor=\"middle\" x=\"132.23\" y=\"-212.39\" font-family=\"Helvetica,sans-Serif\" font-size=\"14.00\">True</text>\n</g>\n<!-- 4 -->\n<g id=\"node5\" class=\"node\">\n<title>4</title>\n<path fill=\"#e58139\" stroke=\"black\" d=\"M287,-185.88C287,-185.88 202,-185.88 202,-185.88 196,-185.88 190,-179.88 190,-173.88 190,-173.88 190,-126.88 190,-126.88 190,-120.88 196,-114.88 202,-114.88 202,-114.88 287,-114.88 287,-114.88 293,-114.88 299,-120.88 299,-126.88 299,-126.88 299,-173.88 299,-173.88 299,-179.88 293,-185.88 287,-185.88\"/>\n<text text-anchor=\"start\" x=\"216.75\" y=\"-168.57\" font-family=\"Helvetica,sans-Serif\" font-size=\"14.00\">gini = 0.0</text>\n<text text-anchor=\"start\" x=\"207.38\" y=\"-152.82\" font-family=\"Helvetica,sans-Serif\" font-size=\"14.00\">samples = 2</text>\n<text text-anchor=\"start\" x=\"198\" y=\"-137.07\" font-family=\"Helvetica,sans-Serif\" font-size=\"14.00\">value = [2, 0, 0]</text>\n<text 
text-anchor=\"start\" x=\"217.12\" y=\"-121.33\" font-family=\"Helvetica,sans-Serif\" font-size=\"14.00\">class = A</text>\n</g>\n<!-- 0&#45;&gt;4 -->\n<g id=\"edge4\" class=\"edge\">\n<title>0&#45;&gt;4</title>\n<path fill=\"none\" stroke=\"black\" d=\"M203.06,-229.55C208.85,-218.64 215.09,-206.87 220.92,-195.86\"/>\n<polygon fill=\"black\" stroke=\"black\" points=\"223.85,-197.82 225.44,-187.34 217.66,-194.54 223.85,-197.82\"/>\n<text text-anchor=\"middle\" x=\"233.49\" y=\"-204.48\" font-family=\"Helvetica,sans-Serif\" font-size=\"14.00\">False</text>\n</g>\n<!-- 2 -->\n<g id=\"node3\" class=\"node\">\n<title>2</title>\n<path fill=\"#8139e5\" stroke=\"black\" d=\"M97,-71C97,-71 12,-71 12,-71 6,-71 0,-65 0,-59 0,-59 0,-12 0,-12 0,-6 6,0 12,0 12,0 97,0 97,0 103,0 109,-6 109,-12 109,-12 109,-59 109,-59 109,-65 103,-71 97,-71\"/>\n<text text-anchor=\"start\" x=\"26.75\" y=\"-53.7\" font-family=\"Helvetica,sans-Serif\" font-size=\"14.00\">gini = 0.0</text>\n<text text-anchor=\"start\" x=\"17.38\" y=\"-37.95\" font-family=\"Helvetica,sans-Serif\" font-size=\"14.00\">samples = 1</text>\n<text text-anchor=\"start\" x=\"8\" y=\"-22.2\" font-family=\"Helvetica,sans-Serif\" font-size=\"14.00\">value = [0, 0, 1]</text>\n<text text-anchor=\"start\" x=\"26.38\" y=\"-6.45\" font-family=\"Helvetica,sans-Serif\" font-size=\"14.00\">class = C</text>\n</g>\n<!-- 1&#45;&gt;2 -->\n<g id=\"edge2\" class=\"edge\">\n<title>1&#45;&gt;2</title>\n<path fill=\"none\" stroke=\"black\" d=\"M93.68,-106.7C89.03,-98.36 84.13,-89.59 79.44,-81.18\"/>\n<polygon fill=\"black\" stroke=\"black\" points=\"82.51,-79.51 74.58,-72.48 76.4,-82.92 82.51,-79.51\"/>\n</g>\n<!-- 3 -->\n<g id=\"node4\" class=\"node\">\n<title>3</title>\n<path fill=\"#39e581\" stroke=\"black\" d=\"M224,-71C224,-71 139,-71 139,-71 133,-71 127,-65 127,-59 127,-59 127,-12 127,-12 127,-6 133,0 139,0 139,0 224,0 224,0 230,0 236,-6 236,-12 236,-12 236,-59 236,-59 236,-65 230,-71 224,-71\"/>\n<text text-anchor=\"start\" x=\"153.75\" 
y=\"-53.7\" font-family=\"Helvetica,sans-Serif\" font-size=\"14.00\">gini = 0.0</text>\n<text text-anchor=\"start\" x=\"144.38\" y=\"-37.95\" font-family=\"Helvetica,sans-Serif\" font-size=\"14.00\">samples = 1</text>\n<text text-anchor=\"start\" x=\"135\" y=\"-22.2\" font-family=\"Helvetica,sans-Serif\" font-size=\"14.00\">value = [0, 1, 0]</text>\n<text text-anchor=\"start\" x=\"153.75\" y=\"-6.45\" font-family=\"Helvetica,sans-Serif\" font-size=\"14.00\">class = B</text>\n</g>\n<!-- 1&#45;&gt;3 -->\n<g id=\"edge3\" class=\"edge\">\n<title>1&#45;&gt;3</title>\n<path fill=\"none\" stroke=\"black\" d=\"M141.7,-106.7C146.48,-98.27 151.51,-89.39 156.32,-80.9\"/>\n<polygon fill=\"black\" stroke=\"black\" points=\"159.22,-82.9 161.1,-72.47 153.13,-79.45 159.22,-82.9\"/>\n</g>\n</g>\n</svg>\n",
      "text/plain": [
       "<graphviz.sources.Source at 0x17b57908090>"
      ]
     },
     "execution_count": 33,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 33
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-09T18:53:17.399790Z",
     "start_time": "2025-01-09T18:53:17.392721Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import numpy as np\n",
    "\n",
    "def calculate_idf(documents):\n",
    "    \"\"\"Compute per-term IDF values: idf(t) = ln(N / df(t)) + 1.\n",
    "\n",
    "    This is the non-smoothed variant (sklearn's smooth_idf=False formula).\n",
    "    Tokens are produced by whitespace splitting; no lowercasing is applied.\n",
    "    \"\"\"\n",
    "    # Vocabulary: every whitespace-separated token across all documents.\n",
    "    vocab = set(word for doc in documents for word in doc.split())\n",
    "    print(vocab)\n",
    "    # Document frequency: number of documents containing each term.\n",
    "    df = {word: sum(word in doc.split() for doc in documents) for word in vocab}\n",
    "    print(df)\n",
    "    # Total number of documents.\n",
    "    N = len(documents)\n",
    "    # Every vocabulary term occurs in at least one document, so df[word] >= 1\n",
    "    # by construction and the division can never be by zero.\n",
    "    idf = {word: np.log(N / float(df[word])) + 1 for word in df}\n",
    "    return idf\n",
    "\n",
    "# Sample documents (already lower-cased, no punctuation).\n",
    "documents = [\n",
    "    \"this is the first document\",\n",
    "    \"this document is the second document\",\n",
    "    \"and this is the third one\",\n",
    "    \"is this the first document\"\n",
    "]\n",
    "\n",
    "idf_values = calculate_idf(documents)\n",
    "print(\"Manually calculated IDF values:\", idf_values)"
   ],
   "id": "ac805aeda85c5b3a",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'the', 'is', 'document', 'and', 'first', 'one', 'third', 'second', 'this'}\n",
      "{'the': 4, 'is': 4, 'document': 3, 'and': 1, 'first': 2, 'one': 1, 'third': 1, 'second': 1, 'this': 4}\n",
      "Manually calculated IDF values: {'the': 1.0, 'is': 1.0, 'document': 1.2876820724517808, 'and': 2.386294361119891, 'first': 1.6931471805599454, 'one': 2.386294361119891, 'third': 2.386294361119891, 'second': 2.386294361119891, 'this': 1.0}\n"
     ]
    }
   ],
   "execution_count": 32
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-09T18:51:38.672544Z",
     "start_time": "2025-01-09T18:51:38.664228Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "\n",
    "# Sample documents.\n",
    "documents = [\n",
    "    \"This is the first document.\",\n",
    "    \"This document is the second document.\",\n",
    "    \"And this is the third one.\",\n",
    "    \"Is this the first document? all right\"\n",
    "]\n",
    "\n",
    "# Fit the vectorizer and build the TF-IDF matrix in one step.\n",
    "vectorizer = TfidfVectorizer()\n",
    "X_tfidf = vectorizer.fit_transform(documents)\n",
    "\n",
    "# Pair each vocabulary term with its learned IDF weight.\n",
    "idf = vectorizer.idf_\n",
    "print(\"Vocabulary:\", dict(zip(vectorizer.get_feature_names_out(), idf)))\n",
    "\n",
    "# Report the TF-IDF score of the word 'this' for every document.\n",
    "feature_names = vectorizer.get_feature_names_out()\n",
    "for i, doc in enumerate(documents):\n",
    "    print(f\"\\nDocument {i+1}: '{doc}'\")\n",
    "    row = X_tfidf[i].toarray()[0]\n",
    "    for word, score in zip(feature_names, row):\n",
    "        if word == 'this':\n",
    "            print(f\"Word: {word}, TF-IDF Score: {score}\")"
   ],
   "id": "a3b47e5df62af92f",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Vocabulary: {'all': 1.916290731874155, 'and': 1.916290731874155, 'document': 1.2231435513142097, 'first': 1.5108256237659907, 'is': 1.0, 'one': 1.916290731874155, 'right': 1.916290731874155, 'second': 1.916290731874155, 'the': 1.0, 'third': 1.916290731874155, 'this': 1.0}\n",
      "\n",
      "Document 1: 'This is the first document.'\n",
      "Word: this, TF-IDF Score: 0.38408524091481483\n",
      "\n",
      "Document 2: 'This document is the second document.'\n",
      "Word: this, TF-IDF Score: 0.281088674033753\n",
      "\n",
      "Document 3: 'And this is the third one.'\n",
      "Word: this, TF-IDF Score: 0.267103787642168\n",
      "\n",
      "Document 4: 'Is this the first document? all right'\n",
      "Word: this, TF-IDF Score: 0.26609474426129054\n"
     ]
    }
   ],
   "execution_count": 31
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
