{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "ad61f4f7",
   "metadata": {},
   "source": [
    "# 习题\n",
    "![image.png](./images/exercise2.png)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "a4cc4c6f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "文本的话题序列z：\n",
      "[[0 0 2 2 0 0 0 0 0]\n",
      " [0 0 0 0 0 2 0 0 2]\n",
      " [0 2 0 0 0 0 0 2 0]\n",
      " [0 0 0 0 0 0 2 0 2]\n",
      " [2 0 0 0 0 2 0 0 0]\n",
      " [2 2 2 2 2 2 2 2 2]\n",
      " [2 0 2 0 0 0 0 0 0]\n",
      " [0 0 0 0 0 0 2 0 2]\n",
      " [0 0 0 0 0 2 0 0 2]\n",
      " [2 0 2 0 0 0 0 2 0]\n",
      " [0 0 0 2 2 0 0 0 0]]\n",
      "样本的计数矩阵N_KV：\n",
      "[[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n",
      " [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n",
      " [2. 2. 2. 2. 2. 9. 2. 2. 2. 3. 2.]]\n",
      "样本的计数矩阵N_MK：\n",
      "[[0. 0. 4.]\n",
      " [0. 0. 2.]\n",
      " [0. 0. 4.]\n",
      " [0. 0. 3.]\n",
      " [0. 0. 2.]\n",
      " [0. 0. 4.]\n",
      " [0. 0. 3.]\n",
      " [0. 0. 3.]\n",
      " [0. 0. 5.]]\n",
      "模型参数varphi：\n",
      "[[0.091 0.091 0.091 0.091 0.091 0.091 0.091 0.091 0.091 0.091 0.091]\n",
      " [0.091 0.091 0.091 0.091 0.091 0.091 0.091 0.091 0.091 0.091 0.091]\n",
      " [0.067 0.067 0.067 0.067 0.067 0.293 0.067 0.067 0.067 0.1   0.067]]\n",
      "模型参数theta：\n",
      "[[0.067 0.067 0.867]\n",
      " [0.111 0.111 0.778]\n",
      " [0.067 0.067 0.867]\n",
      " [0.083 0.083 0.833]\n",
      " [0.111 0.111 0.778]\n",
      " [0.067 0.067 0.867]\n",
      " [0.083 0.083 0.833]\n",
      " [0.083 0.083 0.833]\n",
      " [0.056 0.056 0.889]]\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "\n",
    "\n",
    "class GibbsSamplingLDA:\n",
    "    def __init__(self, iter_max=1000):\n",
    "        self.iter_max = iter_max\n",
    "        self.weights_ = []\n",
    "\n",
    "    def fit(self, words, K):\n",
    "        \"\"\"\n",
    "        :param words: 单词-文本矩阵\n",
    "        :param K: 话题个数\n",
    "        :return: 文本话题序列z\n",
    "        \"\"\"\n",
    "        # M, Nm分别为文本个数和单词个数\n",
    "        words = words.T\n",
    "        M, Nm = words.shape\n",
    "\n",
    "        # 初始化超参数alpha, beta，其中alpha为文本的话题分布相关参数\n",
    "        # beta为话题的单词分布相关参数\n",
    "        alpha = np.array([1 / K] * K)\n",
    "        beta = np.array([1 / Nm] * Nm)\n",
    "\n",
    "        # 初始化参数theta, varphi，其中theta为文本关于话题的多项分布参数，\n",
    "        # varphi为话题关于单词的多项分布参数\n",
    "        theta = np.zeros([M, K])\n",
    "        varphi = np.zeros([K, Nm])\n",
    "\n",
    "        # 输出文本的话题序列z\n",
    "        z = np.zeros(words.shape, dtype='int')\n",
    "\n",
    "        # (1)设所有计数矩阵的元素n_mk、n_kv，计数向量的元素n_m、n_k初值为 0\n",
    "        n_mk = np.zeros([M, K])\n",
    "        n_kv = np.zeros([K, Nm])\n",
    "        n_m = np.zeros(M)\n",
    "        n_k = np.zeros(K)\n",
    "\n",
    "        # (2)对所有M个文本中的所有单词进行循环\n",
    "        for m in range(M):\n",
    "            for v in range(Nm):\n",
    "                # 如果单词v存在于文本m\n",
    "                if words[m, v] != 0:\n",
    "                    # (2.a)抽样话题\n",
    "                    z[m, v] = np.random.choice(list(range(K)))\n",
    "                    # 增加文本-话题计数\n",
    "                    n_mk[m, z[m, v]] += 1\n",
    "                    # 增加文本-话题和计数\n",
    "                    n_m[m] += 1\n",
    "                    # 增加话题-单词计数\n",
    "                    n_kv[z[m, v], v] += 1\n",
    "                    # 增加话题-单词和计数\n",
    "                    n_k[z[m, v]] += 1\n",
    "\n",
    "        # (3)对所有M个文本中的所有单词进行循环，直到进入燃烧期\n",
    "        zi = 0\n",
    "        for i in range(self.iter_max):\n",
    "            for m in range(M):\n",
    "                for v in range(Nm):\n",
    "                    # (3.a)如果单词v存在于文本m，那么当前单词是第v个单词，\n",
    "                    # 话题指派z_mv是第k个话题\n",
    "                    if words[m, v] != 0:\n",
    "                        # 减少计数\n",
    "                        n_mk[m, z[m, v]] -= 1\n",
    "                        n_m[m] -= 1\n",
    "                        n_kv[z[m, v], v] -= 1\n",
    "                        n_k[z[m, v]] -= 1\n",
    "\n",
    "                        # (3.b)按照满条件分布进行抽样\n",
    "                        max_zi_value, max_zi_index = -float('inf'), z[m, v]\n",
    "                        for k in range(K):\n",
    "                            zi = ((n_kv[k, v] + beta[v]) / (n_kv[k, :].sum() + beta.sum())) * \\\n",
    "                                 ((n_mk[m, k] + alpha[k]) / (n_mk[m, :].sum() + alpha.sum()))\n",
    "\n",
    "                        # 得到新的第 k‘个话题，分配给 z_mv\n",
    "                        if max_zi_value < zi:\n",
    "                            max_zi_value, max_zi_index = zi, k\n",
    "                            z[m, v] = max_zi_index\n",
    "\n",
    "                        # (3.c) (3.d)增加计数并得到两个更新的计数矩阵的n_kv和n_mk\n",
    "                        n_mk[m, z[m, v]] += 1\n",
    "                        n_m[m] += 1\n",
    "                        n_kv[z[m, v], v] += 1\n",
    "                        n_k[z[m, v]] += 1\n",
    "\n",
    "        # (4)利用得到的样本计数，计算模型参数\n",
    "        for m in range(M):\n",
    "            for k in range(K):\n",
    "                theta[m, k] = (n_mk[m, k] + alpha[k]) / (n_mk[m, :].sum() + alpha.sum())\n",
    "\n",
    "        for k in range(K):\n",
    "            for v in range(Nm):\n",
    "                varphi[k, v] = (n_kv[k, v] + beta[v]) / (n_kv[k, :].sum() + beta.sum())\n",
    "\n",
    "        self.weights_ = [varphi, theta]\n",
    "        return z.T, n_kv, n_mk\n",
    "\n",
    "    \n",
    "gibbs_sampling_lda = GibbsSamplingLDA(iter_max=1000)\n",
    "\n",
    "# 输入文本-单词矩阵，共有9个文本，11个单词\n",
    "words = np.array([[0, 0, 1, 1, 0, 0, 0, 0, 0],\n",
    "                  [0, 0, 0, 0, 0, 1, 0, 0, 1],\n",
    "                  [0, 1, 0, 0, 0, 0, 0, 1, 0],\n",
    "                  [0, 0, 0, 0, 0, 0, 1, 0, 1],\n",
    "                  [1, 0, 0, 0, 0, 1, 0, 0, 0],\n",
    "                  [1, 1, 1, 1, 1, 1, 1, 1, 1],\n",
    "                  [1, 0, 1, 0, 0, 0, 0, 0, 0],\n",
    "                  [0, 0, 0, 0, 0, 0, 1, 0, 1],\n",
    "                  [0, 0, 0, 0, 0, 2, 0, 0, 1],\n",
    "                  [1, 0, 1, 0, 0, 0, 0, 1, 0],\n",
    "                  [0, 0, 0, 1, 1, 0, 0, 0, 0]])\n",
    "\n",
    "K = 3  # 假设话题数量为3\n",
    "\n",
    "# 设置精度为3\n",
    "np.set_printoptions(precision=3, suppress=True)\n",
    "\n",
    "z, n_kv, n_mk = gibbs_sampling_lda.fit(words, K)\n",
    "varphi = gibbs_sampling_lda.weights_[0]\n",
    "theta = gibbs_sampling_lda.weights_[1]\n",
    "\n",
    "print(\"文本的话题序列z：\")\n",
    "print(z)\n",
    "print(\"样本的计数矩阵N_KV：\")\n",
    "print(n_kv)\n",
    "print(\"样本的计数矩阵N_MK：\")\n",
    "print(n_mk)\n",
    "print(\"模型参数varphi：\")\n",
    "print(varphi)\n",
    "print(\"模型参数theta：\")\n",
    "print(theta)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "69ed1d28",
   "metadata": {},
   "source": [
    "# gensim包使用\n",
    "gensim 是一个开源的 Python 库，专门用于处理自然语言处理（NLP）中的主题建模和文档相似度分析。它以其高效的实现和简洁的 API 而闻名，特别适合处理大型文本数据集。\n",
    "\n",
    "**gensim 包的主要用途包括：**\n",
    "\n",
    "1. 主题建模：gensim 提供了多种算法来发现文档集合中的主题，最著名的是 Latent Dirichlet Allocation (LDA) 和 Latent Semantic Analysis (LSA)。这些算法可以帮助你理解文档集合中的主要话题和趋势。\n",
    "\n",
    "2. 文档相似度：gensim 可以计算文档之间的相似度，这对于信息检索、推荐系统和文档聚类等应用非常有用。\n",
    "\n",
    "3. 词向量：gensim 支持词向量模型，如 Word2Vec 和 FastText，这些模型可以将单词转换为向量，以便在向量空间中进行操作，从而捕捉语义和语法关系。\n",
    "\n",
    "4. 文档向量：基于词向量的文档向量模型，如 Doc2Vec，可以将整个文档转换为向量，用于进一步的分析和处理。\n",
    "\n",
    "5. 高效的内存使用：gensim 设计得非常高效，即使在处理大型数据集时也能保持较低的内存占用。\n",
    "\n",
    "6. 可扩展性：gensim 支持在线学习和增量更新，这意味着模型可以在新数据到来时进行更新，而不需要从头开始重新训练。\n",
    "\n",
    "7. 与其他工具的集成：gensim 可以与其他数据处理和机器学习工具（如 NumPy、SciPy 和 scikit-learn）无缝集成。\n",
    "\n",
    "在 gensim 包中，corpora, models, 和 similarities 是三个主要的模块，它们各自承担着不同的角色和功能：\n",
    "\n",
    "**corpora：**\n",
    "\n",
    "- 这个模块用于处理文本语料库（collection of texts）。在 gensim 中，一个语料库是一个文档集合，其中每个文档被表示为一个词袋（bag-of-words）或词向量（word vectors）。\n",
    "- corpora 模块提供了多种工具和类来创建和管理语料库，包括语料库的读取、序列化、转换和稀疏向量的表示。\n",
    "- 它支持多种格式的语料库，如列表的列表、文件、流等，并且可以高效地处理大规模语料库。\n",
    "\n",
    "**models：**\n",
    "\n",
    "- 这个模块包含了各种主题模型和词向量模型，是 gensim 包的核心部分。\n",
    "- 它提供了实现 LDA、LSA、Word2Vec、FastText、Doc2Vec 等模型的类，这些模型可以用于训练、更新、查询和保存模型。\n",
    "- 通过这些模型，你可以发现文档中的主题、学习词向量、将文档转换为向量等。\n",
    "\n",
    "**similarities：**\n",
    "\n",
    "- 这个模块用于计算文档之间的相似度，它提供了多种相似度计算方法和索引结构。\n",
    "- 它可以用于创建相似度矩阵、查询最相似的文档、进行文档聚类等任务。\n",
    "- similarities 模块支持多种相似度度量，如余弦相似度、Jaccard相似度等，并且提供了高效的索引结构（如稀疏矩阵索引），以加速相似度查询。\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "7b701577",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Text = \n",
      "[['human', 'machine', 'interface', 'lab', 'abc', 'computer', 'applications'],\n",
      " ['survey', 'user', 'opinion', 'computer', 'system', 'response', 'time'],\n",
      " ['eps', 'user', 'interface', 'management', 'system'],\n",
      " ['system', 'human', 'system', 'engineering', 'testing', 'eps'],\n",
      " ['relation', 'user', 'perceived', 'response', 'time', 'error', 'measurement'],\n",
      " ['generation', 'random', 'binary', 'unordered', 'trees'],\n",
      " ['intersection', 'graph', 'paths', 'trees'],\n",
      " ['graph', 'minors', 'iv', 'widths', 'trees', 'well', 'quasi', 'ordering'],\n",
      " ['graph', 'minors', 'survey']]\n",
      "Dictionary<35 unique tokens: ['abc', 'applications', 'computer', 'human', 'interface']...>\n",
      "TF-IDF:\n",
      "[(0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1)]\n",
      "[(2, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1)]\n",
      "[(4, 1), (10, 1), (12, 1), (13, 1), (14, 1)]\n",
      "[(3, 1), (10, 2), (13, 1), (15, 1), (16, 1)]\n",
      "[(8, 1), (11, 1), (12, 1), (17, 1), (18, 1), (19, 1), (20, 1)]\n",
      "[(21, 1), (22, 1), (23, 1), (24, 1), (25, 1)]\n",
      "[(24, 1), (26, 1), (27, 1), (28, 1)]\n",
      "[(24, 1), (26, 1), (29, 1), (30, 1), (31, 1), (32, 1), (33, 1), (34, 1)]\n",
      "[(9, 1), (26, 1), (30, 1)]\n"
     ]
    }
   ],
   "source": [
    "from gensim import corpora, models, similarities\n",
    "from pprint import pprint\n",
    "import warnings\n",
    "\n",
    "stop_list = set('for a of the and to in'.split())\n",
    "\n",
    "# Read the corpus: one document per line, lower-cased, whitespace-tokenized,\n",
    "# stop words removed. The with-block closes the file after reading (the\n",
    "# original left the handle open).\n",
    "with open('./data/LDA_test.txt') as f:\n",
    "    texts = [[\n",
    "        word for word in line.strip().lower().split() if word not in stop_list\n",
    "    ] for line in f]\n",
    "print('Text = ')\n",
    "pprint(texts)\n",
    "\n",
    "# corpora.Dictionary maps every token to a unique integer id; it is the\n",
    "# basis for the bag-of-words conversion below.\n",
    "dictionary = corpora.Dictionary(texts)\n",
    "print(dictionary)\n",
    "\n",
    "V = len(dictionary)  # vocabulary size\n",
    "corpus = [dictionary.doc2bow(text) for text in texts]\n",
    "\n",
    "# TF-IDF weights are computed but deliberately overridden right after:\n",
    "# the rest of the notebook operates on the raw bag-of-words counts.\n",
    "corpus_tfidf = models.TfidfModel(corpus)[corpus]\n",
    "corpus_tfidf = corpus\n",
    "\n",
    "print('Corpus (bag-of-words counts; TF-IDF overridden above):')\n",
    "for c in corpus_tfidf:\n",
    "    print(c)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ffa000c5",
   "metadata": {},
   "source": [
    "## LSI模型\n",
    "LsiModel 是一个用于训练和使用潜在语义索引（Latent Semantic Indexing, LSI）模型的类。LSI 是一种数学技术，用于从文本集合中提取潜在的主题信息，它基于奇异值分解（Singular Value Decomposition, SVD）来减少文本数据的维度和揭示潜在的语义结构。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "48fbc5cc",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "LSI Model:\n",
      "[[(0, 0.9334981916792657), (1, 0.10508952614086228)],\n",
      " [(0, 2.0319923746870234), (1, -0.04714531412173611)],\n",
      " [(0, 1.5351342836582045), (1, 0.13488784052204503)],\n",
      " [(0, 1.954007719459447), (1, 0.2178049857607443)],\n",
      " [(0, 1.2902472956004107), (1, -0.002252143749926661)],\n",
      " [(0, 0.022783081905507235), (1, -0.7778052604326757)],\n",
      " [(0, 0.056715675769210404), (1, -1.182770344670485)],\n",
      " [(0, 0.12360003320648266), (1, -2.6343068608236826)],\n",
      " [(0, 0.23560627195889225), (1, -0.9407936203668299)]]\n",
      "LSI Topics:\n",
      "[(0,\n",
      "  '0.579*\"system\" + 0.376*\"user\" + 0.270*\"eps\" + 0.257*\"time\" + '\n",
      "  '0.257*\"response\"'),\n",
      " (1,\n",
      "  '-0.480*\"graph\" + -0.464*\"trees\" + -0.361*\"minors\" + -0.266*\"iv\" + '\n",
      "  '-0.266*\"widths\"')]\n",
      "Similarity:\n",
      "[array([ 1.        ,  0.9908607 ,  0.9997008 ,  0.9999994 ,  0.9935261 ,\n",
      "       -0.08272626, -0.06414512, -0.06517283,  0.13288835], dtype=float32),\n",
      " array([0.9908607 , 0.99999994, 0.9938636 , 0.99100804, 0.99976987,\n",
      "       0.0524564 , 0.07105229, 0.070025  , 0.2653665 ], dtype=float32),\n",
      " array([ 0.9997008 ,  0.9938636 ,  0.99999994,  0.999727  ,  0.99600756,\n",
      "       -0.05832579, -0.03971674, -0.04074576,  0.15709123], dtype=float32),\n",
      " array([ 0.9999994 ,  0.99100804,  0.999727  ,  1.        ,  0.9936501 ,\n",
      "       -0.08163348, -0.06305084, -0.06407862,  0.13397504], dtype=float32),\n",
      " array([0.9935261 , 0.99976987, 0.99600756, 0.9936501 , 0.99999994,\n",
      "       0.03102366, 0.04963995, 0.04861134, 0.24462426], dtype=float32),\n",
      " array([-0.08272626,  0.0524564 , -0.05832579, -0.08163348,  0.03102366,\n",
      "        0.99999994,  0.99982643,  0.9998451 ,  0.97674036], dtype=float32),\n",
      " array([-0.06414512,  0.07105229, -0.03971674, -0.06305084,  0.04963995,\n",
      "        0.99982643,  1.        ,  0.9999995 ,  0.9805657 ], dtype=float32),\n",
      " array([-0.06517283,  0.070025  , -0.04074576, -0.06407862,  0.04861134,\n",
      "        0.9998451 ,  0.9999995 ,  1.        ,  0.9803632 ], dtype=float32),\n",
      " array([0.13288835, 0.2653665 , 0.15709123, 0.13397504, 0.24462426,\n",
      "       0.97674036, 0.9805657 , 0.9803632 , 1.        ], dtype=float32)]\n"
     ]
    }
   ],
   "source": [
    "# Fit a 2-topic LSI (latent semantic indexing) model on the corpus.\n",
    "print('\\nLSI Model:')\n",
    "lsi = models.LsiModel(corpus_tfidf, num_topics=2, id2word=dictionary)\n",
    "\n",
    "# Project every document into the 2-dimensional topic space.\n",
    "topic_result = list(lsi[corpus_tfidf])\n",
    "pprint(topic_result)\n",
    "\n",
    "# Top five weighted words for each of the two topics.\n",
    "print('LSI Topics:')\n",
    "pprint(lsi.print_topics(num_topics=2, num_words=5))\n",
    "\n",
    "# Pairwise cosine similarities between documents in the LSI space.\n",
    "# similarities.Similarity() is the disk-backed alternative for big corpora.\n",
    "similarity = similarities.MatrixSimilarity(lsi[corpus_tfidf])\n",
    "print('Similarity:')\n",
    "pprint(list(similarity))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4e6cac44",
   "metadata": {},
   "source": [
    "## LDA模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "391a178d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "LDA Model:\n",
      "Document-Topic:\n",
      "\n",
      "[[(0, 0.023332613), (1, 0.9766674)],\n",
      " [(0, 0.023462947), (1, 0.97653705)],\n",
      " [(0, 0.032134935), (1, 0.96786505)],\n",
      " [(0, 0.027017599), (1, 0.9729824)],\n",
      " [(0, 0.9800736), (1, 0.019926421)],\n",
      " [(0, 0.9726538), (1, 0.027346203)],\n",
      " [(0, 0.9663219), (1, 0.033678155)],\n",
      " [(0, 0.98252094), (1, 0.01747906)],\n",
      " [(0, 0.95609355), (1, 0.04390639)]]\n",
      "Document-Topic:\n",
      "\n",
      "[(0, 0.023332108), (1, 0.97666794)]\n",
      "[(0, 0.023463078), (1, 0.9765369)]\n",
      "[(0, 0.03213491), (1, 0.96786505)]\n",
      "[(0, 0.027017659), (1, 0.97298235)]\n",
      "[(0, 0.980074), (1, 0.019926017)]\n",
      "[(0, 0.97265357), (1, 0.027346475)]\n",
      "[(0, 0.9663219), (1, 0.033678126)]\n",
      "[(0, 0.98252106), (1, 0.017478881)]\n",
      "[(0, 0.9560938), (1, 0.043906223)]\n",
      "Topic 0\n",
      "[('graph', 0.0783448),\n",
      " ('trees', 0.078328446),\n",
      " ('minors', 0.05605246),\n",
      " ('response', 0.034141194),\n",
      " ('time', 0.034083754),\n",
      " ('user', 0.03392993),\n",
      " ('survey', 0.033889327),\n",
      " ('quasi', 0.03365731),\n",
      " ('ordering', 0.03365686),\n",
      " ('well', 0.0336565)]\n",
      "Topic 1\n",
      "[('system', 0.10575562),\n",
      " ('interface', 0.05881498),\n",
      " ('human', 0.058808263),\n",
      " ('eps', 0.05880317),\n",
      " ('user', 0.058616713),\n",
      " ('computer', 0.058581233),\n",
      " ('testing', 0.03529052),\n",
      " ('engineering', 0.035281125),\n",
      " ('lab', 0.035279464),\n",
      " ('machine', 0.03527904)]\n",
      "Similarity:\n",
      "[array([1.        , 1.        , 0.9999567 , 0.99999255, 0.04419908,\n",
      "       0.05196895, 0.05868899, 0.04166115, 0.06971917], dtype=float32),\n",
      " array([1.        , 1.        , 0.999958  , 0.999993  , 0.04434185,\n",
      "       0.05211167, 0.05883166, 0.04180394, 0.06986172], dtype=float32),\n",
      " array([0.9999567 , 0.999958  , 0.99999994, 0.9999853 , 0.05349262,\n",
      "       0.06125867, 0.06797496, 0.05095582, 0.07899805], dtype=float32),\n",
      " array([0.99999255, 0.999993  , 0.9999853 , 1.        , 0.04807067,\n",
      "       0.05583903, 0.06255759, 0.04553318, 0.07358492], dtype=float32),\n",
      " array([0.04419908, 0.04434185, 0.05349262, 0.04807067, 1.0000001 ,\n",
      "       0.9999697 , 0.99989474, 0.99999684, 0.99967337], dtype=float32),\n",
      " array([0.05196895, 0.05211167, 0.06125867, 0.05583903, 0.9999697 ,\n",
      "       1.        , 0.99997735, 0.9999468 , 0.99984187], dtype=float32),\n",
      " array([0.05868899, 0.05883166, 0.06797496, 0.06255759, 0.99989474,\n",
      "       0.99997735, 1.        , 0.9998547 , 0.9999389 ], dtype=float32),\n",
      " array([0.04166115, 0.04180394, 0.05095582, 0.04553318, 0.99999684,\n",
      "       0.9999468 , 0.9998547 , 1.        , 0.9996052 ], dtype=float32),\n",
      " array([0.06971917, 0.06986172, 0.07899805, 0.07358492, 0.99967337,\n",
      "       0.99984187, 0.9999389 , 0.9996052 , 1.        ], dtype=float32)]\n",
      "\n",
      "\n",
      "USE WITH CARE--\n",
      "HDA Model:\n",
      "[[(0, 0.4026821091956041),\n",
      "  (1, 0.5272432540712062),\n",
      "  (2, 0.017579688623491102),\n",
      "  (3, 0.01364930530095091),\n",
      "  (4, 0.010029315768878914)],\n",
      " [(0, 0.5504574061344856),\n",
      "  (1, 0.37947965146275586),\n",
      "  (2, 0.017569900855318194),\n",
      "  (3, 0.013647318796012678),\n",
      "  (4, 0.010029389924909113)],\n",
      " [(0, 0.8751565683434994),\n",
      "  (1, 0.031447365238665266),\n",
      "  (2, 0.02340317705740089),\n",
      "  (3, 0.018198632733181605),\n",
      "  (4, 0.013372485882085179),\n",
      "  (5, 0.010075426682759195)],\n",
      " [(0, 0.6116610527273639),\n",
      "  (1, 0.02720712711329961),\n",
      "  (2, 0.3011336508297578),\n",
      "  (3, 0.015602783080398275),\n",
      "  (4, 0.011462430341282361)],\n",
      " [(0, 0.9059842611690332),\n",
      "  (1, 0.023911991714360446),\n",
      "  (2, 0.01760745074359857),\n",
      "  (3, 0.013650535009853219),\n",
      "  (4, 0.010029418211903625)],\n",
      " [(0, 0.6655490786413387),\n",
      "  (1, 0.24102382939798642),\n",
      "  (2, 0.023426508791025268),\n",
      "  (3, 0.018205877764253246),\n",
      "  (4, 0.013372934687343883),\n",
      "  (5, 0.01007542664307626)],\n",
      " [(0, 0.3693608576805098),\n",
      "  (1, 0.03755817161105353),\n",
      "  (2, 0.5090787728417354),\n",
      "  (3, 0.021848701179779002),\n",
      "  (4, 0.01604737211746471),\n",
      "  (5, 0.012090511691653566)],\n",
      " [(0, 0.030506419453886442),\n",
      "  (1, 0.1271269137579733),\n",
      "  (2, 0.7956924886649493),\n",
      "  (3, 0.012143809367743692)],\n",
      " [(0, 0.8123938303701818),\n",
      "  (1, 0.04721083015508953),\n",
      "  (2, 0.03537399297464647),\n",
      "  (3, 0.027328526273565024),\n",
      "  (4, 0.020060140504551283),\n",
      "  (5, 0.015113163575423878),\n",
      "  (6, 0.010939060893890748)]]\n",
      "HDA Topics:\n",
      "[(0, '0.112*intersection + 0.088*human + 0.088*computer + 0.073*measurement + 0.045*eps'), (1, '0.164*computer + 0.094*opinion + 0.057*lab + 0.056*generation + 0.053*interface')]\n"
     ]
    }
   ],
   "source": [
    "# Train a 2-topic LDA model; alpha/eta='auto' lets gensim learn the priors.\n",
    "print('\\nLDA Model:')\n",
    "num_topics = 2\n",
    "lda = models.LdaModel(\n",
    "    corpus_tfidf,\n",
    "    num_topics=num_topics,\n",
    "    id2word=dictionary,\n",
    "    alpha='auto',\n",
    "    eta='auto',\n",
    "    minimum_probability=0.001,\n",
    "    passes=10)\n",
    "# Way 1 to get document-topic distributions: index the model with the corpus.\n",
    "doc_topic = [doc_t for doc_t in lda[corpus_tfidf]]\n",
    "print('Document-Topic:\\n')\n",
    "pprint(doc_topic)\n",
    "# Way 2: ask the model explicitly via get_document_topics().\n",
    "print('Document-Topic:\\n')\n",
    "for doc_topic in lda.get_document_topics(corpus_tfidf):\n",
    "    print(doc_topic)\n",
    "    \n",
    "# Top words for each learned topic.\n",
    "for topic_id in range(num_topics):\n",
    "    print('Topic', topic_id)\n",
    "    # pprint(lda.get_topic_terms(topicid=topic_id))\n",
    "    pprint(lda.show_topic(topic_id))\n",
    "# Pairwise document similarity in the LDA topic space.\n",
    "similarity = similarities.MatrixSimilarity(lda[corpus_tfidf])\n",
    "print('Similarity:')\n",
    "pprint(list(similarity))\n",
    "\n",
    "# HDP (hierarchical Dirichlet process) infers the number of topics itself;\n",
    "# results vary between runs — hence the USE WITH CARE banner below.\n",
    "hda = models.HdpModel(corpus_tfidf, id2word=dictionary)\n",
    "topic_result = [a for a in hda[corpus_tfidf]]\n",
    "print('\\n\\nUSE WITH CARE--\\nHDA Model:')\n",
    "pprint(topic_result)\n",
    "print('HDA Topics:')\n",
    "print(hda.print_topics(num_topics=2, num_words=5))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
