{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "id": "c1c2226b",
   "metadata": {},
   "source": [
    "# 习题\n",
    "## 习题17.1\n",
    "![image.png](./images/exercise1.png)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "636442d1",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "话题空间U：\n",
      "[[-0.08  0.28  0.89]\n",
      " [-0.16  0.57 -0.45]\n",
      " [-0.14 -0.01  0.  ]\n",
      " [-0.73 -0.55  0.  ]\n",
      " [-0.15 -0.18  0.  ]\n",
      " [-0.63  0.51 -0.  ]]\n",
      "文本在话题空间的表示SV：\n",
      "[[-0.79 -1.57 -2.86 -2.96]\n",
      " [ 1.08  2.15 -0.1  -1.33]\n",
      " [ 1.79 -0.89  0.    0.  ]]\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "\n",
    "\n",
    "def lsa_svd(X, k):\n",
    "    \"\"\"\n",
    "    Latent semantic analysis via truncated singular value decomposition.\n",
    "\n",
    "    :param X: word-document matrix (m words x n documents)\n",
    "    :param k: number of topics to keep\n",
    "    :return: topic vector space U_k, and the representation of the\n",
    "             document collection in that space (S_k @ V_k)\n",
    "    \"\"\"\n",
    "    # Full SVD of the word-document matrix.\n",
    "    # NOTE: np.linalg.svd returns the transpose of V (Vh), so the\n",
    "    # variable named V below actually holds V^T; slicing its ROWS\n",
    "    # with V[:k, :] is therefore the correct truncation.\n",
    "    U, S, V = np.linalg.svd(X)\n",
    "    # Truncated SVD: keep only the leading k singular triplets\n",
    "    U = U[:, :k]\n",
    "    S = np.diag(S[:k])\n",
    "    V = V[:k, :]\n",
    "\n",
    "    return U, np.dot(S, V)\n",
    "\n",
    "# Word-document matrix from exercise 17.1\n",
    "X = np.array([[2, 0, 0, 0],\n",
    "              [0, 2, 0, 0],\n",
    "              [0, 0, 1, 0],\n",
    "              [0, 0, 2, 3],\n",
    "              [0, 0, 0, 1],\n",
    "              [1, 2, 2, 1]])\n",
    "\n",
    "# Print floats with 2-decimal precision, no scientific notation\n",
    "np.set_printoptions(precision=2, suppress=True)\n",
    "# Assume the number of topics is 3\n",
    "U, SV = lsa_svd(X, k=3)\n",
    "print(\"话题空间U：\")\n",
    "print(U)\n",
    "print(\"文本在话题空间的表示SV：\")\n",
    "print(SV)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "id": "c4f8df7d",
   "metadata": {},
   "source": [
    "## 习题17.2\n",
    "![image.png](./images/exercise2.png)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "22088d94",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "话题空间W：\n",
      "[[0.   0.   1.39]\n",
      " [0.   1.47 0.  ]\n",
      " [0.35 0.   0.  ]\n",
      " [1.77 0.   0.  ]\n",
      " [0.35 0.   0.  ]\n",
      " [1.06 1.47 0.7 ]]\n",
      "文本在话题空间的表示H：\n",
      "[[0.   0.   1.41 1.41]\n",
      " [0.   1.36 0.   0.  ]\n",
      " [1.44 0.   0.   0.  ]]\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "\n",
    "\n",
    "class DivergenceNmfLsa:\n",
    "    def __init__(self, max_iter=1000, tol=1e-6, random_state=0):\n",
    "        \"\"\"\n",
    "        Non-negative matrix factorization using the divergence loss.\n",
    "        :param max_iter: maximum number of update iterations\n",
    "        :param tol: tolerance on the change in loss (stopping criterion)\n",
    "        :param random_state: random seed for the factor initialization\n",
    "        \"\"\"\n",
    "        self.max_iter = max_iter\n",
    "        self.tol = tol\n",
    "        self.random_state = random_state\n",
    "        np.random.seed(self.random_state)\n",
    "\n",
    "    def _init_param(self, X, k):\n",
    "        # Random non-negative initialization of W (m x k) and H (k x n)\n",
    "        self.__m, self.__n = X.shape\n",
    "        self.__W = np.random.random((self.__m, k))\n",
    "        self.__H = np.random.random((k, self.__n))\n",
    "\n",
    "    def _div_loss(self, X, W, H):\n",
    "        # Generalized KL-divergence D(X || WH) = sum_ij x*log(x/y) - x + y,\n",
    "        # where the x*log(x/y) term is taken as 0 when x or y is 0\n",
    "        Y = np.dot(W, H)\n",
    "        loss = 0\n",
    "        for i in range(self.__m):\n",
    "            for j in range(self.__n):\n",
    "                loss += (X[i][j] * np.log(X[i][j] / Y[i][j]) if X[i][j] * Y[i][j] > 0 else 0) - X[i][j] + Y[i][j]\n",
    "\n",
    "        return loss\n",
    "\n",
    "    def fit(self, X, k):\n",
    "        \"\"\"\n",
    "        Factorize X ≈ W @ H with multiplicative updates for the divergence loss.\n",
    "        :param X: word-document matrix\n",
    "        :param k: number of topics\n",
    "        :return: factors (W, H)\n",
    "        \"\"\"\n",
    "        # (1) initialize the factors\n",
    "        self._init_param(X, k)\n",
    "        # (2.c) compute the initial divergence loss\n",
    "        loss = self._div_loss(X, self.__W, self.__H)\n",
    "\n",
    "        for _ in range(self.max_iter):\n",
    "            # (2.a) multiplicative update of the elements of W\n",
    "            WH = np.dot(self.__W, self.__H)\n",
    "            for i in range(self.__m):\n",
    "                for l in range(k):\n",
    "                    s1 = sum(self.__H[l][j] * X[i][j] / WH[i][j] for j in range(self.__n))\n",
    "                    s2 = sum(self.__H[l][j] for j in range(self.__n))\n",
    "                    self.__W[i][l] *= s1 / s2\n",
    "\n",
    "            # (2.b) multiplicative update of the elements of H\n",
    "            WH = np.dot(self.__W, self.__H)\n",
    "            for l in range(k):\n",
    "                for j in range(self.__n):\n",
    "                    s1 = sum(self.__W[i][l] * X[i][j] / WH[i][j] for i in range(self.__m))\n",
    "                    s2 = sum(self.__W[i][l] for i in range(self.__m))\n",
    "                    self.__H[l][j] *= s1 / s2\n",
    "\n",
    "            new_loss = self._div_loss(X, self.__W, self.__H)\n",
    "            # Stop when the loss change falls below the tolerance\n",
    "            if abs(new_loss - loss) < self.tol:\n",
    "                break\n",
    "\n",
    "            loss = new_loss\n",
    "\n",
    "        return self.__W, self.__H\n",
    "    \n",
    "X = np.array([[2, 0, 0, 0],\n",
    "              [0, 2, 0, 0],\n",
    "              [0, 0, 1, 0],\n",
    "              [0, 0, 2, 3],\n",
    "              [0, 0, 0, 1],\n",
    "              [1, 2, 2, 1]])\n",
    "\n",
    "# Print floats with 2-decimal precision, no scientific notation\n",
    "np.set_printoptions(precision=2, suppress=True)\n",
    "# Assume the number of topics is 3\n",
    "k = 3\n",
    "div_nmf = DivergenceNmfLsa(max_iter=1000, random_state=2022)\n",
    "W, H = div_nmf.fit(X, k)\n",
    "print(\"话题空间W：\")\n",
    "print(W)\n",
    "print(\"文本在话题空间的表示H：\")\n",
    "print(H)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8deb8f2a",
   "metadata": {},
   "source": [
    "# 使用sklearn.decomposition.TruncatedSVD\n",
    "sklearn.decomposition.TruncatedSVD 是 Scikit-learn 库中的一个类，用于对高维数据进行奇异值分解（SVD），并将其降维。这是一种用于减少数据维度的线性降维技术，特别适合稀疏矩阵，如文本数据中的词频矩阵（TF-IDF 矩阵）。\n",
    "\n",
    "**主要功能**\n",
    "TruncatedSVD 类通过保留数据矩阵的前几个奇异值和相应的奇异向量，来近似表示原始数据，从而实现降维。它与 PCA（主成分分析）类似，但 TruncatedSVD 可以处理稀疏矩阵和不居中的数据。\n",
    "\n",
    "**主要参数**\n",
    "- n_components: 要保留的奇异值和奇异向量的数量（即降维后的目标维度）。\n",
    "- algorithm: 选择计算奇异值分解的方法。默认值为 'randomized'，它是一种快速随机化 SVD 算法。另一种选择是 'arpack'。\n",
    "- n_iter: 随机化 SVD 算法中迭代次数。默认值为 5。\n",
    "- random_state: 控制随机数生成，以确保结果的可重复性。默认值为 None。\n",
    "- tol: 与 'arpack' 算法一起使用的收敛公差。默认值为 0.0。\n",
    "\n",
    "**主要方法**\n",
    "- fit(X): 拟合模型到数据矩阵 X。\n",
    "- fit_transform(X): 拟合模型并将数据矩阵 X 转换为降维后的形式。\n",
    "- transform(X): 将数据矩阵 X 转换为降维后的形式。\n",
    "- inverse_transform(X): 将降维后的数据近似反变换回原始维度。\n",
    "\n",
    "**示例**\n",
    "以下是使用 TruncatedSVD 对稀疏矩阵进行降维的示例：\n",
    "\n",
    "```python\n",
    "import numpy as np\n",
    "from sklearn.decomposition import TruncatedSVD\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "\n",
    "# 示例文本数据\n",
    "documents = [\n",
    "    \"This is the first document.\",\n",
    "    \"This document is the second document.\",\n",
    "    \"And this is the third one.\",\n",
    "    \"Is this the first document?\",\n",
    "]\n",
    "\n",
    "# 使用 TfidfVectorizer 将文本数据转换为 TF-IDF 矩阵\n",
    "vectorizer = TfidfVectorizer()\n",
    "X = vectorizer.fit_transform(documents)\n",
    "\n",
    "# 使用 TruncatedSVD 进行降维\n",
    "svd = TruncatedSVD(n_components=2, random_state=42)\n",
    "X_reduced = svd.fit_transform(X)\n",
    "\n",
    "print(\"原始数据形状:\", X.shape)\n",
    "print(\"降维后数据形状:\", X_reduced.shape)\n",
    "\n",
    "print(\"降维后数据:\")\n",
    "print(X_reduced)\n",
    "```\n",
    "在这个示例中，我们首先将一组文本数据转换为 TF-IDF 矩阵，然后使用 TruncatedSVD 将其降维到 2 个成分。输出显示了原始数据的形状和降维后数据的形状以及降维后的数据内容。\n",
    "\n",
    "**适用场景**\n",
    "TruncatedSVD 特别适用于以下场景：\n",
    "\n",
    "- 处理稀疏矩阵，如文本数据的词频矩阵或 TF-IDF 矩阵。\n",
    "- 在特征选择和降维中，通过保留数据的主要信息来减少计算复杂度。\n",
    "- 在推荐系统和自然语言处理等应用中，对大规模数据进行降维处理。\n",
    "\n",
    "通过 TruncatedSVD，可以有效地将高维稀疏数据降维，保留数据的主要信息，减少计算资源的消耗，同时提高算法的性能和效果。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "62086cd5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.4  0.35 0.19]\n",
      "0.9339264600284477\n",
      "[4.48 2.75 2.  ]\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "from sklearn.decomposition import TruncatedSVD\n",
    "\n",
    "# Word-document matrix from exercise 17.1 (reused by the NMF cells below)\n",
    "X = [[2, 0, 0, 0], [0, 2, 0, 0], [0, 0, 1, 0], [0, 0, 2, 3], [0, 0, 0, 1], [1, 2, 2, 1]]\n",
    "X = np.asarray(X)\n",
    "\n",
    "# Truncated SVD keeping the top 3 singular values/vectors\n",
    "# (the unused full np.linalg.svd(X) computation was removed)\n",
    "svd = TruncatedSVD(n_components=3, n_iter=7, random_state=42)\n",
    "svd.fit(X)\n",
    "\n",
    "# Variance explained per component, its total, and the singular values\n",
    "print(svd.explained_variance_ratio_)\n",
    "print(svd.explained_variance_ratio_.sum())\n",
    "print(svd.singular_values_)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f9ad98ce",
   "metadata": {},
   "source": [
    "# 自编程实现非负矩阵分解"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "90b67d32",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1.5439641249244453"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def inverse_transform(W, H):\n",
    "    \"\"\"Reconstruct the original matrix from its factors: X ≈ W @ H.\"\"\"\n",
    "    return W.dot(H)\n",
    "\n",
    "def loss(X, X_):\n",
    "    \"\"\"Squared Frobenius reconstruction error ||X - X_||_F^2.\"\"\"\n",
    "    return ((X - X_) * (X - X_)).sum()\n",
    "\n",
    "class MyNMF:\n",
    "    def fit(self, X, k, t, random_state=None):\n",
    "        \"\"\"\n",
    "        Non-negative matrix factorization by multiplicative updates\n",
    "        (Lee & Seung) minimizing the Frobenius loss.\n",
    "        :param X: non-negative matrix to factorize (m x n)\n",
    "        :param k: number of topics (inner dimension)\n",
    "        :param t: iteration budget; t - 1 update steps are performed,\n",
    "                  matching the original while-loop behavior\n",
    "        :param random_state: optional seed for reproducible initialization;\n",
    "                             None keeps the legacy unseeded behavior\n",
    "        :return: factors (W, H) with X ≈ W @ H\n",
    "        \"\"\"\n",
    "        m, n = X.shape\n",
    "\n",
    "        if random_state is not None:\n",
    "            np.random.seed(random_state)\n",
    "\n",
    "        # Random non-negative init; columns of W normalized to sum to 1\n",
    "        W = np.random.rand(m, k)\n",
    "        W = W / W.sum(axis=0)\n",
    "\n",
    "        H = np.random.rand(k, n)\n",
    "\n",
    "        for _ in range(t - 1):\n",
    "            # Multiplicative updates keep W and H non-negative.\n",
    "            # NOTE(review): denominators can underflow to 0 for pathological\n",
    "            # inputs; add a small epsilon if that ever occurs in practice.\n",
    "            W = W * X.dot(H.T) / W.dot(H).dot(H.T)\n",
    "            H = H * (W.T).dot(X) / (W.T).dot(W).dot(H)\n",
    "\n",
    "        return W, H\n",
    "\n",
    "model = MyNMF()\n",
    "W, H = model.fit(X, 3, 200)\n",
    "\n",
    "# Reconstruction\n",
    "X_ = inverse_transform(W, H)\n",
    "\n",
    "# Reconstruction error\n",
    "loss(X, X_)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "73bc9ab5",
   "metadata": {},
   "source": [
    "# 使用 sklearn 计算NMF\n",
    "sklearn.decomposition.NMF 是 Scikit-learn 库中的一个类，用于非负矩阵分解（NMF）。NMF 是一种线性降维技术，用于将非负数据矩阵分解为两个非负矩阵的乘积。该方法广泛应用于图像处理、文本挖掘和推荐系统等领域。\n",
    "\n",
    "**主要功能**\n",
    "\n",
    "NMF 将数据矩阵𝑉分解为两个非负矩阵𝑊和𝐻，使得 𝑉≈𝑊𝐻。其中，矩阵 𝑊 包含了基（或主题），矩阵 𝐻 则包含了权重或系数。由于所有矩阵元素都是非负的，这种分解方法特别适合处理只有非负元素的数据，如图像像素值、文本词频等。\n",
    "\n",
    "**主要参数**\n",
    "\n",
    "- n_components: 要保留的成分数量（即基的数量）。如果未指定，则默认值为 None。\n",
    "- init: 初始化方法。可选值包括 'random'、'nndsvd'、'nndsvda'、'nndsvdar' 和 None。默认值为 None，表示使用 'nndsvd' 进行初始化。\n",
    "- solver: 优化算法。可选值包括 'cd'（坐标下降）和 'mu'（乘法更新）。默认值为 'cd'。\n",
    "- beta_loss: 用于定义损失函数。常见值包括 'frobenius'（Frobenius 范数）、'kullback-leibler'（KL 散度）和 'itakura-saito'。默认值为 'frobenius'。\n",
    "- tol: 优化过程的收敛容差。默认值为 1e-4。\n",
    "- max_iter: 优化算法的最大迭代次数。默认值为 200。\n",
    "- random_state: 控制随机数生成，以确保结果的可重复性。默认值为 None。\n",
    "- alpha: 正则化参数。默认值为 0.0。\n",
    "- l1_ratio: L1 正则化的比例。alpha 的权重被分配到 L1 和 L2 正则化。l1_ratio 在 0 和 1 之间。默认值为 0.0。\n",
    "\n",
    "**主要方法**\n",
    "\n",
    "- fit(X, y=None): 拟合模型到数据矩阵 X。\n",
    "- fit_transform(X, y=None, W=None, H=None): 拟合模型并将数据矩阵 X 转换为低维形式。\n",
    "- transform(X): 将数据矩阵 X 转换为低维形式。\n",
    "- inverse_transform(W): 将低维表示的矩阵近似反变换回原始数据矩阵。\n",
    "\n",
    "**示例**\n",
    "\n",
    "以下是使用 NMF 类进行非负矩阵分解的示例：\n",
    "\n",
    "```python\n",
    "import numpy as np\n",
    "from sklearn.decomposition import NMF\n",
    "\n",
    "# 示例数据\n",
    "X = np.array([[1, 2, 3], \n",
    "              [4, 5, 6], \n",
    "              [7, 8, 9]])\n",
    "\n",
    "# 创建 NMF 模型并拟合数据\n",
    "nmf_model = NMF(n_components=2, random_state=42)\n",
    "W = nmf_model.fit_transform(X)\n",
    "H = nmf_model.components_\n",
    "\n",
    "print(\"原始数据矩阵 X:\")\n",
    "print(X)\n",
    "\n",
    "print(\"\\n分解后的矩阵 W:\")\n",
    "print(W)\n",
    "\n",
    "print(\"\\n分解后的矩阵 H:\")\n",
    "print(H)\n",
    "\n",
    "# 近似重构原始矩阵\n",
    "X_approx = np.dot(W, H)\n",
    "print(\"\\n重构后的矩阵 X_approx:\")\n",
    "print(X_approx)\n",
    "```\n",
    "在这个示例中，我们将一个 3x3 的矩阵分解为两个非负矩阵 𝑊 和 𝐻，并展示了如何使用这些矩阵近似重构原始矩阵。\n",
    "\n",
    "**适用场景**\n",
    "\n",
    "NMF 特别适用于以下场景：\n",
    "\n",
    "- 图像处理：将图像分解为基本组件（如图像分解为基础图案）。\n",
    "- 文本挖掘：将文档-词矩阵分解为主题和主题权重矩阵（如主题模型）。\n",
    "- 推荐系统：将用户-物品矩阵分解为用户特征和物品特征矩阵，用于生成推荐。\n",
    "\n",
    "通过使用 NMF，可以有效地将高维非负数据降维，提取出数据的潜在结构或特征，从而在数据分析和建模中发挥重要作用。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "114b18d6",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "4.0000016725824565"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Fit sklearn's NMF with the same settings as the hand-rolled version above\n",
    "from sklearn.decomposition import NMF\n",
    "model = NMF(n_components=3, init='random', max_iter=200, random_state=0)\n",
    "# W: topic space; H: representation of the documents in topic space\n",
    "W = model.fit_transform(X)\n",
    "H = model.components_\n",
    "# Reconstruct X and report the squared reconstruction error\n",
    "X__ = inverse_transform(W, H)\n",
    "loss(X, X__)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
