{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# TODO: extract patent topics with LDA"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "np.random.seed(2022)\n",
    "\n",
    "import gensim\n",
    "from gensim.utils import simple_preprocess\n",
    "from gensim.parsing.preprocessing import STOPWORDS\n",
    "import nltk\n",
    "from nltk.stem import WordNetLemmatizer, SnowballStemmer\n",
    "from nltk.stem.porter import PorterStemmer  # stemming algorithm; explicit import instead of `import *`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Single shared lemmatizer: constructing WordNetLemmatizer once (instead of\n",
    "# once per token inside lemmatize_stemming) avoids repeated setup cost.\n",
    "_lemmatizer = WordNetLemmatizer()\n",
    "\n",
    "def lemmatize_stemming(text):\n",
    "\t'''\n",
    "\tLemmatize a single word (verb POS). Stemming is intentionally disabled.\n",
    "\tparams text: a single word (token)\n",
    "\treturns: the lemmatized word\n",
    "\t'''\n",
    "\t# porter_stemmer = PorterStemmer()  # stemming (kept disabled on purpose)\n",
    "\t# return porter_stemmer.stem(_lemmatizer.lemmatize(text, pos='v'))\n",
    "\treturn _lemmatizer.lemmatize(text, pos='v')\n",
    "\n",
    "def preprocess(text):\n",
    "\t'''\n",
    "\tTokenize, drop stopwords and tokens of length <= 3, then lemmatize each token.\n",
    "\tparams text: a string of words (one patent abstract)\n",
    "\treturns: list of preprocessed tokens\n",
    "\t'''\n",
    "\t# NOTE: a custom dictionary could keep domain-specific technical terms intact during tokenization\n",
    "\t# NOTE: technical keyword formats should be unified before this step\n",
    "\tresult = []\n",
    "\tfor token in simple_preprocess(text):\n",
    "\t\tif token not in STOPWORDS and len(token) > 3:\n",
    "\t\t\tresult.append(lemmatize_stemming(token))\n",
    "\treturn result\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "patent number:3057\n"
     ]
    }
   ],
   "source": [
    "# Load the patent data\n",
    "file_path = r'./data/DIpatent/textandtabindicator.xlsx'\n",
    "# NOTE: the original txt was pasted into Excel and back (provenance of this file)\n",
    "data_df = pd.read_excel(file_path)\n",
    "abstract_list = data_df['abstract'].values\n",
    "print('patent number:{}'.format(len(abstract_list)))\n",
    "\n",
    "# Preprocess every abstract (tokenize / filter stopwords / lemmatize)\n",
    "abstract_preprocessed = [preprocess(abstract) for abstract in abstract_list]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "create dictionary----------------------------\n",
      "create bow_corpus----------------------------\n",
      "run LDA--------------------------------------\n"
     ]
    }
   ],
   "source": [
    "# ---------------- Train LDA with a manually chosen number of topics ------------------------\n",
    "# 1. Build the global dictionary (token -> id, with occurrence counts over the corpus)\n",
    "print('create dictionary----------------------------') \n",
    "dictionary = gensim.corpora.Dictionary(abstract_preprocessed)\n",
    "\n",
    "# Filter tokens:\n",
    "# - drop tokens that appear in fewer than 15 documents (absolute count)\n",
    "# - drop tokens that appear in more than 50% of documents (fraction of corpus, not absolute)\n",
    "# After both steps, keep only the 100,000 most frequent tokens\n",
    "dictionary.filter_extremes(no_below=15, no_above=0.5, keep_n=100000)\n",
    "\n",
    "# 2. Build the bag-of-words corpus with doc2bow:\n",
    "# for each document, a list of (token_id, count) pairs\n",
    "print('create bow_corpus----------------------------') \n",
    "bow_corpus = [dictionary.doc2bow(doc) for doc in abstract_preprocessed]\n",
    "\n",
    "# 3. Run LDA on the bag-of-words corpus\n",
    "print('run LDA--------------------------------------')\n",
    "topicNum = 19  # chosen from the results of LDAtopicnum.ipynb\n",
    "lda_model = gensim.models.LdaMulticore(corpus=bow_corpus,\n",
    "                                       num_topics=topicNum,\n",
    "                                       id2word=dictionary,\n",
    "                                       minimum_probability=0)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "总字典排名最高：\n",
      "0 acquisition\n",
      "1 activity\n",
      "2 agent\n",
      "3 algorithm\n",
      "4 allow\n",
      "5 archive\n",
      "6 basic\n",
      "7 business\n",
      "8 chart\n",
      "9 company\n",
      "10 compliance\n"
     ]
    }
   ],
   "source": [
    "# Show the first top_n (token_id, token) entries of the dictionary\n",
    "top_n = 10\n",
    "count = 0\n",
    "print('总字典排名最高：')\n",
    "for k, v in dictionary.iteritems():\n",
    "\tprint(k, v)\n",
    "\tcount += 1\n",
    "\tif count >= top_n:\t# was `count > top_n`, which printed top_n + 1 entries (11 shown in the saved output)\n",
    "\t\tbreak"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Visualize the document topics with pyLDAvis\n",
    "import pyLDAvis.gensim\n",
    "pyLDAvis.enable_notebook(local=True)\n",
    "vis = pyLDAvis.gensim.prepare(lda_model, bow_corpus, dictionary)\n",
    "pyLDAvis.save_html(vis, './result/lda.html')\n",
    "pyLDAvis.show(vis)  # FIXME: did not display. NOTE(review): newer pyLDAvis renamed this module to pyLDAvis.gensim_models — confirm installed version"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "排名前n的主题-单词分布：\n",
      "Topic: 0 \n",
      " Words: 0.013*\"comprise\" + 0.011*\"data\" + 0.011*\"acid\" + 0.011*\"network\" + 0.009*\"time\" + 0.008*\"user\" + 0.008*\"alkyl\" + 0.008*\"unit\" + 0.008*\"device\" + 0.007*\"image\"\n",
      "Topic: 1 \n",
      " Words: 0.032*\"comprise\" + 0.010*\"acid\" + 0.010*\"step\" + 0.008*\"cell\" + 0.007*\"memory\" + 0.007*\"sequence\" + 0.007*\"composition\" + 0.006*\"second\" + 0.006*\"process\" + 0.006*\"preferably\"\n",
      "Topic: 2 \n",
      " Words: 0.019*\"data\" + 0.017*\"comprise\" + 0.014*\"device\" + 0.010*\"user\" + 0.009*\"second\" + 0.009*\"network\" + 0.008*\"acid\" + 0.007*\"electronic\" + 0.006*\"determine\" + 0.006*\"information\"\n",
      "Topic: 3 \n",
      " Words: 0.023*\"device\" + 0.019*\"data\" + 0.011*\"second\" + 0.010*\"game\" + 0.008*\"network\" + 0.008*\"comprise\" + 0.008*\"user\" + 0.008*\"communication\" + 0.007*\"store\" + 0.007*\"storage\"\n",
      "Topic: 4 \n",
      " Words: 0.037*\"image\" + 0.013*\"data\" + 0.012*\"comprise\" + 0.008*\"information\" + 0.008*\"document\" + 0.007*\"determine\" + 0.007*\"process\" + 0.007*\"signal\" + 0.007*\"step\" + 0.006*\"acid\"\n",
      "Topic: 5 \n",
      " Words: 0.049*\"data\" + 0.021*\"model\" + 0.015*\"learn\" + 0.010*\"comprise\" + 0.010*\"image\" + 0.009*\"machine\" + 0.009*\"process\" + 0.008*\"train\" + 0.008*\"input\" + 0.008*\"generate\"\n",
      "Topic: 6 \n",
      " Words: 0.017*\"comprise\" + 0.010*\"second\" + 0.010*\"data\" + 0.010*\"determine\" + 0.009*\"sequence\" + 0.009*\"user\" + 0.007*\"multiple\" + 0.007*\"content\" + 0.007*\"acid\" + 0.005*\"train\"\n",
      "Topic: 7 \n",
      " Words: 0.028*\"data\" + 0.020*\"comprise\" + 0.010*\"control\" + 0.010*\"process\" + 0.007*\"composition\" + 0.007*\"train\" + 0.007*\"time\" + 0.006*\"second\" + 0.006*\"sequence\" + 0.006*\"have\"\n",
      "Topic: 8 \n",
      " Words: 0.018*\"comprise\" + 0.010*\"amino\" + 0.009*\"control\" + 0.008*\"compound\" + 0.007*\"acid\" + 0.007*\"sensor\" + 0.007*\"have\" + 0.006*\"select\" + 0.006*\"plant\" + 0.006*\"substrate\"\n",
      "Topic: 9 \n",
      " Words: 0.031*\"data\" + 0.018*\"device\" + 0.017*\"user\" + 0.012*\"process\" + 0.011*\"information\" + 0.009*\"determine\" + 0.008*\"vehicle\" + 0.008*\"network\" + 0.007*\"receive\" + 0.007*\"comprise\"\n",
      "Topic: 10 \n",
      " Words: 0.017*\"comprise\" + 0.014*\"process\" + 0.012*\"data\" + 0.010*\"information\" + 0.009*\"sequence\" + 0.008*\"vector\" + 0.008*\"second\" + 0.007*\"control\" + 0.007*\"user\" + 0.007*\"machine\"\n",
      "Topic: 11 \n",
      " Words: 0.016*\"user\" + 0.014*\"comprise\" + 0.011*\"device\" + 0.010*\"preferably\" + 0.009*\"data\" + 0.008*\"process\" + 0.007*\"acid\" + 0.006*\"step\" + 0.006*\"aluminum\" + 0.006*\"access\"\n",
      "Topic: 12 \n",
      " Words: 0.023*\"image\" + 0.016*\"comprise\" + 0.016*\"acid\" + 0.014*\"process\" + 0.013*\"signal\" + 0.011*\"sequence\" + 0.009*\"device\" + 0.008*\"amino\" + 0.008*\"second\" + 0.007*\"control\"\n",
      "Topic: 13 \n",
      " Words: 0.043*\"layer\" + 0.013*\"comprise\" + 0.012*\"position\" + 0.011*\"device\" + 0.008*\"signal\" + 0.007*\"data\" + 0.007*\"form\" + 0.006*\"user\" + 0.006*\"second\" + 0.006*\"control\"\n",
      "Topic: 14 \n",
      " Words: 0.022*\"process\" + 0.018*\"model\" + 0.013*\"data\" + 0.012*\"network\" + 0.010*\"device\" + 0.010*\"gene\" + 0.009*\"comprise\" + 0.009*\"unit\" + 0.008*\"determine\" + 0.008*\"information\"\n",
      "Topic: 15 \n",
      " Words: 0.023*\"comprise\" + 0.011*\"second\" + 0.011*\"device\" + 0.010*\"layer\" + 0.008*\"prefer\" + 0.008*\"composition\" + 0.008*\"level\" + 0.007*\"cancer\" + 0.007*\"material\" + 0.007*\"acid\"\n",
      "Topic: 16 \n",
      " Words: 0.018*\"sample\" + 0.014*\"acid\" + 0.012*\"cell\" + 0.012*\"select\" + 0.011*\"position\" + 0.010*\"have\" + 0.010*\"search\" + 0.009*\"comprise\" + 0.008*\"amino\" + 0.008*\"change\"\n",
      "Topic: 17 \n",
      " Words: 0.034*\"user\" + 0.029*\"device\" + 0.019*\"information\" + 0.012*\"receive\" + 0.010*\"step\" + 0.009*\"electronic\" + 0.008*\"process\" + 0.008*\"network\" + 0.008*\"display\" + 0.008*\"content\"\n",
      "Topic: 18 \n",
      " Words: 0.015*\"comprise\" + 0.013*\"process\" + 0.012*\"search\" + 0.011*\"preferably\" + 0.010*\"data\" + 0.008*\"information\" + 0.008*\"user\" + 0.008*\"component\" + 0.007*\"multiple\" + 0.007*\"network\"\n"
     ]
    }
   ],
   "source": [
    "# Topic-word matrix: print the top-weighted words of every topic\n",
    "print('排名前n的主题-单词分布：')\n",
    "for idx, topic in lda_model.print_topics(-1):\n",
    "\tprint('Topic: {} \\n Words: {}'.format(idx, topic))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[(0, 0.0009076157), (1, 0.0009076157), (2, 0.0009076157), (3, 0.0009076157), (4, 0.0009076157), (5, 0.0009076157), (6, 0.0009076157), (7, 0.0009076157), (8, 0.9836629), (9, 0.0009076157), (10, 0.0009076157), (11, 0.0009076157), (12, 0.0009076157), (13, 0.0009076157), (14, 0.0009076157), (15, 0.0009076157), (16, 0.0009076157), (17, 0.0009076157), (18, 0.0009076157)]\n"
     ]
    }
   ],
   "source": [
    "# Inspect the topic distribution of a single document (index 10)\n",
    "docTopic = lda_model.get_document_topics(bow_corpus[10], per_word_topics=False)\n",
    "print(docTopic)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[6.58152450e-04 6.58152450e-04 6.58152450e-04 ... 6.58152450e-04\n",
      "  6.58152450e-04 1.44791096e-01]\n",
      " [1.22436101e-03 1.22436101e-03 1.42633155e-01 ... 1.22436101e-03\n",
      "  2.18856871e-01 1.22436101e-03]\n",
      " [2.25929901e-01 7.63054995e-04 7.63054995e-04 ... 7.63054995e-04\n",
      "  7.63054995e-04 7.63054995e-04]\n",
      " ...\n",
      " [4.01882082e-01 8.09887948e-04 8.09887948e-04 ... 8.09887948e-04\n",
      "  8.09887948e-04 8.09887948e-04]\n",
      " [5.66176837e-04 5.66176837e-04 5.66176837e-04 ... 5.66176837e-04\n",
      "  5.66176837e-04 5.66176837e-04]\n",
      " [2.72753387e-04 4.26639736e-01 2.72753387e-04 ... 2.72753387e-04\n",
      "  2.72753357e-04 5.68723440e-01]]\n"
     ]
    }
   ],
   "source": [
    "# Build the document-topic matrix (rows: patents, columns: topics)\n",
    "patentNum = len(bow_corpus)\n",
    "docTopicMatrix = np.zeros((patentNum, topicNum))\n",
    "for patentIndex, bow in enumerate(bow_corpus):\n",
    "    docTopic = lda_model.get_document_topics(bow, per_word_topics=False)\n",
    "    for topicIndex, value in docTopic:\n",
    "        docTopicMatrix[patentIndex, topicIndex] = value\n",
    "\n",
    "print(docTopicMatrix)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.6.8 64-bit",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.8"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "d207591e6ff77a7c5fb4cef0dd9fd3703274637a9d0902d2045beb3a65bf572a"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
