{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true,
    "pycharm": {
     "name": "#%%\n"
    },
    "trusted": false
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/lib/python3.8/site-packages/scipy/sparse/sparsetools.py:21: DeprecationWarning: `scipy.sparse.sparsetools` is deprecated!\n",
      "scipy.sparse.sparsetools is a private module for scipy.sparse, and should not be used.\n",
      "  _deprecated()\n"
     ]
    }
   ],
   "source": [
    "import jieba\n",
    "import os\n",
    "from typing import List\n",
    "from mlxtend.frequent_patterns import fpgrowth\n",
    "from mlxtend.frequent_patterns import apriori\n",
    "import xlrd\n",
    "import logging\n",
    "import time\n",
    "import re\n",
    "from harvesttext import HarvestText\n",
    "from harvesttext.resources import get_baidu_stopwords\n",
    "from tqdm import tqdm\n",
    "import jieba\n",
    "from gensim import corpora\n",
    "from gensim.models import LdaModel\n",
    "from gensim.corpora import Dictionary\n",
    "from gensim.models import CoherenceModel\n",
    "from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n",
    "from gensim.models.doc2vec import Doc2Vec\n",
    "import gensim\n",
    "from sys import getsizeof as getsize\n",
    "\n",
    "TaggedDocument = gensim.models.doc2vec.TaggedDocument\n",
    "\n",
    "ht = HarvestText()\n",
    "\n",
    "# TODO\n",
    "# 如果后期要转换为命令行格式，这些都是必填的参数项\n",
    "\n",
    "\n",
    "# 这些都是全局变量。全局变量指定了很多内容。\n",
    "# 主要还是依赖xls原始捕捉文件。\n",
    "xls_folder_path = '../data/paper23/221030_new_LDA/tangshan'  # 存放表格数据的路径, str\n",
    "xls_column = 2  # 要抽取的文本列, int\n",
    "frequent_mode = ''  # 选择何种方法进行频繁项集抽取\n",
    "log_info = 'test'\n",
    "exp_name = 'test'\n",
    "\n",
    "\n",
    "# 日志信息基础确认\n",
    "logging.basicConfig(\n",
    "    level=logging.DEBUG,\n",
    "    filemode='a',\n",
    "    filename=f'./log/{time.strftime(\"%Y-%m-%d-%H:%M:%S\", time.localtime())}'\n",
    "             f'_{xls_column}_{log_info}.log',\n",
    "    format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s'\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    },
    "trusted": false
   },
   "outputs": [],
   "source": [
    "def clean(text):\n",
    "    \"\"\"\n",
    "    无效字符清理\n",
    "    :param text:\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    cop = re.compile(\"[^\\u4e00-\\u9fa5^a-z^A-Z^0-9,.，。！？!?()（）@《》]\") # 匹配不是中文、大小写、数字的其他字符\n",
    "    string1 = cop.sub('', text)\n",
    "    return string1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    },
    "trusted": false
   },
   "outputs": [],
   "source": [
    "def original_text_load(path):\n",
    "    \"\"\"\n",
    "\n",
    "    应该是要把多个csv一起读，然后拼凑到一起\n",
    "    通过文件夹指定时间区间，而这个函数只负责聚合所有文件夹里的文件\n",
    "    读成一个dataframe是2.0的方案\n",
    "\n",
    "    但是目前来说，保留其他信息并没用。所以还是输出文档列表比较好。\n",
    "\n",
    "    # parameter\n",
    "\n",
    "    path : `str`\n",
    "        文件夹的路径\n",
    "\n",
    "    # return\n",
    "\n",
    "    docs_list : `List[str]`\n",
    "        文档的列表\n",
    "\n",
    "    \"\"\"\n",
    "    file_list = os.listdir(path)\n",
    "    file_list = [i for i in file_list if not i.startswith('.') and i.endswith('.xls')]  # 去除隐藏文件无关项\n",
    "    docs_list = []\n",
    "    tag_list = []\n",
    "\n",
    "    logging.info(f'开始加载数据 {xls_folder_path}')\n",
    "    print('开始加载数据\\n')\n",
    "    for xls_file in tqdm(file_list):\n",
    "        work_book = xlrd.open_workbook(os.path.join(xls_folder_path, xls_file))\n",
    "        work_sheets = work_book.sheets()  # 默认第一个工作表\n",
    "        for sheet in work_sheets:\n",
    "            col_data = sheet.col_values(xls_column)\n",
    "            col_data.pop(0)  # 删除表头\n",
    "            docs_list = docs_list + col_data\n",
    "        tag_list += [xls_file.replace('.xls', '')] * len(docs_list)  # 制作标签列表\n",
    "\n",
    "    only = set()\n",
    "    clean_doc_list = []\n",
    "    clean_tag_list = []\n",
    "    logging.info('清洗数据 开始')\n",
    "    print('开始 清洗数据\\n')\n",
    "    for index, doc in tqdm(enumerate(docs_list)):\n",
    "        if hash(clean(doc)[:10]) not in only:  # 确认不重复\n",
    "            only.add(hash(clean(doc)[:10]))\n",
    "        else:\n",
    "            continue\n",
    "        tmp_text = ht.clean_text(doc).replace('...', '').replace('#', '')\n",
    "        if len(tmp_text) <= 7:  # 小于七个中文字符直接放弃\n",
    "            continue\n",
    "        clean_doc_list.append(tmp_text)\n",
    "        clean_tag_list.append(tag_list[index])\n",
    "\n",
    "    logging.info(f'全部 doc_length: {len(docs_list)}')\n",
    "    logging.info(f'有效 doc_length: {len(clean_doc_list)}')\n",
    "    return clean_doc_list, clean_tag_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    },
    "trusted": false
   },
   "outputs": [],
   "source": [
    "def preprocess_vectorize(text_list: List[str]) -> List[str]:\n",
    "    \"\"\"\n",
    "    为下一步向量化准备语料\n",
    "\n",
    "    :param text_list:\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    stop_word = list(get_baidu_stopwords())\n",
    "    segment_jieba = lambda text: \" \".join([i for i in jieba.lcut(text) if i not in stop_word])\n",
    "    corpus = list()\n",
    "    for doc in tqdm(text_list):\n",
    "        corpus.append(segment_jieba(doc))\n",
    "    print(corpus[1])\n",
    "    logging.info(f'corpus size: {round(getsize(corpus) / 1024, 3)} KB')\n",
    "    return corpus"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    },
    "trusted": false
   },
   "outputs": [],
   "source": [
    "def vectorize_tfidf(text_list: List[str]):\n",
    "    \"\"\"\n",
    "    根据tfidf获取权重\n",
    "    这里直接返回的是整一个矩阵。可能不太妙\n",
    "    :param text_list:\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    time_s = time.perf_counter()\n",
    "    vectorizer = CountVectorizer(min_df=0.0002, max_features=100000)\n",
    "    transformer = TfidfTransformer()\n",
    "    tfidf = transformer.fit_transform(vectorizer.fit_transform(text_list))\n",
    "    word = vectorizer.get_feature_names()\n",
    "    print(f\"feature length: {len(word)}\")\n",
    "    weight = tfidf.toarray()\n",
    "    time_e = time.perf_counter()\n",
    "    logging.info(f'tfidf weight generate time: {(time_e - time_s) * 1000} ms')\n",
    "    logging.info(f'tfidf weight size: {round(getsize(weight) / 1024 ** 2, 3)} MB')\n",
    "    return weight"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    },
    "trusted": false
   },
   "outputs": [],
   "source": [
    "def vectorize_doc2vec(text_list: List[str]):\n",
    "    \"\"\"\n",
    "    基于doc2vec的向量模型生成\n",
    "    :param text_list:\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    x_train = list()\n",
    "    for i, text in enumerate(text_list):\n",
    "        word_list = text.split(' ')\n",
    "        l = len(word_list)\n",
    "        word_list[l - 1] = word_list[l - 1].strip()\n",
    "        document = TaggedDocument(word_list, tags=[i])\n",
    "        x_train.append(document)\n",
    "\n",
    "    time_s = time.perf_counter()\n",
    "    model_d2v = Doc2Vec(x_train, min_count=10, window=3, vector_size=200, sample=1e-3, negative=5, workers=10)\n",
    "    model_d2v.train(x_train, total_examples=model_d2v.corpus_count, epochs=100)\n",
    "    time_e = time.perf_counter()\n",
    "    logging.info(f'd2v weight generate time: {(time_e - time_s) * 1000} ms')\n",
    "    logging.info(f'd2v weight size: {round(getsize(model_d2v) / 1024 ** 2, 3)} MB')\n",
    "    model_d2v.save(f'model/model_{exp_name}')\n",
    "\n",
    "    return model_d2v"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    },
    "trusted": false
   },
   "outputs": [],
   "source": [
     "def trad_cluster(mode, text_list, tag_list):\n",
     "    # NOTE(review): this function appears unfinished — it builds the two vector\n",
     "    # representations but never clusters them or returns anything; `mode` is unused.\n",
     "\n",
     "    # Infer doc2vec vectors from the previously cached model on disk.\n",
     "    infer_vector_list = []\n",
     "    model_d2v = Doc2Vec.load(f\"model/model_{exp_name}\")\n",
     "    for text, label in zip(text_list, tag_list):\n",
     "        # NOTE(review): infer_vector expects a token list, but `text` looks like a\n",
     "        # space-joined string here — confirm whether text.split() was intended.\n",
     "        vector = model_d2v.infer_vector(text)\n",
     "        infer_vector_list.append(vector)\n",
     "\n",
     "    # TF-IDF matrix is regenerated on the fly (not cached).\n",
     "    model_tfidf = vectorize_tfidf(text_list)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   },
   "source": [
    "text_content, tag_content = original_text_load(xls_folder_path)\n",
    "text_content = preprocess_vectorize(text_content)\n",
    "vec_tfidf = vectorize_tfidf(text_content)\n",
    "vec_tfidf[10]\n",
    "vec_d2v = vectorize_doc2vec(text_content)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    },
    "trusted": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "开始加载数据\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 2/2 [00:01<00:00,  1.76it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "开始 清洗数据\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "40000it [00:01, 25327.46it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "你妈的 想吐 快死 快去死 RT: 唐山烧烤店打人事件已抓获8人我看到这头皮发麻了……太恐怖了啊\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 0/17599 [00:00<?, ?it/s]Building prefix dict from the default dictionary ...\n",
      "Loading model from cache /tmp/jieba.cache\n",
      "Loading model cost 0.488 seconds.\n",
      "Prefix dict has been built successfully.\n",
      "100%|██████████| 17599/17599 [00:45<00:00, 386.55it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['唐山', '烧烤', '烧烤店', '打人', '事件', '抓获', '头皮', '发麻', '头皮发麻', '恐怖']\n",
      "开始 寻找最佳主题\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 12%|█▎        | 1/8 [02:34<18:04, 154.95s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "主题 2, 分数为：0.4085024895481057\n",
      "(0, '0.021*\"视频\" + 0.016*\"举报\" + 0.014*\"女子\" + 0.012*\"男子\" + 0.012*\"微博\" + 0.011*\"实名\" + 0.009*\"社会\" + 0.008*\"展开\" + 0.008*\"殴打\" + 0.008*\"多名\"')\n",
      "(1, '0.017*\"暴力\" + 0.017*\"骚扰\" + 0.015*\"展开\" + 0.014*\"性别\" + 0.014*\"女性\" + 0.009*\"一个\" + 0.008*\"殴打\" + 0.008*\"法律\" + 0.008*\"公共\" + 0.006*\"性骚扰\"')\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 25%|██▌       | 2/8 [05:27<16:32, 165.44s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "主题 3, 分数为：0.4168035871938561\n",
      "(0, '0.021*\"举报\" + 0.014*\"实名\" + 0.013*\"视频\" + 0.009*\"女子\" + 0.008*\"展开\" + 0.008*\"社会\" + 0.007*\"微博\" + 0.007*\"老板\" + 0.007*\"暴力\" + 0.007*\"人员\"')\n",
      "(1, '0.028*\"骚扰\" + 0.024*\"暴力\" + 0.021*\"性别\" + 0.019*\"女性\" + 0.014*\"展开\" + 0.014*\"女生\" + 0.010*\"公共\" + 0.010*\"性骚扰\" + 0.007*\"有人\" + 0.007*\"毫无\"')\n",
      "(2, '0.024*\"视频\" + 0.020*\"男子\" + 0.018*\"女子\" + 0.015*\"姑娘\" + 0.013*\"殴打\" + 0.013*\"多名\" + 0.013*\"微博\" + 0.012*\"一个\" + 0.011*\"展开\" + 0.011*\"衣服\"')\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 38%|███▊      | 3/8 [08:09<13:38, 163.65s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "主题 4, 分数为：0.4646787287547143\n",
      "(0, '0.032*\"举报\" + 0.021*\"实名\" + 0.018*\"女子\" + 0.015*\"视频\" + 0.012*\"微博\" + 0.012*\"男子\" + 0.010*\"酒吧\" + 0.009*\"人员\" + 0.009*\"多名\" + 0.009*\"抓获\"')\n",
      "(1, '0.041*\"视频\" + 0.016*\"男子\" + 0.016*\"微博\" + 0.015*\"老板\" + 0.012*\"女子\" + 0.012*\"发声\" + 0.012*\"女生\" + 0.011*\"殴打\" + 0.011*\"骚扰\" + 0.011*\"店主\"')\n",
      "(2, '0.023*\"一个\" + 0.015*\"展开\" + 0.015*\"姑娘\" + 0.012*\"暴力\" + 0.011*\"衣服\" + 0.011*\"社会\" + 0.011*\"女性\" + 0.009*\"受害\" + 0.008*\"骚扰\" + 0.008*\"法律\"')\n",
      "(3, '0.029*\"性别\" + 0.027*\"骚扰\" + 0.017*\"暴力\" + 0.016*\"公共\" + 0.015*\"展开\" + 0.014*\"严惩\" + 0.013*\"性骚扰\" + 0.012*\"女性\" + 0.011*\"滋事\" + 0.011*\"寻衅\"')\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 50%|█████     | 4/8 [10:51<10:53, 163.29s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "主题 5, 分数为：0.4839250144176746\n",
      "(0, '0.032*\"严惩\" + 0.023*\"视频\" + 0.018*\"多名\" + 0.018*\"女子\" + 0.018*\"武鸣\" + 0.016*\"姐姐\" + 0.014*\"社会\" + 0.012*\"酒吧\" + 0.012*\"展开\" + 0.012*\"狂徒\"')\n",
      "(1, '0.029*\"视频\" + 0.024*\"姑娘\" + 0.023*\"一个\" + 0.019*\"衣服\" + 0.014*\"暴力\" + 0.014*\"殴打\" + 0.013*\"骚扰\" + 0.012*\"女性\" + 0.012*\"几个\" + 0.012*\"微博\"')\n",
      "(2, '0.042*\"骚扰\" + 0.041*\"性别\" + 0.023*\"暴力\" + 0.019*\"性骚扰\" + 0.017*\"女性\" + 0.016*\"公共\" + 0.016*\"展开\" + 0.014*\"暴打\" + 0.013*\"女生\" + 0.012*\"无关\"')\n",
      "(3, '0.045*\"举报\" + 0.033*\"实名\" + 0.015*\"女子\" + 0.011*\"男子\" + 0.010*\"殴打\" + 0.009*\"团伙\" + 0.009*\"暴力\" + 0.008*\"维权\" + 0.008*\"酒吧\" + 0.008*\"扫黑\"')\n",
      "(4, '0.014*\"视频\" + 0.012*\"展开\" + 0.010*\"女子\" + 0.009*\"社会\" + 0.007*\"男子\" + 0.007*\"案件\" + 0.007*\"发声\" + 0.007*\"微博\" + 0.006*\"人员\" + 0.006*\"发生\"')\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 62%|██████▎   | 5/8 [13:32<08:06, 162.29s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "主题 6, 分数为：0.498519266298487\n",
      "(0, '0.038*\"视频\" + 0.022*\"殴打\" + 0.022*\"微博\" + 0.018*\"多名\" + 0.017*\"女生\" + 0.015*\"男子\" + 0.014*\"骚扰\" + 0.013*\"武鸣\" + 0.012*\"姐姐\" + 0.011*\"社会\"')\n",
      "(1, '0.015*\"严惩\" + 0.015*\"举报\" + 0.014*\"社会\" + 0.014*\"视频\" + 0.012*\"微博\" + 0.012*\"人员\" + 0.010*\"涉案\" + 0.010*\"抓获\" + 0.010*\"涉案人\" + 0.010*\"涉案人员\"')\n",
      "(2, '0.040*\"性别\" + 0.040*\"骚扰\" + 0.032*\"暴力\" + 0.024*\"女性\" + 0.018*\"性骚扰\" + 0.016*\"展开\" + 0.015*\"公共\" + 0.013*\"有人\" + 0.013*\"一个\" + 0.012*\"女生\"')\n",
      "(3, '0.021*\"法律\" + 0.015*\"展开\" + 0.014*\"毫无\" + 0.013*\"徐克\" + 0.013*\"情况\" + 0.012*\"滋事\" + 0.011*\"寻衅\" + 0.011*\"寻衅滋事\" + 0.011*\"学生\" + 0.011*\"大学\"')\n",
      "(4, '0.035*\"姑娘\" + 0.035*\"一个\" + 0.028*\"衣服\" + 0.015*\"展开\" + 0.015*\"女子\" + 0.013*\"女孩\" + 0.011*\"骚扰\" + 0.010*\"黑衣\" + 0.010*\"殴打\" + 0.010*\"朋友\"')\n",
      "(5, '0.026*\"举报\" + 0.025*\"女子\" + 0.022*\"实名\" + 0.018*\"男子\" + 0.017*\"视频\" + 0.013*\"老板\" + 0.012*\"发声\" + 0.010*\"女孩\" + 0.009*\"围殴\" + 0.008*\"暴力\"')\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 75%|███████▌  | 6/8 [16:12<05:23, 161.60s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "主题 7, 分数为：0.47944816345645874\n",
      "(0, '0.045*\"举报\" + 0.030*\"实名\" + 0.017*\"老板\" + 0.014*\"视频\" + 0.014*\"女子\" + 0.011*\"暴力\" + 0.009*\"老板娘\" + 0.009*\"多人\" + 0.009*\"社会\" + 0.008*\"发声\"')\n",
      "(1, '0.015*\"毫无\" + 0.014*\"徐克\" + 0.013*\"展开\" + 0.013*\"殴打\" + 0.013*\"案发\" + 0.012*\"发生\" + 0.009*\"成龙\" + 0.009*\"教育\" + 0.009*\"包庇\" + 0.008*\"抓获\"')\n",
      "(2, '0.046*\"严惩\" + 0.018*\"社会\" + 0.017*\"除恶\" + 0.015*\"扫黑\" + 0.014*\"足以\" + 0.014*\"不足以\" + 0.014*\"狂徒\" + 0.012*\"所有人\" + 0.012*\"妇女\" + 0.012*\"中国\"')\n",
      "(3, '0.029*\"姑娘\" + 0.024*\"一个\" + 0.021*\"衣服\" + 0.021*\"女孩\" + 0.018*\"男子\" + 0.015*\"女子\" + 0.012*\"展开\" + 0.011*\"视频\" + 0.009*\"围殴\" + 0.009*\"人员\"')\n",
      "(4, '0.044*\"骚扰\" + 0.031*\"性别\" + 0.029*\"暴力\" + 0.027*\"女生\" + 0.021*\"女性\" + 0.019*\"视频\" + 0.013*\"性骚扰\" + 0.012*\"展开\" + 0.012*\"男子\" + 0.012*\"公共\"')\n",
      "(5, '0.020*\"殴打\" + 0.020*\"滋事\" + 0.020*\"寻衅\" + 0.020*\"寻衅滋事\" + 0.014*\"显示\" + 0.013*\"视频\" + 0.011*\"案件\" + 0.011*\"判决\" + 0.011*\"女性\" + 0.011*\"展开\"')\n",
      "(6, '0.031*\"视频\" + 0.021*\"微博\" + 0.020*\"社会\" + 0.019*\"女子\" + 0.018*\"酒吧\" + 0.017*\"多名\" + 0.014*\"有人\" + 0.012*\"武鸣\" + 0.012*\"姐姐\" + 0.011*\"关注\"')\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 88%|████████▊ | 7/8 [18:49<02:39, 159.89s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "主题 8, 分数为：0.4751570741238911\n",
      "(0, '0.043*\"骚扰\" + 0.036*\"性别\" + 0.025*\"公共\" + 0.021*\"暴力\" + 0.019*\"性骚扰\" + 0.017*\"展开\" + 0.015*\"暴打\" + 0.015*\"女性\" + 0.015*\"殴打\" + 0.013*\"无关\"')\n",
      "(1, '0.025*\"视频\" + 0.014*\"微博\" + 0.014*\"展开\" + 0.014*\"社会\" + 0.013*\"有人\" + 0.011*\"成龙\" + 0.010*\"法律\" + 0.010*\"毫无\" + 0.010*\"发声\" + 0.009*\"维权\"')\n",
      "(2, '0.047*\"举报\" + 0.033*\"实名\" + 0.027*\"女子\" + 0.022*\"视频\" + 0.016*\"男子\" + 0.015*\"老板\" + 0.015*\"酒吧\" + 0.013*\"多名\" + 0.012*\"团伙\" + 0.012*\"微博\"')\n",
      "(3, '0.070*\"姑娘\" + 0.054*\"衣服\" + 0.045*\"一个\" + 0.029*\"女孩\" + 0.020*\"骚扰\" + 0.020*\"朋友\" + 0.018*\"几个\" + 0.016*\"男子\" + 0.016*\"黑衣\" + 0.015*\"黑衣服\"')\n",
      "(4, '0.034*\"女生\" + 0.028*\"视频\" + 0.024*\"男子\" + 0.024*\"骚扰\" + 0.022*\"殴打\" + 0.019*\"涉案人\" + 0.019*\"涉案人员\" + 0.019*\"多名\" + 0.015*\"女子\" + 0.014*\"涉案\"')\n",
      "(5, '0.015*\"抓获\" + 0.013*\"一个\" + 0.012*\"暴力\" + 0.010*\"犯罪\" + 0.010*\"展开\" + 0.009*\"女性\" + 0.009*\"施暴\" + 0.009*\"社会\" + 0.008*\"势力\" + 0.008*\"令人\"')\n",
      "(6, '0.026*\"暴力\" + 0.021*\"女性\" + 0.016*\"女孩\" + 0.016*\"有人\" + 0.013*\"受害\" + 0.013*\"所有人\" + 0.013*\"除恶\" + 0.013*\"社会\" + 0.012*\"噩梦\" + 0.011*\"扫黑\"')\n",
      "(7, '0.059*\"严惩\" + 0.019*\"足以\" + 0.019*\"不足以\" + 0.018*\"狂徒\" + 0.016*\"妇女\" + 0.015*\"展开\" + 0.014*\"后续\" + 0.014*\"殴打\" + 0.013*\"女性\" + 0.013*\"中国\"')\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 8/8 [21:25<00:00, 160.70s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "主题 9, 分数为：0.49858916718810053\n",
      "(0, '0.077*\"严惩\" + 0.028*\"令人\" + 0.026*\"足以\" + 0.025*\"不足以\" + 0.022*\"中国\" + 0.022*\"妇女\" + 0.021*\"狂徒\" + 0.020*\"报复\" + 0.018*\"社会\" + 0.016*\"底线\"')\n",
      "(1, '0.064*\"举报\" + 0.043*\"实名\" + 0.020*\"女子\" + 0.020*\"酒吧\" + 0.017*\"团伙\" + 0.016*\"微博\" + 0.014*\"武鸣\" + 0.013*\"视频\" + 0.013*\"姐姐\" + 0.012*\"社会\"')\n",
      "(2, '0.033*\"视频\" + 0.024*\"几个\" + 0.021*\"细思极\" + 0.017*\"勇敢\" + 0.012*\"赶紧\" + 0.012*\"事情\" + 0.011*\"思考\" + 0.010*\"老板\" + 0.010*\"女生\" + 0.010*\"流氓\"')\n",
      "(3, '0.033*\"发声\" + 0.022*\"成龙\" + 0.019*\"毫无\" + 0.018*\"展开\" + 0.018*\"徐克\" + 0.016*\"老板娘\" + 0.016*\"老板\" + 0.014*\"涉事\" + 0.014*\"学生\" + 0.012*\"案发\"')\n",
      "(4, '0.027*\"殴打\" + 0.027*\"维权\" + 0.022*\"滋事\" + 0.021*\"寻衅\" + 0.021*\"寻衅滋事\" + 0.021*\"显示\" + 0.019*\"案件\" + 0.016*\"刑事\" + 0.015*\"公共\" + 0.015*\"场所\"')\n",
      "(5, '0.037*\"视频\" + 0.029*\"男子\" + 0.028*\"姑娘\" + 0.024*\"一个\" + 0.023*\"女孩\" + 0.021*\"衣服\" + 0.016*\"女生\" + 0.015*\"骚扰\" + 0.014*\"女子\" + 0.014*\"殴打\"')\n",
      "(6, '0.052*\"性别\" + 0.049*\"骚扰\" + 0.029*\"暴力\" + 0.023*\"性骚扰\" + 0.020*\"公共\" + 0.019*\"女性\" + 0.018*\"女生\" + 0.018*\"拒绝\" + 0.015*\"无关\" + 0.015*\"男生\"')\n",
      "(7, '0.019*\"视频\" + 0.017*\"社会\" + 0.017*\"暴力\" + 0.015*\"有人\" + 0.013*\"女子\" + 0.012*\"微博\" + 0.011*\"受害\" + 0.010*\"发生\" + 0.009*\"受害者\" + 0.008*\"关注\"')\n",
      "(8, '0.018*\"人员\" + 0.014*\"公安\" + 0.013*\"殴打\" + 0.012*\"嫌犯\" + 0.012*\"涉案\" + 0.012*\"展开\" + 0.011*\"嫌疑\" + 0.011*\"嫌疑人\" + 0.011*\"涉案人\" + 0.011*\"涉案人员\"')\n",
      "(0, '0.038*\"视频\" + 0.022*\"殴打\" + 0.022*\"微博\" + 0.018*\"多名\" + 0.017*\"女生\"')\n",
      "(1, '0.015*\"严惩\" + 0.015*\"举报\" + 0.014*\"社会\" + 0.014*\"视频\" + 0.012*\"微博\"')\n",
      "(2, '0.040*\"性别\" + 0.040*\"骚扰\" + 0.032*\"暴力\" + 0.024*\"女性\" + 0.018*\"性骚扰\"')\n",
      "(3, '0.021*\"法律\" + 0.015*\"展开\" + 0.014*\"毫无\" + 0.013*\"徐克\" + 0.013*\"情况\"')\n",
      "(4, '0.035*\"姑娘\" + 0.035*\"一个\" + 0.028*\"衣服\" + 0.015*\"展开\" + 0.015*\"女子\"')\n",
      "(5, '0.026*\"举报\" + 0.025*\"女子\" + 0.022*\"实名\" + 0.018*\"男子\" + 0.017*\"视频\"')\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "def keyword_find(docs_list, method='jieba_tfidf') -> List[str]:\n",
    "    \"\"\"\n",
    "    找到关键词，输出为关键词列表，\n",
    "    但是关键词抽取有不同的方案。\n",
    "\n",
    "    # parameters\n",
    "\n",
    "    docs_list : `List[str]`\n",
    "        文档列表\n",
    "\n",
    "    method : `str`\n",
    "        只能是 jieba_tfidf 或者 textrank\n",
    "        因为目前使用的是harvesttext作为基础\n",
    "        后续可以加入jiagu或者直接使用jieba\n",
    "\n",
    "    # return\n",
    "\n",
    "    keywords_list : `List[list[str]]`\n",
    "        对应每一个文档的一组关键词\n",
    "\n",
    "    \"\"\"\n",
    "    keywords_list = []\n",
    "\n",
    "    print('开始提取关键词\\n')\n",
    "    for doc in tqdm(docs_list):\n",
    "        kws = ht.extract_keywords(text=doc, topK=5, method=method)\n",
    "        keywords_list.append(kws)\n",
    "\n",
    "    return keywords_list\n",
    "\n",
    "\n",
     "def frequent_pattern_find(word_group_list) -> List[List[str]]:\n",
     "    \"\"\"\n",
     "    Mine frequent patterns from keyword groups.\n",
     "\n",
     "    NOTE(review): unimplemented stub — the body is only this docstring, so the\n",
     "    function currently returns None. The mining hyper-parameters still need to\n",
     "    be designed before implementing.\n",
     "\n",
     "    # parameter\n",
     "\n",
     "    word_group_list : `List[List[str]]`\n",
     "        the word groups\n",
     "\n",
     "    # return\n",
     "\n",
     "    frequent_patterns : `List[List[str]]`\n",
     "        the frequent patterns\n",
     "\n",
     "    \"\"\"\n",
    "\n",
    "\n",
    "def LDA_topic_find(doc_list, topic_num=10, epoch=50) -> LdaModel:\n",
    "    \"\"\"\n",
    "    LDA 主题分析，这本来是一个比较复杂的问题。\n",
    "    涉及到多个超参数和一些额外判断的技巧。\n",
    "    LDA只会弄出主题，但是这个主题到底是什么，是否值得关注，\n",
    "    都需要你自己判断。\n",
    "\n",
    "    # parameter\n",
    "\n",
    "    doc_list : `List[str]`\n",
    "        文档列表\n",
    "\n",
    "    topic_num : `int`\n",
    "        考虑有多少个主题, 最大值\n",
    "        属于超参数\n",
    "\n",
    "    epoch : `int`\n",
    "        训练的轮次\n",
    "        属于超参数\n",
    "\n",
    "    # return\n",
    "\n",
    "    lda_model : gensim.models.LdaModel\n",
    "        最终训练好的模型\n",
    "\n",
    "    \"\"\"\n",
    "\n",
    "    stop_word = list(get_baidu_stopwords())\n",
    "    cut_docs = []\n",
    "    for doc in tqdm(doc_list):  # 首先进行分词\n",
    "        doc = re.sub('[^\\u4e00-\\u9fa5]+', ' ', doc)\n",
    "        # cut_doc = jieba.lcut(doc, cut_all=True)\n",
    "        cut_doc = list(jieba.cut_for_search(doc))\n",
    "        cut_doc = [i for i in cut_doc if i not in stop_word and i != ' ' and len(i) >= 2]  # 去除停用词\n",
    "        cut_docs.append(cut_doc)\n",
    "\n",
    "    print(cut_docs[0])\n",
    "    dictionary = corpora.Dictionary(cut_docs)\n",
    "    dictionary.filter_extremes(no_below=20, no_above=0.5)\n",
    "    corpus = [dictionary.doc2bow(doc) for doc in cut_docs]\n",
    "\n",
    "    coherence_score = []\n",
    "    # perplexity = []\n",
    "    model_list = []\n",
    "\n",
    "    print('开始 寻找最佳主题')\n",
    "    for num_topic in tqdm(range(2, topic_num, 1)):\n",
    "        lda_model = LdaModel(corpus=corpus,\n",
    "                             id2word=dictionary,\n",
    "                             num_topics=num_topic,\n",
    "                             passes=epoch)\n",
    "        model_list.append(lda_model)\n",
    "        coherence_model = CoherenceModel(model=lda_model,\n",
    "                                         texts=cut_docs,\n",
    "                                         dictionary=dictionary,\n",
    "                                         coherence='c_v')\n",
    "        coherence_score.append(round(coherence_model.get_coherence(), 3))\n",
    "        print(f'主题 {num_topic}, 分数为：{coherence_model.get_coherence()}')\n",
    "        topics = lda_model.print_topics(num_words=10)\n",
    "        logging.info(f'主题 {num_topic}, 分数为：{coherence_model.get_coherence()}')\n",
    "        for topic in topics:\n",
    "            print(topic)\n",
    "            logging.info(topic)\n",
    "\n",
    "    max_score, max_score_index = max(coherence_score), coherence_score.index(max(coherence_score))\n",
    "    logging.info(f'最大分数：{max_score}, 索引为：{max_score_index}')\n",
    "    best_lda_model = model_list[max_score_index]\n",
    "\n",
    "    beat_topics = best_lda_model.print_topics(num_words=5)\n",
    "    print('here is the best corr result:')\n",
    "    logging.info('here is the best corr result:')\n",
    "    for topic in beat_topics:\n",
    "        print(topic)\n",
    "        logging.info(topic)\n",
    "\n",
    "    return lda_model\n",
    "\n",
    "\n",
     "def doc_topic_find(docs_list: List, lda_model: LdaModel) -> List[str]:\n",
     "    \"\"\"\n",
     "    Assign a concrete topic to each document, given a trained LDA model.\n",
     "\n",
     "    NOTE(review): unimplemented stub — the body is only this docstring, so the\n",
     "    function currently returns None.\n",
     "\n",
     "    # parameter\n",
     "\n",
     "    docs_list : `List[str]`\n",
     "        the documents\n",
     "\n",
     "    lda_model : `LdaModel`\n",
     "        the topic model\n",
     "\n",
     "    # return\n",
     "\n",
     "    topic_list : `List[str]`\n",
     "\n",
     "    \"\"\"\n",
    "\n",
    "\n",
    "def main():\n",
    "    \"\"\"\n",
    "    主函数\n",
    "    \"\"\"\n",
    "\n",
    "    text_list, _ = original_text_load(xls_folder_path)\n",
    "    print(text_list[0])\n",
    "    # input()\n",
    "    # kws_list = keyword_find(text_list)\n",
    "    topics = LDA_topic_find(text_list)\n",
    "\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    main()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    },
    "trusted": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": "['你好', ' ', '我', '是', ' ', '尼玛']"
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Scratch check: see how jieba tokenises a whitespace-separated string\n",
     "# (it keeps the space characters as their own tokens).\n",
     "jieba.lcut('你好 我是 尼玛')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    },
    "trusted": false
   },
   "outputs": [],
   "source": [
     "# Load the Baidu stopword list for inspection in the following cell.\n",
     "stop_word = list(get_baidu_stopwords())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    },
    "trusted": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": "['after',\n 'again',\n 'each',\n '虽',\n \"you'd\",\n \"it'll\",\n '另外',\n '与否',\n '已经',\n '不独',\n 'keeps',\n \"who's\",\n 'every',\n '大力',\n '先生',\n '是的',\n '而且',\n 'that',\n '并不',\n '当然',\n '准备',\n '归',\n '顺',\n '谁',\n '左右',\n '沿着',\n 'useful',\n 'moreover',\n '如',\n 'well',\n 'by',\n 'those',\n '之所以',\n '比方',\n 'nor',\n '哎哟',\n '或是',\n 'meanwhile',\n 'ours',\n 'value',\n '十分',\n '由于',\n '别说',\n '怎么样',\n 'vs',\n '连',\n 'only',\n '大多数',\n '倘若',\n '那么些',\n \"they'd\",\n '开始',\n '随着',\n '一般',\n 'accordingly',\n 'apart',\n 'whoever',\n '不要',\n '所有',\n '您',\n 'further',\n '还有',\n '虽说',\n 'an',\n 'beside',\n '一些',\n '总的来说',\n '至',\n 'thus',\n '今天',\n '表示',\n '向',\n 'someone',\n \"c's\",\n 'theirs',\n '似的',\n '的话',\n '之后',\n '似乎',\n 'respectively',\n 'way',\n '赶',\n 'becomes',\n 'though',\n 'et',\n 'instead',\n 'in',\n '一时',\n '别',\n '最後',\n '大大',\n '这种',\n \"ain't\",\n '故',\n '个人',\n '必然',\n 'hers',\n '呕',\n '然后',\n '怎麽',\n '》',\n '多少',\n \"isn't\",\n '关于',\n '要',\n '处理',\n '相反',\n 'had',\n 'follows',\n '本着',\n '宣布',\n 'thanks',\n '比',\n '用',\n '时候',\n 'inward',\n '若是',\n '开外',\n '有关',\n '呵',\n '相对而言',\n 'until',\n 'viz',\n '互相',\n 'somewhere',\n '进步',\n '非但',\n '虽然',\n 'with',\n '考虑',\n '上下',\n '突出',\n 'our',\n 'exactly',\n '需要',\n 'its',\n '目前',\n '使得',\n '什么样',\n 'seen',\n '特点',\n '对应',\n 'when',\n '非徒',\n 'tried',\n 'next',\n '喂',\n 'else',\n 'below',\n '自从',\n 'have',\n '也罢',\n '正在',\n '罢了',\n '既然',\n '立即',\n 'be',\n '必须',\n '仍然',\n '往往',\n 'available',\n 'anybody',\n '像',\n '所',\n 'forth',\n '果真',\n 'doing',\n '上面',\n '最大',\n '确定',\n 'seriously',\n 'wants',\n '不过',\n 'also',\n '这个',\n 'ought',\n 'plus',\n '每天',\n 'furthermore',\n '其二',\n '于',\n 'whereby',\n '安全',\n '後面',\n '严重',\n 'everyone',\n \"that's\",\n '总的说来',\n 'upon',\n '边',\n 'why',\n '他们',\n 'ex',\n 'one',\n 'causes',\n 'yet',\n 'consider',\n \"t's\",\n '论',\n '整个',\n 'whole',\n '每个',\n 'once',\n 'best',\n 'want',\n '有著',\n '同一',\n '显然',\n '除',\n \"we'll\",\n '此时',\n '相对',\n '咦',\n 
'entirely',\n '以后',\n '否则',\n '总之',\n '能',\n '假若',\n '因此',\n '既是',\n 'aside',\n '其一',\n '同时',\n '多',\n '引起',\n 'others',\n '双方',\n '要不',\n '这样',\n 'your',\n '知道',\n '之类',\n 'okay',\n 'seeming',\n '地',\n '咳',\n '：',\n '等等',\n 'you',\n \"here's\",\n '今年',\n 'rather',\n \"c'mon\",\n 'thereby',\n 'inasmuch',\n '这儿',\n 'especially',\n ',',\n 'contain',\n 'says',\n 'seven',\n 'never',\n '进入',\n 'third',\n 'hello',\n '伟大',\n '可',\n 'beyond',\n 'sorry',\n '啦',\n '不惟',\n 'various',\n 'thoroughly',\n \"can't\",\n '重要',\n 'need',\n 'normally',\n '以便',\n '另一方面',\n 'least',\n 'anyway',\n 'does',\n '就',\n '无宁',\n '设若',\n 'insofar',\n '一直',\n '每当',\n '靠',\n '起来',\n 'somewhat',\n '只是',\n '欢迎',\n '看看',\n 'down',\n '者',\n '紧接着',\n '啐',\n '凭借',\n '你',\n 'ok',\n '中小',\n '不但',\n '中间',\n '里面',\n '任何',\n '并',\n '并不是',\n 'hardly',\n '给',\n '若',\n '各',\n '及其',\n '有所',\n '有时',\n 'three',\n 'serious',\n 'different',\n '彼',\n '一下',\n 'often',\n '一天',\n '其',\n 'quite',\n '不同',\n '哪儿',\n 'hereby',\n '按照',\n '进行',\n '相等',\n '那麽',\n 'hopefully',\n 'so',\n 'thats',\n 'whereupon',\n '出现',\n 'edu',\n '这',\n '例如',\n 'unless',\n 'here',\n 'same',\n 'two',\n '就是',\n 'otherwise',\n 'ZT',\n 'are',\n 'behind',\n '部分',\n '那个',\n '觉得',\n 'about',\n '过',\n 'yours',\n '叫做',\n '下来',\n '一定',\n 'come',\n '转贴',\n '特殊',\n '反过来',\n 'secondly',\n '之前',\n '得出',\n '啊',\n '开展',\n \"i've\",\n 'into',\n '范围',\n '适应',\n '规定',\n '尤其',\n '较',\n 'always',\n 'get',\n '只限',\n 'greetings',\n '广大',\n '意思',\n '除非',\n 'looking',\n '乃',\n '总结',\n \"i'm\",\n '成为',\n 'certainly',\n 'somebody',\n 'elsewhere',\n '不是',\n 'everybody',\n 'however',\n '具体',\n '再说',\n '于是',\n 'because',\n '前面',\n '着呢',\n 'which',\n 'rd',\n '组成',\n 'become',\n '各自',\n '离',\n '允许',\n 'unto',\n '我',\n '附近',\n '完全',\n 'certain',\n '哦',\n '旁人',\n '其余',\n '有点',\n '倘然',\n '深入',\n '自个儿',\n '非常',\n 'outside',\n '重新',\n 'reasonably',\n '同',\n '替',\n 'between',\n 'somehow',\n 'while',\n 'necessary',\n '举行',\n 'course',\n 'brief',\n '乃至',\n '就是说',\n '一样',\n '召开',\n 
'took',\n 'know',\n 'co',\n '首先',\n 'anyways',\n 'please',\n 'unfortunately',\n 'take',\n '咱们',\n '自身',\n '什么',\n '看见',\n '宁可',\n '正如',\n '最近',\n 'away',\n 'lately',\n '一',\n '分别',\n \"wasn't\",\n 'myself',\n 'alone',\n '今後',\n '固然',\n '普通',\n 'sensible',\n '吓',\n 'ZZ',\n '有利',\n '反过来说',\n 'six',\n 'four',\n '反之',\n '多次',\n '少数',\n '宁',\n 'th',\n '照着',\n 'immediate',\n '更加',\n '那些',\n '之一',\n 'ones',\n 'was',\n '虽则',\n '，',\n '了',\n 'who',\n 'thru',\n 'allow',\n '也',\n '结合',\n '阿',\n \"he's\",\n '绝对',\n 'particularly',\n '嗡嗡',\n 'around',\n '不变',\n 'sometime',\n '、',\n 'on',\n '根本',\n '唉',\n '具体地说',\n '尚且',\n '加之',\n '哇',\n '有的',\n '云云',\n '假如',\n 'seem',\n 'even',\n 'seems',\n 'whereas',\n '因而',\n '重大',\n \"we're\",\n '容易',\n '怎么办',\n 'itself',\n 'my',\n '实现',\n '不敢',\n '遇到',\n '先後',\n '几',\n '表明',\n '抑或',\n \"wouldn't\",\n '是不是',\n 'probably',\n 'except',\n 'changes',\n 'still',\n 'neither',\n '－－',\n '自家',\n 'nowhere',\n '一边',\n '一则',\n 'asking',\n 'own',\n 'known',\n '~',\n 'welcome',\n '这么样',\n '冒',\n 'such',\n 'the',\n '这么些',\n 'allows',\n '而已',\n '之',\n 'obviously',\n '不拘',\n '嗳',\n 'like',\n '以及',\n '或',\n '呸',\n 'anything',\n 'beforehand',\n '漫说',\n '可是',\n '帮助',\n 'getting',\n 'very',\n '可见',\n 'besides',\n '后来',\n '甚么',\n '行动',\n \"what's\",\n 'among',\n '要不然',\n 'novel',\n 'specify',\n '每年',\n '啥',\n '还是',\n 'needs',\n 'whose',\n '这就是说',\n '其中',\n 'looks',\n '如此',\n '那么',\n '特别是',\n '即便',\n '当时',\n '综上所述',\n 'think',\n 'yourself',\n '恰恰相反',\n '莫若',\n '尽管',\n '；',\n '下去',\n '不论',\n '那样',\n 'little',\n '自各儿',\n 'latter',\n '那么样',\n '”',\n 'some',\n 'whether',\n '共同',\n '哟',\n '它',\n '哈',\n 'already',\n '哪怕',\n 'most',\n '这些',\n '相同',\n '丰富',\n 'later',\n 'then',\n '接著',\n 'containing',\n '换言之',\n '不然',\n '到',\n '况且',\n 'lest',\n '迅速',\n '看出',\n '个',\n '实际',\n '倘或',\n '所谓',\n '慢说',\n '嘘',\n '总的来看',\n '她的',\n 'nine',\n '企图',\n 'usually',\n 'although',\n '代替',\n '咋',\n '战斗',\n '一致',\n '适用',\n 'tries',\n 'concerning',\n '极了',\n '以至',\n '维持',\n '□',\n '而外',\n 
'became',\n '乘',\n 'do',\n '你们',\n '为主',\n 'specified',\n 'presumably',\n 'out',\n '通过',\n '则',\n 'has',\n '根据',\n '有',\n 'relatively',\n '强烈',\n '尔后',\n '俺们',\n 'using',\n '产生',\n '继续',\n '向着',\n 'nothing',\n 'regardless',\n 'look',\n '说说',\n 'mostly',\n '以外',\n 'too',\n '自',\n '也好',\n 'say',\n '大约',\n '作为',\n '往',\n 'as',\n '最后',\n 'inc',\n '显著',\n '喏',\n '照',\n 'sent',\n 'knows',\n 'together',\n 'afterwards',\n '一方面',\n '既',\n '进而',\n '而',\n '其它',\n 'oh',\n 'kept',\n '造成',\n '被',\n 'specifying',\n '把',\n '是',\n 'not',\n 'something',\n '移动',\n \"aren't\",\n 'regards',\n '保持',\n '焉',\n '加入',\n 'last',\n 'let',\n '不特',\n '甚至',\n '真是',\n '许多',\n '下列',\n 'is',\n '要不是',\n 'whom',\n 'before',\n '继而',\n 'done',\n '己',\n '结果',\n 'definitely',\n 'gone',\n 'que',\n '可以',\n '嘿',\n 'nobody',\n '认真',\n '通常',\n 'likely',\n '出去',\n '乌乎',\n '怎么',\n 'mainly',\n '广泛',\n ' ',\n '--',\n '是否',\n 'hereupon',\n '突然',\n '但',\n '一起',\n '他',\n 'tell',\n 'sup',\n 'amongst',\n 'went',\n '密切',\n 'merely',\n 'non',\n 'whenever',\n '趁',\n 'trying',\n 'use',\n '“',\n '随著',\n 'without',\n '任务',\n '哪个',\n '说明',\n 'happens',\n '从事',\n \"let's\",\n 'and',\n '出来',\n \"couldn't\",\n 'to',\n '凭',\n '哪天',\n '最高',\n 'no',\n '周围',\n \"weren't\",\n '每',\n '原来',\n '良好',\n '.',\n '于是乎',\n '望',\n '呀',\n 'hence',\n '相似',\n \"won't\",\n '乎',\n '接着',\n 'above',\n 'across',\n '如上所述',\n '何',\n 'towards',\n '过来',\n '得到',\n 'eight',\n '受到',\n '她们',\n '吗',\n '避免',\n '怎',\n '若非',\n 'becoming',\n 'etc',\n '万一',\n 'several',\n '！',\n '失去',\n 'me',\n 'against',\n '她',\n 'ie',\n '\\r',\n 'actually',\n 'shall',\n '复杂',\n '然而',\n 'truly',\n '适当',\n 'theres',\n '有力',\n '正常',\n '它的',\n '任凭',\n \"where's\",\n '沿',\n 'just',\n '假使',\n 'self',\n '对',\n \"haven't\",\n 'un',\n 'wherever',\n '拿',\n 'it',\n '没有',\n '构成',\n '决定',\n 'us',\n '掌握',\n '为',\n 'keep',\n '当前',\n '倘',\n '反应',\n '这么',\n '《',\n '顺着',\n 'might',\n '尽',\n 'for',\n 'able',\n 'being',\n 'they',\n '采取',\n 'if',\n '同样',\n '一面',\n '两者',\n '以至于',\n 'used',\n '及至',\n 
'好象',\n '趁着',\n 'indicate',\n '如若',\n 'consequently',\n \"you're\",\n '而况',\n 'ltd',\n '哪样',\n 'associated',\n '相应',\n '要么',\n 'she',\n 'formerly',\n '哪些',\n '加以',\n 'whatever',\n 'their',\n '明显',\n '看到',\n 'under',\n '为什麽',\n '以来',\n '朝着',\n 'yes',\n '嗯',\n '由此可见',\n 'this',\n '为何',\n '么',\n '现代',\n '纵令',\n '以为',\n 'right',\n 'everything',\n '鄙人',\n 'these',\n '如下',\n '人家',\n '为什么',\n '因',\n '最好',\n '坚决',\n '总而言之',\n '问题',\n 'thanx',\n '当',\n 'cause',\n 'try',\n '那边',\n 'per',\n '之後',\n '一来',\n '不管',\n 'we',\n 'according',\n '叫',\n \"it'd\",\n 'zz',\n '而言',\n '比较',\n \"they've\",\n '转动',\n '强调',\n '嘎',\n '直到',\n \"hadn't\",\n 'namely',\n 'howbeit',\n 'zero',\n '吱',\n '不足',\n '待',\n '宁愿',\n '毫不',\n 'onto',\n '着',\n '常常',\n \"it's\",\n '换句话说',\n '这边',\n 'what',\n '们',\n '上升',\n 'am',\n 'wish',\n '不够',\n '此间',\n '呗',\n '呼哧',\n '各个',\n 'overall',\n '认为',\n \"i'd\",\n 'throughout',\n 'could',\n '此',\n '\\n',\n 'see',\n '哉',\n '扩大',\n '大批',\n '如何',\n '呢',\n 'following',\n '做到',\n '愿意',\n '该',\n '吧哒',\n 'along',\n 'perhaps',\n '上来',\n '练习',\n \"you'll\",\n 'other',\n '不怕',\n 'themselves',\n 'everywhere',\n '哪里',\n '即使',\n 'mean',\n '不会',\n '总是',\n '而是',\n \"doesn't\",\n 'former',\n '严格',\n '大量',\n 'uses',\n 'where',\n 'old',\n 'name',\n '心里',\n '那会儿',\n '应用',\n '他人',\n '让',\n '这么点儿',\n '任',\n 'going',\n 'nevertheless',\n '。',\n 'now',\n '使用',\n \"i'll\",\n '以前',\n ...]"
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "stop_word"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    },
    "trusted": false
   },
   "outputs": [],
   "source": [
    "seg_list = jieba.cut_for_search(\"小明硕士毕业于中国科学院计算所，后在日本京都大学深造\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    },
    "trusted": false
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}