{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Building prefix dict from the default dictionary ...\n",
      "Dumping model to file cache /var/folders/7c/tx2rnzzj2_x546cjtv31_6yr0000gn/T/jieba.cache\n",
      "Loading model cost 0.924 seconds.\n",
      "Prefix dict has been built successfully.\n"
     ]
    }
   ],
   "source": [
    "# -*- coding: utf-8 -*-\n",
    "\n",
    "import jieba\n",
    "import jieba.analyse\n",
    "\n",
    "# Register the novel's character names so jieba segments each one as a\n",
    "# single token instead of splitting it into surname + given name.\n",
    "CHARACTER_NAMES = [\n",
    "    '沙瑞金', '田国富', '高育良', '侯亮平', '钟小艾', '陈岩石', '欧阳菁',\n",
    "    '易学习', '王大路', '蔡成功', '孙连城', '季昌明', '丁义珍', '郑西坡',\n",
    "    '赵东来', '高小琴', '赵瑞龙', '林华华', '陆亦可', '刘新建', '刘庆祝',\n",
    "]\n",
    "for name in CHARACTER_NAMES:\n",
    "    jieba.suggest_freq(name, True)\n",
    "\n",
    "# Read the raw novel text; the encoding is stated explicitly so the cell\n",
    "# behaves the same regardless of the platform's default encoding.\n",
    "with open('./in_the_name_of_people.txt', encoding='utf-8') as f:\n",
    "    document = f.read()\n",
    "\n",
    "# Space-join the segmented tokens — the format word2vec.LineSentence expects.\n",
    "# (Do not print the generator first: jieba.cut returns a one-shot iterator.)\n",
    "result = ' '.join(jieba.cut(document))\n",
    "\n",
    "with open('./in_the_name_of_people_segment.txt', 'w', encoding='utf-8') as f2:\n",
    "    f2.write(result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2020-03-15 08:36:38,830 : INFO : collecting all words and their counts\n",
      "2020-03-15 08:36:38,832 : INFO : PROGRESS: at sentence #0, processed 0 words, keeping 0 word types\n",
      "2020-03-15 08:36:38,895 : INFO : collected 17878 word types from a corpus of 161343 raw words and 2311 sentences\n",
      "2020-03-15 08:36:38,897 : INFO : Loading a fresh vocabulary\n",
      "2020-03-15 08:36:38,959 : INFO : effective_min_count=1 retains 17878 unique words (100% of original 17878, drops 0)\n",
      "2020-03-15 08:36:38,960 : INFO : effective_min_count=1 leaves 161343 word corpus (100% of original 161343, drops 0)\n",
      "2020-03-15 08:36:39,027 : INFO : deleting the raw counts dictionary of 17878 items\n",
      "2020-03-15 08:36:39,028 : INFO : sample=0.001 downsamples 38 most-common words\n",
      "2020-03-15 08:36:39,028 : INFO : downsampling leaves estimated 120578 word corpus (74.7% of prior 161343)\n",
      "2020-03-15 08:36:39,044 : INFO : constructing a huffman tree from 17878 words\n",
      "2020-03-15 08:36:39,517 : INFO : built huffman tree with maximum node depth 17\n",
      "2020-03-15 08:36:39,553 : INFO : estimated required memory for 17878 words and 100 dimensions: 33968200 bytes\n",
      "2020-03-15 08:36:39,553 : INFO : resetting layer weights\n",
      "2020-03-15 08:36:43,654 : INFO : training model with 3 workers on 17878 vocabulary and 100 features, using sg=0 hs=1 sample=0.001 negative=5 window=3\n",
      "2020-03-15 08:36:43,874 : INFO : worker thread finished; awaiting finish of 2 more threads\n",
      "2020-03-15 08:36:43,883 : INFO : worker thread finished; awaiting finish of 1 more threads\n",
      "2020-03-15 08:36:43,893 : INFO : worker thread finished; awaiting finish of 0 more threads\n",
      "2020-03-15 08:36:43,894 : INFO : EPOCH - 1 : training on 161343 raw words (120618 effective words) took 0.2s, 511910 effective words/s\n",
      "2020-03-15 08:36:44,108 : INFO : worker thread finished; awaiting finish of 2 more threads\n",
      "2020-03-15 08:36:44,109 : INFO : worker thread finished; awaiting finish of 1 more threads\n",
      "2020-03-15 08:36:44,121 : INFO : worker thread finished; awaiting finish of 0 more threads\n",
      "2020-03-15 08:36:44,122 : INFO : EPOCH - 2 : training on 161343 raw words (120546 effective words) took 0.2s, 533835 effective words/s\n",
      "2020-03-15 08:36:44,333 : INFO : worker thread finished; awaiting finish of 2 more threads\n",
      "2020-03-15 08:36:44,335 : INFO : worker thread finished; awaiting finish of 1 more threads\n",
      "2020-03-15 08:36:44,343 : INFO : worker thread finished; awaiting finish of 0 more threads\n",
      "2020-03-15 08:36:44,344 : INFO : EPOCH - 3 : training on 161343 raw words (120605 effective words) took 0.2s, 551224 effective words/s\n",
      "2020-03-15 08:36:44,603 : INFO : worker thread finished; awaiting finish of 2 more threads\n",
      "2020-03-15 08:36:44,607 : INFO : worker thread finished; awaiting finish of 1 more threads\n",
      "2020-03-15 08:36:44,608 : INFO : worker thread finished; awaiting finish of 0 more threads\n",
      "2020-03-15 08:36:44,609 : INFO : EPOCH - 4 : training on 161343 raw words (120444 effective words) took 0.3s, 459061 effective words/s\n",
      "2020-03-15 08:36:44,926 : INFO : worker thread finished; awaiting finish of 2 more threads\n",
      "2020-03-15 08:36:44,930 : INFO : worker thread finished; awaiting finish of 1 more threads\n",
      "2020-03-15 08:36:44,932 : INFO : worker thread finished; awaiting finish of 0 more threads\n",
      "2020-03-15 08:36:44,933 : INFO : EPOCH - 5 : training on 161343 raw words (120598 effective words) took 0.3s, 375306 effective words/s\n",
      "2020-03-15 08:36:44,934 : INFO : training on a 806715 raw words (602811 effective words) took 1.3s, 471466 effective words/s\n"
     ]
    }
   ],
   "source": [
    "# Train a word2vec model on the segmented corpus, with gensim's INFO-level\n",
    "# progress logging enabled so training statistics appear below the cell.\n",
    "import logging\n",
    "import os\n",
    "from gensim.models import word2vec\n",
    "\n",
    "logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n",
    "\n",
    "# LineSentence streams the file lazily: one whitespace-tokenized sentence per line.\n",
    "corpus = word2vec.LineSentence('./in_the_name_of_people_segment.txt')\n",
    "\n",
    "# hs=1: hierarchical softmax; min_count=1: keep every token (small corpus);\n",
    "# window=3: context radius; size=100: embedding dimensionality (gensim 3.x API).\n",
    "model = word2vec.Word2Vec(corpus, hs=1, min_count=1, window=3, size=100)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2020-03-15 08:36:55,018 : INFO : precomputing L2-norms of word weight vectors\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "急得 0.9505180716514587\n",
      "开卷 0.9415163397789001\n",
      "开张 0.9388079047203064\n",
      "股气 0.9379755854606628\n",
      "心慌意乱 0.9369588494300842\n",
      "存心 0.9358711838722229\n",
      "照样 0.9356940984725952\n",
      "更好 0.9341181516647339\n",
      "带头 0.9318569898605347\n",
      "粗声大气 0.9314426779747009\n",
      "换来 0.9272453188896179\n",
      "借力 0.9262708425521851\n",
      "掩映着 0.9255391955375671\n",
      "读点 0.9254662990570068\n",
      "行走 0.9252498149871826\n",
      "商人 0.9243399500846863\n",
      "一团 0.9228260517120361\n",
      "拉出去 0.9214733839035034\n",
      "友谊 0.921315610408783\n",
      "这大 0.9204831719398499\n",
      "不留余地 0.9183871150016785\n",
      "永不 0.9182659387588501\n",
      "火爆 0.9181596636772156\n",
      "知根知底 0.9162418842315674\n",
      "不负责任 0.9147095680236816\n",
      "值得 0.9145989418029785\n",
      "反腐败 0.9144550561904907\n",
      "震动 0.9127089381217957\n",
      "连篇 0.912657618522644\n",
      "事故 0.9121453762054443\n",
      "化妆品 0.9111428260803223\n",
      "跑遍 0.9110373258590698\n",
      "办完 0.9108434319496155\n",
      "来个 0.9107255935668945\n",
      "最佳 0.9106427431106567\n",
      "文章 0.9104737043380737\n",
      "心梗 0.9095228910446167\n",
      "类 0.9088730812072754\n",
      "小时 0.9086607098579407\n",
      "问钱 0.9080460667610168\n",
      "双眼 0.9079349040985107\n",
      "林南 0.9073784351348877\n",
      "女生 0.9073432087898254\n",
      "比试 0.9071492552757263\n",
      "饮食男女 0.9060388803482056\n",
      "明镜 0.9056115746498108\n",
      "受惊 0.9046570658683777\n",
      "雪山 0.9040892124176025\n",
      "三十八 0.9038866758346558\n",
      "拉斯维加斯 0.9036061763763428\n"
     ]
    }
   ],
   "source": [
    "# Print the 50 tokens most similar to '太低' (cosine similarity in the\n",
    "# trained embedding space). topn=100 fetches extra candidates so a length\n",
    "# filter could be re-enabled later without re-querying the model.\n",
    "req_count = 50\n",
    "for word, similarity in model.wv.similar_by_word('太低', topn=100):\n",
    "    print(word, similarity)\n",
    "    req_count -= 1\n",
    "    if req_count == 0:\n",
    "        break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
