{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-10-02T06:47:08.521746Z",
     "start_time": "2020-10-02T06:47:02.964428Z"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Building prefix dict from the default dictionary ...\n",
      "Loading model from cache C:\\Users\\ADMINI~1\\AppData\\Local\\Temp\\jieba.cache\n",
      "Loading model cost 2.324 seconds.\n",
      "Prefix dict has been built succesfully.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "top 3邻居 [91, 94, 7]\n",
      "['关于', '里', '皮', '的', '辞职']\n",
      "['也许', '是', '里', '皮', '头脑', '一热', '的', '决定']\n",
      "['里', '皮', '辞职']\n"
     ]
    }
   ],
   "source": [
    "import jieba\n",
    "from datasketch import MinHash, MinHashLSHForest\n",
    "import re\n",
    "import random\n",
    "\n",
    "def get_sentences_list(file, stopwords_file='./stopword.txt'):\n",
    "    \"\"\"Read a text file, split it into sentences, and tokenize each one.\n",
    "\n",
    "    Args:\n",
    "        file: path to a UTF-8 encoded text file.\n",
    "        stopwords_file: path to a UTF-8 stopword list, one word per line.\n",
    "\n",
    "    Returns:\n",
    "        A list of sentences, each a list of jieba tokens with stopwords removed.\n",
    "    \"\"\"\n",
    "    with open(file, 'r', encoding='utf-8') as f:\n",
    "        text = f.read()\n",
    "        text = text.replace('\\n', '')      # drop line breaks\n",
    "        text = text.replace('\\u200b', '')  # drop zero-width spaces (U+200B)\n",
    "        sentences = re.split('[，。？！：“]', text)  # split on Chinese punctuation\n",
    "\n",
    "    # Use a set for O(1) membership tests; the original list made the\n",
    "    # tokenization loop O(sentences * stopwords).\n",
    "    stopwords = set()\n",
    "    with open(stopwords_file, 'r', encoding='utf-8') as f:\n",
    "        for line in f:\n",
    "            word = line.strip()\n",
    "            if word:  # skip blank lines\n",
    "                stopwords.add(word)\n",
    "\n",
    "    sentences_words = []\n",
    "    for sentence in sentences:\n",
    "        words = [w for w in jieba.cut(sentence.strip()) if w not in stopwords]\n",
    "        sentences_words.append(words)\n",
    "\n",
    "    return sentences_words\n",
    "\n",
    "def build_minhash(words, num_perm=128):\n",
    "    \"\"\"Build a MinHash signature from an iterable of words.\n",
    "\n",
    "    Shared by the indexing loop and the query below so both signatures\n",
    "    are built identically (the original duplicated this code and mixed\n",
    "    'utf8'/'utf-8' encoding spellings).\n",
    "    \"\"\"\n",
    "    m = MinHash(num_perm=num_perm)\n",
    "    for word in words:\n",
    "        m.update(word.encode('utf-8'))\n",
    "    return m\n",
    "\n",
    "sentences = get_sentences_list('./weibos.txt')\n",
    "\n",
    "# Compute each sentence's MinHash and add it to the LSH Forest.\n",
    "forest = MinHashLSHForest()\n",
    "for index, sentence in enumerate(sentences):\n",
    "    forest.add(index, build_minhash(sentence))\n",
    "forest.index()  # must be called before querying\n",
    "\n",
    "# Pick a random sentence (randint is inclusive, so the upper bound is len - 1).\n",
    "# rand_sentence_index = random.randint(0, len(sentences) - 1)\n",
    "# print('选取的index编号为:'+str(rand_sentence_index))\n",
    "query = ['里','皮', '的', '辞职']\n",
    "#query = ['关于','里皮', '的', '辞职']\n",
    "\n",
    "m1 = build_minhash(query)\n",
    "\n",
    "top_k = 3\n",
    "result = forest.query(m1, top_k)\n",
    "print('top 3邻居', result)\n",
    "for i in result:\n",
    "    print(sentences[i])"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.2"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
