{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy\n",
    "import gensim\n",
    "import numpy as np\n",
    "from jieba import analyse\n",
    "from gensim.models import Word2Vec\n",
    "from gensim.models.word2vec import LineSentence\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "与'文化'相似度最高的10个词: \n",
      "[('地理', 0.703942596912384), ('娱乐', 0.6645095944404602), ('民间', 0.6559509634971619), ('现代', 0.6551218032836914), ('艺术', 0.6399315595626831), ('社会学', 0.6392759084701538), ('地域', 0.6384734511375427), ('交流', 0.6380982995033264), ('哲学', 0.6354150772094727), ('天地', 0.625521719455719)]\n"
     ]
    }
   ],
   "source": [
    "def train_word2vec():\n",
    "    \"\"\"Train a CBOW Word2Vec model on TrainData.txt, save it, and print a sample query.\"\"\"\n",
    "    with open('TrainData.txt', 'r', encoding='utf-8') as cor_data:\n",
    "        # One sentence per line; sg=0 selects CBOW, 200-dim vectors,\n",
    "        # 5-word context window, words seen fewer than 5 times dropped.\n",
    "        corpus = LineSentence(cor_data)\n",
    "        w2v_model = Word2Vec(corpus, sg=0, vector_size=200, window=5, min_count=5, workers=9)\n",
    "        w2v_model.save('model_word2vec')\n",
    "        print(\"与'文化'相似度最高的10个词: \")\n",
    "        print(w2v_model.wv.most_similar('文化', topn=10))\n",
    "\n",
    "train_word2vec()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [],
   "source": [
    "def keyword(data):\n",
    "    \"\"\"Return the TF-IDF keywords that jieba extracts from the text ``data``.\"\"\"\n",
    "    return analyse.extract_tags(data)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_keywords(docpath, savepath):\n",
    "    \"\"\"Extract keywords for every line of ``docpath`` and write them to ``savepath``.\n",
    "\n",
    "    Output format: one line per input line, keywords space-separated\n",
    "    (with a trailing space before the newline).\n",
    "    \"\"\"\n",
    "    # The output must use UTF-8 like the input: without an explicit encoding,\n",
    "    # writing Chinese keywords fails on platforms whose default encoding\n",
    "    # cannot represent them.\n",
    "    with open(docpath, 'r', encoding='utf-8') as docf, open(savepath, 'w', encoding='utf-8') as outf:\n",
    "        for data in docf:\n",
    "            data = data.strip()\n",
    "            keywords = keyword(data)\n",
    "            for word in keywords:\n",
    "                outf.write(word + ' ')\n",
    "            outf.write('\\n')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_pos(string, char):\n",
    "    space_pos = []\n",
    "    try:\n",
    "        space_pos = list((pos for pos, val in enumerate(string) if (val == char)))\n",
    "    except:\n",
    "        pass\n",
    "    return space_pos"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_vector(file_name, model):\n",
    "    with open(file_name, 'r') as f:\n",
    "        wordvec_size = 200\n",
    "        word_vector = np.zeros(wordvec_size)\n",
    "        for data in f:\n",
    "            space_pos = get_pos(data, ' ')\n",
    "            if space_pos:\n",
    "                first_word = data[0:space_pos[0]]\n",
    "                if model.wv.__contains__(first_word):\n",
    "                    word_vector = word_vector + model.wv[first_word]\n",
    "                for i in range(len(space_pos) - 1):\n",
    "                    if i + 1 < len(space_pos):\n",
    "                        word = data[space_pos[i]:space_pos[i + 1]]\n",
    "                        if model.wv.__contains__(word):\n",
    "                            word_vector = word_vector + model.wv[word]\n",
    "    return word_vector"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [],
   "source": [
    "def similarity(vector1, vector2):\n",
    "    vector1_abs = np.sqrt(vector1.dot(vector1))\n",
    "    vector2_abs = np.sqrt(vector2.dot(vector2))\n",
    "    if vector2_abs != 0 and vector1_abs != 0:\n",
    "        similarity = (vector1.dot(vector2)) / (vector1_abs * vector2_abs)\n",
    "    else:\n",
    "        similarity = 0\n",
    "    return similarity\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {},
   "outputs": [],
   "source": [
    "def main():\n",
    "    \"\"\"Keyword-vectorize two documents and report their cosine similarity.\"\"\"\n",
    "    model = gensim.models.Word2Vec.load('model_word2vec')\n",
    "    doc1, doc2 = 'new1.txt', 'new2.txt'\n",
    "    kw1, kw2 = 'new1_keywords.txt', 'new2_keywords.txt'\n",
    "    get_keywords(doc1, kw1)\n",
    "    get_keywords(doc2, kw2)\n",
    "    vec1 = get_vector(kw1, model)\n",
    "    print('文本new1的部分向量: \\n', vec1[:20])\n",
    "    vec2 = get_vector(kw2, model)\n",
    "    print('文本new2的部分向量: \\n', vec2[:20])\n",
    "    print('文本new1和文本new2的相似度: ', similarity(vec1, vec2))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "文本new1的部分向量: \n",
      " [ 0.49030101  0.05792595 -0.27302174  1.15941569  0.83053049  0.04818115\n",
      "  0.45979304  0.9330957  -0.69097138 -0.92307061 -0.27436808  0.06655046\n",
      "  0.33678616  0.53349095 -0.40811329 -0.47255588 -0.02671385  0.74718283\n",
      "  1.15548093 -1.48916373]\n",
      "文本new2的部分向量: \n",
      " [ 0.64230544  0.56926335 -0.41156543  0.66737021  1.90064734 -0.08531917\n",
      "  0.86304359  1.83094306 -1.69885305 -1.80961076 -0.28559392  0.81206941\n",
      "  0.98427771  0.89969919 -0.7918205  -1.24462666  0.32805722  0.99882454\n",
      "  0.97366615 -1.51737603]\n",
      "文本new1和文本new2的相似度:  0.7058593187305086\n"
     ]
    }
   ],
   "source": [
    "if __name__ == \"__main__\":\n",
    "    main()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
