{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "与'文化'相似度最高的10个词：\n",
      "[('哲学', 0.661015510559082), ('艺术', 0.6548650860786438), ('现代', 0.653694748878479), ('民间', 0.650560736656189), ('娱乐', 0.6504202485084534), ('交流', 0.6346988677978516), ('地理', 0.6329925060272217), ('戏剧', 0.6291259527206421), ('素养', 0.6204221248626709), ('音乐', 0.6144771575927734)]\n"
     ]
    }
   ],
   "source": [
    "import numpy\n",
    "import gensim\n",
    "import numpy as np\n",
    "from jieba import analyse\n",
    "from gensim.models import Word2Vec\n",
    "from gensim.models.word2vec import LineSentence\n",
    "\n",
    "def train_word2vec():     #训练Word2Vec模型\n",
    "    #读取已分词的数据集\n",
    "    cor_data=open('C:/Users/Administrator/Desktop/TrainData.txt','r',encoding='utf-8')\n",
    "    model=Word2Vec(LineSentence(cor_data),sg=0,vector_size=200,window=5,min_count=5,workers=9)   #训练模型\n",
    "    model.save('C:/Users/Administrator/Desktop/model_word2vec')   #保存模型\n",
    "    print(\"与'文化'相似度最高的10个词：\")\n",
    "    print(model.wv.most_similar('文化',topn=10))\n",
    "train_word2vec()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def keyword(data):   #提取关键词\n",
    "    tfidf=analyse.extract_tags\n",
    "    #使用TF-IDF算法提取关键词\n",
    "    keywords=tfidf(data)\n",
    "    return keywords\n",
    "def get_keywords(docpath,savepath):   #提取新闻文本关键词\n",
    "    with open(docpath, 'r', encoding='utf-8') as docf,open(savepath, 'w') as outf:\n",
    "        for data in docf:    #读取每一行数据\n",
    "            data = data[:len(data)-1]   #删除行尾的换行符\n",
    "            keywords=keyword(data)      #提取关键词\n",
    "            for word in keywords:\n",
    "                outf.write(word + ' ')\n",
    "            outf.write('\\n')\n",
    "def get_pos(string,char):\n",
    "    space_pos=[]   #初始化一个空列表，用于存储字符在字符串中的位置\n",
    "    try:\n",
    "        #获取字符串的索引和值，如果值为查找的字符，添加到列表\n",
    "        space_pos = list(((pos)for pos, val in enumerate(string) if (val == char)))\n",
    "    except:\n",
    "        pass\n",
    "    return space_pos\n",
    "#用训练好的模型和关键词计算文本向量\n",
    "def get_vector(file_name, model):\n",
    "    with open(file_name, 'r') as f:\n",
    "        wordvec_size = 200  #定义词向量维度 \n",
    "        word_vector = numpy.zeros(wordvec_size)   #生成全零的数组\n",
    "        for data in f:\n",
    "            space_pos = get_pos(data,' ')   #获取空格的位置\n",
    "            first_word=data[0:space_pos[0]]  #提取第一个词\n",
    "            #判断模型是否包含第一个词\n",
    "            if model.wv.__contains__(first_word):\n",
    "                word_vector=word_vector + model.wv[first_word]\n",
    "            #除第一个词外的其他词\n",
    "            for i in range(len(space_pos) - 1):\n",
    "                word = data[space_pos[i]: space_pos[i+1]]\n",
    "                if model.wv.__contains__(word):\n",
    "                    word_vector = word_vector + model.wv[word]\n",
    "    return word_vector"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "#计算两个文本向量的相似度\n",
    "def similarity(vector1, vector2):\n",
    "    #计算文本向量vector1的模\n",
    "    vector1_abs=np.sqrt(vector1.dot(vector1))\n",
    "    #计算文本向量vector2的模\n",
    "    vector2_abs=np.sqrt(vector2.dot(vector2))\n",
    "    #判断两个文本向量的模是否为0\n",
    "    if vector2_abs != 0 and vector1_abs != 0:\n",
    "        similarity = (vector1.dot(vector2))/(vector1_abs * vector2_abs)   #计算相似度\n",
    "    else:\n",
    "        similarity = 0\n",
    "    return similarity\n",
    "def main():\n",
    "    #加载模型\n",
    "    model = gensim.models.Word2Vec.load('C:/Users/Administrator/Desktop/model_word2vec')\n",
    "    new1 = 'C:/Users/Administrator/Desktop/new1.txt'    #为变量new1初始化\n",
    "    new2 = 'C:/Users/Administrator/Desktop/new2.txt'    #为变量new2初始化\n",
    "    new1_keywords='C:/Users/Administrator/Desktop/new1_keywords.txt'\n",
    "    new2_keywords='C:/Users/Administrator/Desktop/new2_keywords.txt'\n",
    "    get_keywords(new1, new1_keywords)  #提取文本new1的关键词\n",
    "    get_keywords(new2, new2_keywords)  #提取文本new2的关键词\n",
    "    new1_vector = get_vector(new1_keywords, model)\n",
    "    print('文本new1的部分向量：\\n',new1_vector[:20])\n",
    "    new2_vector = get_vector(new2_keywords, model)\n",
    "    print('文本new2的部分向量： \\n',new2_vector[:20])\n",
    "    print('文本new1和文本new2的相似度：',similarity(new1_vector,new2_vector))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Building prefix dict from the default dictionary ...\n",
      "Dumping model to file cache C:\\Users\\ADMINI~1\\AppData\\Local\\Temp\\jieba.cache\n",
      "Loading model cost 0.815 seconds.\n",
      "Prefix dict has been built successfully.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "文本new1的部分向量：\n",
      " [ 0.34920738  0.12012252 -0.1879139   1.42620327  0.08701125 -0.27005731\n",
      "  0.41194444  0.78864833 -0.78746284 -0.27226432 -0.5666421  -0.1121978\n",
      "  0.52354395  0.54817268 -0.1473689  -0.68972267  0.19833314  0.42588097\n",
      "  0.86756521 -1.24800247]\n",
      "文本new2的部分向量： \n",
      " [ 0.66128209 -0.1337686  -0.31844263  1.63306059  0.82100049  0.0983046\n",
      "  0.30547997  1.90199713 -1.67570936 -0.91317028 -0.48696916  0.38946313\n",
      "  0.82448019  0.13138354 -0.67961672 -1.70689303  1.10326257  0.31186231\n",
      "  0.68425982 -1.37422224]\n",
      "文本new1和文本new2的相似度： 0.6871122471464547\n"
     ]
    }
   ],
   "source": [
     "# Run the end-to-end similarity pipeline when executed directly\n",
     "if __name__ == \"__main__\":\n",
     "    main()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
