{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "ename": "ModuleNotFoundError",
     "evalue": "No module named 'wordcloud'",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mModuleNotFoundError\u001b[0m                       Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-1-1b96336573b5>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[0;32m      1\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mjieba\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[1;32mfrom\u001b[0m \u001b[0mwordcloud\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mWordCloud\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m      3\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mmatplotlib\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpyplot\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mplt\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      4\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mimageio\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      5\u001b[0m \u001b[0mf\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'mess.txt'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mencoding\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;34m'utf-8'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mModuleNotFoundError\u001b[0m: No module named 'wordcloud'"
     ]
    }
   ],
   "source": [
    "# Build a word cloud from Chinese text: segment with jieba, render with WordCloud.\n",
    "import jieba\n",
    "from wordcloud import WordCloud\n",
    "import matplotlib.pyplot as plt\n",
    "import imageio\n",
    "# Read the UTF-8 source text.\n",
    "f = open('mess.txt', encoding = 'utf-8')\n",
    "t = f.read()\n",
    "f.close()\n",
    "# jieba.lcut returns a token list; WordCloud expects space-separated words.\n",
    "words = jieba.lcut(t)\n",
    "txt = ' '.join(words)\n",
    "# Mask image constrains the cloud to the shape in horse.png.\n",
    "image = imageio.imread('horse.png')\n",
    "# font_path must reference a font with CJK glyphs or Chinese renders as boxes.\n",
    "w = WordCloud(font_path = 'FZFSJW.TTF', background_color = 'white', mask = image)\n",
    "w.generate(txt)\n",
    "# Persist the rendered cloud, then display it inline.\n",
    "w.to_file('test.png')\n",
    "plt.imshow(w, interpolation = 'bilinear')\n",
    "plt.axis(\"off\")\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "对文本进行分句：\n",
      " [{'text': '2015年7月31日，北京获得2022年第24届冬奥会的举办权', 'pos': {'x': 0, 'y': 0, 'mark': ['FIRSTSECTION', 'FIRSTSENTENCE']}}, {'text': '第24届冬奥会将于2022年2月4日至2月20日在中国北京举行，北京也将成为历史上第一座既举办过夏奥会又举办过冬奥会的城市', 'pos': {'x': 0, 'y': 1, 'mark': ['FIRSTSECTION', 'LASTSENTENCE']}}, {'text': '2022年北京冬奥会计划使用25个场馆，场馆分布在3个赛区，分别是北京赛区、延庆赛区和张家口赛区', 'pos': {'x': 1, 'y': 0, 'mark': ['FIRSTSENTENCE', 'LASTSENTENCE']}}, {'text': '北京是中华人民共和国的首都，是全国政治中心、文化中心、国际交往中心、科技创新中心', 'pos': {'x': 2, 'y': 0, 'mark': ['FIRSTSENTENCE']}}, {'text': '2008年，北京成功举办了第29届夏季奥林匹克运动会', 'pos': {'x': 2, 'y': 1, 'mark': ['LASTSENTENCE']}}]\n"
     ]
    }
   ],
   "source": [
    "import re\n",
    "import jieba.analyse\n",
    "import jieba.posseg\n",
    "def get_sentence(text):\n",
    "    \"\"\"Split text into sentences tagged with their paragraph/sentence position.\n",
    "\n",
    "    Each item is {'text': sentence, 'pos': {'x': paragraph index,\n",
    "    'y': sentence index within the paragraph, 'mark': positional tags}}.\n",
    "    \"\"\"\n",
    "    split_num = 0\n",
    "    sentences = []\n",
    "    for paragraph in text.split(\"\\n\"):\n",
    "        if not paragraph:\n",
    "            continue\n",
    "        sentence_num = 0\n",
    "        # Split on ASCII '!' plus the full-width terminators; the original\n",
    "        # pattern missed the full-width exclamation mark '！'.\n",
    "        for sentence_text in re.split(\"!|！|。|？\", paragraph):\n",
    "            if not sentence_text:\n",
    "                continue\n",
    "            mark = []\n",
    "            if split_num == 0:\n",
    "                mark.append(\"FIRSTSECTION\")\n",
    "            if sentence_num == 0:\n",
    "                mark.append(\"FIRSTSENTENCE\")\n",
    "            sentences.append({\"text\": sentence_text, \"pos\": {\"x\": split_num, \"y\": sentence_num, \"mark\": mark}})\n",
    "            sentence_num = sentence_num + 1\n",
    "        # Only paragraphs that produced at least one sentence count: this guards\n",
    "        # against an IndexError on delimiter-only paragraphs and against\n",
    "        # mis-tagging the previous paragraph's last sentence as LASTSENTENCE.\n",
    "        if sentence_num > 0:\n",
    "            split_num = split_num + 1\n",
    "            sentences[-1][\"pos\"][\"mark\"].append(\"LASTSENTENCE\")\n",
    "    # Tag every sentence that belongs to the final paragraph.\n",
    "    if sentences:\n",
    "        last_x = sentences[-1][\"pos\"][\"x\"]\n",
    "        for i in range(0, len(sentences)):\n",
    "            if sentences[i][\"pos\"][\"x\"] == last_x:\n",
    "                sentences[i][\"pos\"][\"mark\"].append(\"LASTSECTION\")\n",
    "    return sentences\n",
    "# Use a context manager so the file handle is closed deterministically.\n",
    "with open('mess.txt', encoding = 'utf-8') as f:\n",
    "    sample = f.read()\n",
    "sentences = get_sentence(sample)\n",
    "print(\"对文本进行分句：\\n\", sentences[:5])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Building prefix dict from the default dictionary ...\n",
      "Loading model from cache C:\\Users\\ADMINI~1\\AppData\\Local\\Temp\\jieba.cache\n",
      "Loading model cost 0.330 seconds.\n",
      "Prefix dict has been built successfully.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "对文本进行关键词提取：\n",
      " ['运动员', '冰雪', '赛区', '展现', '赛场', '举办', '飞跃', '举办地', '获得', '中心']\n"
     ]
    }
   ],
   "source": [
    "def get_keywords(text):\n",
    "    keywords = jieba.analyse.extract_tags(text, topK = 100, withWeight = False, allowPOS = ('n','vn','v')) \n",
    "    return keywords\n",
    "keywords = get_keywords(sample)\n",
    "print(\"对文本进行关键词提取：\\n\", keywords[:10])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "文本中每个句子的权重为：\n",
      " [7, 7, 5, 5, 3, 10, 4, 4, 3, 4, 16, 7, 17, 7, 8, 14, 4, 16, 11, 8, 8, 20, 5, 9, 3, 4, 11]\n"
     ]
    }
   ],
   "source": [
    "def get_weight(sentences,keywords):\n",
    "    for sentence in sentences:\n",
    "        mark = sentence[\"pos\"][\"mark\"]\n",
    "        weightPos = 0\n",
    "        if \"FIRSTSECTION\" in mark:\n",
    "            weightPos = weightPos + 2\n",
    "        if \"FIRSTSENTENCE\" in mark:\n",
    "            weightPos = weightPos + 2\n",
    "        if \"LASTSENTENCE\" in mark:\n",
    "            weightPos = weightPos + 1\n",
    "        if \"LASTSECTION\" in mark:\n",
    "            weightPos = weightPos + 1\n",
    "        sentence[\"weightPos\"] = weightPos\n",
    "    index = [\" 冬梦 \", \" 飞跃 \"]\n",
    "    for sentence in sentences:\n",
    "        sentence[\"weightCueWords\"] = 0\n",
    "        sentence[\"weightKeywords\"] = 0\n",
    "    for i in index:\n",
    "        for sentence in sentences:\n",
    "            if sentence[\"text\"].find(i) >= 0:\n",
    "                sentence[\"weightCueWords\"] = 1\n",
    "    for keyword in keywords:\n",
    "        for sentence in sentences:\n",
    "            if sentence[\"text\"].find(keyword) >= 0:\n",
    "                sentence[\"weightKeywords\"] = sentence[\"weightKeywords\"] + 1\n",
    "    weight = []\n",
    "    for sentence in sentences:\n",
    "        sentence[\"weight\"] = sentence[\"weightPos\"] + 2 * sentence[\"weightCueWords\"] + sentence[\"weightKeywords\"]\n",
    "        weight.append(sentence[\"weight\"])\n",
    "    return weight\n",
    "weight = get_weight(sentences, keywords)\n",
    "print(\"文本中每个句子的权重为：\\n\", weight)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
