{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "071c23b3-a8ee-4bd8-ada7-32f15d1a2cd8",
   "metadata": {},
   "outputs": [
    {
     "ename": "ModuleNotFoundError",
     "evalue": "No module named 'wordcloud'",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mModuleNotFoundError\u001b[0m                       Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[1], line 2\u001b[0m\n\u001b[0;32m      1\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mjieba\u001b[39;00m\n\u001b[1;32m----> 2\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mwordcloud\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m WordCloud\n\u001b[0;32m      3\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mmatplotlib\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mpyplot\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01mplt\u001b[39;00m\n\u001b[0;32m      4\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mimageio\u001b[39;00m\n",
      "\u001b[1;31mModuleNotFoundError\u001b[0m: No module named 'wordcloud'"
     ]
    }
   ],
   "source": [
    "import jieba\n",
    "from wordcloud import WordCloud\n",
    "import matplotlib.pyplot as plt\n",
    "import imageio\n",
    "# Read the corpus; a context manager guarantees the handle is closed even if read() raises.\n",
    "with open('mess.txt', encoding = 'utf-8') as f:\n",
    "    t = f.read()\n",
    "# Segment the Chinese text with jieba, then re-join with spaces so WordCloud can tokenize it.\n",
    "words = jieba.lcut(t)\n",
    "txt = ' '.join(words)\n",
    "# NOTE(review): imageio.imread is deprecated since imageio v3 — prefer imageio.v2.imread; confirm installed version.\n",
    "image = imageio.imread('horse.png')\n",
    "# The mask image constrains the cloud's shape; the TTF font is required for CJK glyphs.\n",
    "w = WordCloud(font_path = 'FZFSJW.TTF', background_color = 'white', mask = image)\n",
    "w.generate(txt)\n",
    "w.to_file('test.png')\n",
    "plt.imshow(w, interpolation = 'bilinear')\n",
    "plt.axis(\"off\")\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "b1460fc3-f934-4d74-b89a-2bc336a751a5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "对文本进行分句：\n",
      " [{'text': '2015年7月31日，北京获得2022年第24届冬奥会的举办权', 'pos': {'x': 0, 'y': 0, 'mark': ['FIRSTSECTION', 'FIRSTSENTENCE']}}, {'text': '第24届冬奥会将于2022年2月4日至2月20日在中国北京举行，北京也将成为历史上第一座既举办过夏奥会又举办过冬奥会的城市', 'pos': {'x': 0, 'y': 1, 'mark': ['FIRSTSECTION', 'LASTSENTENCE']}}, {'text': '2022年北京冬奥会计划使用25个场馆，场馆分布在3个赛区，分别是北京赛区、延庆赛区和张家口赛区', 'pos': {'x': 1, 'y': 0, 'mark': ['FIRSTSENTENCE', 'LASTSENTENCE']}}, {'text': '北京是中华人民共和国的首都，是全国政治中心、文化中心、国际交往中心、科技创新中心', 'pos': {'x': 2, 'y': 0, 'mark': ['FIRSTSENTENCE']}}, {'text': '2008年，北京成功举办了第29届夏季奥林匹克运动会', 'pos': {'x': 2, 'y': 1, 'mark': ['LASTSENTENCE']}}]\n"
     ]
    }
   ],
   "source": [
    "import re\n",
    "import jieba.analyse\n",
    "import jieba.posseg\n",
    "def get_sentence(text):\n",
    "    \"\"\"Split text into sentences tagged with paragraph/sentence position.\n",
    "\n",
    "    Returns a list of dicts: {'text': sentence, 'pos': {'x': paragraph index,\n",
    "    'y': sentence index within the paragraph, 'mark': positional labels}}.\n",
    "    \"\"\"\n",
    "    split_num = 0\n",
    "    sentences = []\n",
    "    for paragraph in text.split(\"\\n\"):\n",
    "        if paragraph:\n",
    "            sentence_num = 0\n",
    "            # Split on ASCII '!' plus the full-width terminators '！', '。', '？'\n",
    "            # (the original set omitted full-width '！'). Inner variable renamed\n",
    "            # so it no longer shadows the outer loop's `paragraph`.\n",
    "            for sentence_text in re.split(\"!|！|。|？\", paragraph):\n",
    "                if sentence_text:\n",
    "                    mark = []\n",
    "                    if split_num == 0:\n",
    "                        mark.append(\"FIRSTSECTION\")\n",
    "                    if sentence_num == 0:\n",
    "                        mark.append(\"FIRSTSENTENCE\")\n",
    "                    sentences.append({\"text\": sentence_text, \"pos\": {\"x\": split_num, \"y\": sentence_num, \"mark\": mark}})\n",
    "                    sentence_num = sentence_num + 1\n",
    "            split_num = split_num + 1\n",
    "            # Only tag LASTSENTENCE if this paragraph actually produced sentences;\n",
    "            # otherwise sentences[-1] would mis-tag a previous paragraph (or crash when empty).\n",
    "            if sentence_num > 0:\n",
    "                sentences[-1][\"pos\"][\"mark\"].append(\"LASTSENTENCE\")\n",
    "    # Mark every sentence belonging to the final paragraph as LASTSECTION.\n",
    "    for i in range(0, len(sentences)):\n",
    "        if sentences[i][\"pos\"][\"x\"] == sentences[-1][\"pos\"][\"x\"]:\n",
    "            sentences[i][\"pos\"][\"mark\"].append(\"LASTSECTION\")\n",
    "    return sentences\n",
    "# Context manager closes the file (the original leaked the handle).\n",
    "with open('mess.txt', encoding = 'utf-8') as sample_file:\n",
    "    sample = sample_file.read()\n",
    "sentences = get_sentence(sample)\n",
    "print(\"对文本进行分句：\\n\", sentences[:5])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "c80cd7f7-a4d6-420d-986a-b1537193971f",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Building prefix dict from the default dictionary ...\n",
      "Loading model from cache C:\\Users\\ADMINI~1\\AppData\\Local\\Temp\\jieba.cache\n",
      "Loading model cost 0.634 seconds.\n",
      "Prefix dict has been built successfully.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "对文本进行关键词提取：\n",
      " ['运动员', '冰雪', '赛区', '展现', '赛场', '举办', '飞跃', '举办地', '获得', '中心']\n"
     ]
    }
   ],
   "source": [
    "def get_keywords(text):\n",
    "    \"\"\"Return up to 100 TF-IDF keywords, restricted to nouns ('n'), verbal nouns ('vn') and verbs ('v').\"\"\"\n",
    "    return jieba.analyse.extract_tags(text, topK = 100, withWeight = False, allowPOS = ('n','vn','v'))\n",
    "keywords = get_keywords(sample)\n",
    "print(\"对文本进行关键词提取：\\n\", keywords[:10])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "db2513e8-5df8-4cf5-95af-e1d55cb8bcce",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "文本中每个句子的权重为：\n",
      " [7, 7, 5, 5, 3, 10, 4, 4, 3, 4, 16, 7, 17, 7, 8, 14, 4, 16, 11, 8, 8, 20, 5, 9, 3, 4, 11]\n"
     ]
    }
   ],
   "source": [
    "def get_weight(sentences,keywords):\n",
    "    \"\"\"Score each sentence in place and return the list of total weights.\n",
    "\n",
    "    weight = position bonus + 2 * cue-word flag + number of keywords present.\n",
    "    \"\"\"\n",
    "    # Bonus per positional label (first paragraph / first-last sentence / last paragraph).\n",
    "    position_bonus = {\"FIRSTSECTION\": 2, \"FIRSTSENTENCE\": 2, \"LASTSENTENCE\": 1, \"LASTSECTION\": 1}\n",
    "    cue_words = [\" 冬梦 \", \" 飞跃 \"]\n",
    "    weight = []\n",
    "    for sentence in sentences:\n",
    "        marks = sentence[\"pos\"][\"mark\"]\n",
    "        sentence[\"weightPos\"] = sum(bonus for label, bonus in position_bonus.items() if label in marks)\n",
    "        # Flag is 1 when any cue word occurs, else 0 (substring match).\n",
    "        sentence[\"weightCueWords\"] = 1 if any(cue in sentence[\"text\"] for cue in cue_words) else 0\n",
    "        # Each distinct keyword found in the sentence contributes 1.\n",
    "        sentence[\"weightKeywords\"] = sum(1 for keyword in keywords if keyword in sentence[\"text\"])\n",
    "        sentence[\"weight\"] = sentence[\"weightPos\"] + 2 * sentence[\"weightCueWords\"] + sentence[\"weightKeywords\"]\n",
    "        weight.append(sentence[\"weight\"])\n",
    "    return weight\n",
    "weight = get_weight(sentences, keywords)\n",
    "print(\"文本中每个句子的权重为：\\n\", weight)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "a3f1bf0b-f601-4df6-80ba-55e69e5a2b13",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "生成自动文摘：\n",
      " 会徽图形整体充满了昂扬向上之激情，奋进飞跃之动感，色彩丰富，构图完美，象征并激发运动员以坚强的意志作为精神的翅膀，在冬残奥赛场上放飞青春梦想！\n",
      "中间舞动的线条流畅且充满韵律，代表举办地起伏的山峦、赛场、冰雪滑道和节日飘舞的丝带，为会徽增添了节日喜庆的视觉感受，也象征着北京冬奥会将在中国春节期间举行\n",
      "冬奥会会徽以汉字“冬”为灵感来源，运用中国书法的艺术形态， 将厚重的东方文化底蕴与国际化的现代 风格融为一体，呈现出新时代的中国新形象、新梦想，传递出新时代中国为办好北京冬奥会，圆冬奥之梦，实现“三亿人参与冰雪运动”目标，圆体育强国之梦，推动世界冰雪运动发展，为国际奥林匹克运动做出新贡献的不懈努力和美好追求\n"
     ]
    }
   ],
   "source": [
    "def get_sum(sentences, keywords, ratio = 0.1):\n",
    "    \"\"\"Return up to three sentence texts: the highest-weighted ones within the top `ratio` share.\n",
    "\n",
    "    `keywords` is accepted for interface compatibility but not used here.\n",
    "    \"\"\"\n",
    "    ranked = sorted(sentences, key = lambda k: k['weight'], reverse = True)\n",
    "    summary = []\n",
    "    for rank, sentence in enumerate(ranked):\n",
    "        if len(summary) >= 3:\n",
    "            break\n",
    "        # Only sentences whose rank falls inside the top `ratio` fraction qualify.\n",
    "        if rank < ratio * len(ranked):\n",
    "            summary.append(sentence[\"text\"])\n",
    "    return summary\n",
    "abstract_content = \"\\n\".join(get_sum(sentences, keywords))\n",
    "print('生成自动文摘：\\n', abstract_content)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4bdab726-4da3-4649-bbce-589c44250ca0",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python [conda env:base] *",
   "language": "python",
   "name": "conda-base-py"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
