{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "对本文进行分句:\n",
      " [{'text': '2015年7月31日，北京获得2022年第24届冬奥会的举办权', 'pos': {'x': 0, 'y': 0, 'mark': ['FIRSTSECTION', 'FIRSESENTENCE']}}, {'text': '第24届冬奥会将于2022年2月4日至2月20日在中国北京举行，北京也将成为历史上第一座既举办过夏奥会又举办过冬奥会的城市', 'pos': {'x': 0, 'y': 1, 'mark': ['FIRSTSECTION', 'LASTSENTENCE']}}, {'text': '2022年北京冬奥会计划使用25个场馆，场馆分布在3个赛区，分别是北京赛区、延庆赛区和张家口赛区', 'pos': {'x': 1, 'y': 0, 'mark': ['FIRSESENTENCE', 'LASTSENTENCE']}}, {'text': '北京是中华人民共和国的首都，是全国政治中心、文化中心、国际交往中心、科技创新中心', 'pos': {'x': 2, 'y': 0, 'mark': ['FIRSESENTENCE']}}, {'text': '2008年，北京成功举办了第29届夏季奥林匹克运动会', 'pos': {'x': 2, 'y': 1, 'mark': ['LASTSENTENCE']}}]\n"
     ]
    }
   ],
   "source": [
    "import re\n",
    "import jieba.analyse\n",
    "import jieba.posseg\n",
    "def get_sentence(text):\n",
    "    \"\"\"Split text into position-tagged sentences.\n",
    "\n",
    "    Each item is {'text': sentence, 'pos': {'x': paragraph index,\n",
    "    'y': sentence index within the paragraph, 'mark': positional tags}}.\n",
    "    \"\"\"\n",
    "    split_num=0\n",
    "    sentences=[]\n",
    "    for paragraph in text.split('\\n'):\n",
    "        if paragraph:\n",
    "            sentence_num=0\n",
    "            # Split on Chinese sentence-final punctuation (！ 。 ？).\n",
    "            for sentence in re.split(\"！|。|？\",paragraph):\n",
    "                if sentence:\n",
    "                    mark=[]\n",
    "                    if split_num==0:\n",
    "                        mark.append('FIRSTSECTION')\n",
    "                    if sentence_num==0:\n",
    "                        # Bug fix: tag was misspelled 'FIRSESENTENCE', so the\n",
    "                        # 'FIRSTSENTENCE' bonus in get_weight never applied.\n",
    "                        mark.append('FIRSTSENTENCE')\n",
    "                    sentences.append({'text':sentence,'pos':{'x':split_num,'y':sentence_num,'mark':mark}})\n",
    "                    sentence_num=sentence_num+1\n",
    "            if sentence_num>0:\n",
    "                # Only count paragraphs that actually yielded a sentence; avoids\n",
    "                # tagging LASTSENTENCE on the previous paragraph's sentence (or\n",
    "                # IndexError on the first) when a paragraph is punctuation-only.\n",
    "                split_num=split_num+1\n",
    "                sentences[-1]['pos']['mark'].append('LASTSENTENCE')\n",
    "    for i in range(0,len(sentences)):\n",
    "        if sentences[i]['pos']['x']==sentences[-1]['pos']['x']:\n",
    "            sentences[i]['pos']['mark'].append('LASTSECTION')\n",
    "    return sentences\n",
    "# 'with' guarantees the file handle is closed after reading.\n",
    "with open('mess.txt',encoding='utf-8') as f:\n",
    "    sample=f.read()\n",
    "sentences=get_sentence(sample)\n",
    "print('对本文进行分句:\\n',sentences[:5])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "对文本进行关键词提取:\n",
      " ['运动员', '冰雪', '赛区', '展现', '赛场', '举办', '飞跃', '举办地', '获得', '中心']\n"
     ]
    }
   ],
   "source": [
    "def get_keywords(text):\n",
    "    \"\"\"Extract the top-100 TF-IDF keywords, restricted to nouns/verbs ('n', 'vn', 'v').\"\"\"\n",
    "    return jieba.analyse.extract_tags(\n",
    "        text,\n",
    "        topK=100,\n",
    "        withWeight=False,\n",
    "        allowPOS=('n', 'vn', 'v'),\n",
    "    )\n",
    "keywords = get_keywords(sample)\n",
    "print('对文本进行关键词提取:\\n',keywords[:10])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "文本中每个句子的权重为:\n",
      " [5, 7, 3, 3, 3, 8, 2, 4, 3, 2, 14, 5, 17, 5, 8, 12, 4, 14, 11, 8, 10, 20, 3, 7, 3, 4, 9]\n"
     ]
    }
   ],
   "source": [
    "def get_weight(sentences,keywords):\n",
    "    \"\"\"Score each sentence in place and return the list of scores.\n",
    "\n",
    "    weight = weightPos + 2 * weightCueWords + weightKeywords\n",
    "    \"\"\"\n",
    "    # Bonus per positional tag. NOTE(review): the sentence splitter must emit\n",
    "    # 'FIRSTSENTENCE' with this exact spelling for its bonus to apply.\n",
    "    position_bonus={'FIRSTSECTION':2,'FIRSTSENTENCE':2,'LASTSENTENCE':1,'LASTSECTION':1}\n",
    "    cue_words=['冬梦','飞跃']\n",
    "    for sentence in sentences:\n",
    "        marks=sentence['pos']['mark']\n",
    "        sentence['weightPos']=sum(b for m,b in position_bonus.items() if m in marks)\n",
    "        # 1 if the sentence contains any cue word, else 0.\n",
    "        sentence['weightCueWords']=1 if any(cue in sentence['text'] for cue in cue_words) else 0\n",
    "        # Count how many extracted keywords occur in the sentence.\n",
    "        sentence['weightKeywords']=sum(1 for kw in keywords if kw in sentence['text'])\n",
    "    weight=[]\n",
    "    for sentence in sentences:\n",
    "        sentence['weight']=sentence['weightPos']+2*sentence['weightCueWords']+sentence['weightKeywords']\n",
    "        weight.append(sentence['weight'])\n",
    "    return weight\n",
    "weight=get_weight(sentences,keywords)\n",
    "print('文本中每个句子的权重为:\\n',weight)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
