{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/root/anaconda3/envs/py37/lib/python3.7/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
    "source": [
     "import jieba\n",
     "import os\n",
     "from sklearn.metrics import classification_report\n",
     "from paddlenlp import Taskflow\n",
     "# Word-segmentation pipeline from PaddleNLP (Baidu LAC-style); reused by later cells as `lac`.\n",
     "lac = Taskflow(\"word_segmentation\")"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
    "source": [
     "# Per-sentence BMES tag sequences: gold standard vs. each segmenter's output.\n",
     "total_truth = []\n",
     "total_jieba = []\n",
     "total_baidu = []"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# path = '../data/segment sentence data/msr_training.utf8'\n",
    "path = '../data/segment_sentence_data/pku_training.utf8'\n",
    "f = open(path,'r', encoding='utf-8')\n",
    "data = f.readlines()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "\n",
    "for i in data[:100]:\n",
    "    line = i.strip(' \\n').split('  ')\n",
    "    hidden_seq = ''\n",
    "    for term in line:\n",
    "        # BMES\n",
    "        if len(term)==1:\n",
    "            hidden_seq+='S'\n",
    "        elif len(term)==0:\n",
    "            continue\n",
    "        else:\n",
    "            hid = 'B'+'M'*(len(term)-2)+'E'\n",
    "            hidden_seq+=hid    \n",
    "    \n",
    "    line = ''.join(line)\n",
    "    if not line:\n",
    "        continue\n",
    "\n",
    "    jieba_seq = ''\n",
    "    for j in jieba.cut(line):\n",
    "        if len(j)==1:\n",
    "            jieba_seq+='S'\n",
    "        elif len(j)==0:\n",
    "            continue\n",
    "        else:\n",
    "            hid = 'B'+'M'*(len(j)-2)+'E'\n",
    "            jieba_seq+=hid    \n",
    "\n",
    "    baidu_seq = ''\n",
    "    for j in lac(line):\n",
    "        if len(j)==1:\n",
    "            baidu_seq+='S'\n",
    "        elif len(j)==0:\n",
    "            continue\n",
    "        else:\n",
    "            hid = 'B'+'M'*(len(j)-2)+'E'\n",
    "            baidu_seq+=hid    \n",
    "\n",
    "\n",
    "    # total_truth.extend(list(hidden_seq))\n",
    "    # total_jieba.extend(list(jieba_seq))\n",
    "    # total_baidu.extend(list(baidu_seq))\n",
    "    total_truth.append(list(hidden_seq))\n",
    "    total_jieba.append(list(jieba_seq))\n",
    "    total_baidu.append(list(baidu_seq))\n",
    "    # print(line)\n",
    "    # print(hidden_seq)\n",
    "    # print(jieba_seq)\n",
    "    # break\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "precision: 0.8311795891318754, recall: 0.7708973570989551, f1-score: 0.799904336734694\n"
     ]
    }
   ],
   "source": [
    "def using_bmes_count_termf1(total_truth, total_result):\n",
    "\n",
    "    total_truth_term = 0\n",
    "    total_predict_term = 0\n",
    "    total_correct_term = 0\n",
    "\n",
    "    for y,x in zip(total_truth, total_result):\n",
    "        truth = []\n",
    "        temp_term = []\n",
    "        for index, tag in enumerate(y):\n",
    "            if tag=='E' or tag == 'S':\n",
    "                temp_term.append(index)\n",
    "                truth.append(temp_term)\n",
    "                temp_term = []\n",
    "            else:\n",
    "                temp_term.append(index)\n",
    "        if temp_term:\n",
    "            truth.append(temp_term)    \n",
    "\n",
    "        result = []\n",
    "        temp_term = []\n",
    "        for index, tag in enumerate(x):\n",
    "            if tag=='E' or tag == 'S':\n",
    "                temp_term.append(index)\n",
    "                result.append(temp_term)\n",
    "                temp_term = []\n",
    "            else:\n",
    "                temp_term.append(index)\n",
    "        if temp_term:\n",
    "            result.append(temp_term)   \n",
    "        \n",
    "        total_truth_term += len(truth)\n",
    "        total_predict_term += len(result)\n",
    "        total_correct_term += len([i for i in result if i in truth])\n",
    "\n",
    "    precision = total_correct_term/total_predict_term\n",
    "    recall = total_correct_term/total_truth_term  \n",
    "\n",
    "    f1 = 2*(precision*recall)/(precision+recall)      \n",
    "\n",
    "    return precision, recall, f1\n",
    "\n",
    "print('precision: %s, recall: %s, f1-score: %s'%using_bmes_count_termf1(total_truth, total_jieba))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "precision: 0.8664791528788881, recall: 0.8047019053472649, f1-score: 0.8344486934353091\n"
     ]
    }
   ],
   "source": [
    "print('precision: %s, recall: %s, f1-score: %s'%using_bmes_count_termf1(total_truth, total_baidu))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              precision    recall  f1-score   support\n",
      "\n",
      "           B       0.88      0.87      0.87    585232\n",
      "           E       0.90      0.89      0.89    585232\n",
      "           M       0.50      0.73      0.59    131296\n",
      "           S       0.85      0.77      0.81    524715\n",
      "\n",
      "    accuracy                           0.84   1826475\n",
      "   macro avg       0.78      0.81      0.79   1826475\n",
      "weighted avg       0.85      0.84      0.84   1826475\n",
      "\n"
     ]
    }
   ],
   "source": [
    "print(classification_report(total_truth, total_jieba))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              precision    recall  f1-score   support\n",
      "\n",
      "           B       0.88      0.89      0.88    585232\n",
      "           E       0.89      0.89      0.89    585232\n",
      "           M       0.49      0.79      0.61    131296\n",
      "           S       0.94      0.78      0.85    524715\n",
      "\n",
      "    accuracy                           0.85   1826475\n",
      "   macro avg       0.80      0.84      0.81   1826475\n",
      "weighted avg       0.87      0.85      0.86   1826475\n",
      "\n"
     ]
    }
   ],
   "source": [
    "print(classification_report(total_truth, total_baidu))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
    "source": [
     "import pkuseg\n",
     " \n",
     "seg = pkuseg.pkuseg()           # load the model with its default configuration\n"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['我', '爱', '北京', '天安门']"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "seg.cut('我爱北京天安门')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "total_pkuseg = []\n",
    "for i in data:\n",
    "    line = i.strip(' \\n').split('  ')\n",
    "    line = ''.join(line)\n",
    "    if not line:\n",
    "        continue\n",
    "    \n",
    "    pku_seq = ''\n",
    "    for j in seg.cut(line):\n",
    "        if len(j)==1:\n",
    "            pku_seq+='S'\n",
    "        elif len(j)==0:\n",
    "            continue\n",
    "        else:\n",
    "            hid = 'B'+'M'*(len(j)-2)+'E'\n",
    "            pku_seq+=hid \n",
    "    total_pkuseg.append(list(pku_seq))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "19056\n"
     ]
    }
   ],
   "source": [
    "print(len(data))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "precision: 0.9689176396339539, recall: 0.9436078672403196, f1-score: 0.9560952825782344\n"
     ]
    }
   ],
   "source": [
    "print('precision: %s, recall: %s, f1-score: %s'%using_bmes_count_termf1(total_truth, total_pkuseg))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              precision    recall  f1-score   support\n",
      "\n",
      "           B       0.88      0.89      0.88    585232\n",
      "           E       0.89      0.89      0.89    585232\n",
      "           M       0.49      0.79      0.61    131296\n",
      "           S       0.94      0.78      0.85    524715\n",
      "\n",
      "    accuracy                           0.85   1826475\n",
      "   macro avg       0.80      0.84      0.81   1826475\n",
      "weighted avg       0.87      0.85      0.86   1826475\n",
      "\n"
     ]
    }
   ],
   "source": [
    "print(classification_report(total_truth, total_pkuseg))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model loaded succeed\n",
      "[['我', ''], ['爱', ''], ['北京', ''], ['天安门', '']]\n"
     ]
    }
   ],
   "source": [
    "import thulac\n",
    "\n",
    "thu = thulac.thulac(seg_only=True)\n",
    "print(thu.cut('我爱北京天安门'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [],
   "source": [
    "total_thulac = []\n",
    "for i in data:\n",
    "    line = i.strip(' \\n').split('  ')\n",
    "    line = ''.join(line)\n",
    "    if not line:\n",
    "        continue\n",
    "    \n",
    "    thu_seq = ''\n",
    "    for jj in thu.cut(line):\n",
    "        j = jj[0]\n",
    "        if len(j)==1:\n",
    "            thu_seq+='S'\n",
    "        elif len(j)==0:\n",
    "            continue\n",
    "        else:\n",
    "            hid = 'B'+'M'*(len(j)-2)+'E'\n",
    "            thu_seq+=hid \n",
    "    total_thulac.append(list(thu_seq))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "precision: 0.9387281448243849, recall: 0.9322372464658881, f1-score: 0.9354714362809343\n"
     ]
    }
   ],
   "source": [
    "print('precision: %s, recall: %s, f1-score: %s'%using_bmes_count_termf1(total_truth, total_thulac))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              precision    recall  f1-score   support\n",
      "\n",
      "           B       0.96      0.94      0.95    585232\n",
      "           E       0.98      0.96      0.97    585232\n",
      "           M       0.86      0.91      0.88    131296\n",
      "           S       0.92      0.95      0.93    524715\n",
      "\n",
      "    accuracy                           0.94   1826475\n",
      "   macro avg       0.93      0.94      0.93   1826475\n",
      "weighted avg       0.94      0.94      0.94   1826475\n",
      "\n"
     ]
    }
   ],
   "source": [
    "print(classification_report(total_truth, total_thulac))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[('我', 'pronoun'),\n",
       " ('海上', 'locative word'),\n",
       " ('稠', 'adjective'),\n",
       " ('油田', 'noun'),\n",
       " ('又', 'adverb'),\n",
       " ('建', 'verb'),\n",
       " ('新区', 'noun'),\n",
       " ('。', 'punctuation mark')]"
      ]
     },
     "execution_count": 26,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
    "source": [
     "import pynlpir\n",
     "pynlpir.open()\n",
     "# NOTE(review): pynlpir appears to need sentence-ending punctuation to\n",
     "# segment reliably, hence the trailing full stop in the example sentence.\n",
     "\n",
     "s = '我海上稠油田又建新区。'\n",
     "pynlpir.segment(s)"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "total_nlpir = []\n",
    "for i in data:\n",
    "    line = i.strip(' \\n').split('  ')\n",
    "    line = ''.join(line)\n",
    "    if not line:\n",
    "        continue\n",
    "    \n",
    "    nlpir_seq = ''\n",
    "    for jj in pynlpir.segment(line):\n",
    "        j = jj[0]\n",
    "        if len(j)==1:\n",
    "            nlpir_seq+='S'\n",
    "        elif len(j)==0:\n",
    "            continue\n",
    "        else:\n",
    "            hid = 'B'+'M'*(len(j)-2)+'E'\n",
    "            nlpir_seq+=hid\n",
    "\n",
    "    if len(line) != len(nlpir_seq): \n",
    "        print(line, len(line))\n",
    "        print(pynlpir.segment(line))\n",
    "        print(nlpir_seq, len(nlpir_seq))\n",
    "\n",
    "\n",
    "    # total_nlpir.extend(list(nlpir_seq))\n",
    "    # break\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(classification_report(total_truth, total_nlpir))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "74fe859eaf5d00bce5565637797e820181e41cef3326bb2f5cfc02a88bf2f28b"
  },
  "kernelspec": {
   "display_name": "Python 3.7.13 ('py37')",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.13"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
