{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/meixiao/anaconda3/envs/mx_py3/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n",
      "/home/meixiao/anaconda3/envs/mx_py3/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n",
      "/home/meixiao/anaconda3/envs/mx_py3/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n",
      "/home/meixiao/anaconda3/envs/mx_py3/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n",
      "/home/meixiao/anaconda3/envs/mx_py3/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n",
      "/home/meixiao/anaconda3/envs/mx_py3/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n",
      "/home/meixiao/anaconda3/envs/mx_py3/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n",
      "/home/meixiao/anaconda3/envs/mx_py3/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n",
      "/home/meixiao/anaconda3/envs/mx_py3/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n",
      "/home/meixiao/anaconda3/envs/mx_py3/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n",
      "/home/meixiao/anaconda3/envs/mx_py3/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n",
      "/home/meixiao/anaconda3/envs/mx_py3/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n"
     ]
    }
   ],
   "source": [
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "import random\n",
    "import os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Data-loading class for word-segmentation training data, exposing an\n",
    "# mnist-style interface (next_batch / get_data).\n",
    "class WSTrainData(object):\n",
    "    def __init__(self, data_folder):\n",
    "        \"\"\"Recursively scan `data_folder` and load every sentence.\n",
    "\n",
    "        features: list of char lists, one entry per sentence.\n",
    "        labels:   list of BMES tag lists, aligned with features.\n",
    "        \"\"\"\n",
    "        self.data_folder = data_folder\n",
    "        self.features = []\n",
    "        self.labels = []\n",
    "        \n",
    "        self._query_data()\n",
    "        print(\"features: %d, labels: %d\" % (len(self.features), len(self.labels)))\n",
    "    \n",
    "    def _query_data(self):\n",
    "        # Walk every file under data_folder; each file contributes sentences.\n",
    "        print(self.data_folder)\n",
    "        for root, dirs, files in os.walk(self.data_folder):\n",
    "            for ifile in files:\n",
    "                cur_file = os.path.join(root, ifile)\n",
    "                cur_features, cur_labels = self._query_file_data(cur_file)\n",
    "                self.features.extend(cur_features)\n",
    "                self.labels.extend(cur_labels)\n",
    "                \n",
    "    def _query_file_data(self, file_name):\n",
    "        # Parse one whitespace-segmented corpus file into (chars, BMES tags).\n",
    "        features = []\n",
    "        labels = []\n",
    "        with open(file_name, 'r', encoding=\"utf-8-sig\") as pfile:\n",
    "            for isent in [x.strip() for x in pfile if x.strip() != \"\"]:\n",
    "                words = isent.split()\n",
    "                chars = []\n",
    "                tags = []\n",
    "                for iword in words:\n",
    "                    chars.extend(list(iword))\n",
    "                    # Single-char word -> S; otherwise B, M*(len-2), E.\n",
    "                    if len(iword) == 1:\n",
    "                        tags.append(\"S\")\n",
    "                    else:\n",
    "                        tags.extend([\"B\"]+[\"M\"]*(len(iword)-2)+[\"E\"])\n",
    "                features.append(chars)\n",
    "                labels.append(tags)\n",
    "        \n",
    "        return features, labels\n",
    "    \n",
    "    def next_batch(self, batch_size):\n",
    "        # Sample `batch_size` (feature, label) pairs without replacement.\n",
    "        return list(zip(*random.sample(list(zip(self.features, self.labels)), batch_size)))\n",
    "    \n",
    "    def get_data(self):\n",
    "        return self.features, self.labels\n",
    "    \n",
    "\n",
    "# class WSDataReader(object):\n",
    "#     def __init__(self, train_folder, test_folder, dev_folder):\n",
    "#         self.train_data = "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Utility functions: dictionary building, id mappings, padding, batching.\n",
    "def create_dic(item_list, add_unk=False, add_pad=False):\n",
    "    \"\"\"\n",
    "    Create a frequency dictionary of items from a list of lists of items.\n",
    "\n",
    "    add_pad gives '<PAD>' a huge count so it sorts first (id 0 after\n",
    "    mapping); add_unk adds '<UNK>' with the next-highest count.\n",
    "    \"\"\"\n",
    "    assert type(item_list) in (list, tuple)\n",
    "    dic = {}\n",
    "    for items in item_list:\n",
    "        for item in items:\n",
    "            if item not in dic:\n",
    "                dic[item] = 1\n",
    "            else:\n",
    "                dic[item] += 1\n",
    "    # Make sure that <PAD> have a id 0.\n",
    "    if add_pad:\n",
    "        dic['<PAD>'] = 1e20\n",
    "    # If specified, add a special item <UNK>.\n",
    "    if add_unk:\n",
    "        dic['<UNK>'] = 1e10\n",
    "    return dic\n",
    "\n",
    "def create_mapping(items):\n",
    "    \"\"\"\n",
    "    Build item<->ID mappings (item2id, id2item) from a dict or a list.\n",
    "    Dict entries are ordered by decreasing frequency, ties broken by key.\n",
    "    \"\"\"\n",
    "    if type(items) is dict:\n",
    "        ranked = sorted(items.items(), key=lambda kv: (-kv[1], kv[0]))\n",
    "        id2item = dict(enumerate(kv[0] for kv in ranked))\n",
    "        item2id = {item: idx for idx, item in id2item.items()}\n",
    "        return item2id, id2item\n",
    "    elif type(items) is list:\n",
    "        id2item = dict(enumerate(items))\n",
    "        item2id = {item: idx for idx, item in id2item.items()}\n",
    "        return item2id, id2item\n",
    "\n",
    "\n",
    "def create_input(batch):\n",
    "    \"\"\"\n",
    "    Pad every id sequence in `batch` with zeros up to the longest\n",
    "    sequence length (minimum 2) and return the padded arrays, with the\n",
    "    list of original lengths appended as the final element.\n",
    "    \"\"\"\n",
    "    assert len(batch) > 0\n",
    "    lengths = [len(seq) for seq in batch]\n",
    "    max_len = max(2, max(lengths))\n",
    "    padded = [np.array(seq + [0] * (max_len - len(seq))) for seq in batch]\n",
    "    return padded + [lengths]\n",
    "\n",
    "def data_to_ids(data, mappings):\n",
    "    \"\"\"\n",
    "    Map text data to ids.\n",
    "\n",
    "    `data` and `mappings` are parallel lists: each dataset in `data` is\n",
    "    converted with the corresponding item->id mapping.  Items missing\n",
    "    from the mapping are retried in full-width and then half-width form,\n",
    "    finally falling back to the '<UNK>' id (None if '<UNK>' is absent).\n",
    "    \"\"\"\n",
    "\n",
    "    def strQ2B(ustring):\n",
    "        # Convert full-width characters to half-width.\n",
    "        rstring = \"\"\n",
    "        for uchar in ustring:\n",
    "            inside_code = ord(uchar)\n",
    "            if inside_code == 12288:\n",
    "                inside_code = 32\n",
    "            elif 65281 <= inside_code <= 65374:\n",
    "                inside_code -= 65248\n",
    "            rstring += chr(inside_code)\n",
    "        return rstring\n",
    "    def strB2Q(ustring):\n",
    "        # Convert half-width characters to full-width.\n",
    "        rstring = \"\"\n",
    "        for uchar in ustring:\n",
    "            inside_code = ord(uchar)\n",
    "            if inside_code == 32:\n",
    "                inside_code = 12288\n",
    "            elif 32 <= inside_code <= 126:\n",
    "                inside_code += 65248\n",
    "            rstring += chr(inside_code)\n",
    "        return rstring\n",
    "\n",
    "    def map(item, mapping):\n",
    "        if item in mapping:\n",
    "            return mapping[item]\n",
    "        item = strB2Q(item)\n",
    "        if item in mapping:\n",
    "            return mapping[item]\n",
    "        item = strQ2B(item)\n",
    "        # Bug fix: the original fell off the end here and always returned\n",
    "        # None for unseen items; check the half-width form, then fall\n",
    "        # back to '<UNK>'.\n",
    "        if item in mapping:\n",
    "            return mapping[item]\n",
    "        return mapping.get('<UNK>')\n",
    "\n",
    "    def map_seq(seqs, mapping):\n",
    "        return [[map(item, mapping) for item in seq] for seq in seqs]\n",
    "\n",
    "    ret = []\n",
    "    for d, m in zip(data, mappings):\n",
    "        ret.append(map_seq(d, m))\n",
    "    return tuple(ret)\n",
    "\n",
    "def data_iterator(inputs, batch_size, shuffle=True, max_length=200):\n",
    "    \"\"\"\n",
    "    A simple iterator for generating dynamic mini batches.\n",
    "\n",
    "    `inputs` is a list of parallel sequences (e.g. features, labels);\n",
    "    batches are yielded as zipped tuples.  Batches containing sequences\n",
    "    longer than `max_length` are shrunk so the token count stays roughly\n",
    "    bounded by batch_size * max_length.\n",
    "    \"\"\"\n",
    "    assert len(inputs) > 0\n",
    "    assert all([len(item) == len(inputs[0]) for item in inputs])\n",
    "    # Bug fix: zip() returns a one-shot iterator in Python 3 and\n",
    "    # np.random.shuffle requires a mutable sequence, so materialize\n",
    "    # the rows as a list before shuffling.\n",
    "    inputs = list(zip(*inputs))\n",
    "    if shuffle:\n",
    "        np.random.shuffle(inputs)\n",
    "\n",
    "    batch = []\n",
    "    bs = batch_size\n",
    "    for d in inputs:\n",
    "        if len(d[0]) > max_length:\n",
    "            bs = max(1, min(batch_size * max_length / len(d[0]), bs))\n",
    "        if len(batch) < bs:\n",
    "            batch.append(d)\n",
    "        else:\n",
    "            yield zip(*batch)\n",
    "            batch = [d]\n",
    "            if len(d[0]) < max_length:\n",
    "                bs = batch_size\n",
    "            else:\n",
    "                bs = max(1, batch_size * max_length / len(d[0]))\n",
    "    if batch:\n",
    "        yield zip(*batch)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "./datasets/sighan2005-pku/\n",
      "features: 0, labels: 0\n"
     ]
    }
   ],
   "source": [
    "train_data = WSTrainData(\"./datasets/sighan2005-pku/\")\n",
    "# Build dictionaries from the training data, create the id mappings,\n",
    "# then convert the whole training set to id sequences.\n",
    "train_features_list, train_label_list = train_data.get_data()\n",
    "\n",
    "feature_dict = create_dic(train_features_list)\n",
    "label_dict = create_dic(train_label_list)\n",
    "\n",
    "# Bug fix: create_mapping returns an (item2id, id2item) tuple, and\n",
    "# data_to_ids expects parallel lists of datasets and item->id mappings\n",
    "# (see the correct usage inside train() below); the original passed the\n",
    "# raw dataset and the whole tuple, zipping them element-wise.\n",
    "feature_2_id, id_2_feature = create_mapping(feature_dict)\n",
    "label_2_id, id_2_label = create_mapping(label_dict)\n",
    "\n",
    "feature_ids_list = data_to_ids([train_features_list], [feature_2_id])[0]\n",
    "label_ids_list = data_to_ids([train_label_list], [label_2_id])[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "./data/data/train_pku\n",
      "features: 17149, labels: 17149\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING: Logging before flag parsing goes to stderr.\n",
      "W0807 10:33:52.295961 139995648624448 deprecation.py:506] From /home/meixiao/anaconda3/envs/mx_py3/lib/python3.7/site-packages/tensorflow/python/ops/init_ops.py:1251: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Call initializer instance with the dtype argument instead of passing it to the constructor\n",
      "W0807 10:33:52.943188 139995648624448 lazy_loader.py:50] \n",
      "The TensorFlow contrib module will not be included in TensorFlow 2.0.\n",
      "For more information, please see:\n",
      "  * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n",
      "  * https://github.com/tensorflow/addons\n",
      "  * https://github.com/tensorflow/io (for I/O related ops)\n",
      "If you depend on functionality not listed there, please file an issue.\n",
      "\n",
      "W0807 10:33:53.271694 139995648624448 deprecation.py:323] From <ipython-input-5-f98677ed089a>:46: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "\n",
      "Future major versions of TensorFlow will allow gradients to flow\n",
      "into the labels input on backprop by default.\n",
      "\n",
      "See `tf.nn.softmax_cross_entropy_with_logits_v2`.\n",
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "masks shape: (?, ?)\n",
      "output shape: (?, ?, 4)\n",
      "output shape: TensorShape([Dimension(None), Dimension(4)]), y_real shape: TensorShape([Dimension(None), Dimension(4)])\n",
      "(?, 4) (?, 4)\n",
      "<class 'tensorflow.python.framework.ops.Tensor'>\n",
      "cost: 1.304517, train accuracy: 0.674679\n",
      "cost: 1.266193, train accuracy: 0.690101\n",
      "cost: 1.289039, train accuracy: 0.693976\n",
      "cost: 1.284967, train accuracy: 0.691523\n",
      "cost: 1.272350, train accuracy: 0.679876\n",
      "cost: 1.301581, train accuracy: 0.675134\n",
      "cost: 1.279155, train accuracy: 0.687957\n",
      "cost: 1.334553, train accuracy: 0.675992\n",
      "cost: 1.254689, train accuracy: 0.669146\n",
      "cost: 1.300018, train accuracy: 0.672907\n",
      "cost: 1.299292, train accuracy: 0.683694\n",
      "cost: 1.291030, train accuracy: 0.701939\n",
      "cost: 1.316339, train accuracy: 0.673492\n",
      "cost: 1.302697, train accuracy: 0.680245\n",
      "cost: 1.276957, train accuracy: 0.698174\n",
      "cost: 1.331769, train accuracy: 0.671440\n",
      "cost: 1.321330, train accuracy: 0.682103\n",
      "cost: 1.326140, train accuracy: 0.685611\n",
      "cost: 1.306884, train accuracy: 0.662046\n",
      "cost: 1.313660, train accuracy: 0.709650\n",
      "cost: 1.293337, train accuracy: 0.703682\n",
      "cost: 1.295178, train accuracy: 0.676706\n",
      "cost: 1.291573, train accuracy: 0.677553\n",
      "cost: 1.296395, train accuracy: 0.697445\n",
      "cost: 1.271470, train accuracy: 0.696841\n",
      "cost: 1.306509, train accuracy: 0.675612\n",
      "cost: 1.324446, train accuracy: 0.667500\n",
      "cost: 1.300730, train accuracy: 0.695783\n",
      "cost: 1.301317, train accuracy: 0.666564\n",
      "cost: 1.291145, train accuracy: 0.663370\n",
      "cost: 1.297506, train accuracy: 0.661111\n",
      "cost: 1.311177, train accuracy: 0.687082\n",
      "cost: 1.266552, train accuracy: 0.700565\n",
      "cost: 1.273166, train accuracy: 0.672844\n",
      "cost: 1.320413, train accuracy: 0.678287\n",
      "cost: 1.298206, train accuracy: 0.677981\n",
      "cost: 1.318429, train accuracy: 0.661202\n",
      "cost: 1.294819, train accuracy: 0.665016\n",
      "cost: 1.291189, train accuracy: 0.667021\n",
      "cost: 1.311521, train accuracy: 0.666555\n",
      "cost: 1.339317, train accuracy: 0.688965\n",
      "cost: 1.303619, train accuracy: 0.655216\n",
      "cost: 1.243915, train accuracy: 0.688894\n",
      "cost: 1.312946, train accuracy: 0.695409\n",
      "cost: 1.269041, train accuracy: 0.683881\n",
      "cost: 1.295367, train accuracy: 0.704075\n",
      "cost: 1.281479, train accuracy: 0.676251\n",
      "cost: 1.298655, train accuracy: 0.665281\n",
      "cost: 1.313066, train accuracy: 0.663311\n",
      "cost: 1.324951, train accuracy: 0.646290\n",
      "cost: 1.335346, train accuracy: 0.688677\n",
      "cost: 1.274778, train accuracy: 0.688851\n",
      "cost: 1.306468, train accuracy: 0.658900\n",
      "cost: 1.293606, train accuracy: 0.686115\n",
      "cost: 1.308274, train accuracy: 0.674217\n",
      "cost: 1.316967, train accuracy: 0.686448\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-5-f98677ed089a>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m    111\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    112\u001b[0m \u001b[0mtrain_data_folder\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m\"./data/data/train_pku\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 113\u001b[0;31m \u001b[0mtrain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtrain_data_folder\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;32m<ipython-input-5-f98677ed089a>\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(data_folder)\u001b[0m\n\u001b[1;32m    105\u001b[0m             \u001b[0;32massert\u001b[0m \u001b[0mcur_lengths\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mcur_label_lengths\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    106\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 107\u001b[0;31m             \u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moptimizer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mcur_feature_ids_padding\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mcur_label_ids_padding\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlengths\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mcur_lengths\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    108\u001b[0m             \u001b[0;32mif\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m%\u001b[0m\u001b[0;36m100\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    109\u001b[0m                 \u001b[0mcost\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcur_accuracy\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mcross_entropy\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0maccuracy\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mcur_feature_ids_padding\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my\u001b[0m\u001b[0;34m:\u001b[0m 
\u001b[0mcur_label_ids_padding\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlengths\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mcur_lengths\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/mx_py3/lib/python3.7/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m    948\u001b[0m     \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    949\u001b[0m       result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 950\u001b[0;31m                          run_metadata_ptr)\n\u001b[0m\u001b[1;32m    951\u001b[0m       \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    952\u001b[0m         \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/mx_py3/lib/python3.7/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m   1171\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1172\u001b[0m       results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m-> 1173\u001b[0;31m                              feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[1;32m   1174\u001b[0m     \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1175\u001b[0m       \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/mx_py3/lib/python3.7/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m   1348\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1349\u001b[0m       return self._do_call(_run_fn, feeds, fetches, targets, options,\n\u001b[0;32m-> 1350\u001b[0;31m                            run_metadata)\n\u001b[0m\u001b[1;32m   1351\u001b[0m     \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1352\u001b[0m       \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/mx_py3/lib/python3.7/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m   1354\u001b[0m   \u001b[0;32mdef\u001b[0m \u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1355\u001b[0m     \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1356\u001b[0;31m       \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   1357\u001b[0m     \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1358\u001b[0m       \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcompat\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_text\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/mx_py3/lib/python3.7/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m   1339\u001b[0m       \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_extend_graph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1340\u001b[0m       return self._call_tf_sessionrun(\n\u001b[0;32m-> 1341\u001b[0;31m           options, feed_dict, fetch_list, target_list, run_metadata)\n\u001b[0m\u001b[1;32m   1342\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1343\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0m_prun_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/mx_py3/lib/python3.7/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_call_tf_sessionrun\u001b[0;34m(self, options, feed_dict, fetch_list, target_list, run_metadata)\u001b[0m\n\u001b[1;32m   1427\u001b[0m     return tf_session.TF_SessionRun_wrapper(\n\u001b[1;32m   1428\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1429\u001b[0;31m         run_metadata)\n\u001b[0m\u001b[1;32m   1430\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1431\u001b[0m   \u001b[0;32mdef\u001b[0m \u001b[0m_call_tf_sessionprun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# Data preparation is done. Next: define the model (forward network,\n",
    "# loss, optimizer, accuracy) and run training.\n",
    "num_classes = 4 # B,E,M,S\n",
    "\n",
    "# embedding network\n",
    "def build_input_graph(vocab_size, emb_size):\n",
    "    \"\"\"\n",
    "    transform to embeddings from lookup tables\n",
    "    \"\"\"\n",
    "    x = tf.placeholder(dtype=tf.int32, shape=[None, None])\n",
    "    y = tf.placeholder(dtype = tf.int32, shape=[None, None])\n",
    "    \n",
    "    embeddings = tf.get_variable(\"embeddings\", [vocab_size, emb_size])\n",
    "    embedding_output = tf.nn.embedding_lookup(embeddings, x)\n",
    "    \n",
    "    # lengths holds the true (unpadded) length of each sequence in a batch.\n",
    "    lengths = tf.placeholder(dtype=tf.int32, shape=[None])\n",
    "    \n",
    "    return x, y, embeddings, embedding_output, lengths\n",
    "\n",
    "# Simple two-layer MLP tagger -- just get the pipeline running first.\n",
    "def build_tag_graph(inputs, hidden1_dim, hidden2_dim, num_classes, cur_label_ids_padding, lengths):\n",
    "    # `inputs` is already the embedding output; channels = embedding dim.\n",
    "    # Build a 0/1 mask from the true sequence lengths to zero out padding.\n",
    "    masks = tf.cast(tf.sequence_mask(lengths), tf.float32)\n",
    "    # First fully-connected layer.\n",
    "    # NOTE(review): the activation is tf.identity (linear), not relu as\n",
    "    # the original comment claimed -- confirm which was intended.\n",
    "    hidden1_output = tf.contrib.layers.fully_connected(inputs, hidden1_dim, tf.identity)\n",
    "    # Second layer.\n",
    "    hidden2_output = tf.contrib.layers.fully_connected(hidden1_output, hidden2_dim, tf.identity)\n",
    "    # Output layer.\n",
    "    output = tf.contrib.layers.fully_connected(hidden2_output, num_classes, tf.identity)\n",
    "    # NOTE(review): softmax is applied here, but `output` is later fed to\n",
    "    # softmax_cross_entropy_with_logits as `logits`, which applies softmax\n",
    "    # again -- the loss sees doubly-softmaxed values; likely a bug.\n",
    "    output = tf.nn.softmax(output)\n",
    "#     print(\"output shape: %s\" % output.shape)\n",
    "    \n",
    "    # Zero out the padded positions.\n",
    "    output = tf.multiply(output,  tf.expand_dims(masks, -1))\n",
    "    print(\"masks shape: %s\" % masks.shape)\n",
    "    print(\"output shape: %s\" % output.shape)\n",
    "    \n",
    "    # Flatten to (batch*time, num_classes).\n",
    "    output = tf.reshape(output, [-1, num_classes])\n",
    "    \n",
    "    cur_label_one_hot = tf.one_hot(cur_label_ids_padding, 4)\n",
    "    y_real = tf.reshape(cur_label_one_hot, [-1, num_classes])\n",
    "    print(\"output shape: %r, y_real shape: %r\" % (output.shape, y_real.shape))\n",
    "\n",
    "    # Loss.\n",
    "    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y_real))\n",
    "    \n",
    "    # Accuracy on the current training batch.\n",
    "    print(output.shape, y_real.shape)\n",
    "    correct_pred = tf.equal(tf.argmax(output, 1), tf.argmax(y_real, 1))\n",
    "    \n",
    "    # NOTE(review): the hard-coded 256 must equal the embedding size used\n",
    "    # by the caller -- keep in sync or parameterize.\n",
    "    all_chars_num = tf.cast(tf.shape(tf.reshape(inputs, [-1, 256]))[0], tf.float32)\n",
    "    print(type(all_chars_num))\n",
    "    all_true_chars_num = tf.cast(tf.reduce_sum(lengths), tf.float32)\n",
    "    # Correct predictions on padded positions are subtracted back out so\n",
    "    # accuracy is computed over real characters only.\n",
    "    accuracy = (tf.reduce_sum(tf.cast(correct_pred, tf.float32))+all_true_chars_num-all_chars_num)/all_true_chars_num\n",
    "#     print(type(all_true_chars_num))\n",
    "#     extra_accuracy = 1 - tf.divide(all_true_chars_num, all_chars_num)\n",
    "#     print(type(extra_accuracy))\n",
    "#     accuracy = tf.subtract(tf.reduce_mean(tf.cast(correct_pred, tf.float32)) , tf.cast(extra_accuracy, tf.float32))\n",
    "    \n",
    "    return cross_entropy, accuracy\n",
    "\n",
    "\n",
    "def train(data_folder):\n",
    "    \"\"\"Load the corpus from data_folder, build the graph, and train.\"\"\"\n",
    "    # Load the data.\n",
    "    all_data_obj = WSTrainData(data_folder)\n",
    "    # Build dicts and mappings; batches are converted to ids below.\n",
    "    feature_dic = create_dic(all_data_obj.features, True, True)\n",
    "    label_dic = create_dic(all_data_obj.labels)\n",
    "    feature_2_id, id_2_feature = create_mapping(feature_dic)\n",
    "    label_2_id, id_2_label = create_mapping(label_dic)\n",
    "    \n",
    "    # Training hyper-parameters.\n",
    "    iters = 40000\n",
    "    batch_size = 32\n",
    "    embedding_size = 256\n",
    "    # Build the graph.\n",
    "    x, y, embeddings, embeddings_out, lengths = build_input_graph(len(feature_dic), embedding_size)\n",
    "    cross_entropy, accuracy = build_tag_graph(embeddings_out, 512, 128, 4, y, lengths)\n",
    "    \n",
    "    # Optimizer.\n",
    "    lr = 1e-3\n",
    "    optimizer = tf.train.AdamOptimizer(learning_rate=lr).minimize(cross_entropy)\n",
    "    \n",
    "    # Create the session and initialize variables.\n",
    "    init_op = tf.global_variables_initializer() \n",
    "    \n",
    "    with tf.Session() as sess:\n",
    "        sess.run(init_op)\n",
    "        for i in range(iters):\n",
    "            cur_features, cur_labels = all_data_obj.next_batch(batch_size)\n",
    "#             print(cur_features)\n",
    "#             print(len(cur_features))\n",
    "            cur_feature_ids = data_to_ids([cur_features], mappings=[feature_2_id])[0]\n",
    "#             print(np.array(cur_feature_ids).shape)\n",
    "            cur_label_ids = data_to_ids([cur_labels], mappings=[label_2_id])[0]\n",
    "            # Pad each batch to its max length; create_input returns the\n",
    "            # padded sequences with the true lengths as the last element.\n",
    "            input_info = create_input(cur_feature_ids)\n",
    "            cur_feature_ids_padding = input_info[:-1]\n",
    "            cur_lengths = input_info[-1]\n",
    "            \n",
    "            label_info = create_input(cur_label_ids)\n",
    "            cur_label_ids_padding = label_info[:-1]\n",
    "            cur_label_lengths = label_info[-1]\n",
    "            assert cur_lengths == cur_label_lengths\n",
    "            \n",
    "            sess.run(optimizer, feed_dict={x: cur_feature_ids_padding, y: cur_label_ids_padding, lengths: cur_lengths})\n",
    "            if (i+1)%100 == 0:\n",
    "                cost, cur_accuracy = sess.run([cross_entropy, accuracy], feed_dict={x: cur_feature_ids_padding, y: cur_label_ids_padding, lengths: cur_lengths})\n",
    "                print(\"cost: %f, train accuracy: %f\" % (cost, cur_accuracy))\n",
    "                \n",
    "train_data_folder = \"./data/data/train_pku\"\n",
    "train(train_data_folder)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch cell: experimenting with broadcasting in tf.multiply and\n",
    "# with tf.sequence_mask; not part of the training pipeline.\n",
    "sess = tf.Session()\n",
    "a = tf.placeholder(tf.float32, [5, 1])\n",
    "b = tf.placeholder(tf.float32, [3,4,5,6])\n",
    "init_op = tf.global_variables_initializer()\n",
    "sess.run(init_op)\n",
    "print(tf.multiply(a,b).shape)\n",
    "print((a*b).shape)\n",
    "# NOTE(review): `a` is rebound here -- the placeholder above is discarded.\n",
    "a = tf.sequence_mask([1,2,3,4])\n",
    "print(sess.run(a))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "mx_py3",
   "language": "python",
   "name": "mx_py3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
