{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'\\n    @Author: King\\n    @Date: 2019.05.28\\n    @Purpose: Neural Relation Extraction with Selective Attention over Instances\\n    @Introduction:  Neural Relation Extraction with Selective Attention over Instances\\n    @Datasets: Chinese relation extration datasets\\n    @Link : 论文研读笔记作业-\\n    @Reference : \\n    @paper ： https://aclweb.org/anthology/P/P16/P16-1200.pdf\\n'"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# encoding = utf8\n",
    "'''\n",
    "    @Author: King\n",
    "    @Date: 2019.05.28\n",
    "    @Purpose: Neural Relation Extraction with Selective Attention over Instances\n",
    "    @Introduction:  Neural Relation Extraction with Selective Attention over Instances\n",
    "    @Datasets: Chinese relation extration datasets\n",
    "    @Link : 论文研读笔记作业-\n",
    "    @Reference : \n",
    "    @paper ： https://aclweb.org/anthology/P/P16/P16-1200.pdf\n",
    "'''"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Neural Relation Extraction with Selective Attention over Instances\n",
    "\n",
    "\n",
    "Original paper [Neural Relation Extraction with Selective Attention over Instances](https://aclweb.org/anthology/P/P16/P16-1200.pdf) \n",
    "\n",
    "    In this paper, we propose a sentence-level attention-based convolutional neural network (CNN) for distantly supervised relation extraction. \n",
    "\n",
    "    As illustrated in Fig. 1, we employ a CNN to embed the semantics of sentences. Afterward, to utilize all informative sentences, we represent the relationship as the semantic composition of sentence embeddings. \n",
    "\n",
    "    To address the wrong labeling problem, we build sentence-level attention over multiple instances, which is expected to dynamically reduce the weights of those noisy instances. Finally, we extract relation with the relation vector weighted by sentence-level attention. (为了解决错误的标签问题，我们在多个实例上建立了句子级别的注意力，这有望动态地减少那些噪声实例的权重。 最后，我们提取与句子级注意加权的关系向量的关系。)\n",
    "\n",
    "    We evaluate our model on a real-world dataset in the task of relation extraction. The experimental results show that our model achieves significant and consistent improvements in relation extraction as compared with the state-of-the-art methods.\n",
    "\n",
    "<p align=\"center\">\n",
    "\t<img width=\"500\" height=\"300\" src=\"img/sentence_level_attention_based_CNN.png\">\n",
    "</p>\n",
    "\n",
    "### Requrements\n",
    "\n",
    "* Python (>=3.5)\n",
    "\n",
    "* TensorFlow (>=r1.0)\n",
    "\n",
    "* scikit-learn (>=0.18)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1、导入包"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\progrom\\python\\python\\python3\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "import random\n",
    "import os,sys\n",
    "import datetime\n",
    "from collections import Counter\n",
    "\n",
    "def set_seed():\n",
    "    os.environ['PYTHONHASHSEED'] = '0'\n",
    "    np.random.seed(2019)\n",
    "    random.seed(2019)\n",
    "    tf.set_random_seed(2019)\n",
    "\n",
    "set_seed()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2、编写 Settings Class"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Settings(object):\n",
    "    def __init__(self):\n",
    "        \n",
    "        '''\n",
    "            Data loading params \n",
    "        '''\n",
    "        self.data_path = \"E:/pythonWp/game/CCKS2019/RelationshipExtraction/open_data/\" # data dir to load\n",
    "        self.relation2id_path = \"relation2id.txt\"\n",
    "        \n",
    "        self.sent_train_path = \"sent_train.txt\" \n",
    "        self.bag_relation_train_path = \"bag_relation_train.txt\" \n",
    "        self.sent_relation_train_path = \"sent_relation_train.txt\" \n",
    "        \n",
    "        self.sent_dev_path = \"sent_dev.txt\" \n",
    "        self.bag_relation_dev_path = \"bag_relation_dev.txt\" \n",
    "        self.sent_relation_dev_path = \"sent_relation_dev.txt\" \n",
    "        \n",
    "        self.sent_test_path = \"sent_test.txt\" \n",
    "        self.bag_relation_test_path = \"bag_relation_test.txt\" \n",
    "        self.sent_relation_test_path = \"sent_relation_test.txt\" \n",
    "        \n",
    "        self.result_bag_file = \"result_bag.txt\" \n",
    "        self.result_sent_file = \"result_sent.txt\" \n",
    "        \n",
    "\n",
    "        '''\n",
    "            Model Hyper-parameters \n",
    "        '''\n",
    "        '''\n",
    "            1、 parameters\n",
    "        '''\n",
    "        self.cuda = '0'                   # gpu id\n",
    "        self.batch_size = 50              # batch size\n",
    "        self.epochs = 200                 # max train epochs\n",
    "        self.model_path = 'model'         # save model dir\n",
    "        self.level = 'sent'                # bag level or sentence level, option:bag/sent\n",
    "        self.mode = 'train'               # train or test\n",
    "        \n",
    "        '''\n",
    "            2、Embeddings\n",
    "        '''\n",
    "        self.embed_path = \"E:/pythonWp/game/CCKS2019/RelationshipExtraction/origin_data/vec.txt\"\n",
    "        self.pre_embed = True             # load pre-trained word2vec\n",
    "        self.word_dim = 200               # dimension of word embedding\n",
    "        self.pos_dim = 5                  # dimension of position embedding\n",
    "        \n",
    "        '''\n",
    "            2、Training parameters\n",
    "        '''\n",
    "        self.hidden_dim = 100             #dimension of hidden embedding\n",
    "        self.dropout = 0.5\n",
    "        self.lr = 0.001\n",
    "        self.word_frequency = 5          # minimum word frequency when constructing vocabulary list\n",
    "        self.pos_limit = 15              # max distance of position embedding\n",
    "        self.sen_len = 60                # sentence length\n",
    "        self.window = 3                  # window size\n",
    "        \n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "class ACNN:\n",
    "    def __init__(self, settins):\n",
    "        self.lr = settins.lr\n",
    "        self.sen_len = settins.sen_len\n",
    "        self.pre_embed = settins.pre_embed\n",
    "        self.pos_limit = settins.pos_limit\n",
    "        self.pos_dim = settins.pos_dim\n",
    "        self.window = settins.window\n",
    "        self.word_dim = settins.word_dim\n",
    "        self.hidden_dim = settins.hidden_dim\n",
    "        self.batch_size = settins.batch_size\n",
    "        self.data_path = settins.data_path\n",
    "        self.model_path = settins.model_path\n",
    "        self.mode = settins.mode\n",
    "        self.epochs = settins.epochs\n",
    "        self.dropout = settins.dropout\n",
    "        self.word_frequency = settins.word_frequency\n",
    "\n",
    "        self.relation2id_path = settins.relation2id_path\n",
    "        self.embed_path = settins.embed_path\n",
    "\n",
    "        self.sent_train_path = settins.sent_train_path\n",
    "        self.bag_relation_train_path = settins.bag_relation_train_path\n",
    "        self.sent_relation_train_path =settins.sent_relation_train_path\n",
    "\n",
    "        self.sent_dev_path = settins.sent_dev_path\n",
    "        self.bag_relation_dev_path = settins.bag_relation_dev_path\n",
    "        self.sent_relation_dev_path = settins.sent_relation_dev_path\n",
    "\n",
    "        self.sent_test_path = settins.sent_test_path\n",
    "        self.bag_relation_test_path = settins.bag_relation_test_path\n",
    "        self.sent_relation_test_path = settins.sent_relation_test_path\n",
    "\n",
    "        self.result_bag_file = settins.result_bag_file\n",
    "        self.result_sent_file = settins.result_sent_file\n",
    "\n",
    "        if settins.level == 'sent':\n",
    "            self.bag = False\n",
    "        elif settins.level == 'bag':\n",
    "            self.bag = True\n",
    "        else:\n",
    "            self.bag = True\n",
    "\n",
    "        self.pos_num = 2 * self.pos_limit + 3\n",
    "        self.relation2id = self.load_relation()\n",
    "        self.num_classes = len(self.relation2id)\n",
    "\n",
    "        ''' self.pos_num  self.relation2id  self.num_classes\n",
    "            print(\"self.pos_num:{0}\".format(self.pos_num))\n",
    "            print(\"self.relation2id:{0}\".format(self.relation2id))\n",
    "            print(\"self.num_classes:{0}\".format(self.num_classes))\n",
    "            sys.exit(0)\n",
    "            output:\n",
    "                self.pos_num:33\n",
    "                self.relation2id:\n",
    "                    {'NA': 0, '人物关系/亲属关系/配偶/丈夫/现夫': 1, ... ,'人物关系/师生关系/学生': 34}\n",
    "                self.num_classes:35\n",
    "        '''\n",
    "\n",
    "        if self.pre_embed:\n",
    "            self.wordMap, word_embed = self.load_wordVec()\n",
    "            self.word_embedding = tf.get_variable(initializer=word_embed, name='word_embedding', trainable=False)\n",
    "\n",
    "        else:\n",
    "            self.wordMap = self.load_wordMap()\n",
    "            self.word_embedding = tf.get_variable(shape=[len(self.wordMap), self.word_dim], name='word_embedding',trainable=True)\n",
    "\n",
    "\n",
    "        self.pos_e1_embedding = tf.get_variable(name='pos_e1_embedding', shape=[self.pos_num, self.pos_dim])\n",
    "        self.pos_e2_embedding = tf.get_variable(name='pos_e2_embedding', shape=[self.pos_num, self.pos_dim])\n",
    "\n",
    "        self.relation_embedding = tf.get_variable(name='relation_embedding', shape=[self.hidden_dim, self.num_classes])\n",
    "        self.relation_embedding_b = tf.get_variable(name='relation_embedding_b', shape=[self.num_classes])\n",
    "\n",
    "        self.sentence_reps = self.CNN_encoder()\n",
    "\n",
    "        if self.bag:\n",
    "            self.bag_level()\n",
    "        else:\n",
    "            self.sentence_level()\n",
    "        self._classifier_train_op = tf.train.AdamOptimizer(self.lr).minimize(self.classifier_loss)\n",
    "\n",
    "    def pos_index(self, x):\n",
    "        if x < -self.pos_limit:\n",
    "            return 0\n",
    "        if x >= -self.pos_limit and x <= self.pos_limit:\n",
    "            return x + self.pos_limit + 1\n",
    "        if x > self.pos_limit:\n",
    "            return 2 * self.pos_limit + 2\n",
    "\n",
    "    # 加载词向量及词典\n",
    "    def load_wordVec(self):\n",
    "        wordMap = {}\n",
    "        wordMap['PAD'] = len(wordMap)\n",
    "        wordMap['UNK'] = len(wordMap)\n",
    "        word_embed = []\n",
    "        for line in open(os.path.join(self.data_path, self.embed_path),'r',encoding='utf8'):\n",
    "            content = line.strip().split()\n",
    "            if len(content) != self.word_dim + 1:\n",
    "                continue\n",
    "            wordMap[content[0]] = len(wordMap)\n",
    "            word_embed.append(np.asarray(content[1:], dtype=np.float32))\n",
    "\n",
    "        #print(word_embed)\n",
    "        word_embed = np.stack(word_embed)\n",
    "        embed_mean, embed_std = word_embed.mean(), word_embed.std()\n",
    "\n",
    "        pad_embed = np.random.normal(embed_mean, embed_std, (2, self.word_dim))\n",
    "        word_embed = np.concatenate((pad_embed, word_embed), axis=0)\n",
    "        word_embed = word_embed.astype(np.float32)\n",
    "        return wordMap, word_embed\n",
    "\n",
    "    # 加载词典，在没用预训练词向量的时候，需要利用该方法加载词典\n",
    "    def load_wordMap(self):\n",
    "        wordMap = {}\n",
    "        wordMap['PAD'] = len(wordMap)\n",
    "        wordMap['UNK'] = len(wordMap)\n",
    "        all_content = []\n",
    "        for line in open(os.path.join(self.data_path, self.sent_train_path),encoding='utf-8'):\n",
    "            all_content += line.strip().split('\\t')[3].split()\n",
    "        for item in Counter(all_content).most_common():\n",
    "            if item[1] > self.word_frequency:\n",
    "                wordMap[item[0]] = len(wordMap)\n",
    "            else:\n",
    "                break\n",
    "        return wordMap\n",
    "    \n",
    "    # 加载关系\n",
    "    def load_relation(self):\n",
    "        relation2id = {}\n",
    "        for line in open(os.path.join(self.data_path, self.relation2id_path),encoding='utf8'):\n",
    "            relation, id_ = line.strip().split()\n",
    "            relation2id[relation] = int(id_)\n",
    "        return relation2id\n",
    "\n",
    "    # 加载句子文件，并计算位置嵌入向量\n",
    "    def load_sent(self, filename):\n",
    "        sentence_dict = {}\n",
    "        nums = 0\n",
    "        with open(os.path.join(self.data_path, filename), 'r',encoding='utf8') as fr:\n",
    "            for line in fr:\n",
    "                id_, en1, en2, sentence = line.strip().split('\\t')\n",
    "                sentence = sentence.split()\n",
    "                en1_pos = 0\n",
    "                en2_pos = 0\n",
    "                for i in range(len(sentence)):\n",
    "                    if sentence[i] == en1:\n",
    "                        en1_pos = i\n",
    "                    if sentence[i] == en2:\n",
    "                        en2_pos = i\n",
    "                words = []\n",
    "                pos1 = []\n",
    "                pos2 = []\n",
    "\n",
    "                length = min(self.sen_len, len(sentence))\n",
    "\n",
    "                for i in range(length):\n",
    "                    words.append(self.wordMap.get(sentence[i], self.wordMap['UNK']))\n",
    "                    pos1.append(self.pos_index(i - en1_pos))\n",
    "                    pos2.append(self.pos_index(i - en2_pos))\n",
    "\n",
    "                if length < self.sen_len:\n",
    "                    for i in range(length, self.sen_len):\n",
    "                        words.append(self.wordMap['PAD'])\n",
    "                        pos1.append(self.pos_index(i - en1_pos))\n",
    "                        pos2.append(self.pos_index(i - en2_pos))\n",
    "                sentence_dict[id_] = np.reshape(np.asarray([words, pos1, pos2], dtype=np.int32), (1, 3, self.sen_len))\n",
    "                \n",
    "                ''' sentence_dict\n",
    "                   output:\n",
    "                   {\n",
    "                       'TRAIN_SENT_ID_000001': \n",
    "                       array(\n",
    "                       [\n",
    "                           [\n",
    "                           [   528,   1345,   1865,   2439,    141,      7,     18,  18946,\n",
    "                                     18, 130059,      2, 146384,    867, 426307, 426307, 426307,\n",
    "                                 426307, 426307, 426307, 426307, 426307, 426307, 426307, 426307,\n",
    "                                 426307, 426307, 426307, 426307, 426307, 426307, 426307, 426307,\n",
    "                                 426307, 426307, 426307, 426307, 426307, 426307, 426307, 426307,\n",
    "                                 426307, 426307, 426307, 426307, 426307, 426307, 426307, 426307,\n",
    "                                 426307, 426307, 426307, 426307, 426307, 426307, 426307, 426307,\n",
    "                                 426307, 426307, 426307, 426307],\n",
    "                            [     7,      8,      9,     10,     11,     12,     13,     14,\n",
    "                                 15,     16,     17,     18,     19,     20,     21,     22,\n",
    "                                 23,     24,     25,     26,     27,     28,     29,     30,\n",
    "                                 31,     32,     32,     32,     32,     32,     32,     32,\n",
    "                                 32,     32,     32,     32,     32,     32,     32,     32,\n",
    "                                 32,     32,     32,     32,     32,     32,     32,     32,\n",
    "                                 32,     32,     32,     32,     32,     32,     32,     32,\n",
    "                                 32,     32,     32,     32],\n",
    "                            [     5,      6,      7,      8,      9,     10,     11,     12,\n",
    "                                 13,     14,     15,     16,     17,     18,     19,     20,\n",
    "                                 21,     22,     23,     24,     25,     26,     27,     28,\n",
    "                                 29,     30,     31,     32,     32,     32,     32,     32,\n",
    "                                 32,     32,     32,     32,     32,     32,     32,     32,\n",
    "                                 32,     32,     32,     32,     32,     32,     32,     32,\n",
    "                                 32,     32,     32,     32,     32,     32,     32,     32,\n",
    "                                 32,     32,     32,     32\n",
    "                             ]\n",
    "                             ]\n",
    "                        ]\n",
    "                        )\n",
    "                    }\n",
    "                \n",
    "                '''\n",
    "        return sentence_dict\n",
    "\n",
    "    def data_batcher(self, sentence_dict, filename, padding=False, shuffle=True):\n",
    "        if self.bag:\n",
    "            all_bags = []\n",
    "            all_sents = []\n",
    "            all_labels = []\n",
    "            with open(os.path.join(self.data_path, filename), 'r',encoding='utf-8') as fr:\n",
    "                for line in fr:\n",
    "                    rel = [0] * self.num_classes\n",
    "                    try:\n",
    "                        bag_id, _, _, sents, types = line.strip().split('\\t')\n",
    "                        type_list = types.split()\n",
    "                        for tp in type_list:\n",
    "                            if len(type_list) > 1 and tp == '0': # if a bag has multiple relations, we only consider non-NA relations\n",
    "                                continue\n",
    "                            rel[int(tp)] = 1\n",
    "                    except:\n",
    "                        bag_id, _, _, sents = line.strip().split('\\t')\n",
    "\n",
    "                    sent_list = []\n",
    "                    for sent in sents.split():\n",
    "                        sent_list.append(sentence_dict[sent])\n",
    "\n",
    "                    all_bags.append(bag_id)\n",
    "                    all_sents.append(np.concatenate(sent_list,axis=0))\n",
    "                    all_labels.append(np.asarray(rel, dtype=np.float32))\n",
    "\n",
    "            self.data_size = len(all_bags)\n",
    "            self.datas = all_bags\n",
    "            data_order = list(range(self.data_size))\n",
    "            if shuffle:\n",
    "                np.random.shuffle(data_order)\n",
    "            if padding:\n",
    "                if self.data_size % self.batch_size != 0:\n",
    "                    data_order += [data_order[-1]] * (self.batch_size - self.data_size % self.batch_size)\n",
    "\n",
    "            for i in range(len(data_order) // self.batch_size):\n",
    "                total_sens = 0\n",
    "                out_sents = []\n",
    "                out_sent_nums = []\n",
    "                out_labels = []\n",
    "                for k in data_order[i * self.batch_size:(i + 1) * self.batch_size]:\n",
    "                    out_sents.append(all_sents[k])\n",
    "                    out_sent_nums.append(total_sens)\n",
    "                    total_sens += all_sents[k].shape[0]\n",
    "                    out_labels.append(all_labels[k])\n",
    "\n",
    "\n",
    "                out_sents = np.concatenate(out_sents, axis=0)\n",
    "                out_sent_nums.append(total_sens)\n",
    "                out_sent_nums = np.asarray(out_sent_nums, dtype=np.int32)\n",
    "                out_labels = np.stack(out_labels)\n",
    "\n",
    "                yield out_sents, out_labels, out_sent_nums\n",
    "        else:\n",
    "            all_sent_ids = []\n",
    "            all_sents = []\n",
    "            all_labels = []\n",
    "            with open(os.path.join(self.data_path, filename), 'r',encoding='utf-8') as fr:\n",
    "                for line in fr:\n",
    "                    rel = [0] * self.num_classes\n",
    "                    try:\n",
    "                        sent_id, types = line.strip().split('\\t')\n",
    "                        type_list = types.split()\n",
    "                        for tp in type_list:\n",
    "                            if len(type_list) > 1 and tp == '0': # if a sentence has multiple relations, we only consider non-NA relations\n",
    "                                continue\n",
    "                            rel[int(tp)] = 1\n",
    "                    except:\n",
    "                        sent_id = line.strip()\n",
    "\n",
    "                    all_sent_ids.append(sent_id)\n",
    "                    all_sents.append(sentence_dict[sent_id])\n",
    "\n",
    "                    all_labels.append(np.reshape(np.asarray(rel, dtype=np.float32), (-1, self.num_classes)))\n",
    "\n",
    "            self.data_size = len(all_sent_ids)\n",
    "            self.datas = all_sent_ids\n",
    "\n",
    "            all_sents = np.concatenate(all_sents, axis=0)\n",
    "            all_labels = np.concatenate(all_labels, axis=0)\n",
    "            '''\n",
    "                output:\n",
    "                all_sents[0]:[[   528   1345   1865   2439    141      7     18  18946     18 130059\n",
    "                                   2 146384    867 426307 426307 426307 426307 426307 426307 426307\n",
    "                              426307 426307 426307 426307 426307 426307 426307 426307 426307 426307\n",
    "                              426307 426307 426307 426307 426307 426307 426307 426307 426307 426307\n",
    "                              426307 426307 426307 426307 426307 426307 426307 426307 426307 426307\n",
    "                              426307 426307 426307 426307 426307 426307 426307 426307 426307 426307]\n",
    "                             [     7      8      9     10     11     12     13     14     15     16\n",
    "                                  17     18     19     20     21     22     23     24     25     26\n",
    "                                  27     28     29     30     31     32     32     32     32     32\n",
    "                                  32     32     32     32     32     32     32     32     32     32\n",
    "                                  32     32     32     32     32     32     32     32     32     32\n",
    "                                  32     32     32     32     32     32     32     32     32     32]\n",
    "                             [     5      6      7      8      9     10     11     12     13     14\n",
    "                                  15     16     17     18     19     20     21     22     23     24\n",
    "                                  25     26     27     28     29     30     31     32     32     32\n",
    "                                  32     32     32     32     32     32     32     32     32     32\n",
    "                                  32     32     32     32     32     32     32     32     32     32\n",
    "                                  32     32     32     32     32     32     32     32     32     32]]\n",
    "                all_labels[0]:[1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
    "                             0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]   \n",
    "            '''\n",
    "\n",
    "            data_order = list(range(self.data_size))\n",
    "            if shuffle:\n",
    "                np.random.shuffle(data_order)\n",
    "            if padding:\n",
    "                if self.data_size % self.batch_size != 0:\n",
    "                    data_order += [data_order[-1]] * (self.batch_size - self.data_size % self.batch_size)     \n",
    "            \n",
    "            for i in range(len(data_order) // self.batch_size):\n",
    "                idx = data_order[i * self.batch_size:(i + 1) * self.batch_size]\n",
    "                yield all_sents[idx], all_labels[idx], None\n",
    "    # ACNN 模型\n",
    "    def CNN_encoder(self):\n",
    "        self.keep_prob = tf.placeholder(dtype=tf.float32, name='keep_prob')\n",
    "        self.input_word = tf.placeholder(dtype=tf.int32, shape=[None, self.sen_len], name='input_word')\n",
    "        self.input_pos_e1 = tf.placeholder(dtype=tf.int32, shape=[None, self.sen_len], name='input_pos_e1')\n",
    "        self.input_pos_e2 = tf.placeholder(dtype=tf.int32, shape=[None, self.sen_len], name='input_pos_e2')\n",
    "        self.input_label = tf.placeholder(dtype=tf.float32, shape=[None, self.num_classes], name='input_label')\n",
    "\n",
    "        inputs_forward = tf.concat(axis=2, values=[tf.nn.embedding_lookup(self.word_embedding, self.input_word), \\\n",
    "                                                   tf.nn.embedding_lookup(self.pos_e1_embedding, self.input_pos_e1), \\\n",
    "                                                   tf.nn.embedding_lookup(self.pos_e2_embedding, self.input_pos_e2)])\n",
    "        inputs_forward = tf.expand_dims(inputs_forward, -1)\n",
    "\n",
    "        with tf.name_scope('conv-maxpool'):\n",
    "            w = tf.get_variable(name='w', shape=[self.window, self.word_dim + 2 * self.pos_dim, 1, self.hidden_dim])\n",
    "            b = tf.get_variable(name='b', shape=[self.hidden_dim])\n",
    "            conv = tf.nn.conv2d(\n",
    "                inputs_forward,\n",
    "                w,\n",
    "                strides=[1, 1, 1, 1],\n",
    "                padding='VALID',\n",
    "                name='conv')\n",
    "            h = tf.nn.bias_add(conv, b)\n",
    "            pooled = tf.nn.max_pool(\n",
    "                h,\n",
    "                ksize=[1, self.sen_len - self.window + 1, 1, 1],\n",
    "                strides=[1, 1, 1, 1],\n",
    "                padding='VALID',\n",
    "                name='pool')\n",
    "        sen_reps = tf.tanh(tf.reshape(pooled, [-1, self.hidden_dim]))\n",
    "        sen_reps = tf.nn.dropout(sen_reps, self.keep_prob)\n",
    "        return sen_reps\n",
    "\n",
    "    def bag_level(self):\n",
    "        self.classifier_loss = 0.0\n",
    "        self.probability = []\n",
    "\n",
    "        self.bag_sens = tf.placeholder(dtype=tf.int32, shape=[self.batch_size + 1], name='bag_sens')\n",
    "        self.att_A = tf.get_variable(name='att_A', shape=[self.hidden_dim])\n",
    "        self.rel = tf.reshape(tf.transpose(self.relation_embedding), [self.num_classes, self.hidden_dim])\n",
    "\n",
    "        for i in range(self.batch_size):\n",
    "            sen_reps = tf.reshape(self.sentence_reps[self.bag_sens[i]:self.bag_sens[i + 1]], [-1, self.hidden_dim])\n",
    "\n",
    "            att_sen = tf.reshape(tf.multiply(sen_reps, self.att_A), [-1, self.hidden_dim])\n",
    "            score = tf.matmul(self.rel, tf.transpose(att_sen))\n",
    "            alpha = tf.nn.softmax(score, 1)\n",
    "            bag_rep = tf.matmul(alpha, sen_reps)\n",
    "\n",
    "            out = tf.matmul(bag_rep, self.relation_embedding) + self.relation_embedding_b\n",
    "\n",
    "            prob = tf.reshape(tf.reduce_sum(tf.nn.softmax(out, 1) * tf.reshape(self.input_label[i], [-1, 1]), 0),\n",
    "                              [self.num_classes])\n",
    "\n",
    "            self.probability.append(\n",
    "                tf.reshape(tf.reduce_sum(tf.nn.softmax(out, 1) * tf.diag([1.0] * (self.num_classes)), 1),\n",
    "                           [-1, self.num_classes]))\n",
    "            self.classifier_loss += tf.reduce_sum(\n",
    "                -tf.log(tf.clip_by_value(prob, 1.0e-10, 1.0)) * tf.reshape(self.input_label[i], [-1]))\n",
    "\n",
    "        self.probability = tf.concat(axis=0, values=self.probability)\n",
    "        self.classifier_loss = self.classifier_loss / tf.cast(self.batch_size, tf.float32)\n",
    "\n",
    "    def sentence_level(self):\n",
    "        out = tf.matmul(self.sentence_reps, self.relation_embedding) + self.relation_embedding_b\n",
    "        self.probability = tf.nn.softmax(out, 1)\n",
    "        self.classifier_loss = tf.reduce_mean(\n",
    "            tf.reduce_sum(-tf.log(tf.clip_by_value(self.probability, 1.0e-10, 1.0)) * self.input_label, 1))\n",
    "\n",
    "    def run_train(self, sess, batch):\n",
    "\n",
    "        sent_batch, label_batch, sen_num_batch = batch\n",
    "\n",
    "        feed_dict = {}\n",
    "        feed_dict[self.keep_prob] = self.dropout\n",
    "        feed_dict[self.input_word] = sent_batch[:, 0, :]\n",
    "        feed_dict[self.input_pos_e1] = sent_batch[:, 1, :]\n",
    "        feed_dict[self.input_pos_e2] = sent_batch[:, 2, :]\n",
    "        feed_dict[self.input_label] = label_batch\n",
    "        if self.bag:\n",
    "            feed_dict[self.bag_sens] = sen_num_batch\n",
    "\n",
    "        _, classifier_loss = sess.run([self._classifier_train_op, self.classifier_loss], feed_dict)\n",
    "\n",
    "        return classifier_loss\n",
    "\n",
    "    def run_dev(self, sess, dev_batchers):\n",
    "        all_labels = []\n",
    "        all_probs = []\n",
    "        for batch in dev_batchers:\n",
    "            sent_batch, label_batch, sen_num_batch = batch\n",
    "            all_labels.append(label_batch)\n",
    "\n",
    "            feed_dict = {}\n",
    "            feed_dict[self.keep_prob] = 1.0\n",
    "            feed_dict[self.input_word] = sent_batch[:, 0, :]\n",
    "            feed_dict[self.input_pos_e1] = sent_batch[:, 1, :]\n",
    "            feed_dict[self.input_pos_e2] = sent_batch[:, 2, :]\n",
    "            if self.bag:\n",
    "                feed_dict[self.bag_sens] = sen_num_batch\n",
    "            prob = sess.run([self.probability], feed_dict)\n",
    "            all_probs.append(np.reshape(prob, (-1, self.num_classes)))\n",
    "\n",
    "        all_labels = np.concatenate(all_labels, axis=0)[:self.data_size]\n",
    "        all_probs = np.concatenate(all_probs, axis=0)[:self.data_size]\n",
    "        if self.bag:\n",
    "            all_preds = all_probs\n",
    "            all_preds[all_probs > 0.9] = 1\n",
    "            all_preds[all_probs <= 0.9] = 0\n",
    "        else:\n",
    "            all_preds = np.eye(self.num_classes)[np.reshape(np.argmax(all_probs, 1), (-1))]\n",
    "\n",
    "        return all_preds, all_labels\n",
    "\n",
    "    def run_test(self, sess, test_batchers):\n",
    "        \"\"\"Predict relations for the test set and write results to file.\n",
    "\n",
    "        Each output line is the instance id, a tab, then the space-separated\n",
    "        predicted relation ids. Bag-level results go to result_bag_file,\n",
    "        sentence-level results to result_sent_file.\n",
    "        \"\"\"\n",
    "        all_probs = []\n",
    "        for batch in test_batchers:\n",
    "            sent_batch, _, sen_num_batch = batch\n",
    "\n",
    "            feed_dict = {}\n",
    "            feed_dict[self.keep_prob] = 1.0  # disable dropout for inference\n",
    "            feed_dict[self.input_word] = sent_batch[:, 0, :]\n",
    "            feed_dict[self.input_pos_e1] = sent_batch[:, 1, :]\n",
    "            feed_dict[self.input_pos_e2] = sent_batch[:, 2, :]\n",
    "            if self.bag:\n",
    "                feed_dict[self.bag_sens] = sen_num_batch\n",
    "            prob = sess.run([self.probability], feed_dict)\n",
    "            all_probs.append(np.reshape(prob, (-1, self.num_classes)))\n",
    "\n",
    "        all_probs = np.concatenate(all_probs, axis=0)[:self.data_size]\n",
    "        if self.bag:\n",
    "            # Bug fix: avoid aliasing all_preds to all_probs (the original\n",
    "            # mutated the probabilities in place); threshold into a new array.\n",
    "            all_preds = (all_probs > 0.9).astype(all_probs.dtype)\n",
    "            result_file = self.result_bag_file\n",
    "        else:\n",
    "            all_preds = np.eye(self.num_classes)[np.reshape(np.argmax(all_probs, 1), (-1))]\n",
    "            result_file = self.result_sent_file\n",
    "\n",
    "        # The two original write-out loops differed only in the target file and\n",
    "        # the NA fallback, so they are merged here.\n",
    "        with open(result_file, 'w', encoding='utf-8') as fw:\n",
    "            for i in range(self.data_size):\n",
    "                rel_one_hot = [int(num) for num in all_preds[i].tolist()]\n",
    "                rel_list = [str(j) for j in range(self.num_classes) if rel_one_hot[j] == 1]\n",
    "                if self.bag and not rel_list:\n",
    "                    # a bag with no predicted relation is considered NA (class 0)\n",
    "                    rel_list.append('0')\n",
    "                fw.write(self.datas[i] + '\\t' + ' '.join(rel_list) + '\\n')\n",
    "\n",
    "    def run_model(self, sess, saver):\n",
    "        \"\"\"Entry point: train with periodic dev evaluation, or load and test.\n",
    "\n",
    "        In 'train' mode, runs self.epochs epochs over the training batches,\n",
    "        logging the loss every 50 steps and evaluating on dev every 200 steps;\n",
    "        the checkpoint is saved whenever the dev F1 improves. In any other\n",
    "        mode, restores the saved checkpoint and runs the test set.\n",
    "        \"\"\"\n",
    "        if self.mode == 'train':\n",
    "            global_step = 0\n",
    "            sent_train = self.load_sent(self.sent_train_path)\n",
    "            sent_dev = self.load_sent(self.sent_dev_path)\n",
    "            max_f1 = 0.0\n",
    "\n",
    "            # makedirs(..., exist_ok=True) avoids the race between the\n",
    "            # existence check and creation that os.path.isdir + os.mkdir had\n",
    "            os.makedirs(self.model_path, exist_ok=True)\n",
    "\n",
    "            for epoch in range(self.epochs):\n",
    "                if self.bag:\n",
    "                    train_batchers = self.data_batcher(sent_train, self.bag_relation_train_path, padding=False, shuffle=True)\n",
    "                else:\n",
    "                    train_batchers = self.data_batcher(sent_train, self.sent_relation_train_path, padding=False, shuffle=True)\n",
    "                for batch in train_batchers:\n",
    "\n",
    "                    losses = self.run_train(sess, batch)\n",
    "                    global_step += 1\n",
    "                    if global_step % 50 == 0:\n",
    "                        time_str = datetime.datetime.now().isoformat()\n",
    "                        tempstr = \"{}: step {}, classifier_loss {:g}\".format(time_str, global_step, losses)\n",
    "                        print(tempstr)\n",
    "                    if global_step % 200 == 0:\n",
    "                        if self.bag:\n",
    "                            dev_batchers = self.data_batcher(sent_dev, self.bag_relation_dev_path, padding=True, shuffle=False)\n",
    "                        else:\n",
    "                            dev_batchers = self.data_batcher(sent_dev, self.sent_relation_dev_path, padding=True, shuffle=False)\n",
    "                        all_preds, all_labels = self.run_dev(sess, dev_batchers)\n",
    "\n",
    "                        # F1 ignores the NA class (column 0):\n",
    "                        # n_std = gold non-NA labels, n_sys = predicted non-NA\n",
    "                        # labels, n_r = their intersection\n",
    "                        n_r = int(np.sum(all_preds[:, 1:] * all_labels[:, 1:]))\n",
    "                        n_std = int(np.sum(all_labels[:, 1:]))\n",
    "                        n_sys = int(np.sum(all_preds[:, 1:]))\n",
    "                        try:\n",
    "                            precision = n_r / n_sys\n",
    "                            recall = n_r / n_std\n",
    "                            f1 = 2 * precision * recall / (precision + recall)\n",
    "                        except ZeroDivisionError:\n",
    "                            # no non-NA labels predicted (or present) yet\n",
    "                            f1 = 0.0\n",
    "\n",
    "                        if f1 > max_f1:\n",
    "                            max_f1 = f1\n",
    "                            print('f1: %f' % f1)\n",
    "                            print('saving model')\n",
    "                            path = saver.save(sess, os.path.join(self.model_path, 'ipre_bag_%d' % (self.bag)), global_step=0)\n",
    "                            tempstr = 'have saved model to ' + path\n",
    "                            print(tempstr)\n",
    "\n",
    "        else:\n",
    "            path = os.path.join(self.model_path, 'ipre_bag_%d' % self.bag) + '-0'\n",
    "            tempstr = 'load model: ' + path\n",
    "            print(tempstr)\n",
    "            try:\n",
    "                saver.restore(sess, path)\n",
    "            except Exception as e:\n",
    "                # Bug fix: the bare `except:` also swallowed KeyboardInterrupt and\n",
    "                # SystemExit; chain the original error so the root cause stays\n",
    "                # visible, and fix the 'Unvalid' typo in the message.\n",
    "                raise ValueError('Invalid model name') from e\n",
    "\n",
    "            sent_test = self.load_sent(self.sent_test_path)\n",
    "            if self.bag:\n",
    "                test_batchers = self.data_batcher(sent_test, self.bag_relation_test_path, padding=True, shuffle=False)\n",
    "            else:\n",
    "                test_batchers = self.data_batcher(sent_test, self.sent_relation_test_path, padding=True, shuffle=False)\n",
    "\n",
    "            self.run_test(sess, test_batchers)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "build model\n",
      "2019-10-24T17:00:07.801513: step 50, classifier_loss 0.721858\n",
      "2019-10-24T17:00:12.156861: step 100, classifier_loss 0.988556\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-13-8e394d3ac166>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m     28\u001b[0m         \u001b[0msess\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mglobal_variables_initializer\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     29\u001b[0m         \u001b[0msaver\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtrain\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mSaver\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmax_to_keep\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mNone\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 30\u001b[1;33m         \u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrun_model\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msess\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0msaver\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[1;32m<ipython-input-12-40a93f10bf0c>\u001b[0m in \u001b[0;36mrun_model\u001b[1;34m(self, sess, saver)\u001b[0m\n\u001b[0;32m    496\u001b[0m                 \u001b[1;32mfor\u001b[0m \u001b[0mbatch\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mtrain_batchers\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    497\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 498\u001b[1;33m                     \u001b[0mlosses\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrun_train\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msess\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mbatch\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    499\u001b[0m                     \u001b[0mglobal_step\u001b[0m \u001b[1;33m+=\u001b[0m \u001b[1;36m1\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    500\u001b[0m                     \u001b[1;32mif\u001b[0m \u001b[0mglobal_step\u001b[0m \u001b[1;33m%\u001b[0m \u001b[1;36m50\u001b[0m \u001b[1;33m==\u001b[0m \u001b[1;36m0\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m<ipython-input-12-40a93f10bf0c>\u001b[0m in \u001b[0;36mrun_train\u001b[1;34m(self, sess, batch)\u001b[0m\n\u001b[0;32m    403\u001b[0m             \u001b[0mfeed_dict\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbag_sens\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0msen_num_batch\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    404\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 405\u001b[1;33m         \u001b[0m_\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mclassifier_loss\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0msess\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_classifier_train_op\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mclassifier_loss\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    406\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    407\u001b[0m         \u001b[1;32mreturn\u001b[0m \u001b[0mclassifier_loss\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\progrom\\python\\python\\python3\\lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36mrun\u001b[1;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m    927\u001b[0m     \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    928\u001b[0m       result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[1;32m--> 929\u001b[1;33m                          run_metadata_ptr)\n\u001b[0m\u001b[0;32m    930\u001b[0m       \u001b[1;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    931\u001b[0m         \u001b[0mproto_data\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\progrom\\python\\python\\python3\\lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_run\u001b[1;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m   1150\u001b[0m     \u001b[1;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[1;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[1;32mor\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[1;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1151\u001b[0m       results = self._do_run(handle, final_targets, final_fetches,\n\u001b[1;32m-> 1152\u001b[1;33m                              feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[0;32m   1153\u001b[0m     \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1154\u001b[0m       \u001b[0mresults\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\progrom\\python\\python\\python3\\lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_do_run\u001b[1;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m   1326\u001b[0m     \u001b[1;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1327\u001b[0m       return self._do_call(_run_fn, feeds, fetches, targets, options,\n\u001b[1;32m-> 1328\u001b[1;33m                            run_metadata)\n\u001b[0m\u001b[0;32m   1329\u001b[0m     \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1330\u001b[0m       \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\progrom\\python\\python\\python3\\lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_do_call\u001b[1;34m(self, fn, *args)\u001b[0m\n\u001b[0;32m   1332\u001b[0m   \u001b[1;32mdef\u001b[0m \u001b[0m_do_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfn\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1333\u001b[0m     \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1334\u001b[1;33m       \u001b[1;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1335\u001b[0m     \u001b[1;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1336\u001b[0m       \u001b[0mmessage\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcompat\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mas_text\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0me\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmessage\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\progrom\\python\\python\\python3\\lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[1;34m(feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[0;32m   1317\u001b[0m       \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_extend_graph\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1318\u001b[0m       return self._call_tf_sessionrun(\n\u001b[1;32m-> 1319\u001b[1;33m           options, feed_dict, fetch_list, target_list, run_metadata)\n\u001b[0m\u001b[0;32m   1320\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1321\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0m_prun_fn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mhandle\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\progrom\\python\\python\\python3\\lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_call_tf_sessionrun\u001b[1;34m(self, options, feed_dict, fetch_list, target_list, run_metadata)\u001b[0m\n\u001b[0;32m   1405\u001b[0m     return tf_session.TF_SessionRun_wrapper(\n\u001b[0;32m   1406\u001b[0m         \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1407\u001b[1;33m         run_metadata)\n\u001b[0m\u001b[0;32m   1408\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1409\u001b[0m   \u001b[1;32mdef\u001b[0m \u001b[0m_call_tf_sessionprun\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# Clear nodes accumulated from previous runs and reset the default graph\n",
    "tf.reset_default_graph()\n",
    "print('build model')\n",
    "settings = Settings()\n",
    "# GPU setup: restrict visible devices and grow memory on demand instead of\n",
    "# pre-allocating the whole card\n",
    "gpu_options = tf.GPUOptions(visible_device_list=settings.cuda, allow_growth=True)\n",
    "with tf.Graph().as_default():\n",
    "    set_seed()  # fix random seeds for reproducibility before graph construction\n",
    "    '''\n",
    "        通过设置intra_op_parallelism_threads参数和inter_op_parallelism_threads参数，\n",
    "        来控制每个操作符op并行计算的线程个数。二者的区别在于:\n",
    "\n",
    "        intra_op_parallelism_threads 控制运算符op内部的并行\n",
    "            当运算符op为单一运算符，并且内部可以实现并行时，如矩阵乘法，reduce_sum之类的操作，\n",
    "            可以通过设置intra_op_parallelism_threads参数来并行, intra代表内部。\n",
    "        inter_op_parallelism_threads 控制多个运算符op之间的并行计算\n",
    "            当有多个运算符op，并且他们之间比较独立，运算符和运算符之间没有直接的路径Path相连。\n",
    "            Tensorflow会尝试并行地计算他们，使用由inter_op_parallelism_threads参数来控制数量的一个线程池。\n",
    "        参考：https://blog.csdn.net/s_sunnyy/article/details/71422264\n",
    "    '''\n",
    "    # Single thread for both intra-op (inside one op, e.g. matmul) and inter-op\n",
    "    # (between independent ops) parallelism, per the note above — this also\n",
    "    # helps determinism\n",
    "    sess = tf.Session(\n",
    "        config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True, intra_op_parallelism_threads=1, inter_op_parallelism_threads=1))\n",
    "    with sess.as_default():\n",
    "        # Xavier/Glorot initializer applied to all variables created in the scope\n",
    "        initializer = tf.contrib.layers.xavier_initializer()\n",
    "        with tf.variable_scope('', initializer=initializer):\n",
    "            model = ACNN(settings)\n",
    "        sess.run(tf.global_variables_initializer())\n",
    "        saver = tf.train.Saver(max_to_keep=None)  # max_to_keep=None keeps every checkpoint\n",
    "        model.run_model(sess, saver)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.3"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
