{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Imports, grouped stdlib -> third-party -> local (the original cell\n",
    "# imported os twice).\n",
    "import collections\n",
    "import os\n",
    "import pathlib\n",
    "from queue import Queue\n",
    "from threading import Thread\n",
    "\n",
    "import pandas as pd\n",
    "import tensorflow as tf\n",
    "\n",
    "# from bert_model.config import Config  # Config is defined inline below instead\n",
    "from bert_model import modeling\n",
    "from bert_model import optimization\n",
    "from bert_model import tokenization"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Project root relative to this notebook; Config (defined below) expects\n",
    "# chinese-bert/, data/ and results/ directories under it.\n",
    "basedir1 = \"../\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Config():\n",
    "    \"\"\"File paths and hyper-parameters for BERT sentence-pair fine-tuning.\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        # BERT config file\n",
    "        self.bert_config_file = os.path.join(basedir1, 'chinese-bert/bert_config.json')\n",
    "        # BERT vocab file\n",
    "        self.vocab_file = os.path.join(basedir1, 'chinese-bert/vocab.txt')\n",
    "        # Directory with the data used by the BERT model\n",
    "        self.data_dir = os.path.join(basedir1, 'data')\n",
    "        # Model output directory\n",
    "        self.output_dir = os.path.join(basedir1, 'results')\n",
    "        self.predict_file = os.path.join(basedir1, 'data/dev.csv')\n",
    "        self.test_file = os.path.join(basedir1, 'data/test.csv')\n",
    "        # Path of the pre-trained checkpoint\n",
    "        self.init_checkpoint = os.path.join(basedir1, 'chinese-bert/bert_model.ckpt')\n",
    "        self.train_checkpoint = os.path.join(basedir1, 'results')\n",
    "\n",
    "        # Flags mirroring the reference BERT run scripts; TPU is disabled\n",
    "        # here. NOTE(review): confirm which of these are actually read\n",
    "        # downstream -- several appear unused in the visible code.\n",
    "        self.do_lower_case = True\n",
    "        self.verbose_logging = False\n",
    "        self.master = None\n",
    "        self.version_2_with_negative = False\n",
    "        self.null_score_diff_threshold = 0.0\n",
    "        self.use_tpu = False\n",
    "        self.tpu_name = None\n",
    "        self.tpu_zone = None\n",
    "        self.gcp_project = None\n",
    "        self.num_tpu_cores = 8\n",
    "        self.task_name = 'sim'\n",
    "        self.gpu_memory_fraction = 0.8\n",
    "\n",
    "        # Sequence-length limits\n",
    "        self.max_seq_length = 128\n",
    "        self.doc_stride = 128\n",
    "        self.max_query_length = 64\n",
    "\n",
    "\n",
    "        # Training / prediction hyper-parameters\n",
    "        self.do_train = True\n",
    "        self.do_predict = False\n",
    "        self.batch_size = 20\n",
    "        self.predict_batch_size = 8\n",
    "        self.learning_rate = 5e-5\n",
    "        self.num_train_epochs = 3.0\n",
    "        self.warmup_proportion = 0.1\n",
    "        self.save_checkpoints_steps = 1000\n",
    "        self.iterations_per_loop = 1000\n",
    "        self.n_best_size = 20\n",
    "        self.max_answer_length = 30\n",
    "        self.eval_batch_size = 16\n",
    "        # self.do_eval = False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Single shared configuration instance used by the classes below.\n",
    "cf = Config()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "class InputExample(object):\n",
    "    \"\"\"A single training/test example for simple sequence classification.\"\"\"\n",
    "\n",
    "    def __init__(self, guid, text_a, text_b=None, label=None):\n",
    "        \"\"\"Constructs an InputExample.\n",
    "\n",
    "        Args:\n",
    "            guid: Unique id for the example.\n",
    "            text_a: string. The untokenized text of the first sequence. For single\n",
    "                sequence tasks, only this sequence must be specified.\n",
    "            text_b: (Optional) string. The untokenized text of the second sequence.\n",
    "                Only must be specified for sequence pair tasks.\n",
    "            label: (Optional) string. The label of the example. This should be\n",
    "                specified for train and dev examples, but not for test examples.\n",
    "        \"\"\"\n",
    "        self.guid = guid\n",
    "        self.text_a = text_a\n",
    "        self.text_b = text_b\n",
    "        self.label = label\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "class InputFeatures(object):\n",
    "    \"\"\"A single set of features of data.\"\"\"\n",
    "\n",
    "    def __init__(self, input_ids, input_mask, segment_ids, label_id):\n",
    "        \"\"\"Stores the model-ready encoding of one example.\n",
    "\n",
    "        Args:\n",
    "            input_ids: list of int token ids, zero-padded to max_seq_length.\n",
    "            input_mask: list of int, 1 for real tokens and 0 for padding.\n",
    "            segment_ids: list of int, 0 for the first segment and 1 for the second.\n",
    "            label_id: int index of the example's label in the label list.\n",
    "        \"\"\"\n",
    "        self.input_ids = input_ids\n",
    "        self.input_mask = input_mask\n",
    "        self.segment_ids = segment_ids\n",
    "        self.label_id = label_id\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "class DataProcessor(object):\n",
    "    \"\"\"Base class for data converters for sequence classification data sets.\n",
    "\n",
    "    Subclasses return collections (or iterables) of `InputExample` objects.\n",
    "    \"\"\"\n",
    "\n",
    "    def get_train_examples(self, data_dir):\n",
    "        \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n",
    "        raise NotImplementedError()\n",
    "\n",
    "    def get_dev_examples(self, data_dir):\n",
    "        \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n",
    "        raise NotImplementedError()\n",
    "\n",
    "    def get_test_examples(self, data_dir):\n",
    "        \"\"\"Gets a collection of `InputExample`s for prediction.\"\"\"\n",
    "        raise NotImplementedError()\n",
    "\n",
    "    def get_labels(self):\n",
    "        \"\"\"Gets the list of labels for this data set.\"\"\"\n",
    "        raise NotImplementedError()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "class SimProcessor(DataProcessor):\n",
    "    \"\"\"Processor for the sentence-pair similarity task (CSV inputs).\n",
    "\n",
    "    Each CSV row is read positionally as (text_a, text_b, label).\n",
    "    \"\"\"\n",
    "\n",
    "    @staticmethod\n",
    "    def _read_examples(file_path, set_type):\n",
    "        \"\"\"Reads one CSV file and converts its rows to `InputExample`s.\n",
    "\n",
    "        The original class repeated this loop verbatim for train/dev/test.\n",
    "\n",
    "        Args:\n",
    "            file_path: path of the CSV file to read.\n",
    "            set_type: guid prefix, e.g. 'train', 'dev' or 'test'.\n",
    "\n",
    "        Returns:\n",
    "            list of `InputExample`.\n",
    "        \"\"\"\n",
    "        df = pd.read_csv(file_path, encoding='utf-8')\n",
    "        examples = []\n",
    "        for index, row in enumerate(df.values):\n",
    "            guid = '%s-%d' % (set_type, index)\n",
    "            text_a = tokenization.convert_to_unicode(str(row[0]))\n",
    "            text_b = tokenization.convert_to_unicode(str(row[1]))\n",
    "            label = str(row[2])\n",
    "            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n",
    "        return examples\n",
    "\n",
    "    def get_train_examples(self, data_dir):\n",
    "        return self._read_examples(os.path.join(data_dir, 'train.csv'), 'train')\n",
    "\n",
    "    def get_dev_examples(self, data_dir):\n",
    "        # The original tagged dev examples with a 'test-%d' guid; guids only\n",
    "        # appear in logging, so the clearer 'dev-%d' prefix is used here.\n",
    "        return self._read_examples(os.path.join(data_dir, 'dev.csv'), 'dev')\n",
    "\n",
    "    def get_test_examples(self, data_dir):\n",
    "        return self._read_examples(os.path.join(data_dir, 'test.csv'), 'test')\n",
    "\n",
    "    def get_sentence_examples(self, questions):\n",
    "        \"\"\"Yields `InputExample`s for raw (text_a, text_b) pairs at predict time.\n",
    "\n",
    "        The label is a dummy '0' because the true label is unknown.\n",
    "        \"\"\"\n",
    "        for index, data in enumerate(questions):\n",
    "            guid = 'test-%d' % index\n",
    "            text_a = tokenization.convert_to_unicode(str(data[0]))\n",
    "            text_b = tokenization.convert_to_unicode(str(data[1]))\n",
    "            yield InputExample(guid=guid, text_a=text_a, text_b=text_b, label=str(0))\n",
    "\n",
    "    def get_labels(self):\n",
    "        \"\"\"Binary similarity labels.\"\"\"\n",
    "        return ['0', '1']\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "class BertSim():\n",
    "    \"\"\"BERT-based sentence-pair similarity model (train / eval / predict).\"\"\"\n",
    "\n",
    "    def __init__(self, batch_size=cf.batch_size):\n",
    "        \"\"\"Builds the tokenizer and processor; the estimator is created later\n",
    "        in set_mode().\n",
    "\n",
    "        Args:\n",
    "            batch_size: per-step batch size (defaults to cf.batch_size).\n",
    "        \"\"\"\n",
    "        self.mode = None\n",
    "        self.max_seq_length = cf.max_seq_length\n",
    "        self.tokenizer = tokenization.FullTokenizer(vocab_file=cf.vocab_file, do_lower_case=True)\n",
    "        self.batch_size = batch_size\n",
    "        self.estimator = None\n",
    "        self.processor = SimProcessor()    # loads the train/test data\n",
    "        # tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)\n",
    "        tf.logging.set_verbosity(tf.logging.INFO)\n",
    "\n",
    "    def set_mode(self, mode):\n",
    "        \"\"\"Sets the estimator mode and, for PREDICT, starts the queue worker.\n",
    "\n",
    "        Args:\n",
    "            mode: one of tf.estimator.ModeKeys.{TRAIN, EVAL, PREDICT}.\n",
    "        \"\"\"\n",
    "        self.mode = mode\n",
    "        self.estimator = self.get_estimator()\n",
    "        if mode == tf.estimator.ModeKeys.PREDICT:\n",
    "            # Queues of size 1 hand one request/result at a time to the\n",
    "            # background prediction thread.\n",
    "            self.input_queue = Queue(maxsize=1)\n",
    "            self.output_queue = Queue(maxsize=1)\n",
    "            self.predict_thread = Thread(target=self.predict_from_queue, daemon=True)  # daemon thread\n",
    "            self.predict_thread.start()\n",
    "\n",
    "    @staticmethod\n",
    "    def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n",
    "                     labels, num_labels, use_one_hot_embeddings):\n",
    "        \"\"\"Creates a classification model on top of BERT's pooled output.\n",
    "\n",
    "        Marked @staticmethod: the original definition omitted both `self` and\n",
    "        the decorator, so it only worked when called as\n",
    "        BertSim.create_model(...); the decorator makes that explicit and also\n",
    "        allows instance calls.\n",
    "\n",
    "        Args:\n",
    "            bert_config: modeling.BertConfig for the encoder.\n",
    "            is_training: bool; enables the 0.1 dropout on the pooled output.\n",
    "            input_ids, input_mask, segment_ids: int id/mask/segment tensors\n",
    "                (presumably [batch, seq_len]; shapes come from the caller).\n",
    "            labels: int class labels, one per example.\n",
    "            num_labels: number of output classes.\n",
    "            use_one_hot_embeddings: forwarded to modeling.BertModel.\n",
    "\n",
    "        Returns:\n",
    "            (loss, per_example_loss, logits, probabilities).\n",
    "        \"\"\"\n",
    "        model = modeling.BertModel(\n",
    "            config=bert_config,\n",
    "            is_training=is_training,\n",
    "            input_ids=input_ids,\n",
    "            input_mask=input_mask,\n",
    "            token_type_ids=segment_ids,\n",
    "            use_one_hot_embeddings=use_one_hot_embeddings)\n",
    "\n",
    "        # In the demo, we are doing a simple classification task on the entire\n",
    "        # segment.\n",
    "        #\n",
    "        # If you want to use the token-level output, use model.get_sequence_output()\n",
    "        # instead.\n",
    "        output_layer = model.get_pooled_output()\n",
    "\n",
    "        hidden_size = output_layer.shape[-1].value\n",
    "\n",
    "        output_weights = tf.get_variable(\n",
    "            \"output_weights\", [num_labels, hidden_size],\n",
    "            initializer=tf.truncated_normal_initializer(stddev=0.02))\n",
    "\n",
    "        output_bias = tf.get_variable(\n",
    "            \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n",
    "\n",
    "        with tf.variable_scope(\"loss\"):\n",
    "            if is_training:\n",
    "                # I.e., 0.1 dropout\n",
    "                output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n",
    "\n",
    "            logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n",
    "            logits = tf.nn.bias_add(logits, output_bias)\n",
    "            probabilities = tf.nn.softmax(logits, axis=-1)\n",
    "            log_probs = tf.nn.log_softmax(logits, axis=-1)\n",
    "\n",
    "            one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n",
    "\n",
    "            # Cross-entropy against the one-hot labels.\n",
    "            per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n",
    "            loss = tf.reduce_mean(per_example_loss)\n",
    "\n",
    "            return (loss, per_example_loss, logits, probabilities)\n",
    "\n",
    "    def model_fn_builder(self, bert_config, num_labels, init_checkpoint, learning_rate,\n",
    "                         num_train_steps, num_warmup_steps,\n",
    "                         use_one_hot_embeddings):\n",
    "        \"\"\"Returns a `model_fn` closure for the Estimator.\n",
    "\n",
    "        (The original docstring was garbled: 'closurimport_tfe'.)\n",
    "\n",
    "        Args:\n",
    "            bert_config: modeling.BertConfig of the encoder.\n",
    "            num_labels: number of output classes.\n",
    "            init_checkpoint: checkpoint to warm-start variables from.\n",
    "            learning_rate: learning rate passed to the optimizer.\n",
    "            num_train_steps: total optimizer steps.\n",
    "            num_warmup_steps: linear warmup steps.\n",
    "            use_one_hot_embeddings: forwarded to create_model.\n",
    "        \"\"\"\n",
    "\n",
    "        def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument\n",
    "            from tensorflow.python.estimator.model_fn import EstimatorSpec\n",
    "\n",
    "            # tf.compat.v1.logging.info(\"*** Features ***\")\n",
    "            tf.logging.info(\"*** Features ***\")\n",
    "            for name in sorted(features.keys()):\n",
    "                # tf.compat.v1.logging.info(\"  name = %s, shape = %s\" % (name, features[name].shape))\n",
    "                tf.logging.info(\"  name = %s, shape = %s\" % (name, features[name].shape))\n",
    "\n",
    "            input_ids = features[\"input_ids\"]\n",
    "            input_mask = features[\"input_mask\"]\n",
    "            segment_ids = features[\"segment_ids\"]\n",
    "            label_ids = features[\"label_ids\"]\n",
    "\n",
    "            is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n",
    "\n",
    "            (total_loss, per_example_loss, logits, probabilities) = BertSim.create_model(\n",
    "                bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n",
    "                num_labels, use_one_hot_embeddings)\n",
    "\n",
    "            # tvars = tf.compat.v1.trainable_variables()\n",
    "            tvars = tf.trainable_variables()\n",
    "            initialized_variable_names = {}\n",
    "\n",
    "            # Warm-start every variable that has a match in the checkpoint.\n",
    "            if init_checkpoint:\n",
    "                (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n",
    "                # tf.compat.v1.train.init_from_checkpoint(init_checkpoint, assignment_map)\n",
    "                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n",
    "\n",
    "            # tf.compat.v1.logging.info(\"**** Trainable Variables ****\")\n",
    "            tf.logging.info(\"**** Trainable Variables ****\")\n",
    "            for var in tvars:\n",
    "                init_string = \"\"\n",
    "                if var.name in initialized_variable_names:\n",
    "                    init_string = \", *INIT_FROM_CKPT*\"\n",
    "                # tf.compat.v1.logging.info(\"  name = %s, shape = %s%s\", var.name, var.shape, init_string)\n",
    "                tf.logging.info(\"  name = %s, shape = %s%s\", var.name, var.shape, init_string)\n",
    "\n",
    "            if mode == tf.estimator.ModeKeys.TRAIN:\n",
    "\n",
    "                train_op = optimization.create_optimizer(\n",
    "                    total_loss, learning_rate, num_train_steps, num_warmup_steps, False)\n",
    "\n",
    "                output_spec = EstimatorSpec(\n",
    "                    mode=mode,\n",
    "                    loss=total_loss,\n",
    "                    train_op=train_op)\n",
    "            elif mode == tf.estimator.ModeKeys.EVAL:\n",
    "\n",
    "                def metric_fn(per_example_loss, label_ids, logits):\n",
    "                    \"\"\"Accuracy / AUC / mean-loss metrics for evaluation.\"\"\"\n",
    "                    predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n",
    "                    accuracy = tf.metrics.accuracy(label_ids, predictions)\n",
    "                    auc = tf.metrics.auc(label_ids, predictions)\n",
    "                    loss = tf.metrics.mean(per_example_loss)\n",
    "                    return {\n",
    "                        \"eval_accuracy\": accuracy,\n",
    "                        \"eval_auc\": auc,\n",
    "                        \"eval_loss\": loss,\n",
    "                    }\n",
    "\n",
    "                eval_metrics = metric_fn(per_example_loss, label_ids, logits)\n",
    "                output_spec = EstimatorSpec(\n",
    "                    mode=mode,\n",
    "                    loss=total_loss,\n",
    "                    eval_metric_ops=eval_metrics)\n",
    "            else:\n",
    "                # PREDICT: emit class probabilities only.\n",
    "                output_spec = EstimatorSpec(mode=mode, predictions=probabilities)\n",
    "\n",
    "            return output_spec\n",
    "\n",
    "        return model_fn\n",
    "\n",
    "    def get_estimator(self):\n",
    "        \"\"\"Builds a tf.Estimator configured for the current self.mode.\n",
    "\n",
    "        NOTE(review): the train examples are read here unconditionally (even\n",
    "        for EVAL/PREDICT) just to compute num_train_steps; this costs one\n",
    "        CSV read per call.\n",
    "        \"\"\"\n",
    "\n",
    "        from tensorflow.python.estimator.estimator import Estimator\n",
    "        from tensorflow.python.estimator.run_config import RunConfig\n",
    "\n",
    "        bert_config = modeling.BertConfig.from_json_file(cf.bert_config_file)\n",
    "        label_list = self.processor.get_labels()\n",
    "        train_examples = self.processor.get_train_examples(cf.data_dir)\n",
    "        num_train_steps = int(len(train_examples) / self.batch_size * cf.num_train_epochs)\n",
    "        num_warmup_steps = int(num_train_steps * 0.1)\n",
    "\n",
    "        # TRAIN warm-starts from the pre-trained BERT checkpoint; any other\n",
    "        # mode restores the fine-tuned weights from the output directory.\n",
    "        if self.mode == tf.estimator.ModeKeys.TRAIN:\n",
    "            init_checkpoint = cf.init_checkpoint\n",
    "        else:\n",
    "            init_checkpoint = cf.output_dir   # loaded in predict mode\n",
    "\n",
    "        model_fn = self.model_fn_builder(\n",
    "            bert_config=bert_config,\n",
    "            num_labels=len(label_list),\n",
    "            init_checkpoint=init_checkpoint,\n",
    "            learning_rate=cf.learning_rate,\n",
    "            num_train_steps=num_train_steps,\n",
    "            num_warmup_steps=num_warmup_steps,\n",
    "            use_one_hot_embeddings=False)\n",
    "\n",
    "        # config = tf.compat.v1.ConfigProto()\n",
    "        config = tf.ConfigProto()\n",
    "        config.gpu_options.allow_growth = True\n",
    "        config.gpu_options.per_process_gpu_memory_fraction = cf.gpu_memory_fraction\n",
    "        config.log_device_placement = False\n",
    "\n",
    "        return Estimator(model_fn=model_fn, config=RunConfig(session_config=config), model_dir=cf.output_dir,\n",
    "                         params={'batch_size': self.batch_size})\n",
    "\n",
    "    def predict_from_queue(self):\n",
    "        \"\"\"Worker loop: stream estimator predictions into output_queue.\"\"\"\n",
    "        predictions = self.estimator.predict(\n",
    "            input_fn=self.queue_predict_input_fn, yield_single_examples=False)\n",
    "        for batch_result in predictions:\n",
    "            self.output_queue.put(batch_result)\n",
    "\n",
    "    def queue_predict_input_fn(self):\n",
    "        \"\"\"input_fn that streams feature batches from the input queue.\n",
    "\n",
    "        Returns a Dataset backed by generate_from_queue; each element is a\n",
    "        dict of int32 tensors keyed like the training features.\n",
    "        \"\"\"\n",
    "        return (tf.data.Dataset.from_generator(\n",
    "            self.generate_from_queue,\n",
    "            output_types={\n",
    "                'input_ids': tf.int32,\n",
    "                'input_mask': tf.int32,\n",
    "                'segment_ids': tf.int32,\n",
    "                'label_ids': tf.int32},\n",
    "            output_shapes={\n",
    "                'input_ids': (None, self.max_seq_length),\n",
    "                'input_mask': (None, self.max_seq_length),\n",
    "                'segment_ids': (None, self.max_seq_length),\n",
    "                'label_ids': (1,)}).prefetch(10))\n",
    "\n",
    "    def convert_examples_to_features(self, examples, label_list, max_seq_length, tokenizer):\n",
    "        \"\"\"Convert a set of `InputExample`s to `InputFeatures`, lazily.\n",
    "\n",
    "        The original inlined a near-verbatim ~100-line copy of\n",
    "        convert_single_example (and rebuilt label_map on every iteration);\n",
    "        delegating keeps the two code paths from drifting apart.\n",
    "\n",
    "        Args:\n",
    "            examples: iterable of `InputExample`.\n",
    "            label_list: list of label strings, mapped to indices by position.\n",
    "            max_seq_length: padded/truncated sequence length.\n",
    "            tokenizer: tokenization.FullTokenizer instance.\n",
    "\n",
    "        Yields:\n",
    "            InputFeatures, one per example, in input order.\n",
    "        \"\"\"\n",
    "        for (ex_index, example) in enumerate(examples):\n",
    "            yield self.convert_single_example(ex_index, example, label_list,\n",
    "                                              max_seq_length, tokenizer)\n",
    "\n",
    "    def generate_from_queue(self):\n",
    "        \"\"\"Generator feeding the predict Dataset from input_queue.\n",
    "\n",
    "        Blocks on input_queue.get(); each queue item is an iterable of\n",
    "        (text_a, text_b) pairs which is featurized into one batch dict.\n",
    "        \"\"\"\n",
    "        while True:\n",
    "            predict_examples = self.processor.get_sentence_examples(self.input_queue.get())\n",
    "            features = list(self.convert_examples_to_features(predict_examples, self.processor.get_labels(),\n",
    "                                                              cf.max_seq_length, self.tokenizer))\n",
    "            yield {\n",
    "                'input_ids': [f.input_ids for f in features],\n",
    "                'input_mask': [f.input_mask for f in features],\n",
    "                'segment_ids': [f.segment_ids for f in features],\n",
    "                'label_ids': [f.label_id for f in features]\n",
    "            }\n",
    "\n",
    "    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):\n",
    "        \"\"\"Truncates a sequence pair in place to the maximum length.\n",
    "\n",
    "        Repeatedly drops the last token of whichever sequence is currently\n",
    "        longer, so a short sequence keeps proportionally more of its tokens\n",
    "        than truncating both by an equal fraction would.\n",
    "        \"\"\"\n",
    "        while len(tokens_a) + len(tokens_b) > max_length:\n",
    "            longer = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b\n",
    "            longer.pop()\n",
    "\n",
    "    def convert_single_example(self, ex_index, example, label_list, max_seq_length, tokenizer):\n",
    "        \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\n",
    "\n",
    "        Tokenizes text_a (and text_b if present), truncates to fit\n",
    "        max_seq_length including the [CLS]/[SEP] markers, zero-pads the\n",
    "        id/mask/segment arrays, and maps the label string to its integer id.\n",
    "        The first 5 examples are logged for debugging.\n",
    "        \"\"\"\n",
    "        label_map = {}\n",
    "        for (i, label) in enumerate(label_list):\n",
    "            label_map[label] = i\n",
    "\n",
    "        tokens_a = tokenizer.tokenize(example.text_a)\n",
    "        tokens_b = None\n",
    "        if example.text_b:\n",
    "            tokens_b = tokenizer.tokenize(example.text_b)\n",
    "\n",
    "        if tokens_b:\n",
    "            # Modifies `tokens_a` and `tokens_b` in place so that the total\n",
    "            # length is less than the specified length.\n",
    "            # Account for [CLS], [SEP], [SEP] with \"- 3\"\n",
    "            self._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n",
    "        else:\n",
    "            # Account for [CLS] and [SEP] with \"- 2\"\n",
    "            if len(tokens_a) > max_seq_length - 2:\n",
    "                tokens_a = tokens_a[0:(max_seq_length - 2)]\n",
    "\n",
    "        # The convention in BERT is:\n",
    "        # (a) For sequence pairs:\n",
    "        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n",
    "        #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1\n",
    "        # (b) For single sequences:\n",
    "        #  tokens:   [CLS] the dog is hairy . [SEP]\n",
    "        #  type_ids: 0     0   0   0  0     0 0\n",
    "        #\n",
    "        # Where \"type_ids\" are used to indicate whether this is the first\n",
    "        # sequence or the second sequence. The embedding vectors for `type=0` and\n",
    "        # `type=1` were learned during pre-training and are added to the wordpiece\n",
    "        # embedding vector (and position vector). This is not *strictly* necessary\n",
    "        # since the [SEP] token unambiguously separates the sequences, but it makes\n",
    "        # it easier for the model to learn the concept of sequences.\n",
    "        #\n",
    "        # For classification tasks, the first vector (corresponding to [CLS]) is\n",
    "        # used as as the \"sentence vector\". Note that this only makes sense because\n",
    "        # the entire model is fine-tuned.\n",
    "        tokens = []\n",
    "        segment_ids = []\n",
    "        tokens.append(\"[CLS]\")\n",
    "        segment_ids.append(0)\n",
    "        for token in tokens_a:\n",
    "            tokens.append(token)\n",
    "            segment_ids.append(0)\n",
    "        tokens.append(\"[SEP]\")\n",
    "        segment_ids.append(0)\n",
    "\n",
    "        if tokens_b:\n",
    "            for token in tokens_b:\n",
    "                tokens.append(token)\n",
    "                segment_ids.append(1)\n",
    "            tokens.append(\"[SEP]\")\n",
    "            segment_ids.append(1)\n",
    "\n",
    "        input_ids = tokenizer.convert_tokens_to_ids(tokens)\n",
    "\n",
    "        # The mask has 1 for real tokens and 0 for padding tokens. Only real\n",
    "        # tokens are attended to.\n",
    "        input_mask = [1] * len(input_ids)\n",
    "\n",
    "        # Zero-pad up to the sequence length.\n",
    "        while len(input_ids) < max_seq_length:\n",
    "            input_ids.append(0)\n",
    "            input_mask.append(0)\n",
    "            segment_ids.append(0)\n",
    "\n",
    "        assert len(input_ids) == max_seq_length\n",
    "        assert len(input_mask) == max_seq_length\n",
    "        assert len(segment_ids) == max_seq_length\n",
    "\n",
    "        label_id = label_map[example.label]\n",
    "        if ex_index < 5:\n",
    "            # tf.compat.v1.logging.info(\"*** Example ***\")\n",
    "            # tf.compat.v1.logging.info(\"guid: %s\" % (example.guid))\n",
    "            # tf.compat.v1.logging.info(\"tokens: %s\" % \" \".join(\n",
    "            #     [tokenization.printable_text(x) for x in tokens]))\n",
    "            # tf.compat.v1.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n",
    "            # tf.compat.v1.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n",
    "            # tf.compat.v1.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n",
    "            # tf.compat.v1.logging.info(\"label: %s (id = %d)\" % (example.label, label_id))\n",
    "            tf.logging.info(\"*** Example ***\")\n",
    "            tf.logging.info(\"guid: %s\" % (example.guid))\n",
    "            tf.logging.info(\"tokens: %s\" % \" \".join(\n",
    "                [tokenization.printable_text(x) for x in tokens]))\n",
    "            tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n",
    "            tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n",
    "            tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n",
    "            tf.logging.info(\"label: %s (id = %d)\" % (example.label, label_id))\n",
    "\n",
    "        feature = InputFeatures(\n",
    "            input_ids=input_ids,\n",
    "            input_mask=input_mask,\n",
    "            segment_ids=segment_ids,\n",
    "            label_id=label_id)\n",
    "        return feature\n",
    "\n",
    "    def file_based_convert_examples_to_features(self, examples, label_list, max_seq_length, tokenizer, output_file):\n",
    "        \"\"\"Convert a set of `InputExample`s to a TFRecord file.\n",
    "\n",
    "        Bug fix: the original never closed the TFRecordWriter, so buffered\n",
    "        records could be lost; the `with` block closes/flushes it reliably.\n",
    "        \"\"\"\n",
    "        with tf.python_io.TFRecordWriter(output_file) as writer:\n",
    "            for (ex_index, example) in enumerate(examples):\n",
    "                if ex_index % 10000 == 0:\n",
    "                    tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n",
    "\n",
    "                feature = self.convert_single_example(ex_index, example, label_list,\n",
    "                                                      max_seq_length, tokenizer)\n",
    "\n",
    "                def create_int_feature(values):\n",
    "                    # Wrap a list of ints as an int64_list Feature proto.\n",
    "                    f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n",
    "                    return f\n",
    "\n",
    "                features = collections.OrderedDict()\n",
    "                features[\"input_ids\"] = create_int_feature(feature.input_ids)\n",
    "                features[\"input_mask\"] = create_int_feature(feature.input_mask)\n",
    "                features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n",
    "                features[\"label_ids\"] = create_int_feature([feature.label_id])\n",
    "\n",
    "                tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n",
    "                writer.write(tf_example.SerializeToString())\n",
    "\n",
    "    def file_based_input_fn_builder(self, input_file, seq_length, is_training, drop_remainder):\n",
    "        \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n",
    "\n",
    "        # Feature schema; must mirror the records written by\n",
    "        # file_based_convert_examples_to_features.\n",
    "        name_to_features = {\n",
    "            \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n",
    "            \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n",
    "            \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n",
    "            \"label_ids\": tf.FixedLenFeature([], tf.int64),\n",
    "        }\n",
    "\n",
    "        def _decode_record(record, name_to_features):\n",
    "            \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n",
    "            example = tf.parse_single_example(record, name_to_features)\n",
    "\n",
    "            # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n",
    "            # So cast all int64 to int32.\n",
    "            for name in list(example.keys()):\n",
    "                t = example[name]\n",
    "                if t.dtype == tf.int64:\n",
    "                    t = tf.to_int32(t)\n",
    "                example[name] = t\n",
    "\n",
    "            return example\n",
    "\n",
    "        def input_fn(params):\n",
    "            \"\"\"The actual input function.\"\"\"\n",
    "            # `params` is supplied by the Estimator at call time; only\n",
    "            # \"batch_size\" is read here.\n",
    "            batch_size = params[\"batch_size\"]\n",
    "\n",
    "            # For training, we want a lot of parallel reading and shuffling.\n",
    "            # For eval, we want no shuffling and parallel reading doesn't matter.\n",
    "            d = tf.data.TFRecordDataset(input_file)\n",
    "            if is_training:\n",
    "                d = d.repeat()\n",
    "                d = d.shuffle(buffer_size=100)\n",
    "\n",
    "            d = d.apply(\n",
    "                tf.contrib.data.map_and_batch(\n",
    "                    lambda record: _decode_record(record, name_to_features),\n",
    "                    batch_size=batch_size,\n",
    "                    drop_remainder=drop_remainder))\n",
    "\n",
    "            return d\n",
    "\n",
    "        return input_fn\n",
    "\n",
    "    def train(self):\n",
    "        \"\"\"Fine-tune the model on the training set.\n",
    "\n",
    "        Reads hyper-parameters from the module-level `cf` config object,\n",
    "        serializes the training examples to a TFRecord file and runs the\n",
    "        estimator for `num_train_steps` steps.\n",
    "\n",
    "        Raises:\n",
    "            ValueError: if `set_mode` was not called first, or if\n",
    "                `cf.max_seq_length` exceeds the model's position-embedding\n",
    "                capacity.\n",
    "        \"\"\"\n",
    "        if self.mode is None:\n",
    "            raise ValueError(\"Please set the 'mode' parameter\")\n",
    "\n",
    "        bert_config = modeling.BertConfig.from_json_file(cf.bert_config_file)\n",
    "\n",
    "        if cf.max_seq_length > bert_config.max_position_embeddings:\n",
    "            raise ValueError(\n",
    "                \"Cannot use sequence length %d because the BERT model \"\n",
    "                \"was only trained up to sequence length %d\" %\n",
    "                (cf.max_seq_length, bert_config.max_position_embeddings))\n",
    "\n",
    "        tf.gfile.MakeDirs(cf.output_dir)\n",
    "\n",
    "        label_list = self.processor.get_labels()\n",
    "\n",
    "        train_examples = self.processor.get_train_examples(cf.data_dir)\n",
    "        num_train_steps = int(len(train_examples) / cf.batch_size * cf.num_train_epochs)\n",
    "\n",
    "        estimator = self.get_estimator()\n",
    "\n",
    "        train_file = os.path.join(cf.output_dir, \"train.tf_record\")\n",
    "        self.file_based_convert_examples_to_features(train_examples, label_list, cf.max_seq_length, self.tokenizer,\n",
    "                                                     train_file)\n",
    "        tf.logging.info(\"***** Running training *****\")\n",
    "        tf.logging.info(\"  Num examples = %d\", len(train_examples))\n",
    "        tf.logging.info(\"  Batch size = %d\", cf.batch_size)\n",
    "        tf.logging.info(\"  Num steps = %d\", num_train_steps)\n",
    "        train_input_fn = self.file_based_input_fn_builder(input_file=train_file, seq_length=cf.max_seq_length,\n",
    "                                                          is_training=True,\n",
    "                                                          drop_remainder=True)\n",
    "\n",
    "        estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\n",
    "\n",
    "    def eval(self):\n",
    "        \"\"\"Evaluate the current checkpoint on the dev set.\n",
    "\n",
    "        Logs each metric and also writes them to\n",
    "        `<cf.output_dir>/eval_results.txt`.\n",
    "\n",
    "        Raises:\n",
    "            ValueError: if `set_mode` was not called first.\n",
    "        \"\"\"\n",
    "        if self.mode is None:\n",
    "            raise ValueError(\"Please set the 'mode' parameter\")\n",
    "        eval_examples = self.processor.get_dev_examples(cf.data_dir)\n",
    "        eval_file = os.path.join(cf.output_dir, \"eval.tf_record\")\n",
    "        label_list = self.processor.get_labels()\n",
    "        self.file_based_convert_examples_to_features(\n",
    "            eval_examples, label_list, cf.max_seq_length, self.tokenizer, eval_file)\n",
    "\n",
    "        tf.logging.info(\"***** Running evaluation *****\")\n",
    "        tf.logging.info(\"  Num examples = %d\", len(eval_examples))\n",
    "        # NOTE(review): train() logs cf.batch_size but this reads\n",
    "        # self.batch_size -- confirm both refer to the same value.\n",
    "        tf.logging.info(\"  Batch size = %d\", self.batch_size)\n",
    "\n",
    "        eval_input_fn = self.file_based_input_fn_builder(\n",
    "            input_file=eval_file,\n",
    "            seq_length=cf.max_seq_length,\n",
    "            is_training=False,\n",
    "            drop_remainder=False)\n",
    "\n",
    "        estimator = self.get_estimator()\n",
    "        result = estimator.evaluate(input_fn=eval_input_fn, steps=None)\n",
    "\n",
    "        output_eval_file = os.path.join(cf.output_dir, \"eval_results.txt\")\n",
    "        with tf.gfile.GFile(output_eval_file, \"w\") as writer:\n",
    "            tf.logging.info(\"***** Eval results *****\")\n",
    "            for key in sorted(result.keys()):\n",
    "                tf.logging.info(\"  %s = %s\", key, str(result[key]))\n",
    "                writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n",
    "\n",
    "    def predict(self, sentence1, sentence2):\n",
    "        \"\"\"Score one sentence pair synchronously.\n",
    "\n",
    "        Enqueues the pair on `input_queue` and blocks until a result is\n",
    "        available on `output_queue` (presumably filled by a worker thread\n",
    "        created elsewhere in this class -- not visible here).\n",
    "\n",
    "        Raises:\n",
    "            ValueError: if `set_mode` was not called first.\n",
    "        \"\"\"\n",
    "        if self.mode is None:\n",
    "            raise ValueError(\"Please set the 'mode' parameter\")\n",
    "        self.input_queue.put([(sentence1, sentence2)])\n",
    "        return self.output_queue.get()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "sim = BertSim()  # instantiate the BERT similarity wrapper defined above"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'stop' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-13-449949e5e7f8>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0ma\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mstop\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m: name 'stop' is not defined"
     ]
    }
   ],
   "source": [
    "a=stop  # deliberate NameError: acts as a barrier so 'Run All' halts here instead of running train/eval below"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ==== do_train: fine-tune the model ===="
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "sim.set_mode(tf.estimator.ModeKeys.TRAIN)  # must be called before sim.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "sim.train()  # long-running: writes train.tf_record and fine-tunes the estimator"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "sim.set_mode(tf.estimator.ModeKeys.EVAL)  # must be called before sim.eval()\n",
    "sim.eval()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ==== do_predict: interactive sentence-pair prediction ===="
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Using config: {'_model_dir': '../results', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': gpu_options {\n",
      "  per_process_gpu_memory_fraction: 0.8\n",
      "  allow_growth: true\n",
      "}\n",
      ", '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_service': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x00000261CB344908>, '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}\n",
      "INFO:tensorflow:Calling model_fn.\n",
      "INFO:tensorflow:*** Features ***\n",
      "INFO:tensorflow:  name = input_ids, shape = (?, 128)\n",
      "INFO:tensorflow:  name = input_mask, shape = (?, 128)\n",
      "INFO:tensorflow:  name = label_ids, shape = (1,)\n",
      "INFO:tensorflow:  name = segment_ids, shape = (?, 128)\n"
     ]
    }
   ],
   "source": [
    "sim.set_mode(tf.estimator.ModeKeys.PREDICT)  # builds the predict-mode estimator (triggers the model_fn logging below)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:**** Trainable Variables ****\n",
      "INFO:tensorflow:  name = bert/embeddings/word_embeddings:0, shape = (21128, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/embeddings/token_type_embeddings:0, shape = (2, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/embeddings/position_embeddings:0, shape = (512, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/embeddings/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/embeddings/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_0/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_0/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_0/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_0/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_0/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_0/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_0/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_0/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_0/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_0/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_0/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_0/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_0/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_0/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_0/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_0/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_1/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_1/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_1/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_1/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_1/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_1/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_1/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_1/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_1/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_1/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_1/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_1/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_1/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_1/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_1/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_1/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_2/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_2/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_2/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_2/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_2/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_2/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_2/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_2/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_2/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_2/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_2/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_2/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_2/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_2/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_2/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_2/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_3/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_3/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_3/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_3/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_3/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_3/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_3/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_3/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_3/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_3/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_3/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_3/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_3/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_3/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_3/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_3/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_4/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_4/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_4/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_4/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_4/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_4/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:  name = bert/encoder/layer_4/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_4/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_4/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_4/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_4/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_4/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_4/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_4/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_4/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_4/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_5/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_5/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_5/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_5/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_5/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_5/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_5/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_5/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_5/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_5/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_5/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_5/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_5/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_5/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_5/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_5/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_6/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_6/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_6/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_6/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_6/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_6/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_6/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_6/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_6/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_6/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_6/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_6/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_6/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_6/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_6/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_6/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_7/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_7/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_7/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_7/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_7/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_7/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_7/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_7/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_7/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_7/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_7/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_7/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_7/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_7/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_7/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_7/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_8/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_8/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_8/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_8/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_8/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_8/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_8/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_8/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_8/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_8/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_8/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_8/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_8/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_8/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_8/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_8/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_9/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:  name = bert/encoder/layer_9/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_9/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_9/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_9/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_9/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_9/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_9/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_9/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_9/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_9/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_9/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_9/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_9/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_9/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_9/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_10/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_10/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_10/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_10/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_10/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_10/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_10/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_10/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_10/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_10/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_10/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_10/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_10/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_10/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_10/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_10/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_11/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_11/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_11/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_11/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_11/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_11/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_11/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_11/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_11/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_11/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_11/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_11/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_11/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_11/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_11/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/encoder/layer_11/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/pooler/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = bert/pooler/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = output_weights:0, shape = (2, 768), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:  name = output_bias:0, shape = (2,), *INIT_FROM_CKPT*\n",
      "INFO:tensorflow:Done calling model_fn.\n",
      "INFO:tensorflow:Graph was finalized.\n",
      "INFO:tensorflow:Restoring parameters from ../results\\model.ckpt-24\n",
      "INFO:tensorflow:Running local_init_op.\n",
      "INFO:tensorflow:Done running local_init_op.\n",
      "INFO:tensorflow:*** Example ***\n",
      "INFO:tensorflow:guid: test-0\n",
      "INFO:tensorflow:tokens: [CLS] 今 天 晚 上 要 不 去 吃 西 餐 吧 ？ [SEP] 当 预 算 只 有 十 万 元 时 ， 买 哪 种 车 最 好 ？ [SEP]\n",
      "INFO:tensorflow:input_ids: 101 791 1921 3241 677 6206 679 1343 1391 6205 7623 1416 8043 102 2496 7564 5050 1372 3300 1282 674 1039 3198 8024 743 1525 4905 6756 3297 1962 8043 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "INFO:tensorflow:label: 0 (id = 0)\n",
      "0.4588609\n"
     ]
    }
   ],
   "source": [
    "sentence1 = '今天晚上要不去吃西餐吧？'\n",
    "sentence2 = '当预算只有十万元时，买哪种车最好？'\n",
    "predict = sim.predict(sentence1, sentence2)\n",
    "# 输出值就是浮点值的相似度\n",
    "print(predict[0][1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:*** Example ***\n",
      "INFO:tensorflow:guid: test-0\n",
      "INFO:tensorflow:tokens: [CLS] 十 万 预 算 买 什 么 车 好 ？ [SEP] 当 预 算 只 有 十 万 元 时 ， 买 哪 种 车 最 好 ？ [SEP]\n",
      "INFO:tensorflow:input_ids: 101 1282 674 7564 5050 743 784 720 6756 1962 8043 102 2496 7564 5050 1372 3300 1282 674 1039 3198 8024 743 1525 4905 6756 3297 1962 8043 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "INFO:tensorflow:label: 0 (id = 0)\n",
      "0.58520913\n"
     ]
    }
   ],
   "source": [
    "sentence1 = '十万预算买什么车好？'\n",
    "sentence2 = '当预算只有十万元时，买哪种车最好？'\n",
    "predict = sim.predict(sentence1, sentence2)\n",
    "# 输出值就是浮点值的相似度\n",
    "print(predict[0][1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:*** Example ***\n",
      "INFO:tensorflow:guid: test-0\n",
      "INFO:tensorflow:tokens: [CLS] 十 万 预 算 买 什 么 车 好 ？ [SEP] 当 预 算 只 有 十 万 元 时 ， 买 哪 种 车 最 好 ？ [SEP]\n",
      "INFO:tensorflow:input_ids: 101 1282 674 7564 5050 743 784 720 6756 1962 8043 102 2496 7564 5050 1372 3300 1282 674 1039 3198 8024 743 1525 4905 6756 3297 1962 8043 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "INFO:tensorflow:label: 0 (id = 0)\n",
      "0.58520913\n"
     ]
    }
   ],
   "source": [
    "sentence1 = '十万预算买什么车好？'\n",
    "sentence2 = '当预算只有十万元时，买哪种车最好？'\n",
    "predict = sim.predict(sentence1, sentence2)\n",
    "print(predict[0][1])\n",
    "# 预测值为 0.58520913\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:*** Example ***\n",
      "INFO:tensorflow:guid: test-0\n",
      "INFO:tensorflow:tokens: [CLS] 今 天 晚 上 要 不 去 吃 西 餐 吧 ？ [SEP] 当 预 算 只 有 十 万 元 时 ， 买 哪 种 车 最 好 ？ [SEP]\n",
      "INFO:tensorflow:input_ids: 101 791 1921 3241 677 6206 679 1343 1391 6205 7623 1416 8043 102 2496 7564 5050 1372 3300 1282 674 1039 3198 8024 743 1525 4905 6756 3297 1962 8043 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "INFO:tensorflow:label: 0 (id = 0)\n",
      "0.4588609\n"
     ]
    }
   ],
   "source": [
    "sentence1 = '今天晚上要不去吃西餐吧？'\n",
    "sentence2 = '当预算只有十万元时，买哪种车最好？'\n",
    "predict = sim.predict(sentence1, sentence2)\n",
    "print(predict[0][1])\n",
    "# 预测值为 0.4588609"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
