{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# DSSM in Keras"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "\n",
    "## 关键问题\n",
    "\n",
    "* 不同于word2vec，word2vec的词典数量不大，进行负采样很容易，dssm网络的query和doc千变万化，不能全部加载进内存进行负采样，因此应该在准备训练数据的时候就准备好负样本\n",
    "* 计算query和doc的cosine相似度，是不是直接使用keras的Dot层即可？\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "\n",
     "class NegativeSamplingModelBuilder:\n",
     "    \"\"\"Base class for DSSM-style models trained with negative sampling.\n",
     "\n",
     "    Subclasses implement ``build_model`` (the trainable graph) and\n",
     "    ``build_observable_model`` (a graph exposing intermediate vectors).\n",
     "    \"\"\"\n",
     "\n",
     "    def __init__(self, neg_num=10, params=None):\n",
     "        # neg_num: number of negative docs paired with each query.\n",
     "        self.neg_num = neg_num\n",
     "\n",
     "        # Merge caller-supplied params over the defaults.\n",
     "        default_params = self.default_params()\n",
     "        if params:\n",
     "            default_params.update(params)\n",
     "        self.params = default_params\n",
     "\n",
     "        # Built lazily and cached by the build_* methods.\n",
     "        self.model = None\n",
     "        self.observable_model = None\n",
     "\n",
     "    def build_model(self):\n",
     "        raise NotImplementedError()\n",
     "\n",
     "    def build_observable_model(self):\n",
     "        raise NotImplementedError()\n",
     "\n",
     "    def cosine(self, x):\n",
     "        \"\"\"Cosine similarity of each query against its 1 positive + neg_num negative docs.\n",
     "\n",
     "        x: [query, doc] tensors. Assumes the doc batch is laid out so that tiling\n",
     "        the query batch (neg_num + 1) times aligns each query with its docs --\n",
     "        TODO confirm against the data pipeline. Requires the exact batch_size\n",
     "        from params (last partial batch would break the reshape below).\n",
     "        \"\"\"\n",
     "        query, doc = x\n",
     "        # Tile query norms so one query row matches its (neg_num + 1) doc rows.\n",
     "        query_norm = tf.tile(tf.sqrt(tf.reduce_sum(tf.square(query), 1, True)), [self.neg_num + 1, 1])\n",
     "        doc_norm = tf.sqrt(tf.reduce_sum(tf.square(doc), 1, True))\n",
     "\n",
     "        prod = tf.reduce_sum(tf.multiply(tf.tile(query, [self.neg_num + 1, 1]), doc), 1, True)\n",
     "        prod_norm = tf.multiply(query_norm, doc_norm)\n",
     "\n",
     "        # NOTE(review): no epsilon guard -- divides by zero if a vector has zero norm.\n",
     "        cos = tf.truediv(prod, prod_norm)\n",
     "\n",
     "        # Regroup into one row per query: [batch_size, neg_num + 1].\n",
     "        cos = tf.transpose(tf.reshape(tf.transpose(cos), [self.neg_num + 1, self.params['batch_size']]))\n",
     "        return cos\n",
     "\n",
     "    def default_params(self):\n",
     "        \"\"\"Default hyper-parameters; overridable via the ``params`` ctor argument.\"\"\"\n",
     "        params = {\n",
     "            'vocab_size': 100,\n",
     "            'vec_dim': 256,\n",
     "            'batch_size': 32,\n",
     "            'query_max_len': 10,\n",
     "            'doc_max_len': 100\n",
     "        }\n",
     "        return params\n",
     "\n",
    "\n",
     "class MLPNegativeSamplingModelBuilder(NegativeSamplingModelBuilder):\n",
     "    \"\"\"MLP negative sampling model builder. Multi-hot input instead of embedding.\"\"\"\n",
     "\n",
     "    def build_model(self):\n",
     "        \"\"\"Build, compile and cache the trainable model.\n",
     "\n",
     "        NOTE(review): both Dense towers use the default linear activation, so each\n",
     "        tower collapses to a single linear map; classic DSSM uses tanh here --\n",
     "        confirm whether that is intended.\n",
     "        \"\"\"\n",
     "        if self.model:\n",
     "            return self.model\n",
     "\n",
     "        # Query tower: query_max_len -> 1024 -> vec_dim.\n",
     "        query_input = tf.keras.layers.Input(shape=(self.params['query_max_len'],), name='query_input')\n",
     "        query_dense_1 = tf.keras.layers.Dense(1024, name='query_dense_1')(query_input)\n",
     "        query_vec = tf.keras.layers.Dense(self.params['vec_dim'], name='query_vec')(query_dense_1)\n",
     "\n",
     "        # Doc tower: doc_max_len -> 1024 -> vec_dim.\n",
     "        doc_input = tf.keras.layers.Input(shape=(self.params['doc_max_len'],), name='doc_input')\n",
     "        doc_dense_1 = tf.keras.layers.Dense(1024, name='doc_dense_1')(doc_input)\n",
     "        doc_vec = tf.keras.layers.Dense(self.params['vec_dim'], name='doc_vec')(doc_dense_1)\n",
     "\n",
     "        # Cosine of each query against its (neg_num + 1) docs -> [batch, neg_num + 1].\n",
     "        cosine = tf.keras.layers.Lambda(self.cosine, name='cosine')([query_vec, doc_vec])\n",
     "\n",
     "        # Softmax over the positive + negative docs of each query.\n",
     "        output = tf.keras.layers.Activation('softmax')(cosine)\n",
     "\n",
     "        # Keep only column 0: the probability assigned to the positive doc.\n",
     "        similarity = tf.keras.layers.Lambda(lambda x: tf.slice(x, [0, 0], [-1, 1]), name='similarity')(output)\n",
     "\n",
     "        model = tf.keras.Model(inputs=[query_input, doc_input], outputs=[similarity])\n",
     "        metrics = ['accuracy', tf.keras.metrics.Recall(), tf.keras.metrics.Precision()]\n",
     "        model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=metrics)\n",
     "        model.summary()\n",
     "\n",
     "        self.model = model\n",
     "        return self.model\n",
     "\n",
     "    def build_observable_model(self):\n",
     "        \"\"\"Build (and cache) a model exposing query/doc vectors plus the similarity.\"\"\"\n",
     "        if self.observable_model:\n",
     "            return self.observable_model\n",
     "        model = self.build_model()\n",
     "        observable_model = tf.keras.Model(\n",
     "            inputs=model.input,\n",
     "            outputs=[model.get_layer('query_vec').output,\n",
     "                     model.get_layer('doc_vec').output,\n",
     "                     model.get_layer('similarity').output\n",
     "                     ])\n",
     "        self.observable_model = observable_model\n",
     "        return self.observable_model\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# FIXME(review): unfinished stub -- `shape` is undefined here, so running\n",
     "# this cell raises NameError. Complete or delete before a full re-run.\n",
     "def build_mlp_model(config):\n",
     "    query_input = tf.keras.layers.Input(shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING: Logging before flag parsing goes to stderr.\n",
      "W0423 16:32:22.415776 4506338752 training_utils.py:1152] Output lambda_6 missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to lambda_6.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Tensor(\"unified_lstm_15/strided_slice_3:0\", shape=(None, 32), dtype=float32)\n",
      "Tensor(\"unified_lstm_15_1/strided_slice_3:0\", shape=(None, 32), dtype=float32)\n",
      "cos: Tensor(\"lambda_6/truediv:0\", shape=(None, 1), dtype=float32)\n",
      "Model: \"model_10\"\n",
      "__________________________________________________________________________________________________\n",
      "Layer (type)                    Output Shape         Param #     Connected to                     \n",
      "==================================================================================================\n",
      "input_31 (InputLayer)           [(None, 6)]          0                                            \n",
      "__________________________________________________________________________________________________\n",
      "input_32 (InputLayer)           [(None, 10)]         0                                            \n",
      "__________________________________________________________________________________________________\n",
      "embedding_15 (Embedding)        multiple             12800       input_31[0][0]                   \n",
      "                                                                 input_32[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "unified_lstm_15 (UnifiedLSTM)   (None, 32)           20608       embedding_15[0][0]               \n",
      "                                                                 embedding_15[1][0]               \n",
      "__________________________________________________________________________________________________\n",
      "dense_51 (Dense)                (None, 1024)         33792       unified_lstm_15[0][0]            \n",
      "__________________________________________________________________________________________________\n",
      "dense_52 (Dense)                (None, 1024)         33792       unified_lstm_15[1][0]            \n",
      "__________________________________________________________________________________________________\n",
      "dense_53 (Dense)                (None, 256)          262400      dense_51[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "dense_54 (Dense)                (None, 256)          262400      dense_52[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "dot_11 (Dot)                    (None, 1)            0           dense_53[0][0]                   \n",
      "                                                                 dense_54[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "out (Dense)                     (None, 1)            2           dot_11[0][0]                     \n",
      "__________________________________________________________________________________________________\n",
      "lambda_6 (Lambda)               (None, 1)            0           dense_53[0][0]                   \n",
      "                                                                 dense_54[0][0]                   \n",
      "==================================================================================================\n",
      "Total params: 625,794\n",
      "Trainable params: 625,794\n",
      "Non-trainable params: 0\n",
      "__________________________________________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "import tensorflow as tf\n",
    "\n",
     "class Attention(tf.keras.Model):\n",
     "    \"\"\"Additive (Bahdanau-style) attention over a sequence of features.\n",
     "\n",
     "    NOTE(review): defined in this cell but never used by build_lstm_model below.\n",
     "    \"\"\"\n",
     "\n",
     "    def __init__(self, units):\n",
     "        super(Attention, self).__init__()\n",
     "        self.W1 = tf.keras.layers.Dense(units)\n",
     "        self.W2 = tf.keras.layers.Dense(units)\n",
     "        self.V = tf.keras.layers.Dense(1)\n",
     " \n",
     "    def call(self, features, hidden):\n",
     "        # Broadcast the hidden state along the time axis before scoring.\n",
     "        hidden_with_time_axis = tf.expand_dims(hidden, 1)\n",
     "        score = tf.nn.tanh(self.W1(features) + self.W2(hidden_with_time_axis))\n",
     "        # Softmax over axis 1 gives one weight per timestep.\n",
     "        attention_weights = tf.nn.softmax(self.V(score), axis=1)\n",
     "        context_vector = attention_weights * features\n",
     "        context_vector = tf.reduce_sum(context_vector, axis=1)\n",
     " \n",
     "        return context_vector, attention_weights\n",
    "\n",
    "\n",
     "def build_lstm_model():\n",
     "    \"\"\"Twin-tower model with a shared Embedding and shared LSTM.\n",
     "\n",
     "    Returns a Model with two outputs: a trainable sigmoid score ('out')\n",
     "    and a raw cosine ('cos') kept for observation only.\n",
     "    \"\"\"\n",
     "    query_input = tf.keras.layers.Input(shape=(6,))\n",
     "    doc_input = tf.keras.layers.Input(shape=(10,))\n",
     "    \n",
     "    # One Embedding and one LSTM instance shared by both towers (weight tying).\n",
     "    embedding = tf.keras.layers.Embedding(100, 128)\n",
     "    \n",
     "    query_embedding = embedding(query_input)\n",
     "    doc_embedding = embedding(doc_input)\n",
     "    \n",
     "    lstm = tf.keras.layers.LSTM(32)\n",
     "    query_lstm = lstm(query_embedding)\n",
     "    doc_lstm = lstm(doc_embedding)\n",
     "    print(query_lstm)\n",
     "    print(doc_lstm)\n",
     "    \n",
     "    query_dense = tf.keras.layers.Dense(1024)(query_lstm)\n",
     "    doc_dense = tf.keras.layers.Dense(1024)(doc_lstm)\n",
     "    \n",
     "    query_vec = tf.keras.layers.Dense(256)(query_dense)\n",
     "    doc_vec = tf.keras.layers.Dense(256)(doc_dense)\n",
     "    \n",
     "    def cosine(x):\n",
     "        # Manual row-wise cosine; NOTE(review): no epsilon guard against zero norms.\n",
     "        q, d = x\n",
     "        q_norm = tf.sqrt(tf.reduce_sum(tf.square(q), 1, True))\n",
     "        d_norm = tf.sqrt(tf.reduce_sum(tf.square(d), 1, True))\n",
     "        prod = tf.reduce_sum(tf.multiply(q, d), 1, True)\n",
     "        prod_norm = tf.multiply(q_norm, d_norm)\n",
     "        cos = tf.truediv(prod, prod_norm)\n",
     "        print('cos:', cos)\n",
     "        return cos\n",
     "        \n",
     "    cos = tf.keras.layers.Lambda(lambda x: cosine(x))([query_vec, doc_vec])\n",
     "    \n",
     "    # Dot(axes=1, normalize=True) is L2-normalized dot product, i.e. cosine --\n",
     "    # this answers the notebook's question about using the Dot layer directly.\n",
     "    dot = tf.keras.layers.Dot(axes=1, normalize=True,)([query_vec, doc_vec])\n",
     "    out = tf.keras.layers.Dense(1, activation='sigmoid', name='out')(dot)\n",
     "    \n",
     "    model = tf.keras.Model(inputs=[query_input, doc_input], outputs=[out, cos])\n",
     "    return model\n",
    "\n",
     "model = build_lstm_model()\n",
     "model.summary()\n",
     "\n",
     "# Only the 'out' head gets a loss; the cosine Lambda output is observe-only,\n",
     "# which is why Keras logs the \"missing from loss dictionary\" warning above.\n",
     "model.compile(loss={'out': 'binary_crossentropy'}, optimizer='sgd')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def cosine_similarity(x):\n",
    "    q, d = x\n",
    "    q_norm = tf.sqrt(tf.reduce_sum(tf.square(q), 1, True))\n",
    "    d_norm = tf.sqrt(tf.reduce_sum(tf.square(d), 1, True))\n",
    "    p = tf.reduce_sum(tf.multiply(q, d), 1, True)\n",
    "    p_norm = tf.multiply(q_norm, d_norm)\n",
    "    cos = tf.truediv(p, p_norm)\n",
    "    return cos"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2.0.0-alpha0\n",
      "v: (<tf.Tensor: id=12, shape=(4,), dtype=int32, numpy=array([1, 2, 3, 4], dtype=int32)>, <tf.Tensor: id=13, shape=(4,), dtype=int32, numpy=array([2, 1, 3, 4], dtype=int32)>, <tf.Tensor: id=14, shape=(1,), dtype=int32, numpy=array([1], dtype=int32)>) \n",
      "\n",
      "v: (<tf.Tensor: id=18, shape=(4,), dtype=int32, numpy=array([2, 3, 4, 1], dtype=int32)>, <tf.Tensor: id=19, shape=(4,), dtype=int32, numpy=array([3, 4, 1, 2], dtype=int32)>, <tf.Tensor: id=20, shape=(1,), dtype=int32, numpy=array([0], dtype=int32)>) \n",
      "\n",
      "===============================\n",
      "query embedding shape:  (None, 4, 128)\n",
      "doc embedding shape:  (None, 4, 128)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING: Logging before flag parsing goes to stderr.\n",
      "W0423 20:00:34.438781 4481783232 training_utils.py:1152] Output output_1 missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to output_1.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "query lstm shape:  (None, 32)\n",
      "doc lstm shape:  (None, 32)\n",
      "4/4 [==============================] - 2s 462ms/step - loss: 0.6923 - output_2_loss: 0.6929\n",
      "cos: \n",
      " [[-0.01785314]\n",
      " [ 0.0249357 ]\n",
      " [ 0.0249357 ]\n",
      " [-0.01785314]\n",
      " [-0.01785314]\n",
      " [ 0.0249357 ]\n",
      " [ 0.0249357 ]\n",
      " [-0.01785314]]\n",
      "prob: \n",
      " [[0.4987174]\n",
      " [0.5017917]\n",
      " [0.5017917]\n",
      " [0.4987174]\n",
      " [0.4987174]\n",
      " [0.5017917]\n",
      " [0.5017917]\n",
      " [0.4987174]]\n"
     ]
    }
   ],
   "source": [
    "print(tf.__version__)\n",
    "\n",
    "\n",
     "class LSTMModel(tf.keras.Model):\n",
     "    \"\"\"Subclassed twin-tower LSTM model; call() returns {'cos', 'out'}.\"\"\"\n",
     "    \n",
     "    def __init__(self, params=None):\n",
     "        super(LSTMModel, self).__init__(name='lstm_model')\n",
     "        \n",
     "        # Embedding shared by the query and doc towers.\n",
     "        self.embedding = tf.keras.layers.Embedding(100, 128)\n",
     "        \n",
     "        # NOTE(review): these two 1024-unit layers are never used in call().\n",
     "        self.query_dense = tf.keras.layers.Dense(1024)\n",
     "        self.doc_dense = tf.keras.layers.Dense(1024)\n",
     "        \n",
     "        self.query_lstm = tf.keras.layers.LSTM(32)\n",
     "        self.doc_lstm = tf.keras.layers.LSTM(32)\n",
     "        \n",
     "        self.query_dense_2 = tf.keras.layers.Dense(256, name='query_vec')\n",
     "        self.doc_dense_2 = tf.keras.layers.Dense(256, name='doc_vec')\n",
     "        \n",
     "        # cosine_similarity is defined in an earlier cell of this notebook.\n",
     "        self.cosine = tf.keras.layers.Lambda(lambda x: cosine_similarity(x), name='similarity')\n",
     "        \n",
     "        self.dot = tf.keras.layers.Dot(axes=1, normalize=True, name='dot')\n",
     "        \n",
     "        self.out = tf.keras.layers.Dense(1, activation='sigmoid', name='out')\n",
     "        \n",
     "    def call(self, inputs, training=True, mask=None):\n",
     "        query, doc = inputs\n",
     "        query_embedding = self.embedding(query)\n",
     "        doc_embedding = self.embedding(doc)\n",
     "        print('query embedding shape: ', query_embedding.shape)\n",
     "        print('doc embedding shape: ', doc_embedding.shape)\n",
     "        \n",
     "        query_lstm = self.query_lstm(query_embedding)\n",
     "        doc_lstm = self.doc_lstm(doc_embedding)\n",
     "        print('query lstm shape: ', query_lstm.shape)\n",
     "        print('doc lstm shape: ', doc_lstm.shape)\n",
     "        \n",
     "        query_vec = self.query_dense_2(query_lstm)\n",
     "        doc_vec = self.doc_dense_2(doc_lstm)\n",
     "        \n",
     "        cos = self.cosine([query_vec, doc_vec])\n",
     "        dot = self.dot([query_vec, doc_vec])\n",
     "        out = self.out(dot)\n",
     "        \n",
     "        return {'cos': cos, 'out': out}\n",
     "        \n",
     "model = LSTMModel()\n",
     "# loss = {'out': 'binary_crossentropy'} errored: could not find 'out'. With\n",
     "# dict outputs Keras names the heads output_1/output_2 here, so the loss\n",
     "# targets 'output_2' (the sigmoid 'out' head).\n",
     "model.compile(loss={'output_2': 'binary_crossentropy'}, optimizer='sgd')\n",
     "\n",
     "\n",
     "# Toy data: two (query, doc) pairs with binary labels.\n",
     "q = [\n",
     "    [1, 2, 3, 4],\n",
     "    [2, 3, 4, 1]\n",
     "]\n",
     "d = [\n",
     "    [2, 1, 3, 4],\n",
     "    [3, 4, 1, 2]\n",
     "]\n",
     "l = [\n",
     "    [1],\n",
     "    [0],\n",
     "]\n",
     "qd = tf.data.Dataset.from_tensor_slices(q)\n",
     "dd = tf.data.Dataset.from_tensor_slices(d)\n",
     "ld = tf.data.Dataset.from_tensor_slices(l)\n",
     "\n",
     "# NOTE(review): `d` is rebound from the raw doc list to the zipped dataset here.\n",
     "d = tf.data.Dataset.zip((qd, dd, ld))\n",
     "for v in iter(d):\n",
     "    print('v:', v, '\\n')\n",
     "print('===============================')\n",
     "d = d.shuffle(100)\n",
     "# Reshape each element into the ((query, doc), label) structure fit() expects.\n",
     "d = d.map(lambda q, d, l: ((q, d), l))\n",
     "d = d.repeat(4)\n",
     "d = d.batch(2)\n",
     "\n",
     "# for v in iter(d):\n",
     "#     print(v)\n",
     "#     print('---------------------------')\n",
     "    \n",
     "h = model.fit(d)\n",
     "\n",
     "pred_d = d\n",
     "outputs = model.predict(pred_d)\n",
     "# Per the printed results, index 0 appears to be the cosine head and index 1\n",
     "# the sigmoid probability -- TODO confirm the output ordering for dict outputs.\n",
     "cos, prob = outputs[0], outputs[1]\n",
     "print('cos: \\n', cos)\n",
     "print('prob: \\n', prob)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
