{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 58,
   "metadata": {},
   "outputs": [],
   "source": [
    "import keras.layers as KL\n",
    "from keras.models import Model\n",
    "import keras.backend as K\n",
    "import keras\n",
    "import tensorflow as tf\n",
    "from keras.utils import plot_model\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "from PIL import Image\n",
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "metadata": {},
   "outputs": [],
   "source": [
    "def building_block(filters, block):\n",
    "    \"\"\"Return a closure that applies one ResNet bottleneck block.\n",
    "\n",
    "    filters: width of the two reduction convs; the final conv expands\n",
    "             to 4 * filters.\n",
    "    block:   index of the block inside its stage. Index 0 is the\n",
    "             downsampling block (stride 2 + projection shortcut);\n",
    "             any other index is an identity block (stride 1).\n",
    "    \"\"\"\n",
    "    # Block 0 of each stage halves the spatial size; later blocks keep it.\n",
    "    if block != 0:\n",
    "        stride = 1\n",
    "    else:\n",
    "        stride = 2\n",
    "\n",
    "    def f(x):\n",
    "        # Main path: 1x1 reduce -> 3x3 -> 1x1 expand (bottleneck).\n",
    "        y = KL.Conv2D(filters=filters, kernel_size=(1, 1), strides=stride)(x)\n",
    "        y = KL.BatchNormalization(axis=3)(y)\n",
    "        y = KL.Activation('relu')(y)\n",
    "\n",
    "        # 3x3 conv keeps the spatial size (padding='same', stride 1).\n",
    "        y = KL.Conv2D(filters=filters, kernel_size=(3, 3), padding='same')(y)\n",
    "        y = KL.BatchNormalization(axis=3)(y)\n",
    "        y = KL.Activation('relu')(y)\n",
    "\n",
    "        # Expand back to 4x the bottleneck width.\n",
    "        y = KL.Conv2D(filters=4 * filters, kernel_size=(1, 1))(y)\n",
    "        y = KL.BatchNormalization(axis=3)(y)\n",
    "\n",
    "        # Shortcut: block 0 needs a projection conv to match the main\n",
    "        # path's shape and stride; identity blocks reuse the input as-is.\n",
    "        if block == 0:\n",
    "            shortcut = KL.Conv2D(filters=4 * filters, kernel_size=(1, 1), strides=stride)(x)\n",
    "            shortcut = KL.BatchNormalization(axis=3)(shortcut)\n",
    "        else:\n",
    "            shortcut = x\n",
    "\n",
    "        # Merge main path and shortcut, then apply the final ReLU.\n",
    "        # FIX: the original named this layer 'last' + random.randint(100, 300),\n",
    "        # which is nondeterministic and can collide across the 6 blocks built\n",
    "        # by ResNet_Extractor (Keras raises on duplicate layer names).\n",
    "        # Letting Keras auto-name the layer avoids both problems.\n",
    "        y = KL.Add()([y, shortcut])\n",
    "        y = KL.Activation('relu')(y)\n",
    "        return y\n",
    "    return f"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ResNet-style backbone: stem conv followed by stages of bottleneck blocks.\n",
    "def ResNet_Extractor(inputs, base_filters=64, blocks_per_stage=(2, 2, 2)):\n",
    "    \"\"\"Build the shared feature map that feeds the RPN head.\n",
    "\n",
    "    inputs:           input image tensor (NHWC).\n",
    "    base_filters:     bottleneck width of the first stage; doubled per stage.\n",
    "    blocks_per_stage: number of bottleneck blocks in each stage. The\n",
    "                      defaults reproduce the original hard-coded\n",
    "                      64 / [2, 2, 2] configuration exactly.\n",
    "    \"\"\"\n",
    "    # Stem: 3x3 conv + BN + ReLU; spatial size unchanged.\n",
    "    x = KL.Conv2D(filters=base_filters, kernel_size=(3, 3), padding='same')(inputs)\n",
    "    x = KL.BatchNormalization(axis=3)(x)\n",
    "    x = KL.Activation('relu')(x)\n",
    "\n",
    "    # Each stage starts with a downsampling block (block_id == 0) and the\n",
    "    # filter count doubles for the next stage.\n",
    "    filters = base_filters\n",
    "    for block_num in blocks_per_stage:\n",
    "        for block_id in range(block_num):\n",
    "            x = building_block(filters=filters, block=block_id)(x)\n",
    "        filters *= 2\n",
    "\n",
    "    return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Shared feature map and anchor extraction (RPN head below)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [],
   "source": [
    "def RpnNet(featuremap, k=9):\n",
    "    \"\"\"RPN head over the shared feature map.\n",
    "\n",
    "    featuremap: backbone output tensor (N, H, W, C).\n",
    "    k:          number of anchors per spatial location.\n",
    "\n",
    "    Returns (rpn_classification, rpn_probability, rpn_BoundingBox):\n",
    "      - rpn_classification: (N, H*W*k, 2) fg/bg logits\n",
    "      - rpn_probability:    (N, H*W*k, 2) softmax over those logits\n",
    "      - rpn_BoundingBox:    (N, H*W*k, 4) box regression outputs\n",
    "    \"\"\"\n",
    "    # 3x3 conv shared by both branches. The original followed this (and the\n",
    "    # regression conv) with Activation('linear'), which is the identity, so\n",
    "    # those dead layers are removed without changing the computation.\n",
    "    # NOTE(review): classic RPN uses a ReLU here — confirm before changing.\n",
    "    shareMap = KL.Conv2D(filters=256, kernel_size=(3, 3), padding='same', name='SSharemap')(featuremap)\n",
    "\n",
    "    # Classification branch: 2 scores (fg/bg) per anchor, reshaped to\n",
    "    # (batch, num_anchors, 2). The named linear Activation is kept only so\n",
    "    # the 'rpn_classification' layer name stays stable for downstream code.\n",
    "    rpn_classification = KL.Conv2D(filters=2 * k, kernel_size=(1, 1))(shareMap)\n",
    "    rpn_classification = KL.Lambda(lambda x: tf.reshape(x, [tf.shape(x)[0], -1, 2]))(rpn_classification)\n",
    "    rpn_classification = KL.Activation('linear', name='rpn_classification')(rpn_classification)\n",
    "\n",
    "    rpn_probability = KL.Activation('softmax', name='rpn_probability')(rpn_classification)\n",
    "\n",
    "    # Regression branch: 4 deltas per anchor, reshaped to (batch, num_anchors, 4).\n",
    "    rpn_position = KL.Conv2D(filters=4 * k, kernel_size=(1, 1))(shareMap)\n",
    "    rpn_BoundingBox = KL.Lambda(lambda x: tf.reshape(x, [tf.shape(x)[0], -1, 4]), name='rpn_POS')(rpn_position)\n",
    "\n",
    "    return rpn_classification, rpn_probability, rpn_BoundingBox"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sanity check: builds the backbone + RPN head in isolation and\n",
    "# plots the layer graph. Kept commented out so Restart-and-Run-All does not\n",
    "# create a second, disconnected model or write the plot file.\n",
    "# x = KL.Input((64,64,3)) \n",
    "# featureMap = ResNet_Extractor(x)\n",
    "# rpn_classification,rpn_probability,rpn_BoundingBox = RpnNet(featureMap,k=9)\n",
    "# model = Model(inputs = [x],outputs=[rpn_classification,rpn_probability,rpn_BoundingBox])\n",
    "# model.summary()\n",
    "# plot_model(model=model,to_file='siezemap test.png',show_shapes=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "metadata": {},
   "outputs": [],
   "source": [
    "def RPNClassLoss(rpn_match,rpn_Cal):\n",
    "    \"\"\"Foreground/background cross-entropy loss for the RPN.\n",
    "\n",
    "    rpn_match: ground-truth anchor labels with a trailing singleton axis;\n",
    "               1 = positive anchor, -1 = negative, 0 = neutral (ignored).\n",
    "    rpn_Cal:   predicted fg/bg logits, (batch, num_anchors, 2).\n",
    "    \"\"\"\n",
    "    # Drop the trailing singleton axis -> (batch, num_anchors).\n",
    "    rpn_match = tf.squeeze(rpn_match,axis=-1)\n",
    "    \n",
    "    # Neutral anchors (label 0) do not contribute to the loss.\n",
    "    indices = tf.where(K.not_equal(x=rpn_match,y=0))\n",
    "    # Map labels to class ids: 1 -> 1 (foreground), -1 and 0 -> 0 (background).\n",
    "    anchor_class =  K.cast(K.equal(rpn_match,1),tf.int32) # True = 1, False = 0\n",
    "    \n",
    "    anchor_class = tf.gather_nd(params=anchor_class ,indices=indices) # ground-truth classes for the kept anchors\n",
    "    \n",
    "    rpn_cal_class = tf.gather_nd(params=rpn_Cal,indices=indices) # predicted logits for the same anchors\n",
    "     \n",
    "    loss = K.sparse_categorical_crossentropy(target=anchor_class,output=rpn_cal_class,from_logits=True)\n",
    "                # if                        then                         else     \n",
    "    # Guard against an empty selection (no non-neutral anchors in the batch).\n",
    "    loss = K.switch(condition=tf.size(loss)>0,then_expression=K.mean(loss),else_expression=tf.constant(0.0))\n",
    "    \n",
    "    return loss\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Small utility: gather a variable number of leading entries per row.\n",
    "def batch_pack(x, counts, num_rows):\n",
    "    \"\"\"Take the first counts[i] elements of row i, for each of the\n",
    "    num_rows rows of x, and concatenate them into one flat tensor.\"\"\"\n",
    "    picked_rows = [x[row, :counts[row]] for row in range(num_rows)]\n",
    "    return tf.concat(picked_rows, axis=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Bounding-box regression loss for the RPN (smooth L1 over positives).\n",
    "def RpnBBoxLoss(target_bbox, rpn_match, rpn_bbox, batch_size=10):\n",
    "    \"\"\"Smooth-L1 regression loss computed over positive anchors only.\n",
    "\n",
    "    target_bbox: ground-truth deltas, zero-padded per image row.\n",
    "    rpn_match:   anchor labels with trailing singleton axis; 1 = positive.\n",
    "    rpn_bbox:    predicted deltas, (batch, num_anchors, 4).\n",
    "    batch_size:  number of rows batch_pack unpacks. Defaults to 10 — the\n",
    "                 value that was previously a hard-coded magic number — and\n",
    "                 must match the actual training batch size.\n",
    "    \"\"\"\n",
    "    rpn_match = tf.squeeze(input=rpn_match, axis=-1)\n",
    "    # Only positive anchors (label == 1) carry a regression target.\n",
    "    indice = tf.where(K.equal(x=rpn_match, y=1))\n",
    "\n",
    "    rpn_bbox = tf.gather_nd(params=rpn_bbox, indices=indice)  # predictions for positives\n",
    "\n",
    "    # Per-image positive counts select how many padded targets to keep.\n",
    "    batch_counts = K.sum(K.cast(K.equal(x=rpn_match, y=1), tf.int32), axis=-1)\n",
    "    target_bbox = batch_pack(x=target_bbox, counts=batch_counts, num_rows=batch_size)\n",
    "\n",
    "    # Smooth L1: 0.5 * d^2 where |d| < 1, |d| - 0.5 otherwise.\n",
    "    diff = K.abs(target_bbox - rpn_bbox)\n",
    "    less_than_one = K.cast(K.less(x=diff, y=1.0), tf.float32)\n",
    "    loss = less_than_one * 0.5 * diff**2 + (1 - less_than_one) * (diff - 0.5)\n",
    "\n",
    "    # Guard against batches with no positive anchors.\n",
    "    loss = K.switch(condition=tf.size(loss) > 0, then_expression=K.mean(loss), else_expression=tf.constant(0.0))\n",
    "\n",
    "    return loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 68,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Model inputs. Shapes use None where the per-image count varies.\n",
    "input_image = KL.Input(shape=[64,64,3],dtype=tf.float32)\n",
    "input_bbox = KL.Input(shape=[None,4],dtype=tf.float32)\n",
    "input_class_ids = KL.Input(shape = [None],dtype=tf.int32)    # class-id mapping, e.g. {'dog': 0, 'cat': 1}\n",
    "input_rpn_match = KL.Input(shape=[None,1],dtype=tf.int32)   # anchor labels: 1 / -1 / 0 (see RPNClassLoss)\n",
    "input_rpn_bbox = KL.Input(shape=[None,4],dtype=tf.float32)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 69,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Wire the graph: backbone -> RPN head -> loss layers.\n",
    "feature_map = ResNet_Extractor(input_image)\n",
    "rpn_classification,rpn_probability,rpn_BoundingBox = RpnNet(feature_map,k=9)\n",
    "\n",
    "# The losses are computed inside named Lambda layers so their output\n",
    "# tensors can be fetched by name and registered via model.add_loss later.\n",
    "loss_rpn_class = KL.Lambda(lambda x:RPNClassLoss(*x),name='classloss')([input_rpn_match,rpn_classification])\n",
    "loss_rpn_bbox = KL.Lambda(lambda x:RpnBBoxLoss(*x),name='bboxloss')([input_rpn_bbox,input_rpn_match,rpn_BoundingBox])\n",
    "\n",
    "model = Model(inputs=[input_image,input_bbox,input_class_ids,input_rpn_match,input_rpn_bbox],\n",
    "              outputs = [rpn_classification,rpn_probability,rpn_BoundingBox,loss_rpn_class,loss_rpn_bbox] )\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "__________________________________________________________________________________________________\n",
      "Layer (type)                    Output Shape         Param #     Connected to                     \n",
      "==================================================================================================\n",
      "input_16 (InputLayer)           (None, 64, 64, 3)    0                                            \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_117 (Conv2D)             (None, 64, 64, 64)   1792        input_16[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_111 (BatchN (None, 64, 64, 64)   256         conv2d_117[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "activation_72 (Activation)      (None, 64, 64, 64)   0           batch_normalization_111[0][0]    \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_118 (Conv2D)             (None, 32, 32, 64)   4160        activation_72[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_112 (BatchN (None, 32, 32, 64)   256         conv2d_118[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "activation_73 (Activation)      (None, 32, 32, 64)   0           batch_normalization_112[0][0]    \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_119 (Conv2D)             (None, 32, 32, 64)   36928       activation_73[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_113 (BatchN (None, 32, 32, 64)   256         conv2d_119[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "activation_74 (Activation)      (None, 32, 32, 64)   0           batch_normalization_113[0][0]    \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_120 (Conv2D)             (None, 32, 32, 256)  16640       activation_74[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_121 (Conv2D)             (None, 32, 32, 256)  16640       activation_72[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_114 (BatchN (None, 32, 32, 256)  1024        conv2d_120[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_115 (BatchN (None, 32, 32, 256)  1024        conv2d_121[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "add_31 (Add)                    (None, 32, 32, 256)  0           batch_normalization_114[0][0]    \n",
      "                                                                 batch_normalization_115[0][0]    \n",
      "__________________________________________________________________________________________________\n",
      "last155 (Activation)            (None, 32, 32, 256)  0           add_31[0][0]                     \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_122 (Conv2D)             (None, 32, 32, 64)   16448       last155[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_116 (BatchN (None, 32, 32, 64)   256         conv2d_122[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "activation_75 (Activation)      (None, 32, 32, 64)   0           batch_normalization_116[0][0]    \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_123 (Conv2D)             (None, 32, 32, 64)   36928       activation_75[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_117 (BatchN (None, 32, 32, 64)   256         conv2d_123[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "activation_76 (Activation)      (None, 32, 32, 64)   0           batch_normalization_117[0][0]    \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_124 (Conv2D)             (None, 32, 32, 256)  16640       activation_76[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_118 (BatchN (None, 32, 32, 256)  1024        conv2d_124[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "add_32 (Add)                    (None, 32, 32, 256)  0           batch_normalization_118[0][0]    \n",
      "                                                                 last155[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "last250 (Activation)            (None, 32, 32, 256)  0           add_32[0][0]                     \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_125 (Conv2D)             (None, 16, 16, 128)  32896       last250[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_119 (BatchN (None, 16, 16, 128)  512         conv2d_125[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "activation_77 (Activation)      (None, 16, 16, 128)  0           batch_normalization_119[0][0]    \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_126 (Conv2D)             (None, 16, 16, 128)  147584      activation_77[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_120 (BatchN (None, 16, 16, 128)  512         conv2d_126[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "activation_78 (Activation)      (None, 16, 16, 128)  0           batch_normalization_120[0][0]    \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_127 (Conv2D)             (None, 16, 16, 512)  66048       activation_78[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_128 (Conv2D)             (None, 16, 16, 512)  131584      last250[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_121 (BatchN (None, 16, 16, 512)  2048        conv2d_127[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_122 (BatchN (None, 16, 16, 512)  2048        conv2d_128[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "add_33 (Add)                    (None, 16, 16, 512)  0           batch_normalization_121[0][0]    \n",
      "                                                                 batch_normalization_122[0][0]    \n",
      "__________________________________________________________________________________________________\n",
      "last174 (Activation)            (None, 16, 16, 512)  0           add_33[0][0]                     \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_129 (Conv2D)             (None, 16, 16, 128)  65664       last174[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_123 (BatchN (None, 16, 16, 128)  512         conv2d_129[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "activation_79 (Activation)      (None, 16, 16, 128)  0           batch_normalization_123[0][0]    \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_130 (Conv2D)             (None, 16, 16, 128)  147584      activation_79[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_124 (BatchN (None, 16, 16, 128)  512         conv2d_130[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "activation_80 (Activation)      (None, 16, 16, 128)  0           batch_normalization_124[0][0]    \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_131 (Conv2D)             (None, 16, 16, 512)  66048       activation_80[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_125 (BatchN (None, 16, 16, 512)  2048        conv2d_131[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "add_34 (Add)                    (None, 16, 16, 512)  0           batch_normalization_125[0][0]    \n",
      "                                                                 last174[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "last112 (Activation)            (None, 16, 16, 512)  0           add_34[0][0]                     \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_132 (Conv2D)             (None, 8, 8, 256)    131328      last112[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_126 (BatchN (None, 8, 8, 256)    1024        conv2d_132[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "activation_81 (Activation)      (None, 8, 8, 256)    0           batch_normalization_126[0][0]    \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_133 (Conv2D)             (None, 8, 8, 256)    590080      activation_81[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_127 (BatchN (None, 8, 8, 256)    1024        conv2d_133[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "activation_82 (Activation)      (None, 8, 8, 256)    0           batch_normalization_127[0][0]    \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_134 (Conv2D)             (None, 8, 8, 1024)   263168      activation_82[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_135 (Conv2D)             (None, 8, 8, 1024)   525312      last112[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_128 (BatchN (None, 8, 8, 1024)   4096        conv2d_134[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_129 (BatchN (None, 8, 8, 1024)   4096        conv2d_135[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "add_35 (Add)                    (None, 8, 8, 1024)   0           batch_normalization_128[0][0]    \n",
      "                                                                 batch_normalization_129[0][0]    \n",
      "__________________________________________________________________________________________________\n",
      "last297 (Activation)            (None, 8, 8, 1024)   0           add_35[0][0]                     \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_136 (Conv2D)             (None, 8, 8, 256)    262400      last297[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_130 (BatchN (None, 8, 8, 256)    1024        conv2d_136[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "activation_83 (Activation)      (None, 8, 8, 256)    0           batch_normalization_130[0][0]    \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_137 (Conv2D)             (None, 8, 8, 256)    590080      activation_83[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_131 (BatchN (None, 8, 8, 256)    1024        conv2d_137[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "activation_84 (Activation)      (None, 8, 8, 256)    0           batch_normalization_131[0][0]    \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_138 (Conv2D)             (None, 8, 8, 1024)   263168      activation_84[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_132 (BatchN (None, 8, 8, 1024)   4096        conv2d_138[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "add_36 (Add)                    (None, 8, 8, 1024)   0           batch_normalization_132[0][0]    \n",
      "                                                                 last297[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "last206 (Activation)            (None, 8, 8, 1024)   0           add_36[0][0]                     \n",
      "__________________________________________________________________________________________________\n",
      "SSharemap (Conv2D)              (None, 8, 8, 256)    2359552     last206[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "activation_85 (Activation)      (None, 8, 8, 256)    0           SSharemap[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_139 (Conv2D)             (None, 8, 8, 18)     4626        activation_85[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_140 (Conv2D)             (None, 8, 8, 36)     9252        activation_85[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "lambda_4 (Lambda)               (None, None, 2)      0           conv2d_139[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "activation_86 (Activation)      (None, 8, 8, 36)     0           conv2d_140[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "rpn_classification (Activation) (None, None, 2)      0           lambda_4[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "rpn_POS (Lambda)                (None, None, 4)      0           activation_86[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "input_19 (InputLayer)           (None, None, 1)      0                                            \n",
      "__________________________________________________________________________________________________\n",
      "input_20 (InputLayer)           (None, None, 4)      0                                            \n",
      "__________________________________________________________________________________________________\n",
      "rpn_probability (Activation)    (None, None, 2)      0           rpn_classification[0][0]         \n",
      "__________________________________________________________________________________________________\n",
      "classloss (Lambda)              ()                   0           input_19[0][0]                   \n",
      "                                                                 rpn_classification[0][0]         \n",
      "__________________________________________________________________________________________________\n",
      "bboxloss (Lambda)               ()                   0           input_20[0][0]                   \n",
      "                                                                 input_19[0][0]                   \n",
      "                                                                 rpn_POS[0][0]                    \n",
      "==================================================================================================\n",
      "Total params: 5,831,478\n",
      "Trainable params: 5,817,014\n",
      "Non-trainable params: 14,464\n",
      "__________________________________________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "# Register the custom loss tensors with the model and compile it.\n",
    "loss_layer1 = model.get_layer('classloss').output\n",
    "loss_layer2 = model.get_layer('bboxloss').output\n",
    "\n",
    "model.add_loss(tf.reduce_mean(loss_layer1))\n",
    "model.add_loss(tf.reduce_mean(loss_layer2))\n",
    "\n",
    "# loss=None for every output: the real losses were attached via add_loss\n",
    "# above, so Keras must not expect y targets for the outputs.\n",
    "model.compile(loss=[None]*len(model.outputs),\n",
    "             optimizer=keras.optimizers.SGD(lr=0.00003))\n",
    "\n",
    "# Compiled; show the resulting layer graph.\n",
    " \n",
    "model.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Demo: batch_pack on a toy matrix"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): this cell re-imports and redefines batch_pack identically\n",
    "# to the version above, shadowing it. Kept so this demo section can run\n",
    "# stand-alone, but the duplicate definition could be removed.\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "def batch_pack(x,counts,num_rows): # x is a matrix\n",
    "    \"\"\"Concatenate the first counts[i] entries of each of num_rows rows.\"\"\"\n",
    "    output=[]\n",
    "    for i in range(num_rows):\n",
    "        output.append(x[i,:counts[i]]) # row i: keep its first counts[i] elements\n",
    "    return tf.concat(output,axis=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Toy input: 5 zero-padded rows; counts[i] says how many leading entries\n",
    "# of row i are real data.\n",
    "x = np.array(\n",
    "        [[1,2,0,0,0],\n",
    "         [3,6,0,0,0],\n",
    "         [4,7,0,0,0],\n",
    "         [5,8,0,0,0],\n",
    "         [2,1,0,0,0],\n",
    "        ]\n",
    ")\n",
    "\n",
    "counts = np.array([2,1,1,1,1])\n",
    "num_rows = 5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "metadata": {},
   "outputs": [],
   "source": [
    "picked = batch_pack(x=x,counts=counts,num_rows=num_rows)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[1 2 3 4 5 2]\n"
     ]
    }
   ],
   "source": [
    "# TF1-style execution: run the graph to materialize and print the result.\n",
    "with tf.Session() as sess:\n",
    "    print (sess.run(picked))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
