# coding=utf-8
import math
import numpy as np
import tensorflow as tf
slim = tf.contrib.slim
import a11_net_params as netparam
import a12_custom_layers as custom_layers # offer some basic functions

"""
Implementation of the SSD VGG-based 300 network.

The default features layers with 300x300 image input are:
1  conv4  ==> 38 x 38
2  conv7  ==> 19 x 19
3  conv8  ==> 10 x 10
4  conv9  ==> 5 x 5
5  conv10 ==> 3 x 3
6  conv11 ==> 1 x 1
The default image size used to train this network is 300x300.
The input is a 300x300 RGB image; the outputs are the prior-box predictions
computed at every level of the feature pyramid, where each prior box carries
class scores (label) and location offsets.
"""
def ssd300vgg16net(inputs,
                   is_training = True,
                   dropout_keep_prob = 0.5,
                   prediction_fn = slim.softmax,
                   reuse=None,
                   scope='ssd_300_vgg'):
    """Build the SSD300 VGG-16 detection network.

    Args:
        inputs: 4-D image tensor, expected [batch, 300, 300, 3].
        is_training: Python bool; enables the dropout after conv6/conv7.
        dropout_keep_prob: probability of KEEPING a unit in dropout.
        prediction_fn: activation applied to class logits (default softmax).
        reuse: whether to reuse variables in `scope`.
        scope: variable scope name.

    Returns:
        predictions: list of per-feature-layer class probabilities.
        localisations: list of per-feature-layer location offsets.
        logits: list of per-feature-layer raw class scores.
        end_points: dict mapping block names to intermediate activations.
    """
    # End_points collect relevant activations for external use.
    end_points = {}
    with tf.variable_scope(scope, 'ssd_300_vgg', [inputs], reuse=reuse):
        # Original VGG-16 blocks.
        net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
        end_points['block1'] = net
        net = slim.max_pool2d(net, [2, 2], scope='pool1')

        # Block 2.
        net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
        end_points['block2'] = net
        net = slim.max_pool2d(net, [2, 2], scope='pool2')

        # Block 3.
        net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
        end_points['block3'] = net
        # error will occur at 'block11' because the size of feature maps
        # will miss-match with the paper. so add the following line. guoyf
        net = custom_layers.pad2d(net, (1, 1))
        net = slim.max_pool2d(net, [2, 2], scope='pool3')

        # Block 4.
        net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
        end_points['block4'] = net
        net = slim.max_pool2d(net, [2, 2], scope='pool4')

        # Block 5.
        net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
        end_points['block5'] = net
        # error will occur at 'block11' because the size of feature maps
        # will miss-match with the paper. so add the following line. guoyf
        net = custom_layers.pad2d(net, (1, 1))
        net = slim.max_pool2d(net, [3, 3], stride=1, scope='pool5')

        # Additional SSD blocks.
        # Block 6: input feature maps size 19*19, dilated 3x3 conv (rate=6).
        net = slim.conv2d(net, 1024, [3, 3], rate=6, scope='conv6')
        end_points['block6'] = net
        # Bug fix: tf.layers.dropout's `rate` is the fraction of units
        # DROPPED, whereas dropout_keep_prob is the fraction kept, so pass
        # 1 - keep_prob (the original passed keep_prob directly, which only
        # happened to work for the default 0.5).
        net = tf.layers.dropout(net, rate=1.0 - dropout_keep_prob,
                                training=is_training)

        # Block 7: input feature maps size 19*19
        net = slim.conv2d(net, 1024, [1, 1], scope='conv7')
        end_points['block7'] = net
        net = tf.layers.dropout(net, rate=1.0 - dropout_keep_prob,
                                training=is_training)

        # Block 8/9/10/11: 1x1 and 3x3 convolutions stride 2 (except lasts).
        end_point = 'block8'
        with tf.variable_scope(end_point):
            net = slim.conv2d(net, 256, [1, 1], scope='conv1x1')
            net = custom_layers.pad2d(net, pad=(1, 1))
            net = slim.conv2d(net, 512, [3, 3], stride=2, scope='conv3x3', padding='VALID')
        end_points[end_point] = net

        end_point = 'block9'
        with tf.variable_scope(end_point):
            net = slim.conv2d(net, 128, [1, 1], scope='conv1x1')
            net = custom_layers.pad2d(net, pad=(1, 1))
            net = slim.conv2d(net, 256, [3, 3], stride=2, scope='conv3x3', padding='VALID')
        end_points[end_point] = net

        end_point = 'block10'
        with tf.variable_scope(end_point):
            net = slim.conv2d(net, 128, [1, 1], scope='conv1x1')
            net = slim.conv2d(net, 256, [3, 3], scope='conv3x3', padding='VALID')
        end_points[end_point] = net

        end_point = 'block11'
        with tf.variable_scope(end_point):
            net = slim.conv2d(net, 128, [1, 1], scope='conv1x1')
            net = slim.conv2d(net, 256, [3, 3], scope='conv3x3', padding='VALID')
        end_points[end_point] = net

        # Prediction and localisations layers: one multibox head per
        # feature layer listed in the network parameters.
        predictions = []
        logits = []
        localisations = []
        for i, layer in enumerate(netparam.default_params.feat_layers):
            with tf.variable_scope(layer + '_box'):
                cls_pred, loc_pred = ssd_multibox_layer(
                    end_points[layer],
                    netparam.default_params.num_classes,
                    netparam.default_params.anchor_sizes[i],
                    netparam.default_params.anchor_ratios[i],
                    netparam.default_params.normalizations[i],
                    False)
            # Apply softmax to each prior box, i.e. the probability of
            # belonging to each class.
            activeRst = prediction_fn(cls_pred)
            predictions.append(activeRst)
            logits.append(cls_pred)
            localisations.append(loc_pred)
        return predictions, localisations, logits, end_points

"""
Construct a multibox layer, return a class and localization predictions.
"""
def ssd_multibox_layer(inputs,
                       num_classes,
                       sizes,
                       ratios=(1,),
                       normalization=-1,
                       bn_normalization=False):
    """Construct a multibox layer: class and localization predictions.

    Args:
        inputs: 4-D feature-map tensor [batch, height, width, channels].
        num_classes: number of object classes (including background).
        sizes: min_size / max_size of the prior boxes for this layer.
        ratios: aspect ratios of the prior boxes. Default is an immutable
            tuple (was a mutable list default, a classic Python pitfall);
            only its length is used here, so behavior is unchanged.
        normalization: apply L2 normalization to `inputs` when > 0.
        bn_normalization: unused; kept for interface compatibility.

    Returns:
        cls_pred: [batch, height, width, num_anchors, num_classes] logits.
        loc_pred: [batch, height, width, num_anchors, 4] location offsets.
    """
    net = inputs
    if normalization > 0:
        net = custom_layers.l2_normalization(net, scaling=True)

    # Number of anchors (prior boxes) at each position of the feature map.
    num_anchors = len(sizes) + len(ratios)

    # Location.
    num_loc_pred = num_anchors * 4
    loc_pred = slim.conv2d(net, num_loc_pred, [3, 3], activation_fn=None, scope='conv_loc')
    loc_pred = custom_layers.channel_to_last(loc_pred)
    # In  [batch, height, width, num_loc_pred]
    # out [batch, height, width, num_anchors, 4]
    loc_pred = tf.reshape(loc_pred, tensor_shape(loc_pred, 4)[:-1] + [num_anchors, 4])

    # Class prediction.
    num_cls_pred = num_anchors * num_classes
    cls_pred = slim.conv2d(net, num_cls_pred, [3, 3], activation_fn=None, scope='conv_cls')
    cls_pred = custom_layers.channel_to_last(cls_pred)
    # In  [batch, height, width, num_cls_pred]
    # out [batch, height, width, num_anchors, num_classes]
    cls_pred = tf.reshape(cls_pred, tensor_shape(cls_pred, 4)[:-1] + [num_anchors, num_classes])

    return cls_pred, loc_pred

"""
Returns the dimensions of a tensor.
Args:
    x: An N-D tensor.
Returns:
    A list of dimensions. Dimensions that are statically known are python
    integers, otherwise they are integer scalar tensors.
"""
def tensor_shape(x, rank=3):
    """Return the dimensions of tensor `x` as a list of length `rank`.

    Dimensions that are statically known come back as Python ints;
    unknown dimensions come back as scalar integer tensors.
    """
    shape = x.get_shape()
    if shape.is_fully_defined():
        return shape.as_list()
    # Mix static info with dynamic tf.shape() values for unknown dims.
    static = shape.with_rank(rank).as_list()
    dynamic = tf.unstack(tf.shape(x), rank)
    return [dyn if st is None else st for st, dyn in zip(static, dynamic)]

if __name__ == '__main__':
    # Smoke test: build the graph for a single 300x300 RGB image.
    # print() call syntax works under both Python 2 and 3 (the original
    # `print 'OK'` statement is a SyntaxError on Python 3).
    ssd300vgg16net(tf.constant(np.zeros([1, 300, 300, 3], dtype=float), 'float32'))
    print('OK')

