# -*- coding: utf-8 -*-

import tensorflow as tf
import numpy as np
import cv2

# 激活函数
def leaky_relu(x):
    """Leaky ReLU activation: identity for positive inputs, slope 0.1 for negative.

    Equivalent to max(0.1*x, x); commonly used in deeper detection networks.
    """
    neg_slope = 0.1
    return tf.maximum(neg_slope * x, x)
    #return tf.nn.leaky_relu(x,alpha=0.1,name='leaky_relu') # 或者tf.maximum(0.1*x,x)

# Conv+BN
# Conv + BN + activation
def conv2d(x, filters_num, filters_size, pad_size=0, stride=1, batch_normalize=True,
           activation=leaky_relu, use_bias=False, name='conv2d'):
    """2-D convolution with optional explicit padding, batch norm and activation.

    Args:
        x: input tensor, NHWC layout (padding is applied to dims 1 and 2).
        filters_num: number of output filters.
        filters_size: kernel size.
        pad_size: amount of explicit zero padding on each spatial border.
            Do NOT rely on padding='SAME' when pad_size > 0 — implicit SAME
            padding can shift coordinate calculations; pad explicitly instead.
        stride: convolution stride.
        batch_normalize: apply BN between conv and activation (conv layers
            followed by BN conventionally omit the bias).
        activation: callable applied after BN, or None/falsy to skip.
        use_bias: add a bias term to the convolution.
        name: variable-scope name for the layer (BN gets name + '_bn').

    Returns:
        Output tensor of the conv(/BN/activation) stack.
    """
    if pad_size > 0:
        # Explicit zero-padding of the spatial dims (height, width) so the
        # feature-map size after convolution is fully under our control.
        x = tf.pad(x, [[0, 0], [pad_size, pad_size], [pad_size, pad_size], [0, 0]])

    # BUGFIX: the original passed padding='same' unconditionally, so when
    # pad_size > 0 the input was padded twice (tf.pad + SAME), inflating the
    # output size — exactly the coordinate error the comment above warns about.
    # With explicit padding use 'VALID'; without it, keep 'SAME' so existing
    # callers (which all use pad_size=0) behave identically.
    conv_padding = 'VALID' if pad_size > 0 else 'SAME'
    out = tf.layers.conv2d(x, filters=filters_num, kernel_size=filters_size,
                           strides=stride, padding=conv_padding,
                           activation=None, use_bias=use_bias, name=name)
    # BN sits between the convolution and the activation.
    if batch_normalize:
        out = tf.layers.batch_normalization(out, axis=-1, momentum=0.9,
                                            training=True, name=name + '_bn')
    if activation:
        out = activation(out)
    return out

# max_pool
# max_pool
def maxpool(x, size=2, stride=2, name='maxpool'):
    """2-D max pooling.

    Args:
        x: input tensor, NHWC layout.
        size: pooling window size.
        stride: pooling stride.
        name: op name for the pooling layer.

    Returns:
        Pooled tensor.
    """
    # BUGFIX: the original accepted `name` but never forwarded it, so pooling
    # ops got autogenerated names, inconsistent with conv2d's explicit naming.
    return tf.layers.max_pooling2d(x, pool_size=size, strides=stride, name=name)

# reorg layer(带passthrough的重组层)，主要是利用到Fine-Grained Feature（细粒度特征用于检测微小物体）
# reorg layer (passthrough): exposes fine-grained features for small objects
def reorg(x, stride):
    """Reorganize spatial blocks into channels (YOLOv2 passthrough layer).

    Each stride x stride spatial block is stacked into the channel dimension,
    so a (H, W, C) map becomes (H/stride, W/stride, C*stride*stride).
    Returns a tensor with the same dtype as the input.
    """
    reorganized = tf.space_to_depth(x, block_size=stride)
    return reorganized

def yolt(image, n_last_channels=5):
    """YOLT detection backbone (Darknet-19 variant with a passthrough skip).

    Args:
        image: input tensor; assumed NHWC, 416x416 spatial size — the final
            reshape hard-codes a 26x26 output grid (TODO confirm with caller).
        n_last_channels: channel count of the prediction layer.

    Returns:
        Tensor of shape [batch, 26*26*n_last_channels].
    """
    # Stage 1: 416 -> 208
    x = conv2d(image, filters_num=32, filters_size=3, stride=1, name='conv0')   # 416x416x32
    x = maxpool(x, size=2, stride=2, name='maxpool0')                           # 208x208x32

    # Stage 2: 208 -> 104
    x = conv2d(x, 64, 3, stride=1, name='conv1')                                # 208x208x64
    x = maxpool(x, 2, 2, name='maxpool1')                                       # 104x104x64

    # Stage 3: 3x3 / 1x1 bottleneck, 104 -> 52
    x = conv2d(x, 128, 3, stride=1, name='conv2')                               # 104x104x128
    x = conv2d(x, 64, 1, stride=1, name='conv3')                                # 104x104x64
    x = conv2d(x, 128, 3, stride=1, name='conv4')                               # 104x104x128
    x = maxpool(x, 2, 2, name='maxpool2')                                       # 52x52x128

    # Stage 4: bottleneck, keep a skip connection before pooling, 52 -> 26
    x = conv2d(x, 256, 3, stride=1, name='conv5')                               # 52x52x256
    x = conv2d(x, 128, 1, stride=1, name='conv6')                               # 52x52x128
    x = conv2d(x, 256, 3, stride=1, name='conv7')                               # 52x52x256
    skip = x                                                                    # 52x52x256 (passthrough)
    x = maxpool(x, 2, 2, name='maxpool3')                                       # 26x26x256

    # Stage 5: deep 26x26 trunk
    x = conv2d(x, 512, 3, stride=1, name='conv8')                               # 26x26x512
    x = conv2d(x, 256, 1, stride=1, name='conv9')                               # 26x26x256
    x = conv2d(x, 512, 3, stride=1, name='conv10')                              # 26x26x512
    x = conv2d(x, 256, 1, stride=1, name='conv11')                              # 26x26x256
    x = conv2d(x, 512, 3, stride=1, name='conv12')                              # 26x26x512
    x = conv2d(x, 1024, 3, stride=1, name='conv13')                             # 26x26x1024
    x = conv2d(x, 1024, 3, stride=1, name='conv14')                             # 26x26x1024

    # Fold the 52x52x256 skip into 26x26x1024 and concatenate on channels.
    skip = reorg(skip, 2)                                                       # 26x26x1024
    x = tf.concat([skip, x], axis=-1)                                           # 26x26x2048

    x = conv2d(x, 1024, 3, stride=1, name='conv15')                             # 26x26x1024

    # Prediction head: plain conv (bias, no BN, no activation).
    output = conv2d(x, filters_num=n_last_channels, filters_size=1,
                    batch_normalize=False, activation=None, use_bias=True,
                    name='conv_dec')                                            # 26x26xn_last_channels

    # Flatten the 26x26 grid (26*26*5 = 3380 for the default head).
    output = tf.reshape(output, [-1, 26 * 26 * n_last_channels])

    return output

# if __name__ == '__main__':
#     x = tf.random_normal([1, 416, 416, 3])
#     model_output = yolt(x)













































# def conv2d(x, W):
#     return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
 
# def max_pool_2x2(x):
#     return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
#                           strides=[1, 2, 2, 1], padding='SAME')
# def max_pool_1x1(x):
#     return tf.nn.max_pool(x, ksize=[1, 1, 1, 1],
#                           strides=[1, 1, 1, 1], padding='SAME')


# def weight_variable(shape):
#     initial = tf.truncated_normal(shape, stddev=0.1)
#     return tf.Variable(initial)
 
# def bias_variable(shape):
#     initial = tf.constant(0.1, shape=shape)
#     return tf.Variable(initial)

# def yolt():
#     wCL_0 = weight_variable([3,3,1,32])
#     bCL_0 = bias_variable([32])
#     hCL_0 = tf.nn.relu(conv2d(image, wCL_0) + bCL_0)
#     pCL_0 = max_pool_2x2(hCL_0)

#     wCL_1 = weight_variable([3,3,32,64])
#     bCL_1 = bias_variable([64])
#     hCL_1 = tf.nn.relu(conv2d(pCL_0, wCL_1) + bCL_1)
#     pCL_1 = max_pool_2x2(hCL_1)

#     wCL_2 = weight_variable([3,3,64,128])
#     bCL_2 = bias_variable([128])
#     hCL_2 = tf.nn.relu(conv2d(pCL_1, wCL_2) + bCL_2)

#     wCL_3 = weight_variable([1, 1, 128, 64])
#     bCL_2 = bias_variable([64])
#     hCL_3 = tf.nn.relu(conv2d(hCL_2, wCL_3) + bCL_3)

#     wCL_4 = weight_variable([3,3,64,128])
#     bCL_4 = bias_variable([128])
#     hCL_4 = tf.nn.relu(conv2d(hCL_3, wCL_4) + bCL_4)
#     pCL_4 = max_pool_2x2(hCL_4)

#     wCL_5 = weight_variable([3,3,128,256])
#     bCL_5 = bias_variable([256])
#     hCL_5 = tf.nn.relu(conv2d(pCL_4, wCL_5) + bCL_5)

#     wCL_6 = weight_variable([1, 1, 256, 128])
#     bCL_6 = bias_variable([128])
#     hCL_6 = tf.nn.relu(conv2d(hCL_5, wCL_6) + bCL_6)

#     wCL_7 = weight_variable([3,3,128,256])
#     bCL_7 = bias_variable([256])
#     hCL_7 = tf.nn.relu(conv2d(hCL_6, wCL_7) + bCL_7)
#     pCL_7 = max_pool_2x2(hCL_7)

#     wCL_8 = weight_variable([3,3,256,512])
#     bCL_8 = bias_variable([512])
#     hCL_8 = tf.nn.relu(conv2d(pCL_7, wCL_8) + bCL_8)

#     wCL_9 = weight_variable([1, 1, 512, 256])
#     bCL_9 = bias_variable([256])
#     hCL_9 = tf.nn.relu(conv2d(hCL_8, wCL_9) + bCL_9)

#     wCL_10 = weight_variable([3,3,256,512])
#     bCL_10 = bias_variable([512])
#     hCL_10 = tf.nn.relu(conv2d(hCL_9, wCL_10) + bCL_10)
        
#     wCL_11 = weight_variable([1,1,512,256])
#     bCL_11 = bias_variable([256])
#     hCL_11 = tf.nn.relu(conv2d(hCL_10, wCL_11) + bCL_11)

#     wCL_12 = weight_variable([3, 3, 256, 512])
#     bCL_12 = bias_variable([512])
#     hCL_12 = tf.nn.relu(conv2d(hCL_11, wCL_12) + bCL_12)

#     wCL_13 = weight_variable([3,3,512,1024])
#     bCL_13 = bias_variable([1024])
#     hCL_13 = tf.nn.relu(conv2d(hCL_12, wCL_13) + bCL_13)

#     wCL_14 = weight_variable([3,3,1024,1024])
#     bCL_14 = bias_variable([1024])
#     hCL_14 = tf.nn.relu(conv2d(hCL_13, wCL_14) + bCL_14)

#     #passthrogh

#     wCL_16 = weight_variable([3,3,1024,1024])
#     bCL_16 = bias_variable([1024])
#     hCL_16 = tf.nn.relu(conv2d(hCL_15, wCL_16) + bCL_16)


    
