import logging
import os
import time
import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow.contrib.tensorboard.plugins import projector
from tensorflow.python.framework import graph_util

import numpy as np

import cv2



def prelu(inputs):
    """Parametric ReLU activation (He et al.).

    Creates one learnable slope per channel (last axis of `inputs`),
    initialized to 0.25, and applies max(x, 0) + alpha * min(x, 0).
    Must be called inside a variable scope unique to each layer, since
    the variable name "alphas" is fixed.
    """
    channel_count = inputs.get_shape()[-1]
    alphas = tf.get_variable(
        "alphas",
        shape=channel_count,
        dtype=tf.float32,
        initializer=tf.constant_initializer(0.25))
    # (x - |x|) / 2 is x for negative entries and 0 otherwise.
    negative_part = 0.5 * alphas * (inputs - abs(inputs))
    return tf.nn.relu(inputs) + negative_part

def _freeze_graph(sess, outputDir):
    """Freeze the current graph (variables -> constants) and write
    `model.pb` into `outputDir`.

    The output node names match the heads produced by PNet/RNet/ONet.
    """
    output_graph_def = graph_util.convert_variables_to_constants(
        sess,
        sess.graph_def,
        ['cls_fc/BiasAdd', 'bb_fc/BiasAdd', 'landmark_fc/BiasAdd']
    )
    # BUGFIX: was `outputDir + './model.pb'`, which produced paths like
    # "./output_P/./model.pb" (or a broken path without a trailing slash).
    with tf.gfile.GFile(os.path.join(outputDir, 'model.pb'), 'wb') as f:
        f.write(output_graph_def.SerializeToString())


def trainNet(outputDir, op, loss, gs, num_epochs):
    """Run the queue-runner training loop for one of the MTCNN nets.

    Args:
        outputDir: directory for checkpoints and the frozen `model.pb`.
        op: training op (optimizer.minimize result).
        loss: scalar loss tensor, fetched for logging.
        gs: global-step variable, fetched to drive save/log cadence.
        num_epochs: epoch count, used only in the final log message.

    Restores the latest checkpoint from `outputDir` when one exists,
    then trains until the input queue raises OutOfRangeError
    (i.e. `num_epochs` epochs were consumed). Saves a checkpoint every
    1000 steps and a frozen graph every 5000 steps.
    """
    config = tf.ConfigProto()
    config.allow_soft_placement = True
    # Grow GPU memory on demand instead of grabbing it all up front.
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        saver = tf.train.Saver(max_to_keep=2)

        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        sess.run(init_op)
        print('session Initialized.')

        # BUGFIX: latest_checkpoint returns None when no checkpoint
        # exists; the old code passed None to saver.restore and relied
        # on a broad `except` to hide the resulting error.
        checkpoint_path = tf.train.latest_checkpoint(outputDir)
        if checkpoint_path is not None:
            try:
                saver.restore(sess, checkpoint_path)
                print('restore from [{0}]'.format(checkpoint_path))
                time.sleep(1)
                _freeze_graph(sess, outputDir)
            except Exception as e:
                # Keep best-effort semantics but surface the reason.
                print('failed to restore/freeze [{0}]: {1}'.format(
                    checkpoint_path, e))
        else:
            print('no check point found....')

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        # BUGFIX: initialize _step so the OutOfRangeError log does not
        # raise NameError when the queue is empty before step one.
        _step = 0
        try:
            while not coord.should_stop():
                _, _loss_cls, _step = sess.run([op, loss, gs])
                if _step % 100 == 0:
                    print("_loss:" + str(_loss_cls) + " /----/step:" + str(_step))
                if _step % 1000 == 0:
                    saver.save(sess, os.path.join(outputDir, 'model.ckpt'),
                               global_step=_step)
                    print('saving')
                if _step % 5000 == 0:
                    _freeze_graph(sess, outputDir)
        except tf.errors.OutOfRangeError:
            # string_input_producer signals end of the last epoch this way.
            print(
                'Done training for %d epochs, %d steps.' %
                (num_epochs, _step))
        finally:
            coord.request_stop()

        coord.join(threads)

        

def ONet(training = True):
    """Build and train the MTCNN output network (O-Net, 48x48 input).

    Reads batches from ./onet_data.tfrecords, builds the O-Net conv
    stack with three heads (2-way face classification, 4-value bbox
    regression, 10-value landmark regression), and hands the combined
    weighted loss to trainNet with output dir ./output_O/.

    Args:
        training: unused here; kept for interface symmetry with
            PNet/RNet callers.
    """
    num_epochs = 100
    with tf.name_scope('input'):
        filename_queue = tf.train.string_input_producer(
                ["./onet_data.tfrecords"], num_epochs=num_epochs)

        _images, _clss, _bbs, _landmarks = read_and_decode(
            filename_queue, 48, batch_size=64)

    inputs = _images

    # Global step; trainable=False keeps it out of the optimizer's
    # variable list (it is only incremented by minimize()).
    gs = tf.Variable(1, name='gs', trainable=False)

    # Weights for [classification, bbox, landmark] loss terms.
    loss_weight = [1.0, 0.5, 1]
    with slim.arg_scope([slim.conv2d],
                        activation_fn = prelu,
                        weights_initializer=slim.xavier_initializer(),
                        biases_initializer=tf.zeros_initializer(),
                        weights_regularizer=slim.l2_regularizer(0.0005), 
                        padding = 'valid'):
        print(inputs.get_shape())
        # 'input/shuffle_batch:0'
        print(inputs.name)

        # First conv layer
        conv_1 = slim.conv2d(inputs, num_outputs=32, kernel_size=[3,3], stride=1, scope='conv_1')
        print("conv_1:" + str(conv_1.get_shape()))
        # followed by max pooling
        conv_1_mp = slim.max_pool2d(conv_1, kernel_size=[3,3], stride=2, scope='pool1', padding='SAME')
        print("conv_1_mp:" + str(conv_1_mp.get_shape()))
        # Second conv layer
        conv_2 = slim.conv2d(conv_1_mp, num_outputs=64, kernel_size=[3,3], stride=1, scope='conv2')
        print("conv_2:" + str(conv_2.get_shape()))
        # followed by max pooling
        conv_2_mp = slim.max_pool2d(conv_2, kernel_size=[3,3], stride=2, scope='pool2')
        print("conv_2_mp:" + str(conv_2_mp.get_shape()))
        # Third conv layer
        conv_3 = slim.conv2d(conv_2_mp, num_outputs=64, kernel_size=[3,3], stride=1, scope='conv3')
        print("conv_3:" + str(conv_3.get_shape()))
        # followed by max pooling
        conv_3_mp = slim.max_pool2d(conv_3, kernel_size=[2,2], stride=2, scope='pool3', padding='SAME')
        # BUGFIX: label previously said "conv_2_mp".
        print("conv_3_mp:" + str(conv_3_mp.get_shape()))
        # Fourth conv layer
        conv_4 = slim.conv2d(conv_3_mp, num_outputs=128, kernel_size=[2,2], stride=1, scope='conv4')
        # BUGFIX: label previously said "conv_3".
        print("conv_4:" + str(conv_4.get_shape()))
        # Flatten the feature map for the fully connected layers.
        fc_flatten = slim.flatten(conv_4)
        fc = slim.fully_connected(fc_flatten, num_outputs=256, scope='fc1')
        # Classification head: raw logits (softmax applied in the loss).
        cls_prob = slim.fully_connected(fc, num_outputs=2, scope='cls_fc', activation_fn=None)
        # 'cls_fc/BiasAdd:0' — name referenced by trainNet's freeze step.
        print(cls_prob.name)
        # Bounding-box regression head.
        bb = slim.fully_connected(fc, num_outputs=4, scope='bb_fc', activation_fn=None)
        # 'bb_fc/BiasAdd:0'
        print(bb.name)
        # Landmark regression head (5 points -> 10 values).
        landmark = slim.fully_connected(fc, num_outputs=10, scope='landmark_fc', activation_fn=None)
        # 'landmark_fc/BiasAdd:0'
        print(landmark.name)

    # Classification loss: softmax cross entropy over 2 classes.
    softMax_cls = tf.nn.softmax_cross_entropy_with_logits_v2(labels=_clss,
                                                             logits=cls_prob)
    softMax_cls_mean = tf.reduce_mean(softMax_cls)
    softmax_loss_cls = loss_weight[0] * softMax_cls_mean

    # Bounding-box loss: mean squared Euclidean distance.
    square_error = tf.square(_bbs - bb)
    square_error = tf.reduce_sum(square_error, axis=1)
    softmax_loss_bb = loss_weight[1] * tf.reduce_mean(square_error)

    # Landmark loss: mean squared Euclidean distance.
    square_error = tf.square(_landmarks - landmark)
    square_error = tf.reduce_sum(square_error, axis=1)
    softmax_loss_land = loss_weight[2] * tf.reduce_mean(square_error)

    loss = softmax_loss_cls + softmax_loss_bb + softmax_loss_land
    op = tf.train.AdamOptimizer(learning_rate=0.0001) \
                    .minimize(loss, global_step=gs)

    trainNet('./output_O/', op, loss, gs, num_epochs)

def RNet(training = True):
    """Build and train the MTCNN refine network (R-Net, 24x24 input).

    Reads batches from ./rnet_data.tfrecords, builds the R-Net conv
    stack with three heads (2-way face classification, 4-value bbox
    regression, 10-value landmark regression), and hands the combined
    weighted loss to trainNet with output dir ./output_R/.

    Args:
        training: unused here; kept for interface symmetry with
            PNet/ONet callers.
    """
    num_epochs = 300
    with tf.name_scope('input'):
        filename_queue = tf.train.string_input_producer(
                ["./rnet_data.tfrecords"], num_epochs=num_epochs)

        _images, _clss, _bbs, _landmarks = read_and_decode(
            filename_queue, 24, batch_size=128)

    inputs = _images

    # Global step; trainable=False keeps it out of the optimizer's
    # variable list (it is only incremented by minimize()).
    gs = tf.Variable(1, name='gs', trainable=False)

    # Weights for [classification, bbox, landmark] loss terms.
    loss_weight = [1.0, 0.5, 0.5]
    with slim.arg_scope([slim.conv2d],
                        activation_fn = prelu,
                        weights_initializer=slim.xavier_initializer(),
                        biases_initializer=tf.zeros_initializer(),
                        weights_regularizer=slim.l2_regularizer(0.0005), 
                        padding = 'valid'):
        print(inputs.get_shape())

        # First conv layer
        conv_1 = slim.conv2d(inputs, num_outputs=28, kernel_size=[3,3], stride=1, scope='conv_1')
        print("conv_1:" + str(conv_1.get_shape()))
        # followed by max pooling
        conv_1_mp = slim.max_pool2d(conv_1, kernel_size=[3,3], stride=2, scope='pool1', padding='SAME')
        print("conv_1_mp:" + str(conv_1_mp.get_shape()))
        # Second conv layer
        conv_2 = slim.conv2d(conv_1_mp, num_outputs=48, kernel_size=[3,3], stride=1, scope='conv2')
        print("conv_2:" + str(conv_2.get_shape()))
        # followed by max pooling
        conv_2_mp = slim.max_pool2d(conv_2, kernel_size=[3,3], stride=2, scope='pool2')
        print("conv_2_mp:" + str(conv_2_mp.get_shape()))
        # Third conv layer
        conv_3 = slim.conv2d(conv_2_mp, num_outputs=64, kernel_size=[2,2], stride=1, scope='conv3')
        print("conv_3:" + str(conv_3.get_shape()))
        # Flatten the feature map for the fully connected layers.
        fc_flatten = slim.flatten(conv_3)
        print("fc_flatten:" + str(fc_flatten.get_shape()))

        # CONSISTENCY: use the slim alias like the other layers/nets
        # (slim IS tf.contrib.slim, so this is the same function the
        # old `tf.contrib.layers.fully_connected` call resolved to).
        fc = slim.fully_connected(fc_flatten, num_outputs=128, scope='fc1')
        # Classification head: raw logits (softmax applied in the loss).
        cls_prob = slim.fully_connected(fc, num_outputs=2, scope='cls_fc', activation_fn=None)
        print("cls_prob:" + str(cls_prob.get_shape()))
        print("cls_prob:" + str(cls_prob.name))
        # Bounding-box regression head.
        bb = slim.fully_connected(fc, num_outputs=4, scope='bb_fc', activation_fn=None)
        print("bb:" + str(bb.get_shape()))
        # Landmark regression head (5 points -> 10 values).
        landmark = slim.fully_connected(fc, num_outputs=10, scope='landmark_fc', activation_fn=None)
        print("landmark:" + str(landmark.get_shape()))

    # Classification loss: softmax cross entropy over 2 classes.
    softMax_cls = tf.nn.softmax_cross_entropy_with_logits_v2(labels=_clss,
                                                             logits=cls_prob)
    softMax_cls_mean = tf.reduce_mean(softMax_cls)
    softmax_loss_cls = loss_weight[0] * softMax_cls_mean

    # Bounding-box loss: mean squared Euclidean distance.
    square_error = tf.square(_bbs - bb)
    square_error = tf.reduce_sum(square_error, axis=1)
    softmax_loss_bb = loss_weight[1] * tf.reduce_mean(square_error)

    # Landmark loss: mean squared Euclidean distance.
    square_error = tf.square(_landmarks - landmark)
    square_error = tf.reduce_sum(square_error, axis=1)
    softmax_loss_land = loss_weight[2] * tf.reduce_mean(square_error)

    loss = softmax_loss_cls + softmax_loss_bb + softmax_loss_land
    op = tf.train.AdamOptimizer(learning_rate=0.0001) \
                    .minimize(loss, global_step=gs)

    trainNet('./output_R/', op, loss, gs, num_epochs)


def PNet(training = True):
    """Build and train the MTCNN proposal network (P-Net, 12x12 input).

    Reads batches from ./pnet_data.tfrecords, builds the fully
    convolutional P-Net with three 1x1-conv heads (2-way face
    classification, 4-value bbox regression, 10-value landmark
    regression), and hands the weighted loss to trainNet with output
    dir ./output_P/.

    NOTE(review): the landmark loss is computed but deliberately
    excluded from the total loss (see the commented-out term below) —
    presumably P-Net is trained without landmark supervision; confirm
    before re-enabling.

    Args:
        training: unused here; kept for interface symmetry with
            RNet/ONet callers.
    """
    num_epochs = 300
    with tf.name_scope('input'):
        filename_queue = tf.train.string_input_producer(
                ["./pnet_data.tfrecords"], num_epochs=num_epochs)

        _images, _clss, _bbs, _landmarks = read_and_decode(
            filename_queue, 12, batch_size=128)

    inputs = _images

    # Global step; trainable=False keeps it out of the optimizer's
    # variable list (it is only incremented by minimize()).
    gs = tf.Variable(1, name='gs', trainable=False)

    # Weights for [classification, bbox, landmark] loss terms.
    loss_weight = [1.0, 0.5, 0.5]
    with slim.arg_scope([slim.conv2d],
                        activation_fn = prelu,
                        weights_initializer=slim.xavier_initializer(),
                        biases_initializer=tf.zeros_initializer(),
                        weights_regularizer=slim.l2_regularizer(0.0005), 
                        padding = 'valid'):

        print(inputs.get_shape())

        # First conv layer
        conv_1 = slim.conv2d(inputs, 10, 3, stride=1, scope='conv_1')
        print("conv_1:" + str(conv_1.get_shape()))
        # followed by max pooling
        conv_1_mp = slim.max_pool2d(conv_1, kernel_size=[2,2], stride=2, scope='pool1', padding='SAME')
        print("conv_1_mp:" + str(conv_1_mp.get_shape()))
        # Second conv layer
        conv_2 = slim.conv2d(conv_1_mp, num_outputs=16, kernel_size=[3,3], stride=1, scope='conv2')
        print("conv_2:" + str(conv_2.get_shape()))
        # Third conv layer
        conv_3 = slim.conv2d(conv_2, num_outputs=32, kernel_size=[3,3], stride=1, scope='conv3')
        print("conv_3:" + str(conv_3.get_shape()))
        # Face-classification head, 2 outputs via 1x1 conv (logits; the
        # scope name 'cls_fc' matches the node trainNet freezes).
        clss = slim.conv2d(conv_3, num_outputs=2, kernel_size=[1,1], stride=1, scope='cls_fc', activation_fn=None)
        print(clss.name)

        # Collapse the spatial map so the loss sees one row per window.
        clss = tf.reshape(clss, [-1,2])

        # Bounding-box head.
        bb = slim.conv2d(conv_3, num_outputs=4, kernel_size=[1,1], stride=1, scope='bb_fc', activation_fn=None)
        print(bb.name)
        bb = tf.reshape(bb, [-1,4])
        print(bb.name)
        # Landmark head; the reshape is explicitly named
        # 'landmark_fc/BiasAdd' so P-Net exposes the same output node
        # names as R-Net/O-Net for the shared freeze step in trainNet.
        landmark = slim.conv2d(conv_3, num_outputs=10, kernel_size=[1,1], stride=1, scope='conv4_3', activation_fn=None)
        print("landmark:" + str(landmark.get_shape()))
        landmark = tf.reshape(landmark, [-1,10], name='landmark_fc/BiasAdd')
        print(landmark.name)

    # Classification loss: softmax cross entropy over 2 classes.
    softMax_cls = tf.nn.softmax_cross_entropy_with_logits_v2(labels=_clss,
                                                             logits=clss)
    softMax_cls_mean = tf.reduce_mean(softMax_cls)
    softmax_loss_cls = loss_weight[0] * softMax_cls_mean

    # Bounding-box loss: mean squared Euclidean distance.
    square_error = tf.square(_bbs - bb)
    square_error = tf.reduce_sum(square_error, axis=1)
    softmax_loss_bb = loss_weight[1] * tf.reduce_mean(square_error)

    # Landmark loss: computed but currently NOT added to the total loss.
    square_error = tf.square(_landmarks - landmark)
    square_error = tf.reduce_sum(square_error, axis=1)
    softmax_loss_land = loss_weight[2] * tf.reduce_mean(square_error)

    loss = softmax_loss_cls + softmax_loss_bb # +  softmax_loss_land
    op = tf.train.AdamOptimizer(learning_rate=0.0001) \
                    .minimize(loss, global_step=gs)

    trainNet('./output_P/', op, loss, gs, num_epochs)



def read_and_decode(filename_queue, shape, batch_size = 128):
    """Decode one TFRecord example and assemble shuffled batches.

    Each serialized example carries four raw-byte features: the image
    (uint8, shape*shape*3), a 2-float class one-hot, a 4-float bbox
    target and a 10-float landmark target. Pixels are rescaled from
    [0, 255] to roughly [-1, 1) via (x - 127.5) / 128.

    Args:
        filename_queue: queue of TFRecord file names to read from.
        shape: side length of the square input image (12/24/48).
        batch_size: examples per batch.

    Returns:
        Tuple of batched tensors (images, clss, bbs, landmarks).
    """
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)

    feature_spec = {
        'image_raw': tf.FixedLenFeature([], tf.string),
        'cls_raw': tf.FixedLenFeature([], tf.string),
        'landmark_raw': tf.FixedLenFeature([], tf.string),
        'bb_raw': tf.FixedLenFeature([], tf.string),
    }
    features = tf.parse_single_example(serialized_example,
                                       features=feature_spec)
    print("features len:" + str(len(features)))

    # Decode and normalize the image, then give it its static shape.
    pixels = tf.cast(tf.decode_raw(features['image_raw'], tf.uint8),
                     tf.float32)
    pixels = (pixels - 127.5) * (1. / 128.0)
    pixels.set_shape([shape * shape * 3])
    image = tf.reshape(pixels, [shape, shape, 3])

    # Decode the three float32 label vectors and pin their shapes.
    ret_cls = tf.decode_raw(features['cls_raw'], tf.float32)
    ret_bb = tf.decode_raw(features['bb_raw'], tf.float32)
    ret_landmark = tf.decode_raw(features['landmark_raw'], tf.float32)
    ret_cls.set_shape([2])
    ret_bb.set_shape([4])
    ret_landmark.set_shape([10])

    batched = tf.train.shuffle_batch(
        [image, ret_cls, ret_bb, ret_landmark],
        batch_size=batch_size,
        capacity=1000 + 3 * batch_size,
        min_after_dequeue=1000)
    images, clss, bbs, landmarks = batched

    return images, clss, bbs, landmarks



if __name__ == '__main__':

    # g = tf.Graph()
    # with g.as_default():

    # Train exactly one of the three MTCNN stages per run; each builds
    # its graph into the default graph, so switch by (un)commenting.
    PNet(training=True)          
    # RNet(training=True)
    # ONet(training=True)