import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim.python.slim.learning import train_step
#from fetchData import *
from data_util.streamer import *
from data_util.parser import *
import os
import numpy as np
from tensorflow.python import debug as tf_debug
import tensorflow_hub as hub
import sys
from util.tools import Tooler

# Root directory of the Oulu-NPU anti-spoofing dataset.
data_path = "/home/jh/working_data/anti-spoofing/Oulu_NPU"
# Destination directory for the exported SavedModel.
# NOTE(review): name says mobileNet_035_128 but the hub module below is
# v2_140_224 -- confirm which configuration is actually intended.
export_path = './model_wholeScale_mobileNet_035_128'

# Local filesystem path of the TF-Hub MobileNetV2 (1.40 depth, 224x224) module.
module_path = "/home/jh/working_data/models/tensorflow_hub/mobileNet_v2_140_224"

# TensorBoard summary directories for the training and evaluation streams.
train_log_save_path = "./tflog/train_OULU_p1c1_mobileNet_140_224_three_levels"
test_log_save_path = "./tflog/test_OULU_p1c1_mobileNet_140_224_three_levels"
# Checkpoint file prefix (for saver.save) and its containing directory
# (for restore).
ckpt = "./tmp_OULU_p1c1_mobileNet_140_224_three_levels/model.ckpt"
ckpt_path = "./tmp_OULU_p1c1_mobileNet_140_224_three_levels"

def build_SSD( module_path , module_trainable ):
    """Build a 3-level classification head on top of a TF-Hub MobileNetV2.

    Instantiating the hub module registers its ops/variables in the default
    graph; three intermediate MobileNetV2 activations are then tapped, each
    reduced by a 1x1 convolution plus average pooling to a feature vector,
    concatenated, and fed to a 2-way linear classifier trained with
    softmax cross-entropy.

    Args:
        module_path: path of the TF-Hub MobileNetV2 module to load.
        module_trainable: bool; if True the hub module's own variables are
            trainable (fine-tuning), otherwise only the new head is trained.

    Returns:
        Tuple (imgs, label_placeholder, train_op, global_step, acc, loss):
        input image tensor, int64 label placeholder of shape [None], the
        Adam training op, the global-step tensor, scalar accuracy tensor,
        and scalar loss tensor.
    """
    graph = tf.get_default_graph()

    # The module handle itself is not used again; instantiation has the
    # side effect of adding the MobileNetV2 graph under the "module/" scope.
    hub.Module( module_path , trainable = module_trainable )

    # Global step tensor so training progress is tracked across checkpoints.
    global_step = tf.train.get_or_create_global_step()

    # Input tensor created by the hub module, plus our label feed.
    imgs = graph.get_tensor_by_name( "module/hub_input/images:0" )
    label_placeholder = tf.placeholder( tf.int64 , [None] , name = "label" )

    # Tap three intermediate activations as low/mid/high-level features.
    # NOTE(review): the pooling window sizes below assume 28x28 / 14x14 /
    # 14x14 feature maps (i.e. a 224x224 input) -- confirm against the
    # module's expected image size.
    low_level_feature1 = graph.get_tensor_by_name(
            "module/MobilenetV2/expanded_conv_6/expand/Relu6:0" )
    low_level_feature2 = graph.get_tensor_by_name(
            "module/MobilenetV2/expanded_conv_10/output:0" )
    low_level_feature3 = graph.get_tensor_by_name(
            "module/MobilenetV2/expanded_conv_13/expand/Relu6:0" )

    # 1x1 conv to re-project each tap, average-pool the full spatial extent,
    # then squeeze the singleton spatial dims to get [batch, channels].
    llf1_conv = slim.conv2d( low_level_feature1 , 256 , [1, 1] )
    llf1_avep = tf.squeeze( slim.avg_pool2d( llf1_conv , [28, 28] ) , [1, 2] )

    llf2_conv = slim.conv2d( low_level_feature2 , 512 , [1, 1] )
    llf2_avep = tf.squeeze( slim.avg_pool2d( llf2_conv , [14, 14] ) , [1, 2] )

    llf3_conv = slim.conv2d( low_level_feature3 , 1024 , [1, 1] )
    llf3_avep = tf.squeeze( slim.avg_pool2d( llf3_conv , [14, 14] ) , [1, 2] )

    total_features = tf.concat( [llf1_avep , llf2_avep , llf3_avep] ,
            axis = 1 , name = "total_feature" )

    # Final 2-way classifier.  BUG FIX: slim.fully_connected applies a ReLU
    # by default, which would clip the logits at zero before the softmax
    # cross-entropy; logits must stay linear, so activation_fn=None.
    net = slim.fully_connected( total_features , 2 , activation_fn = None ,
            scope = "final_fc" )

    loss = tf.losses.sparse_softmax_cross_entropy(
            labels = label_placeholder ,
            logits = net , scope = "loss" )

    # Batch accuracy: fraction of argmax predictions matching the labels.
    argmax = tf.argmax( net , 1 , name = "argmax" )
    acc = tf.reduce_mean( tf.cast( tf.equal(
            label_placeholder , argmax ) , tf.float32 ) , name = "acc" )

    # Optimize all trainable variables (only the new head layers when the
    # hub module was instantiated with module_trainable=False).
    trainable_list = tf.trainable_variables()
    train_op = tf.train.AdamOptimizer( learning_rate = 0.00001 , name = "adam" ).minimize(
            loss , var_list = trainable_list , global_step = global_step )

    return imgs, label_placeholder, train_op, \
            global_step, acc, loss

if __name__ == "__main__":
    tf.logging.set_verbosity(tf.logging.INFO)
    os.environ["CUDA_VISIBLE_DEVICES"] = "2,3"

    # Oulu-NPU data streams; each sess.run yields a batch of 128
    # (images, labels).
    oulu = Oulu_NPU( data_path )
    train_data_ops = oulu.trainDataStream( 128 )
    test_data_ops  = oulu.testDataStream( 128 )

    sess = tf.Session()
    graph = tf.get_default_graph()

    # Flip to True to resume training from the latest checkpoint instead of
    # building (and re-initializing) a fresh graph.
    resume_from_checkpoint = False
    if resume_from_checkpoint:
        Tooler.restore_from_ckpt( ckpt_path , sess )
        imgs = graph.get_tensor_by_name( "module/hub_input/images:0" )
        label_placeholder = graph.get_tensor_by_name( "label:0" )
        loss = graph.get_tensor_by_name( "loss/value:0" )
        argmax = graph.get_tensor_by_name( "argmax:0" )
        acc = graph.get_tensor_by_name( "acc:0" )
        # BUG FIX: Optimizer.minimize() returns an Operation, not a Tensor,
        # so it must be fetched by operation name (no ":0" suffix).
        train_op = graph.get_operation_by_name( "adam" )
        global_step = tf.train.get_or_create_global_step()
    else:
        imgs, label_placeholder, train_op, global_step, acc, loss = \
                build_SSD( module_path, False )
        # BUG FIX: initialize variables only when building from scratch;
        # running the initializer after a restore would clobber the
        # restored weights.
        sess.run( tf.global_variables_initializer() )

    # Shared summaries for both the train and test writers.
    loss_summary = tf.summary.scalar( "loss" , loss )
    acc_summary = tf.summary.scalar( "accuracy" , acc )
    summary_merged = tf.summary.merge( [ loss_summary , acc_summary ] , name = "summary" )

    train_writer = tf.summary.FileWriter( train_log_save_path, graph = graph )
    test_writer  = tf.summary.FileWriter( test_log_save_path )

    saver = tf.train.Saver()

    for i in range( 10000 ):
        train_images , train_labels = sess.run( train_data_ops )
        _ , GLOBAL_STEP , ACC , LOSS, SUMMARY = sess.run(
                [ train_op , global_step , acc , loss , summary_merged ] ,
                feed_dict = { imgs: train_images ,
                label_placeholder : train_labels } )

        train_writer.add_summary( SUMMARY , GLOBAL_STEP )
        print( "iter = %d , loss = %f "  %( GLOBAL_STEP , LOSS ) )

        # Periodic checkpointing.
        if i % 200 == 0:
            save_path = saver.save( sess, ckpt , global_step = GLOBAL_STEP )
            print("Model saved in path: %s" % save_path)

        # Periodic evaluation on one held-out batch.
        if i % 50 == 0:
            test_images , test_labels = sess.run( test_data_ops )
            GLOBAL_STEP , ACC , LOSS , SUMMARY = sess.run(
                    [ global_step , acc , loss , summary_merged ] ,
                    feed_dict = { imgs: test_images ,
                    label_placeholder : test_labels } )
            test_writer.add_summary( SUMMARY , GLOBAL_STEP )
            print( "accuracy = %f" % ACC )

    train_writer.close()
    test_writer.close()

    #
    # Export the final model as a SavedModel for serving.
    #

    # BUG FIX: `net` was never defined in this scope (build_SSD does not
    # return it), so the export below raised a NameError.  Fetch the
    # classifier output from the graph instead; try both possible output
    # tensor names of the "final_fc" layer (BiasAdd when activation_fn is
    # None, Relu with slim's default activation).
    net = None
    for candidate in ( "final_fc/BiasAdd:0" , "final_fc/Relu:0" ):
        try:
            net = graph.get_tensor_by_name( candidate )
            break
        except KeyError:
            continue

    builder = tf.saved_model.builder.SavedModelBuilder(export_path)
    tensor_info_input  = tf.saved_model.utils.build_tensor_info( imgs )
    tensor_info_output = tf.saved_model.utils.build_tensor_info( net )

    prediction_signature = (
            tf.saved_model.signature_def_utils.build_signature_def(
                inputs  = { 'images' : tensor_info_input } ,
                outputs = { 'scores' : tensor_info_output} ,
                method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))

    builder.add_meta_graph_and_variables(
            sess, [tf.saved_model.tag_constants.SERVING],
            signature_def_map={
                'predict_images':
                prediction_signature,
                },
            main_op=tf.tables_initializer(),
            strip_default_attrs=True)
    builder.save()
