import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim.python.slim.learning import train_step
from data_util.streamer import Oulu_NPU, OneLabelData, HomeMadeData, REPLAY_ATTACK
import os
import numpy as np
from tensorflow.python import debug as tf_debug
import tensorflow_hub as hub
import sys
import cv2
from skimage.feature import local_binary_pattern
sys.path.append( '/home/jh/working_pros/models-master/research/slim' )
from nets.nets_factory import get_network_fn

# ---------------------------------------------------------------------------
# Dataset configuration (machine-specific absolute paths).
# ---------------------------------------------------------------------------
pOULU = "/home/jh/working_data/anti-spoofing/Oulu_NPU"
pRP = "/home/jh/working_data/anti-spoofing/replay_attack/original_REPLAY_ATTACK/replayattack"
pMSU = "/home/jh/working_data/anti-spoofing/MSU_USSA_Public_bk"
pOFFICE = "/home/jh/working_data/anti-spoofing/inTime99_madeData"

# Dataset streamers from data_util.streamer.  Label convention appears to be
# 1 = live face, 0 = spoof attack (see the explicit labels passed to
# OneLabelData below) -- confirm against data_util.streamer.
OULU = Oulu_NPU( pOULU )
RP = REPLAY_ATTACK( pRP )
MSU_LIVE = OneLabelData( pMSU + "/LiveSubjectsImages" , label = 1 )
MSU_S1 = OneLabelData( pMSU + "/SpoofSubjectImages/MacBook_RearCamera" , label = 0)
MSU_S2 = OneLabelData( pMSU + "/SpoofSubjectImages/Tablet_RearCamera" , label = 0)
MSU_S3 = OneLabelData( pMSU + "/SpoofSubjectImages/Nexus_RearCamera" , label = 0)
MSU_S4 = OneLabelData( pMSU + "/SpoofSubjectImages/MacBook_FrontCamera" , label = 0)
MSU_S5 = OneLabelData( pMSU + "/SpoofSubjectImages/Tablet_FrontCamera" , label = 0)
MSU_S6 = OneLabelData( pMSU + "/SpoofSubjectImages/Nexus_FrontCamera" , label = 0)

OFF_TRAIN = HomeMadeData( pOFFICE + "/train" )
OFF_TEST = HomeMadeData( pOFFICE + "/test" )

# Training data sources.  Each entry is run once per training iteration and
# its (images, labels) batch is concatenated with the others (see the main
# loop below).  The alternative multi-dataset mix is kept commented out.
#data_op_list = [ OULU.trainDisplayDataStream( 16 ) , 
#        RP.trainDisplayDataStream(16), 
#        MSU_LIVE.testDataStream(16),
#        MSU_S1.testDataStream(16) ,
#        MSU_S2.testDataStream(16) ,
#        MSU_S3.testDataStream(16) ,
#        MSU_S4.testDataStream(16) ,
#        MSU_S5.testDataStream(16) ,
#        MSU_S6.testDataStream(16) ,
#        ]
data_op_list = [ OFF_TRAIN.allDataStream( 128 ) ,]

# Evaluation data sources, keyed by the name of the TensorBoard log directory
# (a matching FileWriter is created per key in the main block).
test_writer_dict = {
        'in_oFFICE_test': OFF_TEST.allDataStream( 128 )
        #'in_oulu_test': OULU.devDisplayDataStream(128),
        #'in_rp_test': RP.devDisplayDataStream(128),
        #'cross_msu_live_test': MSU_LIVE.testDataStream(128),
        #'cross_msu_spoof1': MSU_S1.testDataStream(128) , 
        #'cross_msu_spoof2': MSU_S2.testDataStream(128), 
        #'cross_msu_spoof3': MSU_S3.testDataStream(128)
        }

# Directory prefix used both for restoring and for periodically saving
# checkpoints during training.
ckpt_path = 'model_conv_1/'

def deal_with_ckpt( sess , ckpt_path ):
    """Restore the most recent checkpoint under ``ckpt_path`` into ``sess``.

    Args:
        sess: An open ``tf.Session`` whose graph contains the variables to
            restore.
        ckpt_path: Directory (or checkpoint prefix) to search with
            ``tf.train.latest_checkpoint``.

    Returns:
        None.  If no checkpoint exists the session is left untouched, so
        training starts from the freshly initialized variables.
    """
    ckpt = tf.train.latest_checkpoint( ckpt_path )
    # `is None` is the correct identity test (was `== None`); guard clause
    # replaces the redundant if/else with two bare returns.
    if ckpt is None:
        return
    tf.train.Saver().restore( sess , ckpt )

if __name__ == "__main__":
    # Fine-tunes the early layers of MobileNetV2 plus a 2-way softmax head
    # for live-vs-spoof classification, logging metrics to TensorBoard.
    tf.logging.set_verbosity(tf.logging.INFO)
    os.environ["CUDA_VISIBLE_DEVICES"] = "3"  # pin to a single GPU

    sess = tf.Session()

    #VGG = get_network_fn( 'mobilenet_v2_140' , 2 )

    #imgs = tf.placeholder( tf.float32 , [None , 224, 224, 3 ] , name = "images" )
    #imgs = tf.placeholder( tf.float32 , [None , 256 ] , name = "images" )
    # Ground-truth labels fed per batch; convention 1 = live, 0 = spoof
    # (see the class-weighting below) -- confirm against the data streamers.
    label_placeholder = tf.placeholder( tf.int64 , [None] , name = "label" )

    #features, end_points = VGG( imgs )

    #ssaver = tf.train.Saver()
    #ssaver.restore( sess, "./vgg16_model/vgg_16.ckpt")

    #ckpt_path = tf.train.latest_checkpoint( './mobilenetV2_model' )
    #ssaver.restore( sess, './mobilenetV2_model/mobilenet_v2_1.4_224.ckpt' ) 

    #features = tf.reduce_mean( end_points['vgg_16/conv1/conv1_1'] , [1, 2], \
    #        keep_dims=False , name='global_pool')

    #features = tf.squeeze( features , [1,2] )


    # Build the MobileNetV2 backbone (num_classes=None: no classification
    # head, weight decay disabled) purely to populate the default graph.
    mobileNetV2 = get_network_fn( 'mobilenet_v2_140' , None , weight_decay = 0.0 )
    imgs = tf.placeholder( tf.float32 , [None , 224, 224, 3 ] , name = "images" )

    # Outputs are ignored; intermediate tensors are fetched by name below.
    _, _ = mobileNetV2( imgs )

    graph = tf.get_default_graph()
    # Take a very early feature map (expanded_conv_1) and global-average-pool
    # it over the spatial axes [1,2] to get one vector per image.
    features = graph.get_tensor_by_name( 'MobilenetV2/expanded_conv_1/output:0' )
    features = tf.reduce_mean( features , [1,2] , keep_dims = False , name = 'global_pooling' )
    #features = graph.get_tensor_by_name( 'MobilenetV2/Logits/AvgPool:0' )

    #features = tf.squeeze( features , [1,2] )



    #m = hub.Module( "/home/jh/working_data/models/tensorflow_hub/mobileNet_v2_140_224" , \
    #        trainable = True )

    #graph = tf.get_default_graph()
    #imgs = graph.get_tensor_by_name( 'module/hub_input/images:0' )

    #features = m( imgs )
    #features = graph.get_tensor_by_name( 'module/MobilenetV2/expanded_conv/depthwise_output:0' )
    #features = tf.reduce_mean( features , [1,2] , keep_dims = False , name = 'global_pooling' )



    # Two-class linear classifier head on top of the pooled features.
    # L2 regularizer strength is 0.0, i.e. effectively disabled.
    #net = slim.fully_connected( imgs , 2 , scope = "final_fc" )
    with slim.arg_scope( [slim.fully_connected, ] , weights_regularizer = slim.l2_regularizer( 0.0 ) ):
        net = slim.fully_connected( features , 2 , scope = "final_fc" )

    # Weighted loss: samples with label 1 (live) get per-sample weight 5.0,
    # label 0 (spoof) gets 1.0 -- compensating class imbalance.
    loss_condition = tf.equal( label_placeholder , 1 )
    #a = tf.constant( value = 0.8 , shape = tf.shape(label_placeholder) )
    #b = tf.constant( value = 0.2 , shape = tf.shape(label_placeholder) )

    a = tf.fill( tf.shape( label_placeholder ) , 5. )
    b = tf.fill( tf.shape( label_placeholder ) , 1. )

    weights = tf.where( loss_condition , a, b )

    loss = tf.losses.sparse_softmax_cross_entropy( \
            labels = label_placeholder , \
            logits = net , scope = "loss" , weights = weights ) 
    #regularization_loss = tf.add_n( slim.losses.get_regularization_losses() )
    # NOTE(review): regularization is hard-disabled; the summary below logs a
    # constant Python 0 (auto-converted to a constant tensor).
    regularization_loss = 0
    total_loss = loss + regularization_loss

    total_loss_summary = tf.summary.scalar( "total_loss" , total_loss )
    regularization_loss_summary = tf.summary.scalar( "regularization_loss" , regularization_loss )
    data_loss_summary = tf.summary.scalar( "data_loss" , loss )

    # Predicted class per sample and plain batch accuracy.
    argmax = tf.argmax( net , 1 , name = "argmax" )
    acc = tf.reduce_mean(\
            tf.cast( tf.equal ( label_placeholder , argmax ) , tf.float32 ) , name = "acc" )
    acc_summary = tf.summary.scalar( "accuracy" , acc )

    # Confusion-matrix counts (positive class = 1 = live) used for HTER.
    TP_condition = tf.math.logical_and( tf.equal( label_placeholder , argmax ) , tf.equal( argmax , 1 ) )
    FP_condition = tf.math.logical_and( tf.math.logical_not( tf.equal( label_placeholder , argmax ) ) , tf.equal( argmax , 1 ) )
    TN_condition = tf.math.logical_and( tf.equal( label_placeholder , argmax ) , tf.equal( argmax , 0 ) )
    FN_condition = tf.math.logical_and( tf.math.logical_not( tf.equal( label_placeholder , argmax ) ) , tf.equal( argmax , 0 ) )

    TP = tf.reduce_sum( tf.cast( TP_condition  , tf.float32 ) )
    FP = tf.reduce_sum( tf.cast( FP_condition  , tf.float32 ) )
    TN = tf.reduce_sum( tf.cast( TN_condition  , tf.float32 ) )
    FN = tf.reduce_sum( tf.cast( FN_condition  , tf.float32 ) )

    # NOTE(review): if a batch contains only one class, FP+TN or TP+FN can be
    # zero, making FAR/FRR (and hence HTER) NaN in the summaries.
    FAR = FP/( FP + TN )
    FRR = FN/( TP + FN )

    # Half Total Error Rate: mean of false-accept and false-reject rates.
    HTER = ( FAR + FRR )/2

    far_summary = tf.summary.scalar( "FAR" , FAR )
    frr_summary = tf.summary.scalar( "FRR" , FRR )
    hter_summary = tf.summary.scalar( "HTER" , HTER )

    # Only train the new head plus the first few MobilenetV2 blocks
    # (those feeding expanded_conv_1); the rest of the backbone stays frozen.
    #trainable_list = tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES )
    trainable_list = tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES , 'final_fc' ) + \
            tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES, 'MobilenetV2/Conv/' ) + \
            tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES, 'MobilenetV2/expanded_conv/' ) +\
            tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES, 'MobilenetV2/expanded_conv_1/' ) 

    global_step = tf.train.get_or_create_global_step()

    # Print the variables actually being optimized (sanity check).
    for i in trainable_list:
        print( i )

    train_op = tf.train.AdamOptimizer( learning_rate = \
            0.00001 ).minimize( total_loss , var_list = trainable_list , \
            global_step = global_step )

    summary_merged = tf.summary.merge( [ total_loss_summary, regularization_loss_summary, \
            data_loss_summary , acc_summary , far_summary, frr_summary, hter_summary ] )

    train_writer = tf.summary.FileWriter( './tflog/train_conv_1' , \
            graph = tf.get_default_graph() )
    saver = tf.train.Saver()

    # Initialize everything, then overwrite with the latest checkpoint if any.
    sess.run( tf.global_variables_initializer() )
    deal_with_ckpt( sess , ckpt_path )

    # build all the writers' dict: one TensorBoard writer per eval stream.
    w_dict = {}
    for k in test_writer_dict.keys():
        w_dict[k] = tf.summary.FileWriter( './tflog/' + k )

    # ------------------------- training loop -------------------------
    for i in range( 3000 ):
        train_images = []
        train_labels = []

        # Pull one batch from every configured training stream and merge them
        # into a single feed batch.
        for op in data_op_list:
            t_img, t_label = sess.run( op )
            train_images.append( t_img )
            train_labels.append( t_label )
        
        train_images = np.concatenate( train_images , axis = 0)
        train_labels = np.concatenate( train_labels , axis = 0)
        #train_images , train_labels = sess.run( train_data_ops )
        #train_images = generate_color_lbp( train_images )

        _ , ACC , LOSS, SUMMARY , ARGMAX , gst = sess.run( \
                [train_op , acc , loss , summary_merged , argmax , global_step ] , \
                feed_dict = { imgs: train_images , \
                label_placeholder : train_labels } )

        train_writer.add_summary( SUMMARY , gst )
        print( "iter = %d , loss = %f "  %( gst , LOSS ) )

        #for i in range( train_labels.shape[0] ):
        #    if train_labels[i] != ARGMAX[i]:
        #        print( '%d / %d samples with label = %d ' %( i, train_labels.shape[0] , train_labels[i] ) )

        # Checkpoint every 200 iterations.  NOTE(review): this also fires at
        # i == 0, saving an (almost) untrained model on the first pass.
        if i% 200 == 0:
            save_path = saver.save(sess, ckpt_path , global_step = global_step )
            print("Model saved in path: %s" % save_path)
       
        # Evaluate every 10 iterations on each held-out stream; metrics go to
        # that stream's own TensorBoard writer (no train_op fetched here, so
        # weights are not updated by eval batches).
        if i%10 == 0:
            for k in w_dict.keys():
                #test_images , test_labels = sess.run( test_data_ops )
                test_images , test_labels = sess.run( test_writer_dict[k] )
                #test_images = generate_color_lbp( test_images )

                ACC , LOSS , SUMMARY , gst= sess.run( \
                        [ acc , loss , summary_merged , global_step] , \
                        feed_dict = { imgs: test_images , \
                        label_placeholder : test_labels } )

                #test_writer.add_summary( SUMMARY , gst )
                w_dict[k].add_summary( SUMMARY , gst )
                #print( "accuracy = %f" % ACC )

    train_writer.close()
    #test_writer.close()
