import tensorflow as tf
import tensorflow.contrib.slim as slim
from data_util.streamer import Oulu_NPU, OneLabelData, HomeMadeData
from data_util.parser import *
import os
import numpy as np
import tensorflow_hub as hub
import sys

opj = os.path.join

# Every dataset lives under one anti-spoofing data root; derive the
# individual dataset paths from it instead of repeating the prefix.
_DATA_ROOT = "/home/jh/working_data/anti-spoofing"

oulu_path = opj( _DATA_ROOT, "Oulu_NPU" )
nuaa_path = opj( _DATA_ROOT, "nuaa", "cropped_face_only_NUAA" )
msu_path = opj( _DATA_ROOT, "MSU_USSA_Public" )
hotel_path = opj( _DATA_ROOT, "hotel_cheating" )

train_oulu = Oulu_NPU( oulu_path ).trainDataStream( 64 )

# NOTE(review): this block used to run unconditionally and ended with
# sys.exit(), which made everything below this point dead code.  It looks
# like leftover stream-visualisation debugging, so it is now gated behind
# a flag; set it to True to restore the old inspect-and-quit behaviour.
DEBUG_SHOW_DEV_STREAM = False

if DEBUG_SHOW_DEV_STREAM:
    OULU = Oulu_NPU( oulu_path )
    dtt = lambda: OULU.devDisplayDataStream( 20 )
    sess = tf.Session()
    OULU.showStream( sess , dtt )
    sys.exit()


# Hotel-cheating training streams (live = 0, spoof = 1).
# NOTE(review): these go through testDataStream rather than
# trainDataStream — presumably intentional for OneLabelData; confirm.
train_hotel_pos = OneLabelData( opj(hotel_path,"train_pos"), label=0 ).testDataStream( 64 )
train_hotel_neg = OneLabelData( opj(hotel_path,"train_neg"), label=1 ).testDataStream( 64 )

# NUAA home-made dataset training stream.
train_nuaa = HomeMadeData( nuaa_path ).trainDataStream( 64 )

# MSU-USSA: one live stream plus eight spoof streams, one per capture
# device / attack medium.
train_msu_pos = OneLabelData( opj(msu_path,'train_LiveSubjectsImages'), label=0 ).testDataStream( 64 )

_TRAIN_MSU_SPOOF_DIRS = (
    'train_MacBook_FrontCamera',
    'train_MacBook_RearCamera',
    'train_Nexus_FrontCamera',
    'train_Nexus_RearCamera',
    'train_PrintedPhoto_FrontCamera',
    'train_PrintedPhoto_RearCamera',
    'train_Tablet_FrontCamera',
    'train_Tablet_RearCamera',
)
( train_msu_neg1, train_msu_neg2, train_msu_neg3, train_msu_neg4,
  train_msu_neg5, train_msu_neg6, train_msu_neg7, train_msu_neg8 ) = [
    OneLabelData( opj(msu_path, 'SpoofSubjectImages', d), label=1 ).testDataStream( 64 )
    for d in _TRAIN_MSU_SPOOF_DIRS ]

# Evaluation streams, mirroring the training streams above
# (live = 0, spoof = 1).
test_oulu = Oulu_NPU( oulu_path ).testDataStream( 64 )
test_hotel_pos = OneLabelData( opj(hotel_path,"test_pos"), label=0 ).testDataStream( 64 )
test_hotel_neg = OneLabelData( opj(hotel_path,"test_neg"), label=1 ).testDataStream( 64 )
test_nuaa = HomeMadeData( nuaa_path ).testDataStream( 64 )
test_msu_pos = OneLabelData( opj(msu_path,'test_LiveSubjectsImages'), label=0 ).testDataStream( 64 )

_TEST_MSU_SPOOF_DIRS = (
    'test_MacBook_FrontCamera',
    'test_MacBook_RearCamera',
    'test_Nexus_FrontCamera',
    'test_Nexus_RearCamera',
    'test_PrintedPhoto_FrontCamera',
    'test_PrintedPhoto_RearCamera',
    'test_Tablet_FrontCamera',
    'test_Tablet_RearCamera',
)
( test_msu_neg1, test_msu_neg2, test_msu_neg3, test_msu_neg4,
  test_msu_neg5, test_msu_neg6, test_msu_neg7, test_msu_neg8 ) = [
    OneLabelData( opj(msu_path, 'SpoofSubjectImages', d), label=1 ).testDataStream( 64 )
    for d in _TEST_MSU_SPOOF_DIRS ]


# Earlier experiments trained on the full multi-dataset mix; kept for
# reference when re-enabling the full configuration.
#train_datasss = [ train_oulu , train_hotel_pos , train_hotel_neg , train_nuaa , \
#        train_msu_pos , train_msu_neg1 , train_msu_neg2 , train_msu_neg3 , \
#        train_msu_neg4 , train_msu_neg5 , train_msu_neg6 , train_msu_neg7 , \
#        train_msu_neg8 ]

#train_datasss = [ train_oulu ,  train_nuaa , \
#        train_msu_pos , train_msu_neg1 , train_msu_neg2 , train_msu_neg3 , \
#        train_msu_neg4 , train_msu_neg5 , train_msu_neg6 , train_msu_neg7 , \
#        train_msu_neg8 ]

# Current experiment: train on the hotel-cheating streams only.
train_datasss = [ train_hotel_neg , train_hotel_pos]

# NOTE(review): the full 13-stream test list below was assigned and then
# immediately overwritten by the two-stream list — a dead store.  It is
# kept as a comment for the full-evaluation configuration.
#test_datasss = [ test_oulu , test_nuaa , test_hotel_pos, test_hotel_neg, \
#        test_msu_pos , test_msu_neg1 , test_msu_neg2 , test_msu_neg3 , \
#        test_msu_neg4 , test_msu_neg5 , test_msu_neg6, test_msu_neg7 , \
#        test_msu_neg8 ]

test_datasss = [test_hotel_neg , test_hotel_pos]

# One FileWriter per evaluation stream.  Only the first two are consumed
# while test_datasss is restricted to the hotel streams, but all writers
# are created so the full 13-stream evaluation can be re-enabled easily.
test1_writer = tf.summary.FileWriter( './tflog/Test1' )
test2_writer = tf.summary.FileWriter( './tflog/Hotel_Pos_Test2' )
test3_writer = tf.summary.FileWriter( './tflog/Hotel_Neg_Test3' )
test4_writer = tf.summary.FileWriter( './tflog/Test4' )
test5_writer = tf.summary.FileWriter( './tflog/Test5' )
test6_writer = tf.summary.FileWriter( './tflog/Test6' )
test7_writer = tf.summary.FileWriter( './tflog/Test7' )
test8_writer = tf.summary.FileWriter( './tflog/Test8' )
test9_writer = tf.summary.FileWriter( './tflog/Test9' )
test10_writer = tf.summary.FileWriter( './tflog/Test10' )
test11_writer = tf.summary.FileWriter( './tflog/Test11' )
test12_writer = tf.summary.FileWriter( './tflog/Test12' )
test13_writer = tf.summary.FileWriter( './tflog/Test13' )

# NOTE(review): the full writer list below was assigned and immediately
# overwritten — a dead store; kept as a comment for the full-evaluation
# configuration (its order must match test_datasss).
#test_writersss = [ test1_writer , test2_writer, test3_writer, test4_writer, \
#        test5_writer, test6_writer, test7_writer, test8_writer, test9_writer, \
#        test10_writer, test11_writer, test12_writer, test13_writer ]

test_writersss = [test1_writer, test2_writer ]

if __name__ == "__main__":
    tf.logging.set_verbosity( tf.logging.INFO )
    # Pin the job to a single GPU.
    os.environ["CUDA_VISIBLE_DEVICES"] = "3"

    #train_ops = Oulu_NPU( "/home/jh/working_data/anti-spoofing/Oulu_NPU" \
    #        ).trainDataStream( 128 )

    #test_ops = Oulu_NPU( "/home/jh/working_data/anti-spoofing/Oulu_NPU" \
    #        ).testDataStream( 128 )

    #TRAIN1_OPS = OneLabelData( "/home/jh/working_data/anti-spoofing/hotel_cheating/train_pos" , label = 0 ).testDataStream( 64 )
    #TRAIN2_OPS = OneLabelData( "/home/jh/working_data/anti-spoofing/hotel_cheating/train_neg" , label =1 ).testDataStream( 64 )

    #TEST1_OPS = OneLabelData( "/home/jh/working_data/anti-spoofing/hotel_cheating/test_pos" , label = 0 ).testDataStream( 64 )
    #TEST2_OPS = OneLabelData( "/home/jh/working_data/anti-spoofing/hotel_cheating/test_neg" , label =1 ).testDataStream( 64 )


    # Load a locally cached MobileNetV2 (width multiplier 1.40, 224x224)
    # TF-Hub module; trainable=True exposes its variables for fine-tuning.
    m = hub.Module( "/home/jh/working_data/models/tensorflow_hub/mobileNet_v2_140_224" \
            , trainable = True )

    sess = tf.Session()
    graph = tf.get_default_graph()

    # set all tensorflow placeholders
    # The hub module already registered its image input in the default
    # graph, so fetch that placeholder by name instead of creating one.
    imgs = graph.get_tensor_by_name( "module/hub_input/images:0" )
    label_placeholder = tf.placeholder( tf.int64 , [None] , name = "label" )
    #imgs = tf.placeholder( tf.float32 , [ None, 224, 224, 1  ] , name = "img"  )

    # Model 1: a hand-crafted branch based on Local Binary Pattern (LBP)
    # features.  The LBP extraction itself has no trainable weights; only
    # the final fully-connected classifier is trained.

    gray_imgs = tf.image.rgb_to_grayscale( imgs )

    # Eight 3x3 difference kernels: each subtracts the centre pixel from
    # one of its eight neighbours.  The flattened-kernel index of the +1
    # entry per neighbour (centre is index 4, weight -1) follows the same
    # order as the classic 128..1 LBP bit weights below.
    _NEIGHBOUR_IDX = ( 0, 1, 2, 5, 8, 7, 6, 3 )
    diff_maps = []
    for idx in _NEIGHBOUR_IDX:
        kernel_vals = [ 0 ] * 9
        kernel_vals[ idx ] = 1
        kernel_vals[ 4 ] = -1
        kernel = tf.constant( kernel_vals , shape = [3, 3, 1, 1] , dtype = tf.float32 )
        diff_maps.append( tf.nn.conv2d( gray_imgs , kernel ,
                strides = [1, 1, 1, 1] , padding = 'SAME' ) )

    # Threshold each neighbour difference: 1 where neighbour >= centre.
    stacked = tf.concat( diff_maps , axis = -1 )
    bits = tf.where( tf.less( stacked , 0 ) ,
            tf.zeros_like( stacked ) , tf.ones_like( stacked ) )

    # Collapse the 8 bit-planes into one 0..255 LBP code per pixel.
    bit_weights = tf.constant( [ 128, 64, 32, 16, 8, 4, 2, 1 ] ,
            shape = [1, 1, 1, 8] , dtype = tf.float32 )
    codes = tf.to_int32( tf.reduce_sum( tf.multiply( bits , bit_weights ) ,
            axis = -1 ) )

    # Per-image 256-bin histogram of the LBP codes, L2-normalised.
    def _histogram( tensor ):
        return tf.bincount( tensor , minlength = 256 , dtype = tf.float32 )

    f_code = tf.map_fn( _histogram , codes , dtype = tf.float32 )
    f_code = f_code / tf.sqrt( tf.reduce_sum( tf.square( f_code ) ,
            axis = 1 , keepdims = True ) )
    f_code.set_shape( [None , 256] )

    # Two-way (live / spoof) classifier on the histogram.
    lbp_net = slim.fully_connected( f_code , 2 , scope = "lbp_final_fc" )

    lbp_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels = label_placeholder ,
            logits = lbp_net , name = "lbp_loss" )

    # Model 2: a CNN branch on top of MobileNetV2 features.  It reads the
    # first expanded-conv block's output from the hub graph, global
    # average-pools it and classifies with a single FC layer; because the
    # hub module was loaded with trainable=True, these early layers can
    # be fine-tuned.

    shallow_feat = graph.get_tensor_by_name(
            "module/MobilenetV2/expanded_conv/output:0" )
    pooled = slim.avg_pool2d( shallow_feat , [ 112, 112 ] )
    pooled = tf.squeeze( pooled , [1,2] )

    cnn_net = slim.fully_connected( pooled , 2 , scope = "cnn_final_fc" )

    cnn_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels = label_placeholder ,
            logits = cnn_net , name = "cnn_loss" )

    # Per-model reliability weights.  Earlier experiments derived them
    # from the relative branch losses (kept below for reference); the
    # current experiment gives the LBP branch all of the weight and the
    # CNN branch none.

    #loss_epsilon = 1e-10

    #lbp_model_confidence = ( cnn_loss + 0.5 * loss_epsilon ) / \
    #        ( cnn_loss + lbp_loss + loss_epsilon )
    #cnn_model_confidence = ( lbp_loss + 0.5 * loss_epsilon ) / \
    #        ( cnn_loss + lbp_loss + loss_epsilon )

    # NOTE(review): this used to build a throwaway slim.fully_connected
    # layer just to obtain a [batch, 1] tensor to feed ones_like, which
    # added a pointless trainable layer to the graph.  Build the ones
    # directly from the dynamic batch size instead.
    batch_size = tf.shape( f_code )[0]
    lbp_model_confidence = tf.ones( [ batch_size , 1 ] , dtype = tf.float32 )
    cnn_model_confidence = 1. - lbp_model_confidence

    # Mixture of the two experts' posteriors:
    # P( x = x1 ) = P( m1 ) * P( x=x1 | m1) + P(m2) * P( x=x1 | m2)

    lbp_prob = tf.nn.softmax( lbp_net , axis = -1 )
    cnn_prob = tf.nn.softmax( cnn_net , axis = -1 )

    lbp_prob_weighted = tf.multiply( lbp_prob , lbp_model_confidence )
    cnn_prob_weighted = tf.multiply( cnn_prob , cnn_model_confidence )

    combined_prob = lbp_prob_weighted + cnn_prob_weighted


    # compute the loss
    # while the combined prob already output probability, 
    # we use my own-defined cross entropy loss to compute

    combined_prob_before_softmax = tf.math.log( combined_prob + 1e-10 )

    loss = tf.losses.sparse_softmax_cross_entropy( \
            labels = label_placeholder , \
            logits = combined_prob_before_softmax , scope = "loss" )

    #loss = tf.losses.sparse_softmax_cross_entropy( \
    #        labels = label_placeholder , \
    #        logits = cnn_net , scope = "loss" )

    # log everything
    loss_summary = tf.summary.scalar( "loss" , loss )
    argmax = tf.argmax( combined_prob , 1 , name = "argmax" )
    acc = tf.reduce_mean(\
            tf.cast( tf.equal ( label_placeholder , argmax ) , tf.float32 ) , name = "acc" )
    acc_summary = tf.summary.scalar( "accuracy" , acc )

    trainable_list = tf.trainable_variables()
    for t in trainable_list:
        print( t )
    train_op = tf.train.AdamOptimizer( learning_rate = \
            0.01 ).minimize( loss , var_list = trainable_list )

    summary_merged = tf.summary.merge( [ loss_summary , acc_summary ] )

    train_writer = tf.summary.FileWriter( './tflog/train' , graph = tf.get_default_graph() )
    #test_writer = tf.summary.FileWriter( './tflog/OnlyCNN_frozen_test' )


    saver = tf.train.Saver()
    sess.run( tf.global_variables_initializer() )

    # Main training loop.
    for i in range( 10000 ):
        # Draw one batch from each hotel stream and merge them so every
        # step sees both classes.
        t1_I, t1_L = sess.run( train_hotel_neg )
        t2_I, t2_L = sess.run( train_hotel_pos )

        t_I = np.concatenate( [t1_I , t2_I] , axis = 0 )
        t_L = np.concatenate( [t1_L , t2_L] , axis = 0 )

        #t_I, t_L = sess.run( train_datasss[i%len(train_datasss)] )

        _ , ACC , LOSS, SUMMARY = sess.run( \
                [train_op , acc , loss , summary_merged ] , \
                feed_dict = { imgs: t_I , \
                label_placeholder : t_L } )

        train_writer.add_summary( SUMMARY , i )
        print( "iter = %d , loss = %f "  %( i , LOSS ) )

        # Checkpoint every 200 steps (overwrites the same path).
        if i% 200 == 0:
            save_path = saver.save(sess, "./tmp/model.ckpt")
            print("Model saved in path: %s" % save_path)

        # Every 50 steps: print diagnostics on one OULU test batch, then
        # write loss/accuracy summaries for each configured test stream.
        if i%50 == 0:
            t_I, t_L = sess.run( test_oulu )

            ACC, LBP_CONFI, CNN_CONFI, LBP_PROB, CNN_PROB, COMBINED_PROB, \
                    ARGMAX  = sess.run( [ acc, lbp_model_confidence, \
                    cnn_model_confidence, lbp_prob, cnn_prob, combined_prob, \
                    argmax] , feed_dict = { imgs: t_I , \
                    label_placeholder : t_L } )

            print( "lbp confidence of model" )
            print( LBP_CONFI[0:10] )
            print( "cnn confidence of model" )
            print( CNN_CONFI[0:10] )
            print( "lbp pro " )
            print( LBP_PROB[0:10] )
            print( "cnn pro " )
            print( CNN_PROB[0:10] )
            print( "combined confidence " )
            print( COMBINED_PROB[0:10] )
            print( "argmax" )
            print( ARGMAX[0:10] )
            print( "label" )
            print( t_L[0:10] )
            print( "acc: %f" %( ACC ))

            # One summary write per (stream, writer) pair; the lists are
            # paired positionally, so their orders must match.
            for test_ops, writer in zip( test_datasss , test_writersss ):
                t_I , t_L = sess.run( test_ops )
            #t1_I, t1_L = sess.run( TEST1_OPS )
            #t2_I, t2_L = sess.run( TEST2_OPS )

                ACC , LOSS , SUMMARY = sess.run( \
                        [ acc , loss , summary_merged ] , \
                        feed_dict = { imgs: t_I , \
                        label_placeholder : t_L } )

                writer.add_summary( SUMMARY , i )
            #test2_writer.add_summary( SUMMARY2 , i )

            #print( "pos pos pos accuracy = %f " % ACC1 )
            #print( "neg neg neg accuracy = %f " % ACC2 )

    train_writer.close()
    #test_writer.close()
