import tensorflow as tf
import tensorflow.contrib.slim as slim
from data_util.streamer import *
from data_util.parser import *
import os
import numpy as np
from tensorflow.python import debug as tf_debug
import tensorflow_hub as hub
import sys
from skimage.feature import local_binary_pattern

# Root directory of the Oulu-NPU anti-spoofing dataset. Defined once here and
# reused below (the original duplicated this literal at every call site).
data_path = "/home/jh/working_data/anti-spoofing/Oulu_NPU"

if __name__ == "__main__":
    # ------------------------------------------------------------------
    # Fine-tune an AM-softmax (additive-margin) 2-class head on top of a
    # low-level feature map of a pretrained MobileNetV2 (TF-Hub) for face
    # anti-spoofing.  Label convention in this file: 0 = live, 1 = print
    # attack (see the OneLabelData calls in the project history).
    # ------------------------------------------------------------------
    tf.logging.set_verbosity(tf.logging.INFO)
    os.environ["CUDA_VISIBLE_DEVICES"] = "3"  # pin to one GPU

    # Batched (image, label) tensor pairs from the project streamer.
    # NOTE(review): assumes the streamer yields 224x224 images matching the
    # hub module's input placeholder — TODO confirm against data_util.streamer.
    train_ops = Oulu_NPU(data_path).trainDataStream(128)
    test_ops = Oulu_NPU(data_path).testDataStream(128)

    # NOTE(review): the original also built HomeMadeData2 streams
    # (TRAIN1/2_OPS, TEST1/2_OPS, outData_test_ops) that were referenced only
    # from commented-out code; they have been removed as dead code.

    # Pretrained backbone; trainable=True so its weights are fine-tuned
    # together with the new head.  (Originally bound to `m`, which was later
    # shadowed by the AM-softmax margin — renamed to avoid that bug trap.)
    module = hub.Module(
        "/home/jh/working_data/models/tensorflow_hub/mobileNet_v2_140_224",
        trainable=True)

    sess = tf.Session()
    graph = tf.get_default_graph()

    # Reuse the hub module's own input placeholder as the feed target and tap
    # an early feature map (output of the first expanded conv block).
    imgs = graph.get_tensor_by_name("module/hub_input/images:0")
    label_placeholder = tf.placeholder(tf.int64, [None], name="label")
    low_level_feature1 = graph.get_tensor_by_name(
        "module/MobilenetV2/expanded_conv/output:0")

    # Global average pool over the full 112x112 spatial extent -> (batch, 24)
    # embedding, then L2-normalize so the matmul below yields cosine values.
    llf1_avep = slim.avg_pool2d(low_level_feature1, [112, 112])
    llf1_avep = tf.squeeze(llf1_avep, [1, 2])
    llf1_avep = tf.nn.l2_normalize(llf1_avep, 1, 1e-10)

    # AM-softmax head: column-normalized class weights give cos(theta);
    # the margin is subtracted only at the ground-truth class, then scaled.
    kernel = slim.model_variable(
        "AM_kernel", shape=[24, 2],
        initializer=tf.truncated_normal_initializer(stddev=0.1),
        regularizer=slim.l2_regularizer(0.05))

    scale = 30    # logit scale (s in the AM-softmax formulation)
    margin = 0.3  # additive margin (m) — was `m`, shadowing the hub module
    kernel_norm = tf.nn.l2_normalize(kernel, 0, 1e-10, name='kernel_norm')
    cos_theta = tf.matmul(llf1_avep, kernel_norm)
    cos_theta = tf.clip_by_value(cos_theta, -1, 1)
    phi = cos_theta - margin
    label_onehot = tf.one_hot(label_placeholder, 2)
    # Margin applied where the one-hot label is 1, plain cosine elsewhere.
    adjust_theta = scale * tf.where(tf.equal(label_onehot, 1), phi, cos_theta)

    loss = tf.losses.sparse_softmax_cross_entropy(
        labels=label_placeholder,
        logits=adjust_theta, scope="loss")
    loss_summary = tf.summary.scalar("loss", loss)

    # Accuracy is measured on the margin-free cosine logits.
    argmax = tf.argmax(cos_theta, 1, name="argmax")
    acc = tf.reduce_mean(
        tf.cast(tf.equal(label_placeholder, argmax), tf.float32), name="acc")
    acc_summary = tf.summary.scalar("accuracy", acc)

    trainable_list = tf.trainable_variables()
    for t in trainable_list:
        print(t)
    train_op = tf.train.AdamOptimizer(
        learning_rate=0.0001).minimize(loss, var_list=trainable_list)

    summary_merged = tf.summary.merge([loss_summary, acc_summary])

    train_writer = tf.summary.FileWriter('./tflog/train',
                                         graph=tf.get_default_graph())
    test_writer = tf.summary.FileWriter('./tflog/test')

    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())

    try:
        for i in range(10000):
            t_I, t_L = sess.run(train_ops)

            _, ACC, LOSS, SUMMARY = sess.run(
                [train_op, acc, loss, summary_merged],
                feed_dict={imgs: t_I,
                           label_placeholder: t_L})

            train_writer.add_summary(SUMMARY, i)
            print("iter = %d , loss = %f " % (i, LOSS))

            # Checkpoint every 200 steps (including step 0).
            if i % 200 == 0:
                save_path = saver.save(sess, "./tmp/model.ckpt")
                print("Model saved in path: %s" % save_path)

            # Evaluate one held-out batch every 50 steps.
            if i % 50 == 0:
                t1_I, t1_L = sess.run(test_ops)

                ACC1, LOSS1, SUMMARY1 = sess.run(
                    [acc, loss, summary_merged],
                    feed_dict={imgs: t1_I,
                               label_placeholder: t1_L})

                test_writer.add_summary(SUMMARY1, i)
                print("test test test accuracy = %f " % ACC1)
    finally:
        # BUG FIX: the original never closed test_writer (its close() was
        # commented out) nor the session, so buffered summaries could be
        # lost on an abnormal exit.
        train_writer.close()
        test_writer.close()
        sess.close()