import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim.python.slim.learning import train_step
#from fetchData import *
from data_util.streamer import *
from data_util.parser import *
import os
import numpy as np
from tensorflow.python import debug as tf_debug
import tensorflow_hub as hub
import sys
from skimage.feature import local_binary_pattern

# Root of the Oulu-NPU dataset; only referenced by the commented-out
# Oulu pipeline in __main__ — the active run streams HomeMadeData2 instead.
data_path = "/home/jh/working_data/anti-spoofing/Oulu_NPU"
# Target directory for the (currently disabled) SavedModel export block
# at the bottom of __main__.
export_path = './model_wholeScale_mobileNet_035_128'

def generate_hist( lbp , bits = 256 ):
    """
    Generate the normalized LBP histogram over the whole image.

    Args:
        lbp:  2-D array of per-pixel LBP codes, values expected in
              [0, bits) (256 bins matches the 8-point LBP used here).
        bits: number of histogram bins.

    Returns:
        A list of `bits` floats; entry i is the fraction of pixels whose
        LBP code equals i (entries sum to 1 for in-range codes).
    """
    h, w = lbp.shape
    # np.bincount tallies all pixels in one C-level pass instead of a
    # Python double loop; minlength pads the tail so short histograms
    # still have `bits` entries, and the [:bits] slice keeps the length
    # fixed even if an out-of-range code sneaks in.
    counts = np.bincount( lbp.astype( np.int64 ).ravel() , minlength = bits )[:bits]
    # Normalize by pixel count; .tolist() yields native Python floats,
    # matching the original list-of-floats return type.
    return ( counts / float( h * w ) ).tolist()

def generate_lbp( imgs ):
    """
    Compute a normalized LBP(P=8, R=1) histogram for each image in a batch.

    Args:
        imgs: array of shape (batch, H, W); assumed single-channel
              grayscale — TODO confirm with the data streamer.

    Returns:
        np.ndarray of shape (batch, 256), one histogram row per image.
    """
    histograms = [
        generate_hist( local_binary_pattern( img , P = 8 , R = 1.0 ) )
        for img in imgs
    ]
    return np.array( histograms )

def _gram_matrix( tensor ):
    """
    Flattened per-sample Gram matrix of a feature map.

    Args:
        tensor: 4-D tensor [batch, height, width, channels] with a
                statically known (non-batch) shape.

    Returns:
        2-D tensor [batch, channels * channels]: for each sample, the
        channel-by-channel inner products over all spatial positions,
        flattened row-major.
    """
    static_shape = tensor.get_shape()
    channels = int( static_shape[3] )
    spatial = int( static_shape[1] ) * int( static_shape[2] )

    # Collapse the spatial grid: each sample becomes (spatial, channels).
    features = tf.reshape( tensor , shape = [ -1 , spatial , channels ] )
    # gram[b] = features[b]^T . features[b]; transpose_a folds the
    # explicit tf.transpose into the batched matmul.
    gram = tf.matmul( features , features , transpose_a = True )

    return tf.reshape( gram , shape = [ -1 , channels * channels ] )

if __name__ == "__main__":
    tf.logging.set_verbosity(tf.logging.INFO)
    # Pin the process to a single GPU (physical device index 3).
    os.environ["CUDA_VISIBLE_DEVICES"] = "3"

    #oulu = Oulu_NPU( data_path )
    #train_data_ops = oulu.trainDataStream( 128 )
    #test_data_ops  = oulu.testDataStream ( 128 )

    # Two single-label streams per split: label 0 = genuine ("pos") faces,
    # label 1 = printed-photo attacks; batch size 64 each.
    # NOTE(review): both train and test splits are drawn via
    # testDataStream() — presumably it streams without augmentation/shuffle
    # differences; confirm against data_util.streamer.
    train1_ops = OneLabelData( "/home/jh/working_data/anti-spoofing/HomeMadeData2/100_pos/train", label =0 ).testDataStream( 64 )
    train2_ops = OneLabelData( "/home/jh/working_data/anti-spoofing/HomeMadeData2/100_printed/train_resized" , label =1).testDataStream( 64 )

    test1_ops = OneLabelData( "/home/jh/working_data/anti-spoofing/HomeMadeData2/100_pos/test", label =0 ).testDataStream( 64 )
    test2_ops = OneLabelData( "/home/jh/working_data/anti-spoofing/HomeMadeData2/100_printed/test_resized" , label =1).testDataStream( 64 )

    sess = tf.Session()

    #m = hub.Module( "/home/jh/working_data/models/tensorflow_hub/mobileNet_v2_140_224" , \
    #      trainable = True )

    graph = tf.get_default_graph()

    #imgs = graph.get_tensor_by_name( "module/hub_input/images:0" )
    # Model inputs: integer class labels and the 256-bin LBP histogram
    # produced by generate_lbp() — despite the name, `imgs` is fed
    # histograms, not raw images.
    label_placeholder = tf.placeholder( tf.int64 , [None] , name = "label" )
    imgs = tf.placeholder( tf.float32 , [ None, 256 ] , name = "lbp"  )

    #low_level_feature1 = graph.get_tensor_by_name( \
    #        "module/MobilenetV2/expanded_conv_6/expand/Relu6:0" )

    # Disabled experiment kept as a string literal: Gram-matrix features
    # from a MobileNetV2 hub module fed into an AM-Softmax head.
    """
    low_level_feature1 = graph.get_tensor_by_name( \
            "module/MobilenetV2/expanded_conv/output:0" )

    gram_matrix = _gram_matrix( low_level_feature1 )
    net = slim.batch_norm( gram_matrix )


    # AM Softmax implementation

    net_norm = tf.nn.l2_normalize( net , 1 , 1e-10 , name = 'net_norm' )
    net_shape = net.get_shape()
    
    kernel = slim.model_variable( "AM_kernel" , shape=[ int(net_shape[1]) , 2 ] , \
            initializer=tf.truncated_normal_initializer(stddev=0.1), \
            regularizer=slim.l2_regularizer(0.05) )

    s = 30
    m = 0.35
    kernel_norm = tf.nn.l2_normalize(kernel, 0, 1e-10, name='kernel_norm')
    cos_theta = tf.matmul( net_norm , kernel_norm )
    cos_theta = tf.clip_by_value( cos_theta , -1 , 1 )
    phi = cos_theta - m
    label_onehot = tf.one_hot( label_placeholder , 2 )
    adjust_theta = s * tf.where(tf.equal( label_onehot , 1 ), phi, cos_theta )

    """


    # Active model: a single fully-connected layer mapping the 256-bin
    # LBP histogram to 2-class logits (genuine vs. printed attack).
    net = slim.fully_connected( imgs , 2 , scope = "final_fc" )

    # Disabled experiment kept as a string literal: four learned soft
    # attention masks over a low-level feature map, per-mask Gram
    # matrices concatenated into the classifier input.
    """
    mask1 = slim.conv2d( low_level_feature1 , 1 , [1,1] )
    mask2 = slim.conv2d( low_level_feature1 , 1 , [1,1] )
    mask3 = slim.conv2d( low_level_feature1 , 1 , [1,1] )
    mask4 = slim.conv2d( low_level_feature1 , 1 , [1,1] )

    mask_concat = tf.concat( [mask1,mask2,mask3,mask4] , axis = -1 )
    mask_softmax = tf.nn.softmax( mask_concat , axis = -1 )
 
    mask1 = mask_softmax[:,:,:,0]
    mask2 = mask_softmax[:,:,:,1]
    mask3 = mask_softmax[:,:,:,2]
    mask4 = mask_softmax[:,:,:,3]

    mask1 = tf.expand_dims( mask1 , -1 )
    mask2 = tf.expand_dims( mask2 , -1 )
    mask3 = tf.expand_dims( mask3 , -1 )
    mask4 = tf.expand_dims( mask4 , -1 )

    # compute the new grouped feature

    masked_feature1 = tf.multiply( low_level_feature1 , mask1 )
    masked_feature2 = tf.multiply( low_level_feature1 , mask2 )
    masked_feature3 = tf.multiply( low_level_feature1 , mask3 )
    masked_feature4 = tf.multiply( low_level_feature1 , mask4 )

    gram1 = _gram_matrix( masked_feature1 )
    gram2 = _gram_matrix( masked_feature2 )
    gram3 = _gram_matrix( masked_feature3 )
    gram4 = _gram_matrix( masked_feature4 )


    gram_concat = tf.concat( [ gram1, gram2 , gram3, gram4 ] , axis = -1 )


    net = slim.batch_norm( gram_concat )

    #net = slim.batch_norm( gram_matrix )
    net = slim.fully_connected( net , 2 , scope = "final_fc" )


    #net = slim.fully_connected( llf1_avep , 2 , scope = "final_fc" )


    #sm = slim.softmax( net )
    """

    # Cross-entropy on raw logits (sparse labels: 0 or 1).
    loss = tf.losses.sparse_softmax_cross_entropy( \
            labels = label_placeholder , \
            logits = net , scope = "loss" )

    loss_summary = tf.summary.scalar( "loss" , loss )

    # Accuracy = mean agreement between argmax of logits and labels.
    argmax = tf.argmax( net , 1 , name = "argmax" )
    acc = tf.reduce_mean(\
            tf.cast( tf.equal ( label_placeholder , argmax ) , tf.float32 ) , name = "acc" )
    acc_summary = tf.summary.scalar( "accuracy" , acc )

    # Optimize every trainable variable (here: just final_fc weights/bias)
    # with Adam at a fixed 0.01 learning rate; the print loop below lists
    # what is actually being trained.
    trainable_list = tf.trainable_variables()
    for t in trainable_list:
        print( t )
    train_op = tf.train.AdamOptimizer( learning_rate = \
            0.01 ).minimize( loss , var_list = trainable_list )

    summary_merged = tf.summary.merge( [ loss_summary , acc_summary ] )

    # Separate TensorBoard writers so genuine-vs-attack test curves can be
    # overlaid against the training curve.
    train_writer = tf.summary.FileWriter( './tflog/train1' , graph = tf.get_default_graph() )
    #test_writer = tf.summary.FileWriter( './tflog/test' )

    test1_writer = tf.summary.FileWriter( './tflog/PosTest1' )
    test2_writer = tf.summary.FileWriter( './tflog/PrintedTest1' )

    saver = tf.train.Saver()
    sess.run( tf.global_variables_initializer() )

    for i in range( 10000 ):
        # One batch from each class per step; LBP histograms are computed
        # on the CPU with skimage, then the two classes are concatenated
        # into a single 128-sample training batch.
        trainI1, trainl1 = sess.run( train1_ops )
        trainI2, trainl2 = sess.run( train2_ops )

        train_lbp1 = generate_lbp( trainI1 )
        train_lbp2 = generate_lbp( trainI2 )

        trainLBP = np.concatenate( [train_lbp1, train_lbp2 ] , axis = 0 )
        trainL = np.concatenate( [trainl1 , trainl2 ] , axis = 0 )

        _ , ACC , LOSS, SUMMARY = sess.run( \
                [train_op , acc , loss , summary_merged ] , \
                feed_dict = { imgs: trainLBP , \
                label_placeholder : trainL } )

        train_writer.add_summary( SUMMARY , i )
        print( "iter = %d , loss = %f "  %( i , LOSS ) )

        # Checkpoint every 200 steps (overwrites the same path).
        if i% 200 == 0:
            save_path = saver.save(sess, "./tmp/model.ckpt")
            print("Model saved in path: %s" % save_path)

        # Every 50 steps: evaluate one batch of each test class
        # separately so per-class accuracy is visible.
        if i%50 == 0:
            ti1, tl1 = sess.run( test1_ops )
            ti2, tl2 = sess.run( test2_ops )

            t1_lbp = generate_lbp( ti1 )
            t2_lbp = generate_lbp( ti2 )

            ACC1 , LOSS1 , SUMMARY1 = sess.run( \
                    [ acc , loss , summary_merged ] , \
                    feed_dict = { imgs: t1_lbp , \
                    label_placeholder : tl1 } )

            ACC2 , LOSS2 , SUMMARY2 = sess.run( \
                    [ acc , loss , summary_merged ] , \
                    feed_dict = { imgs: t2_lbp , \
                    label_placeholder : tl2 } )

            test1_writer.add_summary( SUMMARY1 , i )
            test2_writer.add_summary( SUMMARY2 , i )

            #print( "accuracy = %f" % ACC )
            print( "real access accuracy = %f " % ACC1 )
            print( "printed attack accuracy = %f " % ACC2 )

    train_writer.close()
    #test_writer.close()

    # Disabled SavedModel export (string literal): would write a serving
    # signature mapping `imgs` -> `net` logits to export_path.
    """
    builder = tf.saved_model.builder.SavedModelBuilder(export_path)
    tensor_info_input  = tf.saved_model.utils.build_tensor_info( imgs )
    tensor_info_output = tf.saved_model.utils.build_tensor_info( net )

    prediction_signature = (
            tf.saved_model.signature_def_utils.build_signature_def(
                inputs  = { 'images' : tensor_info_input } , 
                outputs = { 'scores' : tensor_info_output} , 
                method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))

    builder.add_meta_graph_and_variables(
            sess, [tf.saved_model.tag_constants.SERVING],
            signature_def_map={
                'predict_images':
                prediction_signature,
                },
            main_op=tf.tables_initializer(),
            strip_default_attrs=True)
    builder.save()
    """
