import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim.python.slim.learning import train_step
from data_util.streamer import *
from data_util.parser import *
import os
import numpy as np
from tensorflow.python import debug as tf_debug
#import tensorflow_hub as hub
import sys
from skimage.feature import local_binary_pattern
import cv2

# Host-specific root directories of the two face anti-spoofing datasets.
# Only one is used at a time (see the __main__ block below).
data_path = "/home/jh/working_data/anti-spoofing/Oulu_NPU"
replay_path = "/home/jh/working_data/anti-spoofing/replay_attack/original_REPLAY_ATTACK/replayattack"

def noRepeatTrainDS( replay_attack , batch_size ):
    """Return a one-pass (non-repeating) next-batch op over the training split.

    Filters the training file list down to display attacks (print attacks
    excluded), then builds a batched tf.data pipeline over it.

    Args:
        replay_attack: dataset wrapper exposing _train_file, _train_split_dir,
            _filterByAttackType and _train_parser (defined in data_util).
        batch_size: number of samples per batch.

    Returns:
        The get_next() op of a one-shot iterator; raises OutOfRangeError
        when the split is exhausted.
    """
    src_file = replay_attack._train_file
    filtered_file = os.path.join( replay_attack._train_split_dir , 'train_Display.txt' )
    # Keep everything except print attacks; an empty Includes list means
    # "no include restriction" (helper defined in data_util).
    replay_attack._filterByAttackType( src_file , filtered_file ,
            Includes = [] , Excludes = ['print', ] )

    pipeline = ( tf.data.TextLineDataset( filtered_file )
                   .map( replay_attack._train_parser )
                   .batch( batch_size ) )
    return pipeline.make_one_shot_iterator().get_next()

def noRepeatDevDS( replay_attack , batch_size ):
    """Return a one-pass (non-repeating) next-batch op over the test split.

    Mirrors noRepeatTrainDS but reads the test file list and uses the
    test-time parser.

    Args:
        replay_attack: dataset wrapper exposing _test_file, _test_split_dir,
            _filterByAttackType and _test_parser (defined in data_util).
        batch_size: number of samples per batch.

    Returns:
        The get_next() op of a one-shot iterator; raises OutOfRangeError
        when the split is exhausted.
    """
    src_file = replay_attack._test_file
    filtered_file = os.path.join( replay_attack._test_split_dir , 'test_Display.txt' )
    # Keep everything except print attacks.
    replay_attack._filterByAttackType( src_file , filtered_file ,
            Includes = [] , Excludes = ['print', ] )

    pipeline = ( tf.data.TextLineDataset( filtered_file )
                   .map( replay_attack._test_parser )
                   .batch( batch_size ) )
    return pipeline.make_one_shot_iterator().get_next()

def getFeaturesAndLabels( noRepeatDS ):
    """Drain a one-shot dataset op and return all (features, labels) at once.

    Repeatedly runs `noRepeatDS` until the iterator is exhausted, converting
    every image batch to color-LBP features via generate_color_lbp().

    Args:
        noRepeatDS: an (images, labels) tensor pair produced by a one-shot
            iterator's get_next() (see noRepeatTrainDS / noRepeatDevDS).

    Returns:
        (features, labels): numpy arrays concatenated over all batches
        along axis 0.
    """
    feature_batches = []
    label_batches = []
    # `with` closes the session even if sess.run raises — the original
    # created a bare tf.Session() and leaked it.
    with tf.Session() as sess:
        while True:
            try:
                I, L = sess.run( noRepeatDS )
            except tf.errors.OutOfRangeError:
                # One-shot iterator exhausted: every sample has been read.
                break
            feature_batches.append( generate_color_lbp( I ) )
            label_batches.append( L )
    # Concatenate once at the end; concatenating inside the loop (as the
    # original did) copies the whole accumulated array every iteration.
    features = np.concatenate( feature_batches , axis = 0 )
    labels = np.concatenate( label_batches , axis = 0 )
    return features , labels

def dataOP( noRepeatDS , batch_size ):
    """Materialize a one-pass dataset into an endlessly repeating one.

    Drains `noRepeatDS` once to precompute color-LBP features in memory,
    then serves them as a repeating, batched, prefetched pipeline.

    Args:
        noRepeatDS: (images, labels) op from a one-shot iterator.
        batch_size: number of samples per served batch.

    Returns:
        The get_next() op of a one-shot iterator over the in-memory data.
    """
    features , labels = getFeaturesAndLabels( noRepeatDS )

    pipeline = ( tf.data.Dataset
                   .from_tensor_slices( (features, labels) )
                   .repeat()
                   .batch( batch_size )
                   .prefetch( 10 ) )
    return pipeline.make_one_shot_iterator().get_next()

def generate_hist( lbp , bits = 59 ):
    """Return the normalized histogram of an LBP code image.

    Args:
        lbp: 2-D array of integer-valued LBP codes; every code must lie in
            [0, bits). For 8-point 'nri_uniform' LBP there are 59 codes,
            matching the default.
        bits: number of histogram bins.

    Returns:
        1-D float64 array of length `bits`; bin i holds the fraction of
        pixels whose code equals i (sums to 1 for non-empty input).
    """
    h, w = lbp.shape
    codes = lbp.astype( np.int64 ).ravel()
    if codes.size and codes.max() >= bits:
        # The original per-pixel loop raised IndexError on out-of-range
        # codes; keep that failure loud instead of silently truncating.
        raise IndexError( "LBP code out of range for %d bins" % bits )
    # np.bincount does one C-level pass instead of the original O(h*w)
    # Python double loop.
    hist = np.bincount( codes , minlength = bits ).astype( np.float64 )
    return hist / ( 1. * h * w )

def generate_color_lbp( imgs ):
    """Compute a 6-channel color-LBP feature vector per image.

    Each RGB image is converted to HSV and YCrCb; for each of the six
    resulting channels an 8-point, radius-1 'nri_uniform' LBP histogram
    (59 bins, via generate_hist) is computed, and the six histograms are
    concatenated into one 6*59 = 354-dim feature.

    Args:
        imgs: array of shape (batch, H, W, 3) in RGB channel order (images
            come from TensorFlow's decoder, which produces RGB); dtype must
            be accepted by cv2.cvtColor — presumably uint8, TODO confirm.

    Returns:
        Array of shape (batch, 354) with one feature row per input image.
    """
    hist_list = []

    for img in imgs:
        # NOTE: OpenCV's YCrCb conversion serves as the "YUV" color space
        # here, exactly as in the original implementation.
        channels = ( list( cv2.split( cv2.cvtColor( img , cv2.COLOR_RGB2HSV ) ) )
                   + list( cv2.split( cv2.cvtColor( img , cv2.COLOR_RGB2YCrCb ) ) ) )

        # One LBP histogram per channel; the original spelled this out six
        # times (lbp1..lbp6 / hist1..hist6) — same computation, same order.
        hists = [ generate_hist( local_binary_pattern( ch , P = 8 , R = 1.0 ,
                                                       method = 'nri_uniform' ) )
                  for ch in channels ]

        hist_list.append( np.concatenate( hists , axis = 0 ) )

    return np.array( hist_list )

if __name__ == "__main__":
    tf.logging.set_verbosity(tf.logging.INFO)
    # Restrict TensorFlow to a single GPU on this host.
    os.environ["CUDA_VISIBLE_DEVICES"] = "3"

    # Dataset wrapper from data_util (imported via *); presumably exposes
    # the *DisplayDataStream helpers used below — defined outside this file.
    oulu = Oulu_NPU( data_path )
    #oulu = REPLAY_ATTACK( replay_path )
    #train_data_ops = oulu.trainDisplayDataStream( 128 )
    #test_data_ops  = oulu.devDisplayDataStream ( 128 )

    # Precompute color-LBP features once per split, then serve them as
    # endlessly repeating in-memory batches of 128 (see dataOP above).
    train_data_ops = dataOP( oulu.trainDisplayDataStream( 128 ) , 128 )
    test_data_ops  = dataOP( oulu.devDisplayDataStream( 128 ) , 128 )

    sess = tf.Session()

    """
    for i in range( 15 ):
        img1, label1 = sess.run( train_data_ops )
        img2, label2 = sess.run( train_data_ops2 )

        img2 = generate_color_lbp( img2 )

        # compare img1, img2, label1, label2
        dIMG = img1 - img2
        dLABEL = label1 - label2

        for i in range(128):
            print( dLABEL[i] )
            print( "%f  %f  %f"%( dIMG[i][0], dIMG[i][-1] , dIMG[i][100] ) )

    sys.exit()
    """

    graph = tf.get_default_graph()

    # Inputs: integer class labels and 354-dim color-LBP feature vectors
    # (6 channels x 59 histogram bins — see generate_color_lbp).
    label_placeholder = tf.placeholder( tf.int64 , [None] , name = "label" )
    imgs = tf.placeholder( tf.float32 , [ None, 59*6 ] , name = "lbp"  )

    # Model: a single fully-connected layer producing 2-class logits.
    net = slim.fully_connected( imgs , 2 , scope = "final_fc" )

    loss = tf.losses.sparse_softmax_cross_entropy( \
            labels = label_placeholder , \
            logits = net , scope = "loss" )

    loss_summary = tf.summary.scalar( "loss" , loss )

    # Accuracy: fraction of predictions (argmax over logits) matching labels.
    argmax = tf.argmax( net , 1 , name = "argmax" )
    acc = tf.reduce_mean( tf.cast( tf.equal ( label_placeholder , argmax ) , \
            tf.float32 ) , name = "acc" )
    acc_summary = tf.summary.scalar( "accuracy" , acc )

    # Print trainable variables for inspection, then optimize them all.
    trainable_list = tf.trainable_variables()
    for t in trainable_list:
        print( t )
    train_op = tf.train.AdamOptimizer( learning_rate = \
            0.001 ).minimize( loss , var_list = trainable_list )

    summary_merged = tf.summary.merge( [ loss_summary , acc_summary ] )

    # Separate TensorBoard writers for train and test curves.
    train_writer = tf.summary.FileWriter( './tflog/train' , graph = graph )
    test_writer = tf.summary.FileWriter( './tflog/test' )

    saver = tf.train.Saver()
    sess.run( tf.global_variables_initializer() )

    for i in range( 30000 ):
        train_IMG, train_LABEL = sess.run( train_data_ops )
        #train_COLOR_LBP = generate_color_lbp( train_IMG )

        _ , ACC , LOSS, SUMMARY = sess.run( \
                [train_op , acc , loss , summary_merged ] , \
                feed_dict = { imgs: train_IMG , \
                label_placeholder : train_LABEL } )

        train_writer.add_summary( SUMMARY , i )
        print( "iter = %d , loss = %f "  %( i , LOSS ) )

        # Checkpoint every 100 iterations (same path, overwritten each time).
        if i% 100 == 0:
            save_path = saver.save(sess, "./tflog/MODELS/model.ckpt")
            print("Model saved in path: %s" % save_path)

        # Evaluate one test batch every 20 iterations.
        if i%20 == 0:
            test_IMG, test_LABEL = sess.run( test_data_ops )
            #test_COLOR_LBP = generate_color_lbp( test_IMG )

            ACC , LOSS , SUMMARY = sess.run( \
                    [ acc , loss , summary_merged ] , \
                    feed_dict = { imgs: test_IMG , \
                    label_placeholder : test_LABEL } )

            test_writer.add_summary( SUMMARY , i )

            print( "accuracy = %f" % ACC )

    train_writer.close()
    test_writer.close()