import tensorflow as tf
import tensorflow.contrib.slim as slim
from data_util.streamer import Oulu_NPU, OneLabelData, HomeMadeData
from data_util.parser import *
import os
import numpy as np
import sys

# Shorthand for os.path.join, used for the dataset paths below.
opj = os.path.join

# Root of the "hotel_cheating" anti-spoofing dataset; hard-coded local path.
hotel_path = "/home/jh/working_data/anti-spoofing/hotel_cheating"

# Training streams: batches of 32 (images, labels).  Positive samples are
# labelled 0 and negative samples 1 -- this convention is relied on by the
# sparse-softmax loss and the per-class evaluation in the training loop.
# NOTE(review): testDataStream is used for the *training* splits as well --
# presumably it just yields batches without augmentation; confirm in
# data_util.streamer.OneLabelData.
train_hotel_pos =OneLabelData( opj(hotel_path,"train_pos"),label=0).testDataStream(32)
train_hotel_neg =OneLabelData( opj(hotel_path,"train_neg"),label=1).testDataStream(32)

# Evaluation streams: batches of 64, same label convention as above.
test_hotel_pos =OneLabelData( opj(hotel_path,"test_pos"),label=0).testDataStream(64)
test_hotel_neg =OneLabelData( opj(hotel_path,"test_neg"),label=1).testDataStream(64)

if __name__ == "__main__":
    tf.logging.set_verbosity(tf.logging.INFO)
    os.environ["CUDA_VISIBLE_DEVICES"] = "2"  # pin the process to one GPU

    sess = tf.Session()

    # --- Inputs -------------------------------------------------------------
    imgs = tf.placeholder(tf.float32, [None, 224, 224, 3], name='img')
    label_placeholder = tf.placeholder(tf.int64, [None], name="label")

    # The descriptor below works on intensity only, so collapse RGB first.
    gray_imgs = tf.image.rgb_to_grayscale(imgs)

    # --- LBP-style descriptor ----------------------------------------------
    # Each 3x3 kernel computes (one neighbour - centre) for one of the 8
    # neighbours.  In the row-major flat 3x3 layout, index 4 is the centre;
    # the neighbour indices below walk clockwise from the top-left corner,
    # matching the bit weights 128, 64, ..., 1 applied further down.
    # (Replaces eight copy-pasted w1..w8 / f1..f8 constant+conv pairs.)
    neighbor_flat_indices = [0, 1, 2, 5, 8, 7, 6, 3]
    diffs = []
    for idx in neighbor_flat_indices:
        taps = [0.0] * 9
        taps[4] = -1.0
        taps[idx] = 1.0
        kernel = tf.constant(taps, shape=[3, 3, 1, 1], dtype=tf.float32)
        diffs.append(tf.nn.conv2d(gray_imgs, kernel,
                                  strides=[1, 1, 1, 1], padding='SAME'))

    f = tf.concat(diffs, axis=-1)
    # Binarise: neighbour >= centre -> 1, else 0.
    f_code = tf.where(tf.less(f, 0), tf.zeros_like(f), tf.ones_like(f))

    # Pack the 8 binary comparisons into one code in [0, 255] per pixel.
    code_w = tf.constant([128, 64, 32, 16, 8, 4, 2, 1], shape=[1, 1, 1, 8],
                         dtype=tf.float32)
    f_code = tf.multiply(f_code, code_w)
    f_code = tf.reduce_sum(f_code, axis=-1)
    f_code = tf.to_int32(f_code)

    def per_image_histogram(codes):
        # 256-bin histogram of the LBP codes of a single image.
        return tf.bincount(codes, minlength=256, dtype=tf.float32)

    f_code = tf.map_fn(per_image_histogram, f_code, dtype=tf.float32)

    # L2-normalise each histogram.  norm cannot be zero: every image
    # contributes exactly 224*224 counts to its histogram.
    norm = tf.sqrt(tf.reduce_sum(tf.square(f_code), axis=1, keepdims=True))
    f_code = f_code / norm
    f_code.set_shape([None, 256])

    # --- Classifier head (the only trainable part of the graph) ------------
    lbp_net = slim.fully_connected(f_code, 2, scope="lbp_final_fc")

    loss = tf.losses.sparse_softmax_cross_entropy(
            labels=label_placeholder,
            logits=lbp_net, scope="loss")

    argmax = tf.argmax(lbp_net, 1, name="argmax")
    acc = tf.reduce_mean(
            tf.cast(tf.equal(label_placeholder, argmax), tf.float32),
            name="acc")

    loss_summary = tf.summary.scalar("loss", loss)
    acc_summary = tf.summary.scalar("accuracy", acc)

    trainable_list = tf.trainable_variables()
    for t in trainable_list:
        print(t)
    train_op = tf.train.AdamOptimizer(
            learning_rate=0.01).minimize(loss, var_list=trainable_list)

    # Separate writers so train / positive-eval / negative-eval curves can
    # be overlaid in TensorBoard under the same scalar tags.
    summary_merged = tf.summary.merge([loss_summary, acc_summary])
    train_writer = tf.summary.FileWriter('./tflog/LBPtrain',
                                         graph=tf.get_default_graph())
    pos_writer = tf.summary.FileWriter('./tflog/LBPpos')
    neg_writer = tf.summary.FileWriter('./tflog/LBPneg')

    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())

    for i in range(10000):
        # NOTE(review): each step draws 1 positive batch and 3 negative
        # batches (3:1 oversampling of negatives) -- looks deliberate;
        # confirm against the dataset's class balance.
        t1_I, t1_L = sess.run(train_hotel_pos)
        t2_I, t2_L = sess.run(train_hotel_neg)
        t3_I, t3_L = sess.run(train_hotel_neg)
        t4_I, t4_L = sess.run(train_hotel_neg)

        t_I = np.concatenate([t1_I, t2_I, t3_I, t4_I], axis=0)
        t_L = np.concatenate([t1_L, t2_L, t3_L, t4_L], axis=0)

        _, ACC, LOSS, SUMMARY = sess.run(
                [train_op, acc, loss, summary_merged],
                feed_dict={imgs: t_I,
                           label_placeholder: t_L})

        train_writer.add_summary(SUMMARY, i)
        print("iter = %d , loss = %f " % (i, LOSS))

        if i % 200 == 0:
            save_path = saver.save(sess, "./tflog/LBPtmp/model.ckpt")
            print("Model saved in path: %s" % save_path)

        if i % 50 == 0:
            # Evaluate positives and negatives separately so each writer
            # charts a per-class accuracy curve.
            t1_I, t1_L = sess.run(test_hotel_pos)
            t2_I, t2_L = sess.run(test_hotel_neg)

            ACC, LOSS, SUMMARY = sess.run(
                    [acc, loss, summary_merged],
                    feed_dict={imgs: t1_I,
                               label_placeholder: t1_L})

            pos_writer.add_summary(SUMMARY, i)
            print("positive accuracy    =     %f" % ACC)

            ACC, LOSS, SUMMARY = sess.run(
                    [acc, loss, summary_merged],
                    feed_dict={imgs: t2_I,
                               label_placeholder: t2_L})

            neg_writer.add_summary(SUMMARY, i)
            print("negative accuracy    =     %f" % ACC)
