import tensorflow as tf
import tensorflow.contrib.slim as slim
from data_util.streamer import Oulu_NPU, OneLabelData, HomeMadeData
from data_util.parser import *
import os
import numpy as np
import tensorflow_hub as hub
import sys

def _gram_matrix( tensor ):
    """Compute the per-example Gram matrix of a conv feature map.

    Args:
        tensor: float tensor of shape [batch, height, width, channels].
            Height/width/channels must be statically known; batch may be
            dynamic (-1 is used for it in the reshapes).

    Returns:
        Tensor of shape [batch, channels * channels]: the flattened
        channel-by-channel inner-product (Gram) matrix of each example.
    """
    static_shape = tensor.get_shape()
    channels = int( static_shape[3] )
    pixels = int( static_shape[1] ) * int( static_shape[2] )

    # Collapse the spatial grid: each example becomes a (pixels x channels)
    # matrix of channel activations.
    feats = tf.reshape( tensor, shape=[ -1, pixels, channels ] )
    # Batched F^T @ F -- equivalent to transposing first, but lets matmul
    # handle the transpose internally.
    gram = tf.matmul( feats, feats, transpose_a=True )

    return tf.reshape( gram, shape=[ -1, channels * channels ] )


# --- MSU USSA dataset locations (hard-coded for this machine) ---------------
# Live (genuine) faces vs. spoof (MacBook front-camera replay) images.
msu_live_path = "/home/jh/working_data/anti-spoofing/MSU_USSA_Public"
msu_spoof_path = "/home/jh/working_data/anti-spoofing/MSU_USSA_Public/SpoofSubjectImages"

live_train = "/home/jh/working_data/anti-spoofing/MSU_USSA_Public/train_LiveSubjectsImages"
spoo_train1= "/home/jh/working_data/anti-spoofing/MSU_USSA_Public/SpoofSubjectImages/train_MacBook_FrontCamera"

live_test = "/home/jh/working_data/anti-spoofing/MSU_USSA_Public/test_LiveSubjectsImages"
spoo_test1= "/home/jh/working_data/anti-spoofing/MSU_USSA_Public/SpoofSubjectImages/test_MacBook_FrontCamera"

# Batched input ops (batch size 64 each); label 0 = live, 1 = spoof.
# NOTE(review): testDataStream() is used for the *training* streams as well --
# presumably it just yields (images, labels) batches without augmentation;
# confirm a dedicated training stream was not intended.
live_train_ops = OneLabelData( live_train, label = 0 ).testDataStream( 64 )
spoo_train1_ops= OneLabelData( spoo_train1,label = 1 ).testDataStream( 64 )

live_test_ops = OneLabelData( live_test, label = 0 ).testDataStream( 64 )
spoo_test1_ops= OneLabelData( spoo_test1,label = 1 ).testDataStream( 64 )

# One TensorBoard writer per test stream so live/spoof curves plot separately.
# (Creating a FileWriter makes the log directory as a side effect.)
live_test_writer = tf.summary.FileWriter( './tflog/live_test' )
spoo_test_writer1= tf.summary.FileWriter( './tflog/spoof_test1' )

#####################
# Parallel lists consumed by the training loop in __main__:
# one live + one spoof op per training step; test ops zip with test_writers.
train_data_ops = [ live_train_ops, spoo_train1_ops ]
test_data_ops  = [ live_test_ops , spoo_test1_ops ]
test_writers   = [ live_test_writer , spoo_test_writer1 ]
####################


if __name__ == "__main__":
    tf.logging.set_verbosity( tf.logging.INFO )
    # Select the GPU before TF initializes CUDA (i.e. before the Session below).
    os.environ["CUDA_VISIBLE_DEVICES"] = "3"

    sess = tf.Session()
    graph = tf.get_default_graph()

    # Frozen (non-trainable) MobileNet-V2 1.4/224 backbone from a locally
    # exported TF-Hub module; its tensors are fetched from the graph by name.
    m = hub.Module( "/home/jh/working_data/models/tensorflow_hub/mobileNet_v2_140_224" \
            , trainable = False )

    # Input image batch and integer labels (0 = live, 1 = spoof).
    imgs = graph.get_tensor_by_name( "module/hub_input/images:0" )
    label_placeholder = tf.placeholder( tf.int64 , [None] , name = "label" )

    # BUG FIX: dropout and batch-norm were unconditionally in training mode,
    # so the periodic evaluations below randomly dropped 80% of activations
    # and used batch (not moving-average) statistics.  Both now default to
    # inference mode and are only switched on for training steps via feed_dict.
    is_training = tf.placeholder_with_default( False , [] , name = "is_training" )
    keep_prob = tf.placeholder_with_default( 1.0 , [] , name = "keep_prob" )

    # Early (low-level texture) feature map from the backbone.
    low_level_feature1 = graph.get_tensor_by_name( \
            "module/MobilenetV2/expanded_conv/output:0" )

    # Second-order texture statistics: Gram matrix of the low-level features.
    gm = _gram_matrix( low_level_feature1 )
    gm = slim.batch_norm( gm , is_training = is_training )
    # NOTE(review): keep_prob=0.2 keeps only 20% of units during training --
    # unusually aggressive; confirm it was not meant to be a drop *rate* of 0.2.
    gm = tf.nn.dropout( gm , keep_prob = keep_prob )

    # Two-way live/spoof classifier head.
    cnn_net = slim.fully_connected( gm , 2 , scope = "cnn_final_fc" )

    loss = tf.losses.sparse_softmax_cross_entropy( \
            labels = label_placeholder , \
            logits = cnn_net , scope = "loss" )

    argmax = tf.argmax( cnn_net , 1 , name = "argmax" )
    acc = tf.reduce_mean(\
            tf.cast( tf.equal ( label_placeholder , argmax ) , tf.float32 ) , name = "acc" )

    loss_summary = tf.summary.scalar( "loss" , loss )
    acc_summary  = tf.summary.scalar( "accuracy" , acc )

    trainable_list = tf.trainable_variables()
    for t in trainable_list:
        print( t )

    # BUG FIX: slim.batch_norm registers its moving-average updates in the
    # UPDATE_OPS collection; without this control dependency they never ran,
    # leaving the inference-time statistics at their initial values.
    update_ops = tf.get_collection( tf.GraphKeys.UPDATE_OPS )
    with tf.control_dependencies( update_ops ):
        train_op = tf.train.AdamOptimizer( learning_rate = \
                0.001 ).minimize( loss , var_list = trainable_list )

    summary_merged = tf.summary.merge( [ loss_summary , acc_summary ] )
    train_writer = tf.summary.FileWriter( './tflog/train' , graph = tf.get_default_graph() )

    saver = tf.train.Saver()
    sess.run( tf.global_variables_initializer() )

    for i in range( 10000 ):
        # Pull one live batch and one spoof batch per step and concatenate
        # them into a single mixed training batch.
        t_I = []
        t_L = []

        for op in train_data_ops:
            tii, tll = sess.run( op )
            t_I.append( tii )
            t_L.append( tll )

        t_I = np.concatenate( t_I , axis = 0 )
        t_L = np.concatenate( t_L , axis = 0 )

        _ , ACC , LOSS, SUMMARY = sess.run( \
                [train_op , acc , loss , summary_merged ] , \
                feed_dict = { imgs: t_I , \
                label_placeholder : t_L , \
                is_training : True , \
                keep_prob : 0.2 } )

        train_writer.add_summary( SUMMARY , i )
        print( "iter = %d , loss = %f "  %( i , LOSS ) )

        # Periodic checkpoint (overwrites the same path each time).
        if i%200 == 0:
            save_path = saver.save(sess, "./tflog/tmp/model.ckpt")
            print( "Model saved in path: %s" % save_path )

        # Periodic evaluation: one batch per test stream.  is_training and
        # keep_prob default to inference values, so no extra feeds are needed.
        if i%50 == 0:
            for op, writer in zip( test_data_ops , test_writers ):
                t_I , t_L = sess.run( op )

                ACC , LOSS, SUMMARY = sess.run( \
                        [ acc , loss , summary_merged ] , \
                        feed_dict = { imgs: t_I , \
                        label_placeholder : t_L } )

                writer.add_summary( SUMMARY , i )
                print( "accuracy accuracy accuracy    =     %f" % ACC )