import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim.python.slim.learning import train_step
from fetchData import *
import os
import numpy as np
from tensorflow.python import debug as tf_debug
import tensorflow_hub as hub
import sys
from util import cos_loss

# Root directory of the idCard binary-classification dataset.
# (The original file first assigned a local macOS development path,
# "/Users/pitaloveu/working_data/idCard", then immediately overwrote it;
# the dead assignment has been removed.)
data_dir = "/home/jh/working_data/idCard_whole_extract"

# Directory where the final SavedModel is exported for serving.
export_path = './model1'


if __name__ == "__main__":
    # Train a 2-class classifier on top of a frozen MobileNet V2 feature
    # extractor (TF-Hub), using a cosine-margin loss, then export the model
    # as a SavedModel for serving.
    tf.logging.set_verbosity(tf.logging.INFO)
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"

    # Input pipelines: batches of 512 with random flips for training,
    # batches of 256 for evaluation.
    # NOTE(review): binary_data / trainDataStream / testDataStream come from
    # fetchData; assumed to return (images, labels) tensor ops consumable by
    # sess.run — confirm against fetchData.
    idCard_data = binary_data( data_dir )
    train_data_ops = idCard_data.trainDataStream( 512 , if_flip = True )
    test_data_ops  = idCard_data.testDataStream ( 256 )

    # Plain tf.Session is used; uncomment the wrapper below to step through
    # the graph with the TFDBG command-line debugger.
    sess = tf.Session()
    #sess = tf_debug.LocalCLIDebugWrapperSession(sess)


    # Load a locally cached TF-Hub module (downloaded copy of
    # MODEL: mobilenet_v2_100_96).  The backbone is frozen
    # (trainable = False), so only the classification head added below is
    # optimized.  Original source:
    # m = hub.Module( "https://tfhub.dev/google/imagenet/mobilenet_v2_100_96/classification/2" , trainable = True )
    m = hub.Module( os.path.join( "/home/jh/working_data/models/tensorflow_hub/"
            "8120b7321d9e14533232b1ddd4a74db35324b638" ) , trainable = False  )

    graph = tf.get_default_graph()

    # Grab the hub module's internal input/feature tensors directly by name
    # rather than calling m(...).
    # NOTE(review): these tensor names are tied to this exact module
    # revision — they will break if the cached module hash changes.
    # Also note `input` shadows the Python builtin; kept as-is.
    input = graph.get_tensor_by_name( "module/hub_input/images:0" )
    imgs_features = graph.get_tensor_by_name( "module/hub_output/feature_vector/SpatialSqueeze:0" )

    # Integer class labels (0/1) for each image in the fed batch.
    label_placeholder = tf.placeholder( tf.int64 , [None] , name = "label" )
    # 2-unit fully-connected head on top of the frozen features.  The test
    # branch reuses the exact same weights via scope.reuse_variables().
    with tf.variable_scope( "head" ) as scope:
        net = slim.fully_connected( imgs_features , 2 , scope = "final_fc" )
        scope.reuse_variables()
        net_test = slim.fully_connected( imgs_features , 2 , scope = "final_fc" )

    # Cosine-margin loss instead of plain softmax cross-entropy.
    # NOTE(review): cos_loss (from util) is assumed to return
    # (scalar loss, per-class scores); the test branch shares its variables
    # via reuse = True — confirm against util.cos_loss.
    loss_train, train_class = cos_loss( x = net , y = label_placeholder , num_cls = 2 )
    loss_test, test_class = cos_loss( x = net_test , y = label_placeholder , num_cls = 2 , reuse = True)

    # Predicted class index per example (argmax over the 2 class scores).
    train_argmax = tf.argmax( train_class , 1 , name = "train_argmax" )
    test_argmax  = tf.argmax( test_class , 1 , name = "test_argmax" )

    # Batch accuracy: fraction of predictions equal to the labels.
    acc_train = tf.reduce_mean(\
            tf.cast( tf.equal ( label_placeholder , train_argmax ) , tf.float32 ) \
            , name = "acc_train" )

    acc_test = tf.reduce_mean(\
            tf.cast( tf.equal ( label_placeholder , test_argmax ) , tf.float32 ) \
            , name = "acc_test" )

    # TensorBoard scalar summaries for accuracy and loss on both branches.
    train_acc_summary = tf.summary.scalar( "train_accuracy" , acc_train )
    test_acc_summary  = tf.summary.scalar( "test_accuracy"  , acc_test  )

    train_loss_summary = tf.summary.scalar( "train_loss" , loss_train )
    test_loss_summary  = tf.summary.scalar( "test_loss"  , loss_test  )

    # With the hub backbone frozen, this presumably contains only the head
    # and cos_loss variables — confirm via tf.trainable_variables() at runtime.
    trainable_list = tf.trainable_variables()

    train_op = tf.train.AdamOptimizer( learning_rate = \
            0.0001 ).minimize( loss_train , var_list = trainable_list )

    train_merged = tf.summary.merge( [ train_loss_summary , train_acc_summary ] )
    test_merged  = tf.summary.merge( [ test_loss_summary  , test_acc_summary  ] )

    # Separate event directories so TensorBoard shows train/test as two runs.
    train_writer = tf.summary.FileWriter( './tflog_largerIMG_COSLOSS/train' , graph = tf.get_default_graph() )
    test_writer = tf.summary.FileWriter( './tflog_largerIMG_COSLOSS/test' )

    saver = tf.train.Saver()
    sess.run( tf.global_variables_initializer() )

    # Main training loop: fetch a batch from the input pipeline, run one
    # optimizer step, and log train summaries every iteration.
    for i in range( 100000 ):
        train_images , train_labels = sess.run( train_data_ops )
        _ , accuracy , loss, summary = sess.run( \
                [train_op , acc_train , loss_train , train_merged ] , \
                feed_dict = { input: train_images , \
                label_placeholder : train_labels } )

        train_writer.add_summary( summary , i )
        print( "iter = %d , loss = %f "  %( i , loss ) )

        # Checkpoint every 200 iterations (overwrites the same path).
        if i% 200 == 0:
            save_path = saver.save(sess, "./tmp/model.ckpt")
            print("Model saved in path: %s" % save_path)

        # Evaluate on one test batch every 50 iterations.
        if i%50 == 0:
            test_images , test_labels = sess.run( test_data_ops )

            accuracy , loss, summary = sess.run( \
                    [ acc_test , loss_test , test_merged ] , \
                    feed_dict = { input: test_images , \
                    label_placeholder : test_labels } )

            test_writer.add_summary( summary , i )
            print( "accuracy = %f" % accuracy )

    train_writer.close()
    test_writer.close()


    #
    # Export the final model as a SavedModel for TF Serving.
    # Signature 'predict_images': images -> head logits (net_test branch).
    #

    builder = tf.saved_model.builder.SavedModelBuilder(export_path)
    tensor_info_input  = tf.saved_model.utils.build_tensor_info( input )
    tensor_info_output = tf.saved_model.utils.build_tensor_info( net_test )

    prediction_signature = (
            tf.saved_model.signature_def_utils.build_signature_def(
                inputs  = { 'images' : tensor_info_input } , 
                outputs = { 'scores' : tensor_info_output} , 
                method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))

    # main_op runs the table initializer on load, as required by hub modules
    # that use lookup tables.
    builder.add_meta_graph_and_variables(
            sess, [tf.saved_model.tag_constants.SERVING],
            signature_def_map={
                'predict_images':
                prediction_signature,
                },
            main_op=tf.tables_initializer(),
            strip_default_attrs=True)
    builder.save()