import tensorflow as tf
import tensorflow.contrib.slim as slim
import os
import tensorflow_hub as hub
import tempfile
import random
import os.path as op

class test_data( object ):
    """
    Binary-classification image data input class.

    Expects two sub-directories under ``data_path``:
      * ``pos`` -- images labelled 0
      * ``neg`` -- images labelled 1

    On construction it writes a shuffled index file
    ``<data_path>/tmp/data_info.txt`` with one ``"<img_path> <label>"`` line
    per image (the file is reused verbatim on later runs).  The first
    ``_test_ratio`` fraction of its lines forms the test split, the rest the
    training split.
    """
    def __init__( self, data_path ):
        self._data_path  = data_path
        self._height = 128          # target image height fed to the network
        self._width  = 128          # target image width fed to the network
        self._test_ratio = 0.1      # leading fraction of the index reserved for testing

        # the index file (and any other temp files) lives under <data_path>/tmp
        tempfile.tempdir = os.path.join( self._data_path , 'tmp' )
        os.makedirs( tempfile.tempdir, exist_ok = True )

        self._prepareEverything( )

    def _prepareEverything( self ):
        """Build -- or reuse -- the shuffled ``data_info.txt`` index file.

        Raises:
            ValueError: if either the ``pos`` or ``neg`` directory is missing.
        """
        self._data0_path = os.path.join( self._data_path , 'pos' )
        self._data1_path = os.path.join( self._data_path , 'neg' )

        if not op.isdir( self._data0_path ) or not op.isdir( self._data1_path ):
            raise ValueError( "data doesn't exist" )

        self._data_info_file = op.join( tempfile.tempdir , 'data_info.txt' )

        if os.path.exists( self._data_info_file ):
            # BUGFIX: a cached index file is reused verbatim by the dataset
            # streams, so the image count MUST come from the file itself.
            # Recounting the directories (as the old code did) desynchronizes
            # skip()/take() whenever the directory contents changed after the
            # index was written.
            with open( self._data_info_file ) as fr:
                self._img_nums = sum( 1 for _ in fr )
            return

        # one "<absolute_img_path> <label>\n" line per image; pos -> 0, neg -> 1
        img_list0 = [ op.join( self._data0_path , s ) + ' 0\n'
                      for s in os.listdir( self._data0_path ) ]
        img_list1 = [ op.join( self._data1_path , s ) + ' 1\n'
                      for s in os.listdir( self._data1_path ) ]

        img_list = img_list0 + img_list1
        random.shuffle( img_list )

        # drop the trailing newline so TextLineDataset sees no empty last line
        # (guarded: the old code crashed with IndexError on empty directories)
        if img_list:
            img_list[-1] = img_list[-1].strip()

        self._img_nums = len( img_list )
        with open( self._data_info_file , 'w' ) as fw:
            fw.writelines( img_list )

    def trainDataStream( self , batch_size , if_shuffle = False , if_flip = False ):
        """Return one epoch of the training split as a batched tf.data.Dataset."""
        dataset = tf.data.TextLineDataset( self._data_info_file )
        # the training split is everything after the leading test fraction
        dataset = dataset.skip( int( self._img_nums * self._test_ratio ) )
        dataset = dataset.map( lambda \
                line : self._parser( line , if_flip = if_flip ) )
        if if_shuffle:
            dataset = dataset.shuffle( 5 * batch_size )
        # batch BEFORE prefetch so whole batches (not single elements)
        # are prepared ahead of the consumer
        dataset = dataset.repeat(1).batch( batch_size )
        dataset = dataset.prefetch( 20 )

        return dataset

    def testDataStream( self , batch_size ):
        """Return the (endlessly repeated) test split as a batched tf.data.Dataset."""
        dataset = tf.data.TextLineDataset( self._data_info_file )
        # the test split is the leading fraction of the shuffled index
        dataset = dataset.take( int( self._img_nums * self._test_ratio ) )
        dataset = dataset.map( self._parser )
        dataset = dataset.repeat().batch( batch_size )
        dataset = dataset.prefetch( 10 )

        return dataset

    def _parser( self , line , if_flip = False ):
        """
        Parse one index line of the form ``<img_path> <label>`` into
        ``({"imgs": flat_float_image}, label)``.

        NOTE(review): the space delimiter means image paths containing
        spaces would break parsing -- confirm paths are space-free.
        """
        FIELD_DEFAULT = [ ['IMG_PATH'] , [0] ]
        fields = tf.decode_csv( line , FIELD_DEFAULT , field_delim = ' ' )
        content = tf.read_file( fields[0] )

        tf_image = tf.image.decode_jpeg( content )
        if if_flip:
            tf_image = tf.image.random_flip_left_right( tf_image )
        tf_image = tf.image.convert_image_dtype( tf_image , tf.float32 )
        tf_image = tf.image.resize_images( tf_image , [ self._height , self._width ] )

        # flatten so the feature_columns numeric_column("imgs") can consume it
        tf_image = tf.reshape( tf_image , [-1] )

        return ( { "imgs" : tf_image }, fields[1] )

def model_fn( features , labels , mode , params ):
    """
    Estimator model_fn: a frozen TF-Hub image-feature module followed by a
    trainable 2-way softmax classification head.

    Args:
        features: dict with key "imgs" holding flattened float images.
        labels: int class ids (0 or 1); ignored in PREDICT mode.
        mode: one of tf.estimator.ModeKeys.{TRAIN,EVAL,PREDICT}.
        params: dict with keys
            'feature_columns' (required) -- columns describing "imgs";
            'hub_module'      (optional) -- path/handle of the hub module,
                              defaults to the local mobileNet_v2_035_128 copy;
            'image_size'      (optional) -- [height, width], default [128, 128];
            'learning_rate'   (optional) -- Adam learning rate, default 1e-4.

    Returns:
        A tf.estimator.EstimatorSpec appropriate for `mode`.
    """
    imgs = tf.feature_column.input_layer( features, params['feature_columns'] )
    # un-flatten back to NHWC for the hub image module
    height, width = params.get( 'image_size', [ 128 , 128 ] )
    imgs = tf.reshape( imgs , [ -1 , height , width , 3 ] )

    module_path = params.get( 'hub_module',
            "/home/jh/working_data/models/tensorflow_hub/mobileNet_v2_035_128" )
    # trainable=False: the hub backbone stays frozen, only "final_fc" trains
    m = hub.Module( module_path , trainable = False )

    imgs_features = m( imgs )
    logits = slim.fully_connected( imgs_features , 2 , scope = "final_fc" )

    predicted_classes = tf.argmax( logits , 1 , name = "argmax" )

    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
                'class_ids' : predicted_classes[ : , tf.newaxis ] ,
                'probabilities' : tf.nn.softmax(logits) ,
                'logits' : logits
                }
        return tf.estimator.EstimatorSpec( mode, predictions=predictions )

    loss = tf.losses.sparse_softmax_cross_entropy( labels=labels , logits=logits )
    accuracy = tf.metrics.accuracy( labels=labels , \
            predictions = predicted_classes , name = 'acc_op' )

    metrics = { 'accuracy': accuracy }
    # accuracy[1] is the update op; logging it shows the running accuracy
    tf.summary.scalar( 'accuracy', accuracy[1] )
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec( mode, loss=loss, eval_metric_ops=metrics )

    assert mode == tf.estimator.ModeKeys.TRAIN

    optimizer = tf.train.AdamOptimizer( learning_rate = params.get( 'learning_rate', 0.0001 ) )
    train_op = optimizer.minimize( loss, global_step=tf.train.get_global_step() )
    return tf.estimator.EstimatorSpec( mode, loss=loss, train_op=train_op , eval_metric_ops=metrics )

if __name__ == "__main__":
    tf.logging.set_verbosity(tf.logging.INFO)
    os.environ["CUDA_VISIBLE_DEVICES"] = "3"

    idCard_data = test_data( "/home/jh/working_data/idData_face_expanding_0.5WH" )

    my_feature_columns = []
    my_feature_columns.append( tf.feature_column.numeric_column( key = "imgs" , \
           shape = [ 128*128*3 ]  ) )

    classifier = tf.estimator.Estimator( model_fn = model_fn , \
            model_dir = "./estimator_tmp" , \
            params = { 'feature_columns' : my_feature_columns })

    train_spec = tf.estimator.TrainSpec( input_fn = \
            lambda: idCard_data.trainDataStream( 256 , if_flip = True ) , max_steps=1000 )
    eval_spec = tf.estimator.EvalSpec( input_fn = \
            lambda: idCard_data.testDataStream( 256 ) , \
            steps = 1, \
            start_delay_secs = 10 , 
            throttle_secs = 200 )

    tf.estimator.train_and_evaluate( classifier , train_spec, eval_spec )
