import cv2
import tensorflow as tf
import numpy as np
import os.path as op
import os
import matplotlib.pyplot as plt
from functools import reduce
import tempfile
from CONFIGURES import args
import random
import re
import logging
from data_util.generate_tfrecord import generate_whole_tfrecord

class binary_data( object ):
    """
    Binary image-classification data pipeline.

    ``data_path`` must contain two sub-directories:
        pos/  -> images indexed with label 0
        neg/  -> images indexed with label 1

    A shuffled index file ``<data_path>/tmp/data_info.txt`` is created
    once, holding one "<abs_img_path> <label>" record per line.  The
    tf.data streams read that file: the leading ``_test_ratio`` fraction
    of lines is the test split, the remainder the training split.
    """
    def __init__( self , data_path ):
        self._data_path  = data_path
        self._height = 224        # target height fed to the network
        self._width  = 224        # target width fed to the network
        self._test_ratio = 0.2    # leading fraction of the index used as test set

        # NOTE: mutates the process-wide tempfile default directory so
        # everything temporary lands under <data_path>/tmp
        tempfile.tempdir = os.path.join( self._data_path , 'tmp' )

        if not os.path.exists( tempfile.tempdir ):
            os.makedirs( tempfile.tempdir )

        self._prepareEverything()

    def _prepareEverything( self ):
        """Create the shuffled index file, or reuse one from a prior run."""
        self._data0_path = os.path.join( self._data_path , 'pos' )
        self._data1_path = os.path.join( self._data_path , 'neg' )

        if not op.isdir( self._data0_path ) or not op.isdir( self._data1_path ):
            raise ValueError( "data doesn't exist" )

        self._data_info_file = op.join( tempfile.tempdir , 'data_info.txt' )

        if os.path.exists( self._data_info_file ):
            # BUGFIX: an existing index is reused verbatim, so the image
            # count must come from the file itself.  The original counted
            # the directories instead, which desynchronized _img_nums
            # (and thus the train/test skip/take offsets) whenever the
            # directories changed after the index was written.
            with open( self._data_info_file , 'r' ) as fr:
                self._img_nums = sum( 1 for _ in fr )
            return

        img_list0 = [ op.join( self._data0_path , s ) + ' 0\n'
                      for s in os.listdir( self._data0_path ) ]
        img_list1 = [ op.join( self._data1_path , s ) + ' 1\n'
                      for s in os.listdir( self._data1_path ) ]

        img_list = img_list0 + img_list1
        # BUGFIX: the original indexed img_list[-1] unconditionally and
        # raised a bare IndexError on empty data directories.
        if not img_list:
            raise ValueError( "no images found under " + self._data_path )

        random.shuffle( img_list )
        # strip the final newline so the file has no trailing blank line
        # (TextLineDataset would otherwise yield an empty record)
        img_list[-1] = img_list[-1].strip()

        self._img_nums = len( img_list )
        with open( self._data_info_file , 'w' ) as fw:
            fw.writelines( img_list )

    def trainDataStream( self , batch_size , if_shuffle = False , if_flip = False ):
        """
        Batched (image, label) tensors over the training split, i.e. the
        index lines after the leading test fraction.  Repeats forever.

        if_shuffle : shuffle with a 5*batch_size buffer
        if_flip    : random horizontal flip augmentation
        """
        dataset = tf.data.TextLineDataset( self._data_info_file )
        dataset = dataset.skip( int( self._img_nums * self._test_ratio ) )
        dataset = dataset.map( lambda \
                line : self._parser( line , if_flip = if_flip ) )
        if if_shuffle:
            dataset = dataset.shuffle( 5 * batch_size )
        dataset = dataset.repeat().prefetch( 20 * batch_size )
        dataset = dataset.batch( batch_size )

        return dataset.make_one_shot_iterator().get_next()

    def testDataStream( self , batch_size ):
        """
        Batched (image, label) tensors over the test split, i.e. the
        leading ``_test_ratio`` fraction of the index.  Repeats forever.
        """
        dataset = tf.data.TextLineDataset( self._data_info_file )
        dataset = dataset.take( int( self._img_nums * self._test_ratio ) )
        dataset = dataset.map( self._parser )
        dataset = dataset.repeat().prefetch( 10 * batch_size )
        dataset = dataset.batch( batch_size )

        return dataset.make_one_shot_iterator().get_next()

    def _parser( self , line , if_flip = False ):
        """
        Decode one index line of the form:

            img_path label

        into a float32 image resized to (_height, _width) and its int
        label.  ``if_flip`` adds random left/right flip augmentation.
        """
        FIELD_DEFAULT = [ ['IMG_PATH'] , [0] ]
        fields = tf.decode_csv( line , FIELD_DEFAULT , field_delim = ' ' )
        content = tf.read_file( fields[0] )

        tf_image = tf.image.decode_jpeg( content )
        if if_flip:
            tf_image = tf.image.random_flip_left_right( tf_image )
        tf_image = tf.image.convert_image_dtype( tf_image , tf.float32 )
        tf_image = tf.image.resize_images( tf_image , [ self._height , self._width ] )

        return tf_image , fields[1]

    def checkDecoding( self ):
        """Visually spot-check every indexed image with OpenCV (blocking)."""
        with open( self._data_info_file , 'r' ) as fo:
            lines = fo.readlines()

        for line in lines:
            img_name = line.strip().split(' ')[0]
            img = cv2.imread( img_name )
            print( img_name )
            cv2.imshow( "" , img )
            cv2.waitKey( 33 )

    def showImgs( self , sess , dataStream ):
        """
        Roughly visualize a batch of 50 images with matplotlib.

        sess       : a running tf.Session used to evaluate the stream
        dataStream : callable like self.testDataStream producing
                     (images, labels) tensors
        """
        imgs , labels = sess.run( dataStream( batch_size = 50 ) )

        for i in range( len( imgs ) ):
            plt.imshow( imgs[i] )
            plt.show()

class FakedImgData( binary_data ):
    """
    Variant of ``binary_data`` that indexes only the ``neg`` directory,
    labelling every sample 1 (fake-only data).

    Unlike the parent, the index file is rewritten on every construction.
    """
    def __init__(self , data_path ):
        super( FakedImgData , self ).__init__( data_path )

    def _prepareEverything( self ):
        """Rebuild the index file from <data_path>/neg with label 1."""
        self._data1_path = os.path.join( self._data_path , 'neg' )

        if not op.isdir( self._data1_path ):
            raise ValueError( "data doesn't exist" )

        img_list = [ op.join( self._data1_path , s ) + ' 1\n'
                     for s in os.listdir( self._data1_path ) ]

        # BUGFIX: the original indexed img_list[-1] unconditionally and
        # raised a bare IndexError when the neg directory was empty.
        if not img_list:
            raise ValueError( "no images found under " + self._data1_path )

        random.shuffle( img_list )
        # strip the final newline so the file has no trailing blank line
        img_list[-1] = img_list[-1].strip()

        self._data_info_file = op.join( tempfile.tempdir , 'data_info.txt' )
        self._img_nums = len( img_list )

        with open( self._data_info_file , 'w' ) as fw:
            fw.writelines( img_list )

class Oulu_NPU( object ):
    """
    manipulate OULU datasets

    Preprocessing pipeline (the run-once steps are commented out in
    __init__ and are apparently triggered manually):
      1. _check_split_video : explode every .avi into per-frame jpgs
         under tmp/imgs
      2. _process_protocols : rewrite the official protocol lists into
         per-frame "img_path lx ly rx ry label" index files under
         tmp/protocols
      3. _generate_tfrecord : pack those index files into tfrecords
         under tmp/tfrecord
    Afterwards DataStream / trainDataStream / testDataStream provide
    batched (image, label) tensors.
    """

    # fixed spatial size every stream resizes/crops its output to
    HEIGHT = 224
    WIDTH  = 224

    def __init__( self , path , use_tfrecord = True ):
        """

        init with the containing directory path with structure:
        path/
            Dev_files/
                1_1_22_1.avi
                1_1_22_1.txt
                ...
                ....
            Protocols/
                Protocol_1
                Protocol_2
                Protocol_3
                Protocol_4
            Test_files/
                1_1_22_1.avi
                1_1_22_1.txt
                ...
                ....
            Train_files/
                1_1_22_1.avi
                1_1_22_1.txt
                ...
                ...
                ...
        """
        self._use_tfrecord = use_tfrecord
        self._base_path = os.path.abspath( path )
        # per-frame jpgs extracted from the avis
        self._split_dir = os.path.join( self._base_path , 'tmp' , 'imgs' )
        # rewritten per-frame protocol index files
        self._protocol_dir = os.path.join( self._base_path , 'tmp' , 'protocols' )
        if not os.path.exists( self._split_dir ):
            os.makedirs( self._split_dir )
        if not os.path.exists( self._protocol_dir ):
            os.makedirs( self._protocol_dir )
        
        # for safety, only split the .avis when the split_dir is empty
        # cause the time consuming


    
        #self._check_split_video()
        #self._process_protocols()

        self._tfrecord_dir = os.path.join( self._base_path , 'tmp' , 'tfrecord' )
        #if not os.path.exists( self._tfrecord_dir ):
        #    os.makedirs( self._tfrecord_dir )
        #if self._use_tfrecord:
        #    self._generate_tfrecord()

    def _generate_tfrecord( self ):
        """Convert each protocol index txt into a .tfrecord of the same
        base name under tmp/tfrecord.  Currently restricted to the
        protocol-1 files ('p1_' substring test on the full path)."""
        txt_list = self._find_files( self._protocol_dir , r'.*txt' )
        transfered_list = list( map( lambda s : os.path.basename(s) , txt_list ) )
        # swap the '.txt' suffix for '.tfrecord'
        transfered_list = list( map( lambda s: s[:-4] + '.tfrecord' , transfered_list ) )
        transfered_list = list( map( lambda s : os.path.join( \
                self._base_path , 'tmp' , 'tfrecord', s ) , transfered_list ) )

        for fi_txt, fo_tfr in zip( txt_list , transfered_list ):
            # generate p1 first
            # NOTE(review): this matches 'p1_' anywhere in the *path*,
            # not just the file name -- confirm no directory component
            # can contain 'p1_'
            if 'p1_' in fi_txt:
                generate_whole_tfrecord( fi_txt , fo_tfr )

    def trainDataStream( self , batch_size , \
            protocolID, data_type, cross_check_id = '' ):
        """Endless shuffled stream of (image, label) batches read from a
        protocol index txt; images are the eye-centred face crop produced
        by _parser_whole_img, resized to HEIGHT x WIDTH."""
        data_info_file = self._find_protocol( \
                protocolID , data_type , cross_check_id )
        dataset = tf.data.TextLineDataset( data_info_file )
        #dataset = dataset.filter( self._filter_cannot_crop )
        dataset = dataset.map( self._parser_whole_img )
        dataset = dataset.shuffle( 5 * batch_size )
        dataset = dataset.repeat().prefetch( 20 * batch_size )
        dataset = dataset.batch( batch_size )

        return dataset.make_one_shot_iterator().get_next()

    def testDataStream( self , batch_size , \
            protocolID, data_type, cross_check_id = '' ):
        """Same as trainDataStream but without shuffling, for evaluation."""
        data_info_file = self._find_protocol( \
                protocolID , data_type , cross_check_id )
        dataset = tf.data.TextLineDataset( data_info_file )
        #dataset = dataset.filter( self._filter_cannot_crop )
        dataset = dataset.map( self._parser_whole_img )
        #dataset = dataset.shuffle( 10 * batch_size )
        #dataset = dataset.shuffle( 3 * batch_size )
        dataset = dataset.repeat().prefetch( 20 * batch_size )
        dataset = dataset.batch( batch_size )

        return dataset.make_one_shot_iterator().get_next()

    def DataStream( self , batch_size , pID, dataT, ccID = "" ):
        """Endless shuffled (image, label) batches read from the matching
        .tfrecord file instead of the txt index."""
        tfr_file = self._find_tfrecord( pID , dataT , ccID )
        dataset = tf.data.TFRecordDataset( [tfr_file] )
        dataset = dataset.map( self._tfrecord_parser )
        #dataset = dataset.map( self._tfrecord_parser_noresize )
        dataset = dataset.shuffle( 3 * batch_size )
        dataset = dataset.repeat().prefetch( 5 * batch_size )
        dataset = dataset.batch( batch_size )

        return dataset.make_one_shot_iterator().get_next()

    def _tfrecord_parser( self , data_record ):
        """Decode one tfrecord example: jpeg image plus left/right eye
        coordinates (lx,ly,rx,ry) and an int64 label.  Crops a face box
        derived from the eye positions, resizes it to HEIGHT x WIDTH and
        binarizes the label (label > 0 -> 1, else 0)."""
        features = {
                "img" : tf.FixedLenFeature( [] , tf.string ) , 
                "lx" : tf.FixedLenFeature( [] , tf.int64 ) ,
                "ly" : tf.FixedLenFeature( [] , tf.int64 ) ,
                "rx" : tf.FixedLenFeature( [] , tf.int64 ) ,
                "ry" : tf.FixedLenFeature( [] , tf.int64 ) ,
                "label" : tf.FixedLenFeature( [] , tf.int64 )
                }
        sample = tf.parse_single_example( data_record , features )

        tf_image = tf.image.decode_jpeg( sample['img'] )
        
        lx = tf.to_int32( sample['lx'] )
        ly = tf.to_int32( sample['ly'] )
        rx = tf.to_int32( sample['rx'] )
        ry = tf.to_int32( sample['ry'] )

        # vertical midpoint between the two eyes
        ave_y = tf.to_int32( ( ly + ry ) / 2 )

        img_height = tf.shape( tf_image )[0]
        img_width  = tf.shape( tf_image )[1]

        # specify judging area first
        # face box: one eye-distance margin left/right and above, two
        # eye-distances below the eye line
        eye_wide = rx - lx

        judgeL = lx - eye_wide
        judgeR = rx + eye_wide
        judgeU = ave_y - eye_wide
        judgeD = tf.to_int32( ave_y + 2 * eye_wide )

        # clip into image original dimensions
        judgeL = tf.maximum( 0, judgeL )
        judgeU = tf.maximum( 0, judgeU )
        judgeR = tf.minimum( img_width -1 , judgeR )
        judgeD = tf.minimum( img_height-1 , judgeD )

        tf_image = tf.image.crop_to_bounding_box( tf_image , \
                judgeU, judgeL, judgeD- judgeU , judgeR - judgeL )

        #eye_position = tuple( fields[ 1: -1 ] )
        #height, width, _ = tf_image.shape
        #height = tf.shape( tf_image )[0]
        #width  = tf.shape( tf_image )[1]

        #offset_x , offset_y, judgeArea = self._random_crop_wrt_eye_position(\
        #offset_x , offset_y = self._random_crop_wrt_eye_position(\
        #        eye_position , ( height , width ) )

        #tf_image = tf.image.crop_to_bounding_box( tf_image , offset_y, \
        #        offset_x , Oulu_NPU.HEIGHT, Oulu_NPU.WIDTH )

        tf_image = tf.image.convert_image_dtype( tf_image , tf.float32 )
        tf_image = tf.image.resize_images( \
                tf_image , [ Oulu_NPU.HEIGHT , Oulu_NPU.WIDTH ] )

        # collapse the multi-valued dataset label into binary real/attack
        label = tf.to_int32( sample['label'] )
        return tf.cond( label > 0 , lambda:  (tf_image, 1) , \
                lambda: (tf_image, 0 )  )

    def showImgs( self , sess , dataStream ):
        """
        user should use lambda expression to reformulate the dataStream like:

            mm = Oulu_NPU( path )
            ds = lambda : mm.trainDataStream( 128, 'p3', 'Train' , 2 )

            mm.showImgs( sess , ds )

        Evaluates one batch and writes each image to ./<i>.jpg, swapping
        channels 0 and 2 (RGB -> BGR) so cv2.imwrite stores them with
        correct colors.
        """
        #imgs , labels, imgs_copy, judgeA  = sess.run( dataStream() )
        imgs , labels = sess.run( dataStream() )

        def value_change( x, y ):
            # return the two values swapped
            tmp = x
            x = y
            y = tmp
            return x,y

        for i in range( len( imgs ) ):
            #plt.imshow( imgs[i] )
            #plt.show()

            # swap the R and B channels pixel by pixel
            h, w, c = imgs[i].shape
            for ih in range(h):
                for iw in range(w):
                    imgs[i][ih][iw][0], imgs[i][ih][iw][2] = \
                            value_change( imgs[i][ih][iw][0], imgs[i][ih][iw][2] )

            img_name = str(i) + '.jpg'
            cv2.imwrite( img_name , imgs[i] )

    def _random_crop_wrt_eye_position( self , eye_position , img_dimension ):
        """

        eye_position: a tuple holding ( left_x, left_y, right_x, right_y )
        img_dimension: a tuple holding ( img_height , img_width )

        return the first two parameters in tf.image.crop_to_bounding_box

        offset_height and offset_width, 

        the target height and target width will be set to Oulu_NPU.HEIGHT and
        Oulu_NPU.WIDTH, respectively

        if the img dimension is less than the crop size, then 
        return ""
        this situation will seldom happen


        """
        lx = eye_position[0]
        ly = eye_position[1]
        rx = eye_position[2]
        ry = eye_position[3]

        # vertical midpoint between the two eyes
        ave_y = tf.to_int32( ( ly + ry ) / 2 )

        img_height = img_dimension[0]
        img_width  = img_dimension[1]

        # specify judging area first
        eye_wide = rx - lx

        judgeL = lx - eye_wide
        judgeR = rx + eye_wide
        judgeU = ave_y - eye_wide
        judgeD = tf.to_int32( ave_y + 2 * eye_wide )

        # clip into image original dimensions
        judgeL = tf.maximum( 0, judgeL )
        judgeU = tf.maximum( 0, judgeU )
        judgeR = tf.minimum( img_width -1 , judgeR )
        judgeD = tf.minimum( img_height-1 , judgeD )

        #TODO
        #if judgeR < judgeL + Oulu_NPU.WIDTH - 1 \
        #        or judgeD < judgeU + Oulu_NPU.HEIGHT -1 :
        #    return None

        # shrink the bounding box of crop box's offset point
        judgeR = judgeR - ( Oulu_NPU.WIDTH -1 )
        judgeD = judgeD - ( Oulu_NPU.HEIGHT-1 )

        # generate random point 
        # NOTE(review): if the face area is smaller than HEIGHT x WIDTH,
        # maxval can be <= minval here and tf.random_uniform fails at
        # runtime -- the commented TODO above was meant to guard this
        offset_x = tf.random_uniform( shape = () , minval = judgeL, \
                maxval = judgeR +1 , dtype = tf.int32 )

        offset_y = tf.random_uniform( shape = () , minval = judgeU, \
                maxval = judgeD +1 , dtype = tf.int32 )
        #offset_x = np.random.randint( low = judgeL , high = judgeR +1 )
        #offset_y = np.random.randint( low = judgeU , high = judgeD +1 )

        #return offset_x, offset_y , ( judgeL, judgeR, judgeU, judgeD )
        return offset_x, offset_y

    """
    def _filter_cannot_crop( self , line ):
        #if the face area too small to crop a specified size
        #area, then ignore it : return False

        FIELD_DEFAULT = [ ['IMG_PATH'] ,[0], [0], [0], [0], [0] ]
        fields = tf.decode_csv( line , FIELD_DEFAULT , field_delim = ' ' )
        content = tf.read_file( fields[0] )
        tf_image = tf.image.decode_jpeg( content )

        eye_position = tuple( fields[ 1: -1 ] )
        #height, width, _ = tf_image.shape
        height = tf_image.shape[0]
        width  = tf_image.shape[1]

        offset = self._random_crop_wrt_eye_position(\
                eye_position , ( height , width ) )

        if offset == None:
            return False

        return True
    """

    def _parser( self , line ):
        """
        img_path left_x left_y right_x right_y label

        Parses one index line and returns a random HEIGHT x WIDTH crop of
        the face area (uint8, NOT converted to float here) plus the
        binarized label.
        """
        FIELD_DEFAULT = [ ['IMG_PATH'] ,[0], [0], [0], [0], [0] ]
        fields = tf.decode_csv( line , FIELD_DEFAULT , field_delim = ' ' )
        content = tf.read_file( fields[0] )
        tf_image = tf.image.decode_jpeg( content )

        #tf_image_copy = tf_image

        # (lx, ly, rx, ry)
        eye_position = tuple( fields[ 1: -1 ] )
        #height, width, _ = tf_image.shape
        height = tf.shape( tf_image )[0]
        width  = tf.shape( tf_image )[1]

        #offset_x , offset_y, judgeArea = self._random_crop_wrt_eye_position(\
        offset_x , offset_y = self._random_crop_wrt_eye_position(\
                eye_position , ( height , width ) )

        tf_image = tf.image.crop_to_bounding_box( tf_image , offset_y, \
                offset_x , Oulu_NPU.HEIGHT, Oulu_NPU.WIDTH )

        #tf_image = tf.image.convert_image_dtype( tf_image , tf.float32 )
        #tf_image = tf.image.resize_images( \
        #        tf_image , [ Oulu_NPU.HEIGHT , Oulu_NPU.WIDTH ] )

        #return tf_image , fields[-1], tf_image_copy, judgeArea

        # binarize: any positive label -> 1, else 0
        return tf.cond( fields[-1] > 0 , lambda:  (tf_image, 1) , \
                lambda: (tf_image, 0 )  )


    def _tfrecord_parser_noresize( self , data_record ):
        """Like _tfrecord_parser but takes a random HEIGHT x WIDTH crop
        (via _random_crop_wrt_eye_position) instead of cropping the full
        face box and resizing it."""
        features = {
                "img" : tf.FixedLenFeature( [] , tf.string ) , 
                "lx" : tf.FixedLenFeature( [] , tf.int64 ) ,
                "ly" : tf.FixedLenFeature( [] , tf.int64 ) ,
                "rx" : tf.FixedLenFeature( [] , tf.int64 ) ,
                "ry" : tf.FixedLenFeature( [] , tf.int64 ) ,
                "label" : tf.FixedLenFeature( [] , tf.int64 )
                }
        sample = tf.parse_single_example( data_record , features )

        tf_image = tf.image.decode_jpeg( sample['img'] )
        
        lx = tf.to_int32( sample['lx'] )
        ly = tf.to_int32( sample['ly'] )
        rx = tf.to_int32( sample['rx'] )
        ry = tf.to_int32( sample['ry'] )

        height = tf.shape( tf_image )[0]
        width  = tf.shape( tf_image )[1]

        eye_position = ( lx, ly, rx , ry )

        offset_x , offset_y = self._random_crop_wrt_eye_position(\
                eye_position , ( height , width ) )

        tf_image = tf.image.crop_to_bounding_box( tf_image , offset_y, \
                offset_x , Oulu_NPU.HEIGHT, Oulu_NPU.WIDTH )

        tf_image = tf.image.convert_image_dtype( tf_image , tf.float32 )

        # binarize: any positive label -> 1, else 0
        label = tf.to_int32( sample['label'] )
        return tf.cond( label > 0 , lambda:  (tf_image, 1) , \
                lambda: (tf_image, 0 )  )


    def _parser_whole_img( self , line ):
        """
        img_path left_x left_y right_x right_y label

        Parses one index line, crops the eye-derived face box (same
        geometry as _tfrecord_parser), resizes to HEIGHT x WIDTH float32
        and binarizes the label.
        """
        FIELD_DEFAULT = [ ['IMG_PATH'] ,[0], [0], [0], [0], [0] ]
        fields = tf.decode_csv( line , FIELD_DEFAULT , field_delim = ' ' )
        content = tf.read_file( fields[0] )
        tf_image = tf.image.decode_jpeg( content )
        
        lx = fields[1]
        ly = fields[2]
        rx = fields[3]
        ry = fields[4]

        # vertical midpoint between the two eyes
        ave_y = tf.to_int32( ( ly + ry ) / 2 )

        img_height = tf.shape( tf_image )[0]
        img_width  = tf.shape( tf_image )[1]

        # specify judging area first
        eye_wide = rx - lx

        judgeL = lx - eye_wide
        judgeR = rx + eye_wide
        judgeU = ave_y - eye_wide
        judgeD = tf.to_int32( ave_y + 2 * eye_wide )

        # clip into image original dimensions
        judgeL = tf.maximum( 0, judgeL )
        judgeU = tf.maximum( 0, judgeU )
        judgeR = tf.minimum( img_width -1 , judgeR )
        judgeD = tf.minimum( img_height-1 , judgeD )

        tf_image = tf.image.crop_to_bounding_box( tf_image , \
                judgeU, judgeL, judgeD- judgeU , judgeR - judgeL )

        #eye_position = tuple( fields[ 1: -1 ] )
        #height, width, _ = tf_image.shape
        #height = tf.shape( tf_image )[0]
        #width  = tf.shape( tf_image )[1]

        #offset_x , offset_y, judgeArea = self._random_crop_wrt_eye_position(\
        #offset_x , offset_y = self._random_crop_wrt_eye_position(\
        #        eye_position , ( height , width ) )

        #tf_image = tf.image.crop_to_bounding_box( tf_image , offset_y, \
        #        offset_x , Oulu_NPU.HEIGHT, Oulu_NPU.WIDTH )

        tf_image = tf.image.convert_image_dtype( tf_image , tf.float32 )
        tf_image = tf.image.resize_images( \
                tf_image , [ Oulu_NPU.HEIGHT , Oulu_NPU.WIDTH ] )

        #return tf_image , fields[-1], tf_image_copy, judgeArea

        # binarize: any positive label -> 1, else 0
        return tf.cond( fields[-1] > 0 , lambda:  (tf_image, 1) , \
                lambda: (tf_image, 0 )  )

    def _find_protocol( self , p , data_type , cross_check_id ):
        """Map (protocol, split, optional fold id) to the path of the
        rewritten protocol txt under tmp/protocols.

        NOTE(review): invalid arguments are only logged via
        logging.error, not raised -- the (wrong) path is still returned.
        """
        if p not in [ 'p1' , 'p2' , 'p3' , 'p4']:
            logging.error( "p should be : p1, p2, p3 or p4 " )

        if data_type not in [ 'Train' , 'Test' , 'Dev' ]:
            logging.error( "data_type should be : Train, Test or Dev " )
        
        if p in ['p3', 'p4'] and cross_check_id not in [1,2,3,4,5,6]:
            logging.error( "cross_check_id should be: 1,2,3,4,5 or 6 " )

        # p1/p2 have no cross-check fold in the file name
        if p in ['p1', 'p2']:
            return os.path.join( self._protocol_dir , p + '_' + data_type+ '.txt')
        return os.path.join( self._protocol_dir , \
                p + '_' + data_type+ '_' + str(cross_check_id) + '.txt')

    def _find_tfrecord( self , p , data_type , cross_check_id ):
        """Same mapping as _find_protocol but for the .tfrecord files
        under tmp/tfrecord (invalid arguments likewise only logged)."""
        if p not in [ 'p1' , 'p2' , 'p3' , 'p4']:
            logging.error( "p should be : p1, p2, p3 or p4 " )

        if data_type not in [ 'Train' , 'Test' , 'Dev' ]:
            logging.error( "data_type should be : Train, Test or Dev " )
        
        if p in ['p3', 'p4'] and cross_check_id not in [1,2,3,4,5,6]:
            logging.error( "cross_check_id should be: 1,2,3,4,5 or 6 " )

        # p1/p2 have no cross-check fold in the file name
        if p in ['p1', 'p2']:
            return os.path.join( self._tfrecord_dir , p + '_' + data_type+ '.tfrecord')
        return os.path.join( self._tfrecord_dir , \
                p + '_' + data_type+ '_' + str(cross_check_id) + '.tfrecord')

    def _process_protocols( self ):
        """Rewrite every official protocol txt under Protocols/ into a
        per-frame index file "pN_<Split>[_fold].txt" under tmp/protocols,
        joining in the per-video eye annotations."""
        txt_list = self._find_files( os.path.join( \
                self._base_path, 'Protocols' ) , r'.*txt' )

        # derive "pN_" prefix from the parent dir name "Protocol_N"
        # NOTE(review): splitting on '/' assumes POSIX paths -- would
        # break on Windows; os.sep would be portable
        transfered_txt_list = list( map( lambda p : \
                'p' + p.strip().split( '/' )[-2][-1]+ '_' + \
                p.strip().split( '/' )[-1], txt_list ) )

        transfered_txt_list = list( map( lambda p : \
                os.path.join( self._protocol_dir , p ) , transfered_txt_list ) )

        # eye-position annotation txts shipped next to the avis
        eye_anno_txt_list = self._find_files( os.path.join( \
                self._base_path , 'Train_files' ) , r'.*txt' ) + \
                self._find_files( os.path.join( self._base_path , \
                'Dev_files' ) , r'.*txt' ) + \
                self._find_files( os.path.join( self._base_path , \
                'Test_files' ) , r'.*txt' )

        for fi_txt , fo_txt in zip( txt_list , transfered_txt_list ):
            self._process_txt_file( fi_txt , fo_txt , eye_anno_txt_list )

    def _read_anno_to_dict( self , f ):
        """Read one per-video annotation txt of comma-separated ints into
        {frame_id: [remaining int fields]} (the eye coordinates)."""
        with open( f , 'r' ) as fi:
            lines = fi.readlines()

        d = {}
        for line in lines:
            line_split = line.strip().split( ',' )
            line_split = list( map( lambda s: int(s) , line_split ) )

            d[ line_split[0] ] = line_split[1:]
        return d

    def _process_txt_file( self , fi_txt , fo_txt , eye_anno_txt_list ):
        """Join one protocol list (label,video_id per line) with the eye
        annotations and the split frame jpgs, writing
        "img_path lx ly rx ry label" lines to fo_txt.  Skips if fo_txt
        already exists; frames annotated 0,0,0,0 are dropped."""
        if os.path.exists( fo_txt ):
            return
        # video_id -> protocol label
        fi_dict = {}
        with open( fi_txt , 'r' ) as fi:
            line = fi.readline()
            line = line.strip()

            while( line != '' ):
                line_list = line.split( ',' )
                fi_dict[line_list[1]] = int( line_list[0] )
                line = fi.readline()
                line = line.strip()

        # read all the eye annos from txts into dict( dict() )
        anno_dict = {}
        
        for file_name in fi_dict.keys():
            for anno_txt in eye_anno_txt_list:
                if file_name in anno_txt:
                    anno_dict[file_name] = self._read_anno_to_dict( anno_txt )
                    break

        img_list = os.listdir( self._split_dir )
       
        with open( fo_txt , 'w' ) as fw:
            for img in img_list:
                # NOTE(review): img[:8] assumes every video id is exactly
                # 8 characters (e.g. "1_1_22_1") -- confirm for ids with
                # wider numeric fields
                if img[:8] in fi_dict.keys():
                    # find frame id
                    frame_id = int( img.split('_')[-1].split('.')[0] )
                    anno_info = anno_dict[img[:8]][frame_id]
                    # filter out all the 0,0,0,0 annotation files
                    if anno_info[0] == 0 and anno_info[1] == 0 and anno_info[2] \
                            == 0 and anno_info[3] == 0:
                                continue
                    
                    # write the file, with format
                    # img_name_abspath , left_x, left_y, right_x, right_y , label
                    fw.write( os.path.join( self._split_dir , img )  + ' ' )
                    fw.write( str(anno_info[0]) + ' ' )
                    fw.write( str(anno_info[1]) + ' ' )
                    fw.write( str(anno_info[2]) + ' ' )
                    fw.write( str(anno_info[3]) + ' ' )
                    fw.write( str(fi_dict[img[:8]]) )
                    fw.write( '\n' )
                    
    def _check_split_video( self ):
        """Split all avis into frames, but only if tmp/imgs is empty
        (splitting is expensive)."""
        split_dir_list = os.listdir( self._split_dir )
        if len( split_dir_list ) == 0:
            self._break_all_avis()
    
    def _break_one_video_into_dict( self , video ):
        """Decode one .avi into {"<basename>_<frame_idx>.jpg": frame}
        using OpenCV; frame_idx is the 0-based position reported by
        CAP_PROP_POS_FRAMES before each read."""
        video = os.path.abspath( video )

        cap = cv2.VideoCapture( video )
        d = {}

        while( True ):
            next_frame_id = cap.get ( cv2.CAP_PROP_POS_FRAMES )
            ret , frame = cap.read()
            if ret == True:
                frame_name = os.path.basename( video )[:-4] + '_' + str( int( next_frame_id ) ) + '.jpg'
                d[frame_name] = frame
            else:
                break

        cap.release()
        return d

    def _find_files( self , dir_path , pattern ):
        """Recursively collect absolute paths of files under dir_path
        whose *name* matches the regex pattern (re.match, i.e. anchored
        at the start of the file name)."""
        file_list = os.listdir( dir_path )

        mov_list = []
        for f in file_list:
            file_abspath = os.path.join( dir_path , f )
            if os.path.isfile( file_abspath ):
                if re.match( pattern , f ):
                    mov_list.append( file_abspath )
            elif os.path.isdir( file_abspath ):
                mov_list.extend( self._find_files( file_abspath , pattern ) )
            else:
                pass
 
        return mov_list

    def _break_all_avis( self ):
        """Split every .avi under the dataset root into jpgs in tmp/imgs.
        Note: each video's frames are held in memory before writing."""
        avi_list = self._find_files( self._base_path , r'.*avi' )
        for idx, avi in enumerate( avi_list ):
            logging.info( "breaking videos: %d / %d " %( idx , len(avi_list) ) )
            d = self._break_one_video_into_dict( avi )
            for p, img in d.items():
                p = os.path.join( self._split_dir , p )
                cv2.imwrite( p , img )

if __name__ == "__main__":
    # Smoke test: pull one batch of 20 images from the protocol-1 test
    # tfrecord stream and dump them to disk via showImgs.
    logging.basicConfig( level = logging.INFO )

    sess = tf.InteractiveSession()
    oulu = Oulu_NPU( "/home/jh/working_data/anti-spoofing/Oulu_NPU" )

    def ds():
        return oulu.DataStream( 20 , 'p1', 'Test' )

    oulu.showImgs( sess , ds )
