import tensorflow as tf
import os.path as op
import os
from .parser import Parser, RPcropParser
import matplotlib.pyplot as plt
import random
import cv2
import re
import logging
import sys

class BaseData( object ):
    """Abstract base for dataset helpers exposing train/test/all tf.data pipelines.

    Subclasses must implement :meth:`_prepareEveryting` and populate
    ``self._train_file`` / ``self._test_file`` / ``self._train_test_file``
    (paths to line-oriented index files) plus the matching ``*_parser``
    callables used with ``tf.data.Dataset.map``.
    """

    def __init__( self , db_path , **kwargs ):
        self._db_path = db_path
        # index-file paths; placeholders until _prepareEveryting() fills them in
        self._train_file = ""
        self._test_file = ""
        self._train_test_file = ""

        # per-line parse callables; placeholders until _prepareEveryting()
        self._train_parser = ""
        self._test_parser = ""
        self._train_test_parser = ""

        self._prepareEveryting( **kwargs )

    def _prepareEveryting( self , **kwargs ):
        """Generate index files and set parsers; must be overridden."""
        raise NotImplementedError

    def _lineStream( self , info_file , parser , batch_size ):
        """Build an endless, batched one-shot iterator over one index file."""
        dataset = tf.data.TextLineDataset( info_file )
        dataset = dataset.map( parser )
        dataset = dataset.repeat().batch( batch_size ).prefetch( 20 )
        return dataset.make_one_shot_iterator().get_next()

    def trainDataStream( self , batch_size ):
        """Endless batched stream over the training index file."""
        return self._lineStream( self._train_file , self._train_parser , batch_size )

    def testDataStream( self , batch_size ):
        """Endless batched stream over the test index file."""
        return self._lineStream( self._test_file , self._test_parser , batch_size )

    def allDataStream( self , batch_size ):
        """
        for testing other datasets: endless batched stream over the
        combined (train+test) index file
        """
        return self._lineStream( self._train_test_file , self._train_test_parser , batch_size )

    def showStream( self , sess , dataStream ):
        """Run one batch of `dataStream` (a zero-arg callable returning a
        fetchable) and display each image with matplotlib."""
        imgs , labels = sess.run( dataStream() )
        for img in imgs:
            plt.imshow( img )
            plt.show()

class HomeMadeData( BaseData ):
    """Binary-classification data laid out as db_path/{pos,neg}/*.jpg."""

    def __init__( self , db_path ):
        super( HomeMadeData , self ).__init__( db_path )

    def _prepareEveryting( self ):
        """
        this method used to generate data_info file for
        traditional data structure by our own:

        db_path/
            pos/
                0.jpg
                5.jpg
                ...
            neg/
                1.jpg
                2.jpg
                ...
            tmp/
                train_info.txt
                test_info.txt
                train_test_info.txt

        only works for JPEG files, generateing data_info.txt file
        if it exists, won't regenerate

        Raises:
            ValueError: if pos/ or neg/ is missing, or no jpg files exist.
        """
        self._test_ratio = 0.2
        self._parser = Parser( 224 , 224 )

        self._train_parser = self._parser.std_full_img_parser
        self._test_parser = self._parser.std_full_img_parser
        self._train_test_parser = self._parser.std_full_img_parser

        db_path = op.abspath( self._db_path )
        pos_data_path = op.join( db_path , 'pos' )
        neg_data_path = op.join( db_path , 'neg' )
        tmp_data_path = op.join( db_path , 'tmp' )

        if not op.isdir( pos_data_path ) or not op.isdir( neg_data_path ):
            raise ValueError( "data doesn't exist" )

        if not op.exists( tmp_data_path ):
            os.mkdir( tmp_data_path )
            logging.info( "create a tmp directory" )

        def _index_lines( data_path , label ):
            # keep only visible *.jpg files (skip dot-files), and emit
            # one "abs_path label\n" index line per image
            names = [ x for x in os.listdir( data_path )
                      if x.endswith( '.jpg' ) and not x.startswith( '.' ) ]
            return [ op.join( data_path , x ) + ' ' + label + '\n' for x in names ]

        img_list = _index_lines( pos_data_path , '1' ) + \
                   _index_lines( neg_data_path , '0' )

        if not img_list:
            # previously this crashed with IndexError on img_list[-1]
            raise ValueError( "no jpg images found under pos/ or neg/" )

        random.shuffle( img_list )
        # last line of the combined list carries no trailing newline
        img_list[-1] = img_list[-1].strip()

        train_test_num = len( img_list )
        test_num = int( train_test_num * self._test_ratio )
        train_num = train_test_num - test_num

        self._train_file = op.join( tmp_data_path , "train_file.txt" )
        self._test_file = op.join( tmp_data_path , "test_file.txt" )
        self._train_test_file = op.join( tmp_data_path , "train_test_file.txt" )

        # NOTE(review): the three files are guarded independently -- if only
        # one of them is missing it gets rebuilt from a *fresh* shuffle, so
        # a regenerated train/test pair may overlap with the surviving file.
        if not op.exists( self._train_file ):
            with open( self._train_file , 'w' ) as fw:
                fw.writelines( img_list[:train_num] )

        if not op.exists( self._test_file ):
            with open( self._test_file , 'w' ) as fw:
                fw.writelines( img_list[train_num:] )

        if not op.exists( self._train_test_file ):
            with open( self._train_test_file , 'w' ) as fw:
                fw.writelines( img_list )

class OneLabelData( BaseData ):
    """Evaluation-only dataset where every sample shares one fixed label.

    Reads SSD-style annotation lines (``img_rel_path x1 y1 x2 y2 [...more
    rects...]``) from ssd_info.txt under the db path, keeps the largest
    rectangle per image and writes ``img_abspath x1 y1 x2 y2 label``.
    """

    def __init__( self , db_path , **kwargs ):
        super( OneLabelData , self ).__init__( db_path , **kwargs )

    def _prepareEveryting( self , **kwargs ):
        """
        this method used to generate data_info file for one label data
        the data_info file will be parallel with pictures

        only works for JPEG files, generateing data_info.txt file
        if it exists, won't regenerate

        should add key: label and ssd_info
        """
        self._parser = Parser( 224 , 224 )
        self._label = str( kwargs['label'] )
        self._ssd_txt = "ssd_info.txt"
        self._test_parser = self._parser.Homemade_data_face_crop_ssd_parser3

        db_path = os.path.abspath( self._db_path )

        def find_max_rect( rect_list ):
            """Return the [x1, y1, x2, y2] quadruple with the largest area."""
            num = len( rect_list ) // 4
            if num == 0:
                # previously this path raised UnboundLocalError on max_rect
                raise ValueError( "annotation holds no complete rectangle" )
            max_rect = rect_list[0:4]
            max_area = ( max_rect[2] - max_rect[0] + 1 ) * ( max_rect[3] - max_rect[1] + 1 )
            for inum in range( 1 , num ):
                rect_now = rect_list[ 4 * inum : 4 * (inum+1) ]
                area = ( rect_now[2] - rect_now[0] + 1 ) * ( rect_now[3] - rect_now[1] + 1 )
                if area > max_area:
                    max_area = area
                    max_rect = rect_now
            return max_rect

        with open( os.path.join( db_path , self._ssd_txt ) , 'r' ) as fr:
            ssd_anno = fr.readlines()

        img_list = []
        for issd_anno in ssd_anno:
            info = issd_anno.strip().split( " " )

            # skip lines without coordinates, or malformed lines whose second
            # field is another file name instead of a number
            if len( info ) == 1 or info[1].endswith( '.jpg' ):
                continue

            img_name = os.path.join( db_path , info[0] )
            # cheap existence check first, before parsing the coordinates
            if not os.path.exists( img_name ):
                continue

            img_anno = [ int(s) for s in info[1:] ]
            img_rect = find_max_rect( img_anno )

            img_list.append( "%s %d %d %d %d %s\n" % ( img_name ,
                    img_rect[0] , img_rect[1] , img_rect[2] , img_rect[3] ,
                    self._label ) )

        if not img_list:
            # previously this crashed with IndexError on img_list[-1]
            raise ValueError( "no usable annotation lines in " + self._ssd_txt )

        img_list[-1] = img_list[-1].strip()
        self._test_file = op.join( db_path , "test_file.txt" )

        if not os.path.exists( self._test_file ):
            with open( self._test_file , 'w' ) as fw:
                fw.writelines( img_list )

    def trainDataStream( self , batch_size ):
        raise NotImplementedError("shouldn't use this data as a training stream")

    def allDataStream( self , batch_size ):
        raise NotImplementedError("shouldn't use this data as a training stream")


class REPLAY_ATTACK( BaseData ):
    """Idiap REPLAY-ATTACK anti-spoofing dataset wrapper.

    On first use, every *.mov under <db_path>/{train,test,devel} is broken
    into jpg frames (one of every 50) under <db_path>/tmp/imgs/<split>/,
    then per-split index files with the format
    ``img_abspath lx ly rx ry label`` (label 1 = attack, 0 = real) are
    written using the face boxes shipped in <db_path>/face-locations/.
    """

    def __init__( self , db_path ):
        super( REPLAY_ATTACK , self ).__init__( db_path )

    def _prepareEveryting( self , **kwargs ):
        """Set up parsers and tmp directories, split the videos once, and
        generate the three per-split index files."""
        # a single parse function shared by all three streams
        self._parser = RPcropParser( 224, 224 ).parse_func

        self._train_parser = self._parser
        self._test_parser  = self._parser
        self._train_test_parser = self._parser

        self._train_split_dir = os.path.join( self._db_path , 'tmp' , 'imgs' , 'train')
        self._test_split_dir = os.path.join( self._db_path , 'tmp' , 'imgs' , 'test')
        self._devel_split_dir = os.path.join( self._db_path , 'tmp' , 'imgs' , 'devel')
        self._protocol_dir = os.path.join( self._db_path , 'tmp' , 'protocols' )

        self._train_file = os.path.join( self._train_split_dir , 'train_file.txt' )
        self._test_file = os.path.join( self._test_split_dir , 'test_file.txt' )
        # NOTE: the "devel" split is what allDataStream() iterates here
        self._train_test_file = os.path.join( self._devel_split_dir , 'devel_file.txt' )

        if not os.path.exists( self._train_split_dir ):
            os.makedirs( self._train_split_dir )
        if not os.path.exists( self._test_split_dir ):
            os.makedirs( self._test_split_dir )
        if not os.path.exists( self._devel_split_dir ):
            os.makedirs( self._devel_split_dir )
        if not os.path.exists( self._protocol_dir ):
            os.makedirs( self._protocol_dir )

        self._check_split_video()
        self._process_txt( 'train' )
        self._process_txt( 'test' )
        self._process_txt( 'devel' )

    def trainDisplayDataStream( self , batch_size ):
        """Endless batched train stream restricted to real accesses plus
        non-'print' (i.e. display/replay) attacks."""
        trainFileBefore = self._train_file
        trainFileAfter = os.path.join( self._train_split_dir , 'train_Display.txt' )
        self._filterByAttackType( trainFileBefore , trainFileAfter , Includes =  \
                [] , Excludes = ['print', ] )

        dataset = tf.data.TextLineDataset( trainFileAfter )
        dataset = dataset.map( self._train_parser )
        dataset = dataset.repeat().batch( batch_size ).prefetch( 20 )

        return dataset.make_one_shot_iterator().get_next()

    def devDisplayDataStream( self , batch_size ):
        """Endless batched test stream restricted to real accesses plus
        non-'print' attacks (same filter as trainDisplayDataStream)."""
        trainFileBefore = self._test_file
        trainFileAfter = os.path.join( self._test_split_dir , 'test_Display.txt' )
        self._filterByAttackType( trainFileBefore , trainFileAfter , Includes =  \
                [] , Excludes = ['print', ] )

        dataset = tf.data.TextLineDataset( trainFileAfter )
        dataset = dataset.map( self._test_parser )
        dataset = dataset.repeat().batch( batch_size ).prefetch( 20 )

        return dataset.make_one_shot_iterator().get_next()

    def _filterByAttackType( self , in_txt , out_txt , Includes , Excludes ):
        """Copy index lines from in_txt to out_txt, keeping real-access
        lines plus attack lines that contain every substring in Includes
        and none of the substrings in Excludes."""
        with open( in_txt , 'r' ) as fi:
            in_lines = fi.readlines()

        out_lines = []
        for line in in_lines:
            # the frame file's basename decides the sample's type
            l = line.split( ' ' )[0]
            l = l.split( '/' )[-1]

            # NOTE(review): 'client' is 6 chars but 7 are sliced, so this
            # only matches a name that is exactly 'client'; real-access
            # frames named 'clientXXX_...' fall through to the filters
            # below instead (they still survive the current usage with
            # Includes=[] / Excludes=['print']).
            if l[0:7] == 'client':
                out_lines.append( line )
                continue

            if_include = True
            for INC in Includes:
                if not INC in l:
                    if_include = False
                    break
            if not if_include:
                continue

            # if_exclude == True actually means "NOT excluded"
            if_exclude = True
            for EXC in Excludes:
                if EXC in l:
                    if_exclude = False
                    break
            if not if_exclude:
                continue

            out_lines.append( line )
        # NOTE(review): str.strip() returns a new string and the result is
        # discarded -- this line is a no-op; the last line keeps its newline
        out_lines[-1].strip()

        with open( out_txt , 'w' ) as fw:
            fw.writelines( out_lines )

    def _process_txt( self , dataType ):
        """Write the index file for one split ('train'/'test'/'devel'),
        pairing each extracted frame with its face box.  Skipped when the
        file already exists or the inputs are missing."""
        # collecting all the extracted imgs
        if dataType == 'train':
            img_path = self._train_split_dir
            write_path = self._train_file
        elif dataType == 'test':
            img_path = self._test_split_dir
            write_path = self._test_file
        else:
            img_path = self._devel_split_dir
            write_path = self._train_test_file
        
        if os.path.exists( write_path ):
            return

        face_path = os.path.join( self._db_path , 'face-locations' , dataType )
        if not os.path.exists( face_path ):
            return

        # read all tha face information into a dict
        # key: img_name (frame jpg basename)
        # value: [ lx, ly, rx, ry ]
        d = {}
        face_txt_list = self._find_files( face_path , r'.*face' )
        for face_txt in face_txt_list:
            d.update( self._read_anno_to_dict( face_txt ) )

        if not os.path.exists( img_path ):
            return

        img_list = os.listdir( img_path )
        img_list = list( map( lambda s: os.path.basename(s) , img_list ) )
        
        with open( write_path , 'w' ) as fw:
            for img in img_list:
                # frames without a usable face box were dropped earlier
                if img not in d.keys():
                    continue
                anno_info = d[img]
                # file-name convention: attack clips contain 'attack'
                if 'attack' in img:
                    anno_class = 1
                else:
                    anno_class = 0
                # format: img_abspath lx ly rx ry label
                fw.write( os.path.join( img_path , img )  + ' ' )
                fw.write( str(anno_info[0]) + ' ' )
                fw.write( str(anno_info[1]) + ' ' )
                fw.write( str(anno_info[2]) + ' ' )
                fw.write( str(anno_info[3]) + ' ' )
                fw.write( str( anno_class ) )
                fw.write( '\n' )

    def _read_anno_to_dict( self , f ):
        """Parse one .face annotation file into
        ``{ '<video>_<frame_id>.jpg' : [lx, ly, rx, ry] }``, converting
        the stored (x, y, w, h) boxes into corner coordinates and
        dropping all-zero (no face detected) entries."""
        with open( f , 'r' ) as fi:
            lines = fi.readlines()

        f = os.path.basename( f )
        if f[-5:] != '.face':
            return {}

        f = f[:-5]

        d = {}
        for line in lines:
            line_split = line.strip().split( ' ' )

            d_key = f + '_' + line_split[0] + '.jpg'
            d_value = list( map( lambda s: int(s) , line_split[1:] ) )

            # an all-zero box means "no face detected" -- skip the frame
            if d_value[0] + d_value[1] + d_value[2] + d_value[3] == 0:
                continue

            # left_x, left_Y , right_X, right_Y  (x, y, w, h -> corners)
            d_value[2] = d_value[0] + d_value[2]
            d_value[3] = d_value[1] + d_value[3]

            d[d_key] = d_value

        return d

    def _find_files( self , dir_path , pattern ):
        """Recursively collect absolute paths of files under dir_path whose
        basename matches `pattern` (re.match, anchored at the start).
        NOTE(review): duplicated verbatim in Oulu_NPU -- candidate for a
        shared helper."""
        file_list = os.listdir( dir_path )

        mov_list = []
        for f in file_list:
            file_abspath = os.path.join( dir_path , f )
            if os.path.isfile( file_abspath ):
                if re.match( pattern , f ):
                    mov_list.append( file_abspath )
            elif os.path.isdir( file_abspath ):
                mov_list.extend( self._find_files( file_abspath , pattern ) )
            else:
                pass

        return mov_list

    def _check_split_video( self ):
        """Split all three video sets once: only runs while the train
        frame directory is still empty."""
        split_dir_list = os.listdir( self._train_split_dir )
        if len( split_dir_list ) == 0:
            self._break_all_avis( os.path.join( self._db_path , 'train') , \
                    self._train_split_dir )
            self._break_all_avis( os.path.join( self._db_path , 'test') , \
                    self._test_split_dir )
            self._break_all_avis( os.path.join( self._db_path , 'devel') , \
                    self._devel_split_dir )

    def _break_all_avis( self , data_path , split_path ):
        """Extract sampled frames from every *.mov under data_path and
        write them as jpg files into split_path."""
        avi_list = self._find_files( data_path , r'.*mov' )
        for idx, avi in enumerate( avi_list ):
            logging.info( "breaking videos: %d / %d " %( idx , len(avi_list) ) )
            d = self._break_one_video_into_dict( avi )
            for p, img in d.items():
                p = os.path.join( split_path , p )
                cv2.imwrite( p , img )

    def _break_one_video_into_dict( self , video ):
        """Decode `video` and return { '<name>_<frame_id>.jpg' : frame }
        keeping one frame out of every 50."""
        video = os.path.abspath( video )
        cap = cv2.VideoCapture( video )
        d = {}

        while( True ):
            # index of the frame the following read() is about to return
            next_frame_id = cap.get ( cv2.CAP_PROP_POS_FRAMES )
            ret , frame = cap.read()
            
            if ret == False:
                break
            # keep only every 50th frame
            if next_frame_id % 50 != 0:
                continue

            if ret == True:
                frame_name = os.path.basename( video )[:-4] + \
                        '_' + str( int( next_frame_id ) ) + '.jpg'
                d[frame_name] = frame
            else:
                # unreachable: ret == False already broke out above
                break

        cap.release()
        return d

class Oulu_NPU( BaseData ):
    """OULU-NPU anti-spoofing dataset wrapper.

    Videos are split once into jpg frames (one of every 50) under
    <db_path>/tmp/imgs/, and the official protocol files are rewritten to
    <db_path>/tmp/protocols/ with per-frame eye locations appended, one
    line per frame: ``img_abspath lx ly rx ry label``.
    """

    def __init__( self , db_path ):
        """
        init with the containing directory path with structure:
        path/
            Dev_files/
                1_1_22_1.avi
                1_1_22_1.txt
                ...
                ....
            Protocols/
                Protocol_1
                Protocol_2
                Protocol_3
                Protocol_4
            Test_files/
                1_1_22_1.avi
                1_1_22_1.txt
                ...
                ....
            Train_files/
                1_1_22_1.avi
                1_1_22_1.txt
                ...
                ...
        """
        super( Oulu_NPU , self ).__init__( db_path )

    def _prepareEveryting( self ):
        """Set up parsers and tmp directories, default to protocol 1,
        split the videos once, and rewrite the protocol files."""
        self._parser = Parser( 224 , 224 )

        self._train_parser = self._parser.Oulu_parser
        self._test_parser = self._parser.Oulu_parser
        self._train_test_parser = self._parser.Oulu_parser

        self._split_dir = os.path.join( self._db_path , 'tmp' , 'imgs' )
        self._protocol_dir = os.path.join( self._db_path , 'tmp' , 'protocols' )

        # default to protocol 1 (no cross-check id); callers may switch
        # protocols later via the set_*_file() methods
        self.set_train_file( 'p1' , "" )
        self.set_test_file ( 'p1' , "" )
        self.set_train_test_file( 'p1' , "" )

        if not os.path.exists( self._split_dir ):
            os.makedirs( self._split_dir )
        if not os.path.exists( self._protocol_dir ):
            os.makedirs( self._protocol_dir )

        self._check_split_video()
        self._process_protocols()

    def trainDisplayDataStream( self , batch_size ):
        """Endless batched p1-Train stream filtered to file types 4, 5
        and 1 -- presumably display attacks plus real accesses; confirm
        against the OULU-NPU file-naming documentation."""
        trainFileBefore = self._find_protocol_file( 'p1' , 'Train', "" )
        trainFileAfter = os.path.join( self._protocol_dir , 'p1_Train_Display.txt' )
        self._filterByAttackType( trainFileBefore , trainFileAfter , [4,5,1] )

        dataset = tf.data.TextLineDataset( trainFileAfter )
        dataset = dataset.map( self._train_parser )
        dataset = dataset.repeat().batch( batch_size ).prefetch( 20 )

        return dataset.make_one_shot_iterator().get_next()

    def devDisplayDataStream( self , batch_size ):
        """Endless batched p1-Dev stream with the same type filter as
        trainDisplayDataStream."""
        trainFileBefore = self._find_protocol_file( 'p1' , 'Dev', "" )
        trainFileAfter = os.path.join( self._protocol_dir , 'p1_Dev_Display.txt' )
        self._filterByAttackType( trainFileBefore , trainFileAfter , [4,5,1] )

        dataset = tf.data.TextLineDataset( trainFileAfter )
        dataset = dataset.map( self._train_parser )
        dataset = dataset.repeat().batch( batch_size ).prefetch( 20 )

        return dataset.make_one_shot_iterator().get_next()

    def _filterByAttackType( self , in_txt , out_txt , AttackType ):
        """Copy index lines whose file-name "type" field (second-to-last
        underscore-separated token) is in AttackType."""
        with open( in_txt , 'r' ) as fi:
            in_lines = fi.readlines()

        out_lines = []
        for line in in_lines:
            # basename looks like <phone>_<session>_<type>_<frame>.jpg;
            # take the second-to-last '_' token as the type id
            l = line.split( ' ' )[0]
            l = l.split( '/' )[-1]
            l = l.split( '_' )[-2]

            if int(l) in AttackType:
                out_lines.append( line )
        # NOTE(review): str.strip() returns a new string and the result is
        # discarded -- this line is a no-op; the last line keeps its newline
        out_lines[-1].strip()

        with open( out_txt , 'w' ) as fw:
            fw.writelines( out_lines )

    def _find_protocol_file( self , p, data_type, cross_check_id ):
        """Return the rewritten protocol file path for protocol `p`
        ('p1'..'p4'), split `data_type` ('Train'/'Test'/'Dev') and, for
        p3/p4, the cross-check id 1..6.
        NOTE(review): invalid arguments only log an error; execution
        continues and a possibly nonexistent path is returned."""
        if p not in [ 'p1' , 'p2' , 'p3' , 'p4']:
            logging.error( "p should be : p1, p2, p3 or p4 " )

        if data_type not in [ 'Train' , 'Test' , 'Dev' ]:
            logging.error( "data_type should be : Train, Test or Dev " )

        if p in ['p3', 'p4'] and cross_check_id not in [1,2,3,4,5,6]:
            logging.error( "cross_check_id should be: 1,2,3,4,5 or 6 " )

        if p in ['p1', 'p2']:
            return os.path.join( self._protocol_dir , p + '_' + data_type+ '.txt')
        return os.path.join( self._protocol_dir , \
                p + '_' + data_type+ '_' + str(cross_check_id) + '.txt')

    def set_train_file( self , p , cross_check_id ):
        """Point the training stream at protocol `p`'s Train file."""
        self._train_file = self._find_protocol_file( p, 'Train', cross_check_id )

    def set_test_file( self , p , cross_check_id ):
        """Point the test stream at protocol `p`'s Dev file."""
        self._test_file = self._find_protocol_file( p, 'Dev', cross_check_id )

    def set_train_test_file( self , p , cross_check_id ):
        """Point allDataStream() at protocol `p`'s Test file."""
        self._train_test_file = self._find_protocol_file( p, 'Test', cross_check_id )

    def _process_protocols( self ):
        """
        transfer all the files in protoco dir to the ones holding eye location ones

        NOTE(review): the protocol number is recovered by splitting paths
        on '/', which only works on POSIX path separators.
        """
        txt_list = self._find_files( os.path.join( \
                self._db_path, 'Protocols' ) , r'.*txt' )

        # '<...>/Protocol_N/<name>.txt' -> 'pN_<name>.txt'
        transfered_txt_list = list( map( lambda p : \
                'p' + p.strip().split( '/' )[-2][-1]+ '_' + \
                p.strip().split( '/' )[-1], txt_list ) )

        transfered_txt_list = list( map( lambda p : \
                os.path.join( self._protocol_dir , p ) , transfered_txt_list ) )

        # eye-location txts live next to the videos in all three splits
        eye_anno_txt_list = self._find_files( os.path.join( \
                self._db_path , 'Train_files' ) , r'.*txt' ) + \
                self._find_files( os.path.join( self._db_path , \
                'Dev_files' ) , r'.*txt' ) + \
                self._find_files( os.path.join( self._db_path , \
                'Test_files' ) , r'.*txt' )

        for fi_txt , fo_txt in zip( txt_list , transfered_txt_list ):
            self._process_txt_file( fi_txt , fo_txt , eye_anno_txt_list )

    def _process_txt_file( self , fi_txt , fo_txt , eye_anno_txt_list ):
        """Rewrite one protocol file (lines '<label>,<video_name>') into
        fo_txt with one line per extracted frame:
        ``img_abspath lx ly rx ry label``.  Skipped if fo_txt exists."""
        if os.path.exists( fo_txt ):
            return
        fi_dict = {}
        with open( fi_txt , 'r' ) as fi:
            line = fi.readline()
            line = line.strip()

            # protocol line format: '<label>,<video_name>'
            while( line != '' ):
                line_list = line.split( ',' )
                fi_dict[line_list[1]] = int( line_list[0] )
                line = fi.readline()
                line = line.strip()

        # read all the eye annos from txts into dict( dict() )
        anno_dict = {}

        for file_name in fi_dict.keys():
            for anno_txt in eye_anno_txt_list:
                if file_name in anno_txt:
                    anno_dict[file_name] = self._read_anno_to_dict( anno_txt )
                    break

        img_list = os.listdir( self._split_dir )

        with open( fo_txt , 'w' ) as fw:
            for img in img_list:
                # img[:8] assumes video base names are exactly 8 chars
                # (e.g. '1_1_22_1') -- TODO confirm for all phone/session ids
                if img[:8] in fi_dict.keys():
                    # find frame id
                    frame_id = int( img.split('_')[-1].split('.')[0] )
                    # NOTE(review): raises KeyError if no annotation txt was
                    # found for this video, or the frame id is missing
                    anno_info = anno_dict[img[:8]][frame_id]
                    # filter out all the 0,0,0,0 annotation files
                    if anno_info[0] == 0 and anno_info[1] == 0 and anno_info[2] \
                            == 0 and anno_info[3] == 0:
                                continue

                    # write the file, with format
                    # img_name_abspath , left_x, left_y, right_x, right_y , label
                    fw.write( os.path.join( self._split_dir , img )  + ' ' )
                    fw.write( str(anno_info[0]) + ' ' )
                    fw.write( str(anno_info[1]) + ' ' )
                    fw.write( str(anno_info[2]) + ' ' )
                    fw.write( str(anno_info[3]) + ' ' )
                    fw.write( str(fi_dict[img[:8]]) )
                    fw.write( '\n' )

    def _read_anno_to_dict( self , f ):
        """Parse one comma-separated eye-location file into
        ``{ frame_id : [x1, y1, x2, y2] }``."""
        with open( f , 'r' ) as fi:
            lines = fi.readlines()

        d = {}
        for line in lines:
            line_split = line.strip().split( ',' )
            line_split = list( map( lambda s: int(s) , line_split ) )

            d[ line_split[0] ] = line_split[1:]
        return d

    def _find_files( self , dir_path , pattern ):
        """Recursively collect absolute paths of files under dir_path whose
        basename matches `pattern` (re.match, anchored at the start).
        NOTE(review): duplicated verbatim in REPLAY_ATTACK -- candidate
        for a shared helper."""
        file_list = os.listdir( dir_path )

        mov_list = []
        for f in file_list:
            file_abspath = os.path.join( dir_path , f )
            if os.path.isfile( file_abspath ):
                if re.match( pattern , f ):
                    mov_list.append( file_abspath )
            elif os.path.isdir( file_abspath ):
                mov_list.extend( self._find_files( file_abspath , pattern ) )
            else:
                pass

        return mov_list

    def _check_split_video( self ):
        """Split all videos once: only runs while the frame directory is
        still empty."""
        split_dir_list = os.listdir( self._split_dir )
        if len( split_dir_list ) == 0:
            self._break_all_avis()

    def _break_all_avis( self ):
        """Extract sampled frames from every *.avi under the db path and
        write them as jpg files into the split directory."""
        avi_list = self._find_files( self._db_path , r'.*avi' )
        for idx, avi in enumerate( avi_list ):
            logging.info( "breaking videos: %d / %d " %( idx , len(avi_list) ) )
            d = self._break_one_video_into_dict( avi )
            for p, img in d.items():
                p = os.path.join( self._split_dir , p )
                cv2.imwrite( p , img )

    def _break_one_video_into_dict( self , video ):
        """Decode `video` and return { '<name>_<frame_id>.jpg' : frame }
        keeping one frame out of every 50."""
        video = os.path.abspath( video )
        cap = cv2.VideoCapture( video )
        d = {}

        while( True ):
            # index of the frame the following read() is about to return
            next_frame_id = cap.get ( cv2.CAP_PROP_POS_FRAMES )
            ret , frame = cap.read()
            
            if ret == False:
                break
            # keep only every 50th frame
            if next_frame_id % 50 != 0:
                continue

            if ret == True:
                frame_name = os.path.basename( video )[:-4] + \
                        '_' + str( int( next_frame_id ) ) + '.jpg'
                d[frame_name] = frame
            else:
                # unreachable: ret == False already broke out above
                break

        cap.release()
        return d

class Oulu_NPU_write( Oulu_NPU ):
    """Variant of Oulu_NPU whose streams run exactly one epoch (no
    ``repeat()``), so the whole dataset can be drained and dumped to disk
    once via :meth:`writeAllImages`."""

    def __init__( self , db_path ):
        # Fixed: the original called super(Oulu_NPU, self), naming the
        # parent instead of this class and thereby skipping
        # Oulu_NPU.__init__ in the MRO.
        super( Oulu_NPU_write , self ).__init__( db_path )

        self._train_parser = self._parser.Oulu_parser
        self._test_parser = self._parser.Oulu_parser
        self._train_test_parser = self._parser.Oulu_parser

    def writeAllImages( self , sess , dataStream ):
        """Drain `dataStream` (a zero-arg callable returning a finite
        one-shot iterator) and write every frame as a jpg into ./pos
        (label == 0) or ./neg (any other label).

        NOTE(review): label 0 -> ./pos presumably means 0 is the genuine
        class here -- confirm against the parser's label convention.
        """
        import tempfile

        try:
            while True:
                imgs , labels = sess.run( dataStream() )
                for img , label in zip( imgs , labels ):
                    # map the float tensor back to a displayable 8-bit image
                    I = cv2.normalize( img , None , 0 , 255 \
                            , cv2.NORM_MINMAX , cv2.CV_8U )
                    I = cv2.cvtColor( I , cv2.COLOR_BGR2RGB )
                    out_dir = "./pos" if label == 0 else "./neg"
                    # mkstemp (not the race-prone, deprecated mktemp)
                    # atomically reserves a unique file name
                    fd , img_name = tempfile.mkstemp( dir = out_dir , suffix = ".jpg" )
                    os.close( fd )
                    cv2.imwrite( img_name , I )
        except tf.errors.OutOfRangeError:
            # one-shot iterator exhausted: the whole dataset was written
            pass

    def trainDataStream( self , batch_size ):
        """Single-epoch batched stream over the training index file."""
        dataset = tf.data.TextLineDataset( self._train_file )
        dataset = dataset.map( self._train_parser )
        dataset = dataset.batch( batch_size ).prefetch( 20 )

        return dataset.make_one_shot_iterator().get_next()

    def testDataStream( self , batch_size ):
        """Single-epoch batched stream over the test index file."""
        dataset = tf.data.TextLineDataset( self._test_file )
        dataset = dataset.map( self._test_parser )
        dataset = dataset.batch( batch_size ).prefetch( 20 )

        return dataset.make_one_shot_iterator().get_next()

    def allDataStream( self , batch_size ):
        """
        for testing other datasets: single-epoch batched stream over the
        combined index file
        """
        dataset = tf.data.TextLineDataset( self._train_test_file )
        dataset = dataset.map( self._train_test_parser )
        dataset = dataset.batch( batch_size ).prefetch( 20 )

        return dataset.make_one_shot_iterator().get_next()

class Oulu_NPU_TFR( Oulu_NPU ):
    """
    handle tf record format dataset, this class is used to hold
    the methods , be familiar with tf.Example related things
    """
    def __init__( self , db_path ):
        super( Oulu_NPU_TFR , self ).__init__( db_path )

    def _prepareEveryting( self ):
        """Like Oulu_NPU's setup, plus a tfrecord directory, tfrecord
        parsers, and one .tfrecord file generated per protocol file."""
        self._split_dir = os.path.join( self._db_path , 'tmp' , 'imgs' )
        self._protocol_dir = os.path.join( self._db_path , 'tmp' , 'protocols' )
        self._tfrecord_dir = os.path.join( self._db_path , 'tmp' , 'tfrecord' )

        # default to protocol 1; these setters are the tfrecord overrides
        self.set_train_file( 'p1' , "" )
        self.set_test_file ( 'p1' , "" )
        self.set_train_test_file( 'p1' , "" )

        if not os.path.exists( self._split_dir ):
            os.makedirs( self._split_dir )
        if not os.path.exists( self._protocol_dir ):
            os.makedirs( self._protocol_dir )
        if not os.path.exists( self._tfrecord_dir ):
            os.makedirs( self._tfrecord_dir )

        self._parser = Parser( 224 , 224 )
        self._train_parser = self._parser.tfrecord_parser_resize
        self._test_parser = self._parser.tfrecord_parser_resize
        self._train_test_parser = self._parser.tfrecord_parser_resize

        self._check_split_video()
        self._process_protocols()
        self._generate_tfrecord()

    def trainDataStream( self , batch_size ):
        """Endless batched stream over the training tfrecord file."""
        dataset = tf.data.TFRecordDataset( self._train_file )
        dataset = dataset.map( self._train_parser )
        # prefetch individual examples before batching here
        dataset = dataset.repeat().prefetch( 20 * batch_size )
        dataset = dataset.batch( batch_size )

        return dataset.make_one_shot_iterator().get_next()

    def testDataStream( self , batch_size ):
        """Endless batched stream over the test tfrecord file."""
        dataset = tf.data.TFRecordDataset( self._test_file )
        dataset = dataset.map( self._test_parser )
        dataset = dataset.repeat().prefetch( 20 * batch_size )
        dataset = dataset.batch( batch_size )

        return dataset.make_one_shot_iterator().get_next()

    def allDataStream( self , batch_size ):
        """
        for testing other datasets: endless batched stream over the
        combined tfrecord file
        """
        dataset = tf.data.TFRecordDataset( self._train_test_file )
        dataset = dataset.map( self._train_test_parser )
        dataset = dataset.repeat().prefetch( 20 * batch_size )
        dataset = dataset.batch( batch_size )

        return dataset.make_one_shot_iterator().get_next()

    def _find_tfrecord_file( self , p, data_type, cross_check_id ):
        """tfrecord analogue of Oulu_NPU._find_protocol_file: return the
        .tfrecord path for protocol `p`, split `data_type` and (p3/p4
        only) cross_check_id.
        NOTE(review): invalid arguments only log an error; execution
        continues and a possibly nonexistent path is returned."""
        if p not in [ 'p1' , 'p2' , 'p3' , 'p4']:
            logging.error( "p should be : p1, p2, p3 or p4 " )

        if data_type not in [ 'Train' , 'Test' , 'Dev' ]:
            logging.error( "data_type should be : Train, Test or Dev " )

        if p in ['p3', 'p4'] and cross_check_id not in [1,2,3,4,5,6]:
            logging.error( "cross_check_id should be: 1,2,3,4,5 or 6 " )

        if p in ['p1', 'p2']:
            return os.path.join( self._tfrecord_dir , p + '_' + data_type+ '.tfrecord')
        return os.path.join( self._tfrecord_dir , \
                p + '_' + data_type+ '_' + str(cross_check_id) + '.tfrecord')

    def set_train_file( self , p , cross_check_id ):
        """Point the training stream at protocol `p`'s Train tfrecord."""
        self._train_file = self._find_tfrecord_file( p, 'Train', cross_check_id )

    def set_test_file( self , p , cross_check_id ):
        """Point the test stream at protocol `p`'s Dev tfrecord."""
        self._test_file = self._find_tfrecord_file( p, 'Dev', cross_check_id )

    def set_train_test_file( self , p , cross_check_id ):
        """Point allDataStream() at protocol `p`'s Test tfrecord."""
        self._train_test_file = self._find_tfrecord_file( p, 'Test', cross_check_id )

    def _generate_tfrecord( self ):
        """Convert every rewritten protocol txt into a same-named
        .tfrecord file under the tfrecord directory."""
        txt_list = self._find_files( self._protocol_dir , r'.*txt' )
        transfered_list = list( map( lambda s : os.path.basename(s) , txt_list ) )
        # '<name>.txt' -> '<name>.tfrecord'
        transfered_list = list( map( lambda s: s[:-4] + '.tfrecord' , transfered_list ) )
        # same directory as self._tfrecord_dir, rebuilt from components
        transfered_list = list( map( lambda s : os.path.join( \
                self._db_path , 'tmp' , 'tfrecord', s ) , transfered_list ) )

        for fi_txt, fo_tfr in zip( txt_list , transfered_list ):
                self._generate_whole_tfrecord( fi_txt , fo_tfr )

    def _generate_example( self, img_path , eye_pos , label ):
        """
        follow the code style in Tensorflow_model/research/obj_detect 's data
        util

        Builds one tf.train.Example holding the raw jpg bytes, the four
        eye coordinates (lx, ly, rx, ry) and the integer label.
        """
        with tf.gfile.GFile( img_path , 'rb' ) as fid:
            encoded_jpg = fid.read()

        lx = eye_pos[0]
        ly = eye_pos[1]
        rx = eye_pos[2]
        ry = eye_pos[3]

        example = tf.train.Example( features = tf.train.Features( feature = { \
                'img' : tf.train.Feature( bytes_list = tf.train.BytesList( \
                value =[encoded_jpg] ) ), \
                'lx' : tf.train.Feature( int64_list = tf.train.Int64List( value = [lx] ) ), \
                'ly' : tf.train.Feature( int64_list = tf.train.Int64List( value = [ly] ) ), \
                'rx' : tf.train.Feature( int64_list = tf.train.Int64List( value = [rx] ) ), \
                'ry' : tf.train.Feature( int64_list = tf.train.Int64List( value = [ry] ) ), \
                'label' : tf.train.Feature( int64_list = tf.train.Int64List( \
                value = [label] ) )
                }) )

        return example

    def _generate_whole_tfrecord( self,  data_info_dir , export_dir ):
        """
        this func is compitable with the txt files generated by
        Oulu_NPU class

        Reads lines 'img_path lx ly rx ry label' from data_info_dir and
        serializes one Example per line into export_dir; skipped if the
        tfrecord already exists.
        """
        if os.path.exists( export_dir ):
            return
        writer = tf.python_io.TFRecordWriter( export_dir )

        with open( data_info_dir , 'r' ) as fi:
            lines = fi.readlines()

        for idx, line in enumerate( lines ):
            if idx % 100 == 0:
                logging.info( 'On image %d of %d' , idx , len(lines) )
            line_split = line.strip().split( ' ' )
            img_path = line_split[0]
            eye_pos = list( map(lambda s: int( s) , line_split[1:5] ) )
            label = int( line_split[-1] )

            tf_example = self._generate_example( img_path , eye_pos , label )
            writer.write( tf_example.SerializeToString() )

        writer.close()

if __name__ == "__main__":
    # Manual smoke test: prepare the REPLAY-ATTACK dataset, then pull one
    # training batch of display-attack frames and show it on screen.
    logging.basicConfig( level = logging.INFO )
    os.environ["CUDA_VISIBLE_DEVICES"] = "3"

    db_path = "/home/jh/working_data/anti-spoofing/replay_attack/original_REPLAY_ATTACK/replayattack"
    rpA = REPLAY_ATTACK( db_path )
    sess = tf.Session()
    rpA.showStream( sess , lambda: rpA.trainDisplayDataStream( 32 ) )
