import tensorflow as tf
import tempfile
import logging
import os
from skimage.feature import local_binary_pattern
import numpy as np
from abc import ABC

class BaseParser( ABC ):
    """Abstract base class for line parsers used by the input pipeline.

    Subclasses override :meth:`parse_func` to turn one annotation line
    into a training sample.
    """

    def __init__( self ):
        """No state is kept at this level; subclasses add their own."""

    def parse_func( self , line ):
        """Parse a single input line; placeholder returning ``None``.

        Concrete subclasses are expected to override this method.
        """

class RPcropParser( BaseParser ):
    """Parser that crops a face region derived from eye annotations.

    Each input line holds an image path, four eye coordinates and a flag
    field.  The crop box spans twice the eye distance horizontally and
    three times the eye distance vertically, clamped to the image area.

    NOTE: the returned label is inverted relative to the flag on the
    line — flag > 0 yields label 0, otherwise label 1.
    """

    def __init__( self , height , width ):
        """Store the output size every crop is resized to."""
        super( RPcropParser , self ).__init__()
        self._height = height
        self._width  = width

    def parse_func( self , line ):
        """Decode one annotation line into an ( image , label ) pair."""
        defaults = [['IMG_PATH'], [0], [0], [0], [0], [0]]
        fields = tf.decode_csv( line, defaults, field_delim = ' ' )

        raw = tf.read_file( fields[0] )
        img = tf.image.decode_jpeg( raw )
        img_h = tf.shape( img )[0]
        img_w = tf.shape( img )[1]

        left_x, left_y, right_x, right_y = fields[1], fields[2], fields[3], fields[4]
        eye_span = right_x - left_x
        center_y = ( left_y + right_y ) // 2

        # Raw crop box before clamping: half an eye-span beyond each eye
        # horizontally, one eye-span above and two below the eye line.
        box_l = left_x - eye_span // 2
        box_r = right_x + eye_span // 2
        box_u = center_y - eye_span
        box_d = center_y + 2 * eye_span

        # Keep the box inside the decoded image.
        box_l = tf.maximum( 0 , box_l )
        box_u = tf.maximum( 0 , box_u )
        box_r = tf.minimum( img_w - 1 , box_r )
        box_d = tf.minimum( img_h - 1 , box_d )

        img = tf.image.crop_to_bounding_box(
                img , box_u , box_l , box_d - box_u , box_r - box_l )
        img = tf.image.convert_image_dtype( img , tf.float32 )
        img = tf.image.resize_images( img , [ self._height , self._width ] )

        # Inverted labelling: positive flag -> 0, otherwise -> 1.
        return tf.cond( fields[-1] > 0 ,
                lambda: ( img , 0 ) ,
                lambda: ( img , 1 ) )

class Parser( object ):
    """
    A collection of line/record parsers that load an image and produce a
    ( image_tensor , label ) pair for a TF 1.x input pipeline.

    Parsers either use the full image or crop a sub-region (e.g. a face
    area) described by the annotation fields on each input line; every
    parser produces an image of dimension [ height , width ] given at
    construction time.
    """
    def __init__( self , height , width):

        # Output dimensions: every parser in this class resizes or crops
        # the image to [ height , width ].
        self._height = height
        self._width  = width

    def Oulu_parser( self , line ):
        """
        Parse one Oulu-NPU annotation line into ( image , label ).

        line format:
            img_absolute_path left_x left_y right_x right_y flag

        Label mapping: flag > 0 -> 1 , otherwise -> 0.
        """
        # positive: label =1 ;
        # negative: label =0 ;
        FIELD_DEFAULT = [['IMG_PATH'], [0], [0], [0], [0], [0]]
        fields = tf.decode_csv( line, FIELD_DEFAULT, field_delim = ' ' )
        content = tf.read_file( fields[0] )
        tf_image = tf.image.decode_jpeg( content )

        imgH = tf.shape( tf_image )[0]
        imgW = tf.shape( tf_image )[1]

        # Eye coordinates from the annotation line.
        lx = fields[1]
        ly = fields[2]
        rx = fields[3]
        ry = fields[4]

        # Crop box from the eyes: width is twice the eye distance,
        # spanning one eye-distance above the eye line down to two
        # eye-distances below it.
        cy = ( ly + ry ) // 2
        l = lx - ( rx - lx ) // 2
        r = rx + ( rx - lx ) // 2
        u = cy - ( rx - lx )
        d = cy + 2 * ( rx - lx )

        # Clamp the box to the image area.  NOTE(review): clamping the
        # right/bottom edges to imgW-1 / imgH-1 and then using
        # (merged_r - merged_l) as the width drops one border pixel —
        # confirm this off-by-one is intended.
        merged_l = tf.maximum( 0 , l )
        merged_u = tf.maximum( 0 , u )
        merged_r = tf.minimum( imgW -1 , r )
        merged_d = tf.minimum( imgH -1 , d )

        offset_x , offset_y = merged_l, merged_u

        tf_image = tf.image.crop_to_bounding_box( tf_image , offset_y, \
                offset_x , merged_d - merged_u , merged_r - merged_l )

        tf_image = tf.image.convert_image_dtype( tf_image , tf.float32 )
        tf_image = tf.image.resize_images( tf_image , [ self._height , self._width ] )

        #tf_image = tf.image.rgb_to_grayscale( tf_image )

        # Normalise the flag to a strict 0/1 label.
        return tf.cond( fields[-1] > 0 , lambda:  ( tf_image , 1) , \
                lambda: ( tf_image , 0 )  )

    def std_full_img_parser( self, line ):
        """
        Parse a line into the full (uncropped) image plus its label.

        line format:
           img_absolute_path if_fake_label
        """

        FIELD_DEFAULT = [ ['IMG_PATH'] , [0] ]
        fields = tf.decode_csv( line , FIELD_DEFAULT , field_delim = ' ' )
        content = tf.read_file( fields[0] )

        tf_image = tf.image.decode_jpeg( content )

        # recently just do ordinary parsing
        #if if_flip:
        #    tf_image = tf.image.random_flip_left_right( tf_image )
        tf_image = tf.image.convert_image_dtype( tf_image , tf.float32 )
        tf_image = tf.image.resize_images( tf_image , [ self._height , self._width ] )

        return tf_image , fields[1]


    def Homemade_data_random_crop_ssd_parser( self , line ):
        """
        Take a random [ height , width ] crop anywhere in the image.

        line format:
            img_absolute_path l u r d label
        """
        FIELD_DEFAULT = [['IMG_PATH'], [0], [0], [0], [0], [0]]
        fields = tf.decode_csv( line, FIELD_DEFAULT, field_delim = ' ' )
        content = tf.read_file( fields[0] )
        tf_image = tf.image.decode_jpeg( content )

        # NOTE(review): the bounding-box fields l/u/r/d are read but not
        # used here — the crop position is uniform over the whole image.
        l = fields[1]
        u = fields[2]
        r = fields[3]
        d = fields[4]

        height = tf.shape( tf_image )[0]
        width = tf.shape( tf_image )[1]

        #offset_x = l + ( r - l ) //4
        #offset_y = u + ( d - u ) //4

        # NOTE(review): assumes the image is strictly larger than the
        # target crop; otherwise maxval <= minval makes random_uniform
        # fail — confirm the input guarantees this.
        offset_x = tf.random_uniform( shape = () , minval = 0, \
                maxval = width - 1 - self._width , dtype = tf.int32 )

        offset_y = tf.random_uniform( shape = () , minval = 0, \
                maxval = height - 1 - self._height , dtype = tf.int32 )

        #crop_w = ( r - l ) //2
        #crop_h = ( d - u ) //2

        tf_image = tf.image.crop_to_bounding_box( tf_image , offset_y, \
                offset_x , self._height , self._width )

        tf_image = tf.image.convert_image_dtype( tf_image , tf.float32 )
        #tf_image = tf.image.resize_images( \
        #        tf_image , [ self._height , self._width ] )

        return tf_image , fields[-1]

    def Homemade_data_face_crop_ssd_parser3( self , line ):
        """
        Crop the annotated box itself (clipped to the image) and resize
        it to [ height , width ].

        line format:
            img_absolute_path l u r d label
        """
        FIELD_DEFAULT = [['IMG_PATH'], [0], [0], [0], [0], [0]]
        fields = tf.decode_csv( line, FIELD_DEFAULT, field_delim = ' ' )
        content = tf.read_file( fields[0] )
        tf_image = tf.image.decode_jpeg( content )

        l = fields[1]
        u = fields[2]
        r = fields[3]
        d = fields[4]

        height = tf.shape( tf_image )[0]
        width = tf.shape( tf_image )[1]

        #offset_x = l + ( r - l ) //4
        #offset_y = u + ( d - u ) //4

        #offset_x = tf.random_uniform( shape = () , minval = 0, \
        #        maxval = width - 1 - self._width , dtype = tf.int32 )

        #offset_y = tf.random_uniform( shape = () , minval = 0, \
        #        maxval = height - 1 - self._height , dtype = tf.int32 )

        #crop_w = ( r - l ) //2
        #crop_h = ( d - u ) //2

        offset_x = l
        offset_y = u

        # Clip the far corner to the image bounds.
        r = tf.minimum( r , width  -1 )
        d = tf.minimum( d , height -1 )

        # NOTE(review): crop size (d-u-1) x (r-l-1) trims one extra
        # pixel compared with the inclusive box size (r-l+1) — confirm
        # this off-by-one is intended.
        tf_image = tf.image.crop_to_bounding_box( tf_image , offset_y, \
                offset_x , d - u -1 , r - l-1 )

        tf_image = tf.image.convert_image_dtype( tf_image , tf.float32 )
        tf_image = tf.image.resize_images( \
                tf_image , [ self._height , self._width ] )

        #tf_image = tf.image.rgb_to_grayscale( tf_image )
        #tf_image = tf.squeeze( tf_image )

        return tf_image , fields[-1]

    def Homemade_data_random_crop_ssd_parser2( self , line ):
        """
        Take a random [ height , width ] crop whose position is biased
        towards the annotated box (soft boundary) while staying inside
        the image (hard boundary).

        line format:
            img_absolute_path l u r d label
        """
        FIELD_DEFAULT = [['IMG_PATH'], [0], [0], [0], [0], [0]]
        fields = tf.decode_csv( line, FIELD_DEFAULT, field_delim = ' ' )
        content = tf.read_file( fields[0] )
        tf_image = tf.image.decode_jpeg( content )

        l = fields[1]
        u = fields[2]
        r = fields[3]
        d = fields[4]

        height = tf.shape( tf_image )[0]
        width = tf.shape( tf_image )[1]

        # Hard boundary: offsets that keep the crop inside the image.
        hard_boundary_l = 0
        hard_boundary_r = width  -1 - self._width
        hard_boundary_u = 0
        hard_boundary_d = height -1 - self._height

        # Soft boundary: offsets that keep the crop centred near the box.
        soft_boundary_l = l - self._width // 2
        soft_boundary_r = r - self._width // 2
        soft_boundary_u = u - self._height // 2
        soft_boundary_d = d - self._height // 2

        # merge hard boundary and soft boundary

        merged_boundary_l = tf.maximum( hard_boundary_l , soft_boundary_l )
        merged_boundary_u = tf.maximum( hard_boundary_u , soft_boundary_u )
        merged_boundary_r = tf.minimum( hard_boundary_r , soft_boundary_r )
        merged_boundary_d = tf.minimum( hard_boundary_d , soft_boundary_d )

        # NOTE(review): if the box hugs the image border the merged
        # range can invert (maxval <= minval), which makes
        # random_uniform fail — confirm the annotations rule this out.
        offset_x = tf.random_uniform( shape = () , minval = merged_boundary_l, \
                maxval = merged_boundary_r , dtype = tf.int32 )

        offset_y = tf.random_uniform( shape = () , minval = merged_boundary_u, \
                maxval = merged_boundary_d , dtype = tf.int32 )

        tf_image = tf.image.crop_to_bounding_box( tf_image , offset_y, \
                offset_x , self._height , self._width )

        tf_image = tf.image.convert_image_dtype( tf_image , tf.float32 )
        #tf_image = tf.image.resize_images( \
        #        tf_image , [ self._height , self._width ] )

        return tf_image , fields[-1]

    def center_area_ssd_parser( self , line ):
        """
        Crop the central quarter-area of the annotated box (half the
        width and half the height, centred) and resize it.

        line format:
            img_absolute_path l u r d label
        """
        FIELD_DEFAULT = [['IMG_PATH'], [0], [0], [0], [0], [0]]
        fields = tf.decode_csv( line, FIELD_DEFAULT, field_delim = ' ' )
        content = tf.read_file( fields[0] )
        tf_image = tf.image.decode_jpeg( content )

        l = fields[1]
        u = fields[2]
        r = fields[3]
        d = fields[4]

        # Start a quarter of the box in from the top-left corner ...
        offset_x = l + ( r - l ) //4
        offset_y = u + ( d - u ) //4

        # ... and keep half of the box extent in each dimension.
        crop_w = ( r - l ) //2
        crop_h = ( d - u ) //2

        tf_image = tf.image.crop_to_bounding_box( tf_image , offset_y, \
                offset_x , crop_h , crop_w )

        tf_image = tf.image.convert_image_dtype( tf_image , tf.float32 )
        tf_image = tf.image.resize_images( \
                tf_image , [ self._height , self._width ] )

        return tf_image , fields[-1]

    def random_crop_img_wrt_eye_location_parser( self , line ):
        """
        this parser crops images corresponds to given eye location
        files, it's special for Oulu DataSet
        line format:
            img_absolute_path left_x left_y right_x right_y if_fake_label

        Returns ( image , label ) with label normalised to 0/1.
        """

        FIELD_DEFAULT = [['IMG_PATH'], [0], [0], [0], [0], [0]]
        fields = tf.decode_csv( line, FIELD_DEFAULT, field_delim = ' ' )
        content = tf.read_file( fields[0] )
        tf_image = tf.image.decode_jpeg( content )

        # ( left_x , left_y , right_x , right_y )
        eye_position = tuple( fields[ 1: -1 ] )
        height = tf.shape( tf_image )[0]
        width  = tf.shape( tf_image )[1]

        offset_x , offset_y = self._random_crop_wrt_eye_position(\
                eye_position , ( height , width ) )

        tf_image = tf.image.crop_to_bounding_box( tf_image , offset_y, \
                offset_x , self._height, self._width )

        tf_image = tf.image.convert_image_dtype( tf_image , tf.float32 )

        return tf.cond( fields[-1] > 0 , lambda:  (tf_image, 1) , \
                lambda: (tf_image, 0 )  )


    def random_crop_central_area_parser( self , line ):
        """
        crop central area of the face, mainly contain eyes and nose
        using two eyes annotation

        line format:
            img_absolute_path left_x left_y right_x right_y if_fake_label
        """
        FIELD_DEFAULT = [['IMG_PATH'], [0], [0], [0], [0], [0]]
        fields = tf.decode_csv( line, FIELD_DEFAULT, field_delim = ' ' )
        content = tf.read_file( fields[0] )
        tf_image = tf.image.decode_jpeg( content )


        # ( left_x , left_y , right_x , right_y )
        eye_position = tuple( fields[ 1: -1 ] )
        height = tf.shape( tf_image )[0]
        width  = tf.shape( tf_image )[1]

        offset_x , offset_y = self._random_central_area(\
                eye_position , ( height , width ) )

        tf_image = tf.image.crop_to_bounding_box( tf_image , offset_y, \
                offset_x , self._height, self._width )

        tf_image = tf.image.convert_image_dtype( tf_image , tf.float32 )

        return tf.cond( fields[-1] > 0 , lambda:  (tf_image, 1) , \
                lambda: (tf_image, 0 )  )

    def _random_central_area( self , eye_position , img_dimension ):
        """
        Pick a random crop offset around the central face area (a point
        below the eye midpoint, towards the nose).

        eye_position: ( left_x , left_y , right_x , right_y )
        img_dimension: ( img_height , img_width )
        Returns ( offset_x , offset_y ) for crop_to_bounding_box.
        """
        lx = eye_position[0]
        ly = eye_position[1]
        rx = eye_position[2]
        ry = eye_position[3]

        # Target centre: eye midpoint shifted down by 0.8 eye-distances.
        center_x = ( lx + rx ) // 2
        center_y = ( ly + ry ) // 2 +   tf.to_int32( tf.to_float( rx - lx )  * 0.8 )

        img_height = img_dimension[0]
        img_width  = img_dimension[1]

        # confirm hard boundary
        # the central point of cropping area
        hard_boundary_l = self._width // 2 + 1
        hard_boundary_r = img_width -1  - self._width // 2
        hard_boundary_u = self._height // 2 + 1
        hard_boundary_d = img_height -1 - self._height // 2

        # confirm user-defined soft boundary
        soft_boundary_l = center_x - ( rx -lx ) // 4
        soft_boundary_r = center_x + ( rx -lx ) // 4
        soft_boundary_u = center_y - ( rx -lx ) // 4
        soft_boundary_d = center_y + ( rx -lx ) // 4

        # merge hard boundary and soft boundary

        merged_boundary_l = tf.maximum( hard_boundary_l , soft_boundary_l )
        merged_boundary_u = tf.maximum( hard_boundary_u , soft_boundary_u )
        merged_boundary_r = tf.minimum( hard_boundary_r , soft_boundary_r )
        merged_boundary_d = tf.minimum( hard_boundary_d , soft_boundary_d )

        # Convert the random centre to a top-left offset.
        # NOTE(review): if the merged range inverts (face near the image
        # border) random_uniform gets maxval <= minval — confirm inputs.
        offset_x = tf.random_uniform( shape = () , minval = merged_boundary_l - self._width //2, \
                maxval = merged_boundary_r - self._width //2 , dtype = tf.int32 )

        offset_y = tf.random_uniform( shape = () , minval = merged_boundary_u - self._height //2, \
                maxval = merged_boundary_d - self._height //2 , dtype = tf.int32 )

        return offset_x, offset_y

    def _random_crop_wrt_eye_position( self , eye_position , img_dimension ):
        """
        Pick a random offset for a [ self._height , self._width ] crop
        that stays inside an eye-derived judging area.

        eye_position: a tuple holding ( left_x, left_y, right_x, right_y )
        img_dimension: a tuple holding ( img_height , img_width )

        return the first two parameters in tf.image.crop_to_bounding_box

        offset_height and offset_width,

        the target height and target width will be set to self._height
        and self._width, respectively

        NOTE(review): when the judging area is smaller than the crop
        size the random range inverts; the commented-out TODO below was
        meant to handle that case — confirm it cannot occur in practice.
        """
        lx = eye_position[0]
        ly = eye_position[1]
        rx = eye_position[2]
        ry = eye_position[3]

        ave_y = tf.to_int32( ( ly + ry ) / 2 )

        img_height = img_dimension[0]
        img_width  = img_dimension[1]

        # specify judging area first
        eye_wide = rx - lx

        # One eye-width margin left/right, one above, two below.
        judgeL = lx - eye_wide
        judgeR = rx + eye_wide
        judgeU = ave_y - eye_wide
        judgeD = tf.to_int32( ave_y + 2 * eye_wide )

        # clip into image original dimensions
        judgeL = tf.maximum( 0, judgeL )
        judgeU = tf.maximum( 0, judgeU )
        judgeR = tf.minimum( img_width -1 , judgeR )
        judgeD = tf.minimum( img_height-1 , judgeD )

        #TODO
        #if judgeR < judgeL + Oulu_NPU.WIDTH - 1 \
        #        or judgeD < judgeU + Oulu_NPU.HEIGHT -1 :
        #    return None

        # shrink the bounding box of crop box's offset point
        judgeR = judgeR - ( self._width -1 )
        judgeD = judgeD - ( self._height-1 )

        # generate random point
        offset_x = tf.random_uniform( shape = () , minval = judgeL, \
                maxval = judgeR +1 , dtype = tf.int32 )

        offset_y = tf.random_uniform( shape = () , minval = judgeU, \
                maxval = judgeD +1 , dtype = tf.int32 )

        return offset_x, offset_y

    def tfrecord_parser_noresize( self , data_record ):
        """
        Parse one TFRecord example and take a random eye-guided crop of
        exactly [ self._height , self._width ] (no resizing).

        Expected features: img (JPEG bytes), lx/ly/rx/ry (eye coords),
        label.  Returns ( image , label ) with label normalised to 0/1.
        """
        features = {
                "img" : tf.FixedLenFeature( [] , tf.string ) ,
                "lx" : tf.FixedLenFeature( [] , tf.int64 ) ,
                "ly" : tf.FixedLenFeature( [] , tf.int64 ) ,
                "rx" : tf.FixedLenFeature( [] , tf.int64 ) ,
                "ry" : tf.FixedLenFeature( [] , tf.int64 ) ,
                "label" : tf.FixedLenFeature( [] , tf.int64 )
                }
        sample = tf.parse_single_example( data_record , features )

        tf_image = tf.image.decode_jpeg( sample['img'] )

        lx = tf.to_int32( sample['lx'] )
        ly = tf.to_int32( sample['ly'] )
        rx = tf.to_int32( sample['rx'] )
        ry = tf.to_int32( sample['ry'] )

        height = tf.shape( tf_image )[0]
        width  = tf.shape( tf_image )[1]

        eye_position = ( lx, ly, rx , ry )

        offset_x , offset_y = self._random_crop_wrt_eye_position(\
                eye_position , ( height , width ) )

        tf_image = tf.image.crop_to_bounding_box( tf_image , offset_y, \
                offset_x , self._height, self._width )

        tf_image = tf.image.convert_image_dtype( tf_image , tf.float32 )

        label = tf.to_int32( sample['label'] )
        return tf.cond( label > 0 , lambda:  (tf_image, 1) , \
                lambda: (tf_image, 0 )  )

    def tfrecord_parser_resize( self , data_record ):
        """
        Parse one TFRecord example, crop a deterministic eye-centred
        box and resize it to [ self._height , self._width ].

        Expected features: img (JPEG bytes), lx/ly/rx/ry (eye coords),
        label.  Returns ( image , label ) with label normalised to 0/1.
        """
        features = {
                "img" : tf.FixedLenFeature( [] , tf.string ) ,
                "lx" : tf.FixedLenFeature( [] , tf.int64 ) ,
                "ly" : tf.FixedLenFeature( [] , tf.int64 ) ,
                "rx" : tf.FixedLenFeature( [] , tf.int64 ) ,
                "ry" : tf.FixedLenFeature( [] , tf.int64 ) ,
                "label" : tf.FixedLenFeature( [] , tf.int64 )
                }
        sample = tf.parse_single_example( data_record , features )

        tf_image = tf.image.decode_jpeg( sample['img'] )

        lx = tf.to_int32( sample['lx'] )
        ly = tf.to_int32( sample['ly'] )
        rx = tf.to_int32( sample['rx'] )
        ry = tf.to_int32( sample['ry'] )

        ave_y = tf.to_int32( ( ly + ry ) / 2 )

        img_height = tf.shape( tf_image )[0]
        img_width  = tf.shape( tf_image )[1]

        # specify judging area first
        eye_wide = rx - lx

        # Box: half an eye-width margin left/right, one eye-width
        # above and below the eye line (tighter than the noresize
        # variant's box).
        judgeL = lx - tf.to_int32(eye_wide/2)
        judgeR = rx + tf.to_int32(eye_wide/2)
        judgeU = ave_y - eye_wide
        judgeD = tf.to_int32( ave_y + eye_wide )

        # clip into image original dimensions
        judgeL = tf.maximum( 0, judgeL )
        judgeU = tf.maximum( 0, judgeU )
        judgeR = tf.minimum( img_width -1 , judgeR )
        judgeD = tf.minimum( img_height-1 , judgeD )

        tf_image = tf.image.crop_to_bounding_box( tf_image , \
                judgeU, judgeL, judgeD- judgeU , judgeR - judgeL )

        tf_image = tf.image.convert_image_dtype( tf_image , tf.float32 )
        tf_image = tf.image.resize_images( \
                tf_image , [ self._height , self._width ] )

        label = tf.to_int32( sample['label'] )
        return tf.cond( label > 0 , lambda:  (tf_image, 1) , \
                lambda: (tf_image, 0 )  )
