#!/usr/bin/env python

'''
Provides the functionality required to get frames of video from the capture
device, detect frame differences, and apply effects/overlays on the frames.
'''

import errno
import glob
import os
import time

import gst
import gobject
gobject.threads_init()

import Image, ImageEnhance, ImageOps, ImageDraw, ImageFont

from eventmanager import EventManager
from videoframe import VideoFrame
from natsort import natsorted
from motiondetector import MotionDetector

#-------------------------------------------------------------------------------
# module constants
# Default V4L capture device node.
DEVICE_DEFAULT                      = '/dev/video0'

# Default capture frame dimensions in pixels.
WIDTH_DEFAULT                       = 320
HEIGHT_DEFAULT                      = 240
SIZE_DEFAULT                        = WIDTH_DEFAULT, HEIGHT_DEFAULT

# Bits-per-pixel and color depth used when building the gstreamer caps.
BPP_DEFAULT                         = 24
DEPTH_DEFAULT                       = 24

# Frame rate limits in frames per second; the max is 2**31 - 1 (presumably
# the gstreamer signed-int range limit -- TODO confirm).
FRAME_RATE_MIN                      = 0
FRAME_RATE_MAX                      = 2147483647
FRAME_RATE_DEFAULT                  = 30

# Color-space tokens interpolated into the 'video/x-raw-%s' caps string.
COLOR_SPACE_RGB                     = 'rgb'
COLOR_SPACE_YUV                     = 'yuv'
COLOR_SPACE_GRAY                    = 'gray'
COLOR_SPACE_DEFAULT                 = COLOR_SPACE_RGB

# PIL ImageEnhance factors: 1.0 leaves the image unchanged, 0.0 and 2.0 are
# the extremes exposed through the FrameGrabber attributes.
BRIGHTNESS_MIN                      = 0.0
BRIGHTNESS_MAX                      = 2.0
BRIGHTNESS_DEFAULT                  = 1.0

CONTRAST_MIN                        = 0.0
CONTRAST_MAX                        = 2.0
CONTRAST_DEFAULT                    = 1.0

COLOR_MIN                           = 0.0
COLOR_MAX                           = 2.0
COLOR_DEFAULT                       = 1.0

SHARPNESS_MIN                       = 0.0
SHARPNESS_MAX                       = 2.0
SHARPNESS_DEFAULT                   = 1.0

# Rotation applied via PIL Image.rotate, in degrees counter-clockwise.
ROTATION_MIN                        = 0
ROTATION_MAX                        = 360
ROTATION_DEFAULT                    = 0

# Boolean toggles for the PIL ImageOps transformations.
FLIP_DEFAULT                        = False
MIRROR_DEFAULT                      = False
GRAYSCALE_DEFAULT                   = False
AUTO_CONTRAST_DEFAULT               = False

ASPECT_RATIO_DEFAULT                = 4.0 / 3.0

# Motion detection is off by default; the threshold is passed through to
# MotionDetector (presumably a fraction of changed pixels -- TODO confirm).
MOTION_DETECTION_DEFAULT            = False   
MOTION_DETECTION_THRESHOLD_MAX      = 1.0
MOTION_DETECTION_THRESHOLD_MIN      = 0.0
MOTION_DETECTION_THRESHOLD_DEFAULT  = 0.01

# Overlay toggles and the truetype font path for the timestamp/label overlay.
TIMESTAMP_OVERLAY_DEFAULT           = True
LABEL_OVERLAY_DEFAULT               = True
OVERLAY_FONT_DEFAULT                = '/usr/share/fonts/truetype/freefont/FreeSans.ttf'

def list_devices():
    '''
    Lists all devices with a V4L device name (ie, /dev/video*).
    
    @return:    A list of V4L device paths, in natural sort order.
    @rtype:     List
    '''
    device_paths = glob.glob('/dev/video*')
    return natsorted(device_paths)

def is_device(device):
    '''
    Checks if a device file exists by performing a C{stat} call on it.  Does
    not actually check if C{device} is a V4L device, but rather just verifies
    its existence.
    
    @param  device: The path of the device to check.
    @type   device: String
    @return:        True if the file exists, otherwise False.
    @rtype:         Boolean
    @raise OSError: If the C{stat} call fails for any reason other than the
                    path not existing (eg, a permission error).
    '''
    try:
        os.stat(device)
        return True
    except OSError as e:
        # ENOENT (was a magic '2') means the path does not exist; any other
        # stat failure is unexpected and propagated to the caller.
        if e.errno == errno.ENOENT:
            return False
        raise

class FrameGrabber(EventManager):
    '''
    Provides video frames from a Gstreamer pipeline, abstracting away the
    Gstreamer internals.
    
    Listeners receive each processed frame through the 'new-frame' event and,
    when C{motion_detection} is enabled, motion results through the
    'motion-detected' event.
    '''
    
#-------------------------------------------------------------------------------
# private variables
    __device = DEVICE_DEFAULT
    __size = SIZE_DEFAULT
    __bpp = BPP_DEFAULT
    __depth = DEPTH_DEFAULT
    __frame_rate = FRAME_RATE_DEFAULT
    __color_space = COLOR_SPACE_DEFAULT
    __last_frame_time = time.time()
    
#-------------------------------------------------------------------------------
# getters
    def __get_device(self):
        # Read back from the source element so the value always reflects
        # gstreamer's actual state rather than a cached copy.
        return self.__gst_v4l2src.get_property('device')
    def __get_size(self): return self.__size
    def __get_bpp(self): return self.__bpp
    def __get_depth(self): return self.__depth
    def __get_frame_rate(self): return self.__frame_rate
    def __get_color_space(self): return self.__color_space
    def __get_motion_detection_threshold(self):
        return self.__motion_detector.threshold

#-------------------------------------------------------------------------------
# setters -- each capture-related setter pushes its new value into the
# pipeline by rebuilding the capsfilter caps.
    def __set_device(self, value):
        self.__gst_v4l2src.set_property('device', value)
    def __set_size(self, value):
        self.__size = value
        self.__update_caps()
    def __set_bpp(self, value):
        self.__bpp = value
        self.__update_caps()
    def __set_depth(self, value):
        self.__depth = value
        self.__update_caps()
    def __set_frame_rate(self, value):
        # BUGFIX: previously assigned to self.__framerate (note the missing
        # underscore), so the new rate was stored under the wrong attribute
        # and never reached __update_caps() or the frame_rate getter.
        self.__frame_rate = value
        self.__update_caps()
    def __set_color_space(self, value):
        self.__color_space = value
        self.__update_caps()  
    def __set_motion_detection_threshold(self, value):
        self.__motion_detector.threshold = value

#-------------------------------------------------------------------------------
# public properties
    # -- gstreamer properties
    device = property(__get_device, __set_device)
    size = property(__get_size, __set_size)
    bpp = property(__get_bpp, __set_bpp)
    depth = property(__get_depth, __set_depth)
    frame_rate = property(__get_frame_rate, __set_frame_rate)
    color_space = property(__get_color_space, __set_color_space)
    #-- PIL properties (applied per-frame in __on_new_buffer)
    brightness = BRIGHTNESS_DEFAULT
    contrast = CONTRAST_DEFAULT
    color = COLOR_DEFAULT
    sharpness = SHARPNESS_DEFAULT
    rotation = ROTATION_DEFAULT
    flip = FLIP_DEFAULT
    mirror = MIRROR_DEFAULT
    grayscale = GRAYSCALE_DEFAULT
    auto_contrast = AUTO_CONTRAST_DEFAULT
    aspect_ratio = ASPECT_RATIO_DEFAULT
    motion_detection = MOTION_DETECTION_DEFAULT
    timestamp_overlay = TIMESTAMP_OVERLAY_DEFAULT
    label_overlay = LABEL_OVERLAY_DEFAULT
    overlay_enabled = True

#-------------------------------------------------------------------------------
# private methods
    def __init__(self, device, size=(320,240), 
        framerate=FRAME_RATE_DEFAULT, label=None, autostart=True):
        '''
        Initialize a new FrameGrabber.
        
        @param  device:     Path of the V4L device to capture from.
        @type   device:     String
        @param  size:       Desired frame size as a (width, height) tuple.
        @type   size:       Tuple
        @param  framerate:  Desired capture rate in frames per second.
        @type   framerate:  Integer
        @param  label:      Text drawn by the label overlay; defaults to the
                            device path when not given.
        @type   label:      String
        @param  autostart:  If True, frame grabbing begins immediately.
        @type   autostart:  Boolean
        '''
        events = ['new-frame', 'motion-detected']
        EventManager.__init__(self, events)
        self.__device = device
        self.__size = size
        self.__frame_rate = framerate
        if label is None:
            self.label = device
        else:
            self.label = label
        self.__init_gstreamer(autostart)
        self.__motion_detector = MotionDetector(
            threshold=MOTION_DETECTION_THRESHOLD_DEFAULT)
        
    def __init_gstreamer(self, autostart=True):
        '''
        Initialize the Gstreamer pipeline by adding elements, linking them, and
        setting their properties.
        
        @param  autostart:  If True, frame grabbing will begin immediately.
        @type   autostart:  Boolean
        '''
        self.__gst_pipeline = gst.parse_launch('''
            v4l2src name="gst_v4l2src" !
            videoscale ! videorate ! ffmpegcolorspace !
            capsfilter name="gst_capsfilter" !
            appsink name="gst_appsink"
            ''')
        self.__gst_v4l2src = self.__gst_pipeline.get_by_name('gst_v4l2src')
        self.__gst_capsfilter = self.__gst_pipeline.get_by_name('gst_capsfilter')
        self.__gst_appsink = self.__gst_pipeline.get_by_name('gst_appsink')
        
        self.__gst_v4l2src.set_property('device', self.__device)
        
        self.__update_caps()
        
        # emit-signals must be enabled for appsink to fire 'new-buffer'.
        self.__gst_appsink.set_property('emit-signals', True)
        self.__gst_appsink.connect('new-buffer', self.__on_new_buffer)
        self.__gst_appsink.connect('eos', self.__on_eos)
        
        if autostart: self.start()

    
    def __on_new_buffer(self, element):
        '''
        Handles the appsink 'new-buffer' signal: pulls the raw buffer,
        runs motion detection, applies the PIL enhancement/transform chain
        and overlays, then emits a 'new-frame' event with the resulting
        VideoFrame.  Finally sleeps off any remaining frame budget to pace
        delivery at roughly C{frame_rate}.
        '''
        time_begin = time.time()
        
        buffer = element.emit('pull-buffer')
        img = Image.fromstring('RGB', self.size, buffer)
        
        if self.motion_detection:
            # Detect on a small thumbnail to keep the per-frame cost low.
            thumbnail = img.resize((64,48))
            if self.__motion_detector.next_image(thumbnail):
                self.emit('motion-detected', self.__motion_detector.last_value)
        
        #-- perform image transformations
        enhancer = ImageEnhance.Brightness(img)
        img = enhancer.enhance(self.brightness)
        
        enhancer = ImageEnhance.Contrast(img)
        img = enhancer.enhance(self.contrast)
        
        enhancer = ImageEnhance.Color(img)
        img = enhancer.enhance(self.color)
        
        enhancer = ImageEnhance.Sharpness(img)
        img = enhancer.enhance(self.sharpness)
        
        # Skip the rotate call entirely at the default 0-degree rotation;
        # rotate(0) would only produce an identical copy.
        if self.rotation:
            img = img.rotate(self.rotation)
        
        if self.flip: img = ImageOps.flip(img)
        if self.mirror: img = ImageOps.mirror(img)
        if self.grayscale: img = ImageOps.grayscale(img)
        if self.auto_contrast: img = ImageOps.autocontrast(img)
        
        timestamp = time.time()
        
        if self.overlay_enabled:
            img = self.__overlay_image(img, timestamp, self.label)
        
        vf = VideoFrame(img, timestamp)
        self._last_frame = vf
        self.emit('new-frame', vf)
        
        time_end = time.time()
        # BUGFIX: only sleep when time remains in the frame budget.  The old
        # code passed a negative delay to time.sleep() when processing ran
        # long (raising IOError errno 22) and then slept a FULL extra frame
        # period, making an already-late pipeline even slower.  A zero
        # frame_rate is also guarded to avoid ZeroDivisionError.
        if self.frame_rate > 0:
            delay_time = (1.0 / self.frame_rate) - (time_end - time_begin)
            if delay_time > 0:
                time.sleep(delay_time)

    def __overlay_image(self, img, timestamp, label):
        '''
        Draws overlays onto an image and returns it.
        
        A translucent black bar is drawn along the bottom edge, with a
        millisecond-resolution timestamp on the left and C{label} on the
        right.
        
        @param  img:        The frame to draw on.
        @param  timestamp:  Frame capture time (seconds since the epoch).
        @type   timestamp:  Float
        @param  label:      Text drawn right-aligned in the overlay bar.
        @type   label:      String
        @return:            The image with the overlay composited onto it.
        '''
        overlay_img = Image.new('RGBA', img.size, (255,255,255,0))

        timeobj = time.localtime(timestamp)
        timestr = time.strftime('%d/%m/%Y %H:%M:%S', timeobj)
        # Append zero-padded milliseconds; strftime has no sub-second
        # directive here.  Clamp at 999 so rounding a value like x.9996
        # cannot produce '1000'.
        secs = int(timestamp)
        msec = min(int(round((timestamp - secs) * 1000)), 999)
        timestr = '%s.%03d' % (timestr, msec)
        
        draw = ImageDraw.Draw(overlay_img)
        # PIL's built-in bitmap font; OVERLAY_FONT_DEFAULT could be loaded
        # with ImageFont.truetype() instead for a scalable font.
        font = ImageFont.load_default()
        
        textsize = draw.textsize(timestr, font=font)
        pad = 2
        recttop = img.size[1] - textsize[1] - (pad*2)
        rectleft = -1
        rectbottom = img.size[1]
        rectright = img.size[0]
        
        draw.rectangle(
            [rectleft, recttop, rectright, rectbottom],
            outline=(255, 255, 255, 255), fill=(0,0,0,127))
        texttop = img.size[1] - textsize[1] - pad
        textleft = pad
        draw.text((textleft, texttop), timestr, font=font)
        
        textsize = draw.textsize(label, font=font)
        textleft = img.size[0] - textsize[0] - pad - 1
        
        draw.text((textleft, texttop), label, font=font)
        
        del draw
        # Use the overlay itself as the paste mask so its alpha channel
        # controls the blend.
        img.paste(overlay_img, box=None, mask=overlay_img)
        
        return img

    def __on_eos(self, element):
        '''
        This method gets called when gst_appsink emits an EOS signal.
        
        @raise IOError: Always; end-of-stream is unexpected for a live
                        capture device.
        '''
        raise IOError('Gstreamer source has reached End of Stream.')
    
    def __update_caps(self):
        '''
        Updates the capsfilter caps property from the current color space,
        size, frame rate, bpp and depth settings.
        '''
        self.__gst_caps = gst.caps_from_string('''
            video/x-raw-%s,width=%d,height=%d,framerate=%d/1,bpp=%d,depth=%s
            ''' % (self.__color_space, 
                    self.__size[0], 
                    self.__size[1], 
                    self.__frame_rate,
                    self.__bpp,
                    self.__depth))
        self.__gst_capsfilter.set_property('caps', self.__gst_caps)

#-------------------------------------------------------------------------------
# public methods
    def start(self):
        '''
        Start grabbing frames of video.
        '''
        self.__gst_pipeline.set_state(gst.STATE_PLAYING)
        
    def pause(self):
        '''
        Pause grabbing frames of video.
        '''
        self.__gst_pipeline.set_state(gst.STATE_PAUSED)
        
    def stop(self):
        '''
        Stop grabbing frames of video.
        '''
        self.__gst_pipeline.set_state(gst.STATE_NULL)

#-------------------------------------------------------------------------------
# test script
if __name__ == '__main__':

    import sys
    import gtk
    
    class GtkMain():
        '''
        Minimal GTK application that displays frames from a FrameGrabber
        and prints the measured frame period/rate to stdout.
        '''
        
        # Timestamp of the previously displayed frame, used to measure the
        # actual frame period.
        last_timestamp = time.time()
        
        def __init__(self, device):
            
            vsrc = FrameGrabber(device, size=(320,240), framerate=15)
            # The PIL processing attributes (brightness, contrast, color,
            # sharpness, rotation, grayscale, auto_contrast, ...) can be
            # adjusted here to exercise the per-frame transformations.
            vsrc.motion_detection = True
            
            wind = gtk.Window()
            wind.set_title('Frame Grabber Test')
            
            wind.connect('destroy', gtk.main_quit)
            
            self.imgbox = gtk.Image()
            wind.add(self.imgbox)
            
            vsrc.connect('new-frame', self.on_new_frame)
            vsrc.connect('motion-detected', self.on_motion_detected)
            
            wind.show_all()
            
        def on_new_frame(self, video_frame):
            '''
            Display the new frame and report the measured frame rate.
            '''
            self.imgbox.set_from_pixbuf(video_frame.pixbuf)
            this_timestamp = time.time()
            frame_period = this_timestamp - self.last_timestamp
            self.last_timestamp = this_timestamp
            frame_rate = 1.0 / frame_period
            # Parenthesized so the statement prints identically under the
            # python 2 print statement and the python 3 print function.
            print('frame period: %ss (%d fps)' % (round(frame_period, 4),
                int(round(frame_rate, 0))))
        
        def on_motion_detected(self, value):
            '''
            Motion-detection event hook; currently a no-op.
            '''
            pass
    
    # Fall back to the module-level default device when none is supplied
    # on the command line (was a duplicated '/dev/video0' literal).
    if len(sys.argv) == 1: device = DEVICE_DEFAULT
    else: device = sys.argv[1]
    
    GtkMain(device)
    gtk.main()
