#-*- coding: utf-8 -*-

"""
description     : 2013 HS-RM ComputerVision Schwanecke, Uebung 3A3 - invert ROI

date            : 20131023
version         : 1.0
author          : Pascal Trebert + U. Schwanecke
usage           : python roi.py

Tested on:
Distributor ID:	LinuxMint
Description:	Linux Mint 15 Olivia
Release:	15
Codename:	olivia
Python 2.7.4

"""

import cv2, sys, numpy as np

from PyQt4 import QtCore
from PyQt4 import Qt
from PyQt4 import QtGui
from PyQt4 import QtOpenGL

from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *


class CameraDevice(QtCore.QObject):
    """Polls a cv2.VideoCapture with a QTimer and emits each frame.

    Signals:
        newFrame (np.ndarray): emitted once per successfully captured frame.
    """

    # fallback frame rate used when the capture device reports no FPS
    _DEFAULT_FPS = 30

    newFrame = QtCore.pyqtSignal(np.ndarray)

    def __init__(self, cameraId=0, mirrored=False, parent=None):
        """Open camera `cameraId`; if `mirrored`, flip frames horizontally."""
        super(CameraDevice, self).__init__(parent)

        self.mirrored = mirrored

        self._cameraDevice = cv2.VideoCapture(cameraId)

        # poll the camera at (roughly) its native frame rate
        self._timer = QtCore.QTimer(self)
        self._timer.timeout.connect(self._queryFrame)
        self._timer.setInterval(int(1000 / self.fps))

        self.paused = False

    @QtCore.pyqtSlot()
    def _queryFrame(self):
        """Grab one frame and emit it; skip failed reads.

        Bug fix: the original emitted the frame even when read() failed,
        in which case `frame` is None and cv2.flip / connected slots crash.
        """
        success, frame = self._cameraDevice.read()
        if not success:
            return
        if self.mirrored:
            frame = cv2.flip(frame, 1)
        self.newFrame.emit(frame)

    @property
    def paused(self):
        # the device counts as paused exactly when the poll timer is stopped
        return not self._timer.isActive()

    @paused.setter
    def paused(self, p):
        if p:
            self._timer.stop()
        else:
            self._timer.start()

    @property
    def frameSize(self):
        """Return the capture frame size as an (int width, int height) tuple."""
        w = self._cameraDevice.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)
        h = self._cameraDevice.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)
        return int(w), int(h)

    @property
    def fps(self):
        """Frame rate reported by the device, or _DEFAULT_FPS if unknown."""
        _fps = self._cameraDevice.get(cv2.cv.CV_CAP_PROP_FPS)
        if not _fps > 0:
            _fps = self._DEFAULT_FPS
        return _fps



class ARWidget(QtOpenGL.QGLWidget):
    """OpenGL widget that displays camera frames as a textured full-window quad.

    Receives frames from a CameraDevice, keeps a copy, re-emits it on its own
    `newFrame` signal, and repaints.  draw3DScene() (currently disabled in
    paintGL) would overlay coordinate axes and a teapot transformed by the
    pose matrix in self._pose.
    """

    # re-emitted copy of every camera frame received in _onNewFrame
    newFrame = QtCore.pyqtSignal(np.ndarray)

    def __init__(self, cameraDevice, parent=None):
        """Connect to `cameraDevice` and fix the widget size to its frame size.

        Raises:
            ValueError: if the camera reports a zero-area frame size.
        """
        super(ARWidget, self).__init__(parent)

        # last frame received from the camera (None until the first one)
        self._frame = None

        # current model transformation (camera/marker pose), 4x4
        self._pose = np.eye(4, dtype = np.float64)

        self._cameraDevice = cameraDevice
        self._cameraDevice.newFrame.connect(self._onNewFrame)

        w, h = self._cameraDevice.frameSize

        if not w*h:
            # NOTE(review): these fallback values are dead code -- the raise
            # immediately below fires before they can be used.
            w = 640
            h = 480
            raise ValueError("Incorrect image size! (An error seems to have occured with the video device)")

        # lock the widget to exactly the camera frame size
        self.setMinimumSize(w, h)
        self.setMaximumSize(w, h)

    def initializeGL(self):
        """One-time GL state setup (viewport, depth test, lighting, shading)."""
        glViewport(0, 0, self.width(), self.height());
        glClearColor(1.0, 0.5, 0.0, 1.0)
        glClearDepth(1.0)
        glPolygonMode( GL_FRONT_AND_BACK, GL_FILL )
        glEnable(GL_NORMALIZE);
        glEnable(GL_DEPTH_TEST);
        glShadeModel(GL_SMOOTH);
        glDepthMask(GL_TRUE);
        glDepthFunc(GL_LEQUAL);
        glEnable(GL_LIGHT0);
        glLineWidth(3.0)


    def paintGL(self):
        """Repaint: draw the camera frame (3D overlay currently disabled)."""
        if self._frame is None:
            return
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        self.drawFrame()
        #self.draw3DScene()

    def resizeGL(self, w, h):
        # the widget size is fixed in __init__, so nothing to do here
        pass

    @QtCore.pyqtSlot(np.ndarray)
    def _onNewFrame(self, frame):
        """Store a copy of the new camera frame, re-emit it, and repaint."""
        self._frame = np.copy(frame)
        self.newFrame.emit(self._frame)

        ### TODO: (Ignore for assignment 3)         ###
        ### Estimate the camera/marker pose         ###
        ### For example:                            ###

        #self._pose = tracker.estimatePose(self._frame)

        # placeholder animation: cycle the z-translation 0..99; delete this
        # once a real pose estimate is used
        self._pose[2, 3] = (self._pose[2, 3] + 1)%100

        self.updateGL()

    def draw3DScene(self):
        """Draw coordinate axes and a teapot transformed by self._pose."""
        glMatrixMode(GL_PROJECTION);
        glLoadIdentity();
        gluPerspective(45.0, float(self.width())/float(self.height()), 0.1, 1000.0)
        # Better: glMultMatrixd(tracker.getProjectionMatrix().T)
        glMatrixMode(GL_MODELVIEW);
        glLoadIdentity();
        gluLookAt(0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, -1.0, 0.0)

        # use the estimated pose for model transformation
        # (transposed: numpy is row-major, OpenGL expects column-major)
        glMultMatrixd(self._pose.T)

        # draw simple coordinate axes (x red, y green, z blue)
        glBegin(GL_LINES)
        glColor3d(1.0,0.0,0.0)
        glVertex3d(0.0, 0.0, 0.0)
        glVertex3d(10.0, 0.0, 0.0)
        glColor3d(0.0,1.0,0.0)
        glVertex3d(0.0, 0.0, 0.0)
        glVertex3d(0.0, 10.0, 0.0)
        glColor3d(0.0, 0.0, 1.0)
        glVertex3d(0.0, 0.0, 0.0)
        glVertex3d(0.0, 0.0, 10.0)
        glEnd()

        # draw teapot
        glEnable(GL_LIGHTING)
        glPushMatrix()
        glTranslate(0.0, 0.0, 1.0)
        glRotate(90.0, 1.0, 0.0, 0.0)
        glutSolidTeapot(1)
        glPopMatrix()
        glDisable(GL_LIGHTING)


    def drawFrame(self):
        """Upload the current BGR frame as a texture and draw it full-window."""
        glMatrixMode(GL_PROJECTION);
        glLoadIdentity();
        # orthographic projection in pixel coordinates, y pointing down
        glOrtho(0.0, self.width(), self.height(), 0.0, -1.0, 1.0);
        glMatrixMode(GL_MODELVIEW);
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        glLoadIdentity();

        # convert the numpy array to an opengl texture; no glGenTextures/
        # glBindTexture is used, so this re-uploads into the currently bound
        # (default) texture object every frame
        glTexImage2D(GL_TEXTURE_2D, 0, 3, self._frame.shape[1], self._frame.shape[0], 0, GL_BGR, GL_UNSIGNED_BYTE, self._frame.tostring());
        glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);

        # the background quad must not occlude the 3D overlay
        glDisable(GL_DEPTH_TEST);

        glPolygonMode( GL_FRONT_AND_BACK, GL_FILL );
        glColor3d(1.0,1.0,1.0);

        # draw the frame mapped to a textured quad
        glEnable(GL_TEXTURE_2D);
        glBegin(GL_QUADS);
        glTexCoord2f( 0.0, 0.0);
        glVertex3f( 0, 0, 0 );

        glTexCoord2f( 1.0, 0.0 );
        glVertex3f( self.width(), 0, 0 );

        glTexCoord2f( 1.0, 1.0 );
        glVertex3f( self.width(), self.height(), 0 );

        glTexCoord2f( 0.0, 1.0 );
        glVertex3f( 0, self.height(), 0 );
        glEnd();
        glDisable(GL_TEXTURE_2D);

        glEnable(GL_DEPTH_TEST);


    def changeEvent(self, e):
        """(Dis)connect from the camera when the widget is (dis)abled."""
        if e.type() == QtCore.QEvent.EnabledChange:
            if self.isEnabled():
                self._cameraDevice.newFrame.connect(self._onNewFrame)
            else:
                self._cameraDevice.newFrame.disconnect(self._onNewFrame)


class Basics(QtGui.QWidget):
    """Primary simple functions and GUI elements.

    GUI elements
    - gray toggle-button
    - horizontal position slider of the box
    - box toggle-button
    """

    def __init__(self, *args):
        # Bug fix: the original called the unqualified name QWidget.__init__,
        # which raises NameError (only the QtGui module is imported).
        QtGui.QWidget.__init__(self, *args)
        self.createWidgets()

    def createWidgets(self):
        """Set up GUI elements and layout.

        NOTE(review): the original body was truncated mid-statement
        (`self.setLayout(Q`) -- a syntax error that broke the whole module --
        and used QHBoxLayout unqualified.  This completes it minimally: an
        (empty) first row nested in a vertical box layout.  The widgets
        listed in the class docstring still have to be added here.
        """
        # first row
        hLayout = QtGui.QHBoxLayout()

        # second row (not yet populated)

        # add the rows to the vertical box layout
        boxLayout = QtGui.QVBoxLayout()
        boxLayout.addLayout(hLayout)
        self.setLayout(boxLayout)

class MyMainWindow(QtGui.QWidget):
    """Main window: AR video widget plus sliders that control an inverted ROI box."""

    def __init__(self):
        QtGui.QWidget.__init__(self, None)
        self.setWindowTitle('Simple AR Display')

        # specify layout (widgets laid out left to right)
        vbox = QtGui.QVBoxLayout(self)
        vbox.setDirection(QtGui.QBoxLayout.LeftToRight)

        # get camera device
        self.cameraDevice = CameraDevice(mirrored=False)
        self.cameraDevice.newFrame.connect(self.onNewCameraFrame)

        # add widget to show the augmented video input image
        arWidget = ARWidget(self.cameraDevice)
        arWidget.newFrame.connect(self.onNewCameraFrame)
        vbox.addWidget(arWidget)

        # box dimensions (pixels) and frame geometry
        self.box_w = 100
        self.box_h = 100
        self.w, self.h = self.cameraDevice.frameSize
        self.aspRatio = self.w / float(self.h)

        # previously displayed frame (one-frame delay in onNewCameraFrame)
        self.last = None

        # add slider to control vertical position
        self.verticalPositionSlider = QtGui.QSlider(QtCore.Qt.Vertical)
        self.verticalPositionSlider.setRange(1, self.h)
        self.verticalPositionSlider.setSingleStep(2)
        self.verticalPositionSlider.valueChanged[int].connect(
            self.verticalPositionSlidervalue)
        # setValue fires valueChanged, initializing self.verticalPositionvalue
        self.verticalPositionSlider.setValue(self.h // 2 + self.box_h // 2)
        vbox.addWidget(self.verticalPositionSlider)

        # add slider to control horizontal position
        self.horizontalPositionSlider = QtGui.QSlider(QtCore.Qt.Horizontal)
        self.horizontalPositionSlider.setRange(1, self.w)
        self.horizontalPositionSlider.setSingleStep(2)
        self.horizontalPositionSlider.valueChanged[int].connect(
            self.horizontalPositionSlidervalue)
        self.horizontalPositionSlider.setValue(self.w // 2 - self.box_w // 2)
        vbox.addWidget(self.horizontalPositionSlider)

        # add slider to control box size.
        # Bug fix: the original stored this slider in self.verticalPositionSlider,
        # clobbering the reference to the vertical-position slider above.
        self.boxSizeSlider = QtGui.QSlider(QtCore.Qt.Vertical)
        self.boxSizeSlider.setRange(1, self.h * 2)
        self.boxSizeSlider.setSingleStep(2)
        self.boxSizeSlider.valueChanged[int].connect(
            self.boxSizeSlidervalue)
        self.boxSizeSlider.setValue(self.h // 2)
        vbox.addWidget(self.boxSizeSlider)

        # Bug fix: the button was created but never added to the layout.
        self.b_invert = QtGui.QPushButton("invertere", self)
        self.b_invert.setMaximumWidth(120)
        vbox.addWidget(self.b_invert)


    # slider callback functions
    def verticalPositionSlidervalue(self, value):
        """Remember the vertical box position chosen on the slider."""
        self.verticalPositionvalue = value


    def horizontalPositionSlidervalue(self, value):
        """Remember the horizontal box position chosen on the slider."""
        self.horizontalPositionvalue = value


    def boxSizeSlidervalue(self, value):
        """Set the box size; width follows the image aspect ratio."""
        self.box_w = int(value * self.aspRatio)
        self.box_h = value


    def showImg(self, img,  # numpy array
                windowname  # unique string
                ):
        """Show a cv2 image in a new HighGUI window.

        Bug fix: cv2.CV_WINDOW_AUTOSIZE does not exist in the cv2 namespace
        (AttributeError); the correct constant is cv2.WINDOW_AUTOSIZE.
        """
        cv2.namedWindow(windowname, cv2.WINDOW_AUTOSIZE)
        cv2.imshow(windowname, img)


    def onNewCameraFrame(self, frame):
        """Draw the box, invert the ROI inside it, and delay display by one frame."""
        # set vertical and horizontal position of rectangle
        verticalPosition = int(self.verticalPositionvalue)
        horizontalPosition = int(self.horizontalPositionvalue)

        # box bounding box; (ax, ay) top-left, (bx, by) bottom-right.
        # NOTE(review): the drawn box spans box_w/2 x box_h/2 pixels, not
        # box_w x box_h -- presumably intentional half-extents; verify.
        ax = horizontalPosition
        ay = self.h - verticalPosition
        bx = self.box_w // 2 + horizontalPosition
        by = self.h - verticalPosition + self.box_h // 2
        cv2.rectangle(frame, (ax, ay), (bx, by), (255, 255, 255))

        # invert the ROI obtained by slicing (numpy clips out-of-range slices)
        roi = cv2.bitwise_not(frame[ay:by, ax:bx])

        # write the inverted ROI back into the frame
        frame[ay:by, ax:bx] = roi

        # show the previously stored frame and store the current one
        actual = frame.copy()
        if self.last is not None:
            frame[:] = self.last
        self.last = actual


        ### TODO:                                    ###
        ### 1. set horizontal position of box        ###
        ### 2. set size of box                       ###
        ### 3. Inverting image region in ROI defined ###
        ###    by the box                            ###


if __name__ == "__main__":
    # GLUT must be initialized for glutSolidTeapot (not needed on Mac OS X)
    glutInit(sys.argv)
    application = QtGui.QApplication(sys.argv)
    mainWindow = MyMainWindow()
    mainWindow.show()
    sys.exit(application.exec_())
