#-*- coding: utf-8 -*-

"""
description	: 2013 HS-RM ComputerVision Schwanecke, Uebung4 

date			:20131106
version		: 
autor           : Pascal Trebert + U. Schwanecke
usage		: python roi_u4.py

Python 2.7.4

"""

import cv2, sys, numpy as np

from PyQt4 import QtCore
from PyQt4 import Qt
from PyQt4 import QtGui
from PyQt4 import QtOpenGL

from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *


class CameraDevice(QtCore.QObject):
    """Wrap a cv2.VideoCapture and emit each grabbed frame via a Qt signal.

    A QTimer polls the capture device at (approximately) the frame rate the
    device reports; every successfully read frame is emitted through
    ``newFrame`` as a numpy array (optionally mirrored horizontally).
    """

    # Fallback frame rate used when the device does not report a valid FPS.
    _DEFAULT_FPS = 30

    # Emitted with every freshly captured (and possibly mirrored) frame.
    newFrame = QtCore.pyqtSignal(np.ndarray)

    def __init__(self, cameraId=0, mirrored=False, parent=None):
        """Open camera ``cameraId`` and start polling frames immediately.

        :param cameraId: device index passed to ``cv2.VideoCapture``
        :param mirrored: if True, every frame is flipped horizontally
        :param parent:   optional Qt parent object
        """
        super(CameraDevice, self).__init__(parent)

        self.mirrored = mirrored

        self._cameraDevice = cv2.VideoCapture(cameraId)

        self._timer = QtCore.QTimer(self)
        self._timer.timeout.connect(self._queryFrame)
        self._timer.setInterval(1000 / self.fps)

        self.paused = False

    @QtCore.pyqtSlot()
    def _queryFrame(self):
        """Grab one frame from the device and emit it.

        BUGFIX: the original ignored the ``success`` flag of ``read()``; a
        failed read then emitted None (or crashed inside ``cv2.flip``).
        Failed reads are now silently dropped.
        """
        success, frame = self._cameraDevice.read()
        if not success:
            return
        if self.mirrored:
            frame = cv2.flip(frame, 1)
        self.newFrame.emit(frame)

    @property
    def paused(self):
        """True while the polling timer is stopped."""
        return not self._timer.isActive()

    @paused.setter
    def paused(self, p):
        if p:
            self._timer.stop()
        else:
            self._timer.start()

    @property
    def frameSize(self):
        """Return the capture size as an ``(width, height)`` int tuple."""
        w = self._cameraDevice.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)
        h = self._cameraDevice.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)
        return int(w), int(h)

    @property
    def fps(self):
        """Frame rate reported by the device, or ``_DEFAULT_FPS`` if unknown."""
        _fps = self._cameraDevice.get(cv2.cv.CV_CAP_PROP_FPS)
        if not _fps > 0:
            _fps = self._DEFAULT_FPS
        return _fps



class ARWidget(QtOpenGL.QGLWidget):
    """OpenGL widget that shows camera frames as a background texture.

    Every incoming frame is copied, re-emitted via ``newFrame`` (so other
    listeners can post-process it) and then rendered as a full-window
    textured quad.  ``draw3DScene`` can additionally overlay a 3D scene
    transformed by the estimated marker pose.
    """

    # Re-emitted copy of every incoming camera frame.
    newFrame = QtCore.pyqtSignal(np.ndarray)

    def __init__(self, cameraDevice, parent=None):
        """Connect to ``cameraDevice`` and fix the widget size to its frames.

        :param cameraDevice: a CameraDevice whose ``newFrame`` signal feeds
                             this widget
        :param parent:       optional Qt parent widget
        :raises ValueError:  if the device reports a zero-sized frame
        """
        super(ARWidget, self).__init__(parent)

        self._frame = None

        # Estimated camera/marker pose as 4x4 homogeneous matrix.
        self._pose = np.eye(4, dtype=np.float64)

        self._cameraDevice = cameraDevice
        self._cameraDevice.newFrame.connect(self._onNewFrame)

        w, h = self._cameraDevice.frameSize

        if not w * h:
            # BUGFIX: the original assigned a 640x480 fallback immediately
            # before this unconditional raise; that dead code was removed.
            raise ValueError("Incorrect image size! (An error seems to have occured with the video device)")

        # Lock the widget to exactly the camera frame size.
        self.setMinimumSize(w, h)
        self.setMaximumSize(w, h)

    def initializeGL(self):
        """One-time OpenGL state setup."""
        glViewport(0, 0, self.width(), self.height())
        glClearColor(1.0, 0.5, 0.0, 1.0)
        glClearDepth(1.0)
        glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
        glEnable(GL_NORMALIZE)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glDepthMask(GL_TRUE)
        glDepthFunc(GL_LEQUAL)
        glEnable(GL_LIGHT0)
        glLineWidth(3.0)

    def paintGL(self):
        """Render the current frame; skip drawing until one has arrived."""
        if self._frame is None:
            return
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        self.drawFrame()
        #self.draw3DScene()

    def resizeGL(self, w, h):
        # Size is fixed in __init__, nothing to do here.
        pass

    @QtCore.pyqtSlot(np.ndarray)
    def _onNewFrame(self, frame):
        """Store a copy of the new frame, re-emit it and trigger a repaint."""
        self._frame = np.copy(frame)
        self.newFrame.emit(self._frame)

        ### TODO: (Ignore for assignment 3)         ###
        ### Estimate the camera/marker pose         ###
        ### For example:                            ###

        #self._pose = tracker.estimatePose(self._frame)

        # Placeholder animation until a real pose estimate exists
        # (and delete this once it does):
        self._pose[2, 3] = (self._pose[2, 3] + 1) % 100

        self.updateGL()

    def draw3DScene(self):
        """Overlay coordinate axes and a teapot using the estimated pose."""
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(45.0, float(self.width()) / float(self.height()), 0.1, 1000.0)
        # Better: glMultMatrixd(tracker.getProjectionMatrix().T)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        gluLookAt(0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, -1.0, 0.0)

        # Use the estimated pose for the model transformation.
        glMultMatrixd(self._pose.T)

        # Draw simple coordinate axes (x red, y green, z blue).
        glBegin(GL_LINES)
        glColor3d(1.0, 0.0, 0.0)
        glVertex3d(0.0, 0.0, 0.0)
        glVertex3d(10.0, 0.0, 0.0)
        glColor3d(0.0, 1.0, 0.0)
        glVertex3d(0.0, 0.0, 0.0)
        glVertex3d(0.0, 10.0, 0.0)
        glColor3d(0.0, 0.0, 1.0)
        glVertex3d(0.0, 0.0, 0.0)
        glVertex3d(0.0, 0.0, 10.0)
        glEnd()

        # Draw a lit teapot as a placeholder model.
        glEnable(GL_LIGHTING)
        glPushMatrix()
        glTranslate(0.0, 0.0, 1.0)
        glRotate(90.0, 1.0, 0.0, 0.0)
        glutSolidTeapot(1)
        glPopMatrix()
        glDisable(GL_LIGHTING)

    def drawFrame(self):
        """Draw the stored camera frame as a window-filling textured quad."""
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        # Orthographic projection in pixel coordinates, origin top-left.
        glOrtho(0.0, self.width(), self.height(), 0.0, -1.0, 1.0)
        glMatrixMode(GL_MODELVIEW)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()

        # Convert the numpy array to an OpenGL texture (frame is BGR).
        glTexImage2D(GL_TEXTURE_2D, 0, 3, self._frame.shape[1], self._frame.shape[0], 0, GL_BGR, GL_UNSIGNED_BYTE, self._frame.tostring())
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)

        # The background quad must not participate in depth testing.
        glDisable(GL_DEPTH_TEST)

        glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
        glColor3d(1.0, 1.0, 1.0)

        # Draw the frame mapped onto a textured quad.
        glEnable(GL_TEXTURE_2D)
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(0, 0, 0)

        glTexCoord2f(1.0, 0.0)
        glVertex3f(self.width(), 0, 0)

        glTexCoord2f(1.0, 1.0)
        glVertex3f(self.width(), self.height(), 0)

        glTexCoord2f(0.0, 1.0)
        glVertex3f(0, self.height(), 0)
        glEnd()
        glDisable(GL_TEXTURE_2D)

        glEnable(GL_DEPTH_TEST)

    def changeEvent(self, e):
        """Pause/resume frame updates when the widget is dis-/enabled."""
        if e.type() == QtCore.QEvent.EnabledChange:
            if self.isEnabled():
                self._cameraDevice.newFrame.connect(self._onNewFrame)
            else:
                self._cameraDevice.newFrame.disconnect(self._onNewFrame)


    

class MyMainWindow(QtGui.QWidget):
    def __init__(self):
        QtGui.QWidget.__init__(self, None)
        self.setWindowTitle('Simple AR Display')

        # get camera device
        self.cameraDevice = CameraDevice(mirrored=False)
        self.cameraDevice.newFrame.connect(self.onNewCameraFrame)

        # Bildgroesse
        self.w, self.h = self.cameraDevice.frameSize
        self.aspRatio = self.w/float(self.h)
        
        # ROI
        self.ax = self.ay = None
        self.bx = self.by = None
        self.box_w = 100
        self.box_h = 100

        # buffer um (live)Bild zu bearbeiten
        self.last = None

        # Binarisierungs-Schwellwert
        self.thresholdValue = 0
        
        # Bildbearbeitungs-Flags
        self.histogram = False
        self.gray = False
        self.invert = False      
        self.binary = False
        self.otsu = False
        self.graySpreading = False

        
        # LAYOUT
        vParentLayout = QtGui.QVBoxLayout()
        self.setLayout(vParentLayout)
                
        hLineLayout1 = QtGui.QHBoxLayout()
        vParentLayout.addLayout(hLineLayout1)
        
        hLineLayout2 = QtGui.QHBoxLayout()
        vParentLayout.addLayout(hLineLayout2)
        
        hLineLayout3 = QtGui.QHBoxLayout()
        vParentLayout.addLayout(hLineLayout3)
        
        hLineLayout4 = QtGui.QHBoxLayout()
        vParentLayout.addLayout(hLineLayout4)
        
        # 1. Zeile
        # Button grau-toggle
        self.b_gray = QtGui.QPushButton("S/W", self)
        Qt.QObject.connect(self.b_gray, Qt.SIGNAL('clicked()'), self.grayColor)
        hLineLayout1.addWidget(self.b_gray)
        
        # Button invertcolor-toggle
        self.b_invert = QtGui.QPushButton("invertiere", self)
        Qt.QObject.connect(self.b_invert, Qt.SIGNAL('clicked()'), self.invertColor)
        hLineLayout1.addWidget(self.b_invert)        

        # Button Otsu-Binarisierung
        self.b_otsu = QtGui.QPushButton("Otsu", self)
        Qt.QObject.connect(self.b_otsu, Qt.SIGNAL('clicked()'), self.toggleOtsu)
        hLineLayout1.addWidget(self.b_otsu)

        # Binarisierung (manuell) Schwellwert-Slider
        self.lthreshold = QtGui.QLabel(u"Binarisierung", self)
        hLineLayout1.addWidget(self.lthreshold)
        self.horizontalPositionSlider = QtGui.QSlider(QtCore.Qt.Horizontal)
        self.horizontalPositionSlider.setRange(0,255)
        self.horizontalPositionSlider.setSingleStep(1);
        self.horizontalPositionSlider.valueChanged[int].connect(\
            self.thresholdSlidervalue)
        self.horizontalPositionSlider.setValue(0)
        hLineLayout1.addWidget(self.horizontalPositionSlider)


        # 2. Zeile
        # Button 
        self.b_graySpreading = QtGui.QPushButton("Grauwertspeizung", self)
        Qt.QObject.connect(self.b_graySpreading, Qt.SIGNAL('clicked()'), self.toggleGraySpreading)
        hLineLayout2.addWidget(self.b_graySpreading)

        # Button 
        self.b_histogram = QtGui.QPushButton("Histogramm", self)
        Qt.QObject.connect(self.b_histogram, Qt.SIGNAL('clicked()'), self.togglehistogram)
        hLineLayout2.addWidget(self.b_histogram)

        # Button Linear-Mapping
        self.b_linearMapping = QtGui.QPushButton("Linear-Mapping", self)
        Qt.QObject.connect(self.b_linearMapping, Qt.SIGNAL('clicked()'), self.toggleLinearMapping)
        hLineLayout2.addWidget(self.b_linearMapping)
        
        # Slider Box-Groesse
        self.lBoxSize = QtGui.QLabel(u"Größe", self)
        hLineLayout2.addWidget(self.lBoxSize)
        
        self.horizontalPositionSlider = QtGui.QSlider(QtCore.Qt.Horizontal)
        self.horizontalPositionSlider.setRange(1,self.h*2)
        self.horizontalPositionSlider.setSingleStep(2);
        self.horizontalPositionSlider.valueChanged[int].connect(\
            self.boxSizeSlidervalue)
        self.horizontalPositionSlider.setValue(self.h/2)
        hLineLayout2.addWidget(self.horizontalPositionSlider)

        # 3. Zeile
        # Slider horizontale Position
        self.lBoxPosHorizontal = QtGui.QLabel("Position", self)
        hLineLayout3.addWidget(self.lBoxPosHorizontal)

        self.horizontalPositionSlider = QtGui.QSlider(QtCore.Qt.Horizontal)
        self.horizontalPositionSlider.setRange(1,self.w)
        self.horizontalPositionSlider.setSingleStep(2);
        self.horizontalPositionSlider.valueChanged[int].connect(\
            self.horizontalPositionSlidervalue) # Funktion die aufgerufen wird
        self.horizontalPositionSlider.setValue(self.w/2-self.box_w/2) # Startwert
        hLineLayout3.addWidget(self.horizontalPositionSlider)

        
        # 4. Zeile
        # add widget to show the augmented video input image
        arWidget = ARWidget(self.cameraDevice)
        arWidget.newFrame.connect(self.onNewCameraFrame)
        hLineLayout4.addWidget(arWidget)
        
        # Slider vertikale Position
        self.w, self.h = self.cameraDevice.frameSize
        self.verticalPositionSlider = QtGui.QSlider(QtCore.Qt.Vertical)
        self.verticalPositionSlider.setRange(1,self.h)
        self.verticalPositionSlider.setSingleStep(2);
        self.verticalPositionSlider.valueChanged[int].connect(\
            self.verticalPositionSlidervalue)
        self.verticalPositionSlider.setValue(self.h/2+self.box_h/2)
        hLineLayout4.addWidget(self.verticalPositionSlider)



    # Slider Callback-Funktionen
    def thresholdSlidervalue(self, value):      
        self.thresholdValue = value
        self.gray = True
        self.binary = True        
        if self.thresholdValue==0: # binarisierung aus
            self.gray = False
            self.binary = False
        
    def verticalPositionSlidervalue(self, value):
        self.verticalPositionvalue = value

    def horizontalPositionSlidervalue(self, value):
        self.horizontalPositionvalue = value

    def boxSizeSlidervalue(self, value):
        # boxverhaeltnis an Bildseitenverhaeltnis angepasst
        self.box_w = int(value*self.aspRatio)
        self.box_h = value


    # Button Callback-Funktionen
    def toggleGraySpreading(self):
        # Bildbearbeitungs-Flags
##        self.gray = True
        self.binary = False
        self.otsu = False
        self.graySpreading = not self.graySpreading
        print 'graySpreading ', self.graySpreading

    def togglehistogram(self):
        self.histogram = not self.histogram
    
    def toggleLinearMapping(self):
        pass
    
    def toggleOtsu(self):
        self.binary = False
        self.gray = True
        self.otsu = not self.otsu
        print 'Otsu ', self.otsu
        
    
    def invertColor(self):
        self.invert = not self.invert
        print 'invert ', self.invert
        

    def grayColor(self):
        self.gray = not self.gray if not self.binary else self.gray
        print 'gray ', self.gray
        

    
    def showImg(self, img, # numpyarray
             windowname # unique string
             ):
        """Zeigt ein cv2-image in einem neue Fenster an"""
        cv2.namedWindow(windowname, cv2.CV_WINDOW_AUTOSIZE)
        cv2.imshow(windowname, img)

    def doGrayScale(self, frame):
        # Grauwertbild 1 Kanal
        actualFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Grauwertbild 3 Kanaele, damit frame[:] = ... klappt
        actualFrame = cv2.cvtColor(actualFrame, cv2.COLOR_GRAY2BGR)
        return actualFrame

    def doInvertColor(self, frame, bbox # [obenlinks, unten rechts]
                      ):
        """Invertiert einen rechteckigen Bereich im Bild"""
        ax, ay = bbox[0]
        bx, by = bbox[1]
        
        # ROI durch slicen und invertieren
        roi = cv2.bitwise_not(frame[ay:by, ax:bx])
        # ROI im Normalbild ersetzen
        frame[ay:by,ax:bx] = roi
        return frame
    

    def updateBoxValues(self):
        """Haelt Auswahlrechteck aktuell"""
        # set vertical and horizontal position of rectangle
        verticalPosition = int(self.verticalPositionvalue)
        horizontalPosition = int(self.horizontalPositionvalue)
        
        # box boundingbox
        self.ax = horizontalPosition
        self.ay = self.h-verticalPosition
        self.bx = self.box_w/2+horizontalPosition
        self.by = self.h-verticalPosition+self.box_h/2

        
    def drawBox(self, frame, bbox, # [obenlinks, unten rechts]
                color=(255, 0, 0)):
        """Zeichnet ein farbiges Rechteck in das Bild"""
        ax, ay = bbox[0]
        bx, by = bbox[1]
        cv2.rectangle(frame, (ax, ay), (bx, by), color)

        
    def doBinary(self, frame, thresh, maxval, method=cv2.THRESH_BINARY):
        """Binarisiert ein Bild anhand eines Schwellwertes"""
        threshold, actualFrame = cv2.threshold(frame, thresh, maxval,
                                    method)
        return actualFrame


    def doOtsu(self, frame):
        """Binarisiert ein Bild mit Otsu-Schwellwert"""
        
        # Grauwertbild 1 Kanal
        actualFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        method = cv2.THRESH_BINARY | cv2.THRESH_OTSU
        actualFrame = self.doBinary(actualFrame, 127, 255, method)
        
        # Grauwertbild 3 Kanaele, damit frame[:] = ... klappt
        actualFrame = cv2.cvtColor(actualFrame, cv2.COLOR_GRAY2BGR)
        return actualFrame
    

    def doGraySpreading(self, frame):
        """Nimmt eine Grauwertspreizung des Bildes vor"""
        actualFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)          
        gMin = actualFrame.min()
        gMax = actualFrame.max()
        actualFrame[:] = (actualFrame[:] - gMin) * 255./(gMax-gMin)
        actualFrame = cv2.cvtColor(actualFrame, cv2.COLOR_GRAY2BGR)
        return actualFrame
                
    def drawHistogram(self, frame):
        #(300,256,3)
        h, w, c = frame.shape
        fak = 1
        h, w, c = 200, 200, 3
        
        print 'shape ', frame.shape
        h = np.zeros((h,w,c))
         
        bins = np.arange(w).reshape(w,1)
        color = [ (255,0,0),(0,255,0),(0,0,255) ]
        for ch, col in enumerate(color):
            hist_item = cv2.calcHist([frame],[ch],None,[w],[0,w])
            cv2.normalize(hist_item,hist_item,0,255,cv2.NORM_MINMAX)
            hist=np.int32(np.around(hist_item))
            pts = np.column_stack((bins,hist))
            cv2.polylines(h,[pts],False,col)
            
        h=np.flipud(h)
        print 'shepe h ', h.shape
        frame[0:200,0:200] = h.copy()
        actualFrame = frame
        return actualFrame
    
    def onNewCameraFrame(self, frame):
        """Ausfuehrung pro Kamera-Frame"""

        self.updateBoxValues()
        self.drawBox(frame, [(self.ax,self.ay),(self.bx,self.by)])
        
        # auf Kopie arbeiten
        actualFrame = frame.copy()

        # Histogram
        if self.histogram:
            actualFrame = self.drawHistogram(frame)
        
        # Graubild
        if self.gray:            
            actualFrame = self.doGrayScale(actualFrame)

        # Grauwert-Spreizung
        if self.graySpreading:
            actualFrame = self.doGraySpreading(actualFrame)
            
        # invertiere Farben
        if self.invert:
            actualFrame = self.doInvertColor(actualFrame,
                               [(self.ax,self.ay),(self.bx,self.by)])

        # manuell binarisieren
        if self.binary:
            actualFrame = self.doBinary(actualFrame, self.thresholdValue,
                                   255, cv2.THRESH_BINARY)

        # automatisch binarisieren nach Otsu
        if self.otsu:
            actualFrame = self.doOtsu(actualFrame)

            
        # umspeichern des Frames (cv2 Eigenart)
        if self.last is not None:
            frame[:] = self.last
        self.last = actualFrame




if __name__ == "__main__":
    glutInit(sys.argv) # don't need this under Mac OS X
    app = QtGui.QApplication(sys.argv)
    w = MyMainWindow()
    w.show()
        
    sys.exit(app.exec_())
