__author__ = 'karaayak'
import cv2, sys, numpy as np
from PyQt4 import QtCore
from PyQt4 import Qt
from PyQt4 import QtGui
from PyQt4 import QtOpenGL
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *


class CameraDevice(QtCore.QObject):
    """Wraps an OpenCV capture device and emits frames via a Qt signal.

    A QTimer polls the device at the camera's reported FPS (or a
    fallback of 30 when the driver reports none) and emits every
    captured frame through the ``newFrame`` signal.
    """

    # Fallback frame rate when the driver reports no (or an invalid) FPS.
    _DEFAULT_FPS = 30

    # Emitted once per captured frame with the BGR image as an ndarray.
    newFrame = QtCore.pyqtSignal(np.ndarray)

    def __init__(self, cameraId=0, mirrored=False, parent=None):
        super(CameraDevice, self).__init__(parent)

        self.mirrored = mirrored

        self._cameraDevice = cv2.VideoCapture(cameraId)

        # Poll the device at the camera's frame rate.
        self._timer = QtCore.QTimer(self)
        self._timer.timeout.connect(self._queryFrame)
        self._timer.setInterval(1000/self.fps)

        self.paused = False

    @QtCore.pyqtSlot()
    def _queryFrame(self):
        success, frame = self._cameraDevice.read()
        # Bug fix: skip this tick when the grab failed -- previously the
        # success flag was ignored and cv2.flip()/emit() could be called
        # with frame == None, crashing the application.
        if not success:
            return
        if self.mirrored:
            frame = cv2.flip(frame, 1)
        self.newFrame.emit(frame)

    @property
    def paused(self):
        # The device counts as paused exactly when the poll timer is stopped.
        return not self._timer.isActive()

    @paused.setter
    def paused(self, p):
        if p:
            self._timer.stop()
        else:
            self._timer.start()

    @property
    def frameSize(self):
        """Return the capture size as a (width, height) tuple of ints."""
        w = self._cameraDevice.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)
        h = self._cameraDevice.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)
        return int(w), int(h)

    @property
    def fps(self):
        """Frames per second reported by the driver, or _DEFAULT_FPS."""
        _fps = self._cameraDevice.get(cv2.cv.CV_CAP_PROP_FPS)
        if not _fps > 0:
            _fps = self._DEFAULT_FPS
        return _fps



class ARWidget(QtOpenGL.QGLWidget):
    """OpenGL widget that shows the camera stream as a textured quad.

    Frames arrive via the camera device's ``newFrame`` signal; each
    frame is copied, re-emitted through this widget's own ``newFrame``
    signal (so downstream processing sees it too) and then drawn.
    ``draw3DScene`` renders coordinate axes and a teapot using the
    estimated pose, but is currently disabled in ``paintGL``.
    """

    # Re-emitted copy of every incoming camera frame (BGR ndarray).
    newFrame = QtCore.pyqtSignal(np.ndarray)

    def __init__(self, cameraDevice, parent=None):
        super(ARWidget, self).__init__(parent)

        self._frame = None                        # latest frame (BGR ndarray)
        self._pose = np.eye(4, dtype=np.float64)  # estimated camera/marker pose

        self._cameraDevice = cameraDevice
        self._cameraDevice.newFrame.connect(self._onNewFrame)

        w, h = self._cameraDevice.frameSize

        # Bug fix: the old code assigned fallback dimensions (640x480)
        # immediately before raising, which was dead code; the garbled
        # whitespace in the message is repaired too.
        if not w*h:
            raise ValueError("Incorrect image size! (An error seems to have occurred with the video device)")

        # Lock the widget to the camera's native resolution.
        self.setMinimumSize(w, h)
        self.setMaximumSize(w, h)

    def initializeGL(self):
        # One-time GL state setup.
        glViewport(0, 0, self.width(), self.height())
        glClearColor(1.0, 0.5, 0.0, 1.0)
        glClearDepth(1.0)
        glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
        glEnable(GL_NORMALIZE)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glDepthMask(GL_TRUE)
        glDepthFunc(GL_LEQUAL)
        glEnable(GL_LIGHT0)
        glLineWidth(3.0)

    def paintGL(self):
        # Nothing to draw until the first frame has arrived.
        if self._frame is None:
            return
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        self.drawFrame()
        #self.draw3DScene()

    def resizeGL(self, w, h):
        # Fixed-size widget; nothing to adapt.
        pass

    @QtCore.pyqtSlot(np.ndarray)
    def _onNewFrame(self, frame):
        self._frame = np.copy(frame)
        self.newFrame.emit(self._frame)

        ### TODO: (Ignore for assignment 3)         ###
        ### Estimate the camera/marker pose         ###
        ### For example:                            ###

        #self._pose = tracker.estimatePose(self._frame)

        #and delete this:
        self._pose[2, 3] = (self._pose[2, 3] + 1)%100

        self.updateGL()

    def draw3DScene(self):
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(45.0, float(self.width())/float(self.height()), 0.1,
                       1000.0)
        # Better: glMultMatrixd(tracker.getProjectionMatrix().T)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        gluLookAt(0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, -1.0, 0.0)

        # use the estimated pose for model transformation
        glMultMatrixd(self._pose.T)

        # draw simple coordinate axes (x = red, y = green, z = blue)
        glBegin(GL_LINES)
        glColor3d(1.0, 0.0, 0.0)
        glVertex3d(0.0, 0.0, 0.0)
        glVertex3d(10.0, 0.0, 0.0)
        glColor3d(0.0, 1.0, 0.0)
        glVertex3d(0.0, 0.0, 0.0)
        glVertex3d(0.0, 10.0, 0.0)
        glColor3d(0.0, 0.0, 1.0)
        glVertex3d(0.0, 0.0, 0.0)
        glVertex3d(0.0, 0.0, 10.0)
        glEnd()

        # draw teapot
        glEnable(GL_LIGHTING)
        glPushMatrix()
        glTranslate(0.0, 0.0, 1.0)
        glRotate(90.0, 1.0, 0.0, 0.0)
        glutSolidTeapot(1)
        glPopMatrix()
        glDisable(GL_LIGHTING)

    def drawFrame(self):
        # Pixel-aligned orthographic projection (origin top-left, as in
        # image coordinates).
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        glOrtho(0.0, self.width(), self.height(), 0.0, -1.0, 1.0)
        glMatrixMode(GL_MODELVIEW)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()

        # convert the numpy array to an opengl texture
        glTexImage2D(GL_TEXTURE_2D, 0, 3, self._frame.shape[1],
                     self._frame.shape[0], 0, GL_BGR, GL_UNSIGNED_BYTE, self._frame.tostring())
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)

        # The background quad must not interact with the depth buffer.
        glDisable(GL_DEPTH_TEST)

        glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
        glColor3d(1.0, 1.0, 1.0)

        # draw the frame mapped to a textured quad covering the widget
        glEnable(GL_TEXTURE_2D)
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(0, 0, 0)

        glTexCoord2f(1.0, 0.0)
        glVertex3f(self.width(), 0, 0)

        glTexCoord2f(1.0, 1.0)
        glVertex3f(self.width(), self.height(), 0)

        glTexCoord2f(0.0, 1.0)
        glVertex3f(0, self.height(), 0)
        glEnd()
        glDisable(GL_TEXTURE_2D)

        glEnable(GL_DEPTH_TEST)

    def changeEvent(self, e):
        # Follow the widget's enabled state: only consume camera frames
        # while the widget is enabled.
        if e.type() == QtCore.QEvent.EnabledChange:
            if self.isEnabled():
                self._cameraDevice.newFrame.connect(self._onNewFrame)
            else:
                self._cameraDevice.newFrame.disconnect(self._onNewFrame)

class MyMainWindow(QtGui.QWidget):
    def __init__(self):
        QtGui.QWidget.__init__(self, None)
        self.setWindowTitle('Simple AR Display')
        self.letztesFenster = None
        self.buttonClicked = False
        self.color = [(255,0,0)]
        self.bins = np.arange(256).reshape(256,1)
        self.grau = False
        # manuelle Schellwert
        self.manSchellwertValue = False
        # Schellwert Otsu
        self.otsuValue = False
        # Histogrammausgleich
        self.histogrammAusgleichValue = False
        # Grauwertspeizung
        self.grauwertSpreizungValue = False

        # specify layout
        vbox = QtGui.QGridLayout(self)

        # get camera device
        self.cameraDevice = CameraDevice(mirrored=False)
        self.cameraDevice.newFrame.connect(self.onNewCameraFrame)

        # add widget to show the augmented video input image
        arWidget = ARWidget(self.cameraDevice)
        arWidget.newFrame.connect(self.onNewCameraFrame)
        vbox.addWidget(arWidget, 0, 0)

        # add Button to invert image in grey
        self.button = QtGui.QPushButton('Bild in Graustufen        umwandeln',self)
        self.button.clicked.connect(self.handleButton)
        vbox.addWidget(self.button)

        # add button Histogrammdarstellung
        self.histoButton =        QtGui.QPushButton("Histogrammdarstellung", self)
        self.histoButton.clicked.connect(self.handleHistogrammdarstellung)
        vbox.addWidget(self.histoButton)

        # add button Grauwertspreizung
        self.grauwertspreizungButton =QtGui.QPushButton("Grauwertspreizung", self)

self.grauwertspreizungButton.clicked.connect(self.handleGrauwertspreizung)
        vbox.addWidget(self.grauwertspreizungButton)

        # add button Histogrammausgleich
        self.ausgleichButton =        QtGui.QPushButton("Histogrammausgleich",
self)
        self.ausgleichButton.clicked.connect(self.handleHistogrammausgleich)
        vbox.addWidget(self.ausgleichButton)

        # add button manuelle schwellwertbasierende Binarisierung
        self.manuelleBinarisierungButton = QtGui.QPushButton("manuelleBinarisierung", self)

self.manuelleBinarisierungButton.clicked.connect(self.handleManuelleBinarisierung)
        vbox.addWidget(self.manuelleBinarisierungButton)

        # add button Schellwertbestimmung nach Otsu
        self.otsuButton = QtGui.QPushButton("Otsu", self)
        self.otsuButton.clicked.connect(self.handleOtsu)
        vbox.addWidget(self.otsuButton)


        # add slider to control vertical position
        self.w, self.h = self.cameraDevice.frameSize
        self.verticalPositionSlider = QtGui.QSlider(QtCore.Qt.Vertical)
        self.verticalPositionSlider.setRange(1,self.h)
        self.verticalPositionSlider.setSingleStep(2);

self.verticalPositionSlider.valueChanged[int].connect(self.verticalPositionSlidervalue)
        self.verticalPositionSlider.setValue(self.h/2)
        vbox.addWidget(self.verticalPositionSlider)

        # add slider to control horizontal position
        self.w, self.h = self.cameraDevice.frameSize
        self.horizontalPositionSlider = QtGui.QSlider(QtCore.Qt.Horizontal)
        self.horizontalPositionSlider.setRange(1,self.w)
        self.horizontalPositionSlider.setSingleStep(2);

self.horizontalPositionSlider.valueChanged[int].connect(self.horizontalPositionSlidervalue)
        self.horizontalPositionSlider.setValue(self.w/2)
        vbox.addWidget(self.horizontalPositionSlider)

        # add slider to control the size of rectangle
        self.w, self.h = self.cameraDevice.frameSize
        self.boxSizeSlider = QtGui.QSlider(QtCore.Qt.Vertical)
        self.boxSizeSlider.setRange(1,self.h)
        self.boxSizeSlider.setSingleStep(2);

self.boxSizeSlider.valueChanged[int].connect(self.boxSizeSlidervalue)
        self.boxSizeSlider.setValue(self.h/2)
        vbox.addWidget(self.boxSizeSlider)

        #add slider to control horizontal position
        self.w, self.h = self.cameraDevice.frameSize
        self.verticalSchwellwertSlider = QtGui.QSlider(QtCore.Qt.Vertical)
        self.verticalSchwellwertSlider.setRange(0,255)
        self.verticalSchwellwertSlider.setSingleStep(2);

self.verticalSchwellwertSlider.valueChanged[int].connect(self.verticalSchwellwertSlidervalue)
        self.verticalSchwellwertSlider.setValue(127)
        vbox.addWidget(self.verticalSchwellwertSlider,0,3)

    def handleButton (self):
        print "Button wurde geklickt"
        if self.buttonClicked == True:
            self.button.setText("Bild in Graustufen darstellen")
            self.buttonClicked = False
            self.grau = False
            #print "Grau Wert",self.grau
        else:
            self.button.setText("Bild in Farbe darstellen")
            self.buttonClicked = True
            self.grau = True

    def handleHistogrammdarstellung (self):
        print  "Histogrammdarstellung"
    def handleGrauwertspreizung (self):
        print "Grauwertspreizung"
        if self.grauwertSpreizungValue == True:
            self.grauwertSpreizungValue = False
        else:
            self.grauwertSpreizungValue = True
    def handleHistogrammausgleich (self):
        print "Histogrammausgleich"
        if self.histogrammAusgleichValue == True:
            self.histogrammAusgleichValue = False
        else:
            self.histogrammAusgleichValue = True
    def handleManuelleBinarisierung (self):
        print "manuelle schwellwert Binarisierung"
        if self.manSchellwertValue == True:
            self.manSchellwertValue = False
        else:
            self.manSchellwertValue = True
    def handleOtsu (self):
        print "Schellwertbestimmung nach Otsu"
        if self.otsuValue == True:
            self.otsuValue = False
        else:
            self.otsuValue = True
    def verticalPositionSlidervalue(self, value):
        self.verticalPositionvalue = value

    def horizontalPositionSlidervalue(self, value):
        self.horizontalPositionvalue = value

    def verticalSchwellwertSlidervalue(self,value):
        self.verticalSchwellwertvalue = value

    def boxSizeSlidervalue(self, value):
        self.boxSizeSlidervalue = value
    def histogramm(self,frame):
        #print "Histogramm Methde"
        h = np.zeros((300,256,3))
        im = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
        hist_item = cv2.calcHist([im],[0],None,[256],[0,256])
        cv2.normalize(hist_item,hist_item,0,255,cv2.NORM_MINMAX)
        hist=np.int32(np.around(hist_item))
        cdf = hist.cumsum()
        cdf_normalized = cdf * hist.max()/ cdf.max()
        for x,y in enumerate(hist):
            cv2.line(h,(x,0),(x,y),(255,255,255))
        for x,y in enumerate(cdf_normalized):
            cv2.line(h,(x,y),(x,y),(255,0,0))
        y = np.flipud(h)
        return y
    def onNewCameraFrame(self, frame):

        # set vertical position of rectangle
        verticalPosition = int(self.verticalPositionvalue)
        # set horizontal position of rectangle
        horizontalPosition = int(self.horizontalPositionvalue)
        #set boxsize
        boxSize = int(self.boxSizeSlidervalue)
        verticalSchwellwert = int(self.verticalSchwellwertvalue)

        # inverting image region in Roi defined by the box
        roi = cv2.bitwise_not(frame[self.h-verticalPosition :
self.h-verticalPosition+boxSize,self.w-horizontalPosition:self.w-horizontalPosition+boxSize])
        cv2.rectangle(frame,(self.w-horizontalPosition,
self.h-verticalPosition),(self.w-horizontalPosition+boxSize,
self.h-verticalPosition+boxSize),(255, 255, 255))
        frame[self.h-verticalPosition :
self.h-verticalPosition+boxSize,self.w-horizontalPosition:self.w-horizontalPosition+boxSize]=roi
        aktuellesFenster = frame.copy()

        if self.buttonClicked == True:
            #in Grau anzeigen
            aktuellesFenster =
cv2.cvtColor(aktuellesFenster,cv2.COLOR_BGR2GRAY)
            # in RGB anzeigen
            aktuellesFenster =
cv2.cvtColor(aktuellesFenster,cv2.COLOR_GRAY2BGR)

        if self.grau == True:

            #print "Bild ist grau"
            aktuellesFenster =
cv2.cvtColor(aktuellesFenster,cv2.COLOR_BGR2GRAY)
            hist=self.histogramm(self.letztesFenster)
            cv2.imshow('hist',hist)

            # Berechnung Grauwertspreizung
            if self.grauwertSpreizungValue:
                aktFenstermin = float(aktuellesFenster[:].min())
                aktFenstermax = float(aktuellesFenster[:].max())
                aktuellesFenster[:]=
(aktuellesFenster[:]-aktFenstermin)*(255.0/(aktFenstermax-aktFenstermin))
                print "Grauwertspreizung abgeschlossen"


            # Histogrammausgleich
            if self.histogrammAusgleichValue:
                aktuellesFenster = cv2.equalizeHist(aktuellesFenster)


            aktuellesFenster =
cv2.cvtColor(aktuellesFenster,cv2.COLOR_GRAY2BGR)
        if self.letztesFenster is not None:
            frame[:] = self.letztesFenster
        self.letztesFenster = aktuellesFenster

        ### TODO:                                    ###
        ### 1. set horizontal position of box        ###
        ### 2. set size of box                       ###
        ### 3. Inverting image region in ROI defined ###
        ###    by the box                            ###


def _main():
    """Create the Qt application and show the main window."""
    glutInit(sys.argv)  # not required on Mac OS X
    app = QtGui.QApplication(sys.argv)
    window = MyMainWindow()
    window.show()
    sys.exit(app.exec_())


if __name__ == "__main__":
    _main()



Am 6. November 2013 22:54 schrieb Pascal Trebert <pascal@trebert.org>:

> svn checkout
>
https://computervision-hsrm-2013-ws.googlecode.com/svn/trunk/
> computervision-hsrm-2013-ws --username pascal.trebert@gmail.com
>
>
> roi_u4.py ist aktuell
> bla.py ist von viktor
> stefan.py ist von stefan
>
> SCHICK MA DEINS !!! =))
>
>
>
>

