# -*- coding: utf-8 -*-

from my_simulation import MySimulatedIRSensor, MySystem

import quantities
import signal
import time
import sys
import random

import vtk
from vtk.util.colors import red, green, blue
from abstract_sensor.definition import VTKSmartAbstractSensor

import vtk
import pygst
pygst.require("0.10")
import gst

import artoolkit as ar


class MyAbstractSensorNode(VTKSmartAbstractSensor):
    """Smart abstract sensor node for a simulated IR distance sensor.

    Maps raw sensor measurements into the MOSAIC input-event format and
    copies validated results into the output event.
    """

    def __init__(self, outputDataSheet, sensor_interface, appName, outputLayer=None, logger=None):
        """Initialise the underlying VTKSmartAbstractSensor.

        :param outputDataSheet: path to the event datasheet XML file
        :param sensor_interface: sensor interface object delivering measurements
        :param appName: application name passed to the base class
        :param outputLayer: accepted for interface compatibility; currently unused
        :param logger: accepted for interface compatibility; currently unused

        NOTE(fix): the defaults were mutable lists (``[]``), which Python
        shares across all calls; replaced with ``None``. Both parameters are
        unused in this subclass, so behaviour is unchanged.
        """
        VTKSmartAbstractSensor.__init__(self,
                                     outputDataSheet=outputDataSheet,
                                     sensor_interface=sensor_interface,
                                     appName = appName)

    def input_event_mapping(self, value, time=None):
        '''
        Map an incoming sensor measurement to the input event format.
        The user has to map the incoming sensor measurements to the input
        event format. Here MOSAIC ensures that only valid information
        (correct size, unit, etc.) is integrated.

        NOTE(fix): the ``time`` default was a mutable list (``[]``) and also
        shadowed the imported ``time`` module; it is unused here, so the
        default is now ``None`` with identical behaviour.
        '''
        # Measurement value is interpreted in meters; uncertainty is a
        # fixed 0.5 cm^2 (hard-coded for this simulated sensor).
        self.input_layer.input_event.set_value(value*quantities.meter, 'Measurement')
        self.input_layer.input_event.set_value([0.5]*quantities.centimeter**2, 'Uncertainty')

    def output_event_mapping(self, results, common_validity):
        """Copy the first input event's measurement into the output event.

        Timestamp, uncertainty, position and orientation are fixed dummy
        values for this test node; only the measurement and the validity
        flag vary per event.
        """
        meas=self.input_events[0].get_value('Measurement')
        self.output_event.set_value(meas, 'Measurement')
        self.output_event.set_value(5.0*quantities.second, 'Timestamp')
        self.output_event.set_value([0.5]*quantities.centimeter**2, 'Uncertainty')
        self.output_event.set_value([100, 200, 0]*quantities.centimeter, 'Position')
        self.output_event.set_value([100, 200, 0]*quantities.degree, 'Orientation')
        self.output_event.set_value([common_validity], 'Validity')


if __name__ == "__main__":

    # --- Sensor node setup -------------------------------------------------
    sensor_file = '../datasheets/sensors/' + 'IRDistanceSensor.xml'
    simulatedSensorInterf = MySimulatedIRSensor(sensorDataSheet=sensor_file,
                                               position=[],
                                               orientation=[])

    channel_file='../datasheets/events/'+"Dist_IRe.xml"
    SAS = MyAbstractSensorNode(outputDataSheet=channel_file,
                                 sensor_interface=simulatedSensorInterf,
                                 appName="SAS Test",
                                 logger=[])

    # Keep the abstract sensor node and the simulated interface at the same
    # pose (SAS stores quantities, the interface wants plain magnitudes).
    position=[1,2,3]*quantities.centimeter
    SAS.setPosition(position)
    simulatedSensorInterf.set_position(position.magnitude)
    orientation=[0,0,0]*quantities.degree
    SAS.setOrientation(orientation)
    simulatedSensorInterf.set_orientation(orientation.magnitude)

    #SAS.startMeasurements()

    # --- GStreamer capture pipeline ---------------------------------------
    # v4l2 webcam -> rate/scale to 640x480 @ 12.5 fps RGB -> appsink "sink"
    print ("setting up Gstreamer-Pipeline ... "),
    conf = "v4l2src device=/dev/video0 ! video/x-raw-rgb,width=640,height=480 ! videorate ! \
            video/x-raw-rgb,framerate=25/2 ! videoscale ! video/x-raw-rgb,width=640,height=480 ! \
            ffmpegcolorspace ! identity name=artoolkit ! appsink name=sink"

    player = gst.parse_launch(conf)
    sink = player.get_by_name("sink")
    print ("done")

    # --- VTK image import for the camera frames ----------------------------
    print ("setting up ImageImport ... "),
    imgCamera = vtk.vtkImageImport()
    imgCamera.SetDataScalarTypeToUnsignedChar()
    imgCamera.SetNumberOfScalarComponents(3)
    imgCamera.SetDataExtent(0,640-1,0,480-1,0,0)
    imgCamera.SetWholeExtent(0,640-1,0,480-1,0,0)
    print ("done")

    print ("setting up ImageActor ... "),
    actCamera = vtk.vtkImageActor()
    actCamera.SetInput(imgCamera.GetOutput())

    # Raw frames arrive upside down relative to VTK's image orientation,
    # so flip the camera image actor around the X axis.
    upside_down = vtk.vtkTransform()
    upside_down.RotateX(180)
    actCamera.SetUserTransform(upside_down)
    print ("done")

    actSensor=SAS.initSensorBeam()
    actText=SAS.initText()

    # --- Two-layer renderer: camera image behind, 3D overlay in front ------
    print ("setting up Renderer ... "),
    renCamera = vtk.vtkRenderer()
    # set the camera-image to the background
    renCamera.SetLayer(0)
    renCamera.AddActor(actCamera)
    renCube = vtk.vtkRenderer()
    renCube.AddActor(actSensor)
    renCube.AddActor(actText)
    renCube.SetLayer(1)
    print ("done")

    print ("setting up cameras ... "),
    camCube = renCube.GetActiveCamera()
    renCamera.GetActiveCamera().SetWindowCenter(1,-1)
    # this was tuned manually, maybe someone can explain these values
    renCamera.GetActiveCamera().SetPosition(0,0,2*446)
    print ("done")

    print ("setting up RenderWindow ... "),
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.SetNumberOfLayers(2)
    renderWindow.AddRenderer( renCamera )
    renderWindow.AddRenderer( renCube  )
    renderWindow.SetSize( 640, 480 )
    print ("done")

    # --- ARToolkit marker tracking -----------------------------------------
    print ("setting up ARToolkit ... "),
    # load the camera-parameter and set the x,y-pixel-lengths
    ar.VideoParameter("./Data/camera_para.dat", 640, 480)
    # generate a new marker (pattern, size in millimeter, center-x, center-y)
    pattern = ar.MakePattern("./Data/patt.sample1", 80, 0, 0)
    # load the marker to artoolkit
    ar.LoadPattern(pattern)

    # that was all, we are done...
    print ("done")

    # start webcam-capturing
    player.set_state(gst.STATE_PLAYING)

    # transformation-matrix for the camera
    transformation = vtk.vtkPerspectiveTransform()

    # BUGFIX: the pipeline teardown below the loop was unreachable dead code
    # (the loop never breaks); the loop is now wrapped so that Ctrl-C exits
    # cleanly and the pipeline is always stopped.
    try:
        while True:
            # get the next frame from the webcam
            # (renamed from `buffer`, which shadowed the builtin)
            frame = sink.emit("pull-buffer")

            imgCamera.CopyImportVoidPointer(frame.data, len(frame.data))

            # try to detect markers in the current frame (with a threshold of 120)
            arMarkerInfo = ar.DetectMarker(frame.data, 120)

            # true if the registered pattern was found (0 is here the minimum
            # confidence) and the 4th parameter defines the detection mode
            # (0 == arGetTransMat and 1 == arGetTransMatCont)
            if ar.FindPattern(arMarkerInfo, pattern, 0, 1):
                renCube.DrawOn()
                # transform the artoolkit-transformation-matrix to an
                # opengl-camera-view
                gl_para = ar.glCameraViewRH(pattern.dTransformationMatrix)
                # set the transformation
                transformation.SetMatrix(gl_para)
                # and calculate the inverse
                transformation.Inverse()
                # get the matrix
                mat = transformation.GetMatrix()
                # and change the camera parameters: position from the
                # translation row, view-up / view direction from the
                # rotation rows
                camCube.SetPosition(   mat.GetElement(3,0), mat.GetElement(3,1), mat.GetElement(3,2))
                camCube.SetViewUp(     mat.GetElement(1,0), mat.GetElement(1,1), mat.GetElement(1,2))
                camCube.SetFocalPoint( mat.GetElement(3,0) - mat.GetElement(2,0),
                                       mat.GetElement(3,1) - mat.GetElement(2,1),
                                       mat.GetElement(3,2) - mat.GetElement(2,2))

            else:
                # marker lost: hide the overlay layer
                renCube.DrawOff()

            renderWindow.Render()
            time.sleep(0.05)
    except KeyboardInterrupt:
        # Ctrl-C is the intended way to leave the capture loop
        pass
    finally:
        # always stop the GStreamer pipeline (previously unreachable)
        player.set_state(gst.STATE_NULL)