﻿using Microsoft.Kinect;
using Microsoft.Kinect.Toolkit.Fusion;
using System;
using System.IO;
using System.Windows;
using System.Media;
using System.Windows.Threading;
using System.Timers;

namespace AMEE.MyFusionTest
{
    /// <summary>
    /// Wraps a single Kinect sensor and a Kinect Fusion reconstruction volume.
    /// Depth frames are processed asynchronously into the volume; a timer
    /// periodically raises <see cref="FPSEvent"/> with the processed frame rate.
    /// </summary>
    public class Kinect : IDisposable
    {

        #region Constants

        /// <summary>
        /// The zero-based device index to choose for reconstruction processing if the 
        /// ReconstructionProcessor AMP options are selected.
        /// Here we automatically choose a device to use for processing by passing -1.
        /// </summary>
        public const int GPU_IDX_TO_USE = -1;

        /// <summary>Density of the reconstruction cube in voxels per meter (vpm). 1000mm / 256vpm = ~3.9mm/voxel</summary>
        public const int VOXELS_PER_METER = 256;

        /// <summary>Voxel resolution of the reconstruction volume along the X axis. At 256vpm the width is x = 512 / 256 = 2m</summary>
        public const int VOXEL_RESOL_X = 512;

        /// <summary>Voxel resolution of the reconstruction volume along the Y axis. Height y = 384 / 256 = 1.5m</summary>
        public const int VOXEL_RESOL_Y = 384;

        /// <summary>Voxel resolution of the reconstruction volume along the Z axis. Depth z = 512 / 256 = 2m</summary>
        public const int VOXEL_RESOL_Z = 512;

        /// <summary>
        /// Minimum depth distance threshold in meters. Depth pixels below this value will be
        /// returned as invalid (0). Min depth must be positive or 0.
        /// </summary>
        public const float MIN_DEPTH = FusionDepthProcessor.DefaultMinimumDepth;

        /// <summary>
        /// Maximum depth distance threshold in meters. Depth pixels above this value will be
        /// returned as invalid (0). Max depth must be greater than 0.
        /// </summary>
        public const float MAX_DEPTH = FusionDepthProcessor.DefaultMaximumDepth;

        /// <summary>
        /// Event interval for the FPS timer in seconds (10 sec).
        /// (The original comment said 5 sec, which contradicted the value.)
        /// </summary>
        public const int FPS_INTERVAL = 10;

        #endregion


        #region private Members

        /// <summary>
        /// Processor type (GPU/CPU) for the volume reconstruction. This parameter selects
        /// AMP (DirectX 11) or CPU processing. Note that CPU processing will most likely
        /// be too slow for real-time processing.
        /// </summary>
        private ReconstructionProcessor prozessorTyp;

        /// <summary>Active Kinect sensor</summary>
        private KinectSensor _sensor;

        /// <summary>The resolution of the depth image to be processed. (640x480 Fps30)</summary>
        private const DepthImageFormat _TiefenBildAufloesung = DepthImageFormat.Resolution640x480Fps30;

        /// <summary>The sensor depth frame data length</summary>
        private int _frameDataLength;

        /// <summary>
        /// Timer to count FPS. The Elapsed event is raised on a ThreadPool thread,
        /// hence all shared counters are guarded by <see cref="frameCountLocker"/>.
        /// </summary>
        Timer fpsTimer;

        /// <summary>Timestamp of the last FPS calculation. Access synchronized via frameCountLocker!</summary>
        DateTime lastFPSTimestamp;
        /// <summary>Number of processed frames. Access synchronized via frameCountLocker!</summary>
        int processedFrameCount;
        /// <summary>Private gate object guarding lastFPSTimestamp and processedFrameCount.</summary>
        Object frameCountLocker = new Object();

        /// <summary>Transformation matrix between the world and camera-view coordinate systems.</summary>
        private Matrix4 worldToCameraTransform;

        /// <summary>The Kinect Fusion volume (cube)</summary>
        private Reconstruction _volume;

        /// <summary>The default transformation between the world and volume coordinate system</summary>
        private Matrix4 _defaultWorldToVolumeTransform;

        /// <summary>Intermediate storage for the depth float data, converted from the depth image frame.</summary>
        private FusionFloatImageFrame depthFloatBuffer;
        /// <summary>Intermediate storage for the point cloud data, converted from the depth float image frame.</summary>
        private FusionPointCloudImageFrame pointCloudBuffer;

        /// <summary>
        /// Parameter to translate the reconstruction based on the minimum depth setting.
        /// When set to false, the +Z axis starts at the camera lens and extends into the scene.
        /// Setting this parameter to true in the constructor moves the volume along +Z away
        /// from the camera by the minimum depth threshold, to enable capturing very small
        /// reconstruction volumes by setting a non-identity world-volume transformation in
        /// <code>ResetReconstruction</code>.
        /// Small volumes should be shifted, as the Kinect hardware has a minimum sensing
        /// limit of ~0.35m, inside which no valid depth is returned; hence it is difficult
        /// to initialize and track robustly when the majority of a small volume lies within
        /// this distance.
        /// </summary>
        private bool translateResetPoseByMinDepthThreshold = true;

        /// <summary>
        /// Indicates that a frame is being processed.
        /// Declared volatile: written by the async worker thread (ProcessDepthData)
        /// and read by the sensor event thread (SensorDepthFrameReady).
        /// </summary>
        private volatile bool isProcessingFrame;

        /// <summary>Whether start/end beeps should be played (set in the constructor).</summary>
        bool doSound = false;
        SoundPlayer _entrySound;
        SoundPlayer _finishedSound;
        const String entrySoundFile = @"media\button-41.wav";
        const String finishedSoundFile = @"media\beep-8.wav";

        #endregion

        /// <summary>
        /// Indicates that a new value of processed frames per second was calculated.
        /// </summary>
        public event EventHandler<FPSEventArg> FPSEvent;


        #region Properties

        /// <summary>
        /// Get the image size of fusion images and bitmap.
        /// </summary>
        public static Size ImageSize
        {
            get
            {
                return GetImageSize(_TiefenBildAufloesung);
            }
        }

        #endregion

        /// <summary>Last calculated frames-per-second value (published via FPSEvent).</summary>
        double _fps = 0.0;

        /// <summary>
        /// Track whether Dispose has been called
        /// </summary>
        private bool disposed;

        /// <summary>
        /// Creates the wrapper with CPU processing and no sound feedback.
        /// </summary>
        public Kinect() : this(false, false)
        {
        }

        /// <summary>
        /// Creates the wrapper.
        /// </summary>
        /// <param name="GPUprocessing">true to run Fusion on the GPU (DirectX 11); false for CPU processing.</param>
        /// <param name="playSound">If true, the beginning and end of a frame processing pass are signalled by (different) beeps.</param>
        public Kinect(bool GPUprocessing, bool playSound)
        {
            doSound = playSound;
            this.prozessorTyp = GPUprocessing ? ReconstructionProcessor.Amp : ReconstructionProcessor.Cpu;
            if (doSound)
            {
                initSound();
            }
        }

        /// <summary>
        /// Initializes the sounds played at the beginning and end of image processing.
        /// Load failures (e.g. missing files) are logged and otherwise ignored so that
        /// missing media never prevents reconstruction from running.
        /// </summary>
        void initSound()
        {
            _entrySound = new SoundPlayer(entrySoundFile);
            _finishedSound = new SoundPlayer(finishedSoundFile);
            try
            {
                _entrySound.Load();
                _finishedSound.Load();
            }
            catch (Exception e)
            {
                Console.Error.WriteLine(e.Message);
            }
        }

        /// <summary>
        /// Creates and starts the FPS timer; resets the FPS reference timestamp.
        /// </summary>
        public void StartFpsTimer()
        {
            this.fpsTimer = new Timer(FPS_INTERVAL * 1000);
            fpsTimer.Elapsed += this.fpsTimerTickHandler;
            fpsTimer.Start();
            lock (frameCountLocker)
            {
                this.lastFPSTimestamp = DateTime.Now; // Set last fps timestamp as now
            }
        }

        /// <summary>
        /// Stops and disposes the FPS timer and resets the frame counter.
        /// Safe to call when no timer is running.
        /// </summary>
        public void StopFpsTimer()
        {
            // Guard: originally this threw a NullReferenceException when the
            // timer had never been started (or was already stopped).
            if (null == this.fpsTimer)
            {
                return;
            }
            this.fpsTimer.Stop();
            fpsTimer.Elapsed -= this.fpsTimerTickHandler;
            fpsTimer.Dispose();
            fpsTimer = null;
            lock (frameCountLocker)
            {
                this.processedFrameCount = 0;
            }
        }

        /// <summary>
        /// Timer event handler that calculates the FPS and raises <see cref="FPSEvent"/>.
        /// Runs on a ThreadPool thread (System.Timers.Timer).
        /// </summary>
        /// <param name="sender">The timer.</param>
        /// <param name="e">Carries the signal time used as "now".</param>
        private void fpsTimerTickHandler(object sender, ElapsedEventArgs e)
        {
            // Calculate time span from last calculation of FPS.
            DateTime now = e.SignalTime;
            int frames;
            DateTime last;
            // Read AND reset the counter in a single critical section. The original
            // used two separate lock blocks, silently dropping any frames counted
            // between the read and the reset.
            lock (frameCountLocker)
            {
                frames = this.processedFrameCount;
                last = this.lastFPSTimestamp;
                this.processedFrameCount = 0;
                this.lastFPSTimestamp = now;
            }
            double intervalSeconds = (now - last).TotalSeconds;
            // Guard against a zero-length interval (would yield Infinity/NaN).
            _fps = intervalSeconds > 0.0 ? frames / intervalSeconds : 0.0;
            // Copy to a local before the null check so a concurrent unsubscribe
            // cannot cause a NullReferenceException.
            EventHandler<FPSEventArg> handler = FPSEvent;
            if (handler != null)
            {
                handler(this, new FPSEventArg { Fps = _fps });
            }
        }

        /// <summary>
        /// Performs startup tasks: finds a connected sensor, enables the depth stream,
        /// creates the reconstruction volume and processing buffers, and starts the sensor.
        /// Logs and returns early (leaving <c>_sensor</c> null) if no sensor is found or
        /// the sensor cannot be started.
        /// </summary>
        public void init()
        {
            // Scan all sensors and start the first connected one.
            // The Kinect must therefore be connected at program start.
            // To make the application robust against (un-)plugging, it is
            // preferable to use KinectSensorChooser (in Microsoft.Kinect.Toolkit).
            foreach (var potentialSensor in KinectSensor.KinectSensors)
            {
                if (potentialSensor.Status == KinectStatus.Connected)
                {
                    this._sensor = potentialSensor;
                    break;
                }
            }
            if (null == this._sensor)
            {
                Console.WriteLine("Es konnte keine angeschlossene Kinect gefunden werden.");
                return;
            }

            // Enable the depth stream to receive depth frames.
            this._sensor.DepthStream.Enable(_TiefenBildAufloesung);
            this._frameDataLength = this._sensor.DepthStream.FramePixelDataLength;

            // Add an event handler to be called whenever there is new depth frame data.
            this._sensor.DepthFrameReady += this.SensorDepthFrameReady;

            var volParam = new ReconstructionParameters(VOXELS_PER_METER, VOXEL_RESOL_X, VOXEL_RESOL_Y, VOXEL_RESOL_Z);
            // Set the world-view transform to identity, so the world origin is the initial camera location.
            this.worldToCameraTransform = Matrix4.Identity;
            // This creates a volume cube with the Kinect at the center of the "near" plane
            // and the volume directly in front of the Kinect.
            // NOTE(review): may throw InvalidOperationException/DllNotFoundException if the
            // Fusion prerequisites are missing; intentionally propagated to the caller.
            this._volume = Reconstruction.FusionCreateReconstruction(volParam, prozessorTyp, GPU_IDX_TO_USE, this.worldToCameraTransform);
            // Remember the default world-volume transform so ResetReconstruction can
            // restore (and optionally translate) it later.
            this._defaultWorldToVolumeTransform = this._volume.GetCurrentWorldToVolumeTransform();

            // Depth frames generated from the depth input.
            this.depthFloatBuffer = new FusionFloatImageFrame((int)ImageSize.Width, (int)ImageSize.Height);
            // Point cloud frames generated from the depth float input.
            this.pointCloudBuffer = new FusionPointCloudImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Start the sensor!
            try
            {
                this._sensor.Start();
            }
            catch (IOException ex)
            {
                // Device is in use
                this._sensor = null;
                Console.WriteLine(ex.Message);

                return;
            }
            catch (InvalidOperationException ex)
            {
                // Device is not valid, not supported or hardware feature unavailable
                this._sensor = null;
                Console.WriteLine(ex.Message);

                return;
            }

            // Set Near Mode by default
            try
            {
                this._sensor.DepthStream.Range = DepthRange.Near;
            }
            catch (InvalidOperationException)
            {
                // device not near mode capable — best effort, deliberately ignored
            }

            // Reset the reconstruction
            this.ResetReconstruction();
        }

        /// <summary>
        /// Delegate for asynchronous processing of the depth data.
        /// </summary>
        /// <param name="depthPixels">The depth pixels.</param>
        delegate void ProcessorDelegate(DepthImagePixel[] depthPixels);

        /// <summary>
        /// Event handler for the Kinect event 'DepthFrameReady'. Copies the frame data
        /// and hands it to <see cref="ProcessDepthData"/> on a ThreadPool thread.
        /// Frames arriving while a previous frame is still being processed are dropped.
        /// </summary>
        /// <param name="sender">The sender.</param>
        /// <param name="e">The <see cref="DepthImageFrameReadyEventArgs"/> instance containing the event data.</param>
        private void SensorDepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
        {
            // Read the next depth frame; null if it has been overwritten in the meantime (stale).
            using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
            {
                if (depthFrame != null && !this.isProcessingFrame)
                {
                    // Mark busy BEFORE starting the async work, so a frame event
                    // firing in between cannot start a second worker.
                    this.isProcessingFrame = true;

                    // Copy pixel data from the image into a temporary array.
                    var depthPixels = new DepthImagePixel[this._frameDataLength];
                    depthFrame.CopyDepthImagePixelDataTo(depthPixels);

                    // Start asynchronous processing. EndInvoke is called in the
                    // completion callback — required to release the async-state
                    // resources of BeginInvoke (the original never called it).
                    ProcessorDelegate dlgt = new ProcessorDelegate(ProcessDepthData);
                    dlgt.BeginInvoke(depthPixels, ar => dlgt.EndInvoke(ar), null);
                }
            }
        }

        /// <summary>
        /// Processes the depth information: converts the raw depth to a float frame,
        /// integrates it into the reconstruction volume (camera alignment + integration),
        /// and raycasts a point cloud. Increments the processed frame counter on success.
        /// </summary>
        /// <param name="depthPixels">The depth data array to process.</param>
        private void ProcessDepthData(DepthImagePixel[] depthPixels)
        {
            try
            {
                // Convert the depth image frame from the unsigned short format to a
                // float format giving the distance in meters from the optical center axis.
                FusionDepthProcessor.DepthToDepthFloatFrame(
                    depthPixels,
                    (int)ImageSize.Width,
                    (int)ImageSize.Height,
                    this.depthFloatBuffer,
                    FusionDepthProcessor.DefaultMinimumDepth,
                    FusionDepthProcessor.DefaultMaximumDepth,
                    false);

                // ProcessFrame will first calculate the camera pose and then integrate
                // if tracking is successful. Equivalent to calling separately:
                //  1. AlignDepthFloatToReconstruction
                //  2. IntegrateFrame
                // Afterwards CalculatePointCloud and ShadePointCloud may be called.
                bool trackingSucceeded = this._volume.ProcessFrame(
                    this.depthFloatBuffer,
                    5,   // max iterations of the align-camera tracking algorithm (SDK default = 7)
                    100, // integration weight; lower = more noise but better for dynamic scenes (SDK default = 150)
                    this._volume.GetCurrentWorldToCameraTransform()); // best guess of the latest camera pose (usually the result of the previous ProcessFrame call)

                if (!trackingSucceeded)
                {
                    // If camera tracking fails, no data integration or raycast for the
                    // reference point cloud takes place and the internal camera pose
                    // remains unchanged.
                    Console.WriteLine("Tracking failed");
                }
                else
                {
                    // Adopt the new camera pose.
                    Matrix4 calculatedCameraPose = this._volume.GetCurrentWorldToCameraTransform();
                    this.worldToCameraTransform = calculatedCameraPose;
                }

                /*
                 Calculate a point cloud by raycasting into the reconstruction volume. Returns the point
                 cloud with 3D points and normals of the zero crossings of the dense surface for all
                 visible pixels of the image, starting from the given camera pose.
                 This point cloud can be used as the reference frame in the next call to
                 FusionDepthProcessor.AlignPointClouds, or passed to FusionDepthProcessor.ShadePointCloud
                 to produce a visible image output.
                 */
                this._volume.CalculatePointCloud(this.pointCloudBuffer, this.worldToCameraTransform);

                // The input frame was processed successfully, increase the processed frame count.
                lock (frameCountLocker)
                {
                    ++this.processedFrameCount;
                }
            }
            catch (InvalidOperationException ex)
            {
                Console.WriteLine(ex.Message);
            }
            finally
            {
                this.isProcessingFrame = false;
                playSound(Sound.EndBeep);
            }
        }

        /// <summary>
        /// Plays a beep, if sound was enabled in the constructor.
        /// </summary>
        /// <param name="sound">Start or end beep.</param>
        void playSound(Sound sound)
        {
            if (doSound)
            {
                try
                {
                    switch (sound)
                    {
                        case Sound.StartBeep:
                            _entrySound.Play();
                            break;
                        case Sound.EndBeep:
                            _finishedSound.Play();
                            break;
                        default:
                            break;
                    }
                }
                catch (Exception exc)
                {
                    Console.WriteLine("Sound Error: " + exc.Message);
                }
            }
        }


        /// <summary>
        /// Get the depth image size from the input depth image format.
        /// </summary>
        /// <param name="imageFormat">The depth image format.</param>
        /// <returns>The width and height of the input depth image format.</returns>
        /// <exception cref="ArgumentOutOfRangeException">For unsupported formats.</exception>
        public static Size GetImageSize(DepthImageFormat imageFormat)
        {
            switch (imageFormat)
            {
                case DepthImageFormat.Resolution320x240Fps30:
                    return new Size(320, 240);

                case DepthImageFormat.Resolution640x480Fps30:
                    return new Size(640, 480);

                case DepthImageFormat.Resolution80x60Fps30:
                    return new Size(80, 60);
            }

            throw new ArgumentOutOfRangeException("imageFormat");
        }

        /// <summary>
        /// Reset the reconstruction to its initial value.
        /// </summary>
        private void ResetReconstruction()
        {
            // Set the world-view transform to identity, so the world origin is the initial camera location.
            this.worldToCameraTransform = Matrix4.Identity;

            if (null != this._volume)
            {
                // Translate the reconstruction volume away from the world origin by an amount
                // equal to the minimum depth threshold. This ensures that some depth signal
                // falls inside the volume. If false is set, the default world origin is placed
                // at the center of the front face of the cube, which puts the cube directly in
                // front of the initial camera position with the +Z axis pointing into the cube
                // along the camera's view direction.
                if (this.translateResetPoseByMinDepthThreshold)
                {
                    Matrix4 worldToVolumeTransform = this._defaultWorldToVolumeTransform;

                    // Translate the volume in the Z axis by the minDepthThreshold distance
                    float minDist = (Kinect.MIN_DEPTH < Kinect.MAX_DEPTH) ? MIN_DEPTH : MAX_DEPTH;
                    worldToVolumeTransform.M43 -= minDist * VOXELS_PER_METER;

                    this._volume.ResetReconstruction(this.worldToCameraTransform, worldToVolumeTransform);
                }
                else
                {
                    this._volume.ResetReconstruction(this.worldToCameraTransform);
                }
            }

            if (null != this.fpsTimer)
            {
                // Reset the processed frame count and restart the FPS timer.
                this.fpsTimer.Stop();

                lock (frameCountLocker)
                {
                    this.processedFrameCount = 0;
                }
                this.fpsTimer.Start();
            }
        }


        /// <summary>
        /// Releases all resources held by this instance.
        /// </summary>
        public void Dispose()
        {
            this.Dispose(true);

            // This object will be cleaned up by the Dispose method.
            GC.SuppressFinalize(this);
        }

        /// <summary>
        /// Frees the sensor, timer, Fusion volume, image frame buffers and sound players.
        /// </summary>
        /// <param name="disposing">Whether the function was called from Dispose.</param>
        protected virtual void Dispose(bool disposing)
        {
            if (!this.disposed)
            {
                if (disposing)
                {
                    if (null != fpsTimer)
                    {
                        StopFpsTimer();
                    }
                    if (null != this._sensor)
                    {
                        // Unhook the event and stop the sensor so no further frames
                        // arrive after the buffers below are disposed.
                        this._sensor.DepthFrameReady -= this.SensorDepthFrameReady;
                        try
                        {
                            this._sensor.Stop();
                        }
                        catch (Exception)
                        {
                            // best effort — never throw from Dispose
                        }
                    }
                    if (null != this._volume)
                    {
                        this._volume.Dispose();
                    }
                    // The original leaked both FusionImageFrame buffers (IDisposable).
                    if (null != this.depthFloatBuffer)
                    {
                        this.depthFloatBuffer.Dispose();
                    }
                    if (null != this.pointCloudBuffer)
                    {
                        this.pointCloudBuffer.Dispose();
                    }
                    if (null != _entrySound)
                    {
                        _entrySound.Dispose();
                    }
                    if (null != _finishedSound)
                    {
                        _finishedSound.Dispose();
                    }
                }
                this.disposed = true;
            }
        }

    }


    /// <summary>
    /// Event argument carrying a freshly calculated frames-per-second value.
    /// </summary>
    public class FPSEventArg : EventArgs
    {
        /// <summary>Backing field for <see cref="Fps"/>.</summary>
        private double _fps;

        /// <summary>Gets or sets the processed frames per second.</summary>
        public double Fps
        {
            get { return _fps; }
            set { _fps = value; }
        }
    }

    /// <summary>
    /// Identifies which beep to play around a frame-processing pass.
    /// </summary>
    enum Sound
    {
        /// <summary>Beep signalling the start of frame processing.</summary>
        StartBeep = 0,

        /// <summary>Beep signalling the end of frame processing.</summary>
        EndBeep = 1
    }
}
