﻿// Date: 30.05.11

// Version 1.0: (01.06.11) Hand Tracker
// Version 1.1: (07.06.11) Skeleton Tracker
// Version 1.2: (01.08.11) Object Scan

// Version 2: (01.10.11) Changed from OpenNI to Microsoft SDK

// Revision 11

using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Microsoft.Xna.Framework.Graphics;
using System.Threading;
using Microsoft.Xna.Framework;
using NOVA.Utilities;
using NOVA.UI;
using Microsoft.Speech.Recognition;
using Microsoft.Kinect;
using System.IO;

using Skeleton = NOVA.Components.Kinect.Skeleton;
using Microsoft.Kinect.Toolkit.Interaction;
using System.Collections.ObjectModel;


namespace NOVA.Components.Kinect
{
    #region Enums

    /// <summary>
    /// Enum defining the different states in which
    /// the tracking can be.
    /// NOTE(review): not referenced within this file — member meanings below are
    /// inferred from the names; confirm against external usage.
    /// </summary>
    public enum KinectState
    {
        // Presumably: a user is actively tracked in a session.
        IN_SESSION,
        // Presumably: no tracking session is active.
        NOT_IN_SESSION,
        // Presumably: session was interrupted and tracking tries to reacquire quickly.
        QUICK_REFOCUS
    }

    /// <summary>
    /// Enum defining the different Kinect application modes.
    /// The names mirror the feature history in the file header
    /// (hand tracker, skeleton/stick figure, object scan).
    /// </summary>
    public enum KinectApplication
    {
        HandTracker,
        StickFigure,
        ObjectScan
    }

    #endregion

    /// <summary>
    /// Snapshot of a single skeleton joint: screen position, world position and
    /// the SDK tracking state, as captured in Sensor_SkeletonFrameReady.
    /// </summary>
    public class JointInfo
    {
        /// <summary>
        /// Joint position in screen space.
        /// NOTE(review): this previously claimed a normalized 0..1 range, but the
        /// values stored by Sensor_SkeletonFrameReady are pixel coordinates scaled
        /// to the current screen size (Core.Width/Core.Height) — confirm which
        /// contract consumers expect.
        /// </summary>
        public Vector2 ScreenPosition;
        // Joint position in Kinect skeleton (camera) space, copied from the SDK's SkeletonPoint.
        public Vector3 WorldPosition;
        // Tracking quality reported by the SDK for this joint.
        public JointTrackingState TrackingState;

        /// <summary>Creates a joint snapshot from the given positions and state.</summary>
        public JointInfo(Vector2 screen, Vector3 world, JointTrackingState state)
        {
            ScreenPosition = screen;
            WorldPosition = world;
            TrackingState = state;
        }
    }

    /// <summary>
    /// Per-player skeleton snapshot assembled from a Kinect skeleton frame:
    /// joint map, overall position, tracking identity/state and — once the
    /// interaction stream reports them — the hand pointers.
    /// Setters are internal: only the Kinect wrapper populates instances.
    /// </summary>
    public class Skeleton
    {
        // Backing fields; the defaults describe an untracked, empty skeleton.
        private Dictionary<JointType, JointInfo> jointInfos = null;
        private Vector3 positionInWorld = Vector3.Zero;
        private Vector2 positionOnScreen = Vector2.Zero;
        private int skeletonTrackingId = 0;
        private SkeletonTrackingState state = SkeletonTrackingState.NotTracked;
        private int playerIndex = 0;
        private ReadOnlyCollection<InteractionHandPointer> pointers;

        /// <summary>Per-joint data keyed by joint type.</summary>
        public Dictionary<JointType, JointInfo> Joints
        {
            get { return this.jointInfos; }
            internal set { this.jointInfos = value; }
        }

        /// <summary>Skeleton position in Kinect world (camera) space.</summary>
        public Vector3 WorldPosition
        {
            get { return this.positionInWorld; }
            internal set { this.positionInWorld = value; }
        }

        /// <summary>Skeleton position in 640x480 color-image pixel coordinates.</summary>
        public Vector2 ScreenPosition
        {
            get { return this.positionOnScreen; }
            internal set { this.positionOnScreen = value; }
        }

        /// <summary>SDK tracking id for this skeleton.</summary>
        public int TrackingID
        {
            get { return this.skeletonTrackingId; }
            internal set { this.skeletonTrackingId = value; }
        }

        /// <summary>Whether the skeleton is tracked, position-only or untracked.</summary>
        public SkeletonTrackingState TrackingState
        {
            get { return this.state; }
            set { this.state = value; }
        }

        /// <summary>User index slot. NOTE(review): never written in this file — confirm producer.</summary>
        public int UserIndex
        {
            get { return this.playerIndex; }
            internal set { this.playerIndex = value; }
        }

        /// <summary>Hand pointer data from the interaction stream (may be null before the first interaction frame).</summary>
        public ReadOnlyCollection<InteractionHandPointer> HandPointers
        {
            get { return this.pointers; }
            internal set { this.pointers = value; }
        }
    }

    /// <summary>
    /// Minimal IInteractionClient implementation: every screen location is
    /// reported as a grip target and never as a press target.
    /// </summary>
    class InteractionClient : IInteractionClient
    {
        public InteractionInfo GetInteractionInfoAtLocation(int skeletonTrackingId, InteractionHandType handType, double x, double y)
        {
            // Same values as before, expressed with an object initializer.
            return new InteractionInfo
            {
                IsGripTarget = true,
                IsPressTarget = false,
                PressAttractionPointX = 0.0,
                PressAttractionPointY = 0.0,
                PressTargetControlId = 0
            };
        }
    }

    public class Kinect : IDisposable
    {
        public enum KinectCameraImage
        {
            /// <summary>
            /// No kinect image is shown.
            /// </summary>
            None = 0,
            /// <summary>
            /// The depth image of the kinect is shown.
            /// </summary>
            Depth,
            /// <summary>
            /// The RGB image of the kinect is shown.
            /// </summary>
            RGB,
            /// <summary>
            /// A reduced version of the RGB image of the kinect is shown,
            /// which shows only the active players using alpha blend.
            /// </summary>
            ReducedRGB
        }

        #region Members
        // Active sensor; stays null when no connected Kinect was found or init failed.
        KinectSensor sensor = null;
        // Frame counters used to compute FPS once per second (see Sensor_DepthFrameReady).
        int totalFrames = 0;
        int lastFrames = 0;
        DateTime lastTime = DateTime.MaxValue;
        // Last received skeleton frame; only used as a "data has arrived" flag (SkeletonDataReady).
        SkeletonFrame m_skeletonFrame;
        private Vector3 handPosition = Vector3.Zero;
        private bool isTrackingPose = false;

        // 0/1 per-pixel player mask in 640x480 color-image space, built in
        // convertDepthFrame and applied as alpha in ReducedRGB mode.
        byte[] m_hollowFrame = new byte[640 * 480];

        // Depth frames per second, updated once per second in Sensor_DepthFrameReady.
        public int m_iFPS;

        Texture2D m_texColor;
        Texture2D m_texDepth;

        // We want to control how depth data gets converted into false-color data
        // for more intuitive visualization, so we keep 32-bit color frame buffer versions of
        // these, to be updated whenever we receive and process a 16-bit frame.
        const int RED_IDX = 2;
        const int GREEN_IDX = 1;
        const int BLUE_IDX = 0;
        byte[] depthFrame32 = new byte[320 * 240 * 4];

        //private List<Dictionary<JointID, JointInfo>> skeletons;
        //private List<Skeleton> this.skeletons;
        // Fixed-size skeleton slots; entries stay null until the first skeleton frame.
        private Skeleton[] skeletons = null;

        private KinectCameraImage showCameraImage = KinectCameraImage.None;

        // NOTE(review): never started (thread creation is commented out in the
        // constructor) — candidate for removal.
        Thread thread;

        // Interaction members
        InteractionStream interactionStream;
        
        #endregion

        #region Properties

        /// <summary>
        /// Gets the underlying Kinect sensor (null when initialization failed).
        /// </summary>
        public KinectSensor Sensor
        {
            get { return this.sensor; }
        }

        /// <summary>
        /// Gets the RGB video image.
        /// </summary>
        public Texture2D TextureColor { get { return m_texColor; } }

        /// <summary>
        /// Gets the depth video image (raw 16-bit depth samples stored in a
        /// 2-byte-per-pixel Bgra4444 texture).
        /// </summary>
        public Texture2D TextureDepth { get { return m_texDepth; } }

        // TO_DELETE >
        // NOTE(review): nothing in this file writes HandPosition or IsTrackingPose
        // besides their setters; they look like legacy hand-tracker state kept for
        // external callers — confirm before deleting.
        public Vector3 HandPosition
        {
            get { return this.handPosition; }
            set { this.handPosition = value; }
        }

        //public Dictionary<int, KinectSkeleton> Skeletons { get; set; }

        public bool IsTrackingPose
        {
            get { return this.isTrackingPose; }
            set { this.isTrackingPose = value; }
        }

        // TO_DELETE <              

        /// <summary>
        /// Per-player skeleton accessors. Each returns the skeleton in the given
        /// slot, or null when no skeleton data has been received yet or the slot
        /// does not exist.
        /// </summary>
        public Skeleton Player1 { get { return this.GetPlayerSkeleton(0); } }
        public Skeleton Player2 { get { return this.GetPlayerSkeleton(1); } }
        public Skeleton Player3 { get { return this.GetPlayerSkeleton(2); } }
        public Skeleton Player4 { get { return this.GetPlayerSkeleton(3); } }
        public Skeleton Player5 { get { return this.GetPlayerSkeleton(4); } }
        public Skeleton Player6 { get { return this.GetPlayerSkeleton(5); } }
        public Skeleton Player7 { get { return this.GetPlayerSkeleton(6); } }
        public Skeleton Player8 { get { return this.GetPlayerSkeleton(7); } }

        /// <summary>
        /// Returns the skeleton at the given slot index, or null when the index
        /// is outside the backing array. The array is allocated with length 6
        /// (the Kinect v1 SDK tracks at most six skeletons), so Player7/Player8
        /// previously threw IndexOutOfRangeException; they now return null.
        /// </summary>
        private Skeleton GetPlayerSkeleton(int index)
        {
            Skeleton[] current = this.skeletons;
            return (current != null && index >= 0 && index < current.Length) ? current[index] : null;
        }

        /// <summary>
        /// Gets a snapshot list of all skeleton slots. Entries are null until
        /// the first skeleton frame has been processed.
        /// </summary>
        public List<Skeleton> Skeletons { get { return this.skeletons.ToList<Skeleton>(); } }

        /// <summary>
        /// Gets the number of skeleton slots (the fixed backing-array capacity),
        /// NOT the number of actively tracked users.
        /// </summary>
        public int SkeletonCount { get { return this.skeletons.Length; } }

        /// <summary>
        /// True once at least one skeleton frame has been received.
        /// </summary>
        public bool SkeletonDataReady { get { return (m_skeletonFrame != null); } }

        /// <summary>
        /// Selects which camera image (if any) is rendered; see KinectCameraImage.
        /// </summary>
        public KinectCameraImage ShowCameraImage
        {
            get { return this.showCameraImage; }
            set { this.showCameraImage = value; }
        }

        #endregion

        #region Constructor
        /// <summary>
        /// Initializes the Kinect wrapper: picks the first connected sensor,
        /// creates the color/depth textures, enables the color, depth and
        /// skeleton streams, wires the frame events plus the interaction stream,
        /// and starts the sensor. On failure the error is reported through
        /// ExceptionManager and this.sensor stays null (all frame handlers guard
        /// against a null sensor).
        /// </summary>
        /// <param name="graphicsDevice">Graphics device used to allocate the video textures.</param>
        public Kinect(GraphicsDevice graphicsDevice)
        {
            try
            {
                // Use the first sensor that reports itself as connected.
                foreach (var potentialSensor in KinectSensor.KinectSensors)
                {
                    if (potentialSensor.Status == KinectStatus.Connected)
                    {
                        this.sensor = potentialSensor;
                        break;
                    }
                }
            }
            catch (InvalidOperationException)
            {
                ExceptionManager.Add(new Exception("Runtime initialization failed. Please make sure Kinect device is plugged in."));
                return;
            }

            // Bgra4444 is 2 bytes per pixel, matching the raw 16-bit depth
            // samples uploaded in Sensor_DepthFrameReady.
            m_texDepth = new Texture2D(graphicsDevice, 640, 480, false, SurfaceFormat.Bgra4444);
            m_texColor = new Texture2D(graphicsDevice, 640, 480, false, SurfaceFormat.Color);

            // The Kinect v1 SDK delivers at most six skeletons per frame.
            this.skeletons = new Skeleton[6];

            if (null != this.sensor)
            {
                try
                {
                    // The interaction stream provides grip/press hand-pointer data;
                    // it is fed from the depth and skeleton frame handlers.
                    InteractionClient client = new InteractionClient();
                    this.interactionStream = new InteractionStream(this.sensor, client);
                    this.interactionStream.InteractionFrameReady += new EventHandler<InteractionFrameReadyEventArgs>(this.InteractionFrameReady);

                    // Enable the streams exactly once (Enable used to be called
                    // twice for the skeleton stream). Skeleton smoothing is
                    // intentionally off; pass a TransformSmoothParameters instance
                    // to Enable to re-activate filtering.
                    this.sensor.SkeletonStream.Enable();
                    this.sensor.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);
                    this.sensor.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30);

                    this.sensor.DepthFrameReady += new EventHandler<DepthImageFrameReadyEventArgs>(this.Sensor_DepthFrameReady);
                    this.sensor.SkeletonFrameReady += new EventHandler<SkeletonFrameReadyEventArgs>(this.Sensor_SkeletonFrameReady);
                    this.sensor.ColorFrameReady += new EventHandler<ColorImageFrameReadyEventArgs>(this.Sensor_ColorFrameReady);

                    this.sensor.Start();
                }
                catch (IOException)
                {
                    ExceptionManager.Add(new Exception("Failed to open stream. Please make sure to specify a supported image type and resolution."));
                    this.sensor = null;
                    return;
                }
            }

            lastTime = DateTime.Now;

            m_iFPS = 0;
            this.showCameraImage = KinectCameraImage.None;

            // Processing is event-driven; the polling thread is unused.
            //thread = new Thread(Update);
            //thread.Start();
        }

        #endregion

        #region Events
        /// <summary>
        /// Handles a new depth frame: feeds the raw depth pixels to the
        /// interaction stream, uploads the 16-bit frame into the depth texture,
        /// and updates the FPS counter once per second.
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments carrying the depth frame</param>
        private void Sensor_DepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
        {
            if (this.sensor == null)
            {
                return;
            }

            // Frames are pooled by the SDK; failing to dispose them stalls the
            // pipeline, so take ownership with a using block (was leaked before).
            using (DepthImageFrame image = e.OpenDepthImageFrame())
            {
                if (image == null)
                {
                    return;
                }

                short[] bytes16 = new short[image.PixelDataLength];
                image.CopyPixelDataTo(bytes16);

                DepthImagePixel[] data = new DepthImagePixel[image.PixelDataLength];
                image.CopyDepthImagePixelDataTo(data);

                // Feed depth data to the interaction stream (grip detection).
                this.interactionStream.ProcessDepth(data, image.Timestamp);

                // Raw 16-bit depth samples go straight into the 2-byte-per-pixel
                // Bgra4444 texture; false-color conversion (convertDepthFrame) is
                // currently not used.
                m_texDepth.SetData<short>(bytes16);
            }

            ++totalFrames;

            // Recompute FPS once per wall-clock second.
            DateTime cur = DateTime.Now;
            if (cur.Subtract(lastTime) > TimeSpan.FromSeconds(1))
            {
                int frameDiff = totalFrames - lastFrames;
                lastFrames = totalFrames;
                lastTime = cur;

                m_iFPS = frameDiff;
            }
        }

        /// <summary>
        /// Handles a new skeleton frame: copies the raw skeleton data, feeds it
        /// (with the current accelerometer reading) to the interaction stream,
        /// and rebuilds the per-player Skeleton snapshots including per-joint
        /// screen/world positions.
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments carrying the skeleton frame</param>
        private void Sensor_SkeletonFrameReady(object sender, SkeletonFrameReadyEventArgs e)
        {
            if (this.sensor == null)
            {
                return;
            }

            // Frames are pooled by the SDK and must be disposed (was leaked before).
            using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
            {
                if (skeletonFrame == null)
                {
                    return;
                }

                // Only consumed as a "data has arrived at least once" flag by
                // SkeletonDataReady; the frame object itself is disposed below.
                m_skeletonFrame = skeletonFrame;

                Microsoft.Kinect.Skeleton[] rawSkeletons = new Microsoft.Kinect.Skeleton[skeletonFrame.SkeletonArrayLength];
                skeletonFrame.CopySkeletonDataTo(rawSkeletons);

                // Feed skeleton data to the interaction stream.
                var accelerometerReading = this.Sensor.AccelerometerGetCurrentReading();
                interactionStream.ProcessSkeleton(rawSkeletons, accelerometerReading, skeletonFrame.Timestamp);

                // Guard against a mismatch between the SDK's array length and our
                // fixed-size slot array.
                int count = Math.Min(rawSkeletons.Length, this.skeletons.Length);
                for (int i = 0; i < count; i++)
                {
                    Skeleton skeleton = new Skeleton();

                    skeleton.WorldPosition = GetWorldVector3(rawSkeletons[i].Position);
                    ColorImagePoint v2 = this.sensor.MapSkeletonPointToColor(rawSkeletons[i].Position, ColorImageFormat.RgbResolution640x480Fps30);

                    // Whole-skeleton position stays in 640x480 color-image pixels.
                    skeleton.ScreenPosition = new Vector2(v2.X, v2.Y);
                    skeleton.TrackingID = rawSkeletons[i].TrackingId;
                    skeleton.TrackingState = rawSkeletons[i].TrackingState;

                    Dictionary<JointType, JointInfo> joints = new Dictionary<JointType, JointInfo>();

                    // Process all joints (also for non-tracked skeletons, as before).
                    foreach (Joint joint in rawSkeletons[i].Joints)
                    {
                        ColorImagePoint jointPt = this.sensor.MapSkeletonPointToColor(joint.Position, ColorImageFormat.RgbResolution640x480Fps30);

                        // Scale the 640x480 color-space coordinates to the current
                        // screen resolution.
                        Vector2 screenPos = new Vector2((jointPt.X * Core.Width) / 640f, (jointPt.Y * Core.Height) / 480f);
                        joints.Add(joint.JointType, new JointInfo(screenPos, GetWorldVector3(joint.Position), joint.TrackingState));
                    }

                    skeleton.Joints = joints;

                    // Publish this skeleton into its slot.
                    this.skeletons[i] = skeleton;
                }
            }
        }

        /// <summary>
        /// Handles a new color frame: converts the BGRA pixel data to RGBA and
        /// uploads it into the color texture. In ReducedRGB mode the per-pixel
        /// hollow mask (built from depth data in convertDepthFrame) is applied so
        /// only player pixels remain visible.
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments carrying the color frame</param>
        private void Sensor_ColorFrameReady(object sender, ColorImageFrameReadyEventArgs e)
        {
            if (this.sensor == null)
            {
                return;
            }

            // 32-bit per pixel image; frames are pooled by the SDK and must be
            // disposed (was leaked before).
            using (ColorImageFrame image = e.OpenColorImageFrame())
            {
                if (image == null)
                {
                    return;
                }

                byte[] newImage = new byte[image.Width * image.Height * image.BytesPerPixel];

                byte[] bytes = new byte[image.PixelDataLength];
                image.CopyPixelDataTo(bytes);

                // Change from BGRA- to RGBA-format, pixel by pixel.
                for (int i = 0; i < newImage.Length; i += 4)
                {
                    if (this.showCameraImage == KinectCameraImage.ReducedRGB)
                    {
                        // The hollow mask is 0/1 per pixel: multiplying zeroes out
                        // every pixel that is not part of a player; alpha follows.
                        newImage[i + 2] = (byte)(bytes[i + 0] * m_hollowFrame[i / 4]);
                        newImage[i + 1] = (byte)(bytes[i + 1] * m_hollowFrame[i / 4]);
                        newImage[i + 0] = (byte)(bytes[i + 2] * m_hollowFrame[i / 4]);
                        newImage[i + 3] = (byte)(m_hollowFrame[i / 4] * 0xFF);
                    }
                    else // Full alpha level on normal RGB mode
                    {
                        newImage[i + 2] = (byte)(bytes[i + 0]);
                        newImage[i + 1] = (byte)(bytes[i + 1]);
                        newImage[i + 0] = (byte)(bytes[i + 2]);
                        newImage[i + 3] = 0xFF;
                    }
                }

                m_texColor.SetData<byte>(newImage);
            }
        }

        /// <summary>
        /// Copies per-user hand-pointer data from the interaction stream onto the
        /// matching Skeleton instances (matched by tracking id).
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments carrying the interaction frame</param>
        void InteractionFrameReady(object sender, InteractionFrameReadyEventArgs e)
        {
            using (InteractionFrame interactionFrame = e.OpenInteractionFrame())
            {
                if (interactionFrame == null)
                {
                    return;
                }

                UserInfo[] userInfo = new UserInfo[InteractionFrame.UserInfoArrayLength];
                interactionFrame.CopyInteractionDataTo(userInfo);

                foreach (var skel in Skeletons)
                {
                    // Slots are null until the first skeleton frame arrives
                    // (previously caused a NullReferenceException here).
                    if (skel == null)
                    {
                        continue;
                    }

                    var ui = (from UserInfo u in userInfo where u.SkeletonTrackingId == skel.TrackingID select u).FirstOrDefault();

                    // FirstOrDefault returns null when no interaction user matches
                    // this skeleton's tracking id (previously dereferenced blindly).
                    if (ui != null)
                    {
                        skel.HandPointers = ui.HandPointers;
                    }
                }
            }
        }

        #endregion

        #region Private Methods

        /// <summary>
        /// Event handler for Kinect sensor's SkeletonFrameReady event.
        /// NOTE(review): this handler is never subscribed anywhere in this file
        /// (the constructor wires Sensor_SkeletonFrameReady instead) and the data
        /// it copies is discarded — it appears to be dead code; confirm no external
        /// subscriber exists, then remove.
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments</param>
        private void SensorSkeletonFrameReady(object sender, SkeletonFrameReadyEventArgs e)
        {
            Microsoft.Kinect.Skeleton[] skeletons = new Microsoft.Kinect.Skeleton[0];

            using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
            {
                if (skeletonFrame != null)
                {
                    skeletons = new Microsoft.Kinect.Skeleton[skeletonFrame.SkeletonArrayLength];
                    skeletonFrame.CopySkeletonDataTo(skeletons);
                }
            }
        }

        /// <summary>
        /// Converts a Kinect SkeletonPoint into an XNA Vector3 (component-wise copy).
        /// </summary>
        Vector3 GetWorldVector3(SkeletonPoint vector)
        {
            Vector3 world;
            world.X = vector.X;
            world.Y = vector.Y;
            world.Z = vector.Z;
            return world;
        }

        // Converts a 16-bit grayscale depth frame which includes player indexes into a 32-bit frame
        // that displays different players in different colors.
        //
        // NOTE(review): depthFrame32 is sized for 320x240, while the depth stream is
        // enabled at 640x480 in the constructor — the loop's bounds guard means only
        // part of a larger frame is converted. The bit layout assumed below (3-bit
        // player index in the low bits) matches the legacy packed depth+player
        // format; confirm it agrees with the currently enabled stream format.
        // This method is currently not called from within this file.
        byte[] convertDepthFrame(short[] depthFrame16)
        {
            int n = 0; // NOTE(review): never used — candidate for removal.

            // i16 advances two elements per pixel, i32 four bytes per output pixel.
            for (int i16 = 0, i32 = 0; i16 < depthFrame16.Length && i32 < depthFrame32.Length; i16 += 2, i32 += 4)
            {
                // Presumably: low 3 bits carry the player index (0 = no player) — TODO confirm format.
                int player = depthFrame16[i16] & 0x07;
                int depthPixelValue = (depthFrame16[i16 + 1] << 8) | depthFrame16[i16];
                // Presumably: strip the 3 player bits to recover the depth value — TODO confirm format.
                int realDepth = (depthFrame16[i16 + 1] << 5) | (depthFrame16[i16] >> 3);

                // Transform 13-bit depth information into an 8-bit intensity appropriate
                // for display (we disregard information in most significant bit)
                byte intensity = (byte)(255 - (255 * realDepth / 0x0fff));

                // Start from black; the player switch below fills in the channels.
                depthFrame32[i32 + RED_IDX] = 0;
                depthFrame32[i32 + GREEN_IDX] = 0;
                depthFrame32[i32 + BLUE_IDX] = 0;


                // If kinect mode is "ReducedRGB", create the hollow mask from the depth data
                // Attention: The depth image has 320x240 resolution, while the RGB image is VGA!!
                if (this.showCameraImage == KinectCameraImage.ReducedRGB)
                {
                    // Use the interal kinect camera calibration to calculate 
                    // the corrisponding RGB image pixel position from the depth pixel position.
                    int depthX = (i16 / 2) % 320;
                    int depthY = (i16 / 2) / 320;
                    int colorX, colorY;
                    ColorImagePoint color = this.sensor.MapDepthToColorImagePoint(DepthImageFormat.Resolution320x240Fps30, depthX, depthY, (short)depthPixelValue, ColorImageFormat.RgbResolution640x480Fps30);
                    // Clamp the mapped coordinates to the 640x480 color image bounds.
                    colorX = Math.Max(0, Math.Min(639, color.X));
                    colorY = Math.Max(0, Math.Min(479, color.Y));

                    // Set if this pixel is part of a player
                    byte set = (player > 0) ? (byte)1 : (byte)0;

                    // Calculate the position in RGB image space
                    int position = colorX + colorY * 640;

                    // To adapt the resolution of the RGB image, map one depth pixel 
                    // into 4 pixels in the hollow mask.
                    m_hollowFrame[position] = set;
                    m_hollowFrame[position + 1] = set;

                    // The second line of the last line is unnessessary and
                    // causes an exception for range overflow
                    if ((position + 640) < m_hollowFrame.Length)
                    {
                        m_hollowFrame[position + 640] = set;
                        m_hollowFrame[position + 640 + 1] = set;
                    }
                }


                // Choose different display colors based on player
                switch (player)
                {
                    case 0:
                        // No player: medium gray scaled by depth intensity.
                        depthFrame32[i32 + RED_IDX] = (byte)(intensity / 2);
                        depthFrame32[i32 + GREEN_IDX] = (byte)(intensity / 2);
                        depthFrame32[i32 + BLUE_IDX] = (byte)(intensity / 2);
                        break;
                    case 1:
                        depthFrame32[i32 + RED_IDX] = intensity;
                        break;
                    case 2:
                        depthFrame32[i32 + GREEN_IDX] = intensity;
                        break;
                    case 3:
                        depthFrame32[i32 + RED_IDX] = (byte)(intensity / 4);
                        depthFrame32[i32 + GREEN_IDX] = (byte)(intensity);
                        depthFrame32[i32 + BLUE_IDX] = (byte)(intensity);
                        break;
                    case 4:
                        depthFrame32[i32 + RED_IDX] = (byte)(intensity);
                        depthFrame32[i32 + GREEN_IDX] = (byte)(intensity);
                        depthFrame32[i32 + BLUE_IDX] = (byte)(intensity / 4);
                        break;
                    case 5:
                        depthFrame32[i32 + RED_IDX] = (byte)(intensity);
                        depthFrame32[i32 + GREEN_IDX] = (byte)(intensity / 4);
                        depthFrame32[i32 + BLUE_IDX] = (byte)(intensity);
                        break;
                    case 6:
                        depthFrame32[i32 + RED_IDX] = (byte)(intensity / 2);
                        depthFrame32[i32 + GREEN_IDX] = (byte)(intensity / 2);
                        depthFrame32[i32 + BLUE_IDX] = (byte)(intensity);
                        break;
                    case 7:
                        // Player 7: inverted grayscale.
                        depthFrame32[i32 + RED_IDX] = (byte)(255 - intensity);
                        depthFrame32[i32 + GREEN_IDX] = (byte)(255 - intensity);
                        depthFrame32[i32 + BLUE_IDX] = (byte)(255 - intensity);
                        break;
                }
            }

            return depthFrame32;
        }

        /// <summary>
        /// Maps a skeleton-space point to a normalized (0..1) position in the
        /// 640x480 color image.
        /// NOTE(review): in the final Kinect v1 SDK MapSkeletonPointToDepth already
        /// returns pixel coordinates, so the "* 320"/"* 240" scaling below likely
        /// over-scales (apparently a leftover from the beta SDK's normalized
        /// output) — confirm with callers before relying on this method.
        /// It is not called from within this file.
        /// </summary>
        public Vector2 GetDisplayPosition(SkeletonPoint point)
        {
            DepthImagePoint depth = this.sensor.MapSkeletonPointToDepth(point, DepthImageFormat.Resolution320x240Fps30);
            float depthX = depth.X * 320; //convert to 320, 240 space
            float depthY = depth.Y * 240; //convert to 320, 240 space

            int colorX, colorY;
            //ImageViewArea iv = new ImageViewArea();

            // Only ImageResolution.Resolution640x480 is supported at this point
            ColorImagePoint color = this.sensor.MapDepthToColorImagePoint(DepthImageFormat.Resolution320x240Fps30, (int)depthX, (int)depthY, (short)0, ColorImageFormat.RgbResolution640x480Fps30);

            // map back to skeleton.Width & skeleton.Height
            return new Vector2((float)(color.X / 640f), (float)(color.Y / 480f));
        }

        #endregion

        #region Public Methods
        
        #endregion

        /// <summary>
        /// Unhooks all frame events, disables the streams, stops the sensor and
        /// releases the interaction stream. Safe to call when initialization
        /// failed (both references are null-checked).
        /// </summary>
        public void Dispose()
        {
            // Tear down the interaction stream first so no interaction callback
            // fires against a stopping sensor (previously never disposed).
            if (this.interactionStream != null)
            {
                this.interactionStream.InteractionFrameReady -= this.InteractionFrameReady;
                this.interactionStream.Dispose();
                this.interactionStream = null;
            }

            if (this.sensor != null)
            {
                this.sensor.DepthFrameReady -= this.Sensor_DepthFrameReady;
                this.sensor.SkeletonFrameReady -= this.Sensor_SkeletonFrameReady;
                this.sensor.ColorFrameReady -= this.Sensor_ColorFrameReady;

                this.sensor.ColorStream.Disable();
                this.sensor.SkeletonStream.Disable();
                this.sensor.DepthStream.Disable();

                this.sensor.Stop();

                this.sensor = null;
            }
        }
    }
}