// --------------------------------------------------------------------------------------------------------------------
// <copyright file="KinectMessageProcessorUtilities.cs" company="Microsoft Corporation">
// The MIT License (MIT)
// 
// Copyright (c) 2014, Microsoft Corporation
// 
// Permission is hereby granted, free of charge, to any person obtaining a copy
//  of this software and associated documentation files (the "Software"), to deal
//  in the Software without restriction, including without limitation the rights
//  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
//  copies of the Software, and to permit persons to whom the Software is
//  furnished to do so, subject to the following conditions:
// 
// The above copyright notice and this permission notice shall be included in
//  all copies or substantial portions of the Software.
// 
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
//  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
//  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
//  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
//  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
//  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
//  THE SOFTWARE.
// </copyright>
// --------------------------------------------------------------------------------------------------------------------
namespace Microsoft.Robotics.Vision.Cameras
{
    using System;
    using System.Drawing;
    using System.Runtime.CompilerServices;
    using System.Runtime.Serialization;
    using Microsoft.Robotics.Numerics;

    /// <summary>
    /// Provides utility support for transforming raw Kinect images (KinectAgentMessageProcessor)
    /// Current processing steps are:
    /// 1) Clamp depth readings to lie in range [0 - MaximumDepthImageRange]
    /// 2) Assign pixel values less than 0 or having NoReadingValue the value NoReadingValue (Note 
    ///      the kinect driver creates KinectAgentRawMessage with noReadingValue) 
    /// 3) Flip images from right-left (Kinect format) to left to right (Historical mars format)
    /// 4) Apply a linear rescaling of the depth. d' = m*d + c. We found this type of correction improves
    ///     absolute depth readings
    /// </summary>
    public class KinectMessageProcessorUtilities
    {
        /// <summary>
        /// Floor filter applied per frame (when floor filtering is enabled)
        /// </summary>
        private readonly IKinectFloorFilter floorFilter;

        /// <summary>
        /// Flag indicating the depth image should NOT be flipped left to right
        /// </summary>
        private readonly bool disableDepthMirror;

        /// <summary>
        /// Flag indicating floor filtering should be skipped
        /// </summary>
        private readonly bool disableFloorFiltering;

        /// <summary>
        /// Order of the per-bucket correction polynomial (0 until calibration data is set)
        /// </summary>
        private int correctionPolynomialOrder;

        /// <summary>
        /// Per-bucket correction coefficients (null until calibration data is set)
        /// </summary>
        private double[][] correctionCoefficients;

        /// <summary>
        /// Initializes a new instance of the <see cref="KinectMessageProcessorUtilities"/> class.
        /// </summary>
        /// <param name="disableDepthMirror">Flag indicating if depth image should be flipped left to right</param>
        /// <param name="floorFilter">Floor filter</param>
        /// <param name="disableFloorFiltering">Disable floor filtering of the image</param>
        public KinectMessageProcessorUtilities(bool disableDepthMirror, IKinectFloorFilter floorFilter, bool disableFloorFiltering = false)
        {
            this.disableDepthMirror = disableDepthMirror;
            this.disableFloorFiltering = disableFloorFiltering;
            this.floorFilter = floorFilter;
        }

        /// <summary>
        /// Transform a "raw" depth Kinect image to a standard version that downstream components consume
        /// Current processing steps are:
        /// 1) Clamp depth readings to lie in range [0 - maximumRange], substituting
        ///      furtherThanMaxDepthValue for readings beyond maximumRange
        /// 2) Assign pixel values less than 0 or having NoReadingValue the value NoReadingValue (Note 
        ///      the kinect driver creates KinectAgentRawMessage with noReadingValue) 
        /// 3) If enabled Flip images from right-left (Kinect format) to left to right (Historical mars format)
        /// 4) Apply a linear rescaling of the depth. d' = m*d + c. We found this type of correction improves
        ///     absolute depth readings
        /// </summary>
        /// <param name="depthImageSize">Raw image size</param>
        /// <param name="depthFrame">Raw image</param>
        /// <param name="noReadingValue">Kinect no reading value</param>
        /// <param name="maximumRange">Kinect maximum depth</param>
        /// <param name="furtherThanMaxDepthValue">Value to substitute when over range</param>
        /// <returns>Transformed image</returns>
        public short[] TransformDepthImage(Size depthImageSize, short[] depthFrame, short noReadingValue, short maximumRange, short furtherThanMaxDepthValue)
        {
            short[] pixelBuf = new short[depthImageSize.Height * depthImageSize.Width];

            bool applyCorrection = this.correctionCoefficients != null;

            int bucketsHorizontal = 0; // pre-compute for perf
            int bucketSideInPixels = 0; // pre-compute for perf

            if (applyCorrection)
            {
                // Buckets are square; derive the side length from the average pixels-per-bucket.
                // NOTE(review): assumes the coefficient count evenly tiles the frame — a count larger
                // than the pixel count would yield a zero side and divide by zero here; verify calibration files.
                int bucketLengthInPixels = (depthImageSize.Width * depthImageSize.Height) / this.correctionCoefficients.Length;

                bucketSideInPixels = (int)Math.Sqrt(bucketLengthInPixels);
                bucketsHorizontal = depthImageSize.Width / bucketSideInPixels;
            }

            int frameWidth = depthImageSize.Width; // caching property for perf
            int frameHeight = depthImageSize.Height; // caching property for perf

            if (!this.disableFloorFiltering)
            {
                // We attempt to learn the floor for each frame. It is more encapsulated design-wise and slightly faster
                // than making a per-pixel filtering. Note. It is advisable to call this prior to Kinect bias correction
                // since filtered out pixels wont undergo a relatively expensive floating point correction
                this.floorFilter.FilterPlanesOnFrame(depthFrame, frameWidth, frameHeight);
            }

            unchecked
            {
                unsafe
                {
                    fixed (short* depthImage = depthFrame)
                    fixed (short* pixelData = pixelBuf)
                    {
                        // By default the kinect depth and color images are right to left
                        // Our pipeline expects it to be left to right (Mars Kinect Interop did the flip for us)
                        // Replicate that behavior by flipping left to right.
                        // Other transformations include linear rescaling and clamping of depth values
                        for (int row = 0; row < frameHeight; ++row)
                        {
                            int rowStartIndex = row * frameWidth;

                            for (int col = 0; col < frameWidth; ++col)
                            {
                                int pixelIndex = rowStartIndex + col;

                                // Added Jan 2013 as an option to not flip the image
                                int depthIndex = this.disableDepthMirror
                                    ? pixelIndex
                                    : rowStartIndex + frameWidth - 1 - col;

                                short depthVal = depthImage[pixelIndex];

                                if (depthVal == noReadingValue)
                                {
                                    pixelData[depthIndex] = noReadingValue;
                                    continue;
                                }

                                // Because floor removal is cheaper than correction, and because we dont need to do both
                                // on the same pixel - we first filter out floor - and then apply correction to non-floor pixels.
                                double correctedDepth = (depthVal > 0 && applyCorrection)
                                    ? this.CorrectError(depthVal, row, col, bucketsHorizontal, bucketSideInPixels)
                                    : depthVal;

                                /* Kinect sometimes returns non-reserved non-positive values
                                   We should consider those as noreading values.
                                   Note that noReadingValue is different from nearerThanMinDepthValue,
                                   which should be considered as obstacles.*/
                                if (DepthImageConstants.FloorValue == correctedDepth)
                                {
                                    pixelData[depthIndex] = DepthImageConstants.FloorValue;
                                }
                                else if (DepthImageConstants.CeilingValue == correctedDepth)
                                {
                                    pixelData[depthIndex] = DepthImageConstants.CeilingValue;
                                }
                                else if (correctedDepth <= 0)
                                {
                                    pixelData[depthIndex] = noReadingValue;
                                }
                                else if (correctedDepth > maximumRange)
                                {
                                    // BUGFIX: perform the documented clamp (step 1 in the summary).
                                    // Previously maximumRange/furtherThanMaxDepthValue were accepted but never used,
                                    // so over-range readings leaked through uncorrected.
                                    pixelData[depthIndex] = furtherThanMaxDepthValue;
                                }
                                else
                                {
                                    pixelData[depthIndex] = (short)Math.Round(correctedDepth);
                                }
                            }
                        }
                    }
                }
            }

            return pixelBuf;
        }

        /// <summary>
        /// Convert and correct a kinect field of view
        /// </summary>
        /// <param name="fieldOfViewDegrees">Kinect field of view in degrees</param>
        /// <returns>Corrected FOV in radians</returns>
        public double FieldOfViewInRadians(double fieldOfViewDegrees)
        {
            // K4W Nominal FOV is twice the actual field of view, which is not explained in the documentation.
            return fieldOfViewDegrees / 2 * MathConstants.Degrees2Radians;
        }

        /// <summary>
        /// Set correction coefficients
        /// </summary>
        /// <param name="bucketCoefficients">An array initialized with per bucket correction coefficients.
        /// A bucket is a rectangular patch of pixels for which we calculate (at calibration time) and then apply (here)
        /// correction polys. The motivation behind per-bucket correction is that a single, pixel position insensitive
        /// correction does not deal with the 'doughnut' effect exhibited by Kinect. At their extremes, buckets can be 
        /// 1x1 pixel or 640x480 pixels, former case resulting in per-pixel correction and later on a single per-frame correction.
        /// Per-pixel correction did not yield good results due to noise exhibited by each pixel, and larger bucket sizes 
        /// (over 64x64) do not address the 'doughnut' well</param>
        /// <exception cref="InvalidOperationException">Thrown when the calibration data is empty or its polynomial order is less than 1</exception>
        public void SetBucketCalibrationData(double[][] bucketCoefficients)
        {
            this.correctionCoefficients = null;
            this.correctionPolynomialOrder = 0;

            if (bucketCoefficients != null)
            {
                // Guard against an empty/ragged calibration file; previously this surfaced as an
                // IndexOutOfRangeException or NullReferenceException instead of a diagnostic error.
                if (bucketCoefficients.Length == 0 || bucketCoefficients[0] == null)
                {
                    throw new InvalidOperationException("Incorrect polynomial correction file. At least one bucket of coefficients is required");
                }

                int polyOrder = bucketCoefficients[0].Length - 1;

                if (polyOrder < 1)
                {
                    throw new InvalidOperationException("Incorrect polynomial correction file. Order cannot be less than 1");
                }

                this.correctionPolynomialOrder = polyOrder;
                this.correctionCoefficients = bucketCoefficients;
            }
        }

        /// <summary>
        /// Applies a linear correction to a depth reading. Right now, this supports 1st and second degree polynomials
        /// It probably makes sense to optimize for one and remove the other once we have enough trust in one of them
        /// </summary>
        /// <param name="depth">Depth reading to convert</param>
        /// <param name="row">The row</param>
        /// <param name="col">The column</param>
        /// <param name="bucketsHorizontal">Number of horizontal correction buckets pre-calculated for performance reasons</param>
        /// <param name="bucketSideInPixels">Square bucket side size in pixels</param>
        /// <returns>Corrected depth reading</returns>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private unsafe double CorrectError(short depth, int row, int col, int bucketsHorizontal, int bucketSideInPixels)
        {
            unchecked
            {
                int bucketRow = row / bucketSideInPixels;
                int bucketColumn = col / bucketSideInPixels;

                // Fetch the bucket's coefficient vector once instead of double-indexing per term
                double[] coefficients = this.correctionCoefficients[(bucketRow * bucketsHorizontal) + bucketColumn];

                if (this.correctionPolynomialOrder == 1)
                {
                    return (coefficients[0] * depth) + coefficients[1];
                }

                if (this.correctionPolynomialOrder == 2)
                {
                    int depthSquared = depth * depth; // to avoid one more floating point multiply

                    return (coefficients[0] * depthSquared) + (coefficients[1] * depth) + coefficients[2];
                }

                // Unsupported order: pass the reading through unchanged
                return depth;
            }
        }
    }
}
