// --------------------------------------------------------------------------------------------------------------------
// <copyright file="ObjectPosePredictor.cs" company="Microsoft Corporation">
// The MIT License (MIT)
// 
// Copyright (c) 2014, Microsoft Corporation
// 
// Permission is hereby granted, free of charge, to any person obtaining a copy
//  of this software and associated documentation files (the "Software"), to deal
//  in the Software without restriction, including without limitation the rights
//  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
//  copies of the Software, and to permit persons to whom the Software is
//  furnished to do so, subject to the following conditions:
// 
// The above copyright notice and this permission notice shall be included in
//  all copies or substantial portions of the Software.
// 
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
//  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
//  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
//  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
//  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
//  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
//  THE SOFTWARE.
// </copyright>
// --------------------------------------------------------------------------------------------------------------------

namespace Microsoft.Robotics.Vision.ObjectDetection
{
    using System;
    using System.Collections.Generic;
    using System.IO;
    using System.Windows;
    using System.Windows.Media.Imaging;

    using Microsoft.Robotics.Numerics;
    using Microsoft.Robotics.Vision;
    using Microsoft.Robotics.Vision.Cameras;
    using Microsoft.Search;

    /// <summary>
    /// Runs object pose prediction on captured kinect images
    /// </summary>
    /// <summary>
    /// Runs object pose prediction on captured kinect images.
    /// Pipeline: a cheap level-1 classifier pre-screens every sampled pixel, a level-2
    /// classifier/regressor refines surviving patches, overlapping patches are clustered,
    /// and each cluster gets an orientation and confidence estimate.
    /// </summary>
    public class ObjectPosePredictor : IDisposable
    {
        /// <summary>
        /// Level 1 classifier used for the fast per-pixel pre-screening pass
        /// </summary>
        private nnRunTimeManaged level1Classifier;

        /// <summary>
        /// Level 2 classifier. It gives a confidence score that the patch contains the object
        /// </summary>
        private nnRunTimeManaged level2Classifier;

        /// <summary>
        /// Level 2 regression - estimates the location for patches (only if it is an object patch)
        /// </summary>
        private nnRunTimeManaged level2LocationRegressor;

        /// <summary>
        /// Level 2 regression - estimates the orientation for patches (only if it is an object patch).
        /// May be null when the configuration provides no orientation model.
        /// </summary>
        private nnRunTimeManaged level2OrientationRegressor;

        /// <summary>
        /// Post processor confidence estimator. May be null; the cluster mean confidence
        /// is used as a fallback in that case.
        /// </summary>
        private nnRunTimeManaged confidenceEstimator;

        /// <summary>
        /// Track whether Dispose has been called.
        /// </summary>
        private bool disposed = false;

        /// <summary>
        /// Initializes a new instance of the <see cref="ObjectPosePredictor"/> class.
        /// The orientation regressor and confidence estimator are optional and only
        /// instantiated when the configuration supplies them.
        /// </summary>
        /// <param name="detectorConfiguration">Detector configuration parameters</param>
        public ObjectPosePredictor(KinectIRObjectDetectorConfiguration detectorConfiguration)
        {
            this.DetectorConfiguration = detectorConfiguration;
            this.level1Classifier = new nnRunTimeManaged(this.DetectorConfiguration.Level1Classifier.GetBuffer());
            this.level2Classifier = new nnRunTimeManaged(this.DetectorConfiguration.Level2Classifier.GetBuffer());
            this.level2LocationRegressor = new nnRunTimeManaged(this.DetectorConfiguration.Level2Regress.GetBuffer());

            if (this.DetectorConfiguration.Level2OrientationRegress != null)
            {
                this.level2OrientationRegressor = new nnRunTimeManaged(this.DetectorConfiguration.Level2OrientationRegress.GetBuffer());
            }

            if (this.DetectorConfiguration.ConfidenceEstimator != null)
            {
                this.confidenceEstimator = new nnRunTimeManaged(this.DetectorConfiguration.ConfidenceEstimator.GetBuffer());
            }
        }

        /// <summary>
        /// Gets the detector configuration parameters
        /// </summary>
        public KinectIRObjectDetectorConfiguration DetectorConfiguration { get; private set; }

        /// <summary>
        /// Runs pose predictions for a kinect captured IR and depth images.
        /// Every pixel with a valid depth reading is pre-screened with the level-1
        /// classifier; survivors are refined with the level-2 classifier/regressor and
        /// accumulated into spatially overlapping <see cref="PatchCluster"/> instances,
        /// which are then re-clustered and given pose/confidence estimates.
        /// </summary>
        /// <param name="featurizer">Featurizer to use</param>
        /// <param name="cameraCalibration">Kinect camera calibration</param>
        /// <param name="classifyFrameL1">Optional image to capture level1 results</param>
        /// <returns>List of patch cluster estimates</returns>
        /// <exception cref="ArgumentException">Thrown when either input image is null or the IR and depth image sizes differ</exception>
        public List<PatchCluster> Predict(ObjectDetectionFeaturization featurizer, KinectCameraCalibration cameraCalibration, ImageFrame<ushort> classifyFrameL1 = null)
        {
            System.Diagnostics.Stopwatch timer = new System.Diagnostics.Stopwatch();
            timer.Start();

            // Pixel sampling stride; 1 means every pixel is examined.
            int colIncr = 1;
            int rowIncr = 1;

            ImageFrame<ushort> irImage = featurizer.IrFrame;
            ImageFrameDepth depthImage = featurizer.DepthFrame;

            if (null == irImage || null == depthImage)
            {
                throw new ArgumentException("Input images cannot be null");
            }

            if (irImage.Width != depthImage.Width || irImage.Height != depthImage.Height)
            {
                throw new ArgumentException(string.Format("IR image ({0}, {1}) size != Depth image size ({2}, {3})", irImage.Width, irImage.Height, depthImage.Width, depthImage.Height));
            }

            List<PatchCluster> patchClusters = new List<PatchCluster>();
            PhysicalObjects.PhysicalObjectDescription manipObject = this.DetectorConfiguration.ManipulatedObjectDescription;

            for (int col = 0; col < irImage.Width; col += colIncr)
            {
                for (int row = 0; row < irImage.Height; row += rowIncr)
                {
                    double depthM = depthImage[col, row] / MathConstants.MilliunitsPerUnit;
                    if (depthM > 0)
                    {
                        // Expected pixel footprint of the object at this depth.
                        double patchWidthPix;
                        double patchHeightPix;
                        double patchDepthMM;
                        cameraCalibration.GetPatchSizeInPixForObject(col, row, depthM, manipObject.WidthInM, manipObject.HeightInM, manipObject.DepthInM, this.DetectorConfiguration.PatchContextScale, out patchWidthPix, out patchHeightPix, out patchDepthMM);

                        float level1Result = this.ClassifyPatchL1(
                            featurizer,
                            col,
                            row,
                            (int)patchWidthPix,
                            (int)patchHeightPix);

                        if (null != classifyFrameL1)
                        {
                            classifyFrameL1[col, row] = (ushort)(level1Result * ushort.MaxValue);
                        }

                        if (level1Result >= this.DetectorConfiguration.Level1Threshold)
                        {
                            List<float> level2Results = this.ClassifyPatchL2(featurizer, col, row, (int)patchWidthPix, (int)patchHeightPix, this.DetectorConfiguration.Level2Threshold, this.DetectorConfiguration.NoReadingDepthValue);

                            // level2Results layout: [0] = classifier score, [1..3] = normalized
                            // x/y/depth offsets from the location regressor. Indices up to [3]
                            // are read below, so at least 4 elements are required (the original
                            // Count >= 3 check allowed an out-of-range read). The count is
                            // verified before indexing to avoid touching element [0] of an
                            // undersized list.
                            if (null != level2Results && level2Results.Count >= 4 && level2Results[0] > this.DetectorConfiguration.Level2Threshold)
                            {
                                // Regressor outputs are centered at 0.5; convert to pixel/mm offsets.
                                double xCenter = (double)(col + patchWidthPix * (level2Results[1] - 0.5));
                                double yCenter = (double)(row + patchHeightPix * (level2Results[2] - 0.5));
                                double depthCenter = depthM + patchDepthMM * (level2Results[3] - 0.5);
                                double widthBy2 = (double)(patchWidthPix / 2);
                                double heightBy2 = (double)(patchHeightPix / 2);

                                // Try to merge the patch into an existing overlapping cluster;
                                // start a new cluster if none accepts it. The vertical bounds
                                // are symmetric about yCenter, matching the PatchCluster
                                // constructor call below (the original Add call passed the
                                // asymmetric yCenter - patchHeightPix .. yCenter span).
                                bool isAdded = false;
                                for (int i = 0; i < patchClusters.Count && false == isAdded; ++i)
                                {
                                    isAdded = patchClusters[i].Add(xCenter, yCenter, depthCenter, level2Results[0], xCenter - widthBy2, xCenter + widthBy2, yCenter - heightBy2, yCenter + heightBy2);
                                }

                                if (false == isAdded)
                                {
                                    patchClusters.Add(new PatchCluster(xCenter, yCenter, depthCenter, level2Results[0], xCenter - widthBy2, xCenter + widthBy2, yCenter - heightBy2, yCenter + heightBy2));
                                }
                            }
                        }
                    }
                }
            }

            // Finalize each cluster: position estimate, orientation and confidence.
            foreach (PatchCluster p in patchClusters)
            {
                p.Estimate(this.DetectorConfiguration.NoReadingDepthValue);
                Pose cameraPose = this.EstimateL2Orientation(
                    featurizer,
                    cameraCalibration,
                    p,
                    this.DetectorConfiguration.PatchContextScale,
                    this.DetectorConfiguration.NoReadingDepthValue);
                double confidence = this.EstimateClusterConfidence(p, colIncr * rowIncr);
                p.PoseEstimate = new ObjectPoseEstimate(
                    cameraPose, 
                    confidence, 
                    this.DetectorConfiguration.ConfidenceThreshold, 
                    this.DetectorConfiguration.ManipulatedObjectDescription, 
                    timer.ElapsedMilliseconds);
            }

            List<PatchCluster> finalClusters = this.Recluster(patchClusters, featurizer, cameraCalibration, timer.ElapsedMilliseconds, colIncr * rowIncr);
            return finalClusters;
        }

        /// <summary>
        /// Run level 1 classification on an rectangular image patch
        /// </summary>
        /// <param name="featurizer">Featurizer to use</param>
        /// <param name="col">Patch column</param>
        /// <param name="row">Patch row</param>
        /// <param name="patchWidth">Actual patch width used</param>
        /// <param name="patchHeight">Actual patch height used</param>
        /// <returns>Confidence score that patch is positive; 0 when no features could be extracted</returns>
        public float ClassifyPatchL1(
            ObjectDetectionFeaturization featurizer,
            int col,
            int row,
            int patchWidth,
            int patchHeight)
        {
            int[] feats = featurizer.GetIntegralFeatures(col, row, patchWidth, patchHeight);

            if (null != feats)
            {
                // Classify returns per-class scores; index 1 is the positive class.
                List<float> results = this.level1Classifier.Classify<int>(feats);
                return results[1];
            }

            return 0.0F;
        }

        /// <summary>
        /// Run level 2 classification and optionally location prediction on a patch
        /// </summary>
        /// <param name="featurizer">Featurizer to use</param>
        /// <param name="col">Patch column</param>
        /// <param name="row">Patch row</param>
        /// <param name="patchWidth">Raw patch width</param>
        /// <param name="patchHeight">Raw patch height</param>
        /// <param name="level2Threshold">Level 2 threshold</param>
        /// <param name="noReading">No reading value</param>
        /// <returns>
        /// Null when no features could be extracted; otherwise a list whose element [0] is
        /// the level 2 confidence score, followed by the location regression outputs when
        /// the score meets the threshold (a sub-threshold patch yields a single-element list).
        /// </returns>
        public List<float> ClassifyPatchL2(ObjectDetectionFeaturization featurizer, int col, int row, int patchWidth, int patchHeight, double level2Threshold, short noReading)
        {
            int[] feat2 = featurizer.GetPatchFeaturesLevel2(col, row, patchWidth, patchHeight, this.DetectorConfiguration.Level2Patch, noReading);

            if (null != feat2)
            {
                List<float> results = new List<float>();

                // Index 1 is the positive-class score.
                List<float> classify = this.level2Classifier.Classify(feat2);
                results.Add(classify[1]);

                // Only spend time on location regression for patches that pass the threshold.
                if (results[0] >= level2Threshold)
                {
                    List<float> regress = this.level2LocationRegressor.Run(feat2);
                    results.AddRange(regress);
                }

                return results;
            }

            return null;
        }

        /// <summary>
        /// Estimate L2 orientation for object patch in image.
        /// Returns the cluster's existing pose unchanged when no orientation regressor is
        /// configured or no orientation features could be extracted.
        /// </summary>
        /// <param name="featurizer">Featurizer to use</param>
        /// <param name="cameraCalibration">Kinect camera calibration</param>
        /// <param name="patch">Patch in image</param>
        /// <param name="patchScale">Patch scale</param>
        /// <param name="noReading">no reading value for depth image</param>
        /// <returns>Estimate orientation</returns>
        public Pose EstimateL2Orientation(ObjectDetectionFeaturization featurizer, KinectCameraCalibration cameraCalibration, PatchCluster patch, double patchScale, short noReading)
        {
            Pose pose = patch.PoseEstimate.CameraPixelPose;
            if (null == this.level2OrientationRegressor)
            {
                return pose;
            }

            double row = patch.PoseEstimate.CameraPixelPose.Position.Y;
            row = Math.Max(0, row);
            double col = patch.PoseEstimate.CameraPixelPose.Position.X;
            double depth = patch.PoseEstimate.CameraPixelPose.Position.Z;

            PhysicalObjects.PhysicalObjectDescription manipObject = this.DetectorConfiguration.ManipulatedObjectDescription;

            double patchWidthPix;
            double patchHeightPix;
            double patchDepthPix;
            cameraCalibration.GetPatchSizeInPixForObject(col, row, depth, manipObject.WidthInM, manipObject.HeightInM, manipObject.DepthInM, this.DetectorConfiguration.PatchContextScale, out patchWidthPix, out patchHeightPix, out patchDepthPix);

            int[] feats = featurizer.GetOrientationFeatures((int)col, (int)row, (int)patchWidthPix, (int)patchHeightPix, this.DetectorConfiguration.Level2Patch, noReading);

            if (null != feats)
            {
                List<float> regress = this.level2OrientationRegressor.Run(feats);

                // Map the regressor output (clamped to [-1, 1]) to the quaternion w
                // component, flipping sign so the resulting rotation about Z stays in a
                // canonical half-range. NOTE(review): the sign inversion appears
                // intentional (negative input -> positive w and vice versa) — confirm
                // against the regressor's training convention before changing.
                double w = regress[0];
                if (w < 0)
                {
                    w = Math.Sqrt((1 + Math.Max(w, -1)) / 2);
                }
                else
                {
                    w = -Math.Sqrt((1 - Math.Min(w, 1)) / 2);
                }

                // Unit quaternion constraint: w^2 + z^2 = 1 for a pure Z-axis rotation.
                double z = Math.Sqrt(1 - w * w);

                pose = new Pose(patch.PoseEstimate.CameraPixelPose.Position, new Quaternion(0, 0, z, w));
            }

            return pose;
        }

        /// <summary>
        /// Estimate a patch cluster confidence.
        /// Uses the trained post-processor when available; otherwise falls back to the
        /// cluster's mean level-2 confidence.
        /// </summary>
        /// <param name="cluster">Input cluster</param>
        /// <param name="samplingFactor">Down scale pixel sampling factor</param>
        /// <returns>Confidence score</returns>
        public double EstimateClusterConfidence(PatchCluster cluster, int samplingFactor)
        {
            double conf;

            if (null != this.confidenceEstimator)
            {
                int[] feats = ObjectDetectionFeaturization.GetPostProcessorFeatures(cluster, samplingFactor);

                // Index 1 is the positive-class score.
                List<float> pred = this.confidenceEstimator.Classify(feats);
                conf = pred[1];
            }
            else
            {
                conf = cluster.MeanConfidence;
            }

            return conf;
        }

        /// <summary>
        /// Implement IDisposable
        /// </summary>
        public void Dispose()
        {
            this.Dispose(true);
            GC.SuppressFinalize(this);
        }

        /// <summary>
        /// Dispose unmanaged resources
        /// </summary>
        /// <param name="disposing">True if dispose is called by user code, false otherwise</param>
        protected virtual void Dispose(bool disposing)
        {
            // Check to see if Dispose has already been called. 
            if (false == this.disposed)
            {
                if (disposing)
                {
                    if (null != this.level1Classifier)
                    {
                        this.level1Classifier.Dispose();
                        this.level1Classifier = null;
                    }

                    if (null != this.level2Classifier)
                    {
                        this.level2Classifier.Dispose();

                        // Fixed copy-paste bug: the original nulled level1Classifier here,
                        // leaving a dangling reference to the disposed level2Classifier.
                        this.level2Classifier = null;
                    }

                    if (null != this.level2LocationRegressor)
                    {
                        this.level2LocationRegressor.Dispose();
                        this.level2LocationRegressor = null;
                    }

                    if (null != this.level2OrientationRegressor)
                    {
                        this.level2OrientationRegressor.Dispose();
                        this.level2OrientationRegressor = null;
                    }

                    if (null != this.confidenceEstimator)
                    {
                        this.confidenceEstimator.Dispose();
                        this.confidenceEstimator = null;
                    }
                }

                this.disposed = true;
            }
        }

        /// <summary>
        /// Re-cluster detected patches by combining patches that overlap.
        /// The largest component of each 2D cluster absorbs the others' member patches,
        /// and its pose/confidence are re-estimated when it actually grew.
        /// </summary>
        /// <param name="patchClusters">Input of over segmented patches</param>
        /// <param name="featurizer">Featurizer use to create patches</param>
        /// <param name="cameraCalibration">Kinect camera calibration</param>
        /// <param name="predictTimeMsec">Prediction time in milli second</param>
        /// <param name="samplingFactor">Down scale pixel sampling factor</param>
        /// <returns>Re-clustered patches</returns>
        private List<PatchCluster> Recluster(List<PatchCluster> patchClusters, ObjectDetectionFeaturization featurizer, KinectCameraCalibration cameraCalibration, long predictTimeMsec, int samplingFactor)
        {
            List<Shape2DCluster> clusters = Shape2DCluster.Cluster(patchClusters) as List<Shape2DCluster>;
            List<PatchCluster> finalClusters = new List<PatchCluster>();

            foreach (Shape2DCluster cluster in clusters)
            {
                // Sort descending by component count so the richest patch cluster becomes the base.
                ((List<IShape2D>)cluster.Components).Sort((a, b) => { return -((PatchCluster)a).Components.Count.CompareTo(((PatchCluster)b).Components.Count); });

                PatchCluster baseCluster = cluster.Components[0] as PatchCluster;
                int componentCount = baseCluster.Components.Count;

                // Fold every other overlapping patch cluster's members into the base cluster.
                for (int i = 1; i < cluster.Components.Count; ++i)
                {
                    PatchCluster p = cluster.Components[i] as PatchCluster;

                    for (int j = 0; j < p.Components.Count; ++j)
                    {
                        Vector3 c = p.Components[j].Center;
                        baseCluster.Add(c.X, c.Y, c.Z, p.Components[j].Confidence, p.Left, p.Right, p.Top, p.Bottom);
                    }
                }

                // Re-estimate pose and confidence only if merging actually added members.
                if (baseCluster.Components.Count > componentCount)
                {
                    baseCluster.Estimate(this.DetectorConfiguration.NoReadingDepthValue);
                    double confidence = this.EstimateClusterConfidence(baseCluster, samplingFactor);
                    Pose cameraPose = this.EstimateL2Orientation(
                        featurizer,
                        cameraCalibration,
                        baseCluster,
                        this.DetectorConfiguration.PatchContextScale,
                        this.DetectorConfiguration.NoReadingDepthValue);

                    baseCluster.PoseEstimate = new ObjectPoseEstimate(
                                cameraPose, 
                                confidence, 
                                this.DetectorConfiguration.ConfidenceThreshold,
                                this.DetectorConfiguration.ManipulatedObjectDescription, 
                                predictTimeMsec);
                }

                finalClusters.Add(baseCluster);
            }

            return finalClusters;
        }
    }
}
