// --------------------------------------------------------------------------------------------------------------------
// <copyright file="ParticleFilter.cs" company="Microsoft Corporation">
// The MIT License (MIT)
// 
// Copyright (c) 2014, Microsoft Corporation
// 
// Permission is hereby granted, free of charge, to any person obtaining a copy
//  of this software and associated documentation files (the "Software"), to deal
//  in the Software without restriction, including without limitation the rights
//  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
//  copies of the Software, and to permit persons to whom the Software is
//  furnished to do so, subject to the following conditions:
// 
// The above copyright notice and this permission notice shall be included in
//  all copies or substantial portions of the Software.
// 
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
//  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
//  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
//  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
//  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
//  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
//  THE SOFTWARE.
// </copyright>
// --------------------------------------------------------------------------------------------------------------------
namespace Microsoft.Robotics.Numerics
{
    using System;
    using System.Collections.Generic;
    using System.Diagnostics;
    using System.Linq;
    using System.Threading;
    using System.Threading.Tasks;
    using Microsoft.Robotics.PerformanceCounters;
    using Microsoft.Robotics.Tracing;

    /// <summary>
    /// Implementation of Monte Carlo filter with pluggable motion and sensor models.
    /// Random sampling of a subset of particles is an optional feature.
    /// This started off as a direct implementation of the Augmented MCL algorithm in Probabilistic Robotics (Thrun, Burgard, Fox), page 258.
    /// </summary>
    /// <typeparam name="TParticleState">The type of state x(t) carried by each particle. For SLAM2D, this is a location and the grid map.</typeparam>
    /// <typeparam name="TUpdate">The type of state update u(t), e.g. an odometry reading or a motion command</typeparam>
    /// <typeparam name="TObservation">The type of observation z(t), e.g. a range scan</typeparam>
    public abstract class ParticleFilter<TParticleState, TUpdate, TObservation>
    {
        /// <summary>
        /// The trace context to use in this class and derived classes
        /// </summary>
        protected static readonly TraceContext PFTraceContext = TraceContexts.FromString(TraceContexts.Common, "PF");

        /// <summary>
        /// The probability of replacing a sample with a (more or less) completely random sample
        /// </summary>
        private const double ProbabilityOfRandomReplacement = 0.01;

        /// <summary>
        /// Constant used to implement the moving average in Augmented MCL. 
        /// The smaller the value, the more stable the average becomes over time.
        /// </summary>
        private const double SlowWeightAlpha = 0.001;

        /// <summary>
        /// Constant used to implement the moving average in Augmented MCL.
        /// Together with SlowWeightAlpha this dictates how fast the filter starts resampling when the average weight of the particles becomes worse than the slow average.
        /// </summary>
        private const double FastWeightAlpha = 0.01;

        /// <summary>
        /// Used to determine the rate of convergence of slowWeightAlpha to SlowWeightAlpha (which defines the warm-up period of the filter)
        /// </summary>
        private const double SlowWeightConvergenceFactor = 1.1;

        /// <summary>
        /// Moving average factor converging to SlowWeightAlpha
        /// </summary>
        private double slowWeightAlpha;

        /// <summary>
        /// The count of particles maintained by this filter.
        /// </summary>
        private int particleCount;

        /// <summary>
        /// The set of particles maintained by the particle filter.
        /// </summary>
        private TParticleState[] particles;

        /// <summary>
        /// A buffer of particles meant to reduce the amount of allocation churn.
        /// Swapped with <see cref="particles"/> after each resampling pass.
        /// </summary>
        private TParticleState[] resamplingParticleBuffer;

        /// <summary>
        /// The set of particle weights, one for each particle. These are a cumulative between resampling steps.
        /// Per the class remarks, weights are log-likelihoods (so typically negative values).
        /// </summary>
        private double[] weights;

        /// <summary>
        /// The max weight for the set of particles in each sampling loop.
        /// </summary>
        private double maxWeight;

        /// <summary>
        /// A set of particle weights representing each particle's fitness to the last observation.
        /// </summary>
        private double[] singleIterationWeights;

        /// <summary>
        /// The slow average used in implementing the moving average of Augmented MCL
        /// </summary>
        private double slowWeightAvg;

        /// <summary>
        /// The fast average used in implementing the moving average of Augmented MCL
        /// </summary>
        private double fastWeightAvg;

        /// <summary>
        /// The average of the weights after the last iteration
        /// </summary>
        private double weightAvg;

        /// <summary>
        /// The index of the particle that is the likeliest approximation of the true state.
        /// </summary>
        private int highestLikelihoodParticleIndex;

        /// <summary>
        /// Number of iterations we've done without resampling
        /// </summary>
        private int countOfIterationsSinceLastResampling;

        /// <summary>
        /// Random generator used in the resampling step.
        /// </summary>
        private Random rand = new Random();

        /// <summary>
        /// Random numbers for each particle, generated outside the parallel for loop to avoid threading issues
        /// (System.Random is not thread-safe).
        /// </summary>
        private double[] randomNumbers;

        /// <summary>
        /// The set of counters for this instance.
        /// </summary>
        private Dictionary<ParticleFilterCounters, PerformanceCounter> counters;

        /// <summary>
        /// Initializes a new instance of the <see cref="ParticleFilter{TParticleState,TUpdate,TObservation}"/> class.
        /// Protected rather than public because the class is abstract and can only be instantiated by derived filters (CA1012).
        /// </summary>
        /// <param name="particleCount">
        /// The count of particles to maintain.
        /// </param>
        /// <param name="filterName">Filter name (performance counter instance name)</param>
        protected ParticleFilter(int particleCount, string filterName)
        {
            this.particles = new TParticleState[particleCount];
            this.resamplingParticleBuffer = new TParticleState[particleCount];
            this.weights = new double[particleCount];
            this.particleCount = particleCount;
            this.singleIterationWeights = new double[particleCount];
            this.randomNumbers = new double[particleCount];
            this.counters = ParticleFilterCounterManager.AddInstance(filterName);
            TraceOut.Info(PFTraceContext, "Particle filter {0} initialized with {1} particles", filterName, particleCount);
        }

        /// <summary>
        /// Gets the current set of particles.
        /// </summary>
        public TParticleState[] Particles
        {
            get
            {
                return this.particles;
            }
        }

        /// <summary>
        /// Gets the current max weight (max likelihood).
        /// The metric for SLAM is that the value of the most likelihood shouldn't exceed a certain threshold after it reaches asymptotic equilibrium 
        /// </summary>
        public double MaxWeight
        {
            get
            {
                return this.maxWeight;
            }
        }

        /// <summary>
        /// Gets or sets a value indicating whether random resampling should be used to increase particle diversity. 
        /// This is only interesting when the probability distribution is multi-modal (e.g. global localization)
        /// </summary>
        protected bool EnableRandomResampling { get; set; }

        /// <summary>
        /// Resets all particles to random states.
        /// </summary>
        public void ResampleAll()
        {
            for (int i = 0; i < this.particleCount; i++)
            {
                // small negative log-likelihood (i.e. a probability just below 1) so weights are never exactly 0
                this.weights[i] = -MathConstants.ErrorEpsilon;
                this.singleIterationWeights[i] = 0.0;
                this.particles[i] = this.CreateRandomSample();
            }

            this.fastWeightAvg = -0.0;
            this.slowWeightAvg = -0.0;

            // make the slow alpha a higher value than the fast one initially, to avoid resampling before the filter is warm
            // the higher the multiplication factor, the longer the delay to the first resampling.
            this.slowWeightAlpha = SlowWeightConvergenceFactor * FastWeightAlpha;

            this.maxWeight = 0.0;
            if (this.counters != null)
            {
                this.counters[ParticleFilterCounters.ReplacedParticleCount].IncrementBy(this.particleCount);
            }
        }

        /// <summary>
        /// Returns the particle with the highest likelihood, in other words the best candidate we have so far
        /// </summary>
        /// <returns>The particle that appears to be closest to the true state of the robot</returns>
        public TParticleState GetHighestLikelihoodParticle()
        {
            return this.particles[this.highestLikelihoodParticleIndex];
        }

        /// <summary>
        /// Updates all particles based on the specified motion update and observation. 
        /// This method implements the MCL algorithm. Specifically:
        /// - the state in each particle is updated based on the motion update
        /// - the weight of each particle is updated based on the observation. This means computing the likelihood that the given observation could be made in the state identified by each particle.
        /// - the particles are resampled with replacement, where the resampling probability is proportional to the particle weight
        /// </summary>
        /// <param name="update">The motion update to apply to all particles.</param>
        /// <param name="observation">The observation made after the motion completed. This observation is used to determine the fitness of each particle.</param>
        /// <remarks>This implementation uses log likelihoods instead of probabilities (i.e. log(p(x)) instead of p(x)) to avoid rounding errors</remarks>
        public virtual void Update(TUpdate update, TObservation observation)
        {
            double totalWeight = 0;
            double totalInverseWeight = 0;

            // update particles and recompute weights
            double thresholdForReplacement = Math.Min(this.weightAvg, Math.Min(this.fastWeightAvg, this.slowWeightAvg));
            TParticleState mostLikelyParticle = this.particles[this.highestLikelihoodParticleIndex];
            int randomResamplingCount = 0;

            // using the same Random instance in a parallel loop doesn't work, so we fill this array ahead of time instead
            for (int i = 0; i < this.particleCount; i++)
            {
                this.randomNumbers[i] = this.rand.NextDouble();
            }

            Parallel.For(
                0,
                this.particleCount,
                i =>
                {
                    bool shouldResampleFromRandom = false;
                    double fitnessToObservation = 0;
                    if (this.EnableRandomResampling && (this.singleIterationWeights[i] < thresholdForReplacement && this.weightAvg < Math.Min(this.slowWeightAvg, this.fastWeightAvg)))
                    {
                        // with low probability (<1%), replace some weak samples with samples drawn at random
                        double probabilityOfReplacement = ProbabilityOfRandomReplacement * this.weights[i] / this.weightAvg;
                        shouldResampleFromRandom = this.randomNumbers[i] < (1 - this.slowWeightAvg / (this.fastWeightAvg + this.slowWeightAvg)) * probabilityOfReplacement;
                    }

                    if (shouldResampleFromRandom)
                    {
                        this.particles[i] = this.CreateRandomSampleFromPriorAndObservation(mostLikelyParticle, update, observation, this.weights[i], out fitnessToObservation);

                        // Interlocked because multiple loop bodies may replace particles concurrently
                        Interlocked.Increment(ref randomResamplingCount);
                    }
                    else
                    {
                        // compute the new particle state using the motion update
                        this.particles[i] = this.UpdateSample(this.particles[i], update, observation, this.weights[i], out fitnessToObservation);
                    }

                    if (double.IsInfinity(fitnessToObservation) || double.IsNaN(fitnessToObservation))
                    {
                        throw new InvalidOperationException(string.Format("Particle {0} generated an invalid fitness value {1}", i, fitnessToObservation));
                    }

                    // update the weight based on fitness to observation
                    this.singleIterationWeights[i] = fitnessToObservation;
                });

            // compute the weights as a running average over the iterations since the last resampling
            this.maxWeight = double.MinValue;
            int multiplier = this.countOfIterationsSinceLastResampling + 1;
            for (int i = 0; i < this.particleCount; i++)
            {
                this.weights[i] = (multiplier * this.weights[i] + this.singleIterationWeights[i]) / (multiplier + 1);

                totalWeight += this.weights[i];
                totalInverseWeight += 1d / this.weights[i];

                if (this.weights[i] > this.maxWeight)
                {
                    this.maxWeight = this.weights[i];
                    this.highestLikelihoodParticleIndex = i;
                }
            }

            this.countOfIterationsSinceLastResampling++;
            this.weightAvg = totalWeight / this.particleCount;
            this.slowWeightAvg = this.slowWeightAvg + (this.slowWeightAlpha * (this.weightAvg - this.slowWeightAvg));
            this.fastWeightAvg = this.fastWeightAvg + (FastWeightAlpha * (this.weightAvg - this.fastWeightAvg));

            // make the slow alpha converge to its constant value over a few iterations, to allow for a gentle warm-up of the filter
            this.slowWeightAlpha = Math.Max(SlowWeightAlpha, this.slowWeightAlpha / SlowWeightConvergenceFactor);

            // waiting for enabled/disabled tracing functionality 
            ////TraceOut.Info(PFTraceContext, "SMLP: {0:N4}, MLP: {1:N4}, avg: {2:N4}, fast: {3:N4}, slow: {4:N4}, resample: {5:D}", maxSingleIterationWeight, this.maxWeight, weightAvg, this.fastWeightAvg, this.slowWeightAvg, this.countOfIterationsSinceLastResampling);
            if (this.counters != null)
            {
                // weights are log-likelihoods (negative), so they are negated and scaled to produce positive counter values
                const int WeightMultiplier = 1000;
                double singleIterationWeightAverage = this.singleIterationWeights.Average();
                double maxSingleIterationWeight = this.singleIterationWeights.Max();
                this.counters[ParticleFilterCounters.IterationRate].Increment();
                this.counters[ParticleFilterCounters.SingleIterationBestWeight].RawValue = (long)(-maxSingleIterationWeight * WeightMultiplier);
                this.counters[ParticleFilterCounters.SingleIterationWeightAverage].RawValue = (long)(-singleIterationWeightAverage * WeightMultiplier);
                this.counters[ParticleFilterCounters.BestWeight].RawValue = (long)(-this.maxWeight * WeightMultiplier);
                this.counters[ParticleFilterCounters.WeightAverage].RawValue = (long)(-this.weightAvg * WeightMultiplier);
                this.counters[ParticleFilterCounters.FastWeightAverage].RawValue = (long)(-this.fastWeightAvg * WeightMultiplier);
                this.counters[ParticleFilterCounters.SlowWeightAverage].RawValue = (long)(-this.slowWeightAvg * WeightMultiplier);
                this.counters[ParticleFilterCounters.RandomSampleCount].RawValue = randomResamplingCount;
                this.counters[ParticleFilterCounters.PopulationAge].RawValue = this.countOfIterationsSinceLastResampling;
            }

            // do we need to resample?
            if (this.fastWeightAvg >= this.slowWeightAvg)
            {
                return;
            }

            // resample with replacement - using un-normalized likelihoods
            // the probability to resample a particle is proportional to its weight
            // See Low-variance sampler, p. 110 Probabilistic Robotics (Thrun, Burgard, Fox) 2006
            int replacedCount = 0;
            double inverseAvg = -totalInverseWeight / this.particleCount;
            double r = this.rand.NextDouble() * inverseAvg;
            double c = -1d / this.weights[0];
            int j = 0;
            for (int m = 0; m < this.particleCount; m++)
            {
                double u = r + (m * inverseAvg);

                // if the while loop below doesn't execute, we are copying the same j particle again, which means we are eliminating a particle
                if (u <= c)
                {
                    replacedCount++;
                }

                // skip over particles with low weights.
                // the j < particleCount - 1 guard prevents running off the end of the weights array (and spinning
                // forever when the remaining weights are all 0); in that degenerate case we clamp to the last particle.
                while (u > c && j < this.particleCount - 1)
                {
                    j++;
                    if (this.weights[j] != 0)
                    {
                        c = c - (1d / this.weights[j]);
                    }
                }

                // copy the particles with large weights
                if (m == j)
                {
                    this.resamplingParticleBuffer[m] = this.particles[j];
                }
                else
                {
                    this.resamplingParticleBuffer[m] = this.CloneSample(this.particles[j]);
                }
            }

            // swap the buffers
            TParticleState[] temp = this.particles;
            this.particles = this.resamplingParticleBuffer;
            this.resamplingParticleBuffer = temp;
            this.countOfIterationsSinceLastResampling = 0;

            // reset the weights
            for (int i = 0; i < this.particleCount; i++)
            {
                this.weights[i] = this.slowWeightAvg;
            }

            if (this.counters != null)
            {
                this.counters[ParticleFilterCounters.ReplacedParticleCount].IncrementBy(replacedCount);
            }
        }

        /// <summary>
        /// Creates a copy of an existing particle.
        /// </summary>
        /// <param name="sourceParticle">The particle to copy.</param>
        /// <returns>A copy of the old particle.</returns>
        protected abstract TParticleState CloneSample(TParticleState sourceParticle);

        /// <summary>
        /// Returns a sample from the state space that matches the observation.
        /// The default behavior is to ignore the observation and return a random sample.
        /// </summary>
        /// <param name="priorState">The old particle state.</param>
        /// <param name="update">The update to apply.</param>
        /// <param name="observation">The observation to match.</param>
        /// <param name="weight">The current weight of the particle</param>
        /// <param name="fitnessToObservation">The probability of a fit, in log-likelihood form.</param>
        /// <returns>A sample from the state space.</returns>
        protected virtual TParticleState CreateRandomSampleFromPriorAndObservation(
            TParticleState priorState, TUpdate update, TObservation observation, double weight, out double fitnessToObservation)
        {
            throw new NotImplementedException();
        }

        /// <summary>
        /// Creates a random initial sample. Default implementation simply clones the first particle.
        /// </summary>
        /// <returns>A random sample drawn from the state space.</returns>
        protected virtual TParticleState CreateRandomSample()
        {
            return this.CloneSample(this.particles[0]);
        }

        /// <summary>
        /// Updates the given particle based on motion and observation and returns the probability that the state specified by the particle would result in the given observation.
        /// </summary>
        /// <param name="priorState">The old particle state.</param>
        /// <param name="update">The update to apply.</param>
        /// <param name="observation">The observation, assumed noisy.</param>
        /// <param name="weight">The current weight of the particle</param>
        /// <param name="fitnessToObservation">The probability of a fit, in log-likelihood form.</param>
        /// <returns>The updated particle.</returns>
        protected abstract TParticleState UpdateSample(
            TParticleState priorState, TUpdate update, TObservation observation, double weight, out double fitnessToObservation);
    }
}
