﻿#region Usings
using System.Collections.Generic;
using System.Drawing;
using System.Windows;
using System.Windows.Media;
using AForge;
using AForge.Imaging;
using AForge.Imaging.Filters;
using LaserTag.Infrastructure.Events;
using Microsoft.Practices.Composite.Events;
using Size=System.Windows.Size;
#endregion

namespace LaserTag.Module.AForgeModule.VideoProcessors
{
    /// <summary>
    /// The AForge based video processor used to find objects
    /// in video frames
    /// </summary>
    /// <summary>
    /// The AForge based video processor used to find objects
    /// in video frames
    /// </summary>
    public class AForgeVideoProcessor
    {
        /// <summary>
        /// Reference to the event mediator that was received by dependency injection
        /// </summary>
        private readonly IEventAggregator m_eventAggregator;

        /// <summary>
        /// Cached size of the incoming video frames.  Recomputed only when the
        /// frame dimensions actually change (see NewVideoFrame).
        /// </summary>
        private Size? m_bitmapSize;

        /// <summary>
        /// The most recently processed video frame.
        /// </summary>
        private Bitmap m_lastBitmap;

        /// <summary>
        /// Creates a new instance of the AForgeVideoProcessor.  This class
        /// is created by dependency injection (ie, container.Resolve&lt;AForgeVideoProcessor&gt;()),
        /// so you generally cannot directly create this class
        /// </summary>
        /// <param name="eventAggregator">The event mediator</param>
        public AForgeVideoProcessor(IEventAggregator eventAggregator)
        {
            m_eventAggregator = eventAggregator;

            /* Default caps on blob size; previously hard-coded to 25x25 pixels.
             * Exposed as properties so callers can tune them like the minimums. */
            BlobMaximumWidth = 25;
            BlobMaximumHeight = 25;

            /* Subscribes to the NewVideoFrame event.  The 'false' parameter
             * tells the event mediator to use weak references avoiding memory leaks you
             * generally get when you forget to unhook a general .NET event/delegate */
            m_eventAggregator.GetEvent<NewVideoFrameEvent>().Subscribe(NewVideoFrame, false);
        }

        /// <summary>
        /// The portion of the frame to search, expressed as ratios (0..1) of the
        /// frame's width/height.  When empty, NaN, or zero-anchored the whole
        /// frame is searched (see NewVideoFrame).
        /// </summary>
        public Rect TrackSearchAreaRatio
        {
            get;
            set;
        }

        /// <summary>
        /// The minimum hue.  The range is 0 to 359.
        /// </summary>
        public int HueMinimum { get; set; }

        /// <summary>
        /// The maximum hue.  The range is 0 to 359.
        /// </summary>
        public int HueMaximum { get; set; }

        /// <summary>
        /// The minimum saturation. The range is 0 to 1.
        /// </summary>
        public double SaturationMinimum { get; set; }

        /// <summary>
        /// The maximum saturation. The range is 0 to 1.
        /// </summary>
        public double SaturationMaximum { get; set; }

        /// <summary>
        /// The minimum luminance.  The range is 0 to 1.
        /// </summary>
        public double LuminanceMinimum { get; set; }

        /// <summary>
        /// The maximum luminance.  The range is 0 to 1.
        /// </summary>
        public double LuminanceMaximum { get; set; }

        /// <summary>
        /// The minimum blob height to find, in pixels.
        /// </summary>
        public int BlobMinimumHeight { get; set; }

        /// <summary>
        /// The minimum blob width to find, in pixels.
        /// </summary>
        public int BlobMinimumWidth { get; set; }

        /// <summary>
        /// The maximum blob width to find, in pixels.  Defaults to 25.
        /// </summary>
        public int BlobMaximumWidth { get; set; }

        /// <summary>
        /// The maximum blob height to find, in pixels.  Defaults to 25.
        /// </summary>
        public int BlobMaximumHeight { get; set; }

        /// <summary>
        /// Called by the event mediator to notify a new, raw video frame is received.
        /// Filters the frame by the configured HSL ranges, finds blobs in the
        /// (optionally restricted) search area, and publishes the detected objects
        /// and the filtered frame back out through the event aggregator.
        /// </summary>
        /// <param name="bitmap">The new, raw video frame received</param>
        private void NewVideoFrame(Bitmap bitmap)
        {
            /* Rotate our image as DShow likes to give us our image upside down */
            bitmap.RotateFlip(RotateFlipType.Rotate180FlipX);

            /* Cache the frame size.  The original keyed this on a reference
             * comparison against the previous frame, which re-created the size
             * every frame (each frame is a distinct Bitmap instance); comparing
             * the actual dimensions yields the same cached value without the
             * per-frame churn. */
            if (m_bitmapSize == null ||
                (int)m_bitmapSize.Value.Width != bitmap.Width ||
                (int)m_bitmapSize.Value.Height != bitmap.Height)
            {
                m_bitmapSize = new Size(bitmap.Width, bitmap.Height);
            }

            Rect targetSearchArea = new Rect(0, 0, m_bitmapSize.Value.Width, m_bitmapSize.Value.Height);

            /* NOTE(review): a ratio anchored at X==0 or Y==0 is treated as "search the
             * whole frame" by this condition — presumably intentional as a sentinel,
             * but worth confirming against whoever sets TrackSearchAreaRatio. */
            if (TrackSearchAreaRatio != Rect.Empty &&
                !double.IsNaN(TrackSearchAreaRatio.X) &&
                !double.IsNaN(TrackSearchAreaRatio.Y) &&
                (TrackSearchAreaRatio.X != 0 && TrackSearchAreaRatio.Y != 0 &&
                 (TrackSearchAreaRatio.Width != 0 || TrackSearchAreaRatio.Height != 0)))
            {
                /* Scale the 0..1 ratios up to pixel coordinates */
                targetSearchArea = new Rect(TrackSearchAreaRatio.X * m_bitmapSize.Value.Width,
                                            TrackSearchAreaRatio.Y * m_bitmapSize.Value.Height,
                                            TrackSearchAreaRatio.Width * m_bitmapSize.Value.Width,
                                            TrackSearchAreaRatio.Height * m_bitmapSize.Value.Height);

                /* Clamp the search area so it never exceeds the frame.  NOTE(review):
                 * this does not clamp X+Width/Y+Height against the frame edge, so a
                 * ratio whose origin plus extent exceeds 1 would make LockBits throw —
                 * assumed the callers always supply ratios within 0..1. */
                if (targetSearchArea.Width >= m_bitmapSize.Value.Width)
                    targetSearchArea.Width = m_bitmapSize.Value.Width;

                if (targetSearchArea.Height >= m_bitmapSize.Value.Height)
                    targetSearchArea.Height = m_bitmapSize.Value.Height;
            }

            m_lastBitmap = bitmap;

            /* This AForge class helps us filter out the pixels we do not want */
            var hsl = new HSLFiltering
            {
                Hue = new IntRange(HueMinimum, HueMaximum),
                Saturation = new DoubleRange(SaturationMinimum, SaturationMaximum),
                Luminance = new DoubleRange(LuminanceMinimum, LuminanceMaximum)
            };

            var bitmapData = bitmap.LockBits(
                new Rectangle((int)targetSearchArea.X, (int)targetSearchArea.Y,
                              (int)targetSearchArea.Width, (int)targetSearchArea.Height),
                System.Drawing.Imaging.ImageLockMode.ReadWrite, bitmap.PixelFormat);

            Bitmap grayImage;
            try
            {
                /* Apply the AForge filter.  Doing it "in place" is more efficient as a
                 * new bitmap does not have to be allocated and copied */
                hsl.ApplyInPlace(bitmapData);

                /* The blobs counter only works on grayscale images.
                 * NOTE(review): the coefficients sum to 1.5 rather than 1.0, which
                 * brightens the surviving (unfiltered) pixels — assumed deliberate
                 * to make them stand out to the blob counter; confirm if recalibrating. */
                var grayscale = new Grayscale(0.5, 0.5, 0.5);
                grayImage = grayscale.Apply(bitmapData);
            }
            finally
            {
                /* Always unlock, even if a filter throws, or the bitmap stays locked */
                bitmap.UnlockBits(bitmapData);
            }

            Blob[] blobs;

            /* using guarantees the grayscale working image is freed even on failure */
            using (grayImage)
            {
                /* Create and initialize our blob counter */
                var blobsCounter = new BlobCounter
                                       {
                                           FilterBlobs = true,
                                           ObjectsOrder = ObjectsOrder.Size,
                                           MinHeight = BlobMinimumHeight,
                                           MinWidth = BlobMinimumWidth,
                                           MaxWidth = BlobMaximumWidth,
                                           MaxHeight = BlobMaximumHeight
                                       };

                /* We first let the blob counter process our image */
                blobsCounter.ProcessImage(grayImage);

                /* Retrieve a list of blobs that were found */
                blobs = blobsCounter.GetObjects(grayImage);
            }

            /* More than four blobs is treated as noise and everything is discarded.
             * NOTE(review): heuristic inherited from the original code — confirm the
             * threshold against the expected number of tracked targets. */
            if (blobs.Length > 4)
            {
                blobs = new Blob[0];
            }

            /* This is the list we store all of our detected objects in */
            var detectedObjectsList = new List<DetectedObject>();

            /* Blob coordinates are relative to the locked sub-rectangle, so move the
             * search area to the origin before testing containment below */
            targetSearchArea.X = 0;
            targetSearchArea.Y = 0;

            foreach (Blob blob in blobs)
            {
                /* Copy the data from the AForge Blob class to our structure */
                var detectedObject = new DetectedObject
                                         {
                                             OriginalAreaSize = m_bitmapSize.Value,
                                             DetectionAreaSize = targetSearchArea.Size,
                                             ObjectArea = new Rect(blob.Rectangle.X,
                                                                   blob.Rectangle.Y,
                                                                   blob.Rectangle.Width,
                                                                   blob.Rectangle.Height)
                                         };

                if (targetSearchArea.Contains(detectedObject.ObjectArea))
                    detectedObjectsList.Add(detectedObject);
            }

            /* If we have detected blobs, then publish the event out the mediator */
            m_eventAggregator.GetEvent<NewObjectsDetectedEvent>().Publish(detectedObjectsList);

            /* Publish our filtered bitmap, so any subscriber can use it, ie render it */
            m_eventAggregator.GetEvent<NewPostProcessedVideoFrameEvent>().Publish(bitmap);
        }
    }
}