﻿using Common.FrontEnd;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Text;
using Util;
using Util.Props;

namespace FrontEnd.EndPoint
{
    /// <summary>
    /// Implements a level tracking endpointer invented by Bent Schmidt Nielsen.
    /// <p/>
    /// <p>This endpointer is composed of three main steps. <ol> <li>classification of audio into speech and non-speech
    /// <li>inserting SPEECH_START and SPEECH_END signals around speech <li>removing non-speech regions </ol>
    /// <p/>
    /// <p>The first step, classification of audio into speech and non-speech, uses Bent Schmidt Nielsen's algorithm. Each
    /// time audio comes in, the average signal level and the background noise level are updated, using the signal level of
    /// the current audio. If the average signal level is greater than the background noise level by a certain threshold
    /// value (configurable), then the current audio is marked as speech. Otherwise, it is marked as non-speech.
    /// <p/>
    /// <p>The second and third step of this endpointer are documented in the classes {@link SpeechMarker SpeechMarker} and
    /// {@link NonSpeechDataFilter NonSpeechDataFilter}.
    ///
    /// @see SpeechMarker
    /// </summary>
    public class SpeechClassifier : AbstractVoiceActivityDetector
    {
        /// <summary>The property specifying the endpointing frame length in milliseconds.</summary>
        [S4Integer(defaultValue = 10)]
        public static readonly string PROP_FRAME_LENGTH_MS = "frameLengthInMs";

        /// <summary>The property specifying the minimum signal level used to update the background signal level.</summary>
        [S4Double(defaultValue = 0)]
        public static readonly string PROP_MIN_SIGNAL = "minSignal";

        /// <summary>
        /// The property specifying the threshold. If the current signal level is greater than the background level by
        /// this threshold, then the current signal is marked as speech. Therefore, a lower threshold will make the
        /// endpointer more sensitive, that is, mark more audio as speech. A higher threshold will make the endpointer
        /// less sensitive, that is, mark less audio as speech.
        /// </summary>
        [S4Double(defaultValue = 10)]
        public static readonly string PROP_THRESHOLD = "threshold";

        /// <summary>The property specifying the adjustment (the rate at which the background level adapts upwards).</summary>
        [S4Double(defaultValue = 0.003)]
        public static readonly string PROP_ADJUSTMENT = "adjustment";

        // Weight of the history when averaging the signal level: 'level' is a
        // running average with (averageNumber : 1) old-to-new weighting.
        protected double averageNumber = 1;
        protected double adjustment;          // rate at which the background level tracks a rising signal
        protected double level;               // average signal level
        protected double background;          // background signal level
        protected double minSignal;           // minimum valid signal level
        protected double threshold;
        protected float frameLengthSec;       // frame length in seconds (derived from frameLengthMs)
        protected bool _isSpeech;             // classification of the most recent frame

        /* Statistics used for the signal-to-noise estimate */
        protected long speechFrames;
        protected long backgroundFrames;
        protected double totalBackgroundLevel;
        protected double totalSpeechLevel;

        /// <summary>
        /// Constructs a SpeechClassifier with the given endpointing parameters.
        /// </summary>
        /// <param name="frameLengthMs">the endpointing frame length in milliseconds</param>
        /// <param name="adjustment">the rate at which the background level adapts upwards</param>
        /// <param name="threshold">level-over-background difference above which a frame is speech</param>
        /// <param name="minSignal">minimum signal level used to update the levels</param>
        public SpeechClassifier(int frameLengthMs, double adjustment, double threshold, double minSignal)
        {
            this.frameLengthSec = frameLengthMs / 1000.0f;

            this.adjustment = adjustment;
            this.threshold = threshold;
            this.minSignal = minSignal;

            initialize();
        }

        public SpeechClassifier()
        {
        }

        /// <summary>Reads the endpointing parameters from the given property sheet.</summary>
        public override void newProperties(PropertySheet ps)
        {
            base.newProperties(ps);
            int frameLengthMs = ps.getInt(PROP_FRAME_LENGTH_MS);
            frameLengthSec = frameLengthMs / 1000.0f;

            adjustment = ps.getDouble(PROP_ADJUSTMENT);
            threshold = ps.getDouble(PROP_THRESHOLD);
            minSignal = ps.getDouble(PROP_MIN_SIGNAL);

            initialize();
        }

        /// <summary>Initializes this LevelTracker endpointer and DataProcessor predecessor.</summary>
        public void initialize()
        {
            base.initialize();
            reset();
        }

        /// <summary>Resets this LevelTracker to a starting state.</summary>
        protected void reset()
        {
            level = 0;
            // Start the background estimate high so it quickly falls to the true
            // noise floor: the estimate drops immediately but only rises slowly
            // at the 'adjustment' rate (see classify()).
            background = 300;
            resetStats();
        }

        /// <summary>
        /// Returns the logarithm base 10 of the root mean square of the given samples, scaled by 20 (decibels).
        /// </summary>
        /// <param name="samples">the samples; must be non-empty</param>
        /// <returns>the calculated log root mean square in log 10, times 20</returns>
        public static double logRootMeanSquare(double[] samples)
        {
            Trace.Assert(samples.Length > 0);
            double sumOfSquares = 0.0;
            foreach (double sample in samples)
            {
                sumOfSquares += sample * sample;
            }
            double rootMeanSquare = Math.Sqrt(sumOfSquares / samples.Length);
            // Clamp to 1 so the logarithm is never negative (silence maps to 0 dB).
            rootMeanSquare = Math.Max(rootMeanSquare, 1);
            return LogMath.log10((float) rootMeanSquare) * 20;
        }

        /// <summary>
        /// Classifies the given audio frame as speech or non-speech, and updates the endpointing parameters.
        /// </summary>
        /// <param name="audio">the audio frame</param>
        /// <returns>the same frame labeled with the speech/non-speech classification</returns>
        public SpeechClassifiedData classify(DoubleData audio)
        {
            double current = logRootMeanSquare(audio.Values);
            _isSpeech = false;
            if (current >= minSignal)
            {
                // Running average of the signal level, weighted by averageNumber.
                level = ((level * averageNumber) + current) / (averageNumber + 1);
                if (current < background) {
                    // The background estimate falls immediately...
                    background = current;
                } else {
                    // ...but rises only slowly, at the adjustment rate.
                    background += (current - background) * adjustment;
                }
                if (level < background) {
                    level = background;
                }
                // Speech when the average level exceeds the background by the threshold.
                _isSpeech = (level - background > threshold);
            }

            SpeechClassifiedData labeledAudio = new SpeechClassifiedData(audio, _isSpeech);

            string speech = "";
            if (labeledAudio.isSpeech())
                speech = "*";

            Trace.WriteLine("Bkg: " + background + ", level: " + level +
                    ", current: " + current + ' ' + speech);

            collectStats(_isSpeech);

            return labeledAudio;
        }

        /// <summary>Resets the speech/background statistics.</summary>
        private void resetStats()
        {
            // Counts start at 1 to avoid division by zero in getSNR().
            backgroundFrames = 1;
            speechFrames = 1;
            totalSpeechLevel = 0;
            totalBackgroundLevel = 0;
        }

        /// <summary>
        /// Collects the statistics used to estimate the signal-to-noise ratio of the channel.
        /// </summary>
        /// <param name="isSpeech">whether the current frame is classified as speech</param>
        private void collectStats(bool isSpeech)
        {
            if (isSpeech) {
                totalSpeechLevel += level;
                speechFrames++;
            } else {
                totalBackgroundLevel += background;
                backgroundFrames++;
            }
        }

        /// <summary>
        /// Returns the next Data object, classifying DoubleData frames as speech or non-speech.
        /// Resets the level tracker when a DataStartSignal is seen.
        /// </summary>
        /// <returns>the next Data object, or null if none available</returns>
        public IData getData()
        {
            IData audio = getPredecessor().getData();

            if (audio is DataStartSignal)
                reset();

            if (audio is DoubleData)
            {
                DoubleData data = (DoubleData) audio;
                audio = classify(data);
            }
            return audio;
        }

        /// <summary>
        /// Returns whether the most recently returned frame contains speech.
        /// It could be used by a noise filter, for example, to adjust its noise
        /// spectrum estimation.
        /// </summary>
        /// <returns>true if the current frame is speech</returns>
        public override bool isSpeech()
        {
            return _isSpeech;
        }

        /// <summary>
        /// Retrieves the accumulated signal-to-noise ratio in dB scale.
        /// Computed as (average background level - average speech level), so the
        /// value grows more negative as speech gets louder relative to the background.
        /// </summary>
        /// <returns>the signal-to-noise ratio</returns>
        public double getSNR()
        {
            double snr = totalBackgroundLevel / backgroundFrames - totalSpeechLevel / speechFrames;
            Trace.WriteLine("Background " + totalBackgroundLevel / backgroundFrames);
            Trace.WriteLine("Speech " + totalSpeechLevel / speechFrames);
            Trace.WriteLine("SNR is " + snr);
            return snr;
        }

        /// <summary>
        /// Returns an estimation of whether the input data was noisy enough to
        /// break recognition. The audio is counted noisy when the ratio returned
        /// by getSNR() is greater than -20 dB, i.e. the average speech level is
        /// less than 20 dB above the background level.
        /// </summary>
        /// <returns>the estimation of the data being noisy</returns>
        public bool getNoisy()
        {
            return getSNR() > -20;
        }
    }
}
