﻿using System;
using System.Collections.Generic;
using System.Runtime.InteropServices;
using System.Globalization;
using System.ComponentModel;
using LFSRecord2.Model.Media;

namespace LFSRecord2.Model.Sound
{
    /// <summary>
    /// Mixes the project's audio layers into one interleaved PCM stream and
    /// feeds it to a wave output device through <see cref="WaveIO"/>. Also owns
    /// the master format settings (channels, word length, sample rate).
    /// </summary>
    public sealed class SoundMixer : INotifyPropertyChanged, IDisposable
    {
        // Objects we need for input and output
        private LfsRecordProject _project;
        private WaveIO _waveIO;

        // Guards against double disposal (Dispose may be called more than once).
        private bool _disposed;

        // Main audio settings
        byte _numChannels = 2;

        /// <summary>Number of output channels (default 2).</summary>
        public byte NumChannels
        {
            get { return _numChannels; }
            private set { _numChannels = value; }
        }

        byte _wordLength = 16;

        /// <summary>Output sample size in bits. Must be a multiple of 8, at most 24.</summary>
        public byte WordLength
        {
            get { return _wordLength; }
            private set
            {
                // FIX: throw specific argument exceptions instead of the base
                // Exception type (callers catching Exception still work).
                if (value % 8 > 0)
                    throw new ArgumentException("Invalid word length. Must be multiple of 8.");
                if (value > 24)
                    throw new ArgumentOutOfRangeException("value", "Word length exceeds maximum of 24bits.");
                _wordLength = value;
            }
        }

        uint _sampleRate = 44100;

        /// <summary>
        /// Output sample rate in Hz. Setting it recomputes the duration and wave
        /// view of every audio media object and refreshes region end times,
        /// since both are expressed in seconds and change with the rate.
        /// </summary>
        public uint SampleRate
        {
            get { return _sampleRate; }
            private set
            {
                _sampleRate = value;

                foreach (MediaBase mb in _project.MediaObjects)
                {
                    if (mb.MediaType != MediaTypes.Audio)
                        continue;
                    MediaAudio ma = (MediaAudio)mb;
                    // FIX: use the freshly assigned rate instead of reaching back
                    // through the static LfsRecordController.Mixer reference,
                    // which may not point at this instance yet (this setter runs
                    // from the constructor via ResetIO).
                    ma.AudioStream.Duration = ma.AudioStream.Length / (double)(ma.AudioStream.Format.nBlockAlign * _sampleRate);
                    ma.AudioStream.GenerateWaveView();
                    // Toggle the status so listeners see a change event and refresh.
                    ma.FileStatus = MediaFileStatus.None;
                    ma.FileStatus = MediaFileStatus.Loaded;
                }

                // Durations changed above, so region end times must follow.
                foreach (AudioLayer l in _project.AudioLayers)
                {
                    foreach (AudioRegion ar in l.AudioRegions)
                    {
                        ar.EndTime = ar.AudioStream.Duration;
                    }
                }

                OnPropertyChanged("SampleRate");
            }
        }

        // Scale factor from the float mix range (roughly [-1, 1]) to just below
        // full-scale signed 32-bit integer output.
        private float _masterVolume = Int32.MaxValue * 0.999f;

        // Size of channel buffer (total buffer size is then numchannels * channelbufsize)
        // Channel buffers are like the mixBuffer, 32 bits floating point
        private static int _channelBufNumSamples = 4096;
        private static int _channelBufSize = _channelBufNumSamples * sizeof(float);

        // buffer used to mix layers into (one float array per channel)
        private float[][] _mixBuffer;

        // Buffer used to write the final output data to.
        private byte[] _playBuffer;

        /// <summary>
        /// Copies the already-populated play buffer into the native buffer
        /// supplied by WaveIO. populateOutBuffer must have run first so that
        /// _playBuffer holds at least <paramref name="size"/> bytes.
        /// </summary>
        void fillOutBuffer(IntPtr data, int size)
        {
            Marshal.Copy(_playBuffer, 0, data, size);
        }

        /// <summary>
        /// Mixes all audio layers into _mixBuffer, then limits, scales and packs
        /// the result as interleaved integer PCM into _playBuffer
        /// (<paramref name="size"/> = target play-buffer size in bytes).
        /// </summary>
        void populateOutBuffer(int size)
        {
            if (_playBuffer == null || size > _playBuffer.Length)
                _playBuffer = new byte[size];

            // (Re)allocate and zero one float buffer per channel.
            if (_mixBuffer == null || _mixBuffer.Length != NumChannels)
                _mixBuffer = new float[NumChannels][];
            for (int c = 0; c < NumChannels; c++)
            {
                // FIX: these arrays hold floats, so their length is the sample
                // count, not the byte count (_channelBufSize); the old code
                // allocated four times the memory actually used.
                if (_mixBuffer[c] == null || _mixBuffer[c].Length != _channelBufNumSamples)
                    _mixBuffer[c] = new float[_channelBufNumSamples];
                Array.Clear(_mixBuffer[c], 0, _channelBufNumSamples);
            }

            // Mix our audiolayers
            foreach (AudioLayer l in _project.AudioLayers)
            {
                if (l.AudioRegions.Count == 0)
                    continue;

                // Read audio data, as a 32bit float array, from our audio layer.
                // NOTE(review): this runs even for muted layers — presumably to
                // keep the layer's playback position advancing; confirm before
                // hoisting the IsMuted check above it.
                float[][] layerBuffer = l.ReadAudio(_waveIO.WaveFormat, _channelBufNumSamples, _channelBufSize);
                if (l.IsMuted)
                    continue;

                // Mix the layer into the _mixBuffer
                for (int a = 0; a < _channelBufNumSamples; a++)
                {
                    for (int c = 0; c < NumChannels; c++)
                    {
                        _mixBuffer[c][a] += layerBuffer[c][a];
                    }
                }
            }

            // Fill _playBuffer: limit, scale to integer range and keep only the
            // most significant WordLength/8 bytes of each 32-bit sample.
            int p = 0;
            int byteOffset = 4 - WordLength / 8;
            for (int a = 0; a < _channelBufNumSamples; a++)
            {
                for (int c = 0; c < NumChannels; c++)
                {
                    // NOTE(review): assumes little-endian byte order (true on
                    // Windows/x86, where waveOut lives), so bytes [byteOffset..3]
                    // are the high-order bytes of the sample.
                    byte[] sampleBytes = BitConverter.GetBytes((int)(PeakLimiter.Limit(_mixBuffer[c][a]) * _masterVolume));
                    for (int b = byteOffset; b < 4; b++)
                        _playBuffer[p++] = sampleBytes[b];
                }
            }
        }

        /// <summary>Index of the current wave output device.</summary>
        public int CurrentWaveOutDevice
        {
            get { return _waveIO.CurrentWaveOutDevice; }
        }

        /// <summary>Index of the current wave input device.</summary>
        public int CurrentWaveInDevice
        {
            get { return _waveIO.CurrentWaveInDevice; }
        }

        public event PropertyChangedEventHandler PropertyChanged;

        /// <summary>Raises PropertyChanged for the named property.</summary>
        private void OnPropertyChanged(string propertyName)
        {
            // Copy the delegate to a local to avoid a race with unsubscription.
            PropertyChangedEventHandler handler = this.PropertyChanged;
            if (handler != null)
            {
                handler(this, new PropertyChangedEventArgs(propertyName));
            }
        }

        /// <summary>
        /// Creates a mixer for the given project and initialises output on the
        /// default device with 2 channels, 16 bits at the default sample rate.
        /// </summary>
        public SoundMixer(LfsRecordProject project)
        {
            _project = project;
            _waveIO = new WaveIO();
            ResetIO(-1, 2, 16, SampleRate);
        }

        ~SoundMixer()
        {
            Dispose(false);
        }

        public void Dispose()
        {
            Dispose(true);
            GC.SuppressFinalize(this);
        }

        // Standard dispose pattern. FIX: the old finalizer called Dispose() and
        // touched managed state (_waveIO) during finalization, and Dispose was
        // not safe to call twice.
        private void Dispose(bool disposing)
        {
            if (_disposed)
                return;
            _disposed = true;

            if (disposing)
            {
                if (_waveIO != null)
                    _waveIO.Dispose();
                _playBuffer = null;
            }
        }

        /// <summary>
        /// Re-initialises audio output for the given device and format. Invalid
        /// device indices, channel counts, word lengths or sample rates are
        /// silently ignored (the method returns without changing anything).
        /// </summary>
        public void ResetIO(int audioDevice, int numChannels, int wordLength, uint sampleRate)
        {
            // Check if audio device is valid (-1 selects the default device)
            if (audioDevice < -1 || audioDevice >= WaveIO.NumOutputDevices)
                return;

            // FIX: numChannels and wordLength were previously ignored entirely;
            // validate them here (preserving the method's non-throwing contract)
            // and apply them below.
            if (numChannels < 1 || numChannels > byte.MaxValue)
                return;
            if (wordLength < 8 || wordLength > 24 || wordLength % 8 != 0)
                return;

            WaveOutCaps caps = WaveIO.DeviceCaps(audioDevice);

            // Check sample rate against the device's reported capabilities
            List<string> sampleRates = WaveIO.GetSampleRatesList(caps.dwFormats);
            if (!sampleRates.Contains(sampleRate.ToString(CultureInfo.InvariantCulture)))
                return;

            // ok - apply the requested format
            NumChannels = (byte)numChannels;
            WordLength = (byte)wordLength;
            SampleRate = sampleRate;

            // Larger buffers for high sample rates keep the callback frequency down.
            _channelBufNumSamples = (SampleRate > 48000) ? 8192 : 4096;
            _channelBufSize = _channelBufNumSamples * sizeof(float);
            int playBufferSize = _channelBufNumSamples * NumChannels * WordLength / 8;

            if (_waveIO != null)
                _waveIO.Dispose();
            _waveIO = new WaveIO(new BufferPopulateEventHandler(populateOutBuffer), new BufferFillEventHandler(fillOutBuffer), playBufferSize);
            _waveIO.CurrentWaveOutDevice = audioDevice;
            _waveIO.WaveFormat = new WaveFormatEx(SampleRate, WordLength, NumChannels);
        }

        /// <summary>
        /// Starts playback from the given time (seconds). Resets every layer's
        /// position first; does nothing when the project has no audio layers.
        /// </summary>
        public void StartPlayback(double timePtr)
        {
            StopPlayback();

            int numAudioLayers = 0;
            foreach (AudioLayer l in _project.AudioLayers)
            {
                l.Position = timePtr;
                numAudioLayers++;
            }

            if (numAudioLayers > 0)
            {
                // FIX: pre-fill with the actual output buffer size; the old code
                // passed _channelBufSize, which matches only for 2-channel 16-bit
                // output and under-sizes _playBuffer for e.g. 24-bit stereo.
                populateOutBuffer(_channelBufNumSamples * NumChannels * WordLength / 8);
                _waveIO.StartPlayback();
            }
        }

        /// <summary>Stops playback on the output device.</summary>
        public void StopPlayback()
        {
            _waveIO.StopPlayback();
        }
    }
}
