/////////////////////////////////////////////////////////////////////////////
// Name:        tts_sapi.cpp
// Purpose:     A wxWidgets wrapper around SAPI
// Author:      Julian Smart
// Modified by:
// Created:     2009-02-10
// RCS-ID:
// Copyright:   (c) Julian Smart
// Licence:     New BSD License
/////////////////////////////////////////////////////////////////////////////

#include "wx/wx.h"

#ifdef __WXMSW__

#include "wx/filename.h"
#include "wx/textfile.h"
#include "wx/datstrm.h"
#include "wx/wfstream.h"
#include "wx/dir.h"
#include "wx/log.h"

#include "tts/tts_sapi.h"

#define WM_TTSAPPCUSTOMEVENT WM_APP

#include <sapi.h>
#include <sphelper.h>

/*
 * wxTTSSapiData: SAPI wave file data
 */

// Holds the SAPI streams used while speech output is redirected to a wave
// file: the file stream being written, the previous output stream (restored
// by wxTTSSapiData::ResetWaveData()), and the format shared by both.
class wxTTSSapiWaveData
{
public:
    wxTTSSapiWaveData() { }
    ~wxTTSSapiWaveData() { }

    CComPtr<ISpStream>          m_waveStream;   // stream bound to the output .wav file
    CComPtr<ISpStreamFormat>    m_oldStream;    // voice output active before redirection
    CSpStreamFormat             m_streamFormat; // wave format copied from m_oldStream
};

/*
 * wxTTSSapiData: SAPI data
 */

// Private implementation data for wxTTSSapiHandler: the SAPI voice object,
// optional wave-file redirection state, and cached voice information.
class wxTTSSapiData
{
public:
    wxTTSSapiData() { m_writingToWave = false; m_waveData = NULL; m_isSpeaking = false; m_pauseCount = 0; }

    // Free any wave data that was not cleaned up via ResetWaveData(), e.g.
    // if the handler is destroyed while still writing to a wave file.
    // ResetWaveData() sets m_waveData back to NULL after deleting it, and
    // deleting NULL is a no-op, so this cannot double-delete.
    ~wxTTSSapiData() { delete m_waveData; }

    // Restore the voice's original output stream and discard m_waveData.
    void ResetWaveData();

    CComPtr<ISpVoice>   m_voice;          // the SAPI voice (NULL until Initialize())
    wxTTSSapiWaveData*  m_waveData;       // non-NULL only while writing to a wave file

    bool                m_writingToWave;  // true while output is redirected to a file

    bool                m_isSpeaking;     // see wxTTSSapiHandler::IsSpeaking() semantics
    int                 m_pauseCount;     // nested Pause() depth; > 0 means paused

    wxString            m_defaultVoice;   // description of the engine's default voice
    wxString            m_currentVoice;   // voice most recently applied in DoSpeak()
    wxArrayString       m_voiceNames;     // descriptions of all installed voices
    wxArrayString       m_voiceIds;       // registry token ids, parallel to m_voiceNames
};

// SAPI notification callback registered in wxTTSSapiHandler::Initialize().
// lParam carries the handler instance; forward to it so it can drain the
// SAPI event queue (see wxTTSSapiHandler::Callback()).
void CALLBACK wxTTSSapiCallback(WPARAM WXUNUSED(wParam), LPARAM lParam)
{
    // Named cast instead of a C-style cast: LPARAM -> handler pointer.
    reinterpret_cast<wxTTSSapiHandler *>(lParam)->Callback();
}

// Restore the voice's original output stream (if any) and release the
// wave-file redirection state created by wxTTSSapiHandler::DoSpeak().
// Safe to call when no redirection is active.
void wxTTSSapiData::ResetWaveData()
{
    if (m_waveData)
    {
        m_waveData->m_waveStream.Release();

        // Reset output to the stream that was active before the redirection.
        // Guard against a voice that was never created (Initialize() failed
        // or Uninitialize() already released it) - previously this
        // dereferenced m_voice unconditionally.
        if (m_voice)
            m_voice->SetOutput( m_waveData->m_oldStream, FALSE );

        delete m_waveData;
        m_waveData = NULL;
    }

    m_writingToWave = false;
}

/*
 * wxTTSSapiHandler implementation
 */

// wxWidgets RTTI registration for the handler class.
IMPLEMENT_DYNAMIC_CLASS( wxTTSSapiHandler, wxTTSHandler )

// Engine-name substrings whose voices cannot be spoken simultaneously;
// shared across all handler instances (see VoicesAreCompatible()).
wxArrayString wxTTSSapiHandler::sm_simultaneousVoiceExclusions;

// Construct the handler and, on first construction only, seed the default
// list of engines whose voices cannot be used simultaneously.
wxTTSSapiHandler::wxTTSSapiHandler(): wxTTSHandler(wxT("Microsoft SAPI"), _("Microsoft SAPI"))
{
    Init();

    // Only populate the shared exclusion list once.
    if (sm_simultaneousVoiceExclusions.GetCount() != 0)
        return;

    // An application can call this to customize which engines cannot be used for
    // multiple simultaneous voices. It can't specify two different, incompatible
    // voices that have no common match, but this should be rare.
    wxArrayString defaultExclusions;
    defaultExclusions.Add(wxT("eSpeak"));
    defaultExclusions.Add(wxT("CereVoice"));
    defaultExclusions.Add(wxT("Cepstral"));
    SetSimultaneousVoiceExclusions(defaultExclusions);
}

// Destructor: frees the private implementation data.
// NOTE(review): Uninitialize() is not called here, so it appears callers
// are expected to call it before destruction - confirm this contract.
wxTTSSapiHandler::~wxTTSSapiHandler()
{
    delete m_data;
}

// Member initialisation
// Allocates the private data and seeds the generic TTS properties with
// defaults. SAPI-specific setup (voice creation, callbacks) happens later,
// in Initialize().
void wxTTSSapiHandler::Init()
{
    m_data = new wxTTSSapiData;

    // Initialize generic properties

    SetVoice(wxT("")); // ?? No concept of default voice since can install any voice
    SetSpeed(0);      // Speed in words per minute, 80-390
    SetVolume(50);
    SetPitch(50);       // 0-100
    SetVoiceVariant(wxT("None"));

    SetProperty(wxTTS_PROP_INPUT_FORMAT, wxTTS_PROP_INPUT_FORMAT_TEXT);
    SetProperty(wxTTS_PROP_NO_SUBSTITUTIONS, false);
    SetProperty(wxTTS_PROP_SIMPLIFY_PUNCTUATION, false);
    SetProperty(wxTTS_PROP_TEXT_ENCODING, wxString(wxT("utf-8")));
    SetProperty(wxTTS_PROP_ENGINE_WEBSITE, wxString(wxT("http://www.microsoft.com/speech")));
    SetProperty(wxTTS_PROP_PRIORITY, 0);

    // Initialize SAPI properties
}

// Speak the text
// Convenience wrapper: speaks 'text' directly (not a filename) with the
// supplied wxTTS_SPEAK_* options. Returns false if speech couldn't start.
bool wxTTSSapiHandler::Speak(const wxString& text, int options)
{
    return DoSpeak(text, false, options);
}

// Speak the file
// Speaks the contents of 'filename'. If 'waveFilename' is non-empty the
// audio is written to that wave file instead of the audio device.
bool wxTTSSapiHandler::SpeakFile(const wxString& filename, int options, const wxString& waveFilename)
{
    return DoSpeak(filename, true, options, waveFilename);
}

// Speak the text or file
// Shared implementation for Speak() and SpeakFile(). 'textOrFilename' is
// either the text itself or, when isFilename is true, the name of a file
// to speak. If 'waveFilename' is non-empty (file input only), output is
// redirected to that wave file instead of the audio device.
// Returns true if SAPI accepted the speak request.
bool wxTTSSapiHandler::DoSpeak(const wxString& textOrFilename, bool isFilename, int options, const wxString& waveFilename)
{
    if (IsSpeaking() || IsPaused())
    {
        Stop();
    }

    DeleteTempFile();

    SetOptions(options);

    // Get the flags - and always simplify punctuation
    int transformationFlags = CreateTransformerOptions();

    int flags = SPF_PURGEBEFORESPEAK;
    if (options & (wxTTS_SPEAK_ASYNC|wxTTS_SPEAK_SYNC_YIELD))
        flags |= SPF_ASYNC;

    // Map the declared input format onto transformation flags.
    wxString inputFormat = GetPropertyString(wxTTS_PROP_INPUT_FORMAT);
    if (inputFormat == wxTTS_PROP_INPUT_FORMAT_HTML)
    {
        transformationFlags |= (wxTTS_TRANSFORM_HTML_TO_SAPI_XML|wxTTS_TRANSFORM_SPEAK_ORDERED_LIST);
    }
    else if (inputFormat == wxTTS_PROP_INPUT_FORMAT_NATIVE)
    {
        transformationFlags |= wxTTS_TRANSFORM_PASS_RAW_TEXT;
    }
    else if (inputFormat == wxTTS_PROP_INPUT_FORMAT_SSML)
    {
        wxLogError(_("Sorry, SAPI 5 doesn't support SSML."));
        transformationFlags |= (wxTTS_TRANSFORM_HTML_TO_SAPI_XML|wxTTS_TRANSFORM_SPEAK_ORDERED_LIST);
    }
    else
    {
        transformationFlags |= wxTTS_TRANSFORM_TEXT_TO_SAPI_XML;
    }

    if (transformationFlags & wxTTS_TRANSFORM_PASS_RAW_TEXT)
    {
        // This may be unnecessary; we might assume that raw text is XML
        if (isFilename)
        {
            if (wxTextToSpeech::IsXmlFile(textOrFilename))
                flags |= SPF_IS_XML;
        }
        else
        {
            if (wxTextToSpeech::IsXmlString(textOrFilename))
                flags |= SPF_IS_XML;
        }
    }

    if (m_data->m_voice)
    {
        // Change to the current voice if it differs from the last one applied
        wxString thisVoice = GetVoice();
        if (!thisVoice.IsEmpty() && (thisVoice != m_data->m_currentVoice))
        {
            int idx = m_data->m_voiceNames.Index(thisVoice);
            if (idx != wxNOT_FOUND)
            {
                wxString id = m_data->m_voiceIds[idx];
                m_data->m_currentVoice = thisVoice;

                ISpObjectToken *pToken = NULL;
                if (SUCCEEDED(SpGetTokenFromId(id, &pToken, FALSE)))
                {
                    m_data->m_voice->SetVoice(pToken);
                    pToken->Release();
                }
            }
        }

        HRESULT hr;
        hr = m_data->m_voice->SetRate(GetPropertyLong(wxTTS_PROP_SPEED));
        hr = m_data->m_voice->SetVolume(GetPropertyLong(wxTTS_PROP_VOLUME));

        // Possibly allow voices to speak simultaneously
        SPVPRIORITY pri = SPVPRI_NORMAL;
        int priority = GetPropertyLong(wxTTS_PROP_PRIORITY);
        if (priority == wxTTS_Over)
            pri = SPVPRI_OVER;
        else if (priority == wxTTS_Alert)
            pri = SPVPRI_ALERT;
        hr = m_data->m_voice->SetPriority(pri);
    }

    if (!isFilename)
    {
        if (m_data->m_voice)
        {
            wxString text(textOrFilename);
            if (GetTransformer())
                GetTransformer()->TransformString(textOrFilename, text, transformationFlags);

            HRESULT hr = m_data->m_voice->Speak(text, flags, NULL);
            if (SUCCEEDED(hr))
            {
                m_data->m_isSpeaking = true;
                return true;
            }
        }
    }
    else
    {
        flags |= SPF_IS_FILENAME;

        // Redirecting output to a wave file requires a voice object;
        // previously m_voice was dereferenced here without a check.
        if (!waveFilename.IsEmpty() && m_data->m_voice)
        {
            m_data->ResetWaveData();
            m_data->m_waveData = new wxTTSSapiWaveData;

            // Remember the current output stream so we can restore it later.
            HRESULT hr = m_data->m_voice->GetOutputStream( & m_data->m_waveData->m_oldStream );
            if (hr == S_OK)
            {
                hr = m_data->m_waveData->m_streamFormat.AssignFormat(m_data->m_waveData->m_oldStream);
            }
            else
            {
                hr = E_FAIL;
            }

            // Use the SAPI helper function in sphelper.h to create a wav file
            if (SUCCEEDED(hr))
            {
                hr = SPBindToFile( waveFilename, SPFM_CREATE_ALWAYS, & m_data->m_waveData->m_waveStream, & m_data->m_waveData->m_streamFormat.FormatId(), m_data->m_waveData->m_streamFormat.WaveFormatExPtr() );
            }

            if( SUCCEEDED( hr ) )
            {
                // Set the voice's output to the wav file instead of the speakers
                hr = m_data->m_voice->SetOutput(m_data->m_waveData->m_waveStream, TRUE);
            }

            if (SUCCEEDED(hr))
            {
                m_data->m_writingToWave = true;
            }
            else
            {
                // Could not redirect to the wave file: restore the previous
                // output and fall back to speaking through the speakers,
                // instead of leaking half-initialised wave data and leaving
                // m_writingToWave set (which would confuse IsSpeaking()).
                m_data->ResetWaveData();
            }
        }

        if (m_data->m_voice)
        {
            // First do some substitutions, if we have a transformer.
            wxString filename = DoTransformation(textOrFilename, transformationFlags);

            HRESULT hr = m_data->m_voice->Speak(filename, flags, NULL);
            if (SUCCEEDED(hr))
            {
                m_data->m_isSpeaking = true;
                if (options & wxTTS_SPEAK_SYNC_YIELD)
                {
                    // Pseudo-asynchronous - processes events while writing.
                    do
                    {
                        wxYield();
                    }
                    while (m_data->m_voice->WaitUntilDone(10));
                    m_data->m_isSpeaking = false;
                }

                if (options & (wxTTS_SPEAK_SYNC|wxTTS_SPEAK_SYNC_YIELD))
                    m_data->ResetWaveData();

                return true;
            }
        }
    }

    return false;
}

// Is it speaking? The meaning is slightly different from normal SAPI,
// in that it returns true after a request to speak, even if it didn't
// start actually speaking yet. This simplifies user interface considerations
// and ensures that we don't prematurely assume it's stopped speaking when
// it hasn't actually started yet.
bool wxTTSSapiHandler::IsSpeaking() const
{
    if (m_data->m_writingToWave && m_data->m_waveData)
    {
        /*
            Accord to MS docs: "Voice status and voice events are closely associated with
            the status of the audio output device. A voice speaking to a file stream
            produces no audio output, generates no events, and has no audio output
            status. As a result, the ISpeechVoiceStatus data returned by that voice
            will always indicate that it is inactive.
        */

        if (GetOptions() & wxTTS_SPEAK_ASYNC)
        {
            // WaitUntilDone(10) returns 0 (S_OK) once rendering has finished
            // (DoSpeak's wxYield loop relies on the same convention), so the
            // negation below means "done": restore the original output
            // stream and report not speaking.
            if (!m_data->m_voice->WaitUntilDone(10))
            {
                m_data->ResetWaveData();
                m_data->m_isSpeaking = false;
                return false;
            }
            else
                return true;
        }
        else
            return true; // we're still in a wxYield loop
    }
    else
    {
        // Rely on our own flag, maintained by DoSpeak()/Stop() and the
        // SPEI_END_INPUT_STREAM event in Callback(), rather than asking
        // SAPI for status (disabled alternative below).
        return m_data->m_isSpeaking;
#if 0
        SPVOICESTATUS status;
        HRESULT hr = m_data->m_voice->GetStatus(& status, NULL);
        if (SUCCEEDED(hr))
        {
            return (status.dwRunningState == SPRS_IS_SPEAKING);
        }
#endif
    }
    return false;
}

// Stop speaking
// Clears any pause, purges queued speech, waits for the engine to finish,
// drains pending end-of-stream notifications, and restores wave-file state.
bool wxTTSSapiHandler::Stop()
{
    bool success = true;

    // Clear any pause before purging.
    if (IsPaused())
        Resume();

    if (IsSpeaking())
    {
        // Stop current rendering with a PURGEBEFORESPEAK...
        HRESULT hr = m_data->m_voice->Speak( NULL, SPF_PURGEBEFORESPEAK, 0 );
        success = SUCCEEDED(hr);

        m_data->m_voice->WaitUntilDone(INFINITE);

        // Without this, we might get the SPEI_END_INPUT_STREAM event too
        // late, and it'll reset the m_isSpeaking variable to false
        // just after it's started speaking again.
        if (m_data->m_isSpeaking)
        {
            // WM_APP is the message that is being processed when SPEI_END_INPUT_STREAM
            // is encountered. So let's only process these messages.
            MSG msg;
            while (m_data->m_isSpeaking && ::PeekMessage(&msg, (HWND)0, WM_APP, WM_APP, PM_REMOVE) )
            {
                ::TranslateMessage(&msg);
                ::DispatchMessage(&msg);
            }
        }

        m_data->ResetWaveData();
    }

    // TODO: do we need to resume first?
    m_data->m_pauseCount = 0;

    m_data->m_isSpeaking = false;

    return success;
}

// Process pending application events while speech is in progress.
// The disabled alternative would instead block in short WaitUntilDone()
// slices until speaking completes.
void wxTTSSapiHandler::Yield()
{
#if 1
    wxSafeYield();
#else
    while (m_data->m_isSpeaking)
        m_data->m_voice->WaitUntilDone(10);
#endif
}

// Is it paused?
// The handler is paused while at least one Pause() has not been matched
// by a Resume().
bool wxTTSSapiHandler::IsPaused() const
{
    const bool paused = (m_data->m_pauseCount > 0);
    return paused;
}

// Pause
// Asks SAPI to pause and increments the nested pause count. Fails (returns
// false) if nothing is currently being spoken.
bool wxTTSSapiHandler::Pause()
{
    if (!IsSpeaking())
        return false;

    ++ m_data->m_pauseCount;
    m_data->m_voice->Pause();
    return true;
}

// Resume
// Asks SAPI to resume and decrements the nested pause count. Fails (returns
// false) if the handler is not currently paused.
bool wxTTSSapiHandler::Resume()
{
    if (!IsPaused())
        return false;

    m_data->m_voice->Resume();
    -- m_data->m_pauseCount;
    return true;
}

// Skip forward or backward the given number of items. A positive value skips forward,
// and a negative value skips back. A value of zero skips to the start of the item.
// itemType may be ignored by some or all engines.
bool wxTTSSapiHandler::Skip(int toSkip, wxTTSSkipType WXUNUSED(itemType))
{
    if (IsSpeaking())
    {
        ULONG numSkipped;
        HRESULT res = m_data->m_voice->Skip(L"Sentence", toSkip, & numSkipped);
        return SUCCEEDED(res);
    }
    else
        return false;
}

// Can we skip right now?
// Skipping is only possible while something is being spoken.
bool wxTTSSapiHandler::CanSkip() const
{
    return IsSpeaking();
}

// Get a list of the available voices
// Returns the voice descriptions cached by Initialize(); empty if the
// engine has not been initialized yet.
wxArrayString wxTTSSapiHandler::GetAvailableVoices() const
{
    return m_data->m_voiceNames;
}

// Get a list of the available voice variants
// SAPI has no concept of voice variants, so the list is always empty.
wxArrayString wxTTSSapiHandler::GetAvailableVoiceVariants() const
{
    return wxArrayString();
}

// Does this handler have the specified capability?
// Capabilities reflect what SAPI 5 supports through this wrapper.
bool wxTTSSapiHandler::HasCapability(wxTTSCapability capability) const
{
    switch (capability)
    {
    case wxTTS_CAP_WRITE_WAVE_FILE:
    case wxTTS_CAP_SPEAK_HTML:
    case wxTTS_CAP_VOICE:
    case wxTTS_CAP_SPEED:
    case wxTTS_CAP_VOLUME:
    case wxTTS_CAP_PAUSE:
        return true;

    case wxTTS_CAP_PITCH:
        return false; // Only in 5.3

    case wxTTS_CAP_SSML:
        return false; // TODO: need to test version of SAPI: 5.3 on Vista supports SSML

    case wxTTS_CAP_PROGRAM_LOCATION:
    case wxTTS_CAP_DATA_LOCATION:
    case wxTTS_CAP_VOICE_VARIATIONS:
    default:
        return false;
    }
}

// Get the default voice
// Returns the description of the engine's default voice, as recorded by
// Initialize(); empty if the engine has not been initialized.
wxString wxTTSSapiHandler::GetDefaultVoice() const
{
    return m_data->m_defaultVoice;
}

// Get the default voice variant
// SAPI has no voice variants, so this is always the placeholder "None".
wxString wxTTSSapiHandler::GetDefaultVoiceVariant() const
{
    return wxT("None");
}

// Initialize the engine. Can call multiple times.
// Creates the SAPI voice object, hooks up event notification, mirrors the
// engine defaults (rate, volume, default voice) into our properties, and
// caches the names and token ids of all installed voices.
// Returns false if the voice object could not be created.
bool wxTTSSapiHandler::Initialize()
{
    if (!GetInitialized())
    {
        ::CoInitialize(NULL);

        //HRESULT hr = CoCreateInstance(CLSID_SpVoice, NULL, CLSCTX_ALL, IID_ISpVoice, (void **)&m_data->m_voice);
        HRESULT hr = m_data->m_voice.CoCreateInstance(CLSID_SpVoice);
        if (SUCCEEDED( hr ))
        {
            SetInitialized(true);

            m_data->m_voice->SetNotifyCallbackFunction(wxTTSSapiCallback, 0, (LPARAM)this );

            // May need to do this so we can process all pending messages
            // m_data->m_voice->SetNotifyWindowMessage( hWnd, WM_TTSAPPCUSTOMEVENT, 0, 0 );

            m_data->m_voice->SetInterest(SPFEI_ALL_TTS_EVENTS, SPFEI_ALL_TTS_EVENTS);

            m_data->m_voice->SetAlertBoundary(SPEI_PHONEME);

            // Mirror the engine's current rate/volume into our properties.
            long rate = 0;
            USHORT volume = 0;
            hr = m_data->m_voice->GetRate(& rate);
            if (SUCCEEDED( hr ))
                SetProperty(wxT("Speed"), rate);
            hr = m_data->m_voice->GetVolume(& volume);
            if (SUCCEEDED( hr ))
                SetProperty(wxT("Volume"), volume);

            // Record the description of the engine's default voice.
            ISpObjectToken* token;
            if (SUCCEEDED(m_data->m_voice->GetVoice(& token)))
            {
                WCHAR* description = NULL;
                if (SUCCEEDED(SpGetDescription(token, & description)))
                {
                    wxString strDescription(description);
                    // Use the already-converted wxString rather than
                    // re-converting the raw WCHAR* a second time.
                    m_data->m_defaultVoice = strDescription;

                    SetProperty(wxT("Voice"), strDescription);

                    CoTaskMemFree(description);
                }

                token->Release();
            }

            // Get all available voices and cache their ids. Clear the caches
            // first so a re-initialization doesn't duplicate entries.
            m_data->m_voiceNames.Clear();
            m_data->m_voiceIds.Clear();

            ULONG ulNumTokens = 0;

            CComPtr<IEnumSpObjectTokens> cpEnum;
            if (SUCCEEDED(SpEnumTokens(SPCAT_VOICES, NULL, NULL, &cpEnum)))
            {
                if (SUCCEEDED(cpEnum->GetCount( &ulNumTokens )) && ulNumTokens != 0)
                {
                    ISpObjectToken* pToken = NULL;

                    while (cpEnum->Next(1, &pToken, NULL) == S_OK)
                    {
                        WCHAR* description = NULL;
                        WCHAR* id = NULL;
                        wxString strDescription;
                        wxString strId;
                        if (SUCCEEDED(SpGetDescription(pToken, &description)))
                        {
                            strDescription = description;
                            CoTaskMemFree(description);
                        }

                        if (SUCCEEDED(pToken->GetId(& id)))
                        {
                            strId = id;
                            CoTaskMemFree(id);
                        }

                        if (!strDescription.IsEmpty() && !strId.IsEmpty())
                        {
                            m_data->m_voiceNames.Add(strDescription);
                            m_data->m_voiceIds.Add(strId);
                        }

                        pToken->Release();
                        pToken = NULL;
                    }
                }
            }
            return true;
        }
        else
        {
            // Creating the voice failed: balance the CoInitialize() above,
            // since Uninitialize() won't run for this failed attempt.
            ::CoUninitialize();
            return false;
        }
    }
    return true;
}

// Clean up the engine. Can call multiple times.
bool wxTTSSapiHandler::Uninitialize()
{
    if (GetInitialized())
    {
        Stop();

        DeleteTempFile();

        if (m_data->m_voice)
        {
            m_data->m_voice.Release();
            m_data->m_voice = NULL;
        }

        ::CoUninitialize();
    }

    return true;
}

// Implementation: callback
// Drains all pending SAPI events for our voice. Only SPEI_END_INPUT_STREAM
// currently has any effect: it marks the handler as no longer speaking.
void wxTTSSapiHandler::Callback()
{
    CSpEvent event;

    while (event.GetFrom(m_data->m_voice) == S_OK)
    {
        if (event.eEventId == SPEI_END_INPUT_STREAM)
        {
            // TODO: will this be sent when pausing?
            // if so, we must not set m_isSpeaking to false.
            m_data->m_isSpeaking = false;
            m_data->m_pauseCount = 0;
        }
        // SPEI_START_INPUT_STREAM, SPEI_VOICE_CHANGE and all other events
        // are deliberately ignored.
    }
}

// Create transformer options from handler's properties
// Starts from the caller-supplied mandatory options and adds punctuation
// simplification and word substitution according to the current properties.
int wxTTSSapiHandler::CreateTransformerOptions(int mandatoryOptions) const
{
    int result = mandatoryOptions;

    const bool simplifyPunctuation = GetPropertyBool(wxTTS_PROP_SIMPLIFY_PUNCTUATION);
    const bool noSubstitutions = GetPropertyBool(wxTTS_PROP_NO_SUBSTITUTIONS);

    if (simplifyPunctuation)
        result |= wxTTS_TRANSFORM_OPTIONS_SIMPLIFY_PUNCTUATION;

    if (!noSubstitutions)
        result |= wxTTS_TRANSFORM_OPTIONS_WORD_SUBSTITUTION;

    return result;
}

// Can these two voices be used together? Pass an empty string to the voice parameters
// to find out whether the engine in general supports simultaneous voices. Pass voice names
// to find out if these voices can be used simultaneously, when using the given engine.
// The voice names can be the same.
bool wxTTSSapiHandler::VoicesAreCompatible(const wxString& voice1, const wxString& voice2) const
{
    // Passing empty strings means 'does this support simultaneous voices in general'?
    if (voice1.IsEmpty() && voice2.IsEmpty())
        return true;

    int voiceOneMatch = -1;
    int voiceTwoMatch = -1;

    size_t i;
    for (i = 0; i < sm_simultaneousVoiceExclusions.GetCount(); i++)
    {
        int matchOne = voice1.Find(sm_simultaneousVoiceExclusions[i]);
        int matchTwo = voice2.Find(sm_simultaneousVoiceExclusions[i]);

        if (matchOne != -1)
            voiceOneMatch = i;
        if (matchTwo != -1)
            voiceTwoMatch = i;
    }

    // At least one has no match against the excluded list; so should be OK (if we've covered all likely bases)
    if (voiceOneMatch == -1 || voiceTwoMatch == -1)
        return true;

    // Can't use these simultaneously
    if (voiceOneMatch == voiceTwoMatch)
        return false;

    // Can use, say, eSpeak and CereVoice simultaneously
    return true;
}

#endif
    // __WXMSW__
