#-------------------------------------------------------------------------------
# Name:       qar_to_audio_converter.py
# Purpose:    Convert text in SlideSpeech QAR sequence to audio files
#
# Authors:    Glenn Ramsey <glenn.ramsey@slidespeech.com>
#             John Graves <john.graves@slidespeech.com>
#
# Copyright:  (c) Slidespeech Ltd 2012
# Licence:    MIT license
#-------------------------------------------------------------------------------
import sys
from concurrent import futures

from converter import *
from converter_error import ConverterError

if sys.platform.startswith("linux"):
    from audio_converter_linux import *
elif sys.platform.startswith("darwin"):
    from audio_converter_osx import *
elif sys.platform.startswith("win"):
    from audio_converter_windows import *
else:
    raise NotImplementedError()

from utility import *

# Fail fast at import time: every external tool the platform-specific
# audio backend relies on must be present on this machine.
for tool in AudioConfig.dependencies:
    if findDependency(tool) is None:
        raise RuntimeError("Missing dependency: {0}".format(tool))

def convertAudio(text, language, path, basename):
    """Synthesize `text` to speech and encode it as both .ogg and .mp3.

    text     -- the text to speak
    language -- language code passed to the TTS backend (e.g. 'en')
    path     -- directory in which the audio files are created
    basename -- file name (without extension) for the generated files

    Raises RuntimeError if either encoder reports failure.
    """
    # textToAudio produces an intermediate (uncompressed) audio file.
    inFileName = textToAudio(text, language, path, basename)

    try:
        # Encode the intermediate file into each target format.
        for extension, encode, encoderName in (
                ('.ogg', encodeOgg, AudioConfig.oggEncoder),
                ('.mp3', encodeMp3, AudioConfig.mp3Encoder)):
            outFileName = os.path.join(path, basename + extension)
            if encode(path, inFileName, outFileName):
                raise RuntimeError(
                    "Could not generate {0} audio, {1} failed".format(
                        extension, encoderName))
    finally:
        # Always remove the intermediate file, even when an encoder
        # failed (previously it leaked on error).
        os.remove(inFileName)


def textToSpeech(textList, path, prefix):
    """Create audio files for every entry in `textList`.

    textList -- list of named tuples with fields (text, language, part)
    path     -- output directory for the audio files
    prefix   -- file name prefix; each file is named prefix + part

    Raises ConverterError (with the original exception attached as
    `cause`) if any conversion fails.
    """
    try:
        # Run the conversions in parallel: each conversion mostly waits
        # on external TTS/encoder processes, so threads overlap the I/O.
        with futures.ThreadPoolExecutor(max_workers=maxWorkers(textList)) as executor:
            fs = [executor.submit(convertAudio, text.text, text.language,
                                  path, "{0}{1}".format(prefix, text.part))
                  for text in textList]

            # result() re-raises any exception from the worker thread
            # (unlike re-raising future.exception(), this keeps the
            # future's error-handling on the standard code path).
            for future in futures.as_completed(fs):
                future.result()

    except Exception as e:
        ex = ConverterError("Audio conversion failed: {0}".format(e))
        ex.cause = e
        raise ex


class QARToAudioConverter(Converter):
    """Converter stage that turns the text of a QAR script into audio files.

    Reads the script produced by the previous converter stage and writes
    one audio file (per format) for every slide voice-over, question,
    answer and answer response.
    """

    # Single shared record type for the items passed to textToSpeech
    # (previously an identical namedtuple class was re-created inside
    # every loop iteration).
    AudioText = namedtuple('Text', 'text language part')

    def __init__(self, previous=None):
        Converter.__init__(self, previous)

    def _convertImpl(self):
        """Extract narration text from the script and synthesize audio.

        Files are written to <tempDir>/audio, named from the slide
        number 'n', question counter 'c' and 1-based answer index,
        e.g. Slide1, Slide2q1, Slide2q1a1, Slide2q1r1.

        Raises ConverterError if a required field is missing from the
        script.
        """
        path = os.path.join(self.tempDir(), "audio")
        try:
            os.makedirs(path)
        except OSError:
            # dir already exists, shouldn't happen
            pass

        prefix = 'Slide'

        textList = []
        try:
            seq = self.script()['S']
            # Extract the speaker notes from the slides and add to a list
            for q in seq:
                questionText = q['q']
                lang = q.get('l', 'en')
                if 'A' not in q:
                    # Plain slide: a single voice-over part.
                    textList.append(self.AudioText(
                        questionText, lang, "{0}".format(q['n'])))
                else:
                    # add question
                    textList.append(self.AudioText(
                        questionText, lang, "{0}q{1}".format(q['n'], q['c'])))

                    # add answers (1-based index in the part name)
                    for answerNumber, a in enumerate(q['A'], 1):
                        answerLang = a.get('l', 'en')
                        textList.append(self.AudioText(
                            a['a'], answerLang,
                            '{0}q{1}a{2}'.format(q['n'], q['c'], answerNumber)))
                        # add response (optional; spoken in the answer's
                        # language, matching the original behavior)
                        if 'r' in a:
                            textList.append(self.AudioText(
                                a['r'], answerLang,
                                '{0}q{1}r{2}'.format(q['n'], q['c'], answerNumber)))

        except KeyError as e:
            raise ConverterError("Field missing in slide script", e)

        # create the audio files
        textToSpeech(textList, path, prefix)

if __name__ == '__main__':
    # A simple test/demo: replace the script's slide sequence with a
    # small hand-written one and run the audio conversion over it.
    notesConverter = NotesToQARConverter()
    script = notesConverter.script()
    del script['S']
    script['S'] = [{'q': u' A small river with some flowers.\n?\nWhere is this city located?\nhttp://test.com In New York;;\nIn Arizona;', 'l': 'en'}, {'q': u' In New York City.', 'l': 'en'}]

    audioConverter = QARToAudioConverter(notesConverter)
    audioConverter.convert()

    audioDir = os.path.join(notesConverter.tempDir(), "audio")
    print("Audio files in {0}".format(audioDir))
    for fileName in os.listdir(audioDir):
        print(fileName)