# -*- coding: utf-8-*-
"""
    The cmic class handles all interactions with the microphone and speaker.
"""
from __future__ import absolute_import
import ctypes
import logging
import tempfile
import wave
import audioop
import time
import pyaudio
from . import dingdangpath
from . import mute_alsa
from .app_utils import wechatUser
from . import config
from . import player
from . import plugin_loader

from . import statusNotice
# from client.cmic import Audio
from client.cmic.speech_recognizer import  SpeechRecognizer
import Queue as queue



class Mic:
    # Cached recognizer instances. They are never assigned in this file;
    # presumably other modules read/populate them -- kept for compatibility.
    speechRec = None
    speechRec_persona = None

    def __init__(self, speaker, passive_stt_engine, active_stt_engine):
        """
        Initialize the microphone/speaker front end.

        Arguments:
        speaker -- handles platform-independent audio output
        passive_stt_engine -- performs STT while Dingdang is in passive
                              listen mode
        active_stt_engine -- performs STT while Dingdang is in active
                             listen mode
        """
        self.robot_name = config.get('robot_name_cn', u'叮当')
        self._logger = logging.getLogger(__name__)
        self.speaker = speaker
        # Optional WeChat bot; owners assign this after construction.
        self.wxbot = None
        self.passive_stt_engine = passive_stt_engine
        self.active_stt_engine = active_stt_engine
        self.dingdangpath = dingdangpath
        self._logger.info("Initializing PyAudio. ALSA/Jack error messages " +
                          "that pop up during this process are normal and " +
                          "can usually be safely ignored.")

        self.audio = SpeechRecognizer()
        self._logger.info("Initialization of PyAudio completed.")
        self.sound = player.get_sound_manager(self.audio)
        # Flags driven by the outer listen loop / other components.
        self.stop_passive = False
        self.skip_passive = False
        self.chatting_mode = False
        # Optional LED controller; set per-call via activeListenToAllOptions.
        self.pixels = None

    def __del__(self):
        # Best-effort release of the audio capture device on teardown.
        self.audio.StopCapture()

    def stopPassiveListen(self):
        """
        Ask the passive listening loop to stop.
        """
        self.stop_passive = True

    def passiveListen(self, PERSONA):
        """
        Listens for PERSONA in everyday sound. Times out after LISTEN_TIME, so
        needs to be restarted.

        Returns a tuple (detected, payload): (True, PERSONA) when the wake
        word was heard, otherwise (False, transcription-or-None).
        """
        attachment = self.audio.Recognize(timeout=10)
        frames = []
        for chunk in attachment:
            frames.append(chunk)

        transcribed = self.passive_stt_engine.transcribe_keyword(
            ''.join(frames))

        if transcribed is not None and \
           any(PERSONA in phrase for phrase in transcribed):
            return True, PERSONA

        return False, transcribed

    def activeListen(self, THRESHOLD=None, LISTEN=True, MUSIC=False):
        """
            Records until a second of silence or times out after 12 seconds

            Returns the first matching string or None
        """
        options = self.activeListenToAllOptions(THRESHOLD, LISTEN, MUSIC)
        if options:
            return options[0]

    def activeListenToAllOptions(self, THRESHOLD=None, LISTEN=True,
                                 MUSIC=False, pixels=None):
        """
            Records until a second of silence or times out after 12 seconds

            Returns a list of the matching options or None
        """
        self.pixels = pixels
        self.beforeListen()  # fixed: stray trailing semicolon removed
        attachment = self.audio.Recognize()
        frames = []
        # Sample rate assumed to match what SpeechRecognizer captures at
        # -- TODO confirm against the recognizer's configuration.
        RATE = 16000
        for chunk in attachment:
            frames.append(chunk)

        # Wrap the raw captured frames in a mono 16-bit WAV container,
        # which is what the STT engine's transcribe() expects.
        with tempfile.SpooledTemporaryFile(mode='w+b') as f:
            wav_fp = wave.open(f, 'wb')
            wav_fp.setnchannels(1)
            wav_fp.setsampwidth(pyaudio.get_sample_size(pyaudio.paInt16))
            wav_fp.setframerate(RATE)
            wav_fp.writeframes(''.join(frames))
            wav_fp.close()
            f.seek(0)
            # NOTE(review): beforeListen() was called above but there is no
            # balancing afterListen() call -- verify whether the status
            # notice should be closed out here.
            return self.active_stt_engine.transcribe(f)

    def beforeListen(self):
        """
        Notify observers (wxbot / pixels) that recording is about to start.
        """
        statusNotice.beforeListen(self, self.wxbot, self.pixels)

    def afterListen(self):
        """
        Notify observers (wxbot / pixels) that recording has finished.
        """
        statusNotice.afterListen(self, self.wxbot, self.pixels)

    # Backward-compatible alias: this method was originally misspelled
    # "afterLister"; existing callers keep working through the alias.
    afterLister = afterListen

    def say(self, phrase,
            OPTIONS=" -vdefault+m3 -p 40 -s 160 --stdout > say.wav",
            cache=False):
        """
        Speak the given phrase through the configured speaker.

        Passive listening is suspended while speaking and for one extra
        second afterwards so the robot does not wake itself up. If a
        WeChat bot is attached, the phrase is also forwarded to it.
        """
        self._logger.info(u"机器人说：%s" % phrase)
        statusNotice.beforeSay(self, self.wxbot, self.pixels)
        self.stop_passive = True
        if self.wxbot is not None:
            wechatUser(config.get(), self.wxbot, "%s: %s" %
                       (self.robot_name, phrase), "")
        # In case the caller's say() method has not implemented the cache
        # feature yet: with cache support the arg count is 3
        # (self, phrase, cache).
        if self.speaker.say.__code__.co_argcount > 2:
            self.speaker.say(phrase, cache)
        else:
            self.speaker.say(phrase)
        statusNotice.afterSay(self, self.wxbot, self.pixels)
        time.sleep(1)  # avoid waking Dingdang up with its own speech
        self.stop_passive = False

    def play(self, src):
        """
        Play an audio file, blocking until playback finishes.
        """
        self.sound.play_block(src)

    def play_no_block(self, src):
        """
        Play an audio file without blocking.
        """
        self.sound.play(src)



