#!/usr/bin/python
import roslib; roslib.load_manifest('speech_recognition')
import rospy
from std_msgs.msg import String
from geometry_msgs.msg import Point

from config import *
from struct import pack

import utterance
import stream_manager
import voice_activity_detector
import recognizer

import subprocess
import xmlrpclib
import sys
import time

PACKAGE_PATH = roslib.packages.get_pkg_dir(PACKAGE_NAME)

################################################################################
# Speech Handling
################################################################################

class SpeechRecognizer:
    """Records audio, runs voice-activity detection, and streams detected
    speech to a remote recognizer, returning the n-best hypotheses."""

    def __init__(self):
        # Stream Manager object: handles recording, etc.
        self.sm = stream_manager.StreamManager(PACKAGE_PATH, WAV_FILENAME,
                                               channels=NUM_CHANNELS)
        # Voice activity detector: decides whether speech is being detected
        self.vad = voice_activity_detector.VoiceActivityDetector()
        # Used for Timo's learning server; not part of the whole system yet
        self.learning_server = xmlrpclib.Server(LEARNING_ADDWORD_URL)
        # This publishes messages to terminal_input.py, e.g. recognition results
        self.pub = rospy.Publisher(ASR_MSGS_TOPIC, String)
        # Object for interfacing with the recognizer server
        self.rec = recognizer.Recognizer()

        # Timestamp used to report end-to-end processing latency; reset in
        # record_speech() at the moment the speaker stops talking.
        self.start_time = time.time()
        # When False, skip re-calibrating the VAD on the next recording pass
        # (set after a failed recognition so the retry starts immediately).
        self.restart_vad = True

    # Call this to get speech; returns an Utterance
    def get_speech(self, recognizer_url):
        """Record one utterance, stream it to recognizer_url, and return the
        resulting Utterance — or None if the recognizer heard nothing."""
        # If the VAD hasn't set a noise energy yet, do that now
        if self.vad.noise_energy == 0:
            self.vad.start()
        # Open a connection to the server for streaming
        self.rec.start(recognizer_url)
        # This streams speech to the recognizer in addition to recording
        self.record_speech()
        guess = self.get_utterance()
        guess.print_nbest()
        # If the recognizer didn't return anything, it was probably not speech,
        # so we should try again
        if not guess.phrase:
            self.pub.publish("Didn't register speech. Trying again.")
            self.restart_vad = False
            return None
        # This resets Timo's word learning thing and unlearns learned words
        if "reset" in guess.phrase:
            self.learning_server.reset()
        # Pause the main loop until the user asks to resume.
        # NOTE(review): relies on the module-level `handler` created in
        # __main__; reading a global needs no `global` declaration.
        handler.switch("stop")
        # All of this is just sending output to terminal_input.py.
        # Format elapsed time to two decimals; the old `str(...)[:4]` slice
        # silently dropped digits for durations over five characters.
        processing_time = "%.2f" % (time.time() - self.start_time)
        self.pub.publish("DETECTED SPEECH: " + guess.phrase)
        self.pub.publish("Processing took %s seconds. Press enter to start talking again."
                         % processing_time)
        # Return the Utterance so we can do something with it
        return guess

    def record_speech(self):
        """Stream microphone audio until the VAD reports end of speech, then
        drain the remaining buffer to the recognizer and write the WAVs."""
        # Start streaming
        self.sm.start()

        if self.restart_vad:
            self.vad.restart()
        else:
            # One-shot skip requested by get_speech(); re-arm for next time.
            self.restart_vad = True
        prev_talking = False      # speech has been detected at least once
        prev_pub = False          # "start talking" prompt already published
        sending_buffer = False    # currently streaming buffered audio out
        recording_buffer = True   # still appending new frames to the buffer
        count = 0
        if self.vad.measuring_noise:
            self.pub.publish("Measuring ambient noise. Please wait.")
        while True:
            self.sm.read()
            # Data for just one channel
            data = self.sm.channel_frame(0)
            self.vad.update(data)
            # Send a message to terminal_input.py after 10 frames or so
            if not self.vad.measuring_noise and not prev_pub:
                if count >= 10:
                    self.pub.publish("You can start talking now.")
                    prev_pub = True
                count += 1
            if sending_buffer:
                # While we're streaming to the recognizer, we send the
                # oldest data in the buffer
                if len(self.sm.buffer) > 0:
                    self.rec.send_chunk(self.sm.buffer.popleft())
                    print("streaming audio to recognizer")
                # When the buffer is empty, stop streaming and return
                else:
                    print("stopped streaming")
                    self.sm.stop()
                    self.sm.write_files()
                    return None
            if recording_buffer:
                self.sm.record_buffer(data)
                print("recorded buffer")

            # While we detect speech, record and send the buffer
            if self.vad.is_talking():
                self.sm.record()
                prev_talking = True
                sending_buffer = True
            elif prev_talking:
                # Speaker just stopped: timestamp for latency reporting, and
                # stop growing the buffer so the drain above can finish.
                self.start_time = time.time()
                recording_buffer = False
                print("stopped recording buffer")
            # Currently this only still exists for recording purposes;
            # needs to be refactored
            else:
                self.sm.record_prebuffer()

    # Gets the nbest from the recognizer and returns an Utterance
    def get_utterance(self):
        """Fetch the n-best list from the recognizer, wrapped in an Utterance."""
        nbest = self.rec.get_results()
        return utterance.Utterance(nbest)

################################################################################
# Speech Handlers
################################################################################

# This is mainly just a wrapper for the existing modes because I didn't want
# to have a million global variables
# This is mainly just a wrapper for the existing modes because I didn't want
# to have a million global variables
class ModeHandler:
    """Owns the processing modes (command publishing vs. word learning) and
    the shared SpeechRecognizer, and tracks whether the node is idle."""

    def __init__(self):
        self.word_learner = WordLearner()
        self.command_publisher = CommandPublisher()
        self.speech_recognizer = SpeechRecognizer()

        # Off by default
        self.idle = True

        # Default mode is commands
        self.mode = self.command_publisher

    # Handle switching between modes
    def switch(self, mode_name):
        """Switch mode or start/stop processing; mode_name is matched
        case-insensitively. Unknown names are reported and ignored."""
        # Lower-case once instead of on every comparison.
        name = mode_name.lower()
        if name == "learning":
            self.mode = self.word_learner
            self.speech_recognizer.pub.publish("Changed mode to learning.")
        elif name == "commands":
            self.mode = self.command_publisher
            self.speech_recognizer.pub.publish("Changed mode to commands.")
        elif name == "stop":
            print("Stopped processing")
            self.idle = True
        elif name == "start":
            print("Started processing")
            self.idle = False
        else:
            print("Mode switch not successful")

    def get_speech(self):
        """Record/recognize one utterance using the current mode's recognizer."""
        return self.speech_recognizer.get_speech(self.mode.recognizer_url)

    def process(self, text):
        """Delegate the recognized Utterance to the active mode."""
        self.mode.process(text)

# NOTE: All modes need to have a process(self, utterance) method

# Used for Timo's word learning; not in use right now
class WordLearner:
    """Mode that confirms and learns previously-unknown words through the
    word-learning XML-RPC server (currently unused)."""

    def __init__(self):
        # Seed with an empty utterance so process() always has a predecessor.
        self.prev_utterance = utterance.Utterance([(0, "")])
        self.recognizer_url = LEARNING_RECOGNIZER_URL
        self.addword_server = xmlrpclib.Server(LEARNING_ADDWORD_URL)

    def process(self, utterance):
        """React to one recognized utterance and remember it for next time.

        NOTE: the parameter shadows the module-level `utterance` import.
        """
        if utterance.phrase.strip() == "yes":
            # User confirmed the previous guess -> learn that word.
            self.learn_word(self.prev_utterance)
            stream_manager.synthesize("Word learned")
            print("word learned")
        elif utterance.learned_word != "":
            stream_manager.synthesize("I already know "+utterance.learned_word)
        elif utterance.new_word != "":
            text = "I think you said a word I don't know. Did you say "+utterance.new_word+"?"
            stream_manager.synthesize(text)
        else:
            stream_manager.synthesize("I think you said "+utterance.phrase)
        self.prev_utterance = utterance

    def learn_word(self, utterance):
        """Send the unknown word's raw form to the add-word server."""
        print("Learning word: "+utterance.new_word+" ("+utterance.new_word_raw+")")
        self.addword_server.message(utterance.new_word_raw)


# Publishes speech recognition output to the robot
# Publishes speech recognition output to the robot
class CommandPublisher:
    """Mode that forwards recognized speech to the robot over ROS."""

    def __init__(self):
        self.pub = rospy.Publisher(ASR_OUTPUT_TOPIC, String)
        self.recognizer_url = COMMANDS_RECOGNIZER_URL

    def process(self, utterance):
        """Publish the utterance's phrase on the ASR output topic."""
        phrase = utterance.phrase
        print("sending: '%s'" % phrase)
        self.pub.publish(phrase)
        print("sent message")
    
################################################################################
# Main Loop
################################################################################

def change_mode(string):
    """rospy subscriber callback: switch the global handler into the mode
    named by the incoming std_msgs String message."""
    global handler
    requested = string.data
    handler.switch(requested)

if __name__ == '__main__':
    rospy.init_node(ASR_NODE_NAME, anonymous=False)
    #pub_loc = rospy.Publisher("blocknlp/speech_localization", Point)
    rospy.Subscriber(ASR_CHANGEMODE_TOPIC, String, change_mode)

    # Module-level name read by change_mode() and SpeechRecognizer.get_speech().
    # (A `global` statement at module scope is a no-op, so none is needed.)
    handler = ModeHandler()

    # Start in active mode for testing
    #handler.idle = False

    # Main loop: while active, record one utterance and hand it to the
    # current mode; while idle, sleep briefly instead of busy-spinning.
    while not rospy.is_shutdown():
        if handler.idle:
            time.sleep(0.1)  # avoid pegging a CPU core while paused
            continue
        guess = handler.get_speech()
        if guess is not None:
            handler.process(guess)
