import cv2
import os
from pocketsphinx import LiveSpeech, get_model_path
import speech_recognition as sr
import subprocess
import pyttsx3
import time
import _thread

# Non-zero: read phrases from debug.txt instead of capturing microphone audio.
debug_flag = 0

# Current TTS language: 'zh' (Mandarin via SAPI5) or 'zh-yue' (Cantonese via espeak).
speak_lang = 'zh'

# States of the baike "speak" worker thread, shared through speakctrl.txt.
SPEAK_THREAD_STOPPED = 'stopped'
SPEAK_THREAD_RUNNING = 'running'
SPEAK_THREAD_CANCLE = 'cancle'  # NOTE(review): misspelling of 'cancel' kept — sibling processes read this exact literal
SPEAK_THREAD_EXIT = 'exit'

# Player states reported back through mp3status.txt.
MP3_STATUS_STOPPED = 'stopped'
MP3_STATUS_PLAYING = 'playing'
MP3_STATUS_PAUSED = 'paused'

# Commands sent to the mp3 player process through mp3ctrl.txt.
MP3_CMD_IDLE = 'idle'
MP3_CMD_NEXT = 'next'
MP3_CMD_BACK = 'back'
MP3_CMD_SEARCH = 'search'
MP3_CMD_PAUSE = 'pause'
MP3_CMD_RESUME = 'resume'
MP3_CMD_STOP = 'stop'
MP3_CMD_EXIT = 'exit'

# Commands exchanged with the face-recognition process through facecmd.txt.
FACE_CMD_LEARN = 'learn'
FACE_CMD_STOP = 'stop'
FACE_CMD_EXIT = 'exit'

def getFaceCMD():
	"""Return the current face-module command (first line of facecmd.txt).

	Returns:
		str: one of the FACE_CMD_* values, as written by setFaceCMD().
	"""
	# The original while/break loop never retried (no try/except around the
	# open), so it was dead code; 'with' also makes the manual close redundant.
	with open(r"c:\AICHAIN\myAI\test\facecmd.txt", 'r', encoding='utf-8') as f:
		return f.readline()

def setFaceCMD(cmdstr):
	"""Write cmdstr (a FACE_CMD_* value) to the shared facecmd.txt file."""
	# The retry loop and manual close were dead code (the loop always broke
	# after the first successful write); a plain 'with' block is equivalent.
	with open(r"c:\AICHAIN\myAI\test\facecmd.txt", 'w', encoding='utf-8') as f:
		f.write(cmdstr)

def getCurFaceName():
	"""Return the name of the face currently in view (from curfacename.txt).

	Returns:
		str: the recognized person's name, or a string containing 'unkown'
		(sic — spelling used by the face process) when nobody is recognized.
	"""
	# Dead retry loop and redundant close removed; behavior is unchanged.
	with open(r"c:\AICHAIN\myAI\test\curfacename.txt", 'r', encoding='utf-8') as f:
		return f.readline()

def setCurFaceName(name):
	"""Publish the name of the currently visible face to curfacename.txt."""
	# Dead retry loop and redundant close removed; behavior is unchanged.
	with open(r"c:\AICHAIN\myAI\test\curfacename.txt", 'w', encoding='utf-8') as f:
		f.write(name)

def getFaceName():
	"""Return the name queued for the face module to learn (facename.txt)."""
	# Dead retry loop and redundant close removed; behavior is unchanged.
	with open(r"c:\AICHAIN\myAI\test\facename.txt", 'r', encoding='utf-8') as f:
		return f.readline()

def setFaceName(name):
	"""Queue a name for the face module to associate with the current face."""
	# Dead retry loop and redundant close removed; behavior is unchanged.
	with open(r"c:\AICHAIN\myAI\test\facename.txt", 'w', encoding='utf-8') as f:
		f.write(name)

def getSpeakThreadMode():
	"""Return the baike speak-thread state (a SPEAK_THREAD_* value).

	Read from speakctrl.txt, which the speak thread and this process share.
	"""
	# The original default assignment before the loop was dead (the loop
	# either returned the file content or propagated the open error), and
	# the loop itself never retried — both removed.
	with open(r"c:\AICHAIN\myAI\test\speakctrl.txt", 'r', encoding='utf-8') as f:
		return f.readline()

def setSpeakThreadMode(mode):
	"""Write mode (a SPEAK_THREAD_* value) to the shared speakctrl.txt file."""
	# Dead retry loop and redundant close removed; behavior is unchanged.
	with open(r"c:\AICHAIN\myAI\test\speakctrl.txt", 'w', encoding='utf-8') as f:
		f.write(mode)

def execute_unix(inputcommand):
	"""Run inputcommand through the system shell and return its stdout (bytes).

	SECURITY NOTE: shell=True hands the string to the shell verbatim —
	never call this with untrusted input.
	"""
	completed = subprocess.run(inputcommand, stdout=subprocess.PIPE, shell=True)
	return completed.stdout

def callSearch(searchStr):
	"""Run a baike (encyclopedia) search for searchStr via myWebForm.exe.

	Writes the query to baikectrl.txt, launches the external helper, then
	polls the same file once a second until the helper rewrites it with a
	line containing 'completed'.
	"""
	# Fix: the query file was opened without a context manager.
	with open(r"c:\AICHAIN\myAI\test\baikectrl.txt", 'w', encoding='utf-8') as msgfile:
		msgfile.write(searchStr)

	c = 'C:\\AICHAIN\\myAI\\myWebForm\\myWebForm\\bin\\Release\\myWebForm.exe'
	ret = execute_unix(c)
	print (ret)

	# Block until the helper signals completion through the control file.
	while True:
		with open(r"c:\AICHAIN\myAI\test\baikectrl.txt", 'r', encoding='utf-8') as f:
			runflag = f.readline()
		if runflag.find('completed') >= 0 :
			break
		time.sleep(1)

def espeakSay(wordsStr):
	"""Speak wordsStr through the espeak CLI using the Cantonese voice.

	The text is written to a UTF-8 file first because the Windows 7 console
	cannot pass UTF-8 strings on the command line, while espeak can read
	UTF-8 from a file (-f).
	"""
	# Fix: the message file was opened without a context manager.
	with open(r"c:\AICHAIN\myAI\test\msg.txt", 'w', encoding='utf-8') as msgfile:
		msgfile.write(wordsStr)

	# -vzhy Cantonese voice, -s150 speed, -g2 word gap, -a200 amplitude.
	c = 'espeak -vzhy -s150 -g2 -a200 -f c:\\AICHAIN\\myAI\\test\\msg.txt'
	ret = execute_unix(c)
	print (ret)

def sapi5Say(wordsStr):
	"""Speak wordsStr through the Windows SAPI5 engine via pyttsx3."""
	tts = pyttsx3.init('sapi5')
	# Slow the speech rate down from the pyttsx3 default of 200.
	tts.setProperty('rate', 120)
	tts.say(wordsStr)
	tts.runAndWait()

def robotSay(wordsStr):
	"""Speak wordsStr with the TTS backend matching the current language.

	Cantonese ('zh-yue') goes through espeak; Mandarin ('zh') and any other
	value fall back to the SAPI5 voice — exactly as the original branching.
	"""
	global speak_lang

	if speak_lang == 'zh-yue':
		espeakSay(wordsStr)
	else:
		sapi5Say(wordsStr)

def getMp3Status():
	"""Return the mp3 player state (an MP3_STATUS_* value) from mp3status.txt."""
	# Dead default assignment, retry loop and redundant close removed;
	# behavior is unchanged.
	with open(r"c:\AICHAIN\myAI\test\mp3status.txt", 'r', encoding='utf-8') as f:
		return f.readline()

def setMp3Status(status_flag):
	"""Write status_flag (an MP3_STATUS_* value) to the shared mp3status.txt."""
	# Dead retry loop and redundant close removed; behavior is unchanged.
	with open(r"c:\AICHAIN\myAI\test\mp3status.txt", 'w', encoding='utf-8') as f:
		f.write(status_flag)

def resetMp3CMD():
	"""Reset the mp3 command file back to MP3_CMD_IDLE (command consumed)."""
	# Dead retry loop and redundant close removed; behavior is unchanged.
	with open(r"c:\AICHAIN\myAI\test\mp3ctrl.txt", 'w', encoding='utf-8') as f:
		f.write(MP3_CMD_IDLE)

def getMp3CMD():
	"""Return the pending mp3 command (an MP3_CMD_* value) from mp3ctrl.txt."""
	# Dead default assignment, retry loop and redundant close removed;
	# behavior is unchanged.
	with open(r"c:\AICHAIN\myAI\test\mp3ctrl.txt", 'r', encoding='utf-8') as f:
		return f.readline()

def setMp3CMD(mode):
	"""Queue an MP3_CMD_* command for the mp3 player process.

	Blocks (polling once a second) until the player has consumed the
	previous command — i.e. the control file is back to MP3_CMD_IDLE — so
	a new command never overwrites an unprocessed one.
	"""
	# Guard-clause form of the original loop; the trailing sleep after the
	# write was unreachable and has been dropped.
	while getMp3CMD() != MP3_CMD_IDLE :
		time.sleep(1)

	with open(r"c:\AICHAIN\myAI\test\mp3ctrl.txt", 'w', encoding='utf-8') as f:
		f.write(mode)

def setMp3SearchKey(keystr):
	"""Write the song search keywords for the mp3 player to mp3.txt."""
	# Dead retry loop and redundant close removed; behavior is unchanged.
	with open(r"c:\AICHAIN\myAI\test\mp3.txt", 'w', encoding='utf-8') as f:
		f.write(keystr)

def getMp3SearchKey():
	"""Return the song search keywords last queued in mp3.txt."""
	# The 'hello' default was dead code (the function either returned the
	# file content or propagated the open error); loop removed likewise.
	with open(r"c:\AICHAIN\myAI\test\mp3.txt", 'r', encoding='utf-8') as f:
		return f.readline()

def soundRecog(audio_file):
	"""Recognize speech from a WAV file with CMU Sphinx and print the text."""
	recognizer = sr.Recognizer()

	with sr.AudioFile(audio_file) as src:
		recorded = recognizer.record(src)
		# NOTE(review): 'zh-CN2' looks like a locally-installed model name —
		# confirm that language pack exists for recognize_sphinx.
		print('文本内容：',recognizer.recognize_sphinx(recorded, language='zh-CN2'))

def microRecogEN():
	"""Continuously recognize English speech from the microphone with Sphinx.

	Reads appctrl.txt between utterances and exits when that file contains
	'exit'. Each recognized phrase is only printed, not acted upon.
	"""
	r = sr.Recognizer()

	with sr.Microphone() as source:
		# Listen for 5 seconds once to calibrate the energy threshold
		# against ambient noise levels.
		r.adjust_for_ambient_noise(source, duration = 5)

		while True:
			runflag = "0"
			with open(r"c:\AICHAIN\myAI\test\appctrl.txt", 'r', encoding='utf-8') as f:
				runflag = f.readline()
			print ("run flag =", runflag)
			if runflag.find('exit') >= 0 :
				break

			print('say something')
			try:
				audio = r.listen(source, timeout=5, phrase_time_limit=5)
				print('got some voice...')
			# Fix: bare 'except:' also swallowed KeyboardInterrupt/SystemExit;
			# listen() raises WaitTimeoutError when no speech starts in time.
			except sr.WaitTimeoutError :
				print("Exception : no sound ...")
			else :
				try:
					print('call recognize_sphinx...')
					print("Sphinx thinks you said: " + r.recognize_sphinx(audio, language='en-US'))
				except (sr.UnknownValueError, sr.RequestError) :
					print("Exception : can not access recognize_sphinx!")

def microRecogZH():
	"""Continuously recognize Mandarin speech from the microphone with Sphinx.

	Reads appctrl.txt between utterances and exits when that file contains
	'exit'. Unlike microRecogEN, the noise calibration is re-done (0.5 s)
	before every utterance.
	"""
	r = sr.Recognizer()

	with sr.Microphone() as source:
		while True:
			runflag = "0"
			with open(r"c:\AICHAIN\myAI\test\appctrl.txt", 'r', encoding='utf-8') as f:
				runflag = f.readline()
			print ("run flag =", runflag)
			if runflag.find('exit') >= 0 :
				break

			# Re-calibrate the energy threshold against ambient noise.
			r.adjust_for_ambient_noise(source, duration = 0.5)

			print('say something')
			try:
				audio = r.listen(source, timeout=3, phrase_time_limit=10)
				print('got some voice...')
			# Fix: bare 'except:' also swallowed KeyboardInterrupt/SystemExit;
			# listen() raises WaitTimeoutError when no speech starts in time.
			except sr.WaitTimeoutError :
				print("Exception : no sound ...")
			else :
				try:
					print('call recognize_sphinx...')
					print("Sphinx thinks you said: " + r.recognize_sphinx(audio, language='zh-CN'))
				except (sr.UnknownValueError, sr.RequestError) :
					print("Exception : can not access recognize_sphinx!")

def microRecogGoogle():
	"""Main Mandarin voice-command loop using Google speech recognition.

	Listens on the default microphone, recognizes each utterance with
	recognize_google (zh-CN) and dispatches on keywords in the phrase:
	small talk, face greeting/learning (via the facecmd/facename files),
	baike search (callSearch + the speak thread), and mp3 player control
	(via the mp3ctrl command file). With debug_flag set, phrases are read
	from debug.txt instead of the microphone. The loop exits when the user
	says '关机', after telling all sibling processes to exit.

	BUG FIXES vs. the original:
	- the 'else' at the 我是谁 branch was missing its colon (SyntaxError);
	- the name-learning retry loop never stopped after a successful learn,
	  so it kept re-asking and always spoke the failure line;
	- bare 'except:' clauses (which also swallow KeyboardInterrupt) were
	  narrowed to 'except Exception'.
	"""
	global speak_lang

	r = sr.Recognizer()
	r.energy_threshold = 4000

	with sr.Microphone() as source:
		while True:
			print('say something')
			try:
				if debug_flag :
					# Debug mode: no audio capture, just pace the loop.
					time.sleep(3)
				else :
					audio = r.listen(source, timeout=3, phrase_time_limit=5)
				print('got some voice...')
			except Exception :
				print("Exception : no sound ...")
			else :
				try:
					if debug_flag :
						# Read the simulated phrase, then immediately reset
						# the file so each debug phrase is consumed once.
						with open(r"c:\AICHAIN\myAI\test\debug.txt", 'r', encoding='utf-8') as f:
							phrase = f.readline()

						with open(r"c:\AICHAIN\myAI\test\debug.txt", 'w', encoding='utf-8') as f:
							f.write('nothing')

						if phrase == 'nothing' :
							print("no debug phrase ...")
							continue

						print('get debug phrase:', phrase)
					else :
						print('call recognize_google...')
						phrase = r.recognize_google(audio, language='zh-CN')

					print("AI thinks you said: " + phrase)
				except Exception :
					print("Exception : can not access recognize_google!")
				else :
					# Most commands are only accepted while both the baike
					# speak thread and the mp3 player are stopped.
					speaktrhead_mode = getSpeakThreadMode()
					if phrase.find('测试') >= 0 :
						if speaktrhead_mode != SPEAK_THREAD_STOPPED or getMp3Status() != MP3_STATUS_STOPPED :
							continue
						else :
							robotSay('可以工作了')

					elif phrase.find('太慢了') >= 0 :
						if speaktrhead_mode != SPEAK_THREAD_STOPPED or getMp3Status() != MP3_STATUS_STOPPED :
							continue
						else :
							robotSay('要有耐心，你应该向唐僧学习，猴急猴急的人类！')

					elif phrase.find('你认识我吗') >= 0 or phrase.find('我是谁') >= 0:
						if speaktrhead_mode != SPEAK_THREAD_STOPPED or getMp3Status() != MP3_STATUS_STOPPED :
							continue
						else :
							name = getCurFaceName()
							if name.find('unkown') >= 0 :
								robotSay('我不认识你，四海皆兄弟，相见就是缘分，要不报个名字？')
							else :
								# Fix: this 'else' was missing its colon.
								wordStr = '你是鼎鼎大名的' + name + '，真想和你来个法式拥抱'
								robotSay(wordStr)

					elif phrase.find('认识一下') >= 0 or phrase.find('交个朋友') >= 0 :
						if speaktrhead_mode != SPEAK_THREAD_STOPPED or getMp3Status() != MP3_STATUS_STOPPED :
							continue
						else :
							robotSay('我很荣幸，请问你叫什么名字？')
							retry_name = 3
							learned = False
							# Ask up to three times for the user's name.
							while retry_name > 0 and not learned :
								retry_name = retry_name - 1
								try:
									audio = r.listen(source, timeout=3, phrase_time_limit=5)
									print('got some voice...')
								except Exception :
									print("Exception : no sound ...")
									robotSay('请问你叫什么名字？')
								else :
									try:
										print('call recognize_google...')
										name_phrase = r.recognize_google(audio, language='zh-CN')
										print("AI thinks you said: " + name_phrase)
									except Exception :
										print("Exception : can not access recognize_google!")
										robotSay('请问你叫什么名字？')
									else :
										# Strip a leading "我是…/我叫…" prefix
										# so only the bare name remains.
										if name_phrase.find('是') >= 0 :
											name_phrase = name_phrase[name_phrase.find('是')+1:]
										elif name_phrase.find('叫') >= 0 :
											name_phrase = name_phrase[name_phrase.find('叫')+1:]

										setFaceName(name_phrase)
										setFaceCMD(FACE_CMD_LEARN)

										# Wait for the face module to finish
										# learning the new face.
										while getFaceCMD() != FACE_CMD_STOP :
											time.sleep(1)

										# Fix: stop retrying after success.
										learned = True

							if not learned :
								robotSay('看样子你还不想和我交朋友，你是在耍我吗？')

					elif phrase.find('说粤语') >= 0 :
						if speaktrhead_mode != SPEAK_THREAD_STOPPED or getMp3Status() != MP3_STATUS_STOPPED :
							continue
						else :
							speak_lang = 'zh-yue'
							robotSay('敢说粤语，你有我讲的好吗？我是最靓的仔，哈哈！')

					elif phrase.find('说普通话') >= 0 :
						if speaktrhead_mode != SPEAK_THREAD_STOPPED or getMp3Status() != MP3_STATUS_STOPPED :
							continue
						else :
							speak_lang = 'zh'
							robotSay('你会卷舌头吗？四是四，十是十，你也来一个！')

					elif phrase.find('你叫什么') >= 0 or phrase.find('你的名字') >= 0 or phrase.find('你是谁') >= 0:
						if speaktrhead_mode != SPEAK_THREAD_STOPPED or getMp3Status() != MP3_STATUS_STOPPED :
							continue
						else :
							robotSay('居然连我都不认识，赶紧去问问段云泷，我就是他的超级无敌小乌龟，哈哈！')

					elif phrase.find('小乌龟') >= 0 :
						# Wake word: interrupt whatever is speaking/playing.
						if speaktrhead_mode == SPEAK_THREAD_RUNNING:
							setSpeakThreadMode(SPEAK_THREAD_CANCLE)

						# Wait for the speak thread to acknowledge; it sets
						# the mode back to SPEAK_THREAD_STOPPED itself.
						while getSpeakThreadMode() != SPEAK_THREAD_STOPPED:
							time.sleep(1)

						setMp3CMD(MP3_CMD_STOP)
						while getMp3Status() != MP3_STATUS_STOPPED:
							time.sleep(1)

						robotSay('你好，我在')

					elif phrase.find('你知道') >= 0 :
						if speaktrhead_mode != SPEAK_THREAD_STOPPED or getMp3Status() != MP3_STATUS_STOPPED :
							continue
						else :
							robotSay('等我一小会，我得回忆一下！')

							print('start search baike...')
							# Query is everything after '你知道', minus any '吗'.
							searchStr = phrase[phrase.find('你知道')+3:]
							searchStr = searchStr.replace('吗',' ')
							callSearch(searchStr)

							print('start speak baike information...')
							setSpeakThreadMode(SPEAK_THREAD_RUNNING)

					elif phrase.find('是谁写的') >= 0 :
						if speaktrhead_mode != SPEAK_THREAD_STOPPED or getMp3Status() != MP3_STATUS_STOPPED :
							continue
						else :
							robotSay('就等一小会，我可厉害了！')

							# Query is everything before '是谁写的'.
							searchStr = phrase[0:phrase.find('是谁写的')]
							callSearch(searchStr)

							setSpeakThreadMode(SPEAK_THREAD_RUNNING)

					elif phrase.find('继续播放') >= 0 :
						if speaktrhead_mode != SPEAK_THREAD_STOPPED:
							continue
						else :
							setMp3CMD(MP3_CMD_RESUME)

					elif phrase.find('下一首') >= 0 or phrase.find('下一个') >= 0 :
						if speaktrhead_mode != SPEAK_THREAD_STOPPED:
							continue
						else :
							setMp3CMD(MP3_CMD_NEXT)

					elif phrase.find('上一首') >= 0 or phrase.find('上一个') >= 0 :
						if speaktrhead_mode != SPEAK_THREAD_STOPPED:
							continue
						else :
							setMp3CMD(MP3_CMD_BACK)

					elif phrase.find('播放') >= 0 :
						if speaktrhead_mode != SPEAK_THREAD_STOPPED:
							continue
						else :
							searchStr = phrase[phrase.find('播放')+2:]
							print(searchStr)
							setMp3SearchKey(searchStr)
							setMp3CMD(MP3_CMD_SEARCH)

					elif phrase.find('暂停') >= 0 :
						if speaktrhead_mode != SPEAK_THREAD_STOPPED:
							continue
						else :
							setMp3CMD(MP3_CMD_PAUSE)

					elif phrase.find('关机') >= 0 :
						if speaktrhead_mode != SPEAK_THREAD_STOPPED:
							continue
						else :
							# Tell every sibling process to shut down, say
							# goodbye, then leave the main loop.
							setFaceCMD(FACE_CMD_EXIT)
							setSpeakThreadMode(SPEAK_THREAD_EXIT)
							setMp3CMD(MP3_CMD_EXIT)
							robotSay('拜拜，我会想你的！')
							break

def liveSoundRecogZH():
	"""Stream Mandarin recognition from ALSA device hw:0,1 via PocketSphinx.

	Prints every decoded phrase; runs until the LiveSpeech iterator ends.
	"""
	model_path = get_model_path()

	live = LiveSpeech(
		audio_device='hw:0,1',
		verbose=False,
		sampling_rate=16000,
		buffer_size=2048,
		no_search=False,
		full_utt=False,
		hmm=os.path.join(model_path, 'zh_cn.cd_cont_5000'),
		lm=os.path.join(model_path, 'zh_cn.lm.bin'),
		dic=os.path.join(model_path, 'zh_cn.dic'),
	)

	for utterance in live:
		print("phrase:", utterance)

def liveSoundRecogEN():
	"""Stream English recognition from the default input via PocketSphinx.

	Prints every decoded phrase; runs until the LiveSpeech iterator ends.
	"""
	model_path = get_model_path()

	live = LiveSpeech(
		verbose=False,
		sampling_rate=16000,
		buffer_size=2048,
		no_search=False,
		full_utt=False,
		hmm=os.path.join(model_path, 'en-us'),
		lm=os.path.join(model_path, 'en-us.lm.bin'),
		dic=os.path.join(model_path, 'cmudict-en-us.dict'),
	)

	for utterance in live:
		print("phrase:", utterance)

def liveSoundRecogDK():
	"""Stream recognition from hw:0,1 using a custom '8394' language model.

	Same acoustic model as the Mandarin variant but with the 8394.lm.bin /
	8394.dic pair — presumably a restricted command vocabulary (TODO confirm
	the model files exist in the pocketsphinx model path).
	"""
	model_path = get_model_path()

	live = LiveSpeech(
		audio_device='hw:0,1',
		verbose=False,
		sampling_rate=16000,
		buffer_size=2048,
		no_search=False,
		full_utt=False,
		hmm=os.path.join(model_path, 'zh_cn.cd_cont_5000'),
		lm=os.path.join(model_path, '8394.lm.bin'),
		dic=os.path.join(model_path, '8394.dic'),
	)

	for utterance in live:
		print("phrase:", utterance)

if __name__=='__main__':
	#print (sr.Microphone().list_microphone_names())

	#soundRecog('c:\\AICHAIN\\myAI\\test\\zh.wav')
	# Entry point: run the Google-based Mandarin voice-command main loop.
	microRecogGoogle()
