# This app requires `pip install opencv-contrib-python --upgrade` (NOT opencv-python).
# Chinese characters in file paths are not supported.
# Each face's name is an id, which must be a number. Inside the realize() flow a
# mapping such as name = {23: "DK"} is used to associate ids with display names.

import _thread
import configparser
import os
import shutil
import string
import subprocess
import time

import cv2
import numpy as np
import pyttsx3
from PIL import Image, ImageDraw, ImageFont

# Number of faces currently known to the recognizer (loaded from faces.ini).
faceNum = 0
# Numeric id of the face most recently recognized in realize().
cur_faceID = 0
# Display names; realize() indexes this with (face id - 1), so ids are 1-based.
faces_name = []

# Directory where captured face images are stored for training.
facePath=r'C:\AICHAIN\myAI\test\ALLface'

# States of the external speech thread, as stored in speakctrl.txt.
SPEAK_THREAD_STOPPED = 'stopped'
SPEAK_THREAD_RUNNING = 'running'
SPEAK_THREAD_CANCLE = 'cancle'  # spelling kept: this string is part of the file protocol
SPEAK_THREAD_EXIT = 'exit'

def getSpeakThreadMode():
	"""Return the speech-thread mode string read from speakctrl.txt.

	Returns the first line of the file exactly as read (callers compare
	it against the SPEAK_THREAD_* constants).
	"""
	# The original retry loop always broke on its first iteration, so it
	# was dead code; the redundant f.close() inside 'with' is dropped too.
	with open(r"c:\AICHAIN\myAI\test\speakctrl.txt", 'r', encoding='utf-8') as f:
		return f.readline()

# States of the external mp3 player, as stored in mp3status.txt.
MP3_STATUS_STOPPED = 'stopped'
MP3_STATUS_PLAYING = 'playing'
MP3_STATUS_PAUSED = 'paused'

def getMp3Status():
	"""Return the mp3-player status string read from mp3status.txt.

	Returns the first line of the file exactly as read (callers compare
	it against the MP3_STATUS_* constants).
	"""
	# The original retry loop always broke on its first iteration — dead code.
	with open(r"c:\AICHAIN\myAI\test\mp3status.txt", 'r', encoding='utf-8') as f:
		return f.readline()

def canSpeak():
	"""Return True when neither the speech thread nor the mp3 player is busy."""
	speech_busy = getSpeakThreadMode() == SPEAK_THREAD_RUNNING
	mp3_busy = getMp3Status() == MP3_STATUS_PLAYING
	# De Morgan of the original: speakable iff nothing is currently playing.
	return not (speech_busy or mp3_busy)

# Commands exchanged through facecmd.txt: 'learn' captures/trains a new
# face, 'stop' runs recognition, 'exit' terminates the program.
FACE_CMD_LEARN = 'learn'
FACE_CMD_STOP = 'stop'
FACE_CMD_EXIT = 'exit'

def getFaceCMD():
	"""Return the current face command read from facecmd.txt.

	Returns the first line of the file exactly as read (callers compare
	it against the FACE_CMD_* constants).
	"""
	# The original retry loop always broke on its first iteration — dead code.
	with open(r"c:\AICHAIN\myAI\test\facecmd.txt", 'r', encoding='utf-8') as f:
		return f.readline()

def setFaceCMD(cmdstr):
	"""Write *cmdstr* to facecmd.txt, replacing any previous command.

	Commands are written without a trailing newline so getFaceCMD()
	returns them unchanged.
	"""
	# The original retry loop always broke on its first iteration — dead code.
	with open(r"c:\AICHAIN\myAI\test\facecmd.txt", 'w', encoding='utf-8') as f:
		f.write(cmdstr)

def getFaceName():
	"""Return the pending new-face name read from facename.txt.

	Returns the first line of the file exactly as read.
	"""
	# The original retry loop always broke on its first iteration — dead code.
	with open(r"c:\AICHAIN\myAI\test\facename.txt", 'r', encoding='utf-8') as f:
		return f.readline()

def setFaceName(name):
	"""Write *name* to facename.txt, replacing any previous value."""
	# The original retry loop always broke on its first iteration — dead code.
	with open(r"c:\AICHAIN\myAI\test\facename.txt", 'w', encoding='utf-8') as f:
		f.write(name)

def getCurFaceName():
	"""Return the name of the currently recognized face from curfacename.txt.

	Returns the first line of the file exactly as read.
	"""
	# The original retry loop always broke on its first iteration — dead code.
	with open(r"c:\AICHAIN\myAI\test\curfacename.txt", 'r', encoding='utf-8') as f:
		return f.readline()

def setCurFaceName(name):
	"""Write *name* (the currently recognized face) to curfacename.txt."""
	# The original retry loop always broke on its first iteration — dead code.
	with open(r"c:\AICHAIN\myAI\test\curfacename.txt", 'w', encoding='utf-8') as f:
		f.write(name)

# Text-to-speech engine using the Windows SAPI5 backend.
engine = pyttsx3.init('sapi5')
# Set speaking rate; the pyttsx3 default is 200.
engine.setProperty('rate', 120)

# Language selector for robotSay(): 'zh-yue' uses espeak, anything else SAPI5.
speak_lang = 'zh'

def execute_unix(inputcommand):
   p = subprocess.Popen(inputcommand, stdout=subprocess.PIPE, shell=True)
   (output, err) = p.communicate()
   return output

def espeakSay(wordsStr):
	"""Speak *wordsStr* through the external espeak program.

	Windows 7 consoles do not handle UTF-8 command lines, so the text is
	first written to a UTF-8 file and espeak reads it back with -f.
	"""
	# 'with' guarantees the message file is closed before espeak reads it
	# (the original relied on an explicit close()).
	with open(r"c:\AICHAIN\myAI\test\msg.txt", 'w', encoding='utf-8') as msgfile:
		msgfile.write(wordsStr)

	# espeak flags: -vzhy voice, -s150 speed, -g2 word gap, -a200 amplitude.
	c = 'espeak -vzhy -s150 -g2 -a200 -f c:\\AICHAIN\\myAI\\test\\msg.txt'
	ret = execute_unix(c)
	print (ret)

def sapi5Say(wordsStr):
	"""Speak *wordsStr* synchronously via the shared SAPI5 engine."""
	# 'global' is unnecessary for a read-only reference to the module engine.
	engine.say(wordsStr)
	engine.runAndWait()

def robotSay(wordsStr):
	"""Speak *wordsStr* with the backend selected by the module-level speak_lang."""
	if speak_lang == 'zh-yue':
		# Cantonese goes through the external espeak program.
		espeakSay(wordsStr)
	else:
		# 'zh' and any unrecognized language both fall back to SAPI5,
		# exactly as the original if/elif/else chain did.
		sapi5Say(wordsStr)

def getFacesInfo():
	"""Load the known-face names from faces.ini into the module globals.

	Reads [FACES] 'num' and the entries keyed '1'..'num', appending each
	name to faces_name and setting faceNum. Called once at startup.
	"""
	global faces_name
	global faceNum

	config = configparser.ConfigParser()
	config.read(r"c:\AICHAIN\myAI\test\faces.ini")
	faceNum = int(config.get('FACES', 'num'))
	# Idiomatic replacement for the original manual while/index loop;
	# ini keys are 1-based, matching the ids assigned in saveOneFaceInfo.
	for index in range(1, faceNum + 1):
		faces_name.append(config.get('FACES', str(index)))

def saveOneFaceInfo(faceID, name):
	"""Append one newly learned face name to faces.ini and the in-memory list.

	Parameters:
		faceID: kept for interface compatibility but not used — the stored
			key is always the incremented 'num' value. NOTE(review): the
			caller passes faceNum + 1, which matches that key; confirm.
		name: display name to store for the new face.
	"""
	global faces_name
	global faceNum

	config = configparser.ConfigParser()
	config.read(r"c:\AICHAIN\myAI\test\faces.ini")
	faceNum = int(config.get('FACES', 'num'))
	faceNum = faceNum + 1
	config.set('FACES', 'num', str(faceNum))
	config.set('FACES', str(faceNum), name)
	# 'with' closes the ini file handle; the original passed an anonymous
	# open() to config.write and leaked the handle.
	with open(r"c:\AICHAIN\myAI\test\faces.ini", 'w', encoding='utf-8') as f:
		config.write(f)

	faces_name.append(name)

def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20):
	"""Draw UTF-8 *text* (e.g. Chinese) onto an OpenCV BGR image.

	cv2.putText cannot render CJK glyphs, so the image is round-tripped
	through PIL, drawn with a TrueType font, and converted back to BGR.
	"""
	if isinstance(img, np.ndarray):
		# OpenCV stores BGR; PIL expects RGB.
		img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
	pen = ImageDraw.Draw(img)
	ttf = ImageFont.truetype("font/simsun.ttc", textSize, encoding="utf-8")
	pen.text((left, top), text, textColor, font=ttf)
	return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)

def red(date):
	"""Capture up to 100 face images from the camera for a new person.

	Frames are saved as ALLFace/User.<face_id>.<count>.jpg where face_id
	is faceNum + 1 (the id the next training run will assign).
	NOTE(review): 'red' is presumably short for 'record' — confirm.

	Parameters:
		date: NOTE(review): unused — images go to the relative "ALLFace/"
			path below, not to this argument (callers pass facePath);
			confirm the working directory matches.
	"""
	global faceNum

	faces = cv2.CascadeClassifier(r"C:\ProgramData\Anaconda3\Lib\site-packages\cv2\data\haarcascade_frontalface_default.xml")
	# Camera index 1 with the DirectShow backend (Windows).
	cap=cv2.VideoCapture(1, cv2.CAP_DSHOW)
	face_id = faceNum + 1
	count=0
	while True:
		ret, img = cap.read()
		if ret:
			gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
			face = faces.detectMultiScale(gray,1.05, 10, minSize=(32,32))
			for (x,y,w,h) in face:
				cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
				# Only keep reasonably large (close-up) faces.
				if h+w>=300:
					new_img = cv2.resize(img[y:y + h, x:x + w], (92, 112)) # resize the crop to the training size
					count+=1
					cv2.imwrite("ALLFace/User."+str(face_id)+"."+str(count)+".jpg",new_img)
			cv2.imshow('image',img)
		k=cv2.waitKey(1)
		if k==27:	# ESC aborts the capture
			break
		if count == 30 :
			# Ask the user to move closer/farther for varied samples.
			# NOTE(review): this repeats every frame while count stays at
			# 30 (no face saved) — possibly unintended; confirm.
			wordStr = '请前后移动一两步，让我看看远处的你和近处的你'
			robotSay(wordStr)

		if count == 100 :	# save 100 face pictures for learning
			break
	
	cap.release()
	cv2.destroyAllWindows()

def getImageAndLabels(path):
	"""Collect grayscale face crops and their numeric ids for training.

	Parameters:
		path: directory of images named User.<id>.<n>.jpg.

	Returns:
		(faceSamples, ids): parallel lists of detected face sub-images
		(uint8 numpy arrays) and the integer id parsed from each name.
	"""
	detector = cv2.CascadeClassifier(r"C:\ProgramData\Anaconda3\Lib\site-packages\cv2\data\haarcascade_frontalface_default.xml")
	imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
	faceSamples = []
	ids = []
	for imagePath in imagePaths:
		PIL_img = Image.open(imagePath).convert('L')  # convert to grayscale
		img_numpy = np.array(PIL_img, 'uint8')

		# File names look like User.<id>.<count>.jpg; the id is field 1.
		# Renamed from 'id' to avoid shadowing the builtin.
		face_id = int(os.path.split(imagePath)[-1].split(".")[1])
		faces = detector.detectMultiScale(img_numpy)
		for (x, y, w, h) in faces:
			faceSamples.append(img_numpy[y:y + h, x: x + w])
			ids.append(face_id)

	return faceSamples, ids

def train(path):
	"""Train the LBPH recognizer on every image under *path* and save the model."""
	recognizer = cv2.face.LBPHFaceRecognizer_create()
	samples, labels = getImageAndLabels(path)
	recognizer.train(samples, np.array(labels))
	recognizer.write(r'C:\AICHAIN\myAI\test\train\trainer.yml')
	print("{0} faces trained.exiting program".format(len(np.unique(labels))))

def realize():
	"""Recognize faces from the camera while the command file says 'stop'.

	Runs the trained LBPH recognizer (when faceNum > 0) on every camera
	frame, draws name/confidence labels, writes the recognized name to
	curfacename.txt, and greets a newly seen known person by voice.
	Returns when facecmd.txt changes or ESC is pressed.
	"""
	global cur_faceID

	# Only load the trained model when at least one face is known.
	if faceNum > 0 :
		recohnizer = cv2.face.LBPHFaceRecognizer_create()
		recohnizer.read(r'C:\AICHAIN\myAI\test\train\trainer.yml')

	cascadePath=(r"C:\ProgramData\Anaconda3\Lib\site-packages\cv2\data\haarcascade_frontalface_default.xml")
	facaeCascade=cv2.CascadeClassifier(cascadePath)
	font=cv2.FONT_HERSHEY_SIMPLEX
	idnum=0
	# Camera index 1 with the DirectShow backend (Windows).
	cap=cv2.VideoCapture(1, cv2.CAP_DSHOW)
	# NOTE(review): minW/minH are computed but never used below —
	# detectMultiScale uses a fixed minSize=(32,32).
	minW=0.1*cap.get(3)
	minH=0.1*cap.get(4)

	unknown_counter = 0
	while True:
		# Recognition runs only while the command file still says 'stop';
		# any other command (learn/exit) returns control to the main loop.
		if getFaceCMD() != FACE_CMD_STOP :
			break

		ret, img = cap.read()
		if ret:
			gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
			face = facaeCascade.detectMultiScale(gray,1.05, 10,minSize=(32,32))
			for (x,y,w,h) in face:
				cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)

				if faceNum > 0 :
					idnum,confidence=recohnizer.predict(gray[y:y+h,x:x+w])
					# print(idnum)
					# LBPH 'confidence' is a distance: lower is a better
					# match. (The original comment claimed "> 50%", but
					# the threshold actually used here is distance < 100.)
					if confidence<100 :
						face_name=faces_name[idnum-1]	# ids are 1-based
						unknown_counter = 0		# clear unknown_counter 
						confidence="{0}%".format(round(100-confidence))
					else:
						face_name="unknown"
						unknown_counter = unknown_counter + 1
						confidence="{0}%".format(round(100-confidence))

						if unknown_counter == 10 : # if continueous images are unkown, cunter > 10 , set unkown
							setCurFaceName(face_name)
							unknown_counter = 11	# set 11, != 10 , is used to set unkown just once

					# cv2.putText(img,face_name,(x+5,y-5),font,1,(255,0,0),1)
					img = cv2ImgAddText(img,face_name,x+5,y-5,(255,0,0),60)
					cv2.putText(img,str(confidence),(x+5,y+h-5),font,1,(0,0,0),1)

					if cur_faceID != idnum :
						# a new and known person coming ...
						cur_faceID = idnum
						setCurFaceName(face_name)
						if canSpeak() :
							wordStr = '你好，' + face_name
							robotSay(wordStr)
						break	# use break, AI will only know one person, the first one

			cv2.imshow('camera',img)

		k=cv2.waitKey(1)
		if k==27:	# ESC exits recognition
			break
	
	cap.release()
	cv2.destroyAllWindows()

if __name__=='__main__':
	# Startup: put the system in recognition mode, load the known faces,
	# and reset the current-face file to "unknown".
	setFaceCMD(FACE_CMD_STOP)
	getFacesInfo()
	setCurFaceName("unknown")

	# Command loop: facecmd.txt is the IPC channel with the controller.
	while True:
		cmd = getFaceCMD()
		if cmd == FACE_CMD_LEARN :
			# learn a new person's face
			wordStr = '很高兴认识你，请保持脸部正面对着我，大概需要10秒钟，我要仔细看看你'
			robotSay(wordStr)

			new_name = getFaceName()
			print('get FACE_CMD_LEARN:',new_name)
			red(facePath)	# capture ~100 images of the new face

			wordStr = '我正在记忆你的脸蛋，请耐心等一会'
			robotSay(wordStr)
			train(facePath)	# currently, this function is used to learn all the faces again!!!

			wordStr = '好了，以后我就认识你了，' + new_name
			robotSay(wordStr)

			# Persist the new name, then drop back into recognition mode.
			saveOneFaceInfo(faceNum + 1, new_name)
			setFaceCMD(FACE_CMD_STOP)

		elif cmd == FACE_CMD_STOP :
			# 'stop' means: no learning pending — run live recognition.
			realize()

		elif cmd == FACE_CMD_EXIT :
			break
		else :
			time.sleep(1)	# unknown/empty command; poll again shortly
