#!/usr/bin/env python
#######################################################################################
'''
This script controls facial recognition for Humia. It was written and designed
by Michael Spinali in Python. Local Binary Pattern algorithms, together with
loading and saving of runtime information, are used to increase performance on
the embedded ARM processor.
'''
__author__ = "Michael Spinali"
__copyright__ = "Copyright 2014, The Humia Project"
__license__ = "GPL"
__maintainer__ = "Michael Spinali"
__email__ = "mspinali@rohan.sdsu.edu"
__status__ = "Production"
#######################################################################################

import numpy as np
import cv2
import cv2.cv as cv
import csv,sys
import re
import os
from datetime import datetime
from time import sleep

# NOTE(review): a `global` statement at module level is a no-op; max_id is
# actually (re)bound in the __main__ block from read_images()'s return value.
global max_id
img_count = 20			# Images saved so far for the current user (20 == "not recording")
UCount = 0			# Unidentified user count (how often an unknown face was seen)
uid = 0				# UID of the person currently being recorded
name = ''			# Name of the person currently being recorded
NUFlag = 0			# New User Flag: 1 => prompt for a name and start recording
EFlag = 0			# CSV Empty Flag: 1 => recognizer not trained yet
RFlag = 0			# Recording Flag: 1 => retrain once 20 images are saved
WFlag = 0			# Used to tell if an Unidentified User is real (wait-timer armed)
CurTime = datetime.now()	# Current Time Global: start of the unidentified-user wait timer

def detect(img, cascade_fn='lbpcascade_frontalface.xml',
           scaleFactor=1.1, minNeighbors=3, minSize=(80, 80),
           flags=cv.CV_HAAR_SCALE_IMAGE):
    """Detect faces in a greyscale frame.

    Parameters mirror cv2.CascadeClassifier.detectMultiScale. Returns an
    empty list when no face is found, otherwise an Nx4 ndarray whose rows
    are (x1, y1, x2, y2) corner coordinates.
    """
    # PERF FIX: this runs once per captured frame; the original re-parsed the
    # cascade XML from disk on every call. Cache one classifier per filename.
    cache = getattr(detect, '_cascade_cache', None)
    if cache is None:
        cache = detect._cascade_cache = {}
    cascade = cache.get(cascade_fn)
    if cascade is None:
        cascade = cache[cascade_fn] = cv2.CascadeClassifier(cascade_fn)
    rects = cascade.detectMultiScale(img, scaleFactor=scaleFactor,
                                     minNeighbors=minNeighbors,
                                     minSize=minSize, flags=flags)
    if len(rects) == 0:
        return []
    # detectMultiScale yields (x, y, w, h); convert to corner form in place.
    rects[:, 2:] += rects[:, :2]
    return rects

def read_images(filename):
	"""Read training images listed in the CSV record file.

	Each row of *filename* is ``<image path>;<numeric label>``. Returns a
	tuple (images, labels, names, max_id): greyscale images as uint8
	arrays, their int32 labels, one directory name per run of identical
	labels (path is assumed to look like data/<name>/<n>.pgm), and the
	last label seen (0 when the file is empty). Exits on CSV errors.
	"""
	image, label, names = [], [], []
	# BUG FIX: max_id was unbound when the CSV had no rows, so the very
	# first run (empty humia.csv) crashed with UnboundLocalError at return.
	max_id = 0
	with open(filename, 'rb') as f:
		reader = csv.reader(f, delimiter=';')
		try:
			for row in reader:
				im = cv2.imread(row[0], cv2.IMREAD_GRAYSCALE)
				image.append(np.asarray(im, dtype=np.uint8))
				label.append(np.asarray(row[1], dtype=np.int32))
				max_id = int(row[1])
				# Extract the user name from the path: data/<name>/<n>.pgm
				m = re.search(r"\/([A-Za-z0-9_]+)\/", row[0])
				# Guard against paths that don't match the expected layout
				if m and (names == [] or m.group(1) != names[-1]):
					names.append(m.group(1))
		except csv.Error as e:
			sys.exit('file %s,line %d: %s' % (filename, reader.line_num, e))
		return image, label, names, max_id

def save_image(n_uid, n_name, count, img):
	"""Save one face image and append its path/uid pair to humia.csv.

	n_uid   -- numeric label written to the CSV record file
	n_name  -- user directory name under data/ (must already exist)
	count   -- image index within the user's directory (<count>.pgm)
	img     -- greyscale face crop handed to cv2.imwrite
	"""
	raw_path = 'data/' + n_name
	imPath = str(raw_path) + '/' + str(count) + '.pgm'
	# IDIOM FIX: context manager guarantees the CSV handle is closed even
	# if the write raises (original used manual open/close).
	with open('humia.csv', 'a') as fd:
		fd.write(imPath + ';' + str(n_uid) + "\n")
	cv2.imwrite(imPath, img)
	return

if __name__ == '__main__':
	model = cv2.createLBPHFaceRecognizer()
	image,label,names,max_id = read_images('humia.csv')
	if image != []:
		model.train(np.asarray(image), np.asarray(label))
		#model.load("LBPHfaces_alt.yml")
	else:
		# New User Flag
		NUFlag = 1
		# CSV Empty Flag
		EFlag = 1
<<<<<<< .mine

	cap = cv2.VideoCapture(0)
=======
	cap = cv2.VideoCapture(3)
>>>>>>> .r54
	# Init dummy image
	b, large = cap.read()
	while(True):
		# Delay in between face saves. This allows training at a human pace
		if img_count < 20:
			sleep(1)
		# Capture frame-by-frame
		ret, frame = cap.read()
		original = frame.copy()

		# Convert to Greyscale, equalize
		gray = cv2.cvtColor(frame, cv.CV_BGR2GRAY)
		gray = cv2.equalizeHist(gray)
	
		# Detect Faces from image
		rects = detect(gray)
		color = (0, 255, 0)
		
		# If Time to register new User and a face was detected
		if NUFlag == 1 and rects != []:
			max_id += 1
			uid,name = max_id,raw_input('Enter your name : ')
			name = name.lower().replace (" ", "_")
			NUFlag = 0
			if name != 'ignore':
				img_count = 0
				path = os.path.join(os.getcwd(), 'data/'+name)
				i,RFlag = 1,1
				while(True):
					if not os.path.exists(path):
				    		os.makedirs(path)
						break
					else:
						name = name + str(i)
						path = os.path.join(os.getcwd(), 'data/'+name)
						i += 1
		y2p,x2p = 0,0
		delta = datetime.now() - CurTime
		
		# If Wait Flag is enabled, an elpased time of 10 seconds has occured and at least 100
		#   largest face's were found, iniate training. This method reduces error
		if WFlag == 1 and (int)(delta.total_seconds()) > 10 and UCount > 100:
			WFlag,UCount = 0,0
			NUFlag,RFlag = 1,1
		
		# Face Increment, and Largest Image inc holder
		i,lrg_inc = 0,0
		# Confidence Array (used to detect if a new person was detected)
		conf_lst = []

		# Iterate through detected faces 
		for x1, y1, x2, y2 in rects:
			
			# Crop and find largest (face with largest area)
			crop_img = gray[y1:y2, x1:x2]
			if (y2*x2) > y2p *x2p:
				lrg_inc = i
				y2p = y2
				x2p = x2
				large = crop_img

			# If the facerecognizer was trained (e.g. non-empty csv)
			if EFlag != 1:
				[p_label, p_confidence] = model.predict(crop_img)
				box_text = names[p_label-2]
				#box_text = "Predicted label = %d (confidence=%.2f)" % (p_label, p_confidence)
				conf_lst.append(p_confidence)
				
			# Empty-Csv no reocrds to go off, this means this image is a face save
			else:
				box_text = "Training..."

			# Add Prediction Text and Illustrations
			cv2.rectangle(original, (x1, y1), (x2, y2), color, 2)
			cv2.putText(original, box_text, (x1, y1-5), cv2.FONT_HERSHEY_PLAIN, 1, color, 2)
			i+=1
		# If Confidence > threshold start wait timer (WFlag)
		# WFlag is used to tell if a newly detected face is an error, or a real person
		# We wait 3 seconds and count how many time an unreocgnized face was found (UCount)
		if conf_lst != [] and conf_lst[lrg_inc] > 50 and WFlag == 0:
			CurTime = datetime.now()
			WFlag = 1
			UCount += 1
		elif conf_lst != [] and conf_lst[lrg_inc] > 50 and WFlag == 1:
			UCount += 1
		elif (int)(delta.total_seconds()) > 12:
			WFlag = 0

		# Save Detected Faces one-by one (Largest Only)
		if img_count < 20:
			save_image(uid,name,img_count,large)
			img_count += 1
			print "Face Saved"

		# If recording is done reset
		elif RFlag == 1:
			image,label,names,max_id = read_images('humia.csv')
			model.train(np.asarray(image), np.asarray(label))
			model.save("LBPHfaces_alt.yml")
			EFlag,RFlag = 0,0
		# Display the resulting frame
		cv2.imshow('Face Recognizer',original)

		# Detect if 'q' Pressed (quit)
		if cv2.waitKey(15) & 0xFF == ord('q'):
			#model.save("LBPHfaces_alt.yml")
			break
		
	# When everything done, release the capture
	cap.release()
	cv2.destroyAllWindows()
