import sensor, image, time, lcd,gc,time,binascii,ujson
from xyxkc import SD_read, SD_write
from maix import KPU,GPIO, utils
from uart import uart_read,send_data
from autonomous import autonomousLearning,endKpu,startKpu
from fpioa_manager import fm
class FuncTag:
	"""Numeric IDs for every vision mode this script can run.

	START (0) is the idle/default slot; 1..16 each select one of the
	detection/recognition loops defined below.  The module-level
	currentFuncTagArray maps a 0-based persisted/UART index onto these
	values before comparison.
	"""
	START = 0
	COLOR_RECOGNITION  = 1
	FIND_BARCODES  = 2
	FIND_QRCODES = 3
	FIND_APRILTAGS  = 4
	FIND_FACE_YOLO  = 5
	FIND_FACE_FEATURE  = 6
	FIND_FACE_MASK  = 7
	FIND_OBJECT  = 8
	FIND_SELF_LEARNING  = 9
	FIND_DIGITAL  = 10
	FIND_FACE_RECOGNIZE  = 11
	FIND_RED_FOLLOW  = 12
	FIND_SIGNPOST_FOLLOW	= 13
	FIND_DIGITAL_CARD	= 14
	GARBAGE_SORTING = 15
	FIND_MULTI_COLOR = 16
# Maps a 0-based tag index (received over UART / persisted on SD) onto the
# FuncTag values 1..16.
currentFuncTagArray = list(range(1, 17))
# SD file that remembers the last selected function tag across reboots.
k21CurrentFuncTag = 'k21CurrentFuncTag.json'
# Restore the previously selected tag, defaulting to 0 when the file is
# missing/empty or has no 'tag' key.
_saved = SD_read(k21CurrentFuncTag)
jsonk21CurrentFuncTag = ujson.loads(_saved) if _saved != '' else {}
currentFuncTag = jsonk21CurrentFuncTag.get('tag', 0)
lcd.init()
# --- Push-button setup ---
# Both keys are edge-triggered.  The IRQ handlers only latch a "pressed"
# flag; the mode loops poll and clear it.  The 50 ms sleep is a crude
# debounce.  NOTE(review): sleeping inside an IRQ callback stalls the CPU
# for that time — assumed acceptable on this firmware; verify.
key1_pin_down = False
key1_pin = 26
key2_pin = 47
fm.register(key1_pin, fm.fpioa.GPIOHS0)
key1_gpio = GPIO(GPIO.GPIOHS0, GPIO.IN)
def set_key1_state(*_):
	# IRQ callback: latch that key 1 was pressed (consumed by mode loops).
	global key1_pin_down
	key1_pin_down = True
	time.sleep_ms(50)
key1_gpio.irq(set_key1_state, GPIO.IRQ_RISING, GPIO.WAKEUP_NOT_SUPPORT)
key2_pin_down = False
fm.register(key2_pin, fm.fpioa.GPIOHS1)
key2_gpio = GPIO(GPIO.GPIOHS1, GPIO.IN)
def set_key2_state(*_):
	# IRQ callback: latch that key 2 was pressed (consumed by mode loops).
	global key2_pin_down
	key2_pin_down = True
	time.sleep_ms(50)
key2_gpio.irq(set_key2_state, GPIO.IRQ_RISING, GPIO.WAKEUP_NOT_SUPPORT)
def uartTag(tag):
	"""Adopt a tag index received over UART.

	Valid indices (> -1) are persisted to SD so the mode survives a
	reboot, then made current; negative values (no/invalid UART data)
	are ignored.
	"""
	global currentFuncTag
	if tag > -1:
		SD_write(k21CurrentFuncTag, ujson.dumps({'tag': tag}))
		currentFuncTag = tag
def colorRecognitionFollow():
	"""Single-color tracking mode (FuncTag.COLOR_RECOGNITION).

	Phase 1: live preview with a centered 50x50 target box until key 1
	is pressed (or another tag arrives over UART).  Phase 2: learn LAB
	min/max thresholds from that box over 50 frames.  Phase 3: track
	blobs matching the learned thresholds and stream the last blob's
	bounding box over UART.
	"""
	global currentFuncTag
	global key1_pin_down
	send_x = 0
	send_y = 0
	send_w = 0
	send_h = 0
	send_msg = ""
	lcd.clear()
	lcd.draw_string(0, 0, "colorRecognitionFollow")
	sensor.reset()
	sensor.set_pixformat(sensor.RGB565)
	sensor.set_framesize(sensor.QVGA)
	sensor.skip_frames(time = 100)
	# Lock gain/white-balance so the learned color stays stable.
	sensor.set_auto_gain(False)
	sensor.set_auto_whitebal(False)
	clock = time.clock()
	# Centered 50x50 ROI: on-screen target and histogram sample region.
	r = [(320//2)-(50//2), (240//2)-(50//2), 50, 50]
	# Phase 1: wait for key 1 while still honoring UART mode switches.
	while (not key1_pin_down) and currentFuncTagArray[currentFuncTag] == FuncTag.COLOR_RECOGNITION:
		tag = uart_read()
		if tag >-1:
			# NOTE(review): unlike uartTag(), this does not persist the
			# new tag to SD — assumed intentional during preview; verify.
			currentFuncTag = tag
		img = sensor.snapshot()
		img.draw_rectangle(r)
		lcd.display(img)
	key1_pin_down = False
	print("Start learning ...")
	# Phase 2: threshold learning (skipped if the mode changed above).
	if currentFuncTagArray[currentFuncTag] == FuncTag.COLOR_RECOGNITION:
		# [L_min, L_max, A_min, A_max, B_min, B_max], refined by a running
		# average toward the ROI's 1%/99% histogram percentiles.
		threshold = [50, 50, 0, 0, 0, 0]
		for i in range(50):
			img = sensor.snapshot()
			hist = img.get_histogram(roi=r)
			lo = hist.get_percentile(0.01)
			hi = hist.get_percentile(0.99)
			threshold[0] = (threshold[0] + lo.l_value()) // 2
			threshold[1] = (threshold[1] + hi.l_value()) // 2
			threshold[2] = (threshold[2] + lo.a_value()) // 2
			threshold[3] = (threshold[3] + hi.a_value()) // 2
			threshold[4] = (threshold[4] + lo.b_value()) // 2
			threshold[5] = (threshold[5] + hi.b_value()) // 2
			# Live feedback of what the current thresholds already match.
			for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10):
				img.draw_rectangle(blob.rect())
				img.draw_cross(blob.cx(), blob.cy())
				img.draw_rectangle(r, color=(0,255,0))
			lcd.display(img)
	print("Thresholds Learning completed...")
	print("Start Color Recognition...")
	# Phase 3: tracking; only the last matching blob per frame is sent
	# ('u' is an internal marker meaning "have a blob to send").
	while currentFuncTagArray[currentFuncTag] == FuncTag.COLOR_RECOGNITION:
		tag = uart_read()
		uartTag(tag)
		clock.tick()
		img = sensor.snapshot()
		for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10):
			img.draw_rectangle(blob.rect())
			img.draw_cross(blob.cx(), blob.cy())
			send_x = blob.x()
			send_y = blob.y()
			send_w = blob.w()
			send_h = blob.h()
			send_msg = 'u'
		if send_msg == 'u':
			send_data(send_x,send_y,send_w,send_h,None,currentFuncTagArray[currentFuncTag])
			send_msg = ""
		lcd.display(img)
def findBarcodesFollow():
	"""Barcode scanning mode: outline every detected barcode and report
	the last one's bounding box and payload over UART."""
	global currentFuncTag
	box_x = box_y = box_w = box_h = 0
	payload = ""
	lcd.clear()
	lcd.draw_string(0, 0, "findBarcodesFollow")
	sensor.reset()
	sensor.set_pixformat(sensor.RGB565)
	sensor.set_framesize(sensor.QVGA)
	sensor.skip_frames(time = 100)
	sensor.set_auto_gain(True)
	clock = time.clock()
	while currentFuncTagArray[currentFuncTag] == FuncTag.FIND_BARCODES:
		# Honor mode switches arriving over UART.
		uartTag(uart_read())
		clock.tick()
		img = sensor.snapshot()
		fps = clock.fps()
		for code in img.find_barcodes():
			img.draw_rectangle(code.rect(), color=(0, 255, 0))
			box_x, box_y = code.x(), code.y()
			box_w, box_h = code.w(), code.h()
			payload = code.payload()
		# Send once per successful detection, then clear the payload.
		if payload != '':
			send_data(box_x, box_y, box_w, box_h, payload, currentFuncTagArray[currentFuncTag])
			payload = ''
		img.draw_string(0, 0, "%2.1ffps" %(fps), color=(0, 60, 128), scale=2.0)
		lcd.display(img)
def findQrcodesFollow():
	"""QR-code scanning mode: outline each code, print its payload above
	it and report the last detection (box + payload) over UART."""
	global currentFuncTag
	qr_x = qr_y = qr_w = qr_h = 0
	qr_payload = ""
	lcd.clear()
	lcd.draw_string(0, 0, "findQrcodesFollow")
	sensor.reset()
	sensor.set_pixformat(sensor.RGB565)
	sensor.set_framesize(sensor.QVGA)
	sensor.skip_frames(time = 100)
	clock = time.clock()
	while currentFuncTagArray[currentFuncTag] == FuncTag.FIND_QRCODES:
		# Honor mode switches arriving over UART.
		uartTag(uart_read())
		clock.tick()
		img = sensor.snapshot()
		for code in img.find_qrcodes():
			img.draw_rectangle(code.rect(), color = 127, thickness=3)
			img.draw_string(code.x(),code.y()-20,code.payload(),color=(255,0,0),scale=2)
			qr_x, qr_y = code.x(), code.y()
			qr_w, qr_h = code.w(), code.h()
			qr_payload = code.payload()
		# Send once per successful detection, then clear the payload.
		if qr_payload != "":
			send_data(qr_x,qr_y,qr_w,qr_h,qr_payload,currentFuncTagArray[currentFuncTag])
			qr_payload = ""
		lcd.display(img)
		print(clock.fps())
def findApriltagsFollow():
	"""AprilTag detection mode: detect tags of every supported family and
	send the last tag's box plus "<id><family>" string over UART."""
	global currentFuncTag
	send_x = 0
	send_y = 0
	send_w = 0
	send_h = 0
	send_msg = ""
	lcd.clear()
	lcd.draw_string(0, 0, "findApriltagsFollow")
	sensor.reset()
	sensor.set_pixformat(sensor.RGB565)
	# QQVGA resolution — AprilTag detection is expensive; presumably too
	# slow at QVGA on this hardware (TODO confirm).
	sensor.set_framesize(sensor.QQVGA)
	sensor.skip_frames(time = 100)
	sensor.set_auto_gain(False)
	sensor.set_auto_whitebal(False)
	clock = time.clock()
	# Enable every tag family the firmware supports (bitmask).
	tag_families = 0
	tag_families |= image.TAG16H5
	tag_families |= image.TAG25H7
	tag_families |= image.TAG25H9
	tag_families |= image.TAG36H10
	tag_families |= image.TAG36H11
	tag_families |= image.ARTOOLKIT
	def family_name(tag):
		# Map a detection's family constant back to a printable name
		# (falls through to None for an unknown family).
		if(tag.family() == image.TAG16H5):
			return "TAG16H5"
		if(tag.family() == image.TAG25H7):
			return "TAG25H7"
		if(tag.family() == image.TAG25H9):
			return "TAG25H9"
		if(tag.family() == image.TAG36H10):
			return "TAG36H10"
		if(tag.family() == image.TAG36H11):
			return "TAG36H11"
		if(tag.family() == image.ARTOOLKIT):
			return "ARTOOLKIT"
	while currentFuncTagArray[currentFuncTag] == FuncTag.FIND_APRILTAGS:
		tag = uart_read()
		uartTag(tag)
		clock.tick()
		img = sensor.snapshot()
		for code in img.find_apriltags(families=tag_families):
			img.draw_rectangle(code.rect(), color = (255, 0, 0))
			img.draw_cross(code.cx(), code.cy(), color = (0, 255, 0))
			send_x  = code.x()
			send_y  = code.y()
			send_w  = code.w()
			send_h  = code.h()
			# Zero-pad single-digit ids so the payload always starts with
			# a two-character id.
			if (code.id()<10):
				send_msg  = '0'+str(code.id())+ str(family_name(code))
			else:
				send_msg  = str(code.id())+ str(family_name(code))
		if send_msg != '' :
			send_data(send_x,send_y,send_w,send_h,send_msg,currentFuncTagArray[currentFuncTag])
			send_msg = ''
		lcd.display(img)
		print(clock.fps())
def findFaceYolo():
	"""Face detection mode using a YOLO2 KPU model.

	Runs each frame through the detector, draws a green box around every
	face and sends the last face's bounding box over UART (no payload).
	The KPU is always released on exit.
	"""
	global currentFuncTag
	send_x = 0
	send_y = 0
	send_w = 0
	send_h = 0
	lcd.clear()
	lcd.draw_string(0, 0, "findFaceYolo")
	sensor.reset()
	sensor.set_pixformat(sensor.RGB565)
	sensor.set_framesize(sensor.QVGA)
	sensor.skip_frames(time = 100)
	clock = time.clock()
	# The model expects a 320x256 input, so each 320x240 frame is copied
	# into this padded buffer before inference.
	img_obj = image.Image(size=(320,256))
	anchor = (0.893, 1.463, 0.245, 0.389, 1.55, 2.58, 0.375, 0.594, 3.099, 5.038, 0.057, 0.090, 0.567, 0.904, 0.101, 0.160, 0.159, 0.255)
	kpu = KPU()
	kpu.load_kmodel("/sd/KPU/yolo_face_detect/yolo_face_detect.kmodel")
	kpu.init_yolo2(anchor, anchor_num=9, img_w=320, img_h=240, net_w=320, net_h=256, layer_w=10, layer_h=8, threshold=0.7, nms_value=0.3, classes=1)
	# try/finally guarantees kpu.deinit(); the former
	# `except Exception as e: raise e` wrapper was a no-op and is gone.
	try:
		while currentFuncTagArray[currentFuncTag] == FuncTag.FIND_FACE_YOLO:
			tag = uart_read()
			uartTag(tag)
			clock.tick()
			img = sensor.snapshot()
			img_obj.draw_image(img, 0,0)
			img_obj.pix_to_ai()
			kpu.run_with_output(img_obj)
			dect = kpu.regionlayer_yolo2()
			fps = clock.fps()
			if dect:
				for l in dect:
					img.draw_rectangle(l[0],l[1],l[2],l[3], color=(0, 255, 0))
					send_x = l[0]
					send_y = l[1]
					send_w = l[2]
					send_h = l[3]
				send_data(send_x,send_y,send_w,send_h,None,currentFuncTagArray[currentFuncTag])
			img.draw_string(0, 0, "%2.1ffps" %(fps), color=(0, 60, 128), scale=2.0)
			lcd.display(img)
	finally:
		kpu.deinit()
def extend_box(x, y, w, h, scale):
	"""Expand an (x, y, w, h) box by `scale`*w / `scale`*h on each side
	and clamp it inside the frame (x in [1, 319], y in [1, 239]).

	Returns the clamped (x1, y1, width, height).
	"""
	grow_w = scale * w
	grow_h = scale * h
	left = max(int(x - grow_w), 1)
	right = min(int(x + w + grow_w), 319)
	top = max(int(y - grow_h), 1)
	bottom = min(int(y + h + grow_h), 239)
	return left, top, right - left + 1, bottom - top + 1
def findFacefeatureFollow():
	"""Face landmark mode: detect faces with YOLO2, then run a 68-point
	landmark model on each face crop; draws the points and sends the
	last face's bounding box over UART."""
	global currentFuncTag
	send_x = 0
	send_y = 0
	send_w = 0
	send_h = 0
	send_msg = ""
	lcd.clear()
	lcd.draw_string(0, 0, "findFacefeatureFollow")
	sensor.reset()
	sensor.set_pixformat(sensor.RGB565)
	sensor.set_framesize(sensor.QVGA)
	sensor.skip_frames(time = 100)
	clock = time.clock()
	anchor = (0.1075, 0.126875, 0.126875, 0.175, 0.1465625, 0.2246875, 0.1953125, 0.25375, 0.2440625, 0.351875, 0.341875, 0.4721875, 0.5078125, 0.6696875, 0.8984375, 1.099687, 2.129062, 2.425937)
	kpu = KPU()
	kpu.load_kmodel("/sd/KPU/yolo_face_detect/face_detect_320x240.kmodel")
	kpu.init_yolo2(anchor, anchor_num=9, img_w=320, img_h=240, net_w=320 , net_h=240 ,layer_w=10 ,layer_h=8, threshold=0.7, nms_value=0.2, classes=1)
	lm68_kpu = KPU()
	print("ready load model")
	lm68_kpu.load_kmodel("/sd/KPU/face_detect_with_68landmark/landmark68.kmodel")
	# Warm-up snapshot before entering the loop.
	img = sensor.snapshot()
	print("img:",img)
	try:
		while currentFuncTagArray[currentFuncTag] == FuncTag.FIND_FACE_FEATURE:
			tag = uart_read()
			uartTag(tag)
			clock.tick()
			img = sensor.snapshot()
			try:
				kpu.run_with_output(img)
			except ValueError as e:
				# Bad/incompatible frame; skip it rather than abort the mode.
				print("img input error.")
				continue
			dect = kpu.regionlayer_yolo2()
			fps = clock.fps()
			if len(dect) > 0:
				for l in dect :
					# Slightly enlarge the detector box before cropping so
					# the landmark model sees the whole face.
					x1, y1, cut_img_w, cut_img_h = extend_box(l[0], l[1], l[2], l[3], scale=0.08)
					face_cut = img.cut(x1, y1, cut_img_w, cut_img_h)
					img.draw_rectangle(l[0],l[1],l[2],l[3], color=(0, 255, 0))
					face_cut_128 = face_cut.resize(128, 128)
					face_cut_128.pix_to_ai()
					out = lm68_kpu.run_with_output(face_cut_128, getlist=True)
					send_x = l[0]
					send_y = l[1]
					send_w = l[2]
					send_h = l[3]
					# Model emits 68 (x, y) pairs in crop-normalized logits;
					# sigmoid maps them back into the crop, then offset to frame.
					for j in range(68):
						x = int(KPU.sigmoid(out[2 * j])*cut_img_w + x1)
						y = int(KPU.sigmoid(out[2 * j + 1])*cut_img_h + y1)
						a = img.draw_circle(x, y, 2, color=(0, 0, 255), fill=True)
					# Free the crops promptly — RAM is tight on this device.
					del (face_cut_128)
					del (face_cut)
				send_data(send_x,send_y,send_w,send_h,None,currentFuncTagArray[currentFuncTag])
			img.draw_string(0, 0, "%2.1ffps" %(fps), color=(0, 60, 255), scale=2.0)
			lcd.display(img)
	except Exception as e:
		raise e
	finally:
		kpu.deinit()
		lm68_kpu.deinit()
def findFacemaskFollow():
	"""Mask detection mode: 2-class YOLO2 (mask / no mask).  Draws each
	face green ("with mask", payload "Y") or red ("without mask",
	payload "N") and sends the last face's box + flag over UART."""
	global currentFuncTag
	send_x = 0
	send_y = 0
	send_w = 0
	send_h = 0
	send_msg = ""
	lcd.clear()
	lcd.draw_string(0, 0, "findFacemaskFollow")
	sensor.reset()
	sensor.set_pixformat(sensor.RGB565)
	sensor.set_framesize(sensor.QVGA)
	sensor.skip_frames(time = 1000)
	clock = time.clock()
	anchor = (0.156250, 0.222548, 0.361328, 0.489583, 0.781250, 0.983133, 1.621094, 1.964286, 3.574219, 3.94000)
	# Model wants a 320x256 input; frames are copied into this buffer.
	img_obj = image.Image(size=(320,256), copy_to_fb=False)
	kpu = KPU()
	kpu.load_kmodel("/sd/KPU/face_mask_detect/detect_5.kmodel")
	kpu.init_yolo2(anchor, anchor_num=5, img_w=320, img_h=240, net_w=320 , net_h=256 ,layer_w=10 ,layer_h=8, threshold=0.7, nms_value=0.4, classes=2)
	try:
		while currentFuncTagArray[currentFuncTag] == FuncTag.FIND_FACE_MASK:
			tag = uart_read()
			uartTag(tag)
			clock.tick()
			img = sensor.snapshot()
			img_obj.draw_image(img, 0,0)
			img_obj.pix_to_ai()
			kpu.run_with_output(img_obj)
			dect = kpu.regionlayer_yolo2()
			fps = clock.fps()
			if len(dect) > 0:
				for l in dect :
					send_x = l[0]
					send_y = l[1]
					send_w = l[2]
					send_h = l[3]
					# l[4] is the class id: truthy (1) == wearing a mask.
					if l[4] :
						img.draw_rectangle(l[0],l[1],l[2],l[3], color=(0, 255, 0), scale=2)
						img.draw_string(l[0],l[1]-24, "with mask", color=(0, 255, 0), scale=2)
						send_msg = "Y"
					else:
						img.draw_rectangle(l[0],l[1],l[2],l[3], color=(255, 0, 0), scale=2)
						img.draw_string(l[0],l[1]-24, "without mask", color=(255, 0, 0), scale=2)
						send_msg = "N"
				send_data(send_x,send_y,send_w,send_h,send_msg,currentFuncTagArray[currentFuncTag])
			img.draw_string(0, 0, "%2.1ffps" %(fps), color=(0, 60, 128), scale=2.0)
			lcd.display(img)
	except Exception as e:
		raise e
	finally:
		kpu.deinit()
def findObjectFollow():
	"""20-class (VOC) object detection mode: draws each detection with its
	class name and sends the last one's box + label over UART."""
	global currentFuncTag
	send_x = 0
	send_y = 0
	send_w = 0
	send_h = 0
	send_msg = ""
	lcd.clear()
	lcd.draw_string(0, 0, "findObjectFollow")
	sensor.reset()
	sensor.set_pixformat(sensor.RGB565)
	sensor.set_framesize(sensor.QVGA)
	sensor.skip_frames(time = 100)
	clock = time.clock()
	# PASCAL-VOC class names, indexed by the model's class id (l[4]).
	obj_name = ("aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "potted", "sheep", "sofa", "train", "tvmonitor")
	# Model wants a 320x256 input; frames are copied into this buffer.
	img_obj = image.Image(size=(320,256))
	anchor = (1.3221, 1.73145, 3.19275, 4.00944, 5.05587, 8.09892, 9.47112, 4.84053, 11.2364, 10.0071)
	kpu = KPU()
	kpu.load_kmodel("/sd/KPU/voc20_object_detect/voc20_detect.kmodel")
	kpu.init_yolo2(anchor, anchor_num=5, img_w=320, img_h=240, net_w=320 , net_h=256 ,layer_w=10 ,layer_h=8, threshold=0.7, nms_value=0.2, classes=20)
	try:
		while currentFuncTagArray[currentFuncTag] == FuncTag.FIND_OBJECT:
			tag = uart_read()
			uartTag(tag)
			clock.tick()
			img = sensor.snapshot()
			img_obj.draw_image(img, 0,0)
			img_obj.pix_to_ai()
			kpu.run_with_output(img_obj)
			dect = kpu.regionlayer_yolo2()
			fps = clock.fps()
			if len(dect) > 0:
				for l in dect :
					img.draw_rectangle(l[0],l[1],l[2],l[3], color=(0, 255, 0))
					img.draw_string(l[0],l[1], obj_name[l[4]], color=(0, 255, 0), scale=1.5)
					send_x = l[0]
					send_y = l[1]
					send_w = l[2]
					send_h = l[3]
					send_msg = obj_name[l[4]]
				send_data(send_x,send_y,send_w,send_h,send_msg,currentFuncTagArray[currentFuncTag])
			img.draw_string(0, 0, "%2.1ffps" %(fps), color=(0, 60, 128), scale=2.0)
			lcd.display(img)
	except Exception as e:
		raise e
	finally:
		kpu.deinit()
def findSelfLearning():
	"""Self-learning classifier mode, delegated to the autonomous module.

	Each iteration forwards the key-2 "pressed" flag to
	autonomousLearning() (which uses it to capture/learn samples) and
	then clears the flag.  The learning KPU is always released on exit.
	"""
	global currentFuncTag,key2_pin_down
	lcd.clear()
	lcd.draw_string(0, 0, "findSelfLearning")
	startKpu(currentFuncTagArray[currentFuncTag])
	# try/finally guarantees endKpu(); the former
	# `except Exception as e: raise e` wrapper was a no-op and is gone.
	try:
		while currentFuncTagArray[currentFuncTag] == FuncTag.FIND_SELF_LEARNING:
			tag = uart_read()
			uartTag(tag)
			autonomousLearning(key2_pin_down)
			key2_pin_down = False
	finally:
		endKpu()
def findDigitalFollow():
	"""Handwritten-digit (MNIST) recognition mode.

	Each frame is grayscaled, resized, inverted, character-stretched and
	run through the MNIST model; the argmax digit is always sent over
	UART.  On screen the digit is shown unconditionally except for 1 and
	5, which require sigmoid confidence > 0.999 (they false-trigger
	easily).  The KPU is always released on exit.
	"""
	global currentFuncTag
	lcd.clear()
	lcd.draw_string(0, 0, "findDigitalFollow")
	sensor.reset()
	sensor.set_pixformat(sensor.RGB565)
	sensor.set_framesize(sensor.QVGA)
	sensor.set_windowing((224, 224))
	sensor.skip_frames(time = 100)
	kpu = KPU()
	kpu.load_kmodel("/sd/KPU/mnist/uint8_mnist_cnn_model.kmodel")
	# try/finally guarantees kpu.deinit(); the former
	# `except Exception as e: raise e` wrapper was a no-op and is gone.
	try:
		while currentFuncTagArray[currentFuncTag] == FuncTag.FIND_DIGITAL:
			tag = uart_read()
			uartTag(tag)
			gc.collect()
			img = sensor.snapshot()
			img_mnist1=img.to_grayscale(1)
			img_mnist2=img_mnist1.resize(100,100)
			# MNIST digits are white-on-black; camera image is the inverse.
			img_mnist2.invert()
			img_mnist2.strech_char(1)
			img_mnist2.pix_to_ai()
			out = kpu.run_with_output(img_mnist2, getlist=True)
			max_mnist = max(out)
			index_mnist = out.index(max_mnist)
			send_data(0,0,0,0,str(index_mnist),currentFuncTagArray[currentFuncTag])
			score = KPU.sigmoid(max_mnist)
			# Three copy-pasted display branches collapsed into one
			# equivalent condition: 1 and 5 need high confidence, the
			# rest are always shown.
			if index_mnist not in (1, 5) or score > 0.999:
				display_str = "num: %d" % index_mnist
				print(display_str, score)
				img.draw_string(4,3,display_str,color=(0,0,0),scale=2)
			lcd.display(img)
	finally:
		kpu.deinit()
def findFaceRecog():
	"""Face recognition mode with on-device registration.

	Pipeline per detected face: YOLO2 detector -> 5-point landmark model
	-> affine alignment into a 64x64 crop -> feature-extraction model ->
	kpu.feature_compare() against features persisted on SD.  Key 1
	registers the current (unrecognized) face; key 2 wipes the stored
	database.  Sends the face box plus matched person id ("0" = unknown)
	over UART.
	"""
	global currentFuncTag,key1_pin_down,key2_pin_down
	send_x = 0
	send_y = 0
	send_w = 0
	send_h = 0
	send_msg = ""
	lcd.clear()
	lcd.draw_string(0, 0, "findFaceRecog")
	sensor.reset()
	sensor.set_pixformat(sensor.RGB565)
	sensor.set_framesize(sensor.QVGA)
	sensor.skip_frames(time = 100)
	clock = time.clock()
	# Target buffer for the aligned face fed to the feature model.
	feature_img = image.Image(size=(64,64), copy_to_fb=False)
	feature_img.pix_to_ai()
	FACE_PIC_SIZE = 64
	# Canonical 5-point landmark positions (eyes, nose tip, mouth
	# corners) of a 112x112 aligned face, rescaled to FACE_PIC_SIZE.
	dst_point =[(int(38.2946 * FACE_PIC_SIZE / 112), int(51.6963 * FACE_PIC_SIZE / 112)),
				(int(73.5318 * FACE_PIC_SIZE / 112), int(51.5014 * FACE_PIC_SIZE / 112)),
				(int(56.0252 * FACE_PIC_SIZE / 112), int(71.7366 * FACE_PIC_SIZE / 112)),
				(int(41.5493 * FACE_PIC_SIZE / 112), int(92.3655 * FACE_PIC_SIZE / 112)),
				(int(70.7299 * FACE_PIC_SIZE / 112), int(92.2041 * FACE_PIC_SIZE / 112)) ]
	anchor = (0.1075, 0.126875, 0.126875, 0.175, 0.1465625, 0.2246875, 0.1953125, 0.25375, 0.2440625, 0.351875, 0.341875, 0.4721875, 0.5078125, 0.6696875, 0.8984375, 1.099687, 2.129062, 2.425937)
	kpu = KPU()
	kpu.load_kmodel("/sd/KPU/yolo_face_detect/face_detect_320x240.kmodel")
	kpu.init_yolo2(anchor, anchor_num=9, img_w=320, img_h=240, net_w=320 , net_h=240 ,layer_w=10 ,layer_h=8, threshold=0.7, nms_value=0.2, classes=1)
	ld5_kpu = KPU()
	print("ready load model")
	ld5_kpu.load_kmodel("/sd/KPU/face_recognization/ld5.kmodel")
	fea_kpu = KPU()
	print("ready load model")
	fea_kpu.load_kmodel("/sd/KPU/face_recognization/feature_extraction.kmodel")
	# Feature database persisted on SD as {'record': [feature, ...]};
	# list index + 1 is the reported person id.
	record_ftrs_json_path = 'record_ftrs_json_path.json'
	a = SD_read('record_ftrs_json_path.json')
	jsonRecord = {}
	if a != '':
		jsonRecord = ujson.loads(a)
	if 'record' in jsonRecord:
		record_ftrs = jsonRecord['record']
	else:
		record_ftrs = []
	# Minimum feature_compare score to accept a match.
	THRESHOLD = 90.5
	recog_flag = False
	def extend_box(x, y, w, h, scale):
		# NOTE(review): local duplicate shadowing the module-level
		# extend_box; kept byte-identical here.
		x1_t = x - scale*w
		x2_t = x + w + scale*w
		y1_t = y - scale*h
		y2_t = y + h + scale*h
		x1 = int(x1_t) if x1_t>1 else 1
		x2 = int(x2_t) if x2_t<320 else 319
		y1 = int(y1_t) if y1_t>1 else 1
		y2 = int(y2_t) if y2_t<240 else 239
		cut_img_w = x2-x1+1
		cut_img_h = y2-y1+1
		return x1, y1, cut_img_w, cut_img_h
	try:
		while currentFuncTagArray[currentFuncTag] == FuncTag.FIND_FACE_RECOGNIZE:
			tag = uart_read()
			uartTag(tag)
			gc.collect()
			clock.tick()
			img = sensor.snapshot()
			kpu.run_with_output(img)
			dect = kpu.regionlayer_yolo2()
			fps = clock.fps()
			if len(dect) > 0:
				for l in dect :
					x1, y1, cut_img_w, cut_img_h= extend_box(l[0], l[1], l[2], l[3], scale=0)
					face_cut = img.cut(x1, y1, cut_img_w, cut_img_h)
					face_cut_128 = face_cut.resize(128, 128)
					face_cut_128.pix_to_ai()
					# 5-point landmarks in crop-normalized logits.
					out = ld5_kpu.run_with_output(face_cut_128, getlist=True)
					face_key_point = []
					for j in range(5):
						x = int(KPU.sigmoid(out[2 * j])*cut_img_w + x1)
						y = int(KPU.sigmoid(out[2 * j + 1])*cut_img_h + y1)
						face_key_point.append((x,y))
					# Warp the face so its landmarks land on dst_point,
					# then extract its feature vector.
					T = image.get_affine_transform(face_key_point, dst_point)
					image.warp_affine_ai(img, feature_img, T)
					feature = fea_kpu.run_with_output(feature_img, get_feature = True)
					del face_key_point
					# Compare against every registered feature.
					scores = []
					for j in range(len(record_ftrs)):
						score = kpu.feature_compare(record_ftrs[j], feature)
						scores.append(score)
					record_ftrs_id = -1
					if len(scores):
						max_score = max(scores)
						index = scores.index(max_score)
						record_ftrs_id = index
						if max_score > THRESHOLD:
							img.draw_string(0, 195, "persion:%d,score:%2.1f" %(index+1, max_score), color=(0, 255, 0), scale=2)
							recog_flag = True
						else:
							img.draw_string(0, 195, "unregistered,score:%2.1f" %(max_score), color=(255, 0, 0), scale=2)
					del scores
					# Key 2: wipe the stored face database.
					if key2_pin_down:
						record_ftrs = []
						jsonData = {'record':record_ftrs}
						SD_write(record_ftrs_json_path,ujson.dumps(jsonData))
						key2_pin_down = False
					send_x = l[0]
					send_y = l[1]
					send_w = l[2]
					send_h = l[3]
					if recog_flag:
						img.draw_rectangle(l[0],l[1],l[2],l[3], color=(0, 255, 0))
						recog_flag = False
						send_msg = str(record_ftrs_id+1)
					else:
						img.draw_rectangle(l[0],l[1],l[2],l[3], color=(255, 255, 255))
						send_msg = str(0)
						# Key 1: register this (unrecognized) face.
						if key1_pin_down:
							record_ftrs.append(feature)
							jsonData = {'record':record_ftrs}
							SD_write(record_ftrs_json_path,ujson.dumps(jsonData))
							key1_pin_down = False
					# Free the crops promptly — RAM is tight on this device.
					del (face_cut_128)
					del (face_cut)
				send_data(send_x,send_y,send_w,send_h,send_msg,currentFuncTagArray[currentFuncTag])
			img.draw_string(0, 0, "%2.1ffps" %(fps), color=(0, 60, 255), scale=2.0)
			img.draw_string(0, 215, "press key1 to regist face", color=(255, 100, 0), scale=2.0)
			lcd.display(img)
	except Exception as e:
		raise e
	finally:
		kpu.deinit()
		ld5_kpu.deinit()
		fea_kpu.deinit()
def findRedFollow():
	"""Red-blob tracking mode: each frame, find the largest red blob with
	bounding-box area >= 200, mark it and send its box over UART (no
	payload)."""
	global currentFuncTag
	send_x = 0
	send_y = 0
	send_w = 0
	send_h = 0
	lcd.clear()
	lcd.draw_string(0, 0, "findRedFollow")
	sensor.reset()
	sensor.set_pixformat(sensor.RGB565)
	sensor.set_framesize(sensor.QVGA)
	sensor.skip_frames(time = 100)
	# Lock gain/white-balance so the color threshold stays valid.
	sensor.set_auto_gain(False)
	sensor.set_auto_whitebal(False)
	clock = time.clock()
	# LAB threshold for red (an unused centered-ROI variable was removed).
	color_thresholds = [
		(20,80,20,62,20,35),
	]
	print("Start Color Recognition...")
	while currentFuncTagArray[currentFuncTag] == FuncTag.FIND_RED_FOLLOW:
		tag = uart_read()
		uartTag(tag)
		clock.tick()
		img = sensor.snapshot()
		fps = clock.fps()
		for threshold in color_thresholds:
			blobs = img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10)
			if blobs:
				# Pick the largest blob by bounding-box area; ignore it if
				# still too small to be a reliable target.
				blob_max = max(blobs, key=lambda b: b.w()*b.h())
				if blob_max.w()*blob_max.h() < 200:
					continue
				img.draw_rectangle(blob_max.rect())
				img.draw_cross(blob_max.cx(), blob_max.cy())
				img.draw_string(blob_max.cx() + 10, blob_max.cy() - 10, 'Red', color=(255, 255, 255))
				send_x = blob_max.x()
				send_y = blob_max.y()
				send_w = blob_max.w()
				send_h = blob_max.h()
				send_data(send_x,send_y,send_w,send_h,None,currentFuncTagArray[currentFuncTag])
		img.draw_string(0, 0, "%2.1ffps" %(fps), color=(0, 60, 255), scale=2.0)
		lcd.display(img)
def findSignpostFollow():
	"""Traffic-signpost card detection mode (YOLO2): draws each detection
	with its label and sends the last one's box + label over UART."""
	global currentFuncTag
	send_x = 0
	send_y = 0
	send_w = 0
	send_h = 0
	send_msg = ""
	lcd.clear()
	lcd.draw_string(0, 0, "findSignpostFollow")
	sensor.reset()
	sensor.set_pixformat(sensor.RGB565)
	sensor.set_framesize(sensor.QVGA)
	sensor.skip_frames(time = 100)
	clock = time.clock()
	# Class names, indexed by the model's class id (l[4]).
	labels = ["redblock", "straight", "left", "right", "round", "park", "stop", "redlight", "yellowlight", "greenlight"]
	img_obj = image.Image(size=(320,240))
	anchor = (2.16, 1.62, 2.09, 2.03, 2.53, 2.56, 2.97, 3.06, 3.75, 3.84)
	kpu = KPU()
	kpu.load_kmodel('/sd/KPU/signpost_card/signpost_card.kmodel')
	kpu.init_yolo2(anchor, anchor_num=(int)(len(anchor)/2), img_w=320, img_h=240, net_w=320 , net_h=240 ,layer_w=10 ,layer_h=8, threshold=0.85, nms_value=0.3, classes=len(labels))
	try:
		while currentFuncTagArray[currentFuncTag] == FuncTag.FIND_SIGNPOST_FOLLOW:
			tag = uart_read()
			uartTag(tag)
			gc.collect()
			clock.tick()
			img = sensor.snapshot()
			kpu.run_with_output(img)
			dect = kpu.regionlayer_yolo2()
			fps = clock.fps()
			if len(dect) > 0:
				for l in dect :
					img.draw_rectangle(l[0],l[1],l[2],l[3], color=(0, 255, 0))
					img.draw_string(l[0],l[1], labels[l[4]], color=(0, 255, 0), scale=1.5)
					send_x = l[0]
					send_y = l[1]
					send_w = l[2]
					send_h = l[3]
					send_msg = labels[l[4]]
				send_data(send_x,send_y,send_w,send_h,send_msg,currentFuncTagArray[currentFuncTag])
			img.draw_string(0, 0, "%2.1ffps" %(fps), color=(0, 60, 128), scale=2.0)
			lcd.display(img)
	except Exception as e:
		raise e
	finally:
		kpu.deinit()
def findDigitalCard():
	"""Digit-card (1..5) detection mode (YOLO2): draws each detection with
	its label and score and sends the box + label over UART.

	Cleanups vs. the original: removed unused locals (send buffers, fps,
	unused draw return values), a pointless `del info`, and the no-op
	`except Exception as e: raise e` wrapper.
	"""
	global currentFuncTag
	lcd.clear()
	lcd.draw_string(0, 0, "findDigitalCard")
	sensor.reset()
	sensor.set_pixformat(sensor.RGB565)
	sensor.set_framesize(sensor.QVGA)
	sensor.skip_frames(time = 1000)
	kpu = KPU()
	kpu.load_kmodel('/sd/KPU/find_digital_card/find_digital_card.kmodel')
	# Class names, indexed by the model's class id (det[4]).
	labels = ["1", "2", "3", "4", "5"]
	anchor = (2.25, 2.16, 2.78, 2.69, 3.31, 3.22, 3.72, 3.66, 4.19, 4.16)
	kpu.init_yolo2(anchor, anchor_num=(int)(len(anchor)/2), img_w=320, img_h=240, net_w=320 , net_h=240 ,layer_w=10 ,layer_h=8, threshold=0.9, nms_value=0.3, classes=len(labels))
	try:
		while currentFuncTagArray[currentFuncTag] == FuncTag.FIND_DIGITAL_CARD:
			tag = uart_read()
			uartTag(tag)
			gc.collect()
			img = sensor.snapshot()
			kpu.run_with_output(img)
			dect = kpu.regionlayer_yolo2()
			for det in dect:
				# One UART message per detection, then draw box + "label score".
				send_data(det[0],det[1],det[2],det[3],labels[det[4]],currentFuncTagArray[currentFuncTag])
				img.draw_rectangle(det[0],det[1],det[2],det[3],color=(0,255,0))
				info = "%s  %.1f" % (labels[det[4]], det[5])
				img.draw_string(det[0],det[1],info,color=(255,0,0),scale=2.0)
			lcd.display(img)
	finally:
		kpu.deinit()
def garbageSorting():
	"""Garbage-item detection mode (12-class YOLO2): draws each detection
	with its label and score and sends the box + label over UART.

	Fixes vs. the original: the LCD banner wrongly said "findMultiColor"
	(copy-paste from that mode); also removed unused locals, a pointless
	`del info`, and the no-op `except Exception as e: raise e` wrapper.
	"""
	global currentFuncTag
	lcd.clear()
	lcd.draw_string(0, 0, "garbageSorting")
	sensor.reset()
	sensor.set_pixformat(sensor.RGB565)
	sensor.set_framesize(sensor.QVGA)
	sensor.skip_frames(time = 1000)
	kpu = KPU()
	kpu.load_kmodel('/sd/KPU/garbage_sorting/garbage_sorting.kmodel')
	# Class names, indexed by the model's class id (det[4]).
	labels = ["oral_bottle", "color_pen", "battery", "brush", "plastic_bottle", "umbrella", "banana_peel", "ketchup", "bone", "chopstick", "plate", "cigarette_end"]
	anchor = (1.38, 1.41, 2.00, 2.03, 2.78, 2.81, 3.72, 3.66, 5.66, 5.47)
	kpu.init_yolo2(anchor, anchor_num=(int)(len(anchor)/2), img_w=320, img_h=240, net_w=320 , net_h=240 ,layer_w=10 ,layer_h=8, threshold=0.6, nms_value=0.3, classes=len(labels))
	try:
		while currentFuncTagArray[currentFuncTag] == FuncTag.GARBAGE_SORTING:
			tag = uart_read()
			uartTag(tag)
			gc.collect()
			img = sensor.snapshot()
			kpu.run_with_output(img)
			dect = kpu.regionlayer_yolo2()
			for det in dect:
				# One UART message per detection, then draw box + "label score".
				send_data(det[0],det[1],det[2],det[3],labels[det[4]],currentFuncTagArray[currentFuncTag])
				img.draw_rectangle(det[0],det[1],det[2],det[3],color=(0,255,0))
				info = "%s  %.1f" % (labels[det[4]], det[5])
				img.draw_string(det[0],det[1],info,color=(255,0,0),scale=2.0)
			lcd.display(img)
	finally:
		kpu.deinit()
def findMultiColor():
	"""Multi-color blob tracking: for each of five LAB color thresholds,
	find the largest matching blob (bounding-box area >= 2000), mark it
	and send its box + color name over UART.

	Cleanups vs. the original: removed the unused centered-ROI variable,
	unused send locals, an unused clock, and the unused key1_pin_down
	global declaration.
	"""
	global currentFuncTag
	lcd.clear()
	lcd.draw_string(0, 0, "findMultiColor")
	sensor.reset()
	sensor.set_pixformat(sensor.RGB565)
	sensor.set_framesize(sensor.QVGA)
	sensor.skip_frames(time = 100)
	# Lock gain/white-balance so the color thresholds stay valid.
	sensor.set_auto_gain(False)
	sensor.set_auto_whitebal(False)
	# LAB thresholds, parallel with color_strings below.
	color_thresholds = [
		(15,80,15,62,15,35),
		(14, 61, -39, -6, 0, 14),
		(21,50,-7,8,-35,-11),
		(65, 78, -10, -5, 38, 50),
		(20, 50, 17, 37, -34, -14),
	]
	color_strings = ['Red', 'Green', 'Blue', 'Yellow', 'purple']
	print("Start Color Recognition...")
	while currentFuncTagArray[currentFuncTag] == FuncTag.FIND_MULTI_COLOR:
		tag = uart_read()
		uartTag(tag)
		img = sensor.snapshot()
		for color_idx, threshold in enumerate(color_thresholds):
			blobs = img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10)
			if not blobs:
				continue
			# Largest blob by bounding-box area for this color; skip if
			# it is too small to be a reliable target.
			blob_max = max(blobs, key=lambda b: b.w()*b.h())
			if blob_max.w()*blob_max.h() < 2000:
				continue
			img.draw_rectangle(blob_max.rect())
			img.draw_cross(blob_max.cx(), blob_max.cy())
			img.draw_string(blob_max.cx() + 10, blob_max.cy() - 10, color_strings[color_idx], color=(255, 255, 255))
			send_data(blob_max.x(),blob_max.y(),blob_max.w(),blob_max.h(),color_strings[color_idx],currentFuncTagArray[currentFuncTag])
		lcd.display(img)
# Main dispatcher: run whichever mode the current tag selects.  Each mode
# function blocks until the tag changes (via UART or a key), then returns
# here so the next mode can start.
_MODE_HANDLERS = {
	FuncTag.COLOR_RECOGNITION: colorRecognitionFollow,
	FuncTag.FIND_BARCODES: findBarcodesFollow,
	FuncTag.FIND_QRCODES: findQrcodesFollow,
	FuncTag.FIND_APRILTAGS: findApriltagsFollow,
	FuncTag.FIND_FACE_YOLO: findFaceYolo,
	FuncTag.FIND_FACE_FEATURE: findFacefeatureFollow,
	FuncTag.FIND_FACE_MASK: findFacemaskFollow,
	FuncTag.FIND_OBJECT: findObjectFollow,
	FuncTag.FIND_SELF_LEARNING: findSelfLearning,
	FuncTag.FIND_DIGITAL: findDigitalFollow,
	FuncTag.FIND_FACE_RECOGNIZE: findFaceRecog,
	FuncTag.FIND_RED_FOLLOW: findRedFollow,
	FuncTag.FIND_SIGNPOST_FOLLOW: findSignpostFollow,
	FuncTag.FIND_DIGITAL_CARD: findDigitalCard,
	FuncTag.GARBAGE_SORTING: garbageSorting,
	FuncTag.FIND_MULTI_COLOR: findMultiColor,
}
while True:
	handler = _MODE_HANDLERS.get(currentFuncTagArray[currentFuncTag])
	if handler:
		handler()