# -*- coding: utf-8 -*-

# from mtcnn.base_function import *
import numpy as np
import cv2
from mtcnn.mtcnn import mtcnn
import math
 
if __name__ == "__main__":

    # Load the MTCNN face detector (weights are loaded inside the constructor).
    m_mtcnn = mtcnn()
    print("Load model end!!!")

    image_rgb = "./timg.jpg"
    image_gray = "./timg1.jpg"
    # Run detection on the paired RGB/gray images. Based on the indexing below:
    # `bounding_boxes` is an (n, >=4) array of [x1, y1, x2, y2, ...] per face,
    # and `points` is a (10, n) array of landmarks — 5 x-coordinates followed
    # by 5 y-coordinates per face. (Shapes assumed from usage — TODO confirm
    # against mtcnn.detect_rgb_gray_face.)
    bounding_boxes, points = m_mtcnn.detect_rgb_gray_face(image_rgb, image_gray)
    draw = cv2.imread(image_rgb)

    # detect_rgb_gray_face presumably returns a non-array sentinel when no
    # face is found, hence the ndarray check.
    if isinstance(bounding_boxes, np.ndarray):
        nrof_faces = bounding_boxes.shape[0]
        print('Total %d face(s) detected' % nrof_faces)
        for i in range(nrof_faces):
            c_box = bounding_boxes[i]
            c_points = points[:, i]
            # OpenCV drawing functions require integer pixel coordinates;
            # the detector outputs floats.
            x1, y1, x2, y2 = (int(v) for v in c_box[:4])
            cv2.rectangle(draw, (x1, y1), (x2, y2), (0, 255, 0), 3)
            for points_index in range(5):
                c_point_x = int(c_points[points_index])        # x of landmark k
                c_point_y = int(c_points[points_index + 5])    # y of landmark k
                cv2.circle(draw, (c_point_x, c_point_y), 5, (0, 0, 255), 4)
            print(c_box)
            print(c_points)
            print("*********************************************")
        cv2.imwrite('timg2.jpg', draw)
    else:
        print('No face detected')