import sys

import numpy as np

sys.path.append("../../../../common")
sys.path.append("../")
project_path = sys.path[0] + "/../"
sys.path.append(project_path)
import datetime

from cameracapture import CameraCapture
import presenteragent.presenter_channel as presenter_channel
from acllite_model import AclLiteModel
from acllite_resource import AclLiteResource
from vgg_ssd import VggSsd
import cv2 as cv
import argparse

# Offline model and presenter-server configuration for the (currently
# disabled) face-detection path; see the commented model code in main().
MODEL_PATH = project_path + "/model/face_detection.om"
MODEL_WIDTH = 304
MODEL_HEIGHT = 300
FACE_DETEC_CONF= project_path + "/scripts/face_detection.conf"
#BODYPOSE_CONF="body_pose.conf"
# Raw camera frame geometry reported to the presenter server.
CAMERA_FRAME_WIDTH = 1280
CAMERA_FRAME_HEIGHT = 720

# Command-line options for the OpenCV pose network (input size and the
# confidence threshold applied to each body-part heat map).
parser = argparse.ArgumentParser()
parser.add_argument('--input', help='Path to image or video. Skip to capture frames from camera')
parser.add_argument('--thr', default=0.2, type=float, help='Threshold value for pose parts heat map')
parser.add_argument('--width', default=368, type=int, help='Resize input to specific width.')
parser.add_argument('--height', default=368, type=int, help='Resize input to specific height.')

args = parser.parse_args()

# OpenPose/MobileNet body-part indices: heat-map channel per keypoint name.
BODY_PARTS = {"Nose": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4,
              "LShoulder": 5, "LElbow": 6, "LWrist": 7, "RHip": 8, "RKnee": 9,
              "RAnkle": 10, "LHip": 11, "LKnee": 12, "LAnkle": 13, "REye": 14,
              "LEye": 15, "REar": 16, "LEar": 17, "Background": 18}

# Skeleton edges drawn between detected keypoints.
POSE_PAIRS = [["Neck", "RShoulder"], ["Neck", "LShoulder"], ["RShoulder", "RElbow"],
              ["RElbow", "RWrist"], ["LShoulder", "LElbow"], ["LElbow", "LWrist"],
              ["Neck", "RHip"], ["RHip", "RKnee"], ["RKnee", "RAnkle"], ["Neck", "LHip"],
              ["LHip", "LKnee"], ["LKnee", "LAnkle"], ["Neck", "Nose"], ["Nose", "REye"],
              ["REye", "REar"], ["Nose", "LEye"], ["LEye", "LEar"]]

inWidth = args.width
inHeight = args.height
# TensorFlow pose-estimation graph, loaded from the current working
# directory — the script must be launched from the directory holding it.
net = cv.dnn.readNetFromTensorflow("graph_opt.pb")

# When True, paintPose() draws the skeleton and timing overlay on the frame.
paintSketchMode = True
# Per-frame keypoint lists accumulated by paintPose(); persisted by main().
poses_trace = []

def paintPose(frame):
    """Run the pose network on *frame*, record the detected keypoints in
    the global ``poses_trace`` and, when ``paintSketchMode`` is on, draw
    the skeleton and inference time onto the frame.

    Returns:
        (frame, out): the (possibly annotated) frame and the raw network
        output restricted to the first 19 heat-map channels.
    """
    height, width = frame.shape[:2]

    blob = cv.dnn.blobFromImage(frame, 1.0, (inWidth, inHeight),
                                (127.5, 127.5, 127.5), swapRB=True, crop=False)
    net.setInput(blob)
    # MobileNet output is [1, 57, -1, -1]; only the 19 body-part heat
    # maps are needed.
    out = net.forward()[:, :19, :, :]

    assert (len(BODY_PARTS) == out.shape[1])

    grid_h, grid_w = out.shape[2], out.shape[3]
    points = []
    for part in range(len(BODY_PARTS)):
        # One heat map per body part. Only the global maximum is taken,
        # so a single pose at a time can be detected.
        _, conf, _, peak = cv.minMaxLoc(out[0, part, :, :])
        if conf > args.thr:
            # Map the heat-map grid coordinate back to frame pixels.
            points.append((int(width * peak[0] / grid_w),
                           int(height * peak[1] / grid_h)))
        else:
            points.append(None)

    poses_trace.append(points)

    if paintSketchMode:
        for partFrom, partTo in POSE_PAIRS:
            assert (partFrom in BODY_PARTS)
            assert (partTo in BODY_PARTS)

            src = points[BODY_PARTS[partFrom]]
            dst = points[BODY_PARTS[partTo]]
            # Draw the limb only when both endpoints were confident.
            if src and dst:
                cv.line(frame, src, dst, (0, 255, 0), 3)
                cv.ellipse(frame, src, (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED)
                cv.ellipse(frame, dst, (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED)

        # Overlay the inference time in milliseconds.
        t, _ = net.getPerfProfile()
        freq = cv.getTickFrequency() / 1000
        cv.putText(frame, '%.2fms' % (t / freq), (10, 20), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
    return frame, out


def YUVtoRGB(byteArray, width=1280, height=720):
    """Convert a packed YUV420 semi-planar frame to an RGB image.

    The buffer layout is a full-resolution Y plane followed by an
    interleaved half-resolution chroma plane with V at even offsets and
    U at odd offsets (NV21-style — presumably what the board camera
    emits; verify against the capture driver if colors look swapped).

    Args:
        byteArray: flat uint8 buffer of ``width * height * 3 // 2`` bytes.
        width: frame width in pixels (default 1280, the camera frame size).
        height: frame height in pixels (default 720).

    Returns:
        A ``(height, width, 3)`` uint8 RGB image.
    """
    luma_size = width * height
    Y = np.reshape(byteArray[:luma_size], (height, width))

    # Each chroma sample covers a 2x2 pixel block: duplicate it
    # horizontally (repeat before reshape) and vertically (repeat after).
    chroma = byteArray[luma_size:]
    V = np.repeat(np.reshape(np.repeat(chroma[0::2], 2, 0),
                             (height // 2, width)), 2, 0)
    U = np.repeat(np.reshape(np.repeat(chroma[1::2], 2, 0),
                             (height // 2, width)), 2, 0)

    yuv = (np.dstack([Y, U, V])).astype(np.uint8)
    # Third positional argument kept exactly as in the original call
    # (intended as a 3-channel destination hint).
    return cv.cvtColor(yuv, cv.COLOR_YUV2RGB, 3)



def main():
    """Capture camera frames, run pose estimation on each one and stream
    the annotated JPEG frames to the presenter server.

    The accumulated keypoint trace is written to ``trace.npy`` exactly
    once, when the capture loop terminates (camera failure, encoding
    failure, or an unexpected exception). The original code rewrote the
    whole growing trace after every frame, making total I/O quadratic in
    the number of frames.
    """
    # Initialize ACL runtime resources.
    acl_resource = AclLiteResource()
    acl_resource.init()
    # Detection network instance (currently vgg_ssd). Instantiate a
    # different class here to replace the detection network.
    detect = VggSsd(acl_resource, MODEL_WIDTH, MODEL_HEIGHT)
    # Connect to the presenter server per the configuration file; abort
    # the application if the connection fails.
    chan = presenter_channel.open_channel(FACE_DETEC_CONF)
    if chan is None:
        print("Open presenter channel failed")
        return
    # Open the CAMERA0 camera on the development board.
    cap = CameraCapture(0)

    try:
        while True:
            # Read one raw YUV frame from the camera.
            image = cap.read()
            if image is None:
                print("Get memory from camera failed")
                break

            image = image.byte_data_to_np_array().astype('uint8')
            image = YUVtoRGB(image)
            image = cv.resize(image, (640, 360))
            image = cv.flip(image, 1)  # mirror for a selfie-style view

            image, out = paintPose(image)
            if image is None:
                print("The jpeg image for present is None")
                break

            # Encode the annotated frame and push it to the presenter
            # server (no detection boxes — empty list).
            _, jpeg_image = cv.imencode('.jpg', image)
            chan.send_detection_data(CAMERA_FRAME_WIDTH, CAMERA_FRAME_HEIGHT,
                                     jpeg_image, [])
    finally:
        # Persist the whole keypoint trace once on exit (points may be
        # None, so this is saved as an object array).
        np.save('trace.npy', poses_trace)

# Script entry point.
if __name__ == '__main__':
    main()



#The detection network processes images into model input data
        # model_input = detect.pre_process(image)
        # if model_input is None:
        #     print("Pre process image failed")
        #     break
        # #Send data to offline model inference
        # result = model.execute(model_input)
        # #Detecting network analysis inference output
        # jpeg_image, detection_list = detect.post_process(result, image)
