import argparse
import time
import cv2
import numpy as np
import math
#import joblib
#from classier import get_joint_theta, joint_linear_k
from pose.estimator import TfPoseEstimator
from pose.networks import get_graph_path, model_wh

fps_time = 0

def str2bool(v):
    """Interpret a string as a boolean flag.

    Returns True when *v* matches (case-insensitively) one of
    "yes", "true", "t" or "1"; any other string yields False.
    Intended for use as an argparse ``type=`` converter.
    """
    truthy = ("yes", "true", "t", "1")
    return v.lower() in truthy

def gstreamer_pipeline(
        capture_width=1280,
        capture_height=720,
        display_width=640,
        display_height=480,
        framerate=60,
        flip_method=0,
):
    """Build a GStreamer pipeline string for a Jetson CSI camera.

    The pipeline captures NV12 frames from ``nvarguscamerasrc`` at the
    requested capture size/framerate, optionally flips/rotates them
    (``flip_method``), scales to the display size, and converts to BGR
    so the ``appsink`` output can be consumed directly by OpenCV.
    """
    return (
        "nvarguscamerasrc ! "
        "video/x-raw(memory:NVMM), "
        f"width=(int){capture_width}, height=(int){capture_height}, "
        f"format=(string)NV12, framerate=(fraction){framerate}/1 ! "
        f"nvvidconv flip-method={flip_method} ! "
        f"video/x-raw, width=(int){display_width}, height=(int){display_height}, "
        "format=(string)BGRx ! "
        "videoconvert ! "
        "video/x-raw, format=(string)BGR ! appsink"
    )

# Lazily-built pose estimator shared across calls: constructing the TF graph
# is far too expensive to repeat for every frame.
_pose_estimator = None

def fuc(img):
    """Run pose estimation on one BGR frame and draw skeletons + an FPS label.

    Parameters
    ----------
    img : numpy.ndarray
        BGR image (e.g. a frame from ``cv2.VideoCapture``).  Annotated
        in place (``imgcopy=False``).

    Returns
    -------
    numpy.ndarray
        The same array with detected skeletons and an 'FPS: x.xx' overlay.
    """
    global _pose_estimator
    start = time.time()
    if _pose_estimator is None:
        # Bug fix: the estimator was rebuilt on every call, reloading the
        # whole model per frame.  Build it once and reuse it.
        w, h = model_wh('432x368')
        target = (w, h) if w > 0 and h > 0 else (432, 368)
        _pose_estimator = TfPoseEstimator(get_graph_path('mobilenet_thin'),
                                          target_size=target)
    humans = _pose_estimator.inference(img)
    image = TfPoseEstimator.draw_humans(img, humans, imgcopy=False)
    elapsed = time.time() - start
    # Bug fix: the original drew the raw elapsed time (seconds) under an
    # "FPS" label.  FPS is the reciprocal of the per-frame latency.
    fps = 1.0 / elapsed if elapsed > 0 else 0.0
    cv2.putText(image, 'FPS: %.2f' % fps, (30, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
    # Return the annotated frame (same array as img, since imgcopy=False).
    return image


