"""1. Predict with pre-trained Simple Pose Estimation models
==========================================

This article shows how to play with pre-trained Simple Pose models with only a few
lines of code.

First let's import some necessary libraries:
"""

from matplotlib import pyplot as plt
from gluoncv import model_zoo, data, utils
from gluoncv.data.transforms.pose import detector_to_simple_pose, heatmap_to_coord
import numpy as np
import cv2
import os
import math
import imutils
from check_pose import check_pose
from compress import compress


def dist(x1, y1, x2, y2):
    """Return the Euclidean distance between points (x1, y1) and (x2, y2).

    Uses ``np.hypot``, which avoids intermediate overflow/underflow from
    squaring the differences and works element-wise on arrays as well as
    on scalars.
    """
    return np.hypot(x1 - x2, y1 - y2)
######################################################################
# Load a pretrained model
# -------------------------
#
# Let's get a Simple Pose model trained with input images of size 256x192 on MS COCO
# dataset. We pick the one using ResNet-18 V1b as the base model. By specifying
# ``pretrained=True``, it will automatically download the model from the model
# zoo if necessary. For more pretrained models, please refer to
# :doc:`../../model_zoo/index`.
#
# Note that a Simple Pose model takes a top-down strategy to estimate
# human pose in detected bounding boxes from an object detection model.

detector = model_zoo.get_model('yolo3_mobilenet1.0_coco', pretrained=True)
pose_net = model_zoo.get_model('simple_pose_resnet18_v1b', pretrained=True)

# Note that we can reset the classes of the detector to only include
# human, so that the NMS process is faster.

detector.reset_class(["person"], reuse_weights=['person'])

######################################################################
# Pre-process an image for detector, and make inference
# --------------------
#
# Next we download an image, and pre-process with preset data transforms. Here we
# specify that we resize the short edge of the image to 512 px. But you can
# feed an arbitrarily sized image.
#
# This function returns two results. The first is a NDArray with shape
# ``(batch_size, RGB_channels, height, width)``. It can be fed into the
# model directly. The second one contains the images in numpy format so
# they are easy to plot. Since we only loaded a single image, the first
# dimension of `x` is 1.
'''
im_fname = utils.download('https://github.com/dmlc/web-data/blob/master/' +
                          'gluoncv/pose/soccer.png?raw=true',
                          path='soccer.png')
'''

# Directory holding the input frames, named 1.jpg, 2.jpg, ... (see loop below).
#path='/Users/seerking/Desktop/gluoncv/rotated'
path='D:\\gluoncv\\gluoncv\\gluon\\data\\compress'
imgname=os.listdir(path)

# --- State for the repetition counter driven by the main loop below ---
cnt = 0  # number of completed repetitions counted so far
## threshold = required difference between the max and the min shoulder-to-wrist distance
threshold = 80
prev = 0  # distance from the previous frame (only used by the commented-out logic below)
cur = 0   # shoulder-to-wrist distance of the current frame
max_val = 0
min_val = 0
## turn_flag == False means the distance is going from large to small;
## turn_flag == True means the distance is going from small to large.
turn_flag = False
find_max = False  # True once a frame in the "large distance" band has been seen
find_min = False  # True once a frame in the "small distance" band has been seen
complete = True
pose = True  # set False when check_pose flags the posture as incorrect

dist_store=[]  # per-frame distances, kept for offline inspection/plotting
right_turn=1   # 1 while the posture has stayed correct during the current rep
# Main loop: for each frame, run person detection + pose estimation, score the
# posture with check_pose, update the repetition counter from the shoulder-to-
# wrist distance, and write an annotated (then compressed) frame to disk.
for name in range(1,233):
    im_fname = path + '/' + str(name) + '.jpg'
    x, img = data.transforms.presets.ssd.load_test(im_fname, short=512)
    #print('Shape of pre-processed image:', x.shape)

    class_IDs, scores, bounding_boxs = detector(x)

    ######################################################################
    # Process tensor from detector to keypoint network
    # --------------------
    #
    # Next we process the output from the detector.
    #
    # For a Simple Pose network, it expects the input has the size 256x192,
    # and the human is centered. We crop the bounding boxed area
    # for each human, and resize it to 256x192, then finally normalize it.
    #
    # In order to make sure the bounding box has included the entire person,
    # we usually slightly upscale the box size.


    pose_input, upscale_bbox = detector_to_simple_pose(img, class_IDs, scores, bounding_boxs)

    ######################################################################
    # Predict with a Simple Pose network
    # --------------------
    #
    # Now we can make prediction.
    #
    # A Simple Pose network predicts the heatmap for each joint (i.e. keypoint).
    # After the inference we search for the highest value in the heatmap and map it to the
    # coordinates on the original image.
    # Skip frames where no person was detected (pose_input is None then).
    if pose_input is not None:
        predicted_heatmap = pose_net(pose_input)
        pred_coords, confidence = heatmap_to_coord(predicted_heatmap, upscale_bbox)

        ######################################################################
        # Display the pose estimation results
        # ---------------------
        #
        # We can use :py:func:`gluoncv.utils.viz.plot_keypoints` to visualize the
        # results.


        pred_coords=pred_coords.asnumpy()
        # check_pose returns two scores used below to classify the posture;
        # exact semantics live in the project-local check_pose module.
        res=check_pose(pred_coords, False)
        # res1 = check_pose(pred_coords, True)
        #
        # print(res[0], res[1])
        # print(res1[0], res1[1])
        #print(res)
        '''
        plt.axis([0,img.shape[1],img.shape[0],0])
        plt.scatter([x1,x2,x3,x4],[y1,y2,y3,y4])

        '''
        ax = utils.viz.plot_keypoints(img, pred_coords, confidence,
            class_IDs, bounding_boxs, scores,
            box_thresh=0.9, keypoint_thresh=0.2)

        ## scores threshold
        #print(im_fname)
        # Classify the posture from the two check_pose scores; any non-"good"
        # result marks the current repetition as incorrect (right_turn = 0).
        # NOTE(review): the thresholds below look hand-tuned — confirm against
        # check_pose's score definition.
        if((res[0]>0.8 or res[0]<-0.8) and (res[1]>0.9 or res[1]<-0.9)):
            eva='good'
        elif(res[1]>0.5 and res[1]<0.9):
            right_turn=0
            eva='knee too bent'
            pose = False
        elif(res[1]>0.8 or res[1]<-0.8):
            right_turn=0
            eva='buttocks too cocky'
            pose = False
        else:
            right_turn=0
            eva='unknown action'
            pose = False

        ## Tune the threshold by inspecting the gap between each max and min distance.
        # Distance between keypoints 6 and 10 (COCO ordering: right shoulder and
        # right wrist). The y coordinates are weighted 3x — presumably to
        # emphasize vertical motion; TODO confirm.
        cur = dist(pred_coords[0,6,0], 3*pred_coords[0,6,1], pred_coords[0,10,0], 3*pred_coords[0,10,1])
        dist_store.append(cur)
        #print(pred_coords[0,6,0], 3*pred_coords[0,6,1], pred_coords[0,10,0], 3*pred_coords[0,10,1])
        # print(dist(pred_coords[0,5,0], 3*pred_coords[0,5,1], pred_coords[0,10,0], 3*pred_coords[0,10,1]))
        # if name == 1:
        #     max_val = prev = cur
        #     find_max = True
        # if math.fabs(prev - cur) > 15 and math.fabs(prev - cur) < 50:
        #     if find_max == True and find_min == True:
        #         if max_val - min_val > threshold:
        #         #else:
        #             if pose == True:
        #                 cnt = cnt + 1
        #         else:
        #             complete = False
        #         find_max = find_min = False
        #         pose = True
        #     else:
        #         if turn_flag == False:
        #             if cur > prev:
        #                 find_min = True
        #                 turn_flag = True
        #                 min_val = prev
        #                 #print(prev)
        #         else:
        #             if cur < prev:
        #                 find_max = True
        #                 turn_flag = False
        #                 max_val = prev
        #                 complete = True
                        #print(prev)
                    ## otherwise the standard message would be printed continuously
                    ##plt.text(50,50, eva,color = "r",bbox = dict(facecolor = "r", alpha = 0.2))
        print(name)
        print(find_max,find_min)
        print(cur)
        print(res)

        # A new repetition starts "clean": while neither extreme has been seen
        # yet, reset the correct-posture flag.
        if(find_max==False and find_min==False):
            right_turn=1
        print(right_turn)
        # Band-based extreme detection on the weighted distance: the high band
        # (250-300) only counts after the low band (100-200) has been seen.
        # NOTE(review): band limits look hand-tuned for this clip — confirm.
        if(cur>250 and cur<300 and find_min==True):
            find_max=True
        elif(cur <200 and cur>100):
            find_min=True

        # Both extremes observed -> one full repetition; count it only if the
        # posture stayed correct throughout, then reset for the next rep.
        if(find_max and find_min):
            if (right_turn==1):
                cnt=cnt+1
            right_turn=1
            find_max=False
            find_min=False

        #prev = cur
        ## Draw the running count and the evaluation text; the text position
        ## may need adjusting for other frame sizes.
        ##plt.text(50,50, eva,color = "r",bbox = dict(facecolor = "r", alpha = 0.2))
        plt.text(50, 50, str(cnt), color="w", bbox=dict(facecolor="r", alpha=0.8),size=18)
        plt.text(100,50, eva,color = "w",bbox = dict(facecolor = "r", alpha = 0.8),size=18)

        #plt.text(200,300, str(turn_flag),color = "r",bbox = dict(facecolor = "r", alpha = 0.2))

        # Strip axes/margins so the saved figure is just the annotated frame.
        plt.gca().xaxis.set_major_locator(plt.NullLocator())

        plt.gca().yaxis.set_major_locator(plt.NullLocator())

        plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0)

        plt.margins(0,0)

        plt.savefig('D:\\gluoncv\\gluoncv\\gluon\\data\\fuwo_test3/' + str(name) + '.jpg',dpi=100)


        plt.close()
        # Re-read the saved frame and shrink it with the project-local
        # compress helper before writing it back in place.
        im = cv2.imread('D:\\gluoncv\\gluoncv\\gluon\\data\\fuwo_test3/' + str(name) + '.jpg')

        im = compress(im, 2.5)
        cv2.imwrite('D:\\gluoncv\\gluoncv\\gluon\\data\\fuwo_test3/' + str(name) + '.jpg', im)

# x=np.linspace(0, 1, len(dist_store))
# np.savetxt("filename.txt",dist_store)
# plt.plot(x,dist_store)
# plt.show()
