import logging

import cv2
import numpy as np
import tensorflow as tf
import os

from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
from tf_pose import common
from pose_estimate import PoseEstimate

# Constrain TensorFlow to the first CUDA device (PCI bus ordering).
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Pose-network input resolution string and backbone choice
# ("mobilenet_thin" is the alternative backbone kept for reference).
resolution = "432x368"
# model = "mobilenet_thin"
model = "cmu"

# Two separate graphs keep the pose network and the LSTM classifier isolated.
g1 = tf.Graph()
g2 = tf.Graph()

# Console logger for this script.
logger = logging.getLogger('TfPoseEstimator-Video')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(logging.Formatter(
    '[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s'))
logger.addHandler(ch)

logger.debug('initialization %s : %s', model, get_graph_path(model))
# Parse "WxH" into integer width/height; the fallback below handles the
# case where either comes back as 0.
w, h = model_wh(resolution)

# Log device placement so GPU/CPU assignment is visible at startup.
config = tf.ConfigProto(log_device_placement=True)

with g1.as_default():
    # Fall back to the network's native 432x368 input when the resolution
    # string did not yield a usable width/height.
    target = (432, 368) if w == 0 or h == 0 else (w, h)
    e = TfPoseEstimator(get_graph_path(model),
                        target_size=target,
                        tf_config=config)
with g2.as_default():
    # Sequence classifier loaded from a saved model checkpoint.
    pe = PoseEstimate("./feed_lstm_attention-fwcce-1/model")

# Open the default camera (index 0) as the video source.
videoCapture = cv2.VideoCapture(0)
# Source frame rate, reused for both output writers.
fps = videoCapture.get(cv2.CAP_PROP_FPS)
# Output resolution is fixed at 1080p regardless of the capture size.
size = (1920, 1080)
print(fps)
print(size)
video_writer_1080 = cv2.VideoWriter(
    './video/resultVideo_1080.mp4', cv2.VideoWriter_fourcc('M', 'P', '4', '2'), fps, size)
video_writer_720 = cv2.VideoWriter(
    './video/resultVideo_720.mp4', cv2.VideoWriter_fourcc('M', 'P', '4', '2'), fps, (720, 405))

c = 0               # frame counter
timeF = 10          # sample one pose feature vector every timeF frames
result = 0          # latest classifier score shown on the overlay
data_sequence = []  # rolling buffer of sampled per-frame pose features
# Main loop: per frame, run pose estimation, draw the skeleton and keypoint
# labels, periodically sample pose features, and classify every 10 samples.
while videoCapture.isOpened():
    grabbed, frame_lwpCV = videoCapture.read()  # grab one frame from the stream
    if not grabbed:
        # Stream exhausted or camera lost. (BUG FIX: the frame was
        # previously aliased before this check.)
        break
    c = c + 1

    # Detect people and draw their skeletons. NOTE(review): tf-pose's
    # draw_humans draws in place by default, so the overlays below all end
    # up on the same image — confirm if its imgcopy default ever changes.
    humans = e.inference(frame_lwpCV,
                         resize_to_default=(w > 0 and h > 0),
                         upsample_size=4.0)
    frame_lwpCV = TfPoseEstimator.draw_humans(frame_lwpCV, humans)

    # Label every detected keypoint with its pixel coordinates.
    image_h, image_w = frame_lwpCV.shape[:2]
    for human in humans:
        for i in range(common.CocoPart.Background.value):
            if i not in human.body_parts:
                continue
            body_part = human.body_parts[i]
            center = (int(body_part.x * image_w), int(body_part.y * image_h))
            cv2.circle(frame_lwpCV, center, 3, common.CocoColors[i],
                       thickness=3, lineType=8, shift=0)
            text = "X:" + str(center[0]) + "Y:" + str(center[1])
            cv2.putText(frame_lwpCV, text, (center[0], center[1]),
                        cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255),
                        thickness=1)

    # Every timeF-th frame, sample a fixed 17x4 feature block
    # (part index, x, y, score) from the first detected person.
    if c % timeF == 0 and len(humans) > 0:
        fame_feature = []
        human_body = humans[0].body_parts
        for i in range(17):
            try:
                part = human_body[i]
                body_feature = [part.part_idx, part.x, part.y, part.score]
            except KeyError:
                # BUG FIX: was a bare `except`, which could hide real
                # errors. A missing keypoint is padded with zeros so the
                # layout stays 17x4.
                body_feature = [i, 0, 0, 0]
            fame_feature.append(body_feature)
        data_sequence.append(np.reshape(np.array(fame_feature), [-1]))

    # Once 10 sampled feature vectors are buffered, classify the sequence.
    if len(data_sequence) == 10:
        input_data = np.array(data_sequence).reshape([1, 10, 68])
        pre = pe.estimate(input_data)
        print(pre)
        # presumably pre[0][0][1] is the score of the "standard" class —
        # verify against PoseEstimate.estimate's output shape.
        result = pre[0][0][1]
        data_sequence = []

    # Overlay the latest classifier score.
    cv2.putText(frame_lwpCV, "%.4f %s" % (result, "standard"), (0, 50),
                cv2.FONT_HERSHEY_PLAIN, 3.5, (0, 0, 255), 3)

    # Write the 1080p output file and show a live preview.
    frame_lwpCV = cv2.resize(frame_lwpCV, size)
    video_writer_1080.write(frame_lwpCV)
    cv2.imshow("FOLLOWME", frame_lwpCV)
    # BUG FIX: imshow never refreshes without a waitKey event pump; this
    # also restores the 'q'-to-quit behavior the commented-out code intended.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    print("已完成 %d 帧 " % c)
# Flush and close both output files, release the camera, and tear down
# the preview window.
for writer in (video_writer_1080, video_writer_720):
    writer.release()
videoCapture.release()
cv2.destroyAllWindows()
