from flask import Flask, jsonify, send_file, Response



import logging

import cv2

import numpy as np

import tensorflow as tf

import os

import json

from tf_pose.estimator import TfPoseEstimator

from tf_pose.networks import get_graph_path, model_wh

from tf_pose import common

from pose_estimate import PoseEstimate

# Flask application object serving the MJPEG pose-estimation stream.
app = Flask(__name__)

@app.route('/tfposecamera')
def supvisedcamera():
    """Stream webcam frames annotated with pose keypoints and an action score.

    Runs tf-pose skeleton inference on every frame from camera 0, draws the
    detected keypoints, and every ``timeF``-th frame samples a 17x4 feature
    vector from the first detected person; once 10 vectors are buffered they
    are fed to the LSTM classifier (``PoseEstimate``) and the resulting score
    is overlaid on the video.

    Returns:
        flask.Response: a ``multipart/x-mixed-replace`` MJPEG stream.
    """
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # pin inference to GPU 0

    resolution = "432x368"
    model = "cmu"  # alternative: "mobilenet_thin" (faster, less accurate)

    # Separate graphs so the pose estimator and the LSTM classifier do not
    # clash over TensorFlow's default graph.
    g1 = tf.Graph()
    g2 = tf.Graph()

    logger = logging.getLogger('TfPoseEstimator-Video')
    logger.setLevel(logging.DEBUG)
    # Bugfix: the original attached a new StreamHandler on EVERY request,
    # multiplying every log line; only attach one if none exists yet.
    if not logger.handlers:
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        ch.setFormatter(logging.Formatter(
            '[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s'))
        logger.addHandler(ch)
    logger.debug('initialization %s : %s' % (model, get_graph_path(model)))

    w, h = model_wh(resolution)
    config = tf.ConfigProto(log_device_placement=True)
    with g1.as_default():
        # Fall back to the model's native 432x368 if the resolution string
        # failed to parse (model_wh returned zeros).
        target_size = (w, h) if (w > 0 and h > 0) else (432, 368)
        e = TfPoseEstimator(get_graph_path(model),
                            target_size=target_size,
                            tf_config=config)
    with g2.as_default():
        pe = PoseEstimate("./feed_lstm_attention-fwcce-1/model")

    videoCapture = cv2.VideoCapture(0)  # camera index 0
    fps = videoCapture.get(cv2.CAP_PROP_FPS)
    size = (1920, 1080)  # output frame size sent to the client
    print(fps)
    print(size)

    # NOTE(review): these writers are opened but never written to or released
    # anywhere in this file — confirm whether recording is still wanted;
    # as-is they only create (empty) files on disk.
    video_writer_1080 = cv2.VideoWriter(
        './video/resultVideo_1080.mp4', cv2.VideoWriter_fourcc('M', 'P', '4', '2'), fps, size)
    video_writer_720 = cv2.VideoWriter(
        './video/resultVideo_720.mp4', cv2.VideoWriter_fourcc('M', 'P', '4', '2'), fps, (720, 405))

    timeF = 10  # sample one feature vector every timeF frames

    def _frame_features(human):
        """Flatten one human's 17 body parts into a (68,) feature array.

        Missing parts are zero-filled so the LSTM always receives a
        fixed-size input of 17 * (part_idx, x, y, score).
        """
        frame_feature = []
        for i in range(17):
            try:
                part = human.body_parts[i]
                frame_feature.append([part.part_idx, part.x, part.y, part.score])
            except KeyError:  # part not detected on this frame
                frame_feature.append([i, 0, 0, 0])
        return np.reshape(np.array(frame_feature), [-1])

    def generate():
        """Grab, annotate and JPEG-encode frames for the MJPEG stream.

        Bugfix vs. the original: capture, inference, drawing and scoring now
        happen INSIDE the streaming loop. Previously a single frame was
        processed once before the generator started, so the stream replayed
        the same image forever.
        """
        c = 0                # frame counter
        result = 0.0         # last classifier score overlaid on the video
        data_sequence = []   # buffered feature vectors, flushed at 10
        while videoCapture.isOpened():
            grabbed, frame = videoCapture.read()  # grab one frame
            if not grabbed:
                break
            c += 1

            humans = e.inference(frame,
                                 resize_to_default=(w > 0 and h > 0),
                                 upsample_size=4.0)
            frame = TfPoseEstimator.draw_humans(frame, humans)

            # Draw each detected keypoint plus its pixel coordinates.
            image_h, image_w = frame.shape[:2]
            for human in humans:
                for i in range(common.CocoPart.Background.value):
                    if i not in human.body_parts:
                        continue
                    body_part = human.body_parts[i]
                    center = (int(body_part.x * image_w),
                              int(body_part.y * image_h))
                    cv2.circle(frame, center, 3, common.CocoColors[i],
                               thickness=3, lineType=8, shift=0)
                    text = "X:" + str(center[0]) + "Y:" + str(center[1])
                    cv2.putText(frame, text, center, cv2.FONT_HERSHEY_PLAIN,
                                1, (255, 255, 255), thickness=1)

            # Sample features from the first detected person every timeF frames.
            if c % timeF == 0 and humans:
                data_sequence.append(_frame_features(humans[0]))

            # Ten samples form one LSTM input window of shape (1, 10, 68).
            if len(data_sequence) == 10:
                input_data = np.array(data_sequence).reshape([1, 10, 68])
                pre = pe.estimate(input_data)
                print(pre)
                result = pre[0][0][1]
                data_sequence = []

            cv2.putText(frame, "%.4f %s" % (result, "standard"), (0, 50),
                        cv2.FONT_HERSHEY_PLAIN, 3.5, (0, 0, 255), 3)
            frame = cv2.resize(frame, size)
            print("已完成 %d 帧 " % c)

            # Encode the annotated frame as JPEG and emit one multipart chunk.
            ret, buffer = cv2.imencode('.jpg', frame)
            if not ret:
                break
            yield (b'--frame_lwpCV\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + buffer.tobytes() + b'\r\n')
        # Release the camera when the client disconnects or capture ends.
        videoCapture.release()

    return Response(generate(), mimetype='multipart/x-mixed-replace; boundary=frame_lwpCV')

# app.run starts the HTTP API service.
# Project layout based on: https://github.com/Ailln/python-api-template

if __name__ == "__main__":
    # Development server on all interfaces; debug=True enables the reloader
    # and interactive debugger — do not use in production.
    app.run(host="0.0.0.0", port=10001, debug=True)