# python3
import cv2
import base64
from PIL import Image
from io import BytesIO
import requests
import json
import numpy as np
import threading
import time
import subprocess as sp
# Import the database utility class
from conDatabase import dataUtils

# Frames shared between the worker threads; both start out as plain
# white 540x960 3-channel images until real data arrives.
shared_image = np.full((540, 960, 3), 255, dtype=np.uint8)
process_image = np.full((540, 960, 3), 255, dtype=np.uint8)
# Database helper instance.
# NOTE(review): this rebinding shadows the imported `dataUtils` class name.
dataUtils = dataUtils()
# Running passenger counters updated by the detection thread.
in_num = 0
out_num = 0

# Detection-area polygon corner order:
#   top-left
#   top-right
#   bottom-left
#   bottom-right
# "area" below is the region of the frame in which people are counted.


# Detection helper: sends an encoded image to the API and returns the raw
# response, whose payload contains the annotated image and in/out counts.
def getResult(img, timeout=10):
    """POST a base64-encoded frame to the Baidu body-tracking API.

    Args:
        img: base64-encoded JPEG image data (bytes or str).
        timeout: seconds to wait for the HTTP response (default 10).
            Without a timeout, requests.post can block the calling
            thread indefinitely if the API stalls.

    Returns:
        requests.Response: the raw API response; its JSON body carries
        'person_count' (with 'in'/'out' totals) and an annotated 'image'.

    Raises:
        requests.RequestException: on network failure or timeout.
    """
    # Request parameters; "area" is the x,y corner list of the polygon
    # in which people are tracked (see the corner-order note above).
    params = {
        "area": "1,1,     930,1,    930,350,     1,350",
        "case_id": 1,
        "case_init": "false",
        "dynamic": "true",
        "image": img,
        "show": "true", }

    request_url = "https://aip.baidubce.com/rest/2.0/image-classify/v1/body_tracking"
    access_token = '#################################################################'
    request_url = request_url + "?access_token=" + access_token
    headers = {'content-type': 'application/x-www-form-urlencoded'}

    res = requests.post(request_url, data=params, headers=headers, timeout=timeout)

    return res

# Capture thread: reads frames from the camera, resizes each one to
# 960x540 and publishes it through the module-level `shared_image`.
class CvThread(threading.Thread):
    """Frame-grabbing thread feeding the detection pipeline."""

    def __init__(self):
        # Explicitly initialize the Thread base class.
        super(CvThread, self).__init__()

    def run(self):
        """Continuously capture, resize and publish frames (never returns)."""
        print('CvThread thread is run!')
        global shared_image
        camera = cv2.VideoCapture(0)
        if camera.isOpened():
            print('Open camera 1')
        else:
            print('Fail to open camera 1!')
            time.sleep(0.05)
        # Requested capture parameters; the driver may substitute the
        # closest mode it actually supports.
        camera.set(cv2.CAP_PROP_FRAME_WIDTH, 864)
        camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        camera.set(cv2.CAP_PROP_FPS, 5)  # frame-sampling rate
        # NOTE: the original code also created a cv2.VideoWriter
        # ('res_mv.avi') that was never written to; that dead writer and
        # its size/fps/fourcc locals have been removed.
        try:
            while True:
                ret, frame = camera.read()  # grab one frame
                # Check the success flag as well as the frame so a failed
                # read is skipped instead of propagating a stale image.
                if ret and frame is not None:
                    image = Image.fromarray(frame)
                    image = image.resize((960, 540))
                    shared_image = np.array(image)
        finally:
            # Release the device if the loop ever terminates (e.g. on
            # interpreter shutdown raising inside the loop).
            camera.release()

# Detection thread: encodes the latest `shared_image` as base64 JPEG,
# calls the AI endpoint via getResult(), and publishes the annotated
# frame in `process_image` plus the counters in `in_num` / `out_num`.
class BaiDuThread(threading.Thread):
    """Worker thread running the remote people-counting detection."""

    def __init__(self):
        # Explicitly initialize the Thread base class.
        super(BaiDuThread, self).__init__()

    def run(self):
        """Loop forever: encode frame -> call API -> decode result."""
        print('BaiDuThread thread is run!')
        global shared_image
        global process_image
        global in_num
        global out_num
        while True:
            # Encode the current frame as a base64 JPEG for the API.
            img = Image.fromarray(shared_image)
            output_buffer = BytesIO()
            img.save(output_buffer, format='JPEG')
            byte_data = output_buffer.getvalue()
            base64_data = base64.b64encode(byte_data)
            try:
                response = getResult(base64_data)
            except requests.RequestException:
                # Transient network failure: back off briefly and retry
                # instead of killing the whole thread.
                time.sleep(0.5)
                continue
            if response:
                # The original code called response.json() *before* the
                # truthiness check (raising on bad responses) and then
                # round-tripped it through json.dumps/json.loads; the
                # parsed dict is used directly here.
                d = response.json()
                person_count = d.get('person_count')
                if person_count:
                    in_num = person_count['in']
                    out_num = person_count['out']
                    print("in_num: ", in_num)
                    print("out_num: ", out_num)
                encoded = d.get('image')
                if encoded:
                    # Decode the annotated image: base64 -> raw bytes ->
                    # np.ndarray of uint8 pixels.
                    temp = base64.b64decode(encoded)
                    img = cv2.imdecode(np.frombuffer(temp, np.uint8), cv2.IMREAD_COLOR)
                    # Swap first/last color channels (BGR <-> RGB swap;
                    # RGB2BGR and BGR2RGB are the same permutation).
                    rgb_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
                    process_image = rgb_img
                time.sleep(0.05)
            else:
                # Avoid hammering the API in a tight loop on errors.
                time.sleep(0.5)

# RTMP push configuration: raw BGR frames are written to ffmpeg's stdin
# and re-encoded as an H.264 FLV stream to the RTMP server.
RTMP_HOST = '######'
rtmpUrl = 'rtmp://' + RTMP_HOST + ':1935/live/test'
command = ['ffmpeg',
    '-y',
    '-f', 'rawvideo',
    '-vcodec', 'rawvideo',
    '-pix_fmt', 'bgr24',      # matches OpenCV's in-memory pixel layout
    '-s', '960x540',          # must match the process_image dimensions
    '-r', str(5),
    '-i', '-',                # read raw frames from stdin
    '-c:v', 'libx264',
    '-pix_fmt', 'yuv420p',
    '-preset', 'ultrafast',   # minimize encoding latency for live push
    '-f', 'flv',
    rtmpUrl]

# `global` is a no-op at module scope, so the original `global pipe`
# statement was dropped; `pipe` is a module-level name regardless.
pipe = sp.Popen(command, stdin=sp.PIPE)

# Push thread: takes the detection result `process_image`, feeds it to
# the ffmpeg pipe (RTMP push), and periodically reports the current
# in/out counts to the local web service.
class PushThread(threading.Thread):
    """Streams processed frames and posts passenger counters."""

    def __init__(self):
        # Explicitly initialize the Thread base class.
        super(PushThread, self).__init__()

    def run(self):
        """Loop forever: pipe frame -> snapshot -> periodic count POST."""
        print('PushThread thread is run!')
        global process_image
        url = "http://127.0.0.1:80/people"
        count = 0
        while True:
            # ndarray.tostring() was deprecated and removed in NumPy 2.0;
            # tobytes() is the supported equivalent.
            pipe.stdin.write(process_image.tobytes())
            cv2.imwrite('1.jpg', process_image)  # latest frame snapshot on disk
            param = {'inNum': str(in_num), 'outNum': str(out_num)}
            count += 1
            # Report counters roughly every 25 iterations (~5 s at the
            # ~0.2 s loop period); best effort — a down web service must
            # not kill the video stream.
            if count % 25 == 0:
                try:
                    requests.post(url=url, data=param)
                except requests.RequestException:
                    pass
            time.sleep(0.198)

# Start the three worker threads: capture -> detect -> push.
# The original code rebound the class names to their instances
# (e.g. `CvThread = CvThread()`), shadowing the classes; the instances
# now use distinct lowercase names.
cv_thread = CvThread()
cv_thread.start()
baidu_thread = BaiDuThread()
baidu_thread.start()
push_thread = PushThread()
push_thread.start()



