import argparse
import os
import os.path as osp
import time
import cv2
import torch
import numpy as np
import base64
import json
import asyncio
import websockets

from bytetrack.utils import fuse_model, get_model_info, postprocess
from bytetrack.utils.visualize import plot_tracking, subimg_corp, subimg_corp_expand
from bytetrack.tracker.byte_tracker import BYTETracker
from bytetrack.tracking_utils.timer import Timer

import sys
import subprocess as sp

sys.path.append('/disk1/chengw/django_vue_demo/django-demo/yolov5_face/')
from yolov5 import YoloV5

IMAGE_EXT = [".jpg", ".jpeg", ".webp", ".bmp", ".png"]  # accepted image file extensions
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 95]  # JPEG quality used for every cv2.imencode call

CVSHOW = False  # set True to open a local cv2 debug window showing tracked frames

# RTMP endpoint annotated video is pushed to; an empty string disables pushing
pushurl = 'rtmp://10.82.14.164:1935/face/h264major'

class PushStream(object):
    """Pushes raw BGR frames to an RTMP endpoint through an ffmpeg subprocess.

    Usage: construct with the target URL, call initcmd() once the frame
    geometry is known, then write bgr24 bytes to ``self.pipe.stdin``.
    """

    def __init__(self, pushurl="rtmp://127.0.0.1:1935/"):
        self.command = ""
        # Target RTMP URL; configure per deployment.
        self.pushurl = pushurl
        # ffmpeg Popen handle; None until initcmd() starts the process.
        # (Previously unset here, so touching self.pipe before initcmd()
        # raised AttributeError instead of a clear "not started" state.)
        self.pipe = None

    def initcmd(self, fps, width, height):
        """Build the ffmpeg command line and spawn the push process.

        Args:
            fps: frame rate advertised to ffmpeg.
            width, height: dimensions of the raw bgr24 frames to be written.
        """
        self.command = ['ffmpeg',
                '-y',
                '-f', 'rawvideo',
                '-vcodec', 'rawvideo',
                '-pix_fmt', 'bgr24',
                '-s', "{}x{}".format(width, height),
                '-r', str(fps),
                '-i', '-',
                '-c:v', 'libx264',
                '-pix_fmt', 'yuv420p',
                '-preset', 'ultrafast',
                '-f', 'flv',
                self.pushurl]
        self.pipe = sp.Popen(self.command, stdin=sp.PIPE)


def write_results(filename, results):
    """Write tracking results to *filename* in MOT challenge text format.

    Args:
        filename: output path (overwritten).
        results: iterable of (frame_id, tlwhs, track_ids, scores) tuples;
            rows with a negative track id are skipped.
    """
    fmt = '{frame},{id},{x1},{y1},{w},{h},{s},-1,-1,-1\n'
    with open(filename, 'w') as out:
        for frame_id, tlwhs, track_ids, scores in results:
            for box, tid, conf in zip(tlwhs, track_ids, scores):
                if tid < 0:
                    continue
                left, top, box_w, box_h = box
                out.write(fmt.format(
                    frame=frame_id, id=tid,
                    x1=round(left, 1), y1=round(top, 1),
                    w=round(box_w, 1), h=round(box_h, 1),
                    s=round(conf, 2)))


class SshaDetector(object):
    """Face detector wrapper around the external mxnet-SSH model.

    Annotates the frame in place (boxes, 5 landmarks, score) and returns
    detections in the same (outputs, img_info) shape as Yolov5Detector.
    """

    # Location of the external mxnet-ssha project providing ssha_detector.
    _SSH_PATH = '/disk1/work/PycharmProjects/mxnet-ssha/'

    def __init__(self):
        # Import lazily inside the constructor: the original class-level
        # import was commented out, which left SSHDetector undefined and made
        # every construction raise NameError. Lazy import also keeps this
        # module importable on machines without mxnet-ssha installed.
        sys.path.append(self._SSH_PATH)
        from ssha_detector import SSHDetector
        self.detector = SSHDetector(
            '/disk1/work/PycharmProjects/mxnet-ssha/model/symbol_ssh_hi3559a/symbol_ssh_hi3559a', 0)
        self.test_size = (720, 1280)

    def inference(self, img, timer):
        """Detect faces in *img* (ndarray or image path) and annotate it.

        Args:
            img: BGR frame or a path to an image file.
            timer: unused; kept for interface parity with other detectors.

        Returns:
            outputs: one-element list holding an (N, 5) array of
                [x1, y1, x2, y2, score] rows.
            img_info: dict with id / file_name / height / width / raw_img.
        """
        img_info = {"id": 0}
        if isinstance(img, str):
            img_info["file_name"] = osp.basename(img)
            img = cv2.imread(img)
        else:
            img_info["file_name"] = None

        height, width = img.shape[:2]
        img_info["height"] = height
        img_info["width"] = width
        img_info["raw_img"] = img

        faces = self.detector.detect(img, threshold=0.5)
        # Draw box, five landmarks and score for every detection (in place).
        for num in range(faces.shape[0]):
            bbox = faces[num, 0:4]
            cv2.rectangle(img, (bbox[0], bbox[1]),
                          (bbox[2], bbox[3]), (0, 255, 0), 2)
            kpoint = faces[num, 5:15]
            for knum in range(5):
                cv2.circle(
                    img, (kpoint[2 * knum], kpoint[2 * knum + 1]), 1, [0, 0, 255], 2)
            cv2.putText(img, 'score: %.2f' % (faces[num, 4]),
                        (bbox[0], bbox[1]), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), thickness=1)

        outputs = [faces[:, :5]]
        return outputs, img_info


class Yolov5Detector(object):
    """Thin wrapper exposing a YoloV5 face detector through the common
    inference(img, timer) -> (outputs, img_info) interface."""

    def __init__(self):
        self.detector = YoloV5()
        self.test_size = (720, 1280)

    def inference(self, img, timer):
        """Run face detection on a frame or an image file path.

        Args:
            img: BGR frame (ndarray) or a path to an image file.
            timer: unused; kept for interface parity with other detectors.

        Returns:
            outputs: one-element list holding an (N, 5) array of
                [x1, y1, x2, y2, score] rows.
            img_info: dict with id / file_name / height / width / raw_img.
        """
        img_info = {"id": 0, "file_name": None}
        if isinstance(img, str):
            img_info["file_name"] = osp.basename(img)
            img = cv2.imread(img)

        img_info["height"], img_info["width"] = img.shape[:2]
        img_info["raw_img"] = img

        detections = self.detector.detect(img, threshold=0.5)
        return [detections[:, :5]], img_info

async def videoflow_demo(websocket, predictor):
    """Read an RTSP stream, track faces, and push annotated frames to RTMP.

    Args:
        websocket: open websocket connection. Currently unused (the per-frame
            send is disabled below) but kept for parity with imageflow_demo.
        predictor: object with inference(img, timer) -> (outputs, img_info).
    """
    img_size = (960, 540)       # (width, height) every frame is resized to
    aspect_ratio_thresh = 2     # reject boxes taller than 2x their width
    min_box_area = 200          # reject tiny detections
    if CVSHOW:
        cv2.namedWindow('Remote', cv2.WINDOW_AUTOSIZE)
    cap = cv2.VideoCapture(r'rtsp://admin:Admin123@10.82.14.202/1/h264major')
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    print('img_size:', img_size)
    print('fps:', fps)

    if len(pushurl) > 0:
        pushstream = PushStream(pushurl)
        pushstream.initcmd(fps, img_size[0], img_size[1])

    tracker = BYTETracker()
    timer = Timer()
    frame_id = 0
    results = []

    while True:
        ret_val, frame = cap.read()
        if not ret_val:
            break
        # Resize only AFTER a successful read: a failed read yields None and
        # the original resize-before-check crashed instead of ending the loop.
        frame = cv2.resize(frame, img_size)
        outputs, img_info = predictor.inference(frame, timer)
        if outputs[0] is not None:
            online_targets = tracker.update(
                outputs[0], [img_info['height'], img_info['width']], (img_size[1], img_size[0]))
            online_tlwhs = []
            online_ids = []
            online_scores = []
            for t in online_targets:
                tlwh = t.tlwh
                tid = t.track_id
                vertical = tlwh[3] / tlwh[2] > aspect_ratio_thresh
                if tlwh[2] * tlwh[3] > min_box_area and not vertical:
                    online_tlwhs.append(tlwh)
                    online_ids.append(tid)
                    online_scores.append(t.score)
                    results.append(
                        f"{frame_id},{tid},{tlwh[0]:.2f},{tlwh[1]:.2f},{tlwh[2]:.2f},{tlwh[3]:.2f},{t.score:.2f},-1,-1,-1\n"
                    )
            timer.toc()
            online_im = plot_tracking(
                img_info['raw_img'], online_tlwhs, online_ids,
                frame_id=frame_id + 1, fps=1. / timer.average_time
            )
            # Frame upload over the websocket is currently disabled; the
            # encoded base64 payload is still produced for easy re-enabling.
            result, imgencode = cv2.imencode('.jpg', online_im, encode_param)
            img = base64.b64encode(np.array(imgencode).tobytes()).decode()
            # await websocket.send("data:image/jpg;base64," + img)
        else:
            timer.toc()
            online_im = img_info['raw_img']
        if len(pushurl) > 0:
            # ffmpeg expects raw bgr24 bytes matching the initcmd() geometry.
            pushstream.pipe.stdin.write(online_im.tobytes())
        if CVSHOW:
            cv2.imshow("Remote", online_im)
            cv2.waitKey(1)
        frame_id += 1
    cap.release()

async def imageflow_demo(websocket, predictor):
    """Read an RTSP stream, track faces, and send each finished track's best
    face crop to the websocket client as a base64-encoded JPEG.

    A track is flushed when it has been unseen for more than 30 frames
    (sent as a bare base64 string) or tracked for more than 200 frames
    (sent with a "data:image/jpg;base64," prefix).

    Args:
        websocket: open websocket connection used to send encoded crops.
        predictor: object with inference(img, timer) -> (outputs, img_info).
    """
    img_size = (1280, 720)      # (width, height) every frame is resized to
    aspect_ratio_thresh = 2     # reject boxes taller than 2x their width
    min_box_area = 200          # reject tiny detections
    if CVSHOW:
        cv2.namedWindow('Remote', cv2.WINDOW_AUTOSIZE)
    cap = cv2.VideoCapture(r'rtsp://admin:Admin123@10.82.14.202/1/h264major')

    tracker = BYTETracker()
    timer = Timer()
    frame_id = 0
    results = []
    res_id_subimg = {}       # track id -> list of cropped face images
    res_id_score = {}        # track id -> list of detection scores
    res_id_untrack_num = {}  # track id -> frames since last seen
    res_id_track_num = {}    # track id -> frames tracked so far

    def _drop(tid):
        # Remove a finished track id from all bookkeeping dicts.
        res_id_untrack_num.pop(tid)
        res_id_subimg.pop(tid)
        res_id_score.pop(tid)
        res_id_track_num.pop(tid)

    def _best_crop(tid):
        # Crop with the highest detection score recorded for this track.
        scores = res_id_score[tid]
        return res_id_subimg[tid][scores.index(max(scores))]

    while True:
        ret_val, frame = cap.read()
        if not ret_val:
            break
        # Resize only AFTER a successful read: a failed read yields None and
        # the original resize-before-check crashed instead of ending the loop.
        frame = cv2.resize(frame, img_size)
        outputs, img_info = predictor.inference(frame, timer)
        if outputs[0] is not None:
            online_targets = tracker.update(
                outputs[0], [img_info['height'], img_info['width']], (img_size[1], img_size[0]))
            online_tlwhs = []
            online_ids = []
            online_scores = []
            # Age every known track; ids re-seen below are reset to 0.
            for res_id in res_id_untrack_num:
                res_id_untrack_num[res_id] += 1
            for t in online_targets:
                tlwh = t.tlwh
                tid = t.track_id
                vertical = tlwh[3] / tlwh[2] > aspect_ratio_thresh
                if tlwh[2] * tlwh[3] > min_box_area and not vertical:
                    online_tlwhs.append(tlwh)
                    online_ids.append(tid)
                    online_scores.append(t.score)
                    results.append(
                        f"{frame_id},{tid},{tlwh[0]:.2f},{tlwh[1]:.2f},{tlwh[2]:.2f},{tlwh[3]:.2f},{t.score:.2f},-1,-1,-1\n"
                    )
                    sub_img = subimg_corp_expand(frame, tlwh)
                    if tid in res_id_subimg:
                        res_id_subimg[tid].append(sub_img)
                        res_id_score[tid].append(t.score)
                        res_id_track_num[tid] += 1
                    else:
                        res_id_subimg[tid] = [sub_img]
                        res_id_score[tid] = [t.score]
                        res_id_track_num[tid] = 0
                    res_id_untrack_num[tid] = 0
            # Flush tracks unseen for more than 30 frames.
            for res_id in list(res_id_untrack_num):
                if res_id_untrack_num[res_id] > 30:
                    sub_img = _best_crop(res_id)
                    if sub_img is None:
                        # Fix: drop the id anyway. Previously the id was kept,
                        # re-checked every frame forever, and its entries in
                        # the four bookkeeping dicts were leaked.
                        _drop(res_id)
                        continue
                    result, imgencode = cv2.imencode('.jpg', sub_img, encode_param)
                    img = base64.b64encode(np.array(imgencode).tobytes()).decode()
                    await websocket.send(img)
                    _drop(res_id)
            # Flush long-lived tracks (> 200 frames), sent with a data URI prefix.
            for res_id in list(res_id_track_num):
                if res_id_track_num[res_id] > 200:
                    sub_img = _best_crop(res_id)
                    if sub_img is None:
                        # Guard added: this branch had no None check, so
                        # cv2.imencode would have raised on a missing crop.
                        _drop(res_id)
                        continue
                    result, imgencode = cv2.imencode('.jpg', sub_img, encode_param)
                    img = base64.b64encode(np.array(imgencode).tobytes()).decode()
                    await websocket.send("data:image/jpg;base64," + img)
                    _drop(res_id)
            timer.toc()
            online_im = plot_tracking(
                img_info['raw_img'], online_tlwhs, online_ids,
                frame_id=frame_id + 1, fps=1. / timer.average_time
            )
        else:
            timer.toc()
            online_im = img_info['raw_img']
        if CVSHOW:
            cv2.imshow("Remote", online_im)
            cv2.waitKey(1)
        frame_id += 1
    cap.release()


async def main_logic():
    """Connect to the backend websocket and run the video tracking loop."""
    uri = 'ws://10.82.14.162:8000/ws/video/wms/'
    # ping_interval=None disables keepalive pings so long processing stalls
    # do not tear down the connection.
    async with websockets.connect(uri, ping_interval=None) as websocket:
        await videoflow_demo(websocket, Yolov5Detector())


if __name__ == "__main__":
    # asyncio.run() replaces the deprecated
    # get_event_loop().run_until_complete() pattern and also handles event
    # loop creation and cleanup.
    asyncio.run(main_logic())
