import argparse
import os
import os.path as osp
import time
import cv2
import numpy as np
import base64
import json
import asyncio
import websockets

from bytetrack.utils import fuse_model, get_model_info, postprocess
from bytetrack.utils.visualize import plot_tracking, subimg_corp, subimg_corp_expand
from bytetrack.tracker.byte_tracker import BYTETracker
from bytetrack.tracking_utils.timer import Timer

import sys
import subprocess as sp
import queue

# from yolov5_face.yolov5 import YoloV5
sys.path.append('/home/ubuntu/work/django_vue_demo/django-demo/yolov5_face/')
from yolov5 import YoloV5

IMAGE_EXT = [".jpg", ".jpeg", ".webp", ".bmp", ".png"]  # recognized image file extensions
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 95]  # JPEG quality 95 for cv2.imencode

CVSHOW = False  # presumably toggles local cv2.imshow debugging — usage not visible here, verify

# RTMP endpoint for the pushed stream — NOTE(review): hard-coded address; consider making configurable
pushurl = 'rtmp://10.82.14.248:1935/face/h264major'


def write_results(filename, results):
    """Write tracking results to *filename* in MOT-challenge text format.

    Each element of *results* is a (frame_id, tlwhs, track_ids, scores)
    tuple; one CSV row is emitted per box. Rows whose track id is
    negative are skipped. Coordinates are rounded to 1 decimal place,
    scores to 2.
    """
    with open(filename, 'w') as f:
        for frame_id, tlwhs, track_ids, scores in results:
            for tlwh, track_id, score in zip(tlwhs, track_ids, scores):
                # Negative ids mark invalid/unassigned tracks.
                if track_id < 0:
                    continue
                x1, y1, w, h = tlwh
                f.write(
                    f"{frame_id},{track_id},{round(x1, 1)},{round(y1, 1)},"
                    f"{round(w, 1)},{round(h, 1)},{round(score, 2)},-1,-1,-1\n"
                )


class SshaDetector(object):
    """Face detector backed by the external mxnet-SSHA project.

    `inference` draws detections (box, 5 landmarks, score) onto the input
    image and returns the raw [x1, y1, x2, y2, score] rows plus frame
    metadata, mirroring the Yolov5Detector interface.
    """

    # Location of the external mxnet-ssha checkout.
    _SSHA_ROOT = '/disk1/work/PycharmProjects/mxnet-ssha/'

    def __init__(self):
        # Bug fix: the original left `from ssha_detector import SSHDetector`
        # commented out, so instantiation raised NameError. Import lazily
        # here so merely importing this module does not require mxnet-ssha.
        if self._SSHA_ROOT not in sys.path:
            sys.path.append(self._SSHA_ROOT)
        from ssha_detector import SSHDetector
        self.detector = SSHDetector(
            '/disk1/work/PycharmProjects/mxnet-ssha/model/symbol_ssh_hi3559a/symbol_ssh_hi3559a', 0)
        self.test_size = (720, 1280)  # nominal (height, width) — TODO confirm consumers

    def inference(self, img, timer):
        """Detect faces in *img* (ndarray or image file path).

        Returns:
            outputs: one-element list holding an (N, 5) array of
                [x1, y1, x2, y2, score] rows.
            img_info: dict with id/file_name/height/width/raw_img;
                raw_img is annotated in place.
        """
        img_info = {"id": 0}
        if isinstance(img, str):
            img_info["file_name"] = osp.basename(img)
            img = cv2.imread(img)
        else:
            img_info["file_name"] = None

        height, width = img.shape[:2]
        img_info["height"] = height
        img_info["width"] = width
        img_info["raw_img"] = img

        faces = self.detector.detect(img, threshold=0.5)
        for num in range(faces.shape[0]):
            # Bug fix: cv2 drawing primitives require integer point
            # coordinates; the detector returns floats (the YoloV5 variant
            # already performed this cast).
            bbox = list(map(int, faces[num, 0:4]))
            cv2.rectangle(img, (bbox[0], bbox[1]),
                          (bbox[2], bbox[3]), (0, 255, 0), 2)
            kpoint = list(map(int, faces[num, 5:15]))
            for knum in range(5):
                cv2.circle(
                    img, (kpoint[2 * knum], kpoint[2 * knum + 1]), 1, [0, 0, 255], 2)
            cv2.putText(img, 'score: %.2f' % (faces[num, 4]),
                        (bbox[0], bbox[1]), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), thickness=1)

        outputs = list()
        outputs.append(faces[:, :5])
        return outputs, img_info


class Yolov5Detector(object):
    """Face detector backed by the YOLOv5 face model.

    Returns per-frame detections as [x1, y1, x2, y2, score] rows along
    with a metadata dict, in the same shape as SshaDetector.
    """

    def __init__(self):
        # Underlying YOLOv5 face model; test_size is the nominal (h, w).
        self.detector = YoloV5()
        self.test_size = (720, 1280)

    def inference(self, img, timer):
        """Detect faces in *img* (ndarray or image file path).

        Returns:
            outputs: one-element list holding an (N, 5) array of
                [x1, y1, x2, y2, score] rows.
            img_info: dict with id/file_name/height/width/raw_img.
        """
        if isinstance(img, str):
            file_name = osp.basename(img)
            img = cv2.imread(img)
        else:
            file_name = None

        h, w = img.shape[:2]
        img_info = {
            "id": 0,
            "file_name": file_name,
            "height": h,
            "width": w,
            "raw_img": img,
        }

        faces = self.detector.detect(img, threshold=0.5)
        return [faces[:, :5]], img_info

class Control(object):
    """Per-stream tracking pipeline.

    Runs face detection + BYTETracker on successive frames, banks cropped
    sub-images per track id, and pushes one "preferred" (highest-score)
    crop per id onto `subimg_queue` according to the snapshot strategy.
    """

    def __init__(self):
        self.tracker = BYTETracker()
        self.predictor = Yolov5Detector()
        self.timer = Timer()
        self.frame_id = 0
        self.results = []

        # Per-track-id bookkeeping:
        self.track_id_imgs = {}       # id -> list of banked face crops
        self.track_id_scores = {}     # id -> detection score for each crop
        self.track_id_num = {}        # id -> frames elapsed since first seen
        self.track_id_break_num = {}  # id -> consecutive frames the id was absent


        self.img_size = (1280, 720)
        self.aspect_ratio_thresh = 2  # reject boxes taller than 2x their width
        self.min_box_area = 200       # reject boxes smaller than this area (px)
        self.subimg_queue = queue.Queue()  # preferred crops, drained by a consumer

        # Snapshot preference strategy
        ### 0: push the preferred crop on a fixed timer
        ### 1: push the preferred crop after the id leaves the scene
        self.snapshot_strategy = 1

        #### Fixed-interval preference; used when snapshot_strategy == 0
        self.snapshot_fix_time = 3.0
        self.snapshot_fix_frame = int(self.snapshot_fix_time * 25)  # assumes 25 fps input — TODO confirm

        #### Leave preference; used when snapshot_strategy == 1
        self.snapshot_leave_time = 5.0
        self.snapshot_leave_frame = int(self.snapshot_leave_time * 25)
    

    # Preferred-crop selection
    def preferred(self, img_list, score_list):
        """Return (image, score) for the highest-scoring entry in the lists."""
        max_score = max(score_list)
        ind = score_list.index(max_score)
        img = img_list[ind]
        return img, max_score
        

    def imageflow(self, frame):
        """Process one frame; return the frame annotated with tracks.

        Side effects: updates all per-id bookkeeping, appends MOT-format
        rows to `self.results`, and may enqueue preferred crops onto
        `self.subimg_queue`.
        """
        self.timer.tic()
        # frame = cv2.resize(frame, self.img_size)
        self.img_size = (frame.shape[1], frame.shape[0])
        outputs, img_info = self.predictor.inference(frame, self.timer)
        if outputs[0] is not None:
            online_targets = self.tracker.update(
                outputs[0], [img_info['height'], img_info['width']], (self.img_size[1], self.img_size[0]))
            online_tlwhs = []
            online_ids = []
            online_scores = []
            # Age every known id; the break counter is reset below for ids
            # matched in this frame, so only truly-absent ids keep growing.
            for track_id in self.track_id_break_num.keys():
                self.track_id_num[track_id] += 1
                self.track_id_break_num[track_id] += 1
            for t in online_targets:
                tlwh = t.tlwh
                tid = t.track_id
                # Filter out overly tall/narrow boxes and tiny boxes.
                vertical = tlwh[3] / tlwh[2] > self.aspect_ratio_thresh
                if tlwh[2] * tlwh[3] > self.min_box_area and not vertical:
                    online_tlwhs.append(tlwh)
                    online_ids.append(tid)
                    online_scores.append(t.score)
                    self.results.append(
                        f"{self.frame_id},{tid},{tlwh[0]:.2f},{tlwh[1]:.2f},{tlwh[2]:.2f},{tlwh[3]:.2f},{t.score:.2f},-1,-1,-1\n"
                    )
                    # Bank an expanded crop of the detection for this id;
                    # copy() decouples it from the live frame buffer.
                    sub_img = subimg_corp_expand(frame, tlwh)
                    new_sub_img = sub_img.copy()
                    if not tid in self.track_id_imgs.keys():
                        self.track_id_imgs[tid] = []
                        self.track_id_scores[tid] = []
                        self.track_id_num[tid] = 0
                        self.track_id_break_num[tid] = 0
                    self.track_id_imgs[tid].append(new_sub_img)
                    self.track_id_scores[tid].append(t.score)
                    self.track_id_break_num[tid] = 0
            # list() copy: ids may be popped from the dict while iterating.
            for track_id in list(self.track_id_break_num.keys()):
                # Timed preference (snapshot_strategy == 0): flush the best
                # crop every snapshot_fix_frame frames, then restart banking.
                if self.snapshot_strategy == 0:
                    if self.track_id_num[track_id] % self.snapshot_fix_frame == 0:
                        if len(self.track_id_imgs[track_id]) > 0:
                            preferred_img, preferred_score = self.preferred(self.track_id_imgs[track_id], self.track_id_scores[track_id])
                            self.subimg_queue.put(preferred_img)
                            self.track_id_imgs[track_id] = []
                            self.track_id_scores[track_id] = []
                # Leave preference: id absent long enough — flush its best
                # crop (if any) and drop all state for the id.
                if self.track_id_break_num[track_id] >= self.snapshot_leave_frame:
                    if len(self.track_id_imgs[track_id]) > 0:
                        preferred_img, preferred_score = self.preferred(self.track_id_imgs[track_id], self.track_id_scores[track_id])
                        self.subimg_queue.put(preferred_img)
                    self.track_id_imgs.pop(track_id)
                    self.track_id_scores.pop(track_id)
                    self.track_id_num.pop(track_id)
                    self.track_id_break_num.pop(track_id)
           
            self.timer.toc()
            online_im = plot_tracking(
                img_info['raw_img'], online_tlwhs, online_ids, frame_id=self.frame_id + 1, fps=1. / self.timer.average_time
            )
        else:
            self.timer.toc()
            online_im = img_info['raw_img']
        self.frame_id += 1
        return online_im

