#!/usr/bin/python3
# -*- coding: utf-8 -*-

# @file repnet_tf_inference.py
# @brief
# @author QRS
# @version 1.0
# @date 2021-05-26 17:48

import argparse
import time, os, json
import zmq
import shutil
import numpy as np
import cv2
import traceback
import pickle
import tempfile

import matplotlib.pyplot as plt
# import scipy.stats as stats
import io
import requests
import tensorflow_probability as tfp

from collections import Counter
from sklearn.cluster import KMeans
from scipy.spatial.distance import cdist # noqa
from matplotlib.colors import LogNorm
from utils import get_model, read_video, cal_rect_points
from repnet import get_counts, get_sims

from omegaconf import OmegaConf
from frepai.utils.logger import (frep_set_loglevel, frep_set_logfile, Logger)
from frepai.utils.misc import ( # noqa
        frep_oss_client,
        frep_object_put,
        frep_object_put_jsonconfig,
        frep_object_remove,
        frep_report_result,
        frep_data)

# --- module-level worker setup (runs on import) ---
frep_set_loglevel('info')
frep_set_logfile('/tmp/frepai-repnet_tf.log')

# Subscribe socket for incoming task messages; topic filter is added in __main__.
context = zmq.Context()
zmqsub = context.socket(zmq.SUB)
zmqsub.connect('tcp://{}:{}'.format('0.0.0.0', 5555))

from sklearn.decomposition import PCA
from sklearn import preprocessing


# OSS client used to upload result artifacts (videos, npy files, json).
osscli = frep_oss_client()

parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--path", default='', type=str, help="Input video file path or root.")
parser.add_argument("--topic", type=str, default="a.b.c", help="topic")
parser.add_argument("--out", default="/tmp/export", help="Output video file path or root")
parser.add_argument("--ckpt", default="/tmp/weights", type=str, help="Checkpoint weights root.")
main_args = parser.parse_args()

# Arguments used when transcoding mp4v output to h264 via the ffmpeg CLI.
ffmpeg_args = '-preset ultrafast -vcodec libx264 -pix_fmt yuv420p'

# Frame size fed to the RepNet model.
input_width = 112
input_height = 112

# Local cache directory for downloaded PCA/KS filter checkpoints.
pcaks_ckpts_path = '/ckpts/pcaks'


def _report_result(msgkey, resdata, errcode=0, errtxt=None):
    """Publish *resdata* for *msgkey* and emit a keep-alive tick.

    A negative *errcode* marks the task as failed: the code (and optional
    *errtxt*) are recorded in *resdata* and progress is forced to 100 so
    consumers see the task as finished.
    """
    if errcode < 0:
        resdata['errno'] = errcode
        if errtxt:
            resdata['errtxt'] = errtxt
        resdata['progress'] = 100
    frep_report_result(msgkey, resdata)
    # Notebook-style keys ('nb...') tick on a per-message sub-topic.
    if msgkey.startswith('nb'):
        tick = f'{main_args.topic}_{msgkey}:10'
    else:
        tick = f'{main_args.topic}:10'
    frep_report_result('zmp_run', tick)


def _detect_focus(msgkey, vfile, retrieve_count, conf_thresh, iou_thresh, box_size, progress_cb):
    """Locate the moving "focus" region of *vfile* via a remote YOLOv5 service.

    Submits an inference request for *retrieve_count* sampled frames, then
    polls the service's message queue (up to ~200 tries, 1s apart) for the
    detection results. Detected box centers are clustered with KMeans to
    find the dominant location; the distance between the centroids of the
    first and second half of the samples decides whether the focus drifts.

    Args:
        msgkey: task message key; '.det' is appended for this sub-request.
        vfile: local video file path handed to the detection service.
        retrieve_count: number of frames the service should sample.
        conf_thresh / iou_thresh: NMS thresholds forwarded to the service.
        box_size: (w, h) half-extent used to build the returned focus box.
        progress_cb: callable taking a 0-100 progress percentage.

    Returns:
        dict with 'focus_box', 'focus_skewing', 'valid_count', 'detect_box'
        (per-frame best boxes) and optionally 'centroid_dist'; or None when
        too few boxes were detected or the service timed out.
    """
    msgkey = msgkey + '.det'
    reqdata = '''{
        "task": "zmq.yolov5.ladder.l.inference",
        "cfg": {
            "pigeon": {
                "msgkey": "%s",
                "user": "private",
                "uuid": "repnet_tf"
            },
            "data": {
                "class_name": "frepai.data.process.VideoDataLoader",
                "params": {
                    "data_source": "%s",
                    "dataset": {
                        "class_name": "frepai.data.VideoFramesDataset",
                        "params": {
                            "retrieve_count": %d
                        }
                    }
                }
            },
            "nms":{
                "conf_thres": %f,
                "iou_thres": %f
            }
        }
    }''' % (msgkey, vfile, retrieve_count, conf_thresh, iou_thresh)
    RACEURL = 'http://0.0.0.0:9119'
    API_INFERENCE = f'{RACEURL}/frepai/framework/inference'
    API_POPMSG = f'{RACEURL}/frepai/private/popmsg'
    # Drain any stale message left under this key before submitting.
    json.loads(requests.get(url=f'{API_POPMSG}?key={msgkey}').text)
    # The template above is plain JSON, so parse it with json.loads —
    # the previous eval() worked only by accident and would execute
    # arbitrary Python if the template were ever tampered with.
    json.loads(requests.post(url=API_INFERENCE, json=json.loads(reqdata)).text)
    for i in range(200):
        resdata = json.loads(requests.get(url=f'{API_POPMSG}?key={msgkey}').text)
        for item in resdata:
            detinfo = {
                'focus_skewing': True,
            }
            centers = []
            detect_box = {}
            for result in item['result']:
                frame_idx = int(result['image_path'].split('.')[0])
                imagew = result['image_width']
                imageh = result['image_height']
                box = result['predict_box']
                if len(box) > 0:
                    detect_box[frame_idx] = box[-1]
                    xyxy = box[-1]['xyxy'] # last is the max confidence score
                    centers.append((int(0.5 * (xyxy[0] + xyxy[2])), int(0.5 * (xyxy[1] + xyxy[3]))))
            centers = np.array(centers)
            # Bail out when fewer than 20% of the sampled frames had a box.
            if len(centers) < 0.2 * retrieve_count:
                Logger.warning('not detect focus box')
                return None
            detinfo['valid_count'] = len(centers)
            half_cnt = int(0.5 * len(centers))
            # NOTE(review): KMeans(n_clusters=3) requires >= 3 samples in
            # each half; very small valid counts would raise — confirm the
            # 0.2 * retrieve_count floor always guarantees this.
            model = KMeans(n_clusters=3)
            model.fit_predict(centers[:half_cnt])
            centroid1 = model.cluster_centers_
            model.fit_predict(centers[half_cnt:])
            centroid2 = model.cluster_centers_

            # Compare mean centroid of first vs second half to detect drift.
            dist = np.linalg.norm(centroid1.mean(axis=0) - centroid2.mean(axis=0))
            if dist > min(50, 0.5 * max(box_size)):
                detinfo['centroid_dist'] = dist
            else:
                detinfo['focus_skewing'] = False

            # Final clustering over all centers; most common cluster wins.
            clusters = model.fit_predict(centers)
            most_idx = Counter(clusters).most_common()[0][0]
            center = [int(x) for x in model.cluster_centers_[most_idx]]

            # Expand the winning center by box_size, clamped to the image.
            detinfo['focus_box'] = [
                min(max(center[0] - box_size[0], 0), imagew - box_size[0]),
                min(max(center[1] - box_size[1], 0), imageh - box_size[1]),
                min(center[0] + box_size[0], imagew),
                min(center[1] + box_size[1], imageh)]
            Logger.info(detinfo)
            detinfo['detect_box'] = detect_box
            return detinfo
        progress_cb(100 * min(float(i * 15) / retrieve_count, 1))
        time.sleep(1)
    return None


def _denormal_image(x):
    x -= x.mean()
    x /= x.std()
    x *= 64
    x += 128
    x = np.clip(x, 0, 255).astype('uint8')
    return x


def draw_osd_sim(sim, size=128):
    """Render a self-similarity matrix as a size x size BGR heatmap image.

    Draws *sim* with matplotlib ('hot' colormap, log-normalized), strips
    all axes/margins, rasterizes to an in-memory PNG and decodes it back
    with OpenCV so it can be blended onto video frames.

    The unreachable bare `raise` that followed the `with` block was
    removed: the `return` inside the context manager always exits first.
    """
    fig, _ = plt.subplots()
    plt.axis('off')
    fig.set_size_inches(size / 100.0, size / 100.0)
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.subplots_adjust(top=1, bottom=0, left=0, right=1, hspace=0, wspace=0)
    plt.margins(0, 0)
    plt.imshow(sim, cmap='hot', interpolation='nearest', norm=LogNorm())
    with io.BytesIO() as fw:
        plt.savefig(fw, dpi=100.0, bbox_inches=0)
        buffer_ = np.frombuffer(fw.getvalue(), dtype=np.uint8)
        plt.close()
        return cv2.imdecode(buffer_, cv2.IMREAD_COLOR)


def draw_osd_feat(feat, X, Y, width, height):
    """Tile X*Y channels of a feature map into one BGR overlay image.

    Args:
        feat: (W, H, C) feature tensor; the first X*Y channels are used.
        X, Y: grid columns and rows of the tiled mosaic.
        width, height: pixel size of the rendered overlay.

    Each channel is normalized for display via _denormal_image, the
    mosaic is drawn with matplotlib ('viridis'), rasterized to PNG in
    memory and decoded back with OpenCV.

    The unreachable bare `raise` that followed the `with` block was
    removed: the `return` inside the context manager always exits first.
    """
    W, H, _ = feat.shape
    osd_feat = np.zeros((H * Y, W * X))
    fk = 0
    for hi in range(Y):
        for wj in range(X):
            fmap = _denormal_image(feat[:, :, fk])
            osd_feat[hi * H:(hi + 1) * H, wj * W:(wj + 1) * W] = fmap
            fk += 1
    fig, _ = plt.subplots()
    plt.axis('off')
    fig.set_size_inches(width / 100.0, height / 100.0)
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.subplots_adjust(top=1, bottom=0, left=0, right=1, hspace=0, wspace=0)
    plt.margins(0, 0)
    plt.imshow(osd_feat, cmap='viridis', interpolation='nearest')
    with io.BytesIO() as fw:
        plt.savefig(fw, dpi=100.0, bbox_inches=0)
        buffer_ = np.frombuffer(fw.getvalue(), dtype=np.uint8)
        plt.close()
        return cv2.imdecode(buffer_, cv2.IMREAD_COLOR)


def draw_hist_density(x, bins, width, height):
    """Render a step histogram of *x* as a width x height BGR image.

    Used as a translucent overlay on debug video frames. The figure is
    drawn without axes/margins, rasterized to an in-memory PNG and
    decoded back with OpenCV.

    The unreachable bare `raise` that followed the `with` block was
    removed: the `return` inside the context manager always exits first.
    """
    fig, _ = plt.subplots()
    fig.set_size_inches(width / 100.0, height / 100.0)
    plt.axis('off')
    plt.subplots_adjust(top=1, bottom=0, left=0, right=1, hspace=0, wspace=0)
    plt.margins(0, 0)
    plt.hist(x, bins=bins, histtype=u'step', color='blue')
    with io.BytesIO() as fw:
        plt.savefig(fw, dpi=100.0, bbox_inches=0)
        buffer_ = np.frombuffer(fw.getvalue(), dtype=np.uint8)
        plt.close()
        return cv2.imdecode(buffer_, cv2.IMREAD_COLOR)


def inference(model, opt, resdata):
    """Run RepNet repetition counting for one video task.

    Parses the task options in *opt* (OmegaConf config received over zmq),
    downloads and reads the video, runs the model over candidate frame
    strides, optionally renders annotated debug videos, uploads all
    artifacts to OSS and reports progress/results through *resdata*.

    Side effects: network I/O (download/upload/report), writes under
    main_args.out, deletes the downloaded video file on every exit path.
    """
    msgkey = opt.pigeon.msgkey
    best_stride_video = False
    user_code = 'unkown'
    # Frame-filter configuration passed through to read_video().
    args = {
        'rmstill_frame_enable': True,
        'color_tracker_enable': False
    }
    if 'user_code' in opt.pigeon:
        user_code = opt.pigeon.user_code
    # --- optional output-video flags ---
    save_video = False
    if 'save_video' in opt:
        save_video = opt.save_video
    best_stride_video = False
    if 'best_stride_video' in opt:
        best_stride_video = opt.best_stride_video
    # OSD overlays are only meaningful when some video is being rendered.
    osd_feat, osd_sims = False, False
    if best_stride_video or save_video:
        if 'osd_feat' in opt:
            osd_feat = opt.osd_feat
        if 'osd_sims' in opt:
            osd_sims = opt.osd_sims
    # --- model run parameters (defaults overridable from opt) ---
    batch_size = 1
    if 'batch_size' in opt:
        batch_size = opt.batch_size
    in_threshold = 0.5
    if 'in_threshold' in opt:
        in_threshold = opt.in_threshold
    tsm_last_enable, tsm_last_threshold, tsm_last_smooth = True, 0.5, False
    if 'tsm_last_enable' in opt:
        tsm_last_enable = opt.tsm_last_enable
    if tsm_last_enable:
        if 'tsm_last_threshold' in opt:
            tsm_last_threshold = opt.tsm_last_threshold
        if 'tsm_last_smooth' in opt:
            tsm_last_smooth = opt.tsm_last_smooth

    strides = [1, 3]
    if 'strides' in opt:
        strides = list(opt.strides)
    constant_speed = False
    if 'constant_speed' in opt:
        constant_speed = opt.constant_speed
    median_filter = True
    if 'median_filter' in opt:
        median_filter = opt.median_filter
    temperature = 13.544
    if 'temperature' in opt:
        temperature = opt.temperature
    model.temperature = temperature
    # Optional embedding-space filter (PCA/KS bundle produced by pcaks_test).
    embs_filter_path = None
    if 'ef_is_send' in opt and opt.ef_is_send:
        embs_filter_path = opt.ef_url
    # Optional frame rotation angle (0 means "no rotation").
    angle = None
    if 'angle' in opt and opt['angle'] != 0:
        angle = opt.angle
    # Optional multiplicative correction applied to per-frame counts.
    reg_factor = None
    if 'reg_factor' in opt and opt['reg_factor'] != 1:
        reg_factor = opt['reg_factor']
    # --- still-frame removal vs color-tracker frame filtering ---
    if 'rmstill_frame_enable' in opt:
        args['rmstill_frame_enable'] = opt.rmstill_frame_enable
    if args['rmstill_frame_enable']:
        args['rmstill_rate_threshold'] = opt.rmstill_rate_threshold
        args['rmstill_bin_threshold'] = opt.rmstill_bin_threshold
        args['rmstill_brightness_norm'] = opt.rmstill_brightness_norm
        args['rmstill_area_mode'] = opt.rmstill_area_mode
        args['rmstill_noise_level'] = opt.rmstill_noise_level
    else:
        if 'color_tracker_enable' in opt:
            args['color_tracker_enable'] = opt.color_tracker_enable
        if args['color_tracker_enable']:
            args['color_select'] = opt.color_select
            args['color_rate_threshold'] = opt.color_rate_threshold
            args['color_buffer_size'] = opt.color_buffer_size
            args['color_lower_rate'] = opt.color_lower_rate
            args['color_upper_rate'] = opt.color_upper_rate
            args['color_track_direction'] = opt.color_track_direction

    # Free-form developer overrides merged last (highest priority).
    if 'dev_args' in opt and len(opt['dev_args']) > 0:
        args.update(json.loads(opt['dev_args']))

    #### focus region selection: auto-detect, explicit box, or center rate
    detect_focus, retrieve_count, box_size = False, 1, (10, 10)
    conf_thresh, iou_thresh = 0.5, 0.5
    focus_box, focus_box_repnum = None, 1
    black_box, black_overlay = None, False
    if 'detect_focus' in opt:
        detect_focus = opt.detect_focus
    if not detect_focus:
        if 'focus_box' in opt:
            # A (0,0,1,1) box means "whole frame" — treat as no focus box.
            if 0 == opt.focus_box[0] and 0 == opt.focus_box[1] \
                    and 1 == opt.focus_box[2] and 1 == opt.focus_box[3]:
                # Logger.warning(f'error box: {opt.focus_box}')
                pass
            else:
                focus_box = opt.focus_box
        else:
            # Derive a centered focus box from relative width/height rates.
            if 'center_rate' in opt:
                w_rate, h_rate = opt.center_rate
                if w_rate != 0 and h_rate != 0:
                    focus_box = [
                        (1 - w_rate) * 0.5, (1 - h_rate) * 0.5,
                        (1 + w_rate) * 0.5, (1 + h_rate) * 0.5,
                    ]
        # 'black_box' (preferred) or legacy 'block_box': region to mask out.
        if 'block_box' in opt or 'black_box' in opt:
            if 'black_box' in opt:
                if 0 == opt.black_box[0] and 0 == opt.black_box[1] \
                        and 0 == opt.black_box[2] and 0 == opt.black_box[3]:
                    pass
                else:
                    black_box = opt.black_box
            else:
                if 0 == opt.block_box[0] and 0 == opt.block_box[1] \
                        and 0 == opt.block_box[2] and 0 == opt.block_box[3]:
                    pass
                else:
                    black_box = opt.block_box
            if 'black_overlay' in opt:
                black_overlay = opt['black_overlay']
    else:
        # Parameters forwarded to the remote YOLOv5 focus detection.
        if 'retrieve_count' in opt:
            retrieve_count = opt.retrieve_count
        if 'box_size' in opt:
            box_size = opt.box_size
            if isinstance(box_size, int):
                box_size = (box_size, box_size)
        if 'conf_thresh' in opt:
            conf_thresh = opt.conf_thresh
        if 'iou_thresh' in opt:
            iou_thresh = opt.iou_thresh
    if 'focus_box_repnum' in opt:
        focus_box_repnum = opt.focus_box_repnum

    # Progress budget depends on which stages will run.
    if detect_focus:
        read_video_weight = 0.2
    else:
        read_video_weight = 0.3

    if save_video or best_stride_video:
        model_progress_weight = 0.30
    else:
        model_progress_weight = 0.68

    ts_token = 'repnet_tf'
    outdir = os.path.join(main_args.out, user_code, ts_token)
    # NOTE(review): this wipes the whole main_args.out root, not just
    # outdir — confirm concurrent tasks never share that root.
    shutil.rmtree(main_args.out, ignore_errors=True)
    os.makedirs(outdir, exist_ok=True)

    # parse path info
    if 'https://' in opt.video:
        uri = opt.video[8:]
    else:
        _report_result(msgkey, resdata, errcode=-10)
        Logger.warning('video url invalid: %s' % opt.video)
        return
    # Download the video to a local file.
    video_file = frep_data(opt.video)

    segs = uri.split('/')
    # bucketname = segs[0].split('.')[0]
    # oss_domain = 'https://%s' % segs[0]
    oss_domain = 'https://frepai-1301930378.cos.ap-beijing.myqcloud.com'
    oss_path_counts = os.path.join('/', *segs[1:-2], 'counts')
    oss_path = os.path.join('/', *segs[1:-2], 'outputs', segs[-1].split('.')[0], ts_token)

    # Load the optional embedding-filter bundle (pickled by pcaks_test).
    pcaks = None
    if embs_filter_path:
        epath = frep_data(embs_filter_path, pcaks_ckpts_path)
        with open(epath, 'rb') as fr:
            pcaks = pickle.load(fr)
            pcaks['alpha'] = opt.ef_alpha if 'ef_alpha' in opt else 0.01
            pcaks['beta'] = opt.ef_beta if 'ef_beta' in opt else 0.5
            pcaks['gamma'] = opt.ef_gamma if 'ef_gamma' in opt else 0.7

    # --- stage progress callbacks: each maps its stage's 0-100 onto the
    # overall progress budget and reports via _report_result ---
    def _detect_focus_progress(x):
        resdata['progress'] = round(x * 0.1, 2)
        Logger.info(resdata['progress'])
        _report_result(msgkey, resdata)

    def _video_read_progress(x):
        if detect_focus:
            resdata['progress'] = round(10 + x * read_video_weight, 2)
        else:
            resdata['progress'] = round(x * read_video_weight, 2)
        _report_result(msgkey, resdata)

    def _model_strides_progress(x):
        resdata['progress'] = round(30 + x * model_progress_weight, 2)
        Logger.info(resdata['progress'])
        _report_result(msgkey, resdata)

    def _video_save_progress(x):
        resdata['progress'] = round(60 + x * 0.4, 2)
        Logger.info(resdata['progress'])
        _report_result(msgkey, resdata)

    resdata['progress'] = 0.0
    _report_result(msgkey, resdata)
    try:
        # Optionally let the remote detector pick the focus box first.
        if detect_focus:
            Logger.info('detect focus...')
            detinfo = _detect_focus(msgkey, video_file, retrieve_count, conf_thresh, iou_thresh, box_size, _detect_focus_progress)
            if detinfo:
                focus_box = detinfo['focus_box']

        # Decode frames; still frames are split out for later re-merge.
        frames, vid_fps, still_frames, binframes, points = read_video(
                video_file, width=input_width, height=input_height, rot=angle,
                black_box=black_box, focus_box=focus_box, focus_box_repnum=focus_box_repnum,
                progress_cb=_video_read_progress,
                args=args,
                dev=(save_video or best_stride_video))
    except Exception:
        errtxt = traceback.format_exc(limit=6)
        _report_result(msgkey, resdata, errcode=-20, errtxt=errtxt)
        Logger.warning('read video error: %s' % opt.video)
        Logger.error(errtxt)
        os.remove(video_file)
        return
    # RepNet consumes 64-frame windows; fewer frames is degenerate.
    if len(frames) <= 64:
        if len(frames) == 0:
            _report_result(msgkey, resdata, errcode=-21, errtxt='video valid frames is 0')
            os.remove(video_file)
            return
        # TODO
        Logger.warning('read video warnning: %s num_frames[%d]' % (opt.video, len(frames)))

    _report_result(msgkey, resdata)

    s_time = time.time()

    # Run the model across candidate strides and keep the best one.
    (pred_period, pred_score,
            within_period, per_frame_counts,
            chosen_stride, final_embs, feature_maps, feat_factors) = get_counts(
            model,
            frames,
            strides=strides,
            batch_size=batch_size,
            tsm_last_threshold=tsm_last_threshold,
            within_period_threshold=in_threshold,
            tsm_last_smooth=tsm_last_smooth,
            constant_speed=constant_speed,
            median_filter=median_filter,
            osd_feat=osd_feat,
            pcaks=pcaks,
            progress_cb=_model_strides_progress)
    infer_time = time.time() - s_time
    Logger.info('model inference using time: %d, chosen_stride:%d' % (infer_time, chosen_stride))

    # Re-interleave model outputs with the still frames that were removed
    # before inference, so indices line up with the original video again.
    all_frames_count = len(frames) + len(still_frames)
    is_still_frames = [False] * all_frames_count

    final_within_period = [.0] * all_frames_count
    final_per_frame_counts = [.0] * all_frames_count
    i, j = 0, 0
    for k in range(all_frames_count):
        if j < len(still_frames) and k == still_frames[j][0]:
            is_still_frames[k] = True
            j += 1
        elif i < len(frames):
            final_within_period[k] = within_period[i]
            final_per_frame_counts[k] = per_frame_counts[i]
            i += 1
        else:
            _report_result(msgkey, resdata, errcode=-30, errtxt='frames count invalid')
            Logger.warning('frames count invalid: %d vs %d vs %d' % (i, j, k))
            os.remove(video_file)
            return

    within_period = final_within_period
    # NOTE(review): np.float is a deprecated alias removed in NumPy >= 1.24;
    # this line will raise AttributeError on modern NumPy — use float/np.float64.
    per_frame_counts = np.asarray(final_per_frame_counts, dtype=np.float)
    if reg_factor:
        per_frame_counts = reg_factor * per_frame_counts
    sum_counts = np.cumsum(per_frame_counts)

    # del frames

    json_result = {}
    json_result['period'] = pred_period
    json_result['score'] = pred_score
    json_result['stride'] = chosen_stride
    json_result['fps'] = 1 # TODO vid_fps
    json_result['num_frames'] = all_frames_count
    json_result['infer_time'] = infer_time
    frames_info = []
    spf = 1 / vid_fps # time second for per frame
    # Sample one record per second of video (every vid_fps frames).
    for i, (in_period, p_count, is_still) in enumerate(zip(within_period, per_frame_counts, is_still_frames)):
        if i % vid_fps == 0:
            frames_info.append({
                'image_id': '%d.jpg' % i,
                'at_time': round((i + 1) * spf, 3),
                'is_still': is_still,
                'within_period': in_period,
                'pframe_counts': p_count,
                'cum_counts': sum_counts[i]
            })
    # NOTE(review): this `else` belongs to the `for` and always executes
    # (there is no `break`), appending the last frame a second time when
    # its index is already a multiple of vid_fps — confirm intended.
    else:
        frames_info.append({
            'image_id': '%d.jpg' % i,
            'at_time': round((i + 1) * spf, 3),
            'is_still': is_still,
            'within_period': in_period,
            'pframe_counts': p_count,
            'cum_counts': sum_counts[i]
        })
    json_result['frames_period'] = frames_info

    # Persist embeddings and compute the self-similarity matrix for OSD.
    if osd_sims:
        np.save(os.path.join(outdir, 'embs_feat.npy'), final_embs)
        resdata['embs_feat'] = oss_domain + os.path.join(oss_path, 'embs_feat.npy')
        embs_sims = get_sims(final_embs, temperature=temperature)
        embs_sims = np.squeeze(embs_sims, -1)
        Logger.info(f'embs_feat.shape: {final_embs.shape}  embs_sims.shape: {embs_sims.shape}')

    detect_box = None
    if detect_focus and detinfo:
        detect_box = detinfo.pop('detect_box')
        resdata['detinfo'] = detinfo

    del within_period, per_frame_counts, final_embs

    # --- render annotated output videos (optional) ---
    if save_video or best_stride_video:
        cap = cv2.VideoCapture(video_file)
        fps = cap.get(cv2.CAP_PROP_FPS)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fmt = cv2.VideoWriter_fourcc(*'mp4v')
        if save_video:
            mp4v_file = os.path.join(outdir, 'tmp_target.mp4')
            h264_file = os.path.join(outdir, 'target.mp4')
            vid = cv2.VideoWriter(mp4v_file, fmt, fps, (width, height))
        if best_stride_video:
            mp4v_stride_file = os.path.join(outdir, 'tmp-target-stride.mp4')
            h264_stride_file = os.path.join(outdir, 'target-stride.mp4')
            stride_vid = cv2.VideoWriter(mp4v_stride_file, fmt, fps, (width, height))

        if osd_sims:
            if black_box is not None:
                bx1, by1, bx2, by2 = cal_rect_points(width, height, black_box)
            if focus_box is not None:
                fx1, fy1, fx2, fy2 = cal_rect_points(width, height, focus_box)
        else:
            black_box = None
            focus_box = None

        bottom_text = '%s %s %s' % (
            'P:%d' % focus_box_repnum,
            'F:%.3f' % (float(len(frames)) / all_frames_count),
            'R:%.1f' % angle if angle else '',
        )

        # NOTE(review): fx1/fy1/fx2/fy2 only exist when osd_sims is on AND
        # a focus_box was set — the 'M:' term below raises NameError
        # otherwise; confirm rmstill + save_video always implies both.
        if args['rmstill_frame_enable']:
            bottom_text += '%s %s %s' % (
                'A:%.4f' % args['rmstill_rate_threshold'],
                'B:%d' % args['rmstill_bin_threshold'],
                'M:%.4f' % (float(1) / ((fy2 - fy1) * (fx2 - fx1)))
            )
        if args['color_tracker_enable']:
            bottom_text += '%s %s %s %s' % (
                'A:%.2f' % args['color_rate_threshold'],
                'B:%d' % args['color_buffer_size'],
                'C:%.2f,%.2f' % (args['color_lower_rate'], args['color_upper_rate']),
                'D:%d' % args['color_track_direction']
            )

        if cap.isOpened():
            cur_db = None
            # idx walks all decoded frames; valid_idx walks non-still ones.
            idx, valid_idx = 0, 0
            th = int(0.08 * height)
            osd, osd_size, alpha = 0, int(width*0.25), 0.5 # noqa
            osd_blend = None
            hist_blend = None
            if len(points) > 0:
                hist_blend = draw_hist_density(points, 20, input_width, input_height)
            while True:
                success, frame_bgr = cap.read()
                if not success:
                    break
                # Draw (or fill) the masked-out black box region.
                if black_box is not None:
                    if black_overlay:
                        frame_bgr[by1:by2, bx1:bx2, :] = 0
                    else:
                        cv2.rectangle(frame_bgr, (bx1, by1), (bx2, by2), (0, 0, 0), 2)
                    cv2.putText(frame_bgr,
                            '%d,%d' % (bx1, by1),
                            (bx1 + 2, by1 + 16),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (210, 210, 210), 1)
                    cv2.putText(frame_bgr,
                            '%d,%d' % (bx2, by2),
                            (bx2 - 65, by2 - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (210, 210, 210), 1)
                try:
                    # Refresh the similarity-matrix overlay once per model window.
                    if osd_sims and not is_still_frames[idx] \
                            and valid_idx % (chosen_stride * model.num_frames) == 0:
                        Logger.info(f'valid_idx: {valid_idx} idx: {idx} osd: {osd}')
                        osd_blend = draw_osd_sim(embs_sims[osd], osd_size)
                        if pcaks:
                            cv2.putText(osd_blend,
                                    '%.2f %.2f %.2f' % (pcaks['alpha'], pcaks['beta'], pcaks['gamma']),
                                    (int(0.05 * osd_size), int(0.2 * osd_size)),
                                    cv2.FONT_HERSHEY_SIMPLEX,
                                    0.6,
                                    (255, 0, 0), 1)
                            cv2.putText(osd_blend,
                                    '%.2f %.2f' % (feat_factors[osd][0], feat_factors[osd][1]),
                                    (int(0.05 * osd_size), int(0.85 * osd_size)),
                                    cv2.FONT_HERSHEY_SIMPLEX,
                                    0.6,
                                    (255, 0, 0), 1)
                        cv2.putText(osd_blend,
                                '%d' % osd,
                                (int(0.4 * osd_size), int(0.55 * osd_size)),
                                cv2.FONT_HERSHEY_SIMPLEX,
                                1,
                                (255, 0, 0), 2)
                        osd += 1
                    if osd_blend is not None:
                        frame_bgr[th:osd_size + th, width - osd_size:, :] = osd_blend
                except Exception as err:
                    # Overlay failures are non-fatal; keep writing frames.
                    Logger.info(err)
                    Logger.error(traceback.format_exc(limit=6))
                # Top-left status line: resolution, fps, stride, counts.
                cv2.putText(frame_bgr,
                        '%dX%d %.1f S:%d C:%.1f/%.1f %s %s' % (width, height,
                            fps, chosen_stride, sum_counts[idx], sum_counts[-1],
                            'L:%.2f' % tsm_last_threshold if tsm_last_enable else '',
                            'ST' if is_still_frames[idx] else ''),
                        (2, int(0.06 * height)),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.7 if height < 500 else 2,
                        (255, 255, 255), 2)

                if focus_box is not None:
                    cv2.putText(frame_bgr,
                            '%d,%d' % (fx1, fy1),
                            (fx1 + 2, fy1 + 16),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
                    cv2.putText(frame_bgr,
                            '%d,%d' % (fx2, fy2),
                            (fx2 - 65, fy2 - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
                    cv2.rectangle(frame_bgr, (fx1, fy1), (fx2, fy2), (0, 255, 0), 2)

                # Paste model-input frame and binarized/histogram thumbnails.
                if osd_sims and valid_idx < len(frames) and valid_idx < len(binframes):
                    frame_bgr[height - input_height - 10:, :input_width + 10, :] = 222
                    frame_bgr[height - input_height - 5:height - 5, 5:input_width + 5, :] = frames[valid_idx][:,:,::-1]
                    if hist_blend is None:
                        frame_bgr[th:input_height + th, 5:input_width + 5, :] = binframes[valid_idx]
                    else:
                        frame_bgr[th:input_height + th, 5:input_width + 5, :] = cv2.addWeighted(
                                hist_blend, alpha,
                                binframes[valid_idx], 1 - alpha,
                                0)

                # Draw the latest detector box (held until the next update).
                if detect_box:
                    if idx in detect_box:
                        cur_db = detect_box[idx]
                    if cur_db:
                        xyxy = cur_db['xyxy']
                        cv2.rectangle(frame_bgr, (xyxy[0], xyxy[1]), (xyxy[2], xyxy[3]), (211, 211, 211), 2)
                        cv2.putText(frame_bgr, cur_db['conf'],
                                (xyxy[0] + 2, xyxy[1] + 2),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)

                cv2.putText(frame_bgr, bottom_text,
                        (input_width + 12, height - int(th * 0.35)),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.7 if height < 500 else 2,
                        (255, 255, 255), 2)

                if idx % 200 == 0:
                    _video_save_progress((90 * float(idx)) / all_frames_count)

                if save_video:
                    vid.write(frame_bgr)
                # Stride video keeps only every chosen_stride-th valid frame.
                if best_stride_video and not is_still_frames[idx] \
                        and valid_idx % chosen_stride == 0:
                    if osd_feat:
                        fmap = draw_osd_feat(feature_maps[0][int(valid_idx / chosen_stride)], 7, 3, 450, 200)
                        frame_bgr[th:th + 200, 0:450, :] = fmap
                        fmap = draw_osd_feat(feature_maps[2][int(valid_idx / chosen_stride)], 8, 1, 500, 70)
                        frame_bgr[-th - 80: -th - 10, width - 500:, :] = fmap
                    stride_vid.write(frame_bgr)
                if not is_still_frames[idx]:
                    valid_idx += 1
                idx += 1
        cap.release()
        _video_save_progress(91)
        # Transcode mp4v output to h264 with ffmpeg, then drop the temp file.
        if save_video:
            vid.release()
            os.system(f'ffmpeg -an -i {mp4v_file} {ffmpeg_args} {h264_file} 2>/dev/null')
            os.remove(mp4v_file)
            resdata['target_mp4'] = oss_domain + os.path.join(oss_path, os.path.basename(h264_file))
            json_result['target_mp4'] = resdata['target_mp4']
        _video_save_progress(94)
        if best_stride_video:
            stride_vid.release()
            os.system(f'ffmpeg -an -i {mp4v_stride_file} {ffmpeg_args} {h264_stride_file} 2>/dev/null')
            os.remove(mp4v_stride_file)
            resdata['stride_mp4'] = oss_domain + os.path.join(oss_path, os.path.basename(h264_stride_file))
            json_result['stride_mp4'] = resdata['stride_mp4']
        _video_save_progress(96)
        if osd_sims:
            np.save(os.path.join(outdir, 'embs_sims.npy'), embs_sims)
            resdata['embs_sims'] = oss_domain + os.path.join(oss_path, 'embs_sims.npy')
        mkvid_time = time.time() - s_time - infer_time
        json_result['mkvideo_time'] = mkvid_time
        _video_save_progress(98)

    # Total repetition count is the last cumulative value.
    resdata['sumcnt'] = round(float(sum_counts[-1]), 2)

    json_result_file = os.path.join(outdir, 'results.json')
    with open(json_result_file, 'w') as fw:
        json.dump(json_result, fw, indent=4)

    # Non-notebook tasks also persist the effective config for auditing.
    if msgkey[:2] != 'nb':
        json_config_file = os.path.join(outdir, 'config.json')
        with open(json_config_file, 'w') as fw:
            opt.sumcnt = resdata['sumcnt']
            json.dump(OmegaConf.to_container(opt), fw, indent=4)

    # oss_path_counts
    # Best-effort marker object named after the video + final count.
    try:
        ts = os.path.basename(opt.video)[:-4]
        Logger.info(f'touch {oss_path_counts}/{ts}_{resdata["sumcnt"]}.json')
        frep_object_put_jsonconfig(
                osscli, [], f'{oss_path_counts}/{ts}_{resdata["sumcnt"]}.json')
    except Exception as err:
        Logger.warning('%s' % err)

    # Replace any previous artifacts at this OSS prefix with the new ones.
    frep_object_remove(osscli, outdir[1:] + '/')
    prefix_map = [outdir, oss_path]
    frep_object_put(osscli, outdir, prefix_map=prefix_map)

    resdata['progress'] = 100.0
    resdata['target_json'] = oss_domain + os.path.join(oss_path, os.path.basename(json_result_file))
    Logger.info(json.dumps(resdata))
    _report_result(msgkey, resdata)

    # Explicitly release large buffers and the downloaded video.
    del frames, still_frames, json_result, frames_info
    if pcaks:
        del feat_factors
    if osd_sims:
        del embs_sims
    if osd_feat:
        del feature_maps
    os.remove(video_file)
    del resdata


def pcaks_test(opt, resdata):
    """Fit and upload a PCA + scaler + empirical-CDF bundle from embeddings.

    Downloads every feature file listed in ``opt.pcaks``, stacks the
    selected slices into a (N, 512) matrix, fits the configured scaler and
    a PCA, builds per-component empirical distributions, pickles the
    bundle next to *remote_path* on OSS and reports the resulting URL via
    ``resdata['pcaks']``.
    """
    Logger.info(opt)
    msgkey = opt.pigeon.msgkey

    remote_path = 'https://frepai-1301930378.cos.ap-beijing.myqcloud.com/datasets/embs_feat.npy'
    if 'out_path' in opt.pigeon:
        remote_path = opt.pigeon.out_path

    # Strip 'https://' and split host from object path.
    segs = remote_path[8:].split('/')
    oss_path = os.path.join('/', *segs[1:])

    pca = PCA(n_components=opt.n_components)

    # Pick the configured scaler; anything unrecognized (including the
    # explicit 'Normalizer') falls back to Normalizer.
    sc = opt.scaler
    if sc == 'Standard':
        scaler = preprocessing.StandardScaler()
    elif sc == 'MinMax':
        scaler = preprocessing.MinMaxScaler()
    elif sc == 'Robust':
        scaler = preprocessing.RobustScaler(quantile_range=(25., 75.))
    else:
        scaler = preprocessing.Normalizer()

    # Gather the requested embedding slices from each dataset entry.
    feat_list = []
    with tempfile.TemporaryDirectory() as tmp_dir:
        for item in opt.pcaks:
            epath = frep_data(item['ef_url'], tmp_dir)
            if not os.path.exists(epath):
                continue
            efnpy = np.load(epath)
            if efnpy.shape[0] > max(item['slices']):
                feat_list.append(efnpy[item['slices']])
            else:
                Logger.error(f'nfnpy.shape[0] ({efnpy.shape}) < max_slices ({max(item["slices"])})')

    feat_np = np.concatenate(feat_list, axis=0).reshape((-1, 512))
    Logger.info(f'feat_np.shape: {feat_np.shape}')

    scaler.fit(feat_np)
    data_out = pca.fit_transform(scaler.transform(feat_np))

    # One empirical distribution per principal component.
    ecdfs = tfp.distributions.Empirical(data_out.T)

    pcaks = {'pca': pca, 'scaler': scaler, 'ecdfs': ecdfs}

    # Pickle locally, push to OSS, and report the public URL.
    with tempfile.TemporaryDirectory() as tmp_dir:
        with open(f'{tmp_dir}/{os.path.basename(oss_path)}', 'wb') as fw:
            pickle.dump(pcaks, fw)
        prefix_map = [tmp_dir, os.path.dirname(oss_path)]
        frep_object_put(osscli, tmp_dir, prefix_map=prefix_map)
        resdata['pcaks'] = f'https://{segs[0]}{oss_path}'
    _report_result(msgkey, resdata)


if __name__ == "__main__":
    # Register this worker's topic with the broker and announce it.
    zmqsub.subscribe(main_args.topic)
    frep_report_result('add_topic', main_args.topic)
    Logger.info('main_args.topic: %s' % main_args.topic)

    # Clear leftovers from a previous (possibly crashed) run.
    os.system('rm /tmp/*.mp4 2>/dev/null')
    os.system('rm /tmp/tmp*.py 2>/dev/null')

    try:
        # Load model
        repnet_model = get_model(main_args.ckpt)

        # Main task loop: block on zmq, dispatch each config to either the
        # pcaks fitting path or the regular inference path.
        while True:
            Logger.info('wait task')
            # Message format: '<topic> <json...>'; drop the topic prefix.
            zmq_cfg = ''.join(zmqsub.recv_string().split(' ')[1:])
            zmq_cfg = OmegaConf.create(zmq_cfg)
            Logger.info(zmq_cfg)
            if 'pigeon' not in zmq_cfg:
                continue
            Logger.info(zmq_cfg.pigeon)
            resdata = {'pigeon': OmegaConf.to_container(zmq_cfg.pigeon), 'task': main_args.topic, 'errno': 0}
            # Keep-alive tick (notebook keys get a per-message sub-topic).
            if zmq_cfg.pigeon.msgkey[:2] == 'nb':
                frep_report_result('zmp_run', f'{main_args.topic}_{zmq_cfg.pigeon.msgkey}:5')
            else:
                frep_report_result('zmp_run', f'{main_args.topic}:5')
            try:
                if 'pcaks' in zmq_cfg:
                    pcaks_test(zmq_cfg, resdata)
                else:
                    inference(repnet_model, zmq_cfg, resdata)
            except Exception as err:
                # OOM is fatal for the process: report and re-raise so the
                # supervisor can restart with a clean GPU state.
                if 'OOM' in str(err):
                    _report_result(zmq_cfg.pigeon.msgkey, resdata, errcode=-9, errtxt='OOM')
                    raise err
                errtxt = traceback.format_exc(limit=6)
                Logger.error(errtxt)
                _report_result(zmq_cfg.pigeon.msgkey, resdata, errcode=-99, errtxt=errtxt)
                os.system('rm /tmp/*.mp4 2>/dev/null')
                os.system('rm /tmp/tmp*.py 2>/dev/null')
            # Signal task completion regardless of success or failure.
            if zmq_cfg.pigeon.msgkey[:2] == 'nb':
                frep_report_result('zmp_end', f'{main_args.topic}_{zmq_cfg.pigeon.msgkey}')
            else:
                frep_report_result('zmp_end', main_args.topic)
            time.sleep(0.01)
    except Exception as err:
        Logger.error(err)
        Logger.error(traceback.format_exc(limit=6))
    finally:
        Logger.info('end')
