#!/usr/bin/env python3.9
# coding=utf-8

"""
Copyright (c) Huawei Technologies Co., Ltd. 2020-2022. All rights reserved.
Description: python api test.
Author: MindX SDK
Create: 2022
History: NA
"""

import logging
import time
from multiprocessing import Queue
import traceback
import numpy as np
from sklearn import preprocessing
from mindx.sdk import base
from mindx.sdk.base import Image, ImageProcessor, Size, Tensor, Dim, Color
import cv2
from utils import face_post_process, timer, common
from config import *
from sort import *
from queue import Queue as QQueue
import logging.config
logging.config.fileConfig("logger.conf")
logger = logging.getLogger("pipeline")


class PortraitDection(object):
    """Portrait (face) detection stage of the pipeline.

    Pulls decoded frames from an input queue, runs the RetinaFace
    detector in batches on the configured NPU device, tracks detections
    with a per-video SORT tracker, and forwards faces whose on-screen
    size changed enough since the last emission to the next stage.
    """

    def __init__(self, det_model_path, batch_size, device_id, pid):
        """Load the detection model and per-process helpers.

        Args:
            det_model_path: path to the offline detection model file.
            batch_size: number of frames accumulated before one inference.
            device_id: NPU device the model and image processor run on.
            pid: identifier of the worker process (kept for bookkeeping).
        """
        self.det_model = base.model(det_model_path, deviceId=device_id)
        self.image_processor = ImageProcessor(device_id)  # initialize mxbase image_process
        self.batch_size = batch_size
        self.device_id = device_id
        self.pid = pid
        self.mytimer = timer.Timer(batch_size)

        self.MODEL_INPUT_WIDTH = 640
        self.MODEL_INPUT_HEIGHT = 640
        self.INFER_BREAK_WAIT_TIME = 5
        self.USE_BENCHMARK = False

        # One SORT tracker and one size-history queue per video_id,
        # created lazily inside infer().
        self.sorts = {}
        self.queues = {}

    def det_preprocess(self, image_ori, image_ori_height, image_ori_width):
        """Resize and pad one frame to the model input size via DVPP.

        Args:
            image_ori: NHWC ndarray holding a single decoded frame
                (batch dimension of 1 — presumably; confirm with caller).
            image_ori_height: valid (unpadded) frame height.
            image_ori_width: valid (unpadded) frame width.

        Returns:
            Tuple of (image_src, scale_factor, image_ori): the NCHW
            model-input array, the resize scale factor, and the cropped
            NCHW original frame.
        """
        decode_image = Image(image_ori[0], base.rgb)
        decode_image.set_original_size(Size(image_ori_width, image_ori_height))

        # Crop away codec padding, then go NHWC -> NCHW.
        image_ori = image_ori[:, :image_ori_height, :image_ori_width, :]
        image_ori = image_ori.transpose((0, 3, 1, 2))

        # Image resize and pad by DVPP on the device.
        decode_image.to_device(self.device_id)
        resize_tuple, pad_tuple, scale_factor = face_post_process.resize_factor(decode_image,
                                                                                resize_shape=(
                                                                                    self.MODEL_INPUT_WIDTH,
                                                                                    self.MODEL_INPUT_HEIGHT))
        resize_conf = Size(resize_tuple[0], resize_tuple[1])
        pad_conf = Dim(pad_tuple[2], pad_tuple[3], pad_tuple[0], pad_tuple[1])
        # Pad with neutral gray (112, 112, 112).
        color_conf = Color(112, 112, 112)
        decode_image = self.image_processor.resize(decode_image, resize_conf, base.bilinear_similar_opencv)
        decode_image = self.image_processor.padding(decode_image, pad_conf, color_conf, base.border_constant)
        decode_image.to_host()

        # Transfer to ndarray; crop and transpose to NCHW for the model.
        image_src = np.array(decode_image.to_tensor())  # NHWC
        image_src = image_src[:, :decode_image.original_height, :decode_image.original_width, :]
        image_src = image_src.transpose((0, 3, 1, 2))  # NCHW

        return image_src, scale_factor, image_ori

    def infer(self, input_tensor_q_con: Queue, output: Queue):
        """Main detection loop: batch frames, detect, track, and emit.

        Args:
            input_tensor_q_con: queue of dicts with keys "video_id",
                "image", "height", "width" plus passthrough metadata.
            output: queue receiving a copy of the input dict extended
                with "track_id" and "det" (crop, box, 0) for every face
                worth forwarding.
        """
        try:
            # Bind an image processor to this process/device context.
            ImageProcessor(self.device_id)
            post_retina = face_post_process.RetinaFace(self.det_model, rac="net3", masks=False)
            img_ndarray_list = []
            scale_factor_list = []
            img_ori_ndarray_list = []
            input_data_list = []
            count = 0
            while True:
                input_data = input_tensor_q_con.get()

                video_id = input_data["video_id"]
                # Lazily create the per-stream tracker and size-history queue.
                if video_id not in self.sorts:
                    self.sorts[video_id] = Sort()
                if video_id not in self.queues:
                    self.queues[video_id] = QQueue(maxsize=500)
                sort = self.sorts[video_id]

                image = input_data["image"]
                image_ori_height = input_data["height"]
                image_ori_width = input_data["width"]
                input_data_list.append(input_data)

                count += 1
                image_src, scale_factor, image_ori = self.det_preprocess(image, image_ori_height, image_ori_width)

                # Collect model input, scale factor and original frame.
                img_ndarray_list.append(image_src.astype(np.float32))  # NCHW, RGB
                scale_factor_list.append(scale_factor)
                img_ori_ndarray_list.append(image_ori.astype(np.float32))  # NCHW, RGB

                # Wait until a full batch has been accumulated.
                if len(img_ori_ndarray_list) < self.batch_size:
                    continue

                # Prepare the batched input tensor on the device.
                img_mxtensor = Tensor(np.squeeze(np.array(img_ndarray_list), axis=1))
                img_mxtensor.to_device(self.device_id)
                img_mxtensor_list = [img_mxtensor]
                stime = time.time()
                # RetinaFace model inference.
                if self.USE_BENCHMARK:
                    self.mytimer.infer_infer_and_post_start.append(time.time())
                all_det, all_landmarks = post_retina.detect(img_mxtensor_list, scale_factor_list, PORTRAIL_DET_THRESHOLD)
                if DEBUG:
                    logger.info(f"==============================人像检测时间:{(time.time() - stime)*1000}ms")
                    stime = time.time()

                rs = face_post_process.get_aligned_face_base_with_mface_all(all_det, all_landmarks, img_ori_ndarray_list)

                if DEBUG:
                    etime = time.time()
                    logger.info(f"==============================人像后处理时间:{(etime - stime)*1000}ms")
                for i, _ in enumerate(rs):
                    dets = []
                    small_imgs = []
                    if rs[i] is not None:
                        for j, item in enumerate(rs[i]):
                            img_arr, boxes, landmarks, scores, _ = item
                            # Box plus its index j inside rs[i], so the
                            # tracker output can be mapped back to crops.
                            box = list(boxes.copy())
                            box.append(j)
                            dets.append(box)
                            small_imgs.append(img_arr)

                        post_data = input_data_list[i].copy()
                        if len(dets) > 0:
                            tracks = sort.update(np.array(dets))
                            # Distinct loop variable: the original code
                            # shadowed the outer frame index `i` here.
                            for track in tracks:
                                idx = int(track[-2])
                                track_id = int(track[-1])
                                queue = self.queues[video_id]
                                queue_data = list(queue.queue)[::-1]

                                # Drop the oldest record when full.
                                if queue.full():
                                    queue.get()

                                x1, y1, x2, y2 = dets[idx][:4]
                                w = abs(x2 - x1)
                                h = abs(y2 - y1)

                                # Find the last recorded size of this track
                                # (newest first thanks to the reversal above).
                                pw, ph, p_track_id = 0, 0, 0
                                for data in queue_data:
                                    p_track_id = list(data.keys())[0]
                                    if p_track_id == track_id:
                                        pw, ph = list(data.values())[0]
                                        break

                                # Forward only when the face area changed by
                                # more than the threshold either way. Bare
                                # TRACK_THRESHOLD: `config` itself is not
                                # bound by `from config import *`, so the
                                # original `config.TRACK_THRESHOLD` raised
                                # NameError (swallowed by the broad except).
                                if w * h > pw * ph * TRACK_THRESHOLD or pw * ph > w * h * TRACK_THRESHOLD:
                                    queue.put({track_id: (w, h)})
                                    post_data["track_id"] = track_id
                                    post_data["det"] = (small_imgs[idx], dets[idx][:4], 0)

                                    output.put(post_data, block=True)
                                else:
                                    # Size barely changed: keep the previous
                                    # record alive for the next comparison.
                                    if p_track_id != 0:
                                        queue.put({p_track_id: (pw, ph)})

                    if self.USE_BENCHMARK:
                        self.mytimer.infer_infer_and_post_end.append(time.time())

                # Reset batch buffers for the next batch.
                img_ndarray_list = []
                scale_factor_list = []
                img_ori_ndarray_list = []
                input_data_list = []

        except Exception as e:
            print("process1_infer failed.", repr(e))
            traceback.print_exc()


class PortraitQuality(object):
    """Face-quality scoring stage of the pipeline.

    Pulls cropped faces from a queue, scores them in batches with a
    backbone + scoring-head model pair, and forwards faces whose score
    reaches QUALITY_SCORE_THRESHOLD to the feature-extraction stage.
    """

    def __init__(self, qua_bb_model_path, qua_sc_model_path, batch_size, device_id, pid):
        """Load the quality backbone and scoring models on the device.

        Args:
            qua_bb_model_path: path to the quality backbone model.
            qua_sc_model_path: path to the quality scoring-head model.
            batch_size: number of faces accumulated before one inference.
            device_id: NPU device to run on.
            pid: identifier of the worker process (kept for bookkeeping).
        """
        self.qua_bb_model = base.model(qua_bb_model_path, deviceId=device_id)
        self.qua_sc_model = base.model(qua_sc_model_path, deviceId=device_id)
        self.batch_size = batch_size
        self.device_id = device_id
        self.pid = pid
        self.mytimer = timer.Timer(batch_size)

        self.INFER_BREAK_WAIT_TIME = 10
        self.USE_BENCHMARK = False

    def infer(self, input_tensor_q_con: Queue, post_process_q_feat: Queue):
        """Main quality loop: batch faces, score them, filter by threshold.

        Args:
            input_tensor_q_con: queue of dicts whose "det"[0] holds a
                cropped face ndarray (CHW — TODO confirm) or None.
            post_process_q_feat: queue receiving the dicts that passed
                the quality threshold.
        """
        try:
            img_arr_list = []
            input_data_list = []
            while True:
                input_data = input_tensor_q_con.get()
                img_ndarray = input_data["det"][0]
                # Skip entries with no crop BEFORE recording them.
                # Appending first (as the original did) left a stale
                # entry in input_data_list and misaligned it against
                # img_arr_list, attaching scores to the wrong frames.
                if img_ndarray is None:
                    continue
                input_data_list.append(input_data)
                img_arr_list.append(img_ndarray)

                if self.USE_BENCHMARK:
                    self.mytimer.qua_pre_start.append(time.time())

                # Wait until a full batch has been accumulated.
                if len(img_arr_list) < self.batch_size:
                    continue

                # Quality model preprocess.
                img_ndarray_batch = face_post_process.face_quality_preprocess(img_arr_list)

                if self.USE_BENCHMARK:
                    self.mytimer.qua_pre_end.append(time.time())

                # Quality model inference: backbone, then scoring head.
                if self.USE_BENCHMARK:
                    self.mytimer.qua_infer_and_post_start.append(time.time())
                img_mxtensor = Tensor(img_ndarray_batch)
                img_mxtensor.to_device(self.device_id)
                img_mxtensor_vec = [img_mxtensor]
                output_tensors = self.qua_bb_model.infer(img_mxtensor_vec)
                score_tensors = self.qua_sc_model.infer(output_tensors)[0]
                score_tensors.to_host()
                scores = np.array(score_tensors)

                for i, score in enumerate(scores):
                    score = round(score[0], 2)
                    # Typo fixed: the original logged "q_socre".
                    logger.info("q_score={}".format(score))

                    if DEBUG:
                        # Dump the annotated crop for visual inspection.
                        small_id = input_data_list[i]["small_id"]
                        small_img = img_arr_list[i].copy()
                        small_img = small_img.transpose(1, 2, 0)
                        small_img = cv2.cvtColor(small_img, cv2.COLOR_BGR2RGB)
                        small_img = cv2.putText(small_img, str(score), (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 3)
                        small_name = str(input_data_list[i]["frame_id"]) + "_" + str(small_id) + ".jpg"
                        cv2.imwrite("/data/images/face2/quality/" + small_name, small_img)

                    # Forward only faces of sufficient quality.
                    if score >= QUALITY_SCORE_THRESHOLD:
                        post_process_q_feat.put(input_data_list[i], block=True)

                # Reset batch buffers for the next batch.
                img_arr_list = []
                input_data_list = []
                if self.USE_BENCHMARK:
                    self.mytimer.qua_infer_and_post_end.append(time.time())

        except Exception as e:
            print("process2_pattern_quality failed.", repr(e))
            traceback.print_exc()


class PortraitFeature(object):
    """Face feature-extraction stage of the pipeline.

    Pulls quality-filtered faces from a queue, extracts embeddings in
    batches (normal + horizontally flipped pass), uploads the crop and
    original frame, and publishes a record to Kafka.
    """

    def __init__(self, feat_model_path, batch_size, device_id, pid):
        """Load the feature model on the given device.

        Args:
            feat_model_path: path to the feature-extraction model.
            batch_size: number of faces accumulated before one inference.
            device_id: NPU device to run on.
            pid: identifier of the worker process (kept for bookkeeping).
        """
        self.feat_model = base.model(feat_model_path, deviceId=device_id)
        self.batch_size = batch_size
        self.device_id = device_id
        self.pid = pid
        self.mytimer = timer.Timer(batch_size)

        self.INFER_BREAK_WAIT_TIME = 10
        self.USE_BENCHMARK = False

    def infer(self, post_process_q_con: Queue):
        """Main feature loop: batch faces, embed, upload, publish.

        Args:
            post_process_q_con: queue of dicts carrying "det" (crop,
                box, flag), "image", "track_id", "video_id",
                "video_name" and "capture_time".
        """
        try:
            # Bind an image processor to this process/device context.
            ImageProcessor(self.device_id)
            img_arr_list = []
            input_data_list = []
            while True:
                input_data = post_process_q_con.get()
                img_ndarray = input_data["det"][0]
                # Skip entries with no crop BEFORE recording them, so
                # img_arr_list and input_data_list stay index-aligned
                # (the original appended first, corrupting alignment).
                if img_ndarray is None:
                    continue
                input_data_list.append(input_data)
                img_arr_list.append(img_ndarray)

                if self.USE_BENCHMARK:
                    self.mytimer.rec_pre_start.append(time.time())

                # Wait until a full batch has been accumulated.
                if len(img_arr_list) < self.batch_size:
                    continue

                # Feature preprocess: normal batch plus flipped batch.
                img_ndarray_batch, img_ndarray_flip_batch = face_post_process.get_feature_batch(
                    np.array(img_arr_list))
                if self.USE_BENCHMARK:
                    self.mytimer.rec_pre_end.append(time.time())

                # Feature model inference on both batches.
                if self.USE_BENCHMARK:
                    self.mytimer.rec_infer_and_post_start.append(time.time())
                img_mxtensor = Tensor(img_ndarray_batch)
                img_mxtensor.to_device(self.device_id)
                img_mxtensor_vec = [img_mxtensor]
                output_tensors = self.feat_model.infer(img_mxtensor_vec)[0]
                output_tensors.to_host()
                output_tensors = np.array(output_tensors)

                img_mxtensor_flip = Tensor(img_ndarray_flip_batch)
                img_mxtensor_flip.to_device(self.device_id)
                img_mxtensor_flip_vec = [img_mxtensor_flip]
                output_flip_tensors = self.feat_model.infer(img_mxtensor_flip_vec)[0]

                output_flip_tensors.to_host()
                output_flip_tensors = np.array(output_flip_tensors)

                for i in range(len(output_flip_tensors)):
                    # Fuse the normal and flipped embeddings, then
                    # L2-normalize.
                    embedding = output_tensors[i] + output_flip_tensors[i]
                    embedding = preprocessing.normalize([embedding])
                    post_data = input_data_list[i]
                    post_data["feature"] = embedding

                    # Assemble the Kafka message.
                    kafka_data = dict()
                    record_id = common.get_id()  # renamed: `id` shadowed the builtin
                    kafka_data["id"] = record_id
                    kafka_data["obj_id"] = post_data["track_id"]
                    kafka_data["big_id"] = record_id
                    kafka_data["point_id"] = post_data["video_id"]
                    kafka_data["point_name"] = post_data["video_name"]
                    kafka_data["capture_time"] = post_data["capture_time"]
                    kafka_data["feature"] = common.float_arr_2_base64(embedding[0])
                    small_pic = f"/face/{record_id}_crop.jpg"
                    big_pic = f"/face/{record_id}_orig.jpg"
                    orig = input_data_list[i]["image"]
                    x1, y1, x2, y2 = input_data_list[i]["det"][1]
                    x1 = int(x1)
                    y1 = int(y1)
                    x2 = int(x2)
                    y2 = int(y2)
                    img = orig[0]
                    # Crop with a 50px margin, clamped at the top/left
                    # edges: the original used y1-50/x1-50 directly, so
                    # boxes near the edge produced negative indices that
                    # wrapped around and yielded empty/garbage crops.
                    y_lo = max(y1 - 50, 0)
                    x_lo = max(x1 - 50, 0)
                    small_img = img[y_lo: y2 + 50, x_lo: x2 + 50].copy()

                    try:
                        common.upload_pic(UPLOAD_PATH + small_pic, small_img)
                        common.upload_pic(UPLOAD_PATH + big_pic, img)
                    except Exception:
                        # Best-effort upload: skip publishing on failure.
                        # Narrowed from a bare except so SystemExit and
                        # KeyboardInterrupt still propagate.
                        continue
                    kafka_data["small_url"] = NGINX_URL + small_pic
                    kafka_data["big_url"] = NGINX_URL + big_pic
                    common.send_kafka(KAFKA_FACE_TOPIC, kafka_data)

                # Reset batch buffers for the next batch.
                img_arr_list = []
                input_data_list = []
                if self.USE_BENCHMARK:
                    self.mytimer.rec_infer_and_post_end.append(time.time())

        except Exception as e:
            print("process2_pattern_rec failed.", repr(e))
            traceback.print_exc()


def portrait_detect(det_model_path, batch_size, device_id, pid,
                    input_tensor_q_con: Queue,
                    post_process_q_track: Queue,
                    model_loading: list):
    """Worker entry point for the detection stage.

    Builds the detector, signals readiness by appending True to
    model_loading, then blocks inside the infer loop.
    """
    detector = PortraitDection(det_model_path, batch_size, device_id, pid)
    model_loading.append(True)
    detector.infer(input_tensor_q_con, post_process_q_track)


def portrait_quality(qua_bb_model_path, qua_sc_model_path, batch_size, device_id, pid,
                     input_tensor_q_con: Queue,
                     post_process_q_feat: Queue,
                     model_loading: list):
    """Worker entry point for the quality-scoring stage.

    Builds the scorer, signals readiness by appending True to
    model_loading, then blocks inside the infer loop.
    """
    scorer = PortraitQuality(qua_bb_model_path, qua_sc_model_path, batch_size, device_id, pid)
    model_loading.append(True)
    scorer.infer(input_tensor_q_con, post_process_q_feat)


def portrait_feature(reid_model_path, batch_size, device_id, pid,
                     input_tensor_q_con: Queue,
                     model_loading: list):
    """Worker entry point for the feature-extraction stage.

    Builds the extractor, signals readiness by appending True to
    model_loading, then blocks inside the infer loop.
    """
    extractor = PortraitFeature(reid_model_path, batch_size, device_id, pid)
    model_loading.append(True)
    extractor.infer(input_tensor_q_con)



