# -*- coding: utf-8 -*-
"""
模型推理的管理

@Time 2021-7-8
@Author liang jin hao
"""

from mmdet.apis import init_detector, inference_detector
import mmcv
import os
import os.path as osp
import json
import numpy as np
import cv2
from utils.MQ.message import send_message
from utils.AI_2d.preprocessing.v2.preprocessing import Preprocessing

DEVICE = 'cuda:0'  # GPU device used for model loading/inference
CONFIDENCE = 0.5  # minimum detection score kept by inference()


class TestManager():
    """Manage defect-model inference and result publishing.

    Wraps an mmdetection detector: runs inference on a single image, assigns
    the highest-confidence detection falling inside each of three fixed pin
    regions, and publishes the aggregated result over MQ.
    """

    # (x_min, y_min, x_max, y_max) bounds a detection box must lie strictly
    # inside to be assigned to the corresponding pin slot. Slot index i feeds
    # res_dic['pin_labels'][i] and is reported as Pin(i+1)_Up_Appearance.
    # NOTE(review): layout-specific magic numbers — confirm against the
    # current camera/preprocessing setup before reuse.
    PIN_REGIONS = (
        (900, 0, 3000, 800),
        (250, 100, 1500, 1100),
        (0, 700, 1000, 1700),
    )

    # Display-color name -> OpenCV BGR tuple used when drawing result boxes.
    _BGR = {'green': (0, 255, 0), 'red': (0, 0, 255)}

    def __init__(self, MQ_send, connection_send, color_map, pth_file, cfg_file):
        """
        Auth: WZW
        Args:
            MQ_send: MQ channel/exchange handle passed through to send_message.
            connection_send: MQ connection handle passed through to send_message.
            color_map: label name and display color, e.g.
                        [
                            {"name": "ok", "color": "green"},
                            {"name": "nok", "color": "red"}
                        ]
            pth_file: pth file absolute path
            cfg_file: config file absolute path
        """
        self.MQ_send = MQ_send
        self.connection_send = connection_send
        self.task_id = -1
        self.task_type = -1

        self.color_map = color_map
        self.pth_file = pth_file
        self.cfg_file = cfg_file
        self.img_path = ""
        # Load the detector once; reused across all inference() calls.
        self.defect_model = init_detector(self.cfg_file, self.pth_file, device=DEVICE)
        self.classes = self.defect_model.CLASSES
        self.preprocessing = None  # set lazily by set_preprocessing()
        self.detection_target = None  # folder to save the visualized result image

    def inference(self):
        """Run the detector on self.img_path.

        Returns:
            list: one entry per detection scoring at least CONFIDENCE,
                  [x1, y1, x2, y2, class_index, score].
        """
        result_list = []
        try:
            img = mmcv.imread(self.img_path)
        except TypeError:
            print('the input must be img')
        else:
            result = inference_detector(self.defect_model, img)
            # mmdet returns one array of [x1, y1, x2, y2, score] rows per class.
            for class_index, one_class_res in enumerate(result):
                for res in one_class_res:
                    if len(res) == 0 or res[4] < CONFIDENCE:
                        continue
                    result_list.append([res[0], res[1], res[2], res[3], class_index, res[4]])
        return result_list

    def _make_shape(self, item):
        """Build one labelme-style 'shapes' entry from a detection row.

        Args:
            item: [x1, y1, x2, y2, class_index, score] as produced by inference().
        Returns:
            dict in labelme rectangle-shape format; group_id carries the
            display color later consumed by visualize_result().
        """
        label = self.classes[item[4]]
        return {
            "label": label,
            "points": [
                [np.float64(item[0]), np.float64(item[1])],
                [np.float64(item[2]), np.float64(item[3])],
            ],
            "group_id": self.get_color(label),
            "shape_type": "rectangle",
            "flags": {}
        }

    @staticmethod
    def _label_to_code(label):
        """Map a pin label to the wire code: 'ok' -> 0, 'nok' -> 1, else -1."""
        if label == 'ok':
            return 0
        if label == 'nok':
            return 1
        return -1

    def get_result_dic(self, result_list):
        """Assign detections to pin slots and build the labelme-style result.

        Args:
            result_list(list): rows from inference().
        Returns:
            dict with keys 'image_name', 'box' (labelme document) and
            'pin_labels' (one label or -1 per pin slot).
        """
        label_list = [-1, -1, -1]
        out_dic = {
            'image_name': self.img_path,
        }
        # Sort by score descending so the highest-confidence detection wins
        # each pin slot.
        result_list = sorted(result_list, key=(lambda x: x[5]), reverse=True)
        h, w, _ = cv2.imread(self.img_path).shape
        out_dic['box'] = dict(version="4.2.10", flags={}, shapes=[], imagePath=self.img_path, imageHeight=h,
                              imageWidth=w)
        for item in result_list:
            for slot, (x_min, y_min, x_max, y_max) in enumerate(self.PIN_REGIONS):
                # A detection claims the first still-empty slot whose region
                # strictly contains its box (mirrors the original if/elif chain).
                if (label_list[slot] == -1 and item[0] > x_min and item[1] > y_min
                        and item[2] < x_max and item[3] < y_max):
                    out_dic['box']['shapes'].append(self._make_shape(item))
                    label_list[slot] = self.classes[item[4]]
                    break
        out_dic['pin_labels'] = label_list
        return out_dic

    def set_args(self, task_id, task_type, img_path, detection_target=None):
        """Configure a plain-image detection task (no preprocessing step)."""
        self.task_id = task_id
        self.task_type = task_type
        self.img_path = img_path
        self.detection_target = detection_target

    def set_preprocessing(self, task_id, task_type, npy_file, png_file, area, targetFolder='./out',
                          detection_target=None):
        """Configure a task that first runs the v2 preprocessing pipeline."""
        self.task_id = task_id
        self.task_type = task_type
        self.preprocessing = Preprocessing(self.MQ_send, self.connection_send, task_id, task_type, npy_file, png_file,
                                           area, targetFolder)
        self.detection_target = detection_target

    def send_result(self):
        """Run the configured task end-to-end and publish the result over MQ.

        Always sends a message (for task types '115'/'116') even when an
        error occurs mid-pipeline; the error text is included in the payload.
        """
        res = {
            "task_id": self.task_id,
            "task_type": "PIN_AI_DETECTION",
            "img_path": self.img_path,
        }
        try:
            if self.task_type == '116':
                # Task 116 first generates the inference image via preprocessing.
                preRes = self.preprocessing.generate_manager()
                self.preprocessing = None  # one-shot: release after use
                self.img_path = preRes['image_output']
                res["img_path"] = self.img_path
            res_list = self.inference()
            res_dic = self.get_result_dic(res_list)
            if self.detection_target is not None:
                res["detection_result_image"] = self.visualize_result(res_dic)
            else:
                res["detection_result_image"] = "did not get target to save result"
            # pin_labels slot i maps to Pin(i+1)_Up_Appearance.
            res["Pin1_Up_Appearance"] = self._label_to_code(res_dic['pin_labels'][0])
            res["Pin2_Up_Appearance"] = self._label_to_code(res_dic['pin_labels'][1])
            res["Pin3_Up_Appearance"] = self._label_to_code(res_dic['pin_labels'][2])
        except Exception as e:
            print(e)
            # BUG FIX: store the message text, not the Exception object — an
            # Exception instance is not JSON serializable, so json.dumps in
            # the finally clause would raise and the report would be lost.
            res['error'] = str(e)
        finally:
            if self.task_type == '115' or self.task_type == '116':
                send_message(self.MQ_send, self.connection_send, json.dumps(res, ensure_ascii=False))

    def get_color(self, class_name):
        """Return the display color configured for class_name.

        Raises:
            ValueError: if class_name is absent from color_map.
        """
        for entry in self.color_map:  # renamed: 'map' shadowed the builtin
            if entry.get('name') == class_name:
                return entry.get('color')
        raise ValueError('no such class name in model')

    def visualize_result(self, out_dic):
        """Draw the detected rectangles on the image and save it.

        Args:
            out_dic: result dict from get_result_dic().
        Returns:
            str: path of the saved result image inside self.detection_target.
        """
        img_path = out_dic['image_name']
        img = cv2.imread(img_path)
        for obj_item in out_dic['box']['shapes']:
            print(obj_item)
            left_top = (int(float(obj_item['points'][0][0])), int(float(obj_item['points'][0][1])))
            right_bottom = (int(float(obj_item['points'][1][0])), int(float(obj_item['points'][1][1])))
            color = self._BGR.get(obj_item['group_id'])
            if color is not None:
                cv2.rectangle(img, left_top, right_bottom, color, 5)
                cv2.putText(img, obj_item['label'], right_bottom, 5, 1.2, (255, 0, 0), 2)
        # Truncate the (very long) source basename so the result path stays sane.
        res_dir = os.path.join(self.detection_target, os.path.basename(out_dic['image_name'])[:58] + "_result.png")
        cv2.imwrite(res_dir, img)
        return res_dir

    def visualize(self, out_dic):
        """Debug helper: draw raw inference rows and show them interactively.

        NOTE(review): hard-coded output folder and blocking cv2.waitKey —
        intended for manual local debugging only, not the service path.
        """
        targetFolder = "/home/hycx/PONINT_ALGORITHM_DATA_TEST/output"
        img_name = os.path.basename(self.img_path)[:-4] + "_result.png"
        img = cv2.imread(self.img_path)
        cv2.namedWindow("image", cv2.WINDOW_NORMAL)
        for obj_item in out_dic:
            print(obj_item)
            left_top = (int(float(obj_item[0])), int(float(obj_item[1])))
            right_bottom = (int(float(obj_item[2])), int(float(obj_item[3])))
            if obj_item[4] == 1:
                cv2.rectangle(img, left_top, right_bottom, (0, 255, 0), 5)
                cv2.putText(img, 'ok', right_bottom, 5, 1.2, (255, 0, 0), 2)
            if obj_item[4] == 0:
                cv2.rectangle(img, left_top, right_bottom, (0, 0, 255), 5)
                cv2.putText(img, 'nok', right_bottom, 5, 1.2, (255, 0, 0), 2)
            cv2.imshow('image', img)
            cv2.waitKey(0)
        cv2.imwrite(os.path.join(targetFolder, img_name), img)


"""
Inference test json

{
 "task_id":3,
 "task_type":3,
 "data_set_name":"test_dataset",
 "data_set_id":110,
 "color_map":[
    {
        "name":"unnormal","color":"green"
    },
    {
    "name":"normal","color":"red"
    }
 ],
 "data_dir": "/home/ubuntu/data_test/inference_data",
 "cfg_file": "/home/ubuntu/data_test/test.py",
 "pth_file": "/home/ubuntu/data_test/work_dir/epoch_12.pth",
 "model_id": 111,
 "label_person": "google",
 "image_total": 10
}
"""

if __name__ == "__main__":
    # targetFolder = "/home/hycx/Desktop/0624traingdataset/inference_meta"
    # color_map = [
    #     {
    #         "name": "ok", "color": "green"
    #     },
    #     {
    #         "name": "nok", "color": "red"
    #     }
    # ]
    # data_dir = "/home/hycx/Desktop/0624traingdataset/inference_meta"
    # pth_file = "/home/hycx/pointCloudTrain/training/work_dir/latest.pth"
    # cfg_file = "/home/hycx/pointCloudTrain/training/work_dir/pointCloudCfg.py"
    # re = TestManager(color_map=color_map, data_dir=data_dir, pth_file=pth_file, cfg_file=cfg_file)
    # for item in os.listdir(targetFolder):
    #     img_path = osp.join(targetFolder, item)
    #     if img_path.split('.')[-1] in ['jpg', 'png', 'bmp', 'jpeg', 'JPG', 'PNG', 'BMP', 'JPEG']:
    #         res_list = re.inference(img_path)
    #         res_dic = re.get_result_dic(res_list, item)
    #         message_result = json.dumps(res_dic, ensure_ascii=False)
    #         print('test_manager res is ==>' + message_result)
    #         re.visualize_result(res_dic, targetFolder='/home/hycx/Desktop/0624traingdataset/inference_out')
    #     else:
    #         pass
    # print("all done")
    color_map = [
        {
            "name": "ok", "color": "green"
        },
        {
            "name": "nok", "color": "red"
        }
    ]
    pth_file = "./pointCloudLatest.pth"
    cfg_file = "./pointCloudCfg.py"
    testManager = TestManager(1, 1, color_map=color_map, pth_file=pth_file, cfg_file=cfg_file)
    testManager.set_args("666", "115",
                         "/home/hycx/PONINT_ALGORITHM_DATA_TEST/output/3505277134ML085B085301110012200465_up_2022-06-28-09-04-52_meta_channelsBlend.png")
    # testManager.send_result()
    res_list = testManager.inference()
    res_dic = testManager.get_result_dic(res_list)
    testManager.visualize_result(res_dic)
    # testManager.visualize(res_list)
    print(res_dic)
