import argparse
import ast
import math
import os
import time
import cv2
import numpy as np
import yaml
from datetime import datetime

import mindspore as ms
from mindspore import Tensor, context, nn

from mindyolo.models import create_model
from mindyolo.utils import logger
from mindyolo.utils.config import parse_args
from mindyolo.utils.metrics import non_max_suppression, scale_coords, xyxy2xywh
from mindyolo.utils.utils import draw_result, set_seed




class DetectionEngine(object):
    """Multi-stage defect-detection pipeline.

    Stage 1: a YOLO-style detector (``model``) proposes boxes on the full
    image. Stage 2 (optional, ``is_crop=True``): each detected box is cropped
    and classified ok/ng by ``cls_net``; low-confidence crops are arbitrated
    by a PatchCore anomaly scorer. An SSIM auto-encoder arbiter also exists
    (``ssim_ae_func`` + ``ssim_ae_calc_coutours_area``) but is currently
    unused by ``detect_object``.
    """

    def __init__(self,
                 model: nn.Cell,
                 cls_net: nn.Cell,
                 ssim_ae_net: nn.Cell,
                 ssim_ae_conf,
                 ssim_ae_func,
                 patchcore_net,
                 patchcore_conf,
                 patchcore_func,
                 conf_thres: float = 0.25,
                 iou_thres: float = 0.65,
                 conf_free: bool = False,
                 nms_time_limit: float = 60.0,
                 img_size: list = None,
                 stride: int = 32,
                 is_coco_dataset: bool = False,
                 gf_num=145,
                 ):
        """
        Args:
            model: detection network producing raw predictions for NMS.
            cls_net: ok/ng crop classifier (expected to output 2 scores).
            ssim_ae_net / ssim_ae_conf / ssim_ae_func: SSIM auto-encoder
                model, config and inference callable (alternate arbiter).
            patchcore_net / patchcore_conf / patchcore_func: PatchCore
                model, config and scoring callable (current arbiter).
            conf_thres: NMS confidence threshold.
            iou_thres: NMS IoU threshold.
            conf_free: whether predictions lack an objectness column.
            nms_time_limit: wall-clock budget for NMS, in seconds.
            img_size: detector input size as [height, width]; defaults to
                [5120, 1280].
            stride: network stride (stored for callers; unused here).
            is_coco_dataset: if True, all category ids are reported as 0.
            gf_num: presumably an expected detection count — TODO confirm
                meaning; it is stored but never read in this file.
        """
        # BUG FIX: the default used to be the mutable literal [5120, 1280];
        # a None sentinel avoids sharing one list across all instances.
        if img_size is None:
            img_size = [5120, 1280]

        self.result_dict = {"category_id": [], "bbox": [], "score": [], "type": {}}
        self.model = model
        self.classes_model = cls_net
        self.ssim_ae_model = ssim_ae_net
        self.ssim_ae_conf = ssim_ae_conf
        self.ssim_ae_func = ssim_ae_func
        self.patchcore_model = patchcore_net
        self.patchcore_conf = patchcore_conf
        self.patchcore_func = patchcore_func
        self.conf_thres = conf_thres
        self.iou_thres = iou_thres
        self.conf_free = conf_free
        self.nms_time_limit = nms_time_limit
        self.img_size = img_size
        self.stride = stride
        self.is_coco_dataset = is_coco_dataset
        self.gf_num = gf_num
        # NOTE(review): misspelled attribute kept for backward compatibility
        # with any external code that still reads ``gf_bum``.
        self.gf_bum = gf_num

    def detect_object(self, img_path, is_crop=False):
        """Run the full detection (+ optional classification) pipeline.

        Args:
            img_path: path of the image to analyse.
            is_crop: when True, every detected box is cropped out of the
                original image and classified ok/ng.

        Returns:
            dict with keys "category_id", "bbox" (xywh, top-left based),
            "score" and "type" (per-prediction-set {det_index: 'ok'|'ng'}),
            or None when the crop classifier returns a malformed prediction.
        """
        print("current file name:", img_path)
        self.result_dict = {"category_id": [], "bbox": [], "score": [], "type": {}}

        # Load and resize to the fixed detector input. img_size is [h, w]
        # while cv2.resize expects (w, h).
        img = cv2.imread(img_path)
        ori_img = img.copy()
        h_ori, w_ori = img.shape[:2]  # original hw, needed to rescale boxes
        img = cv2.resize(img, (self.img_size[1], self.img_size[0]),
                         interpolation=cv2.INTER_LINEAR)
        img = img[:, :, ::-1].transpose(2, 0, 1) / 255.0  # BGR->RGB, HWC->CHW, [0,1]
        imgs_tensor = Tensor(img[None], ms.float32)

        # Detector forward pass.
        _t = time.time()
        out = self.model(imgs_tensor)
        out = out[0] if isinstance(out, (tuple, list)) else out
        infer_times = time.time() - _t  # kept for ad-hoc profiling

        # Non-maximum suppression on the raw predictions.
        t = time.time()
        out = non_max_suppression(
            out.asnumpy(),
            conf_thres=self.conf_thres,
            iou_thres=self.iou_thres,
            conf_free=self.conf_free,
            multi_label=True,
            time_limit=self.nms_time_limit,
        )
        nms_times = time.time() - t  # kept for ad-hoc profiling

        total_category_ids, total_bboxes, total_scores, total_type = [], [], [], {}
        for si, pred in enumerate(out):
            if len(pred) == 0:
                continue

            # Map boxes back to the original image resolution.
            predn = np.copy(pred)
            scale_coords(img.shape[1:], predn[:, :4], (h_ori, w_ori))
            box = xyxy2xywh(predn[:, :4])
            box[:, :2] -= box[:, 2:] / 2  # center xy -> top-left xy

            gf_index = 0
            cls_dict = {}
            category_ids, bboxes, scores = [], [], []
            for p, b in zip(pred.tolist(), box.tolist()):
                category_ids.append(0 if self.is_coco_dataset else int(p[5]))
                bboxes.append([round(x, 3) for x in b])
                scores.append(round(p[4], 5))
                gf_index += 1
                if not is_crop:
                    continue

                crop_img = self.crop_picture(ori_img, [round(x, 3) for x in b])
                # BUG FIX: the classifier input used to be stored in ``img``,
                # clobbering the resized detector image whose shape feeds
                # scale_coords on later iterations of the outer loop.
                cls_input = self.data_preprocess(crop_img)
                predict = self.classes_model(cls_input).asnumpy()
                if len(predict) == 0 or len(predict[0]) != 2:
                    return None  # malformed classifier output
                ng_score, ok_score = predict[0][0], predict[0][1]
                if ng_score > ok_score and ng_score > 0.55:  # confidently ng
                    print("type ng")
                    cls_dict[gf_index] = 'ng'
                elif ok_score > ng_score and ok_score > 0.55:  # confidently ok
                    print("type ok")
                    cls_dict[gf_index] = 'ok'
                else:
                    # Uncertain: arbitrate with the PatchCore anomaly score.
                    print("two network detection....")
                    score = self.patchcore_func(crop_img, self.patchcore_model)
                    if score < 3.5:  # empirically chosen anomaly threshold
                        cls_dict[gf_index] = 'ok'
                        print("type ok")
                    else:
                        cls_dict[gf_index] = 'ng'
                        print("type ng")

            total_category_ids.extend(category_ids)
            total_bboxes.extend(bboxes)
            total_scores.extend(scores)
            total_type[si] = cls_dict

        self.result_dict["category_id"].extend(total_category_ids)
        self.result_dict["bbox"].extend(total_bboxes)
        self.result_dict["score"].extend(total_scores)
        self.result_dict["type"] = total_type
        return self.result_dict

    def crop_picture(self, im, bbox):
        """Crop ``bbox`` = [x, y, w, h] (top-left based) out of ``im``.

        The crop is expanded by one pixel on the left/top (and bottom when
        there is room); near coordinates are clamped to >= 0 and numpy
        slicing clamps the far edges to the image size.
        """
        x_l, y_t, w, h = bbox[:]
        x_r, y_b = x_l + w, y_t + h
        x_l, y_t, x_r, y_b = int(x_l) - 1, int(y_t) - 1, int(x_r), int(y_b)
        # BUG FIX: the bottom-expansion guard compared ``y_t + 1`` (the top
        # edge) against the image height; compare the bottom edge instead.
        # (Resulting crop is unchanged in practice because numpy slicing
        # clamps out-of-range stop indices.)
        y_b = y_b if y_b + 1 > im.shape[0] else y_b + 1
        crop = im[max(0, y_t):max(0, y_b), max(0, x_l):max(0, x_r)]
        return crop

    def _normalize(self, img, mean, std):
        """Normalize ``img`` in place: (img - mean) / std, channel-wise.

        ``img`` must already be floating point — cv2.subtract on uint8 would
        saturate at 0 instead of going negative.
        """
        assert img.dtype != np.uint8
        mean = np.float64(mean.reshape(1, -1))
        stdinv = 1 / np.float64(std.reshape(1, -1))
        cv2.subtract(img, mean, img)    # in place: img -= mean
        cv2.multiply(img, stdinv, img)  # in place: img /= std
        return img

    def data_preprocess(self, img):
        """Turn a BGR crop into a (1, 3, 80, 240) normalized MindSpore tensor.

        The crop is resized to 240x80, collapsed to grayscale and replicated
        to three channels, then normalized with ImageNet statistics scaled
        to the 0-255 pixel range.
        """
        img = cv2.resize(img, (240, 80))
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
        mean = np.asarray([0.485 * 255, 0.456 * 255, 0.406 * 255])
        std = np.asarray([0.229 * 255, 0.224 * 255, 0.225 * 255])
        img = self._normalize(img.astype(np.float32), mean, std)
        img = img.transpose(2, 0, 1)  # HWC -> CHW
        return ms.Tensor(img.reshape((1, 3, 80, 240)), ms.float32)

    def ssim_ae_calc_coutours_area(self, img):
        """Return the total contour area found in the binary mask ``img``.

        Used to decide ok/ng from an SSIM-AE difference mask. (The method
        name keeps the original spelling for backward compatibility.)
        """
        contours, hierarchy = cv2.findContours(
            np.uint8(img), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        return sum(cv2.contourArea(con) for con in contours)