from typing import *
import numpy as np
from mmdeploy_runtime import TextDetector
from mmdeploy_runtime import TextRecognizer

class MMTextDetector(object):
    """OCR text detector.

    Similar to a generic Detector, except the model returns rotatable
    (quadrilateral) detection boxes rather than axis-aligned ones.
    """

    def __init__(self,
                 model_dir,
                 keys,
                 device="cuda:0") -> None:
        """
        Args:
            model_dir: path to the deployed mmdeploy text-detection model.
            keys: mapping with "in"/"out" names used to read/write `data`.
            device: device string such as "cuda:0" or "cpu".
        """
        self.keys = keys
        # "cuda:0" -> ("cuda", 0); a bare name like "cpu" -> ("cpu", 0)
        dev_name, _, dev_idx = device.partition(":")
        self.detector = TextDetector(
            model_path=model_dir,
            device_name=dev_name,
            device_id=int(dev_idx) if dev_idx else 0,
        )

    def __call__(self, data) -> Dict:
        """Run batched text detection.

        Args:
            data: dict-like container; data[keys["in"]] holds the image
                batch (layout [NHWC] per the original documentation).

        Returns:
            The same dict, with detections stored under keys["out"].
        """
        data[self.keys["out"]] = self.detector.batch(data[self.keys["in"]])
        return data

class SubMMTextDetector(object):
    """OCR text detector for second-stage (sub-task) pipelines.

    Same model as MMTextDetector, but consumes the per-sample crop output
    of a CropTarget/FilterTarget stage and maps the detected boxes back
    into the coordinate frame of the original image.
    """

    def __init__(self,
                 model_dir,
                 keys,
                 device="cuda:0") -> None:
        """
        Args:
            model_dir: path to the deployed mmdeploy text-detection model.
            keys: mapping with "in"/"out" names used to read/write `data`.
            device: device string such as "cuda:0" or "cpu".
        """
        self.keys = keys
        # "cuda:0" -> ("cuda", 0); a bare name like "cpu" -> ("cpu", 0)
        dev_name, _, dev_idx = device.partition(":")
        self.detector = TextDetector(
            model_path=model_dir,
            device_name=dev_name,
            device_id=int(dev_idx) if dev_idx else 0,
        )

    def __call__(self, data) -> Dict:
        """Detect text in each crop and translate boxes to image coordinates.

        Args:
            data: dict-like container; data[keys["in"]] is a batch where each
                sample provides parallel "imgs" (crops, [NHWC] per original
                docs) and "boxes" (each crop's box in the source image).

        Returns:
            The same dict, with per-sample lists of translated detections
            stored under keys["out"].
        """
        results = []
        for sample in data[self.keys["in"]]:
            per_sample = []
            for crop, crop_box in zip(sample["imgs"], sample["boxes"]):
                # Each detection row is 8 quad coordinates + 1 score; shift
                # the four (x, y) corners by the crop's top-left corner and
                # leave the trailing score untouched (offset 0).
                offset = np.array(list(crop_box[:2]) * 4 + [0])
                per_sample.extend((self.detector(crop) + offset).tolist())
            results.append(per_sample)
        data[self.keys["out"]] = results
        return data


class MMTextRecognizer(object):
    """OCR text recognizer.

    Runs a deployed mmdeploy text-recognition model on the regions selected
    by a preceding text-detection stage.
    """

    def __init__(self,
                 model_dir,
                 keys,
                 device="cuda:0") -> None:
        """
        Args:
            model_dir: path to the deployed mmdeploy text-recognition model.
            keys: mapping that must contain:
                "in" (or legacy "img_data"): key of the input image batch,
                "det": key holding the per-image detection boxes,
                "out": key under which recognition results are stored.
            device: device string such as "cuda:0" or "cpu".
        """
        self.keys = keys
        assert "det" in self.keys, "the keyword 'det' must be included"
        if ":" in device:
            device_name, device_id = device.split(":")
            device_id = int(device_id)
        else:
            device_name = device
            device_id = 0
        self.detector = TextRecognizer(model_path=model_dir, device_name=device_name, device_id=device_id)

    def __call__(self, data) -> Dict:
        """Recognize text inside the detected boxes of each image.

        Args:
            data: dict-like container; data[keys["in"]] holds the image batch
                ([NHWC] per the original documentation) and data[keys["det"]]
                the detections produced by the detector stage.

        Returns:
            The same dict, with per-image recognition results under keys["out"].
        """
        # BUG FIX: this previously read self.keys["img_data"], which raised
        # KeyError with the {"in"/"det"/"out"} key scheme used by the companion
        # detector classes and the __main__ demo. Prefer "in" and fall back to
        # the legacy "img_data" name for backward compatibility.
        in_key = self.keys["in"] if "in" in self.keys else self.keys["img_data"]
        input_tensor = data[in_key]
        batch_dets = data[self.keys["det"]]
        result = []
        for img, dets in zip(input_tensor, batch_dets):
            # TextRecognizer expects a flat list of box coordinates per image.
            bboxes = np.array(dets).flatten().tolist()
            result.append(self.detector(img, bboxes))

        data[self.keys["out"]] = result
        return data


if __name__ == "__main__":
    def test():
        """Smoke test: detect + recognize on demo images, dump one visualization."""
        import cv2
        from glob import glob
        import numpy as np

        image_paths = [
            "data/input/ocr/demo_text_ocr.jpg",
            "data/input/ocr/demo_text_ocr.jpg",
        ]
        detector = MMTextDetector(
            model_dir="projects/shipping_lane/weights/dbnet_resnet18",
            keys={
                "in": "input_data",
                "out": "detect",
            },
            device="cuda:0",
        )
        recognizer = MMTextRecognizer(
            model_dir="projects/shipping_lane/weights/crnn_mini-vgg_5e_mj",
            device="cuda:0",
            keys={
                "in": "input_data",
                "out": "recogizer",
                "det": "detect",
            },
        )

        # Load, resize to 1280x720 and add a leading batch axis per image.
        frames = [
            cv2.resize(cv2.imread(p), (1280, 720))[None, ...]
            for p in image_paths[:4]
        ]
        data = {
            "input_data": np.concatenate(frames),
        }
        data = recognizer(detector(data))

        for frame, dets, recogn in zip(frames, data["detect"], data["recogizer"]):
            canvas = frame[0]
            # Column 8 holds the detection score; columns 0..7 the quad corners.
            print(min(dets[:, 8]))
            pts = dets[:, :8].reshape([dets.shape[0], -1, 2]).astype(np.int64)
            cv2.polylines(canvas, pts, True, (0, 255, 0), 2)
            cv2.imwrite("data/output/temp.jpg", canvas)
            break

    test()
