
import cv2
import glob
import logging
import os
import PIL.Image
import re
import timeit

from ultralytics import YOLO


logger = logging.getLogger(__name__)


# Determine the OpenCV version string. Newer opencv-python builds expose it
# as cv2.version.opencv_version; all builds expose cv2.__version__. The
# original try/import chain could leak an uncaught AttributeError when the
# `version` submodule existed without the attribute — getattr avoids that.
cv2_version = (
    getattr(getattr(cv2, 'version', None), 'opencv_version', None)
    or getattr(cv2, '__version__', None)
)
if cv2_version is None:
    raise ImportError('cv2 version not found')

# Matches tiled-image basenames ending in `_<rasterId>__<h_offset>__<w_offset>`;
# captures the three numeric fields (all as strings).
basename_regex = re.compile(r'_(\d+)__(\d+)__(\d+)$')


def batching(gen, size=1):
    """Yield successive lists of at most ``size`` items drawn from ``gen``.

    The final chunk may be shorter than ``size``; an empty input yields
    nothing.
    """
    chunk = []
    for element in gen:
        chunk.append(element)
        if len(chunk) >= size:
            yield chunk
            chunk = []
    if chunk:
        yield chunk


def _version_tuple(version_string):
    """Parse a version string such as '4.7.0' into a comparable int tuple."""
    return tuple(int(part) for part in re.findall(r'\d+', version_string)[:3])


def yolo_predict(model_fullname, images: list[str], batch_size: int, **kwargs):
    """Run YOLO detection over tiled images and merge detections per raster.

    Image basenames may end in ``_<rasterId>__<h_offset>__<w_offset>`` (see
    ``basename_regex``). Tiles belonging to the same raster are grouped (the
    input is sorted so they appear consecutively), their detections shifted by
    the tile offsets into raster coordinates, and a final NMS pass merges the
    overlapping boxes.

    Args:
        model_fullname: path/name of the YOLO weights to load.
        images: image file paths.
        batch_size: number of images per ``model.predict`` call.
        **kwargs: extra ultralytics predict options; they override the
            defaults below (e.g. ``device``, ``verbose``).

    Returns:
        list of ``(rasterId, labels)`` tuples, where each label is
        ``(cls, x, y, w, h, conf)`` after NMS.
    """
    model = YOLO(model_fullname)

    # e.g. {0: 'vehicle', 1: 'ship'}
    print('classes', model.names)

    kwargs = {
        'imgsz': (640, 640),
        'conf': 0.10,
        'half': False,
        # 'classes': [],
        'max_det': 300,
        'iou': 0.45,
        # class-agnostic NMS
        # 'agnostic_nms': True,

        # test-time augmentation of input images
        'augment': True,

        **kwargs,

        # visualize model features
        # 'visualize': True,

        # 'show': True,
        'show_labels': True,
        'show_conf': True,
        'show_boxes': True,

        # whether to save annotated images
        'save': False,
        # whether to additionally save annotated crops
        'save_crop': False,
        # whether to save labels as text files
        'save_txt': False,
        # whether label text files include confidences
        'save_conf': True,
    }

    kwargs.setdefault('device', '0')

    class Current:
        """Accumulates detections for the raster currently being processed."""

        def __init__(self):
            self.rasterId = None
            self.clses = []
            self.boxes = []
            self.confs = []

    outs = []
    current = Current()

    def close_current():
        """Run NMS on the accumulated raster and flush it into ``outs``."""
        nonlocal current

        if current.rasterId is not None and len(current.boxes) > 0:
            # Class-aware NMSBoxesBatched only exists since OpenCV 4.7.0.
            # Compare parsed version tuples — a plain string comparison would
            # order '4.10.0' before '4.7.0'.
            if _version_tuple(cv2_version) >= (4, 7, 0):
                filtered = cv2.dnn.NMSBoxesBatched(
                    current.boxes,
                    current.confs,
                    current.clses,
                    kwargs['conf'],
                    kwargs['iou'],
                    eta=1.0)
            else:
                filtered = cv2.dnn.NMSBoxes(
                    current.boxes,
                    current.confs,
                    kwargs['conf'],
                    kwargs['iou'],
                    eta=1.0)

            labels = []
            for index in filtered:
                labels.append((current.clses[index], *current.boxes[index], current.confs[index]))

            if len(filtered) != len(current.boxes):
                print('filtered %s / %s' % (len(filtered), len(current.boxes)))
            else:
                print('filtered %s' % (len(current.boxes)))

            outs.append((current.rasterId, labels))

        current = Current()

    for batched in batching(sorted(images), batch_size):
        retgen = model.predict(**kwargs,
            source=batched,
            stream=True,
        )

        # ultralytics.engine.results.Results
        for ret in retgen:
            # speed: {'preprocess': ..., 'inference': ..., 'postprocess': ...}
            # print(ret.speed)

            filename = os.path.basename(ret.path)
            basename, extname = os.path.splitext(filename)

            matches = basename_regex.search(basename)
            if matches:
                rasterId, h_offset, w_offset = matches.groups()
                # Regex capture groups are strings; convert so the coordinate
                # arithmetic below is numeric (previously int + str raised
                # TypeError for every tiled image).
                h_offset, w_offset = int(h_offset), int(w_offset)
            else:
                rasterId, h_offset, w_offset = basename, 0, 0

            if current.rasterId != rasterId:
                # New raster: flush the previous one first.
                close_current()
                current.rasterId = rasterId

            # Results.boxes -> ultralytics.engine.results.Boxes
            for d in ret.boxes:
                c, conf = int(d.cls), float(d.conf)
                # NOTE(review): ultralytics d.xywh is (center-x, center-y, w, h)
                # while cv2.dnn.NMSBoxes expects top-left (x, y, w, h) — confirm
                # which convention downstream consumers of `outs` expect.
                x, y, w, h = map(int, d.xywh.view(-1))
                # Shift tile-local coordinates into raster coordinates.
                current.clses.append(c)
                current.boxes.append((x + w_offset, y + h_offset, w, h))
                current.confs.append(conf)

    # Flush the final raster.
    close_current()
    print(outs)
    return outs


def main(args):
    """Collect images matching ``args.glob`` and run YOLO prediction on them."""
    # Disable PIL's decompression-bomb limit so very large rasters load.
    PIL.Image.MAX_IMAGE_PIXELS = None

    image_paths = list(glob.glob(args.glob))
    print('imgs', len(image_paths))

    options = {
        'device': args.device,
        'verbose': args.verbose,
    }
    yolo_predict(args.model, image_paths, args.batch_size, **options)


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('model', type=str)
    parser.add_argument('glob', type=str)
    parser.add_argument('--batch-size', type=int, default=4)
    parser.add_argument('--device', type=str, default='0', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--verbose', action='store_true')
    args = parser.parse_args()
    # timeit returns the elapsed seconds; previously this value was discarded,
    # making the timing wrapper pointless. Report it.
    elapsed = timeit.timeit(lambda: main(args), number=1)
    print('elapsed %.3fs' % elapsed)
