import argparse
import os
from glob import glob
from datetime import datetime
import random
import io
import time

import cv2
import numpy as np
import torch
from PIL import Image

from damo.base_models.core.ops import RepConv
from damo.config.base import parse_config
from damo.detectors.detector import build_local_model
from damo.utils import vis, postprocess
from damo.utils.demo_utils import transform_img
from damo.structures.image_list import ImageList
from damo.structures.bounding_box import BoxList


class DAMOYOLOPredictor:
    """High-level DAMO-YOLO prediction interface.

    Wraps an :class:`Infer` engine and accepts image file paths, in-memory
    byte streams, or whole folders of images.
    """

    def __init__(self, config_file, engine_path, infer_size=(640, 640),
                 device='cuda', output_dir='./extract_records', end2end=False):
        """Initialize the predictor and build the inference engine.

        Args:
            config_file: path to the DAMO-YOLO config file.
            engine_path: path to the model weights (.pt/.pth, .onnx or .trt).
            infer_size: inference size [width, height] (square by default,
                so the axis order is moot here).
            device: 'cuda' or 'cpu'.
            output_dir: directory where visualized results are saved.
            end2end: whether to use an end-to-end TensorRT engine.
        """
        self.config = parse_config(config_file)
        self.engine_path = engine_path
        # Copy into a fresh list so a caller-supplied sequence is never
        # shared or mutated (the default is now an immutable tuple).
        self.infer_size = list(infer_size)
        self.device = device
        self.output_dir = output_dir
        self.end2end = end2end

        # Create the output directory up front; no-op when it exists.
        os.makedirs(self.output_dir, exist_ok=True)

        # Build the underlying inference engine; the backend (torch / onnx /
        # tensorRT) is selected from the checkpoint suffix inside Infer.
        self.infer_engine = Infer(
            self.config,
            infer_size=self.infer_size,
            device=self.device,
            output_dir=self.output_dir,
            ckpt=self.engine_path,
            end2end=self.end2end
        )

    def _generate_random_filename(self, extension='.jpg'):
        """Return a timestamped, collision-resistant file name.

        Format: ``pred_YYYYMMDD_HHMMSS_<6 random chars><extension>``.
        """
        now = datetime.now().strftime("%Y%m%d_%H%M%S")
        random_str = ''.join(random.choices('abcdefghijklmnopqrstuvwxyz0123456789', k=6))
        return f"pred_{now}_{random_str}{extension}"

    def predict(self, image_input, conf_threshold=0.6, save_result=False,
                show_result=False, filename=None):
        """Run detection on one image given as a path or a byte stream.

        Args:
            image_input: image path (str / os.PathLike) or io.BytesIO stream.
            conf_threshold: minimum score for a detection to be kept.
            save_result: whether to save the visualized result to output_dir.
            show_result: whether to display the visualized result.
            filename: file name used when saving (defaults to the input's
                basename for paths, or a generated name for streams).

        Returns:
            dict with keys 'bboxes', 'scores', 'cls_inds' (NumPy arrays),
            'filename', 'elapsed_time' and, when visualization was requested,
            'vis_res'.

        Raises:
            ValueError: if image_input is neither a path nor a BytesIO.
        """
        start_time = time.time()

        # Accept either a file path or an in-memory stream; both are decoded
        # into an RGB NumPy array.
        if isinstance(image_input, (str, os.PathLike)):
            origin_img = np.asarray(Image.open(image_input).convert('RGB'))
            if filename is None:
                filename = os.path.basename(image_input)
        elif isinstance(image_input, io.BytesIO):
            origin_img = np.asarray(Image.open(image_input).convert('RGB'))
            if filename is None:
                filename = self._generate_random_filename()
        else:
            raise ValueError("image_input must be either a file path or BytesIO object")

        # Forward pass: per-detection boxes, scores and class indices.
        bboxes, scores, cls_inds = self.infer_engine.forward(origin_img)

        # Keep only detections at or above the confidence threshold.
        keep = scores >= conf_threshold
        bboxes = bboxes[keep]
        scores = scores[keep]
        cls_inds = cls_inds[keep]

        # Safely convert torch tensors (possibly on GPU) to NumPy arrays;
        # pass through values that are already arrays.
        def tensor_to_numpy(tensor):
            if torch.is_tensor(tensor):
                return tensor.detach().cpu().numpy()
            return tensor

        bboxes = tensor_to_numpy(bboxes)
        scores = tensor_to_numpy(scores)
        cls_inds = tensor_to_numpy(cls_inds)

        result = {
            'bboxes': bboxes,
            'scores': scores,
            'cls_inds': cls_inds,
            'filename': filename,
            'elapsed_time': time.time() - start_time
        }

        # Only render the visualization when it is actually needed.
        if save_result or show_result:
            vis_res = self.infer_engine.visualize(
                origin_img, bboxes, scores, cls_inds,
                conf=conf_threshold,
                save_name=filename if save_result else None,
                save_result=save_result
            )

            result['vis_res'] = vis_res

            if show_result:
                cv2.namedWindow("DAMO-YOLO", cv2.WINDOW_NORMAL)
                # vis_res was drawn on the PIL-decoded (RGB) image; OpenCV
                # display expects BGR, so flip channels here the same way the
                # save path does in Infer.visualize.
                cv2.imshow("DAMO-YOLO", vis_res[:, :, ::-1])
                cv2.waitKey(0)
                cv2.destroyAllWindows()

        print(f"预测完成，耗时: {result['elapsed_time']:.3f}秒")
        return result

    def predict_image(self, image_path, conf_threshold=0.6, save_result=False, show_result=False):
        """Predict on a single image path (kept for backward compatibility).

        Args:
            image_path: path of the image to run detection on.
            conf_threshold: minimum score for a detection to be kept.
            save_result: whether to save the visualized result.
            show_result: whether to display the visualized result.

        Returns:
            dict: same structure as :meth:`predict`.
        """
        return self.predict(
            image_path,
            conf_threshold=conf_threshold,
            save_result=save_result,
            show_result=show_result
        )

    def predict_bytes(self, image_bytes, conf_threshold=0.6, filename=None,
                      save_result=False, show_result=False):
        """Predict on an in-memory image stream.

        Args:
            image_bytes: image byte stream (io.BytesIO).
            conf_threshold: minimum score for a detection to be kept.
            filename: file name used when saving the visualized result.
            save_result: whether to save the visualized result.
            show_result: whether to display the visualized result.

        Returns:
            dict: same structure as :meth:`predict`.
        """
        return self.predict(
            image_bytes,
            conf_threshold=conf_threshold,
            save_result=save_result,
            show_result=show_result,
            filename=filename
        )

    def predict_folder(self, folder_path, conf_threshold=0.6,
                       extensions=('.jpg', '.png', '.jpeg'),
                       save_result=False, show_result=False):
        """Predict on every image in a folder, best-effort.

        Failures on individual images are logged and skipped so one bad file
        does not abort the batch.

        Args:
            folder_path: directory containing the images.
            conf_threshold: minimum score for a detection to be kept.
            extensions: accepted image extensions (immutable default to avoid
                the shared-mutable-default pitfall).
            save_result: whether to save visualized results.
            show_result: whether to display visualized results.

        Returns:
            list: one :meth:`predict` result dict per successful image.
        """
        start_time = time.time()

        # Collect every image path matching the supported extensions.
        image_paths = []
        for ext in extensions:
            image_paths.extend(glob(os.path.join(folder_path, f'*{ext}')))

        print(f"开始预测，共发现 {len(image_paths)} 张图片")

        results = []
        success_count = 0
        for idx, image_path in enumerate(image_paths, 1):
            try:
                result = self.predict(
                    image_path,
                    conf_threshold=conf_threshold,
                    save_result=save_result,
                    show_result=show_result
                )
                results.append(result)
                success_count += 1
                print(f"进度: {idx}/{len(image_paths)} - {image_path}")
            except Exception as e:
                # Deliberate best-effort behavior: log and continue.
                print(f"处理失败 {image_path}: {str(e)}")

        total_time = time.time() - start_time
        # Guard against division by zero when every image failed.
        avg_time = total_time / success_count if success_count > 0 else 0

        print("\n预测完成:")
        print(f"总图片数量: {len(image_paths)}")
        print(f"成功预测数量: {success_count}")
        print(f"失败数量: {len(image_paths) - success_count}")
        print(f"总耗时: {total_time:.2f}秒")
        print(f"平均每张图片耗时: {avg_time:.3f}秒")

        return results

# 保留原有的Infer类
class Infer():
    """Backend-agnostic DAMO-YOLO inference engine.

    The backend is chosen from the checkpoint suffix: PyTorch for
    '.pt'/'.pth', ONNX Runtime for '.onnx', TensorRT for '.trt'.
    """

    def __init__(self, config, infer_size=(640, 640), device='cuda', output_dir='./', ckpt=None, end2end=False):
        """Build the engine.

        Args:
            config: parsed DAMO-YOLO config object.
            infer_size: inference size (height/width of the padded input).
            device: 'cuda' or 'cpu'; silently falls back to CPU when CUDA
                is unavailable.
            output_dir: directory for visualized outputs.
            ckpt: checkpoint path; its suffix selects the backend.
            end2end: whether the TensorRT engine already includes NMS.

        Raises:
            ValueError: if the checkpoint suffix is not one of
                onnx / trt / pt / pth.
        """
        self.ckpt_path = ckpt
        # Select the backend from the checkpoint file extension.
        suffix = ckpt.split('.')[-1]
        if suffix == 'onnx':
            self.engine_type = 'onnx'
        elif suffix == 'trt':
            self.engine_type = 'tensorRT'
        elif suffix in ['pt', 'pth']:
            self.engine_type = 'torch'
        else:
            # Fail fast instead of leaving self.engine_type unset and
            # crashing later with an opaque AttributeError.
            raise ValueError(
                f'Unsupported checkpoint suffix: {suffix}! '
                'Please use one of [onnx, trt, pt, pth]')
        self.end2end = end2end  # only takes effect with the tensorRT engine
        self.output_dir = output_dir
        os.makedirs(self.output_dir, exist_ok=True)
        # Fall back to CPU when CUDA was requested but is unavailable.
        if torch.cuda.is_available() and device=='cuda':
            self.device = 'cuda'
        else:
            self.device = 'cpu'

        # Use configured class names when present; otherwise fall back to
        # stringified class indices ("0", "1", ...).
        if "class_names" in config.dataset:
            self.class_names = config.dataset.class_names
        else:
            self.class_names = []
            for i in range(config.model.head.num_classes):
                self.class_names.append(str(i))
            self.class_names = tuple(self.class_names)

        # Copy so a caller-supplied list is never shared (the default is now
        # an immutable tuple). The ONNX path may overwrite this with the
        # model's own input size.
        self.infer_size = list(infer_size)
        # Disable divisibility padding so _pad_image fully controls the
        # final input size.
        config.dataset.size_divisibility = 0
        self.config = config
        self.model = self._build_engine(self.config, self.engine_type)

    def _pad_image(self, img, target_size):
        """Zero-pad a single-image batch tensor to target_size (h, w).

        Returns an ImageList carrying both the original and padded sizes so
        postprocessing can map boxes back to the source resolution.
        """
        n, c, h, w = img.shape
        assert n == 1
        assert h<=target_size[0] and w<=target_size[1]
        target_size = [n, c, target_size[0], target_size[1]]
        pad_imgs = torch.zeros(*target_size)
        # Copy the image into the top-left corner; the rest stays zero.
        pad_imgs[:, :c, :h, :w].copy_(img)

        img_sizes = [img.shape[-2:]]
        pad_sizes = [pad_imgs.shape[-2:]]

        return ImageList(pad_imgs, img_sizes, pad_sizes)


    def _build_engine(self, config, engine_type):
        """Instantiate the backend model/callable for the given engine type.

        Raises:
            NotImplementedError: for an unknown engine type.
        """
        print(f'Inference with {engine_type} engine!')
        if engine_type == 'torch':
            model = build_local_model(config, self.device)
            ckpt = torch.load(self.ckpt_path, map_location=self.device)
            # Checkpoints saved after training may contain only a
            # state_dict instead of a full 'model' entry.
            if 'model' in ckpt:
                model.load_state_dict(ckpt['model'], strict=True)  # full checkpoint
            else:
                model.load_state_dict(ckpt['state_dict'], strict=True)  # bare state_dict
            # Fuse RepConv branches into their deploy-time form.
            for layer in model.modules():
                if isinstance(layer, RepConv):
                    layer.switch_to_deploy()
            model.eval()
        elif engine_type == 'tensorRT':
            model = self.build_tensorRT_engine(self.ckpt_path)
        elif engine_type == 'onnx':
            model, self.input_name, self.infer_size, _, _ = self.build_onnx_engine(self.ckpt_path)
        else:
            # Previously this exception was constructed but never raised,
            # so an unknown engine type silently returned None as the model.
            raise NotImplementedError(f'{engine_type} is not supported yet! Please use one of [onnx, torch, tensorRT]')

        return model

    def build_tensorRT_engine(self, trt_path):
        """Deserialize a TensorRT engine and return a predict(batch) callable.

        The callable copies the batch to device memory, executes the engine,
        and copies the outputs back into pre-allocated host arrays.

        NOTE(review): uses the legacy per-binding TensorRT API
        (num_bindings / get_binding_*), removed in newer TensorRT releases —
        confirm against the TensorRT version pinned by this project.
        """
        import tensorrt as trt
        from cuda import cuda
        loggert = trt.Logger(trt.Logger.INFO)
        trt.init_libnvinfer_plugins(loggert, '')
        runtime = trt.Runtime(loggert)
        with open(trt_path, 'rb') as t:
            model = runtime.deserialize_cuda_engine(t.read())
            context = model.create_execution_context()

        allocations = []
        inputs = []
        outputs = []
        for i in range(context.engine.num_bindings):
            is_input = False
            if context.engine.binding_is_input(i):
                is_input = True
            name = context.engine.get_binding_name(i)
            dtype = context.engine.get_binding_dtype(i)
            shape = context.engine.get_binding_shape(i)
            if is_input:
                batch_size = shape[0]
            size = np.dtype(trt.nptype(dtype)).itemsize
            for s in shape:
                size *= s
            # cuda-python returns (err, ptr) tuples; index [1] is the pointer.
            allocation = cuda.cuMemAlloc(size)
            binding = {
                'index': i,
                'name': name,
                'dtype': np.dtype(trt.nptype(dtype)),
                'shape': list(shape),
                'allocation': allocation,
                'size': size
            }
            allocations.append(allocation[1])
            if context.engine.binding_is_input(i):
                inputs.append(binding)
            else:
                outputs.append(binding)
        # Host-side output buffers, reused (overwritten) on every call.
        trt_out = []
        for output in outputs:
            trt_out.append(np.zeros(output['shape'], output['dtype']))

        def predict(batch):  # result gets copied into output
            # transfer input data to device
            cuda.cuMemcpyHtoD(inputs[0]['allocation'][1],
                          np.ascontiguousarray(batch), int(inputs[0]['size']))
            # execute model
            context.execute_v2(allocations)
            # transfer predictions back
            for o in range(len(trt_out)):
                cuda.cuMemcpyDtoH(trt_out[o], outputs[o]['allocation'][1],
                              outputs[o]['size'])
            return trt_out

        return predict




    def build_onnx_engine(self, onnx_path):
        """Create an ONNX Runtime session.

        Returns:
            (session, input name, input spatial size (h, w), output names,
            output shapes).
        """
        import onnxruntime

        session = onnxruntime.InferenceSession(onnx_path)
        input_name = session.get_inputs()[0].name
        input_shape = session.get_inputs()[0].shape

        out_names = []
        out_shapes = []
        for idx in range(len(session.get_outputs())):
            out_names.append(session.get_outputs()[idx].name)
            out_shapes.append(session.get_outputs()[idx].shape)
        return session, input_name, input_shape[2:], out_names, out_shapes



    def preprocess(self, origin_img):
        """Transform and pad an RGB image to the model input size.

        Returns:
            (padded ImageList on self.device, original (width, height)).
        """
        img = transform_img(origin_img, 0,
                            **self.config.test.augment.transform,
                            infer_size=self.infer_size)
        # img is a image_list
        oh, ow, _  = origin_img.shape
        img = self._pad_image(img.tensors, self.infer_size)

        img = img.to(self.device)
        return img, (ow, oh)

    def postprocess(self, preds, image, origin_shape=None):
        """Convert raw backend outputs to (bboxes, scores, labels).

        Applies NMS for the onnx/tensorRT raw-output paths, then rescales
        boxes back to origin_shape.
        """
        if self.engine_type == 'torch':
            # The torch model already returns post-processed BoxLists.
            output = preds

        elif self.engine_type == 'onnx':
            scores = torch.Tensor(preds[0])
            bboxes = torch.Tensor(preds[1])
            output = postprocess(scores, bboxes,
                self.config.model.head.num_classes,
                self.config.model.head.nms_conf_thre,
                self.config.model.head.nms_iou_thre,
                image)
        elif self.engine_type == 'tensorRT':
            if self.end2end:
                # End-to-end engine outputs: [num detections, boxes, scores,
                # classes]; only the first nums[i][0] entries are valid.
                nums = preds[0]
                boxes = preds[1]
                scores = preds[2]
                pred_classes = preds[3]
                batch_size = boxes.shape[0]
                output = [None for _ in range(batch_size)]
                for i in range(batch_size):
                    img_h, img_w = image.image_sizes[i]
                    boxlist = BoxList(torch.Tensor(boxes[i][:nums[i][0]]),
                              (img_w, img_h),
                              mode='xyxy')
                    boxlist.add_field(
                        'objectness',
                        torch.Tensor(np.ones_like(scores[i][:nums[i][0]])))
                    boxlist.add_field('scores', torch.Tensor(scores[i][:nums[i][0]]))
                    # +1 offsets class ids to the 1-based label convention.
                    boxlist.add_field('labels',
                              torch.Tensor(pred_classes[i][:nums[i][0]] + 1))
                    output[i] = boxlist
            else:
                cls_scores = torch.Tensor(preds[0])
                bbox_preds = torch.Tensor(preds[1])
                output = postprocess(cls_scores, bbox_preds,
                             self.config.model.head.num_classes,
                             self.config.model.head.nms_conf_thre,
                             self.config.model.head.nms_iou_thre, image)

        # Rescale boxes from the padded input back to the original image.
        output = output[0].resize(origin_shape)
        bboxes = output.bbox
        scores = output.get_field('scores')
        cls_inds = output.get_field('labels')

        return bboxes,  scores, cls_inds


    def forward(self, origin_image):
        """Run the full pipeline on one RGB image array.

        Returns:
            (bboxes, scores, cls_inds) in original-image coordinates.
        """
        image, origin_shape = self.preprocess(origin_image)

        if self.engine_type == 'torch':
            output = self.model(image)

        elif self.engine_type == 'onnx':
            output = self.model.run(None, {self.input_name: np.asarray(image.tensors.cpu())})

        elif self.engine_type == 'tensorRT':
            output = self.model(np.asarray(image.tensors.cpu()).astype(np.float32))

        bboxes, scores, cls_inds = self.postprocess(output, image, origin_shape=origin_shape)

        return bboxes, scores, cls_inds

    def visualize(self, image, bboxes, scores, cls_inds, conf, save_name='vis.jpg', save_result=True):
        """Draw detections on the image and optionally save it.

        The drawn image is RGB; the channel flip on write converts it to the
        BGR order cv2.imwrite expects.
        """
        vis_img = vis(image, bboxes, scores, cls_inds, conf, self.class_names)
        if save_result:
            save_path = os.path.join(self.output_dir, save_name)
            print(f"save visualization results at {save_path}")
            cv2.imwrite(save_path, vis_img[:, :, ::-1])
        return vis_img



if __name__ == '__main__':

    # Prefer the GPU when one is available, else fall back to CPU.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"using {device} device")

    # Usage example: build one predictor and exercise each prediction mode.
    predictor = DAMOYOLOPredictor(
        config_file='./configs/seal_two_class_detect_config.py',
        engine_path='./models/seal_two_class_280_ckpt.pth',
        infer_size=[640, 640],
        device=device,
    )

    # 1. Predict directly from an image path.
    result = predictor.predict(
        './datasets/1.jpg',
        conf_threshold=0.6,
        save_result=False,
        show_result=False,
    )

    # 2. Predict from an in-memory byte stream via predict_bytes.
    with open('./datasets/1.jpg', 'rb') as fh:
        stream = io.BytesIO(fh.read())
    result = predictor.predict_bytes(
        stream,
        conf_threshold=0.6,
        filename='custom_name.jpg',  # optional
        save_result=True,
    )

    # 3. Batch-predict every image in a folder.
    results = predictor.predict_folder(
        './datasets/seal_test',
        conf_threshold=0.6,
        save_result=True,
    )
