import os
import json
import shutil
import argparse
import torch
import numpy as np
from tqdm import tqdm
from pathlib import Path
from datetime import datetime
import cv2  # 添加cv2导入
from mmengine.config import Config
from mmengine.runner import load_checkpoint
from mmcv.transforms import LoadImageFromFile
from mmdet.datasets.transforms import Resize
from mmdet.registry import MODELS
from mmdet.utils import register_all_modules
from mmdet.structures import DetDataSample
from mmdet.apis import DetInferencer  # 替换推理逻辑

class AutoLabeler:
    """Auto-labeler for language-guided detectors such as Grounding DINO.

    Runs a DetInferencer over sampled camera images, filters detections by a
    confidence threshold, and emits COCO-format annotations plus optional
    visualization images.
    """

    def __init__(self, config_path, checkpoint_path, class_names, device='cuda:0', conf_thresh=0.3):
        """Initialize the auto-labeler.

        Args:
            config_path: Path to the MMDetection config file.
            checkpoint_path: Path to the model weights file.
            class_names: Ordered list of class names; list index is the
                COCO category id.
            device: Torch device string, e.g. 'cuda:0'.
            conf_thresh: Minimum score for a detection to be kept.
        """
        # Registering all mmdet modules is required before building the model.
        register_all_modules()

        self.class_names = class_names
        self.device = device
        self.conf_thresh = conf_thresh
        self.class_mapping = {name: i for i, name in enumerate(class_names)}

        print(f"初始化模型 (device: {device})...")
        # DetInferencer wraps config/weight loading and pre/post-processing.
        self.inferencer = DetInferencer(
            model=config_path,
            weights=checkpoint_path,
            device=device
        )
        self.annotation_id = 0

        # Deterministic per-class BGR colors for visualization (fixed seed for
        # reproducibility). High bound is 256 because randint's upper bound is
        # exclusive; with 255 the brightest value was unreachable.
        np.random.seed(42)
        self.colors = {
            i: [int(c) for c in np.random.randint(0, 256, 3)]
            for i in range(len(class_names))
        }

    def process_dataset(self, input_dir, output_dir, vis_dir=None, samples_per_dir=50):
        """Process the dataset and write COCO-format annotations.

        Args:
            input_dir: Root directory containing camera_<dir>_undistort folders.
            output_dir: Destination for copied images and annotations.json.
            vis_dir: Optional directory for visualization output; disabled
                when None.
            samples_per_dir: Maximum images sampled per camera direction.
        """
        # Local import: PIL is only needed while processing images; hoisted
        # out of the per-image loop.
        from PIL import Image

        coco_data = self._init_coco_template()
        image_id = 0

        os.makedirs(output_dir, exist_ok=True)
        if vis_dir:
            os.makedirs(vis_dir, exist_ok=True)

        # Each camera direction lives in its own undistorted-image folder.
        for camera_type in ['front', 'rear', 'left', 'right']:
            camera_dir = os.path.join(input_dir, f'camera_{camera_type}_undistort')
            if not os.path.exists(camera_dir):
                print(f"相机目录不存在: {camera_dir}")
                continue

            target_dir = os.path.join(output_dir, camera_type, "images")
            os.makedirs(target_dir, exist_ok=True)

            # Per-camera visualization folder; None disables visualization.
            vis_camera_dir = None
            if vis_dir:
                vis_camera_dir = os.path.join(vis_dir, camera_type)
                os.makedirs(vis_camera_dir, exist_ok=True)

            # Collect candidate images and draw a random sample.
            image_files = [
                os.path.join(camera_dir, f)
                for f in os.listdir(camera_dir) if f.endswith('.png')
            ]
            if not image_files:
                continue

            sample_size = min(samples_per_dir, len(image_files))
            sampled_files = np.random.choice(image_files, sample_size, replace=False).tolist()

            for img_path in tqdm(sampled_files, desc=f"处理{camera_type}相机图像"):
                try:
                    # Copy the image into the COCO-style output layout.
                    img_filename = os.path.basename(img_path)
                    target_path = os.path.join(target_dir, img_filename)
                    shutil.copy(img_path, target_path)

                    # Read only the image size; the context manager closes the
                    # file handle (Image.open is lazy and otherwise keeps the
                    # descriptor open).
                    with Image.open(img_path) as img:
                        img_width, img_height = img.size

                    coco_data['images'].append({
                        'id': image_id,
                        'file_name': f"{camera_type}/images/{img_filename}",
                        'width': img_width,
                        'height': img_height,
                        'camera_type': camera_type
                    })

                    # Run inference and record detections above the threshold.
                    result = self._inference_single(img_path, image_id)
                    self._add_annotations(result, image_id, coco_data)

                    # Optionally render the detections onto a copy of the image.
                    if vis_camera_dir and hasattr(result, 'pred_instances'):
                        vis_img = cv2.imread(img_path)
                        # cv2.imread returns None on failure instead of raising.
                        if vis_img is not None:
                            vis_img = self._draw_detection_results(vis_img, result)
                            vis_path = os.path.join(vis_camera_dir, img_filename)
                            cv2.imwrite(vis_path, vis_img)

                    image_id += 1

                except Exception as e:
                    # Best-effort batch processing: report and skip the
                    # offending image rather than aborting the whole run.
                    print(f"处理图像时出错: {e}")
                    continue

        annotations_file = os.path.join(output_dir, 'annotations.json')
        print(f"保存标注文件到 {annotations_file}")
        with open(annotations_file, 'w') as f:
            json.dump(coco_data, f, indent=4)

        print(f"处理完成，共标注 {len(coco_data['images'])} 张图像，{len(coco_data['annotations'])} 个目标")
        if vis_dir:
            print(f"可视化结果已保存到: {vis_dir}")

    def _inference_single(self, img_path, img_id=0):
        """Run inference on a single image via DetInferencer.

        Args:
            img_path: Path to the image file.
            img_id: Unused; kept for backward-compatible call signatures.

        Returns:
            A DetDataSample with predictions, or an empty DetDataSample on
            failure.
        """
        try:
            # Grounding DINO expects classes joined as a " . "-separated
            # text prompt with a trailing " .".
            text_prompt = " . ".join(self.class_names) + " ."

            print(f"执行推理，使用置信度阈值: {self.conf_thresh}")
            results = self.inferencer(
                img_path,
                return_datasamples=True,
                texts=text_prompt,
                custom_entities=True,
                # DetInferencer accepts pred_score_thr but does not reliably
                # filter results internally; we filter again downstream.
                pred_score_thr=self.conf_thresh
            )

            # DetInferencer returns a dict; datasamples live under
            # the 'predictions' key.
            if 'predictions' in results and len(results['predictions']) > 0:
                result = results['predictions'][0]

                # Debug output: how many detections clear the threshold.
                if hasattr(result, 'pred_instances') and hasattr(result.pred_instances, 'scores'):
                    all_scores = result.pred_instances.scores.cpu().numpy()
                    high_conf = (all_scores >= self.conf_thresh).sum()
                    print(f"检测到 {len(all_scores)} 个目标，其中 {high_conf} 个置信度≥{self.conf_thresh}")

                return result
            else:
                print(f"未获取到有效的推理结果")
                return DetDataSample()
        except Exception as e:
            print(f"推理过程中出错: {e}")
            import traceback
            traceback.print_exc()
            return DetDataSample()

    def _add_annotations(self, result, image_id, coco_data):
        """Append COCO annotations for detections above the threshold.

        Args:
            result: DetDataSample holding pred_instances.
            image_id: COCO image id the annotations belong to.
            coco_data: COCO dict (mutated in place).
        """
        if not hasattr(result, 'pred_instances'):
            return

        pred_instances = result.pred_instances

        scores = pred_instances.scores.cpu().numpy()
        labels = pred_instances.labels.cpu().numpy()
        bboxes = pred_instances.bboxes.cpu().numpy()

        for label, score, bbox in zip(labels, scores, bboxes):
            # Manual confidence filter (inference may not have filtered).
            if float(score) < self.conf_thresh:
                continue

            # Skip labels outside the known class list.
            if int(label) >= len(self.class_names):
                continue

            class_name = self.class_names[int(label)]
            class_id = self.class_mapping.get(class_name)

            # Convert [x1, y1, x2, y2] to COCO [x, y, width, height],
            # clamping degenerate boxes to zero extent.
            x1, y1, x2, y2 = bbox
            width = max(0, x2 - x1)
            height = max(0, y2 - y1)

            coco_data['annotations'].append({
                'id': self.annotation_id,
                'image_id': image_id,
                'category_id': class_id,
                'bbox': [float(x1), float(y1), float(width), float(height)],
                'area': float(width * height),
                'segmentation': [],
                'iscrowd': 0,
                'score': float(score)
            })
            self.annotation_id += 1

    def _init_coco_template(self):
        """Return an empty COCO-format dict seeded with dataset metadata."""
        return {
            'info': {
                'description': 'MM Grounding DINO自动标注数据集',
                'version': '1.0',
                'year': datetime.now().year,
                'contributor': 'Auto-Labeling Script',
                'date_created': datetime.now().strftime('%Y-%m-%d')
            },
            'licenses': [{'id': 1, 'name': 'Unknown', 'url': ''}],
            'images': [],
            'annotations': [],
            # 'cid' instead of 'id' to avoid shadowing the builtin.
            'categories': [
                {'id': cid, 'name': name, 'supercategory': 'none'}
                for cid, name in enumerate(self.class_names)
            ]
        }

    def _draw_detection_results(self, image, result):
        """Draw detection boxes and labels onto an image (in place).

        Args:
            image: BGR image array (as read by cv2.imread).
            result: DetDataSample holding pred_instances.

        Returns:
            The annotated image.
        """
        if not hasattr(result, 'pred_instances'):
            return image

        pred_instances = result.pred_instances

        scores = pred_instances.scores.cpu().numpy()
        labels = pred_instances.labels.cpu().numpy()
        bboxes = pred_instances.bboxes.cpu().numpy()

        for label, score, bbox in zip(labels, scores, bboxes):
            # Same filtering rules as _add_annotations so the visualization
            # matches the saved annotations.
            if float(score) < self.conf_thresh:
                continue
            if int(label) >= len(self.class_names):
                continue

            class_name = self.class_names[int(label)]
            color = self.colors[int(label)]

            # Integer pixel coordinates for drawing.
            x1, y1, x2, y2 = [int(coord) for coord in bbox]

            cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)

            # Label text with a filled background for readability.
            text = f"{class_name}: {score:.2f}"
            (text_width, text_height), _ = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
            cv2.rectangle(image, (x1, y1-text_height-5), (x1+text_width, y1), color, -1)
            cv2.putText(image, text, (x1, y1-5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

        return image

def main():
    """CLI entry point: parse arguments and run the auto-labeler."""
    parser = argparse.ArgumentParser(description='自动标注图像并转换为COCO格式')
    parser.add_argument('--source', type=str, default='/home/jetson/datasets/camera/rosbag2_2025_01_24-15_06_06_content', help='数据集路径')
    parser.add_argument('--output', type=str, default='/home/jetson/datasets/convert', help='输出路径')
    parser.add_argument('--vis-dir', type=str, default='./output', help='可视化结果保存路径，不设置则不生成可视化')
    parser.add_argument('--config', type=str, default='./configs/mm_grounding_dino/grounding_dino_swin-l_pretrain_all.py', help='配置文件')
    parser.add_argument('--weights', type=str, default='./weights/grounding_dino_swin-l_pretrain_all-56d69e78.pth', help='权重文件')
    parser.add_argument('--conf-thresh', type=float, default=0.3, help='置信度阈值')
    parser.add_argument('--samples', type=int, default=50, help='每个方向采样的图像数量')
    parser.add_argument('--classes', type=str, default=None, help='类别列表，用逗号分隔，例如"desk,chair,person"')
    parser.add_argument('--device', type=str, default='cuda:0', help='设备，例如cuda:0')
    parser.add_argument('--test-image', type=str, default=None, help='测试单张图片推理')
    parser.add_argument('--no-vis', action='store_true', help='不生成可视化结果')
    args = parser.parse_args()

    # Expand ~ in user-supplied paths.
    source_dir = os.path.expanduser(args.source)
    output_dir = os.path.expanduser(args.output)

    # --no-vis overrides --vis-dir entirely.
    vis_dir = None if args.no_vis else os.path.expanduser(args.vis_dir)

    DEFAULT_CLASSES = [
        'desk', 'chair', 'table', 'person', 'trash bin',
        'water dispenser', 'shelf', 'bed', 'robot', 'sofa',
        'small vehicle', 'trolley', 'box'
    ]

    # Strip whitespace and drop empty entries so inputs like
    # "desk, chair,,person" parse into clean class names.
    if args.classes:
        class_list = [c.strip() for c in args.classes.split(',') if c.strip()]
    else:
        class_list = DEFAULT_CLASSES

    # Both config and weights must exist before we attempt model loading.
    if not os.path.exists(args.config) or not os.path.exists(args.weights):
        print(f"错误: 配置文件或权重文件不存在")
        return

    # CUDA tuning (targeted at Jetson Orin Nano): enable cudnn autotuner
    # and TF32 matmul for throughput.
    if 'cuda' in args.device and torch.cuda.is_available():
        torch.backends.cudnn.benchmark = True
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.allow_tf32 = True

    print(f"初始化自动标注器，使用设备: {args.device}")
    print(f"类别列表: {class_list}")

    try:
        labeler = AutoLabeler(
            config_path=args.config,
            checkpoint_path=args.weights,
            class_names=class_list,
            device=args.device,
            conf_thresh=args.conf_thresh
        )

        if args.test_image and os.path.exists(args.test_image):
            # Single-image smoke test: run inference and print detections.
            result = labeler._inference_single(args.test_image)

            if hasattr(result, 'pred_instances'):
                scores = result.pred_instances.scores.cpu().numpy()
                labels = result.pred_instances.labels.cpu().numpy()

                print(f"检测到 {len(scores)} 个目标:")
                for i, (label, score) in enumerate(zip(labels, scores)):
                    class_name = class_list[int(label)] if int(label) < len(class_list) else f"未知({label})"
                    print(f"  {i+1}: {class_name}, 置信度: {score:.4f}")
        else:
            # Full dataset run.
            labeler.process_dataset(
                input_dir=source_dir,
                output_dir=output_dir,
                vis_dir=vis_dir,
                samples_per_dir=args.samples
            )

    except Exception as e:
        # Top-level boundary: report with traceback instead of crashing.
        print(f"处理过程中出错: {e}")
        import traceback
        traceback.print_exc()

if __name__ == "__main__":
    main()
