import torch
import torchvision
from torchvision.models.detection import RetinaNet
from torchvision.models.detection.retinanet import RetinaNet_ResNet50_FPN_Weights
from torchvision.transforms import functional as F
from torch.utils.data import DataLoader, Dataset
import os
from PIL import Image
import numpy as np
import random
import json  # for loading the annotation file
from collections import defaultdict  # for grouping annotations by image
from torchvision.models import ResNet50_Weights

# --- Configuration ---
NUM_CLASSES = 40  # number of garbage classes only; the background class is added separately (NUM_CLASSES + 1) when the model is built
BATCH_SIZE = 4
NUM_EPOCHS = 20
LEARNING_RATE = 0.0001
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0005
IMAGE_SIZE = (600, 600)  # RetinaNet typically uses larger images; adjust as needed

# Paths
# IMPORTANT: replace these with your actual paths
DATA_ROOT = 'datasets_coco/images'  # e.g. 'data/garbage_dataset/images'
ANNOTATIONS_FILE = 'datasets_coco/annotations/instances_train.json'  # e.g. 'data/garbage_dataset/annotations.json'
MODEL_SAVE_PATH = 'retinanet_garbage_detector.pth'

# Use the GPU when available
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"Using device: {DEVICE}")

# --- Custom dataset class ---
class GarbageDataset(Dataset):
    """Detection dataset for garbage images.

    Loads COCO-format annotations (``images`` plus ``annotations`` whose
    ``bbox`` is ``[x, y, w, h]``) and converts every box to corner format
    ``[x1, y1, x2, y2]``. Images without any valid box are dropped so every
    sample has at least one target. If the annotation file does not exist,
    synthetic images and random targets are generated so the training
    pipeline can still be smoke-tested.

    Each item is ``(image, target)`` where ``target`` contains float32
    ``boxes`` of shape (N, 4) and int64 ``labels`` of shape (N,).
    Labels are 1-based; 0 is reserved for background, as expected by
    torchvision detection models.
    """

    def __init__(self, root, annotations, transforms=None):
        """
        Args:
            root: directory containing the image files.
            annotations: path to a COCO-format JSON annotation file.
            transforms: optional callable ``(image, target) -> (image, target)``.

        Raises:
            ValueError: if no usable samples could be loaded.
        """
        self.root = root
        self.transforms = transforms
        self.img_data = []

        if os.path.exists(annotations):
            self._load_coco(annotations)
        else:
            print(f"未找到标注文件：{annotations}。请创建或指定正确的路径。")
            print("正在生成虚拟数据用于演示。")
            self._make_dummy_data()

        if not self.img_data:
            raise ValueError("未加载任何数据。请确保您的标注文件正确且可访问。")

    def _load_coco(self, annotations):
        """Parse a COCO-style JSON file into per-image box/label tensors."""
        with open(annotations, 'r') as f:
            data = json.load(f)

        # COCO layout:
        #   "images":      [{"id": int, "file_name": str, ...}, ...]
        #   "annotations": [{"image_id": int, "bbox": [x, y, w, h], "category_id": int, ...}, ...]
        images = {img['id']: img for img in data['images']}
        img_to_anns = defaultdict(list)
        for ann in data['annotations']:
            img_to_anns[ann['image_id']].append(ann)

        for img_id, img_info in images.items():
            boxes = []
            labels = []
            for ann in img_to_anns[img_id]:
                # COCO bbox [x, y, w, h] -> corner format [x1, y1, x2, y2]
                x1, y1, w, h = ann['bbox']
                if w <= 0 or h <= 0:
                    # Skip degenerate boxes; they break detection losses.
                    continue
                boxes.append([x1, y1, x1 + w, y1 + h])
                labels.append(ann['category_id'])
            if boxes:  # keep only images that contain at least one valid object
                self.img_data.append({
                    'image_path': os.path.join(self.root, img_info['file_name']),
                    'boxes': torch.tensor(boxes, dtype=torch.float32),
                    'labels': torch.tensor(labels, dtype=torch.int64)
                })

    def _make_dummy_data(self):
        """Generate synthetic images and random boxes/labels for demonstration."""
        for i in range(3000):
            img_filename = f"dummy_image_{i:04d}.jpg"
            image_full_path = os.path.join(self.root, img_filename)

            # Materialize a dummy image file on disk so __getitem__ can open it.
            if not os.path.exists(image_full_path):
                dummy_img = Image.new('RGB', IMAGE_SIZE, color=(i % 255, (i + 50) % 255, (i + 100) % 255))
                dummy_img.save(image_full_path)

            num_objects = random.randint(1, 5)
            boxes = []
            labels = []
            for _ in range(num_objects):
                x1 = random.randint(0, IMAGE_SIZE[0] - 50)
                y1 = random.randint(0, IMAGE_SIZE[1] - 50)
                x2 = random.randint(x1 + 10, IMAGE_SIZE[0])
                y2 = random.randint(y1 + 10, IMAGE_SIZE[1])
                boxes.append([x1, y1, x2, y2])
                # Labels start at 1: torchvision detectors reserve 0 for
                # background, matching the 1-based COCO category ids used in
                # the real data path (the model is built with NUM_CLASSES + 1).
                labels.append(random.randint(1, NUM_CLASSES))

            self.img_data.append({
                'image_path': image_full_path,
                'boxes': torch.tensor(boxes, dtype=torch.float32),
                'labels': torch.tensor(labels, dtype=torch.int64)
            })

    def __getitem__(self, idx):
        """Return ``(image, target)`` for index *idx*, applying transforms if set."""
        item = self.img_data[idx]
        image = Image.open(item['image_path']).convert("RGB")

        boxes = item['boxes'].clone()
        labels = item['labels'].clone()

        # Defensive: images without objects are filtered out at load time,
        # but keep well-shaped empty tensors just in case.
        if boxes.numel() == 0:
            boxes = torch.zeros((0, 4), dtype=torch.float32)
            labels = torch.zeros((0,), dtype=torch.int64)

        target = {"boxes": boxes, "labels": labels}

        if self.transforms:
            image, target = self.transforms(image, target)

        return image, target

    def __len__(self):
        """Number of images with at least one annotated object."""
        return len(self.img_data)

class ToTensor(object):
    """Convert a PIL image to a float tensor; the target dict passes through untouched."""

    def __call__(self, image, target):
        tensor_image = F.to_tensor(image)
        return tensor_image, target

class Normalize(object):
    """Channel-wise normalization of a tensor image; the target dict passes through untouched."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, image, target):
        normalized = F.normalize(image, mean=self.mean, std=self.std)
        return normalized, target

# --- Transforms ---
def get_transform(train):
    """Build the preprocessing pipeline.

    Always converts to tensor and applies ImageNet normalization; when
    *train* is true, random horizontal flipping is appended as well.
    """
    steps = [
        ToTensor(),
        # ImageNet mean / std
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]
    if train:
        steps.append(RandomHorizontalFlip(0.5))
    return Compose(steps)

class Compose(object):
    """Chain ``(image, target)`` transforms, threading both values through each step in order."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target):
        for step in self.transforms:
            image, target = step(image, target)
        return image, target

class RandomHorizontalFlip(object):
    """Flip a tensor image (and its boxes) left-right with probability *prob*."""

    def __init__(self, prob):
        self.prob = prob

    def __call__(self, image, target):
        if random.random() >= self.prob:
            return image, target
        width = image.shape[-1]
        flipped = image.flip(-1)
        boxes = target["boxes"].clone()
        # Mirror the x-coordinates; x1/x2 swap roles so boxes stay well-formed.
        mirrored_x1 = width - boxes[:, 2]
        mirrored_x2 = width - boxes[:, 0]
        boxes[:, 0] = mirrored_x1
        boxes[:, 2] = mirrored_x2
        target["boxes"] = boxes
        return flipped, target

# --- Collate function for the DataLoader ---
def collate_fn(batch):
    """Transpose a batch of (image, target) pairs into (images, targets) tuples.

    Detection samples have variable-sized targets, so they must not be
    stacked into a single tensor by the default collate.
    """
    transposed = zip(*batch)
    return tuple(transposed)

# --- Model definition ---
def get_retinanet_model(num_classes):
    """Build a RetinaNet-ResNet50-FPN detector for *num_classes* outputs.

    When *num_classes* equals the COCO class count (91), the fully
    pretrained detector checkpoint fits and is loaded. For any other
    count only the ImageNet backbone weights are used, so the detection
    heads are freshly initialized with the requested size.
    """
    if num_classes == 91:
        # Head shapes match COCO, so the whole detection checkpoint applies.
        return torchvision.models.detection.retinanet_resnet50_fpn(
            weights=RetinaNet_ResNet50_FPN_Weights.COCO_V1,
            num_classes=num_classes,
        )
    # Custom class count: pretrained backbone only, avoiding a num_classes clash.
    return torchvision.models.detection.retinanet_resnet50_fpn(
        weights=None,
        weights_backbone=ResNet50_Weights.IMAGENET1K_V1,
        num_classes=num_classes,
    )

# --- Training function ---
def train_one_epoch(model, optimizer, data_loader, device, epoch):
    """Run a single training epoch over *data_loader*.

    The model is expected to return a dict of losses when called with
    (images, targets) in train mode; their sum is backpropagated. The
    running mean loss is printed every 10 batches.
    """
    model.train()
    loss_accum = 0.0
    for batch_idx, (images, targets) in enumerate(data_loader):
        images = [img.to(device) for img in images]
        targets = [{key: val.to(device) for key, val in tgt.items()} for tgt in targets]

        loss_dict = model(images, targets)
        total_loss = sum(loss_dict.values())

        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()

        loss_accum += total_loss.item()
        if (batch_idx + 1) % 10 == 0:  # report every 10 batches
            print(f"Epoch {epoch}, Batch {batch_idx+1}, Loss: {loss_accum / 10:.4f}")
            loss_accum = 0.0

# --- Main training script ---
def main():
    """Train RetinaNet on the garbage dataset, save the weights, and run a
    quick single-image inference demo at the end."""
    # 1. Prepare the data
    if not os.path.exists(DATA_ROOT):
        os.makedirs(DATA_ROOT)
        print(f"已创建数据根目录: {DATA_ROOT}。请将您的图片放入此处。")


    dataset = GarbageDataset(DATA_ROOT, ANNOTATIONS_FILE, get_transform(train=True))
    data_loader = DataLoader(
        dataset,
        batch_size=BATCH_SIZE,
        shuffle=True,
        num_workers=4, # reduce if worker multiprocessing causes problems on your platform
        collate_fn=collate_fn
    )

    print(f"数据集大小: {len(dataset)} 张图片")

    # 2. Load the model
    # NUM_CLASSES is the 40 garbage classes; +1 adds the background class.
    model = get_retinanet_model(NUM_CLASSES + 1) # +1 for background
    model.to(DEVICE)

    # 3. Optimizer and learning-rate scheduler
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params, lr=LEARNING_RATE,
                                momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
    
    # Cosine-annealing LR schedule spanning the whole run
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=NUM_EPOCHS)

    # 4. Training loop
    print("开始训练...")
    for epoch in range(NUM_EPOCHS):
        train_one_epoch(model, optimizer, data_loader, DEVICE, epoch)
        lr_scheduler.step()
        print(f"Epoch {epoch} 完成。当前学习率: {optimizer.param_groups[0]['lr']}")
        
        # Optional: save a checkpoint every 5 epochs
        if (epoch + 1) % 5 == 0:
            torch.save(model.state_dict(), f"retinanet_epoch_{epoch+1}.pth")
            print(f"已将模型检查点保存到 retinanet_epoch_{epoch+1}.pth")

    # 5. Save the final trained model
    torch.save(model.state_dict(), MODEL_SAVE_PATH)
    print(f"训练完成。模型已保存到 {MODEL_SAVE_PATH}")

    # --- Simple inference demo (after training) ---
    print("\n--- 运行一个简单的推断示例 ---")
    model.eval() # switch to evaluation mode
    
    # Grab one image from the training set for inference.
    # NOTE(review): a separate test split should be used for a proper evaluation.
    
    # Prefer an image that actually contains objects
    example_image, example_target = None, None
    for img, target in dataset:
        if target["boxes"].numel() > 0: # make sure it has objects
            example_image, example_target = img, target
            break
    
    if example_image is None:
        print("未找到包含对象的图片用于推断演示。")
        # Fallback: synthesize a plain image with one dummy box/label.
        example_image = F.to_tensor(Image.new('RGB', IMAGE_SIZE, color = (100, 150, 200)))
        example_target = {"boxes": torch.tensor([[50., 50., 150., 150.]], dtype=torch.float32), "labels": torch.tensor([0], dtype=torch.int64)}

    with torch.no_grad():
        # Add a batch dimension and move to the device
        input_image = example_image.unsqueeze(0).to(DEVICE)
        
        # Run inference
        predictions = model(input_image)

        # `predictions` is a list of dicts, one per image in the batch.
        # For a single image, predictions[0] contains:
        #   'boxes':  (N, 4) tensor of [x1, y1, x2, y2] boxes
        #   'labels': (N,) tensor of predicted class labels
        #   'scores': (N,) tensor of confidence scores
        
        output_boxes = predictions[0]['boxes'].cpu().numpy()
        output_labels = predictions[0]['labels'].cpu().numpy()
        output_scores = predictions[0]['scores'].cpu().numpy()
        if len(output_boxes) == 0:
            print("未检测到任何对象。")
        else:
            print(f"检测到 {len(output_boxes)} 个对象:")
            for i in range(len(output_boxes)):
                box = output_boxes[i]
                label = output_labels[i]
                score = output_scores[i]
                if score > 0.5:  # only report confident detections
                    print(f"  框: [{box[0]:.2f}, {box[1]:.2f}, {box[2]:.2f}, {box[3]:.2f}], 标签: {label}, 分数: {score:.4f}")
        print("\n注意: 对于正确的评估（mAP），您将需要使用`torchmetrics`或`pycocotools`。")


# Script entry point; the guard also keeps DataLoader worker processes
# (num_workers > 0) from re-running the training on import.
if __name__ == "__main__":
    main()