import os
import xml.etree.ElementTree as ET

import numpy as np
import torch
import torch.optim as optim
import torchvision.transforms as T
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision.models.detection import fasterrcnn_resnet50_fpn, FasterRCNN_ResNet50_FPN_Weights
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from tqdm import tqdm


class VOCDataset(Dataset):
    """Pascal-VOC-style detection dataset for the 'bolt'/'nut' classes.

    Reads one XML annotation file per image and yields
    ``(image_tensor, target_dict)`` pairs in the format expected by
    torchvision detection models (``boxes`` as ``[xmin, ymin, xmax, ymax]``
    float32, ``labels`` as int64, label 0 reserved for background).
    """

    def __init__(self, xml_dir, img_dir):
        self.xml_dir = xml_dir
        self.img_dir = img_dir

        # Collect all annotation files. Sorted for a deterministic dataset
        # ordering across runs; os.listdir order is arbitrary.
        self.xml_files = sorted(
            os.path.join(xml_dir, f) for f in os.listdir(xml_dir)
            if f.endswith('.xml')
        )

        # Class name -> label id. 0 is implicitly the background class.
        self.class_dict = {'bolt': 1, 'nut': 2}

        # Only convert PIL -> float tensor in [0, 1] here.
        # NOTE: do NOT apply ImageNet mean/std normalization in the dataset:
        # torchvision's fasterrcnn_resnet50_fpn normalizes its inputs
        # internally (GeneralizedRCNNTransform), so normalizing here would
        # normalize the images twice.
        self.transform = T.Compose([
            T.ToTensor(),
        ])

    def _parse_xml(self, xml_file):
        """Parse one annotation file.

        Returns:
            (filename, boxes, labels) where boxes is an (N, 4) float32 array
            and labels an (N,) int64 array. Objects with unknown class names
            or degenerate boxes (zero/negative width or height) are skipped.
        """
        tree = ET.parse(xml_file)
        root = tree.getroot()

        filename = root.find('filename').text
        boxes = []
        labels = []

        # Walk every annotated object in the image.
        for obj in root.findall('object'):
            name = obj.find('name').text
            if name not in self.class_dict:
                continue

            bbox = obj.find('bndbox')
            xmin = float(bbox.find('xmin').text)
            ymin = float(bbox.find('ymin').text)
            xmax = float(bbox.find('xmax').text)
            ymax = float(bbox.find('ymax').text)

            # Discard degenerate boxes; torchvision asserts on them.
            if xmax <= xmin or ymax <= ymin:
                continue

            boxes.append([xmin, ymin, xmax, ymax])
            labels.append(self.class_dict[name])

        return filename, np.array(boxes, dtype=np.float32), np.array(labels, dtype=np.int64)

    def __len__(self):
        return len(self.xml_files)

    def __getitem__(self, idx):
        xml_file = self.xml_files[idx]
        filename, boxes, labels = self._parse_xml(xml_file)

        # Load the corresponding image; convert to RGB so channel count is fixed.
        img_path = os.path.join(self.img_dir, filename)
        image = Image.open(img_path).convert('RGB')

        # PIL -> float tensor in [0, 1].
        image = self.transform(image)

        # Convert annotations to tensors; keep a well-formed empty (0, 4)
        # boxes tensor for images with no valid objects.
        if len(boxes) == 0:
            boxes = torch.zeros((0, 4), dtype=torch.float32)
            labels = torch.zeros(0, dtype=torch.int64)
        else:
            boxes = torch.as_tensor(boxes, dtype=torch.float32)
            labels = torch.as_tensor(labels, dtype=torch.int64)

        target = {
            'boxes': boxes,
            'labels': labels,
            'image_id': torch.tensor([idx])
        }

        return image, target


def get_model(num_classes):
    """Build a COCO-pretrained Faster R-CNN with a task-specific box head.

    Args:
        num_classes: Total number of output classes, background included.

    Returns:
        The model with its classification/regression head replaced so it
        predicts ``num_classes`` classes instead of the 91 COCO classes.
    """
    model = fasterrcnn_resnet50_fpn(weights=FasterRCNN_ResNet50_FPN_Weights.DEFAULT)
    # Read the feature width feeding the pretrained predictor so the
    # replacement head plugs into the same ROI feature size.
    feature_dim = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(feature_dim, num_classes)
    return model


def train_one_epoch(model, optimizer, data_loader, device):
    """Run one full training pass over ``data_loader``.

    Returns:
        The mean of the summed detection losses over all batches.
    """
    model.train()
    running_loss = 0.0

    progress = tqdm(data_loader, desc='Training')

    for images, targets in progress:
        # Detection models take a list of image tensors plus one target
        # dict per image; move everything to the training device.
        images = [img.to(device) for img in images]
        targets = [{key: val.to(device) for key, val in tgt.items()} for tgt in targets]

        # In train mode the model returns a dict of loss components.
        loss_dict = model(images, targets)
        batch_loss = sum(loss_dict.values())

        # Standard SGD step.
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()

        # Track and display the per-batch loss.
        running_loss += batch_loss.item()
        progress.set_postfix({'loss': batch_loss.item()})

    return running_loss / len(data_loader)


def collate_fn(batch):
    """Regroup per-sample (image, target) pairs into an images tuple and a
    targets tuple, since variable-size detection targets cannot be stacked."""
    columns = zip(*batch)
    return tuple(columns)


def main():
    """Train Faster R-CNN on the bolt/nut VOC dataset and save checkpoints."""
    # Prefer GPU when available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"Using device: {device}")

    # Dataset locations.
    XML_DIR = "annotations_voc"
    IMG_DIR = "lslm/lslm"  # replace with your image directory

    dataset = VOCDataset(xml_dir=XML_DIR, img_dir=IMG_DIR)

    data_loader = DataLoader(
        dataset,
        batch_size=4,
        shuffle=True,
        num_workers=4,
        collate_fn=collate_fn  # targets are dicts; default stacking would fail
    )

    # Background + bolt + nut.
    num_classes = 3
    model = get_model(num_classes)
    model.to(device)

    # Optimize only trainable parameters.
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = optim.SGD(params, lr=0.005, momentum=0.9, weight_decay=0.0005)

    # Decay the learning rate by 10x every 3 epochs.
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=3,
                                                   gamma=0.1)

    # Training parameters.
    num_epochs = 10
    best_loss = float('inf')

    # Directory for per-epoch checkpoints.
    save_dir = 'model_checkpoints'
    os.makedirs(save_dir, exist_ok=True)

    # The best model is written to a sibling 'pt_file' directory. Create it
    # up front: previously only save_dir was created, so the first
    # torch.save of a best model raised FileNotFoundError.
    best_path = os.path.join(save_dir, '../pt_file/pretrain_faster_rcnn.pt')
    os.makedirs(os.path.dirname(best_path), exist_ok=True)

    # Training loop.
    for epoch in range(num_epochs):
        print(f"\nEpoch {epoch + 1}/{num_epochs}")

        train_loss = train_one_epoch(model, optimizer, data_loader, device)

        # Step the LR schedule once per epoch.
        lr_scheduler.step()

        print(f"Learning Rate: {optimizer.param_groups[0]['lr']}")
        print(f"Train Loss: {train_loss:.4f}")

        # Keep the weights with the lowest training loss seen so far.
        if train_loss < best_loss:
            best_loss = train_loss
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'loss': train_loss,
            }, best_path)

        # Always save a per-epoch checkpoint for resuming.
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'loss': train_loss,
        }, os.path.join(save_dir, f'checkpoint_epoch_{epoch + 1}.pt'))


if __name__ == '__main__':
    # Entry point: run training only when executed as a script, not on import.
    main()