import os
import xml.etree.ElementTree as ET
from typing import List, Tuple, Dict

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as T
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision.models import vgg16


class VOCDataset(Dataset):
    """Pascal-VOC-style detection dataset yielding (image, boxes, labels).

    Each sample is an ImageNet-normalized image tensor, an (N, 4) float32
    tensor of [x1, y1, x2, y2] boxes and an (N,) int64 tensor of class ids.
    """

    def __init__(self, xml_dir, img_dir):
        """
        Args:
            xml_dir: directory containing VOC annotation XML files
            img_dir: directory containing the corresponding images
        """
        self.xml_dir = xml_dir
        self.img_dir = img_dir

        # Collect every annotation file in the directory.
        self.xml_files = [os.path.join(xml_dir, name)
                          for name in os.listdir(xml_dir)
                          if name.endswith('.xml')]

        # Class-name -> integer label (0 is implicitly background).
        self.class_dict = {'bolt': 1, 'nut': 2}

        # PIL image -> normalized tensor (ImageNet statistics).
        self.transform = T.Compose([
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406],
                        std=[0.229, 0.224, 0.225]),
        ])

    def _parse_xml(self, xml_file):
        """Parse one annotation file; skip unknown classes and degenerate boxes."""
        root = ET.parse(xml_file).getroot()
        filename = root.find('filename').text

        boxes, labels = [], []
        for obj in root.findall('object'):
            name = obj.find('name').text
            if name not in self.class_dict:
                continue

            bbox = obj.find('bndbox')
            coords = [float(bbox.find(tag).text)
                      for tag in ('xmin', 'ymin', 'xmax', 'ymax')]

            # Reject boxes with non-positive width or height.
            if coords[2] <= coords[0] or coords[3] <= coords[1]:
                continue

            boxes.append(coords)
            labels.append(self.class_dict[name])

        return (filename,
                np.array(boxes, dtype=np.float32),
                np.array(labels, dtype=np.int64))

    def __len__(self):
        """One sample per annotation file."""
        return len(self.xml_files)

    def __getitem__(self, idx):
        """Load, transform and return one (image, boxes, labels) sample."""
        filename, boxes, labels = self._parse_xml(self.xml_files[idx])

        # Load the image referenced by the annotation.
        image = Image.open(os.path.join(self.img_dir, filename)).convert('RGB')
        image = self.transform(image)

        # Convert annotations to tensors; keep a (0, 4) shape when empty.
        if len(boxes) == 0:
            boxes = torch.zeros((0, 4), dtype=torch.float32)
            labels = torch.zeros(0, dtype=torch.int64)
        else:
            boxes = torch.as_tensor(boxes, dtype=torch.float32)
            labels = torch.as_tensor(labels, dtype=torch.int64)

        return image, boxes, labels


def collate_fn(batch):
    """Batch samples by transposing: each field becomes a per-sample tuple."""
    transposed = zip(*batch)
    return tuple(transposed)


class AnchorGenerator:
    def __init__(self, scales: List[int] = [8, 16, 32],
                 ratios: List[float] = [0.5, 1, 2],
                 feature_stride: int = 16):
        """
        Anchor generator for a single feature level.
        Args:
            scales: anchor sizes in pixels. NOTE(review): these are used
                directly (not multiplied by feature_stride), so the largest
                anchor is only ~32 px wide — confirm this matches the object
                sizes in the dataset, otherwise no anchor can reach the
                positive IoU threshold.
            ratios: anchor aspect ratios
            feature_stride: stride of the feature map w.r.t. the input image
        """
        self.scales = torch.as_tensor(scales)
        self.ratios = torch.as_tensor(ratios)
        self.feature_stride = feature_stride

        # Template anchors for a single cell, centered at (0, 0).
        self.cell_anchor = self._generate_cell_anchor()

    def _generate_cell_anchor(self) -> torch.Tensor:
        """Build the (num_ratios * num_scales, 4) base anchors around (0, 0)."""
        scales, ratios = self.scales, self.ratios

        # sqrt(ratio) height scaling with 1/sqrt(ratio) width scaling keeps
        # the anchor area roughly constant per scale.
        h_ratios = torch.sqrt(ratios)
        w_ratios = 1 / h_ratios

        # Broadcast to every (ratio, scale) combination.
        ws = (w_ratios[:, None] * scales[None, :]).view(-1)
        hs = (h_ratios[:, None] * scales[None, :]).view(-1)

        # [x1, y1, x2, y2] boxes centered at the origin.
        base_anchors = torch.stack([-ws, -hs, ws, hs], dim=1) / 2

        return base_anchors

    def generate_anchors(self, feature_map_size: Tuple[int, int],
                         device: torch.device) -> torch.Tensor:
        """
        Tile the base anchors over the whole feature map.
        Args:
            feature_map_size: (H, W) of the feature map
            device: target device
        Returns:
            anchors: (H * W * num_anchors, 4) in input-image coordinates
        """
        grid_height, grid_width = feature_map_size
        shifts_x = torch.arange(0, grid_width, device=device) * self.feature_stride
        shifts_y = torch.arange(0, grid_height, device=device) * self.feature_stride

        # indexing='ij' makes the (row, col) layout explicit; calling
        # torch.meshgrid without it is deprecated and only defaults to this
        # behavior with a warning.
        shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x, indexing='ij')
        shift_x = shift_x.reshape(-1)
        shift_y = shift_y.reshape(-1)
        shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)

        # Add every spatial shift to every base anchor.
        anchors = (shifts.view(-1, 1, 4) +
                   self.cell_anchor.to(device).view(1, -1, 4))

        return anchors.reshape(-1, 4)


def box_iou(boxes1: torch.Tensor, boxes2: torch.Tensor) -> torch.Tensor:
    """
    Pairwise IoU between two sets of boxes.
    Args:
        boxes1: (N, 4) [x1, y1, x2, y2]
        boxes2: (M, 4) [x1, y1, x2, y2]
    Returns:
        (N, M) IoU matrix.
    """
    # Per-box areas from width * height.
    wh1 = boxes1[:, 2:] - boxes1[:, :2]
    wh2 = boxes2[:, 2:] - boxes2[:, :2]
    area1 = wh1[:, 0] * wh1[:, 1]
    area2 = wh2[:, 0] * wh2[:, 1]

    # Intersection rectangle for every (i, j) pair via broadcasting.
    top_left = torch.max(boxes1[:, None, :2], boxes2[None, :, :2])  # (N,M,2)
    bottom_right = torch.min(boxes1[:, None, 2:], boxes2[None, :, 2:])  # (N,M,2)

    inter_wh = (bottom_right - top_left).clamp(min=0)  # (N,M,2)
    intersection = inter_wh[..., 0] * inter_wh[..., 1]  # (N,M)

    # IoU = intersection / union.
    union = area1[:, None] + area2[None, :] - intersection
    return intersection / union


def create_target_labels(
        anchors: torch.Tensor,
        gt_boxes: List[torch.Tensor],
        gt_labels: List[torch.Tensor],
        pos_iou_thresh: float = 0.7,
        neg_iou_thresh: float = 0.3
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Assign training labels and regression targets to anchors.
    Args:
        anchors: (num_anchors, 4) [x1, y1, x2, y2]
        gt_boxes: list of (num_boxes, 4) tensors
        gt_labels: list of (num_boxes,) tensors (currently unused here;
            the RPN is class-agnostic)
        pos_iou_thresh: IoU at or above which an anchor is positive
        neg_iou_thresh: IoU below which an anchor is negative
    Returns:
        labels: (num_anchors,) -1 = ignore, 0 = negative, 1 = positive
        bbox_targets: (num_anchors, 4) deltas for positive anchors, else 0
    """
    device = anchors.device
    num_anchors = anchors.size(0)

    # Start with every anchor ignored.
    labels = torch.full((num_anchors,), -1, dtype=torch.float32, device=device)
    bbox_targets = torch.zeros((num_anchors, 4), dtype=torch.float32, device=device)

    # Flatten the per-image ground-truth list. An empty *list* and a list of
    # empty tensors both mean "no ground truth"; the original only caught the
    # former and would crash on the latter.
    if len(gt_boxes) == 0:
        all_gt = torch.zeros((0, 4), dtype=torch.float32, device=device)
    else:
        all_gt = torch.cat(gt_boxes, dim=0)

    if all_gt.size(0) == 0:
        # No objects: every anchor is a negative sample.
        return torch.zeros_like(labels), bbox_targets

    # IoU of every anchor against every ground-truth box.
    ious = box_iou(anchors, all_gt)
    max_iou, gt_assignment = ious.max(dim=1)

    # Threshold-based assignment.
    labels[max_iou < neg_iou_thresh] = 0   # negative
    labels[max_iou >= pos_iou_thresh] = 1  # positive

    # Faster R-CNN rule (b): for each gt box, also mark the anchor(s) with
    # the highest IoU as positive. Without this, an image whose objects never
    # reach pos_iou_thresh produces zero positive samples.
    gt_best_iou, _ = ious.max(dim=0)  # (num_gt,)
    best_for_gt = (ious == gt_best_iou[None, :]) & (gt_best_iou[None, :] > 0)
    labels[best_for_gt.any(dim=1)] = 1

    # Regression targets only for positive anchors.
    pos_inds = torch.nonzero(labels == 1).squeeze(1)
    if len(pos_inds) > 0:
        matched_gt_boxes = all_gt[gt_assignment[pos_inds]]
        bbox_targets[pos_inds] = bbox2delta(anchors[pos_inds], matched_gt_boxes)

    return labels, bbox_targets


def bbox2delta(anchors: torch.Tensor, gt_boxes: torch.Tensor) -> torch.Tensor:
    """
    Encode ground-truth boxes as regression targets relative to anchors.
    Args:
        anchors: (N, 4) [x1, y1, x2, y2]
        gt_boxes: (N, 4) [x1, y1, x2, y2]
    Returns:
        deltas: (N, 4) [tx, ty, tw, th]
    """
    def _to_cxcywh(boxes):
        # Width/height and center coordinates of each box.
        w = boxes[:, 2] - boxes[:, 0]
        h = boxes[:, 3] - boxes[:, 1]
        cx = boxes[:, 0] + 0.5 * w
        cy = boxes[:, 1] + 0.5 * h
        return cx, cy, w, h

    cxa, cya, wa, ha = _to_cxcywh(anchors)
    cx, cy, w, h = _to_cxcywh(gt_boxes)

    # Center offsets are normalized by anchor size; width/height are
    # log-encoded scale factors.
    return torch.stack([
        (cx - cxa) / wa,
        (cy - cya) / ha,
        torch.log(w / wa),
        torch.log(h / ha),
    ], dim=1)


class RPN(nn.Module):
    """Region Proposal Network head.

    Runs a shared 3x3 conv over a backbone feature map, then predicts a
    2-way objectness score and 4 box-regression deltas for every anchor
    at every spatial location.
    """

    def __init__(self,
                 in_channels: int = 512,
                 anchor_scales: List[int] = [8, 16, 32],
                 anchor_ratios: List[float] = [0.5, 1, 2],
                 feature_stride: int = 16):
        super(RPN, self).__init__()

        # Anchor generator (one anchor set per feature-map cell)
        self.anchor_generator = AnchorGenerator(
            scales=anchor_scales,
            ratios=anchor_ratios,
            feature_stride=feature_stride
        )

        # RPN layers: shared 3x3 conv, then 1x1 heads producing
        # 2 class scores and 4 regression values per anchor
        self.conv = nn.Conv2d(in_channels, 512, kernel_size=3, padding=1)
        num_anchors = len(anchor_scales) * len(anchor_ratios)
        self.cls_score = nn.Conv2d(512, num_anchors * 2, kernel_size=1)
        self.bbox_pred = nn.Conv2d(512, num_anchors * 4, kernel_size=1)

        # Initialize weights: zero-mean Gaussian (std 0.01), zero bias
        for layer in [self.conv, self.cls_score, self.bbox_pred]:
            nn.init.normal_(layer.weight, std=0.01)
            nn.init.constant_(layer.bias, 0)

    def forward(self, x: torch.Tensor, gt_boxes: List[torch.Tensor] = None,
                gt_labels: List[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
        """
        Args:
            x: feature map (batch_size, C, H, W)
            gt_boxes: List[Tensor], each of shape (num_boxes, 4)
            gt_labels: List[Tensor], each of shape (num_boxes,)
        Returns:
            dict with 'rpn_cls_prob' (B, H*W*A, 2), 'rpn_bbox_pred'
            (B, H*W*A, 4) and 'anchors' (H*W*A, 4); in training mode with
            gt_boxes also 'rpn_labels' and 'rpn_bbox_targets'.

        NOTE(review): create_target_labels concatenates gt_boxes across the
        whole batch and returns a single (num_anchors,) label vector, so the
        training targets are only consistent with batch_size == 1 — confirm
        before increasing the batch size.
        """
        batch_size = x.shape[0]
        feature_map_size = x.shape[2:]
        device = x.device

        # Generate anchors for this feature-map size
        anchors = self.anchor_generator.generate_anchors(feature_map_size, device)

        # RPN forward pass
        x = F.relu(self.conv(x))
        rpn_cls_score = self.cls_score(x)  # (B, A*2, H, W)
        rpn_bbox_pred = self.bbox_pred(x)  # (B, A*4, H, W)

        # Move channels last so the channel axis can be split per anchor
        rpn_cls_score = rpn_cls_score.permute(0, 2, 3, 1).contiguous()
        rpn_bbox_pred = rpn_bbox_pred.permute(0, 2, 3, 1).contiguous()

        # Softmax over the 2 classes (background / foreground) per anchor
        rpn_cls_prob = F.softmax(rpn_cls_score.view(batch_size, -1, 2), dim=2)

        result = {
            'rpn_cls_prob': rpn_cls_prob,
            'rpn_bbox_pred': rpn_bbox_pred.view(batch_size, -1, 4),
            'anchors': anchors
        }

        # During training, also compute per-anchor targets
        if self.training and gt_boxes is not None:
            rpn_labels, rpn_bbox_targets = create_target_labels(
                anchors, gt_boxes, gt_labels)
            result.update({
                'rpn_labels': rpn_labels,
                'rpn_bbox_targets': rpn_bbox_targets
            })

        return result


def compute_rpn_loss(rpn_cls_prob: torch.Tensor,
                     rpn_bbox_pred: torch.Tensor,
                     rpn_labels: torch.Tensor,
                     rpn_bbox_targets: torch.Tensor,
                     cls_weight: float = 1.0,
                     bbox_weight: float = 1.0) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Compute RPN classification and box-regression losses.
    Args:
        rpn_cls_prob: (batch_size, num_anchors, 2) softmax probabilities
        rpn_bbox_pred: (batch_size, num_anchors, 4) predicted deltas
        rpn_labels: (num_anchors,) -1 = ignore, 0 = negative, 1 = positive
        rpn_bbox_targets: (num_anchors, 4) regression targets
        cls_weight: scale applied to the classification loss
        bbox_weight: scale applied to the regression loss
    Returns:
        (weighted classification loss, weighted regression loss)

    Fix: the inputs are already softmax probabilities (see RPN.forward), but
    the original called F.cross_entropy, which expects raw logits — that
    applied a second, implicit softmax and flattened the gradients. NLL on
    log-probabilities computes the intended cross-entropy.
    """
    # Only non-ignored samples contribute to the classification loss.
    valid_mask = rpn_labels >= 0
    rpn_cls_prob = rpn_cls_prob.view(-1, 2)
    rpn_labels = rpn_labels.long()

    # NLL over log-probs == cross-entropy on the original logits.
    # The epsilon guards log(0) for saturated probabilities.
    rpn_cls_loss = F.nll_loss(
        torch.log(rpn_cls_prob[valid_mask] + 1e-8),
        rpn_labels[valid_mask],
        reduction='mean'
    )

    # Regression loss only on positive (foreground) anchors.
    pos_mask = rpn_labels == 1
    rpn_bbox_pred = rpn_bbox_pred.view(-1, 4)

    if pos_mask.sum() > 0:
        rpn_bbox_loss = F.smooth_l1_loss(
            rpn_bbox_pred[pos_mask],
            rpn_bbox_targets[pos_mask],
            reduction='mean'
        )
    else:
        # No positives in this batch: regression loss is zero.
        rpn_bbox_loss = torch.tensor(0.0, device=rpn_cls_prob.device)

    return cls_weight * rpn_cls_loss, bbox_weight * rpn_bbox_loss


def train_rpn(model: nn.Module,
              backbone: nn.Module,
              data_loader: DataLoader,
              optimizer: torch.optim.Optimizer,
              device: torch.device,
              num_epochs: int):
    """Train the RPN head on features from a frozen backbone.

    Args:
        model: the RPN module to optimize
        backbone: pretrained feature extractor, kept in eval mode
        data_loader: yields (images, gt_boxes, gt_labels) tuples
        optimizer: optimizer over the RPN parameters
        device: computation device
        num_epochs: number of passes over the dataset
    """
    model.train()
    backbone.eval()  # the backbone keeps its pretrained weights untouched

    num_batches = len(data_loader)
    for epoch in range(num_epochs):
        running_cls = 0.0
        running_bbox = 0.0

        for step, (images, gt_boxes, gt_labels) in enumerate(data_loader, start=1):
            images = [img.to(device) for img in images]
            gt_boxes = [boxes.to(device) for boxes in gt_boxes]
            gt_labels = [labs.to(device) for labs in gt_labels]

            # Feature extraction only — no gradients through the backbone.
            with torch.no_grad():
                features = backbone(torch.stack(images, dim=0))

            # RPN forward pass (training mode also produces targets).
            outputs = model(features, gt_boxes, gt_labels)

            cls_loss, bbox_loss = compute_rpn_loss(
                outputs['rpn_cls_prob'],
                outputs['rpn_bbox_pred'],
                outputs['rpn_labels'],
                outputs['rpn_bbox_targets']
            )

            # Standard zero-grad / backward / step cycle.
            optimizer.zero_grad()
            (cls_loss + bbox_loss).backward()
            optimizer.step()

            running_cls += cls_loss.item()
            running_bbox += bbox_loss.item()

            # Periodic progress report.
            if step % 10 == 0:
                print(f'Epoch [{epoch + 1}/{num_epochs}], '
                      f'Batch [{step}/{num_batches}], '
                      f'Cls Loss: {cls_loss.item():.4f}, '
                      f'Bbox Loss: {bbox_loss.item():.4f}')

        # Per-epoch averages.
        print(f'Epoch [{epoch + 1}/{num_epochs}], '
              f'Avg Cls Loss: {running_cls / num_batches:.4f}, '
              f'Avg Bbox Loss: {running_bbox / num_batches:.4f}')


def nms(boxes: torch.Tensor, scores: torch.Tensor,
        iou_threshold: float) -> torch.Tensor:
    """
    Non-maximum suppression.
    Args:
        boxes: (N, 4) [x1, y1, x2, y2]
        scores: (N,) prediction scores
        iou_threshold: boxes overlapping a kept box above this IoU are dropped
    Returns:
        keep: long tensor of indices of the kept boxes, best-score first

    Fix: the original appended a mix of Python ints and 0-dim tensors to
    `keep`, and `.nonzero().squeeze()` collapsed a single surviving index
    to a 0-dim tensor. Boolean-mask indexing and an explicit `.item()`
    remove both fragilities without changing the result.
    """
    if boxes.shape[0] == 0:
        return torch.zeros(0, dtype=torch.long, device=boxes.device)

    # Coordinates and areas of all boxes.
    x1, y1, x2, y2 = boxes.unbind(1)
    areas = (x2 - x1) * (y2 - y1)

    # Process boxes in descending score order.
    _, order = scores.sort(0, descending=True)
    keep = []

    while order.numel() > 0:
        # Keep the highest-scoring remaining box.
        i = order[0].item()
        keep.append(i)
        if order.numel() == 1:
            break

        rest = order[1:]

        # Intersection of the current box with every remaining box.
        xx1 = torch.max(x1[rest], x1[i])
        yy1 = torch.max(y1[rest], y1[i])
        xx2 = torch.min(x2[rest], x2[i])
        yy2 = torch.min(y2[rest], y2[i])

        w = (xx2 - xx1).clamp(min=0)
        h = (yy2 - yy1).clamp(min=0)
        inter = w * h

        iou = inter / (areas[i] + areas[rest] - inter)

        # Retain only boxes whose overlap is at or below the threshold.
        order = rest[iou <= iou_threshold]

    return torch.tensor(keep, dtype=torch.long, device=boxes.device)


def eval_rpn(model: nn.Module,
             backbone: nn.Module,
             data_loader: DataLoader,
             device: torch.device,
             iou_threshold: float = 0.5,
             score_threshold: float = 0.7):
    """
    Evaluate RPN proposal quality.
    Args:
        model: trained RPN
        backbone: feature extractor
        data_loader: yields (images, gt_boxes, gt_labels) as produced by
            collate_fn — three parallel tuples
        device: computation device
        iou_threshold: IoU for a proposal to count as correct (also used as
            the NMS threshold, matching the original behavior)
        score_threshold: minimum foreground probability for a proposal
    Returns:
        dict with 'recall', 'precision' and 'f1_score'

    Fixes: the original unpacked batches as (images, (gt_boxes, gt_labels)),
    which does not match collate_fn's 3-tuple and raised immediately; it
    called .to(device) on the tuple of images; and it only counted ground
    truth / predictions inside nested conditions, inflating both recall and
    precision.
    """
    model.eval()
    backbone.eval()

    total_gt = 0
    total_predictions = 0
    total_correct = 0

    with torch.no_grad():
        # collate_fn yields three parallel tuples.
        for images, gt_boxes, gt_labels in data_loader:
            images = torch.stack([img.to(device) for img in images], dim=0)
            gt_boxes = [boxes.to(device) for boxes in gt_boxes]

            # Feature extraction + RPN prediction.
            features = backbone(images)
            rpn_output = model(features)

            for i in range(len(images)):
                # Every ground-truth box counts toward recall, even when the
                # model produces no proposals for this image.
                total_gt += len(gt_boxes[i])

                scores = rpn_output['rpn_cls_prob'][i, :, 1]  # foreground prob
                bbox_deltas = rpn_output['rpn_bbox_pred'][i]
                anchors = rpn_output['anchors']

                # Filter by score before decoding and NMS.
                mask = scores > score_threshold
                if mask.sum() == 0:
                    continue

                scores = scores[mask]
                pred_boxes = apply_deltas_to_anchors(bbox_deltas[mask],
                                                     anchors[mask])

                keep = nms(pred_boxes, scores, iou_threshold)
                pred_boxes = pred_boxes[keep]

                # Every surviving proposal counts toward precision.
                total_predictions += len(pred_boxes)

                if len(gt_boxes[i]) > 0 and len(pred_boxes) > 0:
                    ious = box_iou(pred_boxes, gt_boxes[i])
                    total_correct += (ious.max(dim=1)[0] >=
                                      iou_threshold).sum().item()

    recall = total_correct / total_gt if total_gt > 0 else 0
    precision = total_correct / total_predictions if total_predictions > 0 else 0
    f1 = (2 * precision * recall / (precision + recall)
          if precision + recall > 0 else 0)

    return {
        'recall': recall,
        'precision': precision,
        'f1_score': f1
    }


def apply_deltas_to_anchors(deltas: torch.Tensor, anchors: torch.Tensor) -> torch.Tensor:
    """
    Decode predicted offsets into absolute boxes (inverse of bbox2delta).
    Args:
        deltas: (N, 4) [tx, ty, tw, th]
        anchors: (N, 4) [x1, y1, x2, y2]
    Returns:
        pred_boxes: (N, 4) [x1, y1, x2, y2]
    """
    tx, ty, tw, th = deltas.unbind(1)

    # Anchor geometry: width/height and center.
    widths = anchors[:, 2] - anchors[:, 0]
    heights = anchors[:, 3] - anchors[:, 1]
    ctr_x = anchors[:, 0] + 0.5 * widths
    ctr_y = anchors[:, 1] + 0.5 * heights

    # Predicted center shifts are scaled by anchor size; width/height are
    # exponentiated log-scale factors.
    pred_cx = ctr_x + tx * widths
    pred_cy = ctr_y + ty * heights
    pred_w = widths * torch.exp(tw)
    pred_h = heights * torch.exp(th)

    # Back to corner format.
    half_w = 0.5 * pred_w
    half_h = 0.5 * pred_h
    return torch.stack([pred_cx - half_w,
                        pred_cy - half_h,
                        pred_cx + half_w,
                        pred_cy + half_h], dim=1)


if __name__ == '__main__':
    # Select computation device
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"Using device: {device}")

    # Dataset paths
    XML_DIR = "/Users/jiangfeng/PycharmProjects/Net/Faster_RCNN/annotations_voc"
    IMG_DIR = "/Users/jiangfeng/PycharmProjects/Net/Faster_RCNN/lslm/lslm"  # replace with your image directory

    # Build dataset and loader. batch_size=1: target assignment in
    # create_target_labels concatenates gt boxes across the batch, so it is
    # only consistent for single-image batches.
    dataset = VOCDataset(xml_dir=XML_DIR, img_dir=IMG_DIR)
    train_loader = DataLoader(
        dataset,
        batch_size=1,
        shuffle=True,
        num_workers=4,
        collate_fn=collate_fn
    )

    # Initialize models: VGG16 conv features (last pooling layer dropped) as
    # backbone plus the RPN head.
    # NOTE(review): vgg16() is called without a weights argument, so it may
    # start from random initialization rather than pretrained weights —
    # confirm this is intended.
    backbone = vgg16().features[:-1].to(device)
    rpn = RPN(in_channels=512).to(device)
    optimizer = torch.optim.SGD(rpn.parameters(), lr=0.001, momentum=0.9)

    # Train the RPN
    num_epochs = 10
    train_rpn(rpn, backbone, train_loader, optimizer, device, num_epochs)

    # Evaluate on the training loader
    eval_results = eval_rpn(rpn, backbone, train_loader, device)
    print("Evaluation Results:", eval_results)

# Why are there no positive/negative samples? Likely causes: (1) the anchor
# scales (8-32 px; not multiplied by feature_stride) may be far smaller than
# the ground-truth boxes, so no anchor reaches the 0.7 positive-IoU threshold;
# (2) the target assignment lacks the standard Faster R-CNN rule that the
# highest-IoU anchor for each ground-truth box is always marked positive.
