"""
@Description :   加载 RefCOCO / RefCOCOg 数据集
@Author      :   tqychy 
@Time        :   2025/08/23 17:08:59
"""
import sys

sys.path.append("./")
sys.path.append("./dataset")
import os

import torch
from torch.utils.data import Dataset

from dataset.raw.refcoco import REFER


class RefCOCODataset(Dataset):
    def __init__(self, *args, data_root="./dataset/scripts/refcoco/data", **kwargs):
        """
        RefCOCO/RefCOCOg dataset.

        Args:
            *args: exactly two positionals, ``(cfg, logger)``; stored as
                ``self.cfg`` / ``self.logger``. ``logger`` may be ``None``.
            data_root: root directory of the RefCOCO data files.
            **kwargs:
                dataset (str): "refcoco", "refcoco+" or "refcocog"
                    (default "refcocog").
                splitBy (str): annotation variant; "google" for refcocog,
                    "unc" for refcoco/refcoco+ (default "google", matching
                    the previous hard-coded value).
                split (str): which split to load (default "val").
                max_len (int): cap on sample count; -1 means no cap.
        """
        super().__init__()
        self.cfg, self.logger = args
        self.data_root = data_root
        self.dataset = kwargs.get("dataset", "refcocog")
        # BUG FIX: the original passed the undefined name `dataset` to REFER,
        # raising NameError on construction — use self.dataset instead.
        # splitBy is now configurable (refcoco/refcoco+ annotations use "unc").
        split_by = kwargs.get("splitBy", "google")
        self.refer = REFER(data_root=data_root, dataset=self.dataset, splitBy=split_by)
        self.ref_ids = self.refer.getRefIds(split=kwargs.get("split", "val"))
        max_len = kwargs.get("max_len", -1)
        self.max_len = -1 if max_len == -1 else min(max_len, len(self.ref_ids))

        # Guarded: the script entry point constructs this class with logger=None.
        if self.logger is not None:
            self.logger.info(
                f"Loaded dataset {self.dataset} with {len(self.ref_ids)} samples."
            )

    def __len__(self):
        # max_len == -1 means "use everything".
        return len(self.ref_ids) if self.max_len == -1 else self.max_len

    def __getitem__(self, idx):
        """Return one referring-expression sample as a plain dict."""
        ref_id = self.ref_ids[idx]
        ref = self.refer.loadRefs(ref_id)[0]

        image_id = ref["image_id"]
        category_id = ref["category_id"]
        sentences = ref["sentences"][0]["sent"]  # use only the first sentence

        # Image path & category label.
        img_info = self.refer.loadImgs(image_id)[0]
        # FIX: nested double quotes inside a double-quoted f-string are a
        # SyntaxError on Python < 3.12 — use single quotes for the inner key.
        image_path = os.path.join(
            self.data_root,
            f"images/mscoco/images/train2014/{img_info['file_name']}",
        )
        category = self.refer.loadCats(category_id)[0]

        # Bounding box as [x_min, y_min, width, height].
        bbox = torch.tensor(self.refer.getRefBox(ref["ref_id"]))

        return {
            "image_path": image_path,
            "bbox": bbox,
            "sentences": sentences,
            "category": category,
        }

if __name__ == "__main__":
    import matplotlib
    import matplotlib.pyplot as plt
    import torchvision.transforms as T
    from matplotlib.patches import Rectangle
    from torch.utils.data import DataLoader
    from PIL import Image

    matplotlib.use('Agg')
    save_path = "./dataset/refcoco_sample_test"
    os.makedirs(save_path, exist_ok=True)

    dataset = RefCOCODataset(None, None, dataset="refcocog", split="val")
    dataloader = DataLoader(dataset, batch_size=1, shuffle=False)

    print(f"Total {len(dataloader)} samples.")
    for i, data in enumerate(dataloader):
        image_path = data["image_path"][0]
        bbox = data["bbox"]
        sentences = data["sentences"]

        transforms = T.ToTensor()
        image = Image.open(image_path).convert("RGB")
        image = transforms(image)
        image = image.squeeze().permute(1, 2, 0)

        ax = plt.gca()
        ax.imshow(image)
        box_plot = Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], fill=False, edgecolor='green', linewidth=3)
        ax.add_patch(box_plot)

        for sid, sent in enumerate(sentences):
            print(f"{sid+1}. {sent["sent"]}")
        plt.savefig(os.path.join(save_path, f"{i}.png"))
        plt.clf()
        c = input("Press Enter to continue, q to quit.")
        if c == 'q':
            break  