# data_preprocessing.py

import os
import json
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image
from transformers import BertTokenizer

# Initialize the BERT tokenizer once at module level so all dataset instances
# share it. NOTE(review): this triggers a download / disk load at import time.
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

class CocoDataset(Dataset):
    """COCO captions dataset yielding (input_ids, attention_mask, image) triples.

    Each item corresponds to ONE annotation (caption), so images with several
    captions appear multiple times across the dataset.
    """

    def __init__(self, image_dir: str, captions_file: str, transform=None,
                 max_length: int = 64):
        """
        Args:
            image_dir:     Directory containing the COCO images.
            captions_file: Path to the COCO captions JSON annotation file.
            transform:     Optional image transform applied to the PIL image.
            max_length:    Maximum number of text tokens (captions are padded
                           and truncated to exactly this length).
        """
        self.image_dir = image_dir
        self.transform = transform
        self.max_length = max_length

        # Load the COCO annotation file.
        with open(captions_file, 'r', encoding='utf-8') as f:
            captions_data = json.load(f)
        self.annotations = captions_data['annotations']
        # Map image_id -> file_name so we can resolve image paths per annotation.
        self.image_id_to_file = {
            img['id']: img['file_name']
            for img in captions_data.get('images', [])
        }

    def __len__(self) -> int:
        # One sample per annotation, not per image.
        return len(self.annotations)

    def __getitem__(self, idx: int):
        ann = self.annotations[idx]
        caption = ann['caption']
        image_id = ann['image_id']
        # Resolve the file name; fall back to COCO's zero-padded 12-digit
        # naming convention when the 'images' section is absent.
        fname = self.image_id_to_file.get(image_id, f"{image_id:012d}.jpg")
        img_path = os.path.join(self.image_dir, fname)

        # --- Image processing ---
        # Use a context manager so the file handle is closed promptly even if
        # convert() or the transform raises (Pillow-recommended usage).
        with Image.open(img_path) as img:
            image = img.convert("RGB")
        if self.transform is not None:
            image = self.transform(image)

        # --- Text processing ---
        encoding = tokenizer(
            caption,
            padding='max_length',
            truncation=True,
            max_length=self.max_length,
            return_tensors='pt'
        )
        # squeeze(0) drops the batch dim the tokenizer adds with return_tensors='pt'.
        input_ids      = encoding['input_ids'].squeeze(0)       # (max_length,)
        attention_mask = encoding['attention_mask'].squeeze(0)  # (max_length,)

        return input_ids, attention_mask, image


def get_dataloaders(
    batch_size: int = 32,
    num_workers: int = 12,
    pin_memory: bool = True,
    persistent_workers: bool = False,
    prefetch_factor: int = 4
):
    """
    Build the train and validation DataLoaders with multiprocessing / prefetch
    options.

    Args:
        batch_size:         Samples per batch.
        num_workers:        Number of DataLoader worker processes.
        pin_memory:         Pin tensors in page-locked memory (faster CPU->GPU copies).
        persistent_workers: Keep worker processes alive between epochs.
        prefetch_factor:    Batches each worker prefetches ahead of time.

    Returns:
        (train_loader, val_loader)
    """
    # Adjust these paths to wherever the COCO data actually lives.
    coco_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'data'))
    train_img_dir       = os.path.join(coco_root, "train2017")
    val_img_dir         = os.path.join(coco_root, "val2017")
    train_caption_file  = os.path.join(coco_root, "annotations", "captions_train2017.json")
    val_caption_file    = os.path.join(coco_root, "annotations", "captions_val2017.json")

    transform = transforms.Compose([
        transforms.Resize((64, 64)),  # Option A: downscale inputs to 64x64
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std =[0.229, 0.224, 0.225]),
    ])

    train_dataset = CocoDataset(train_img_dir, train_caption_file, transform=transform)
    val_dataset   = CocoDataset(val_img_dir,   val_caption_file,   transform=transform)

    # Shared kwargs for both loaders. prefetch_factor and persistent_workers
    # are only valid with worker processes: DataLoader raises a ValueError if
    # they are passed with num_workers == 0, so forward them conditionally.
    loader_kwargs = {
        "batch_size": batch_size,
        "num_workers": num_workers,
        "pin_memory": pin_memory,
    }
    if num_workers > 0:
        loader_kwargs["persistent_workers"] = persistent_workers
        loader_kwargs["prefetch_factor"] = prefetch_factor

    train_loader = DataLoader(train_dataset, shuffle=True, **loader_kwargs)
    val_loader   = DataLoader(val_dataset, shuffle=False, **loader_kwargs)

    return train_loader, val_loader
