from torch.utils.data import Dataset
from torch.utils.data import DataLoader, RandomSampler
import json
import os

class LlavaDataset(Dataset):
    """Map-style dataset over a JSON-lines caption file.

    Each non-blank line of the annotation file is a JSON object expected to
    contain at least the keys ``"filename"`` (image file name, relative to
    the image directory) and ``"caption"``. Items are returned as
    ``(image_filepath, caption)`` string pairs; actual image loading is left
    to the consumer.
    """

    def __init__(self, dataset_dir: str, text_filename: str, image_dir: str) -> None:
        """
        Args:
            dataset_dir: Root directory of the dataset.
            text_filename: JSON-lines annotation file, relative to dataset_dir.
            image_dir: Image directory, relative to dataset_dir.
        """
        self.dataset_dir = dataset_dir
        self.image_dir = os.path.join(self.dataset_dir, image_dir)
        self.text_filepath = os.path.join(self.dataset_dir, text_filename)

        # Parsed annotation records, loaded eagerly once at construction.
        self.text_data: list[dict] = []
        # Explicit encoding: without it, open() falls back to the platform's
        # locale encoding and non-ASCII captions may fail to decode.
        with open(self.text_filepath, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if line:  # skip blank lines
                    self.text_data.append(json.loads(line))

    def __len__(self) -> int:
        return len(self.text_data)

    def __getitem__(self, index: int) -> tuple[str, str]:
        record = self.text_data[index]
        image_filepath = os.path.join(self.image_dir, record["filename"])
        return image_filepath, record["caption"]


def get_loader(dataset_dir, text_filename, image_dir, batch_size):
    """Build a shuffling DataLoader over a LlavaDataset.

    Args:
        dataset_dir: Root directory of the dataset.
        text_filename: JSON-lines annotation file, relative to dataset_dir.
        image_dir: Image directory, relative to dataset_dir.
        batch_size: Number of samples per batch.

    Returns:
        A DataLoader yielding default-collated batches of
        (image_filepath, caption) pairs, drawn in random order.
    """
    dataset = LlavaDataset(dataset_dir, text_filename, image_dir)
    return DataLoader(
        dataset,
        sampler=RandomSampler(dataset),
        batch_size=batch_size,
        num_workers=0,
    )


if __name__ == "__main__":
    # Smoke test: fetch one batch and inspect the collated filepath field.
    # BUG FIX: the original call was get_loader("dataset/TextOCR", 2), which
    # passes 2 positional args to a 4-parameter function and raises TypeError.
    # NOTE(review): "data.jsonl" / "images" are placeholders — confirm the
    # actual annotation filename and image subdirectory for this dataset.
    loader = get_loader("dataset/TextOCR", "data.jsonl", "images", 2)
    for image_filepath, caption in loader:
        print(type(image_filepath))
        break