import json

import datasets

_BASE_URL = "https://huggingface.co/datasets/Arabic-Clip/xtd_11/resolve/main/test/"
# Languages available in the XTD benchmark.
_LANGUAGES = ["ar", "de", "en", "es", "fr", "jp", "ko", "pl", "ru", "tr", "zh"]


class XTD_11Config(datasets.BuilderConfig):
    """Builder config for the XTD_11 dataset."""

    def __init__(self, subset, **kwargs):
        super(XTD_11Config, self).__init__(**kwargs)
        # "all" expands to every language; otherwise load the single requested language.
        if subset != "all":
            self.subset = [subset]
        else:
            self.subset = _LANGUAGES


class XTD_11(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIG_CLASS = XTD_11Config
    BUILDER_CONFIGS = [
        XTD_11Config(name=subset, subset=subset, version=datasets.Version("1.1.0", ""), description="")
        for subset in _LANGUAGES
    ] + [
        XTD_11Config(name="all", subset="all", version=datasets.Version("1.1.0", ""), description="")
    ]

    def _info(self):
        # All language files share the same structure: a caption, an image name, and an image URL.
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "image_name": datasets.Value("string"),
                    "url": datasets.Value("string"),
                }
            ),
            description="XTD: a benchmark for testing model capability in multilingual image retrieval.",
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # One JSON file per requested language, hosted on the Hugging Face Hub.
        data_urls = [f"{_BASE_URL}{lang}.json" for lang in self.config.subset]
        # All languages are exposed under a single TEST split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepaths": dl_manager.download(data_urls)},
            )
        ]

    def _generate_examples(self, filepaths):
        """Yields examples."""
        # Files are read line by line; every non-empty line is one JSON record.
        # A single running counter keeps example ids unique across languages.
        id_ = 0
        for filepath in filepaths:
            with open(filepath, encoding="utf-8") as f:
                for row in f:
                    if row.strip():
                        yield id_, json.loads(row)
                        id_ += 1
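

# A minimal usage sketch, not part of the loading script itself.
# Assumptions: this file is saved locally as xtd_11.py; recent versions of the
# `datasets` library may additionally require passing trust_remote_code=True.
if __name__ == "__main__":
    from datasets import load_dataset

    # Load a single language subset ("en") of the test split via this script.
    english = load_dataset("xtd_11.py", "en", split="test")
    print(english[0])  # e.g. {"text": ..., "image_name": ..., "url": ...}

    # The "all" config concatenates every language into one test split.
    everything = load_dataset("xtd_11.py", "all", split="test")
    print(len(everything))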