import json

import datasets

_Base_url = "https://huggingface.co/datasets/khalidalt/xtd_11/resolve/main/test/"
_Languages = ["ar", "de", "en", "es", "fr", "jp", "ko", "pl", "ru", "tr", "zh"]  # Languages covered by the XTD-11 test set

class XTD_11Config(datasets.BuilderConfig):
    """ Builder config for Joud Dataset. """

    def __init__(self, subset, **kwargs):
        super(XTD_11Config, self).__init__(**kwargs)

        # "all" selects every supported language; otherwise a single language is loaded.
        if subset != "all":
            self.subset = [subset]
        else:
            self.subset = _Languages

            
class XTD_11(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIG_CLASS = XTD_11Config
    BUILDER_CONFIGS = [
        XTD_11Config(
            name=subset,
            subset=subset,
            version=datasets.Version("1.1.0", ""),
            description="",
        )
        for subset in _Languages
    ] + [
        XTD_11Config(
            name="all",
            subset="all",
            version=datasets.Version("1.1.0", ""),
            description="",
        )
    ]

    def _info(self):
        # All per-language JSON files share the same schema.
        return datasets.DatasetInfo(
            features=datasets.Features({
                "text": datasets.Value("string"),
                "image_name": datasets.Value("string"),
                "url": datasets.Value("string"),
            }),
            description="XTD-11: a multilingual benchmark for evaluating image retrieval models.",
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # One JSON file per selected language, hosted in the dataset repository.
        data_urls = [f"{_Base_url}{lang}.json" for lang in self.config.subset]

        # All selected languages are exposed through a single TEST split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepaths": dl_manager.download(data_urls)},
            )
        ]

    def _generate_examples(self, filepaths=None):
        """Yields examples."""
        # Each downloaded file is read as JSON Lines: one JSON object per line.
        id_ = 0
        for filepath in filepaths:
            with open(filepath, encoding="utf-8") as f:
                for row in f:
                    row = row.strip()
                    if row:  # skip blank lines
                        data = json.loads(row)
                        yield id_, data
                        id_ += 1
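
# Usage sketch (assumes this script is published on the Hugging Face Hub as
# "khalidalt/xtd_11", the repository referenced by _Base_url, and that the
# `datasets` library is installed; the config name "en" is one of the
# per-language configs defined in BUILDER_CONFIGS above):
#
#     from datasets import load_dataset
#
#     xtd_en = load_dataset("khalidalt/xtd_11", "en", split="test")
#     print(xtd_en[0]["text"], xtd_en[0]["image_name"], xtd_en[0]["url"])
#
#     # Passing "all" loads every language into the single TEST split.
#     xtd_all = load_dataset("khalidalt/xtd_11", "all", split="test")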