khalidalt committed on
Commit
b15d7dc
1 Parent(s): b1900fa

Create xtd_11.py

xtd_11.py ADDED
@@ -0,0 +1,86 @@
+ import json
+
+ import datasets
+ from datasets import SplitGenerator
+
+ _Base_url = "https://huggingface.co/datasets/khalidalt/xtd_11/resolve/main/test/"
+ _Languages = ["ar", "de", "en", "es", "fr", "jp", "ko", "pl", "ru", "tr", "zh"]  # All languages covered by the benchmark
+
+
+ class XTD_11Config(datasets.BuilderConfig):
+     """Builder config for the XTD-11 dataset."""
+
+     def __init__(self, subset, **kwargs):
+         super(XTD_11Config, self).__init__(**kwargs)
+
+         # A single language code, or "all" for every language.
+         if subset != "all":
+             self.subset = [subset]
+         else:
+             self.subset = _Languages
+
+
+ class XTD_11(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("1.0.0")
+     BUILDER_CONFIG_CLASS = XTD_11Config
+     BUILDER_CONFIGS = [
+         XTD_11Config(
+             name=subset,
+             subset=subset,
+             version=datasets.Version("1.1.0", ""),
+             description=f"XTD-11 test split for the '{subset}' language.",
+         )
+         for subset in _Languages
+     ] + [
+         XTD_11Config(
+             name="all",
+             subset="all",
+             version=datasets.Version("1.1.0", ""),
+             description="XTD-11 test split for all 11 languages.",
+         )
+     ]
+
+     def _info(self):
+         # All per-language JSON files share the same structure.
+         return datasets.DatasetInfo(
+             features=datasets.Features({
+                 "text": datasets.Value("string"),
+                 "image_name": datasets.Value("string"),
+                 "url": datasets.Value("string"),
+             }),
+             description="XTD: a benchmark for evaluating image retrieval across 11 languages.",
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # Download one JSON file per requested language and expose each as its own split.
+         splits = []
+         for lang in self.config.subset:
+             filepath = dl_manager.download(f"{_Base_url}{lang}.json")
+             splits.append(SplitGenerator(
+                 name=lang,
+                 gen_kwargs={"filepath": filepath},
+             ))
+
+         # A combined split that concatenates every requested language.
+         filepaths = dl_manager.download([f"{_Base_url}{lang}.json" for lang in self.config.subset])
+         splits.append(SplitGenerator(
+             name="combined_test",
+             gen_kwargs={"filepaths": filepaths},
+         ))
+
+         return splits
+
+     def _generate_examples(self, filepath=None, filepaths=None):
+         """Yields examples from a single JSON file or from a list of files."""
+         if filepath:
+             # Single-language split.
+             with open(filepath, encoding="utf-8") as f:
+                 for idx, row in enumerate(json.load(f)):
+                     yield idx, row
+         elif filepaths:
+             # Combined split: example keys must stay unique across files.
+             key = 0
+             for fp in filepaths:
+                 with open(fp, encoding="utf-8") as f:
+                     for row in json.load(f):
+                         yield key, row
+                         key += 1
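For reference, once this script sits at the root of the dataset repo, a subset can be loaded through the standard load_dataset API. The snippet below is a minimal sketch assuming the builder above is used as-is (config names are the language codes plus "all", and each language is exposed as its own split); recent versions of the datasets library also require trust_remote_code=True for script-backed datasets.

from datasets import load_dataset

# English test captions: config name and split name are both the language code.
english = load_dataset("khalidalt/xtd_11", "en", split="en", trust_remote_code=True)

# All 11 languages at once; each language comes back as its own split.
all_langs = load_dataset("khalidalt/xtd_11", "all", trust_remote_code=True)
print(all_langs["de"][0])  # {"text": ..., "image_name": ..., "url": ...}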