|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""The SOTAB dataset is a large-scale dataset for the task of column type annotation on semi-structured tables.""" |
|
|
|
import os |
|
|
|
import datasets |
|
|
|
|
|
|
|
# BibTeX citation for the dataset card.
# TODO(review): placeholder — the string currently contains only blank lines;
# fill in the real SOTAB citation before release.
_CITATION = """\



"""


# Free-text description used for the dataset card.
# TODO(review): placeholder — currently just a newline; describe the
# column-type-annotation task here.
_DESCRIPTION = """\
"""


# Homepage URL shown on the dataset card; empty placeholder.
_HOMEPAGE = ""

# License string passed to DatasetInfo; empty placeholder.
_LICENSE = ""


# NOTE(review): unused, and the empty parentheses make this an empty *tuple*,
# not a URL string — the actual download URLs are hard-coded inside
# `_split_generators` further down the file.
_DATA_URL = (
)
|
|
|
|
|
class WikiTableQuestions(datasets.GeneratorBasedBuilder):
    """Builder for SOTAB: column type annotation on semi-structured web tables.

    Each example pairs one column of a table with its type label(s) and the
    full table content.

    NOTE(review): the class name ``WikiTableQuestions`` is a leftover from the
    loader this script was adapted from; it is kept unchanged so any external
    references keep working, but it should be renamed to match SOTAB in a
    coordinated change.
    """

    VERSION = datasets.Version("1.0.2")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="random-split-1",
            version=VERSION,
            description="",
        ),
    ]

    DEFAULT_CONFIG_NAME = "random-split-1"

    def _info(self):
        """Return the :class:`datasets.DatasetInfo` describing the example schema.

        Schema: ``id`` (string), ``column_index`` (int32), ``label`` (sequence
        of strings — a column may carry multiple type annotations), and
        ``table`` with ``header``, ``rows`` and ``name``.
        """
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "column_index": datasets.Value("int32"),
                "label": datasets.features.Sequence(datasets.Value("string")),
                "table": {
                    "header": datasets.features.Sequence(datasets.Value("string")),
                    "rows": datasets.features.Sequence(
                        datasets.features.Sequence(datasets.Value("string"))
                    ),
                    "name": datasets.Value("string"),
                },
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the three SOTAB archives and declare the splits.

        BUG FIX: the original code built every ``main_filepath`` from an
        undefined name ``root_dir`` (a guaranteed ``NameError``); the
        per-split extracted directories were computed but never used.  Each
        split now joins against its own extracted root.
        """
        train_url = "https://huggingface.co/datasets/shivangibithel/SOTAB/resolve/main/CTA_Training.zip"
        dev_url = "https://huggingface.co/datasets/shivangibithel/SOTAB/resolve/main/CTA_Validation.zip"
        test_url = "https://huggingface.co/datasets/shivangibithel/SOTAB/resolve/main/CTA_Test.zip"

        cta_training = os.path.join(dl_manager.download_and_extract(train_url), "CTA_Training")
        cta_validation = os.path.join(dl_manager.download_and_extract(dev_url), "CTA_Validation")
        cta_test = os.path.join(dl_manager.download_and_extract(test_url), "CTA_Test")

        # Every split's manifest is named after the active config
        # (the original built the identical name three times).
        split_file = "{}.json.gz".format(self.config.name)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "main_filepath": os.path.join(cta_training, "Train", split_file),
                    "root_dir": cta_training,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "main_filepath": os.path.join(cta_test, "Test", split_file),
                    "root_dir": cta_test,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "main_filepath": os.path.join(cta_validation, "Validation", split_file),
                    "root_dir": cta_validation,
                },
            ),
        ]

    def _read_table_from_file(self, table_name: str, root_dir: str) -> dict:
        """Load one table from ``root_dir`` as ``{"header", "rows", "name"}``.

        ``table_name`` arrives with a ``.csv`` suffix but the files on disk
        are tab-separated ``.tsv`` files; the first line is the header and
        every subsequent line is one row.  Embedded newlines inside cells are
        flattened to spaces.
        """

        def _extract_table_content(_line: str):
            # Split one TSV line into cells, normalizing stray newlines.
            _vals = [_.replace("\n", " ").strip() for _ in _line.strip("\n").split("\t")]
            return _vals

        rows = []

        table_name = table_name.replace(".csv", ".tsv")
        with open(os.path.join(root_dir, table_name), "r", encoding="utf8") as table_f:
            table_lines = table_f.readlines()

        header = _extract_table_content(table_lines[0])
        for line in table_lines[1:]:
            rows.append(_extract_table_content(line))
        return {"header": header, "rows": rows, "name": table_name}

    def _generate_examples(self, main_filepath, root_dir):
        """Yield ``(idx, example)`` pairs matching the schema declared in ``_info``.

        BUG FIX: the original yielded ``"question"``/``"answers"`` keys
        (leftovers from the WikiTableQuestions loader) which do not exist in
        the declared features; every example would be rejected.  The fields
        are now mapped onto ``column_index`` and ``label``.

        NOTE(review): the TSV column order (id, column index, table name,
        labels) is inferred from the declared features — confirm against the
        actual manifest files.  Also, despite the ``.json.gz`` suffix the file
        is opened as plain UTF-8 text; if the files are genuinely gzipped this
        needs ``gzip.open`` instead — TODO confirm.
        """
        with open(main_filepath, encoding="utf-8") as f:
            # Skip the header line of the manifest.
            next(f)
            for idx, line in enumerate(f):
                example_id, column_index, table_name, label = line.strip("\n").split("\t")
                # A column may have several annotations, separated by "|".
                labels = label.split("|")

                table_content = self._read_table_from_file(table_name, root_dir)

                yield idx, {
                    "id": example_id,
                    "column_index": int(column_index),
                    "label": labels,
                    "table": table_content,
                }
|
|