"""The SOTAB dataset is a large-scale dataset for the task of column type annotation on semi-structured tables.""" |
|
import os

import datasets
import pandas as pd
|
_CITATION = """\ |
|
@inproceedings{madoc63868, pages = {14--19}, booktitle = {SemTab 2022 : Proceedings of the Semantic Web Challenge on Tabular Data to Knowledge Graph Matching, co-located with the 21st International semantic Web Conference (ISWC 2022), virtual conference, October 23-27, 2022}, address = {Aachen, Germany}, editor = {Vasilis Efthymiou and Ernesto Jim{\'e}nez-Ruiz and Jiaoyan Chen and Vincenzo Cutrona and Oktie Hassanzadeh and Juan Sequeda and Kavitha Srinivas and Nora Abdelmageed and Madelon Hulsebos}, journal = {CEUR Workshop Proceedings}, year = {2022}, title = {SOTAB: The WDC Schema.org table annotation benchmark}, publisher = {RWTH Aachen}, language = {Englisch}, author = {Keti Korini and Ralph Peeters and Christian Bizer}, volume = {3320}, abstract = {Understanding the semantics of table elements is a prerequisite for many data integration and data discovery tasks. Table annotation is the task of labeling table elements with terms from a given vocabulary. This paper presents the WDC Schema.org Table Annotation Benchmark (SOTAB) for comparing the performance of table annotation systems. SOTAB covers the column type annotation (CTA) and columns property annotation (CPA) tasks. SOTAB provides {$\sim$}50,000 annotated tables for each of the tasks containing Schema.org data from different websites. The tables cover 17 different types of entities such as movie, event, local business, recipe, job posting, or product. The tables stem from the WDC Schema.org Table Corpus which was created by extracting Schema.org annotations from the Common Crawl. Consequently, the labels used for annotating columns in SOTAB are part of the Schema.org vocabulary. The benchmark covers 91 types for CTA and 176 properties for CPA distributed across textual, numerical and date/time columns. The tables are split into fixed training, validation and test sets. The test sets are further divided into subsets focusing on specific challenges, such as columns with missing values or different value formats, in order to allow a more fine-grained comparison of annotation systems. The evaluation of SOTAB using Doduo and TURL shows that the benchmark is difficult to solve for current state-of-the-art systems.}, url = {https://madoc.bib.uni-mannheim.de/63868/} } |
|
""" |
|
|
|
|
|
_DESCRIPTION = """\
Understanding the semantics of table elements is a prerequisite for many data integration and data discovery tasks. Table annotation is the task of labeling table elements with terms from a given vocabulary. This paper presents the WDC Schema.org Table Annotation Benchmark (SOTAB) for comparing the performance of table annotation systems. SOTAB covers the column type annotation (CTA) and columns property annotation (CPA) tasks. SOTAB provides ∼50,000 annotated tables for each of the tasks containing Schema.org data from different websites. The tables cover 17 different types of entities such as movie, event, local business, recipe, job posting, or product. The tables stem from the WDC Schema.org Table Corpus which was created by extracting Schema.org annotations from the Common Crawl. Consequently, the labels used for annotating columns in SOTAB are part of the Schema.org vocabulary. The benchmark covers 91 types for CTA and 176 properties for CPA distributed across textual, numerical and date/time columns. The tables are split into fixed training, validation and test sets. The test sets are further divided into subsets focusing on specific challenges, such as columns with missing values or different value formats, in order to allow a more fine-grained comparison of annotation systems. The evaluation of SOTAB using Doduo and TURL shows that the benchmark is difficult to solve for current state-of-the-art systems.
"""

_HOMEPAGE = "https://webdatacommons.org/structureddata/sotab/"

_LICENSE = ""
|
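# A sketch of one generated example with illustrative (not real) values:
# "column_index" identifies the annotated column within "rows", and "label"
# is the Schema.org term assigned to that column. The label and file name
# below are hypothetical placeholders, not entries from the corpus.
#
#   {
#       "column_index": "0",
#       "label": "Recipe/name",
#       "table": {
#           "header": ["col1", "col2"],
#           "rows": [["Pancakes", "PT30M"], ["Omelette", "PT10M"]],
#           "name": "Recipe_example.com_0.json.gz",
#       },
#   }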
|
class SOTAB(datasets.GeneratorBasedBuilder):
    """The SOTAB dataset is a large-scale dataset for the task of column type annotation on semi-structured tables."""

    VERSION = datasets.Version("1.0.0")
|
    def _info(self):
        features = datasets.Features(
            {
                "column_index": datasets.Value("string"),
                "label": datasets.Value("string"),
                "table": {
                    "header": datasets.features.Sequence(datasets.Value("string")),
                    "rows": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
                    "name": datasets.Value("string"),
                },
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
|
    def _split_generators(self, dl_manager):
        train_url = "https://data.dws.informatik.uni-mannheim.de/structureddata/sotab/CTA_Training.zip"
        dev_url = "https://data.dws.informatik.uni-mannheim.de/structureddata/sotab/CTA_Validation.zip"
        test_url = "https://data.dws.informatik.uni-mannheim.de/structureddata/sotab/CTA_Test.zip"

        # download_and_extract returns the local directory each archive was extracted to.
        CTA_Training = dl_manager.download_and_extract(train_url)
        CTA_Validation = dl_manager.download_and_extract(dev_url)
        CTA_Test = dl_manager.download_and_extract(test_url)

        # Each archive contains a ground-truth CSV plus a folder of gzipped table files.
        train_file = "CTA_training_gt.csv"
        test_file = "CTA_test_gt.csv"
        dev_file = "CTA_validation_gt.csv"
|
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"main_filepath": os.path.join(CTA_Training, train_file), "root_dir": os.path.join(CTA_Training, "Train")},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"main_filepath": os.path.join(CTA_Test, test_file), "root_dir": os.path.join(CTA_Test, "Test")},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"main_filepath": os.path.join(CTA_Validation, dev_file), "root_dir": os.path.join(CTA_Validation, "Validation")},
            ),
        ]
|
    def _read_table_from_file(self, table_name: str, root_dir: str):
        """Load a gzip-compressed JSON-lines table and convert it to the features schema."""
        df_ = pd.read_json(os.path.join(root_dir, table_name), compression="gzip", lines=True)
        # All cell values are serialized as strings to match the string-typed features.
        df_ = df_.astype(str)

        # The source tables carry no header row, so synthetic names col1..colN are used.
        header = ["col" + str(j + 1) for j in range(df_.shape[1])]
        rows = df_.values.tolist()

        return {"header": header, "rows": rows, "name": table_name}
|
    def _generate_examples(self, main_filepath, root_dir):
        # The ground-truth CSV lists one annotated column per row: the table
        # file name, the annotated column's index, and its Schema.org label.
        df = pd.read_csv(main_filepath, encoding="utf8")
        df = df.astype({"table_name": "string", "column_index": "string", "label": "string"})
        for ind in df.index:
            table_name = df["table_name"][ind]
            column_index = df["column_index"][ind]
            label = df["label"][ind]
            table_content = self._read_table_from_file(table_name, root_dir)
            yield ind, {"column_index": column_index, "label": label, "table": table_content}
|
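# A minimal usage sketch, assuming this file is saved locally as a loading
# script (e.g. "sotab.py") and run with a datasets version that still supports
# script-based loading. The archives are several GB, so this downloads and
# processes the full validation split before printing the first example.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, split="validation")
    example = dataset[0]
    print(example["label"], example["column_index"])
    print(example["table"]["header"])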