|
"""WRENCH classification dataset.""" |
|
|
|
import json |
|
|
|
import datasets |
|
|
|
|
|
class WrenchClassifConfig(datasets.BuilderConfig):
    """BuilderConfig for a WRENCH text-classification dataset.

    Args:
        dataset_path: Base path (local or remote) holding the dataset's
            ``train.json`` / ``valid.json`` / ``test.json`` split files.
        **kwargs: Forwarded to :class:`datasets.BuilderConfig` (e.g. ``name``).
    """

    def __init__(self, dataset_path, **kwargs):
        # Python 3 zero-argument super(); version is pinned for reproducibility.
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.dataset_path = dataset_path
|
|
|
|
|
class WrenchSequenceTaggingConfig(datasets.BuilderConfig):
    """BuilderConfig for a WRENCH sequence-tagging dataset.

    Args:
        dataset_path: Base path (local or remote) holding the dataset's
            ``train.json`` / ``valid.json`` / ``test.json`` split files.
        **kwargs: Forwarded to :class:`datasets.BuilderConfig` (e.g. ``name``).
    """

    def __init__(self, dataset_path, **kwargs):
        # BUG FIX: the original called super(WrenchClassifConfig, self), which
        # raises TypeError because this class does not inherit from
        # WrenchClassifConfig. Zero-argument super() resolves correctly.
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.dataset_path = dataset_path
|
|
|
|
|
class Wrench(datasets.GeneratorBasedBuilder):
    """WRENCH weak-supervision benchmark: text-classification datasets.

    Each example carries the raw text, a gold label, and the per-labeling-
    function weak labels. Entity-pair (relation-style) datasets additionally
    expose the two entity mentions.
    """

    # Relation-style datasets whose records carry entity1/entity2 mentions.
    ENTITY_RELATED_DATASETS = ["spouse", "cdr", "semeval", "chemprot"]

    # NOTE(review): these sequence-tagging datasets (and the
    # WrenchSequenceTaggingConfig class) are declared but never added to
    # BUILDER_CONFIGS, so they are currently not loadable — confirm whether
    # sequence-tagging support is intended.
    SEQUENCETAGGING_DATASET = [
        {"name": "conll", "dataset_path": "./conll"},
        {"name": "laptopreview", "dataset_path": "./laptopreview"},
        {"name": "bc5cdr", "dataset_path": "./bc5cdr"},
        {"name": "mit-movies", "dataset_path": "./mit-movies"},
        {"name": "mit-restaurants", "dataset_path": "./mit-restaurants"},
        {"name": "ncbi-disease", "dataset_path": "./ncbi-disease"},
        {"name": "ontonotes", "dataset_path": "./ontonotes"},
        {"name": "wikigold", "dataset_path": "./wikigold"},
    ]

    # Classification datasets shipped with this script; paths are relative to
    # the script's location.
    CLASSIF_DATASETS = [
        {"name": "imdb", "dataset_path": "./imdb"},
        {"name": "agnews", "dataset_path": "./agnews"},
        {"name": "cdr", "dataset_path": "./cdr"},
        {"name": "chemprot", "dataset_path": "./chemprot"},
        {"name": "semeval", "dataset_path": "./semeval"},
        {"name": "sms", "dataset_path": "./sms"},
        {"name": "spouse", "dataset_path": "./spouse"},
        {"name": "trec", "dataset_path": "./trec"},
        {"name": "yelp", "dataset_path": "./yelp"},
        {"name": "youtube", "dataset_path": "./youtube"},
    ]

    configs_clf = [
        WrenchClassifConfig(name=i["name"], dataset_path=i["dataset_path"]) for i in CLASSIF_DATASETS
    ]
    BUILDER_CONFIGS = configs_clf

    def _info(self):
        """Return the feature schema for the selected config.

        Entity-related datasets add ``entity1``/``entity2`` string fields on
        top of the common ``text``/``label``/``weak_labels`` schema.
        """
        common = {
            "text": datasets.Value("string"),
            "label": datasets.Value("int8"),
            "weak_labels": datasets.Sequence(datasets.Value("int8")),
        }
        if self.config.name in self.ENTITY_RELATED_DATASETS:
            common["entity1"] = datasets.Value("string")
            common["entity2"] = datasets.Value("string")
        return datasets.DatasetInfo(features=datasets.Features(common))

    def _split_generators(self, dl_manager):
        """Download the three JSON split files and declare the splits."""
        dataset_path = self.config.dataset_path
        name = self.config.name
        # One WRENCH split per JSON file: train / valid / test.
        split_files = {
            datasets.Split.TRAIN: "train.json",
            datasets.Split.VALIDATION: "valid.json",
            datasets.Split.TEST: "test.json",
        }
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "filepath": dl_manager.download_and_extract(f"{dataset_path}/{filename}"),
                    "name": name,
                },
            )
            for split, filename in split_files.items()
        ]

    def _generate_examples(self, filepath, name):
        """Yield ``(key, example)`` pairs from a WRENCH-format JSON split file.

        The file maps a string index to a record holding ``data`` (with the
        raw ``text`` and, for entity datasets, the entity mentions), the gold
        ``label``, and the ``weak_labels`` list.
        """
        with open(filepath, encoding="utf-8") as f:
            json_data = json.load(f)

        with_entities = name in self.ENTITY_RELATED_DATASETS
        for idx, data in json_data.items():
            text = data["data"]["text"]
            if name == "trec":
                # TREC records embed an extra leading token in the text;
                # drop the first whitespace-separated token, as upstream does.
                text = " ".join(text.split(" ")[1:])
            example = {
                "text": text,
                "label": data["label"],
                "weak_labels": data["weak_labels"],
            }
            if with_entities:
                example["entity1"] = data["data"]["entity1"]
                example["entity2"] = data["data"]["entity2"]
            yield int(idx), example