"""WRENCH classification dataset."""
import json
import datasets


class WrenchClassifConfig(datasets.BuilderConfig):
    """BuilderConfig for WRENCH classification datasets."""

    def __init__(self, dataset_path, **kwargs):
        super(WrenchClassifConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.dataset_path = dataset_path


class WrenchSequenceTaggingConfig(datasets.BuilderConfig):
    """BuilderConfig for WRENCH sequence tagging datasets."""

    def __init__(self, dataset_path, **kwargs):
        super(WrenchSequenceTaggingConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.dataset_path = dataset_path


class Wrench(datasets.GeneratorBasedBuilder):
    """WRENCH classification dataset."""

    # Relation-style datasets whose examples also carry entity1/entity2 fields.
    ENTITY_RELATED_DATASETS = ["spouse", "cdr", "semeval", "chemprot"]

    # Sequence tagging datasets (listed here but not registered in BUILDER_CONFIGS).
    SEQUENCETAGGING_DATASET = [
        {"name": "conll", "dataset_path": "./conll"},
        {"name": "laptopreview", "dataset_path": "./laptopreview"},
        {"name": "bc5cdr", "dataset_path": "./bc5cdr"},
        {"name": "mit-movies", "dataset_path": "./mit-movies"},
        {"name": "mit-restaurants", "dataset_path": "./mit-restaurants"},
        {"name": "ncbi-disease", "dataset_path": "./ncbi-disease"},
        {"name": "ontonotes", "dataset_path": "./ontonotes"},
        {"name": "wikigold", "dataset_path": "./wikigold"},
    ]

    # Text classification datasets; each config name maps to a local data directory.
    CLASSIF_DATASETS = [
        {"name": "imdb", "dataset_path": "./imdb"},
        {"name": "agnews", "dataset_path": "./agnews"},
        {"name": "cdr", "dataset_path": "./cdr"},
        {"name": "chemprot", "dataset_path": "./chemprot"},
        {"name": "semeval", "dataset_path": "./semeval"},
        {"name": "sms", "dataset_path": "./sms"},
        {"name": "spouse", "dataset_path": "./spouse"},
        {"name": "trec", "dataset_path": "./trec"},
        {"name": "yelp", "dataset_path": "./yelp"},
        {"name": "youtube", "dataset_path": "./youtube"},
    ]

    # Only the classification datasets are exposed as builder configs.
    configs_clf = [
        WrenchClassifConfig(name=i["name"], dataset_path=i["dataset_path"]) for i in CLASSIF_DATASETS
    ]
    BUILDER_CONFIGS = configs_clf

    def _info(self):
        name = self.config.name
        if name in self.ENTITY_RELATED_DATASETS:
            return datasets.DatasetInfo(
                features=datasets.Features(
                    {
                        "text": datasets.Value("string"),
                        "label": datasets.Value("int8"),
                        "entity1": datasets.Value("string"),
                        "entity2": datasets.Value("string"),
                        "weak_labels": datasets.Sequence(datasets.Value("int8")),
                    }
                )
            )
        else:
            return datasets.DatasetInfo(
                features=datasets.Features(
                    {
                        "text": datasets.Value("string"),
                        "label": datasets.Value("int8"),
                        "weak_labels": datasets.Sequence(datasets.Value("int8")),
                    }
                )
            )

    def _split_generators(self, dl_manager):
        dataset_path = self.config.dataset_path
        name = self.config.name
        train_path = dl_manager.download_and_extract(f"{dataset_path}/train.json")
        valid_path = dl_manager.download_and_extract(f"{dataset_path}/valid.json")
        test_path = dl_manager.download_and_extract(f"{dataset_path}/test.json")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path, "name": name}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": valid_path, "name": name}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": test_path, "name": name}
            ),
        ]

    def _generate_examples(self, filepath, name):
        """Generate examples."""
        with open(filepath, encoding="utf-8") as f:
            json_data = json.load(f)
        if name in self.ENTITY_RELATED_DATASETS:
            for idx in json_data:
                data = json_data[idx]
                text = data["data"]["text"]
                weak_labels = data["weak_labels"]
                label = data["label"]
                entity1 = data["data"]["entity1"]
                entity2 = data["data"]["entity2"]
                yield int(idx), {
                    "text": text,
                    "label": label,
                    "entity1": entity1,
                    "entity2": entity2,
                    "weak_labels": weak_labels,
                }
        else:
            if name == "trec":
                # For trec, drop the first whitespace-separated token of the raw text.
                for idx in json_data:
                    data = json_data[idx]
                    text = " ".join(data["data"]["text"].split(" ")[1:])
                    weak_labels = data["weak_labels"]
                    label = data["label"]
                    yield int(idx), {
                        "text": text,
                        "label": label,
                        "weak_labels": weak_labels,
                    }
            else:
                for idx in json_data:
                    data = json_data[idx]
                    text = data["data"]["text"]
                    weak_labels = data["weak_labels"]
                    label = data["label"]
                    yield int(idx), {
                        "text": text,
                        "label": label,
                        "weak_labels": weak_labels,
                    }
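

if __name__ == "__main__":
    # Minimal usage sketch, not part of the loader itself. It assumes the
    # per-dataset JSON files sit next to this script under the relative paths
    # listed in CLASSIF_DATASETS (e.g. ./youtube/train.json, ./youtube/valid.json,
    # ./youtube/test.json), and that the installed `datasets` version still
    # supports loading from a local script (newer releases may also require
    # trust_remote_code=True).
    ds = datasets.load_dataset(__file__, name="youtube")
    print(ds)
    print(ds["train"][0])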