# sroie2019_sample / sroie2019_sample.py
import datasets

_DESCRIPTION = """\
Sample token-classification dataset derived from the SROIE 2019 receipt data:
each example is a sequence of tokens with NER tags drawn from
company, date, address, total and O.
"""
_CITATION = """\
"""
# Each path can be a local file or a URL.
_TRAIN_DOWNLOAD_URL = "train.txt"
_VAL_DOWNLOAD_URL = "val.txt"
CLASS_NAMES = ["company", "date", "address", "total", "O"]
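
# Illustrative layout of train.txt / val.txt, inferred from the parsing logic
# in _generate_examples below (the sample lines are made up): one
# space-separated "token tag" pair per line, with a blank line between receipts.
#
#   STARMART company
#   123 address
#   MAIN address
#
#   2019-03-01 date
#   12.50 total
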

class CustomTokenDataset(datasets.GeneratorBasedBuilder):
    """CustomTokenDataset dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(names=sorted(CLASS_NAMES))
                    ),
                }
            ),
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": _TRAIN_DOWNLOAD_URL,
            "val": _VAL_DOWNLOAD_URL,
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["val"]},
            ),
        ]
    def _generate_examples(self, filepath):
        """Yields (key, example) pairs parsed from a space-separated token/tag file."""
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            ner_tags = []
            for line in f:
                if line == "" or line == "\n":
                    # A blank line marks the end of one receipt/example.
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        ner_tags = []
                else:
                    # Tokens and tags are space separated: "<token> <tag>".
                    splits = line.split(" ")
                    tokens.append(splits[0])
                    ner_tags.append(splits[1].rstrip())
            # Yield the last example if the file does not end with a blank line.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }
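

# A minimal usage sketch, not part of the original script: assuming this file is
# saved locally as "sroie2019_sample.py" next to "train.txt" and "val.txt", the
# builder can be loaded by passing the script path to datasets.load_dataset.
# The script filename below is an assumption for illustration.
if __name__ == "__main__":
    dataset = datasets.load_dataset("sroie2019_sample.py")
    print(dataset)
    # Each example has the shape {"id": "...", "tokens": [...], "ner_tags": [...]}.
    print(dataset["train"][0])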