# span-based-dataset-sample.py
# Loading script for a span-based (NER) token-classification dataset.
import json

import datasets
from sklearn.model_selection import train_test_split

# BIO-style label set used for the token-level NER tags.
_DATASET_LABELS = ['O', 'B-CITY', 'I-CITY', 'B-NAMES', 'I-NAMES', 'B-DATE', 'I-DATE']
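
# Each line of data.jsonl is expected to be a JSON object whose keys match what
# _generate_examples reads below, e.g. (illustrative example only):
#   {"words": ["John", "visited", "Paris"], "pos": ["B-NAMES", "O", "B-CITY"]}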


class Custom(datasets.GeneratorBasedBuilder):
    """Builds a token-classification dataset from a local data.jsonl file."""

    def _info(self):
        return datasets.DatasetInfo(
            description='',
            features=datasets.Features(
                {
                    'id': datasets.Value('string'),
                    'tokens': datasets.Sequence(datasets.Value('string')),
                    'ner_tags': datasets.Sequence(
                        datasets.features.ClassLabel(names=_DATASET_LABELS)
                    ),
                }
            ),
            supervised_keys=None,
            homepage='',
            citation='',
        )

    def _split_generators(self, dl_manager):
        # data.jsonl is expected to sit next to this script in the dataset repository;
        # download_and_extract resolves it to a local path.
        data_path = dl_manager.download_and_extract("data.jsonl")
        with open(data_path, 'r') as file:
            lines = file.readlines()
        # Hold out 20% of the lines for validation, with a fixed seed for reproducibility.
        train_lines, valid_lines = train_test_split(lines, test_size=0.2, random_state=42)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'lines': train_lines}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={'lines': valid_lines}),
        ]

    def _generate_examples(self, lines):
        for guid, line in enumerate(lines):
            data = json.loads(line)
            yield guid, {
                'id': str(guid),
                'tokens': data['words'],
                # The source JSONL stores the NER tags under the 'pos' key.
                'ner_tags': data['pos'],
            }
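

# Usage sketch (assumes this script sits next to data.jsonl; the path below is a
# local placeholder, and recent versions of `datasets` may additionally require
# trust_remote_code=True for script-based datasets):
#
#   from datasets import load_dataset
#   ds = load_dataset("path/to/span-based-dataset-sample.py")
#   print(ds["train"][0])  # {'id': '0', 'tokens': [...], 'ner_tags': [...]}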