# MultiLegalNeg.py: dataset loading script from the Hugging Face repository rcds/MultiLegalNeg.

import json

import datasets

try:
    import lzma as xz
except ImportError:
    import pylzma as xz  # fallback when the standard-library lzma module is unavailable

datasets.logging.set_verbosity_info()
logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """\
Multilingual dataset for negation scope resolution, combining German, French,
Italian and Swiss legal corpora with the English Sherlock (ConanDoyle-neg),
BioScope and SFU negation corpora.
"""

_HOMEPAGE = ""
_LICENSE = ""
_CITATION = ""

_URL = "data/"

_LANGUAGES = [
    "german", "french", "italian", "swiss", "english"
]

# The English portion combines three existing negation corpora.
_ENGLISH = [
    "sherlock", "bioscope", "sfu"
]

# Sub-files of the Sherlock (ConanDoyle-neg) corpus.
_SHERLOCKS = [
    "dev", "test_cardboard_GOLD", "test_circle_GOLD", "training"
]

# Sub-files of the BioScope corpus.
_BIOSCOPES = [
    "abstracts", "full_papers"
]

class MultiLegalNegConfig(datasets.BuilderConfig):

    def __init__(self, name: str, **kwargs):
        super().__init__(name=name, **kwargs)
        self.language = name.split("_")[0]


class MultiLegalNeg(datasets.GeneratorBasedBuilder):

    BUILDER_CONFIG_CLASS = MultiLegalNegConfig

    # One config per language, plus an "all" config covering every language.
    BUILDER_CONFIGS = [
        MultiLegalNegConfig(f"{language}")
        for language in _LANGUAGES + ["all"]
    ]
    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        features = datasets.Features(
            {
                "text": datasets.Value("string"),
                "spans": [
                    {
                        "start": datasets.Value("int64"),
                        "end": datasets.Value("int64"),
                        "token_start": datasets.Value("int64"),
                        "token_end": datasets.Value("int64"),
                        "label": datasets.Value("string"),
                    }
                ],
                "tokens": [
                    {
                        "text": datasets.Value("string"),
                        "start": datasets.Value("int64"),
                        "end": datasets.Value("int64"),
                        "id": datasets.Value("int64"),
                        "ws": datasets.Value("bool"),
                    }
                ],
            }
        )
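
        # Sketch of a single record as implied by the schema above; the values
        # here are illustrative assumptions, not taken from the actual data:
        # {"text": "No error was found.",
        #  "spans": [{"start": 0, "end": 2, "token_start": 0, "token_end": 0, "label": "negation"}],
        #  "tokens": [{"text": "No", "start": 0, "end": 2, "id": 0, "ws": true}]}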
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_files = {
            "train": [
                "data/train/it_train.jsonl.xz",
                "data/train/fr_train.jsonl.xz",
                "data/train/de_train.jsonl.xz",
                "data/train/swiss_train.jsonl.xz",
                "data/train/en_sherlock_train.jsonl.xz",
                "data/train/en_sfu_train.jsonl.xz",
                "data/train/en_bioscope_train.jsonl.xz",
            ],
            "test": [
                "data/test/it_test.jsonl.xz",
                "data/test/fr_test.jsonl.xz",
                "data/test/de_test.jsonl.xz",
                "data/test/swiss_test.jsonl.xz",
                "data/test/en_sherlock_test.jsonl.xz",
                "data/test/en_sfu_test.jsonl.xz",
                "data/test/en_bioscope_test.jsonl.xz",
            ],
            "validation": [
                "data/validation/it_validation.jsonl.xz",
                "data/validation/fr_validation.jsonl.xz",
                "data/validation/de_validation.jsonl.xz",
                "data/validation/swiss_validation.jsonl.xz",
                "data/validation/en_sherlock_validation.jsonl.xz",
                "data/validation/en_sfu_validation.jsonl.xz",
                "data/validation/en_bioscope_validation.jsonl.xz",
            ],
        }

        # Filename prefixes for each configuration language, inferred from the
        # file names above.
        prefixes = {
            "german": "de", "french": "fr", "italian": "it",
            "swiss": "swiss", "english": "en",
        }

        def _keep(file):
            # The "all" config keeps every file; otherwise keep only the files
            # whose prefix matches the selected language.
            if self.config.language == "all":
                return True
            return file.split("/")[-1].startswith(prefixes[self.config.language] + "_")

        def _read(split):
            # Download each compressed JSONL file for this split and parse one
            # record per line.
            examples = []
            for file in filter(_keep, data_files[split]):
                filepath = dl_manager.download(file)
                with xz.open(filepath, "rt", encoding="utf-8") as f:
                    examples.extend(json.loads(line) for line in f if line.strip())
            return examples

        return [
            self._split_generate(datasets.Split.TRAIN, data=_read("train")),
            self._split_generate(datasets.Split.TEST, data=_read("test")),
            self._split_generate(datasets.Split.VALIDATION, data=_read("validation")),
        ]

    def _split_generate(self, split, data):
        return datasets.SplitGenerator(
            name=split,
            gen_kwargs={"data": data},
        )

    def _generate_examples(self, data):
        for i, example in enumerate(data):
            yield i, example
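

# Minimal usage sketch (assumes this script is hosted as rcds/MultiLegalNeg on
# the Hugging Face Hub; "german" is one of the config names declared above):
#
#   from datasets import load_dataset
#   dataset = load_dataset("rcds/MultiLegalNeg", "german", trust_remote_code=True)
#   print(dataset["train"][0]["text"])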