Datasets:
rcds
/

ArXiv:
Tags:
legal
License:
MultiLegalNeg / MultiLegalNeg.py
ramonachristen's picture
Update MultiLegalNeg.py
8086b3a
raw
history blame
No virus
4.02 kB
import json
import datasets
import pandas as pd
from huggingface_hub.file_download import hf_hub_url
try:
import lzma as xz
except ImportError:
import pylzma as xz
# Show download/generation progress at INFO level; module-scoped logger
# follows the `datasets` library convention for loading scripts.
datasets.logging.set_verbosity_info()
logger = datasets.logging.get_logger(__name__)
_DESCRIPTION ="""\
"""
_HOMEPAGE = ""
_LICENSE = ""
_CITATION = ""
_URL = {
'data/'
}
_LANGUAGES = [
"german", "french", "italian", "swiss", "english"
]
_ENGLISH = [
"sherlock", "bioscope", "sfu"
]
_ENGLISH_NAMES = [
"dev", "test_cardboard_GOLD", "test_circle_GOLD", "training", "abstracts", "full_papers"
]
_BIOSCOPES = [
"abstracts", "full_papers"
]
class MultiLegalNegConfig(datasets.BuilderConfig):
    """BuilderConfig for MultiLegalNeg.

    The config name's first underscore-separated segment selects the
    language subset (e.g. ``"german"`` or ``"all"``).
    """

    def __init__(self, name: str, **kwargs):
        super().__init__(**kwargs)
        self.name = name
        # Everything before the first "_" is the language key.
        self.language, _, _ = name.partition("_")
class MultiLegalNeg(datasets.GeneratorBasedBuilder):
    """Loader for the MultiLegalNeg negation-scope corpora.

    Each example carries raw text plus token- and span-level negation
    annotations, read from ``.jsonl.xz`` files in the repository's
    ``data/`` folder.
    """

    BUILDER_CONFIG_CLASS = MultiLegalNegConfig

    # One config per language plus "all", which concatenates every language.
    BUILDER_CONFIGS = [
        MultiLegalNegConfig(f"{language}")
        for language in _LANGUAGES + ['all']
    ]

    # NOTE(review): the original default was 'all_all', which matches none of
    # the generated config names above; 'all' is the config that exists.
    DEFAULT_CONFIG_NAME = 'all'

    def _info(self):
        """Declare the example schema (text + annotation spans + tokens)."""
        features = datasets.Features(
            {
                "text": datasets.Value("string"),
                # Character/token offsets of each negation annotation.
                "spans": [
                    {
                        "start": datasets.Value("int64"),
                        "end": datasets.Value("int64"),
                        "token_start": datasets.Value("int64"),
                        "token_end": datasets.Value("int64"),
                        "label": datasets.Value("string")
                    }
                ],
                # Tokenization of `text`; `ws` flags a trailing whitespace.
                "tokens": [
                    {
                        "text": datasets.Value("string"),
                        "start": datasets.Value("int64"),
                        "end": datasets.Value("int64"),
                        "id": datasets.Value("int64"),
                        "ws": datasets.Value("bool")
                    }
                ]
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,  # was omitted; harmless (empty string) but complete
            citation=_CITATION
        )

    def _split_generators(self, dl_manager):
        """Download every available ``.jsonl.xz`` file for the selected language(s).

        Not every corpus provides every split, so missing files are skipped
        individually rather than aborting the remaining downloads.
        """
        languages = _LANGUAGES if self.config.language == "all" else [self.config.language]
        split_generators = []
        for split in [datasets.Split.TRAIN]:
            filepaths = []
            for language in languages:
                if language == "english":
                    # English is sharded per corpus and per split name.
                    for ds in _ENGLISH:
                        for name in _ENGLISH_NAMES:
                            try:
                                filepaths.append(dl_manager.download(f'data/english/{ds}_{name}.jsonl.xz'))
                            except Exception:
                                # NOTE(review): the original used a bare
                                # `except: break`, which (a) swallowed
                                # KeyboardInterrupt/SystemExit and (b) aborted
                                # the whole name loop on the FIRST missing
                                # file, skipping combinations that do exist
                                # (e.g. bioscope lacks "dev" but has
                                # "abstracts"). Skip just the missing file.
                                continue
                try:
                    filepaths.append(dl_manager.download(f'data/{language}.jsonl.xz'))
                except Exception:
                    # Same fix as above: skip this language's missing file
                    # instead of breaking out of the language loop.
                    continue
            split_generators.append(
                datasets.SplitGenerator(name=split, gen_kwargs={'filepaths': filepaths})
            )
        return split_generators

    def _generate_examples(self, filepaths):
        """Yield ``(id, example)`` pairs from the downloaded xz-compressed JSONL files.

        Malformed files are logged and skipped; ids are sequential across files.
        """
        id_ = 0
        for filepath in filepaths:
            if filepath:
                logger.info("Generating examples from = %s", filepath)
                try:
                    with xz.open(open(filepath, 'rb'), 'rt', encoding='utf-8') as f:
                        # Stream line by line instead of materializing the
                        # whole file with list(f).
                        for json_str in f:
                            example = json.loads(json_str)
                            # Guard against blank/non-object lines.
                            if example is not None and isinstance(example, dict):
                                yield id_, example
                                id_ += 1
                except Exception:
                    logger.exception("Error while processing file %s", filepath)