Datasets: rcds/
Tags: legal
File size: 3,813 Bytes

import json
import lzma as xz  # stdlib since Python 3.3

import datasets

datasets.logging.set_verbosity_info()
logger = datasets.logging.get_logger(__name__)

_DESCRIPTION ="""\

    """

_HOMEPAGE =  ""

_LICENSE = ""

_CITATION = ""

_URL = {
    'data/'
}
# "swiss" is not an ISO language code; it matches the naming of the Swiss data files.
_LANGUAGES = [
    "de", "fr", "it", "swiss", "en"
]
# Subset suffixes as they appear in the data file names; "" means no subset.
_SUBSETS = [
    "_sherlock", "_sfu", "_bioscope", "_dalloux", ""
]

# Valid builder config names: a language, optionally combined with a subset.
_BUILDS = ['de', 'fr', 'it', 'swiss', 'fr_dalloux', 'fr_all', 'en_bioscope', 'en_sherlock', 'en_sfu', 'en_all', 'all_all']



class MultiLegalNegConfig(datasets.BuilderConfig):

    def __init__(self, name: str, **kwargs):
        super().__init__(name=name, **kwargs)
        # A config name is "<language>" or "<language>_<subset>".
        self.language = name.split("_")[0]
        self.subset = f'_{name.split("_")[1]}' if len(name.split("_")) == 2 else ""
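
# Illustrative config-name mapping, derived from the parsing above (a sketch):
#   "de"         -> language "de",  subset ""         -> files "de_<split>.jsonl.xz"
#   "fr_dalloux" -> language "fr",  subset "_dalloux" -> files "fr_dalloux_<split>.jsonl.xz"
#   "all_all"    -> language "all", subset "_all"     -> every language/subset combination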

class MultiLegalNeg(datasets.GeneratorBasedBuilder):

    BUILDER_CONFIG_CLASS = MultiLegalNegConfig
    
    BUILDER_CONFIGS = [
        MultiLegalNegConfig(build) for build in _BUILDS
    ]

    def _info(self):
        features = datasets.Features(
            {   
                "text": datasets.Value("string"),        
                "spans": [
                    {
                        "start": datasets.Value("int64"),
                        "end": datasets.Value("int64"),
                        "token_start": datasets.Value("int64"),
                        "token_end": datasets.Value("int64"),
                        "label": datasets.Value("string")                     
                    }
                ],
                "tokens": [
                    {
                        "text": datasets.Value("string"),
                        "start": datasets.Value("int64"),
                        "end": datasets.Value("int64"),
                        "id": datasets.Value("int64"),
                        "ws": datasets.Value("bool")
                    }
                ]
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
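
    # A yielded example follows the schema above; the values here are
    # hypothetical, for illustration only:
    # {
    #     "text": "Das ist kein Problem.",
    #     "spans": [{"start": 8, "end": 12, "token_start": 2, "token_end": 2,
    #                "label": "..."}],
    #     "tokens": [{"text": "Das", "start": 0, "end": 3, "id": 0, "ws": True}],
    # }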
    
    def _split_generators(self, dl_manager):
        languages = _LANGUAGES if self.config.language == "all" else [self.config.language]
        subsets = _SUBSETS if self.config.subset == "_all" else [self.config.subset]

        split_generators = []
        for split in [datasets.Split.TRAIN, datasets.Split.TEST, datasets.Split.VALIDATION]:
            filepaths = []
            for language in languages:
                for subset in subsets:
                    try:
                        filepaths.append(
                            dl_manager.download(f"data/{split}/{language}{subset}_{split}.jsonl.xz")
                        )
                    except Exception:
                        # Not every language/subset combination exists; skip
                        # missing files rather than aborting the remaining subsets.
                        continue
            split_generators.append(
                datasets.SplitGenerator(name=split, gen_kwargs={"filepaths": filepaths})
            )

        return split_generators
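
    # Example resolved download path for config "fr_dalloux" and the train
    # split: data/train/fr_dalloux_train.jsonl.xz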

    def _generate_examples(self, filepaths):
        id_ = 0
        for filepath in filepaths:
            if filepath:
                logger.info("Generating examples from = %s", filepath)
                try:
                    # Each file is an xz-compressed JSON-lines file with one
                    # example object per line.
                    with xz.open(filepath, "rt", encoding="utf-8") as f:
                        for json_str in f:
                            example = json.loads(json_str)
                            if isinstance(example, dict):
                                yield id_, example
                                id_ += 1
                except Exception:
                    logger.exception("Error while processing file %s", filepath)