Datasets:
rcds
/

ArXiv:
Tags:
legal
License:
File size: 4,634 Bytes
959866c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26e5e47
959866c
 
 
a2ba1af
959866c
 
a2ba1af
959866c
 
 
a2ba1af
959866c
a2ba1af
959866c
 
a2ba1af
959866c
 
 
 
 
 
 
 
 
 
 
 
 
4cc6ab0
959866c
 
 
 
 
 
 
 
 
 
6bbcc40
959866c
 
 
 
 
 
 
 
 
d8115c8
26e5e47
 
0a3916b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26e5e47
 
 
 
 
 
 
 
 
 
 
 
959866c
5a49f7c
26e5e47
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
import json

import datasets
import pandas as pd
from huggingface_hub.file_download import hf_hub_url

try:
    import lzma as xz
except ImportError:
    import pylzma as xz

# Emit dataset-builder progress at INFO level and create the module logger
# shared by all builder methods below.
datasets.logging.set_verbosity_info()
logger = datasets.logging.get_logger(__name__)

_DESCRIPTION ="""\

    """

_HOMEPAGE =  ""

_LICENSE = ""

_CITATION = ""

_URL = {
    'data/'
}
_LANGUAGES = [
    "de", "fr", "it", "swiss", "en"
]


class MultiLegalNegConfig(datasets.BuilderConfig):
    """BuilderConfig for the multilingual legal negation-scope dataset.

    The config name doubles as the language selector: the text before the
    first underscore is the language code (e.g. "de", "en", or "all").
    """

    def __init__(self, name: str, **kwargs):
        # Forward the name to the base config (the original only assigned it
        # afterwards, leaving the base-class constructor with no name).
        super().__init__(name=name, **kwargs)
        self.name = name
        # "en_sfu" -> "en"; a plain code like "de" is unchanged.
        self.language = name.split("_")[0]

class MultiLegalNeg(datasets.GeneratorBasedBuilder):
    """Builder for xz-compressed JSONL negation-scope files.

    One config per language code plus an ``all`` config that loads every
    language. Each example carries the raw text, labeled character spans,
    and the tokenization (see ``_info`` for the exact feature schema).
    """

    BUILDER_CONFIG_CLASS = MultiLegalNegConfig

    BUILDER_CONFIGS = [
        MultiLegalNegConfig(f"{language}")
        for language in _LANGUAGES + ['all']
    ]
    # Must match one of the config names above; the original 'all_all'
    # matched none of them.
    DEFAULT_CONFIG_NAME = 'all'

    def _info(self):
        """Return the DatasetInfo describing the example schema."""
        features = datasets.Features(
            {
                "text": datasets.Value("string"),
                # Labeled negation spans, addressed both by character offset
                # and by token index into "tokens" below.
                "spans": [
                    {
                        "start": datasets.Value("int64"),
                        "end": datasets.Value("int64"),
                        "token_start": datasets.Value("int64"),
                        "token_end": datasets.Value("int64"),
                        "label": datasets.Value("string")
                    }
                ],
                # Tokenization of "text"; "ws" flags a trailing whitespace.
                "tokens": [
                    {
                        "text": datasets.Value("string"),
                        "start": datasets.Value("int64"),
                        "end": datasets.Value("int64"),
                        "id": datasets.Value("int64"),
                        "ws": datasets.Value("bool")
                    }
                ]
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-language files and build one generator per split.

        Only files whose name starts with a requested language code are
        downloaded (the original computed ``languages`` but never used it,
        so every language was fetched regardless of the config).
        """
        languages = _LANGUAGES if self.config.language == "all" else [self.config.language]

        data_files = {
            "train": [
                "data/train/it_train.jsonl.xz",
                "data/train/fr_train.jsonl.xz",
                "data/train/de_train.jsonl.xz",
                "data/train/swiss_train.jsonl.xz",
                "data/train/en_sherlock_train.jsonl.xz",
                "data/train/en_sfu_train.jsonl.xz",
                "data/train/en_bioscope_train.jsonl.xz"
            ],
            "test": [
                "data/test/it_test.jsonl.xz",
                "data/test/fr_test.jsonl.xz",
                "data/test/de_test.jsonl.xz",
                "data/test/swiss_test.jsonl.xz",
                "data/test/en_sherlock_test.jsonl.xz",
                "data/test/en_sfu_test.jsonl.xz",
                "data/test/en_bioscope_test.jsonl.xz"
            ],
            "validation": [
                "data/validation/it_validation.jsonl.xz",
                "data/validation/fr_validation.jsonl.xz",
                "data/validation/de_validation.jsonl.xz",
                "data/validation/swiss_validation.jsonl.xz",
                "data/validation/en_sherlock_validation.jsonl.xz",
                "data/validation/en_sfu_validation.jsonl.xz",
                "data/validation/en_bioscope_validation.jsonl.xz"
            ]
        }

        split_generators = []
        for split, file_names in data_files.items():
            filepaths = []
            for file_name in file_names:
                # Files are named "<lang>_..."; skip languages not selected.
                base_name = file_name.rsplit('/', 1)[-1]
                if not any(base_name.startswith(f"{lang}_") for lang in languages):
                    continue
                try:
                    filepaths.append(dl_manager.download(file_name))
                except Exception:
                    # Best-effort: one missing file should not abort the
                    # remaining downloads (the original bare `except: break`
                    # silently dropped every file after the first failure).
                    logger.exception("Could not download %s", file_name)
            split_generators.append(
                datasets.SplitGenerator(name=split, gen_kwargs={'filepaths': filepaths})
            )

        return split_generators

    def _generate_examples(self, filepaths):
        """Yield (id, example) pairs from the downloaded JSONL.xz files.

        Examples are numbered consecutively across all files. Malformed
        files are logged and skipped rather than failing the whole split.
        """
        id_ = 0
        for filepath in filepaths:
            if not filepath:
                continue
            logger.info("Generating examples from = %s", filepath)
            try:
                # Pass the path to xz.open directly; the original wrapped an
                # open() handle that was never closed. Streaming line by line
                # also avoids materializing the whole file in memory.
                with xz.open(filepath, 'rt', encoding='utf-8') as f:
                    for json_str in f:
                        example = json.loads(json_str)
                        # isinstance already excludes None.
                        if isinstance(example, dict):
                            yield id_, example
                            id_ += 1
            except Exception:
                logger.exception("Error while processing file %s", filepath)