Datasets: rcds /
Tags: legal
File size: 4,592 Bytes
import json

import datasets

# lzma is in the standard library; fall back to pylzma if it is unavailable.
try:
    import lzma as xz
except ImportError:
    import pylzma as xz

datasets.logging.set_verbosity_info()
logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """\
    """

_HOMEPAGE = ""

_LICENSE = ""

_CITATION = ""

# Root of the data files inside the repository.
_URL = "data/"
_LANGUAGES = [
    "german", "french", "italian", "swiss", "english"
]

_ENGLISH = [
    "sherlock", "bioscope", "sfu"
]

_SHERLOCKS = [
    "dev", "test_cardboard_GOLD", "test_circle_GOLD", "training"
]

_BIOSCOPES = [
    "abstracts", "full_papers"
]


class MultiLegalNegConfig(datasets.BuilderConfig):

    def __init__(self, name: str, **kwargs):
        # Pass the name to the parent BuilderConfig instead of re-assigning it.
        super().__init__(name=name, **kwargs)
        self.language = name.split("_")[0]

class MultiLegalNeg(datasets.GeneratorBasedBuilder):

    BUILDER_CONFIG_CLASS = MultiLegalNegConfig
    
    BUILDER_CONFIGS = [
        MultiLegalNegConfig(language)
        for language in _LANGUAGES + ["all"]
    ]
    # Configs are named by language only, so the default must be "all",
    # not "all_all" (which matches no config).
    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        features = datasets.Features(
            {   
                "text": datasets.Value("string"),        
                "spans": [
                    {
                        "start": datasets.Value("int64"),
                        "end": datasets.Value("int64"),
                        "token_start": datasets.Value("int64"),
                        "token_end": datasets.Value("int64"),
                        "label": datasets.Value("string")                     
                    }
                ],
                "tokens": [
                    {
                        "text": datasets.Value("string"),
                        "start": datasets.Value("int64"),
                        "end": datasets.Value("int64"),
                        "id": datasets.Value("int64"),
                        "ws": datasets.Value("bool")
                    }
                ]
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
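
    # Illustrative sketch (not taken from the data files) of what one JSONL
    # record is expected to look like given the features above; the field
    # values here are invented placeholders:
    #   {"text": "No fee is due.",
    #    "spans": [{"start": 0, "end": 2, "token_start": 0, "token_end": 0,
    #               "label": "..."}],
    #    "tokens": [{"text": "No", "start": 0, "end": 2, "id": 0, "ws": true}]}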
    
    def _split_generators(self, dl_manager):
        data_files = {
            "train": [
                "data/train/it_train.jsonl.xz",
                "data/train/fr_train.jsonl.xz",
                "data/train/de_train.jsonl.xz",
                "data/train/swiss_train.jsonl.xz",
                "data/train/en_sherlock_train.jsonl.xz",
                "data/train/en_sfu_train.jsonl.xz",
                "data/train/en_bioscope_train.jsonl.xz",
            ],
            "test": [
                "data/test/it_test.jsonl.xz",
                "data/test/fr_test.jsonl.xz",
                "data/test/de_test.jsonl.xz",
                "data/test/swiss_test.jsonl.xz",
                "data/test/en_sherlock_test.jsonl.xz",
                "data/test/en_sfu_test.jsonl.xz",
                "data/test/en_bioscope_test.jsonl.xz",
            ],
            "validation": [
                "data/validation/it_validation.jsonl.xz",
                "data/validation/fr_validation.jsonl.xz",
                "data/validation/de_validation.jsonl.xz",
                "data/validation/swiss_validation.jsonl.xz",
                "data/validation/en_sherlock_validation.jsonl.xz",
                "data/validation/en_sfu_validation.jsonl.xz",
                "data/validation/en_bioscope_validation.jsonl.xz",
            ],
        }

        # Restrict to the files of the selected language config; "all" keeps
        # every file. The prefix mapping is inferred from the file names above.
        if self.config.language != "all":
            prefix = {
                "german": "de",
                "french": "fr",
                "italian": "it",
                "swiss": "swiss",
                "english": "en",
            }[self.config.language]
            data_files = {
                split: [f for f in files if f.split("/")[-1].startswith(prefix)]
                for split, files in data_files.items()
            }

        # Let the download manager resolve the files (works both locally and
        # from the Hub) instead of opening repository-relative paths directly.
        downloaded = dl_manager.download(data_files)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": downloaded["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepaths": downloaded["test"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepaths": downloaded["validation"]},
            ),
        ]

    def _generate_examples(self, filepaths):
        # Each file is xz-compressed JSONL: one JSON object per line, matching
        # the features declared in _info().
        idx = 0
        for filepath in filepaths:
            with xz.open(filepath, "rt", encoding="utf-8") as f:
                for line in f:
                    line = line.strip()
                    if line:
                        yield idx, json.loads(line)
                        idx += 1
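
# Usage sketch: assuming this script is hosted as a Hugging Face dataset repo
# (the repo id below is a guess from the page breadcrumb and the builder class
# name, not confirmed by this file), one config per language can be loaded:
#
#   from datasets import load_dataset
#   ds = load_dataset("rcds/MultiLegalNeg", "german", trust_remote_code=True)
#   print(ds["train"][0]["text"])
#
# Passing "all" (the default config) loads every language at once.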