ramonachristen committed
Commit 959866c
1 Parent(s): 0b78b08

Create MultiLegalNeg.py

Files changed (1)
  1. MultiLegalNeg.py +120 -0
MultiLegalNeg.py ADDED
@@ -0,0 +1,120 @@
+ import json
+
+ import datasets
+ import pandas as pd
+ from huggingface_hub.file_download import hf_hub_url
+
+ # Prefer the standard-library lzma module; fall back to pylzma if it is unavailable.
+ try:
+     import lzma as xz
+ except ImportError:
+     import pylzma as xz
+
+ datasets.logging.set_verbosity_info()
+ logger = datasets.logging.get_logger(__name__)
+
+ _DESCRIPTION = """\
+
+ """
+
+ _HOMEPAGE = ""
+
+ _LICENSE = ""
+
+ _CITATION = ""
+
+ _URL = {
+     'data/'
+ }
+ _LANGUAGES = [
+     "german", "french"
+ ]
+
+
+ class MultiLegalNegConfig(datasets.BuilderConfig):
+
+     def __init__(self, name: str, **kwargs):
+         super(MultiLegalNegConfig, self).__init__(**kwargs)
+         self.name = name
+         self.language = name.split("_")[0]
+
+
+ class MultiLegalNeg(datasets.GeneratorBasedBuilder):
+
+     BUILDER_CONFIG_CLASS = MultiLegalNegConfig
+
+     # One configuration per language, plus an 'all' configuration covering every language.
+     BUILDER_CONFIGS = [
+         MultiLegalNegConfig(f"{language}")
+         for language in _LANGUAGES + ['all']
+     ]
+     DEFAULT_CONFIG_NAME = 'all'
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "text": datasets.Value("string"),
+                 "spans": [
+                     {
+                         "start": datasets.Value("int64"),
+                         "end": datasets.Value("int64"),
+                         "token_start": datasets.Value("int64"),
+                         "token_end": datasets.Value("int64"),
+                         "label": datasets.Value("string"),
+                         "text": datasets.Value("string"),
+                         "pattern": datasets.Value("int64")
+                     }
+                 ],
+                 "tokens": [
+                     {
+                         "text": datasets.Value("string"),
+                         "start": datasets.Value("int64"),
+                         "end": datasets.Value("int64"),
+                         "id": datasets.Value("int64"),
+                         "ws": datasets.Value("bool")
+                     }
+                 ],
+                 "multineg": datasets.Value("bool")
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             citation=_CITATION
+         )
+
+     def _split_generators(self, dl_manager):
+         languages = _LANGUAGES if self.config.language == "all" else [self.config.language]
+
+         split_generators = []
+         for split in [datasets.Split.TRAIN]:
+             filepaths = []
+             for language in languages:
+                 try:
+                     filepaths.append(dl_manager.download(f'data/{language}.jsonl.xz'))
+                 except Exception:
+                     # Stop collecting files as soon as one language archive cannot be downloaded.
+                     break
+
+             split_generators.append(
+                 datasets.SplitGenerator(name=split, gen_kwargs={'filepaths': filepaths})
+             )
+
+         return split_generators
+
+     def _generate_examples(self, filepaths):
+         id_ = 0
+         for filepath in filepaths:
+             if filepath:
+                 logger.info("Generating examples from = %s", filepath)
+                 try:
+                     # Each file is an xz-compressed JSON Lines archive with one example per line.
+                     with xz.open(open(filepath, 'rb'), 'rt', encoding='utf-8') as f:
+                         json_list = list(f)
+
+                     for json_str in json_list:
+                         example = json.loads(json_str)
+                         if example is not None and isinstance(example, dict):
+                             yield id_, example
+                             id_ += 1
+
+                 except Exception:
+                     logger.exception("Error while processing file %s", filepath)
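For reference, below is a minimal usage sketch showing how a dataset built from this loading script could be consumed with the datasets library. The repository id is a placeholder, since the dataset's full Hub name is not shown on this page, and the printed fields simply follow the features schema declared in _info(). Depending on the installed datasets version, loading a script-based dataset may also require passing trust_remote_code=True.

from datasets import load_dataset

# Placeholder repository id: substitute the actual Hub repo that hosts MultiLegalNeg.py.
REPO_ID = "rcds/<dataset-name>"

# Configuration names come from BUILDER_CONFIGS above: "german", "french", or "all".
dataset = load_dataset(REPO_ID, "german", split="train")

# Each record follows the schema declared in _info():
# "text" (string), "spans" (list of span dicts), "tokens" (list of token dicts), "multineg" (bool).
example = dataset[0]
print(example["text"])
print([span["label"] for span in example["spans"]])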