Commit 0a3916b
ramonachristen committed
1 Parent(s): 6050814

Update MultiLegalNeg.py

Files changed (1):
  1. MultiLegalNeg.py +51 -46
MultiLegalNeg.py CHANGED
@@ -33,8 +33,8 @@ _ENGLISH = [
     "sherlock", "bioscope", "sfu"
 ]

-_ENGLISH_NAMES = [
-    "dev", "test_cardboard_GOLD", "test_circle_GOLD", "training", "abstracts", "full_papers"
+_SHERLOCKS = [
+    "dev", "test_cardboard_GOLD", "test_circle_GOLD", "training"
 ]

 _BIOSCOPES = [
@@ -89,49 +89,54 @@ class MultiLegalNeg(datasets.GeneratorBasedBuilder):
             citation=_CITATION
         )

-    def _split_generators(self, dl_manager):
-
-        languages = _LANGUAGES if self.config.language == "all" else [self.config.language]
-
-        split_generators = []
-        for split in [datasets.Split.TRAIN]:
-            filepaths = []
-            for language in languages:
-                if language == "english":
-                    for ds in _ENGLISH:
-                        for name in _ENGLISH_NAMES:
-                            try:
-                                filepaths.append(dl_manager.download((f'data/english/{ds}_{name}.jsonl.xz')))
-                            except:
-                                break
-                try:
-                    filepaths.append(dl_manager.download((f'data/{language}.jsonl.xz')))
-                except:
-                    break
-
-            split_generators.append(
-                datasets.SplitGenerator(name=split, gen_kwargs={'filepaths': filepaths})
-            )
-
-        return split_generators
-
-    def _generate_examples(self,filepaths):
-        id_ = 0
-        for filepath in filepaths:
-            if filepath:
-                logger.info("Generating examples from = %s", filepath)
-                try:
-                    with xz.open(open(filepath,'rb'), 'rt', encoding='utf-8') as f:
-                        json_list = list(f)
-
-                    for json_str in json_list:
-                        example = json.loads(json_str)
-                        if example is not None and isinstance(example, dict):
-                            yield id_, example
-                            id_ +=1
-
-                except Exception:
-                    logger.exception("Error while processing file %s", filepath)
+    def _split_generators(self, dl_manager):
+        data_files = {
+            "train": [
+                "data/train/it_train.jsonl.xz",
+                "data/train/fr_train.jsonl.xz",
+                "data/train/de_train.jsonl.xz",
+                "data/train/swiss_train.jsonl.xz",
+                "data/train/en_sherlock_train.jsonl.xz",
+                "data/train/en_sfu_train.jsonl.xz",
+                "data/train/en_bioscope_train.jsonl.xz"
+            ],
+            "test": [
+                "data/test/it_test.jsonl.xz",
+                "data/test/fr_test.jsonl.xz",
+                "data/test/de_test.jsonl.xz",
+                "data/test/swiss_test.jsonl.xz",
+                "data/test/en_sherlock_test.jsonl.xz",
+                "data/test/en_sfu_test.jsonl.xz",
+                "data/test/en_bioscope_test.jsonl.xz"
+            ],
+            "validation": [
+                "data/validation/it_validation.jsonl.xz",
+                "data/validation/fr_validation.jsonl.xz",
+                "data/validation/de_validation.jsonl.xz",
+                "data/validation/swiss_validation.jsonl.xz",
+                "data/validation/en_sherlock_validation.jsonl.xz",
+                "data/validation/en_sfu_validation.jsonl.xz",
+                "data/validation/en_bioscope_validation.jsonl.xz"
+            ]
+        }
+
+        # Read each split eagerly: resolve every archive through dl_manager,
+        # keep one record per line. The language tag is taken from the
+        # file-name prefix ("it", "fr", "de", "swiss", "en").
+        train_data = [{"text": line.strip(), "language": file.split("/")[-1].split("_")[0]}
+                      for file in data_files["train"]
+                      for line in xz.open(dl_manager.download(file), "rt", encoding="utf-8")]
+        test_data = [{"text": line.strip(), "language": file.split("/")[-1].split("_")[0]}
+                     for file in data_files["test"]
+                     for line in xz.open(dl_manager.download(file), "rt", encoding="utf-8")]
+        validation_data = [{"text": line.strip(), "language": file.split("/")[-1].split("_")[0]}
+                           for file in data_files["validation"]
+                           for line in xz.open(dl_manager.download(file), "rt", encoding="utf-8")]
+
+        return [
+            self._split_generate("train", data=train_data),
+            self._split_generate("test", data=test_data),
+            self._split_generate("validation", data=validation_data)
+        ]
+
+    def _split_generate(self, split, data):
+        return datasets.SplitGenerator(
+            name=split,
+            gen_kwargs={"data": data},
+        )
+
+    def _generate_examples(self, data):
+        for i, example in enumerate(data):
+            yield i, example
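
For reference, the per-file read pattern the new _split_generators relies on can be exercised on its own. Below is a minimal sketch, assuming xz is the standard-library lzma module imported as xz (consistent with the xz.open calls above) and a hypothetical local sample file mirroring the data_files layout; neither assumption is confirmed by this page.

import json
import lzma as xz

def read_jsonl_xz(path):
    # Yield one parsed record per line of an xz-compressed JSON Lines file.
    with xz.open(path, "rt", encoding="utf-8") as f:
        for line in f:
            example = json.loads(line)
            if isinstance(example, dict):
                yield example

# Hypothetical local path, mirroring the layout listed in data_files above.
for example in read_jsonl_xz("data/train/it_train.jsonl.xz"):
    print(example)
    break

Once the commit is live, the three splits should be loadable through the standard API, e.g. datasets.load_dataset("rcds/MultiLegalNeg") (the repo id is inferred from the script name; it is not shown on this page).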