Stern5497 committed on
Commit
d3b7dcd
1 Parent(s): d9c3255

Delete swiss_rulings.py

Files changed (1)
  1. swiss_rulings.py +0 -107
swiss_rulings.py DELETED
@@ -1,107 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import json
- import lzma
- import os
-
- import datasets
- try:
-     import lzma as xz
- except ImportError:
-     import pylzma as xz
-
-
- # TODO: Add BibTeX citation
- # Find for instance the citation on arXiv or on the dataset repo/website
- _CITATION = """TODO: create citation"""
-
- # You can copy an official description
- _DESCRIPTION = """\
- This dataset contains Swiss BGE rulings: leading decisions of the Swiss Federal Supreme Court in German, French and Italian.
- """
-
- _URLS = {
-     "rulings": "https://huggingface.co/datasets/rcds/swiss_rulings/resolve/main/data",
- }
-
-
- class SwissRulings(datasets.GeneratorBasedBuilder):
-     """This dataset contains BGE rulings for the doc2doc information retrieval task."""
-
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="rulings", description="This part covers all rulings."),
-     ]
-
-     DEFAULT_CONFIG_NAME = "rulings"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
-
-     def _info(self):
-         if self.config.name == "rulings":
-             features = datasets.Features(
-                 {
-                     "decision_id": datasets.Value("string"),
-                     "language": datasets.Value("string"),
-                     "year": datasets.Value("float64"),
-                     "facts": datasets.Value("string"),
-                     "considerations": datasets.Value("string"),
-                     "rulings": datasets.Value("string"),
-                     "chamber": datasets.Value("string"),
-                     "file_number": datasets.Value("string"),
-                 }
-             )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,  # Defined above because they can differ between configurations
-         )
-
-     def _split_generators(self, dl_manager):
-         urls = _URLS[self.config.name]
-         filepath = dl_manager.download(os.path.join(urls, "rulings.jsonl"))
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": filepath,
-                     "split": "train",
-                 },
-             )
-         ]
-
-     # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
-     def _generate_examples(self, filepath, split):
-         # The key is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-         line_counter = 0
-         try:
-             with xz.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
-                 for idx, line in enumerate(f):
-                     line_counter += 1
-                     if line:
-                         data = json.loads(line)
-                         if self.config.name == "rulings":
-                             yield idx, {
-                                 "decision_id": data["decision_id"],
-                                 "language": data["language"],
-                                 "year": data["year"],
-                                 "facts": data["facts"],
-                                 "considerations": data["considerations"],
-                                 "rulings": data["rulings"],
-                                 "chamber": data["chamber"],
-                                 "file_number": data["file_number"],
-                             }
-         except lzma.LZMAError as e:
-             print(split, e)
-             if line_counter == 0:
-                 raise e
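
For reference, a minimal sketch of how this loading script was used before its deletion, assuming the dataset is hosted at the Hub repo rcds/swiss_rulings (taken from the _URLS entry above) and that the installed `datasets` version still supports script-based datasets:

import datasets

# Load the "rulings" configuration defined by the (now deleted) swiss_rulings.py script.
# The repo id "rcds/swiss_rulings" is assumed from _URLS; trust_remote_code is required
# for community loading scripts in newer releases of the `datasets` library.
dataset = datasets.load_dataset("rcds/swiss_rulings", "rulings", trust_remote_code=True)

# The script defines a single "train" split; each example carries the fields declared
# in _info (decision_id, language, year, facts, considerations, rulings, chamber, file_number).
print(dataset["train"][0]["decision_id"])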