vr18 committed on
Commit
7f2f8b2
1 Parent(s): 7a63386

Changes in loading script

Browse files
Files changed (2) hide show
  1. README.md +10 -3
  2. court_view_generation.py +5 -14
README.md CHANGED
@@ -17,21 +17,28 @@ size_categories:
17
  ## Introduction
18
 
19
  This dataset contains court views for different languages and court chambers. It includes information such as decision id, language, chamber, file name, url, and the number of tokens in the facts and considerations sections.
20
- Level 1 contains all the data, Level 2 contains only data with complete origin facts & origin considerations.
21
 
22
  ## Size
23
- ### Level 1 (L1)
24
  - All: 385'840
25
  - validation: 47'661
26
  - test: 106'608
27
  - train: 231'571
28
 
29
- ### Level 2 (L2)
30
  - All: 272
31
  - validation: 9
32
  - test: 236
33
  - train: 27
34
 
 
 
 
 
 
 
 
35
  ## Columns
36
 
37
  - decision_id: unique identifier for the decision
 
17
  ## Introduction
18
 
19
  This dataset contains court views for different languages and court chambers. It includes information such as decision id, language, chamber, file name, url, and the number of tokens in the facts and considerations sections.
20
+ Full (L1) contains all the data, Origin (L2) contains only data with complete origin facts & origin considerations.
21
 
22
  ## Size
23
+ ### Full (L1)
24
  - All: 385'840
25
  - validation: 47'661
26
  - test: 106'608
27
  - train: 231'571
28
 
29
+ ### Origin (L2)
30
  - All: 272
31
  - validation: 9
32
  - test: 236
33
  - train: 27
34
 
35
+ ## Load datasets
36
+
37
+ ```python
38
+ dataset = load_dataset("rcds/court_view_generation")
39
+ dataset_origin = load_dataset("rcds/court_view_generation", "origin")
40
+ ```
41
+
42
  ## Columns
43
 
44
  - decision_id: unique identifier for the decision
court_view_generation.py CHANGED
@@ -53,7 +53,7 @@ _LICENSE = ""
53
  # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
54
  _URLS = {
55
  "full": "https://huggingface.co/datasets/rcds/court_view_generation/resolve/main/L1/huggingface",
56
- "level_2": "https://huggingface.co/datasets/rcds/court_view_generation/resolve/main/L2/huggingface"
57
  }
58
 
59
 
@@ -75,7 +75,7 @@ class CourtViewGeneration(datasets.GeneratorBasedBuilder):
75
  # data = datasets.load_dataset('my_dataset', 'second_domain')
76
  BUILDER_CONFIGS = [
77
  datasets.BuilderConfig(name="full", version=VERSION, description="This part of my dataset covers the whole dataset"),
78
- datasets.BuilderConfig(name="level_2", version=VERSION, description="This part of my dataset covers a subset containing only cases with origin data")
79
  ]
80
 
81
  DEFAULT_CONFIG_NAME = "full" # It's not mandatory to have a default configuration. Just use one if it make sense.
@@ -87,9 +87,7 @@ class CourtViewGeneration(datasets.GeneratorBasedBuilder):
87
  "decision_id": datasets.Value("string"),
88
  "facts": datasets.Value("string"),
89
  "considerations": datasets.Value("string"),
90
- "court": datasets.Value("string"),
91
- "origin_facts": datasets.Value("string"),
92
- "origin_considerations": datasets.Value("string")
93
  # These are the features of your dataset like images, labels ...
94
  }
95
  )
@@ -132,10 +130,6 @@ class CourtViewGeneration(datasets.GeneratorBasedBuilder):
132
  filepath_validation = dl_manager.download(os.path.join(urls, "validation.jsonl.xz"))
133
  filepath_test = dl_manager.download(os.path.join(urls, "test.jsonl.xz"))
134
 
135
- print("filepath_train", filepath_train)
136
- # show files in the downloaded and extracted dir
137
- print(os.listdir(os.path.dirname(filepath_train)))
138
-
139
  return [
140
  datasets.SplitGenerator(
141
  name=datasets.Split.TRAIN,
@@ -160,7 +154,7 @@ class CourtViewGeneration(datasets.GeneratorBasedBuilder):
160
  "filepath": filepath_test,
161
  "split": "test"
162
  },
163
- ),
164
  ]
165
 
166
  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
@@ -171,17 +165,14 @@ class CourtViewGeneration(datasets.GeneratorBasedBuilder):
171
  with xz.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
172
  for id, line in enumerate(f):
173
  line_counter += 1
174
- print(id, len(line), line[:200])
175
  if line:
176
  data = json.loads(line)
177
- if self.config.name == "all":
178
  yield id, {
179
  "decision_id": data["decision_id"],
180
  "facts": data["facts"],
181
  "considerations": data["considerations"],
182
  "court": data["court"],
183
- "origin_facts": data["origin_facts"],
184
- "origin_considerations": data["origin_considerations"]
185
  }
186
  else:
187
  yield id, {
 
53
  # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
54
  _URLS = {
55
  "full": "https://huggingface.co/datasets/rcds/court_view_generation/resolve/main/L1/huggingface",
56
+ "origin": "https://huggingface.co/datasets/rcds/court_view_generation/resolve/main/L2/huggingface"
57
  }
58
 
59
 
 
75
  # data = datasets.load_dataset('my_dataset', 'second_domain')
76
  BUILDER_CONFIGS = [
77
  datasets.BuilderConfig(name="full", version=VERSION, description="This part of my dataset covers the whole dataset"),
78
+ datasets.BuilderConfig(name="origin", version=VERSION, description="This part of my dataset covers a subset containing only cases with origin data")
79
  ]
80
 
81
  DEFAULT_CONFIG_NAME = "full" # It's not mandatory to have a default configuration. Just use one if it make sense.
 
87
  "decision_id": datasets.Value("string"),
88
  "facts": datasets.Value("string"),
89
  "considerations": datasets.Value("string"),
90
+ "court": datasets.Value("string")
 
 
91
  # These are the features of your dataset like images, labels ...
92
  }
93
  )
 
130
  filepath_validation = dl_manager.download(os.path.join(urls, "validation.jsonl.xz"))
131
  filepath_test = dl_manager.download(os.path.join(urls, "test.jsonl.xz"))
132
 
 
 
 
 
133
  return [
134
  datasets.SplitGenerator(
135
  name=datasets.Split.TRAIN,
 
154
  "filepath": filepath_test,
155
  "split": "test"
156
  },
157
+ )
158
  ]
159
 
160
  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
 
165
  with xz.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
166
  for id, line in enumerate(f):
167
  line_counter += 1
 
168
  if line:
169
  data = json.loads(line)
170
+ if self.config.name == "full":
171
  yield id, {
172
  "decision_id": data["decision_id"],
173
  "facts": data["facts"],
174
  "considerations": data["considerations"],
175
  "court": data["court"],
 
 
176
  }
177
  else:
178
  yield id, {