davanstrien (HF staff) committed
Commit e96ea6d
Parent: 07f9352

Convert dataset to Parquet (#3)

- Convert dataset to Parquet (1d50d84c243faced4cdb6ef1560ca26f0cade44f)
- Delete loading script (1460b4208c645a630774ad60556a7ce684bceb13)
- Delete legacy dataset_infos.json (56eba7a9bf450cb97ce593f6696c58802252a672)

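With the Parquet shards and the dataset_info/configs metadata added below, the datasets library can read this repository directly instead of executing the loading script this commit deletes. A minimal sketch of the new loading path (the namespace in the repo id is a placeholder, since the commit does not show it):

    from datasets import load_dataset

    # Placeholder repo id -- the owning namespace does not appear in this commit.
    repo_id = "<namespace>/old_bailey_proceedings"

    # After this commit, the train split is read straight from data/train-*.parquet;
    # no dataset script is downloaded or executed.
    ds = load_dataset(repo_id, split="train")
    print(ds.num_rows)  # 2638, per the dataset_info block below
    print(ds.features)  # id, text, places, type, persons, date
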
README.md CHANGED
@@ -1,16 +1,15 @@
 ---
 annotations_creators:
 - expert-generated
-language:
-- en
 language_creators:
 - expert-generated
 - machine-generated
+language:
+- en
 license:
 - cc-by-4.0
 multilinguality:
 - monolingual
-pretty_name: Old Bailey Proceedings
 size_categories:
 - 1K<n<10K
 source_datasets:
@@ -22,6 +21,32 @@ task_ids:
 - multi-class-classification
 - language-modeling
 - masked-language-modeling
+pretty_name: Old Bailey Proceedings
+dataset_info:
+  features:
+  - name: id
+    dtype: string
+  - name: text
+    dtype: string
+  - name: places
+    sequence: string
+  - name: type
+    dtype: string
+  - name: persons
+    sequence: string
+  - name: date
+    dtype: string
+  splits:
+  - name: train
+    num_bytes: 719949847
+    num_examples: 2638
+  download_size: 370751172
+  dataset_size: 719949847
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
 ---
 [Needs More Information]
 
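The new configs block is what maps the default config's train split to the Parquet shards added below. A quick local sanity check with pyarrow (a sketch, assuming the repo is cloned so data/train-*.parquet exists on disk):

    import glob

    import pyarrow.parquet as pq

    # configs maps split "train" to data/train-*; confirm schema and row count.
    total_rows = 0
    for path in sorted(glob.glob("data/train-*.parquet")):
        pf = pq.ParquetFile(path)
        print(path, pf.schema_arrow.names, pf.metadata.num_rows)
        total_rows += pf.metadata.num_rows

    print(total_rows)  # should equal num_examples: 2638
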
data/train-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10051c5fe9d2ca4005de87f4ce61a5468bb2a1697fee7ddf63f86ad2fa63408f
+size 152740985
data/train-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8ea3ddcfae256e00a6798a4f0837a23774645a93d42e0751c3f9c4b6126dd8d
+size 218010187
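
Both ADDED files are git-lfs pointers rather than the Parquet data itself: three lines giving the spec version, the SHA-256 (oid) of the real object, and its size in bytes. A small sketch of checking a downloaded shard against its pointer (this is the generic LFS oid check, not tooling from this repo):

    import hashlib
    import os

    def verify_lfs_object(path: str, oid: str, size: int) -> bool:
        """Compare a file with the oid/size recorded in its LFS pointer."""
        if os.path.getsize(path) != size:
            return False
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                digest.update(chunk)
        return digest.hexdigest() == oid

    # Values copied from the first pointer above.
    print(verify_lfs_object(
        "data/train-00000-of-00002.parquet",
        "10051c5fe9d2ca4005de87f4ce61a5468bb2a1697fee7ddf63f86ad2fa63408f",
        152740985,
    ))
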
dataset_infos.json DELETED
@@ -1 +0,0 @@
-{"default": {"description": "The dataset consists of 2,163 transcriptions of the Proceedings and 475 Ordinary's Accounts marked up in TEI-XML, \nand contains some documentation covering the data structure and variables. Each Proceedings file represents one session of the court (1674-1913), \nand each Ordinary's Account file represents a single pamphlet (1676-1772)\n", "citation": "@article{Howard2017,\nauthor = \"Sharon Howard\",\ntitle = \"{Old Bailey Online XML Data}\",\nyear = \"2017\",\nmonth = \"4\",\nurl = \"https://figshare.shef.ac.uk/articles/dataset/Old_Bailey_Online_XML_Data/4775434\",\ndoi = \"10.15131/shef.data.4775434.v2\"\n}\n", "homepage": "https://www.dhi.ac.uk/projects/old-bailey/", "license": "Creative Commons Attribution 4.0 International", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "places": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "type": {"dtype": "string", "id": null, "_type": "Value"}, "persons": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "date": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "old_bailey_proceedings", "config_name": "default", "version": {"version_str": "7.2.0", "description": null, "major": 7, "minor": 2, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 719955987, "num_examples": 2638, "dataset_name": "old_bailey_proceedings"}}, "download_checksums": {"https://www.dhi.ac.uk/san/data/oldbailey/oldbailey.zip": {"num_bytes": 326259415, "checksum": "60d1e3b889089d0b47c933cf24b8db62838ab0fec793569bf516173a12622ca0"}}, "download_size": 326259415, "post_processing_size": null, "dataset_size": 719955987, "size_in_bytes": 1046215402}}
 
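Everything this JSON recorded (features, splits, license, citation) now lives in the README's YAML front matter, which is what the Hub parses. A sketch of reading the same metadata from the card instead (repo id again a placeholder):

    from huggingface_hub import DatasetCard

    card = DatasetCard.load("<namespace>/old_bailey_proceedings")  # placeholder id
    print(card.data.pretty_name)  # Old Bailey Proceedings
    print(card.data.license)      # ['cc-by-4.0']
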
old_bailey_proceedings.py DELETED
@@ -1,149 +0,0 @@
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import datasets
-import glob
-import xml.etree.ElementTree as ET
-
-_CITATION = """@article{Howard2017,
-author = "Sharon Howard",
-title = "{Old Bailey Online XML Data}",
-year = "2017",
-month = "4",
-url = "https://figshare.shef.ac.uk/articles/dataset/Old_Bailey_Online_XML_Data/4775434",
-doi = "10.15131/shef.data.4775434.v2"
-}
-"""
-
-
-_DESCRIPTION = """The dataset consists of 2,163 transcriptions of the Proceedings and 475 Ordinary's Accounts marked up in TEI-XML,
-and contains some documentation covering the data structure and variables. Each Proceedings file represents one session of the court (1674-1913),
-and each Ordinary's Account file represents a single pamphlet (1676-1772)
-"""
-
-_HOMEPAGE = "https://www.dhi.ac.uk/projects/old-bailey/"
-
-_DATASETNAME = "old_bailey_proceedings"
-
-_LICENSE = "Creative Commons Attribution 4.0 International"
-
-_URL = "https://www.dhi.ac.uk/san/data/oldbailey/oldbailey.zip"
-
-logger = datasets.utils.logging.get_logger(__name__)
-
-
-class OldBaileyProceedings(datasets.GeneratorBasedBuilder):
-    """The dataset consists of 2,163 transcriptions of the Proceedings and 475 Ordinary's Accounts marked up in TEI-XML,
-    and contains some documentation covering the data structure and variables. Each Proceedings file represents one session of the court (1674-1913),
-    and each Ordinary's Account file represents a single pamphlet (1676-1772)"""
-
-    VERSION = datasets.Version("7.2.0")
-
-    def _info(self):
-        features = datasets.Features(
-            {
-                "id": datasets.Value("string"),
-                "text": datasets.Value("string"),
-                "places": datasets.Sequence(datasets.Value("string")),
-                "type": datasets.Value("string"),
-                "persons": datasets.Sequence(datasets.Value("string")),
-                "date": datasets.Value("string"),
-            }
-        )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        data_dir = dl_manager.download_and_extract(_URL)
-        oa_dir = "ordinarysAccounts"
-        obp_dir = "sessionsPapers"
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "data_dirs": {
-                        "OA": os.path.join(data_dir, oa_dir),
-                        "OBP": os.path.join(data_dir, obp_dir),
-                    },
-                },
-            ),
-        ]
-
-    def convert_text_to_features(self, file, key):
-        if key == "OA":
-            root_tag = "p"
-        else:
-            root_tag = "div1/p"
-        try:
-            xml_data = ET.parse(file)
-            root = xml_data.getroot()
-            start = root.find("./text/body/div0")
-            id = start.attrib["id"]
-            date = start.find("interp[@type='date']").attrib["value"]
-            text_parts = []
-            places, persons = [], []
-            for content in start.findall(root_tag):
-                for place in content.findall("placeName"):
-                    if place.text:
-                        place_name = place.text.replace("\n", "").strip()
-                        if place_name:
-                            places.append(place.text)
-                for person in content.findall("persName"):
-                    full_name = []
-                    for name_part in person.itertext():
-                        name_part = (
-                            name_part.replace("\n", "").replace("\t", "").strip()
-                        )
-                        if name_part:
-                            full_name.append(name_part)
-                    if full_name:
-                        persons.append(" ".join(full_name))
-                for text_snippet in content.itertext():
-                    text_snippet = (
-                        text_snippet.replace("\n", "").replace("\t", "").strip()
-                    )
-                    if text_snippet:
-                        text_parts.append(text_snippet)
-            full_text = " ".join(text_parts)
-            return (
-                0,
-                {
-                    "id": id,
-                    "date": date,
-                    "type": key,
-                    "places": places,
-                    "persons": persons,
-                    "text": full_text,
-                },
-            )
-        except Exception as e:
-            return -1, repr(e)
-
-    def _generate_examples(self, data_dirs):
-        for key, data_dir in data_dirs.items():
-            for file in glob.glob(os.path.join(data_dir, "*.xml")):
-                status_code, ret_val = self.convert_text_to_features(file, key)
-                if status_code:
-                    logger.exception(
-                        f"{os.path.basename(file)} could not be parsed properly"
-                    )
-                    continue
-                else:
-                    yield ret_val["id"], ret_val
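
For anyone who still wants the raw TEI-XML rather than the Parquet export, the heart of the deleted script, convert_text_to_features, is self-contained and can be run against a single file from the original oldbailey.zip. A hypothetical one-off use (the filename is illustrative only):

    # Standalone use of the parser from the deleted script, applied to one
    # Ordinary's Account file extracted from oldbailey.zip.
    builder = OldBaileyProceedings()
    status, example = builder.convert_text_to_features(
        "ordinarysAccounts/OA17070912.xml", "OA"
    )
    if status == 0:
        print(example["id"], example["date"], example["text"][:100])
    else:
        print("parse failed:", example)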