Datasets:

Modalities:
Text
Libraries:
Datasets
License:

Waleed-bin-Qamar committed
Commit 0ba220d
1 Parent(s): 0572966

Upload NER-BAILII-UK-CCA.py

Files changed (1): NER-BAILII-UK-CCA.py (+188, -0)

NER-BAILII-UK-CCA.py ADDED
# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Waleed-bin-Qamar/NER-BAILII-UK-CCA: criminal court appeals of the UK."""

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@inproceedings{title = "A great new dataset",
author = "A great new dataset",
booktitle = "A great new dataset",
month = sep,
year = "2050",
address = "a,b",
publisher = "Association in b",
doi = " ",
pages = " ",
abstract = " ",
}
"""

_DESCRIPTION = """\
A great new dataset
"""

# Due to privacy issues, the data files are hosted at private addresses.
# These are Google Drive direct-download links ("uc?id=...&export=download").
_URL = "https://drive.google.com/"
_TRAINING_FILE_URL = "uc?id=1Hn30-V5tm_JxKB7Q09BpZJu1VylinpGu&export=download"
_DEV_FILE_URL = "uc?id=1saGAPzk0zaj6dIo5FKa1rb5SCdHwJnAq&export=download"
_TEST_FILE_URL = "uc?id=1stsN0Iq1guKNH0oJc0ZjqIj57ExR_iYT&export=download"


class NER_BAILIIConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        """BuilderConfig for NER_BAILII.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(NER_BAILIIConfig, self).__init__(**kwargs)


class NER_BAILII(datasets.GeneratorBasedBuilder):
    """NER-BAILII-UK-CCA: criminal court appeals of the UK."""

    BUILDER_CONFIGS = [
        NER_BAILIIConfig(
            name="NER-BAILII-UK-CCA",
            version=datasets.Version("1.0.0"),
            description="The NER-BAILII-UK-CCA named entity recognition dataset",
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # BIO scheme: "B-" opens an entity span, "I-" continues it,
                    # and "O" marks tokens outside any entity.
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                'O',
                                'B-defendant_pleaded_guilty',
                                'I-defendant_pleaded_guilty',
                                'B-appeal_against_sentence',
                                'I-appeal_against_sentence',
                                'B-defendant',
                                'I-defendant',
                                'B-date_of_original_trial_or_conviction',
                                'I-date_of_original_trial_or_conviction',
                                'B-offence',
                                'I-offence',
                                'B-defendant_original_sentence',
                                'I-defendant_original_sentence',
                                'B-decision_granted',
                                'I-decision_granted',
                                'B-date_of_crime',
                                'I-date_of_crime',
                                'B-decision_refused',
                                'I-decision_refused',
                                'B-expert_witness_used_at_original_trail',
                                'I-expert_witness_used_at_original_trail',
                                'B-victim_age',
                                'I-victim_age',
                                'B-defendant_is_female',
                                'I-defendant_is_female',
                                'B-appeal_against_conviction',
                                'I-appeal_against_conviction',
                                'B-defendant_custodial_sentence_years',
                                'I-defendant_custodial_sentence_years',
                                'B-date_of_offence',
                                'I-date_of_offence',
                                'B-decision_quashed',
                                'I-decision_quashed',
                                'B-defendant_gender',
                                'B-victim_is_male',
                                'B-victim_is_female',
                                'B-victim_gender',
                                'B-previous_convictions',
                                'I-previous_convictions',
                                'I-s_age',
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="http://linkedin",
            citation=_CITATION,
        )
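    # Note (added for clarity): because "ner_tags" is declared as a ClassLabel
    # feature above, the label strings yielded by _generate_examples are
    # encoded to integer ids when the dataset is built, and the ClassLabel
    # object keeps the string-to-id mapping for decoding.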
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE_URL}",
            "dev": f"{_URL}{_DEV_FILE_URL}",
            "test": f"{_URL}{_TEST_FILE_URL}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]
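    # The parser below expects CoNLL-style input: one token per line, the
    # token and its tag separated by a single tab ("\t"), and a blank line
    # between sentences. An illustrative (made-up) fragment:
    #
    #     the         O
    #     appellant   B-defendant
    #     pleaded     B-defendant_pleaded_guilty
    #     guilty      I-defendant_pleaded_guilty
    #
    #     (blank line -> next sentence)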
    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            current_tokens = []
            current_labels = []
            sentence_counter = 0

            for row in f:
                row = row.rstrip()
                if row:
                    token, label = row.split("\t")
                    current_tokens.append(token)
                    current_labels.append(label)
                else:
                    # Blank line: the current sentence is complete.
                    if not current_tokens:
                        # Consecutive empty lines would otherwise produce empty sentences.
                        continue
                    assert len(current_tokens) == len(current_labels), "Mismatch between number of tokens and labels"
                    sentence = (
                        sentence_counter,
                        {
                            "id": str(sentence_counter),
                            "tokens": current_tokens,
                            "ner_tags": current_labels,
                        },
                    )
                    sentence_counter += 1
                    current_tokens = []
                    current_labels = []
                    yield sentence

            # Don't forget the last sentence in the dataset 🧐
            if current_tokens:
                yield sentence_counter, {
                    "id": str(sentence_counter),
                    "tokens": current_tokens,
                    "ner_tags": current_labels,
                }
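A minimal usage sketch, not part of the commit: it assumes a `datasets` release that still supports loading script-based datasets (recent releases have removed this), and takes the repository id from the script's docstring. The ClassLabel feature declared in _info can decode the integer tag ids back to the tag strings.

# usage_sketch.py -- assumes a script-compatible `datasets` version
from datasets import load_dataset

ds = load_dataset("Waleed-bin-Qamar/NER-BAILII-UK-CCA", trust_remote_code=True)

example = ds["train"][0]
labels = ds["train"].features["ner_tags"].feature  # the ClassLabel from _info
print(example["tokens"][:5])
print([labels.int2str(t) for t in example["ner_tags"][:5]])  # ids -> tag strings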