patriziobellan committed on
Commit
005d947
·
1 Parent(s): a6b1197

Upload PET.py

Files changed (1)
  1. PET.py +170 -0
PET.py ADDED
@@ -0,0 +1,170 @@
+ # TO CREATE dataset_infos.json use: datasets-cli test PET --save_infos --all_configs
+ #
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # TODO: Address all TODOs and remove all explanatory comments
+ """PET: process descriptions annotated at the token level with business process elements."""
+
+
+ import csv
+ import json
+ import os
+
+ import datasets
+
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {A great new dataset},
+ author={huggingface, Inc.
+ },
+ year={2020}
+ }
+ """
+
+ # TODO: Add description of the dataset here
+ # You can copy an official description
+ _DESCRIPTION = """\
+ PET is a dataset of natural language business process descriptions whose sentences are
+ annotated at the token level with process elements (actors, activities, activity data,
+ further specifications, gateways, and condition specifications).
+ """
+
+ # TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = ""
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ _URL = "https://pdi.fbk.eu/pet/PETHuggingFace/"
+ # _TRAINING_FILE = "train.json"
+ # _DEV_FILE = "emerging.dev.conll"
+ _TEST_FILE = "test.json"
+
+
+
+ class PETConfig(datasets.BuilderConfig):
+     """BuilderConfig for the PET dataset."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for PET.
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(PETConfig, self).__init__(**kwargs)
+
+
+ class PET(datasets.GeneratorBasedBuilder):
+     """The PET dataset."""
+
+     BUILDER_CONFIGS = [
+         PETConfig(
+             name="PET", version=datasets.Version("1.0.0"), description="The PET Dataset"
+         ),
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "document name": datasets.Value("string"),
+                 "sentence-ID": datasets.Value("int8"),
+                 "tokens": datasets.Sequence(datasets.Value("string")),
+                 "ner-tags": datasets.Sequence(
+                     datasets.features.ClassLabel(
+                         names=[
+                             "O",
+                             "B-Actor",
+                             "I-Actor",
+                             "B-Activity",
+                             "I-Activity",
+                             "B-Activity Data",
+                             "I-Activity Data",
+                             "B-Further Specification",
+                             "I-Further Specification",
+                             "B-XOR Gateway",
+                             "I-XOR Gateway",
+                             "B-Condition Specification",
+                             "I-Condition Specification",
+                             "B-AND Gateway",
+                             "I-AND Gateway",
+                         ]
+                     )
+                 ),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,
+             # If there's a common (input, target) tuple from the features, uncomment the
+             # supervised_keys line below and specify them. They'll be used if
+             # as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         urls_to_download = {
+             # "train": f"{_URL}{_TRAINING_FILE}",
+             # "dev": f"{_URL}{_DEV_FILE}",
+             "test": f"{_URL}{_TEST_FILE}",
+         }
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
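+         # Note: only the test split is wired up here, since only test.json is listed
+         # in urls_to_download above; the train/validation generators below are left
+         # commented out.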
+         return [
+             # datasets.SplitGenerator(
+             #     name=datasets.Split.TRAIN,
+             #     # These kwargs will be passed to _generate_examples
+             #     gen_kwargs={
+             #         "filepath": downloaded_files["train"],
+             #         "split": "train",
+             #     },
+             # ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": downloaded_files["test"],
+                     "split": "test",
+                 },
+             ),
+             # datasets.SplitGenerator(
+             #     name=datasets.Split.VALIDATION,
+             #     # These kwargs will be passed to _generate_examples
+             #     gen_kwargs={
+             #         "filepath": os.path.join(data_dir, "dev.jsonl"),
+             #         "split": "dev",
+             #     },
+             # ),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, filepath, split):
+         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+
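+         # The downloaded file is read as JSON Lines: one JSON object per line with the
+         # four keys used below. Illustrative record (values are made up, not taken from
+         # the actual data):
+         # {"document name": "doc-1.1", "sentence-ID": 0,
+         #  "tokens": ["the", "manager", "approves", "the", "request"],
+         #  "ner-tags": ["B-Actor", "I-Actor", "B-Activity", "B-Activity Data", "I-Activity Data"]}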
+         with open(filepath, encoding="utf-8", mode='r') as f:
+             for key, row in enumerate(f):
+                 row = json.loads(row)
+                 yield key, {
+                     "document name": row["document name"],
+                     "sentence-ID": row["sentence-ID"],
+                     "tokens": row["tokens"],
+                     "ner-tags": row["ner-tags"],
+                 }
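
A minimal sketch of how this loading script could be exercised locally (assumptions: the script is saved as ./PET.py in the working directory, test.json is reachable at the _URL above, and recent versions of the datasets library may additionally require trust_remote_code=True for script-based datasets):

from datasets import load_dataset

# Build the dataset from the local loading script and take the test split.
ds = load_dataset("./PET.py", name="PET", split="test")

# The BIO label names come from the ClassLabel defined in _info().
labels = ds.features["ner-tags"].feature.names
print(labels)
print(ds[0]["document name"], ds[0]["sentence-ID"], ds[0]["tokens"][:5])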