ro-h committed on
Commit
f254ece
1 Parent(s): f58e7ca

Upload regulatory_comments.py

Browse files
Files changed (1) hide show
  1. regulatory_comments.py +125 -0
regulatory_comments.py ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import json
16
+ import datasets
17
+
18
# Human-readable description of the dataset (shown on the Hub dataset card).
_DESCRIPTION = """\
United States governmental agencies often make proposed regulations open to the public for comment.
Proposed regulations are organized into "dockets". This project will use Regulation.gov public API
to aggregate and clean public comments for dockets that mention opioid use.

Each example will consist of one docket, and include metadata such as docket id, docket title, etc.
Each docket entry will also include information about the top 10 comments, including comment metadata
and comment text.
"""

# Homepage URL of the dataset (the upstream data source, not the Hub repo).
_HOMEPAGE = "https://www.regulations.gov/"

# URL to download the dataset: a single JSON file hosted in the Hub repository.
_URLS = {"url": "https://huggingface.co/datasets/ro-h/regulatory_comments/raw/main/docket_comments_v6.json"}

# BibTeX citation of the repository.
_CITATION = """@misc{ro_huang_regulatory_2023-1,
author = {{Ro Huang}},
date = {2023-03-19},
publisher = {Hugging Face},
title = {Regulatory Comments},
url = {https://huggingface.co/datasets/ro-h/regulatory_comments},
version = {1.1.4},
bdsk-url-1 = {https://huggingface.co/datasets/ro-h/regulatory_comments}}
"""
45
+
46
# Class definition for handling the dataset
class RegComments(datasets.GeneratorBasedBuilder):
    """Dataset builder for opioid-related regulatory dockets and their public comments.

    Each generated example is one docket: its metadata plus a nested sequence
    of top comments, loaded from a single JSON file hosted on the Hugging Face
    Hub (see ``_URLS``).
    """

    # Version of the dataset; kept in sync with the citation above.
    VERSION = datasets.Version("1.1.4")

    def _info(self):
        """Define the feature schema of the dataset.

        Returns:
            datasets.DatasetInfo: one docket per example, with a nested
            sequence of comment records under the "comments" key.
        """
        features = datasets.Features({
            "id": datasets.Value("string"),
            "agency": datasets.Value("string"),
            "title": datasets.Value("string"),
            "update_date": datasets.Value("string"),
            "update_time": datasets.Value("string"),
            "purpose": datasets.Value("string"),
            "keywords": datasets.Sequence(datasets.Value("string")),
            # Top comments for the docket; the number of comments may vary
            # per docket (up to 10 per the dataset description).
            "comments": datasets.Sequence({
                "text": datasets.Value("string"),
                "comment_id": datasets.Value("string"),
                "comment_url": datasets.Value("string"),
                "comment_date": datasets.Value("string"),
                "comment_time": datasets.Value("string"),
                "commenter_fname": datasets.Value("string"),
                "commenter_lname": datasets.Value("string"),
                "comment_length": datasets.Value("int32"),
            }),
        })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the data file and declare the splits.

        Only a single TRAIN split is defined, covering the whole JSON file.

        Args:
            dl_manager: datasets download manager used to fetch ``_URLS``.

        Returns:
            list[datasets.SplitGenerator]: the single train split.
        """
        data_path = dl_manager.download_and_extract(_URLS["url"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_path},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs in the raw (text) form.

        Args:
            filepath: local path to the downloaded JSON file; expected to
                contain a list of docket objects.

        Yields:
            tuple[int, dict]: a running integer key and one docket dict
            matching the schema declared in ``_info``.
        """
        # Load the whole file up front so the handle is closed before
        # generation starts (json.load consumes it entirely anyway).
        with open(filepath, "r", encoding="utf-8") as f:
            data = json.load(f)

        # enumerate replaces the original manual `key = 0 ... key += 1` counter.
        for key, docket in enumerate(data):
            yield key, {
                "id": docket["id"],
                "agency": docket["agency"],
                "title": docket["title"],
                "update_date": docket["update_date"],
                "update_time": docket["update_time"],
                # Optional fields: fall back to stable defaults so every
                # example satisfies the fixed schema.
                "purpose": docket.get("purpose", "unspecified"),
                "keywords": docket.get("keywords", []),
                "comments": docket["comments"],
            }