ro-h committed on
Commit
3bbab5f
1 Parent(s): 7876f74

Upload api_call_v6.py

Browse files
Files changed (1) hide show
  1. api_call_v6.py +245 -0
api_call_v6.py ADDED
@@ -0,0 +1,245 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ import re
3
+
4
def get_docket_ids(search_term):
    """Search regulations.gov dockets for *search_term* and return their IDs.

    Parameters
    ----------
    search_term : str
        Free-text term passed to the API's ``filter[searchTerm]`` filter.

    Returns
    -------
    list[str]
        The docket IDs on the first page of search results, or an empty
        list when the request fails.
    """
    url = "https://api.regulations.gov/v4/dockets"
    params = {
        'filter[searchTerm]': search_term,
        'api_key': "your-api-key"
    }
    response = requests.get(url, params=params)
    if response.status_code == 200:
        data = response.json()
        return [docket['id'] for docket in data['data']]
    # BUG FIX: previously returned the string f"Error: {code}"; the caller
    # feeds this result into set.update(), which would silently add the
    # string's individual characters. Report the error and return [].
    print(f"Error: {response.status_code}")
    return []
18
+
19
class RegulationsDataFetcher:
    """Fetch docket metadata and comments from the regulations.gov v4 API.

    One instance is bound to a single docket ID; `collect_data` is the
    entry point that combines docket info with up to 10 substantive
    comments into one nested dictionary.
    """

    API_KEY = "your-api-key"
    BASE_COMMENT_URL = 'https://api.regulations.gov/v4/comments'
    BASE_DOCKET_URL = 'https://api.regulations.gov/v4/dockets/'
    HEADERS = {
        'X-Api-Key': API_KEY,
        'Content-Type': 'application/json'
    }

    def __init__(self, docket_id):
        # docket_id looks like "EPA-HQ-2021-0001"; the leading token is the agency.
        self.docket_id = docket_id
        self.docket_url = self.BASE_DOCKET_URL + docket_id
        self.dataset = []

    def fetch_comments(self):
        """Fetch a single page of 25 comments; JSON dict, or None on failure."""
        url = (f'{self.BASE_COMMENT_URL}?filter[docketId]={self.docket_id}'
               f'&page[number]=1&page[size]=25')
        response = requests.get(url, headers=self.HEADERS)
        if response.status_code == 200:
            return response.json()
        print(f'Failed to retrieve comments: {response.status_code}')
        return None

    def get_docket_info(self):
        """Return (agencyId, title, modifyDate, docketType, keywords), or None on failure."""
        response = requests.get(self.docket_url, headers=self.HEADERS)
        if response.status_code == 200:
            attributes = response.json()['data']['attributes']
            return (attributes['agencyId'],
                    attributes['title'],
                    attributes['modifyDate'],
                    attributes['docketType'],
                    attributes['keywords'])
        print(f'Failed to retrieve docket info: {response.status_code}')
        return None

    def fetch_comment_details(self, comment_url):
        """Fetch detailed information of one comment; JSON dict, or None on failure."""
        response = requests.get(comment_url, headers=self.HEADERS)
        if response.status_code == 200:
            return response.json()
        print(f'Failed to retrieve comment details: {response.status_code}')
        return None

    def collect_data(self):
        """Collect data and reshape into nested dictionary format.

        Returns
        -------
        dict | None
            Docket metadata plus up to 10 cleaned comments, or None when
            either the comment listing or the docket info request fails.
        """
        data = self.fetch_comments()
        if not data:
            return None

        docket_info = self.get_docket_info()
        if not docket_info:
            return None

        # Starting out with docket information. docket_info is guaranteed
        # truthy here (early return above), so no re-checks are needed.
        modify_date = docket_info[2]
        nested_data = {
            "id": self.docket_id,
            "agency": self.docket_id.split('-')[0],
            "title": docket_info[1],
            "update_date": modify_date.split('T')[0] if modify_date else "Unknown Update Date",
            "update_time": (modify_date.split('T')[1].strip('Z')
                            if modify_date and 'T' in modify_date
                            else "Unknown Update Time"),
            "purpose": docket_info[3],
            "keywords": docket_info[4],
            "comments": []
        }

        # Going into each docket for comment information.
        for comment in data.get('data', []):
            if len(nested_data["comments"]) >= 10:
                break

            comment_details = self.fetch_comment_details(comment['links']['self'])
            # BUG FIX: fetch_comment_details returns None on HTTP failure;
            # the original then crashed on `'data' in None` (TypeError).
            if not comment_details:
                continue
            if 'data' not in comment_details or 'attributes' not in comment_details['data']:
                continue
            comment_data = comment_details['data']['attributes']

            # Basic comment text cleaning: strip HTML artifacts the API
            # embeds, then drop HTML entities like &amp; or &nbsp;.
            comment_text = (comment_data.get('comment', '') or '').strip()
            comment_text = comment_text.replace("<br/>", "").replace("<span style='padding-left: 30px'></span>", "")
            comment_text = re.sub(r'&[^;]+;', '', comment_text)

            # Skip empty comments, attachment-only placeholders, and "n/a".
            lowered = comment_text.lower()
            if comment_text and "attached" not in lowered and "attachment" not in lowered and lowered != "n/a":
                posted = comment['attributes']['postedDate']
                nested_comment = {
                    "text": comment_text,
                    "comment_id": comment['id'],
                    "comment_url": comment['links']['self'],
                    "comment_date": posted.split('T')[0],
                    "comment_time": posted.split('T')[1].strip('Z'),
                    "commenter_fname": ((comment_data.get('firstName') or 'Anonymous').split(',')[0]).capitalize(),
                    "commenter_lname": ((comment_data.get('lastName') or 'Anonymous').split(',')[0]).capitalize(),
                    "comment_length": len(comment_text)
                }
                nested_data["comments"].append(nested_comment)

        return nested_data
120
+
121
+
122
# COLLECTING DATA

# Search terms used to locate substance-related dockets, grouped by theme.
# Each term costs one API request, so the list must contain no duplicates
# (the original listed "lofexidine" twice).
substance_related_terms = [
    # Types of Opioids
    "opioids",
    "heroin",
    "morphine",
    "fentanyl",
    "methadone",
    "oxycodone",
    "lofexidine",
    "hydrocodone",
    "codeine",
    "tramadol",
    "prescription opioids",
    # Withdrawal Support
    "buprenorphine",
    "naloxone",
    # Related Phrases
    "opioid epidemic",
    "opioid abuse",
    "opioid crisis",
    "opioid overdose",
    "opioid tolerance",
    "opioid treatment program",
    "medication assisted treatment",
    "substance abuse",
    "narcotics",
    "opioid addiction",
    "opioid withdrawal",
    "opioid dependence",
    "opioid use disorder",
    "opioid receptor",
    "pain management",
    "prescription drug abuse",
    "drug addiction treatment",
    "controlled substances",
    "opioid analgesics",
    # Additional Terms
    "naltrexone",
    "opioid detoxification",
    "opioid therapy",
    "chronic pain",
    "opioid agonist",
    "partial opioid agonist",
    "opioid antagonist",
    "drug rehabilitation",
    "overdose prevention",
    "opioid prescribing guidelines",
    "opioid risk tool",
    "opioid alternative",
    "addiction recovery",
    "addiction counseling",
    "opioid education",
    "opioid policy",
    "opioid regulation",
    # Types of Other Substances
    "marijuana",
    "cannabis",
    "THC",
    "CBD",
    "synthetic cannabinoids",
    "alcohol",
    "ethanol",
    "benzodiazepines",
    "cocaine",
    "amphetamine",
    "methamphetamine",
    "MDMA",
    "ecstasy",
    "hallucinogens",
    "LSD",
    "psilocybin",
    "ketamine",
    "inhalants",
    "steroids",
    "tobacco",
    "nicotine",
    # Related Phrases for Other Substances
    "alcohol abuse",
    "alcohol addiction",
    "alcohol dependence",
    "alcohol withdrawal",
    "alcohol treatment",
    "binge drinking",
    "drug abuse",
    "drug addiction",
    "drug dependence",
    "drug withdrawal",
    "drug treatment",
    "substance use disorder",
    "chemical dependency",
    "intoxication",
    "sobriety",
    "recovery program",
    "detoxification",
    "rehabilitation",
    "12-step program",
    "psychoactive drugs",
    "addictive behavior",
    "harm reduction",
    "substance abuse counseling",
    "addiction therapy",
    "substance abuse prevention",
    "drug education",
    "drug policy",
    "drug regulation"
]

# A set deduplicates docket IDs returned by overlapping search terms.
docket_ids = set()
all_data = []

for term in substance_related_terms:
    ids = get_docket_ids(term)
    # get_docket_ids may return an error *string* on HTTP failure; guard so
    # set.update() cannot silently add the string's individual characters.
    if isinstance(ids, list):
        docket_ids.update(ids)

for docket_id in docket_ids:
    fetcher = RegulationsDataFetcher(docket_id)
    docket_data = fetcher.collect_data()
    # Keep only dockets that yielded at least one substantive comment.
    if docket_data and len(docket_data["comments"]) != 0:
        all_data.append(docket_data)
243
+
244
+
245
+