import requests
import json

# Replace with your own regulations.gov API key.
API_KEY = "your_api_key"

def get_docket_ids(search_term):
    """Return the IDs of dockets matching a search term (first page of results)."""
    url = "https://api.regulations.gov/v4/dockets"
    params = {
        'filter[searchTerm]': search_term,
        'api_key': API_KEY
    }
    response = requests.get(url, params=params)
    if response.status_code == 200:
        data = response.json()
        docket_ids = [docket['id'] for docket in data['data']]
        return docket_ids
    # Return an empty list on failure so callers can safely update a set
    # with the result (the old error-string return would have been iterated
    # character by character).
    print(f"Error fetching dockets for '{search_term}': {response.status_code}")
    return []

class RegulationsDataFetcher:
    BASE_COMMENT_URL = 'https://api.regulations.gov/v4/comments'
    BASE_DOCKET_URL = 'https://api.regulations.gov/v4/dockets/'
    HEADERS = {
        'X-Api-Key': API_KEY,  # module-level key defined above
        'Content-Type': 'application/json'
    }

    def __init__(self, docket_id):
        self.docket_id = docket_id
        self.docket_url = self.BASE_DOCKET_URL + docket_id
        self.dataset = []

    def fetch_comments(self):
        """Fetch a single page of 25 comments."""
        url = f'{self.BASE_COMMENT_URL}?filter[docketId]={self.docket_id}&page[number]=1&page[size]=25'
        response = requests.get(url, headers=self.HEADERS)
        
        if response.status_code == 200:
            return response.json()
        else:
            print(f'Failed to retrieve comments: {response.status_code}')
            return None
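
    # A hedged sketch of a paginated variant: the API's page[number] and
    # page[size] parameters (used above) suggest that looping over page
    # numbers until a short page comes back collects the full comment set.
    # The page size of 250 and the 20-page cap are assumptions here, not
    # confirmed API limits.
    def fetch_all_comments(self, page_size=250, max_pages=20):
        """Fetch comments page by page until a short or empty page appears."""
        all_comments = []
        for page_number in range(1, max_pages + 1):
            url = (f'{self.BASE_COMMENT_URL}?filter[docketId]={self.docket_id}'
                   f'&page[number]={page_number}&page[size]={page_size}')
            response = requests.get(url, headers=self.HEADERS)
            if response.status_code != 200:
                print(f'Failed to retrieve page {page_number}: {response.status_code}')
                break
            page_data = response.json().get('data', [])
            all_comments.extend(page_data)
            if len(page_data) < page_size:
                break  # a short page means no further results
        return all_comments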

    def get_docket_info(self):
        """Get docket information."""
        response = requests.get(self.docket_url, headers=self.HEADERS)
        
        if response.status_code == 200:
            docket_data = response.json()
            return (docket_data['data']['attributes']['agencyId'],
                    docket_data['data']['attributes']['title'],
                    docket_data['data']['attributes']['modifyDate'], 
                    docket_data['data']['attributes']['docketType'], 
                    docket_data['data']['attributes']['keywords'])
        else:
            print(f'Failed to retrieve docket info: {response.status_code}')
            return None

    def fetch_comment_details(self, comment_url):
        """Fetch detailed information of a comment."""
        response = requests.get(comment_url, headers=self.HEADERS)
        if response.status_code == 200:
            return response.json()
        else:
            print(f'Failed to retrieve comment details: {response.status_code}')
            return None

    def collect_data(self):
        """Collect data and reshape into nested dictionary format."""
        data = self.fetch_comments()
        docket_info = self.get_docket_info()

        # Initialize the nested dictionary structure
        nested_data = {
            "id": self.docket_id,
            "title": docket_info[1] if docket_info else "Unknown Title",
            "context": docket_info[2] if docket_info else "Unknown Context",
            "purpose": docket_info[3],
            "keywords": docket_info[4],
            "comments": []
        }

        if data and 'data' in data:
            for comment in data['data']:
                comment_details = self.fetch_comment_details(comment['links']['self'])
                
                if comment_details and 'data' in comment_details and 'attributes' in comment_details['data']:
                    comment_data = comment_details['data']['attributes']
                    nested_comment = {
                        "text": comment_data.get('comment', ''),
                        "comment_id": comment['id'],
                        "comment_url": comment['links']['self'],
                        "comment_date": comment['attributes']['postedDate'],
                        "comment_title": comment['attributes']['title'],
                        "commenter_fname": comment_data.get('firstName', ''),
                        "commenter_lname": comment_data.get('lastName', ''),
                        "comment_length": len(comment_data.get('comment', '')) if comment_data.get('comment') is not None else 0
                    }
                    nested_data["comments"].append(nested_comment)

                # Cap the sample at 10 comments per docket.
                if len(nested_data["comments"]) >= 10:
                    break

        return nested_data
    

# CREATING DATASET
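
# For reference, each element appended to all_data below has this shape
# (field values are illustrative placeholders, not real API output):
# {
#     "id": "<docket id>",
#     "title": "<docket title>",
#     "context": "<docket modifyDate>",
#     "purpose": "<docketType>",
#     "keywords": [...],
#     "comments": [
#         {"text": ..., "comment_id": ..., "comment_url": ...,
#          "comment_date": ..., "comment_title": ...,
#          "commenter_fname": ..., "commenter_lname": ...,
#          "comment_length": ...},
#     ]
# }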
    
opioid_related_terms = [
    # Types of Opioids
    "opioids",
    "heroin", 
    "morphine", 
    "fentanyl", 
    "methadone", 
    "oxycodone", 
    "hydrocodone", 
    "codeine", 
    "tramadol", 
    "prescription opioids", 
    # Withdrawal Support
    "lofexidine", 
    "buprenorphine", 
    "naloxone", 
    # Related Phrases
    "opioid epidemic", 
    "opioid abuse", 
    "opioid crisis", 
    "opioid overdose"
    "opioid tolerance", 
    "opioid treatment program", 
    "medication assisted treatment", 
]

docket_ids = set()
all_data = []

for term in opioid_related_terms:
    docket_ids.update(get_docket_ids(term))


for docket_id in docket_ids:
    fetcher = RegulationsDataFetcher(docket_id)
    docket_data = fetcher.collect_data()
    if docket_data["comments"]:
        print(f'{docket_id} has comments')
        all_data.append(docket_data)

json_file_path = 'docket_comments.json'

# Save the collected dockets and their comments to disk.
with open(json_file_path, 'w') as f:
    json.dump(all_data, f)
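
# Optional sanity check: reload the file to confirm it parses as valid JSON.
with open(json_file_path) as f:
    reloaded = json.load(f)
print(f'Wrote {len(reloaded)} dockets with comments to {json_file_path}')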