File size: 9,723 Bytes
d1ae506
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
"""
Get Cannabis Results | Hawaii
Copyright (c) 2024 Cannlytics

Authors:
    Keegan Skeate <https://github.com/keeganskeate>
Created: 7/10/2024
Updated: 7/10/2024
License: CC-BY 4.0 <https://huggingface.co/datasets/cannlytics/cannabis_tests/blob/main/LICENSE>

Data Source:

    - Public records request

"""
# Standard imports:
import json
import os

# External imports:
from cannlytics.data.coas.coas import CoADoc
from cannlytics.data.coas import standardize_results
from cannlytics.data.coas.parsing import find_unique_analytes
from cannlytics.utils.utils import convert_to_numeric, snake_case
import pandas as pd


def extract_samples(datafile, verbose=True):
    """Extract samples from a Hawaii public-records CSV layout.

    The file is a line-oriented CSV report: sample header rows start with
    a date, and each analysis section (potency, foreign matter, microbes,
    mycotoxins, moisture, residual solvents) follows as a titled row plus
    a fixed number of analyte rows.

    Args:
        datafile (str): Path to the CSV file to parse.
        verbose (bool): Print the file being processed. Defaults to True.

    Returns:
        pandas.DataFrame: One row per sample, with `analyses` and
        `results` serialized as JSON strings.
    """

    # Read the datafile, line by line.
    if verbose: print('Processing:', datafile)
    with open(datafile, 'r') as file:
        lines = file.readlines()

    # Prefixes of report header/footer rows to skip.
    skip_prefixes = (
        '"',
        "Date",
        "Report:",
        "LicenseNum:",
        "Start:",
        "End:",
        "Status:",
        "Report Timestamp",
    )

    # Extract the data for each sample.
    parser = CoADoc()
    samples = []
    obs = None
    analyses, results = [], []
    for i, line in enumerate(lines):

        # Skip nuisance rows (str.startswith accepts a tuple of prefixes).
        if line.startswith(skip_prefixes):
            continue

        # Get all values.
        values = line.replace('\n', '').split(',')

        # Skip blank rows.
        if all(x == '' for x in values):
            continue

        # Identify samples as rows that start with a date.
        # Note: a non-date first column raises, which marks a non-sample row.
        try:
            date = pd.to_datetime(values[0])
        except Exception:
            date = None
        if date and values[0] != '':

            # Record results for any existing observation.
            if obs is not None:
                obs['analyses'] = json.dumps(analyses)
                obs['results'] = json.dumps(results)
                samples.append(obs)
                analyses, results = [], []

            # Get the details for each sample.
            # Expected layout: batch number and sample ID combined in
            # column 4 as `batch (id: sample)`.
            try:
                ids = values[4]
                batch_number = ids.split('(')[0].split(':')[-1].strip()
                sample_id = ids.split(':')[-1].replace(')', '').strip()
                obs = {
                    'date_tested': date.isoformat(),
                    'product_type': values[1],
                    'strain_name': values[2],
                    'product_name': values[3],
                    'batch_number': batch_number,
                    'sample_id': sample_id,
                    'status': values[-3],
                    'producer': values[-2],
                    'producer_license_number': values[-1],
                }
            # Handle long product names that wrap onto the next line(s).
            except Exception:
                try:
                    row = lines[i + 1].replace('\n', '').split(',')
                    ids = row[1]
                except Exception:
                    # Product name wrapped over two extra lines.
                    row = lines[i + 1].replace('\n', '').split(',') + lines[i + 2].replace('\n', '').split(',')
                    ids = row[-4]
                batch_number = ids.split('(')[0].split(':')[-1].strip()
                sample_id = ids.split(':')[-1].replace(')', '').strip()
                obs = {
                    'date_tested': date.isoformat(),
                    'product_type': values[1],
                    'strain_name': values[2],
                    'product_name': ' '.join([values[3], row[0]]),
                    'batch_number': batch_number,
                    'sample_id': sample_id,
                    'status': row[-3],
                    'producer': row[-2],
                    'producer_license_number': row[-1],
                }
            continue

        # Get the cannabinoid results (5 analyte rows follow the title).
        if values[0] == 'Potency Analysis Test':
            analyses.append('cannabinoids')
            n_analytes = 5
            for n in range(1, n_analytes + 1):
                row = lines[i + n].replace('\n', '').split(',')
                name = row[0]
                key = parser.analytes.get(snake_case(name), snake_case(name))
                # The 'Total' row is a sample-level value, not an analyte.
                if name == 'Total':
                    obs['total_cannabinoids'] = convert_to_numeric(row[1].replace('%', ''))
                    continue
                results.append({
                    'analysis': 'cannabinoids',
                    'key': key,
                    'name': name,
                    'value': convert_to_numeric(row[1].replace('%', '')),
                    'status': row[2],
                })
            continue

        # Get the foreign matter results (single analyte row).
        if values[0] == 'Foreign Matter Inspection Test':
            analyses.append('foreign_matter')
            row = lines[i + 1].replace('\n', '').split(',')
            results.append({
                'analysis': 'foreign_matter',
                'key': 'foreign_matter',
                'name': 'Foreign Matter',
                'value': convert_to_numeric(row[1].replace('%', '')),
                'status': row[2],
            })
            continue

        # Get the microbe results (6 analyte rows; value and units share a cell).
        if values[0] == 'Microbiological Screening Test':
            analyses.append('microbes')
            n_analytes = 6
            for n in range(1, n_analytes + 1):
                row = lines[i + n].replace('\n', '').split(',')
                name = row[0]
                key = parser.analytes.get(snake_case(name), snake_case(name))
                if name == '': continue
                results.append({
                    'analysis': 'microbes',
                    'key': key,
                    'name': name,
                    'value': convert_to_numeric(row[1].split(' ')[0]),
                    'units': row[1].split(' ')[-1],
                    'status': row[2],
                })
            continue

        # Get the mycotoxin results (single analyte row).
        if values[0] == 'Mycotoxin Screening Test':
            analyses.append('mycotoxins')
            row = lines[i + 1].replace('\n', '').split(',')
            name = row[0]
            key = parser.analytes.get(snake_case(name), snake_case(name))
            results.append({
                'analysis': 'mycotoxins',
                'key': key,
                'name': name,
                'value': convert_to_numeric(row[1].split(' ')[0]),
                'status': row[2],
            })
            continue

        # Get the moisture content result (sample-level value).
        if values[0] == 'Moisture Content Test':
            analyses.append('moisture_content')
            row = lines[i + 1].replace('\n', '').split(',')
            value = convert_to_numeric(row[1].replace('%', ''))
            obs['moisture_content'] = value
            continue

        # Get the residual solvent results.
        # Solvent names come from the rule list, not the file rows.
        # See: https://health.hawaii.gov/medicalcannabis/files/2022/05/Chapter-11-850-Hawaii-Administrative-Interim-Rules-Effective-April-29-2022.pdf
        if values[0] == 'Residual Solvent Test':
            analyses.append('residual_solvents')
            solvents = [
                'Benzene',
                'Butane',
                'Ethanol',
                'Heptane',
                'Hexane',
                'Pentane',
                'Toluene',
                'Total xylenes'
            ]
            for n in range(1, len(solvents) + 1):
                row = lines[i + n].replace('\n', '').split(',')
                name = solvents[n - 1]
                key = parser.analytes.get(snake_case(name), snake_case(name))
                results.append({
                    'analysis': 'residual_solvents',
                    'key': key,
                    'name': name,
                    'value': convert_to_numeric(row[1].split(' ')[0]),
                    'units': row[1].split(' ')[-1],
                    'status': row[2],
                })
            continue

    # Record the last sample's results, guarding against files
    # with no sample rows (previously crashed with TypeError).
    if obs is not None:
        obs['analyses'] = json.dumps(analyses)
        obs['results'] = json.dumps(results)
        samples.append(obs)

    # Return the samples.
    return pd.DataFrame(samples)


# === Tests ===
# [✓] Tested: 2024-07-10 by Keegan Skeate <keegan@cannlytics.com>
if __name__ == '__main__':

    # Locate the public-records CSV files.
    data_dir = 'D://data/hawaii/public-records'
    datafiles = []
    for filename in os.listdir(data_dir):
        if filename.endswith('.csv'):
            datafiles.append(os.path.join(data_dir, filename))

    # Parse every CSV file and aggregate the samples.
    data = [extract_samples(file) for file in datafiles]
    results = pd.concat(data, ignore_index=True)
    print('Number of results:', len(results))

    # Standardize the results to one column per analyte.
    analytes = sorted(list(find_unique_analytes(results)))
    results = standardize_results(results, analytes)

    # Standardize time, adding week and month periods.
    results['date'] = pd.to_datetime(results['date_tested'], format='mixed')
    results['week'] = results['date'].dt.to_period('W').astype(str)
    results['month'] = results['date'].dt.to_period('M').astype(str)
    results = results.sort_values('date')

    # Save the curated results in Excel, CSV, and JSONL formats.
    outfile = 'D://data/hawaii/hi-results-latest.xlsx'
    outfile_csv = 'D://data/hawaii/hi-results-latest.csv'
    outfile_json = 'D://data/hawaii/hi-results-latest.jsonl'
    results.to_excel(outfile, index=False)
    results.to_csv(outfile_csv, index=False)
    results.to_json(outfile_json, orient='records', lines=True)
    print('Saved Excel:', outfile)
    print('Saved CSV:', outfile_csv)
    print('Saved JSON:', outfile_json)

    # Print out the features.
    features = {x: 'string' for x in results.columns}
    print('Number of features:', len(features))
    print('Features:', features)