keeganskeate committed
Commit d1ae506 (1 parent: 671cdca)

latest-2024-08-11 (#6)


- Added all `.zip` files (cd9e97ca527113da9a60f1c024039ebc3f9fabf3)
- Added all `.csv` files (e09bf762ace1faba40bac14a15c935769d8500f9)
- Added all `.xlsx` files (522ebd4d2884347969f2b0475d4decffa1e76239)
- Updated cannabis results 2024-08-15 (64a8aded2aa9612a55fcd9f6d5f62f4fd085bf62)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +2 -51
  2. .gitignore +128 -5
  3. LICENSE +0 -7
  4. README.md +76 -208
  5. algorithms/algorithm_constants.py +0 -903
  6. algorithms/algorithm_utils.py +0 -240
  7. algorithms/{get_results_psilabs.py → archived/get_results_mi_psi_labs.py} +9 -28
  8. algorithms/get_results_ak.py +223 -0
  9. algorithms/get_results_ca_flower_co.py +585 -0
  10. algorithms/get_results_ca_glass_house.py +346 -0
  11. algorithms/{get_results_rawgarden.py → get_results_ca_rawgarden.py} +251 -166
  12. algorithms/get_results_ca_sc_labs.py +235 -0
  13. algorithms/get_results_ct.py +280 -0
  14. algorithms/get_results_fl_flowery.py +231 -0
  15. algorithms/get_results_fl_jungleboys.py +314 -0
  16. algorithms/get_results_fl_kaycha.py +477 -0
  17. algorithms/get_results_fl_medical.py +553 -0
  18. algorithms/get_results_fl_terplife.py +269 -0
  19. algorithms/get_results_hi.py +274 -0
  20. algorithms/get_results_ma.py +99 -0
  21. algorithms/get_results_mcrlabs.py +0 -63
  22. algorithms/get_results_md.py +160 -0
  23. algorithms/get_results_mi.py +461 -0
  24. algorithms/get_results_nv.py +440 -0
  25. algorithms/get_results_ny.py +718 -0
  26. algorithms/get_results_or.py +256 -0
  27. algorithms/get_results_ri.py +244 -0
  28. algorithms/get_results_sclabs.py +0 -133
  29. algorithms/get_results_sdpharmlabs.py +0 -28
  30. algorithms/get_results_ut.py +188 -0
  31. algorithms/get_results_wa.py +292 -0
  32. algorithms/get_results_wa_inventory.py +514 -0
  33. algorithms/get_results_wa_strains.py +264 -0
  34. algorithms/get_results_washington_ccrs.py +0 -471
  35. algorithms/get_results_washington_leaf.py +0 -490
  36. algorithms/main.py +0 -370
  37. analysis/analyze_results.py +79 -0
  38. analysis/analyze_results_ca.py +320 -0
  39. analysis/analyze_results_co.py +86 -0
  40. analysis/analyze_results_ct.py +1205 -0
  41. analysis/analyze_results_fl.py +581 -0
  42. analysis/analyze_results_ma.py +886 -0
  43. analysis/analyze_results_md.py +9 -0
  44. analysis/analyze_results_mi.py +9 -0
  45. analysis/analyze_results_nv.py +165 -0
  46. analysis/analyze_results_ny.py +591 -0
  47. analysis/analyze_results_or.py +67 -0
  48. analysis/analyze_results_ri.py +134 -0
  49. analysis/analyze_results_ut.py +413 -0
  50. analysis/analyze_results_wa.py +9 -0
.gitattributes CHANGED
@@ -1,52 +1,3 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.lz4 filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
- # Image files - uncompressed
- *.bmp filter=lfs diff=lfs merge=lfs -text
- *.gif filter=lfs diff=lfs merge=lfs -text
- *.png filter=lfs diff=lfs merge=lfs -text
- *.tiff filter=lfs diff=lfs merge=lfs -text
- # Image files - compressed
- *.jpg filter=lfs diff=lfs merge=lfs -text
- *.jpeg filter=lfs diff=lfs merge=lfs -text
- *.webp filter=lfs diff=lfs merge=lfs -text
  *.csv filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.xlsx filter=lfs diff=lfs merge=lfs -text
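As a usage note: the two added patterns are exactly what running `git lfs track "*.zip"` and `git lfs track "*.xlsx"` would append, so the new archive and spreadsheet datafiles are stored with Git LFS (this is an equivalent way to reproduce the change, not part of the commit itself).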
.gitignore CHANGED
@@ -1,8 +1,131 @@
- # Ignore environment variables.
+ # Logs
+ *.log
+ npm-debug.log*
+ yarn-debug.log*
+ yarn-error.log*
+
+ # Runtime data
+ pids
+ *.pid
+ *.seed
+ *.pid.lock
+
+ # Directory for instrumented libs generated by jscoverage/JSCover
+ lib-cov
+
+ # Coverage directory used by tools like istanbul
+ coverage
+
+ # nyc test coverage
+ .nyc_output
+
+ # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files)
+ .grunt
+
+ # Bower dependency directory (https://bower.io/)
+ bower_components
+
+ # node-waf configuration
+ .lock-wscript
+
+ # Compiled binary addons (https://nodejs.org/api/addons.html)
+ build/Release
+
+ # Dependency directories
+ node_modules/
+ jspm_packages/
+
+ # TypeScript v1 declaration files
+ typings/
+
+ # Optional npm cache directory
+ .npm
+
+ # Optional eslint cache
+ .eslintcache
+
+ # Optional REPL history
+ .node_repl_history
+
+ # Output of 'npm pack'
+ *.tgz
+
+ # Yarn Integrity file
+ .yarn-integrity
+
+ # dotenv environment variables file
  *.env
+ .env
+ docker.env
+ env.yaml
+
+ # next.js build output
+ .next
+
+ # Ignore Admin Tokens
+ .admin/tokens
+
+ # Ignore any SQL database
+ *.sqlite3
+
+ # Ignore documentation build
+ docs/build
+
+ # Ignore Firebase source control
+ # https://firebase.google.com/docs/cli/#source_control_aliases
+ .firebaserc
+
+ # Ignore datasets created or retrieved with data tools.
+ *.datasets
+ .DS_Store
+
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+ *.pyc
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ pip-wheel-metadata/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # Testing
+ tmp/
+
+ # Ignore Firebase cache.
+ *.cache
+
+ # Ignore Webpack bundles.
+ lims/static/console/bundles/*
+ website/static/website/js/bundles/*
+ *webpack-stats.json

- # Ignore temporary files.
- *tmp
+ # Ignore local copy of cannlytics.
+ # cannlytics/*
+ /public/static/*
+ public/lims/
+ public/website/
+ public/docs/

- # Ignore PDFs.
- *pdfs
+ # Exception for Flutter library.
+ !app/lib/
+ !data/lib/
+ !metrc/lib/
+ *firebase_options.dart
LICENSE CHANGED
@@ -51,11 +51,8 @@ exhaustive, and do not form part of our licenses.
  respect those requests where reasonable. More considerations
  for the public:
  wiki.creativecommons.org/Considerations_for_licensees
-
  =======================================================================
-
  Creative Commons Attribution 4.0 International Public License
-
  By exercising the Licensed Rights (defined below), You accept and agree
  to be bound by the terms and conditions of this Creative Commons
  Attribution 4.0 International Public License ("Public License"). To the
@@ -64,10 +61,7 @@ granted the Licensed Rights in consideration of Your acceptance of
  these terms and conditions, and the Licensor grants You such rights in
  consideration of benefits the Licensor receives from making the
  Licensed Material available under these terms and conditions.
-
-
  Section 1 -- Definitions.
-
  a. Adapted Material means material subject to Copyright and Similar
     Rights that is derived from or based upon the Licensed Material
     and in which the Licensed Material is translated, altered,
@@ -77,7 +71,6 @@ Section 1 -- Definitions.
     Material is a musical work, performance, or sound recording,
     Adapted Material is always produced where the Licensed Material is
     synched in timed relation with a moving image.
-
  b. Adapter's License means the license You apply to Your Copyright
     and Similar Rights in Your contributions to Adapted Material in
     accordance with the terms and conditions of this Public License.
README.md CHANGED
@@ -1,94 +1,98 @@
  ---
- annotations_creators:
- - expert-generated
- language_creators:
- - expert-generated
+ pretty_name: cannabis_results
  license:
  - cc-by-4.0
- pretty_name: cannabis_tests
- size_categories:
- - 1K<n<10K
- source_datasets:
- - original
  tags:
  - cannabis
- - lab results
+ - lab
  - tests
+ - results
+ configs:
+ # - config_name: all
+ #   data_files: "data/all/all-results-latest.csv"
+ #   default: true
+ # - config_name: ak
+ #   data_files: "data/ak/ak-results-latest.csv"
+ - config_name: ca
+   data_files: "data/ca/ca-results-latest.xlsx"
+   default: true
+ - config_name: co
+   data_files: "data/co/co-results-latest.xlsx"
+ - config_name: ct
+   data_files: "data/ct/ct-results-latest.xlsx"
+ - config_name: fl
+   data_files: "data/fl/fl-results-latest.xlsx"
+ - config_name: hi
+   data_files: "data/hi/hi-results-latest.csv"
+ - config_name: ma
+   data_files: "data/ma/ma-results-latest.csv"
+ - config_name: md
+   data_files: "data/md/md-results-latest.csv"
+ - config_name: mi
+   data_files: "data/mi/mi-results-latest.csv"
+ - config_name: nv
+   data_files: "data/nv/nv-results-latest.csv"
+ - config_name: ny
+   data_files: "data/ny/ny-results-latest.csv"
+ - config_name: or
+   data_files: "data/or/or-results-latest.csv"
+ - config_name: ri
+   data_files: "data/ri/ri-results-latest.csv"
+ - config_name: ut
+   data_files: "data/ut/ut-results-latest.csv"
+ - config_name: wa
+   data_files: "data/wa/wa-results-latest.xlsx"
  ---

- # Cannabis Tests, Curated by Cannlytics
+ # Cannabis Results

- <div style="margin-top:1rem; margin-bottom: 1rem;">
- <img width="240px" alt="" src="https://firebasestorage.googleapis.com/v0/b/cannlytics.appspot.com/o/public%2Fimages%2Fdatasets%2Fcannabis_tests%2Fcannabis_tests_curated_by_cannlytics.png?alt=media&token=22e4d1da-6b30-4c3f-9ff7-1954ac2739b2">
- </div>
+ > Curated by [🔥Cannlytics](https://cannlytics.com), an open-source cannabis data and analytics company. If you find the data valuable, then consider [contributing $1 to help advance cannabis science!](https://opencollective.com/cannlytics-company)

- ## Table of Contents
- - [Table of Contents](#table-of-contents)
- - [Dataset Description](#dataset-description)
-   - [Dataset Summary](#dataset-summary)
- - [Dataset Structure](#dataset-structure)
-   - [Data Instances](#data-instances)
-   - [Data Fields](#data-fields)
-   - [Data Splits](#data-splits)
- - [Dataset Creation](#dataset-creation)
-   - [Curation Rationale](#curation-rationale)
-   - [Source Data](#source-data)
-     - [Data Collection and Normalization](#data-collection-and-normalization)
-   - [Personal and Sensitive Information](#personal-and-sensitive-information)
- - [Considerations for Using the Data](#considerations-for-using-the-data)
-   - [Social Impact of Dataset](#social-impact-of-dataset)
-   - [Discussion of Biases](#discussion-of-biases)
-   - [Other Known Limitations](#other-known-limitations)
- - [Additional Information](#additional-information)
-   - [Dataset Curators](#dataset-curators)
-   - [License](#license)
-   - [Citation](#citation)
-   - [Contributions](#contributions)
+ This is a repository of public cannabis lab test results obtained through public records requests and certificates of analysis (COAs) available online. Lab results are useful for cannabis cultivators, processors, retailers, consumers, and everyone else interested in cannabis. The repository contains raw data and curated datafiles for states with permitted cannabis markets where data is available. The curated datafiles are cleaned and standardized for ease of use, but may contain errors. The raw data is provided for transparency, reproducibility, and for you to improve upon.

- ## Dataset Description
+ > Note: Any data of individuals and/or their contact information may not be used for commercial purposes. The data represents only a subset of the population of cannabis lab results, and the non-random nature of data collection should be taken into consideration.

- - **Homepage:** <https://github.com/cannlytics/cannlytics>
- - **Repository:** <https://huggingface.co/datasets/cannlytics/cannabis_tests>
- - **Point of Contact:** <dev@cannlytics.com>
+ ## Using the Data

- ### Dataset Summary
-
- This dataset is a collection of public cannabis lab test results parsed by [`CoADoc`](https://github.com/cannlytics/cannlytics/tree/main/cannlytics/data/coas), a certificate of analysis (COA) parsing tool.
-
- ## Dataset Structure
-
- The dataset is partitioned into the various sources of lab results.
-
- | Subset | Source | Observations |
- |--------|--------|--------------|
- | `rawgarden` | Raw Gardens | 2,667 |
- | `mcrlabs` | MCR Labs | Coming soon! |
- | `psilabs` | PSI Labs | Coming soon! |
- | `sclabs` | SC Labs | Coming soon! |
- | `washington` | Washington State | Coming soon! |
-
- ### Data Instances
-
- You can load the `details` for each of the dataset files. For example:
+ The data is split into subsets for each state where data is available. You can download a given subset by specifying the state's abbreviation, e.g. `ca` for California (CA) results.

  ```py
  from datasets import load_dataset

- # Download Raw Garden lab result details.
- dataset = load_dataset('cannlytics/cannabis_tests', 'rawgarden')
- details = dataset['details']
- assert len(details) > 0
- print('Downloaded %i observations.' % len(details))
+ # Download California results.
+ dataset = load_dataset('cannlytics/cannabis_results', 'ca')
+ data = dataset['data']
+ assert len(data) > 0
+ print('Downloaded %i observations.' % len(data))
  ```

- > Note: Configurations for `results` and `values` are planned. For now, you can create these data with `CoADoc().save(details, out_file)`.
-
- ### Data Fields
+ ## Subsets
+
+ <!-- Automated Table -->
+ | Subset | State | Observations |
+ |--------|-------|--------------|
+ | `ca` | CA | 71,581 |
+ | `co` | CO | 25,798 |
+ | `ct` | CT | 19,963 |
+ | `fl` | FL | 14,573 |
+ | `hi` | HI | 13,485 |
+ | `ma` | MA | 75,164 |
+ | `md` | MD | 105,013 |
+ | `mi` | MI | 89,956 |
+ | `nv` | NV | 153,064 |
+ | `ny` | NY | 330 |
+ | `or` | OR | 196,900 |
+ | `ri` | RI | 25,832 |
+ | `ut` | UT | 1,230 |
+ | `wa` | WA | 202,812 |
+
+ ## Data Points

  Below is a non-exhaustive list of fields, used to standardize the various data that are encountered, that you may expect to encounter in the parsed COA data.

  | Field | Example | Description |
- |-------|-----|-------------|
+ |-------|---------|-------------|
  | `analyses` | ["cannabinoids"] | A list of analyses performed on a given sample. |
  | `{analysis}_method` | "HPLC" | The method used for each analysis. |
  | `{analysis}_status` | "pass" | The pass, fail, or N/A status for pass / fail analyses. |
@@ -132,7 +136,7 @@ Below is a non-exhaustive list of fields, used to standardize the various data t
  | `results_hash` | "{sha256-hash}" | An HMAC of the sample's `results` JSON signed with Cannlytics' public key, `"cannlytics.eth"`. |
  | `sample_id` | "{sha256-hash}" | A generated ID to uniquely identify the `producer`, `product_name`, and `results`. |
  | `sample_hash` | "{sha256-hash}" | An HMAC of the entire sample JSON signed with Cannlytics' public key, `"cannlytics.eth"`. |
- <!-- | `strain_name` | "Blue Rhino" | A strain name, if specified. Otherwise, can be attempted to be parsed from the `product_name`. | -->
+ | `strain_name` | "Blue Rhino" | A strain name, if specified. Otherwise, can be attempted to be parsed from the `product_name`. |

  Each result can contain the following fields.

@@ -149,133 +153,10 @@ Each result can contain the following fields.
  | `loq` | 0.1 | The limit of quantification for the result analyte. Values above the `lod` but below the `loq` are typically reported as `<LOQ`. |
  | `status` | "pass" | The pass / fail status for contaminant screening analyses. |

- ### Data Splits
-
- The data is split into `details`, `results`, and `values` data. Configurations for `results` and `values` are planned. For now, you can create these data with:
-
- ```py
- from cannlytics.data.coas import CoADoc
- from datasets import load_dataset
- import pandas as pd
-
- # Download Raw Garden lab result details.
- repo = 'cannlytics/cannabis_tests'
- dataset = load_dataset(repo, 'rawgarden')
- details = dataset['details']
-
- # Save the data locally with "Details", "Results", and "Values" worksheets.
- outfile = 'details.xlsx'
- parser = CoADoc()
- parser.save(details.to_pandas(), outfile)
-
- # Read the values.
- values = pd.read_excel(outfile, sheet_name='Values')
-
- # Read the results.
- results = pd.read_excel(outfile, sheet_name='Results')
- ```
-
- <!-- Training data is used for training your models. Validation data is used for evaluating your trained models, to help you determine a final model. Test data is used to evaluate your final model. -->
-
- ## Dataset Creation
-
- ### Curation Rationale
-
- Certificates of analysis (CoAs) are abundant for cannabis cultivators, processors, retailers, and consumers too, but the data is often locked away. Rich, valuable laboratory data so close, yet so far away! CoADoc puts these vital data points in your hands by parsing PDFs and URLs, finding all the data, standardizing the data, and cleanly returning the data to you.
-
- ### Source Data
-
- | Data Source | URL |
- |-------------|-----|
- | MCR Labs Test Results | <https://reports.mcrlabs.com> |
- | PSI Labs Test Results | <https://results.psilabs.org/test-results/> |
- | Raw Garden Test Results | <https://rawgarden.farm/lab-results/> |
- | SC Labs Test Results | <https://client.sclabs.com/> |
- | Washington State Lab Test Results | <https://lcb.app.box.com/s/e89t59s0yb558tjoncjsid710oirqbgd> |
-
- #### Data Collection and Normalization
-
- You can recreate the dataset using the open source algorithms in the repository. First clone the repository:
-
- ```
- git clone https://huggingface.co/datasets/cannlytics/cannabis_tests
- ```
-
- You can then install the algorithm Python (3.9+) requirements:
-
- ```
- cd cannabis_tests
- pip install -r requirements.txt
- ```
-
- Then you can run all of the data-collection algorithms:
-
- ```
- python algorithms/main.py
- ```
-
- Or you can run each algorithm individually. For example:
-
- ```
- python algorithms/get_results_mcrlabs.py
- ```
-
- In the `algorithms` directory, you can find the data collection scripts described in the table below.
-
- | Algorithm | Organization | Description |
- |-----------|--------------|-------------|
- | `get_results_mcrlabs.py` | MCR Labs | Get lab results published by MCR Labs. |
- | `get_results_psilabs.py` | PSI Labs | Get historic lab results published by PSI Labs. |
- | `get_results_rawgarden.py` | Raw Garden | Get lab results Raw Garden publishes for their products. |
- | `get_results_sclabs.py` | SC Labs | Get lab results published by SC Labs. |
- | `get_results_washington.py` | Washington State | Get historic lab results obtained through a FOIA request in Washington State. |
-
- ### Personal and Sensitive Information
-
- The dataset includes public addresses and contact information for related cannabis licensees. It is important to take care to use these data points in a legal manner.
-
- ## Considerations for Using the Data
-
- ### Social Impact of Dataset
-
- Arguably, there is substantial social impact that could result from the study of cannabis, therefore, researchers and data consumers alike should take the utmost care in the use of this dataset.
-
- ### Discussion of Biases
-
- Cannlytics is a for-profit data and analytics company that primarily serves cannabis businesses. The data are not randomly collected and thus sampling bias should be taken into consideration.
-
- ### Other Known Limitations
-
- The data represents only a subset of the population of cannabis lab results. Non-standard values are coded as follows.
-
- | Actual | Coding |
- |--------|--------|
- | `'ND'` | `0.000000001` |
- | `'No detection in 1 gram'` | `0.000000001` |
- | `'Negative/1g'` | `0.000000001` |
- | `'PASS'` | `0.000000001` |
- | `'<LOD'` | `0.00000001` |
- | `'< LOD'` | `0.00000001` |
- | `'<LOQ'` | `0.0000001` |
- | `'< LOQ'` | `0.0000001` |
- | `'<LLOQ'` | `0.0000001` |
- | `'≥ LOD'` | `10001` |
- | `'NR'` | `None` |
- | `'N/A'` | `None` |
- | `'na'` | `None` |
- | `'NT'` | `None` |
-
- ## Additional Information
-
- ### Dataset Curators
-
- Curated by [🔥Cannlytics](https://cannlytics.com)<br>
- <dev@cannlytics.com>
-
- ### License
+ ## License

  ```
- Copyright (c) 2022 Cannlytics and the Cannabis Data Science Team
+ Copyright (c) 2022-2024 Cannlytics

  The files associated with this dataset are licensed under a
  Creative Commons Attribution 4.0 International license.
@@ -289,19 +170,6 @@ any content within the dataset that is identified as belonging
  to a third party.
  ```

- ### Citation
-
- Please cite the following if you use the code examples in your research:
-
- ```bibtex
- @misc{cannlytics2022,
-   title={Cannabis Data Science},
-   author={Skeate, Keegan and O'Sullivan-Sutherland, Candace},
-   journal={https://github.com/cannlytics/cannabis-data-science},
-   year={2022}
- }
- ```
-
- ### Contributions
+ ## Contributors

  Thanks to [🔥Cannlytics](https://cannlytics.com), [@candy-o](https://github.com/candy-o), [@hcadeaux](https://huggingface.co/hcadeaux), [@keeganskeate](https://github.com/keeganskeate), [The CESC](https://thecesc.org), and the entire [Cannabis Data Science Team](https://meetup.com/cannabis-data-science/members) for their contributions.
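As a usage note on the new README's example above: a minimal sketch of taking a downloaded subset further with pandas. This assumes the `data` split shown in the README; `Dataset.to_pandas()` is standard `datasets` API, while the columns present will vary by state.

```py
from datasets import load_dataset

# Download a state subset, e.g. Maryland results.
dataset = load_dataset('cannlytics/cannabis_results', 'md')
df = dataset['data'].to_pandas()

# Inspect which standardized fields are actually present.
print(df.shape)
print(df.columns.tolist())
```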
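Several subsets (`ca`, `co`, `ct`, `fl`, `wa`) point at `.xlsx` datafiles. If `load_dataset` cannot read a given spreadsheet, the raw datafile can be fetched directly from the dataset repo instead; a sketch, assuming the `data/{state}/{state}-results-latest.xlsx` layout from the config list above (reading the spreadsheet requires `openpyxl`):

```py
import pandas as pd
from huggingface_hub import hf_hub_download

# Fetch the latest California datafile straight from the dataset repo.
path = hf_hub_download(
    repo_id='cannlytics/cannabis_results',
    filename='data/ca/ca-results-latest.xlsx',
    repo_type='dataset',
)
ca_results = pd.read_excel(path)
print('Read %i California results.' % len(ca_results))
```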
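The field tables above describe `results_hash` and `sample_hash` as HMACs of the results/sample JSON signed with Cannlytics' public key, `"cannlytics.eth"`. A hedged sketch of that idea follows; the canonical JSON serialization and key handling Cannlytics actually uses are assumptions here, so treat this as illustrative rather than a way to reproduce the published hashes.

```py
import hashlib
import hmac
import json

def hash_sample(sample: dict, key: str = 'cannlytics.eth') -> str:
    """Compute an HMAC-SHA256 of a sample's JSON.
    Illustrative only: the exact serialization is an assumption."""
    message = json.dumps(sample, sort_keys=True, default=str).encode()
    return hmac.new(key.encode(), message, hashlib.sha256).hexdigest()

print(hash_sample({'product_name': 'Example', 'results': []}))
```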
algorithms/algorithm_constants.py DELETED
@@ -1,903 +0,0 @@
- """
- Cannabis Tests | Algorithm Constants
- Copyright (c) 2022 Cannlytics
-
- Authors:
-     Keegan Skeate <https://github.com/keeganskeate>
-     Candace O'Sullivan-Sutherland <https://github.com/candy-o>
- Created: 1/18/2022
- Updated: 9/16/2022
- License: CC-BY 4.0 <https://huggingface.co/datasets/cannlytics/cannabis_tests/blob/main/LICENSE>
- """
-
- SC_LABS_PRODUCER_IDS = [
-     '6',
-     '23',
-     '214',
-     '257',
-     '325',
-     '365',
-     '546',
-     '936',
-     '971',
-     '1064',
-     '1212',
-     '1303',
-     '1360',
-     '1503',
-     '1523',
-     '1739',
-     '1811',
-     '1822',
-     '1995',
-     '2243',
-     '2411',
-     '2619',
-     '2728',
-     '2798',
-     '2821',
-     '2850',
-     '2884',
-     '3146',
-     '3153',
-     '3193',
-     '3430',
-     '3448',
-     '3506',
-     '3785',
-     '3798',
-     '3905',
-     '3926',
-     '4069',
-     '4085',
-     '4200',
-     '4252',
-     '4287',
-     '4446',
-     '4512',
-     '4614',
-     '4702',
-     '5029',
-     '5071',
-     '5096',
-     '5139',
-     '5164',
-     '5282',
-     '5505',
-     '5560',
-     '5615',
-     '5950',
-     '6071',
-     '6109',
-     '6112',
-     '6145',
-     '6272',
-     '6331',
-     '6340',
-     '6358',
-     '6399',
-     '6437',
-     '6756',
-     '6762',
-     '6771',
-     '6791',
-     '6815',
-     '6873',
-     '6882',
-     '6887',
-     '6900',
-     '6913',
-     '6933',
-     '7005',
-     '7034',
-     '7065',
-     '7066',
-     '7102',
-     '7112',
-     '7118',
-     '7131',
-     '7132',
-     '7134',
-     '7139',
-     '7147',
-     '7149',
-     '7159',
-     '7169',
-     '7172',
-     '7176',
-     '7195',
-     '7198',
-     '7218',
-     '7221',
-     '7228',
-     '7233',
-     '7249',
-     '7250',
-     '7253',
-     '7275',
-     '7277',
-     '7284',
-     '7303',
-     '7329',
-     '7337',
-     '7346',
-     '7349',
-     '7382',
-     '7393',
-     '7396',
-     '7406',
-     '7414',
-     '7428',
-     '7454',
-     '7472',
-     '7481',
-     '7486',
-     '7503',
-     '7509',
-     '7510',
-     '7524',
-     '7544',
-     '7569',
-     '7589',
-     '7675',
-     '7885',
-     '7939',
-     '7948',
-     '7955',
-     '7959',
-     '7984',
-     '8013',
-     '8027',
-     '8042',
-     '8079',
-     '8082',
-     '8099',
-     '8101',
-     '8104',
-     '8121',
-     '8143',
-     '8156',
-     '8168',
-     '8193',
-     '8269',
-     '8278',
-     '8285',
-     '8381',
-     '8490',
-     '8497',
-     '8516',
-     '8647',
-     '8661',
-     '8676',
-     '8710',
-     '8719',
-     '8724',
-     '8732',
-     '8776',
-     '8778',
-     '8782',
-     '8791',
-     '8809',
-     '8836',
-     '8838',
-     '8839',
-     '8856',
-     '8917',
-     '8923',
-     '8940',
-     '8954',
-     '8992',
-     '9002',
-     '9013',
-     '9071',
-     '9104',
-     '9115',
-     '9147',
-     '9176',
-     '9206',
-     '9216',
-     '9220',
-     '9281',
-     '9292',
-     '9325',
-     '9346',
-     '9370',
-     '9372',
-     '9393',
-     '9420',
-     '9431',
-     '9438',
-     '9460',
-     '9473',
-     '9476',
-     '9484',
-     '9515',
-     '9516',
-     '9536',
-     '9575',
-     '9583',
-     '9584',
-     '9589',
-     '9609',
-     '9647',
-     '9689',
-     '9709',
-     '9715',
-     '9716',
-     '9725',
-     '9726',
-     '9736',
-     '9742',
-     '9745',
-     '9746',
-     '9753',
-     '9787',
-     '9796',
-     '9802',
-     '9805',
-     '9811',
-     '9848',
-     '9856',
-     '9861',
-     '9863',
-     '9872',
-     '9895',
-     '9907',
-     '9912',
-     '9923',
-     '9940',
-     '9958',
-     '9959',
-     '9965',
-     '9982',
-     '9984',
-     '10006',
-     '10014',
-     '10019',
-     '10020',
-     '10022',
-     '10033',
-     '10074',
-     '10085',
-     '10140',
-     '10145',
-     '10164',
-     '10169',
-     '10180',
-     '10197',
-     '10221',
-     '10252',
-     '10254',
-     '10265',
-     '10276',
-     '10293',
-     '10300',
-     '10307',
-     '10316',
-     '10357',
-     '10366',
-     '10376',
-     '10382',
-     '10388',
-     '10394',
-     '10405',
-     '10415',
-     '10446',
-     '10447',
-     '10474',
-     '10477',
-     '10478',
-     '10481',
-     '10482',
-     '10487',
-     '10505',
-     '10513',
-     '10519',
-     '10543',
-     '10553',
-     '10570',
-     '10573',
-     '10590',
-     '10598',
-     '10639',
-     '10644',
-     '10651',
-     '10679',
-     '10683',
-     '10685',
-     '10727',
-     '10767',
-     '10773',
-     '10783',
-     '10793',
-     '10813',
-     '10815',
-     '10830',
-     '10833',
-     '10886',
-     '10905',
-     '10915',
-     '10922',
-     '10924',
-     '10934',
-     '10998',
-     '11006',
-     '11022',
-     '11031',
-     '11033',
-     '11043',
-     '11059',
-     '11067',
-     '11073',
-     '11078',
-     '11083',
-     '11084',
-     '11086',
-     '11088',
-     '11095',
-     '11098',
-     '11119',
-     '11167',
-     '11185',
-     '11195',
-     '11198',
-     '11226',
-     '11232',
-     '11236',
-     '11237',
-     '11248',
-     '11251',
-     '11256',
-     '11259',
-     '11260',
-     '11269',
-     '11273',
-     '11288',
-     '11297',
-     '11301',
-     '11327',
-     '11344',
-     '11368',
-     '11382',
-     '11387',
-     '11399',
-     '11409',
-     '11413',
-     '11424',
-     '11433',
- ]
-
- #------------------------------------------------------------------------------
- # Lab result fields.
- #------------------------------------------------------------------------------
-
- lab_result_fields = {
-     'global_id': 'string',
-     'mme_id': 'string',
-     'intermediate_type': 'category',
-     'status': 'category',
-     'global_for_inventory_id': 'string',
-     'cannabinoid_status': 'category',
-     'cannabinoid_cbc_percent': 'float',
-     'cannabinoid_cbc_mg_g': 'float',
-     'cannabinoid_cbd_percent': 'float',
-     'cannabinoid_cbd_mg_g': 'float',
-     'cannabinoid_cbda_percent': 'float',
-     'cannabinoid_cbda_mg_g': 'float',
-     'cannabinoid_cbdv_percent': 'float',
-     'cannabinoid_cbdv_mg_g': 'float',
-     'cannabinoid_cbg_percent': 'float',
-     'cannabinoid_cbg_mg_g': 'float',
-     'cannabinoid_cbga_percent': 'float',
-     'cannabinoid_cbga_mg_g': 'float',
-     'cannabinoid_cbn_percent': 'float',
-     'cannabinoid_cbn_mg_g': 'float',
-     'cannabinoid_d8_thc_percent': 'float',
-     'cannabinoid_d8_thc_mg_g': 'float',
-     'cannabinoid_d9_thca_percent': 'float',
-     'cannabinoid_d9_thca_mg_g': 'float',
-     'cannabinoid_d9_thc_percent': 'float',
-     'cannabinoid_d9_thc_mg_g': 'float',
-     'cannabinoid_thcv_percent': 'float',
-     'cannabinoid_thcv_mg_g': 'float',
-     'solvent_status': 'category',
-     'solvent_acetone_ppm': 'float',
-     'solvent_benzene_ppm': 'float',
-     'solvent_butanes_ppm': 'float',
-     'solvent_chloroform_ppm': 'float',
-     'solvent_cyclohexane_ppm': 'float',
-     'solvent_dichloromethane_ppm': 'float',
-     'solvent_ethyl_acetate_ppm': 'float',
-     'solvent_heptane_ppm': 'float',
-     'solvent_hexanes_ppm': 'float',
-     'solvent_isopropanol_ppm': 'float',
-     'solvent_methanol_ppm': 'float',
-     'solvent_pentanes_ppm': 'float',
-     'solvent_propane_ppm': 'float',
-     'solvent_toluene_ppm': 'float',
-     'solvent_xylene_ppm': 'float',
-     'foreign_matter': 'bool',
-     'foreign_matter_stems': 'float',
-     'foreign_matter_seeds': 'float',
-     'microbial_status': 'category',
-     'microbial_bile_tolerant_cfu_g': 'float',
-     'microbial_pathogenic_e_coli_cfu_g': 'float',
-     'microbial_salmonella_cfu_g': 'float',
-     'moisture_content_percent': 'float',
-     'moisture_content_water_activity_rate': 'float',
-     'mycotoxin_status': 'category',
-     'mycotoxin_aflatoxins_ppb': 'float',
-     'mycotoxin_ochratoxin_ppb': 'float',
-     'thc_percent': 'float',
-     'notes': 'string',
-     'testing_status': 'category',
-     'type': 'category',
-     'inventory_id': 'string',
-     'batch_id': 'string',
-     'parent_lab_result_id': 'string',
-     'og_parent_lab_result_id': 'string',
-     'copied_from_lab_id': 'string',
-     'external_id': 'string',
-     'lab_user_id': 'string',
-     'user_id': 'string',
-     'cannabinoid_editor': 'string',
-     'microbial_editor': 'string',
-     'mycotoxin_editor': 'string',
-     'solvent_editor': 'string',
- }
-
- lab_result_date_fields = [
-     'created_at',
-     'deleted_at',
-     'updated_at',
-     'received_at',
- ]
-
- #------------------------------------------------------------------------------
- # Licensees fields.
- #------------------------------------------------------------------------------
-
- licensee_fields = {
-     'global_id': 'string',
-     'name': 'string',
-     'type': 'string',
-     'code': 'string',
-     'address1': 'string',
-     'address2': 'string',
-     'city': 'string',
-     'state_code': 'string',
-     'postal_code': 'string',
-     'country_code': 'string',
-     'phone': 'string',
-     'external_id': 'string',
-     'certificate_number': 'string',
-     'is_live': 'bool',
-     'suspended': 'bool',
- }
-
- licensee_date_fields = [
-     'created_at', # No records if issued before 2018-02-21.
-     'updated_at',
-     'deleted_at',
-     'expired_at',
- ]
-
- #------------------------------------------------------------------------------
- # Inventories fields.
- #------------------------------------------------------------------------------
-
- inventory_fields = {
-     'global_id': 'string',
-     'strain_id': 'string',
-     'inventory_type_id': 'string',
-     'qty': 'float',
-     'uom': 'string',
-     'mme_id': 'string',
-     'user_id': 'string',
-     'external_id': 'string',
-     'area_id': 'string',
-     'batch_id': 'string',
-     'lab_result_id': 'string',
-     'lab_retest_id': 'string',
-     'is_initial_inventory': 'bool',
-     'created_by_mme_id': 'string',
-     'additives': 'string',
-     'serving_num': 'float',
-     'sent_for_testing': 'bool',
-     'medically_compliant': 'string',
-     'legacy_id': 'string',
-     'lab_results_attested': 'int',
-     'global_original_id': 'string',
- }
-
- inventory_date_fields = [
-     'created_at', # No records if issued before 2018-02-21.
-     'updated_at',
-     'deleted_at',
-     'inventory_created_at',
-     'inventory_packaged_at',
-     'lab_results_date',
- ]
-
- #------------------------------------------------------------------------------
- # Inventory type fields.
- #------------------------------------------------------------------------------
-
- inventory_type_fields = {
-     'global_id': 'string',
-     'mme_id': 'string',
-     'user_id': 'string',
-     'external_id': 'string',
-     'uom': 'string',
-     'name': 'string',
-     'intermediate_type': 'string',
- }
-
- inventory_type_date_fields = [
-     'created_at',
-     'updated_at',
-     'deleted_at',
- ]
-
- #------------------------------------------------------------------------------
- # Strain fields.
- #------------------------------------------------------------------------------
-
- strain_fields = {
-     'mme_id': 'string',
-     'user_id': 'string',
-     'global_id': 'string',
-     'external_id': 'string',
-     'name': 'string',
- }
- strain_date_fields = [
-     'created_at',
-     'updated_at',
-     'deleted_at',
- ]
-
-
- #------------------------------------------------------------------------------
- # Sales fields.
- # TODO: Parse Sales_0, Sales_1, Sales_2
- #------------------------------------------------------------------------------
-
- sales_fields = {
-     'global_id': 'string',
-     'external_id': 'string',
-     'type': 'string', # wholesale or retail_recrational
-     'price_total': 'float',
-     'status': 'string',
-     'mme_id': 'string',
-     'user_id': 'string',
-     'area_id': 'string',
-     'sold_by_user_id': 'string',
- }
- sales_date_fields = [
-     'created_at',
-     'updated_at',
-     'sold_at',
-     'deleted_at',
- ]
-
-
- #------------------------------------------------------------------------------
- # Sales Items fields.
- # TODO: Parse SalesItems_0, SalesItems_1, SalesItems_2, SalesItems_3
- #------------------------------------------------------------------------------
-
- sales_items_fields = {
-     'global_id': 'string',
-     'mme_id': 'string',
-     'user_id': 'string',
-     'sale_id': 'string',
-     'batch_id': 'string',
-     'inventory_id': 'string',
-     'external_id': 'string',
-     'qty': 'float',
-     'uom': 'string',
-     'unit_price': 'float',
-     'price_total': 'float',
-     'name': 'string',
- }
- sales_items_date_fields = [
-     'created_at',
-     'updated_at',
-     'sold_at',
-     'use_by_date',
- ]
-
- #------------------------------------------------------------------------------
- # Batches fields.
- # TODO: Parse Batches_0
- #------------------------------------------------------------------------------
-
- batches_fields = {
-     'external_id': 'string',
-     'num_plants': 'float',
-     'status': 'string',
-     'qty_harvest': 'float',
-     'uom': 'string',
-     'is_parent_batch': 'int',
-     'is_child_batch': 'int',
-     'type': 'string',
-     'harvest_stage': 'string',
-     'qty_accumulated_waste': 'float',
-     'qty_packaged_flower': 'float',
-     'qty_packaged_by_product': 'float',
-     'origin': 'string',
-     'source': 'string',
-     'qty_cure': 'float',
-     'plant_stage': 'string',
-     'flower_dry_weight': 'float',
-     'waste': 'float',
-     'other_waste': 'float',
-     'flower_waste': 'float',
-     'other_dry_weight': 'float',
-     'flower_wet_weight': 'float',
-     'other_wet_weight': 'float',
-     'global_id': 'string',
-     'global_area_id': 'string',
-     'area_name': 'string',
-     'global_mme_id': 'string',
-     'mme_name': 'string',
-     'mme_code': 'string',
-     'global_user_id': 'string',
-     'global_strain_id': 'string',
-     'strain_name': 'string',
-     'global_mother_plant_id': 'string',
-     'global_flower_area_id': 'string',
-     'global_other_area_id': 'string',
- }
- batches_date_fields = [
-     'created_at',
-     'updated_at',
-     'planted_at',
-     'harvested_at',
-     'batch_created_at',
-     'deleted_at',
-     'est_harvest_at',
-     'packaged_completed_at',
-     'harvested_end_at',
- ]
-
-
- #------------------------------------------------------------------------------
- # Taxes fields.
- # TODO: Parse Taxes_0
- #------------------------------------------------------------------------------
-
- taxes_fields = {
-
- }
- taxes_date_fields = [
-
- ]
-
- #------------------------------------------------------------------------------
- # Areas fields.
- #------------------------------------------------------------------------------
-
- areas_fields = {
-     'external_id': 'string',
-     'name': 'string',
-     'type': 'string',
-     'is_quarantine_area': 'bool',
-     'global_id': 'string',
- }
- areas_date_fields = [
-     'created_at',
-     'updated_at',
-     'deleted_at',
- ]
-
- #------------------------------------------------------------------------------
- # Inventory Transfer Items fields.
- # TODO: Parse InventoryTransferItems_0
- #------------------------------------------------------------------------------
-
- inventory_transfer_items_fields = {
-     'external_id': 'string',
-     'is_sample': 'int',
-     'sample_type': 'string',
-     'product_sample_type': 'string',
-     'description': 'string',
-     'qty': 'float',
-     'price': 'float',
-     'uom': 'string',
-     'received_qty': 'float',
-     'retest': 'int',
-     'global_id': 'string',
-     'is_for_extraction': 'int',
-     'propagation_source': 'string',
-     'inventory_name': 'string',
-     'intermediate_type': 'string',
-     'strain_name': 'string',
-     'global_mme_id': 'string',
-     'global_user_id': 'string',
-     'global_batch_id': 'string',
-     'global_plant_id': 'string',
-     'global_inventory_id': 'string',
-     'global_lab_result_id': 'string',
-     'global_received_area_id': 'string',
-     'global_received_strain_id': 'string',
-     'global_inventory_transfer_id': 'string',
-     'global_received_batch_id': 'string',
-     'global_received_inventory_id': 'string',
-     'global_received_plant_id': 'string',
-     'global_received_mme_id': 'string',
-     'global_received_mme_user_id': 'string',
-     'global_customer_id': 'string',
-     'global_inventory_type_id': 'string',
-     # Optional: Match with inventory type fields
-     # "created_at": "09/11/2018 07:39am",
-     # "updated_at": "09/12/2018 03:55am",
-     # "external_id": "123425",
-     # "name": "Charlotte's Web Pre-Packs - 3.5gm",
-     # "description": "",
-     # "storage_instructions": "",
-     # "ingredients": "",
-     # "type": "end_product",
-     # "allergens": "",
-     # "contains": "",
-     # "used_butane": 0,
-     # "net_weight": "2",
-     # "packed_qty": null,
-     # "cost": "0.00",
-     # "value": "0.00",
-     # "serving_num": 1,
-     # "serving_size": 0,
-     # "uom": "ea",
-     # "total_marijuana_in_grams": "0.000000",
-     # "total_marijuana_in_mcg": null,
-     # "deleted_at": null,
-     # "intermediate_type": "usable_marijuana",
-     # "global_id": "WAG12.TY3DE",
-     # "global_original_id": null,
-     # "weight_per_unit_in_grams": "0.00"
-     # "global_mme_id": "WASTATE1.MM30",
-     # "global_user_id": "WASTATE1.US1I",
-     # "global_strain_id": null
- }
- inventory_transfer_items_date_fields = [
-     'created_at',
-     'updated_at',
-     'received_at',
-     'deleted_at',
- ]
-
- #------------------------------------------------------------------------------
- # Inventory Transfers fields.
- # TODO: Parse InventoryTransfers_0
- #------------------------------------------------------------------------------
-
- inventory_transfers_fields = {
-     'number_of_edits': 'int',
-     'external_id': 'string',
-     'void': 'int',
-     'multi_stop': 'int',
-     'route': 'string',
-     'stops': 'string',
-     'vehicle_description': 'string',
-     'vehicle_year': 'string',
-     'vehicle_color': 'string',
-     'vehicle_vin': 'string',
-     'vehicle_license_plate': 'string',
-     'notes': 'string',
-     'transfer_manifest': 'string',
-     'manifest_type': 'string',
-     'status': 'string',
-     'type': 'string',
-     'transfer_type': 'string',
-     'global_id': 'string',
-     'test_for_terpenes': 'int',
-     'transporter_name1': 'string',
-     'transporter_name2': 'string',
-     'global_mme_id': 'string',
-     'global_user_id': 'string',
-     'global_from_mme_id': 'string',
-     'global_to_mme_id': 'string',
-     'global_from_user_id': 'string',
-     'global_to_user_id': 'string',
-     'global_from_customer_id': 'string',
-     'global_to_customer_id': 'string',
-     'global_transporter_user_id': 'string',
- }
- inventory_transfers_date_fields = [
-     'created_at',
-     'updated_at',
-     'hold_starts_at',
-     'hold_ends_at',
-     'transferred_at',
-     'est_departed_at',
-     'est_arrival_at',
-     'deleted_at',
- ]
-
- #------------------------------------------------------------------------------
- # Disposals fields.
- # Optional: Parse Disposals_0
- #------------------------------------------------------------------------------
-
- disposals_fields = {
-     'external_id': 'string',
-     'whole_plant': 'string',
-     'reason': 'string',
-     'method': 'string',
-     'phase': 'string',
-     'type': 'string',
-     'qty': 'float',
-     'uom': 'string',
-     'source': 'string',
-     'disposal_cert': 'string',
-     'global_id': 'string',
-     'global_mme_id': 'string',
-     'global_user_id': 'string',
-     'global_batch_id': 'string',
-     'global_area_id': 'string',
-     'global_plant_id': 'string',
-     'global_inventory_id': 'string',
- }
- disposals_date_fields = [
-     'created_at',
-     'updated_at',
-     'hold_starts_at',
-     'hold_ends_at',
-     'disposal_at',
-     'deleted_at',
- ]
-
- #------------------------------------------------------------------------------
- # Inventory Adjustments fields.
- # Optional: Parse InventoryAdjustments_0, InventoryAdjustments_1, InventoryAdjustments_2
- #------------------------------------------------------------------------------
-
- inventory_adjustments_fields = {
-     'external_id': 'string',
-     'qty': 'float',
-     'uom': 'string',
-     'reason': 'string',
-     'memo': 'string',
-     'global_id': 'string',
-     'global_mme_id': 'string',
-     'global_user_id': 'string',
-     'global_inventory_id': 'string',
-     'global_adjusted_by_user_id': 'string',
- }
- inventory_adjustments_date_fields = [
-     'created_at',
-     'updated_at',
-     'adjusted_at',
-     'deleted_at',
- ]
-
- #------------------------------------------------------------------------------
- # Plants fields.
- #------------------------------------------------------------------------------
-
- plants_fields = {
-     'global_id': 'string',
-     'mme_id': 'string',
-     'user_id': 'string',
-     'external_id': 'string',
-     'inventory_id': 'string',
-     'batch_id': 'string',
-     'area_id': 'string',
-     'mother_plant_id': 'string',
-     'is_initial_inventory': 'string',
-     'origin': 'string',
-     'stage': 'string',
-     'strain_id': 'string',
-     'is_mother': 'string',
-     'last_moved_at': 'string',
-     'plant_harvested_end_at': 'string',
-     'legacy_id': 'string',
- }
- plants_date_fields = [
-     'created_at',
-     'deleted_at',
-     'updated_at',
-     'plant_created_at',
-     'plant_harvested_at',
-     'plant_harvested_end_at'
- ]
algorithms/algorithm_utils.py DELETED
@@ -1,240 +0,0 @@
- """
- Cannabis Tests | Utility Functions
- Copyright (c) 2021-2022 Cannlytics
-
- Authors:
-     Keegan Skeate <https://github.com/keeganskeate>
-     Candace O'Sullivan-Sutherland <https://github.com/candy-o>
- Created: 10/27/2021
- Updated: 9/16/2022
- License: CC-BY 4.0 <https://huggingface.co/datasets/cannlytics/cannabis_tests/blob/main/LICENSE>
- """
- # Standard imports.
- from datetime import datetime
- import re
- from typing import Any, List, Optional, Tuple
-
- # External imports.
- import pandas as pd
- from pandas import DataFrame, Series, to_datetime
- from pandas.tseries.offsets import MonthEnd
-
-
- def end_of_month(value: datetime) -> str:
-     """Format a datetime as an ISO formatted date at the end of the month.
-     Args:
-         value (datetime): A datetime value to transform into an ISO date.
-     Returns:
-         (str): An ISO formatted date.
-     """
-     month = value.month
-     if month < 10:
-         month = f'0{month}'
-     year = value.year
-     day = value + MonthEnd(0)
-     return f'{year}-{month}-{day.day}'
-
-
- def end_of_year(value: datetime) -> str:
-     """Format a datetime as an ISO formatted date at the end of the year.
-     Args:
-         value (datetime): A datetime value to transform into an ISO date.
-     Returns:
-         (str): An ISO formatted date.
-     """
-     return f'{value.year}-12-31'
-
-
- def end_of_period_timeseries(data: DataFrame, period: Optional[str] = 'M') -> DataFrame:
-     """Convert a DataFrame from beginning-of-the-period to
-     end-of-the-period timeseries.
-     Args:
-         data (DataFrame): The DataFrame to adjust timestamps.
-         period (str): The period of the time series, monthly "M" by default.
-     Returns:
-         (DataFrame): The adjusted DataFrame, with end-of-the-month timestamps.
-     """
-     data.index = data.index.to_period(period).to_timestamp(period)
-     return data
-
-
- # def forecast_arima(
- #     model: Any,
- #     forecast_horizon: Any,
- #     exogenous: Optional[Any] = None,
- # ) -> Tuple[Any]:
- #     """Format an auto-ARIMA model forecast as a time series.
- #     Args:
- #         model (ARIMA): An pmdarima auto-ARIMA model.
- #         forecast_horizon (DatetimeIndex): A series of dates.
- #         exogenous (DataFrame): Am optional DataFrame of exogenous variables.
- #     Returns:
- #         forecast (Series): The forecast series with forecast horizon index.
- #         conf (Array): A 2xN array of lower and upper confidence bounds.
- #     """
- #     periods = len(forecast_horizon)
- #     forecast, conf = model.predict(
- #         n_periods=periods,
- #         return_conf_int=True,
- #         X=exogenous,
- #     )
- #     forecast = Series(forecast)
- #     forecast.index = forecast_horizon
- #     return forecast, conf
-
-
- def format_billions(value: float, pos: Optional[int] = None) -> str: #pylint: disable=unused-argument
-     """The two args are the value and tick position."""
-     return '%1.1fB' % (value * 1e-9)
-
-
- def format_millions(value: float, pos: Optional[int] = None) -> str: #pylint: disable=unused-argument
-     """The two args are the value and tick position."""
-     return '%1.1fM' % (value * 1e-6)
-
-
- def format_thousands(value: float, pos: Optional[int] = None) -> str: #pylint: disable=unused-argument
-     """The two args are the value and tick position."""
-     return '%1.0fK' % (value * 1e-3)
-
-
- def get_blocks(files, size=65536):
-     """Get a block of a file by the given size."""
-     while True:
-         block = files.read(size)
-         if not block: break
-         yield block
-
-
- def get_number_of_lines(file_name, encoding='utf-16', errors='ignore'):
-     """
-     Read the number of lines in a large file.
-     Credit: glglgl, SU3 <https://stackoverflow.com/a/9631635/5021266>
-     License: CC BY-SA 3.0 <https://creativecommons.org/licenses/by-sa/3.0/>
-     """
-     with open(file_name, 'r', encoding=encoding, errors=errors) as f:
-         count = sum(bl.count('\n') for bl in get_blocks(f))
-         print('Number of rows:', count)
-         return count
-
-
- def reverse_dataframe(data: DataFrame) -> DataFrame:
-     """Reverse the ordering of a DataFrame.
-     Args:
-         data (DataFrame): A DataFrame to re-order.
-     Returns:
-         (DataFrame): The re-ordered DataFrame.
-     """
-     return data[::-1].reset_index(drop=True)
-
-
- def set_training_period(series: Series, date_start: str, date_end: str) -> Series:
-     """Helper function to restrict a series to the desired
-     training time period.
-     Args:
-         series (Series): The series to clean.
-         date_start (str): An ISO date to mark the beginning of the training period.
-         date_end (str): An ISO date to mark the end of the training period.
-     Returns
-         (Series): The series restricted to the desired time period.
-     """
-     return series.loc[
-         (series.index >= to_datetime(date_start)) & \
-         (series.index < to_datetime(date_end))
-     ]
-
-
- def sorted_nicely(unsorted_list: List[str]) -> List[str]:
-     """Sort the given iterable in the way that humans expect.
-     Credit: Mark Byers <https://stackoverflow.com/a/2669120/5021266>
-     License: CC BY-SA 2.5 <https://creativecommons.org/licenses/by-sa/2.5/>
-     """
-     convert = lambda text: int(text) if text.isdigit() else text
-     alpha = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
-     return sorted(unsorted_list, key=alpha)
-
-
- def rmerge(left, right, **kwargs):
-     """Perform a merge using pandas with optional removal of overlapping
-     column names not associated with the join.
-
-     Though I suspect this does not adhere to the spirit of pandas merge
-     command, I find it useful because re-executing IPython notebook cells
-     containing a merge command does not result in the replacement of existing
-     columns if the name of the resulting DataFrame is the same as one of the
-     two merged DataFrames, i.e. data = pa.merge(data,new_dataframe). I prefer
-     this command over pandas df.combine_first() method because it has more
-     flexible join options.
-
-     The column removal is controlled by the 'replace' flag which is
-     'left' (default) or 'right' to remove overlapping columns in either the
-     left or right DataFrame. If 'replace' is set to None, the default
-     pandas behavior will be used. All other parameters are the same
-     as pandas merge command.
-
-     Author: Michelle Gill
-     Source: https://gist.github.com/mlgill/11334821
-
-     Examples
-     --------
-     >>> left       >>> right
-        a  b   c       a  c   d
-     0  1  4   9    0  1  7  13
-     1  2  5  10    1  2  8  14
-     2  3  6  11    2  3  9  15
-     3  4  7  12
-
-     >>> rmerge(left,right,on='a')
-        a  b  c   d
-     0  1  4  7  13
-     1  2  5  8  14
-     2  3  6  9  15
-
-     >>> rmerge(left,right,on='a',how='left')
-        a  b    c    d
-     0  1  4    7   13
-     1  2  5    8   14
-     2  3  6    9   15
-     3  4  7  NaN  NaN
-
-     >>> rmerge(left,right,on='a',how='left',replace='right')
-        a  b   c    d
-     0  1  4   9   13
-     1  2  5  10   14
-     2  3  6  11   15
-     3  4  7  12  NaN
-
-     >>> rmerge(left,right,on='a',how='left',replace=None)
-        a  b  c_x  c_y    d
-     0  1  4    9    7   13
-     1  2  5   10    8   14
-     2  3  6   11    9   15
-     3  4  7   12  NaN  NaN
-     """
-
-     # Function to flatten lists from http://rosettacode.org/wiki/Flatten_a_list#Python
-     def flatten(lst):
-         return sum(([x] if not isinstance(x, list) else flatten(x) for x in lst), [])
-
-     # Set default for removing overlapping columns in "left" to be true
-     myargs = {'replace':'left'}
-     myargs.update(kwargs)
-
-     # Remove the replace key from the argument dict to be sent to
-     # pandas merge command
225
- kwargs = {k:v for k, v in myargs.items() if k != 'replace'}
226
-
227
- if myargs['replace'] is not None:
228
- # Generate a list of overlapping column names not associated with the join
229
- skipcols = set(flatten([v for k, v in myargs.items() if k in ['on', 'left_on', 'right_on']]))
230
- leftcols = set(left.columns)
231
- rightcols = set(right.columns)
232
- dropcols = list((leftcols & rightcols).difference(skipcols))
233
-
234
- # Remove the overlapping column names from the appropriate DataFrame
235
- if myargs['replace'].lower() == 'left':
236
- left = left.copy().drop(dropcols, axis=1)
237
- elif myargs['replace'].lower() == 'right':
238
- right = right.copy().drop(dropcols, axis=1)
239
-
240
- return pd.merge(left, right, **kwargs)
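
For readers skimming this removed helper module, here is a minimal, self-contained sketch of the natural-sort idiom that `sorted_nicely` implements; the sample list is made up for illustration:

import re

# Hypothetical sample IDs; plain sorted() would yield ['sample1', 'sample10', 'sample2'].
samples = ['sample10', 'sample2', 'sample1']
convert = lambda text: int(text) if text.isdigit() else text
alpha = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
print(sorted(samples, key=alpha))  # ['sample1', 'sample2', 'sample10']

Splitting on digit runs and comparing numbers numerically is what makes 'sample2' sort before 'sample10', which lexicographic sorting gets wrong.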
algorithms/{get_results_psilabs.py → archived/get_results_mi_psi_labs.py} RENAMED
@@ -18,34 +18,6 @@ Description:
 
     3. Create and use re-usable prediction models.
 
-    Data Sources:
-
-        - PSI Labs Test Results
-          URL: <https://results.psilabs.org/test-results/>
-
-    Resources:
-
-        - ChromeDriver
-          URL: <https://chromedriver.chromium.org/home>
-
-        - Automation Cartoon
-          URL: https://xkcd.com/1319/
-
-        - Efficiency Cartoon
-          URL: https://xkcd.com/1445/
-
-        - SHA in Python
-          URL: https://www.geeksforgeeks.org/sha-in-python/
-
-        - Split / Explode a column of dictionaries into separate columns with pandas
-          URL: https://stackoverflow.com/questions/38231591/split-explode-a-column-of-dictionaries-into-separate-columns-with-pandas
-
-        - Tidyverse: Wide and Long Data Tables
-          URL: https://rstudio-education.github.io/tidyverse-cookbook/tidy.html
-
-        - Web Scraping using Selenium and Python
-          URL: <https://www.scrapingbee.com/blog/selenium-python/>
-
 Setup:
 
     1. Create a data folder `../../.datasets/lab_results/psi_labs/raw_data`.
@@ -55,6 +27,15 @@ Setup:
 
     3. Specify the `PAGES` that you want to collect.
 
+    Note:
+
+        It does not appear that new lab results are being added to the
+        PSI Labs test results website as of 2022-01-01.
+
+    Data Sources:
+
+        - [PSI Labs Test Results](https://results.psilabs.org/test-results/)
+
 """
 # Standard imports.
 from ast import literal_eval
algorithms/get_results_ak.py ADDED
@@ -0,0 +1,223 @@
+ """
+ Get Cannabis Results | Alaska
+ Copyright (c) 2024 Cannlytics
+
+ Authors:
+     Keegan Skeate <https://github.com/keeganskeate>
+ Created: 7/10/2024
+ Updated: 7/10/2024
+ License: CC-BY 4.0 <https://huggingface.co/datasets/cannlytics/cannabis_tests/blob/main/LICENSE>
+
+ Data Source:
+
+     - Public records request
+
+ """
+ # Standard imports:
+ from collections import defaultdict
+ import glob
+ import os
+ import json
+
+ # External imports:
+ from cannlytics.data.coas.coas import CoADoc
+ from cannlytics.utils.utils import snake_case
+ import pandas as pd
+
+ def process_file(parser, file_path, sample_id='PackageId'):
+     """Process each file and transform the data."""
+     chunks = pd.read_csv(file_path, chunksize=100000, low_memory=False)
+     samples = {}
+     for chunk in chunks:
+         for _, row in chunk.iterrows():
+             sample_id_value = row[sample_id]
+             if sample_id_value not in samples:
+                 sample = {sample_columns[key]: row[key] for key in sample_columns if key in row}
+                 sample['results'] = []
+                 samples[sample_id_value] = sample
+             result = {result_columns[key]: row[key] for key in result_columns if key in row}
+             name = result['name']
+             result['key'] = parser.analytes.get(snake_case(name), snake_case(name))
+             samples[sample_id_value]['results'].append(result)
+     return samples
+
+ def aggregate_and_save_to_csv(json_dir, output_csv):
+     """Aggregate and save JSON files to CSV."""
+     json_files = glob.glob(os.path.join(json_dir, '*.json'))
+     all_samples = []
+     for json_file in json_files:
+         with open(json_file, 'r') as f:
+             samples = json.load(f)
+         all_samples.extend(samples.values())
+     df = pd.DataFrame(all_samples)
+     df.to_csv(output_csv, index=False)
+     return df
+
+ # === Test ===
+ # [✓] Tested: 2024-07-10 by Keegan Skeate <keegan@cannlytics>
+ if __name__ == '__main__':
+
+     print('Curating AK results')
+     data_dir = r'D:\data\public-records\Alaska\AK Lab Result Data 2016-2024\AK Lab Result Data 2016-2024'
+     output_dir = r'D:\data\alaska\results\datasets'
+     if not os.path.exists(output_dir):
+         os.makedirs(output_dir)
+
+     # Walk the data directory and find all `.csv`'s with TestResult.
+     test_datafiles = []
+     for root, dirs, files in os.walk(data_dir):
+         for file in files:
+             if 'TestResult' in file:
+                 test_datafile = os.path.join(root, file)
+                 test_datafiles.append(test_datafile)
+
+     # Get package datafiles.
+     package_datafiles = [os.path.join(data_dir, x) for x in os.listdir(data_dir) if 'Package' in x and '.csv' in x]
+
+     # Each sample has the following fields:
+     sample_columns = {
+         'PackageId': 'package_id',
+         'PackageLabel': 'package_label',
+         'LabTestResultId': 'sample_id',
+         'TestingFacilityId': 'lab_id',
+         'LabFacilityLicenseNumber': 'lab_license_number',
+         'LabFacilityName': 'lab',
+         'SourcePackageId': 'source_package_id',
+         'SourcePackageLabel': 'source_package_label',
+         'ProductName': 'product_name',
+         'ProductCategoryName': 'product_type',
+         'TestPerformedDate': 'date_tested',
+         'OverallPassed': 'status',
+         'IsRevoked': 'revoked',
+         # 'RevokedDate': 'date_revoked',
+     }
+     package_columns = {
+         'Id': 'id',
+         'FacilityId': 'facility_id',
+         'TagId': 'tag_id',
+         'Label': 'label',
+         'SourceHarvestNames': 'source_harvest_names',
+         'SourcePackageLabels': 'source_package_labels',
+         'SourceProcessingJobNumbers': 'source_processing_job_numbers',
+         'SourceProcessingJobNames': 'source_processing_job_names',
+         'MultiHarvest': 'multi_harvest',
+         'MultiPackage': 'multi_package',
+         'MultiProcessingJob': 'multi_processing_job',
+         'Quantity': 'quantity',
+         'UnitOfMeasureName': 'unit_of_measure_name',
+         'UnitOfMeasureAbbreviation': 'unit_of_measure_abbreviation',
+         'UnitOfMeasureQuantityType': 'unit_of_measure_quantity_type',
+         'ItemFromFacilityId': 'item_from_facility_id',
+         'ItemFromFacilityLicenseNumber': 'item_from_facility_license_number',
+         'ItemFromFacilityName': 'item_from_facility_name',
+         'ItemFromFacilityType': 'item_from_facility_type',
+         'ItemFromFacilityIsActive': 'item_from_facility_is_active',
+         'PackagedDate': 'packaged_date',
+         'PackagedByFacilityId': 'packaged_by_facility_id',
+         'PackagedByFacilityLicenseNumber': 'packaged_by_facility_license_number',
+         'PackagedByFacilityName': 'packaged_by_facility_name',
+         'PackagedByFacilityType': 'packaged_by_facility_type',
+         'PackagedByFacilityIsActive': 'packaged_by_facility_is_active',
+         'LabTestingStateName': 'lab_testing_state_name',
+         'LabTestingStateDate': 'lab_testing_state_date',
+         'IsProductionBatch': 'is_production_batch',
+         'IsTradeSample': 'is_trade_sample',
+         'IsProcessValidationTestingSample': 'is_process_validation_testing_sample',
+         'IsProficiencyTestingSample': 'is_proficiency_testing_sample',
+         'ProductRequiresRemediation': 'product_requires_remediation',
+         'ContainsRemediatedProduct': 'contains_remediated_product',
+         'ReceivedFromManifestNumber': 'received_from_manifest_number',
+         'ReceivedFromFacilityId': 'received_from_facility_id',
+         'ReceivedFromFacilityLicenseNumber': 'received_from_facility_license_number',
+         'ReceivedFromFacilityName': 'received_from_facility_name',
+         'ReceivedFromFacilityType': 'received_from_facility_type',
+         'ReceivedFromFacilityActive': 'received_from_facility_active',
+         'ReceivedDateTime': 'received_date_time',
+         'IsArchived': 'is_archived',
+         'IsFinished': 'is_finished',
+         'FinishedDate': 'finished_date',
+         'LabTestResultId': 'sample_id',
+         'TestingFacilityId': 'lab_id',
+         'TestingFacilityName': 'lab',
+         'TestingFacilityLicenseNumber': 'lab_license_number',
+         'TestingFacilityType': 'lab_facility_type',
+         'TestingFacilityIsActive': 'lab_facility_is_active',
+         'OverallPassed': 'status',
+         'TestPerformedDate': 'date_tested',
+         'ProductId': 'product_id',
+         'ProductName': 'product_name',
+         'ProductCategoryName': 'product_category_name',
+         'ProductCategoryType': 'product_category_type',
+         'ProductCategoryTypeName': 'product_category_type_name',
+         'QuantityType': 'quantity_type',
+         'QuantityTypeName': 'quantity_type_name',
+         'ItemUnitOfMeasureName': 'item_unit_of_measure_name',
+         'ItemUnitOfMeasureAbbreviation': 'item_unit_of_measure_abbreviation',
+         'UnitQuantity': 'unit_quantity',
+         'UnitQuantityUnitOfMeasureName': 'unit_quantity_unit_of_measure_name',
+         'StrainId': 'strain_id',
+         'StrainName': 'strain_name',
+     }
+
+     result_columns = {
+         # 'LabTestResultDocumentFileId': 'coa_id',
+         # 'ResultReleased': 'released',
+         # 'ResultReleaseDateTime': 'date_released',
+         # 'LabTestDetailId': 'result_id',
+         # 'LabTestTypeId': 'test_id',
+         'TestTypeName': 'name',
+         'TestPassed': 'status',
+         'TestResultLevel': 'value',
+         # 'TestComment': 'comment',
+         # 'TestInformationalOnly': 'r_and_d',
+         # 'LabTestDetailIsRevoked': 'result_revoked',
+         # 'LabTestDetailRevokedDate': 'date_result_revoked'
+     }
+
+     # Process each file and periodically save the results by year
+     parser = CoADoc()
+     all_samples_by_year = defaultdict(dict)
+     file_counter = 0
+     for file in test_datafiles:
+         print(f'Processing file: {file}')
+         samples = process_file(parser, file)
+
+         # FIXME: JSON encode all results.
+
+
+         # Merge samples into the all_samples_by_year dictionary
+         for sample_id, sample in samples.items():
+             year = sample['date_tested'][:4]
+             if sample_id in all_samples_by_year[year]:
+                 all_samples_by_year[year][sample_id]['results'].extend(sample['results'])
+             else:
+                 all_samples_by_year[year][sample_id] = sample
+
+         # Periodically save the results to avoid memory issues
+         file_counter += 1
+         if file_counter % 5 == 0:
+             for year, samples in all_samples_by_year.items():
+                 output_file = os.path.join(output_dir, f'ak-lab-results-{year}-{file_counter}.json')
+                 with open(output_file, 'w') as f:
+                     json.dump(samples, f, indent=4)
+             all_samples_by_year.clear()
+
+     # Save any remaining samples
+     if all_samples_by_year:
+         for year, samples in all_samples_by_year.items():
+             output_file = os.path.join(output_dir, f'ak-lab-results-{year}-final.json')
+             with open(output_file, 'w') as f:
+                 json.dump(samples, f, indent=4)
+
+     # TODO: Combine JSON by year.
+
+     # FIXME: Aggregate all samples and save them to a .csv
+     # output_dir = r'D:\data\alaska\results\datasets'
+     # output_csv = r'D:\data\alaska\results\datasets\ak-lab-results-latest.csv'
+     # results = aggregate_and_save_to_csv(output_dir, output_csv)
+
+     # TODO: Augment package data?
+
+     # # TODO: Augment license data.
+     # datafile = os.path.join(data_dir, 'AK Facility.csv')
+     # license_data = pd.read_csv(datafile)
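
The `process_file` routine above leans on pandas' chunked CSV reader to stream large state files without exhausting memory. A minimal, self-contained sketch of that pattern, with a hypothetical file name:

import pandas as pd

# Stream a large CSV in 100,000-row chunks instead of loading it whole.
total_rows = 0
for chunk in pd.read_csv('ak-test-results.csv', chunksize=100_000, low_memory=False):
    total_rows += len(chunk)  # Each chunk is an ordinary DataFrame.
print('Rows processed:', total_rows)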
algorithms/get_results_ca_flower_co.py ADDED
@@ -0,0 +1,585 @@
+ """
+ Get California Cannabis Lab Results | Flower Company
+ Copyright (c) 2023-2024 Cannlytics
+
+ Authors:
+     Keegan Skeate <https://github.com/keeganskeate>
+     Candace O'Sullivan-Sutherland <https://github.com/candy-o>
+ Created: 12/8/2023
+ Updated: 5/21/2024
+ License: <https://github.com/cannlytics/cannlytics/blob/main/LICENSE>
+
+ Description:
+
+     Archive cannabis lab result data published by the Flower Company.
+
+ Data Source:
+
+     * [Flower Company](https://flowercompany.com/)
+
+ Data points:
+
+     ✓ product_id (generated)
+     ✓ producer
+     ✓ product_name
+     ✓ product_url
+     ✓ total_thc
+     ✓ total_thc_units
+     ✓ total_cbd
+     ✓ total_cbd_units
+     ✓ price
+     ✓ discount_price
+     ✓ amount
+     ✓ classification
+     ✓ indica_percentage
+     ✓ sativa_percentage
+     ✓ image_url
+     ✓ product_type
+     ✓ product_subtype
+     ✓ product_description
+     ✓ predicted_effects
+     ✓ predicted_aromas
+     ✓ lineage
+     ✓ distributor
+     ✓ distributor_license_number
+     ✓ lab_results_url
+     ✓ results (augmented)
+
+ """
+ # Standard imports:
+ from datetime import datetime
+ import os
+ from time import sleep
+
+ # External imports:
+ from cannlytics.data import create_sample_id
+ from cannlytics.data.cache import Bogart
+ from cannlytics.data.coas.coas import CoADoc
+ from cannlytics.data.web import initialize_selenium
+ import pandas as pd
+ import requests
+
+ # Selenium imports.
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.support.ui import Select
+
+
+ # Define the base URL.
+ base_url = 'https://flowercompany.com/'
+
+ # Define the categories.
+ brand_pages = []
+ category_pages = [
+     'category/fire-flower',
+     'category/cartridges',
+     'category/concentrates',
+     'category/edibles',
+     'category/prerolls',
+     'category/top-shelf-nugs',
+     'category/just-weed',
+     'category/wellness',
+     'category/the-freshest',
+     'category/staff-picks',
+     'category/latest-drops',
+ ]
+
+ # Define the indica/sativa types.
+ indica_percentages = {
+     'Indica': 1,
+     'I-Hybrid': 0.75,
+     'Hybrid': 0.5,
+     'S-Hybrid': 0.25,
+     'Sativa': 0,
+ }
+
+
+ def click_yes_button(driver):
+     """Click the "Yes" button."""
+     try:
+         yes_button = driver.find_element(By.CLASS_NAME, 'age-gate-yes-button')
+         yes_button.click()
+         sleep(2)
+     except:
+         pass
+
+
+ def click_show_more_button(driver):
+     """Click "Show More" until the button is not found."""
+     while True:
+         try:
+             # Find the "Show More" button and click it
+             more_button = driver.find_element(By.CLASS_NAME, 'show-more-button')
+             more_button.click()
+             sleep(3)
+         except:
+             break
+
+
+ def save_product_data(
+         items: list[dict],
+         data_dir: str,
+         namespace: str = 'results'
+     ):
+     """Save the product data to a CSV file."""
+     if not os.path.exists(data_dir):
+         os.makedirs(data_dir)
+     timestamp = datetime.now().strftime('%Y-%m-%d')
+     datafile = os.path.join(data_dir, f'{namespace}-{timestamp}.csv')
+     df = pd.DataFrame(items)
+     df.to_csv(datafile, index=False)
+     return datafile
+
+
+ def download_coa_pdfs(
+         items,
+         pdf_dir,
+         cache=None,
+         url_key='lab_results_url',
+         id_key='product_id',
+         verbose=True,
+         pause=10.0
+     ):
+     """Download all of the COA PDFs."""
+     if not cache: cache = Bogart()
+     for obs in items:
+         url = obs[url_key]
+         if not url:
+             continue
+         url_hash = cache.hash_url(url)
+         if cache.get(url_hash):
+             if verbose:
+                 print(f'Skipped (cached): {url}')
+             continue
+         response = requests.get(url)
+         filename = os.path.join(pdf_dir, obs[id_key] + '.pdf')
+         with open(filename, 'wb') as pdf_file:
+             pdf_file.write(response.content)
+         if verbose:
+             print(f'Downloaded PDF: {filename}')
+         cache.set(url_hash, {'type': 'download', 'url': url, 'file': filename})
+         sleep(pause)
+
+
+ def parse_coa_pdfs(
+         parser,
+         data,
+         pdf_dir,
+         cache=None,
+         id_key='product_id',
+         verbose=True,
+     ):
+     """Parse corresponding COAs from a DataFrame in a PDF directory."""
+     all_results = []
+     if not cache: cache = Bogart()
+     for _, row in data.iterrows():
+         coa_pdf = row[id_key] + '.pdf'
+         pdf_file_path = os.path.join(pdf_dir, coa_pdf)
+         if not os.path.exists(pdf_file_path):
+             continue
+         pdf_hash = cache.hash_file(pdf_file_path)
+         if cache.get(pdf_hash):
+             if verbose:
+                 print(f'Skipped (cached parse): {pdf_file_path}')
+             all_results.append(cache.get(pdf_hash))
+             continue
+         try:
+             coa_data = parser.parse(pdf_file_path)
+             entry = {**row.to_dict(), **coa_data[0]}
+             entry['coa_pdf'] = coa_pdf
+             all_results.append(entry)
+             cache.set(pdf_hash, entry)
+             if verbose:
+                 print(f'Parsed COA: {pdf_file_path}')
+         except Exception as e:
+             if verbose:
+                 print(f'Failed to parse COA: {pdf_file_path}', str(e))
+             continue
+     return pd.DataFrame(all_results)
+
+
+ def extract_weight(amount_str: str):
+     """Extracts the numerical weight in grams from the amount string."""
+     if amount_str:
+         parts = amount_str.split('(')
+         if len(parts) > 1:
+             weight = parts[1].split('g')[0].strip()
+             return float(weight)
+     return None
+
+
+ def price_to_float(price_str: str):
+     """Converts a price string to a float."""
+     return float(price_str.replace('$', ''))
+
+
+ def get_products_flower_co(
+         data_dir: str,
+         cache = None,
+         verbose: bool = True,
+         headless: bool = True,
+         pause_between_page: float = 30.0,
+     ):
+     """Get products from Flower Company."""
+
+     # Initialize the driver and use a default cache if none was provided.
+     driver = initialize_selenium(headless=headless)
+     if not cache: cache = Bogart()
+
+     # Get all of the brand pages.
+     driver.get(base_url + 'menu')
+     try:
+         yes_button = driver.find_element(By.CLASS_NAME, 'age-gate-yes-button')
+         yes_button.click()
+         sleep(2)
+     except Exception:
+         pass
+     div = driver.find_element(By.CLASS_NAME, 'special-content-brand-row')
+     links = div.find_elements(by=By.TAG_NAME, value='a')
+     for link in links:
+         brand_pages.append(link.get_attribute('href').replace(base_url, ''))
+
+     # Open each brand/category page.
+     products, recorded = [], set(cache.get('product_urls') or [])
+     for page in category_pages + brand_pages:
+
+         # Get the brand/category page.
+         driver.get(base_url + page)
+
+         # Click "Yes" button.
+         click_yes_button(driver)
+
+         # Click "Show More" until the button is not found.
+         click_show_more_button(driver)
+
+         # Get all of the cards.
+         sleep(pause_between_page)
+         cards = driver.find_elements(by=By.CLASS_NAME, value='product-card-wrapper')
+         if verbose:
+             print(f'Found {len(cards)} products for page: {page}')
+
+         # Get the data from each card.
+         for card in cards:
+
+             # Find the product details.
+             producer = card.find_element(By.CSS_SELECTOR, '.favorite-company a').text.strip()
+             product_name = card.find_element(By.CSS_SELECTOR, '.favorite-product-name a').text.strip()
+             product_url = card.find_element(By.CSS_SELECTOR, '.favorite-product-name a').get_attribute('href')
+
+             # Skip the product if it's already recorded.
+             if product_url in recorded:
+                 continue
+             recorded.add(product_url)
+
+             # Get the total THC.
+             # Optional: Get other totals.
+             try:
+                 total_thc = card.find_element(By.CSS_SELECTOR, '.product-card-thc').text.strip()
+             except:
+                 total_thc = ''
+
+             # Find the price and discount.
+             discount = 0
+             discount_price = card.find_element(By.CSS_SELECTOR, '.price.product-card-price-actual').text.strip()
+             price = card.find_element(By.CSS_SELECTOR, '.price.retail.product-card-price-retail').text.strip()
+
+             # Find the amount.
+             try:
+                 amount = card.find_element(By.CSS_SELECTOR, '.solo-variant-toggle').text.strip()
+             except:
+                 select_element = card.find_element(By.CSS_SELECTOR, 'select.new-product-card-variant-select')
+                 select_object = Select(select_element)
+                 amount_options = [option.text.strip() for option in select_object.options]
+                 amount = amount_options[0] if amount_options else None
+
+             # Find the strain type.
+             classification = card.text.split('\n')[0]
+             indica_percentage = indica_percentages.get(classification, 0.5)
+             sativa_percentage = 1 - indica_percentage
+
+             # Clean the data.
+             try:
+                 total_thc_units = 'percent' if '%' in total_thc else 'mg'
+                 total_thc = float(total_thc.lower().replace('% thc', '').replace('mg thc', '').strip())
+                 price = price_to_float(price)
+                 discount_price = price_to_float(discount_price)
+                 discount = price - discount_price
+             except:
+                 pass
+
+             # Add the product to the list.
+             products.append({
+                 'product_name': product_name,
+                 'category': page.split('/')[-1],
+                 'producer': producer,
+                 'total_thc': total_thc,
+                 'total_thc_units': total_thc_units,
+                 'price': price,
+                 'discount_price': discount_price,
+                 'discount': discount,
+                 'amount': extract_weight(amount),
+                 'classification': classification,
+                 'indica_percentage': indica_percentage,
+                 'sativa_percentage': sativa_percentage,
+                 'product_url': product_url,
+             })
+
+     # Cache the product URLs.
+     cache.set('product_urls', list(recorded))
+
+     # Open file of all saved product URLs.
+     products_datafile = os.path.join(data_dir, 'ca-all-products-flower-company.csv')
+     if os.path.exists(products_datafile):
+         existing_products = pd.read_csv(products_datafile)
+         if verbose:
+             print('Number of existing products:', len(existing_products))
+         new_products = pd.DataFrame(products)
+         new_products['total_thc'] = pd.to_numeric(new_products['total_thc'], errors='coerce')
+         new_products['total_thc'].fillna(0, inplace=True)
+         existing_products['total_thc'] = pd.to_numeric(existing_products['total_thc'], errors='coerce')
+         existing_combo = existing_products[['product_url', 'total_thc']]
+         merged_df = pd.merge(new_products, existing_combo, on=['product_url', 'total_thc'], how='left', indicator=True)
+         unrecorded_products = merged_df[merged_df['_merge'] == 'left_only']
+         unrecorded_products.drop(columns=['_merge'], inplace=True)
+     else:
+         unrecorded_products = pd.DataFrame(products)
+
+     # Get each product URL page to get each product's data and results.
+     data = []
+     if verbose:
+         print('Number of unrecorded products:', len(unrecorded_products))
+     for _, product in unrecorded_products.iterrows():
+         if verbose:
+             print(f'Getting data for: {product["product_url"]}')
+         driver.get(product['product_url'])
+         sleep(pause_between_page)
+
+         # Click "Yes" button.
+         click_yes_button(driver)
+
+         # Get data for each product:
+         types = driver.find_elements(By.CSS_SELECTOR, '.detail-product-type')
+         product_type, product_subtype = None, None
+         if types:
+             product_type = types[0].text.strip()
+             if len(types) >= 2:
+                 product_subtype = types[1].text.strip()
+
+         # Get the product description.
+         try:
+             product_description = driver.find_element(By.CSS_SELECTOR, '.product-view-description').text.strip()
+         except:
+             product_description = None
+
+         # Skip accessories.
+         if product_type == 'Accessory':
+             continue
+
+         # Get the effects, aromas, lineage, and lab results URL.
+         info_rows = driver.find_elements(By.CSS_SELECTOR, '.row.product-view-row')
+         contents, effects, aromas, lineage, lab_results_url = '', '', '', '', ''
+         for row in info_rows:
+             parts = row.text.split('\n')
+             field = parts[0].lower()
+             if 'contents' in field:
+                 contents = parts[-1]
+             elif 'effects' in field:
+                 effects = parts[-1]
+             elif 'aromas' in field:
+                 aromas = parts[-1]
+             elif 'lineage' in field:
+                 lineage = parts[-1]
+             elif 'tested' in field:
+                 try:
+                     el = row.find_element(By.TAG_NAME, 'a')
+                     lab_results_url = el.get_attribute('href')
+                 except:
+                     pass
+
+         # Get the distributor.
+         els = driver.find_elements(By.CSS_SELECTOR, '.row.d-block .detail-sub-text')
+         distributor = els[-2].text.strip() if len(els) > 1 else None
+         distributor_license_number = els[-1].text.strip() if len(els) > 1 else None
+
+         # Get the image URL.
+         image_url = driver.find_element(By.CSS_SELECTOR, '.product-image-lg').get_attribute('src')
+
+         # Get product name and producer, if missing.
+         if not product['product_name']:
+             product['product_name'] = driver.find_element(By.CSS_SELECTOR, '.product-view-name').text
+             product['producer'] = driver.find_element(By.CSS_SELECTOR, '.product-view-brand').text
+
+         # Get prices and amounts, if missing.
+         if not product['price']:
+             price_element = driver.find_element(By.ID, 'variant-price-retail')
+             driver.execute_script("arguments[0].scrollIntoView(true);", price_element)
+             sleep(0.33)
+             price = price_element.text
+             discount_price = driver.find_element(By.ID, 'variant-price').text
+             amount = driver.find_element(By.CSS_SELECTOR, '.variant-toggle').text
+             product['amount'] = extract_weight(amount)
+             product['price'] = price_to_float(price)
+             product['discount_price'] = price_to_float(discount_price)
+             product['discount'] = product['price'] - product['discount_price']
+
+         # Get compounds, if missing.
+         if not product.get('total_thc'):
+             try:
+                 total_thc = driver.find_element(By.CSS_SELECTOR, '.product-card-thc').text
+                 product['total_thc'] = float(total_thc.lower().replace('% thc', '').replace('mg thc', '').strip())
+                 product['total_thc_units'] = 'percent' if '%' in total_thc else 'mg'
+             except:
+                 pass
+         if not product.get('total_cbd'):
+             try:
+                 total_cbd = driver.find_element(By.CSS_SELECTOR, '.product-card-cbd').text
+                 product['total_cbd'] = float(total_cbd.lower().replace('% cbd', '').replace('mg cbd', '').strip())
+                 product['total_cbd_units'] = 'percent' if '%' in total_cbd else 'mg'
+             except:
+                 product['total_cbd'] = None
+
+         # Get classification, if missing.
+         if not product['classification']:
+             el = driver.find_element(By.CSS_SELECTOR, '.product-detail-type-container')
+             product['classification'] = el.text.split('\n')[0]
+             product['indica_percentage'] = indica_percentages.get(product['classification'], 0.5)
+             product['sativa_percentage'] = 1 - product['indica_percentage']
+
+         # Create a product ID.
+         product_id = create_sample_id(
+             private_key=str(product['total_thc']),
+             public_key=product['product_name'],
+             salt=product['producer'],
+         )
+
+         # Record the product item details.
+         item = {
+             'product_id': product_id,
+             'lab_results_url': lab_results_url,
+             'image_url': image_url,
+             'product_type': product_type,
+             # Note: `product_subtype` may be getting overridden.
+             # Deprecate `product_sub_type` once confirmed.
+             'product_subtype': product_subtype,
+             'product_sub_type': product_subtype,
+             'product_description': product_description,
+             'product_contents': contents,
+             'predicted_effects': effects,
+             'predicted_aromas': aromas.split(', '),
+             'lineage': lineage,
+             'distributor': distributor,
+             'distributor_license_number': distributor_license_number,
+         }
+         data.append({**product, **item})
+
+     # Close the browser.
+     driver.close()
+     driver.quit()
+
+     # Return the data.
+     return data
+
+
+ def get_results_ca_flower_co(
+         pdf_dir,
+         data_dir,
+         cache_path=None,
+         verbose=True,
+         namespace='ca-products-flower-company',
+     ):
+     """Get California cannabis lab results from the Flower Company."""
+     if not os.path.exists(pdf_dir): os.makedirs(pdf_dir)
+     if not os.path.exists(data_dir): os.makedirs(data_dir)
+     cache = Bogart(cache_path)
+     data = get_products_flower_co(data_dir, cache=cache, verbose=verbose)
+     datafile = save_product_data(data, data_dir, namespace=namespace)
+     cache.set(cache.hash_file(datafile), {'type': 'datafile', 'file': datafile})
+     if verbose: print(f'Saved {len(data)} products to: {datafile}')
+     download_coa_pdfs(data, pdf_dir=pdf_dir, cache=cache, verbose=verbose)
+     return data
+
+
+ # TODO: Turn the following into standalone functions.
+
+ def parse_coas_ca_flower_co():
+     """Parse COAs from the Flower Company."""
+     pass
+
+     # # Aggregate product URLs that have been recorded.
+     # existing_products = []
+     # url_files = [x for x in os.listdir(data_dir) if 'products' in x and 'all' not in x]
+     # for url_file in url_files:
+     #     product_df = pd.read_csv(os.path.join(data_dir, url_file))
+     #     existing_products.append(product_df)
+     # existing_products = pd.concat(existing_products)
+     # existing_products.drop_duplicates(subset=['product_url', 'total_thc'], inplace=True)
+     # print('Final number of products:', len(existing_products))
+     # products_datafile = os.path.join(data_dir, 'ca-all-products-flower-company.csv')
+     # existing_products.to_csv(products_datafile, index=False)
+
+     # # Read the download product items.
+     # product_data = pd.read_csv(datafile)
+
+     # # Parse any un-parsed COAs.
+     # # FIXME: For some reason this is causing a memory leak.
+     # # TODO: Ensure the PDF can be matched to the data.
+     # parser = CoADoc()
+     # results = parse_coa_pdfs(
+     #     parser=parser,
+     #     data=product_data,
+     #     cache=cache,
+     #     pdf_dir=pdf_dir,
+     #     verbose=verbose,
+     # )
+
+     # # Save the parsed COA data to a file.
+     # # TODO: Keep track of the datafile in the cache.
+     # namespace = 'ca-results-flower-company'
+     # timestamp = datetime.now().strftime('%Y-%m-%d')
+     # results_datafile = os.path.join(data_dir, f'{namespace}-{timestamp}.xlsx')
+     # parser.save(results, results_datafile)
+     # print(f'Saved {len(results)} parsed COAs to: {results_datafile}')
+
+     # # Save all lab results.
+     # all_results = []
+     # results_files = [x for x in os.listdir(data_dir) if 'results' in x and 'all' not in x]
+     # for results_file in results_files:
+     #     results_df = pd.read_excel(os.path.join(data_dir, results_file))
+     #     all_results.append(results_df)
+     # all_results = pd.concat(all_results)
+     # all_results.drop_duplicates(subset=['sample_id', 'results_hash'], inplace=True)
+     # # all_results = all_results.loc[all_results['results'] != '[]']
+     # print('Final number of results:', len(all_results))
+     # all_results_datafile = os.path.join(data_dir, 'ca-all-results-flower-company.xlsx')
+     # all_results.to_excel(all_results_datafile, index=False)
+     # print(f'Saved {len(all_results)} results to: {all_results_datafile}')
+
+
+ def archive_results_ca_flower_co():
+     """Archive the results from the Flower Company."""
+     pass
+
+     # # FIXME: Upload data to Firestore.
+
+     # # FIXME: Upload files to Google Cloud Storage.
+
+     # # FIXME: Upload datafiles to Google Cloud Storage.
+
+
+ # === Test ===
+ # [✓] Tested: 2024-05-21 by Keegan Skeate <keegan@cannlytics>
+ if __name__ == '__main__':
+
+     # Get results.
+     all_results = get_results_ca_flower_co(
+         pdf_dir='D:/data/california/results/pdfs/flower-company',
+         data_dir='D:/data/california/results/datasets/flower-company',
+         cache_path='D://data/.cache/results-ca-flower-co.jsonl',
+         verbose=True,
+     )
+
+     # Parse COAs.
+
+     # Archive results.
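
The download step above keys a cache on a hash of each URL so that re-runs skip PDFs that were already fetched. A condensed sketch of that idiom, using only the `Bogart` calls that appear in this file; the cache path, URL, and output file name are hypothetical:

from cannlytics.data.cache import Bogart
import requests

cache = Bogart('.cache/example.jsonl')  # Hypothetical cache file.
url = 'https://example.com/coa.pdf'     # Hypothetical COA URL.
key = cache.hash_url(url)
if not cache.get(key):
    response = requests.get(url)
    with open('coa.pdf', 'wb') as pdf_file:
        pdf_file.write(response.content)
    # Record the download so the next run skips this URL.
    cache.set(key, {'type': 'download', 'url': url, 'file': 'coa.pdf'})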
algorithms/get_results_ca_glass_house.py ADDED
@@ -0,0 +1,346 @@
+ """
+ Get California cannabis lab results
+ Copyright (c) 2023 Cannlytics
+
+ Authors:
+     Keegan Skeate <https://github.com/keeganskeate>
+     Candace O'Sullivan-Sutherland <https://github.com/candy-o>
+ Created: 5/25/2023
+ Updated: 5/30/2023
+ License: <https://github.com/cannlytics/cannlytics/blob/main/LICENSE>
+
+ Description:
+
+     Archive California cannabis lab result data.
+
+ Data Sources:
+
+     - [Glass House Farms Strains](https://glasshousefarms.org/strains/)
+
+ """
+ # Standard imports:
+ from datetime import datetime
+ import os
+ from time import sleep
+
+ # External imports:
+ from cannlytics.data.coas.coas import CoADoc
+ from bs4 import BeautifulSoup
+ from cannlytics.utils.constants import DEFAULT_HEADERS
+ import pandas as pd
+ import requests
+
+
+ # Glass House Farms constants.
+ GLASS_HOUSE_FARMS = {
+     'business_dba_name': 'Glass House Farms',
+     'business_website': 'https://glasshousefarms.org',
+     'business_image_url': 'https://glassfarms.wpenginepowered.com/wp-content/uploads/2021/10/new-ghf-menu.svg',
+     'producer_license_number': 'CCL18-0000512',
+     'producer_latitude': 34.404930,
+     'producer_longitude': -119.518250,
+     'producer_street_address': '5601 Casitas Pass Rd, Carpinteria, CA 93013',
+     'producer_city': 'Carpinteria',
+     'producer_county': 'Santa Barbara',
+     'producer_state': 'CA',
+     'lab_results_url': 'https://glasshousefarms.org/strains/',
+     # TODO: The data below should be pulled from the license data.
+     'license_status': 'Active',
+     'license_status_date': '2023-04-07T00:00:00',
+     'license_term': 'Annual',
+     'license_type': 'Cultivation - Small Mixed-Light Tier 1',
+     'license_designation': 'Medicinal',
+     'issue_date': '2019-03-11T00:00:00',
+     'expiration_date': '2024-03-11T00:00:00',
+     'licensing_authority_id': 'CCL',
+     'licensing_authority': 'CalCannabis Cultivation Licensing (CCL)',
+     'business_legal_name': 'Mission Health Associates, Inc. dba Glass House Farms',
+     'business_owner_name': 'Graham Farrar, Kyle Kazan',
+     'business_structure': 'Corporation',
+     'business_phone': '(805) 252-5755',
+     'parcel_number': '001-060-042',
+ }
+
+ # Strain type constants.
+ STRAIN_TYPES = {
+     'sativa': {'sativa_percentage': 1.0, 'indica_percentage': 0.0},
+     'sativaDominant': {'sativa_percentage': 0.75, 'indica_percentage': 0.25},
+     'hybrid': {'sativa_percentage': 0.5, 'indica_percentage': 0.5},
+     'indica': {'sativa_percentage': 0.0, 'indica_percentage': 1.0},
+     'indicaDominant': {'sativa_percentage': 0.25, 'indica_percentage': 0.75},
+     'cbd': {'sativa_percentage': 0.0, 'indica_percentage': 0.0},
+     'cbdt': {'sativa_percentage': 0.0, 'indica_percentage': 0.0},
+ }
+
+ # TODO: Get license data from either Hugging Face or locally.
+ # from datasets import load_dataset
+
+ # license_number = GLASS_HOUSE_FARMS['producer_license_number']
+ # licenses = load_dataset('cannlytics/cannabis_licenses', 'ca')
+ # licenses = licenses['data'].to_pandas()
+ # licenses = pd.read_csv(f'../../cannabis_licenses/data/ca/licenses-ca-latest.csv')
+ # criterion = licenses['license_number'].str.contains(license_number)
+ # match = licenses.loc[criterion]
+ # if len(match) != 0:
+ #     licensee = match.iloc[0]
+ #     print('Found licensee data:', licensee)
+ #     obs['producer_county'] = licensee['premise_county']
+ #     obs['producer_latitude'] = licensee['premise_latitude']
+ #     obs['producer_longitude'] = licensee['premise_longitude']
+ #     obs['producer_license_number'] = licensee['license_number']
+
+
+ def get_glass_house_farms_lab_results(
+         data_dir: str,
+         pdf_dir: str,
+         overwrite=False
+     ):
+     """Get lab results published by Glass House Farms.
+     Data points:
+         ✓ image_url
+         ✓ strain_id
+         ✓ strain_name
+         ✓ strain_type
+         ✓ indica_percentage
+         ✓ sativa_percentage
+         ✓ strain_url
+         ✓ lineage
+         ✓ lab_result_id
+         ✓ coa_url
+     """
+
+     # Create output directory.
+     license_number = GLASS_HOUSE_FARMS['producer_license_number']
+     license_pdf_dir = os.path.join(pdf_dir, license_number)
+     if not os.path.exists(license_pdf_dir):
+         os.makedirs(license_pdf_dir)
+
+     # Read the strains website.
+     url = 'https://glasshousefarms.org/strains/'
+     response = requests.get(url, headers=DEFAULT_HEADERS)
+     soup = BeautifulSoup(response.content, 'html.parser')
+
+     # Get the data for each strain.
+     observations = []
+     strains = soup.find_all(class_='item')
+     for strain in strains:
+         obs = {}
+
+         # Extract image URL
+         img_tag = strain.find('img')
+         obs['image_url'] = img_tag['src']
+
+         # Extract item type
+         strain_type = strain.find('h5').text
+         obs['strain_type'] = strain_type
+
+         # Extract item name
+         strain_name = strain.find('h4').text
+         strain_name = strain_name.replace('\n', '').replace(strain_type, '').strip()
+         obs['strain_name'] = strain_name
+
+         # Get the strain URL.
+         strain_url = strain.find('a', class_='exp')['href']
+         obs['strain_url'] = strain_url
+
+         # Get the strain ID.
+         obs['strain_id'] = strain_url.rstrip('/').split('/')[-1]
+
+         # Get the indica and sativa percentages.
+         wave = strain.find('div', class_='wave')
+         wave_class = wave.get('class')
+         wave_class = [cls for cls in wave_class if cls != 'wave']
+         if wave_class:
+             for cls in wave_class:
+                 if cls in STRAIN_TYPES:
+                     obs['indica_percentage'] = STRAIN_TYPES[cls]['indica_percentage']
+                     obs['sativa_percentage'] = STRAIN_TYPES[cls]['sativa_percentage']
+                     break
+
+         # Record the observation.
+         observations.append(obs)
+
+     # Compile the strain data.
+     strain_data = pd.DataFrame(observations)
+
+     # Save the strain data.
+     date = datetime.now().strftime('%Y-%m-%d')
+     outfile = os.path.join(data_dir, f'ca-strains-glass-house-{date}.xlsx')
+     strain_data.to_excel(outfile, index=False)
+
+     # Get the lab results for each strain.
+     lab_results = []
+     for obs in observations:
+
+         # Get the strain page.
+         sleep(3.33)
+         response = requests.get(obs['strain_url'], headers=DEFAULT_HEADERS)
+         soup = BeautifulSoup(response.content, 'html.parser')
+
+         # Get the lineage.
+         try:
+             content = soup.find('div', class_='content')
+             divs = content.find_all('div', class_='et_pb_column')
+         except:
+             print('No content found:', obs['strain_url'])
+             continue
+         try:
+             lineage = divs[2].text.split('Lineage')[1].replace('\n', '').strip()
+             obs['lineage'] = lineage.split(' x ')
+         except:
+             print('No lineage found:', obs['strain_url'])
+             obs['lineage'] = []
+
+         # Get all of the COA PDF links found.
+         pdf_links = []
+         for link in soup.find_all('a'):
+             href = link.get('href')
+             if href and href.endswith('.pdf'):
+                 pdf_links.append(href)
+
+         # Format all of the COA PDF links found.
+         for link in pdf_links:
+             lab_result_id = link.split('/')[-1].split('.')[0]
+             result = {'coa_url': link, 'lab_result_id': lab_result_id}
+             lab_results.append({**GLASS_HOUSE_FARMS, **obs, **result})
+
+     # Download COA PDFs.
+     for lab_result in lab_results:
+         lab_result_id = lab_result['lab_result_id']
+         outfile = os.path.join(license_pdf_dir, f'{lab_result_id}.pdf')
+         if os.path.exists(outfile) and not overwrite:
+             continue
+         sleep(1)
+         response = requests.get(lab_result['coa_url'], headers=DEFAULT_HEADERS)
+         with open(outfile, 'wb') as pdf:
+             pdf.write(response.content)
+         print('Downloaded: %s' % outfile)
+
+     # Save all lab result URLs.
+     results = pd.DataFrame(lab_results)
+     date = datetime.now().strftime('%Y-%m-%d')
+     outfile = os.path.join(data_dir, f'ca-result-urls-glass-house-{date}.xlsx')
+     results.to_excel(outfile, index=False)
+
+     # === DEV: Commented for collection only ===
+
+     # # Initialize CoADoc.
+     # parser = CoADoc()
+
+     # # Parse the data from all COAs.
+     # coa_data = []
+     # for _, result in results.iterrows():
+     #     lab_result_id = result['lab_result_id']
+     #     coa_pdf = f'{lab_result_id}.pdf'
+     #     pdf_file = os.path.join(license_pdf_dir, coa_pdf)
+     #     if not os.path.exists(pdf_file):
+     #         print('File not found:', pdf_file)
+     #         continue
+     #     try:
+     #         parsed = parser.parse(pdf_file)
+     #         entry = {**result.to_dict(), **parsed[0]}
+     #         entry['coa_pdf'] = coa_pdf
+     #         coa_data.append(entry)
+     #         print('Parsed:', pdf_file)
+     #     except:
+     #         print('Error parsing:', pdf_file)
+     #         continue
+
+     # # Save the lab results.
+     # date = datetime.now().strftime('%Y-%m-%d')
+     # outfile = os.path.join(data_dir, f'ca-results-glass-house-{date}.xlsx')
+     # try:
+     #     parser.save(coa_data, outfile)
+     # except:
+     #     try:
+     #         coa_df = pd.DataFrame(coa_data)
+     #         coa_df.to_excel(outfile, index=False)
+     #         print('Saved %i results:' % len(coa_data), outfile)
+     #     except:
+     #         print('Error saving:', outfile)
+
+     # # Return the data.
+     # return pd.DataFrame(coa_data)
+
+
+ # === Test ===
+ # [✓] Tested: 2024-04-14 by Keegan Skeate <keegan@cannlytics>
+ if __name__ == '__main__':
+
+     # Specify where your data lives.
+     data_dir = 'D://data/california/results/datasets'
+     pdf_dir = 'D://data/california/results/pdfs'
+
+     # Get CA lab results.
+     all_results = get_glass_house_farms_lab_results(data_dir, pdf_dir)
+
+
+     # === DEV ===
+
+     # # Parse all COAs in directory.
+     # parser = CoADoc()
+     # license_number = GLASS_HOUSE_FARMS['producer_license_number']
+     # license_pdf_dir = os.path.join(pdf_dir, license_number)
+
+     # # Parse the data from all COAs.
+     # coa_data = []
+     # for _, result in all_results.iterrows():
+     #     lab_result_id = result['lab_result_id']
+     #     coa_pdf = f'{lab_result_id}.pdf'
+     #     pdf_file = os.path.join(license_pdf_dir, coa_pdf)
+     #     if not os.path.exists(pdf_file):
+     #         print('File not found:', pdf_file)
+     #         continue
+     #     try:
+     #         parsed = parser.parse(pdf_file)
+     #         entry = {**result.to_dict(), **parsed[0]}
+     #         entry['coa_pdf'] = coa_pdf
+     #         coa_data.append(entry)
+     #         print('Parsed:', pdf_file)
+     #     except:
+     #         print('Error parsing:', pdf_file)
+     #         continue
+
+     # # Save the lab results.
+     # date = datetime.now().strftime('%Y-%m-%d')
+     # outfile = os.path.join(data_dir, f'ca-results-glass-house-{date}.xlsx')
+     # try:
+     #     parser.save(coa_data, outfile)
+     # except:
+     #     try:
+     #         coa_df = pd.DataFrame(coa_data)
+     #         coa_df.to_excel(outfile, index=False)
+     #     except:
+     #         print('Error saving:', outfile)
+     #     print('Saved %i results:' % len(coa_data), outfile)
+
+
+     # # === Aggregate lab results ===
+
+     # # Aggregate all CA lab results.
+     # aggregate = []
+     # datafiles = [
+     #     '../data/ca/glasshouse-lab-results-2023-09-22.xlsx',
+     #     '../data/ca/rawgarden-lab-results-2023-09-23.csv',
+     #     '../data/ca/sc-labs-lab-results-2022-07-13.xlsx',
+     # ]
+     # for datafile in datafiles:
+     #     if datafile.endswith('.xlsx'):
+     #         df = pd.read_excel(datafile, sheet_name='Details')
+     #     else:
+     #         df = pd.read_csv(datafile)
+     #     aggregate.append(df)
+
+     # # Save aggregated CA lab results.
+     # aggregate = pd.concat(aggregate)
+     # aggregate.to_csv('../data/ca/ca-lab-results-latest.csv', index=False)
+     # print('Saved %i CA lab results' % len(aggregate))
+
+
+     # FIXME: Upload results to Firestore.
+
+
+     # FIXME: Upload PDFs to Google Cloud Storage.
+
+
+     # FIXME: Upload datafiles to Google Cloud Storage.
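
The COA discovery above boils down to collecting every `.pdf` anchor on a strain page. A minimal, self-contained sketch of that extraction; the HTML snippet is made up for illustration:

from bs4 import BeautifulSoup

html = '<a href="/coas/sample-123.pdf">COA</a><a href="/strains/">Strains</a>'
soup = BeautifulSoup(html, 'html.parser')
# Keep only anchors whose href ends in .pdf, as the scraper above does.
pdf_links = [a.get('href') for a in soup.find_all('a')
             if a.get('href') and a.get('href').endswith('.pdf')]
print(pdf_links)  # ['/coas/sample-123.pdf']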
algorithms/{get_results_rawgarden.py → get_results_ca_rawgarden.py} RENAMED
@@ -6,7 +6,7 @@ Authors:
     Keegan Skeate <https://github.com/keeganskeate>
     Candace O'Sullivan-Sutherland <https://github.com/candy-o>
 Created: 8/23/2022
-Updated: 9/22/2022
 License: CC-BY 4.0 <https://huggingface.co/datasets/cannlytics/cannabis_tests/blob/main/LICENSE>
 
 Description:
@@ -25,8 +25,7 @@ Data Source:
 
 Command line usage:
 
-    python ai/curation/get_rawgarden_data/get_rawgarden_data.py \
-        --days_ago=1 --get_all=False
 
 """
 # Standard imports.
@@ -53,26 +52,6 @@ from cannlytics.firebase import (
 from cannlytics.utils import kebab_case, rmerge
 from cannlytics.utils.constants import DEFAULT_HEADERS
 
-# Specify where your data lives.
-BUCKET_NAME = 'cannlytics-company.appspot.com'
-COLLECTION = 'public/data/lab_results'
-STORAGE_REF = 'data/lab_results/rawgarden'
-
-# Create directories if they don't already exist.
-# TODO: Edit `ENV_FILE` and `DATA_DIR` as needed for your desired setup.
-ENV_FILE = '../.env'
-DATA_DIR = '../'
-COA_DATA_DIR = f'{DATA_DIR}/rawgarden'
-COA_PDF_DIR = f'{COA_DATA_DIR}/pdfs'
-TEMP_PATH = f'{COA_DATA_DIR}/tmp'
-if not os.path.exists(DATA_DIR): os.makedirs(DATA_DIR)
-if not os.path.exists(COA_DATA_DIR): os.makedirs(COA_DATA_DIR)
-if not os.path.exists(COA_PDF_DIR): os.makedirs(COA_PDF_DIR)
-if not os.path.exists(TEMP_PATH): os.makedirs(TEMP_PATH)
-
-# Define constants.
-BASE = 'https://rawgarden.farm/lab-results/'
-
 
 def get_rawgarden_products(
     start: Optional[Any] = None,
@@ -206,69 +185,76 @@ def parse_rawgarden_coas(
     """
     parsed, unidentified = [], []
     started = False
-    for path, _, files in os.walk(directory):
-        if verbose and not started:
-            started = True
-            if filenames:
-                total = len(filenames)
-            else:
-                total = len(files)
-            print('Parsing %i COAs, ETA > %.2fm' % (total, total * 25 / 60))
-        for filename in files:
-            if not filename.endswith('.pdf'):
-                continue
-            if filenames is not None:
-                if filename not in filenames:
                     continue
-            file_path = os.path.join(path, filename)
 
-            # Parse the COA, by any means necessary!
-            parser = CoADoc()
-            try:
-                new_data = parser.parse_pdf(
-                    file_path,
-                    temp_path=temp_path,
-                    **kwargs
-                )
-            except:
                 try:
-                    # FIXME: This should work without directly calling OCR.
-                    temp_file = f'{temp_path}/ocr_coa.pdf'
-                    parser.pdf_ocr(
-                        file_path,
-                        temp_file,
-                        temp_path,
-                        resolution=180,
-                    )
                     new_data = parser.parse_pdf(
-                        temp_file,
                         temp_path=temp_path,
                         **kwargs
                     )
-                except Exception as e:
-                    # Hot-fix: Remove temporary `magick-*` files.
-                    for i in os.listdir(temp_path):
-                        magick_path = os.path.join(temp_path, i)
-                        if os.path.isfile(magick_path) and i.startswith('magick-'):
-                            os.remove(magick_path)
-                    unidentified.append({'coa_pdf': filename})
-                    if verbose:
-                        print('Error:', filename)
-                        print(e)
-                    continue
-
-            # Add the subtype key and record the data.
-            subtype = path.split('\\')[-1]
-            if isinstance(new_data, dict):
-                new_data = [new_data]
-            new_data[0]['product_subtype'] = subtype
-            parsed.extend(new_data)
-            parser.quit()
-            gc.collect()
-            if verbose:
-                print('Parsed:', filename)
-
-            # TODO: Save intermittently?
 
     return parsed, unidentified
 
@@ -326,6 +312,25 @@ if __name__ == '__main__':
 
     # === Setup ===
 
    # Support command line usage.
    # Future work: Allow data dirs to be specified from the command line.
    import argparse
@@ -338,7 +343,7 @@ if __name__ == '__main__':
    args = {}
 
    # Specify collection period.
-   DAYS_AGO = args.get('days_ago', 365)
    GET_ALL = args.get('get_all', True)
 
    # === Data Collection ===
@@ -356,92 +361,172 @@ if __name__ == '__main__':
    # === Data Curation ===
 
    # Parse COA PDFs with CoADoc.
-   coa_data, unidentified_coas = parse_rawgarden_coas(
-       COA_PDF_DIR,
-       filenames=filenames,
-       temp_path=TEMP_PATH,
-       verbose=True,
-   )
-
-   # Merge the `products`'s `product_subtype` with the COA data.
-   # FIXME: Keep the URL (`lab_results_url`)!
-   coa_df = rmerge(
-       pd.DataFrame(coa_data),
-       products,
-       on='coa_pdf',
-       how='left',
-       replace='right',
-   )
-
-   # Create hashes.
-   coa_df = coa_df.where(pd.notnull(coa_df), None)
-   coa_df['results_hash'] = coa_df['results'].apply(
-       lambda x: create_hash(x),
-   )
-   coa_df['sample_hash'] = coa_df.loc[:, coa_df.columns != 'sample_hash'].apply(
-       lambda x: create_hash(x.to_dict()),
-       axis=1,
-   )
-   datafile_hash = create_hash(coa_df)
-
-   # === Data Archiving ===
-
-   # Create custom column order.
-   column_order = ['sample_hash', 'results_hash']
-   column_order += list(parser.column_order)
-   index = column_order.index('product_type') + 1
-   column_order.insert(index, 'product_subtype')
-
-   # Optional: Save the COA data to a workbook.
-   parser = CoADoc()
-   datafile = f'{COA_DATA_DIR}/{datafile_hash}.xlsx'
-   parser.save(coa_df, datafile, column_order=column_order)
-
-   # Optional: Save the unidentified COA data.
-   errors = [x['coa_pdf'] for x in unidentified_coas]
-   timestamp = datetime.now().isoformat()[:19].replace(':', '-')
-   error_file = f'{COA_DATA_DIR}/rawgarden-unidentified-coas-{timestamp}.xlsx'
-   products.loc[products['coa_pdf'].isin(errors)].to_excel(error_file)
-
-   # === Firebase Database and Storage ===
-
-   # Optional: Initialize Firebase.
-   # initialize_firebase(ENV_FILE)
-
-   # # Optional: Upload the lab results to Firestore.
-   # upload_lab_results(
-   #     coa_df.to_dict(orient='records'),
-   #     update=True,
-   #     verbose=True
    # )
 
-   # # Optional: Upload datafiles to Firebase Storage.
-   # storage_datafile = '/'.join([STORAGE_REF, datafile.split('/')[-1]])
-   # storage_error_file = '/'.join([STORAGE_REF, error_file.split('/')[-1]])
-   # upload_file(storage_datafile, datafile, bucket_name=BUCKET_NAME)
-   # upload_file(storage_error_file, error_file, bucket_name=BUCKET_NAME)
-
-   # == Data Aggregation ===
-
-   # # Initialize the COA parser.
-   # parser = CoADoc()
-
-   # # Stack COA datafiles, re-hash, and re-save!
-   # datafiles = [
-   #     f'{COA_DATA_DIR}/d7815fd2a097d06b719aadcc00233026f86076a680db63c532a11b67d7c8bc70.xlsx',
-   #     f'{COA_DATA_DIR}/01880e30f092cf5739f9f2b58de705fc4c245d6859c00b50505a3a802ff7c2b2.xlsx',
-   # ]
-
-   # # Create custom column order.
-   # column_order = ['sample_hash', 'results_hash']
-   # column_order += list(parser.column_order)
-   # index = column_order.index('product_type') + 1
-   # column_order.insert(index, 'product_subtype')
-
-   # # Aggregate the datafiles.
-   # master_data = parser.aggregate(
-   #     datafiles,
-   #     output=COA_DATA_DIR,
-   #     sheet_name='Details',
-   #     column_order=column_order,
    # )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
     Keegan Skeate <https://github.com/keeganskeate>
     Candace O'Sullivan-Sutherland <https://github.com/candy-o>
     Created: 8/23/2022
+    Updated: 10/7/2023
     License: CC-BY 4.0 <https://huggingface.co/datasets/cannlytics/cannabis_tests/blob/main/LICENSE>

 Description:

 Command line usage:

+    python get_rawgarden_data.py --days_ago=1 --get_all=False

 """
 # Standard imports.

 from cannlytics.utils import kebab_case, rmerge
 from cannlytics.utils.constants import DEFAULT_HEADERS


 def get_rawgarden_products(
         start: Optional[Any] = None,

     """
     parsed, unidentified = [], []
     started = False
+    filename = None
+    try:
+        for path, _, files in os.walk(directory):
+            if verbose and not started:
+                started = True
+                if filenames:
+                    total = len(filenames)
+                else:
+                    total = len(files)
+                print('Parsing %i COAs, ETA > %.2fm' % (total, total * 25 / 60))
+            for filename in files:
+                if not filename.endswith('.pdf'):
                     continue
+                if filenames is not None:
+                    if filename not in filenames:
+                        continue
+                file_path = os.path.join(path, filename)

+                # Parse the COA, by any means necessary!
+                parser = CoADoc()
                 try:
                     new_data = parser.parse_pdf(
+                        file_path,
                         temp_path=temp_path,
                         **kwargs
                     )
+                except:
+                    try:
+                        # FIXME: This should work without directly calling OCR.
+                        temp_file = f'{temp_path}/ocr_coa.pdf'
+                        parser.pdf_ocr(
+                            file_path,
+                            temp_file,
+                            temp_path,
+                            resolution=180,
+                        )
+                        new_data = parser.parse_pdf(
+                            temp_file,
+                            temp_path=temp_path,
+                            **kwargs
+                        )
+                    except Exception as e:
+                        # Hot-fix: Remove temporary `magick-*` files.
+                        try:
+                            for i in os.listdir(temp_path):
+                                magick_path = os.path.join(temp_path, i)
+                                if os.path.isfile(magick_path) and i.startswith('magick-'):
+                                    os.remove(magick_path)
+                        except FileNotFoundError:
+                            pass
+                        unidentified.append({'coa_pdf': filename})
+                        if verbose:
+                            print('Error:', filename)
+                            print(e)
+                        continue
+
+                # Add the subtype key and record the data.
+                subtype = path.split('\\')[-1]
+                if isinstance(new_data, dict):
+                    new_data = [new_data]
+                new_data[0]['product_subtype'] = subtype
+                parsed.extend(new_data)
+                parser.quit()
+                gc.collect()
+                if verbose:
+                    print('Parsed:', filename)
+
+                # FIXME: Save intermittently?
+    except:
+        print('Error parsing:', filename)

     return parsed, unidentified


     # === Setup ===

+    # Specify where your data lives.
+    BUCKET_NAME = 'cannlytics-company.appspot.com'
+    COLLECTION = 'public/data/lab_results'
+    STORAGE_REF = 'data/lab_results/rawgarden'
+
+    # Create directories if they don't already exist.
+    # Note: Edit `ENV_FILE` and `DATA_DIR` as needed for your desired setup.
+    DATA_DIR = 'D:/data/california/results'
+    COA_DATA_DIR = f'{DATA_DIR}/datasets/rawgarden'
+    COA_PDF_DIR = f'{COA_DATA_DIR}/pdfs/rawgarden'
+    TEMP_PATH = f'{COA_DATA_DIR}/tmp'
+    if not os.path.exists(DATA_DIR): os.makedirs(DATA_DIR)
+    if not os.path.exists(COA_DATA_DIR): os.makedirs(COA_DATA_DIR)
+    if not os.path.exists(COA_PDF_DIR): os.makedirs(COA_PDF_DIR)
+    if not os.path.exists(TEMP_PATH): os.makedirs(TEMP_PATH)
+
+    # Define constants.
+    BASE = 'https://rawgarden.farm/lab-results/'
+
     # Support command line usage.
     # Future work: Allow data dirs to be specified from the command line.
     import argparse

     args = {}

     # Specify collection period.
+    DAYS_AGO = args.get('days_ago', 365 * 5)
     GET_ALL = args.get('get_all', True)

     # === Data Collection ===

     # === Data Curation ===

     # Parse COA PDFs with CoADoc.
+    # filenames.reverse()
+    # coa_data, unidentified_coas = parse_rawgarden_coas(
+    #     COA_PDF_DIR,
+    #     filenames=filenames,
+    #     temp_path=TEMP_PATH,
+    #     verbose=True,
     # )
+    directory = COA_PDF_DIR
+    temp_path = TEMP_PATH
+    verbose = True
+    parsed, unidentified = [], []
+    started = False
+    filename = None
+    for path, _, files in os.walk(directory):
+        if verbose and not started:
+            started = True
+            if filenames:
+                total = len(filenames)
+            else:
+                total = len(files)
+            print('Parsing %i COAs, ETA > %.2fm' % (total, total * 25 / 60))
+        for filename in files:
+            if not filename.endswith('.pdf'):
+                continue
+            if filenames is not None:
+                if filename not in filenames:
+                    continue
+            file_path = os.path.join(path, filename)
+
+            # Parse the COA, by any means necessary!
+            parser = CoADoc()
+            try:
+                new_data = parser.parse_pdf(
+                    file_path,
+                    temp_path=temp_path,
+                )
+            except:
+                try:
+                    # FIXME: This should work without directly calling OCR.
+                    temp_file = f'{temp_path}/ocr_coa.pdf'
+                    parser.pdf_ocr(
+                        file_path,
+                        temp_file,
+                        temp_path,
+                        resolution=180,
+                    )
+                    new_data = parser.parse_pdf(
+                        temp_file,
+                        temp_path=temp_path,
+                    )
+                except Exception as e:
+                    # Hot-fix: Remove temporary `magick-*` files.
+                    try:
+                        for i in os.listdir(temp_path):
+                            magick_path = os.path.join(temp_path, i)
+                            if os.path.isfile(magick_path) and i.startswith('magick-'):
+                                os.remove(magick_path)
+                    except FileNotFoundError:
+                        pass
+                    unidentified.append({'coa_pdf': filename})
+                    if verbose:
+                        print('Error:', filename)
+                        print(e)
+                    continue
+
+            # Add the subtype key and record the data.
+            subtype = path.split('\\')[-1]
+            if isinstance(new_data, dict):
+                new_data = [new_data]
+            new_data[0]['product_subtype'] = subtype
+            parsed.extend(new_data)
+            if verbose:
+                print('Parsed:', filename)

+            # Deprecated: Reset the parser.
+            # parser.quit()
+            # gc.collect()
+
+            # TODO: See if the following fields need to be augmented:
+            # - date_retail
+            # - lab_results_url
+
+            # Save intermittently.
+            if len(parsed) % 100 == 0:
+
+                # Merge the `products`'s `product_subtype` with the COA data.
+                # FIXME: Keep the URL (`lab_results_url`)!
+                coa_df = pd.DataFrame(parsed)
+                # coa_df = rmerge(
+                #     pd.DataFrame(parsed),
+                #     products,
+                #     on='coa_pdf',
+                #     how='left',
+                #     replace='right',
+                # )
+
+                # Create custom column order.
+                column_order = ['sample_hash', 'results_hash']
+                # column_order += list(parser.column_order)
+                column_index = coa_df.columns.get_loc('product_type') + 1
+                column_order.insert(column_index, 'product_subtype')
+
+                # Save the COA data.
+                parser = CoADoc()
+                timestamp = datetime.now().isoformat()[:19].replace(':', '-')
+                datafile = f'{COA_DATA_DIR}/rawgarden-coa-data-{timestamp}.xlsx'
+                parser.save(coa_df, datafile, column_order=column_order)
+                print('Saved:', datafile)
+
+                # Save the unidentified COA data.
+                errors = [x['coa_pdf'] for x in unidentified]
+                timestamp = datetime.now().isoformat()[:19].replace(':', '-')
+                error_file = f'{COA_DATA_DIR}/rawgarden-unidentified-coas-{timestamp}.xlsx'
+                products.loc[products['coa_pdf'].isin(errors)].to_excel(error_file)
+
+    # === Data Saving ===
+
+    # # Create hashes.
+    # coa_df = coa_df.where(pd.notnull(coa_df), None)
+    # coa_df['results_hash'] = coa_df['results'].apply(
+    #     lambda x: create_hash(x),
+    # )
+    # coa_df['sample_hash'] = coa_df.loc[:, coa_df.columns != 'sample_hash'].apply(
+    #     lambda x: create_hash(x.to_dict()),
+    #     axis=1,
     # )
+    # datafile_hash = create_hash(coa_df)
+
+    # Define where the data lives.
+    data_dir = 'D://data/california/results/datasets/rawgarden'
+    if not os.path.exists(data_dir): os.makedirs(data_dir)
+
+    # Save Raw Garden COA data as JSON.
+    coa_data = pd.DataFrame(parsed)
+    timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
+    outfile = os.path.join(data_dir, f'fl-results-rawgarden-{timestamp}.json')
+    coa_data.to_json(outfile, orient='records')
+    print('Saved Raw Garden lab results:', outfile)
+
+    # Save Raw Garden COA datafile.
+    coa_data = pd.DataFrame(parsed)
+    timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
+    outfile = os.path.join(data_dir, f'fl-results-rawgarden-{timestamp}.xlsx')
+    parser.save(coa_data[:1000], outfile)
+    print('Saved Raw Garden lab results:', outfile)
+
+
+    # === Data Aggregation ===
+
+    # Aggregate historic lab results.
+    aggregate = []
+    files = os.listdir(COA_DATA_DIR)
+    for file in files:
+        if 'unidentified' in file or not file.endswith('.xlsx'):
+            continue
+        file_name = os.path.join(COA_DATA_DIR, file)
+        df = pd.read_excel(file_name, sheet_name='Details')
+        aggregate.append(df)
+
+    # Aggregate and remove duplicates.
+    aggregate = pd.concat(aggregate)
+    aggregate = aggregate.drop_duplicates(subset=['sample_id'])
+    print('Aggregated %i results.' % len(aggregate))
+
+    # Save Raw Garden lab results.
+    date = datetime.now().strftime('%Y-%m-%d')
+    outfile = os.path.join(data_dir, f'fl-results-rawgarden-{date}.csv')
+    aggregate.to_csv(outfile, index=False)
+    print('Saved Raw Garden lab results:', outfile)
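
The loop above tries native PDF text extraction first and only falls back to OCR when `parse_pdf` raises. A minimal, self-contained sketch of that fallback pattern, where `parse_native` and `parse_with_ocr` are hypothetical stand-ins for `CoADoc.parse_pdf` and the `pdf_ocr`-then-reparse step:

import os

def parse_with_fallback(directory, parse_native, parse_with_ocr):
    """Parse every PDF under `directory`, recording failures
    instead of aborting the whole run."""
    parsed, unidentified = [], []
    for path, _, files in os.walk(directory):
        for filename in files:
            if not filename.endswith('.pdf'):
                continue
            file_path = os.path.join(path, filename)
            try:
                data = parse_native(file_path)
            except Exception:
                try:
                    # Fall back to OCR for image-only PDFs.
                    data = parse_with_ocr(file_path)
                except Exception as error:
                    unidentified.append({'coa_pdf': filename, 'error': str(error)})
                    continue
            # Normalize single records to a list before extending.
            parsed.extend([data] if isinstance(data, dict) else data)
    return parsed, unidentified

Collecting failures in `unidentified` rather than raising keeps one bad PDF from spoiling a multi-hour batch run, which is the same design choice the script above makes.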
algorithms/get_results_ca_sc_labs.py ADDED
@@ -0,0 +1,235 @@
+"""
+Get California Lab Results | SC Labs
+Copyright (c) 2023-2024 Cannlytics
+
+Authors:
+    Keegan Skeate <https://github.com/keeganskeate>
+Created: 12/30/2023
+Updated: 1/15/2024
+License: MIT License <https://github.com/cannlytics/cannlytics/blob/main/LICENSE>
+"""
+# Standard imports:
+from datetime import datetime, timedelta
+import os
+from time import sleep
+from typing import Optional
+from urllib.parse import urljoin
+
+# External imports:
+from bs4 import BeautifulSoup
+from cannlytics.utils.constants import DEFAULT_HEADERS
+from cannlytics.data.coas import CoADoc
+from cannlytics.data.coas.algorithms import sclabs
+import pandas as pd
+import requests
+
+
+# Define the base URL for SC Labs.
+BASE_URL = 'https://client.sclabs.com/verify/'
+
+
+def get_sc_labs_results_by_month(
+        year: int,
+        month: int,
+        start_day: int = 1,
+        days: int = 31,
+        max_samples: int = 199,
+        prefix: Optional[str] = 'P',
+        pause: float = 3.33,
+        verbose: bool = True,
+        reverse: bool = False,
+    ) -> list:
+    """Get SC Labs results for a given month and year.
+    Args:
+        year (int): The year to search.
+        month (int): The month to search.
+        start_day (int): The day of the month to start from.
+        days (int): The number of days to search.
+        max_samples (int): The maximum number of samples to search for each day.
+        prefix (str): The letter prefix of the sample IDs to try.
+        pause (float): Seconds to sleep between requests.
+        verbose (bool): Whether to print progress.
+        reverse (bool): Whether to search backwards in time.
+    Returns:
+        docs (list): A list of URLs to the COAs.
+    """
+    docs = []
+    start_date = datetime(year, month, start_day)
+    if reverse:
+        # FIXME: With `reverse=True`, `end_date` precedes `start_date`,
+        # so the `while` loop below never executes.
+        end_date = (start_date - timedelta(days=days))
+    else:
+        end_date = (start_date + timedelta(days=days))
+    print('Start date:', start_date)
+    print('End date:', end_date)
+    while start_date < end_date:
+
+        # Iterate through all possible sample IDs for the day.
+        for i in range(1, max_samples):
+            sample_id = start_date.strftime('%y%m%d') + f'{prefix}{i:03}'
+            url = urljoin(BASE_URL, sample_id + '/')
+            page = requests.get(url, headers=DEFAULT_HEADERS)
+            sleep(pause)
+            if page.status_code != 200:
+                continue
+
+            # Check for 'Product not found' to break the loop.
+            soup = BeautifulSoup(page.content, 'html.parser')
+            try:
+                note = soup.find('p', {'class': 'errornote'}).text.strip()
+            except:
+                note = ''
+            if note == 'Product not found.':
+                if verbose:
+                    print('Product not found:', url)
+                break
+
+            # Check for 'results as private' to skip the sample.
+            if note.endswith('the results as private.'):
+                if verbose:
+                    print('Private results:', url)
+                continue
+
+            # Check for 'Certificate of Analysis'.
+            try:
+                certificate_tag = soup.find('div', {'class': 'sdp-sample-coa'})
+            except:
+                if verbose:
+                    print('Failed to find COA:', url)
+                continue
+            if certificate_tag and certificate_tag.text.strip() == 'Certificate of Analysis':
+                docs.append(url)
+                if verbose:
+                    print('Found COA:', url)
+
+        # Move to the next day.
+        start_date += timedelta(days=1)
+
+    # Return the list of URLs.
+    return docs
+
+
+# Define parameters for data collection.
+prefixes = [
+    # Used prefixes:
+    'J', 'K', 'L', 'M', 'N', 'P',
+    'Q', 'R', 'S', 'T', 'U',
+    'W', 'V', 'X', 'Y',
+    'H', 'Z',
+    # Unknown if used:
+    # 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'I', 'O',
+]
+start_day = 1
+days = 30
+start_year = 2024
+end_year = 2024
+start_month = 7
+end_month = 7
+pause = 3.33
+
+# Get all valid URLs, iterating over prefixes, years, and months.
+docs = []
+for y in reversed(range(start_year, end_year + 1)):
+    for m in reversed(range(start_month, end_month + 1)):
+        for prefix in prefixes:
+            print(f'=== Querying {y}-{m:02d} ({prefix}) ===')
+            results = get_sc_labs_results_by_month(
+                prefix=prefix,
+                year=y,
+                month=m,
+                days=days,
+                start_day=start_day,
+                pause=pause,
+            )
+            docs.extend(results)
+
+# Define the directory.
+DATA_DIR = 'D:/data/california/results/datasets/sclabs'
+if not os.path.exists(DATA_DIR):
+    os.makedirs(DATA_DIR)
+
+# Save the list of URLs.
+timestamp = pd.Timestamp.now().strftime('%Y-%m-%d-%H-%M-%S')
+outfile = os.path.join(DATA_DIR, f'ca-results-sclabs-urls-{timestamp}.xlsx')
+pd.DataFrame(docs, columns=['url']).to_excel(outfile, index=False)
+print(f'Saved URLs: {outfile}')
+
+
+# TODO: Implement functions for aggregation.
+
+# # Optional: Read the list of URLs.
+# urls = []
+# url_files = [
+#     "D:\data\california\lab_results\datasets\sclabs\ca-lab-results-sclabs-urls-2023-12-23-11-42-37.xlsx",
+#     "D:\data\california\lab_results\datasets\sclabs\ca-lab-results-sclabs-urls-2023-12-24-00-52-02.xlsx",
+#     "D:\data\california\lab_results\datasets\sclabs\ca-lab-results-sclabs-urls-2023-12-24-15-39-06.xlsx",
+#     "D:\data\california\lab_results\datasets\sclabs\ca-lab-results-sclabs-urls-2023-12-29-07-53-39.xlsx",
+#     "D:\data\california\lab_results\datasets\sclabs\ca-lab-results-sclabs-urls-2023-12-30-21-15-44.xlsx",
+#     "D:\data\california\lab_results\datasets\sclabs\ca-lab-results-sclabs-urls-2024-01-02-20-22-30.xlsx",
+#     "D:\data\california\lab_results\datasets\sclabs\ca-lab-results-sclabs-urls-2024-01-03-03-40-42.xlsx",
+#     "D:\data\california\lab_results\datasets\sclabs\ca-lab-results-sclabs-urls-2024-01-04-17-48-13.xlsx",
+#     "D:\data\california\lab_results\datasets\sclabs\ca-lab-results-sclabs-urls-2024-01-04-18-09-29.xlsx",
+#     "D:\data\california\lab_results\datasets\sclabs\ca-lab-results-sclabs-urls-2024-01-05-00-57-43.xlsx",
+#     "D:\data\california\lab_results\datasets\sclabs\ca-lab-results-sclabs-urls-2024-01-05-07-23-55.xlsx",
+#     "D:\data\california\lab_results\datasets\sclabs\ca-lab-results-sclabs-urls-2024-01-05-15-43-00.xlsx",
+# ]
+# for url_file in url_files:
+#     url_data = pd.read_excel(url_file)
+#     urls.append(url_data)
+# urls = pd.concat(urls)
+# urls.drop_duplicates(subset=['url'], inplace=True)
+# docs = urls['url'].tolist()
+# docs.reverse()
+
+# # Read all parsed COA datafiles.
+# datafiles = [
+#     'D:/data/california/lab_results/datasets/sclabs/ca-lab-results-sclabs-2024-01-02-00-39-36.xlsx',
+#     'D:/data/california/lab_results/datasets/sclabs/ca-lab-results-sclabs-2024-01-03-06-24-11.xlsx',
+#     r"D:\data\california\lab_results\datasets\sclabs\ca-lab-results-sclabs-2024-01-05-02-17-28.xlsx",
+#     r"D:/data/california/lab_results/datasets/sclabs\ca-lab-results-sclabs-2024-01-05-23-23-15.xlsx",
+#     r'D:/data/california/lab_results/datasets/sclabs\ca-lab-results-sclabs-2024-01-05-18-01-26.xlsx',
+#     r'D:/data/california/lab_results/datasets/sclabs\ca-lab-results-sclabs-2024-01-08-18-12-08.xlsx',
+# ]
+# all_results = []
+# for datafile in datafiles:
+#     data = pd.read_excel(datafile)
+#     all_results.append(data)
+# results = pd.concat(all_results)
+# results.drop_duplicates(subset=['coa_id'], inplace=True)
+# results['lab_result_url'] = results['coa_id'].astype(str).apply(
+#     lambda x: urljoin(BASE_URL, x.split('-')[0].strip() + '/')
+# )
+# print('Number of results:', len(results))
+
+# # Determine un-parsed COAs (all docs not in results['lab_result_url'] column).
+# docs = list(set(docs) - set(results['lab_result_url'].tolist()))
+# print('Number of un-parsed COAs:', len(docs))
+
+# Parse each COA.
+all_data = []
+errors = []
+parser = CoADoc()
+print(f'Parsing {len(docs)} URLs.')
+for doc in docs:
+    sleep(1)
+    try:
+        coa_data = sclabs.parse_sc_labs_coa(parser, doc, verbose=True)
+        all_data.append(coa_data)
+        print(f'Parsed COA: {doc}')
+    except Exception as e:
+        errors.append(doc)
+        print(f'Failed to parse COA: {doc}')
+        print(e)
+
+# Save the results.
+DATA_DIR = 'D:/data/california/results/datasets/sclabs'
+timestamp = pd.Timestamp.now().strftime('%Y-%m-%d-%H-%M-%S')
+outfile = os.path.join(DATA_DIR, f'ca-results-sclabs-{timestamp}.xlsx')
+pd.DataFrame(all_data).to_excel(outfile, index=False)
+print(f'Saved {len(all_data)} results: {outfile}')
+
+
+# TODO: Aggregate the results.
+
+
+# FIXME: Upload data to Firestore.
+
+
+# FIXME: Upload PDFs to Google Cloud Storage.
+
+
+# FIXME: Upload datafiles to Google Cloud Storage.
+
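
This scraper works because SC Labs verification URLs follow a guessable ID scheme: a `YYMMDD` date stamp, a letter prefix, and a zero-padded counter. A small sketch of that enumeration, reusing `BASE_URL` from the file above (the function name is illustrative):

from datetime import datetime
from urllib.parse import urljoin

BASE_URL = 'https://client.sclabs.com/verify/'

def candidate_urls(day, prefix='P', max_samples=199):
    """List candidate verification URLs for a single day."""
    stamp = day.strftime('%y%m%d')  # e.g. '240701' for July 1, 2024.
    return [urljoin(BASE_URL, f'{stamp}{prefix}{i:03}/') for i in range(1, max_samples)]

# The first candidate for July 1, 2024 is:
# https://client.sclabs.com/verify/240701P001/
print(candidate_urls(datetime(2024, 7, 1))[:3])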
algorithms/get_results_ct.py ADDED
@@ -0,0 +1,280 @@
+"""
+Cannabis Tests | Get Connecticut Test Result Data
+Copyright (c) 2023 Cannlytics
+
+Authors:
+    Keegan Skeate <https://github.com/keeganskeate>
+Created: 4/8/2023
+Updated: 7/3/2023
+License: CC-BY 4.0 <https://huggingface.co/datasets/cannlytics/cannabis_tests/blob/main/LICENSE>
+
+Data Source:
+
+    - Connecticut Medical Marijuana Brand Registry
+    URL: <https://data.ct.gov/Health-and-Human-Services/Medical-Marijuana-Brand-Registry/egd5-wb6r/data>
+
+"""
+# Standard imports:
+from datetime import datetime
+import os
+import requests
+from typing import Optional
+
+# External imports:
+import cannlytics
+from cannlytics.utils import convert_to_numeric
+import pandas as pd
+
+
+# Connecticut lab results API URL.
+CT_RESULTS_URL = 'https://data.ct.gov/api/views/egd5-wb6r/rows.json'
+
+# Connecticut lab results fields.
+CT_FIELDS = {
+    'sid': 'id',
+    'id': 'lab_id',
+    'position': None,
+    'created_at': None,
+    'created_meta': None,
+    'updated_at': 'data_refreshed_date',
+    'updated_meta': None,
+    'meta': None,
+    'brand_name': 'product_name',
+    'dosage_form': 'product_type',
+    'producer': 'producer',
+    'product_image': 'image_url',
+    'label_image': 'images',
+    'lab_analysis': 'lab_results_url',
+    'approval_date': 'date_tested',
+    'registration_number': 'traceability_id',
+}
+CT_CANNABINOIDS = {
+    'cbg': 'cbg',
+    'cbg_a': 'cbga',
+    'cannabavarin_cbdv': 'cbdv',
+    'cannabichromene_cbc': 'cbc',
+    'cannbinol_cbn': 'cbn',
+    'tetrahydrocannabivarin_thcv': 'thcv',
+    'tetrahydrocannabinol_thc': 'thc',
+    'tetrahydrocannabinol_acid_thca': 'thca',
+    'cannabidiols_cbd': 'cbd',
+    'cannabidiol_acid_cbda': 'cbda',
+}
+CT_TERPENES = {
+    'a_pinene': 'alpha_pinene',
+    'b_myrcene': 'beta_myrcene',
+    'b_caryophyllene': 'beta_caryophyllene',
+    'b_pinene': 'beta_pinene',
+    'limonene': 'limonene',
+    'ocimene': 'ocimene',
+    'linalool_lin': 'linalool_lin',
+    'humulene_hum': 'humulene_hum',
+    'a_bisabolol': 'alpha_bisabolol',
+    'a_phellandrene': 'alpha_phellandrene',
+    'a_terpinene': 'alpha_terpinene',
+    'b_eudesmol': 'beta_eudesmol',
+    'b_terpinene': 'beta_terpinene',
+    'fenchone': 'fenchone',
+    'pulegol': 'pulegol',
+    'borneol': 'borneol',
+    'isopulegol': 'isopulegol',
+    'carene': 'carene',
+    'camphene': 'camphene',
+    'camphor': 'camphor',
+    'caryophyllene_oxide': 'caryophyllene_oxide',
+    'cedrol': 'cedrol',
+    'eucalyptol': 'eucalyptol',
+    'geraniol': 'geraniol',
+    'guaiol': 'guaiol',
+    'geranyl_acetate': 'geranyl_acetate',
+    'isoborneol': 'isoborneol',
+    'menthol': 'menthol',
+    'l_fenchone': 'l_fenchone',
+    'nerol': 'nerol',
+    'sabinene': 'sabinene',
+    'terpineol': 'terpineol',
+    'terpinolene': 'terpinolene',
+    'trans_b_farnesene': 'trans_beta_farnesene',
+    'valencene': 'valencene',
+    'a_cedrene': 'alpha_cedrene',
+    'a_farnesene': 'alpha_farnesene',
+    'b_farnesene': 'beta_farnesene',
+    'cis_nerolidol': 'cis_nerolidol',
+    'fenchol': 'fenchol',
+    'trans_nerolidol': 'trans_nerolidol'
+}
+
+
+def flatten_results(x):
+    """Flatten a wide row of analyte columns into a list of result records."""
+    results = []
+    for name, analyte in CT_CANNABINOIDS.items():
+        results.append({
+            'key': analyte,
+            'name': name,
+            'value': convert_to_numeric(x[name]),
+            'units': 'percent',
+            'analysis': 'cannabinoids',
+        })
+    for name, analyte in CT_TERPENES.items():
+        results.append({
+            'key': analyte,
+            'name': name,
+            'value': convert_to_numeric(x[name]),
+            'units': 'percent',
+            'analysis': 'terpenes',
+        })
+    return results
+
+
+def get_results_ct(url: str = CT_RESULTS_URL) -> pd.DataFrame:
+    """Get all of the Connecticut test results.
+    Args:
+        url (str): The URL to the JSON data.
+    Returns:
+        df (pd.DataFrame): A Pandas DataFrame of the test results.
+    """
+
+    # Get the data from the OpenData API.
+    response = requests.get(url)
+    if response.status_code == 200:
+        json_data = response.json()
+        metadata = json_data['meta']
+        header = metadata['view']['columns']
+        headers = [h['name'] for h in header]
+        columns = [cannlytics.utils.snake_case(h) for h in headers]
+        rows = json_data['data']
+        df = pd.DataFrame(rows, columns=columns)
+    else:
+        print('Failed to fetch CT results. Status code:', response.status_code)
+        return None
+
+    # FIXME: Standardize the results.
+    # Note: The results do not match the COAs!!!
+    df['results'] = df.apply(flatten_results, axis=1)
+
+    # Drop unnecessary columns.
+    drop_columns = ['meta', 'position', 'created_at', 'created_meta',
+                    'updated_at', 'updated_meta']
+    drop_columns += list(CT_CANNABINOIDS.keys()) + list(CT_TERPENES.keys())
+    df.drop(columns=drop_columns, inplace=True)
+
+    # Rename the columns.
+    df.rename(columns=CT_FIELDS, inplace=True)
+
+    # TODO: Extract product_size, serving_size, servings_per_package, sample_weight
+    # from dosage_form and standardize product type.
+
+    # TODO: Format COA URLs.
+    # coa_urls
+
+    # Create the directory if it doesn't exist.
+    # Note: Relies on the module-level `data_dir` set in the `__main__` block.
+    if not os.path.exists(data_dir): os.makedirs(data_dir)
+
+    # Save the results to Excel.
+    date = datetime.now().isoformat()[:10]
+    datafile = f'{data_dir}/ct-lab-results-{date}.xlsx'
+    try:
+        cannlytics.utils.to_excel_with_style(df, datafile)
+    except:
+        df.to_excel(datafile)
+    print('Connecticut lab results archived:', datafile)
+    return df
+
+
+def download_pdfs_ct(
+        df: pd.DataFrame,
+        download_path: str,
+        column_name: Optional[str] = 'lab_results_url',
+        id_column: Optional[str] = 'id',
+        verbose: Optional[bool] = True,
+    ) -> None:
+    """Download all PDFs from a specified column in a Pandas DataFrame.
+    Args:
+        df (pandas.DataFrame): The input DataFrame containing the URLs of the PDFs.
+        download_path (str): The path to the directory where the PDFs will be downloaded.
+        column_name (str): The name of the column containing the PDF URLs.
+        id_column (str): The name of the column used to name the downloaded files.
+        verbose (bool): Whether to print progress.
+    """
+    for _, row in df.iterrows():
+        pdf_url = row[column_name]
+        if isinstance(pdf_url, list):
+            pdf_url = pdf_url[0]
+
+        # Create the filename from the ID.
+        filename = row[id_column]
+        if not filename.endswith('.pdf'):
+            filename = filename + '.pdf'
+
+        # Create the local file path for downloading the PDF.
+        # Continue if the PDF is already downloaded.
+        outfile = os.path.join(download_path, filename)
+        if os.path.isfile(outfile) or pdf_url is None:
+            continue
+
+        # Download the PDF.
+        try:
+            response = requests.get(pdf_url)
+        except:
+            print(f'Failed to download PDF: {pdf_url}')
+            continue
+        if response.status_code == 200:
+            with open(outfile, 'wb') as file:
+                file.write(response.content)
+            if verbose:
+                print(f'Downloaded PDF: {outfile}.')
+        else:
+            print(f'Failed to download PDF {filename}. Status code:', response.status_code)
+
+
+# === Test ===
+# [✓] Tested: 2024-04-14 by Keegan Skeate <keegan@cannlytics>
+if __name__ == '__main__':
+
+    # Command line usage.
+    import argparse
+    try:
+        parser = argparse.ArgumentParser()
+        parser.add_argument('--pdf_dir', dest='pdf_dir', type=str)
+        parser.add_argument('--data_dir', dest='data_dir', type=str)
+        args = vars(parser.parse_args())
+    except SystemExit:
+        args = {}
+
+    # Specify where your data lives.
+    DATA_DIR = 'D://data/connecticut/results'
+    PDF_DIR = 'D://data/connecticut/results/pdfs'
+    stats_dir = 'D://data/connecticut/results/datasets'
+
+    # Set the destination for the PDFs.
+    data_dir = args.get('data_dir', DATA_DIR)
+    pdf_dir = args.get('pdf_dir', os.path.join(data_dir, 'pdfs'))
+
+    # Get the test results.
+    print('Getting Connecticut test results...')
+    results = get_results_ct()
+
+    # Download the PDFs.
+    print('Downloading PDFs...')
+    if not os.path.exists(pdf_dir): os.makedirs(pdf_dir)
+    download_pdfs_ct(results, pdf_dir)
+
+    # Save the results to Excel.
+    date = datetime.now().isoformat()[:10]
+    if not os.path.exists(stats_dir): os.makedirs(stats_dir)
+    results.to_excel(f'{stats_dir}/ct-lab-results-{date}.xlsx', index=False)
+    results.to_csv(f'{stats_dir}/ct-lab-results-latest.csv', index=False)
+    print('Connecticut lab results archived:', stats_dir)
+
+    # TODO: Integrate with `analyze_results_ct.py`.
+
+
+    # FIXME: Upload results to Firestore.
+
+
+    # FIXME: Upload PDFs to Google Cloud Storage.
+
+
+    # FIXME: Upload datafiles to Google Cloud Storage.
+
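
The Socrata endpoint returns one wide row per product, with a column for each analyte; `flatten_results` pivots those columns into the long list-of-records shape used elsewhere in this repository. A trimmed illustration of that wide-to-long step, with only two analytes and plain values in place of `convert_to_numeric`:

def flatten(row, analytes, analysis):
    """Pivot wide analyte columns into long result records."""
    return [
        {'key': key, 'name': name, 'value': row[name],
         'units': 'percent', 'analysis': analysis}
        for name, key in analytes.items()
    ]

row = {'tetrahydrocannabinol_thc': 21.4, 'b_myrcene': 0.9}
results = flatten(row, {'tetrahydrocannabinol_thc': 'thc'}, 'cannabinoids')
results += flatten(row, {'b_myrcene': 'beta_myrcene'}, 'terpenes')
print(results)

Each record carries its own `analysis` and `units`, so downstream consumers can concatenate results from different states without reconciling column layouts.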
algorithms/get_results_fl_flowery.py ADDED
@@ -0,0 +1,231 @@
+"""
+Get Florida cannabis lab results | The Flowery
+Copyright (c) 2023-2024 Cannlytics
+
+Authors:
+    Keegan Skeate <https://github.com/keeganskeate>
+Created: 5/18/2023
+Updated: 4/28/2024
+License: <https://github.com/cannlytics/cannlytics/blob/main/LICENSE>
+
+Description:
+
+    Archive Florida cannabis lab result data for The Flowery.
+
+Data Sources:
+
+    - [The Flowery](https://support.theflowery.co)
+
+"""
+# Standard imports:
+from datetime import datetime
+import os
+from time import sleep
+
+# External imports:
+from cannlytics.data.web import initialize_selenium
+from cannlytics.utils.constants import DEFAULT_HEADERS
+import pandas as pd
+import requests
+
+# Selenium imports.
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+
+
+def get_results_the_flowery(
+        data_dir: str,
+        slug = 'the-flowery',
+        producer_license_number = 'MMTC-2019-0020',
+        lists_url = 'https://support.theflowery.co/hc/en-us/sections/7240468576283-Drop-Information',
+        overwrite = False,
+    ):
+    """Get lab results published by The Flowery on the public web."""
+
+    # Initialize the web driver.
+    driver = initialize_selenium()
+
+    # Load the lists page to get each list of COAs.
+    coa_lists = []
+    driver.get(lists_url)
+    links = driver.find_elements(by=By.TAG_NAME, value='a')
+    for link in links:
+        if 'COAs' in link.text:
+            coa_lists.append(link.get_attribute('href'))
+
+    # Get COA URLs.
+    # Note: Re-use the same driver for each list so that the final
+    # close/quit below does not act on an already-quit driver.
+    coa_urls = []
+    for coa_list in coa_lists:
+        driver.get(coa_list)
+        links = driver.find_elements(by=By.TAG_NAME, value='a')
+        for link in links:
+            href = link.get_attribute('href')
+            if href and href.endswith('.pdf'):
+                coa_urls.append(href)
+
+    # Close the browser.
+    driver.close()
+    driver.quit()
+
+    # Create an output directory.
+    datasets_dir = os.path.join(data_dir, '.datasets')
+    if not os.path.exists(datasets_dir):
+        os.makedirs(datasets_dir)
+
+    # Save the COA URLs.
+    date = datetime.now().isoformat()[:19].replace(':', '-')
+    df = pd.DataFrame(coa_urls)
+    df.to_excel(f'{datasets_dir}/fl-lab-result-urls-{slug}-{date}.xlsx', index=False)
+    print('Saved %i lab result URLs for %s' % (len(df), slug))
+
+    # Create a directory for COA PDFs.
+    pdf_dir = os.path.join(datasets_dir, 'pdfs')
+    if not os.path.exists(pdf_dir):
+        os.makedirs(pdf_dir)
+
+    # Create a directory for each licensee's COAs.
+    license_pdf_dir = os.path.join(pdf_dir, producer_license_number)
+    if not os.path.exists(license_pdf_dir):
+        os.makedirs(license_pdf_dir)
+
+    # Download the COA PDFs.
+    for coa_url in coa_urls:
+        sample_id = coa_url.split('/')[-1].split('.')[0]
+        batch_id = coa_url.split('/')[-2]
+        outfile = os.path.join(license_pdf_dir, f'{batch_id}-{sample_id}.pdf')
+        if os.path.exists(outfile) and not overwrite:
+            continue
+        sleep(0.3)
+        response = requests.get(coa_url, headers=DEFAULT_HEADERS)
+        with open(outfile, 'wb') as pdf:
+            pdf.write(response.content)
+        print('Downloaded: %s' % outfile)
+
+    # TODO: Save details about each COA, including The Flowery company data.
+
+    # Return the COA URLs.
+    return df
+
+
+def get_product_results_the_flowery(
+        data_dir: str,
+        overwrite = False,
+        **kwargs,
+    ):
+    """Get product results from The Flowery website."""
+    # Initialize a web driver.
+    driver = initialize_selenium()
+
+    # Iterate over all of the product types.
+    observations = []
+    categories = ['Flower', 'Concentrates', 'Pre-Rolls', 'Vaporizers', 'Tinctures']
+    for category in categories:
+
+        # Load the category page.
+        url = f'https://theflowery.co/shop?categories[]={category}'
+        driver.get(url)
+        sleep(3.33)
+
+        # Get all of the product cards.
+        divs = driver.find_elements(by=By.CSS_SELECTOR, value='a.s-shop-product-card')
+        for div in divs:
+
+            # Extract the product name.
+            product_name = div.find_element(by=By.CLASS_NAME, value='title').text
+
+            # Extract the product image URL.
+            product_image = div.find_element(by=By.TAG_NAME, value='img').get_attribute('src')
+
+            # Extract the product price.
+            product_price = float(div.find_element(by=By.CLASS_NAME, value='full-price').text.strip('$'))
+
+            # Extract the strain type.
+            strain_type = div.find_element(by=By.CLASS_NAME, value='sort').text
+
+            # Extract the product URL from the card's href attribute.
+            product_url = div.get_attribute('href')
+
+            # Store the extracted data in a dictionary.
+            obs = {
+                'product_name': product_name,
+                'image_url': product_image,
+                'product_price': product_price,
+                'strain_type': strain_type,
+                'product_url': product_url
+            }
+            observations.append(obs)
+
+    # Get the COA URL for each product.
+    # TODO: Also get the image_url.
+    for i, obs in enumerate(observations):
+        coa_url = ''
+        driver.get(obs['product_url'])
+        sleep(3.33)
+        links = driver.find_elements(by=By.TAG_NAME, value='a')
+        for link in links:
+            if link.get_attribute('href') and '.pdf' in link.get_attribute('href'):
+                coa_url = link.get_attribute('href')
+                break
+        observations[i]['coa_url'] = coa_url
+        print('Found COA URL: %s' % coa_url)
+
+    # Close the driver.
+    driver.close()
+
+    # Download the COA PDFs.
+    license_pdf_dir = os.path.join(data_dir, '.datasets', 'pdfs', 'MMTC-2019-0020')
+    for obs in observations:
+        coa_url = obs['coa_url']
+        if not coa_url:
+            continue  # Skip products without a COA URL.
+
+        # Get the sample ID.
+        sample_id = coa_url.split('/')[-1].split('.')[0]
+
+        # Format the file.
+        outfile = os.path.join(license_pdf_dir, f'{sample_id}.pdf')
+        if os.path.exists(outfile) and not overwrite:
+            print('Cached: %s' % outfile)
+            continue
+        sleep(0.3)
+
+        # Download the PDF.
+        # FIXME: This is failing every time.
+        # try:
+        response = requests.get(coa_url, headers=DEFAULT_HEADERS)
+        with open(outfile, 'wb') as pdf:
+            pdf.write(response.content)
+        print('Downloaded: %s' % outfile)
+        sleep(3.33)
+        # except:
+        #     print('Failed to download: %s' % coa_url)
+
+    # Merge The Flowery data with the COA data.
+    the_flowery = {'business_dba_name': 'The Flowery'}
+    observations = [{**the_flowery, **x} for x in observations]
+
+    # Save the data.
+    date = datetime.now().isoformat()[:19].replace(':', '-')
+    data = pd.DataFrame(observations)
+    data.to_excel(f'{data_dir}/the-flowery-lab-result-urls-{date}.xlsx', index=False)
+    print('Saved %i lab result URLs for The Flowery.' % len(data))
+    return data
+
+
+# === Test ===
+# [✓] Tested: 2024-04-14 by Keegan Skeate <keegan@cannlytics>
+if __name__ == '__main__':
+
+    # Specify where your data lives.
+    DATA_DIR = 'D://data/florida/results'
+
+    # [✓] TEST: Get The Flowery COAs.
+    try:
+        the_flowery_products = get_product_results_the_flowery(DATA_DIR)
+    except Exception as e:
+        print('ERROR:', e)
+    try:
+        the_flowery_coas = get_results_the_flowery(DATA_DIR)
+    except Exception as e:
+        print('ERROR:', e)
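
Both functions above follow the same harvest-then-download shape: collect every `<a href>` ending in `.pdf`, then fetch each file with a pause between requests. A generic sketch of that pattern using standard Selenium and requests calls (the function name and pause values are illustrative):

import os
from time import sleep

import requests
from selenium.webdriver.common.by import By

def download_pdf_links(driver, url, out_dir, pause=0.3):
    """Collect PDF links from a page and download any not already on disk."""
    driver.get(url)
    links = driver.find_elements(By.TAG_NAME, 'a')
    hrefs = [a.get_attribute('href') for a in links]
    pdf_urls = [h for h in hrefs if h and h.endswith('.pdf')]
    os.makedirs(out_dir, exist_ok=True)
    for pdf_url in pdf_urls:
        outfile = os.path.join(out_dir, pdf_url.split('/')[-1])
        if os.path.exists(outfile):
            continue  # Skip files already downloaded.
        response = requests.get(pdf_url, timeout=30)
        with open(outfile, 'wb') as f:
            f.write(response.content)
        sleep(pause)  # Be polite to the server.
    return pdf_urls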
algorithms/get_results_fl_jungleboys.py ADDED
@@ -0,0 +1,314 @@
+"""
+Get Florida cannabis lab results | Jungle Boys
+Copyright (c) 2023-2024 Cannlytics
+
+Authors:
+    Keegan Skeate <https://github.com/keeganskeate>
+Created: 5/18/2023
+Updated: 4/21/2024
+License: <https://github.com/cannlytics/cannlytics/blob/main/LICENSE>
+
+Description:
+
+    Archive Florida cannabis lab result data for Jungle Boys Florida.
+
+Data Sources:
+
+    - [Jungle Boys Florida](https://jungleboysflorida.com)
+
+"""
+# Standard imports:
+from datetime import datetime
+import os
+from time import sleep
+
+# External imports:
+from cannlytics.data.coas.coas import CoADoc
+from cannlytics.data.web import initialize_selenium
+from cannlytics.utils.constants import DEFAULT_HEADERS
+import pandas as pd
+
+# Selenium imports.
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait
+
+
+class JungleBoys:
+    """Download lab results from Jungle Boys' website."""
+
+    def __init__(self, data_dir, headless=False, download_dir=None):
+        """Initialize the driver and directories."""
+        self.data_dir = data_dir
+        self.driver = initialize_selenium(
+            headless=headless,
+            download_dir=download_dir,
+        )
+        # self.datasets_dir = os.path.join(data_dir, '.datasets')
+        # self.pdf_dir = os.path.join(self.datasets_dir, 'pdfs')
+        # self.license_pdf_dir = os.path.join(self.pdf_dir, 'terplife')
+        # if not os.path.exists(self.datasets_dir): os.makedirs(self.datasets_dir)
+        # if not os.path.exists(self.pdf_dir): os.makedirs(self.pdf_dir)
+        # if not os.path.exists(self.license_pdf_dir): os.makedirs(self.license_pdf_dir)
+
+    def get_results_jungle_boys(
+            self,
+            products_url='https://jungleboysflorida.com/products/',
+            pause=3.33,
+            initial_pause=3.33,
+        ):
+        """Get lab results published by Jungle Boys on the public web."""
+
+        # Get products from each store.
+        all_products = []
+        self.driver.get(products_url)
+        sleep(3.33)
+        self.verify_age_jungle_boys(self.driver)
+        WebDriverWait(self.driver, 10).until(
+            EC.presence_of_element_located((By.CSS_SELECTOR, '.dovetail-ecommerce-age-gate-retailer'))
+        )
+        select_buttons = self.driver.find_elements(By.CSS_SELECTOR, '.dovetail-ecommerce-age-gate-retailer .chakra-button')
+        for button in select_buttons:
+            try:
+                # Scroll into view and click the button.
+                self.driver.execute_script('arguments[0].scrollIntoView(true);', button)
+                sleep(1)  # Small delay to ensure visibility.
+                button.click()
+                sleep(5)
+
+                # Get all of the products for the store.
+                products = self.get_product_details_jungle_boys(self.driver)
+                all_products.extend(products)
+
+                # FIXME: May need to query COAs at this stage.
+                pdf_dir = r'D:\data\florida\lab_results\jungleboys\pdfs'
+                # product_names = [x['name'] + ' ' + str(x['category']) for x in products]
+                product_names = list(set([x['name'] for x in products]))
+                self.search_and_download_jungle_boys_coas(
+                    self.driver,
+                    product_names,
+                    pdf_dir,
+                    pause=pause,
+                    initial_pause=initial_pause,
+                )
+
+                # Assuming clicking navigates away or requires you to go back.
+                # driver.back()
+                self.driver.get(products_url)
+                # Re-find the buttons to avoid StaleElementReferenceException.
+                WebDriverWait(self.driver, 10).until(
+                    EC.presence_of_all_elements_located((By.CSS_SELECTOR, '.dovetail-ecommerce-age-gate-retailer .chakra-button'))
+                )
+                select_buttons = self.driver.find_elements(By.CSS_SELECTOR, '.dovetail-ecommerce-age-gate-retailer .chakra-button')
+            except:
+                pass
+            # select_buttons = driver.find_elements(By.CSS_SELECTOR, '.dovetail-ecommerce-age-gate-retailer .chakra-button')
+
+        # Download each COA (if it doesn't already exist).
+        # pdf_dir = r'D:\data\florida\lab_results\jungleboys\pdfs'
+        # product_names = [x['name'] + ' ' + str(x['category']) for x in all_products]
+        # self.search_and_download_jungle_boys_coas(self.driver, product_names, pdf_dir)
+
+        # Close the driver.
+        self.driver.quit()
+
+        # Return the products.
+        return all_products
+
+    def verify_age_jungle_boys(self, driver, pause=1):
+        """Verify age for Jungle Boys' website."""
+        checkbox_js = "document.querySelector('input[type=checkbox].chakra-checkbox__input').click();"
+        driver.execute_script(checkbox_js)
+        sleep(pause)
+        continue_button = WebDriverWait(driver, 10).until(
+            EC.element_to_be_clickable((By.CSS_SELECTOR, 'button.chakra-button:not([disabled])'))
+        )
+        continue_button.click()
+
+    def get_product_details_jungle_boys(self, driver):
+        """Get the details for each product listed on a store page."""
+        products = []
+        product_elements = driver.find_elements(By.CSS_SELECTOR, '.wp-block-dovetail-ecommerce-product-list-list-view')
+        accessories = ['Accessories', 'Grinders', 'Lighters']
+        for el in product_elements:
+            driver.execute_script('arguments[0].scrollIntoView(true);', el)
+            sleep(0.1)
+
+            # Skip accessories.
+            name = el.find_element(By.CSS_SELECTOR, 'div.dovetail-ecommerce-advanced-text').text
+            text = el.text
+            if any(x in text for x in accessories) and text not in name:
+                print('Skipping:', name)
+                continue
+
+            # Get the image URL.
+            image_url = el.find_element(By.TAG_NAME, 'img').get_attribute('src')
+
+            # Get the category.
+            try:
+                category = el.find_element(By.CSS_SELECTOR, 'div.dovetail-ecommerce-product-category').text
+            except:
+                try:
+                    category = el.find_element(By.CSS_SELECTOR, 'div.dovetail-ecommerce-product-sub-category').text
+                except:
+                    category = None
+
+            # Get the quantity.
+            quantity = None
+            buttons = el.find_elements(By.TAG_NAME, 'button')
+            for button in buttons:
+                button_text = button.text
+                if button_text and button_text != 'Add to Bag':
+                    quantity = button_text
+                    break
+
+            # Get the strain type.
+            strain_type = None
+            try:
+                strain_type = el.find_element(By.CSS_SELECTOR, 'div.dovetail-ecommerce-product-strain').text
+            except:
+                pass
+
+            # Get the price and total THC.
+            price, total_thc = None, None
+            lines = el.text.split('\n')
+            for line in lines:
+                if 'THC' in line:
+                    total_thc = line.replace('THC ', '').replace('%', '')
+                    try:
+                        total_thc = float(total_thc)
+                    except:
+                        pass
+                elif '$' in line:
+                    price = line.replace('$ ', '')
+                    try:
+                        price = float(price)
+                    except:
+                        pass
+
+            # Record the product details.
+            products.append({
+                'name': name,
+                'image_url': image_url,
+                'price': price,
+                'category': category,
+                'strain_type': strain_type,
+                'quantity': quantity,
+            })
+
+        # Return the products.
+        return products
+
+    def search_and_download_jungle_boys_coas(
+            self,
+            driver,
+            product_names,
+            pdf_dir,
+            pause=3.33,
+            initial_pause=3.33,
+            coas_url='https://jungleboysflorida.com/coa/',
+        ):
+        """Search for and download COAs from Jungle Boys' COA page."""
+        driver.get(coas_url)
+        sleep(initial_pause)
+        for product_name in product_names:
+            print('Searching for:', product_name)
+
+            # JavaScript to set the value of the search input.
+            js_query_selector = "document.querySelector('div.wp-block-create-block-coa__search input[placeholder=\"Search\"]')"
+            search_query = product_name.replace('-', '').replace('  ', ' ')
+            js_set_value = f"{js_query_selector}.value = '{search_query}';"
+            driver.execute_script(js_set_value)
+
+            # Perform the search.
+            search_div = driver.find_element(By.CSS_SELECTOR, "div.wp-block-create-block-coa__search")
+            search_box = search_div.find_element(By.TAG_NAME, 'input')
+            search_box.clear()
+            search_box.send_keys(search_query)
+            # Optionally, simulate a keypress to trigger any attached event
+            # listeners, mimicking the Enter key to submit the search:
+            # search_box.send_keys(Keys.RETURN)
+            sleep(pause)
+
+            # Download the PDFs.
+            self.download_pdf_links(driver, pdf_dir)
+
+    def download_pdf_links(self, driver, pdf_dir, pause=3.33, overwrite=False):
+        """Download all PDF links in search results."""
+        pdf_links = driver.find_elements(By.CSS_SELECTOR, ".wp-block-create-block-coa__result a[target='_blank']")
+        pdf_urls = [x.get_attribute('href') for x in pdf_links]
+        print('Found %i PDFs total.' % len(pdf_urls))
+        all_pdf_links = driver.find_elements(By.CSS_SELECTOR, ".wp-block-create-block-coa__result a[target='_blank']")
+        pdf_urls = []
+        for link in all_pdf_links:
+            # Keep only links whose immediate parent <li> is not hidden.
+            parent_li = link.find_element(By.XPATH, "./ancestor::li[1]")
+            if "hidden" not in parent_li.get_attribute("class"):
+                pdf_urls.append(link.get_attribute('href'))
+        print('Found %i queried PDFs.' % len(pdf_urls))
+        for pdf_url in pdf_urls:
+            pdf_name = pdf_url.split('/')[-1]
+            pdf_path = os.path.join(pdf_dir, pdf_name)
+            if overwrite or not os.path.exists(pdf_path):
+                print('Downloading:', pdf_path)
+                driver.get(pdf_url)
+                print('Downloaded:', pdf_path)
+                sleep(pause)
+            else:
+                print('Cached:', pdf_path)
+
+
+def get_results_fl_jungle_boys(
+        data_dir: str,
+        download_dir: str,
+        dataset_dir: str,
+    ):
+    """Get lab results for the Jungle Boys in Florida."""
+
+    # Initialize a client to query Jungle Boys COAs.
+    downloader = JungleBoys(
+        data_dir=data_dir,
+        download_dir=download_dir,
+        headless=False,
+    )
+
+    # Download the COAs.
+    downloader.get_results_jungle_boys()
+
+    # Parse the Jungle Boys COAs.
+    parser = CoADoc()
+    all_pdfs = [x for x in os.listdir(download_dir) if x.endswith('.pdf')]
+    coa_data = []
+    print('Parsing %i COAs...' % len(all_pdfs))
+    for pdf in all_pdfs:
+        doc = os.path.join(download_dir, pdf)
+        try:
+            data = parser.parse(doc)
+            if isinstance(data, dict):
+                coa_data.append(data)
+            elif isinstance(data, list):
+                coa_data.extend(data)
+            print('Parsed:', doc)
+        except:
+            print('Error parsing:', doc)
+
+    # Save the Jungle Boys COA data.
+    namespace = 'fl-results-jungle-boys'
+    all_data = pd.DataFrame(coa_data)
+    timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
+    outfile = os.path.join(dataset_dir, f'{namespace}-{timestamp}.xlsx')
+    parser.save(coa_data, outfile)
+    return all_data
+
+
+# === Test ===
+# [✓] Tested: 2024-04-14 by Keegan Skeate <keegan@cannlytics>
+if __name__ == '__main__':
+
+    # Get Jungle Boys Florida results.
+    # FIXME: This is failing.
+    get_results_fl_jungle_boys(
+        data_dir='D://data/florida/results',
+        download_dir='D://data/florida/results/pdfs/jungleboys',
+        dataset_dir='D://data/florida/results/datasets/jungleboys',
+    )
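
The parse step in `get_results_fl_jungle_boys` illustrates a pattern that recurs across these algorithms: parse each PDF, normalize single-record dictionaries and multi-record lists into one flat collection, and tolerate per-file failures. A reduced sketch, where `parse` is any callable returning a dict or a list of dicts:

import os
import pandas as pd

def parse_directory(pdf_dir, parse):
    """Parse every PDF in a directory into one DataFrame."""
    records = []
    for name in os.listdir(pdf_dir):
        if not name.endswith('.pdf'):
            continue
        path = os.path.join(pdf_dir, name)
        try:
            data = parse(path)
        except Exception as error:
            print('Error parsing:', path, error)
            continue
        # A dict is one sample; a list is several.
        records.extend([data] if isinstance(data, dict) else data)
    return pd.DataFrame(records)

# Example usage with a stub parser:
# df = parse_directory('pdfs', lambda p: {'coa_pdf': os.path.basename(p)})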
algorithms/get_results_fl_kaycha.py ADDED
@@ -0,0 +1,477 @@
1
+ """
2
+ Get Florida cannabis lab results | Kaycha Labs
3
+ Copyright (c) 2023-2024 Cannlytics
4
+
5
+ Authors:
6
+ Keegan Skeate <https://github.com/keeganskeate>
7
+ Created: 5/18/2023
8
+ Updated: 5/22/2024
9
+ License: <https://github.com/cannlytics/cannlytics/blob/main/LICENSE>
10
+
11
+ Description:
12
+
13
+ Archive Florida cannabis lab result data for Kaycha Labs.
14
+
15
+ Data Sources:
16
+
17
+ - [Florida Labs](https://knowthefactsmmj.com/cmtl/)
18
+ - [Florida Licenses](https://knowthefactsmmj.com/mmtc/)
19
+ - [Kaycha Labs](https://yourcoa.com)
20
+
21
+ """
22
+ # Standard imports:
23
+ from datetime import datetime
24
+ import os
25
+ import tempfile
26
+ from time import sleep
27
+ from typing import Optional
28
+
29
+ # External imports:
30
+ from bs4 import BeautifulSoup
31
+ from cannlytics.data.cache import Bogart
32
+ from cannlytics.data.coas.coas import CoADoc
33
+ from cannlytics.data.coas.algorithms.kaycha import parse_kaycha_coa
34
+ from cannlytics.utils.utils import (
35
+ download_file_with_selenium,
36
+ remove_duplicate_files,
37
+ )
38
+ from cannlytics.utils.constants import DEFAULT_HEADERS
39
+ import pandas as pd
40
+ import requests
41
+
42
+ # Get a list of Florida companies.
43
+ # Note: It may be best to retrieve this list dynamically.
44
+ # TODO: Try to find COAs for the remaining companies. E.g.
45
+ # - Plant 13 Florida, Inc.
46
+ # - House of Platinum Cannabis
47
+ # - Cookies Florida, Inc.
48
+ FLORIDA_LICENSES = {
49
+ 'MMTC-2015-0002': {
50
+ 'business_dba_name': 'Ayr Cannabis Dispensary',
51
+ 'business_legal_name': 'Liberty Health Sciences, FL',
52
+ 'slug': 'Liberty+Health+Sciences%2C+FL',
53
+ },
54
+ 'MMTC-2017-0011': {
55
+ 'business_dba_name': 'Cannabist',
56
+ 'slug': 'Cannabist',
57
+ },
58
+ 'MMTC-2019-0018': {
59
+ 'business_dba_name': 'Cookies Florida, Inc.',
60
+ 'slug': '',
61
+ },
62
+ 'MMTC-2015-0001': {
63
+ 'business_dba_name': 'Curaleaf',
64
+ 'slug': 'CURALEAF+FLORIDA+LLC',
65
+ },
66
+ 'MMTC-2015-0003': {
67
+ 'business_dba_name': 'Fluent ',
68
+ 'slug': 'Fluent',
69
+ },
70
+ 'MMTC-2019-0019': {
71
+ 'business_dba_name': 'Gold Leaf',
72
+ 'slug': 'Gold+Leaf',
73
+ },
74
+ 'MMTC-2019-0021': {
75
+ 'business_dba_name': 'Green Dragon',
76
+ 'slug': 'Green+Dragon',
77
+ },
78
+ 'MMTC-2016-0007': {
79
+ 'business_dba_name': 'GrowHealthy',
80
+ 'slug': 'GrowHealthy',
81
+ },
82
+ 'MMTC-2017-0013': {
83
+ 'business_dba_name': 'GTI (Rise Dispensaries)',
84
+ 'slug': 'GTI',
85
+ },
86
+ 'MMTC-2018-0014': {
87
+ 'business_dba_name': 'House of Platinum Cannabis',
88
+ 'slug': '',
89
+ },
90
+ 'MMTC-2019-0016': {
91
+ 'business_dba_name': 'Insa - Cannabis for Real Life',
92
+ 'slug': 'Insa',
93
+ },
94
+ 'MMTC-2019-0015': {
95
+ 'business_dba_name': 'Jungle Boys',
96
+ 'slug': 'Jungle+Boys',
97
+ },
98
+ 'MMTC-2017-0010': {
99
+ 'business_dba_name': 'MüV',
100
+ 'slug': 'Altmed+Florida',
101
+ },
102
+ 'MMTC-2016-0006': {
103
+ 'business_dba_name': 'Planet 13 Florida, Inc.',
104
+ 'slug': '',
105
+ },
106
+ 'MMTC-2019-0022': {
107
+ 'business_dba_name': 'Revolution Florida',
108
+ 'slug': 'Revolution',
109
+ },
110
+ 'MMTC-2019-0017': {
111
+ 'business_dba_name': 'Sanctuary Cannabis',
112
+ 'slug': 'Sanctuary',
113
+ },
114
+ 'MMTC-2017-0012': {
115
+ 'business_dba_name': 'Sunburn',
116
+ 'slug': '',
117
+ },
118
+ 'MMTC-2017-0008': {
119
+ 'business_dba_name': 'Sunnyside*',
120
+ 'slug': 'Sunnyside',
121
+ },
122
+ 'MMTC-2015-0004': {
123
+ 'business_dba_name': 'Surterra Wellness',
124
+ 'slug': 'Surterra+Wellness',
125
+ },
126
+ 'MMTC-2019-0020': {
127
+ 'business_dba_name': 'The Flowery',
128
+ 'slug': 'The+Flowery',
129
+ },
130
+ 'MMTC-2015-0005': {
131
+ 'business_dba_name': 'Trulieve',
132
+ 'slug': 'Trulieve',
133
+ },
134
+ 'MMTC-2017-0009': {
135
+ 'business_dba_name': 'VidaCann',
136
+ 'slug': 'VidaCann',
137
+ },
138
+ }
139
+
140
+ # Define the minimum file size for a PDF.
141
+ MIN_FILE_SIZE = 21 * 1024
142
+
+
+ def download_coas_kaycha(
+         data_dir: str,
+         slug: str,
+         pdf_dir: Optional[str] = None,
+         dba: Optional[str] = None,
+         producer_license_number: Optional[str] = None,
+         overwrite: Optional[bool] = False,
+         base: Optional[str] = 'https://yourcoa.com',
+         columns: Optional[list] = None,
+         pause: Optional[float] = 0.33,
+         cache: Optional[Bogart] = None,
+     ):
+     """Download Kaycha Labs COAs uploaded to the public web."""
+
+     # Initialize COA URL collection.
+     if columns is None:
+         columns = ['lab_id', 'batch_number', 'product_name']
+
+     # Create an output directory.
+     datasets_dir = os.path.join(data_dir, 'datasets')
+     if not os.path.exists(datasets_dir):
+         os.makedirs(datasets_dir)
+
+     # Initialize the cache.
+     if cache is None:
+         cache = Bogart()
+
+     # Request each page until the maximum is reached.
+     page = 0
+     observations = []
+     iterate = True
+     while iterate:
+
+         # Get the first/next page of COAs.
+         page += 1
+         url = f'{base}/company/company?t={slug}&page={page}'
+         response = requests.get(url, headers=DEFAULT_HEADERS)
+         if response.status_code != 200:
+             print(f'Request failed with status {response.status_code}')
+
+         # Get the download URLs.
+         soup = BeautifulSoup(response.content, 'html.parser')
+         links = soup.find_all('a')
+         links = [x['href'] for x in links if 'coa-download' in x['href']]
+         links = list(set(links))
+         links = [base + x for x in links]
+
+         # Get the details from the page.
+         divs = soup.find_all(class_='pdf_box')
+         print('Found %i samples on page %i.' % (len(divs), page))
+         for n, div in enumerate(divs):
+             observation = {}
+             spans = div.find_all('span')[:len(columns)]
+             values = [x.text for x in spans]
+             for k, value in enumerate(values):
+                 observation[columns[k]] = value
+             try:
+                 observation['download_url'] = links[n]
+             except:
+                 continue
+             if dba is not None:
+                 observation['business_dba_name'] = dba
+             if producer_license_number is not None:
+                 observation['producer_license_number'] = producer_license_number
+             observations.append(observation)
+
+         # See if the next button is disabled to know when to stop iterating.
+         next_element = soup.find(class_='next')
+         if not next_element:
+             iterate = False
+         elif next_element and 'disabled' in next_element.get('class', []):
+             iterate = False
+
+         # Otherwise pause to respect the server.
+         sleep(pause)
+
+     # Save the observed lab result URLs.
+     date = datetime.now().isoformat()[:19].replace(':', '-')
+     df = pd.DataFrame(observations)
+     df.to_excel(f'{datasets_dir}/fl-lab-result-urls-{slug}-{date}.xlsx', index=False)
+     print('Saved %i lab result URLs for %s' % (len(df), slug))
+
+     # Create a directory for COA PDFs.
+     if pdf_dir is None:
+         pdf_dir = os.path.join(data_dir, 'pdfs')
+     if not os.path.exists(pdf_dir):
+         os.makedirs(pdf_dir)
+
+     # Create a directory for each licensee's COAs.
+     license_pdf_dir = os.path.join(pdf_dir, producer_license_number)
+     if not os.path.exists(license_pdf_dir):
+         os.makedirs(license_pdf_dir)
+
+     # Download the PDFs.
+     # Checks if the file size is small and retries with Selenium if needed.
+     print('License directory:', license_pdf_dir)
+     for _, row in df.iterrows():
+         sleep(pause)
+         download_url = row['download_url']
+         if not download_url.startswith('http'):
+             download_url = base + download_url
+         sample_id = download_url.split('/')[-1].split('?')[0].split('&')[0]
+         outfile = os.path.join(license_pdf_dir, f'{sample_id}.pdf')
+         url_hash = cache.hash_url(download_url)
+         if (os.path.exists(outfile) or cache.get(url_hash)) and not overwrite:
+             print('Cached:', download_url)
+             # DEV: Wean off of os.path.exists and then remove the following line.
+             cache.set(url_hash, {'type': 'download', 'url': download_url, 'file': outfile})
+             continue
+         cache.set(url_hash, {'type': 'download', 'url': download_url, 'file': outfile})
+         try:
+             coa_url = f'{base}/coa/download?sample={sample_id}'
+             response = requests.get(coa_url, headers=DEFAULT_HEADERS)
+             if response.status_code == 200:
+                 if len(response.content) < MIN_FILE_SIZE:
+                     print('File size is small, retrying with Selenium:', download_url)
+                     response = requests.get(download_url, allow_redirects=True)
+                     if response.status_code == 200:
+                         redirected_url = response.url
+                         download_file_with_selenium(
+                             redirected_url,
+                             download_dir=license_pdf_dir,
+                         )
+                         print('Downloaded with Selenium:', redirected_url)
+                         cache.set(url_hash, {'type': 'download', 'url': download_url, 'redirect_url': redirected_url})
+                 else:
+                     with open(outfile, 'wb') as pdf:
+                         pdf.write(response.content)
+                     print('Downloaded:', outfile)
+                     cache.set(url_hash, {'type': 'download', 'url': download_url, 'coa_url': coa_url, 'file': outfile})
+             else:
+                 print('Failed to download, retrying with Selenium:', coa_url)
+                 response = requests.get(download_url, allow_redirects=True)
+                 if response.status_code == 200:
+                     redirected_url = response.url
+                     download_file_with_selenium(
+                         redirected_url,
+                         download_dir=license_pdf_dir,
+                     )
+                     print('Downloaded with Selenium:', redirected_url)
+                     cache.set(url_hash, {'type': 'download', 'url': download_url, 'redirect_url': redirected_url})
+         except:
+             coa_url = f'{base}/coa/coa-view?sample={sample_id}'
+             response = requests.get(coa_url, allow_redirects=True)
+             if response.status_code == 200:
+                 redirected_url = response.url
+                 download_file_with_selenium(
+                     redirected_url,
+                     download_dir=license_pdf_dir,
+                 )
+                 print('Downloaded with Selenium:', redirected_url)
+                 cache.set(url_hash, {'type': 'download', 'url': download_url, 'coa_url': coa_url, 'redirect_url': redirected_url})
+             else:
+                 print('Final fail to download with Selenium:', coa_url)
+                 # Optional: Keep track of URLs that failed to download.
+                 # Optional: Try another way to download.
+
+     # Return the COA URLs.
+     return df
+
+
+ def get_results_kaycha(
+         data_dir: str,
+         licenses=None,
+         pause: Optional[float] = 0.33,
+         verbose: Optional[bool] = False,
+         cache_path: Optional[str] = None,
+         **kwargs
+     ):
+     """Get lab results published by Kaycha Labs on the public web."""
+     # Initialize the cache.
+     cache = Bogart(cache_path)
+
+     # Download COAs for each licensee.
+     coa_urls = []
+     if licenses is None:
+         licenses = FLORIDA_LICENSES
+     # TODO: Make this an argument.
+     # items = reversed(licenses.items())
+     items = licenses.items()
+     for producer_license_number, licensee in items:
+         print('Getting COAs for %s' % licensee['business_dba_name'])
+         urls = download_coas_kaycha(
+             data_dir,
+             slug=licensee['slug'],
+             dba=licensee['business_dba_name'],
+             producer_license_number=producer_license_number,
+             pause=pause,
+             cache=cache,
+         )
+         coa_urls.append(urls)
+
+     # Remove duplicate COAs.
+     try:
+         datasets_dir = os.path.join(data_dir, 'datasets')
+         pdf_dir = os.path.join(datasets_dir, 'pdfs')
+         license_pdf_dir = os.path.join(pdf_dir, producer_license_number)
+         remove_duplicate_files(license_pdf_dir, verbose=verbose)
+     except:
+         print('Failed to remove duplicate files.')
+
+     # Save and return all of the COA URLs.
+     date = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
+     data = pd.concat(coa_urls)
+     datasets_dir = os.path.join(data_dir, 'datasets')
+     data.to_excel(f'{datasets_dir}/fl-lab-result-urls-{date}.xlsx', index=False)
+     print('Saved %i lab result URLs for Kaycha Labs.' % len(data))
+     return data
+
+
+ def parse_results_kaycha(
+         data_dir: str,
+         pdf_dir: str,
+         temp_path: Optional[str] = None,
+         reverse: Optional[bool] = True,
+         sort: Optional[bool] = False,
+         completed: Optional[list] = None,
+         cache_path: Optional[str] = None,
+     ):
+     """Parse lab results from Kaycha Labs COAs."""
+     parser = CoADoc()
+     cache = Bogart(cache_path)
+     if temp_path is None: temp_path = tempfile.mkdtemp()
+     date = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
+     folders = os.listdir(pdf_dir)
+     folders = [x for x in folders if x.startswith('MMTC')]
+     if sort: folders = sorted(folders)
+     if reverse: folders = list(reversed(folders))
+     if completed is None: completed = []
+     all_results = []
+     for folder in folders:
+         if folder in completed:
+             continue
+
+         # Identify all of the PDFs for a licensee.
+         outfile = os.path.join(data_dir, 'datasets', f'fl-results-{folder}-{date}.xlsx')
+         license_pdf_dir = os.path.join(pdf_dir, folder)
+         pdf_files = os.listdir(license_pdf_dir)
+         # Note: Use a list so that `len()` works below; `reversed()` alone
+         # returns an iterator without a length.
+         if reverse: pdf_files = list(reversed(pdf_files))
+
+         # Parse the COA PDFs for each licensee.
+         print('Parsing %i COAs:' % len(pdf_files), folder)
+         all_data = []
+         for pdf_file in pdf_files:
+             if not pdf_file.endswith('.pdf'):
+                 continue
+
+             # Use cached data if available.
+             pdf_file_path = os.path.join(license_pdf_dir, pdf_file)
+             pdf_hash = cache.hash_file(pdf_file_path)
+             if cache.get(pdf_hash):
+                 print('Cached parse:', pdf_file_path)
+                 all_data.append(cache.get(pdf_hash))
+                 continue
+
+             # Parse the PDF.
+             try:
+                 doc = os.path.join(license_pdf_dir, pdf_file)
+                 coa_data = parse_kaycha_coa(
+                     parser,
+                     doc,
+                     verbose=True,
+                     temp_path=temp_path,
+                 )
+                 if coa_data.get('producer_license_number') is None:
+                     coa_data['producer_license_number'] = folder
+                 all_data.append(coa_data)
+                 print('Parsed:', doc)
+             except:
+                 print('Error:', doc)
+                 continue
+
+             # Cache the data.
+             cache.set(pdf_hash, coa_data)
+
+         # Save the data for each licensee.
+         all_results.extend(all_data)
+         try:
+             parser.save(all_data, outfile)
+             print('Saved COA data:', outfile)
+         except:
+             print('Failed to save COA data.')
+
+     # Return all of the parsed data.
+     return all_results
+
+
+ # === Test ===
+ # [✓] Tested: 2024-05-22 by Keegan Skeate <keegan@cannlytics>
+ if __name__ == '__main__':
+
+     # [✓] TEST: Get Kaycha COAs.
+     kaycha_coas = get_results_kaycha(
+         data_dir='D://data/florida/results',
+         pause=7.77,
+         verbose=True,
+         cache_path='D://data/.cache/results-kaycha.jsonl',
+     )
+
+     # [✓] TEST: Parse Kaycha COAs.
+     # Note: This is a super, super long process. Uncomment completed
+     # license numbers to parse COA PDFs for all other licenses.
+     # parse_results_kaycha(
+     #     data_dir='D://data/florida/results',
+     #     pdf_dir='D://data/florida/results/pdfs',
+     #     cache_path='D://data/.cache/results-fl-kaycha.jsonl',
+     #     reverse=False,
+     #     sort=True,
+     #     completed=[
+     #         # 'MMTC-2015-0001', # Longest
+     #         # "MMTC-2015-0002",
+     #         # "MMTC-2015-0004",
+     #         "MMTC-2015-0005",
+     #         "MMTC-2016-0006",
+     #         "MMTC-2015-0003",
+     #         'MMTC-2017-0009',
+     #         'MMTC-2016-0007',
+     #         'MMTC-2017-0008',
+     #         'MMTC-2017-0009',
+     #         'MMTC-2017-0010',
+     #         'MMTC-2017-0011',
+     #         'MMTC-2017-0012',
+     #         'MMTC-2017-0013',
+     #         'MMTC-2018-0014',
+     #         'MMTC-2019-0015',
+     #         'MMTC-2019-0016',
+     #         'MMTC-2019-0017',
+     #         'MMTC-2019-0018',
+     #         'MMTC-2019-0019',
+     #         'MMTC-2019-0020',
+     #         'MMTC-2019-0021',
+     #         'MMTC-2019-0022',
+     #     ]
+     # )
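A minimal usage sketch for `download_coas_kaycha` above, pulling COAs for a single licensee (Trulieve, per the `FLORIDA_LICENSES` entry); the data directory and cache path here are assumptions, not values from the module:

    from cannlytics.data.cache import Bogart

    urls = download_coas_kaycha(
        data_dir='D://data/florida/results',
        slug='Trulieve',
        dba='Trulieve',
        producer_license_number='MMTC-2015-0005',
        cache=Bogart('D://data/.cache/results-kaycha.jsonl'),
    )
    print('Found %i COA URLs.' % len(urls))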
algorithms/get_results_fl_medical.py ADDED
@@ -0,0 +1,553 @@
+ """
+ Get Reddit Results | Cannlytics
+ Copyright (c) 2023-2024 Cannlytics
+
+ Authors: Keegan Skeate <https://github.com/keeganskeate>
+ Created: 12/1/2023
+ Updated: 5/21/2024
+ License: MIT License <https://github.com/cannlytics/cannlytics/blob/main/LICENSE>
+
+ Description:
+
+     This tool collects cannabis product reviews and associated product
+     images to perform research.
+
+     Product data from the product label images, as well as natural
+     language data from the review, such as sentiment rating, can be used
+     to analyze how product data may affect how the product is reviewed.
+
+ """
+ # Standard imports:
+ from datetime import datetime
+ import json
+ import os
+ import shutil
+ from time import sleep
+
+ # External imports:
+ from bs4 import BeautifulSoup
+ from cannlytics.data.cache import Bogart
+ from cannlytics.data.coas import CoADoc
+ from cannlytics.data.web import initialize_selenium
+ from cannlytics.utils.constants import DEFAULT_HEADERS
+ from cannlytics.utils.utils import (
+     download_file_with_selenium,
+     remove_duplicate_files,
+ )
+ from dotenv import dotenv_values
+ import logging
+ import pandas as pd
+ import praw
+ import requests
+ import tempfile
+
+
+ #-----------------------------------------------------------------------
+ # Setup.
+ #-----------------------------------------------------------------------
+
+ # Create a directory to store the downloaded images.
+ images_directory = 'D://data/reddit/FLMedicalTrees/images'
+ os.makedirs(images_directory, exist_ok=True)
+
+ # Set up logging.
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+
+
+ #-----------------------------------------------------------------------
+ # Get Reddit posts with Selenium.
+ #-----------------------------------------------------------------------
+
+ # Get the Subreddit page.
+ # Note: This requires being logged in in the browser.
+ # Note: This step is currently done manually with repeated reading
+ # of the `page_source` to append posts to the `data`.
+ driver = initialize_selenium(headless=False)
+ query = 'COA'
+ queries = [
+     'COA',
+     'COA attached',
+     'COA in', # e.g. in the comments, included, etc.
+     'Certificate',
+     'Certificate of Analysis',
+     'lab results',
+     'test results',
+     'results',
+     'effect',
+     'aroma',
+     'taste',
+     'smell',
+     'flavor',
+ ]
+ sort_by = 'new'
+ subreddit = 'FLMedicalTrees'
+ driver.get(f"https://www.reddit.com/r/{subreddit}/search/?q={query}&sort={sort_by}")
+ sleep(5)
+
+ # # Scroll to load posts (manually or automatically).
+ # for _ in range(10):
+ #     driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
+ #     sleep(2)
+
+ # Collect post details.
+ data = []
+ recorded_posts = []
+
+ # Manual iteration of queries here.
+ page_source = driver.page_source
+ soup = BeautifulSoup(page_source, 'html.parser')
+ posts = soup.find_all('faceplate-tracker', {'data-testid': 'search-post'})
+ for post in posts:
+     context = post.get('data-faceplate-tracking-context')
+     context = json.loads(context)
+     post_context = context['post']
+     post_id = post_context['id']
+     if post_id in recorded_posts:
+         continue
+     recorded_posts.append(post_id)
+     data.append({
+         'title': post_context['title'],
+         'url': 'https://www.reddit.com' + post_context['url'],
+         'created_timestamp': post_context['created_timestamp'],
+         'author_id': post_context['author_id'],
+         'post_id': post_id,
+         'number_comments': post_context['number_comments'],
+         'subreddit_id': post_context['subreddit_id'],
+         'subreddit_name': post_context['subreddit_name'],
+     })
+ print(f'Number of posts: {len(data)}')
+
+ # Close the driver.
+ driver.close()
+ driver.quit()
+
+ # Save the post data.
+ data_dir = r"C:\Users\keega\Documents\cannlytics\cannabis-data-science\season-4\155-seed-to-smoke\data"
+ timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
+ datafile = os.path.join(data_dir, f'fl-medical-trees-posts-{timestamp}.xlsx')
+ df = pd.DataFrame(data)
+ df.to_excel(datafile, index=False)
+ print('Saved post data:', datafile)
+
+
+ #-----------------------------------------------------------------------
+ # Get Reddit post data with the Reddit API.
+ #-----------------------------------------------------------------------
+
+ # DEV:
+ data = pd.read_excel(r"C:\Users\keega\Documents\cannlytics\cannabis-data-science\season-4\155-seed-to-smoke\data\fl-medical-trees-posts-2024-06-25-18-59-50.xlsx")
+ data = data.to_dict(orient='records')
+ recorded_posts = [x['post_id'] for x in data]
+
+
+ def initialize_reddit(config):
+     reddit = praw.Reddit(
+         client_id=config['REDDIT_CLIENT_ID'],
+         client_secret=config['REDDIT_SECRET'],
+         password=config['REDDIT_PASSWORD'],
+         user_agent=config['REDDIT_USER_AGENT'],
+         username=config['REDDIT_USERNAME'],
+     )
+     return reddit
+
+
+ # # Read already collected posts.
+ collected_posts = []
+ # data_dir = r"C:\Users\keega\Documents\cannlytics\cannabis-data-science\season-4\155-seed-to-smoke\data"
+ # post_datafiles = [os.path.join(data_dir, x) for x in os.listdir(data_dir) if 'posts' in x and 'results' not in x]
+ # posts = pd.concat([pd.read_excel(x) for x in post_datafiles])
+ # posts.drop_duplicates(subset=['post_id', 'coa_url', 'redirect_url'], inplace=True)
+ # collected_posts = list(set(posts['post_id'].values) - set(recorded_posts))
+ # print('Total number of already collected posts:', len(collected_posts))
+ # print('Number of posts to collect:', len(data) - len(collected_posts))
+
+ # Initialize Reddit.
+ config = dotenv_values('.env')
+ reddit = initialize_reddit(config)
+
+ # Get each post page and data for each post.
+ all_posts = []
+ for n, post_data in enumerate(data[len(all_posts):]):
+
+     # Retrieve the post content.
+     post_id = post_data['post_id'].split('_')[-1]
+     if post_id in collected_posts:
+         print('Post already collected:', post_id)
+         continue
+     print('Getting data for post:', post_id)
+     try:
+         submission = reddit.submission(id=post_id)
+     except:
+         try:
+             print('Failed to retrieve post:', post_id)
+             print('Waiting 60 seconds to retry...')
+             sleep(61)
+             reddit = initialize_reddit(config)
+             submission = reddit.submission(id=post_id)
+         except:
+             print('Failed to retrieve post:', post_id)
+             print('Waiting 60 seconds to retry...')
+             sleep(61)
+             reddit = initialize_reddit(config)
+             submission = reddit.submission(id=post_id)
+     post_content = submission.selftext
+
+     # Retrieve images.
+     images = []
+     if 'imgur.com' in submission.url or submission.url.endswith(('.jpg', '.jpeg', '.png', '.gif')):
+         images.append(submission.url)
+
+     try:
+         if submission.is_gallery:
+             image_dict = submission.media_metadata
+             for image_item in image_dict.values():
+                 try:
+                     largest_image = image_item['s']
+                     image_url = largest_image['u']
+                     images.append(image_url)
+                 except KeyError:
+                     pass
+     except AttributeError:
+         pass
+
+     # Download images.
+     for i, image_url in enumerate(images, start=1):
+         file_extension = os.path.splitext(image_url)[-1].split('?')[0]
+         filename = f"{post_id}_image_{i}{file_extension}"
+         if file_extension not in ['.jpg', '.jpeg', '.png', '.gif']:
+             filename = f"{post_id}_image_{i}.jpg"
+         outfile = os.path.join(images_directory, filename)
+         if os.path.exists(outfile):
+             continue
+         try:
+             response = requests.get(image_url, headers={'User-agent': 'CannBot'})
+         except:
+             try:
+                 print('Failed to download image:', image_url)
+                 print('Waiting 60 seconds to retry...')
+                 sleep(60)
+                 response = requests.get(image_url, headers={'User-agent': 'CannBot'})
+             except:
+                 print('Failed to download image:', image_url)
+                 print('Waiting 60 seconds to retry...')
+                 sleep(60)
+                 response = requests.get(image_url, headers={'User-agent': 'CannBot'})
+         sleep(3.33)
+         if response.status_code != 200:
+             print('Unsuccessful request for image:', image_url)
+             continue
+         with open(outfile, 'wb') as file:
+             file.write(response.content)
+         print(f"Downloaded image: {outfile}")
+
+     # Retrieve comments.
+     comments = []
+     submission.comments.replace_more(limit=None)
+     for comment in submission.comments.list():
+         comments.append({
+             'comment_id': comment.id,
+             'comment_author': comment.author.name if comment.author else None,
+             'comment_body': comment.body,
+             'comment_created_utc': datetime.utcfromtimestamp(comment.created_utc).strftime('%Y-%m-%d %H:%M:%S')
+         })
+
+     # Update post_data with the retrieved information.
+     post_data['post_content'] = post_content
+     post_data['upvotes'] = submission.ups
+     post_data['downvotes'] = submission.downs
+     post_data['images'] = images
+     post_data['comments'] = comments
+     print('Post data retrieved:', submission.title)
+     all_posts.append(post_data)
+     sleep(3.33)
+
+ # Optional: Try downloading all of the images after the post data is retrieved?
+
+ # Save the post data.
+ try:
+     df = pd.DataFrame(all_posts)
+     data_dir = r"C:\Users\keega\Documents\cannlytics\cannabis-data-science\season-4\155-seed-to-smoke\data"
+     timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
+     datafile = os.path.join(data_dir, f'fl-medical-trees-coa-posts-{timestamp}.xlsx')
+     df.to_excel(datafile, index=False)
+     print('Saved post data:', datafile)
+ except:
+     print('No posts to curate.')
+
+
+ #-----------------------------------------------------------------------
+ # Parse COA URLs from images.
+ #-----------------------------------------------------------------------
+
+ # DEV: Extract all COA URLs for posts from logs.
+ text_file = r"C:\Users\keega\Documents\cannlytics\cannabis-data-science\season-4\158-reported-effects\data\scanned-images.txt"
+ with open(text_file, 'r') as file:
+     lines = file.readlines()
+ coa_urls = {}
+ for line in lines:
+     if 'COA URL found for post' in line:
+         post_id, coa_url = line.split(':', maxsplit=1)
+         post_id = 't3_' + post_id.split(' ')[-1].strip()
+         coa_url = coa_url.strip()
+         post_urls = coa_urls.get(post_id, [])
+         post_urls.append(coa_url)
+         coa_urls[post_id] = list(set(post_urls))
+ print('Number of COA URLs:', len(coa_urls))
+
+ # Define the image directory.
+ images_directory = 'D://data/reddit/FLMedicalTrees/images'
+
+ # Initialize CoADoc.
+ parser = CoADoc()
+ temp_path = tempfile.mkdtemp()
+
+ # Scan all images for COA URLs.
+ # coa_urls = {}
+ image_files = os.listdir(images_directory)
+ image_files = [os.path.join(images_directory, x) for x in image_files]
+ print('Number of images:', len(image_files))
+ for image_file in image_files:
+     post_id = os.path.basename(image_file).split('_')[0]
+     if post_id in coa_urls:
+         continue
+     print('Scanning:', image_file)
+     post_urls = coa_urls.get(post_id, [])
+     try:
+         coa_url = parser.scan(
+             image_file,
+             temp_path=temp_path,
+         )
+     except:
+         print('Failed to scan:', image_file)
+         continue
+     if coa_url:
+         print(f"COA URL found for post {post_id}: {coa_url}")
+         post_urls.append(coa_url)
+         coa_urls[post_id] = list(set(post_urls))
+
+ # Clean up the temporary directory.
+ try:
+     shutil.rmtree(temp_path)
+ except:
+     pass
+
+
+ #-----------------------------------------------------------------------
+ # Download COA PDFs using the COA URLs.
+ #-----------------------------------------------------------------------
+
+ # Define the minimum file size for a PDF.
+ MIN_FILE_SIZE = 21 * 1024
+
+ # Download all PDFs.
+ pdf_dir = 'D://data/reddit/FLMedicalTrees/pdfs'
+ redirect_urls = {}
+ for post_id, urls in coa_urls.items():
+     print(f"Downloading COA for post {post_id}: {urls}")
+     for i, url in enumerate(urls, start=1):
+         filename = f"{post_id}-coa-{i}.pdf"
+         outfile = os.path.join(pdf_dir, filename)
+         if os.path.exists(outfile):
+             print('Cached:', outfile)
+             continue
+
+         # Download Kaycha Labs COAs.
+         if 'yourcoa.com' in url:
+             base = 'https://yourcoa.com'
+             sample_id = url.split('/')[-1].split('?')[0].split('&')[0]
+             if sample_id == 'coa-download':
+                 sample_id = url.split('sample=')[-1]
+             try:
+                 coa_url = f'{base}/coa/download?sample={sample_id}'
+                 response = requests.get(coa_url, headers=DEFAULT_HEADERS)
+                 if response.status_code == 200:
+                     if len(response.content) < MIN_FILE_SIZE:
+                         print('File size is small, retrying with Selenium:', url)
+                         # coa_url = f'{base}/coa/coa-view?sample={sample_id}'
+                         response = requests.get(url, allow_redirects=True)
+                         if response.status_code == 200:
+                             redirected_url = response.url
+                             download_file_with_selenium(
+                                 redirected_url,
+                                 download_dir=pdf_dir,
+                             )
+                             print('Downloaded with Selenium:', redirected_url)
+                     else:
+                         with open(outfile, 'wb') as pdf:
+                             pdf.write(response.content)
+                         print('Downloaded:', outfile)
+                 else:
+                     print('Failed to download, retrying with Selenium:', url)
+                     response = requests.get(url, allow_redirects=True)
+                     if response.status_code == 200:
+                         redirected_url = response.url
+                         download_file_with_selenium(
+                             redirected_url,
+                             download_dir=pdf_dir,
+                         )
+                         print('Downloaded with Selenium:', redirected_url)
+             except:
+                 coa_url = f'{base}/coa/coa-view?sample={sample_id}'
+                 response = requests.get(coa_url, allow_redirects=True)
+                 if response.status_code == 200:
+                     redirected_url = response.url
+                     download_file_with_selenium(
+                         redirected_url,
+                         download_dir=pdf_dir,
+                     )
+                     print('Downloaded with Selenium:', redirected_url)
+
+         # Download Method Testing Labs COAs.
+         elif 'mete.labdrive.net' in url:
+             download_file_with_selenium(
+                 url,
+                 download_dir=pdf_dir,
+                 method='a',
+                 tag_name='a',
+                 filename=f"{post_id}-coa-{i}.pdf",
+             )
+             print('Downloaded with Selenium:', url)
+
+         # Download regular PDFs.
+         # Note: Ensure ModernCanna, ACS, etc. COAs are being downloaded.
+         elif url.startswith('http'):
+             response = requests.get(url, allow_redirects=True)
+             if response.status_code == 200:
+                 filename = f"{post_id}-coa-{i}.pdf"
+                 outfile = os.path.join(pdf_dir, filename)
+                 with open(outfile, 'wb') as file:
+                     file.write(response.content)
+                 print(f"Downloaded COA: {outfile}")
+             sleep(1)
+
+         # Skip invalid URLs.
+         else:
+             print('Invalid URL:', url)
+             continue
+
+ # Remove duplicate PDFs.
+ remove_duplicate_files(pdf_dir, verbose=True)
+
+ try:
+
+     # Tie the COA URLs to the posts.
+     df = pd.DataFrame(all_posts)
+     df['coa_url'] = df['post_id'].map(coa_urls)
+     df['redirect_url'] = df['post_id'].map(redirect_urls)
+
+     # Save the post data.
+     data_dir = r"C:\Users\keega\Documents\cannlytics\cannabis-data-science\season-4\155-seed-to-smoke\data"
+     timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
+     datafile = os.path.join(data_dir, f'fl-medical-trees-coa-posts-{timestamp}.xlsx')
+     df.to_excel(datafile, index=False)
+     print('Saved post data:', datafile)
+
+ except:
+     print('No posts to curate.')
+
+
+ #-----------------------------------------------------------------------
+ # Parse COA data from the PDFs.
+ #-----------------------------------------------------------------------
+
+ # Parse COA data from the PDFs.
+ parser = CoADoc()
+ pdf_dir = r'D:\data\reddit\FLMedicalTrees\pdfs'
+ pdf_files = os.listdir(pdf_dir)
+ pdf_files = [os.path.join(pdf_dir, x) for x in pdf_files]
+ logging.info('Parsing %i COA PDFs...' % len(pdf_files))
+ all_coa_data = {}
+ failed = []
+ temp_path = tempfile.mkdtemp()
+ for pdf_file in pdf_files:
+     try:
+         coa_data = parser.parse_pdf(
+             pdf_file,
+             temp_path=temp_path,
+             verbose=True,
+             use_qr_code=False,
+         )
+     except:
+         logging.info('Failed to parse: %s' % pdf_file)
+         failed.append(pdf_file)
+         continue
+     if coa_data:
+         logging.info('Parsed: %s' % pdf_file)
+         coa_id = os.path.basename(pdf_file).split(' ')[0]
+         if isinstance(coa_data, list):
+             all_coa_data[coa_id] = coa_data[0]
+         elif isinstance(coa_data, dict):
+             all_coa_data[coa_id] = coa_data
+     else:
+         logging.info('Found no data: %s' % pdf_file)
+ try:
+     shutil.rmtree(temp_path)
+ except:
+     pass
+
+ # Compile the COA data and remove duplicates.
+ coa_df = pd.DataFrame(list(all_coa_data.values()))
+ coa_df['coa_id'] = [x.replace('.pdf', '') for x in list(all_coa_data.keys())]
+ coa_df.drop_duplicates(subset=['coa_id', 'sample_id', 'results_hash'], inplace=True)
+
+ # Save the COA data.
+ data_dir = r"C:\Users\keega\Documents\cannlytics\cannabis-data-science\season-4\155-seed-to-smoke\data"
+ timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
+ datafile = os.path.join(data_dir, f'fl-medical-trees-coa-data-{timestamp}.xlsx')
+ coa_df.to_excel(datafile, index=False)
+ logging.info('Saved %i COA data: %s' % (len(coa_df), datafile))
+
+ # Save all of the COAs that failed to be parsed.
+ with open(os.path.join(data_dir, f'unidentified-coas-{timestamp}.json'), 'w') as f:
+     json.dump(failed, f, indent=4)
+ logging.info('Saved list of failed PDFs.')
+
+
+ #-----------------------------------------------------------------------
+ # Create a sample of posts that have COA URLs.
+ #-----------------------------------------------------------------------
+
+ # Define where the data lives.
+ data_dir = r"C:\Users\keega\Documents\cannlytics\cannabis-data-science\season-4\155-seed-to-smoke\data"
+
+ # Read all of the posts.
+ post_datafiles = [os.path.join(data_dir, x) for x in os.listdir(data_dir) if 'posts' in x and 'results' not in x]
+ posts = pd.concat([pd.read_excel(x) for x in post_datafiles])
+ posts.drop_duplicates(subset=['post_id', 'coa_url', 'redirect_url'], inplace=True)
+ print('Number of posts:', len(posts))
+
+ # Read in all of the COA datafiles.
+ coa_datafiles = [os.path.join(data_dir, x) for x in os.listdir(data_dir) if 'coa-data' in x]
+ results = pd.concat([pd.read_excel(x) for x in coa_datafiles])
+ results.drop_duplicates(subset=['sample_id', 'results_hash'], inplace=True)
+ print('Number of COA results:', len(results))
+
+ # Find the post ID for the lab results.
+ post_coa_data = {}
+ for index, row in results.iterrows():
+     coa_id = row['coa_id'].replace('t3_', '').split('-coa')[0]
+     matches = posts.loc[posts['post_id'].str.contains(coa_id)]
+     try:
+         if matches.empty:
+             matches = posts.loc[posts['coa_url'].str.contains(coa_id)]
+         if matches.empty:
+             matches = posts.loc[posts['redirect_url'].str.contains(coa_id)]
+     except:
+         pass
+     if not matches.empty:
+         post_id = matches['post_id'].values[0]
+         post_coa_data[post_id] = row.to_dict()
+
+ # Merge the lab results with the post data.
+ coa_data_df = pd.DataFrame.from_dict(post_coa_data, orient='index')
+ coa_data_df.reset_index(inplace=True)
+ coa_data_df.rename(columns={'index': 'post_id'}, inplace=True)
+ merged_df = posts.merge(coa_data_df, on='post_id', how='left')
+ merged_df = merged_df.loc[~merged_df['coa_id'].isna()]
+ print('Number of posts with COA data:', len(merged_df))
+
+ # Save the updated post data.
+ timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
+ datafile = os.path.join(data_dir, f'fl-medical-trees-posts-with-results-{timestamp}.xlsx')
+ merged_df.to_excel(datafile, index=False)
+ print('Saved %i posts with COA data:' % len(merged_df), datafile)
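For reference, the log-parsing step above consumes lines printed by the image-scanning loop; a worked example of the extraction (post ID and sample ID are hypothetical):

    line = 'COA URL found for post abc123: https://yourcoa.com/coa/coa-view?sample=SAMPLE-001\n'
    post_id, coa_url = line.split(':', maxsplit=1)
    post_id = 't3_' + post_id.split(' ')[-1].strip()  # -> 't3_abc123'
    coa_url = coa_url.strip()  # -> 'https://yourcoa.com/coa/coa-view?sample=SAMPLE-001'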
algorithms/get_results_fl_terplife.py ADDED
@@ -0,0 +1,269 @@
+ """
+ Get Florida cannabis lab results | TerpLife Labs
+ Copyright (c) 2023-2024 Cannlytics
+
+ Authors:
+     Keegan Skeate <https://github.com/keeganskeate>
+ Created: 5/18/2023
+ Updated: 5/22/2024
+ License: <https://github.com/cannlytics/cannlytics/blob/main/LICENSE>
+
+ Description:
+
+     Archive Florida cannabis lab result data for TerpLife Labs.
+
+ Data Sources:
+
+     - [TerpLife Labs](https://www.terplifelabs.com)
+
+ """
+ # Standard imports:
+ from datetime import datetime
+ import itertools
+ import os
+ import random
+ import string
+ from time import time, sleep
+
+ # External imports:
+ from cannlytics.data.cache import Bogart
+ from cannlytics.data.coas.coas import CoADoc
+ from cannlytics.data.coas.algorithms.terplife import parse_terplife_coa
+ from cannlytics.data.web import initialize_selenium
+ import pandas as pd
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.support import expected_conditions as EC
+ from selenium.webdriver.support.ui import WebDriverWait
+
+
+ class TerpLifeLabs:
+     """Download lab results from TerpLife Labs."""
+
+     def __init__(self, data_dir, namespace='terplife', cache_path=None):
+         """Initialize the driver and directories."""
+         self.data_dir = data_dir
+         self.datasets_dir = os.path.join(data_dir, 'datasets', namespace)
+         self.pdf_dir = os.path.join(data_dir, 'pdfs', namespace)
+         if not os.path.exists(self.datasets_dir): os.makedirs(self.datasets_dir)
+         if not os.path.exists(self.pdf_dir): os.makedirs(self.pdf_dir)
+         # self.driver = initialize_selenium(download_dir=self.pdf_dir)
+         self.cache = Bogart(cache_path)
+
+     def get_results_terplife(
+             self,
+             queries: list,
+             url='https://www.terplifelabs.com/coa/',
+             wait=30,
+         ):
+         """Get lab results published by TerpLife Labs on the public web."""
+         start = datetime.now()
+         # FIXME: Refactor the following.
+         # self.driver.get(url)
+         # sleep(1)
+         # , browser='edge'
+         with initialize_selenium(download_dir=self.pdf_dir) as driver:
+             self.driver = driver
+             self.driver.get(url)
+             for query in queries:
+                 print('Querying: %s' % query)
+                 sleep(1)
+                 self.query_search_box(query)
+                 self.download_search_results(wait=wait)
+             self.driver.close()
+             self.driver.quit()
+         end = datetime.now()
+         print('Finished downloading TerpLife Labs COAs.')
+         print('Time elapsed: %s' % str(end - start))
+
+     def download_search_results(self, wait=30):
+         """Download the results of a search."""
+         # TODO: Wait for the table to load instead of simply waiting.
+         sleep(wait)
+         load = EC.presence_of_element_located((By.CLASS_NAME, 'file-list'))
+         table = WebDriverWait(self.driver, wait).until(load)
+         rows = table.find_elements(By.CLASS_NAME, 'file-item')
+         print('Found %i rows.' % len(rows))
+         for row in rows:
+
+             # Skip if the file has already been downloaded.
+             file_name = ''
+             try:
+                 file_name = row.find_element(By.CLASS_NAME, 'file-item-name').text
+                 if file_name == 'COAS':
+                     continue
+                 outfile = os.path.join(self.pdf_dir, file_name)
+                 if os.path.exists(outfile):
+                     print('Cached: %s' % outfile)
+                     # DEV: Wean off of this cache.set.
+                     file_hash = self.cache.hash_file(outfile)
+                     self.cache.set(file_hash, {'type': 'download', 'file': outfile})
+                     continue
+             except:
+                 print('ERROR FINDING: %s' % file_name)
+                 sleep(60)
+                 break
+
+             # Click on the icons for each row.
+             try:
+                 self.driver.execute_script('arguments[0].scrollIntoView();', row)
+                 sleep(3.33)
+                 row.click()
+             except:
+                 print('ERROR CLICKING: %s' % file_name)
+                 continue
+
+             # Click the download button.
+             try:
+                 sleep(random.uniform(30, 31))
+                 download_button = self.driver.find_element(By.CLASS_NAME, 'lg-download')
+                 download_button.click()
+                 print('Downloaded: %s' % outfile)
+                 # TODO: Properly wait for the download to finish.
+                 sleep(random.uniform(30, 31))
+                 file_hash = self.cache.hash_file(outfile)
+                 self.cache.set(file_hash, {'type': 'download', 'file': outfile})
+             except:
+                 print('ERROR DOWNLOADING: %s' % file_name)
+                 continue
+
+             # Click the close button.
+             try:
+                 close_button = self.driver.find_element(By.CLASS_NAME, 'lg-close')
+                 close_button.click()
+             except:
+                 print('ERROR CLOSING: %s' % file_name)
+
+     def query_search_box(self, character):
+         """Find the search box and enter text."""
+         search_box = self.get_search_box()
+         self.driver.execute_script('arguments[0].scrollIntoView();', search_box)
+         sleep(0.3)
+         search_box.clear()
+         search_box.send_keys(character)
+         sleep(0.3)
+         search_button = search_box.find_element(By.XPATH, 'following-sibling::*[1]')
+         search_button.click()
+
+     def get_search_box(self):
+         """Find the search box and enter text."""
+         inputs = self.driver.find_elements(By.TAG_NAME, 'input')
+         for input in inputs:
+             if input.get_attribute('placeholder') == 'Enter a keyword to search':
+                 return input
+         return None
+
+     def quit(self):
+         """Close the driver."""
+         self.driver.close()
+         self.driver.quit()
+
+
+ def get_day_month_combinations():
+     """Get all day-month combinations."""
+     day_month_combinations = []
+     for month in range(1, 13):
+         if month in [4, 6, 9, 11]:
+             days_in_month = 30
+         elif month == 2:
+             days_in_month = 29
+         else:
+             days_in_month = 31
+         for day in range(1, days_in_month + 1):
+             formatted_month = f'{month:02d}'
+             formatted_day = f'{day:02d}'
+             combination = formatted_month + formatted_day
+             day_month_combinations.append(combination)
+     return day_month_combinations
+
+
+ def add_digits(strings):
+     """Add digits 0-9 to each string in a list."""
+     return [s + str(digit) for s in strings for digit in range(10)]
+
+
+ def add_letters(strings):
+     """Add letters a-z to each string in a list."""
+     return [s + letter for s in strings for letter in string.ascii_lowercase]
+
+
+ # === Test ===
+ # [✓] Tested: 2024-05-22 by Keegan Skeate <keegan@cannlytics>
+ if __name__ == '__main__':
+
+     # Query by digit combinations.
+     queries = get_day_month_combinations()
+     queries += [''.join(map(str, x)) for x in itertools.product(range(10), repeat=2)]
+     # queries = []
+
+     # Query by alphabetic combinations.
+     specific_letters = [x for x in string.ascii_lowercase]
+     queries += [a + b for a in specific_letters for b in string.ascii_lowercase]
+
+     # Drill down on specific queries.
+     long_letters = ['wu', 'us', 'tp', 'qd', 'oo', 'og', 'nd', 'mh', 'it',
+                     'io', 'ie', 'fm', 'bu', 'bf', 'at', 'aq', 'ao']
+     long_digits = ['81', '61', '51', '41', '40', '30', '20']
+
+     # Create new lists with the combinations.
+     # queries += add_letters(long_letters)
+     # queries += add_digits(long_digits)
+     queries.reverse()
+     print('All queries:', queries)
+
+     # Download TerpLife Labs COAs.
+     # FIXME: This has a severe memory leak. Chrome may not be closing properly.
+     DATA_DIR = 'D://data/florida/results'
+     CACHE_PATH = 'D://data/.cache/results-fl-terplife.jsonl'
+     downloader = TerpLifeLabs(DATA_DIR, cache_path=CACHE_PATH)
+     downloader.get_results_terplife(queries)
+     downloader.quit()
+
+     # Optional: Search TerpLife for known strains.
+
+     # === TODO: Turn the following into methods of the class ===
+     # cache = Bogart(CACHE_PATH)
+
+     # # Find the recently downloaded PDFs.
+     # days_ago = 365
+     # pdf_dir = 'D://data/florida/results/pdfs/terplife'
+     # current_time = time()
+     # recent_threshold = days_ago * 24 * 60 * 60
+     # recent_files = []
+     # for filename in os.listdir(pdf_dir):
+     #     file_path = os.path.join(pdf_dir, filename)
+     #     if os.path.isfile(file_path):
+     #         modification_time = os.path.getmtime(file_path)
+     #         time_difference = current_time - modification_time
+     #         if time_difference <= recent_threshold:
+     #             recent_files.append(file_path)
+
+     # # Parse the COA PDFs.
+     # pdf_dir = 'D://data/florida/results/pdfs/terplife'
+     # recent_files = os.listdir(pdf_dir)
+     # print('Parsing %i recently downloaded files...' % len(recent_files))
+     # parser = CoADoc()
+     # all_data = []
+     # for doc in recent_files:
+     #     try:
+     #         filename = os.path.join(pdf_dir, doc)
+     #         pdf_hash = cache.hash_file(filename)
+     #         if cache.get(pdf_hash):
+     #             print('Cached parse:', doc)
+     #             all_data.append(cache.get(pdf_hash))
+     #             continue
+     #         coa_data = parse_terplife_coa(parser, doc, verbose=True)
+     #         all_data.append(coa_data)
+     #         cache.set(pdf_hash, coa_data)
+     #         print(f'Parsed: {doc}')
+     #     except Exception as e:
+     #         print('Failed to parse:', doc)
+     #         print(e)
+
+     # # Save all of the data.
+     # timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
+     # output_dir = 'D://data/florida/results/datasets/terplife'
+     # outfile = os.path.join(output_dir, f'fl-results-terplife-{timestamp}.xlsx')
+     # all_results = pd.DataFrame(all_data)
+     # all_results.replace(r'\\u0000', '', regex=True, inplace=True)
+     # parser.save(all_results, outfile)
+     # print('Saved %i COA data:' % len(all_results), outfile)
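For reference, the query helpers above brute-force the search box with short character combinations; their outputs look like this:

    get_day_month_combinations()[:3]  # ['0101', '0102', '0103']
    add_digits(['ao'])[:3]            # ['ao0', 'ao1', 'ao2']
    add_letters(['4'])[:3]            # ['4a', '4b', '4c']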
algorithms/get_results_hi.py ADDED
@@ -0,0 +1,274 @@
+ """
+ Get Cannabis Results | Hawaii
+ Copyright (c) 2024 Cannlytics
+
+ Authors:
+     Keegan Skeate <https://github.com/keeganskeate>
+ Created: 7/10/2024
+ Updated: 7/10/2024
+ License: CC-BY 4.0 <https://huggingface.co/datasets/cannlytics/cannabis_tests/blob/main/LICENSE>
+
+ Data Source:
+
+     - Public records request
+
+ """
+ # Standard imports:
+ import json
+ import os
+
+ # External imports:
+ from cannlytics.data.coas.coas import CoADoc
+ from cannlytics.data.coas import standardize_results
+ from cannlytics.data.coas.parsing import find_unique_analytes
+ from cannlytics.utils.utils import convert_to_numeric, snake_case
+ import pandas as pd
+
+
+ def extract_samples(datafile, verbose=True):
+     """Extract samples from CSV layout."""
+
+     # Read the datafile, line by line.
+     if verbose: print('Processing:', datafile)
+     with open(datafile, 'r') as file:
+         lines = file.readlines()
+
+     # Define rows to skip.
+     skip_rows = [
+         '"',
+         "Date",
+         "Report:",
+         "LicenseNum:",
+         "Start:",
+         "End:",
+         "Status:",
+         "Report Timestamp"
+     ]
+
+     # Extract the data for each sample.
+     parser = CoADoc()
+     samples = []
+     obs = None
+     analyses, results = [], []
+     for i, line in enumerate(lines):
+
+         # Skip nuisance rows.
+         skip = False
+         for skip_row in skip_rows:
+             if line.startswith(skip_row):
+                 skip = True
+                 break
+         if skip:
+             continue
+
+         # Get all values.
+         values = line.replace('\n', '').split(',')
+
+         # Skip blank rows.
+         if all([x == '' for x in values]):
+             continue
+
+         # Identify samples as rows that start with a date.
+         try: date = pd.to_datetime(values[0])
+         except: date = None
+         if date and values[0] != '':
+
+             # Record results for any existing observation.
+             if obs is not None:
+                 obs['analyses'] = json.dumps(analyses)
+                 obs['results'] = json.dumps(results)
+                 samples.append(obs)
+                 analyses, results = [], []
+
+             # Get the details for each sample.
+             try:
+                 ids = values[4]
+                 batch_number = ids.split('(')[0].split(':')[-1].strip()
+                 sample_id = ids.split(':')[-1].replace(')', '').strip()
+                 obs = {
+                     'date_tested': date.isoformat(),
+                     'product_type': values[1],
+                     'strain_name': values[2],
+                     'product_name': values[3],
+                     'batch_number': batch_number,
+                     'sample_id': sample_id,
+                     'status': values[-3],
+                     'producer': values[-2],
+                     'producer_license_number': values[-1],
+                 }
+             # Handle long product names.
+             except:
+                 try:
+                     row = lines[i + 1].replace('\n', '').split(',')
+                     ids = row[1]
+                 except:
+                     row = lines[i + 1].replace('\n', '').split(',') + lines[i + 2].replace('\n', '').split(',')
+                     ids = row[-4]
+                 batch_number = ids.split('(')[0].split(':')[-1].strip()
+                 sample_id = ids.split(':')[-1].replace(')', '').strip()
+                 obs = {
+                     'date_tested': date.isoformat(),
+                     'product_type': values[1],
+                     'strain_name': values[2],
+                     'product_name': ' '.join([values[3], row[0]]),
+                     'batch_number': batch_number,
+                     'sample_id': sample_id,
+                     'status': row[-3],
+                     'producer': row[-2],
+                     'producer_license_number': row[-1],
+                 }
+             continue
+
+         # Get the cannabinoid results.
+         if values[0] == 'Potency Analysis Test':
+             analyses.append('cannabinoids')
+             n_analytes = 5
+             for n in range(1, n_analytes + 1):
+                 row = lines[i + n].replace('\n', '').split(',')
+                 name = row[0]
+                 key = parser.analytes.get(snake_case(name), snake_case(name))
+                 if name == 'Total':
+                     obs['total_cannabinoids'] = convert_to_numeric(row[1].replace('%', ''))
+                     continue
+                 results.append({
+                     'analysis': 'cannabinoids',
+                     'key': key,
+                     'name': name,
+                     'value': convert_to_numeric(row[1].replace('%', '')),
+                     'status': row[2],
+                 })
+             continue
+
+         # Get the foreign matter results.
+         if values[0] == 'Foreign Matter Inspection Test':
+             analyses.append('foreign_matter')
+             row = lines[i + 1].replace('\n', '').split(',')
+             results.append({
+                 'analysis': 'foreign_matter',
+                 'key': 'foreign_matter',
+                 'name': 'Foreign Matter',
+                 'value': convert_to_numeric(row[1].replace('%', '')),
+                 'status': row[2],
+             })
+             continue
+
+         # Get the microbe results.
+         if values[0] == 'Microbiological Screening Test':
+             analyses.append('microbes')
+             n_analytes = 6
+             for n in range(1, n_analytes + 1):
+                 row = lines[i + n].replace('\n', '').split(',')
+                 name = row[0]
+                 key = parser.analytes.get(snake_case(name), snake_case(name))
+                 if name == '': continue
+                 results.append({
+                     'analysis': 'microbes',
+                     'key': key,
+                     'name': name,
+                     'value': convert_to_numeric(row[1].split(' ')[0]),
+                     'units': row[1].split(' ')[-1],
+                     'status': row[2],
+                 })
+             continue
+
+         # Get the mycotoxin results.
+         if values[0] == 'Mycotoxin Screening Test':
+             analyses.append('mycotoxins')
+             row = lines[i + 1].replace('\n', '').split(',')
+             name = row[0]
+             key = parser.analytes.get(snake_case(name), snake_case(name))
+             results.append({
+                 'analysis': 'mycotoxins',
+                 'key': key,
+                 'name': name,
+                 'value': convert_to_numeric(row[1].split(' ')[0]),
+                 'status': row[2],
+             })
+             continue
+
+         # Get the moisture content result.
+         if values[0] == 'Moisture Content Test':
+             analyses.append('moisture_content')
+             row = lines[i + 1].replace('\n', '').split(',')
+             value = convert_to_numeric(row[1].replace('%', ''))
+             obs['moisture_content'] = value
+             continue
+
+         # Get the residual solvent results.
+         # See: https://health.hawaii.gov/medicalcannabis/files/2022/05/Chapter-11-850-Hawaii-Administrative-Interim-Rules-Effective-April-29-2022.pdf
+         if values[0] == 'Residual Solvent Test':
+             analyses.append('residual_solvents')
+             solvents = [
+                 'Benzene',
+                 'Butane',
+                 'Ethanol',
+                 'Heptane',
+                 'Hexane',
+                 'Pentane',
+                 'Toluene',
+                 'Total xylenes'
+             ]
+             for n in range(1, len(solvents) + 1):
+                 row = lines[i + n].replace('\n', '').split(',')
+                 name = solvents[n - 1]
+                 key = parser.analytes.get(snake_case(name), snake_case(name))
+                 results.append({
+                     'analysis': 'residual_solvents',
+                     'key': key,
+                     'name': name,
+                     'value': convert_to_numeric(row[1].split(' ')[0]),
+                     'units': row[1].split(' ')[-1],
+                     'status': row[2],
+                 })
+             continue
+
+     # Record the last sample's results.
+     obs['analyses'] = json.dumps(analyses)
+     obs['results'] = json.dumps(results)
+     samples.append(obs)
+
+     # Return the samples.
+     return pd.DataFrame(samples)
+
+
+ # === Tests ===
+ # [✓] Tested: 2024-07-10 by Keegan Skeate <keegan@cannlytics.com>
+ if __name__ == '__main__':
+
+     # Define where the data lives.
+     data_dir = 'D://data/hawaii/public-records'
+     datafiles = [os.path.join(data_dir, x) for x in os.listdir(data_dir) if x.endswith('.csv')]
+
+     # Process each CSV file.
+     data = [extract_samples(file) for file in datafiles]
+
+     # Aggregate all samples.
+     results = pd.concat(data, ignore_index=True)
+     print('Number of results:', len(results))
+
+     # Standardize the results.
+     analytes = find_unique_analytes(results)
+     analytes = sorted(list(analytes))
+     results = standardize_results(results, analytes)
+
+     # Standardize time.
+     results['date'] = pd.to_datetime(results['date_tested'], format='mixed')
+     results['week'] = results['date'].dt.to_period('W').astype(str)
+     results['month'] = results['date'].dt.to_period('M').astype(str)
+     results = results.sort_values('date')
+
+     # Save the results.
+     outfile = 'D://data/hawaii/hi-results-latest.xlsx'
+     outfile_csv = 'D://data/hawaii/hi-results-latest.csv'
+     outfile_json = 'D://data/hawaii/hi-results-latest.jsonl'
+     results.to_excel(outfile, index=False)
+     results.to_csv(outfile_csv, index=False)
+     results.to_json(outfile_json, orient='records', lines=True)
+     print('Saved Excel:', outfile)
+     print('Saved CSV:', outfile_csv)
+     print('Saved JSON:', outfile_json)
+
+     # Print out the features.
+     features = {x: 'string' for x in results.columns}
+     print('Number of features:', len(features))
+     print('Features:', features)
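For reference, `extract_samples` above expects a report layout roughly like the following sketch (values hypothetical, inferred from the parsing logic rather than taken from an actual Hawaii report):

    01/15/2024,Flower,Blue Dream,Blue Dream 3.5g,Batch: BD-001 (Sample: S-123),Pass,Producer LLC,LIC-001
    Potency Analysis Test,,
    THCA,21.1%,Pass
    ...
    Total,24.2%,Pass
    Moisture Content Test,,
    Moisture,11.2%,Pass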
algorithms/get_results_ma.py ADDED
@@ -0,0 +1,99 @@
+ """
+ Get Results Massachusetts | MCR Labs
+ Copyright (c) 2022-2023 Cannlytics
+
+ Authors:
+     Keegan Skeate <https://github.com/keeganskeate>
+     Candace O'Sullivan-Sutherland <https://github.com/candy-o>
+ Created: 7/13/2022
+ Updated: 4/21/2024
+ License: CC-BY 4.0 <https://huggingface.co/datasets/cannlytics/cannabis_tests/blob/main/LICENSE>
+
+ Description:
+
+     Collect all public Massachusetts lab result data.
+
+ Data Sources:
+
+     - [MCR Labs Test Results](https://reports.mcrlabs.com)
+
+ """
+ # Standard imports.
+ from datetime import datetime
+ import os
+
+ # External imports.
+ import pandas as pd
+
+ # Internal imports.
+ from cannlytics.data.coas.algorithms.mcrlabs import get_mcr_labs_test_results
+ from cannlytics.firebase import initialize_firebase, update_documents
+ from cannlytics.utils.utils import to_excel_with_style
+
+
+ def upload_results(
+         data: pd.DataFrame,
+         collection: str = 'public/data/results',
+         key: str = 'sample_hash',
+         verbose: bool = False,
+     ):
+     """Upload test results to Firestore."""
+     refs, updates = [], []
+     for _, obs in data.iterrows():
+         doc_id = obs[key]
+         refs.append(f'{collection}/{doc_id}')
+         updates.append(obs.to_dict())
+     database = initialize_firebase()
+     update_documents(refs, updates, database=database)
+     if verbose:
+         print('Uploaded %i lab results to Firestore.' % len(refs))
+
+
+ def get_results_mcrlabs(
+         data_dir: str = '.',
+         starting_page: int = 1,
+         pause: float = 3.33,
+         upload: bool = False,
+     ):
+     """Get all of the MCR Labs test results."""
+
+     # Get all of the results.
+     # FIXME: Fix the errors that are being skipped.
+     all_results = get_mcr_labs_test_results(
+         starting_page=starting_page,
+         pause=pause,
+     )
+
+     # Save the results to Excel.
+     data = pd.DataFrame(all_results)
+     date = datetime.now().isoformat()[:10]
+     if not os.path.exists(data_dir):
+         os.makedirs(data_dir)
+     datafile = f'{data_dir}/ma-results-{date}.xlsx'
+     try:
+         to_excel_with_style(data, datafile)
+     except:
+         data.to_excel(datafile)
+     print('Saved %i results to %s' % (len(data), datafile))
+     datafile = f'{data_dir}/ma-results-latest.csv'
+     data.to_csv(datafile, index=False)
+
+     # Optionally upload the data to Firestore.
+     if upload:
+         upload_results(data)
+
+     # Return the data.
+     return data
+
+
+ # === Test ===
+ # [✓] Tested: 2024-03-21 by Keegan Skeate <keegan@cannlytics>
+ if __name__ == '__main__':
+
+     # Get all of the MCR Labs test results.
+     ma_results = get_results_mcrlabs(
+         data_dir='D://data/massachusetts/results',
+         starting_page=1,
+         pause=3.33,
+     )
+     print('Finished collecting %i lab results from MCR Labs' % len(ma_results))
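A minimal sketch of chaining the two functions above, collecting results and then pushing them to Firestore; this assumes Firebase credentials are already configured for `initialize_firebase`:

    results = get_results_mcrlabs(data_dir='D://data/massachusetts/results')
    upload_results(results, collection='public/data/results', verbose=True)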
algorithms/get_results_mcrlabs.py DELETED
@@ -1,63 +0,0 @@
- """
- Cannabis Tests | Get MCR Labs Test Result Data
- Copyright (c) 2022-2023 Cannlytics
-
- Authors:
-     Keegan Skeate <https://github.com/keeganskeate>
-     Candace O'Sullivan-Sutherland <https://github.com/candy-o>
- Created: 7/13/2022
- Updated: 2/6/2023
- License: CC-BY 4.0 <https://huggingface.co/datasets/cannlytics/cannabis_tests/blob/main/LICENSE>
-
- Description:
-
-     Collect all of MCR Labs' publicly published lab results.
-
- Data Points: See `cannlytics.data.coas.mcrlabs.py`.
-
- Data Sources:
-
-     - MCR Labs Test Results
-       URL: <https://reports.mcrlabs.com>
-
- """
- # Standard imports.
- from datetime import datetime
- import os
-
- # External imports.
- import pandas as pd
-
- # Internal imports.
- from cannlytics.data.coas.mcrlabs import get_mcr_labs_test_results
- from cannlytics.firebase import initialize_firebase, update_documents
- from cannlytics.utils.utils import to_excel_with_style
-
-
- # Specify where your data lives.
- DATA_DIR = '.datasets/lab_results/mcr_labs'
-
- # Get all of the results!
- all_results = get_mcr_labs_test_results(
-     starting_page=1,
-     pause=3,
- )
-
- # Save the results to Excel.
- data = pd.DataFrame(all_results)
- timestamp = datetime.now().isoformat()[:19].replace(':', '-')
- if not os.path.exists(DATA_DIR): os.makedirs(DATA_DIR)
- datafile = f'{DATA_DIR}/mcr-lab-results-{timestamp}.xlsx'
- to_excel_with_style(data, datafile)
-
- # Prepare the data to upload to Firestore.
- refs, updates = [], []
- for index, obs in data.iterrows():
-     sample_id = obs['sample_id']
-     refs.append(f'public/data/lab_results/{sample_id}')
-     updates.append(obs.to_dict())
-
- # Initialize Firebase and upload the data to Firestore!
- database = initialize_firebase()
- update_documents(refs, updates, database=database)
- print('Added %i lab results to Firestore!' % len(refs))
algorithms/get_results_md.py ADDED
@@ -0,0 +1,160 @@
1
+ """
2
+ Get Results | Maryland
3
+ Copyright (c) 2023-2024 Cannlytics
4
+
5
+ Authors:
6
+ Keegan Skeate <https://github.com/keeganskeate>
7
+ Created: 9/26/2023
8
+ Updated: 7/10/2024
9
+ License: CC-BY 4.0 <https://huggingface.co/datasets/cannlytics/cannabis_tests/blob/main/LICENSE>
10
+
11
+ Description:
12
+
13
+ Collect all public Maryland lab result data.
14
+
15
+ Data Sources:
16
+
17
+ - Public records request from the Maryland Medical Cannabis Commission (MMCC).
18
+
19
+ """
20
+ # Standard imports:
21
+ import os
22
+
23
+ # External imports:
24
+ from cannlytics.utils import snake_case, camel_to_snake
25
+ from cannlytics.utils.constants import ANALYTES
26
+ import pandas as pd
27
+
28
+ def combine_redundant_columns(df, product_types=None, verbose=False):
29
+ """Combine redundant columns and extract units and product types."""
30
+ combined_results = {}
31
+ for col in df.columns:
32
+ matched = False
33
+ if product_types is not None:
34
+ for product_type in product_types:
35
+ if product_type in col and '(' not in col:
36
+ base_name = col.split(product_type)[0].strip()
37
+ if base_name not in combined_results:
38
+ combined_results[base_name] = df[col]
39
+ if verbose:
40
+ print('New column:', base_name)
41
+ else:
42
+ combined_results[base_name] = combined_results[base_name].fillna(df[col])
43
+ if verbose:
44
+ print('Combined column:', base_name)
45
+ matched = True
46
+ if matched:
47
+ continue
48
+ if '(' in col and ')' in col:
49
+ base_name = col.split('(')[0].strip()
50
+ if base_name not in combined_results:
51
+ combined_results[base_name] = df[col]
52
+ if verbose:
53
+ print('New column:', base_name)
54
+ else:
55
+ combined_results[base_name] = combined_results[base_name].fillna(df[col])
56
+ if verbose:
57
+ print('Combined column:', base_name)
58
+ elif col not in combined_results:
59
+ if verbose:
60
+ print('New column:', col)
61
+ combined_results[col] = df[col]
62
+ return pd.DataFrame(combined_results)
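To illustrate what `combine_redundant_columns` does, here is a small hypothetical example; the column names are made up for demonstration and are not from the Maryland data:

```python
# Hypothetical example: a unit-suffixed column and a per-product-type
# column collapse into a single 'THC' column.
import pandas as pd

df = pd.DataFrame({
    'THC (%)': [21.0, None],
    'THC Raw Plant Material': [None, 18.5],
})
combined = combine_redundant_columns(df, product_types=['Raw Plant Material'])
print(combined)
#     THC
# 0  21.0
# 1  18.5
```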
+
+def standardize_analyte_names(df, analyte_mapping):
+    """Standardize analyte names."""
+    df.columns = [analyte_mapping.get(snake_case(col), snake_case(col)) for col in df.columns]
+    return df
+
+def get_results_md(data_dir: str, output_dir: str) -> pd.DataFrame:
+    """Get results for Maryland."""
+
+    # Get the data files.
+    datafiles = [os.path.join(data_dir, x) for x in os.listdir(data_dir)]
+
+    # Read all of the data.
+    all_data = []
+    for datafile in datafiles:
+
+        # Read the segment of data.
+        df = pd.read_csv(datafile)
+
+        # Pivot the dataframe.
+        results = df.pivot_table(
+            index=[
+                'TestPerformedDate',
+                'PackageId',
+                'StrainName',
+                'TestingFacilityId',
+                'ProductCategoryName',
+            ],
+            columns='TestTypeName',
+            values='TestResultLevel',
+            aggfunc='first'
+        ).reset_index()
+        results = pd.DataFrame(results)
+
+        # Determine the "status" based on the "TestPassed" column.
+        status = df.groupby('PackageId')['TestPassed'].apply(lambda x: 'Fail' if False in x.values else 'Pass')
+        results = results.merge(status, left_on='PackageId', right_index=True)
+
+        # Combine redundant columns.
+        product_types = [
+            'Infused Edible',
+            'Infused Non-Edible',
+            'Non-Solvent Concentrate',
+            'R&D Testing',
+            'Raw Plant Material',
+            'Solvent Based Concentrate',
+            'Sub-Contract',
+            'Whole Wet Plant',
+        ]
+        results = combine_redundant_columns(results, product_types=product_types)
+
+        # Standardize the analyte names.
+        results = standardize_analyte_names(results, ANALYTES)
+        columns = {
+            'testpassed': 'status',
+            'testperformeddate': 'date_tested',
+            'packageid': 'package_id',
+            'strainname': 'strain_name',
+            'testingfacilityid': 'lab_id',
+            'productcategoryname': 'product_type'
+        }
+        results = results.rename(columns=columns)
+
+        # Drop duplicates.
+        results = results.drop_duplicates(subset=['package_id'])
+
+        # Record the data.
+        all_data.extend(results.to_dict(orient='records'))
+        print('Read %i MD lab results from %s.' % (len(results), datafile))
+
+    # Aggregate all lab results.
+    all_results = pd.DataFrame(all_data)
+    print('Aggregated %i MD lab results.' % len(all_results))
+
+    # Save the results.
+    outfile = os.path.join(output_dir, 'md-results-latest.xlsx')
+    outfile_csv = os.path.join(output_dir, 'md-results-latest.csv')
+    outfile_json = os.path.join(output_dir, 'md-results-latest.jsonl')
+    all_results.to_excel(outfile, index=False)
+    all_results.to_csv(outfile_csv, index=False)
+    all_results.to_json(outfile_json, orient='records', lines=True)
+    print('Saved Excel:', outfile)
+    print('Saved CSV:', outfile_csv)
+    print('Saved JSON:', outfile_json)
+
+    # Return the results.
+    return all_results
+
+# === Tests ===
+# [✓] Tested: 2024-07-10 by Keegan Skeate <keegan@cannlytics.com>
+if __name__ == '__main__':
+
+    # Define where the data lives.
+    data_dir = r'D:\data\public-records\Maryland\md-prr-2024-01-02\md-prr-2024-01-02'
+    output_dir = 'D://data/maryland'
+
+    # Curate results.
+    get_results_md(data_dir=data_dir, output_dir=output_dir)
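The `pivot_table` step in `get_results_md` is the core reshaping move: it turns long Metrc-style records (one row per test per package) into wide rows (one row per package, one column per test type). A minimal sketch with made-up values:

```python
# Hypothetical long-format test records, one row per test per package.
import pandas as pd

df = pd.DataFrame([
    {'PackageId': 'ABC123', 'TestTypeName': 'THC (%)', 'TestResultLevel': 21.4},
    {'PackageId': 'ABC123', 'TestTypeName': 'CBD (%)', 'TestResultLevel': 0.1},
    {'PackageId': 'XYZ789', 'TestTypeName': 'THC (%)', 'TestResultLevel': 18.2},
])
wide = df.pivot_table(
    index='PackageId',
    columns='TestTypeName',
    values='TestResultLevel',
    aggfunc='first',  # Keep the first result if a test repeats.
).reset_index()
print(wide)
# Roughly:
#   PackageId  CBD (%)  THC (%)
# 0    ABC123      0.1     21.4
# 1    XYZ789      NaN     18.2
```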
algorithms/get_results_mi.py ADDED
@@ -0,0 +1,461 @@
+"""
+Analyze Results from MI PRR
+Copyright (c) 2023 Cannlytics
+
+Authors: Keegan Skeate <https://github.com/keeganskeate>
+Created: 10/23/2023
+Updated: 7/11/2024
+License: MIT License <https://github.com/cannlytics/cannabis-data-science/blob/main/LICENSE>
+
+Data Sources:
+
+    - Public records request
+
+"""
+# External imports:
+from datetime import datetime
+import os
+import matplotlib.pyplot as plt
+from matplotlib.ticker import StrMethodFormatter
+from matplotlib import cm
+import numpy as np
+import pandas as pd
+import re
+import seaborn as sns
+from scipy import stats
+
+
+# Setup plotting style.
+plt.style.use('fivethirtyeight')
+plt.rcParams.update({
+    'figure.figsize': (12, 8),
+    'font.family': 'Times New Roman',
+    'font.size': 24,
+})
+
+
+def save_figure(filename, dpi=300, bbox_inches='tight'):
+    """Save a figure to the figures directory."""
+    plt.savefig(f'figures/{filename}', bbox_inches=bbox_inches, dpi=dpi)
+
+
+# === Get the data ===
+
+# Read the results.
+data_dir = r'D:\data\public-records\Michigan'
+datafile = os.path.join(data_dir, 'Michigan_Metrc_Flower_Potency_Final_2.17.23.xlsx')
+mi_results = pd.read_excel(datafile)
+
+
+# === Clean the data ===
+
+# Rename certain columns.
+mi_results = mi_results.rename(columns={
+    'ProductName': 'product_name',
+    'ProductCategory': 'product_type',
+    'TestType': 'test_type',
+    'Quantity': 'total_thc',
+    'Licensee': 'lab',
+    'TestPerformedDate': 'date_tested',
+    'Comment': 'notes',
+    'Med AU': 'medical',
+})
+
+# Standardize state.
+state = 'MI'
+mi_results['lab_state'] = state
+mi_results['producer_state'] = state
+
+# Add a date column.
+mi_results['date'] = pd.to_datetime(mi_results['date_tested'], format='mixed')
+mi_results['week'] = mi_results['date'].dt.to_period('W').astype(str)
+mi_results['month'] = mi_results['date'].dt.to_period('M').astype(str)
+mi_results = mi_results.sort_values('date')
+
+# Save the results.
+outfile = 'D://data/michigan/mi-results-latest.xlsx'
+outfile_csv = 'D://data/michigan/mi-results-latest.csv'
+outfile_json = 'D://data/michigan/mi-results-latest.jsonl'
+mi_results.to_excel(outfile, index=False)
+mi_results.to_csv(outfile_csv, index=False)
+mi_results.to_json(outfile_json, orient='records', lines=True)
+print('Saved Excel:', outfile)
+print('Saved CSV:', outfile_csv)
+print('Saved JSON:', outfile_json)
+
+# Print out features.
+features = {x: 'string' for x in mi_results.columns}
+print('Number of features:', len(features))
+print('Features:', features)
+
+
+# === Analyze tests by month. ===
+
+# Exclude outliers.
+sample = mi_results.loc[
+    (mi_results['total_thc'] > 0) &
+    (mi_results['total_thc'] < 100) &
+    (mi_results['product_type'] == 'Flower')
+]
+print('Number of samples:', len(sample))
+
+# Visualize the frequency of tests by month/year.
+test_frequency = sample['month'].value_counts().sort_index()
+subsample = test_frequency[2:-1]
+# Convert the month strings back to timestamps for plotting.
+subsample.index = pd.PeriodIndex(subsample.index, freq='M').to_timestamp()
+plt.figure(figsize=(12, 8))
+sns.lineplot(
+    x=subsample.index,
+    y=subsample.values,
+    marker="o",
+    color="mediumblue"
+)
+plt.title('Monthly Number of Lab Tests in MI')
+plt.ylabel('Number of Tests')
+plt.xlabel('')
+plt.xticks(rotation=45, ha='right')
+plt.grid(True, which='both', linestyle='--', linewidth=0.5)
+plt.tight_layout()
+save_figure('mi-tests-by-month.png')
+plt.show()
+
+
+# === Analyze medical vs. adult-use testing. ===
+
+# Visualize adult-use vs. medical tests over time.
+grouped = sample.groupby(['month', 'medical']).size().reset_index(name='counts')
+pivot_grouped = grouped.pivot(index='month', columns='medical', values='counts').fillna(0)
+pivot_grouped = pivot_grouped.apply(pd.to_numeric, errors='coerce')
+# Convert the month strings back to timestamps for plotting.
+pivot_grouped.index = pd.PeriodIndex(pivot_grouped.index, freq='M').to_timestamp()
+pivot_grouped = pivot_grouped[2:-1]
+plt.figure(figsize=(15, 10))
+for column in pivot_grouped.columns:
+    sns.lineplot(data=pivot_grouped, x=pivot_grouped.index, y=column, marker='o', label=column)
+plt.title('Number of Adult Use vs Medical Tests by Month in MI')
+plt.ylabel('Number of Lab Tests')
+plt.xlabel('')
+plt.xticks(rotation=45, ha='right')
+plt.legend(title='Adult Use / Medical')
+plt.grid(True, which='both', linestyle='--', linewidth=0.5)
+plt.tight_layout()
+save_figure('mi-med-au-tests-by-month.png')
+plt.show()
+
+# Visualize the frequency distribution for medical vs. adult-use in 2022.
+subsample = sample[(sample['date'] >= datetime(2022, 1, 1)) &
+                   (sample['date'] < datetime(2023, 1, 1))]
+med_au_distribution = subsample['medical'].value_counts()
+plt.figure(figsize=(5, 8))
+bar_plot = sns.barplot(x=med_au_distribution.index, y=med_au_distribution.values, palette='tab10')
+plt.title('Adult-Use to Medical Lab Tests in MI in 2022', fontsize=21)
+plt.ylabel('Number of Lab Tests')
+plt.xlabel('')
+for index, value in enumerate(med_au_distribution.values):
+    bar_plot.text(index, value + 0.1, str(value), color='black', ha='center')
+plt.tight_layout()
+save_figure('mi-med-au-frequency.png')
+plt.show()
+
+
+# === Analyze lab market share. ===
+
+# Count the number of labs.
+labs = sample['lab'].unique()
+print('Number of labs:', len(labs))
+
+# Visualize the number of tests by lab in 2021.
+subsample = sample[(sample['date'] >= datetime(2021, 1, 1)) &
+                   (sample['date'] < datetime(2022, 1, 1))]
+lab_results = subsample.groupby('lab')
+tests_by_lab = lab_results['total_thc'].count().sort_values(ascending=False)
+sns.barplot(x=tests_by_lab.index, y=tests_by_lab.values, palette='tab20')
+plt.xticks(rotation=45, ha='right')
+plt.title('Lab Tests in MI in 2021')
+plt.ylabel('Number of Lab Tests')
+plt.xlabel('')
+plt.tight_layout()
+save_figure('mi-tests-by-lab.png')
+plt.show()
+
+# Visualize market share by lab in 2021.
+subsample = sample[(sample['date'] >= datetime(2021, 1, 1)) &
+                   (sample['date'] < datetime(2022, 1, 1))]
+lab_results = subsample.groupby('lab')
+tests_by_lab = lab_results['total_thc'].count().sort_values(ascending=False)
+market_share = tests_by_lab.div(tests_by_lab.sum()).mul(100).round(2)
+sns.barplot(x=market_share.index, y=market_share.values, palette='tab20')
+plt.title('Lab Market Share in MI in 2021')
+plt.ylabel('Market Share (%)')
+plt.xlabel('')
+plt.xticks(rotation=45, ha='right')
+plt.tight_layout()
+save_figure('mi-market-share-by-lab-2021.png')
+plt.show()
+
+# Visualize market share by lab in 2022.
+subsample = sample[(sample['date'] >= datetime(2022, 1, 1)) &
+                   (sample['date'] < datetime(2023, 1, 1))]
+lab_results = subsample.groupby('lab')
+tests_by_lab = lab_results['total_thc'].count().sort_values(ascending=False)
+market_share = tests_by_lab.div(tests_by_lab.sum()).mul(100).round(2)
+sns.barplot(x=market_share.index, y=market_share.values, palette='tab20')
+plt.title('Lab Market Share in MI in 2022')
+plt.ylabel('Market Share (%)')
+plt.xlabel('')
+plt.xticks(rotation=45, ha='right')
+plt.tight_layout()
+save_figure('mi-market-share-by-lab-2022.png')
+plt.show()
+
+
+# === Analyze total THC. ===
+
+# Get a sub-sample.
+subsample = sample[(sample['date'] >= datetime(2022, 1, 1)) &
+                   (sample['date'] < datetime(2023, 1, 1))]
+
+# Visualize the distribution of THC.
+mean_value = subsample['total_thc'].mean()
+quantile_1 = subsample['total_thc'].quantile(0.01)
+quantile_25 = subsample['total_thc'].quantile(0.25)
+quantile_75 = subsample['total_thc'].quantile(0.75)
+quantile_99 = subsample['total_thc'].quantile(0.99)
+plt.figure(figsize=(12, 7))
+sns.histplot(subsample['total_thc'], bins=100, color='lightblue', kde=True)
+plt.axvline(quantile_1, color='blue', linestyle='dashed', linewidth=2, label=f'1st percentile: {quantile_1:.2f}%')
+plt.axvline(quantile_25, color='green', linestyle='dashed', linewidth=2, label=f'25th percentile: {quantile_25:.2f}%')
+plt.axvline(mean_value, color='red', linestyle='dashed', linewidth=2, label=f'Mean: {mean_value:.2f}%')
+plt.axvline(quantile_75, color='darkgreen', linestyle='dashed', linewidth=2, label=f'75th percentile: {quantile_75:.2f}%')
+plt.axvline(quantile_99, color='blue', linestyle='dashed', linewidth=2, label=f'99th percentile: {quantile_99:.2f}%')
+plt.title('Total THC in MI Cannabis Flower in 2022', pad=15)
+plt.xlabel('Total THC (%)')
+plt.ylabel('Number of Tests')
+plt.legend()
+plt.tight_layout()
+save_figure('mi-total-thc-distribution.png')
+plt.show()
+
+# Visualize the difference between medical and adult-use THC.
+plt.figure(figsize=(12, 7))
+sns.histplot(
+    data=subsample,
+    x='total_thc',
+    hue='medical',
+    bins=100,
+    kde=True,
+    palette={'Med': 'blue', 'AU': 'green'},
+    stat='density',
+)
+median_med = subsample[subsample['medical'] == 'Med']['total_thc'].median()
+median_au = subsample[subsample['medical'] == 'AU']['total_thc'].median()
+plt.axvline(median_med, color='blue', linestyle='--', linewidth=1.5, label=f'Medical Median: {median_med:.2f}%')
+plt.axvline(median_au, color='green', linestyle='--', linewidth=1.5, label=f'Adult-Use Median: {median_au:.2f}%')
+plt.title('Total THC for Medical and Adult-Use in MI in 2022', pad=15)
+plt.xlabel('Total THC (%)')
+plt.ylabel('Frequency (%)')
+plt.legend(loc='upper right', bbox_to_anchor=(1.3, 1))
+plt.tight_layout()
+save_figure('mi-med-au-total-thc-distribution.png')
+plt.show()
+
+# Perform a t-test to determine if the difference between medical and adult-use THC is significant.
+med_thc = subsample[subsample['medical'] == 'Med']['total_thc']
+au_thc = subsample[subsample['medical'] == 'AU']['total_thc']
+t_stat, p_val = stats.ttest_ind(med_thc, au_thc, equal_var=True)  # Set equal_var=False for Welch's t-test.
+print(f'T-statistic: {t_stat}')
+print(f'P-value: {p_val}')
+alpha = 0.05
+if p_val < alpha:
+    print('The difference between medical and adult-use THC is statistically significant.')
+else:
+    print('The difference between medical and adult-use THC is not statistically significant.')
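As the inline comment above notes, the same call supports Welch's t-test, which does not assume equal group variances and may be the safer default given the different sizes of the medical and adult-use groups. A minimal sketch reusing the variables already in scope:

```python
# Welch's t-test variant: identical to the call above but with
# equal_var=False, so unequal group variances are not assumed.
t_stat_w, p_val_w = stats.ttest_ind(med_thc, au_thc, equal_var=False)
print(f"Welch's t-statistic: {t_stat_w}, p-value: {p_val_w}")
```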
+
+
+# === Analyze THC by lab. ===
+
+# Visualize average THC percentage for each licensee.
+average_thc_by_licensee = subsample.groupby('lab')['total_thc'].mean()
+average_thc_by_licensee = average_thc_by_licensee.sort_values(ascending=False)
+plt.figure(figsize=(25, 8))
+bar_plot = sns.barplot(x=average_thc_by_licensee.index, y=average_thc_by_licensee.values, palette='tab20')
+plt.title('Average Total THC by Lab in MI in 2022', pad=15)
+plt.ylabel('Average Total THC (%)')
+plt.xlabel('Lab')
+plt.xticks(rotation=45, ha='right')
+for index, value in enumerate(average_thc_by_licensee.values):
+    bar_plot.text(index, value + 0.2, f'{value:.0f}%', color='black', ha='center')
+mean = average_thc_by_licensee.mean()
+plt.axhline(
+    y=mean,
+    color='red',
+    linestyle='--',
+    label=f'MI Avg Total THC: {mean:.2f}%',
+)
+plt.legend()
+plt.tight_layout()
+save_figure('mi-total-thc-by-lab.png')
+plt.show()
+
+
+# === Augment strain data. ===
+
+def extract_strain_name(product_name):
+    """Extract the strain name from the product name."""
+    name = str(product_name)
+    strain_name = re.split(r' - | \| | _ | x | – | — |:|\(|\)|/', name)[0]
+    strain_name = strain_name.split('Buds')[0].strip()
+    strain_name = strain_name.split('Bulk')[0].strip()
+    strain_name = strain_name.split('Flower')[0].strip()
+    strain_name = strain_name.split('Pre-Roll')[0].strip()
+    return strain_name
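A quick illustration of `extract_strain_name` on made-up product names (the inputs are hypothetical, not from the Michigan data): the function keeps the text before the first delimiter and strips common packaging words.

```python
# Hypothetical product names for demonstration only.
examples = [
    'Blue Dream - 3.5g',
    'Gorilla Glue #4 Buds',
    'Wedding Cake | Indoor',
]
for name in examples:
    print(repr(extract_strain_name(name)))
# 'Blue Dream'
# 'Gorilla Glue #4'
# 'Wedding Cake'
```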
+
+
+# Augment strain names.
+sample['strain_name'] = sample['product_name'].apply(extract_strain_name)
+print(sample.sample(10)['strain_name'])
+
+
+# === Analyze strains. ===
+
+# Exclude samples with strain_name set to None, '', or 'Unprocessed'.
+sample = sample[sample['strain_name'].notna()]
+sample = sample[~sample['strain_name'].isin(['', 'Unprocessed'])]
+
+# Standardize strain names.
+sample['strain_name'] = sample['strain_name'].replace({
+    'Gorilla Glue': 'Gorilla Glue #4',
+    'GG4': 'Gorilla Glue #4'
+})
+
+# Restrict the timeframe to 2022.
+subsample = sample[(sample['date'] >= datetime(2022, 1, 1)) &
+                   (sample['date'] < datetime(2023, 1, 1))]
+
+# Visualize the frequency of each strain.
+strain_counts = subsample['strain_name'].value_counts()
+counts = strain_counts.head(20)
+plt.figure(figsize=(13, 13))
+bar_plot = sns.barplot(
+    y=counts.index,
+    x=counts.values,
+    palette='tab20',
+)
+plt.title('Number of Lab Tests for the Top 20 Strains in MI in 2022', pad=15)
+plt.xlabel('')
+plt.ylabel('')
+for index, value in enumerate(counts.values):
+    bar_plot.text(value, index, str(value), color='black', ha='left', va='center')
+plt.tight_layout()
+save_figure('mi-top-strains.png')
+plt.show()
+
+# Visualize the average THC for the top strains.
+avg_thc_per_strain = subsample.groupby('strain_name')['total_thc'].mean().sort_values(ascending=False)
+overall_avg_thc = subsample['total_thc'].mean()
+print('Overall average THC:', round(overall_avg_thc, 2))
+print('99th percentile THC:', round(sample['total_thc'].quantile(0.99), 2))
+top_20_strains = strain_counts.head(20).index
+avg_thc_top_20_strains = avg_thc_per_strain[avg_thc_per_strain.index.isin(top_20_strains)]
+avg_thc_top_20_strains = avg_thc_top_20_strains.loc[top_20_strains]
+print('Average THC for top 20 strains:', round(avg_thc_top_20_strains.mean(), 2))
+plt.figure(figsize=(26, 10))
+bar_plot = sns.barplot(
+    x=avg_thc_top_20_strains.index,
+    y=avg_thc_top_20_strains.values,
+    palette='tab20'
+)
+plt.axhline(
+    y=overall_avg_thc,
+    color='red',
+    linestyle='--',
+    label=f'MI Avg Total THC: {overall_avg_thc:.2f}%',
+)
+plt.title('Average Total THC for the Top 20 Strains in MI in 2022', fontsize=36, pad=15)
+plt.ylabel('Total THC (%)')
+plt.xlabel('')
+plt.xticks(rotation=45, ha='right')
+plt.legend()
+for p in bar_plot.patches:
+    bar_plot.annotate(format(p.get_height(), '.2f') + '%',
+                      (p.get_x() + p.get_width() / 2., p.get_height()),
+                      ha='center', va='center',
+                      xytext=(0, 9),
+                      textcoords='offset points')
+plt.tight_layout()
+save_figure('mi-avg-thc-by-top-20-strains.png')
+plt.show()
+
+# Look at the top adult-use strains.
+adult_use = subsample.loc[subsample['medical'] == 'AU']
+strain_counts = adult_use['strain_name'].value_counts()
+avg_thc_per_strain = adult_use.groupby('strain_name')['total_thc'].mean().sort_values(ascending=False)
+overall_avg_thc = adult_use['total_thc'].mean()
+print('Adult-use average THC:', round(overall_avg_thc, 2))
+print('Adult-use 99th percentile THC:', round(adult_use['total_thc'].quantile(0.99), 2))
+top_20_strains = strain_counts.head(20).index
+avg_thc_top_20_strains = avg_thc_per_strain[avg_thc_per_strain.index.isin(top_20_strains)]
+avg_thc_top_20_strains = avg_thc_top_20_strains.loc[top_20_strains]
+print('Average THC for top 20 adult-use strains:', round(avg_thc_top_20_strains.mean(), 2))
+plt.figure(figsize=(26, 10))
+bar_plot = sns.barplot(
+    x=avg_thc_top_20_strains.index,
+    y=avg_thc_top_20_strains.values,
+    palette='tab20'
+)
+plt.axhline(
+    y=overall_avg_thc,
+    color='red',
+    linestyle='--',
+    label=f'MI Adult-Use Avg Total THC: {overall_avg_thc:.2f}%',
+)
+plt.title('Average Total THC for the Top 20 Adult-Use Strains in MI in 2022', fontsize=36, pad=15)
+plt.ylabel('Total THC (%)')
+plt.xlabel('')
+plt.xticks(rotation=45, ha='right')
+plt.legend()
+for p in bar_plot.patches:
+    bar_plot.annotate(format(p.get_height(), '.2f') + '%',
+                      (p.get_x() + p.get_width() / 2., p.get_height()),
+                      ha='center', va='center',
+                      xytext=(0, 9),
+                      textcoords='offset points')
+plt.tight_layout()
+save_figure('mi-avg-thc-by-top-20-strains-adult-use.png')
+plt.show()
+
+# Look at the top medical strains.
+medical = subsample.loc[subsample['medical'] == 'Med']
+strain_counts = medical['strain_name'].value_counts()
+avg_thc_per_strain = medical.groupby('strain_name')['total_thc'].mean().sort_values(ascending=False)
+overall_avg_thc = medical['total_thc'].mean()
+print('Medical average THC:', round(overall_avg_thc, 2))
+print('Medical 99th percentile THC:', round(medical['total_thc'].quantile(0.99), 2))
+top_20_strains = strain_counts.head(20).index
+avg_thc_top_20_strains = avg_thc_per_strain[avg_thc_per_strain.index.isin(top_20_strains)]
+avg_thc_top_20_strains = avg_thc_top_20_strains.loc[top_20_strains]
+print('Average THC for top 20 medical strains:', round(avg_thc_top_20_strains.mean(), 2))
+plt.figure(figsize=(26, 10))
+bar_plot = sns.barplot(
+    x=avg_thc_top_20_strains.index,
+    y=avg_thc_top_20_strains.values,
+    palette='tab20'
+)
+plt.axhline(
+    y=overall_avg_thc,
+    color='red',
+    linestyle='--',
+    label=f'MI Medical Avg Total THC: {overall_avg_thc:.2f}%',
+)
+plt.title('Average Total THC for the Top 20 Medical Strains in MI in 2022', fontsize=36, pad=15)
+plt.ylabel('Total THC (%)')
+plt.xlabel('')
+plt.xticks(rotation=45, ha='right')
+plt.legend()
+for p in bar_plot.patches:
+    bar_plot.annotate(format(p.get_height(), '.2f') + '%',
+                      (p.get_x() + p.get_width() / 2., p.get_height()),
+                      ha='center', va='center',
+                      xytext=(0, 9),
+                      textcoords='offset points')
+plt.tight_layout()
+save_figure('mi-avg-thc-by-top-20-strains-med.png')
+plt.show()
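The three "top 20 strains" charts above differ only in the subset and labels, so a small helper could remove the duplication. A sketch under that assumption (the helper name is mine, not from the source):

```python
# Hypothetical helper to render the repeated "top 20 strains" chart for
# any subset (overall, adult-use, or medical).
def plot_top_strains(df, label, outfile, n=20):
    counts = df['strain_name'].value_counts()
    avg_thc = df.groupby('strain_name')['total_thc'].mean()
    avg_top = avg_thc.loc[counts.head(n).index]
    overall = df['total_thc'].mean()
    plt.figure(figsize=(26, 10))
    sns.barplot(x=avg_top.index, y=avg_top.values, palette='tab20')
    plt.axhline(y=overall, color='red', linestyle='--',
                label=f'{label} Avg Total THC: {overall:.2f}%')
    plt.title(f'Average Total THC for the Top {n} {label} Strains in MI in 2022', pad=15)
    plt.ylabel('Total THC (%)')
    plt.xticks(rotation=45, ha='right')
    plt.legend()
    plt.tight_layout()
    save_figure(outfile)
    plt.show()

# e.g. plot_top_strains(adult_use, 'Adult-Use', 'mi-avg-thc-top-strains-au.png')
```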
algorithms/get_results_nv.py ADDED
@@ -0,0 +1,440 @@
+"""
+Get Results Nevada
+Copyright (c) 2024 Cannlytics
+
+Authors:
+    Keegan Skeate <https://github.com/keeganskeate>
+Created: 5/25/2024
+Updated: 5/30/2024
+License: CC-BY 4.0 <https://huggingface.co/datasets/cannlytics/cannabis_tests/blob/main/LICENSE>
+
+Description:
+
+    Curate Nevada lab result data obtained through public records requests.
+
+Data Sources:
+
+    - Public records request
+
+"""
+# Standard imports:
+from glob import glob
+import os
+
+# External imports:
+from cannlytics.utils import snake_case
+from cannlytics.utils.constants import ANALYTES
+import pandas as pd
+
+# Define standard columns.
+columns = {
+    'Id': 'sample_id',
+    'PackagedByFacilityName': 'producer',
+    'PackagedByFacilityLicenseNumber': 'producer_license_number',
+    'LabFacilityName': 'lab',
+    'LabFacilityLicenseNumber': 'lab_license_number',
+    'Label': 'label',
+    'PackageType': 'package_type',
+    'Quantity': 'quantity',
+    'UnitOfMeasureId': 'units_id',
+    'UnitOfMeasureName': 'unit_of_measure_name',
+    'UnitOfMeasureAbbreviation': 'unit_of_measure_abbreviation',
+    'ProductName': 'product_name',
+    'ProductCategoryName': 'product_type',
+    'InitialLabTestingState': 'initial_lab_testing_state',
+    'LabTestingState': 'lab_testing_state',
+    'LabTestingStateName': 'lab_testing_state_name',
+    'LabTestingStateDate': 'date_tested',
+    'IsTestingSample': 'is_testing_sample',
+    'IsProcessValidationTestingSample': 'is_process_validation_testing_sample',
+    'ProductRequiresRemediation': 'product_requires_remediation',
+    'ContainsRemediatedProduct': 'contains_remediated_product',
+    'RemediationDate': 'remediation_date',
+    'RemediationRecordedDateTime': 'remediation_recorded_datetime',
+    'PackagedDate': 'date_packaged',
+    'LabTestDetailId': 'lab_test_detail_id',
+    'TestPerformedDate': 'test_performed_date',
+    'LabTestResultDocumentFileId': 'lab_test_result_document_file_id',
+    'OverallPassed': 'overall_passed',
+    'TestTypeName': 'test_type',
+    'TestPassed': 'test_passed',
+    'TestResultLevel': 'test_result',
+    'TestComment': 'test_comment',
+    'ArchivedDate': 'archived_date',
+    'FinishedDate': 'date_finished',
+    'IsOnHold': 'is_on_hold',
+}
+
+# Define the data types for each column.
+dtype_spec = {
+    'Id': str,
+    'PackagedByFacilityName': str,
+    'PackagedByFacilityLicenseNumber': str,
+    'LabFacilityName': str,
+    'LabFacilityLicenseNumber': str,
+    'Label': str,
+    'PackageType': str,
+    'Quantity': float,
+    'UnitOfMeasureId': str,
+    'UnitOfMeasureName': str,
+    'UnitOfMeasureAbbreviation': str,
+    'ProductName': str,
+    'ProductCategoryName': str,
+    'InitialLabTestingState': str,
+    'LabTestingState': str,
+    'LabTestingStateName': str,
+    'LabTestingStateDate': str,
+    'IsTestingSample': bool,
+    'IsProcessValidationTestingSample': bool,
+    'ProductRequiresRemediation': bool,
+    'ContainsRemediatedProduct': bool,
+    'RemediationDate': str,
+    'RemediationRecordedDateTime': str,
+    'PackagedDate': str,
+    'LabTestDetailId': str,
+    'TestPerformedDate': str,
+    'LabTestResultDocumentFileId': str,
+    'OverallPassed': bool,
+    'TestTypeName': str,
+    'TestPassed': bool,
+    'TestResultLevel': str,
+    'TestComment': str,
+    'ArchivedDate': str,
+    'FinishedDate': str,
+    'IsOnHold': bool,
+}
+
+def read_and_standardize_csv(file_path, columns, dtype_spec):
+    """Read a CSV file and standardize the column names."""
+    try:
+        df = pd.read_csv(file_path, dtype=dtype_spec, low_memory=False)
+        df.rename(columns=columns, inplace=True)
+        return df
+    except Exception as e:
+        print(f"Error reading {file_path}: {e}")
+        return pd.DataFrame()
+
+def collect_data(data_dir, columns, dtype_spec):
+    """Collect data from a directory of CSV files."""
+    results = []
+    for root, _, files in os.walk(data_dir):
+        for file in files:
+            if 'no data' in file.lower():
+                continue
+            if file.endswith('.csv'):
+                print('Reading:', file)
+                file_path = os.path.join(root, file)
+                df = read_and_standardize_csv(file_path, columns, dtype_spec)
+                if not df.empty:
+                    results.append(df)
+    return pd.concat(results, ignore_index=True)
+
+def standardize_analyte_names(df, analyte_mapping):
+    """Standardize analyte names."""
+    df.columns = [analyte_mapping.get(snake_case(col), snake_case(col)) for col in df.columns]
+    return df
+
+def augment_calculations(
+    df,
+    cannabinoids=None,
+    terpenes=None,
+    delta_9_thc='delta_9_thc',
+    thca='thca',
+    cbd='cbd',
+    cbda='cbda',
+):
+    """Augment the DataFrame with additional calculated fields."""
+    # Calculate total cannabinoids.
+    if cannabinoids is not None:
+        df['total_cannabinoids'] = round(df[cannabinoids].sum(axis=1), 2)
+
+    # Calculate total terpenes.
+    if terpenes is not None:
+        df['total_terpenes'] = round(df[terpenes].sum(axis=1), 2)
+
+    # Calculate the total THC to total CBD ratio.
+    df['total_thc'] = round(df[delta_9_thc] + 0.877 * df[thca], 2)
+    df['total_cbd'] = round(df[cbd] + 0.877 * df[cbda], 2)
+    df['thc_cbd_ratio'] = round(df['total_thc'] / df['total_cbd'], 2)
+
+    # Calculate the total cannabinoids to total terpenes ratio.
+    if cannabinoids is not None and terpenes is not None:
+        df['cannabinoids_terpenes_ratio'] = round(df['total_cannabinoids'] / df['total_terpenes'], 2)
+
+    # Return the augmented data.
+    return df
+
167
+ def combine_redundant_columns(df, product_types=None, verbose=False):
168
+ """Combine redundant columns and extract units and product types."""
169
+ combined_results = {}
170
+ for col in df.columns:
171
+ matched = False
172
+ if product_types is not None:
173
+ for product_type in product_types:
174
+ if product_type in col and '(' not in col:
175
+ base_name = col.split(product_type)[0].strip()
176
+ if base_name not in combined_results:
177
+ combined_results[base_name] = df[col]
178
+ if verbose:
179
+ print('New column:', base_name)
180
+ else:
181
+ combined_results[base_name] = combined_results[base_name].fillna(df[col])
182
+ if verbose:
183
+ print('Combined column:', base_name)
184
+ matched = True
185
+ if matched:
186
+ continue
187
+ if '(' in col and ')' in col:
188
+ base_name = col.split('(')[0].strip()
189
+ if base_name not in combined_results:
190
+ combined_results[base_name] = df[col]
191
+ if verbose:
192
+ print('New column:', base_name)
193
+ else:
194
+ combined_results[base_name] = combined_results[base_name].fillna(df[col])
195
+ if verbose:
196
+ print('Combined column:', base_name)
197
+ elif col not in combined_results:
198
+ if verbose:
199
+ print('New column:', col)
200
+ combined_results[col] = df[col]
201
+ return pd.DataFrame(combined_results)
202
+
203
+ def combine_similar_columns(df, similar_columns):
204
+ """Combine similar columns with different spellings or capitalization."""
205
+ for target_col, col_variants in similar_columns.items():
206
+ if target_col not in df.columns:
207
+ df[target_col] = pd.NA
208
+ for col in col_variants:
209
+ if col in df.columns:
210
+ df[target_col] = df[target_col].combine_first(df[col])
211
+ df.drop(columns=[col], inplace=True)
212
+ return df
213
+
214
+ def augment_metadata(results, data, sample_columns, boolean_columns,):
215
+ """Reattach missing columns from `data` to `results`."""
216
+ for col in sample_columns:
217
+ if col not in results.columns:
218
+ results[col] = results['label'].map(data.drop_duplicates('label').set_index('label')[col])
219
+ for col in boolean_columns:
220
+ if col not in results.columns:
221
+ results[col] = results['label'].map(data.groupby('label')[col].transform(lambda x: any(x) if x.name in ['overall_passed', 'test_passed'] else all(x)))
222
+ return results
223
+
224
+ def get_results_nv(
225
+ data_dir: str,
226
+ output_dir: str,
227
+ licenses_dir: str,
228
+ labs_dir: str,
229
+ ) -> pd.DataFrame:
230
+ """Get results for Oregon."""
231
+
232
+ # === Read the results ===
233
+
234
+ # Collect Nevada lab results
235
+ data = collect_data(data_dir, columns, dtype_spec)
236
+
237
+ # === Standardize the results ===
238
+
239
+ # Pivot the data to get results for each package label
240
+ results = data.pivot_table(
241
+ index=['label', 'producer', 'lab', 'product_name', 'product_type', 'date_tested', 'date_packaged', 'date_finished'],
242
+ columns='test_type',
243
+ values='test_result',
244
+ aggfunc='first'
245
+ ).reset_index()
246
+ print('Number of Nevada test samples:', len(results))
247
+
248
+ # Combine redundant columns
249
+ product_types = [
250
+ 'Infused Edible',
251
+ 'Infused Non-Edible',
252
+ 'Non-Solvent Concentrate',
253
+ 'R&D Testing',
254
+ 'Raw Plant Material',
255
+ 'Solvent Based Concentrate',
256
+ 'Sub-Contract',
257
+ 'Whole Wet Plant',
258
+ ]
259
+ results = combine_redundant_columns(results, product_types=product_types)
260
+ print('Combined redundant columns.')
261
+
262
+ # Combine similar columns.
263
+ similar_columns = {
264
+ 'beta_pinene': ['Beta Pinene', 'Beta-Pinene'],
265
+ 'caryophyllene_oxide': ['Carophyllene Oxide', 'Caryophyllene Oxide'],
266
+ 'delta_8_thc': ['Delta 8 THC', 'Delta-8 THC'],
267
+ 'delta_9_thc': ['Delta 9 THC', 'Delta-9 THC'],
268
+ 'thca': ['THCA', 'THCa'],
269
+ 'total_yeast_and_mold': ['Total Yeast and Mold', 'Yeast and Mold']
270
+ }
271
+ results = combine_similar_columns(results, similar_columns)
272
+ print('Combined similar columns.')
273
+
274
+ # Standardize the analyte names
275
+ results = standardize_analyte_names(results, ANALYTES)
276
+ print('Standardized analyte names.')
277
+
278
+ # Drop nuisance columns.
279
+ drop = ['']
280
+ results = results.drop(columns=drop, errors='ignore')
281
+
282
+ # Ensure all numeric columns are numeric.
283
+ non_numeric = [
284
+ 'label', 'producer', 'lab', 'product_name',
285
+ 'product_type', 'date_tested', 'date_packaged', 'date_finished'
286
+ ]
287
+ numeric_cols = results.columns.difference(non_numeric)
288
+ for col in numeric_cols:
289
+ results[col] = pd.to_numeric(results[col], errors='coerce')
290
+ print('Converted columns to numeric.')
291
+
292
+ # Augment metadata.
293
+ sample_columns = [
294
+ 'sample_id', 'package_type', 'quantity', 'units_id', 'unit_of_measure_name',
295
+ 'unit_of_measure_abbreviation', 'lab_testing_state', 'lab_testing_state_name',
296
+ 'remediation_date', 'remediation_recorded_datetime', 'lab_test_detail_id',
297
+ 'test_performed_date', 'lab_test_result_document_file_id', 'archived_date',
298
+ 'lab_license_number', 'producer_license_number'
299
+ ]
300
+ boolean_columns = [
301
+ 'contains_remediated_product', 'product_requires_remediation', 'is_on_hold',
302
+ 'is_process_validation_testing_sample', 'is_testing_sample', 'overall_passed', 'test_passed'
303
+ ]
304
+ results = augment_metadata(results, data, sample_columns, boolean_columns)
305
+ print('Augmented metadata.')
306
+
307
+ # Augment additional calculated metrics.
308
+ cannabinoids = ['cbd', 'cbda', 'cbn', 'delta_8_thc', 'delta_9_thc', 'thca']
309
+ terpenes = [
310
+ 'alpha_bisabolol', 'alpha_humulene', 'alpha_pinene', 'alpha_terpinene',
311
+ 'terpinolene', 'beta_pinene', 'beta_caryophyllene', 'beta_myrcene',
312
+ 'd_limonene', 'linalool', 'caryophyllene_oxide', 'other_terpenes'
313
+ ]
314
+ results = augment_calculations(results, cannabinoids, terpenes)
315
+ print('Augmented fields.')
316
+
317
+ # Convert dates to datetime and ensure they are timezone unaware.
318
+ date_columns = [
319
+ 'date_tested', 'test_performed_date', 'date_packaged',
320
+ 'date_finished', 'remediation_date', 'archived_date'
321
+ ]
322
+ for col in date_columns:
323
+ if col in results.columns:
324
+ results[col] = pd.to_datetime(results[col], errors='coerce').dt.tz_localize(None)
325
+
326
+ # === Augment licensee data. ===
327
+
328
+ # Read NV lab license data.
329
+ lab_columns = {
330
+ 'CEID': 'lab_id',
331
+ 'premise_county': 'lab_county',
332
+ 'premise_state': 'lab_state',
333
+ }
334
+ lab_licenses = pd.read_csv(labs_dir, low_memory=False)
335
+ lab_licenses['license_number'] = lab_licenses['license_number'].astype(str)
336
+ lab_licenses.set_index('license_number', inplace=True)
337
+ lab_licenses.rename(columns=lab_columns, inplace=True)
338
+
339
+ # Read NV licenses.
340
+ license_columns = {
341
+ 'CEID': 'producer_id',
342
+ 'license_type': 'producer_license_type',
343
+ 'premise_county': 'producer_county',
344
+ 'premise_state': 'producer_state',
345
+ 'business_legal_name': 'producer_legal_name',
346
+ }
347
+ license_files = sorted(
348
+ glob(os.path.join(licenses_dir, '*licenses*.csv')),
349
+ key=os.path.getmtime,
350
+ reverse=True
351
+ )
352
+ all_licenses = pd.concat(
353
+ (pd.read_csv(file, low_memory=False) for file in license_files),
354
+ ignore_index=True
355
+ )
356
+ all_licenses['license_number'] = all_licenses['license_number'].astype(str)
357
+ all_licenses = all_licenses.drop_duplicates(subset='license_number', keep='first')
358
+ all_licenses.set_index('license_number', inplace=True)
359
+ all_licenses.rename(columns=license_columns, inplace=True)
360
+
361
+ # Augment lab license data.
362
+ labs = list(results['lab_license_number'].unique())
363
+ for lab in labs:
364
+ if lab in lab_licenses.index:
365
+ license_data = lab_licenses.loc[lab]
366
+ for key in lab_columns.values():
367
+ if key in lab_licenses.columns:
368
+ # FIXME: Does this need to be changed?
369
+ results[key] = results['lab_license_number'].map(lab_licenses[key])
370
+ # results[key] = results['lab_license_number'].map(license_data[key])
371
+
372
+ # Augment producer license data.
373
+ producers = list(results['producer_license_number'].unique())
374
+ for producer in producers:
375
+ if producer in all_licenses.index:
376
+ license_data = all_licenses.loc[producer]
377
+ for key in license_columns.values():
378
+ if key in all_licenses.columns:
379
+ # FIXME: Does this need to be changed?
380
+ results[key] = results['producer_license_number'].map(all_licenses[key])
381
+ # results[key] = results['lab_license_number'].map(license_data[key])
382
+
383
+ # === Save the results. ===
384
+
385
+ # Sort the columns.
386
+ non_numeric_cols = non_numeric + sample_columns + boolean_columns + date_columns
387
+ non_numeric_cols += list(lab_columns.values()) + list(license_columns.values())
388
+ numeric_cols = [col for col in results.columns if col not in non_numeric_cols]
389
+ numeric_cols_sorted = sorted(numeric_cols)
390
+ results = results[non_numeric_cols + numeric_cols_sorted]
391
+
392
+ # # Save the results with copyright and sources sheets.
393
+ # stats_dir = 'D://data/nevada/results/datasets'
394
+ # date = datetime.now().strftime('%Y-%m-%d')
395
+ # if not os.path.exists(stats_dir): os.makedirs(stats_dir)
396
+ # outfile = f'{stats_dir}/nv-results-{date}.xlsx'
397
+ # save_with_copyright(
398
+ # results,
399
+ # outfile,
400
+ # dataset_name='Nevada Cannabis Lab Results',
401
+ # author='Keegan Skeate',
402
+ # publisher='Cannlytics',
403
+ # sources=['Nevada Cannabis Compliance Board'],
404
+ # source_urls=['https://ccb.nv.gov/'],
405
+ # )
406
+ # print('Saved Nevada lab results:', outfile)
407
+
408
+ # Save the results.
409
+ outfile = os.path.join(output_dir, 'nv-results-latest.xlsx')
410
+ outfile_csv = os.path.join(output_dir, 'nv-results-latest.csv')
411
+ outfile_json = os.path.join(output_dir, 'nv-results-latest.jsonl')
412
+ results.to_excel(outfile, index=False)
413
+ results.to_csv(outfile_csv, index=False)
414
+ # FIXME: This causes an ValueError
415
+ # ValueError: DataFrame columns must be unique for orient='records'.
416
+ # results.to_json(outfile_json, orient='records', lines=True)
417
+ print('Saved Excel:', outfile)
418
+ print('Saved CSV:', outfile_csv)
419
+ # print('Saved JSON:', outfile_json)
420
+
421
+ # Return the results.
422
+ return results
423
+
424
+ # === Test ===
425
+ # [✓] Tested: 2024-07-10 by Keegan Skeate <keegan@cannlytics>
426
+ if __name__ == '__main__':
427
+
428
+ # Define where the data lives.
429
+ data_dir = 'D://data/public-records/Nevada-001'
430
+ licenses_dir = r"C:\Users\keega\Documents\cannlytics\cannlytics\datasets\cannabis_licenses\data\nv"
431
+ labs_dir = r"C:\Users\keega\Documents\cannlytics\cannlytics\datasets\cannabis_licenses\data\nv\labs-nv-2023-12-17T11-41-34.csv"
432
+ output_dir = 'D://data/nevada/results/datasets'
433
+
434
+ # Curate results.
435
+ get_results_nv(
436
+ data_dir=data_dir,
437
+ output_dir=output_dir,
438
+ licenses_dir=licenses_dir,
439
+ labs_dir=labs_dir,
440
+ )
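One note on the license augmentation in `get_results_nv`: because `Series.map` already maps every row against the full license table, the surrounding `for lab in labs` / `for producer in producers` loops repeat the same assignment once per licensee (which is what the FIXME comments appear to be asking about). A sketch of the equivalent single-pass version, under the assumption that mapping the whole column is the intent:

```python
# Hypothetical simplification: map each license column once, no loops.
for key in lab_columns.values():
    if key in lab_licenses.columns:
        results[key] = results['lab_license_number'].map(lab_licenses[key])
for key in license_columns.values():
    if key in all_licenses.columns:
        results[key] = results['producer_license_number'].map(all_licenses[key])
```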
algorithms/get_results_ny.py ADDED
@@ -0,0 +1,718 @@
+"""
+Get Results | New York
+Copyright (c) 2024 Cannlytics
+
+Authors: Keegan Skeate <https://github.com/keeganskeate>
+Created: 6/24/2024
+Updated: 6/24/2024
+License: MIT License <https://github.com/cannlytics/cannlytics/blob/main/LICENSE>
+
+Data Sources:
+
+    - [Jetty Extracts](https://jettyextracts.com/coa-new-york/)
+    - [MFNY](https://www.mycoa.info/)
+    - [Hudson Cannabis](https://www.hudsoncannabis.co/coas)
+    - [NYSCannabis](https://www.reddit.com/r/NYSCannabis)
+
+"""
+# Standard imports:
+from datetime import datetime
+import json
+import os
+import shutil
+from time import sleep
+from urllib.parse import urljoin
+
+# External imports:
+from bs4 import BeautifulSoup
+from cannlytics.data.cache import Bogart
+from cannlytics.data.coas import CoADoc
+from cannlytics.data.web import initialize_selenium, download_google_drive_file
+from cannlytics.utils.constants import DEFAULT_HEADERS
+from cannlytics.utils.utils import (
+    download_file_with_selenium,
+    remove_duplicate_files,
+    kebab_case,
+)
+from dotenv import dotenv_values
+import gdown
+import logging
+import pandas as pd
+import praw
+import requests
+import tempfile
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait
+
+
+#-----------------------------------------------------------------------
+# Setup.
+#-----------------------------------------------------------------------
+
+# Define where the Reddit data will be stored.
+data_dir = r"D:\data\new-york\NYSCannabis"
+
+# Create a directory to store the downloaded images.
+images_directory = 'D://data/new-york/NYSCannabis/images'
+os.makedirs(images_directory, exist_ok=True)
+
+# Set up logging.
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+
+
+#-----------------------------------------------------------------------
+# Get Jetty Extracts COAs.
+#-----------------------------------------------------------------------
+try:
+
+    # Define the URL.
+    url = 'https://jettyextracts.com/coa-new-york/'
+
+    # Define the PDF directory.
+    pdf_dir = 'D://data/new-york/jetty-extracts/pdfs'
+    os.makedirs(pdf_dir, exist_ok=True)
+
+    # TODO: Download the CSV programmatically.
+    datafile = r"D:\data\new-york\jetty-extracts\jetty-extracts-coas-2024-06-24.csv"
+    coas = pd.read_csv(datafile)
+
+    # Download the COAs from the CSV.
+    last_column = coas.columns[-1]
+    folder_urls = coas[last_column].values
+    for folder_url in reversed(folder_urls):
+        try:
+            gdown.download_folder(folder_url, output=pdf_dir, quiet=False)
+            sleep(3.33)
+        except Exception:
+            print('Failed to download:', folder_url)
+
+except Exception:
+    print('Failed to download Jetty Extracts COAs.')
+
+
+#-----------------------------------------------------------------------
+# Get My COAs.
+#-----------------------------------------------------------------------
+
+try:
+
+    # Define the URL.
+    url = 'https://www.mycoa.info/'
+
+    # Define the PDF directory.
+    pdf_dir = 'D://data/new-york/my-coa/pdfs'
+    os.makedirs(pdf_dir, exist_ok=True)
+
+    # Get all of the PDF links.
+    driver = initialize_selenium(headless=False, download_dir=pdf_dir)
+    driver.get(url)
+    sleep(5)
+    pdf_links = driver.find_elements(By.XPATH, "//a[contains(@href, 'dropbox.com/s')]")
+    pdf_urls = [link.get_attribute('href') for link in pdf_links]
+    print(f'Found {len(pdf_links)} PDF links.')
+
+    # Download all of the PDFs from Dropbox.
+    for pdf_url in pdf_urls:
+        driver.get(pdf_url)
+        sleep(3.33)
+        wait = WebDriverWait(driver, 10)
+        download_button = wait.until(EC.presence_of_element_located((By.XPATH, "//button[@aria-label='Download']")))
+        download_button.click()
+        sleep(3.33)
+        print('Downloaded:', pdf_url)
+    print('All PDFs have been downloaded.')
+
+    # Close the Selenium driver.
+    driver.quit()
+
+except Exception:
+    print('Failed to download My COAs.')
+
+
+#-----------------------------------------------------------------------
+# Get Hudson Cannabis COAs.
+#-----------------------------------------------------------------------
+try:
+
+    # Define the URL.
+    url = 'https://www.hudsoncannabis.co/coas'
+
+    # Define the PDF directory.
+    pdf_dir = 'D://data/new-york/hudson-cannabis/pdfs'
+    os.makedirs(pdf_dir, exist_ok=True)
+
+    # Find all of the PDF links.
+    driver = initialize_selenium(headless=False, download_dir=pdf_dir)
+    driver.get(url)
+    wait = WebDriverWait(driver, 10)
+    wait.until(EC.presence_of_element_located((By.ID, "root")))
+    sleep(5)
+    pdf_links = driver.find_elements(By.XPATH, "//a[contains(@href, 'drive.google.com/file')]")
+    print(f'Found {len(pdf_links)} PDF links.')
+
+    # Download each PDF.
+    for link in pdf_links:
+        pdf_url = link.get_attribute('href')
+        pdf_name = pdf_url.split('/')[-2] + '.pdf'
+        save_path = os.path.join(pdf_dir, pdf_name)
+        print(f'Downloading {pdf_name} from {pdf_url}')
+        download_google_drive_file(pdf_url, save_path)
+        sleep(3.33)
+
+    print('All PDFs have been downloaded.')
+
+    # Close the Selenium driver.
+    driver.quit()
+
+except Exception:
+    print('Failed to download Hudson Cannabis COAs.')
+
+
+#-----------------------------------------------------------------------
+# Get Reddit posts with Selenium.
+#-----------------------------------------------------------------------
+
+def get_reddit_posts(driver, data, recorded_posts):
+    """Get the posts from the Reddit page."""
+    page_source = driver.page_source
+    soup = BeautifulSoup(page_source, 'html.parser')
+    posts = soup.find_all('shreddit-post')
+    for post in posts:
+        post_id = post.get('id')
+        if post_id in recorded_posts:
+            continue
+        recorded_posts.append(post_id)
+        title = post.get('post-title')
+        url = post.get('content-href')
+        created_timestamp = post.get('created-timestamp')
+        author_id = post.get('author-id')
+        author = post.get('author')
+        number_comments = post.get('comment-count')
+        subreddit_id = post.get('subreddit-id')
+        subreddit_name = post.get('subreddit-prefixed-name')
+        data.append({
+            'title': title,
+            'url': url,
+            'created_timestamp': created_timestamp,
+            'author_id': author_id,
+            'author': author,
+            'post_id': post_id,
+            'number_comments': number_comments,
+            'subreddit_id': subreddit_id,
+            'subreddit_name': subreddit_name,
+        })
+    print(f'Number of posts: {len(data)}')
+    return data, recorded_posts
+
+
+# Get the Subreddit page.
+# Note: This requires being logged in to Reddit in the browser.
+driver = initialize_selenium(headless=False)
+query = 'COA'
+queries = [
+    'COA',
+    'COA attached',
+    'COA in',  # e.g. in the comments, included, etc.
+    'Certificate',
+    'Certificate of Analysis',
+    'lab results',
+    'test results',
+    'results',
+    'effect',
+    'aroma',
+    'taste',
+    'smell',
+    'flavor',
+]
+sort_by = 'new'
+subreddit = 'NYSCannabis'
+driver.get(f"https://www.reddit.com/r/{subreddit}/search/?q={query}&sort={sort_by}")
+sleep(5)
+
+# Manual iteration of queries here to collect post details.
+data, recorded_posts = [], []
+data, recorded_posts = get_reddit_posts(driver, data, recorded_posts)
+
+# Close the driver.
+driver.close()
+driver.quit()
+
+# Save the post data.
+data_dir = r"D:\data\new-york\NYSCannabis"
+timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
+datafile = os.path.join(data_dir, f'ny-reddit-posts-{timestamp}.xlsx')
+df = pd.DataFrame(data)
+df.to_excel(datafile, index=False)
+print('Saved post data:', datafile)
247
+ print('Saved post data:', datafile)
248
+
249
+
250
+ #-----------------------------------------------------------------------
251
+ # Get Reddit post data with the Reddit API.
252
+ #-----------------------------------------------------------------------
253
+
254
+
255
+ def initialize_reddit(config):
256
+ reddit = praw.Reddit(
257
+ client_id=config['REDDIT_CLIENT_ID'],
258
+ client_secret=config['REDDIT_SECRET'],
259
+ password=config['REDDIT_PASSWORD'],
260
+ user_agent=config['REDDIT_USER_AGENT'],
261
+ username=config['REDDIT_USERNAME'],
262
+ )
263
+ return reddit
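`initialize_reddit` expects Reddit API credentials loaded from a `.env` file via `dotenv_values`. A sketch of the expected keys and usage (the values are placeholders; only the key names are taken from the function above):

```python
# Hypothetical .env contents read by dotenv_values('.env'):
#   REDDIT_CLIENT_ID=<your-client-id>
#   REDDIT_SECRET=<your-client-secret>
#   REDDIT_USERNAME=<your-username>
#   REDDIT_PASSWORD=<your-password>
#   REDDIT_USER_AGENT=<your-user-agent-string>
config = dotenv_values('.env')
reddit = initialize_reddit(config)
```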
+
+
+# DEV:
+# data = pd.read_excel(r"C:\Users\keega\Documents\cannlytics\cannabis-data-science\season-4\155-seed-to-smoke\data\fl-medical-trees-posts-2024-05-07-11-45-14.xlsx")
+# data = data.to_dict(orient='records')
+# recorded_posts = [x['post_id'] for x in data]
+
+# # Read already collected posts.
+# data_dir = r"D:\data\new-york\NYSCannabis"
+# post_datafiles = [os.path.join(data_dir, x) for x in os.listdir(data_dir) if 'posts' in x and 'results' not in x]
+# posts = pd.concat([pd.read_excel(x) for x in post_datafiles])
+# posts.drop_duplicates(subset=['post_id', 'coa_url', 'redirect_url'], inplace=True)
+# collected_posts = list(set(posts['post_id'].values) - set(recorded_posts))
+# print('Total number of already collected posts:', len(collected_posts))
+# print('Number of posts to collect:', len(data) - len(collected_posts))
+
+
+def get_post_content(reddit, post_id, config):
+    """Retrieve the post content."""
+    try:
+        submission = reddit.submission(id=post_id)
+    except Exception:
+        try:
+            print('Failed to retrieve post:', post_id)
+            print('Waiting 60 seconds to retry...')
+            sleep(61)
+            reddit = initialize_reddit(config)
+            submission = reddit.submission(id=post_id)
+        except Exception:
+            print('Failed to retrieve post:', post_id)
+            print('Waiting 60 seconds to retry...')
+            sleep(61)
+            reddit = initialize_reddit(config)
+            submission = reddit.submission(id=post_id)
+    return submission
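The nested try/except blocks in `get_post_content` (and again in `download_post_images` below) implement the same retry-after-a-pause pattern by hand. A generic helper could express it once; a sketch (the helper is mine, not from the source):

```python
# Hypothetical retry wrapper for flaky API calls.
def with_retries(func, attempts=3, wait=61):
    """Call `func`, retrying after a pause on failure."""
    for attempt in range(attempts):
        try:
            return func()
        except Exception as e:
            if attempt == attempts - 1:
                raise
            print(f'Attempt {attempt + 1} failed ({e}); retrying in {wait}s...')
            sleep(wait)

# e.g. submission = with_retries(lambda: reddit.submission(id=post_id))
```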
299
+
300
+
301
+ def get_post_images(submission):
302
+ images = []
303
+ if 'imgur.com' in submission.url or submission.url.endswith(('.jpg', '.jpeg', '.png', '.gif')):
304
+ images.append(submission.url)
305
+ try:
306
+ if submission.is_gallery:
307
+ image_dict = submission.media_metadata
308
+ for image_item in image_dict.values():
309
+ try:
310
+ largest_image = image_item['s']
311
+ image_url = largest_image['u']
312
+ images.append(image_url)
313
+ except KeyError:
314
+ pass
315
+ except AttributeError:
316
+ pass
317
+ return images
318
+
319
+
320
+ def download_post_images(post_id, images, images_directory):
321
+ for i, image_url in enumerate(images, start=1):
322
+ file_extension = os.path.splitext(image_url)[-1].split('?')[0]
323
+ filename = f"{post_id}_image_{i}{file_extension}"
324
+ if file_extension not in ['.jpg', '.jpeg', '.png', '.gif']:
325
+ filename = f"{post_id}_image_{i}.jpg"
326
+ outfile = os.path.join(images_directory, filename)
327
+ if os.path.exists(outfile):
328
+ continue
329
+ try:
330
+ response = requests.get(image_url, headers={'User-agent': 'CannBot'})
331
+ except:
332
+ try:
333
+ print('Failed to download image:', image_url)
334
+ print('Waiting 60 seconds to retry...')
335
+ sleep(60)
336
+ response = requests.get(image_url, headers={'User-agent': 'CannBot'})
337
+ except:
338
+ print('Failed to download image:', image_url)
339
+ print('Waiting 60 seconds to retry...')
340
+ sleep(60)
341
+ response = requests.get(image_url, headers={'User-agent': 'CannBot'})
342
+ sleep(3.33)
343
+ if response.status_code != 200:
344
+ print('Unsuccessful request for image:', image_url)
345
+ continue
346
+ with open(outfile, 'wb') as file:
347
+ file.write(response.content)
348
+ print(f"Downloaded image: {outfile}")
349
+
350
+
351
+ def get_post_comments(submission):
352
+ """Retrieve the post comments."""
353
+ comments = []
354
+ submission.comments.replace_more(limit=None)
355
+ for comment in submission.comments.list():
356
+ comments.append({
357
+ 'comment_id': comment.id,
358
+ 'comment_author': comment.author.name if comment.author else None,
359
+ 'comment_body': comment.body,
360
+ 'comment_created_utc': datetime.utcfromtimestamp(comment.created_utc).strftime('%Y-%m-%d %H:%M:%S')
361
+ })
362
+ return comments
363
+
364
+
365
+ def get_reddit_post_data(all_posts=None, collected_posts=None, data=None):
366
+ """Get the data for each post."""
367
+
368
+ # Initialize Reddit.
369
+ config = dotenv_values('.env')
370
+ reddit = initialize_reddit(config)
371
+
372
+ # Get each post page and data for each post.
373
+ if all_posts is None: all_posts = []
374
+ if collected_posts is None: collected_posts = []
375
+ for n, post_data in enumerate(data[len(all_posts):]):
376
+
377
+ # Retrieve the post content.
378
+ post_id = post_data['post_id'].split('_')[-1]
379
+ if post_id in collected_posts:
380
+ print('Post already collected:', post_id)
381
+ continue
382
+ print('Getting data for post:', post_id)
383
+ submission = get_post_content(reddit, post_id, config)
384
+ post_content = submission.selftext
385
+
386
+ # Retrieve images.
387
+ images = get_post_images(submission)
388
+
389
+ # Download images.
390
+ download_post_images(post_id, images, images_directory)
391
+
392
+ # Retrieve comments.
393
+ comments = get_post_comments(submission)
394
+
395
+ # Update post_data with the retrieved information.
396
+ post_data['post_content'] = post_content
397
+ post_data['upvotes'] = submission.ups
398
+ post_data['downvotes'] = submission.downs
399
+ post_data['images'] = images
400
+ post_data['comments'] = comments
401
+ all_posts.append(post_data)
402
+ print('Post data retrieved:', submission.title)
403
+             sleep(3.33)
+
+     # Return the collected post data.
+     return all_posts
404
+
405
+
406
+ def save_post_data(all_posts, data_dir, namespace):
407
+ """Save the post data."""
408
+ try:
409
+ df = pd.DataFrame(all_posts)
410
+ timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
411
+ datafile = os.path.join(data_dir, f'{namespace}-{timestamp}.xlsx')
412
+ df.to_excel(datafile, index=False)
413
+ print('Saved post data:', datafile)
414
+     except Exception as e:
+         print('No posts to curate:', e)
416
+
417
+
418
+ # Get all of the post data.
419
+ all_posts = get_reddit_post_data(data=data)
420
+
421
+ # Save the post data.
422
+ save_post_data(all_posts, data_dir, 'ny-reddit-coa-posts')
423
+
424
+
425
+ #-----------------------------------------------------------------------
426
+ # Parse COA URLs from images.
427
+ #-----------------------------------------------------------------------
428
+
429
+ # FIXME: Read saved COA URLs.
430
+ coa_urls = {}
431
+ # text_file = r"C:\Users\keega\Documents\cannlytics\cannabis-data-science\season-4\158-reported-effects\data\scanned-images.txt"
432
+ # with open(text_file, 'r') as file:
433
+ # lines = file.readlines()
434
+ # coa_urls = {}
435
+ # for line in lines:
436
+ # if 'COA URL found for post' in line:
437
+ # post_id, coa_url = line.split(':', maxsplit=1)
438
+ # post_id = 't3_' + post_id.split(' ')[-1].strip()
439
+ # coa_url = coa_url.strip()
440
+ # post_urls = coa_urls.get(post_id, [])
441
+ # post_urls.append(coa_url)
442
+ # coa_urls[post_id] = list(set(post_urls))
443
+ # print('Number of COA URLs:', len(coa_urls))
444
+
445
+ # Initialize CoADoc.
446
+ parser = CoADoc()
447
+ temp_path = tempfile.mkdtemp()
448
+
449
+ # Scan all images for COA URLs.
450
+ image_files = os.listdir(images_directory)
451
+ image_files = [os.path.join(images_directory, x) for x in image_files]
452
+ print('Number of images:', len(image_files))
453
+ for image_file in image_files:
454
+ post_id = os.path.basename(image_file).split('_')[0]
455
+ if post_id in coa_urls:
456
+ continue
457
+ post_urls = coa_urls.get(post_id, [])
458
+ print('Scanning image:', image_file)
459
+ try:
460
+ coa_url = parser.scan(
461
+ image_file,
462
+ temp_path=temp_path,
463
+ )
464
+     except Exception as e:
+         print('Failed to scan:', image_file, e)
466
+ continue
467
+ if coa_url:
468
+ print(f"COA URL found for post {post_id}: {coa_url}")
469
+ post_urls.append(coa_url)
470
+ coa_urls[post_id] = list(set(post_urls))
471
+ else:
472
+ print(f"No COA URL found for post {post_id}.")
473
+
474
+ # Clean up the temporary directory.
475
+ try:
476
+ shutil.rmtree(temp_path)
477
+ except OSError:
478
+ pass
479
+
480
+
481
+ #-----------------------------------------------------------------------
482
+ # Download COA PDFs using the COA URLs.
483
+ #-----------------------------------------------------------------------
484
+
485
+ def download_kaycha_coa(url, outfile):
486
+ """Download a Kaycha Labs COA."""
487
+ base = 'https://yourcoa.com'
488
+ sample_id = url.split('/')[-1].split('?')[0].split('&')[0]
489
+ if sample_id == 'coa-download':
490
+ sample_id = url.split('sample=')[-1]
491
+ try:
492
+ coa_url = f'{base}/coa/download?sample={sample_id}'
493
+ response = requests.get(coa_url, headers=DEFAULT_HEADERS)
494
+ if response.status_code == 200:
495
+ if len(response.content) < MIN_FILE_SIZE:
496
+ print('File size is small, retrying with Selenium:', url)
497
+ response = requests.get(url, allow_redirects=True)
498
+ if response.status_code == 200:
499
+ redirected_url = response.url
500
+ download_file_with_selenium(
501
+ redirected_url,
502
+ download_dir=pdf_dir,
503
+ )
504
+ print('Downloaded with Selenium:', redirected_url)
505
+ return redirected_url
506
+ else:
507
+ with open(outfile, 'wb') as pdf:
508
+ pdf.write(response.content)
509
+ print('Downloaded:', outfile)
510
+ else:
511
+ print('Failed to download, retrying with Selenium:', url)
512
+ response = requests.get(url, allow_redirects=True)
513
+ if response.status_code == 200:
514
+ redirected_url = response.url
515
+ download_file_with_selenium(
516
+ redirected_url,
517
+ download_dir=pdf_dir,
518
+ )
519
+ print('Downloaded with Selenium:', redirected_url)
520
+ return redirected_url
521
+ except:
522
+ coa_url = f'{base}/coa/coa-view?sample={sample_id}'
523
+ response = requests.get(coa_url, allow_redirects=True)
524
+ if response.status_code == 200:
525
+ redirected_url = response.url
526
+ download_file_with_selenium(
527
+ redirected_url,
528
+ download_dir=pdf_dir,
529
+ )
530
+ print('Downloaded with Selenium:', redirected_url)
531
+ return redirected_url
532
+
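+ # For example, a Kaycha Labs COA could be downloaded given its
+ # `yourcoa.com` URL (the sample ID and output path here are hypothetical):
+ #
+ # download_kaycha_coa(
+ #     'https://yourcoa.com/coa/coa-download?sample=DA30610003-001',
+ #     r'D:\data\new-york\NYSCannabis\pdfs\example-coa.pdf',
+ # )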
533
+
534
+ # Download all PDFs.
535
+ MIN_FILE_SIZE = 21 * 1024
536
+ pdf_dir = r'D:\data\new-york\NYSCannabis\pdfs'
537
+ os.makedirs(pdf_dir, exist_ok=True)
538
+ redirect_urls = {}
539
+ for post_id, urls in coa_urls.items():
540
+ print(f"Downloading COA for post: {post_id}")
541
+ for i, url in enumerate(urls, start=1):
542
+
543
+ # Skip if the URL is already downloaded.
544
+ filename = f"{post_id}-coa-{i}.pdf"
545
+ outfile = os.path.join(pdf_dir, filename)
546
+ if os.path.exists(outfile):
547
+ print('Cached:', outfile)
548
+ redirect_urls[post_id] = url
549
+ continue
550
+
551
+ # Handle QBench COAs.
552
+ if 'qbench.net' in url and 'download' not in url:
553
+ download_file_with_selenium(
554
+ url,
555
+ download_dir=pdf_dir,
556
+ method='button',
557
+ el_id='qbenchDownloadPdfButton',
558
+ )
559
+ print('Downloaded with Selenium:', url)
560
+
561
+ # Download Kaycha Labs COAs.
562
+ elif 'yourcoa.com' in url:
563
+ url = download_kaycha_coa(url, outfile)
564
+
565
+ # Download regular PDFs.
566
+ elif url.startswith('http'):
567
+ response = requests.get(url, allow_redirects=True)
568
+ if response.status_code == 200:
569
+ # if len(response.content) < MIN_FILE_SIZE:
570
+ # redirected_url = response.url
571
+ # print('File size is small, retrying with Selenium:', redirected_url)
572
+ # download_file_with_selenium(
573
+ # redirected_url,
574
+ # download_dir=pdf_dir,
575
+ # )
576
+ # print('Downloaded with Selenium:', redirected_url)
577
+ # else:
578
+ with open(outfile, 'wb') as file:
579
+ file.write(response.content)
580
+ print(f"Downloaded COA: {outfile}")
581
+ else:
582
+ print('Failed to download:', url)
583
+ sleep(1)
584
+
585
+ # Skip invalid URLs.
586
+ else:
587
+ print('Invalid URL:', url)
588
+ continue
589
+
590
+ # Save the URL that was downloaded.
591
+ redirect_urls[post_id] = url
592
+
593
+ # Remove duplicate PDFs.
594
+ remove_duplicate_files(pdf_dir, verbose=True)
595
+
596
+ try:
597
+
598
+ # Tie the COA URLs to the posts.
599
+ df = pd.DataFrame(all_posts)
600
+ df['coa_url'] = df['post_id'].map(coa_urls)
601
+ df['redirect_url'] = df['post_id'].map(redirect_urls)
602
+
603
+ # Save the post data.
604
+ data_dir = r"D:\data\new-york\NYSCannabis"
605
+ timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
606
+ datafile = os.path.join(data_dir, f'ny-reddit-coa-posts-{timestamp}.xlsx')
607
+ df.to_excel(datafile, index=False)
608
+ print('Saved post data:', datafile)
609
+
610
+ except:
611
+ print('No posts to curate.')
612
+
613
+
614
+ #-----------------------------------------------------------------------
615
+ # Parse COA data from the PDFs.
616
+ #-----------------------------------------------------------------------
617
+
618
+ # Initialize a parser and locate the downloaded PDFs.
+ parser = CoADoc()
+ pdf_dir = r'D:\data\new-york\NYSCannabis\pdfs'
621
+ pdf_files = os.listdir(pdf_dir)
622
+ pdf_files = [os.path.join(pdf_dir, x) for x in pdf_files]
623
+ logging.info('Parsing %i COA PDFs...' % len(pdf_files))
624
+ all_coa_data = {}
625
+ failed = []
626
+ temp_path = tempfile.mkdtemp()
627
+ for pdf_file in pdf_files:
628
+ try:
629
+ coa_data = parser.parse_pdf(
630
+ pdf_file,
631
+ temp_path=temp_path,
632
+ verbose=True,
633
+ use_qr_code=False,
634
+ )
635
+     except Exception as e:
+         logging.info('Failed to parse %s: %s' % (pdf_file, e))
637
+ failed.append(pdf_file)
638
+ continue
639
+ if coa_data:
640
+ logging.info('Parsed: %s' % pdf_file)
641
+ coa_id = os.path.basename(pdf_file).split(' ')[0]
642
+ if isinstance(coa_data, list):
643
+ all_coa_data[coa_id] = coa_data[0]
644
+ elif isinstance(coa_data, dict):
645
+ all_coa_data[coa_id] = coa_data
646
+ else:
647
+ logging.info('Found no data: %s' % pdf_file)
648
+ try:
649
+ shutil.rmtree(temp_path)
650
+ except OSError:
651
+ pass
652
+
653
+ # Compile the COA data and remove duplicates.
654
+ coa_df = pd.DataFrame(list(all_coa_data.values()))
655
+ coa_df['coa_id'] = [x.replace('.pdf', '') for x in list(all_coa_data.keys())]
656
+ coa_df.drop_duplicates(subset=['coa_id', 'sample_id', 'results_hash'], inplace=True)
657
+
658
+ # Save the COA data.
659
+ data_dir = r"D:\data\new-york\NYSCannabis"
660
+ timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
661
+ datafile = os.path.join(data_dir, f'ny-reddit-coa-data-{timestamp}.xlsx')
662
+ coa_df.to_excel(datafile, index=False)
663
+ logging.info('Saved %i COA data: %s' % (len(coa_df), datafile))
664
+
665
+ # Save all of the COAs that failed to be parsed.
666
+ with open(os.path.join(data_dir, f'unidentified-coas-{timestamp}.json'), 'w') as f:
667
+ json.dump(failed, f, indent=4)
668
+ logging.info('Saved list of failed PDFs.')
669
+
670
+
671
+ #-----------------------------------------------------------------------
672
+ # Create a sample of posts that have COA URLs.
673
+ #-----------------------------------------------------------------------
674
+
675
+ # Define where the data lives.
676
+ data_dir = r"D:\data\new-york\NYSCannabis"
677
+
678
+ # Read all of the posts.
679
+ post_datafiles = [os.path.join(data_dir, x) for x in os.listdir(data_dir) if 'posts' in x and 'results' not in x]
680
+ posts = pd.concat([pd.read_excel(x) for x in post_datafiles])
681
+ posts.drop_duplicates(subset=['post_id', 'coa_url', 'redirect_url'], inplace=True)
682
+ print('Number of posts:', len(posts))
683
+
684
+ # Read in all of the COA datafiles.
685
+ coa_datafiles = [os.path.join(data_dir, x) for x in os.listdir(data_dir) if 'coa-data' in x]
686
+ results = pd.concat([pd.read_excel(x) for x in coa_datafiles])
687
+ results.drop_duplicates(subset=['sample_id', 'results_hash'], inplace=True)
688
+ print('Number of COA results:', len(results))
689
+
690
+ # Find the post ID for the lab results.
691
+ post_coa_data = {}
692
+ for index, row in results.iterrows():
693
+ coa_id = row['coa_id'].replace('t3_', '').split('-coa')[0]
694
+ matches = posts.loc[posts['post_id'].str.contains(coa_id)]
695
+ try:
696
+ if matches.empty:
697
+ matches = posts.loc[posts['coa_url'].str.contains(coa_id)]
698
+ if matches.empty:
699
+ matches = posts.loc[posts['redirect_url'].str.contains(coa_id)]
700
+ except:
701
+ pass
702
+ if not matches.empty:
703
+ post_id = matches['post_id'].values[0]
704
+ post_coa_data[post_id] = row.to_dict()
705
+
706
+ # Merge the lab results with the post data.
707
+ coa_data_df = pd.DataFrame.from_dict(post_coa_data, orient='index')
708
+ coa_data_df.reset_index(inplace=True)
709
+ coa_data_df.rename(columns={'index': 'post_id'}, inplace=True)
710
+ merged_df = posts.merge(coa_data_df, on='post_id', how='left')
711
+ merged_df = merged_df.loc[~merged_df['coa_id'].isna()]
712
+ print('Number of posts with COA data:', len(merged_df))
713
+
714
+ # Save the updated post data.
715
+ timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
716
+ datafile = os.path.join(data_dir, f'ny-reddit-posts-with-results-{timestamp}.xlsx')
717
+ merged_df.to_excel(datafile, index=False)
718
+ print('Saved %i posts with COA data:' % len(merged_df), datafile)
algorithms/get_results_or.py ADDED
@@ -0,0 +1,256 @@
1
+ """
2
+ Get Results Oregon
3
+ Copyright (c) 2024 Cannlytics
4
+
5
+ Authors:
6
+ Keegan Skeate <https://github.com/keeganskeate>
7
+ Created: 5/25/2024
8
+ Updated: 5/30/2024
9
+ License: CC-BY 4.0 <https://huggingface.co/datasets/cannlytics/cannabis_tests/blob/main/LICENSE>
10
+
11
+ Description:
12
+
13
+ Curate Oregon lab result data obtained through public records requests.
14
+
15
+ Data Sources:
16
+
17
+ - Public records request by Jamie Toth.
18
+
19
+ """
20
+ # Standard imports:
21
+ from datetime import datetime
22
+ import os
23
+
24
+ # External imports:
25
+ from cannlytics.data import save_with_copyright
26
+ from cannlytics.utils import snake_case
27
+ from cannlytics.utils.constants import ANALYTES
28
+ import pandas as pd
29
+
30
+ # Define standard columns.
31
+ columns = {
32
+ 'LabId': 'lab_id',
33
+ 'SampleCreatedByLicenseId': 'producer_id',
34
+ 'SampleId': 'sample_id',
35
+ 'MonthOfTest': 'month',
36
+ 'YearOfTest': 'year',
37
+ 'ProductType': 'product_type',
38
+ 'TestName': 'test_name',
39
+ 'Result': 'result',
40
+ 'PassFail': 'status',
41
+ }
42
+
43
+ # Define the data types for each column.
44
+ dtype_spec = {
45
+ 'LabId': str,
46
+ 'SampleCreatedByLicenseId': str,
47
+ 'SampleId': str,
48
+ 'MonthOfTest': str,
49
+ 'YearOfTest': str,
50
+ 'ProductType': str,
51
+ 'TestName': str,
52
+ 'Result': float,
53
+ 'PassFail': str,
54
+ }
55
+
56
+ def read_and_standardize_csv(file_path, columns, dtype_spec):
57
+ """Read a CSV file and standardize the column names."""
58
+ try:
59
+ df = pd.read_csv(file_path, dtype=dtype_spec, low_memory=False)
60
+ df.rename(columns=columns, inplace=True)
61
+ return df
62
+ except Exception as e:
63
+ print(f"Error reading {file_path}: {e}")
64
+ return pd.DataFrame()
65
+
66
+ def collect_data(datafile, columns, dtype_spec):
67
+ """Collect data from the specified CSV file."""
68
+ df = read_and_standardize_csv(datafile, columns, dtype_spec)
69
+ df['month'] = df['year'].astype(str) + '-' + df['month'].astype(str)
70
+ df['date_tested'] = pd.to_datetime(df['month'], format='%Y-%m', errors='coerce')
71
+ return df
72
+
73
+ def pivot_data(data):
74
+ """Pivot the data to get results for each sample."""
75
+ results = data.pivot_table(
76
+ index=['sample_id', 'producer_id', 'lab_id', 'product_type', 'date_tested'],
77
+ columns='test_name',
78
+ values='result',
79
+ aggfunc='first'
80
+ ).reset_index()
81
+ results['month'] = results['date_tested'].dt.to_period('M')
82
+ results['year'] = results['date_tested'].dt.year
83
+ return results
84
+
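+ # The pivot above reshapes one row per test into one row per sample,
+ # e.g. (illustrative values):
+ #
+ #   sample_id  test_name  result          sample_id  THC   THCA
+ #   S1         THC        0.5      -->    S1         0.5   21.3
+ #   S1         THCA       21.3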
85
+ def augment_calculations(
86
+ df,
87
+ cannabinoids=None,
88
+ terpenes=None,
89
+ delta_9_thc='delta_9_thc',
90
+ thca='thca',
91
+ cbd='cbd',
92
+ cbda='cbda',
93
+ ):
94
+ """Augment the DataFrame with additional calculated fields."""
95
+ # Calculate total cannabinoids.
96
+ if cannabinoids is not None:
97
+ df['total_cannabinoids'] = round(df[cannabinoids].sum(axis=1), 2)
98
+
99
+ # Calculate total terpenes.
100
+ if terpenes is not None:
101
+ df['total_terpenes'] = round(df[terpenes].sum(axis=1), 2)
102
+
103
+ # Calculate the total THC to total CBD ratio.
104
+ df['total_thc'] = round(df[delta_9_thc] + 0.877 * df[thca], 2)
105
+ df['total_cbd'] = round(df[cbd] + 0.877 * df[cbda], 2)
106
+ df['thc_cbd_ratio'] = round(df['total_thc'] / df['total_cbd'], 2)
107
+
108
+ # Calculate the total cannabinoids to total terpenes ratio.
109
+ if cannabinoids is not None and terpenes is not None:
110
+ df['cannabinoids_terpenes_ratio'] = round(df['total_cannabinoids'] / df['total_terpenes'], 2)
111
+
112
+ # Return the augmented data.
113
+ return df
114
+
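+ # The 0.877 factor above is the molar-mass ratio of THC to THCA
+ # (~314.5 / ~358.5 g/mol), which estimates the THC yielded when the
+ # acid form decarboxylates. For example, a sample with 0.5% delta-9 THC
+ # and 21.3% THCA has:
+ #
+ #     total_thc = 0.5 + 0.877 * 21.3 ≈ 19.2%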
115
+ def standardize_analyte_names(df, analyte_mapping):
116
+ """Standardize analyte names."""
117
+ df.columns = [col.split('(')[0].strip() for col in df.columns]
118
+ df.columns = [analyte_mapping.get(snake_case(col), snake_case(col)) for col in df.columns]
119
+ return df
120
+
121
+ def combine_similar_columns(df, similar_columns):
122
+ """Combine similar columns with different spellings or capitalization."""
123
+ for target_col, col_variants in similar_columns.items():
124
+ if target_col not in df.columns:
125
+ df[target_col] = pd.NA
126
+ for col in col_variants:
127
+ if col in df.columns:
128
+ df[target_col] = df[target_col].combine_first(df[col])
129
+ df.drop(columns=[col], inplace=True)
130
+ return df
131
+
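+ # A minimal usage sketch (hypothetical columns): variant spellings are
+ # coalesced into one canonical column, filling nulls from each variant
+ # in order.
+ #
+ # df = pd.DataFrame({'Delta 9 THC': [0.5, None], 'Delta-9 THC': [None, 0.7]})
+ # df = combine_similar_columns(df, {'delta_9_thc': ['Delta 9 THC', 'Delta-9 THC']})
+ # df['delta_9_thc'].tolist()  # [0.5, 0.7]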
132
+ def convert_mg_g_to_percentage(df):
133
+ """Convert mg/g values to percentage for specified columns."""
134
+ mg_g_columns = [col for col in df.columns if '(mg/g)' in col]
135
+ for col in mg_g_columns:
136
+ df[col] = df[col] / 10
137
+ df.rename(columns={col: col.replace('(mg/g)', '').strip()}, inplace=True)
138
+ return df
139
+
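+ # Since 1 mg/g is 0.1% by mass, dividing a mg/g value by 10 yields a
+ # percentage, e.g. a 'THCA (mg/g)' reading of 213.0 becomes 21.3 (%).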
140
+ def get_results_or(datafile: str, output_dir: str) -> pd.DataFrame:
+     """Curate Oregon lab results from a CSV of test data."""
+
+     # Read Oregon lab results.
+     data = collect_data(datafile, columns, dtype_spec)
145
+ print('Number of Oregon tests:', len(data))
146
+
147
+ # Pivot the data to get results for each sample.
148
+ results = pivot_data(data)
149
+ print('Number of Oregon test samples:', len(results))
150
+
151
+ # Divide any value in a column with mg/g by 10 to get a percentage.
152
+ results = convert_mg_g_to_percentage(results)
153
+ print('Converted mg/g values to percentages.')
154
+
155
+ # Combine similar columns.
156
+ similar_columns = {
157
+ 'cbd': ['CBD (%RSD)', 'CBD (RPD)', 'Total CBD (mg/g; cannot fail)'],
158
+ 'delta_8_thc': ['Delta 8 THC', 'Delta-8 THC', 'Delta-8 THC (%RSD)', 'Delta-8 THC (RPD)', 'Delta-8 THC (mg/g)'],
159
+ 'delta_9_thc': ['Delta 9 THC', 'Delta-9 THC', 'THC (%RSD)', 'THC (RPD)', 'Delta-9 THC (mg/g)'],
160
+ 'moisture_content': ['Moisture Content (%)', 'R&D Test: Moisture Content', 'Subcontracted Test - Moisture Content'],
161
+ 'mycotoxins': ['Mycotoxins (pass/fail)', 'R&D Test: Mycotoxins', 'Subcontracted Test - Mycotoxins'],
162
+ }
163
+ results = combine_similar_columns(results, similar_columns)
164
+ print('Combined similar columns.')
165
+
166
+ # Standardize the analyte names
167
+ results = standardize_analyte_names(results, ANALYTES)
168
+ print('Standardized analyte names.')
169
+
170
+ # Drop nuisance columns.
171
+ drop = [
172
+ 'heavy_metals',
173
+ 'pesticides',
174
+ 'potency',
175
+ 'randd_test',
176
+ 'randd_test_heavy_metals',
177
+ 'randd_test_microbiological_contaminants',
178
+ 'randd_test_moisture_content',
179
+ 'randd_test_mycotoxins',
180
+ 'randd_test_pesticides',
181
+ 'randd_test_potency',
182
+ 'randd_test_solvents',
183
+ 'randd_test_water_activity',
184
+ 'solvents',
185
+ 'tentatively_identified_compounds',
186
+ 'microbiological_contaminants',
187
+ ]
188
+ results = results.drop(columns=drop, errors='ignore')
189
+
190
+ # Ensure all numeric columns are numeric.
191
+ non_numeric = [
192
+ 'sample_id',
193
+ 'producer_id',
194
+ 'lab_id',
195
+ 'product_type',
196
+ 'date_tested',
197
+ 'month',
198
+ 'year',
199
+ ]
200
+ numeric_cols = results.columns.difference(non_numeric)
201
+ for col in numeric_cols:
202
+ results[col] = pd.to_numeric(results[col], errors='coerce')
203
+ print('Converted columns to numeric.')
204
+
205
+     # Augment additional calculated metrics.
+     cannabinoids = ['delta_8_thc', 'delta_9_thc', 'thca', 'cbd']
+     results['total_cbd'] = results['cbd']
+     if 'total_thc' not in results.columns:
+         results['total_thc'] = round(results['delta_9_thc'] + 0.877 * results['thca'], 2)
+     results['total_cannabinoids'] = round(results[cannabinoids].sum(axis=1), 2)
+     results['thc_cbd_ratio'] = round(results['total_thc'] / results['total_cbd'], 2)
210
+ print('Augmented fields.')
211
+
212
+ # Sort the columns.
213
+ numeric_cols = results.columns.difference(non_numeric)
214
+ numeric_cols_sorted = sorted(numeric_cols)
215
+ results = results[non_numeric + numeric_cols_sorted]
216
+
217
+ # # Save the results with copyright and sources sheets.
218
+ # date = datetime.now().strftime('%Y-%m-%d')
219
+ # if not os.path.exists(output_dir): os.makedirs(output_dir)
220
+ # outfile = f'{output_dir}/or-results-{date}.xlsx'
221
+ # save_with_copyright(
222
+ # results,
223
+ # outfile,
224
+ # dataset_name='Oregon Cannabis Lab Results',
225
+ # author='Jamie Toth (data acquisition), Keegan Skeate (curation)',
226
+ # publisher='Cannlytics',
227
+ # sources=['Oregon Liquor and Cannabis Commission', 'Jamie Toth'],
228
+ # source_urls=['https://www.oregon.gov/olcc/marijuana/pages/default.aspx', 'https://jamietoth.com'],
229
+ # )
230
+ # print('Saved Oregon lab results:', outfile)
231
+
232
+ # Save the results.
233
+ outfile = os.path.join(output_dir, 'or-results-latest.xlsx')
234
+ outfile_csv = os.path.join(output_dir, 'or-results-latest.csv')
235
+ outfile_json = os.path.join(output_dir, 'or-results-latest.jsonl')
236
+ results.to_excel(outfile, index=False)
237
+ results.to_csv(outfile_csv, index=False)
238
+ # FIXME: This causes an OverflowError
239
+ # results.to_json(outfile_json, orient='records', lines=True)
240
+ print('Saved Excel:', outfile)
241
+ print('Saved CSV:', outfile_csv)
242
+ # print('Saved JSON:', outfile_json)
243
+
244
+ # Return the results.
245
+ return results
246
+
247
+ # === Test ===
248
+ # [✓] Tested: 2024-07-10 by Keegan Skeate <keegan@cannlytics>
249
+ if __name__ == '__main__':
250
+
251
+     # Define where the data lives.
+     datafile = r"D:\data\public-records\Oregon\Oregon\Oregon data 5-7-24 (rich)\Anonymized Test Data Feb 2021 to April 2024.csv"
253
+ output_dir = 'D://data/oregon/results/datasets'
254
+
255
+ # Curate results.
256
+     get_results_or(datafile=datafile, output_dir=output_dir)
algorithms/get_results_ri.py ADDED
@@ -0,0 +1,244 @@
1
+ """
2
+ Get Results Rhode Island
3
+ Copyright (c) 2024 Cannlytics
4
+
5
+ Authors:
6
+ Keegan Skeate <https://github.com/keeganskeate>
7
+ Created: 5/25/2024
8
+ Updated: 5/30/2024
9
+ License: CC-BY 4.0 <https://huggingface.co/datasets/cannlytics/cannabis_tests/blob/main/LICENSE>
10
+
11
+ Description:
12
+
13
+ Curate Rhode Island lab result data obtained through public records requests.
14
+
15
+ Data Sources:
16
+
17
+ - Public records request
18
+
19
+ """
20
+ # Standard imports:
21
+ from datetime import datetime
22
+ import os
23
+
24
+ # External imports:
25
+ from cannlytics.data import save_with_copyright
26
+ from cannlytics.utils import snake_case
27
+ from cannlytics.utils.constants import ANALYTES
28
+ import numpy as np
29
+ import pandas as pd
30
+
31
+ # Define columns.
32
+ columns = {
33
+ 'Id': 'sample_id',
34
+ 'TestingFacilityName': 'lab',
35
+ 'ItemFromFacilityLicenseNumber': 'producer_license_number',
36
+ 'SourcePackageLabels': 'label',
37
+ 'TestPerformedDate': 'date_tested',
38
+ 'TestTypeName': 'test_type',
39
+ 'TestResultLevel': 'test_result',
40
+ 'OverallPassed': 'status',
41
+ }
42
+
43
+ # Define the data types for each column.
44
+ dtype_spec = {
45
+ 'Id': str,
46
+ 'TestingFacilityName': str,
47
+ 'ItemFromFacilityLicenseNumber': str,
48
+ 'SourcePackageLabels': str,
49
+ 'TestPerformedDate': str,
50
+ 'TestTypeName': str,
51
+ 'TestResultLevel': float,
52
+ 'OverallPassed': bool,
53
+ }
54
+
55
+ def collect_data(data_dir, columns, dtype_spec):
56
+ """Collect data from a directory of CSV and Excel files."""
57
+ results = []
58
+ for root, _, files in os.walk(data_dir):
59
+ for file in files:
60
+ if 'no data' in file.lower():
61
+ continue
62
+ print('Reading:', file)
63
+ file_path = os.path.join(root, file)
64
+ if file.endswith('.csv'):
65
+ df = read_and_standardize_csv(file_path, columns, dtype_spec)
66
+ elif file.endswith('.xlsx'):
67
+ df = read_and_standardize_excel(file_path, columns)
68
+ if not df.empty:
69
+ results.append(df)
70
+ return pd.concat(results, ignore_index=True)
71
+
72
+ def read_and_standardize_csv(file_path, columns, dtype_spec):
73
+ """Read a CSV file and standardize the column names."""
74
+ try:
75
+ df = pd.read_csv(file_path, dtype=dtype_spec, usecols=columns.keys(), encoding='latin1')
76
+ df.rename(columns=columns, inplace=True)
77
+ return df
78
+ except Exception as e:
79
+ print(f"Error reading {file_path}: {e}")
80
+ return pd.DataFrame()
81
+
82
+ def read_and_standardize_excel(file_path, columns):
83
+ """Read an Excel file and standardize the column names."""
84
+ try:
85
+ df = pd.read_excel(file_path, usecols=columns.keys())
86
+ df.rename(columns=columns, inplace=True)
87
+ return df
88
+ except Exception as e:
89
+ print(f"Error reading {file_path}: {e}")
90
+ return pd.DataFrame()
91
+
92
+ def extract_test_details(data):
93
+ """Extract test_name, units, and product_type from test_type."""
94
+ data[['test_name', 'units', 'product_type']] = data['test_type'].str.extract(r'(.+?) \((.+?)\) (.+)')
95
+ return data
96
+
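+ # For example, a `test_type` such as 'Delta-9 THC (%) Flower' would be
+ # split into test_name='Delta-9 THC', units='%', product_type='Flower'
+ # (the exact test-type strings here are illustrative).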
97
+ def pivot_data(data):
98
+ """Pivot the data to get results for each sample."""
99
+ results = data.pivot_table(
100
+ index=['sample_id', 'producer_license_number', 'lab', 'label', 'date_tested', 'product_type'],
101
+ columns='test_name',
102
+ values='test_result',
103
+ aggfunc='first'
104
+ ).reset_index()
105
+ results['date_tested'] = pd.to_datetime(results['date_tested'], errors='coerce')
106
+ results['month'] = results['date_tested'].dt.to_period('M')
107
+ results['year'] = results['date_tested'].dt.year
108
+ return results
109
+
110
+ def augment_calculations(
111
+ df,
112
+ cannabinoids=None,
113
+ terpenes=None,
114
+ delta_9_thc='delta_9_thc',
115
+ thca='thca',
116
+ cbd='cbd',
117
+ cbda='cbda',
118
+ ):
119
+ """Augment the DataFrame with additional calculated fields."""
120
+ # Calculate total cannabinoids.
121
+ if cannabinoids is not None:
122
+ df['total_cannabinoids'] = round(df[cannabinoids].sum(axis=1), 2)
123
+
124
+ # Calculate total terpenes.
125
+ if terpenes is not None:
126
+ df['total_terpenes'] = round(df[terpenes].sum(axis=1), 2)
127
+
128
+ # Calculate the total THC to total CBD ratio.
129
+ df['total_thc'] = round(df[delta_9_thc] + 0.877 * df[thca], 2)
130
+ df['total_cbd'] = round(df[cbd] + 0.877 * df[cbda], 2)
131
+ df['thc_cbd_ratio'] = round(df['total_thc'] / df['total_cbd'], 2)
132
+
133
+ # Calculate the total cannabinoids to total terpenes ratio.
134
+ if cannabinoids is not None and terpenes is not None:
135
+ df['cannabinoids_terpenes_ratio'] = round(df['total_cannabinoids'] / df['total_terpenes'], 2)
136
+
137
+ # Return the augmented data.
138
+ return df
139
+
140
+ def standardize_analyte_names(df, analyte_mapping):
141
+ """Standardize analyte names."""
142
+ df.columns = [analyte_mapping.get(snake_case(col), snake_case(col)) for col in df.columns]
143
+ return df
144
+
145
+ def combine_similar_columns(df, similar_columns):
146
+ """Combine similar columns with different spellings or capitalization."""
147
+ for target_col, col_variants in similar_columns.items():
148
+ if target_col not in df.columns:
149
+ df[target_col] = pd.NA
150
+ for col in col_variants:
151
+ if col in df.columns:
152
+ df[target_col] = df[target_col].combine_first(df[col])
153
+ df.drop(columns=[col], inplace=True)
154
+ return df
155
+
156
+ def get_results_ri(data_dir: str, output_dir: str) -> pd.DataFrame:
+     """Curate Rhode Island lab results from public records files."""
+
+     # Collect Rhode Island lab results.
+     data = collect_data(data_dir, columns, dtype_spec)
160
+ print('Number of Rhode Island tests:', len(data))
161
+
162
+ # Extract test details
163
+ data = extract_test_details(data)
164
+
165
+ # Pivot the data to get results for each sample.
166
+ results = pivot_data(data)
167
+ print('Number of Rhode Island samples:', len(results))
168
+
169
+ # Combine similar names.
170
+ similar_columns = {
171
+ 'total_yeast_and_mold': ['Total Yeast and MOld', 'Total Yeast and Mold'],
172
+ '1_2_dichloroethane': ['1,2 Dichlorethane', '1,2 Dichloroethane'],
173
+ 'total_cbd': ['Total CBD'],
174
+ 'total_thc': ['Total THC'],
175
+ '3_methylpentane': ['3 Methylpetane', '3 Methylpentane'],
176
+ 'n_methylpyrrolidone': ['N Methylpyrrolidone', 'N methylpyrrlidone'],
177
+ 'n_n_dimethylacetamide': ['N,N Dimethyacetamide', 'N,N Dimethylacetamide'],
178
+ }
179
+ results = combine_similar_columns(results, similar_columns)
180
+
181
+ # Standardize the analyte names
182
+ results = standardize_analyte_names(results, ANALYTES)
183
+ print('Standardized analyte names.')
184
+
185
+ # Augment additional calculated metrics.
186
+ cannabinoids = ['cbd', 'cbda', 'delta_9_thc', 'thca']
187
+ terpenes = [
188
+ 'alpha_bisabolol', 'alpha_humulene', 'alpha_pinene',
189
+ 'alpha_terpinene', 'beta_caryophyllene', 'beta_myrcene',
190
+ 'beta_pinene', 'caryophyllene_oxide', 'd_limonene', 'linalool',
191
+ 'nerolidol', 'other_terpenes'
192
+ ]
193
+     results = augment_calculations(
+         results,
+         cannabinoids=cannabinoids,
+         terpenes=terpenes,
+     )
194
+ print('Augmented fields.')
195
+
196
+ # Sort the columns.
197
+ non_numeric = [
198
+ 'sample_id', 'producer_license_number', 'lab', 'label',
199
+ 'date_tested', 'product_type', 'month', 'year'
200
+ ]
201
+ numeric_cols = results.columns.difference(non_numeric)
202
+ numeric_cols_sorted = sorted(numeric_cols)
203
+ results = results[non_numeric + numeric_cols_sorted]
204
+
205
+ # # Save the results with copyright and sources sheets.
206
+ # date = datetime.now().strftime('%Y-%m-%d')
207
+ # if not os.path.exists(output_dir): os.makedirs(output_dir)
208
+ # outfile = f'{output_dir}/ri-results-{date}.xlsx'
209
+ # save_with_copyright(
210
+ # results,
211
+ # outfile,
212
+ # dataset_name='Rhode Island Cannabis Lab Results',
213
+ # author='Keegan Skeate',
214
+ # publisher='Cannlytics',
215
+ # sources=['Rhode Island Office Of Cannabis Regulation'],
216
+ # source_urls=['https://dbr.ri.gov/office-cannabis-regulation'],
217
+ # )
218
+ # print('Saved Rhode Island lab results:', outfile)
219
+
220
+ # Save the results.
221
+ outfile = os.path.join(output_dir, 'ri-results-latest.xlsx')
222
+ outfile_csv = os.path.join(output_dir, 'ri-results-latest.csv')
223
+ outfile_json = os.path.join(output_dir, 'ri-results-latest.jsonl')
224
+ results.to_excel(outfile, index=False)
225
+ results.to_csv(outfile_csv, index=False)
226
+ # FIXME: This causes an OverflowError
227
+ # results.to_json(outfile_json, orient='records', lines=True)
228
+ print('Saved Excel:', outfile)
229
+ print('Saved CSV:', outfile_csv)
230
+ # print('Saved JSON:', outfile_json)
231
+
232
+ # Return the results.
233
+ return results
234
+
235
+ # === Test ===
236
+ # [✓] Tested: 2024-07-10 by Keegan Skeate <keegan@cannlytics>
237
+ if __name__ == '__main__':
238
+
239
+ # Define where the data lives.
240
+ data_dir = 'D://data/public-records/Rhode Island/Rhode Island'
241
+ output_dir = 'D://data/rhode-island/results/datasets'
242
+
243
+ # Curate results.
244
+ get_results_ri(data_dir=data_dir, output_dir=output_dir)
algorithms/get_results_sclabs.py DELETED
@@ -1,133 +0,0 @@
1
- """
2
- Cannabis Tests | Get SC Labs Test Result Data
3
- Copyright (c) 2022-2023 Cannlytics
4
-
5
- Authors:
6
- Keegan Skeate <https://github.com/keeganskeate>
7
- Candace O'Sullivan-Sutherland <https://github.com/candy-o>
8
- Created: 7/8/2022
9
- Updated: 2/6/2023
10
- License: CC-BY 4.0 <https://huggingface.co/datasets/cannlytics/cannabis_tests/blob/main/LICENSE>
11
-
12
- Description:
13
-
14
- Collect all of SC Labs' publicly published lab results.
15
-
16
- Algorithm:
17
-
18
- 1. Discover all SC Labs public clients by scanning:
19
-
20
- https://client.sclabs.com/client/{client}/
21
-
22
- 2. Iterate over pages for each client, collecting samples until
23
- the 1st sample and active page are the same:
24
-
25
- https://client.sclabs.com/client/{client}/?page={page}
26
-
27
- 3. (a) Get the sample details for each sample found.
28
- (b) Save the sample details.
29
-
30
- Data Sources:
31
-
32
- - SC Labs Test Results
33
- URL: <https://client.sclabs.com/>
34
-
35
- """
36
- # Standard imports.
37
- from datetime import datetime
38
- import math
39
- import os
40
- from time import sleep
41
-
42
- # External imports.
43
- import pandas as pd
44
-
45
- # Internal imports.
46
- from cannlytics.data.coas.sclabs import (
47
- get_sc_labs_sample_details,
48
- get_sc_labs_test_results,
49
- )
50
- from cannlytics.firebase import initialize_firebase, update_documents
51
-
52
- # Specify where your data lives.
53
- RAW_DATA = '../../../.datasets/lab_results/raw_data/sc_labs'
54
-
55
- # Future work: Figure out a more efficient way to find all producer IDs.
56
- PAGES = range(1, 12_000)
57
- PRODUCER_IDS = list(PAGES)
58
- PRODUCER_IDS.reverse()
59
-
60
- # Alternatively, uncomment to read in the known producer IDs.
61
- # from algorithm_constants import SC_LABS_PRODUCER_IDS as PRODUCER_IDS
62
-
63
- # Iterate over potential client pages and client sample pages.
64
- start = datetime.now()
65
- clients = []
66
- errors = []
67
- test_results = []
68
- for _id in PRODUCER_IDS:
69
- results = get_sc_labs_test_results(_id)
70
- if results:
71
- test_results += results
72
- print('Found all samples for producer:', _id)
73
- clients.append(_id)
74
- sleep(3)
75
-
76
- # Save the results, just in case.
77
- data = pd.DataFrame(test_results)
78
- timestamp = datetime.now().isoformat()[:19].replace(':', '-')
79
- if not os.path.exists(RAW_DATA): os.makedirs(RAW_DATA)
80
- datafile = f'{RAW_DATA}/sc-lab-results-{timestamp}.xlsx'
81
- data.to_excel(datafile, index=False)
82
- end = datetime.now()
83
- print('Sample collection took:', end - start)
84
-
85
- # Read in the saved test results (useful for debugging).
86
- start = datetime.now()
87
- data = pd.read_excel(datafile)
88
-
89
- # Get the sample details for each sample found.
90
- errors = []
91
- rows = []
92
- subset = data.loc[data['results'].isnull()]
93
- total = len(subset)
94
- for index, values in subset.iterrows():
95
- if not math.isnan(values['results']):
96
- continue
97
- percent = round((index + 1) * 100 / total, 2)
98
- sample = values['lab_results_url'].split('/')[-2]
99
- details = get_sc_labs_sample_details(sample)
100
- rows.append({**values.to_dict(), **details})
101
- if details['results']:
102
- print('Results found (%.2f%%) (%i/%i):' % (percent, index + 1, total), sample)
103
- else:
104
- print('No results found (%.2f%%) (%i/%i):' % (percent, index + 1, total), sample)
105
- sleep(3)
106
-
107
- # Save every 500 samples just in case.
108
- if index % 500 == 0 and index != 0:
109
- data = pd.DataFrame(rows)
110
- timestamp = datetime.now().isoformat()[:19].replace(':', '-')
111
- datafile = f'{RAW_DATA}/sc-lab-results-{timestamp}.xlsx'
112
- data.to_excel(datafile, index=False)
113
- print('Saved data:', datafile)
114
-
115
- # Save the final results.
116
- data = pd.DataFrame(rows)
117
- timestamp = datetime.now().isoformat()[:19].replace(':', '-')
118
- datafile = f'{RAW_DATA}/sc-lab-results-{timestamp}.xlsx'
119
- data.to_excel(datafile, index=False)
120
- end = datetime.now()
121
- print('Detail collection took:', end - start)
122
-
123
- # Prepare the data to upload to Firestore.
124
- refs, updates = [], []
125
- for index, obs in data.iterrows():
126
- sample_id = obs['sample_id']
127
- refs.append(f'public/data/lab_results/{sample_id}')
128
- updates.append(obs.to_dict())
129
-
130
- # Initialize Firebase and upload the data to Firestore!
131
- database = initialize_firebase()
132
- update_documents(refs, updates, database=database)
133
- print('Added %i lab results to Firestore!' % len(refs))
algorithms/get_results_sdpharmlabs.py DELETED
@@ -1,28 +0,0 @@
1
- """
2
- Cannabis Tests | Get SDPharmLabs Test Result Data
3
- Copyright (c) 2022 Cannlytics
4
-
5
- Authors:
6
- Keegan Skeate <https://github.com/keeganskeate>
7
- Candace O'Sullivan-Sutherland <https://github.com/candy-o>
8
- Created: 8/23/2022
9
- Updated: 9/20/2022
10
- License: CC-BY 4.0 <https://huggingface.co/datasets/cannlytics/cannabis_tests/blob/main/LICENSE>
11
-
12
- Description:
13
-
14
- Curate SDPharmLabs' publicly published lab results by:
15
-
16
- 1. Finding products and their COA URLS on SDPharmLabs' website.
17
- 2. Downloading COA PDFs from their URLs.
18
- 3. Using CoADoc to parse the COA PDFs (with OCR if needed).
19
- 4. Archiving the COA data in Firestore.
20
-
21
- Data Source:
22
-
23
- - SDPharmLabs
24
- URL: <https://sandiego.pharmlabscannabistesting.com/>
25
-
26
- """
27
-
28
- base = 'https://sandiego.pharmlabscannabistesting.com/results'
algorithms/get_results_ut.py ADDED
@@ -0,0 +1,188 @@
1
+ """
2
+ Get Results | Utah
3
+ Copyright (c) 2023-2024 Cannlytics
4
+
5
+ Authors: Keegan Skeate <https://github.com/keeganskeate>
6
+ Created: 7/4/2024
7
+ Updated: 7/10/2024
8
+ License: MIT License <https://github.com/cannlytics/cannabis-data-science/blob/main/LICENSE>
9
+ """
10
+ # Standard imports:
11
+ import os
12
+ from typing import List, Optional
13
+ from zipfile import ZipFile
14
+
15
+ # External imports:
16
+ from cannlytics import __version__
17
+ from cannlytics.data.cache import Bogart
18
+ from cannlytics.data.coas.parsing import get_coa_files
19
+ from cannlytics.data.coas import CoADoc
20
+ from cannlytics.data.coas.algorithms.utah import parse_utah_coa
21
+ from cannlytics.data.coas import standardize_results
22
+ from cannlytics.data.coas.parsing import find_unique_analytes
23
+ import pandas as pd
24
+
25
+ def unzip_folder(folder, destination, remove=True):
+     """Unzip a `.zip` archive.
+     Args:
+         folder (str): The path of the `.zip` file to extract.
+         destination (str): The directory to extract the files into.
+         remove (bool): Whether to delete the archive after extraction.
+     """
31
+ os.makedirs(destination, exist_ok=True)
32
+ with ZipFile(folder) as zip_ref:
33
+ zip_ref.extractall(destination)
34
+ if remove:
35
+ os.remove(folder)
36
+
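+ # For example (hypothetical paths):
+ #
+ # unzip_folder(
+ #     'D://data/public-records/Utah/coas.zip',
+ #     'D://data/public-records/Utah',
+ #     remove=False,
+ # )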
37
+ def parse_coa_pdfs(
38
+ pdfs,
39
+ algorithm=None,
40
+ parser=None,
41
+ cache=None,
42
+ data=None,
43
+ verbose=True,
44
+ ) -> List[dict]:
45
+ """Parse a list of COA PDFs.
46
+ Args:
47
+ pdfs (List[str]): A list of PDFs to parse.
48
+ algorithm (function): The parsing algorithm to use.
49
+ parser (object): The parser object to use.
50
+ cache (object): The cache object to use.
51
+ data (List[dict]): The data to append to.
52
+ verbose (bool): Whether to print verbose output.
53
+ Returns:
54
+ List[dict]: The parsed data.
55
+ """
56
+ if data is None:
57
+ data = []
58
+ if parser is None:
59
+ parser = CoADoc()
60
+ for pdf in pdfs:
61
+ if not os.path.exists(pdf):
62
+ if verbose: print(f'PDF not found: {pdf}')
63
+ continue
64
+         if cache is not None:
+             pdf_hash = cache.hash_file(pdf)
+             if cache.get(pdf_hash):
+                 if verbose: print('Cached:', pdf)
+                 data.append(cache.get(pdf_hash))
+                 continue
71
+ try:
72
+ if algorithm is not None:
73
+ coa_data = algorithm(parser, pdf)
74
+ else:
75
+ coa_data = parser.parse(pdf)
76
+ data.append(coa_data)
77
+ if cache is not None:
78
+ cache.set(pdf_hash, coa_data)
79
+ print('Parsed:', pdf)
80
+         except Exception as e:
+             print('Error:', pdf, e)
82
+ return data
83
+
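+ # A minimal usage sketch, assuming a JSONL-backed cache so that PDFs
+ # parsed on a prior run are skipped by file hash (paths hypothetical):
+ #
+ # cache = Bogart('D://data/.cache/results-ut.jsonl')
+ # data = parse_coa_pdfs(
+ #     pdfs=['D://data/public-records/Utah/example-coa.pdf'],
+ #     algorithm=parse_utah_coa,
+ #     cache=cache,
+ # )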
84
+ def get_results_ut(
85
+ data_dir: str,
86
+ pdf_dir: str,
87
+ cache_path: Optional[str] = None,
88
+ clear_cache: Optional[bool] = False,
89
+ ) -> pd.DataFrame:
90
+ """Get lab results for Utah."""
91
+
92
+ # Unzip all of the folders.
93
+ folders = [os.path.join(pdf_dir, x) for x in os.listdir(pdf_dir) if x.endswith('.zip')]
94
+ for folder in folders:
95
+ unzip_folder(folder, pdf_dir)
96
+ print('Unzipped:', folder)
97
+
98
+ # Get all of the PDFs.
99
+ pdfs = get_coa_files(pdf_dir)
100
+ pdfs.sort(key=os.path.getmtime)
101
+ print('Found %i PDFs.' % len(pdfs))
102
+
103
+ # Initialize COA parsing.
104
+ cache = Bogart(cache_path)
105
+
106
+ # DEV: Clear the cache.
107
+ if clear_cache:
108
+ cache.clear()
109
+
110
+ # Parse COAs.
111
+ parse_coa_pdfs(
112
+ pdfs,
113
+ algorithm=parse_utah_coa,
114
+ cache=cache,
115
+ )
116
+
117
+ # Read results.
118
+ results = cache.to_df()
119
+ print('Number of results:', len(results))
120
+
121
+ # Standardize time.
122
+ results['date'] = pd.to_datetime(results['date_tested'], format='mixed')
123
+ results['week'] = results['date'].dt.to_period('W').astype(str)
124
+ results['month'] = results['date'].dt.to_period('M').astype(str)
125
+ results = results.sort_values('date')
126
+
127
+ # Standardize compounds.
128
+ # Note: Removes nuisance analytes.
129
+ analytes = find_unique_analytes(results)
130
+ nuisance_analytes = [
131
+ 'det_detected',
132
+ 'global_shortages_of_laboratory_suppliesto',
133
+ 'here_recorded_may_not_be_used_as_an_endorsement_for_a_product',
134
+ 'information_see',
135
+ 'information_see_https_totoag_utah_govto_2021_to_04_to_29_toudaf_temporarily_adjusts_medical_cannabis_testing_protocols_due_to',
136
+ 'nd_not_detected',
137
+ 'notes',
138
+ 'notes_sample_was_tested_as_received_the_cannabinoid_results_were_not_adjusted_for_moisture_content',
139
+ 'phtatpthso_togtoaegn_utetashti_nggo_vwto_2_a_0_s',
140
+ 'recorded_the_results_here_recorded_may_not_be_used_as_an_endorsement_for_a_product',
141
+ 'results_pertain_only_to_the_test_sample_listed_in_this_report',
142
+ 'see_https_totoag_utah_govto_2021_to_04_to_29_toudaf_temporarily_adjusts_medical_cannabis_testing_protocols_due_to_global',
143
+ 'shortages_of_laboratory_suppliesto',
144
+ 'tac_2500000',
145
+ 'tac_t',
146
+ 'this_report_may_not_be_reproduced_except_in_its_entirety',
147
+ 'total_cbd',
148
+ 'total_thc',
149
+ ]
150
+ analytes = analytes - set(nuisance_analytes)
151
+ analytes = sorted(list(analytes))
152
+ results = standardize_results(results, analytes)
153
+
154
+ # Save the results.
155
+ outfile = os.path.join(data_dir, 'ut-results-latest.xlsx')
156
+ outfile_csv = os.path.join(data_dir, 'ut-results-latest.csv')
157
+ outfile_json = os.path.join(data_dir, 'ut-results-latest.jsonl')
158
+ results.to_excel(outfile, index=False)
159
+ results.to_csv(outfile_csv, index=False)
160
+ results.to_json(outfile_json, orient='records', lines=True)
161
+ print('Saved Excel:', outfile)
162
+ print('Saved CSV:', outfile_csv)
163
+ print('Saved JSON:', outfile_json)
164
+
165
+ # Print out features.
166
+ features = {x: 'string' for x in results.columns}
167
+ print('Number of features:', len(features))
168
+ print('Features:', features)
169
+
170
+ # Return the results.
171
+ return results
172
+
173
+ # === Tests ===
174
+ # [✓] Tested: 2024-07-10 by Keegan Skeate <keegan@cannlytics.com>
175
+ if __name__ == '__main__':
176
+
177
+ # Define where the data lives.
178
+ data_dir = 'D://data/utah'
179
+ pdf_dir = 'D://data/public-records/Utah'
180
+ cache_path = 'D://data/.cache/results-ut.jsonl'
181
+
182
+ # Curate results.
183
+ results = get_results_ut(
184
+ data_dir=data_dir,
185
+ pdf_dir=pdf_dir,
186
+ cache_path=cache_path,
187
+ clear_cache=True
188
+ )
algorithms/get_results_wa.py ADDED
@@ -0,0 +1,292 @@
1
+ """
2
+ Curate CCRS Lab Results
3
+ Copyright (c) 2023-2024 Cannlytics
4
+
5
+ Authors:
6
+ Keegan Skeate <https://github.com/keeganskeate>
7
+ Candace O'Sullivan-Sutherland <https://github.com/candy-o>
8
+ Created: 1/1/2023
9
+ Updated: 6/1/2024
10
+ License: CC-BY 4.0 <https://huggingface.co/datasets/cannlytics/cannabis_tests/blob/main/LICENSE>
11
+
12
+ Original author: Cannabis Data
13
+ Original license: MIT <https://github.com/cannabisdata/cannabisdata/blob/main/LICENSE>
14
+
15
+ Data Sources:
16
+
17
+ - [Public records request](https://portal.lcb.wa.gov/s/public-record-request-form)
18
+
19
+ """
20
+ # Standard imports:
21
+ from datetime import datetime
22
+ import gc
23
+ import os
24
+ from typing import Optional
25
+
26
+ # External imports:
27
+ from cannlytics.data.ccrs import (
28
+ CCRS,
29
+ CCRS_ANALYTES,
30
+ CCRS_ANALYSES,
31
+ CCRS_DATASETS,
32
+ anonymize,
33
+ get_datafiles,
34
+ find_detections,
35
+ format_test_value,
36
+ save_dataset,
37
+ unzip_datafiles,
38
+ )
39
+ from cannlytics.utils import convert_to_numeric, camel_to_snake
40
+ import pandas as pd
41
+
42
+
43
+ def read_lab_results(
44
+ data_dir: str,
45
+ value_key: Optional[str] = 'TestValue',
46
+ ) -> pd.DataFrame:
47
+ """Read CCRS lab results."""
48
+ lab_results = pd.DataFrame()
49
+ lab_result_files = get_datafiles(data_dir, 'LabResult_')
50
+ fields = CCRS_DATASETS['lab_results']['fields']
51
+ parse_dates = CCRS_DATASETS['lab_results']['date_fields']
52
+ usecols = list(fields.keys()) + parse_dates
53
+ dtype = {k: v for k, v in fields.items() if v != 'datetime64'}
54
+ dtype[value_key] = 'string' # Hot-fix for `ValueError`.
55
+ for datafile in lab_result_files:
56
+ data = pd.read_csv(
57
+ datafile,
58
+ sep='\t',
59
+ encoding='utf-16',
60
+ engine='python',
61
+ parse_dates=parse_dates,
62
+ dtype=dtype,
63
+ usecols=usecols,
64
+ on_bad_lines='skip',
65
+ # DEV: Uncomment to make development quicker.
66
+ # nrows=1000,
67
+ )
68
+ lab_results = pd.concat([lab_results, data])
69
+     if value_key in lab_results.columns:
+         lab_results[value_key] = lab_results[value_key].apply(convert_to_numeric)
+     return lab_results
73
+
74
+
75
+ def format_result(
76
+ item_results,
77
+ manager: Optional[CCRS] = None,
78
+ drop: Optional[list] = []
79
+ ) -> dict:
80
+ """Format results for a lab sample."""
81
+
82
+ # Skip items with no lab results.
83
+ if item_results.empty:
84
+ return None
85
+
86
+ # Record item metadata and important results.
87
+ item = item_results.iloc[0].to_dict()
88
+     for key in drop:
+         item.pop(key, None)
89
+ entry = {
90
+ **item,
91
+ 'delta_9_thc': format_test_value(item_results, 'delta_9_thc'),
92
+ 'thca': format_test_value(item_results, 'thca'),
93
+ 'total_thc': format_test_value(item_results, 'total_thc'),
94
+ 'cbd': format_test_value(item_results, 'cbd'),
95
+ 'cbda': format_test_value(item_results, 'cbda'),
96
+ 'total_cbd': format_test_value(item_results, 'total_cbd'),
97
+ 'moisture_content': format_test_value(item_results, 'moisture_content'),
98
+ 'water_activity': format_test_value(item_results, 'water_activity'),
99
+ }
100
+
101
+ # Determine "Pass" or "Fail" status.
102
+ statuses = list(item_results['LabTestStatus'].unique())
103
+ if 'Fail' in statuses:
104
+ entry['status'] = 'Fail'
105
+ else:
106
+ entry['status'] = 'Pass'
107
+
108
+ # Augment the complete `results`.
109
+ entry_results = []
110
+ for _, item_result in item_results.iterrows():
111
+ test_name = item_result['TestName']
112
+ analyte = CCRS_ANALYTES[test_name]
113
+ try:
114
+ analysis = CCRS_ANALYSES[analyte['type']]
115
+ except KeyError:
116
+ if manager is not None:
117
+ manager.create_log('Unidentified analysis: ' + str(analyte['type']))
118
+ else:
119
+ print('Unidentified analysis:', analyte['type'])
120
+ analysis = analyte['type']
121
+ entry_results.append({
122
+ 'analysis': analysis,
123
+ 'key': analyte['key'],
124
+ 'name': item_result['TestName'],
125
+ 'units': analyte['units'],
126
+ 'value': item_result['TestValue'],
127
+ })
128
+ entry['results'] = entry_results
129
+
130
+ # Determine detected contaminants.
131
+ entry['pesticides'] = find_detections(entry_results, 'pesticides')
132
+ entry['residual_solvents'] = find_detections(entry_results, 'residual_solvents')
133
+ entry['heavy_metals'] = find_detections(entry_results, 'heavy_metals')
134
+
135
+ # Return the entry.
136
+ return entry
137
+
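+ # Each formatted entry keeps the item's metadata, adds top-level test
+ # values, and nests the full panel under `results`, roughly of the form
+ # (illustrative values):
+ #
+ # {
+ #     'total_thc': 23.4,
+ #     'status': 'Pass',
+ #     'results': [
+ #         {'analysis': 'cannabinoids', 'key': 'delta_9_thc',
+ #          'name': 'Delta9 THC', 'units': 'percent', 'value': 0.5},
+ #     ],
+ #     'pesticides': [],
+ # }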
138
+
139
+ def augment_lab_results(
140
+ manager: CCRS,
141
+ results: pd.DataFrame,
142
+ item_key: Optional[str] = 'InventoryId',
143
+ analysis_name: Optional[str] = 'TestName',
144
+ analysis_key: Optional[str] = 'TestValue',
145
+ verbose: Optional[str] = True,
146
+ ) -> pd.DataFrame:
147
+ """Format CCRS lab results to merge into another dataset."""
148
+
149
+ # Handle `TestName`'s that are not in known analytes.
150
+ results[analysis_name] = results[analysis_name].apply(
151
+ lambda x: x.replace('Pesticides - ', '').replace(' (ppm) (ppm)', '')
152
+ )
153
+
154
+ # Map `TestName` to `type` and `key`.
155
+ # Future work: Handle unidentified analyses. Ask ChatGPT?
156
+ test_names = list(results[analysis_name].unique())
157
+ known_analytes = list(CCRS_ANALYTES.keys())
158
+ missing = list(set(test_names) - set(known_analytes))
159
+     if missing:
+         manager.create_log('Unidentified analytes: ' + ', '.join(missing))
+         raise ValueError(f'Unidentified analytes. Add missing analytes to `CCRS_ANALYTES`: {", ".join(missing)}')
+     del test_names, known_analytes, missing
+     gc.collect()
166
+
167
+ # Augment lab results with standard analyses and analyte keys.
168
+ analyte_data = results[analysis_name].map(CCRS_ANALYTES).values.tolist()
169
+ results = results.join(pd.DataFrame(analyte_data))
170
+ results['type'] = results['type'].map(CCRS_ANALYSES)
171
+ results[item_key] = results[item_key].astype(str)
172
+
173
+ # Setup for iteration.
174
+ item_ids = list(results[item_key].unique())
175
+ drop = [analysis_name, analysis_key, 'LabTestStatus', 'key', 'type', 'units']
176
+ N = len(item_ids)
177
+ if verbose:
178
+ manager.create_log(f'Curating {N} items...')
179
+ manager.create_log('Estimated runtime: ' + str(round(N * 0.00011, 2)) + ' minutes')
180
+
181
+ # Return the curated lab results.
182
+ group = results.groupby(item_key).apply(format_result, drop=drop, manager=manager).dropna()
183
+ return pd.DataFrame(group.tolist())
184
+
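+ # Each `TestName` is standardized through `CCRS_ANALYTES`, which maps a
+ # raw test name to an analyte key, analysis type, and units, e.g. an
+ # entry shaped like (illustrative):
+ #
+ # CCRS_ANALYTES['Delta9 THC'] == {
+ #     'key': 'delta_9_thc', 'type': 'cannabinoid', 'units': 'percent',
+ # }
+ #
+ # The `type` is then mapped to a standard analysis via `CCRS_ANALYSES`.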
185
+
186
+ def curate_ccrs_lab_results(
187
+ manager: CCRS,
188
+ data_dir: str,
189
+ stats_dir: str
190
+ ) -> pd.DataFrame:
191
+ """Curate CCRS lab results."""
192
+
193
+ # Start curating lab results.
194
+ manager.create_log('Curating lab results...')
195
+ start = datetime.now()
196
+
197
+ # Unzip all CCRS datafiles.
198
+ unzip_datafiles(data_dir)
199
+
200
+ # Read all lab results.
201
+ lab_results = read_lab_results(data_dir)
202
+
203
+ # Curate all lab results.
204
+ lab_results = augment_lab_results(manager, lab_results)
205
+
206
+ # Standardize the lab results.
207
+ # TODO: Add producer
208
+ columns = {
209
+ 'ExternalIdentifier': 'lab_id',
210
+ 'inventory_type': 'product_type',
211
+ 'test_date': 'date_tested',
212
+ }
213
+ lab_results.rename(columns=columns, inplace=True)
214
+
215
+ # Anonymize the data.
216
+ # FIXME: This does not appear to be anonymizing `created_by`.
217
+ lab_results = anonymize(lab_results)
218
+
219
+ # Standardize the column names.
220
+ lab_results.rename(columns=lambda x: camel_to_snake(x), inplace=True)
221
+
222
+ # Save the curated lab results.
223
+ # TODO: Save a copy as `wa-lab-results-latest.csv` in the `data` directory.
224
+ timestamp = lab_results['created_date'].max().strftime('%Y-%m-%d')
225
+ lab_results_dir = os.path.join(stats_dir, 'lab_results')
226
+ outfile = save_dataset(lab_results, lab_results_dir, f'wa-lab-results-{timestamp}')
227
+ manager.create_log('Saved lab results: ' + str(outfile))
228
+
229
+ # Finish curating lab results.
230
+ end = datetime.now()
231
+ manager.create_log('✓ Finished curating lab results in ' + str(end - start))
232
+ return lab_results
233
+
234
+
235
+ # === Test ===
236
+ # [✓] Tested: 2024-07-15 by Keegan Skeate <keegan@cannlytics>
237
+ if __name__ == '__main__':
238
+
239
+ # Debug variables.
240
+ item_key = 'InventoryId'
241
+ analysis_name = 'TestName'
242
+ analysis_key = 'TestValue'
243
+ value_key = 'TestValue'
244
+ verbose = True
245
+ drop = []
246
+
247
+ # Initialize.
248
+ base = 'D://data/washington/'
249
+ stats_dir = 'D://data/washington/stats'
250
+ manager = CCRS()
251
+
252
+ # Curate lab results for each release.
253
+ releases = [
254
+ # 'CCRS PRR (8-4-23)', # Contains all prior releases.
255
+ # 'CCRS PRR (9-5-23)',
256
+ # 'CCRS PRR (10-2-23)',
257
+ # 'CCRS PRR (11-2-23)',
258
+ # 'CCRS PRR (12-2-23)',
259
+ # 'CCRS PRR (1-2-24)',
260
+ # 'CCRS PRR (2-2-24)',
261
+ # 'CCRS PRR (3-27-24)',
262
+ # 'CCRS PRR (4-2-24)',
263
+ # 'CCRS PRR (5-2-24)',
264
+ # 'CCRS PRR (6-2-24)',
265
+ 'CCRS PRR (7-2-24)',
266
+ ]
267
+ for release in releases:
268
+ data_dir = os.path.join(base, release, release)
269
+ try:
270
+ lab_results = curate_ccrs_lab_results(manager, data_dir, stats_dir)
271
+ manager.create_log('Curated %i WA lab results.' % len(lab_results))
272
+         except Exception as e:
+             manager.create_log('Failed to curate WA lab results: ' + data_dir + ' (' + str(e) + ')')
274
+
275
+ # Aggregate lab results.
276
+ all_results = []
277
+ datafiles = os.listdir(os.path.join(stats_dir, 'lab_results'))
278
+ datafiles = [os.path.join(stats_dir, 'lab_results', x) for x in datafiles if \
279
+ not x.startswith('~') and \
280
+ not 'aggregate' in x and \
281
+ not 'latest' in x and \
282
+ not 'inventory' in x]
283
+ for datafile in datafiles:
284
+ data = pd.read_excel(datafile)
285
+ all_results.append(data)
286
+ results = pd.concat(all_results)
287
+ results.drop_duplicates(subset=['lab_result_id', 'updated_date'], inplace=True)
288
+ results.sort_values(by=['created_date'], inplace=True)
289
+ print('Total number of results:', len(results))
290
+ outfile = os.path.join(stats_dir, 'lab_results', 'wa-lab-results-aggregate.xlsx')
291
+ results.to_excel(outfile, index=False)
292
+ manager.create_log('Saved aggregate lab results to: ' + outfile)
algorithms/get_results_wa_inventory.py ADDED
@@ -0,0 +1,514 @@
+ """
+ Curate CCRS Inventory
+ Copyright (c) 2023-2024 Cannlytics
+
+ Authors:
+ Keegan Skeate <https://github.com/keeganskeate>
+ Candace O'Sullivan-Sutherland <https://github.com/candy-o>
+ Created: 1/1/2023
+ Updated: 6/5/2024
+ License: CC-BY 4.0 <https://huggingface.co/datasets/cannlytics/cannabis_tests/blob/main/LICENSE>
+
+ Original author: Cannabis Data
+ Original license: MIT <https://github.com/cannabisdata/cannabisdata/blob/main/LICENSE>
+
+ Data Source:
+
+ - WSLCB PRR (latest)
+ URL: <https://lcb.app.box.com/s/plb3dr2fvsuvgixb38g10tbwqos73biz>
+
+ """
+ # Standard imports:
+ from datetime import datetime
+ import gc
+ import os
+ from typing import Optional
+
+ # External imports:
+ from cannlytics.data.ccrs import (
+ CCRS,
+ CCRS_DATASETS,
+ CURATED_CCRS_DATASETS,
+ anonymize,
+ get_datafiles,
+ merge_datasets,
+ save_dataset,
+ unzip_datafiles,
+ )
+ from cannlytics.utils import camel_to_snake, rmerge, sorted_nicely
+ import pandas as pd
+
+
+ def read_items(
+ datafile: str,
+ item_cols: list,
+ item_types: dict,
+ date_fields: list,
+ ):
+ """Read CCRS inventory items and format accordingly."""
+ items = pd.read_csv(
+ datafile,
+ sep='\t',
+ encoding='utf-16',
+ parse_dates=date_fields,
+ usecols=item_cols,
+ dtype=item_types,
+ )
+ return items.rename(columns={
+ 'CreatedBy': 'inventory_created_by',
+ 'UpdatedBy': 'inventory_updated_by',
+ 'CreatedDate': 'inventory_created_at',
+ 'updatedDate': 'inventory_updated_at',
+ 'UpdatedDate': 'inventory_updated_at',
+ 'Name': 'inventory_name',
+ })
+
+
+ def read_licensees(data_dir: str):
+ """Read CCRS licensees data and format accordingly."""
+ licensees = pd.read_csv(
+ f'{data_dir}/Licensee_0/Licensee_0/Licensee_0.csv',
+ sep='\t',
+ encoding='utf-16',
+ usecols=['LicenseeId', 'Name', 'DBA'],
+ dtype={
+ 'LicenseeId': 'string',
+ 'Name': 'string',
+ 'DBA': 'string',
+ },
+ )
+ columns = {'Name': 'licensee_name', 'DBA': 'licensee_dba'}
+ return licensees.rename(columns, axis=1)
+
+
+ def read_products(
+ datafile: str,
+ # item_cols: list,
+ # item_types: dict,
+ # date_fields: list,
+ ):
+ """Read CCRS products and format accordingly."""
+ fields = CCRS_DATASETS['products']['fields'].copy()  # Copy to avoid mutating the shared schema.
+ parse_dates = CCRS_DATASETS['products']['date_fields']
+ use_cols = list(fields.keys()) + parse_dates
+ fields['UnitWeightGrams'] = 'string'
+ fields['IsDeleted'] = 'string'
+ fields['CreatedDate'] = 'string'
+ fields['UpdatedDate'] = 'string'
+ products = pd.read_csv(
+ datafile,
+ sep='\t',
+ encoding='utf-16',
+ parse_dates=parse_dates,
+ usecols=use_cols,
+ dtype=fields,
+ )
+ products = products.rename(columns={
+ 'CreatedBy': 'product_created_by',
+ 'UpdatedBy': 'product_updated_by',
+ 'CreatedDate': 'product_created_at',
+ 'updatedDate': 'product_updated_at',
+ 'UpdatedDate': 'product_updated_at',
+ 'Name': 'product_name',
+ 'Description': 'product_description',
+ 'LicenseeId': 'producer_license_number',
+ 'ExternalIdentifier': 'product_external_id',
+ })
+ products.rename(columns=lambda x: camel_to_snake(x), inplace=True)
+ return products
+
+
+ def merge_areas(items, area_files):
+ """Merge areas with inventory items using `AreaId`."""
+ # FIXME: The new areas file needs text-to-columns applied in Excel.
+ try:
+ return merge_datasets(
+ items,
+ area_files,
+ dataset='areas',
+ on='AreaId',
+ target='AreaId',
+ how='left',
+ validate='m:1',
+ rename={'Name': 'area_name'},
+ drop=['LicenseeId', 'IsQuarantine', 'ExternalIdentifier',
+ 'IsDeleted', 'CreatedBy', 'CreatedDate', 'UpdatedBy', 'UpdatedDate']
+ )
+ except Exception:
+ raise ValueError('The new areas file needs text-to-columns applied in Excel.')
+
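# --- Illustrative aside (not part of the commit) ---
# A possible pandas-based alternative to Excel's text-to-columns for the
# malformed areas file flagged above. It assumes every field collapsed into a
# single comma-separated column with the header in the first row; the
# delimiter and layout are assumptions, not confirmed against the raw data.
def repair_areas_file(path: str) -> pd.DataFrame:
    """Split a one-column areas export into proper columns."""
    raw = pd.read_csv(path, sep='\t', encoding='utf-16', header=None, dtype=str)
    split = raw[0].str.split(',', expand=True)
    split.columns = split.iloc[0]
    return split.iloc[1:].reset_index(drop=True)
# ----------------------------------------------------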
+
+ def merge_licensees(items, licensees):
+ """Merge licensees with inventory items using `LicenseeId`."""
+ return rmerge(
+ items,
+ licensees,
+ on='LicenseeId',
+ how='left',
+ validate='m:1',
+ )
+
+
+ def merge_lab_results(
+ manager: CCRS,
+ results_file: str,
+ root_directory: str,
+ on: Optional[str] = 'inventory_id',
+ target: Optional[str] = 'lab_result_id',
+ verbose: Optional[bool] = True,
+ ) -> pd.DataFrame:
+ """Merge lab results with items in a given directory."""
+
+ # Read the standardized lab results.
+ lab_results = pd.read_excel(results_file)
+ lab_results.rename(columns={
+ 'inventory_id': on,
+ 'lab_result_id': target,
+ }, inplace=True)
+ lab_results[on] = lab_results[on].astype(str)
+ lab_results.drop_duplicates(subset=target, inplace=True)
+ lab_results.drop_duplicates(subset=on, inplace=True)
+
+ # Get inventory item fields.
+ fields = CURATED_CCRS_DATASETS['inventory']['fields']
+ parse_dates = CURATED_CCRS_DATASETS['inventory']['date_fields']
+ use_cols = list(fields.keys()) + parse_dates
+
+ # Iterate over all inventory datafiles in the directory.
+ matched = pd.DataFrame()
+ for directory, _, files in os.walk(root_directory):
+ for datafile in files:
+ if 'inventory' in datafile.lower() and datafile.endswith('.xlsx'):
+ if datafile.startswith('~$'):
+ continue
+
+ # Construct the full file path.
+ filename = os.path.join(directory, datafile)
+
+ # Read the standardized inventory, skipping unreadable files.
+ try:
+ print('READING INVENTORY FILE:', filename)
+ data = pd.read_excel(
+ filename,
+ dtype=fields,
+ parse_dates=parse_dates,
+ usecols=use_cols,
+ engine='openpyxl',
+ )
+ except Exception:
+ manager.create_log('Failed to read: ' + filename)
+ continue
+ data[on] = data[on].astype(str)
+ data.drop_duplicates(subset=on, inplace=True)
+
+ # Merge the lab results with the datafile.
+ match = rmerge(
+ data,
+ lab_results,
+ on=on,
+ how='left',
+ validate='m:1',
+ )
+
+ # Record rows with matching lab results.
+ if len(match):
+ match = match.loc[~match[target].isna()]
+ matched = pd.concat([matched, match], ignore_index=True)
+ if verbose:
+ manager.create_log('Matched ' + str(len(matched)) + ' lab results...')
+
+ # Perform garbage cleaning.
+ gc.collect()
+
+ # Return the matched lab results.
+ return matched
+
+
+ def merge_lab_results_with_products(
+ manager: CCRS,
+ products,
+ results_file: str,
+ on: Optional[str] = 'ProductId',
+ target: Optional[str] = 'lab_result_id',
+ verbose: Optional[bool] = True,
+ ):
+ """Merge lab results with products data."""
+ lab_results = pd.read_excel(results_file)
+ lab_results[on] = lab_results[on].astype(str)
+ merged_data = rmerge(
+ products,
+ lab_results,
+ on=on,
+ how='left',
+ validate='m:1',
+ )
+ merged_data = merged_data.loc[~merged_data[target].isna()]
+ if verbose: manager.create_log('Matched ' + str(len(merged_data)) + ' lab results with products...')
+ return merged_data
+
+
+ def merge_products(items, product_files):
+ """Merge products with inventory items using `ProductId`."""
+ items = merge_datasets(
+ items,
+ product_files,
+ dataset='products',
+ on='ProductId',
+ target='InventoryType',
+ how='left',
+ # FIXME: This mapping may not be right.
+ validate='m:m',
+ rename={
+ 'CreatedDate': 'product_created_at',
+ 'updatedDate': 'product_updated_at',
+ 'UpdatedDate': 'product_updated_at',
+ 'ExternalIdentifier': 'product_external_id',
+ 'LicenseeId': 'producer_licensee_id',
+ 'Name': 'product_name',
+ 'Description': 'product_description',
+ },
+ )
+ # FIXME: Merge products with lab results.
+ # items = merge_lab_results_with_products(manager, items, results_file)
+ return items
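# --- Illustrative aside (not part of the commit) ---
# `validate='m:m'` above accepts any key multiplicity, so duplicated
# `ProductId` rows pass silently. A hedged pre-merge diagnostic (the key name
# matches the merge above; the helper itself is hypothetical):
def count_duplicate_keys(df: pd.DataFrame, key: str = 'ProductId') -> int:
    """Count rows whose key value also appears in an earlier row."""
    return int(df.duplicated(subset=key).sum())
# ----------------------------------------------------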
275
+
276
+
277
+ def merge_strains(items, strain_files):
278
+ """Merge strains with inventory items using `StrainId`."""
279
+ items = merge_datasets(
280
+ items,
281
+ strain_files,
282
+ dataset='strains',
283
+ on='StrainId',
284
+ target='strain_name',
285
+ how='left',
286
+ validate='m:1',
287
+ rename={
288
+ 'Name': 'strain_name',
289
+ 'CreatedDate': 'strain_created_at',
290
+ },
291
+ drop=['CreatedBy', 'UpdatedBy', 'UpdatedDate'],
292
+ dedupe=True,
293
+ )
294
+ missing = (items['strain_name'] == False) | (items['strain_name'] == 'False')
295
+ items.loc[missing, 'strain_name'] = items.loc[missing, 'StrainType']
296
+ items.loc[missing, 'StrainType'] = None
297
+ return items
298
+
299
+
300
+ def curate_ccrs_inventory(
301
+ manager: CCRS,
302
+ data_dir: str,
303
+ stats_dir: str
304
+ ):
305
+ """Curate CCRS inventory by merging additional datasets."""
306
+ manager.create_log('Curating inventory...')
307
+ start = datetime.now()
308
+
309
+ # Unzip all CCRS datafiles.
310
+ unzip_datafiles(data_dir)
311
+
312
+ # Create stats directory if it doesn't already exist.
313
+ release = os.path.basename(data_dir)
314
+ inventory_dir = os.path.join(stats_dir, 'inventory-' + release)
315
+ if not os.path.exists(inventory_dir): os.makedirs(inventory_dir)
316
+
317
+ # Read licensees data.
318
+ licensees = read_licensees(data_dir)
319
+
320
+ # Define all fields.
321
+ # Note: `IsDeleted` throws a `ValueError` if defined as a bool.
322
+ fields = CCRS_DATASETS['inventory']['fields']
323
+ date_fields = CCRS_DATASETS['inventory']['date_fields']
324
+ item_cols = list(fields.keys()) + date_fields
325
+ item_types = {k: fields[k] for k in fields if k not in date_fields}
326
+ item_types['IsDeleted'] = 'string'
327
+
328
+ # Get all datafiles.
329
+ inventory_files = get_datafiles(data_dir, 'Inventory_')
330
+ product_files = get_datafiles(data_dir, 'Product_')
331
+ strain_files = get_datafiles(data_dir, 'Strains_')
332
+ area_files = get_datafiles(data_dir, 'Areas_')
333
+
334
+ # Curate inventory datafiles.
335
+ manager.create_log(str(len(inventory_files)) + ' datafiles to curate.')
336
+ manager.create_log('Estimated runtime: ' + str(len(inventory_files) * 0.25 + 1.5) + ' hours')
337
+ for i, datafile in enumerate(inventory_files):
338
+
339
+ # Read in the items.
340
+ manager.create_log('Augmenting: ' + datafile)
341
+ items = read_items(datafile, item_cols, item_types, date_fields)
342
+
343
+ # Merge licensee data.
344
+ manager.create_log('Merging licensee data...')
345
+ items = merge_licensees(items, licensees)
346
+
347
+ # Merge product data.
348
+ manager.create_log('Merging product data...')
349
+ items = merge_products(items, product_files)
350
+
351
+ # Merge strain data.
352
+ manager.create_log('Merging strain data...')
353
+ items = merge_strains(items, strain_files)
354
+
355
+ # Merge area data.
356
+ manager.create_log('Merging area data...')
357
+ items = merge_areas(items, area_files)
358
+
359
+ # Standardize column names.
360
+ manager.create_log('Standardizing...')
361
+ items.rename(
362
+ columns={col: camel_to_snake(col) for col in items.columns},
363
+ inplace=True
364
+ )
365
+
366
+ # Anonymize the data.
367
+ manager.create_log('Anonymizing...')
368
+ items = anonymize(items)
369
+
370
+ # Save the curated inventory data.
371
+ manager.create_log('Saving curated inventory data...')
372
+ outfile = os.path.join(inventory_dir, f'inventory_{i}.xlsx')
373
+ items.to_excel(outfile, index=False)
374
+ manager.create_log('Curated inventory datafile: ' + str(i + 1) + '/' + str(len(inventory_files)))
375
+
376
+ # Perform garbage cleaning.
377
+ gc.collect()
378
+
379
+ # FIXME:
380
+ # Merge and save inventory data with curated lab result data.
381
+ # TODO: Save a copy as `wa-lab-results-latest.csv` in the `data` directory.
382
+ # try:
383
+ manager.create_log('Merging lab results...')
384
+ # inventory_files = sorted_nicely(os.listdir(inventory_dir))
385
+
386
+ # Match with aggregate lab results.
387
+ lab_results_dir = os.path.join(stats_dir, 'lab_results')
388
+ results_file = os.path.join(lab_results_dir, 'wa-lab-results-aggregate.xlsx')
389
+ matched = merge_lab_results(manager, results_file, stats_dir)
390
+ matched.rename(columns=lambda x: camel_to_snake(x), inplace=True)
391
+
392
+ # Save the matched inventory lab results.
393
+ outfile = save_dataset(matched, lab_results_dir, 'wa-inventory-lab-results-' + release)
394
+ manager.create_log('Saved inventory lab results: ' + str(outfile))
395
+ # except Exception as e:
396
+ # manager.create_log('Failed to merge lab results. Curate lab results first.')
397
+ # manager.create_log(str(e))
398
+
399
+ # FIXME: Attach lab results to products.
400
+ matched = pd.DataFrame()
401
+ lab_results_dir = os.path.join(stats_dir, 'lab_results')
402
+ inventory_results_file = results_file = os.path.join(lab_results_dir, 'inventory_lab_results_0.xlsx')
403
+ lab_results = pd.read_excel(inventory_results_file)
404
+ augmented_inventory_files = sorted_nicely(os.listdir(inventory_dir))
405
+ augmented_inventory_files = [os.path.join(inventory_dir, f) for f in augmented_inventory_files if not f.startswith('~$')]
406
+ for i, product_file in enumerate(product_files):
407
+
408
+ # Read products.
409
+ products = read_products(product_file)
410
+
411
+ # TODO: Match products with inventory.
412
+ products.rename(columns={'product_id': 'ProductId'}, inplace=True)
413
+ # for inventory_file in augmented_inventory_files:
414
+ # inventory = pd.read_excel(
415
+ # inventory_file
416
+ # )
417
+
418
+ # FIXME: This is not working.
419
+ products = merge_datasets(
420
+ products,
421
+ augmented_inventory_files,
422
+ dataset='inventory',
423
+ on='ProductId',
424
+ target='inventory_id',
425
+ how='left',
426
+ validate='m:1',
427
+ rename={
428
+ 'CreatedBy': 'inventory_created_by',
429
+ 'UpdatedBy': 'inventory_updated_by',
430
+ 'CreatedDate': 'inventory_created_at',
431
+ 'updatedDate': 'inventory_updated_at',
432
+ 'UpdatedDate': 'inventory_updated_at',
433
+ 'Name': 'inventory_name',
434
+ },
435
+ )
436
+
437
+ # Merge the lab results with the products.
438
+ match = rmerge(
439
+ products,
440
+ lab_results,
441
+ on='product_id',
442
+ how='left',
443
+ validate='m:1',
444
+ )
445
+ match = match.loc[~match['lab_result_id'].isna()]
446
+ matched = pd.concat([matched, match], ignore_index=True)
447
+ manager.create_log('Matched ' + str(len(matched)) + ' lab results with products...')
448
+
449
+ # Save the matched product lab results.
450
+ save_dataset(matched, lab_results_dir, 'product_lab_results')
451
+
452
+ # Complete curation.
453
+ end = datetime.now()
454
+ manager.create_log('✓ Finished curating inventory in ' + str(end - start))
455
+
456
+
457
+ # === Test ===
458
+ # [✓] Tested: 2024-06-08 by Keegan Skeate <keegan@cannlytics>
459
+ if __name__ == '__main__':
460
+
461
+ # Initialize.
462
+ base = 'D://data/washington/'
463
+ stats_dir = 'D://data/washington/stats'
464
+ manager = CCRS()
465
+
466
+ # Curate the inventory for each release.
467
+ releases = [
468
+ # 'CCRS PRR (8-4-23)', # Contains all prior releases.
469
+ # 'CCRS PRR (9-5-23)',
470
+ # 'CCRS PRR (10-2-23)',
471
+ # 'CCRS PRR (11-2-23)',
472
+ # 'CCRS PRR (12-2-23)',
473
+ # 'CCRS PRR (1-2-24)',
474
+ # 'CCRS PRR (2-2-24)',
475
+ # 'CCRS PRR (3-27-24)',
476
+ # 'CCRS PRR (4-2-24)',
477
+ # 'CCRS PRR (5-2-24)',
478
+ # 'CCRS PRR (6-2-24)',
479
+ 'CCRS PRR (7-2-24)',
480
+ ]
481
+ for release in releases:
482
+ try:
483
+ data_dir = os.path.join(base, release, release)
484
+ curate_ccrs_inventory(manager, data_dir, stats_dir)
485
+ manager.create_log('✓ Finished curating inventory for ' + release)
486
+ except Exception as e:
487
+ manager.create_log('Failed to curate inventory for ' + release)
488
+ manager.create_log(str(e))
489
+ continue
490
+
491
+ # Aggregate lab results.
492
+ all_results = []
493
+ datafiles = os.listdir(os.path.join(stats_dir, 'lab_results'))
494
+ datafiles = [os.path.join(stats_dir, 'lab_results', x) for x in datafiles if \
495
+ not x.startswith('~') and \
496
+ not 'aggregate' in x and \
497
+ 'inventory' in x]
498
+ datafiles += [
499
+ r"D:\data\washington\wa-lab-results-2022-01-26.xlsx",
500
+ r"D:\data\washington\wa-lab-results-2023-08-30.xlsx",
501
+ ]
502
+ for datafile in datafiles:
503
+ data = pd.read_excel(datafile)
504
+ all_results.append(data)
505
+ results = pd.concat(all_results)
506
+ results.drop_duplicates(subset=['lab_result_id', 'updated_date'], inplace=True)
507
+ results.sort_values(by=['created_date'], inplace=True)
508
+ print('Number of results:', len(results))
509
+ outfile = os.path.join(stats_dir, 'lab_results', 'wa-results-latest.xlsx')
510
+ outfile_csv = os.path.join(stats_dir, 'lab_results', 'wa-results-latest.csv')
511
+ results.to_excel(outfile, index=False)
512
+ results.to_csv(outfile_csv, index=False)
513
+ manager.create_log('Saved aggregate lab results to: ' + outfile)
514
+ manager.create_log('Saved aggregate lab results to: ' + outfile_csv)
algorithms/get_results_wa_strains.py ADDED
@@ -0,0 +1,264 @@
+ """
+ Augment WA Lab Results with Inventory, Product, and Strain Data
+ """
+ # Standard imports:
+ import json
+ import os
+ import subprocess
+ from typing import List
+ import warnings
+
+ # External imports:
+ import pandas as pd
+ from cannlytics.compounds import pesticides
+ from cannlytics.data.coas.coas import standardize_results
+ from cannlytics.utils.utils import snake_case, kebab_case, camel_to_snake
+ import matplotlib.pyplot as plt
+ from matplotlib.dates import DateFormatter
+ from cannlytics.data.ccrs import CCRS_DATASETS, get_datafiles
+ from cannlytics.data.cache import Bogart
+
+
+ def read_ccrs_data(
+ datafile,
+ dtype: dict,
+ usecols: List[str],
+ parse_dates: List[str],
+ on_bad_lines: str = 'skip',
+ sep: str = '\t',
+ encoding: str = 'utf-16',
+ engine: str = 'python',
+ rename=None,
+ ) -> pd.DataFrame:
+ """Load supplemental CCRS data from a specified data file."""
+ df = pd.read_csv(
+ datafile,
+ sep=sep,
+ encoding=encoding,
+ engine=engine,
+ parse_dates=parse_dates,
+ dtype=dtype,
+ usecols=usecols,
+ on_bad_lines=on_bad_lines
+ )
+ if rename:
+ df = df.rename(columns=rename)
+ return df
+
+
+ def convert_timestamps(obj):
+ """
+ Recursively convert Timestamp and NaTType objects in a dictionary to strings.
+ """
+ if isinstance(obj, dict):
+ for key, value in obj.items():
+ if isinstance(value, pd.Timestamp):
+ obj[key] = value.isoformat()
+ elif isinstance(value, pd._libs.tslibs.nattype.NaTType):
+ obj[key] = None
+ elif isinstance(value, dict):
+ convert_timestamps(value)
+ elif isinstance(value, list):
+ obj[key] = [item.isoformat() if isinstance(item, pd.Timestamp) else None if isinstance(item, pd._libs.tslibs.nattype.NaTType) else item for item in value]
+ return obj
+
64
+
65
+ # Read lab results.
66
+ data_dir = 'D://data/washington/stats/lab_results'
67
+ datafile = os.path.join(data_dir, 'wa-lab-results-aggregate.xlsx')
68
+ results = pd.read_excel(datafile)
69
+
70
+ # Initialize the cache.
71
+ inventory_cache = Bogart('D://data/.cache/results-wa-inventory.jsonl')
72
+ products_cache = Bogart('D://data/.cache/results-wa-products.jsonl')
73
+ strains_cache = Bogart('D://data/.cache/results-wa-strains.jsonl')
74
+
75
+ # Isolate the subsample or results.
76
+ results['inventory_id'] = results['inventory_id'].astype(str)
77
+ inventory_ids = list(results['inventory_id'].unique())
78
+ print('Number of inventory items:', len(inventory_ids))
79
+ matches = {}
80
+
81
+ # Iterate over all releases to augment inventory, product, and strain data.
82
+ base = 'D://data/washington/'
83
+ releases = [
84
+ # 'CCRS PRR (8-4-23)', # Contains all prior releases.
85
+ 'CCRS PRR (9-5-23)',
86
+ 'CCRS PRR (10-2-23)',
87
+ 'CCRS PRR (11-2-23)',
88
+ 'CCRS PRR (12-2-23)',
89
+ 'CCRS PRR (1-2-24)',
90
+ 'CCRS PRR (2-2-24)',
91
+ 'CCRS PRR (3-27-24)',
92
+ 'CCRS PRR (4-2-24)',
93
+ 'CCRS PRR (5-2-24)',
94
+ 'CCRS PRR (6-2-24)',
95
+ 'CCRS PRR (7-2-24)',
96
+ ]
97
+ for release in releases:
98
+ data_dir = os.path.join(base, release, release)
99
+ print('Augmenting data:', data_dir)
100
+
101
+ # Find matching inventory items.
102
+ inventory_files = get_datafiles(data_dir, 'Inventory_')
103
+ inventory_fields = CCRS_DATASETS['inventory']['fields']
104
+ inventory_date_fields = CCRS_DATASETS['inventory']['date_fields']
105
+ item_cols = list(inventory_fields.keys()) + inventory_date_fields
106
+ item_types = {k: inventory_fields[k] for k in inventory_fields if k not in inventory_date_fields}
107
+ item_types['IsDeleted'] = 'string'
108
+ inventory_renames = {
109
+ 'CreatedBy': 'inventory_created_by',
110
+ 'UpdatedBy': 'inventory_updated_by',
111
+ 'CreatedDate': 'inventory_created_at',
112
+ 'updatedDate': 'inventory_updated_at',
113
+ 'UpdatedDate': 'inventory_updated_at',
114
+ 'Name': 'inventory_name',
115
+ }
116
+ for i, datafile in enumerate(inventory_files):
117
+ if len(matches) == len(results):
118
+ print('Matched all inventory items')
119
+ break
120
+ print('Augmenting inventory:', datafile)
121
+ items = read_ccrs_data(
122
+ datafile,
123
+ usecols=item_cols,
124
+ dtype=item_types,
125
+ parse_dates=inventory_date_fields,
126
+ rename=inventory_renames,
127
+ )
128
+ for inventory_id in inventory_ids:
129
+ if inventory_cache.get(inventory_id):
130
+ matches[inventory_id] = inventory_cache.get(inventory_id)
131
+ continue
132
+ # if inventory_id in matches:
133
+ # continue
134
+ item = items.loc[items['InventoryId'] == inventory_id]
135
+ if len(item) > 0:
136
+ item = item.iloc[0]
137
+ item_dict = item.to_dict()
138
+ item_dict = convert_timestamps(item_dict)
139
+ matches[inventory_id] = item_dict
140
+ print('Matched inventory:', inventory_id)
141
+ inventory_cache.set(inventory_id, item_dict)
142
+ print('Matched inventory items:', len(matches))
143
+
144
+ # Match product data.
145
+ product_matches = {}
146
+ product_files = get_datafiles(data_dir, 'Product_')
147
+ product_fields = CCRS_DATASETS['products']['fields']
148
+ product_date_fields = CCRS_DATASETS['products']['date_fields']
149
+ product_cols = list(product_fields.keys()) + product_date_fields
150
+ product_types = {k: product_fields[k] for k in product_fields if k not in product_date_fields}
151
+ product_types['IsDeleted'] = 'string'
152
+ product_types['UnitWeightGrams'] = 'string'
153
+ product_types['CreatedDate'] = 'string'
154
+ product_types['UpdatedDate'] = 'string'
155
+ product_renames = {
156
+ 'CreatedDate': 'product_created_at',
157
+ 'updatedDate': 'product_updated_at',
158
+ 'UpdatedDate': 'product_updated_at',
159
+ 'ExternalIdentifier': 'product_external_id',
160
+ 'LicenseeId': 'producer_licensee_id',
161
+ 'Name': 'product_name',
162
+ 'Description': 'product_description',
163
+ }
164
+ for i, datafile in enumerate(product_files):
165
+ if len(product_matches) == len(results):
166
+ print('Matched all products')
167
+ break
168
+ print('Augmenting products:', datafile)
169
+ products = read_ccrs_data(
170
+ datafile,
171
+ usecols=product_cols,
172
+ dtype=product_types,
173
+ parse_dates=product_date_fields,
174
+ rename=product_renames,
175
+ )
176
+ for inventory_id, values in matches.items():
177
+ if products_cache.get(inventory_id):
178
+ obs = matches[inventory_id]
179
+ product = products_cache.get(inventory_id)
180
+ matches[inventory_id] = {**obs, **product}
181
+ product_matches[inventory_id] = product
182
+ continue
183
+ # if inventory_id in product_matches:
184
+ # continue
185
+ product = products.loc[products['ProductId'] == values['ProductId']]
186
+ if len(product) > 0:
187
+ product = product.iloc[0]
188
+ obs = matches[inventory_id]
189
+ product_dict = product.to_dict()
190
+ product_dict = convert_timestamps(product_dict)
191
+ matches[inventory_id] = {**obs, **product_dict}
192
+ print('Matched product:', inventory_id)
193
+ product_matches[inventory_id] = product_dict
194
+ products_cache.set(inventory_id, product_dict)
195
+
196
+ # Match strain data.
197
+ strain_matches = {}
198
+ strain_files = get_datafiles(data_dir, 'Strains_')
199
+ strain_fields = CCRS_DATASETS['strains']['fields']
200
+ strain_date_fields = CCRS_DATASETS['strains']['date_fields']
201
+ strain_cols = list(strain_fields.keys()) + strain_date_fields
202
+ strain_types = {k: strain_fields[k] for k in strain_fields if k not in strain_date_fields}
203
+ strain_types['IsDeleted'] = 'string'
204
+ strain_renames = {
205
+ 'Name': 'strain_name',
206
+ 'CreatedDate': 'strain_created_at',
207
+ }
208
+ for i, datafile in enumerate(strain_files):
209
+ if len(strain_matches) == len(results):
210
+ print('Matched all strains')
211
+ break
212
+ print('Augmenting strains:', datafile)
213
+ strains = read_ccrs_data(
214
+ datafile,
215
+ usecols=strain_cols,
216
+ dtype=strain_types,
217
+ parse_dates=strain_date_fields,
218
+ rename=strain_renames,
219
+ )
220
+ # TODO: Fix misaligned strain data.
221
+ # missing = (strains['strain_name'] == False) | (strains['strain_name'] == 'False')
222
+ # strains.loc[missing, 'strain_name'] = strains.loc[strains, 'StrainType']
223
+ for inventory_id, values in matches.items():
224
+ # if inventory_id in strain_matches:
225
+ # continue
226
+ if strains_cache.get(inventory_id):
227
+ strain_matches[inventory_id] = strains_cache.get(inventory_id)
228
+ continue
229
+ strain = strains.loc[strains['StrainId'] == values['StrainId']]
230
+ if len(strain) > 0:
231
+ strain = strain.iloc[0]
232
+ obs = matches[inventory_id]
233
+ strain_dict = strain.to_dict()
234
+ strain_dict = convert_timestamps(strain_dict)
235
+ matches[inventory_id] = {**obs, **strain_dict}
236
+ print('Matched strain:', inventory_id)
237
+ strain_matches[inventory_id] = strain_dict
238
+ strains_cache.set(inventory_id, strain_dict)
239
+
240
+ # Optional: Merge area data?
241
+ # area_files = get_datafiles(data_dir, 'Areas_')
242
+
243
+ # Break if all of the subsample is matched.
244
+ if len(matches) == len(results):
245
+ print('Matched all results')
246
+ break
247
+
248
+ # FIXME: Merge inventory, product, and strain data with the results using the cache.
249
+
250
+ # Merge the inventory data with the subsample.
251
+ matches_df = pd.DataFrame.from_dict(matches, orient='index')
252
+ matches_df.index.name = 'inventory_id'
253
+ matches_df.reset_index(inplace=True, drop=True)
254
+ matches_df.columns = [camel_to_snake(col) for col in matches_df.columns]
255
+ results = results.merge(
256
+ matches_df,
257
+ on='inventory_id',
258
+ how='left',
259
+ suffixes=['', '_dup']
260
+ )
261
+ results.drop(columns=[x for x in results.columns if '_dup' in x], inplace=True)
262
+
263
+ # TODO: Further process the results.
264
+
algorithms/get_results_washington_ccrs.py DELETED
@@ -1,471 +0,0 @@
- """
- Cannabis Tests | Washington State
- Copyright (c) 2022 Cannlytics
-
- Authors: Keegan Skeate <https://github.com/keeganskeate>
- Created: 9/23/2022
- Updated: 9/27/2022
- License: <https://github.com/cannlytics/cannlytics/blob/main/LICENSE>
-
- Description: This script augments lab result data with pertinent
- licensee, inventory, inventory type, product, and strain data.
-
- Data sources:
-
- - WA State Traceability Data Dec. 2021 to Aug. 2022
- URL: <https://lcb.app.box.com/s/gosuk65m5iinuaqxx2ef7uis9ccnzb20/folder/170118338288>
-
- """
- # Standard imports.
- import gc
- import json
- import os
-
- # External imports.
- from dotenv import dotenv_values
- import matplotlib.pyplot as plt
- import pandas as pd
-
- # Internal imports.
- # from cannlytics.data.ccrs.utils import get_number_of_lines
- from cannlytics.data.ccrs import CCRS
- from cannlytics.data.ccrs.utils import unzip_files
- from cannlytics.utils import (
- camel_to_snake,
- get_number_of_lines,
- snake_case,
- sorted_nicely,
- )
-
-
- DATA_DIR = 'D:\\data\\washington\\ccrs-2022-08-18'
- SUB_DIR = 'CCRS PRR (8-18-22)'
- ENV_FILE = '.env'
-
-
- #-----------------------------------------------------------------------
- # Get the data.
- #-----------------------------------------------------------------------
-
- # Extract all files.
- unzip_files(DATA_DIR, extension='.zip')
-
-
- #-----------------------------------------------------------------------
- # Curate the data.
- #-----------------------------------------------------------------------
-
- # Get all of the datafiles.
- subsets = {}
- datafiles = []
- for path, _, files in os.walk(DATA_DIR):
- for f in files:
- abs_path = os.path.join(path, f)
- if f.endswith('.csv'):
- datafiles.append(abs_path)
-
- # Count the number of observations in each file.
- print('| Subset | Observations |')
- print('|--------|--------------|')
- for f in sorted_nicely(datafiles):
- datafile = f.split('\\')[-1]
- name = datafile.replace('.csv', '').split('_')[0]
- subset = subsets.get(name, {
- 'observations': 0,
- 'datafiles': [],
- })
- abs_path = os.path.join(DATA_DIR, f)
- file_name = os.path.abspath(abs_path)
- number = get_number_of_lines(file_name)
- subset['observations'] += number
- subset['datafiles'].append(datafile)
- print(f'| `{datafile}` | `{number:,}` |')
- subsets[name] = subset
-
- # Print the total number of observations.
- for key, values in subsets.items():
- print(f'{key}: {values["observations"]:,}', 'observations.')
-
- # Get the columns for each subset.
- for key, values in subsets.items():
- datafile = values['datafiles'][0]
- name = datafile.replace('.csv', '').split('_')[0]
- folder = datafile.replace('.csv', '')
- abs_path = os.path.join(DATA_DIR, SUB_DIR, folder, datafile)
- file_name = os.path.abspath(abs_path)
- df = pd.read_csv(
- file_name,
- sep='\t',
- encoding='utf-16',
- nrows=2,
- index_col=False,
- low_memory=False,
- )
- subsets[name]['columns'] = list(df.columns)
-
- # Count the number of data points for each subset.
- for key, values in subsets.items():
- number_of_cols = len(values['columns'])
- data_points = values['observations'] * number_of_cols
- print(f'{key}: {data_points:,}', 'data points.')
-
-
- #-----------------------------------------------------------------------
- # Augment license data.
- #-----------------------------------------------------------------------
-
- # Read licensee data.
- # licensees = ccrs.read_licensees()
- licensees = pd.read_csv(
- f'{DATA_DIR}/{SUB_DIR}/Licensee_0/Licensee_0.csv',
- sep='\t',
- encoding='utf-16',
- index_col=False,
- low_memory=False,
- )
- licensees.columns = [camel_to_snake(x) for x in licensees.columns]
-
- # Restrict to active licensees.
- licensees = licensees.loc[licensees['license_status'] == 'Active']
-
- # TODO: Geocode licensees.
-
- # TODO: Figure out `license_type`.
-
- # TODO: Save augmented licensees.
-
-
- #-----------------------------------------------------------------------
- # Augment strain data.
- #-----------------------------------------------------------------------
-
- # Read strain data.
- strains = pd.read_csv(
- f'{DATA_DIR}/{SUB_DIR}/Strains_0/Strains_0.csv',
- sep='\t',
- # sep=',',
- encoding='utf-16',
- index_col=False,
- # skiprows=range(2, 901),
- engine='python',
- quotechar='"',
- nrows=2000,
- error_bad_lines=False,
- )
- strains.columns = [camel_to_snake(x) for x in strains.columns]
-
- # FIXME: First 899 rows are misaligned.
- strains = strains.iloc[900:]
-
-
- #------------------------------------------------------------------------------
- # Manage lab result data.
- #------------------------------------------------------------------------------
-
- # # Read lab results.
- # lab_results = ccrs.read_lab_results()
-
- # # Note: Sometimes "Not Tested" is a `test_value`.
- # lab_results['test_value'] = pd.to_numeric(lab_results['test_value'], errors='coerce')
-
- # # Remove lab results with `created_date` in the past.
- # lab_results = lab_results.loc[lab_results['created_date'] >= pd.to_datetime(START)]
-
- # # Identify all of the labs.
- # lab_ids = list(lab_results['lab_licensee_id'].unique())
-
- # # Trend analytes by day by lab.
- # group = [pd.Grouper(key='created_date', freq='M'), 'test_name', 'lab_licensee_id']
- # trending = lab_results.groupby(group, as_index=True)['test_value'].mean()
-
- # # Visualize all analytes!!!
- # tested_analytes = list(trending.index.get_level_values(1).unique())
- # for analyte in tested_analytes:
- # fig, ax = plt.subplots(figsize=(8, 5))
- # idx = pd.IndexSlice
- # for lab_id in lab_ids:
- # try:
- # lab_samples = trending.loc[idx[:, analyte, lab_id]]
- # if len(lab_samples) > 0:
- # lab_samples.plot(
- # ax=ax,
- # label=lab_id,
- # )
- # except KeyError:
- # pass
- # plt.legend(title='Lab ID', loc='upper right')
- # plt.title(f'Average {analyte} by Lab in Washington')
- # plt.show()
-
- # # TODO: Save trending!
-
- # # Calculate failure rate by lab.
-
- # # TODO: Calculate failure rate by licensee.
- # # fail = lab_results.loc[lab_results['LabTestStatus'] == 'Fail']
-
- # # Get lab prices.
-
- # # Estimate laboratory revenue.
-
- # # Estimate laboratory market share.
-
- # # TODO: Estimate amount spent on lab testing by licensee.
-
-
- #-----------------------------------------------------------------------
- # CCRS data exploration.
- #-----------------------------------------------------------------------
-
- # # Initialize a CCRS client.
- # config = dotenv_values(ENV_FILE)
- # os.environ['CANNLYTICS_API_KEY'] = config['CANNLYTICS_API_KEY']
- # os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = config['GOOGLE_APPLICATION_CREDENTIALS']
- # ccrs = CCRS(data_dir=DATA_DIR)
-
- # # Read licensee data.
- # licensees = ccrs.read_licensees()
-
- # # Read areas data.
- # areas = ccrs.read_areas()
-
- # # Read inventory data.
- # inventory = ccrs.read_inventory(limit=100_000)
-
- # # Wishlist: Augment with licensee data with licensee_id
-
- # # Wishlist: Augment with strain data with strain_id
-
- # # Wishlist: Augment product data with product_id
-
- # # Optional: Explore interesting fields:
- # # - quantity_on_hand
- # # - total_cost
- # # - created_date
-
- # # Optional: Count inventory items by date for each licensee?
-
- # # Estimate Cost of Goods Sold (CoGS) (Poor data for this metric).
- # cogs = (inventory.initial_quantity - inventory.quantity_on_hand) * inventory.total_cost
-
- # # Read inventory adjustment data.
- # adjustments = ccrs.read_inventory_adjustments()
-
- # # Wishlist: Merge inventory details
- # # inventory_adjustments = pd.merge()
-
- # # Highlight imperfect system.
- # lost = adjustments.loc[adjustments.inventory_adjustment_reason == 'Lost']
- # theft = adjustments.loc[adjustments.inventory_adjustment_reason == 'Theft']
- # seized = adjustments.loc[adjustments.inventory_adjustment_reason == 'Seizure']
- # other = adjustments.loc[adjustments.inventory_adjustment_reason == 'Other']
- # not_found = lost.loc[lost['adjustment_detail'].astype(str).str.contains('not found', case=False)]
-
- # # Read plant data.
- # plants = ccrs.read_plants()
-
- # # Wishlist: Augment with strain data.
- # # StrainId is missing from strain data! And all plant StrainIds are 1...
- # strains = ccrs.read_strains()
-
- # # Wishlist: Augment with area data.
- # # Area data is missing AreaId.
-
- # # Wishlist: Augment with licensee data.
- # # Licensee data is missing LicenseeId
-
- # # TODO: Calculate number of plants by type by day, week, month, year
- # # for each licensee.
- # # This may have to be done by looking at created_date and harvest_date.
-
- # # TODO: Estimate wholesale sales by licensee_id
-
- # # Estimate growing period.
- # final_states = ['Harvested', 'Drying', 'Sold']
- # harvested = plants.loc[plants.plant_state.isin(final_states)]
- # grow_days = (harvested.harvest_date - harvested.created_date).dt.days
- # grow_days = grow_days.loc[(grow_days > 30) & (grow_days < 365)]
- # grow_days.describe()
- # grow_days.hist(bins=100)
- # plt.show()
-
- # # TODO: Estimate a production function (yield per plant).
-
- # # # Optional: See who is transferring plants to whom.
- # # # InventoryPlantTransfer_0
- # # # FromLicenseeId, ToLicenseeId, FromInventoryId, ToInventoryId, TransferDate
-
- # # Read plant destruction data.
- # destructions = ccrs.read_plant_destructions()
-
- # # Look at the reasons for destruction.
- # destructions['destruction_reason'].value_counts().plot(kind='pie')
-
- # # Look at contaminants.
- # mites = destructions.loc[destructions.destruction_reason == 'Mites']
- # contaminated = destructions.loc[destructions.destruction_reason == 'Contamination']
-
- # # Plot plants destroyed by mites per day.
- # mites_by_day = mites.groupby('destruction_date')['plant_id'].count()
- # mites_by_day.plot()
- # plt.title('Number of Plants Destroyed by Mites in Washington')
- # plt.show()
-
- # # Plot plants destroyed by contamination per day.
- # contaminated_by_day = contaminated.groupby('destruction_date')['plant_id'].count()
- # contaminated_by_day.plot()
- # plt.title('Number of Contaminated Plants in Washington')
- # plt.show()
-
- # # # TODO: Calculate daily risk of plant death.
- # # destructions_by_day = destructions.groupby('destruction_date')['plant_id'].count()
- # # # plants_by_day =
- # # # plant_risk =
-
- # # Saturday Morning Statistics teaser:
- # # Capital asset pricing model (CAPM) or...
- # # Plant liability asset net total model (PLANTM) ;)
-
-
- #------------------------------------------------------------------------------
- # Manage product data.
- #------------------------------------------------------------------------------
-
- # # Read product data.
- # products = ccrs.read_products(limit=100_000)
-
- # # Look at products by day by licensee.
- # products_by_day = products.groupby(['licensee_id', 'created_date'])['name'].count()
-
- # # Wishlist: There is a reference to InventoryTypeId but not inventory type data.
-
- # # Wishlist: Match with licensee data with licensee_id
-
-
- #------------------------------------------------------------------------------
- # Manage sales data.
- #------------------------------------------------------------------------------
-
- # # Read sale header data.
- # sale_headers = ccrs.read_sale_headers()
-
- # # Read sale detail data.
- # sale_details = ccrs.read_sale_details()
-
- # # Calculate total price and total tax.
- # sale_details['total_tax'] = sale_details['sales_tax'] + sale_details['other_tax']
- # sale_details['total_price'] = sale_details['unit_price'] - abs(sale_details['discount']) + sale_details['total_tax']
-
- # sale_details = pd.merge(
- # sale_details,
- # sale_headers,
- # left_on='sale_header_id',
- # right_on='sale_header_id',
- # how='left',
- # validate='m:1',
- # suffixes=(None, '_header'),
- # )
-
- # # Calculate total transactions, average transaction, and total sales by retailer.
- # transactions = sale_details.groupby(['sale_header_id', 'licensee_id'], as_index=False)
- # transaction_amount = transactions['total_price'].sum()
- # avg_transaction_amount = transaction_amount.groupby('licensee_id')['total_price'].mean()
-
- # # Calculate transactions and sales by day.
- # daily = sale_details.groupby(['sale_date', 'licensee_id'], as_index=False)
- # daily_sales = daily['total_price'].sum()
- # daily_transactions = daily['total_price'].count()
- # group = ['sale_date', 'licensee_id', 'sale_header_id']
- # daily_avg_transaction_amount = sale_details.groupby(group, as_index=False)['total_price'].mean()
-
- # # TODO: Aggregate statistics by day and licensee.
-
- # # TODO: Calculate year-to-date statistics for each licensee.
-
- # # FIXME: Figure out how to connect sale_headers.licensee_id with licensees.license_number?
-
- # # TODO: Break down by sale type:
- # # 'RecreationalRetail', 'RecreationalMedical', 'Wholesale'
-
- # # TODO: Try to match sale_items.inventory_id to other details?
-
-
- #------------------------------------------------------------------------------
- # Manage transfer data.
- #------------------------------------------------------------------------------
-
- # # Read transfer data.
- # transfers = ccrs.read_transfers()
-
- # # TODO: Get list of license numbers / addresses from transfers.
-
- # # Future work: Look at number of items, etc. for each transfer.
-
-
- #------------------------------------------------------------------------------
- # Future work: Augment the data.
- #------------------------------------------------------------------------------
-
- # Get Fed FRED data pertinent to geographic area.
-
- # Get Census data pertinent to geographic area.
-
-
- #------------------------------------------------------------------------------
- # Future work: Estimate ARIMAX for every variable.
- #------------------------------------------------------------------------------
-
- # Estimate each variable by licensee in 2022 by day, month, week, and year-end:
- # - total sales
- # - number of transactions (Poisson model)
- # - average transaction amount
- # - number of failures (Poisson model)
-
-
- #------------------------------------------------------------------------------
- # Save the data and statistics, making the data available for future use.
- #------------------------------------------------------------------------------
-
- # # Save all the statistics and forecasts to the local data archive.
- # ccrs.save(lab_results, 'D:\\data\\washington\\stats\\daily_sales.xlsx')
-
- # # Upload all the statistics and forecasts to make them available
- # # through the Cannlytics API and Cannlytics Website.
- # ccrs.upload(lab_results, 'lab_results', id_field='lab_result_id')
-
- # # Get all data and statistics from the API!
- # base = 'http://127.0.0.1:8000/api'
- # ccrs.get('lab_results', limit=100, base=base)
-
-
- #-----------------------------------------------------------------------
- # Read lab results data.
- #-----------------------------------------------------------------------
-
- # 1. Read Leaf lab results.
- # 2. Sort the data, removing null observations.
- # 3. Define a lab ID for each observation and remove attested lab results.
-
- #-----------------------------------------------------------------------
- # Augment lab result data with inventory data.
- #-----------------------------------------------------------------------
-
-
- #-----------------------------------------------------------------------
- # Augment lab result data with inventory type data.
- #-----------------------------------------------------------------------
-
-
- #-----------------------------------------------------------------------
- # Augment lab result data with strain data.
- #-----------------------------------------------------------------------
-
-
- #-----------------------------------------------------------------------
- # Augment lab result data with GIS data.
- #-----------------------------------------------------------------------
-
-
- #-----------------------------------------------------------------------
- # Augment lab result data with the labs' licensee data.
- #-----------------------------------------------------------------------
algorithms/get_results_washington_leaf.py DELETED
@@ -1,490 +0,0 @@
1
- """
2
- Cannabis Tests | Get Washington Test Result Data
3
- Copyright (c) 2022 Cannlytics
4
-
5
- Authors:
6
- Keegan Skeate <https://github.com/keeganskeate>
7
- Created: 1/11/2022
8
- Updated: 9/16/2022
9
- License: CC-BY 4.0 <https://huggingface.co/datasets/cannlytics/cannabis_tests/blob/main/LICENSE>
10
-
11
- Description: This script combines relevant fields from the licensees, inventories,
12
- inventory types, and strains datasets with the lab results data. Lab results are
13
- augmented with licensees, inventories, inventory types, and strains data.
14
-
15
- Data sources:
16
-
17
- - WA State Traceability Data January 2018 - November 2021
18
- https://lcb.app.box.com/s/e89t59s0yb558tjoncjsid710oirqbgd?page=1
19
- https://lcb.app.box.com/s/e89t59s0yb558tjoncjsid710oirqbgd?page=2
20
-
21
- Data Guide:
22
-
23
- - Washington State Leaf Data Systems Guide
24
- https://lcb.wa.gov/sites/default/files/publications/Marijuana/traceability/WALeafDataSystems_UserManual_v1.37.5_AddendumC_LicenseeUser.pdf
25
-
26
- Data available at:
27
-
28
- - https://cannlytics.com/data/market/augmented-washington-state-lab-results
29
- - https://cannlytics.com/data/market/augmented-washington-state-licensees
30
-
31
- """
32
- # Standard imports.
33
- import gc
34
- import json
35
-
36
- # External imports.
37
- import pandas as pd
38
-
39
- # Internal imports.
40
- from utils import get_number_of_lines
41
-
42
- #------------------------------------------------------------------------------
43
- # Read lab results data.
44
- #------------------------------------------------------------------------------
45
-
46
- def read_lab_results(
47
- columns=None,
48
- fields=None,
49
- date_columns=None,
50
- nrows=None,
51
- data_dir='../.datasets',
52
- ):
53
- """
54
- 1. Read Leaf lab results.
55
- 2. Sort the data, removing null observations.
56
- 3. Define a lab ID for each observation and remove attested lab results.
57
- """
58
- shards = []
59
- lab_datasets = ['LabResults_0', 'LabResults_1', 'LabResults_2']
60
- for dataset in lab_datasets:
61
- lab_data = pd.read_csv(
62
- f'{data_dir}/{dataset}.csv',
63
- sep='\t',
64
- encoding='utf-16',
65
- usecols=columns,
66
- dtype=fields,
67
- parse_dates=date_columns,
68
- nrows=nrows,
69
- )
70
- shards.append(lab_data)
71
- del lab_data
72
- gc.collect()
73
- data = pd.concat(shards)
74
- del shards
75
- gc.collect()
76
- data.dropna(subset=['global_id'], inplace=True)
77
- # data.set_index('global_id', inplace=True)
78
- data.sort_index(inplace=True)
79
- data['lab_id'] = data['global_id'].map(lambda x: x[x.find('WAL'):x.find('.')])
80
- data = data.loc[data.lab_id != '']
81
- return data
82
-
83
-
84
- #------------------------------------------------------------------------------
85
- # Combine lab result data with inventory data.
86
- #------------------------------------------------------------------------------
87
-
88
- # Define necessary lab result fields.
89
- lab_result_fields = {
90
- 'global_id' : 'string',
91
- 'global_for_inventory_id': 'string'
92
- }
93
-
94
- # Read lab result fields necessary to connect with inventory data.
95
- lab_results = read_lab_results(
96
- columns=list(lab_result_fields.keys()),
97
- fields=lab_result_fields,
98
- )
99
-
100
- # Save initial enhanced lab results.
101
- lab_results.to_csv('../.datasets/augmented_lab_results.csv')
102
-
103
- # Define inventory fields.
104
- inventory_fields = {
105
- 'global_id' : 'string',
106
- 'inventory_type_id': 'string',
107
- 'strain_id': 'string',
108
- }
109
- inventory_columns = list(inventory_fields.keys())
110
-
111
- # Define chunking parameters.
112
- # inventory_type_rows = get_number_of_lines('../.datasets/Inventories_0.csv')
113
- inventory_row_count = 129_920_072
114
- chunk_size = 30_000_000
115
- read_rows = 0
116
- skiprows = None
117
- datatypes = {
118
- 'global_id' : 'string',
119
- 'global_for_inventory_id': 'string',
120
- 'lab_id': 'string',
121
- 'inventory_type_id': 'string',
122
- 'strain_id': 'string',
123
- }
124
-
125
- # Read in a chunk at a time, match with lab results, and save the data.
126
- while read_rows < inventory_row_count:
127
-
128
- # Define the chunk size.
129
- if read_rows:
130
- skiprows = [i for i in range(1, read_rows)]
131
-
132
- # 1. Open enhanced lab results.
133
- lab_results = pd.read_csv(
134
- '../.datasets/lab_results_with_ids.csv',
135
- # index_col='global_id',
136
- dtype=datatypes
137
- )
138
-
139
- # 2. Read chunk of inventories.
140
- inventories = pd.read_csv(
141
- '../.datasets/Inventories_0.csv',
142
- sep='\t',
143
- encoding='utf-16',
144
- usecols=inventory_columns,
145
- dtype=inventory_fields,
146
- skiprows=skiprows,
147
- nrows=chunk_size,
148
- )
149
-
150
- # 3. Merge inventories with enhanced lab results.
151
- inventories.rename(columns={'global_id': 'inventory_id'}, inplace=True)
152
- lab_results = pd.merge(
153
- left=lab_results,
154
- right=inventories,
155
- how='left',
156
- left_on='global_for_inventory_id',
157
- right_on='inventory_id',
158
- )
159
-
160
- # Remove overlapping columns
161
- try:
162
- new_entries = lab_results[['inventory_type_id_y', 'strain_id_x']]
163
- lab_results = lab_results.combine_first(new_entries)
164
- lab_results.rename(columns={
165
- 'inventory_type_id_x': 'inventory_type_id',
166
- 'strain_id_x': 'strain_id',
167
- }, inplace=True)
168
- except KeyError:
169
- pass
170
- extra_columns = ['inventory_id', 'Unnamed: 0', 'inventory_type_id_y',
171
- 'strain_id_y']
172
- lab_results.drop(extra_columns, axis=1, inplace=True, errors='ignore')
173
-
174
- # 4. Save lab results enhanced with IDs.
175
- lab_results.to_csv('../.datasets/lab_results_with_ids.csv')
176
- read_rows += chunk_size
177
- print('Read:', read_rows)
178
-
179
- del new_entries
180
- del inventories
181
- gc.collect()
182
-
183
-
184
- #------------------------------------------------------------------------------
185
- # Combine lab result data with inventory type data.
186
- #------------------------------------------------------------------------------
187
-
188
- results_with_ids = pd.read_csv('../.datasets/lab_results_with_ids.csv')
189
-
190
- # Uncomment if you do not already have inventory_type_names.csv:
191
-
192
- # Get only the inventory names from the inventory types data.
193
- # from augment_inventory_types import augment_inventory_types
194
- # augment_inventory_types()
195
-
196
- # Get only the results with
197
- results_with_ids = results_with_ids[~results_with_ids['inventory_type_id'].isna()]
198
-
199
- # Read in inventory type names.
200
- inventory_type_names = pd.read_csv(
201
- '../.datasets/inventory_type_names.csv',
202
- # index_col='global_id',
203
- dtype={
204
- 'global_id' : 'string',
205
- 'inventory_name': 'string',
206
- }
207
- )
208
-
209
- # Merge enhanced lab results with inventory type names.
210
- results_with_ids = pd.merge(
211
- left=results_with_ids,
212
- right=inventory_type_names,
213
- how='left',
214
- left_on='inventory_type_id',
215
- right_on='global_id',
216
- )
217
- results_with_ids.rename(columns={'global_id_x': 'global_id'}, inplace=True)
218
- results_with_ids.drop(['global_id_y'], axis=1, inplace=True, errors='ignore')
219
-
220
- # Save the lab results enhanced with inventory names.
221
- results_with_ids.to_csv('../.datasets/lab_results_with_inventory_names.csv')
222
-
223
-
224
- #------------------------------------------------------------------------------
225
- # Combine lab result data with strain data.
226
- #------------------------------------------------------------------------------
227
-
228
- # Define strain fields.
229
- strain_fields = {
230
- 'global_id': 'string',
231
- 'name': 'string',
232
- }
233
- strain_columns = list(strain_fields.keys())
234
-
235
- # Read in strain data.
236
- strains = pd.read_csv(
237
- '../.datasets/Strains_0.csv',
238
- sep='\t',
239
- encoding='utf-16',
240
- dtype=strain_fields,
241
- usecols=strain_columns,
242
- )
243
-
244
- # Merge enhanced lab results with strain data.
245
- strains.rename(columns={
246
- 'global_id': 'strain_id',
247
- 'name': 'strain_name',
248
- }, inplace=True)
249
- results_with_ids = pd.merge(
250
- left=results_with_ids,
251
- right=strains,
252
- how='left',
253
- left_on='strain_id',
254
- right_on='strain_id',
255
- )
256
- results_with_ids.rename(columns={'global_id_x': 'global_id'}, inplace=True)
257
- results_with_ids.drop(['global_id_y'], axis=1, inplace=True, errors='ignore')
258
-
259
- # Save the extra lab results fields.
260
- results_with_ids.to_csv('../.datasets/lab_results_with_strain_names.csv')
261
-
262
- #------------------------------------------------------------------------------
263
- # Combine lab result data with geocoded licensee data.
264
- #------------------------------------------------------------------------------
265
-
266
- # Add code variable to lab results with IDs.
267
- results_with_ids['code'] = results_with_ids['global_for_inventory_id'].map(
268
- lambda x: x[x.find('WA'):x.find('.')]
269
- ).str.replace('WA', '')
-
- # Specify the licensee fields.
- licensee_fields = {
-     'global_id': 'string',
-     'code': 'string',
-     'name': 'string',
-     'type': 'string',
-     'address1': 'string',
-     'address2': 'string',
-     'city': 'string',
-     'state_code': 'string',
-     'postal_code': 'string',
- }
- licensee_date_fields = [
-     'created_at',  # No records if issued before 2018-02-21.
- ]
- licensee_columns = list(licensee_fields.keys()) + licensee_date_fields
-
- # Read in the geocoded licensee data.
- licensees = pd.read_csv(
-     # '../.datasets/Licensees_0.csv',
-     '../.datasets/geocoded_licensee_data.csv',
-     # sep='\t',
-     # encoding='utf-16',
-     usecols=licensee_columns,
-     dtype=licensee_fields,
-     parse_dates=licensee_date_fields,
- )
-
- # Format the licensees data.
- licensees.rename(columns={
-     'global_id': 'mme_id',
-     'created_at': 'license_created_at',
-     'type': 'license_type',
- }, inplace=True)
-
- # Combine the data sets.
- results_with_ids = pd.merge(
-     left=results_with_ids,
-     right=licensees,
-     how='left',
-     left_on='code',
-     right_on='code',
- )
- results_with_ids.rename(columns={'global_id_x': 'global_id'}, inplace=True)
- results_with_ids.drop(['global_id_y'], axis=1, inplace=True, errors='ignore')
-
- # Save the lab results enhanced with licensee data.
- results_with_ids.to_csv('../.datasets/lab_results_with_licensee_data.csv')
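The merge, rename, drop pattern above repeats for inventory names, strains, and licensees. A small helper (hypothetical, not in the original script) could consolidate it:

import pandas as pd

def merge_and_clean(left: pd.DataFrame, right: pd.DataFrame, **kwargs) -> pd.DataFrame:
    """Left-merge two frames, keeping the left frame's `global_id` column."""
    merged = pd.merge(left=left, right=right, how='left', **kwargs)
    merged.rename(columns={'global_id_x': 'global_id'}, inplace=True)
    merged.drop(['global_id_y'], axis=1, inplace=True, errors='ignore')
    return merged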
-
-
- #------------------------------------------------------------------------------
- # TODO: Combine lab result data with the labs' licensee data.
- #------------------------------------------------------------------------------
-
- # Read enhanced lab results.
- results_with_ids = pd.read_csv('../.datasets/lab_results_with_licensee_data.csv')
-
- # TODO: Combine each lab's licensee data:
- # lab_name
- # lab_address1
- # lab_address2
- # lab_city
- # lab_postal_code
- # lab_phone
- # lab_certificate_number
- # lab_global_id
- # lab_code
- # lab_created_at
-
-
- # TODO: Save the data enhanced with the lab's licensee data.
-
- #------------------------------------------------------------------------------
- # Combine lab result data with enhanced lab results data.
- #------------------------------------------------------------------------------
-
- # Read in results with IDs.
- results_with_ids = pd.read_csv(
-     '../.datasets/lab_results_with_licensee_data.csv',
-     dtype={
-         'global_id': 'string',
-         'global_for_inventory_id': 'string',
-         'lab_result_id': 'string',
-         'inventory_type_id': 'string',
-         'lab_id': 'string',
-         'strain_id': 'string',
-         'inventory_name': 'string',
-         'strain_name': 'string',
-         'code': 'string',
-         'mme_id': 'string',
-         'license_created_at': 'string',
-         'name': 'string',
-         'address1': 'string',
-         'address2': 'string',
-         'city': 'string',
-         'state_code': 'string',
-         'postal_code': 'string',
-         'license_type': 'string',
-         # TODO: Re-run with latitude and longitude.
-         'latitude': 'float',
-         'longitude': 'float',
-     },
- )
-
- # Read all lab results fields with any valuable data.
- lab_result_fields = {
-     'global_id': 'string',
-     'intermediate_type': 'category',
-     'status': 'category',
-     'cannabinoid_status': 'category',
-     'cannabinoid_cbc_percent': 'float16',
-     'cannabinoid_cbc_mg_g': 'float16',
-     'cannabinoid_cbd_percent': 'float16',
-     'cannabinoid_cbd_mg_g': 'float16',
-     'cannabinoid_cbda_percent': 'float16',
-     'cannabinoid_cbda_mg_g': 'float16',
-     'cannabinoid_cbdv_percent': 'float16',
-     'cannabinoid_cbg_percent': 'float16',
-     'cannabinoid_cbg_mg_g': 'float16',
-     'cannabinoid_cbga_percent': 'float16',
-     'cannabinoid_cbga_mg_g': 'float16',
-     'cannabinoid_cbn_percent': 'float16',
-     'cannabinoid_cbn_mg_g': 'float16',
-     'cannabinoid_d8_thc_percent': 'float16',
-     'cannabinoid_d8_thc_mg_g': 'float16',
-     'cannabinoid_d9_thca_percent': 'float16',
-     'cannabinoid_d9_thca_mg_g': 'float16',
-     'cannabinoid_d9_thc_percent': 'float16',
-     'cannabinoid_d9_thc_mg_g': 'float16',
-     'cannabinoid_thcv_percent': 'float16',
-     'cannabinoid_thcv_mg_g': 'float16',
-     'solvent_status': 'category',
-     'solvent_acetone_ppm': 'float16',
-     'solvent_benzene_ppm': 'float16',
-     'solvent_butanes_ppm': 'float16',
-     'solvent_chloroform_ppm': 'float16',
-     'solvent_cyclohexane_ppm': 'float16',
-     'solvent_dichloromethane_ppm': 'float16',
-     'solvent_ethyl_acetate_ppm': 'float16',
-     'solvent_heptane_ppm': 'float16',
-     'solvent_hexanes_ppm': 'float16',
-     'solvent_isopropanol_ppm': 'float16',
-     'solvent_methanol_ppm': 'float16',
-     'solvent_pentanes_ppm': 'float16',
-     'solvent_propane_ppm': 'float16',
-     'solvent_toluene_ppm': 'float16',
-     'solvent_xylene_ppm': 'float16',
-     'foreign_matter': 'bool',
-     'foreign_matter_stems': 'float16',
-     'foreign_matter_seeds': 'float16',
-     'microbial_status': 'category',
-     'microbial_bile_tolerant_cfu_g': 'float16',
-     'microbial_pathogenic_e_coli_cfu_g': 'float16',
-     'microbial_salmonella_cfu_g': 'float16',
-     'moisture_content_percent': 'float16',
-     'moisture_content_water_activity_rate': 'float16',
-     'mycotoxin_status': 'category',
-     'mycotoxin_aflatoxins_ppb': 'float16',
-     'mycotoxin_ochratoxin_ppb': 'float16',
-     'thc_percent': 'float16',
-     'notes': 'float32',
-     'testing_status': 'category',
-     'type': 'category',
-     'external_id': 'string',
- }
- lab_result_date_columns = ['created_at', 'updated_at', 'received_at']
- lab_result_columns = list(lab_result_fields.keys()) + lab_result_date_columns
- complete_lab_results = read_lab_results(
-     columns=lab_result_columns,
-     fields=lab_result_fields,
-     date_columns=None,
- )
-
- # Merge lab results with the complete lab results data.
- complete_lab_results.rename(columns={
-     'global_id': 'lab_result_id',
-     'type': 'sample_type',
- }, inplace=True)
- results_with_ids = pd.merge(
-     left=results_with_ids,
-     right=complete_lab_results,
-     how='left',
-     left_on='global_id',
-     right_on='lab_result_id',
- )
- results_with_ids.rename(columns={'lab_id_x': 'lab_id'}, inplace=True)
- results_with_ids.drop([
-     'Unnamed: 0',
-     'Unnamed: 0.1',
-     'global_id',
-     'lab_id_y',
- ], axis=1, inplace=True, errors='ignore')
-
- # TODO: Fill missing cannabinoid percent or mg/g values.
-
- # FIXME: Are missing values posing a problem?
- # Calculate total cannabinoids.
- cannabinoids_wa = [
-     'cannabinoid_d9_thca_percent',
-     'cannabinoid_d9_thc_percent',
-     'cannabinoid_d8_thc_percent',
-     'cannabinoid_thcv_percent',
-     'cannabinoid_cbd_percent',
-     'cannabinoid_cbda_percent',
-     'cannabinoid_cbdv_percent',
-     'cannabinoid_cbg_percent',
-     'cannabinoid_cbga_percent',
-     'cannabinoid_cbc_percent',
-     'cannabinoid_cbn_percent',
- ]
- results_with_ids['total_cannabinoids'] = results_with_ids[cannabinoids_wa].sum(axis=1)
-
- # Save the complete lab results data to CSV and XLSX (JSON pending the FIXME below).
- results_with_ids.to_excel('../.datasets/lab_results_complete.xlsx')
- results_with_ids.to_csv('../.datasets/lab_results_complete.csv')
- # FIXME: NAType is not JSON serializable.
- # with open('../.datasets/lab_results_complete.json', 'w') as outfile:
- #     data = results_with_ids.where(pd.notnull(results_with_ids), '')
- #     data = json.loads(json.dumps(list(data.T.to_dict().values())))
- #     json.dump(data, outfile)
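On the FIXME above: `DataFrame.sum(axis=1)` skips NaN by default, so a row with no cannabinoid measurements sums to 0 rather than NaN. Passing `min_count=1` is one way to keep all-missing rows as NaN (a sketch, not the script's current behavior):

results_with_ids['total_cannabinoids'] = results_with_ids[cannabinoids_wa].sum(
    axis=1,
    min_count=1,  # require at least one non-NaN value, else the total is NaN
)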
algorithms/main.py DELETED
@@ -1,370 +0,0 @@
- """
- Get Cannabis Tests Data
- Copyright (c) 2022 Cannlytics
-
- Authors:
-     Keegan Skeate <https://github.com/keeganskeate>
-     Candace O'Sullivan-Sutherland <https://github.com/candy-o>
- Created: 8/23/2022
- Updated: 9/15/2022
- License: MIT License <https://github.com/cannlytics/cannlytics/blob/main/LICENSE>
-
- Description:
-
-     Periodically curate publicly published lab results by:
-
-     1. Finding products and their COA URLs on the web.
-     2. Downloading COA PDFs from their URLs.
-     3. Using CoADoc to parse the COA PDFs (with OCR).
-     4. Archiving the COA data in Firebase Firestore and Storage.
-
- Data Sources:
-
-     - Raw Garden Lab Results
-       URL: <https://rawgarden.farm/lab-results/>
-
- """
- # # Standard imports.
- # import base64
- # from datetime import datetime, timedelta
- # import os
- # from time import sleep
- # from typing import Any, List, Optional, Tuple
-
- # # External imports.
- # from bs4 import BeautifulSoup
- # from firebase_admin import firestore, initialize_app
- # import pandas as pd
- # import requests
-
- # # Internal imports.
- # from cannlytics.data.coas import CoADoc
- # from cannlytics.firebase import (
- #     get_document,
- #     initialize_firebase,
- #     update_documents,
- #     upload_file,
- # )
- # from cannlytics.utils import kebab_case, rmerge
- # from cannlytics.utils.constants import DEFAULT_HEADERS
-
- # # Specify where your data lives.
- # BUCKET_NAME = 'cannlytics-company.appspot.com'
- # COLLECTION = 'public/data/lab_results'
- # STORAGE_REF = 'data/lab_results/raw_garden'
-
- # # Create temporary directories.
- # DATA_DIR = '/tmp'
- # COA_DATA_DIR = f'{DATA_DIR}/lab_results/raw_garden'
- # COA_PDF_DIR = f'{COA_DATA_DIR}/pdfs'
- # TEMP_PATH = f'{COA_DATA_DIR}/tmp'
- # if not os.path.exists(DATA_DIR): os.makedirs(DATA_DIR)
- # if not os.path.exists(COA_DATA_DIR): os.makedirs(COA_DATA_DIR)
- # if not os.path.exists(COA_PDF_DIR): os.makedirs(COA_PDF_DIR)
- # if not os.path.exists(TEMP_PATH): os.makedirs(TEMP_PATH)
-
- # # Define constants.
- # BASE = 'https://rawgarden.farm/lab-results/'
-
-
- # def get_rawgarden_products(
- #         start: Optional[Any] = None,
- #         end: Optional[Any] = None,
- #     ) -> pd.DataFrame:
- #     """Get Raw Garden's lab results page. Then get all of the product
- #     categories. Finally, get all product data, including: `coa_pdf`,
- #     `lab_results_url`, `product_name`, `product_subtype`, `date_retail`.
- #     Args:
- #         start (str or datetime): A point in time to begin restricting
- #             the product list by `date_retail` (optional).
- #         end (str or datetime): A point in time to end restricting
- #             the product list by `date_retail` (optional).
- #     Returns:
- #         (DataFrame): Returns a DataFrame of product data.
- #     """
-
- #     # Get the website.
- #     response = requests.get(BASE, headers=DEFAULT_HEADERS)
- #     soup = BeautifulSoup(response.content, 'html.parser')
-
- #     # Get all product data listed on the website.
- #     observations = []
- #     categories = soup.find_all('div', attrs={'class': 'category-content'})
- #     for category in categories:
- #         subtype = category.find('h3').text
- #         dates = category.findAll('h5', attrs={'class': 'result-date'})
- #         names = category.findAll('h5')
- #         names = [div for div in names if div.get('class') is None]
- #         links = category.findAll('a')
- #         for i, link in enumerate(links):
- #             try:
- #                 href = link.get('href')
- #                 date = pd.to_datetime(dates[i].text)
- #                 name = names[i].text
- #                 if href.endswith('.pdf'):
- #                     observations.append({
- #                         'coa_pdf': href.split('/')[-1],
- #                         'lab_results_url': href,
- #                         'product_name': name,
- #                         'product_subtype': subtype,
- #                         'date_retail': date,
- #                     })
- #             except AttributeError:
- #                 continue
-
- #     # Restrict the observations to the desired time frame.
- #     results = pd.DataFrame(observations)
- #     dates = results['date_retail']
- #     if start:
- #         if isinstance(start, str):
- #             latest = pd.to_datetime(start)
- #         else:
- #             latest = start
- #         results = results.loc[dates >= latest]
- #     if end:
- #         if isinstance(end, str):
- #             earliest = pd.to_datetime(end)
- #         else:
- #             earliest = end
- #         results = results.loc[dates <= earliest]
- #     results['date_retail'] = dates.apply(lambda x: x.isoformat()[:19])
- #     return results
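If the commented-out function above were re-enabled, a call might look like this (the dates are illustrative):

# Get products listed within an illustrative date range.
products = get_rawgarden_products(start='2022-09-01', end='2022-09-15')
print(products[['product_name', 'product_subtype', 'date_retail']].head())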
-
-
- # def download_rawgarden_coas(
- #         items: pd.DataFrame,
- #         pause: Optional[float] = 0.24,
- #         verbose: Optional[bool] = True,
- #     ) -> None:
- #     """Download Raw Garden product COAs to `product_subtype` folders.
- #     Args:
- #         items (DataFrame): A DataFrame of products with `product_subtype`
- #             and `lab_results_url` to download.
- #         pause (float): A pause to respect the server serving the PDFs,
- #             `0.24` seconds by default (optional).
- #         verbose (bool): Whether or not to print status, `True` by
- #             default (optional).
- #     """
- #     if verbose:
- #         total = len(items)
- #         print('Downloading %i PDFs, ETA > %.2fs' % (total, total * pause))
-
- #     # Create a folder for each of the subtypes.
- #     subtypes = list(items['product_subtype'].unique())
- #     for subtype in subtypes:
- #         folder = kebab_case(subtype)
- #         subtype_folder = f'{COA_PDF_DIR}/{folder}'
- #         if not os.path.exists(subtype_folder):
- #             os.makedirs(subtype_folder)
-
- #     # Download each COA PDF from its URL to a `product_subtype` folder.
- #     for i, row in enumerate(items.iterrows()):
- #         item = row[1]
- #         url = item['lab_results_url']
- #         subtype = item['product_subtype']
- #         filename = url.split('/')[-1]
- #         folder = kebab_case(subtype)
- #         outfile = os.path.join(COA_PDF_DIR, folder, filename)
- #         response = requests.get(url, headers=DEFAULT_HEADERS)
- #         with open(outfile, 'wb') as pdf:
- #             pdf.write(response.content)
- #         if verbose:
- #             message = 'Downloaded {}/{} | {}/{}'
- #             message = message.format(str(i + 1), str(total), folder, filename)
- #             print(message)
- #         sleep(pause)
-
-
- # def parse_rawgarden_coas(
- #         directory: str,
- #         filenames: Optional[list] = None,
- #         temp_path: Optional[str] = '/tmp',
- #         verbose: Optional[bool] = True,
- #         **kwargs,
- #     ) -> Tuple[list]:
- #     """Parse Raw Garden lab results with CoADoc.
- #     Args:
- #         directory (str): The directory of files to parse.
- #         filenames (list): A list of files to parse (optional).
- #         temp_path (str): A temporary directory to use for any OCR (optional).
- #         verbose (bool): Whether or not to print status, `True` by
- #             default (optional).
- #     Returns:
- #         (tuple): Returns both a list of parsed and unidentified COA data.
- #     """
- #     parser = CoADoc()
- #     parsed, unidentified = [], []
- #     started = False
- #     for path, _, files in os.walk(directory):
- #         if verbose and not started:
- #             started = True
- #             if filenames:
- #                 total = len(filenames)
- #             else:
- #                 total = len(files)
- #             print('Parsing %i COAs, ETA > %.2fm' % (total, total * 25 / 60))
- #         for filename in files:
- #             if not filename.endswith('.pdf'):
- #                 continue
- #             if filenames is not None:
- #                 if filename not in filenames:
- #                     continue
- #             doc = os.path.join(path, filename)
- #             try:
- #                 # FIXME: Make API request to Cannlytics? Tesseract, etc.
- #                 # are going to be too heavy for a cloud function.
- #                 coa = parser.parse(doc, temp_path=temp_path, **kwargs)
- #                 subtype = path.split('\\')[-1]
- #                 coa[0]['product_subtype'] = subtype
- #                 parsed.extend(coa)
- #                 if verbose:
- #                     print('Parsed:', filename)
- #             except Exception as e:
- #                 unidentified.append({'coa_pdf': filename})
- #                 if verbose:
- #                     print('Error:', filename)
- #                     print(e)
- #                 pass
- #     return parsed, unidentified
-
-
- # def upload_lab_results(
- #         observations: List[dict],
- #         collection: Optional[str] = None,
- #         database: Optional[Any] = None,
- #         update: Optional[bool] = True,
- #         verbose: Optional[bool] = True,
- #     ) -> None:
- #     """Upload lab results to Firestore.
- #     Args:
- #         observations (list): A list of lab results to upload.
- #         collection (str): The Firestore collection where lab results live,
- #             `'public/data/lab_results'` by default (optional).
- #         database (Client): A Firestore database instance (optional).
- #         update (bool): Whether or not to update existing entries, `True`
- #             by default (optional).
- #         verbose (bool): Whether or not to print status, `True` by
- #             default (optional).
- #     """
- #     if collection is None:
- #         collection = COLLECTION
- #     if database is None:
- #         database = initialize_firebase()
- #     refs, updates = [], []
- #     for obs in observations:
- #         sample_id = obs['sample_id']
- #         ref = f'{collection}/{sample_id}'
- #         if not update:
- #             doc = get_document(ref)
- #             if doc is not None:
- #                 continue
- #         refs.append(ref)
- #         updates.append(obs)
- #     if updates:
- #         if verbose:
- #             print('Uploading %i lab results.' % len(refs))
- #         update_documents(refs, updates, database=database)
- #         if verbose:
- #             print('Uploaded %i lab results.' % len(refs))
-
-
- def main(event, context):
-     """Archive Raw Garden data on a periodic basis.
-     Triggered from a message on a Cloud Pub/Sub topic.
-     Args:
-         event (dict): Event payload.
-         context (google.cloud.functions.Context): Metadata for the event.
-     """
-     raise NotImplementedError
-
-     # # Check that the Pub/Sub message is valid.
-     # pubsub_message = base64.b64decode(event['data']).decode('utf-8')
-     # if pubsub_message != 'success':
-     #     return
-
-     # # Get the most recent Raw Garden products.
-     # DAYS_AGO = 1
-     # start = datetime.now() - timedelta(days=DAYS_AGO)
-     # products = get_rawgarden_products(start=start)
-
-     # # Download Raw Garden product COAs to `product_subtype` folders.
-     # download_rawgarden_coas(products, pause=0.24, verbose=True)
-
-     # # Parse COA PDFs with CoADoc.
-     # coa_data, unidentified_coas = parse_rawgarden_coas(
-     #     COA_PDF_DIR,
-     #     filenames=products['coa_pdf'].to_list(),
-     #     temp_path=TEMP_PATH,
-     #     verbose=True,
-     # )
-
-     # # Merge the `products`'s `product_subtype` with the COA data.
-     # coa_dataframe = rmerge(
-     #     pd.DataFrame(coa_data),
-     #     products,
-     #     on='coa_pdf',
-     #     how='left',
-     #     replace='right',
-     # )
-
-     # # Optional: Save the COA data to a workbook.
-     # parser = CoADoc()
-     # timestamp = datetime.now().isoformat()[:19].replace(':', '-')
-     # datafile = f'{COA_DATA_DIR}/rawgarden-coa-data-{timestamp}.xlsx'
-     # parser.save(coa_dataframe, datafile)
-
-     # # Optional: Save the unidentified COA data.
-     # errors = [x['coa_pdf'] for x in unidentified_coas]
-     # error_file = f'{COA_DATA_DIR}/rawgarden-unidentified-coas-{timestamp}.xlsx'
-     # products.loc[products['coa_pdf'].isin(errors)].to_excel(error_file)
-
-     # # Initialize Firebase.
-     # # FIXME: Ideally use the internal initialization.
-     # try:
-     #     initialize_app()
-     # except ValueError:
-     #     pass
-     # database = firestore.client()
-
-     # # Optional: Upload the lab results to Firestore.
-     # upload_lab_results(
-     #     coa_dataframe.to_dict(orient='records'),
-     #     database=database,
-     #     update=False,
-     #     verbose=False,
-     # )
-
-     # # Optional: Upload datafiles to Firebase Storage.
-     # storage_error_file = '/'.join([STORAGE_REF, error_file.split('/')[-1]])
-     # upload_file(storage_error_file, error_file, bucket_name=BUCKET_NAME)
-
-
- # === Test ===
- if __name__ == '__main__':
-
-     from cannlytics.utils import encode_pdf
-     from cannlytics.utils.constants import DEFAULT_HEADERS
-     import requests
-
-     # [✓] TEST: Mock the Google Cloud Function scheduled routine.
-     # event = {'data': base64.b64encode('success'.encode())}
-     # main(event, context={})
-
-     # # [ ] TEST: Post a PDF to the Cannlytics API for parsing.
-     # # FIXME:
-     # coa_doc_api = 'https://cannlytics.com/api/data/coas'
-     # folder = 'tests/assets/coas/'
-     # filename = f'{folder}/210000525-Citrus-Slurm-Diamonds.pdf'
-     # # files = {'upload_file': open(filename, 'rb')}
-     # # values = {'lims': 'Cannalysis'}
-     # # response = requests.post(base, files=files, data=values)
-     # with open(filename, 'rb') as f:
-     #     response = requests.post(
-     #         coa_doc_api,
-     #         headers=DEFAULT_HEADERS,
-     #         files={'file': f}
-     #     )
-     # print(response.status_code)
-
-     # # Optional: Also allow for encoding of PDFs.
-     # encoded_pdf = encode_pdf(filename)
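Taken together, the commented-out pieces compose the four steps in the module docstring. A local driver would look roughly like this (a sketch that assumes the imports and functions above are uncommented):

from datetime import datetime, timedelta

# 1. Find recent products and their COA URLs.
products = get_rawgarden_products(start=datetime.now() - timedelta(days=1))

# 2. Download the COA PDFs.
download_rawgarden_coas(products, pause=0.24, verbose=True)

# 3. Parse the COA PDFs with CoADoc.
coa_data, unidentified = parse_rawgarden_coas(
    COA_PDF_DIR,
    filenames=products['coa_pdf'].to_list(),
    temp_path=TEMP_PATH,
)

# 4. Archive the parsed COA data.
upload_lab_results(pd.DataFrame(coa_data).to_dict(orient='records'))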
analysis/analyze_results.py ADDED
@@ -0,0 +1,79 @@
+
+
+ from typing import List
+ import pandas as pd
+
+
+ def calc_results_stats(
+     results: pd.DataFrame,
+     cannabinoid_keys: List[str] = None,
+     terpene_keys: List[str] = None,
+     cbd_key: str = 'cbd',
+     cbda_key: str = 'cbda',
+     thc_key: str = 'delta_9_thc',
+     thca_key: str = 'thca',
+     decarb: float = 0.877,
+ ) -> pd.DataFrame:
+     """Calculate statistics for the results."""
+     # Calculate total cannabinoids.
+     if cannabinoid_keys is not None:
+         results['total_cannabinoids'] = results[cannabinoid_keys].sum(axis=1)
+         results['total_thc'] = results[thc_key] + decarb * results[thca_key]
+         results['total_cbd'] = results[cbd_key] + decarb * results[cbda_key]
+         results['thc_to_cbd_ratio'] = results['total_thc'] / results['total_cbd']
+
+     # Calculate total terpenes.
+     if terpene_keys is not None:
+         results['total_terpenes'] = results[terpene_keys].sum(axis=1)
+         results['beta_pinene_to_d_limonene_ratio'] = (
+             results['beta_pinene'] / results['d_limonene']
+         )
+         # TODO: Add other terpene ratios.
+         # TODO: Identify mono- and sesquiterpenes.
+         # results['monoterpene_to_sesquiterpene_ratio'] = (
+         #     results['total_monoterpenes'] / results['total_sesquiterpenes']
+         # )
+
+     return results
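The default `decarb = 0.877` is the standard molecular-weight ratio used to estimate decarboxylated THC from THCA (approximately 314.46 / 358.47). A usage sketch, assuming the default column names are present in the data:

results = calc_results_stats(
    results,
    cannabinoid_keys=['delta_9_thc', 'thca', 'cbd', 'cbda'],
    terpene_keys=['beta_pinene', 'd_limonene'],
)
print(results[['total_thc', 'total_cbd', 'thc_to_cbd_ratio']].describe())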
+
+ def calc_aggregate_results_stats(
+     results: pd.DataFrame,
+     cannabinoid_keys: List[str] = None,
+     terpene_keys: List[str] = None,
+ ) -> pd.DataFrame:
+     """Calculate aggregate statistics for the results."""
+
+     def calculate_statistics(group: pd.DataFrame, name: str) -> pd.DataFrame:
+         """Calculate mean, median, std, and percentiles for a given group."""
+         stats = group.describe(percentiles=[.25, .50, .75]).T
+         stats['period'] = name
+         stats = stats[['period', 'mean', '50%', 'std', '25%', '75%']].rename(
+             columns={'50%': 'median', '25%': 'percentile_25', '75%': 'percentile_75'})
+         return stats
+
+     # Create the timeseries.
+     results['date_tested'] = pd.to_datetime(results['date_tested'])
+     results.set_index('date_tested', inplace=True)
+     periods = {
+         'daily': results.resample('D'),
+         'weekly': results.resample('W'),
+         'monthly': results.resample('M'),
+         'quarterly': results.resample('Q'),
+         'yearly': results.resample('Y')
+     }
+
+     # Calculate statistics for each period.
+     all_stats = []
+     for period_name, period_group in periods.items():
+         if cannabinoid_keys:
+             cannabinoid_stats = calculate_statistics(period_group[cannabinoid_keys], period_name)
+             cannabinoid_stats['type'] = 'cannabinoid'
+             all_stats.append(cannabinoid_stats)
+         if terpene_keys:
+             terpene_stats = calculate_statistics(period_group[terpene_keys], period_name)
+             terpene_stats['type'] = 'terpene'
+             all_stats.append(terpene_stats)
+
+     # Return the statistics.
+     stats = pd.concat(all_stats).reset_index().rename(columns={'index': 'compound'})
+     return stats
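A usage sketch; note that the function converts `date_tested` to a datetime and sets it as the index in place, so the input frame is mutated:

stats = calc_aggregate_results_stats(
    results,
    cannabinoid_keys=['delta_9_thc', 'thca'],
    terpene_keys=['beta_pinene', 'd_limonene'],
)
monthly = stats[(stats['period'] == 'monthly') & (stats['type'] == 'cannabinoid')]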
analysis/analyze_results_ca.py ADDED
@@ -0,0 +1,320 @@
+ """
+ Analyze Results | California
+ Copyright (c) 2023-2024 Cannlytics
+
+ Authors: Keegan Skeate <https://github.com/keeganskeate>
+ Created: 12/10/2023
+ Updated: 8/15/2024
+ License: MIT License <https://github.com/cannlytics/cannabis-data-science/blob/main/LICENSE>
+ """
+ # Standard imports:
+ from datetime import datetime
+ import json
+ import os
+ from typing import List, Optional
+ import warnings
+
+ # External imports:
+ from cannlytics.data.cache import Bogart
+ from cannlytics.data.coas import standardize_results
+ from cannlytics.data.coas.parsing import (
+     find_unique_analytes,
+     get_coa_files,
+     parse_coa_pdfs,
+ )
+ from cannlytics.firebase import initialize_firebase
+ from cannlytics.compounds import cannabinoids, terpenes
+ from dotenv import dotenv_values
+ import pandas as pd
+
+ # Ignore all UserWarnings.
+ warnings.filterwarnings("ignore", category=UserWarning)
+
+ # Internal imports:
+ # from analyze_results import calc_results_stats, calc_aggregate_results_stats
+
+
+ def analyze_results_ca(
+     cache_path: str,
+     pdf_dir: str,
+     reverse: bool = False,
+ ) -> pd.DataFrame:
+     """
+     Analyze California lab results.
+
+     Args:
+         cache_path (str): The path to the cache file.
+         pdf_dir (str): The directory where the PDFs are stored.
+         reverse (bool): Whether to reverse the order of the results.
+
+     Returns:
+         pd.DataFrame: The analyzed results.
+     """
+     # Initialize the cache.
+     cache = Bogart(cache_path)
+
+     # TODO: Remove duplicates in the PDF dir.
+
+     # Get all of the PDFs.
+     pdfs = get_coa_files(pdf_dir)
+
+     # Sort the PDFs by modified date.
+     pdfs.sort(key=os.path.getmtime)
+
+     # Parse the PDFs.
+     all_results = parse_coa_pdfs(pdfs, cache=cache, reverse=reverse)
+
+     return all_results
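For the TODO above, one approach (a sketch, not part of the pipeline) is to hash each PDF's contents and flag files that share a digest:

import hashlib
import os

def find_duplicate_pdfs(pdf_dir: str) -> dict:
    """Map each content hash to the list of files that share it."""
    hashes = {}
    for root, _, files in os.walk(pdf_dir):
        for name in files:
            if not name.endswith('.pdf'):
                continue
            path = os.path.join(root, name)
            with open(path, 'rb') as f:
                digest = hashlib.sha256(f.read()).hexdigest()
            hashes.setdefault(digest, []).append(path)
    return {k: v for k, v in hashes.items() if len(v) > 1}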
+
+ # === Test ===
+ if __name__ == '__main__':
+
+     analyze_results_ca(
+         cache_path='D://data/.cache/results-ca.jsonl',
+         pdf_dir='D://data/california/results/pdfs',
+         reverse=True,
+     )
+
+     # Read the cache.
+     results = Bogart('D://data/.cache/results-ca.jsonl').to_df()
+     print('Read %i results from cache.' % len(results))
+
+     # Separate the errors.
+     errors = results[~results['error'].isna()]
+     results = results[results['error'].isna()]
+     print('Number of errors:', len(errors))
+     print('Number of valid results:', len(results))
+
+     # === DEV ===
+
+     # # Identify all of the unique errors.
+     # # TODO: Fix the errors.
+     # unique_errors = errors['error'].unique()
+     # # print(errors['error'].value_counts())
+
+     # def find_example_coa_for_errors(errors_df, error_counts):
+     #     sorted_errors = error_counts.index.tolist()
+     #     example_coas = []
+     #     for error in sorted_errors:
+     #         example_coa_pdf = errors_df[errors_df['error'] == error].iloc[0]['coa_pdf']
+     #         example_coas.append({'error': error, 'example_coa_pdf': example_coa_pdf})
+     #     return pd.DataFrame(example_coas)
+
+     # # Get example COAs for each unique error.
+     # error_counts = errors['error'].value_counts()
+     # example_coas = find_example_coa_for_errors(errors, error_counts)
+
+     # # Display the examples.
+     # print("Example COAs for each unique error:")
+     # print(example_coas)
+
+     # TODO: Figure out why there are duplicates.
+
+     # # Group by `coa_pdf` to find duplicates.
+     # duplicate_groups = results[results.duplicated(subset=['coa_pdf'], keep=False)]
+     # grouped = duplicate_groups.groupby('coa_pdf')
+     # for coa_pdf, group in grouped:
+     #     print(f'\nCOA PDF: {coa_pdf}')
+     #     unique_hashes = group['sample_hash'].unique()
+     #     if len(unique_hashes) > 1:
+     #         print('- Warning: Different sample_hashes found!')
+     #     else:
+     #         print('- All records have the same sample_hash.')
+
+     # # DEV: Identify the same COA parsed multiple ways.
+     # multiple_coas = results['coa_pdf'].value_counts()
+     # multiple_coas = multiple_coas[multiple_coas > 1]
+     # print('Number of samples with multiple COAs:', len(multiple_coas))
+
+     # Merge SC Labs results, removing duplicates, unfinished results,
+     # and Colorado results.
+     extra_dir = r'D:\data\california\results\datasets\sclabs'
+     datafiles = [os.path.join(extra_dir, x) for x in os.listdir(extra_dir) if 'urls' not in x and 'latest' not in x]
+     sclabs = pd.concat([pd.read_excel(x) for x in datafiles])
+     sclabs = sclabs.drop_duplicates(subset=['sample_hash'])
+     sclabs = sclabs.loc[sclabs['results'] != '[]']
+     sclabs = sclabs.loc[(sclabs['lab_state'] != 'CO')]
+     print('Number of SC Labs results:', len(sclabs))
+
+     # Merge the results.
+     results = pd.concat([results, sclabs])
+
+     # Drop duplicates.
+     results = results.drop_duplicates(subset=['sample_hash'])
+     print('Number of unique results:', len(results))
+
+     # Read constants for processing.
+     # FIXME: This requires the script to be run from this directory.
+     try:
+         script_dir = os.path.dirname(os.path.abspath(__file__))
+     except NameError:
+         script_dir = os.getcwd()
+     processing_config = os.path.join(script_dir, 'processing.json')
+     with open(processing_config, 'r') as f:
+         data = json.load(f)
+     nuisance_analytes = data['nuisance_analytes']
+     nuisance_columns = data['nuisance_columns']
+
+     # Drop all non-standard columns.
+     results.drop(columns=nuisance_columns, errors='ignore', inplace=True)
+
+     # FIXME: Standardize analytes.
+     # analytes = find_unique_analytes(results)
+     # analytes = list(set(analytes) - set(nuisance_analytes))
+     # analytes = sorted(list(analytes))
+     # results = standardize_results(results, analytes)
+
+     # Standardize state.
+     state = 'CA'
+     results['lab_state'] = results['lab_state'].fillna(state)
+     results['producer_state'] = results['producer_state'].fillna(state)
+
+     # Standardize time.
+     results['date'] = pd.to_datetime(results['date_tested'], format='mixed', errors='coerce')
+     results['date'] = results['date'].apply(lambda x: pd.Timestamp(x).tz_localize(None) if pd.notnull(x) else x)
+     results = results.sort_values('date', na_position='last')
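Stripping timezones with `tz_localize(None)` matters downstream: comparing tz-aware and naive timestamps raises errors, and `DataFrame.to_excel` cannot serialize tz-aware datetimes. A minimal illustration:

import pandas as pd

aware = pd.Timestamp('2024-08-15 12:00:00-07:00')  # tz-aware
naive = aware.tz_localize(None)                    # drop the offset
print(naive)  # 2024-08-15 12:00:00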
+
+     # Save the results.
+     outfile = 'D://data/cannabis_results/data/ca/ca-results-latest.xlsx'
+     outfile_csv = 'D://data/cannabis_results/data/ca/ca-results-latest.csv'
+     results.to_excel(outfile, index=False)
+     results.to_csv(outfile_csv, index=False)
+     print('Saved %i results for %s to Excel:' % (len(results), state), outfile)
+     print('Saved %i results for %s to CSV:' % (len(results), state), outfile_csv)
+
+     # Print out features.
+     features = {x: 'string' for x in results.columns}
+     print('Number of features:', len(features))
+     print('Features:', features)
+
+
+     #-----------------------------------------------------------------------
+     # Calculate statistics.
+     #-----------------------------------------------------------------------
+
+     # # Calculate results statistics.
+     # results = calc_results_stats(
+     #     results,
+     #     cannabinoid_keys=cannabinoid_keys,
+     #     terpene_keys=terpene_keys,
+     # )
+
+     # # Calculate aggregate statistics.
+     # stats = calc_aggregate_results_stats(
+     #     results,
+     #     cannabinoid_keys=cannabinoid_keys,
+     #     terpene_keys=terpene_keys,
+     # )
+
+
+     #-----------------------------------------------------------------------
+     # Upload COA PDFs to Google Cloud Storage and data to Firestore.
+     #-----------------------------------------------------------------------
+
+     # FIXME: Refactor into re-usable functions.
+
+     # # Match COA PDFs with the results.
+     # pdf_dir = 'D://data/florida/results/pdfs'
+     # coa_pdfs = {}
+     # for index, result in all_results.iterrows():
+
+     #     # Get the name of the PDF.
+     #     identifier = result['coa_pdf']
+     #     if identifier == 'download.pdf':
+     #         lab_results_url = result['lab_results_url']
+     #         identifier = lab_results_url.split('=')[-1].split('?')[0]
+
+     #     # Find the matching PDF.
+     #     for root, _, files in os.walk(pdf_dir):
+     #         for filename in files:
+     #             if identifier in filename:
+     #                 pdf_path = os.path.join(root, filename)
+     #                 coa_pdfs[result['sample_hash']] = pdf_path
+     #                 break
+
+     # # Initialize Firebase.
+     # config = dotenv_values('.env')
+     # db = initialize_firebase()
+     # bucket_name = config['FIREBASE_STORAGE_BUCKET']
+     # firebase_api_key = config['FIREBASE_API_KEY']
+
+     # # Upload datafiles to Google Cloud Storage.
+     # # Checks if the file has been uploaded according to the local cache.
+     # # FIXME:
+     # for datafile in datafiles:
+     #     filename = os.path.split(datafile)[-1]
+     #     if filename not in cache.get('datafiles', []):
+     #         file_ref = f'data/results/florida/datasets/{filename}'
+     #         # upload_file(
+     #         #     destination_blob_name=file_ref,
+     #         #     source_file_name=datafile,
+     #         #     bucket_name=bucket_name,
+     #         # )
+     #         print('Uploaded:', file_ref)
+     #         # FIXME:
+     #         # cache.setdefault('datafiles', []).append(filename)
+
+     # # Upload PDFs to Google Cloud Storage.
+     # # Checks if the file has been uploaded according to the local cache.
+     # print('Number of unique COA PDFs:', len(coa_pdfs))
+     # for sample_hash, pdf_path in coa_pdfs.items():
+     #     print('Uploading:', pdf_path)
+     #     pdf_hash = cache.hash_file(pdf_path)
+     #     if pdf_hash not in cache.get('pdfs', []):
+
+     #         # Upload the file.
+     #         file_ref = f'data/results/florida/pdfs/{pdf_hash}.pdf'
+     #         # upload_file(
+     #         #     destination_blob_name=file_ref,
+     #         #     source_file_name=pdf_path,
+     #         #     bucket_name=bucket_name,
+     #         # )
+
+     #         # # Get download URL and create a short URL.
+     #         # download_url, short_url = None, None
+     #         # try:
+     #         #     download_url = get_file_url(file_ref, bucket_name=bucket_name)
+     #         #     short_url = create_short_url(
+     #         #         api_key=firebase_api_key,
+     #         #         long_url=download_url,
+     #         #         project_name=db.project
+     #         #     )
+     #         # except Exception as e:
+     #         #     print('Failed to get download URL:', e)
+
+     #         # # Keep track of the file reference and download URLs.
+     #         # all_results.loc[all_results['sample_hash'] == sample_hash, 'file_ref'] = file_ref
+     #         # all_results.loc[all_results['sample_hash'] == sample_hash, 'download_url'] = download_url
+     #         # all_results.loc[all_results['sample_hash'] == sample_hash, 'short_url'] = short_url
+
+     #         # Cache the PDF.
+     #         # FIXME:
+     #         # cache.setdefault('pdfs', []).append(pdf_hash)
+
+     # # Upload the raw data to Firestore.
+     # # Checks if the data has been uploaded according to the local cache.
+     # refs, updates = [], []
+     # collection = 'results'
+     # for _, obs in all_results.iterrows():
+     #     doc_id = obs['sample_hash']
+     #     if doc_id not in cache.get('results', []):
+     #         refs.append(f'{collection}/{doc_id}')
+     #         updates.append(obs.to_dict())
+     #         # FIXME:
+     #         # cache.setdefault('results', []).append(doc_id)
+     # # if refs:
+     # #     update_documents(refs, updates, database=db)
+     # #     print('Uploaded %i results to Firestore.' % len(refs))
+
+     # # TODO: Save the statistics to Firestore.
+
+     # # Save the updated cache.
+     # # with open(cache_file, 'w') as f:
+     # #     json.dump(cache, f)
+     # # print('Saved cache:', cache_file)
analysis/analyze_results_co.py ADDED
@@ -0,0 +1,86 @@
+ """
+ Analyze Results | Colorado
+ Copyright (c) 2024 Cannlytics
+
+ Authors: Keegan Skeate <https://github.com/keeganskeate>
+ Created: 8/14/2024
+ Updated: 8/15/2024
+ License: MIT License <https://github.com/cannlytics/cannabis-data-science/blob/main/LICENSE>
+ """
+ # Standard imports:
+ import json
+ import os
+ import warnings
+
+ # External imports:
+ from cannlytics.data.coas import standardize_results
+ from cannlytics.data.coas.parsing import find_unique_analytes
+ import pandas as pd
+
+ # Ignore all UserWarnings.
+ warnings.filterwarnings("ignore", category=UserWarning)
+
+
+ def analyze_results_co(
+     data_dir: str,
+ ) -> pd.DataFrame:
+     """Analyze Colorado lab results."""
+
+     # Merge SC Labs results, removing duplicates and unfinished results.
+     datafiles = [os.path.join(data_dir, x) for x in os.listdir(data_dir) if 'urls' not in x and 'latest' not in x]
+     results = pd.concat([pd.read_excel(x) for x in datafiles])
+     results = results.drop_duplicates(subset=['sample_hash'])
+     results = results.loc[results['results'] != '[]']
+     results = results.loc[(results['lab_state'] == 'CO')]
+     print('Number of CO SC Labs results:', len(results))
+
+     # Read constants for processing.
+     try:
+         script_dir = os.path.dirname(os.path.abspath(__file__))
+     except NameError:
+         script_dir = os.getcwd()
+     processing_config = os.path.join(script_dir, 'processing.json')
+     with open(processing_config, 'r') as f:
+         data = json.load(f)
+     nuisance_analytes = data['nuisance_analytes']
+     nuisance_columns = data['nuisance_columns']
+
+     # Drop all non-standard columns.
+     results.drop(columns=nuisance_columns, errors='ignore', inplace=True)
+
+     # FIXME: Standardize analytes.
+     # analytes = find_unique_analytes(results)
+     # analytes = list(set(analytes) - set(nuisance_analytes))
+     # analytes = sorted(list(analytes))
+     # results = standardize_results(results, analytes)
+
+     # Standardize state.
+     state = 'CO'
+     results['lab_state'] = results['lab_state'].fillna(state)
+     # results['producer_state'] = results['producer_state'].fillna(state)
+
+     # Standardize time.
+     results['date'] = pd.to_datetime(results['date_tested'], format='mixed', errors='coerce')
+     results['date'] = results['date'].apply(lambda x: pd.Timestamp(x).tz_localize(None) if pd.notnull(x) else x)
+     results = results.sort_values('date', na_position='last')
+
+     # Save the results.
+     outfile = 'D://data/cannabis_results/data/co/co-results-latest.xlsx'
+     outfile_csv = 'D://data/cannabis_results/data/co/co-results-latest.csv'
+     results.to_excel(outfile, index=False)
+     results.to_csv(outfile_csv, index=False)
+     print('Saved %i results for %s to Excel:' % (len(results), state), outfile)
+     print('Saved %i results for %s to CSV:' % (len(results), state), outfile_csv)
+
+     # Print out features.
+     features = {x: 'string' for x in results.columns}
+     print('Number of features:', len(features))
+     print('Features:', features)
+
+     return results
+
+
+ # === Test ===
+ # [✓] Tested: 2024-08-15 by Keegan Skeate <keegan@cannlytics>
+ if __name__ == '__main__':
+
+     # Get all of the results.
+     analyze_results_co(data_dir=r'D:\data\california\results\datasets\sclabs')
analysis/analyze_results_ct.py ADDED
@@ -0,0 +1,1205 @@
+ """
+ CoADoc | Parse Connecticut COAs
+ Copyright (c) 2023 Cannlytics
+
+ Authors:
+     Keegan Skeate <https://github.com/keeganskeate>
+     Candace O'Sullivan-Sutherland <https://github.com/candy-o>
+ Created: 12/11/2023
+ Updated: 12/16/2023
+ License: MIT License <https://github.com/cannlytics/cannlytics/blob/main/LICENSE>
+
+ Description:
+
+     Extract data from NE Labs COAs and merge the COA data with product
+     data from the Connecticut Medical Marijuana Brand Registry.
+
+ Data Points:
+
+     ✓ id
+     ✓ lab_id
+     ✓ product_id
+     ✓ product_name
+     ✓ product_type
+     ✓ brand
+     ✓ image_url
+     ✓ lab_results_url
+     ✓ date_reported
+     ✓ date_received
+     ✓ date_tested
+     ✓ total_terpenes
+     ✓ cannabinoids_method
+     ✓ total_cannabinoids
+     ✓ sample_weight
+     ✓ label_url
+     ✓ lab
+     ✓ lab_website
+     ✓ lab_license_number
+     ✓ lab_image_url
+     ✓ lab_address
+     ✓ lab_street
+     ✓ lab_city
+     ✓ lab_county
+     ✓ lab_state
+     ✓ lab_zipcode
+     ✓ lab_latitude
+     ✓ lab_longitude
+     ✓ lab_phone
+     ✓ producer
+     ✓ producer_address
+     ✓ producer_street
+     ✓ producer_city
+     ✓ producer_county
+     ✓ producer_state
+     ✓ producer_zipcode
+     ✓ analyses
+     ✓ reported_results
+     ✓ results
+
+ """
+ # Standard imports:
+ from datetime import datetime
+ import json
+ import os
+ from typing import Any, Optional
+
+ # External imports:
+ from cannlytics import __version__
+ from cannlytics.data.data import create_hash, create_sample_id
+ from cannlytics.utils import convert_to_numeric, snake_case
+ from cannlytics.utils.constants import ANALYTES
+ import pandas as pd
+ import pdfplumber
+
+
+ NE_LABS = {
+     'coa_algorithm': 'ne_labs.py',
+     'coa_algorithm_entry_point': 'parse_ne_labs_coa',
+     'lims': 'Northeast Laboratories',
+     'url': 'www.nelabsct.com',
+     'lab': 'Northeast Laboratories',
+     'lab_website': 'www.nelabsct.com',
+     'lab_license_number': '#PH-0404',
+     'lab_image_url': 'https://www.nelabsct.com/images/Northeast-Laboratories.svg',
+     'lab_address': '129 Mill Street, Berlin, CT 06037',
+     'lab_street': '129 Mill Street',
+     'lab_city': 'Berlin',
+     'lab_county': 'Hartford',
+     'lab_state': 'CT',
+     'lab_zipcode': '06037',
+     'lab_latitude': 41.626190,
+     'lab_longitude': -72.748250,
+     'lab_phone': '860-828-9787',
+     # 'lab_email': '',
+ }
+
+ NE_LABS_ANALYTES = [
+     {
+         'analysis': 'microbes',
+         'name': 'Total Aerobic Microbial Count',
+         'key': 'total_aerobic_microbial_count',
+         'units': 'CFU/g',
+     },
+     {
+         'analysis': 'microbes',
+         'name': 'Total Yeast & Mold Count',
+         'key': 'total_yeast_and_mold',
+         'units': 'CFU/g',
+     },
+     {
+         'analysis': 'microbes',
+         'name': 'Bile Tolerant Gram Negative Bacteria',
+         'key': 'gram_negative_bacteria',
+         'units': 'CFU/g',
+     },
+     {
+         'analysis': 'microbes',
+         'name': 'Coliform',
+         'key': 'coliform',
+         'units': 'CFU/g',
+     },
+     {
+         'analysis': 'microbes',
+         'name': 'Enteropathogenic E.coli',
+         'key': 'e_coli',
+         'units': 'CFU/g',
+     },
+     {
+         'analysis': 'microbes',
+         'name': 'Salmonella species',
+         'key': 'salmonella',
+         'units': 'CFU/g',
+     },
+ ]
+
+
+ ALTA_SCI = {
+     'coa_algorithm': 'altasci.py',
+     'coa_algorithm_entry_point': 'parse_altasci_coa',
+     'lims': 'AltaSci Laboratories',
+     'url': 'www.altascilabs.com',
+     'lab': 'AltaSci Laboratories',
+     'lab_website': 'www.altascilabs.com',
+     'lab_license_number': 'CTM0000002',
+     'lab_image_url': '',
+     'lab_address': '1 Hartford Square, New Britain, CT 06052',
+     'lab_street': '1 Hartford Square',
+     'lab_city': 'New Britain',
+     'lab_county': 'Hartford',
+     'lab_state': 'CT',
+     'lab_zipcode': '06052',
+     'lab_latitude': 41.665670,
+     'lab_longitude': -72.811370,
+     'lab_phone': '(860) 224-6668',
+ }
+
+ ALTA_SCI_ANALYTES = [
+     {
+         'analysis': 'microbes',
+         'name': 'Total Aerobic Microbial Count',
+         'key': 'total_aerobic_microbial_count',
+         'units': 'CFU/g',
+     },
+     {
+         'analysis': 'microbes',
+         'name': 'Total Yeast and Mold Count',
+         'key': 'total_yeast_and_mold',
+         'units': 'CFU/g',
+     },
+     {
+         'analysis': 'microbes',
+         'name': 'Gram-negative Bacteria',
+         'key': 'gram_negative_bacteria',
+         'units': 'CFU/g',
+     },
+     {
+         'analysis': 'microbes',
+         'name': 'E. coli (pathogenic strains)',
+         'key': 'e_coli',
+         'units': 'Detected in 1 gram',
+     },
+     {
+         'analysis': 'microbes',
+         'name': 'Salmonella',
+         'key': 'salmonella',
+         'units': 'Detected in 1 gram',
+     },
+     {
+         'analysis': 'mycotoxins',
+         'name': 'Aflatoxin B1',
+         'key': 'aflatoxin_b1',
+         'units': 'ug/Kg',
+     },
+     {
+         'analysis': 'mycotoxins',
+         'name': 'Aflatoxin B2',
+         'key': 'aflatoxin_b2',
+         'units': 'ug/Kg',
+     },
+     {
+         'analysis': 'mycotoxins',
+         'name': 'Aflatoxin G1',
+         'key': 'aflatoxin_g1',
+         'units': 'ug/Kg',
+     },
+     {
+         'analysis': 'mycotoxins',
+         'name': 'Aflatoxin G2',
+         'key': 'aflatoxin_g2',
+         'units': 'ug/Kg',
+     },
+     {
+         'analysis': 'mycotoxins',
+         'name': 'Ochratoxin A',
+         'key': 'ochratoxin_a',
+         'units': 'ug/Kg',
+     },
+     {
+         'analysis': 'heavy_metals',
+         'name': 'Arsenic',
+         'key': 'arsenic',
+         'units': 'ug/g',
+     },
+     {
+         'analysis': 'heavy_metals',
+         'name': 'Cadmium',
+         'key': 'cadmium',
+         'units': 'ug/g',
+     },
+     {
+         'analysis': 'heavy_metals',
+         'name': 'Mercury',
+         'key': 'mercury',
+         'units': 'ug/g',
+     },
+     {
+         'analysis': 'heavy_metals',
+         'name': 'Lead',
+         'key': 'lead',
+         'units': 'ug/g',
+     },
+     {
+         'analysis': 'cannabinoids',
+         'name': 'Δ9-Tetrahydrocannabinol Acid (Δ9-THC-A)',
+         'key': 'thca',
+         'units': 'percent',
+     },
+     {
+         'analysis': 'cannabinoids',
+         'name': 'Tetrahydrocannabinol (THC)',
+         'key': 'delta_9_thc',
+         'units': 'percent',
+     },
+     {
+         'analysis': 'cannabinoids',
+         'name': 'Cannabidiol Acid (CBD-A)',
+         'key': 'cbda',
+         'units': 'percent',
+     },
+     {
+         'analysis': 'cannabinoids',
+         'name': 'Cannabidiol (CBD)',
+         'key': 'cbd',
+         'units': 'percent',
+     },
+     {
+         'analysis': 'cannabinoids',
+         'name': 'Cannabigerol Acid (CBG-A)',
+         'key': 'cbga',
+         'units': 'percent',
+     },
+     {
+         'analysis': 'cannabinoids',
+         'name': 'Cannabigerol (CBG)',
+         'key': 'cbg',
+         'units': 'percent',
+     },
+     {
+         'analysis': 'cannabinoids',
+         'name': 'Cannabinol (CBN)',
+         'key': 'cbn',
+         'units': 'percent',
+     },
+     {
+         'analysis': 'cannabinoids',
+         'name': 'Cannabichromene (CBC)',
+         'key': 'cbc',
+         'units': 'percent',
+     },
+     {
+         'analysis': 'terpenes',
+         'name': 'α-Pinene',
+         'key': 'alpha_pinene',
+         'units': 'percent',
+     },
+     {
+         'analysis': 'terpenes',
+         'name': 'β-Pinene',
+         'key': 'beta_pinene',
+         'units': 'percent',
+     },
+     {
+         'analysis': 'terpenes',
+         'name': 'β-Myrcene',
+         'key': 'beta_myrcene',
+         'units': 'percent',
+     },
+     {
+         'analysis': 'terpenes',
+         'name': 'Limonene',
+         'key': 'd_limonene',
+         'units': 'percent',
+     },
+     {
+         'analysis': 'terpenes',
+         'name': 'Ocimene',
+         'key': 'ocimene',
+         'units': 'percent',
+     },
+     {
+         'analysis': 'terpenes',
+         'name': 'Linalool',
+         'key': 'linalool',
+         'units': 'percent',
+     },
+     {
+         'analysis': 'terpenes',
+         'name': 'β-Caryophyllene',
+         'key': 'beta_caryophyllene',
+         'units': 'percent',
+     },
+     {
+         'analysis': 'terpenes',
+         'name': 'Humulene',
+         'key': 'humulene',
+         'units': 'percent',
+     },
+     {
+         'analysis': 'pesticides',
+         'name': 'Avermectin (abamectin)',
+         'key': 'avermectin',
+         'units': 'ppb',
+     },
+     {
+         'analysis': 'pesticides',
+         'name': 'Acequinocyl',
+         'key': 'acequinocyl',
+         'units': 'ppb',
+     },
+     {
+         'analysis': 'pesticides',
+         'name': 'Bifenazate',
+         'key': 'bifenazate',
+         'units': 'ppb',
+     },
+     {
+         'analysis': 'pesticides',
+         'name': 'Bifenthrin (synthetic pyrethroid)',
+         'key': 'bifenthrin',
+         'units': 'ppb',
+     },
+     {
+         'analysis': 'pesticides',
+         'name': 'Cyfluthrin (synthetic pyrethroid)',
+         'key': 'cyfluthrin',
+         'units': 'ppb',
+     },
+     {
+         'analysis': 'pesticides',
+         'name': 'Etoxazole',
+         'key': 'etoxazole',
+         'units': 'ppb',
+     },
+     {
+         'analysis': 'pesticides',
+         'name': 'Imazalil',
+         'key': 'imazalil',
+         'units': 'ppb',
+     },
+     {
+         'analysis': 'pesticides',
+         'name': 'Imidacloprid',
+         'key': 'imidacloprid',
+         'units': 'ppb',
+     },
+     {
+         'analysis': 'pesticides',
+         'name': 'Myclobutanil',
+         'key': 'myclobutanil',
+         'units': 'ppb',
+     },
+     {
+         'analysis': 'pesticides',
+         'name': 'Paclobutrazol',
+         'key': 'paclobutrazol',
+         'units': 'ppb',
+     },
+     {
+         'analysis': 'pesticides',
+         'name': 'Pyrethrins (synthetic)',
+         'key': 'pyrethrins',
+         'units': 'ppb',
+     },
+     {
+         'analysis': 'pesticides',
+         'name': 'Spinosad',
+         'key': 'spinosad',
+         'units': 'ppb',
+     },
+     {
+         'analysis': 'pesticides',
+         'name': 'Spiromesifen',
+         'key': 'spiromesifen',
+         'units': 'ppb',
+     },
+     {
+         'analysis': 'pesticides',
+         'name': 'Spirotetramat',
+         'key': 'spirotetramat',
+         'units': 'ppb',
+     },
+     {
+         'analysis': 'pesticides',
+         'name': 'Trifloxystrobin',
+         'key': 'trifloxystrobin',
+         'units': 'ppb',
+     },
+     # TODO: Add residual solvents?
+ ]
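These analyte definitions drive the line matching in the parser below: each COA line is scanned for a known `name` and mapped to a standardized `key`. A simplified sketch of the lookup:

from typing import Optional

def match_analyte(line: str, analytes: list) -> Optional[dict]:
    """Return the first analyte definition whose name appears in a COA line."""
    for analyte in analytes:
        if analyte['name'] in line:
            return analyte
    return None

# For a hypothetical COA line:
hit = match_analyte('Total Yeast & Mold Count <100 CFU/g PASS', NE_LABS_ANALYTES)
assert hit['key'] == 'total_yeast_and_mold'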
430
+
431
+ def parse_ne_labs_historic_coa(
432
+ parser,
433
+ doc: Any,
434
+ public_key: Optional[str] = 'product_id',
435
+ verbose: Optional[bool] = False,
436
+ **kwargs,
437
+ ) -> dict:
438
+ """Parse a historic Northeast Labs COA PDF.
439
+ Args:
440
+ doc (str or PDF): A PDF file path or pdfplumber PDF.
441
+ Returns:
442
+ (dict): The sample data.
443
+ """
444
+ # Initialize.
445
+ obs = {}
446
+
447
+ # Read the PDF.
448
+ if isinstance(doc, str):
449
+ report = pdfplumber.open(doc)
450
+ else:
451
+ report = doc
452
+ obs['coa_pdf'] = report.stream.name.replace('\\', '/').split('/')[-1]
453
+
454
+ # Extract producer details.
455
+ page = report.pages[0]
456
+
457
+ # Get the producer and the producer's address.
458
+ tables = page.extract_tables()
459
+ values = tables[0][0][0].split('\n')
460
+ parts = values[-1].replace('•', '').strip().split(', ')
461
+ obs['producer'] = values[0]
462
+ obs['producer_street'] = parts[0]
463
+ obs['producer_city'] = parts[1].replace(' CT', '')
464
+ obs['producer_state'] = 'CT'
465
+
466
+ # Get the product ID.
467
+ obs['product_id'] = tables[0][-1][0].split(':')[-1].strip()
468
+
469
+ # Get the lab ID and date tested.
470
+ lines = page.extract_text().split('\n')
471
+ for line in lines:
472
+ if 'Report#' in line:
473
+ obs['lab_id'] = line.split(': ')[-1]
474
+ elif 'Report Date' in line:
475
+ obs['date_tested'] = line.split(': ')[-1]
476
+ break
477
+
478
+ # Collect results and analyses.
479
+ analyses, results = [], []
480
+
481
+ # Get the microbes.
482
+ for line in lines:
483
+ for analyte in NE_LABS_ANALYTES:
484
+ if analyte['name'] in line:
485
+ if 'microbes' not in analyses:
486
+ analyses.append('microbes')
487
+ result = {}
488
+ value = line.split(analyte['name'])[-1].strip()
489
+ if 'PASS' in value:
490
+ result['status'] = 'Pass'
491
+ value = value.replace(' PASS', '')
492
+ elif 'FAIL' in value:
493
+ result['status'] = 'Fail'
494
+ value = value.replace(' FAIL', '')
495
+ value = value.replace('per gram', '')
496
+ value = value.replace('Not Detected', 'ND')
497
+ values = value.split(' ', maxsplit=1)
498
+ result['value'] = values[0]
499
+ result['limit'] = values[-1]
500
+ results.append({**analyte, **result})
501
+
502
+ # Get results from the front page.
503
+ crop = page.crop((0, page.height * 0.25, page.width, page.height * 0.75))
504
+ tables = crop.extract_tables({
505
+ 'vertical_strategy': 'text',
506
+ 'horizontal_strategy': 'text',
507
+ })
508
+ clean_tables = []
509
+ for table in tables:
510
+ clean_table = []
511
+ for row in table:
512
+ clean_table.append([cell for cell in row if cell])
513
+ if clean_table:
514
+ clean_tables.append(clean_table)
515
+
516
+ # Identify the analyses.
517
+ analysis = None
518
+ for row in clean_tables[0]:
519
+ try:
520
+ table_name = row[0]
521
+ except IndexError:
522
+ continue
523
+
524
+ # Determine the analysis.
525
+ if 'Heavy Metals' in table_name:
526
+ analysis = 'heavy_metals'
527
+ analyses.append(analysis)
528
+ continue
529
+ elif 'Mycotoxins' in table_name:
530
+ analysis = 'mycotoxins'
531
+ analyses.append(analysis)
532
+ continue
533
+ elif 'Pesticides' in table_name:
534
+ analysis = 'pesticides'
535
+ analyses.append(analysis)
536
+ continue
537
+
538
+ # Extract results.
539
+ analyte = row[0]
540
+ key = ANALYTES.get(snake_case(analyte), snake_case(analyte))
541
+ if analysis == 'heavy_metals':
542
+ results.append({
543
+ 'analysis': analysis,
544
+ 'key': key,
545
+ 'name': analyte,
546
+ 'value': row[2],
547
+ 'limit': row[-1],
548
+ 'status': row[-2],
549
+ 'units': row[3],
550
+ })
551
+ elif analysis == 'mycotoxins':
552
+ results.append({
553
+ 'analysis': analysis,
554
+ 'key': key,
555
+ 'name': analyte,
556
+ 'value': row[1],
557
+ 'limit': row[-1],
558
+ 'status': row[-2],
559
+ 'units': row[2],
560
+ })
561
+ elif analysis == 'pesticides':
562
+ results.append({
563
+ 'analysis': analysis,
564
+ 'key': key,
565
+ 'name': analyte,
566
+ 'value': None,
567
+ 'limit': None,
568
+ 'status': row[-1],
569
+ 'units': None,
570
+ })
571
+
572
+ # Get additional results.
573
+ tables = report.pages[1].extract_tables()
574
+ for table in tables:
575
+ table_name = table[0][0]
576
+
577
+ # Get terpenes.
578
+ if 'TERPENES' in table_name:
579
+ rows = table[1][0].split('\n')
580
+ for row in rows:
581
+ values = row.replace(' %', '').split(' ')
582
+ if 'TOTAL' in values[0]:
583
+ obs['total_terpenes'] = convert_to_numeric(values[-1])
584
+ continue
585
+ analyte = values[0]
586
+ key = ANALYTES.get(snake_case(analyte), snake_case(analyte))
587
+ results.append({
588
+ 'analysis': 'terpenes',
589
+ 'key': key,
590
+ 'name': analyte,
591
+ 'value': convert_to_numeric(values[-1]),
592
+ 'units': 'percent',
593
+ })
594
+
595
+ # Get cannabinoids.
596
+ elif 'CANNABINOIDS' in table_name:
597
+ rows = table[1][0].split('\n')
598
+ for row in rows:
599
+ values = row.replace(' % weight', '').split(' ')
600
+ if 'TOTAL' in values[0]:
601
+ obs['total_cannabinoids'] = convert_to_numeric(values[-1])
602
+ continue
603
+ analyte = values[0]
604
+ key = ANALYTES.get(snake_case(analyte), snake_case(analyte))
605
+ results.append({
606
+ 'analysis': 'cannabinoids',
607
+ 'key': key,
608
+ 'name': analyte,
609
+ 'value': convert_to_numeric(values[-1]),
610
+ 'units': 'percent',
611
+ })
612
+
613
+ # Get minor analyses.
614
+ # - water activity?
615
+ text = report.pages[1].extract_text()
616
+ if 'Residual Alcohol' in text:
617
+ value = text.split('Residual Alcohol')[-1].split('\n')[0]
618
+ value = value.replace('%', '').strip()
619
+ results.append({
620
+ 'analysis': 'residual_solvents',
621
+ 'key': 'residual_alcohol',
622
+ 'name': 'Residual Alcohol',
623
+ 'value': convert_to_numeric(value),
624
+ 'units': 'percent',
625
+ })
626
+ if 'Moisture' in text:
627
+ value = text.split('Moisture')[-1].split('\n')[0]
628
+ value = value.replace('%', '').strip()
629
+ obs['moisture_content'] = convert_to_numeric(value)
630
+
631
+ # Get the reviewer data.
632
+ last_page = report.pages[-1]
633
+ last_page_text = last_page.extract_text()
634
+ reviewer_text = last_page_text.split('Approved By:')[-1].split('Date:')[0]
635
+ reviewer_text = reviewer_text.replace('\n', '')
636
+ values = reviewer_text.split('QA / QC')
637
+ obs['reviewed_by'] = values[0] + 'QA / QC'
638
+ obs['released_by'] = values[-1]
639
+ obs['date_reviewed'] = last_page_text.split('Approved By:')[-1].split('Date:')[-1].split('\n')[0]
640
+
641
+ # Close the report.
642
+ report.close()
643
+
644
+ # Standardize dates.
645
+ # FIXME:
646
+ # obs = standardize_dates(obs)
647
+
648
+ # Finish data collection with a freshly minted sample ID.
649
+ obs = {**NE_LABS, **obs}
650
+ obs['analyses'] = json.dumps(list(set(analyses)))
651
+ obs['coa_algorithm_entry_point'] = 'parse_ne_labs_historic_coa'
652
+ obs['coa_algorithm_version'] = __version__
653
+ obs['coa_parsed_at'] = datetime.now().isoformat()
654
+ obs['results'] = json.dumps(results)
655
+ obs['results_hash'] = create_hash(results)
656
+ obs['sample_id'] = create_sample_id(
657
+ private_key=json.dumps(results),
658
+ public_key=obs[public_key],
659
+ salt=obs.get('producer', obs.get('date_tested', 'cannlytics.eth')),
660
+ )
661
+ obs['sample_hash'] = create_hash(obs)
662
+ return obs
663
+
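+ # Illustrative usage (a hedged sketch; the PDF path below is hypothetical and
+ # `CoADoc` is imported in the test block at the bottom of this file):
+ # parser = CoADoc()
+ # sample = parse_ne_labs_historic_coa(parser, 'coas/ne-labs-2021-sample.pdf')
+ # print(sample['product_id'], sample['total_cannabinoids'])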
664
+
665
+ def parse_ne_labs_coa(
666
+ parser,
667
+ doc: Any,
668
+ public_key: Optional[str] = 'product_id',
669
+ verbose: Optional[bool] = False,
670
+ **kwargs,
671
+ ) -> dict:
672
+ """Parse a Northeast Labs COA PDF.
673
+ Args:
674
+ doc (str or PDF): A PDF file path or pdfplumber PDF.
675
+ Returns:
676
+ (dict): The sample data.
677
+ """
678
+ # Initialize.
679
+ obs = {}
680
+
681
+ # Read the PDF.
682
+ if isinstance(doc, str):
683
+ report = pdfplumber.open(doc)
684
+ else:
685
+ report = doc
686
+ obs['coa_pdf'] = report.stream.name.replace('\\', '/').split('/')[-1]
687
+
688
+ # Extract producer details.
689
+ page = report.pages[0]
690
+ midpoint = page.width * 0.45
691
+ left_half = (0, 0, midpoint, page.height)
692
+ left_half_page = page.crop(left_half)
693
+ left_text = left_half_page.extract_text()
694
+ lines = left_text.split('\n')
695
+ parts = lines[2].split(' ')
696
+ obs['producer'] = lines[0]
697
+ obs['producer_street'] = lines[1]
698
+ obs['producer_city'] = ' '.join(parts[0:-2]).rstrip(',')
699
+ obs['producer_state'] = parts[-2]
700
+ obs['producer_zipcode'] = parts[-1]
701
+ obs['producer_address'] = ', '.join([obs['producer_street'], obs['producer_city'], obs['producer_state'] + ' ' + obs['producer_zipcode']])
702
+
703
+ # Extract dates and product details.
704
+ right_half = (midpoint, 0, page.width, page.height)
705
+ right_half_page = page.crop(right_half)
706
+ right_text = right_half_page.extract_text()
707
+ lines = right_text.split('\nResults')[0].split('\n')
708
+ for line in lines:
709
+ if 'Date Received' in line:
710
+ obs['date_received'] = line.split(': ')[-1]
711
+ if 'Report Date' in line:
712
+ obs['date_tested'] = line.split(': ')[-1]
713
+ if 'Report ID' in line:
714
+ obs['lab_id'] = line.split(': ')[-1]
715
+
716
+ # Get the product ID.
717
+ top_half = page.crop((0, 0, page.width, page.height * 0.5))
718
+ top_lines = top_half.extract_text().split('\n')
719
+ for line in top_lines:
720
+ if 'Product ID' in line:
721
+ obs['product_id'] = line.split(': ')[-1]
722
+ break
723
+
724
+ # Get the tables.
725
+ tables = []
726
+ for page in report.pages:
727
+ tables.extend(page.extract_tables())
728
+
729
+ # Clean the tables.
730
+ clean_tables = []
731
+ for table in tables:
732
+ clean_table = []
733
+ for row in table:
734
+ clean_table.append([cell for cell in row if cell])
735
+ clean_tables.append(clean_table)
736
+
737
+ # Get the results from the tables.
738
+ analyses, results = [], []
739
+ for table in clean_tables:
740
+ table_name = table[0][0]
741
+
742
+ # Hot-fix for cannabinoids:
743
+ if table_name == 'C':
744
+ table_name = 'Cannabinoids\nby HPLC'
745
+
746
+ # Get the microbes.
747
+ if table_name.startswith('Microbiology'):
748
+ analyses.append('microbes')
749
+ for cells in table[1:]:
750
+ if 'Pass/Fail' in cells[0]:
751
+ continue
752
+ key = ANALYTES.get(snake_case(cells[0]), snake_case(cells[0]))
753
+ results.append({
754
+ 'analysis': 'microbes',
755
+ 'key': key,
756
+ 'name': cells[0],
757
+ 'value': cells[1],
758
+ 'limit': cells[2],
759
+ 'method': cells[3],
760
+ 'units': 'cfu/g',
761
+ })
762
+
763
+ # Get the cannabinoids, if not already collected.
764
+ elif table_name.startswith('Cannabinoids') and 'cannabinoids' not in analyses:
765
+ analyses.append('cannabinoids')
766
+ if '\nby ' in table_name:
767
+ obs['cannabinoids_method'] = table_name.split('\nby ')[-1]
768
+ for cells in table[1:]:
769
+ if not cells or 'dry weight' in cells[0]:
770
+ continue
771
+ if 'Total Cannabinoids' in cells[0]:
772
+ if len(cells) == 1:
773
+ obs['total_cannabinoids'] = convert_to_numeric(cells[0].split(':')[-1].strip())
774
+ else:
775
+ obs['total_cannabinoids'] = convert_to_numeric(cells[1].replace(' %', ''))
776
+ continue
777
+ if 'Total' in cells[0] and 'THC' in cells[0]:
778
+ values = cells[0].split('\n')
779
+ value = values[0].split(':')[-1].replace(' %', '').strip()
780
+ obs['total_thc'] = convert_to_numeric(value)
781
+ continue
782
+ key = ANALYTES.get(snake_case(cells[0]), snake_case(cells[0]))
783
+ results.append({
784
+ 'analysis': 'cannabinoids',
785
+ 'key': key,
786
+ 'name': cells[0],
787
+ 'value': convert_to_numeric(cells[1]),
788
+ 'units': 'percent',
789
+ })
790
+
791
+ # Get the terpenes.
792
+ elif table_name.startswith('Terpenes'):
793
+ analyses.append('terpenes')
794
+ if '\nby ' in table_name:
795
+ obs['terpenes_method'] = table_name.split('\nby ')[-1]
796
+ for cells in table[1:]:
797
+ if not cells or 'dry weight' in cells[0]:
798
+ continue
799
+ if 'Total Terpenes' in cells[0]:
800
+ values = cells[0].split('\n')
801
+ obs['total_terpenes'] = convert_to_numeric(values[0].replace(' %', '').replace('Total Terpenes: ', ''))
802
+ continue
803
+ key = ANALYTES.get(snake_case(cells[0]), snake_case(cells[0]))
804
+ try:
805
+ value = convert_to_numeric(cells[1])
806
+ except Exception:
807
+ value = 'ND'
808
+ results.append({
809
+ 'analysis': 'terpenes',
810
+ 'key': key,
811
+ 'name': cells[0],
812
+ 'value': value,
813
+ 'units': 'percent',
814
+ })
815
+
816
+ # Get the pesticides.
817
+ elif table_name.startswith('Pesticides'):
818
+ analyses.append('pesticides')
819
+ if '\nby ' in table_name:
820
+ obs['pesticides_method'] = table_name.split('\nby ')[-1]
821
+ for cells in table[1:]:
822
+ if 'Pass/Fail' in cells[0]:
823
+ continue
824
+ # Handle two-column tables.
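+ # e.g., a 4-cell row such as ['Abamectin', 'ND', 'Bifenthrin', 'ND'] splits
+ # into two (analyte, value) results (illustrative analyte names).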
825
+ if len(cells) == 4:
826
+ split_cells = [cells[:2], cells[2:]]
827
+ for split in split_cells:
828
+ key = ANALYTES.get(snake_case(split[0]), snake_case(split[0]))
829
+ results.append({
830
+ 'analysis': 'pesticides',
831
+ 'key': key,
832
+ 'name': split[0],
833
+ 'value': split[1],
834
+ 'limit': None,
835
+ 'units': None,
836
+ })
837
+ else:
838
+ key = ANALYTES.get(snake_case(cells[0]), snake_case(cells[0]))
839
+ results.append({
840
+ 'analysis': 'pesticides',
841
+ 'key': key,
842
+ 'name': cells[0],
843
+ 'value': cells[1],
844
+ 'limit': cells[2],
845
+ 'units': None,
846
+ })
847
+
848
+ # Get the heavy metals.
849
+ elif table_name.startswith('Heavy Metals'):
850
+ analyses.append('heavy_metals')
851
+ if '\nby ' in table_name:
852
+ obs['heavy_metals_method'] = table_name.split('\nby ')[-1]
853
+ for cells in table[1:]:
854
+ if 'Pass/Fail' in cells[0]:
855
+ continue
856
+ key = ANALYTES.get(snake_case(cells[0]), snake_case(cells[0]))
857
+ results.append({
858
+ 'analysis': 'heavy_metals',
859
+ 'key': key,
860
+ 'name': cells[0],
861
+ 'value': cells[1],
862
+ 'limit': cells[2],
863
+ 'units': '',
864
+ })
865
+
866
+ # Get the mycotoxins.
867
+ elif table_name.startswith('Mycotoxins'):
868
+ analyses.append('mycotoxins')
869
+ if '\nby ' in table_name:
870
+ obs['mycotoxins_method'] = table_name.split('\nby ')[-1]
871
+ for cells in table[1:]:
872
+ if 'Pass/Fail' in cells[0]:
873
+ continue
874
+ key = ANALYTES.get(snake_case(cells[0]), snake_case(cells[0]))
875
+ results.append({
876
+ 'analysis': 'mycotoxins',
877
+ 'key': key,
878
+ 'name': cells[0],
879
+ 'value': cells[1],
880
+ 'limit': cells[2],
881
+ 'units': 'µg/kg',
882
+ })
883
+
884
+ # Get the moisture content and water activity.
885
+ elif table_name.startswith('Moisture'):
886
+ for cells in table[1:]:
887
+ if 'Content' in cells[0]:
888
+ obs['moisture_content'] = convert_to_numeric(cells[-1])
889
+ elif 'Activity' in cells[0]:
890
+ obs['water_activity'] = convert_to_numeric(cells[-1])
891
+
892
+ # Get the residual solvents results.
893
+ elif 'Residual' in table_name:
894
+ analyses.append('residual_solvents')
895
+ if '\nby ' in table_name:
896
+ obs['residual_solvents_method'] = table_name.split('\nby ')[-1]
897
+ for cells in table[1:]:
898
+ if 'Pass/Fail' in cells[0]:
899
+ continue
900
+ if cells[0] == 'GC-MS':
901
+ obs['residual_solvents_method'] = 'GC-MS'
902
+ continue
903
+ key = ANALYTES.get(snake_case(cells[0]), snake_case(cells[0]))
904
+ results.append({
905
+ 'analysis': 'residual_solvents',
906
+ 'key': key,
907
+ 'name': cells[0],
908
+ 'value': cells[1],
909
+ 'units': 'percent',
910
+ })
911
+ # Get the sample weight.
912
+ elif table_name.startswith('Density'):
913
+ obs['sample_weight'] = convert_to_numeric(table[1][-1])
914
+
915
+ # Get the reviewer data.
916
+ last_page = report.pages[-1]
917
+ last_table = last_page.extract_tables()[-1]
918
+ row = last_table[1]
919
+ obs['date_reviewed'] = row[0]
920
+ obs['reviewed_by'] = row[1]
921
+ obs['released_by'] = row[2]
922
+
923
+ # Close the report.
924
+ report.close()
925
+
926
+ # Standardize dates.
927
+ # FIXME:
928
+ # obs = standardize_dates(obs)
929
+
930
+ # Finish data collection with a freshly minted sample ID.
931
+ obs = {**NE_LABS, **obs}
932
+ obs['analyses'] = json.dumps(list(set(analyses)))
933
+ obs['coa_algorithm_entry_point'] = 'parse_ne_labs_coa'
+ obs['coa_algorithm_version'] = __version__
934
+ obs['coa_parsed_at'] = datetime.now().isoformat()
935
+ obs['results'] = json.dumps(results)
936
+ obs['results_hash'] = create_hash(results)
937
+ obs['sample_id'] = create_sample_id(
938
+ private_key=json.dumps(results),
939
+ public_key=obs[public_key],
940
+ salt=obs.get('producer', obs.get('date_tested', 'cannlytics.eth')),
941
+ )
942
+ obs['sample_hash'] = create_hash(obs)
943
+ return obs
944
+
945
+
946
+ def parse_altasci_coa(
947
+ parser,
948
+ doc: Any,
949
+ public_key: Optional[str] = 'product_id',
950
+ verbose: Optional[bool] = False,
951
+ **kwargs,
952
+ ) -> dict:
953
+ """Parse a Northeast Labs COA PDF.
954
+ Args:
955
+ doc (str or PDF): A PDF file path or pdfplumber PDF.
956
+ Returns:
957
+ (dict): The sample data.
958
+ """
959
+
960
+ # Initialize.
961
+ obs = {}
962
+
963
+ # Get the front page text.
964
+ report = pdfplumber.open(doc)
965
+ front_page = report.pages[0]
966
+ front_page_text = front_page.extract_text()
967
+ lines = front_page_text.split('\n')
968
+
969
+ for i, line in enumerate(lines):
970
+ if 'Customer Name' in line:
971
+ obs['producer'] = line.split(':')[1].strip()
972
+ if 'Customer Address' in line:
973
+ obs['producer_street'] = line.split(':')[1].strip()
974
+ city_zip = lines[i + 1].split(', CT ')
975
+ obs['producer_city'] = city_zip[0].strip()
976
+ obs['producer_state'] = 'CT'
977
+ obs['producer_zipcode'] = city_zip[1].strip()
978
+ obs['producer_address'] = ', '.join([
979
+ obs['producer_street'],
980
+ obs['producer_city'],
981
+ obs['producer_state'] + ' ' + obs['producer_zipcode'],
982
+ ])
983
+ if 'Results issued on' in line:
984
+ values = line.split('Results issued on: ')[-1].split(' COA No.: ')
985
+ obs['date_tested'] = values[0].strip()
986
+ obs['lab_id'] = values[-1].strip()
987
+
988
+ # Get the tables.
989
+ tables = []
990
+ for page in report.pages:
991
+ tables.extend(page.extract_tables())
992
+
993
+ # Clean the tables.
994
+ clean_tables = []
995
+ for table in tables:
996
+ clean_table = []
997
+ for row in table:
998
+ clean_table.append([cell for cell in row if cell])
999
+ clean_tables.append(clean_table)
1000
+
1001
+ # Get the product ID.
1002
+ obs['product_id'] = clean_tables[0][0][0]
1003
+
1004
+ # Extract all of the lines.
1005
+ all_lines = []
1006
+ for page in report.pages:
1007
+ all_lines.extend(page.extract_text().split('\n'))
1008
+
1009
+ # Extract all of the analytes.
1010
+ results = []
1011
+ for line in all_lines:
1012
+ for analyte in ALTA_SCI_ANALYTES:
1013
+ if analyte['name'] in line:
1014
+ result = {}
1015
+ value = line.split(analyte['name'])[-1].strip()
1016
+ if 'Pass' in value:
1017
+ result['status'] = 'Pass'
1018
+ value = value.replace(' Pass', '')
1019
+ elif 'Fail' in value:
1020
+ result['status'] = 'Fail'
1021
+ value = value.replace(' Fail', '')
1022
+ if analyte['units'] in value:
1023
+ value = value.replace(analyte['units'], '')
1024
+ if analyte['analysis'] in ('cannabinoids', 'terpenes'):
1025
+ value = convert_to_numeric(value.replace('%', ''))
1026
+ result['value'] = value
1027
+ results.append({**analyte, **result})
1028
+
1029
+ # Calculate total THC, applying decarboxylation rate.
1030
+ obs['total_thc'] = sum([
1031
+ x['value'] * 0.877 if x['key'].endswith('a') else x['value']
1032
+ for x in results
1033
+ if x['analysis'] == 'cannabinoids' and 'thc' in x['key'] and isinstance(x['value'], (float, int))
1034
+ ])
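+ # Worked check of the 0.877 decarboxylation factor (illustrative values):
+ # total_thc = delta_9_thc + 0.877 * thca, e.g. 1.2 + 0.877 * 20.0 = 18.74.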
1035
+
1036
+ # Calculate total cannabinoids, applying decarboxylation rate.
1037
+ obs['total_cannabinoids'] = sum([
1038
+ x['value'] * 0.877 if x['key'].endswith('a') else x['value']
1039
+ for x in results
1040
+ if x['analysis'] == 'cannabinoids' and isinstance(x['value'], (float, int))
1041
+ ])
1042
+
1043
+ # Calculate total terpenes.
1044
+ obs['total_terpenes'] = sum([
1045
+ x['value'] for x in results
1046
+ if x['analysis'] == 'terpenes' and isinstance(x['value'], (float, int))
1047
+ ])
1048
+
1049
+ # Determine all unique analyses.
1050
+ analyses = list(set(x['analysis'] for x in results))
1051
+
1052
+ # Get the reviewer data.
1053
+ for page in report.pages:
1054
+ text = page.extract_text()
1055
+ if 'Results Approved by:' in text:
1056
+ reviewer = text.split('Results Approved by:')[-1].split('\n')[0].strip()
1057
+ obs['reviewed_by'] = reviewer
1058
+ obs['released_by'] = reviewer
1059
+ break
1060
+
1061
+ # Close the report.
1062
+ report.close()
1063
+
1064
+ # Standardize dates.
1065
+ # FIXME:
1066
+ # obs = standardize_dates(obs)
1067
+
1068
+ # Finish data collection with a freshly minted sample ID.
1069
+ obs = {**ALTA_SCI, **obs}
1070
+ obs['analyses'] = json.dumps(list(set(analyses)))
1071
+ obs['coa_algorithm_entry_point'] = 'parse_altasci_coa'
+ obs['coa_algorithm_version'] = __version__
1072
+ obs['coa_parsed_at'] = datetime.now().isoformat()
1073
+ obs['results'] = json.dumps(results)
1074
+ obs['results_hash'] = create_hash(results)
1075
+ obs['sample_id'] = create_sample_id(
1076
+ private_key=json.dumps(results),
1077
+ public_key=obs[public_key],
1078
+ salt=obs.get('producer', obs.get('date_tested', 'cannlytics.eth')),
1079
+ )
1080
+ obs['sample_hash'] = create_hash(obs)
1081
+ return obs
1082
+
1083
+
1084
+ def standardize_dates(item: dict) -> dict:
1085
+ """Turn dates to ISO format."""
1086
+ # FIXME: The dates may not be correct.
1087
+ date_columns = [x for x in item.keys() if x.startswith('date')]
1088
+ for date_column in date_columns:
1089
+ try:
1090
+ item[date_column] = pd.to_datetime(item[date_column]).isoformat()
1091
+ except Exception:
1092
+ pass
1093
+ return item
1094
+
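+ # e.g., standardize_dates({'date_tested': '7/11/2024'})
+ # -> {'date_tested': '2024-07-11T00:00:00'} (illustrative).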
1095
+
1096
+ def extract_url(s):
1097
+ """Extract the URL from the string representation of the list."""
1098
+ try:
1099
+ from ast import literal_eval  # safer than `eval` on untrusted input
+ list_rep = literal_eval(s)
1100
+ return list_rep[0] if list_rep else None
1101
+ except (ValueError, SyntaxError):
1102
+ return None
1103
+
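+ # e.g., extract_url("['https://example.com/coa.pdf']") -> 'https://example.com/coa.pdf';
+ # returns None for malformed strings (illustrative URL).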
1104
+
1105
+ # === Test ===
1106
+ if __name__ == '__main__':
1107
+
1108
+ from cannlytics.data.coas import CoADoc
1109
+
1110
+ # === Read the data ===
1111
+
1112
+ # Specify where your data lives.
1113
+ DATA_DIR = 'D://data/cannabis_results/data/ct'
1114
+ PDF_DIR = 'D://data/connecticut/results/pdfs'
1115
+ stats_dir = 'D://data/connecticut/results/datasets'
1116
+
1117
+ # Read in downloaded CT results.
1118
+ datafile = f'{stats_dir}/ct-lab-results-latest.csv'
1119
+ ct_results = pd.read_csv(datafile)
1120
+
1121
+ # Clean URLs.
1122
+ ct_results['image_url'] = ct_results['image_url'].apply(extract_url)
1123
+ ct_results['label_url'] = ct_results['images'].apply(extract_url)
1124
+ ct_results['lab_results_url'] = ct_results['lab_results_url'].apply(extract_url)
1125
+
1126
+ # Rename certain columns.
1127
+ ct_results.rename(columns={
1128
+ 'date_tested': 'date_reported',
1129
+ 'producer': 'brand',
1130
+ 'results': 'reported_results',
1131
+ }, inplace=True)
1132
+
1133
+ # Drop certain columns.
1134
+ ct_results.drop(columns=['images'], inplace=True)
1135
+
1136
+
1137
+ # === Parse CT COAs ===
1138
+
1139
+ # Find the COA for each sample.
1140
+ parser = CoADoc()
1141
+ missing = 0
1142
+ invalid = 0
1143
+ pdf_files = {}
1144
+ all_results = []
1145
+ for index, row in ct_results.iterrows():
1146
+
1147
+ # Identify if the COA exists.
1148
+ pdf_file = os.path.join(PDF_DIR, row['id'] + '.pdf')
1149
+ if not os.path.exists(pdf_file):
1150
+ pdf_file = os.path.join(PDF_DIR, row['lab_id'] + '.pdf')
1151
+ if not os.path.exists(pdf_file):
1152
+ missing += 1
1153
+ continue
1154
+
1155
+ # Record the PDF.
1156
+ pdf_files[row['id']] = pdf_file
1157
+
1158
+ # TODO: Use the parser to extract data and identify the lab.
1159
+ # parser = CoADoc(lims={'NE Labs': NE_LABS_CT})
1160
+ # parser.identify_lims(front_page_text)
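+ # (Hedged sketch: `known = parser.identify_lims(front_page_text)` could
+ # replace the manual URL checks below, assuming custom LIMS configs are
+ # registered with `CoADoc` as hinted above.)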
1161
+
1162
+ # Skip invalid files.
1163
+ try:
1164
+ report = pdfplumber.open(pdf_file)
1165
+ front_page_text = report.pages[0].extract_text()
1166
+ except Exception:
1167
+ print('Invalid file:', pdf_file)
1168
+ invalid += 1
1169
+ continue
1170
+
1171
+ # Identify the lab and extract the COA data.
1172
+ report.close()
1173
+ if NE_LABS['url'] in front_page_text:
1174
+ try:
1175
+ coa_data = parse_ne_labs_coa(parser, pdf_file)
1176
+ print('Parsed NE Labs COA:', pdf_file)
1177
+ except Exception:
1178
+ try:
1179
+ coa_data = parse_ne_labs_historic_coa(parser, pdf_file)
1180
+ print('Parsed NE Labs historic COA:', pdf_file)
1181
+ except Exception:
1182
+ print('Failed to parse NE Labs COA:', pdf_file)
1183
+ continue
1184
+
1185
+ elif ALTA_SCI['url'] in front_page_text:
1186
+ try:
1187
+ coa_data = parse_altasci_coa(parser, pdf_file)
1188
+ print('Parsed AltaSci Labs COA:', pdf_file)
1189
+ except Exception:
1190
+ print('Failed to parse AltaSci Labs COA:', pdf_file)
1191
+ continue
1192
+
1193
+ # Otherwise, the COA is unidentified.
1194
+ else:
1195
+ print('Unidentified lab:', pdf_file)
1196
+ continue
1197
+
1198
+ # Merge details with COA data.
1199
+ all_results.append({**row.to_dict(), **coa_data})
1200
+
1201
+ # Save the augmented CT lab results data.
1202
+ timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
1203
+ outfile = os.path.join(DATA_DIR, f'ct-coa-data-{timestamp}.xlsx')
1204
+ parser.save(pd.DataFrame(all_results), outfile)
1205
+ print('Saved CT lab results:', outfile)
analysis/analyze_results_fl.py ADDED
@@ -0,0 +1,581 @@
1
+ """
2
+ Analyze Results | Florida
3
+ Copyright (c) 2024 Cannlytics
4
+
5
+ Authors:
6
+ Keegan Skeate <https://github.com/keeganskeate>
7
+ Created: 3/19/2024
8
+ Updated: 7/11/2024
9
+ License: MIT License <https://github.com/cannlytics/cannlytics/blob/main/LICENSE>
10
+ """
11
+ # Standard imports:
12
+ from datetime import datetime
13
+ from typing import List
14
+ from dotenv import dotenv_values
15
+ import json
16
+ import gc
17
+ import os
18
+
19
+ # External imports:
20
+ # from cannlytics.data import save_with_copyright
21
+ from cannlytics.data.cache import Bogart
22
+ from cannlytics.data.coas import (
23
+ CoADoc,
24
+ get_result_value,
25
+ standardize_results,
26
+ )
27
+ from cannlytics.data.coas.parsing import (
28
+ find_unique_analytes,
29
+ get_coa_files,
30
+ parse_coa_pdfs,
31
+ )
32
+ from cannlytics.firebase import (
33
+ initialize_firebase,
34
+ create_short_url,
35
+ get_file_url,
36
+ update_documents,
37
+ upload_file,
38
+ )
39
+ from cannlytics.compounds import cannabinoids, terpenes
40
+ from cannlytics.utils.utils import hash_file
41
+ import pandas as pd
42
+
43
+ # Internal imports:
44
+ # from analyze_results import calc_results_stats, calc_aggregate_results_stats
45
+
46
+
47
+ def analyze_results_fl(
48
+ cache_path: str,
49
+ pdf_dir: str,
50
+ reverse: bool = False,
51
+ ) -> None:
52
+ """
53
+ Analyze Florida lab results.
54
+
55
+ Args:
56
+ cache_path (str): The path to the cache file.
57
+ pdf_dir (str): The directory where the PDFs are stored.
58
+ reverse (bool): Whether to parse the PDFs in reverse order.
59
+
60
+ Returns:
61
+ None: The parsed results are written to the cache at `cache_path`.
62
+ """
63
+ # Initialize cache.
64
+ cache = Bogart(cache_path)
65
+
66
+ # Get all of the PDFs.
67
+ pdfs = get_coa_files(pdf_dir)
68
+
69
+ # Sort the PDFs by modified date.
70
+ pdfs.sort(key=os.path.getmtime)
71
+
72
+ # Parse the PDFs.
73
+ parse_coa_pdfs(pdfs, cache=cache, reverse=reverse)
74
+
75
+
76
+ # === Test ===
77
+ if __name__ == '__main__':
78
+
79
+ # Parse all of the COAs.
80
+ analyze_results_fl(
81
+ cache_path = 'D://data/.cache/results-fl.jsonl',
82
+ pdf_dir = 'D://data/florida/results/pdfs',
83
+ reverse=True,
84
+ )
85
+
86
+ # Read the cache.
87
+ results = Bogart('D://data/.cache/results-fl.jsonl').to_df()
88
+ print('Read %i results from cache.' % len(results))
89
+
90
+ # TODO: Figure out why there are duplicates.
91
+
92
+ # Drop duplicates.
93
+ results = results.drop_duplicates(subset=['sample_hash'])
94
+
95
+ # TODO: Identify the same COA parsed multiple ways.
96
+ multiple_coas = results['coa_pdf'].value_counts()
97
+ print('Multiple COAs:', multiple_coas[multiple_coas > 1])
98
+
99
+ # FIXME: Handle:
100
+ # - `download.pdf`
101
+ # - `DA20618004-002.pdf`
102
+
103
+ # Drop all non-standard columns.
104
+ nuisance_columns = [
105
+ 'sample_units',
106
+ 'total_thc_wet',
107
+ 'total_cbd_wet',
108
+ 'total_cannabinoids_wet',
109
+ ]
110
+ results.drop(columns=nuisance_columns, inplace=True, errors='ignore')
111
+
112
+ # Standardize the data.
113
+ analytes = find_unique_analytes(results)
114
+ nuisance_analytes = [
115
+ '',
116
+ '0_analysis_method_sop_t_30_065_sop_t_40_065',
117
+ '0_sop_t_30_102_fl_davie_sop_t_40_102_fl_davie',
118
+ '18_7_percent',
119
+ '1_total_contaminant_load_metals',
120
+ '20_3_percent',
121
+ '21_8_percent',
122
+ '23_0_percent',
123
+ '27_7_percent',
124
+ '3_carer_a_f_oto',
125
+ '3_carer_ae_o_ooes',
126
+ '3_carer_cy_ll_oooo',
127
+ '3_carer_cy_ll_oot',
128
+ '3_carer_ey_ll_o_oeee',
129
+ '3_carer_ey_ll_toe',
130
+ '3_carer_or_oto',
131
+ '3_carers_ll_obes',
132
+ '5_caren_ae_l_pote',
133
+ '5_k_4_the_measurement_of_uncertainty_mu_error_is_available_from_the_lab_upon_request_the_decision',
134
+ '64_er_20_39_and_f_s_rule',
135
+ '7_total_contaminant_load_metals',
136
+ '9_tetrahyd_binolic_acid_thca',
137
+ 'a_ee_acon_o_oeea',
138
+ 'aipnerrenchyto_aleo_ol_ote',
139
+ 'al_sample_received',
140
+ 'alpharnium_iene_hol_o_oose_toe',
141
+ 'alphas_terpineo_a_oere_ote',
142
+ 'alpna_bisapo_o',
143
+ 'alpnerrenchyto_alco_ol_ober',
144
+ 'ana_phellancrene_oto',
145
+ 'ana_phellancrene_toa',
146
+ 'analyzed_by_weight_extraction_date_extracted_by',
147
+ 'ane_eeerene_cohol_o_oee',
148
+ 'ane_eeerene_cohol_o_oeee',
149
+ 'aon_yreene_ooo',
150
+ 'apne_beebo_l_o_oeee',
151
+ 'apne_eeabolol_l_oost',
152
+ 'apne_eeabolol_l_toa',
153
+ 'apne_esabolol_l_oes',
154
+ 'apne_ie_ol_oto',
155
+ 'apne_redrene_cohol_number_oot',
156
+ 'apne_rene_alcohol_i_op',
157
+ 'apne_rene_alcohol_o_oeea',
158
+ 'apne_rene_alcohol_oes',
159
+ 'apne_rene_alcohol_oon',
160
+ 'apne_rene_alcohol_pose',
161
+ 'bdg_200084',
162
+ 'bdg_200126',
163
+ 'bdg_200146',
164
+ 'beta_pinene_toe_ae',
165
+ 'ca',
166
+ 'cann',
167
+ 'content',
168
+ 'cc_binol_cbn',
169
+ 'ceernene_oer_aan',
170
+ 'ceernene_oer_an',
171
+ 'cro',
172
+ 'cultivar_conftti_kshcke',
173
+ 'cultivar_florida_tringle',
174
+ 'cultivar_la_kush_cke',
175
+ 'cultivar_m_a_c',
176
+ 'cultivar_mod_grps_number_6',
177
+ 'cultivar_pnch_ckies',
178
+ 'cultivation_facility_muv_ruskin',
179
+ 'fth_acai_gelato_x_sherb_bx_1',
180
+ 'fth_apples_and_bananas_full_flower',
181
+ 'fth_cereal_milk_x_white_runtz',
182
+ 'fth_fatso',
183
+ 'fth_fatty_sour',
184
+ 'fth_gary_payton_full_flower',
185
+ 'fth_mac',
186
+ 'fth_miami_sunkissed_full_flower',
187
+ 'fth_origins_double_trouble_full_flower',
188
+ 'fth_origins_og_kush_full_flower',
189
+ 'fth_origins_space_coast_kush',
190
+ 'fth_origins_triangle_kush_full_flower',
191
+ 'fth_pink_moon_milk_full_flower',
192
+ 'fth_sfv_og_x_sherb_bx_1',
193
+ 'fth_sundaes_best',
194
+ 'ga_213',
195
+ 'd_012',
196
+ 'd_013',
197
+ 'da_013_fisherbrand_isotemp_heat_block_da_020_fisherbrand_isotemp',
198
+ 'da_020_fisherbrand_isotemp_heat_block_da_049_fisher',
199
+ 'da_049_applied_biosystems_thermocycler_da_254',
200
+ 'da_171_fisherbrand_isotemp_heat_block_da_020_fisherbrand_isotemp',
201
+ 'ee_retail_batch_total_wttovol',
202
+ 'etrahivariyn_cthrw_ocanina_b_ivuansr_inns',
203
+ 'fety_s_umma',
204
+ 'ficate_of_analysis',
205
+ 'gmo_s_x_lemon_freeze_pop_s',
206
+ 'gmo_s_x_melon_fizz_s',
207
+ 'h_air_fryer_kush',
208
+ 'h_banana_chocolate_thai',
209
+ 'h_blue_breath_mints',
210
+ 'h_chocolate_ice_cream',
211
+ 'h_frosted_durban_dawg',
212
+ 'h_gelato_glacier_burst',
213
+ 'h_grape_ice_cream',
214
+ 'h_ice_cream_haze',
215
+ 'h_ice_cream_pai',
216
+ 'h_iced_lady_d',
217
+ 'h_jelly_dog_rv_03',
218
+ 'h_jet_fuel_gelato',
219
+ 'h_key_lime_tide',
220
+ 'h_lemon_cherry_gelato',
221
+ 'h_london_pound_cake',
222
+ 'h_miami_citrus_splash',
223
+ 'h_mimosa_kush_mints',
224
+ 'h_mota_meringue_cake',
225
+ 'h_nuclear_ice_cream',
226
+ 'h_nuclear_nebula_retriever',
227
+ 'h_nuclear_nightfall_express',
228
+ 'h_origins_og_kush_full_flower',
229
+ 'h_pb_meringue_cake',
230
+ 'h_pb_night_star',
231
+ 'h_pineapple_upside_down_cake',
232
+ 'h_shady_meringue_cake',
233
+ 'h_sour_shady_og',
234
+ 'h_stardawg_x_northern_lights',
235
+ 'i_apple_fritter_x_gluey',
236
+ 'i_auto',
237
+ 'i_baba_s_frosted_breath',
238
+ 'i_king_louis_xiii',
239
+ 'i_pb_souffle_rv_03',
240
+ 'i_vanilla_cookie_face',
241
+ 'jams_fast_acting_ch',
242
+ 'jams_fast_acting_che',
243
+ 'l_sample_received',
244
+ 'la_bomba_x_trop_cherry',
245
+ 'la_bomba_x_trop_cherry_wf',
246
+ 'li',
247
+ 'lls_00_0005',
248
+ 'lod_limit_of_detection',
249
+ 'loq',
250
+ 'mpn_and_traditional_culture_based_techniques_in_accordance_with_f_s_rule',
251
+ 'ms_classic_chews_mixed_berry_i_10_mg_x',
252
+ 'nd_not_detected_na_not_analyzed_ppm_parts_per_million_ppb_parts_per_billion_limit_of_detection',
253
+ 'neity_label_claim_microbials_moisture',
254
+ 'oe',
255
+ 'p',
256
+ 'p_um',
257
+ 'p_y',
258
+ 'passed',
259
+ 'pe',
260
+ 'processing_facility_muv_ruskin',
261
+ 'q_lab_director_processing_and_source_facilities_added',
262
+ 'q_lab_director_re_s',
263
+ 'r_0_nb_32898',
264
+ 'r_metal_lod_unit_result_pass_to_action',
265
+ 'r_mycotoxins_testing_utilizing_liquid_chromatography_with_triple_quadrupole_mass_spectrometry_in',
266
+ 'raspberry_lemonade_lozenge_2_5_mg_x',
267
+ 'retail_batch_date',
268
+ 'retail_batch_total_units',
269
+ 'retail_batch_total_wttovol',
270
+ 'retail_batchnumber_bu_060622_9409_ckc',
271
+ 'retail_batchnumber_bu_070622_6505_mg',
272
+ 'retail_batchnumber_bu_090222_7370_ckc',
273
+ 'retail_batchnumber_bu_110222_1451_apj',
274
+ 'retail_batchnumber_bu_150422_5024_pc',
275
+ 'retail_batchnumber_bu_240522_6518_lakc',
276
+ 'retail_batchnumber_bu_260422_1432_mac',
277
+ 'retail_batchnumber_bu_300322_9848_chp',
278
+ 'retail_batchnumber_bu_310522_9161_ft',
279
+ 's_classic_chews_mixed_berry_i_10_mg_x',
280
+ 's_durban_daybreak_duster',
281
+ 's_h_citrus_farmer',
282
+ 's_raspberry_lemonade_lozenge_2_5_mg_x',
283
+ 's_silver_gmo_jack',
284
+ 'sampling_sop',
285
+ 'saree_oer',
286
+ 'seed_to_sale',
287
+ 'ss_chews_sativa_watermelon',
288
+ 'ssc_002',
289
+ 'sted_not_tested_pass_pass',
290
+ 'sted_pass_tested_pass',
291
+ 'tal_contaminant_load_pesticides',
292
+ 'tal_dimethomorph',
293
+ 'tal_permethrin',
294
+ 'tal_spinetoram',
295
+ 'tal_spinosad',
296
+ 'terpenes_tested',
297
+ 'th_miami_sunkissed_full_flower_ig_pre_roll_s_035_oz_unit',
298
+ 'th_pink_moon_milk_full_flower',
299
+ 'u_15143701',
300
+ 'ual_tcl_terpenes_water',
301
+ 'unit_weight',
302
+ 'unless_otherwise_stated_all_quality_control_samples_performed_within_specifications_established_by_the_laboratory',
303
+ 'ur_classic_chews_watermelon_10_mg_x',
304
+ 'vav_09_1020_947_077_to_alk_09_1412_9291_179',
305
+ 'y',
306
+ 'y_rabinolic_acl',
307
+ 'pemonene_o_oes_oes',
308
+ # Totals to be handled:
309
+ 'total',
310
+ 'total_cbd',
311
+ 'total_cbd_homogeneity',
312
+ 'total_cbd_homogeneity_rsd',
313
+ 'total_co',
314
+ 'total_contaminant_load',
315
+ 'total_contaminant_load_metals',
316
+ 'total_contaminant_load_pesticides',
317
+ 'total_diazinon',
318
+ 'total_dimethomorph',
319
+ 'total_ochratoxin_a',
320
+ 'total_permethrin',
321
+ 'total_pyrethrins',
322
+ 'total_spinetoram',
323
+ 'total_spinosad',
324
+ 'total_thc',
325
+ 'total_thc_homogeneity',
326
+ 'total_thc_homogeneity_rsd',
327
+ 'total_units_received',
328
+ 'total_yeast_and_mold_high',
329
+ # Analytes to be fixed:
330
+ 'alpha_terpinen',
331
+ 'sipha_ce_drene',
332
+ 'sipha_fenchui_alcohol_otod',
333
+ 'sipha_phellandrene',
334
+ 'sipha_terpinene',
335
+ 'siphacteroinene_ot_02',
336
+ 'siphacteroinene_otod',
337
+ 'percent_moisture',
338
+ 'pentanes_n_pentane',
339
+ 'pentachloronitrobenzene_pcnb',
340
+ 'pclaaivaens_eee',
341
+ 'letrahycrocannabinolic_aci_105',
342
+ 'ipha_ce_drene',
343
+ 'fetrany_rocanna_ino_ic_acid',
344
+ 'escherichia_coli_shigella_spp',
345
+ 'dg_tetrahydrocannabinoid_d_9_thc',
346
+ 'dg_tetrahydrocannabinolic_acid_thca',
347
+ 'd_9_tetrahyd_binolic_acid_thca',
348
+ 'd_9_tetrahyd_binolic_acid_thca_i',
349
+ 'alpha_fenchy_aleohol_obese_ocoee_ml',
350
+ 'alpha_finene_oost_oe',
351
+ 'aloha_humulene',
352
+ 'alpha_phellandrane',
353
+ 'alpha_pinane',
354
+ 'alpha_redrene_lcohol_pote',
355
+ 'alpha_tecrene_icohol_to',
356
+ 'gammaz_terpinene',
357
+ 'aspergillus_flavus_env',
358
+ 'aspergillus_fumigatus_env',
359
+ 'aspergillus_niger_env',
360
+ 'aspergillus_terreus_env',
361
+ 'bacillus_group',
362
+ 'butanes_n_butane',
363
+ 'bife',
364
+ 'chlora',
365
+ 'chlorfe',
366
+ 'clofe',
367
+ 'fluthrin',
368
+ 'hexahydrocannabinol_hhc',
369
+ 'homogeneity_d_8_thc',
370
+ 'iromesifen',
371
+ 'irotetramat',
372
+ 'iroxamine',
373
+ 'lordane',
374
+ 'lorfenapyr',
375
+ 'ntachloronitrobenzene_pcnb',
376
+ 'ntoa_na_ntoa_ntoa',
377
+ 'ntoa_ntoa_ntoa_ntoa',
378
+ 'obra_esabolol_l_oger',
379
+ 'obra_esabolol_l_top',
380
+ 'oiphehumuene_o_ooge',
381
+ 'opiconazole',
382
+ 'opoxur',
383
+ 'otal_sample_received',
384
+ 'peta_goimene',
385
+ 'pha_fenchyl_alcohol',
386
+ 'phe_inens_ees',
387
+ 'propico',
388
+ 'ptan',
389
+ 'rathion_methyl',
390
+ 'rethrins',
391
+ 'ridaben',
392
+ 'salmonella_entericatoenterobacter',
393
+ 'spinetoram_total',
394
+ 'spinosad_total',
395
+ 'tebuco',
396
+ 'tetrahyd_to_bivarin_thcv',
397
+ 'tetrahydrocannabinoid_48_thc',
398
+ 'tetrahydrocannabinolic_acid',
399
+ 'totranvd',
400
+ 'water',
401
+ 'xylenes_total',
402
+ ]
403
+ analytes = list(set(analytes) - set(nuisance_analytes))
404
+ analytes = sorted(list(analytes))
405
+ # FIXME: This is raising an unknown error.
406
+ # results = standardize_results(results, analytes)
407
+
408
+ # Standardize state.
409
+ state = 'FL'
410
+ results['lab_state'] = results['lab_state'].fillna(state)
411
+ results['producer_state'] = results['producer_state'].fillna(state)
412
+
413
+ # Standardize time.
414
+ results['date'] = pd.to_datetime(results['date_tested'], format='mixed')
415
+ results['week'] = results['date'].dt.to_period('W').astype(str)
416
+ results['month'] = results['date'].dt.to_period('M').astype(str)
417
+ results = results.sort_values('date')
418
+
419
+ # Save the results.
420
+ data_dir = 'D://data/cannabis_results/data/fl'
421
+ outfile = os.path.join(data_dir, 'fl-results-latest.xlsx')
422
+ outfile_csv = os.path.join(data_dir, 'fl-results-latest.csv')
423
+ results.to_excel(outfile, index=False)
424
+ results.to_csv(outfile_csv, index=False)
425
+ print('Saved Excel:', outfile)
426
+ print('Saved CSV:', outfile_csv)
427
+
428
+ # Print out features.
429
+ features = {x: 'string' for x in results.columns}
430
+ print('Number of features:', len(features))
431
+ print(json.dumps(features, indent=2))
432
+
433
+
434
+ #-----------------------------------------------------------------------
435
+ # Calculate statistics.
436
+ #-----------------------------------------------------------------------
437
+
438
+ # # Calculate results statistics.
439
+ # results = calc_results_stats(
440
+ # results,
441
+ # cannabinoid_keys=cannabinoid_keys,
442
+ # terpene_keys=terpene_keys,
443
+ # )
444
+
445
+ # # Calculate aggregate statistics.
446
+ # stats = calc_aggregate_results_stats(
447
+ # results,
448
+ # cannabinoid_keys=cannabinoid_keys,
449
+ # terpene_keys=terpene_keys,
450
+ # )
451
+
452
+ # # Save all of the data.
453
+ # output_dir = 'D://data/florida/results/datasets'
454
+ # date = datetime.now().strftime('%Y-%m-%d')
455
+ # outfile = os.path.join(output_dir, f'fl-results-{date}.xlsx')
456
+ # results.replace(r'\\u0000', '', regex=True, inplace=True)
457
+ # save_with_copyright(
458
+ # results,
459
+ # outfile,
460
+ # dataset_name='Florida Cannabis Lab Results',
461
+ # author='Keegan Skeate',
462
+ # publisher='Cannlytics',
463
+ # sources=['Kaycha Labs', 'TerpLife Labs'],
464
+ # source_urls=['https://yourcoa.com', 'https://www.terplifelabs.com'],
465
+ # )
466
+ # print('Saved %i COA data:' % len(results), outfile)
467
+
468
+
469
+ #-----------------------------------------------------------------------
470
+ # Upload COA PDFs to Google Cloud Storage and data to Firestore.
471
+ #-----------------------------------------------------------------------
472
+
473
+ # # TODO: Re-write using Bogart cache.
474
+
475
+ # # Use a local cache to keep track of lab results in Firestore,
476
+ # # PDFs in Google Cloud Storage, and which datafiles are in Cloud Storage.
477
+ # cache_dir = 'D://data/florida/cache'
478
+ # cache_file = os.path.join(cache_dir, 'results-fl.json')
479
+ # if os.path.exists(cache_file):
480
+ # with open(cache_file, 'r') as f:
481
+ # cache = json.load(f)
482
+ # else:
483
+ # cache = {}
484
+ # os.makedirs(cache_dir, exist_ok=True)
485
+
486
+ # # Match COA PDFs with the results.
487
+ # pdf_dir = 'D://data/florida/results/pdfs'
488
+ # coa_pdfs = {}
489
+ # for index, result in all_results.iterrows():
490
+
491
+ # # Get the name of the PDF.
492
+ # identifier = result['coa_pdf']
493
+ # if identifier == 'download.pdf':
494
+ # lab_results_url = result['lab_results_url']
495
+ # identifier = lab_results_url.split('=')[-1].split('?')[0]
496
+
497
+ # # Find the matching PDF.
498
+ # for root, _, files in os.walk(pdf_dir):
499
+ # for filename in files:
500
+ # if identifier in filename:
501
+ # pdf_path = os.path.join(root, filename)
502
+ # coa_pdfs[result['sample_hash']] = pdf_path
503
+ # break
504
+
505
+ # # Initialize Firebase.
506
+ # config = dotenv_values('.env')
507
+ # db = initialize_firebase()
508
+ # bucket_name = config['FIREBASE_STORAGE_BUCKET']
509
+ # firebase_api_key = config['FIREBASE_API_KEY']
510
+
511
+ # # Upload datafiles to Google Cloud Storage.
512
+ # # Checks if the file has been uploaded according to the local cache.
513
+ # for datafile in datafiles:
514
+ # filename = os.path.split(datafile)[-1]
515
+ # if filename not in cache.get('datafiles', []):
516
+ # file_ref = f'data/results/florida/datasets/{filename}'
517
+ # # upload_file(
518
+ # # destination_blob_name=file_ref,
519
+ # # source_file_name=datafile,
520
+ # # bucket_name=bucket_name,
521
+ # # )
522
+ # print('Uploaded:', file_ref)
523
+ # cache.setdefault('datafiles', []).append(filename)
524
+
525
+ # # Upload PDFs to Google Cloud Storage.
526
+ # # Checks if the file has been uploaded according to the local cache.
527
+ # print('Number of unique COA PDFs:', len(coa_pdfs))
528
+ # for sample_hash, pdf_path in coa_pdfs.items():
529
+ # print('Uploading:', pdf_path)
530
+ # pdf_hash = hash_file(pdf_path)
531
+
532
+ # if pdf_hash not in cache.get('pdfs', []):
533
+
534
+ # # Upload the file.
535
+ # file_ref = f'data/results/florida/pdfs/{pdf_hash}.pdf'
536
+ # # upload_file(
537
+ # # destination_blob_name=file_ref,
538
+ # # source_file_name=pdf_path,
539
+ # # bucket_name=bucket_name,
540
+ # # )
541
+
542
+ # # # Get download URL and create a short URL.
543
+ # # download_url, short_url = None, None
544
+ # # try:
545
+ # # download_url = get_file_url(file_ref, bucket_name=bucket_name)
546
+ # # short_url = create_short_url(
547
+ # # api_key=firebase_api_key,
548
+ # # long_url=download_url,
549
+ # # project_name=db.project
550
+ # # )
551
+ # # except Exception as e:
552
+ # # print('Failed to get download URL:', e)
553
+
554
+ # # # Keep track of the file reference and download URLs.
555
+ # # all_results.loc[all_results['sample_hash'] == sample_hash, 'file_ref'] = file_ref
556
+ # # all_results.loc[all_results['sample_hash'] == sample_hash, 'download_url'] = download_url
557
+ # # all_results.loc[all_results['sample_hash'] == sample_hash, 'short_url'] = short_url
558
+
559
+ # # Cache the PDF.
560
+ # cache.setdefault('pdfs', []).append(pdf_hash)
561
+
562
+ # # Upload the raw data to Firestore.
563
+ # # Checks if the data has been uploaded according to the local cache.
564
+ # refs, updates = [], []
565
+ # collection = 'results'
566
+ # for _, obs in all_results.iterrows():
567
+ # doc_id = obs['sample_hash']
568
+ # if doc_id not in cache.get('results', []):
569
+ # refs.append(f'{collection}/{doc_id}')
570
+ # updates.append(obs.to_dict())
571
+ # cache.setdefault('results', []).append(doc_id)
572
+ # # if refs:
573
+ # # update_documents(refs, updates, database=db)
574
+ # # print('Uploaded %i results to Firestore.' % len(refs))
575
+
576
+ # # TODO: Save the statistics to Firestore.
577
+
578
+ # # Save the updated cache
579
+ # with open(cache_file, 'w') as f:
580
+ # json.dump(cache, f)
581
+ # print('Saved cache:', cache_file)
analysis/analyze_results_ma.py ADDED
@@ -0,0 +1,886 @@
1
+ """
2
+ Analyze Cannabis Lab Results | Massachusetts
3
+ Copyright (c) 2023 Cannlytics
4
+
5
+ Authors: Keegan Skeate <https://github.com/keeganskeate>
6
+ Created: 2/1/2024
7
+ Updated: 8/15/2024
8
+ License: MIT License <https://github.com/cannlytics/cannabis-data-science/blob/main/LICENSE>
9
+ """
10
+ # External imports:
11
+ import json
12
+ from matplotlib import ticker
13
+ import matplotlib.pyplot as plt
14
+ from matplotlib.ticker import StrMethodFormatter
15
+ import matplotlib.dates as mdates
16
+ from matplotlib import cm
17
+ import numpy as np
18
+ import pandas as pd
19
+ import seaborn as sns
20
+ from adjustText import adjust_text
21
+
22
+
23
+ # === Setup ===
24
+
25
+ # Setup plotting style.
26
+ plt.style.use('fivethirtyeight')
27
+ plt.rcParams.update({
28
+ 'font.family': 'Times New Roman',
29
+ 'font.size': 24,
30
+ })
31
+
32
+ assets_dir = './presentation/images/figures'
33
+
34
+
35
+ # === Analyze MCR Labs data ===
36
+
37
+ from cannlytics.data.coas import CoADoc
38
+ import os
39
+ import pandas as pd
40
+
41
+
42
+ # Read MCR Labs data.
43
+ data_dir = r'D:\data\massachusetts\results'
44
+ # datafile = r"D:\data\massachusetts\lab_results\ma-lab-results-2024-04-08.xlsx"
45
+ # mcr_results = pd.read_excel(datafile)
46
+ datafiles = [os.path.join(data_dir, x) for x in os.listdir(data_dir) if 'urls' not in x and 'latest' not in x]
47
+ mcr_results = pd.concat([pd.read_excel(x) for x in datafiles])
48
+ mcr_results = mcr_results.drop_duplicates(subset=['product_name', 'date_tested'])
49
+ mcr_results = mcr_results.loc[mcr_results['results'] != '[]']
50
+ print('Number of MCR Labs results:', len(mcr_results))
51
+
52
+ # TODO: Standardize lab results.
53
+ parser = CoADoc()
54
+ date = pd.Timestamp.now().strftime('%Y-%m-%d')
55
+ data_dir = r"D:\data\massachusetts\lab_results"
56
+ outfile = os.path.join(data_dir, f'mcr-lab-results-{date}.xlsx')
57
+ parser.save(mcr_results, outfile)
58
+ print(f'Saved standardized MCR Labs results: {outfile}')
59
+
60
+
61
+ # === Get the data ===
62
+
63
+ # Read MA lab results.
64
+ datafiles = [
65
+ r"D:\data\public-records\Massachusetts\TestingTHC-THCA-YeastMold-Apr-Dec2021-FINAL.csv",
66
+ r"D:\data\public-records\Massachusetts\TestingTHC-THCA-YeastMold-2022-FINAL.csv",
67
+ r"D:\data\public-records\Massachusetts\TestingTHC-THCA-YeastMold-2023-Jan-June-FINAL.csv",
68
+ r"D:\data\public-records\Massachusetts\TestingTHC-THCA-YeastMold-2023-Jul-Sep-FINAL.csv",
69
+ ]
70
+ ma_results = pd.concat([pd.read_csv(datafile) for datafile in datafiles])
71
+
72
+ # Coalesce similarly named columns.
73
+ ma_results['lab'] = ma_results['TestingLabId'].combine_first(ma_results['TestingLab'])
74
+ ma_results['strain_name'] = ma_results['StrainName'].combine_first(ma_results['Strain'])
75
+ ma_results = ma_results.drop(columns=[
76
+ 'TestingLabId',
77
+ 'TestingLab',
78
+ 'StrainName',
79
+ 'Strain',
80
+ ])
81
+
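+ # `combine_first` keeps the first non-null value per row, e.g. a record with
+ # TestingLabId=None and TestingLab='Lab A' yields lab='Lab A' (illustrative).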
82
+ # Rename certain columns.
83
+ ma_results = ma_results.rename(columns={
84
+ 'ProductCategory': 'product_type',
85
+ 'PackageLabel': 'label',
86
+ 'TestType': 'test_type',
87
+ 'TestResult': 'test_result',
88
+ 'TestPerformedDate': 'date_tested',
89
+ })
90
+
91
+ # Standardize state.
92
+ state = 'MA'
93
+ ma_results['lab_state'] = state
94
+ ma_results['producer_state'] = state
95
+
96
+ # Add a date column.
97
+ ma_results['date'] = pd.to_datetime(ma_results['date_tested'])
98
+ ma_results['week'] = ma_results['date'].dt.to_period('W').astype(str)
99
+ ma_results['month'] = ma_results['date'].dt.to_period('M').astype(str)
100
+ ma_results = ma_results.sort_values('date')
101
+
102
+ # Creating a pivot table
103
+ pivot_df = ma_results.pivot_table(
104
+ index=['label', 'date_tested', 'lab'],
105
+ columns='test_type',
106
+ values='test_result',
107
+ aggfunc='first',
108
+ ).reset_index()
109
+ pivot_df.columns.name = None
110
+ pivot_df.rename({
111
+ 'THC (%) Raw Plant Material': 'delta_9_thc',
112
+ 'THCA (%) Raw Plant Material': 'thca',
113
+ 'Total THC (%) Raw Plant Material': 'total_thc',
114
+ 'Total Yeast and Mold (CFU/g) Raw Plant Material': 'yeast_and_mold'
115
+ }, axis=1, inplace=True)
116
+ pivot_df['date'] = pd.to_datetime(pivot_df['date_tested'])
117
+ pivot_df['week'] = pivot_df['date'].dt.to_period('W').astype(str)
118
+ pivot_df['month'] = pivot_df['date'].dt.to_period('M').astype(str)
119
+ print('Number of public MA lab results:', len(pivot_df))
120
+
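+ # The pivot turns long-format test records into one row per
+ # (label, date_tested, lab), with one column per test type; e.g. a package's
+ # THC, THCA, total THC, and yeast and mold tests collapse into a single row.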
121
+ # Add MCR Labs data.
122
+ pivot_df = pd.concat([pivot_df, mcr_results])
123
+ print('Total number of MA results:', len(pivot_df))
124
+
125
+ # Save the data.
126
+ outfile = 'D://data/cannabis_results/data/ma/ma-results-latest.xlsx'
127
+ outfile_csv = 'D://data/cannabis_results/data/ma/ma-results-latest.csv'
128
+ pivot_df.to_excel(outfile, index=False)
129
+ pivot_df.to_csv(outfile_csv, index=False)
130
+ print('Saved Excel:', outfile)
131
+ print('Saved CSV:', outfile_csv)
132
+
133
+ # Print out the features.
134
+ features = {x: 'string' for x in pivot_df.columns}
135
+ print('Number of features:', len(features))
136
+ print(json.dumps(features, indent=2))
137
+
138
+
139
+ # === Visualize the number of tests per month ===
140
+
141
+ # Count the number of tests per month.
142
+ monthly_tests = pivot_df.groupby('month').size().reset_index(name='n_tests')
143
+
144
+ # # Plot the number of tests per month.
145
+ # plt.figure(figsize=(15, 8))
146
+ # fig, ax = plt.subplots()
147
+ # monthly_tests.plot(x='month', y='n_tests', kind='bar', ax=ax, color='k')
148
+ # ax.set_title('Number of MA Cannabis Tests per Month')
149
+ # ax.set_xlabel('Month')
150
+ # ax.set_ylabel('Number of Tests')
151
+ # ax.yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}'))
152
+ # plt.show()
153
+
154
+ # Isolate the sample.
155
+ sample = pivot_df.loc[pivot_df['yeast_and_mold'].notnull()]
156
+ sample = sample.loc[sample['date'] >= pd.to_datetime('2023-01-01')].copy()  # copy to avoid SettingWithCopyWarning below
157
+
158
+ # Calculate the number of detects.
159
+ detects = sample.loc[sample['yeast_and_mold'] > 0]
160
+ detects = detects.sort_values('yeast_and_mold', ascending=False)
161
+
162
+ # Calculate the maximum yeast and mold detection.
163
+ print('Maximum yeast and mold detection:', detects['yeast_and_mold'].max())
164
+
165
+ # Calculate the most frequent value.
166
+ print('Most frequent yeast and mold detection:', detects['yeast_and_mold'].mode())
167
+
168
+ # Histogram below 10k.
169
+ plt.figure(figsize=(15, 8))
170
+ filtered_df = sample.dropna(subset=['yeast_and_mold'])
171
+ filtered_df.loc[
172
+ (filtered_df['yeast_and_mold'] <= 15_000) &
173
+ (filtered_df['yeast_and_mold'] > 100)
174
+ ]['yeast_and_mold'].hist(
175
+ bins=100,
176
+ alpha=0.75,
177
+ density=True,
178
+ )
179
+ plt.axvline(10_000, color='r', linestyle='dashed', linewidth=1)
180
+ plt.xlabel('Yeast and Mold (CFU/g)')
181
+ plt.ylabel('Frequency')
182
+ plt.title('Histogram of Yeast and Mold Detections below 10,000')
183
+ plt.legend(['State Limit (10,000)', 'Yeast and Mold (CFU/g)'])
184
+ plt.gca().xaxis.set_major_formatter(ticker.FuncFormatter(lambda x, _: f'{int(x):,}'))
185
+ plt.xlim(0, 15_000)
186
+ plt.savefig(f'{assets_dir}/histogram-below-10k.png', bbox_inches='tight', dpi=300, transparent=True)
187
+ plt.show()
188
+
189
+ # Histogram above 10k.
190
+ plt.figure(figsize=(15, 8))
191
+ filtered_df = sample.dropna(subset=['yeast_and_mold'])
192
+ filtered_df.loc[filtered_df['yeast_and_mold'] > 10_000]['yeast_and_mold'].hist(
193
+ bins=1000,
194
+ alpha=0.75,
195
+ density=True,
196
+ )
197
+ plt.axvline(10_000, color='r', linestyle='dashed', linewidth=1)
198
+ plt.xlabel('Yeast and Mold (CFU/g)')
199
+ plt.ylabel('Frequency')
200
+ plt.title('Histogram of Yeast and Mold Detections above 10,000')
201
+ plt.legend(['State Limit (10,000)', 'Yeast and Mold (CFU/g)'])
202
+ plt.gca().xaxis.set_major_formatter(ticker.FuncFormatter(lambda x, _: f'{int(x):,}'))
203
+ plt.xlim(0, 500_000)
204
+ plt.savefig(f'{assets_dir}/histogram-above-10k.png', bbox_inches='tight', dpi=300, transparent=True)
205
+ plt.show()
206
+
207
+
208
+ # === Failure Analysis ===
209
+
210
+ # Identify failures.
211
+ fails = sample.loc[sample['yeast_and_mold'] > 10_000]
212
+ print(fails[['label', 'date_tested', 'lab', 'yeast_and_mold']])
213
+
214
+ # Visualize failure rates.
215
+ sample['fail'] = sample['yeast_and_mold'] >= 10_000
216
+ fail_counts = sample['fail'].value_counts()
217
+ fail_percentages = (fail_counts / fail_counts.sum()) * 100
218
+ colors = cm.coolwarm(sample['fail'].value_counts(normalize=True))
219
+ plt.figure(figsize=(15, 8))
220
+ ax = sample['fail'].value_counts().plot(
221
+ kind='bar',
222
+ color=[colors[-1], colors[0]]
223
+ )
224
+ ax.get_yaxis().set_major_formatter(StrMethodFormatter('{x:,.0f}'))
225
+ plt.xticks(
226
+ ticks=[0, 1],
227
+ labels=['Below 10,000 CFU/g', 'Above 10,000 CFU/g'],
228
+ rotation=0,
229
+ )
230
+ for i, (count, percentage) in enumerate(zip(fail_counts, fail_percentages)):
231
+ ax.text(i, count, f'{percentage:.1f}%', color='black', ha='center', va='bottom')
232
+ plt.ylabel('Number of Samples')
233
+ plt.title('Total Yeast and Mold Detections in MA in 2023', pad=24)
234
+ plt.xlabel('Pass/Fail')
235
+ plt.savefig(f'{assets_dir}/ma-yeast-and-mold-failure-rate-2023.png', bbox_inches='tight', dpi=300, transparent=True)
236
+ plt.show()
237
+ failure_rate = len(fails) / len(sample)
238
+ print('Failure rate: %0.2f%%' % (failure_rate * 100))
239
+
240
+
241
+ # === Lab Failure Analysis ===
242
+
243
+ # FIXME: Visualize failure rate by lab.
244
+ samples_tested_by_lab = sample['lab'].value_counts()
245
+ failures_by_lab = sample.groupby('lab')['fail'].sum()
246
+ failure_rate_by_lab = sample.groupby('lab')['fail'].mean()
247
+ failure_rate_by_lab = failure_rate_by_lab.sort_values()
248
+ plt.figure(figsize=(18, 16/1.618))
249
+ ax = sns.barplot(
250
+ x=failure_rate_by_lab.index,
251
+ y=failure_rate_by_lab.values * 100,
252
+ palette='coolwarm'
253
+ )
254
+ for i, p in enumerate(ax.patches):
255
+ lab = failure_rate_by_lab.index[i]
256
+ ax.annotate(
257
+ f'{failures_by_lab[lab]:,.0f} / {samples_tested_by_lab[lab]:,.0f}',
258
+ (p.get_x() + p.get_width() / 2., p.get_height()),
259
+ ha='center',
260
+ va='bottom',
261
+ fontsize=24,
262
+ color='black',
263
+ xytext=(0, 3),
264
+ textcoords='offset points'
265
+ )
266
+ plt.ylabel('Failure Rate (%)', fontsize=28, labelpad=10)
267
+ plt.xlabel('')
268
+ plt.title('Total Yeast and Mold Failure Rate by Lab in MA in 2023', fontsize=34)
269
+ plt.xticks(rotation=45)
270
+ plt.figtext(
271
+ 0,
272
+ -0.075,
273
+ 'Note: Statistics are calculated from 35,825 package lab tests for total yeast and mold performed between 1/1/2023 and 9/30/2023 in Massachusetts. The number of tests above the state limit, 10,000 CFU/g, and the total number of tests are shown for each lab.',
274
+ ha='left',
275
+ fontsize=24,
276
+ wrap=True
277
+ )
278
+ plt.tight_layout()
279
+ plt.savefig(f'{assets_dir}/ma-yeast-and-mold-failure-rate-by-lab-2023.png', bbox_inches='tight', dpi=300, transparent=True)
280
+ plt.show()
281
+
282
+
283
+ # === Method Analysis ===
284
+
285
+ def determine_method(x):
286
+ """Determine the method of testing based on the value.
287
+ If the value is divisible by 10 and has no decimal component, it's `plating`.
288
+ Otherwise, it's considered `qPCR`.
289
+ """
290
+ if pd.isna(x):
291
+ return None
292
+ # Check if the number is a whole number and divisible by 10
293
+ if x % 10 == 0 and x == int(x):
294
+ return 'plating'
295
+ else:
296
+ return 'qPCR'
297
+
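+ # Sanity checks of the heuristic above (illustrative values):
+ assert determine_method(12_000.0) == 'plating'  # whole count divisible by 10
+ assert determine_method(4_567.5) == 'qPCR'  # fractional counts imply a conversion
+ assert determine_method(None) is None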
298
+
299
+ # Determine the method of testing.
300
+ sample['method'] = sample['yeast_and_mold'].apply(determine_method)
301
+ test_count_per_method = sample['method'].value_counts()
302
+
303
+ # Example analysis: Average yeast_and_mold results per method
304
+ average_results_per_method = sample.groupby('method')['yeast_and_mold'].mean()
305
+
306
+ print(test_count_per_method)
307
+ print(average_results_per_method)
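+ # Quick sanity check of the heuristic on a few hypothetical values:
+ # plate counts are reported in multiples of 10, qPCR conversions rarely are.
+ for value in [4300.0, 120.0, 8471.5, 6333.0]:
+     print(value, '->', determine_method(value))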
308
+
309
+ # Histogram of detections between 100 and 15,000 CFU/g, by estimated method.
310
+ plt.figure(figsize=(15, 8))
311
+ filtered_df = sample.dropna(subset=['yeast_and_mold'])
312
+ subsample = filtered_df.loc[
313
+ (filtered_df['yeast_and_mold'] <= 15_000) &
314
+ (filtered_df['yeast_and_mold'] > 100)
315
+ ]
316
+ plating_values = subsample.loc[subsample['method'] == 'plating']['yeast_and_mold']
317
+ qpcr_values = subsample.loc[subsample['method'] == 'qPCR']['yeast_and_mold']
318
+ plating_values.hist(
319
+ bins=100,
320
+ alpha=0.75,
321
+ density=True,
322
+ label='Plating',
323
+ )
324
+ qpcr_values.hist(
325
+ bins=100,
326
+ alpha=0.75,
327
+ density=True,
328
+ label='qPCR',
329
+ )
330
+ plt.axvline(10_000, color='r', linestyle='dashed', linewidth=1, label='State Limit (10,000)')
331
+ plt.xlabel('Yeast and Mold (CFU/g)')
+ plt.ylabel('Density')
+ plt.title('Histogram of Yeast and Mold Detections between 100 and 15,000 CFU/g')
334
+ # plt.legend(['State Limit (10,000)', 'Yeast and Mold Counts'])
335
+ plt.legend()
336
+ plt.gca().xaxis.set_major_formatter(ticker.FuncFormatter(lambda x, _: f'{int(x):,}'))
337
+ plt.xlim(0, 15_000)
338
+ plt.savefig(f'{assets_dir}/below-10k-methods.png', bbox_inches='tight', dpi=300, transparent=True)
339
+ plt.show()
340
+
341
+ # Histogram of detections above the 10,000 CFU/g limit, by estimated method.
342
+ plt.figure(figsize=(15, 8))
343
+ filtered_df = sample.dropna(subset=['yeast_and_mold'])
344
+ subsample = filtered_df.loc[filtered_df['yeast_and_mold'] > 10_000]
345
+ plating_values = subsample.loc[subsample['method'] == 'plating']['yeast_and_mold']
346
+ qpcr_values = subsample.loc[subsample['method'] == 'qPCR']['yeast_and_mold']
347
+ plating_values.hist(
348
+ bins=1000,
349
+ alpha=0.75,
350
+ density=True,
351
+ label='Plating',
352
+ )
353
+ qpcr_values.loc[
+     qpcr_values != 200001  # 200001 appears to be a sentinel for counts above the reporting limit.
+ ].hist(
356
+ bins=1000,
357
+ alpha=0.75,
358
+ density=True,
359
+ label='qPCR',
360
+ )
361
+ plt.axvline(10_000, color='r', linestyle='dashed', linewidth=1, label='State Limit (10,000)')
362
+ plt.xlabel('Yeast and Mold (CFU/g)')
+ plt.ylabel('Density')
+ plt.title('Histogram of Yeast and Mold Detections above 10,000 CFU/g')
365
+ plt.legend()
366
+ plt.gca().xaxis.set_major_formatter(ticker.FuncFormatter(lambda x, _: f'{int(x):,}'))
367
+ plt.xlim(0, 500_000)
368
+ plt.savefig(f'{assets_dir}/above-10k-methods.png', bbox_inches='tight', dpi=300, transparent=True)
369
+ plt.show()
370
+
371
+
372
+ # Count the number of tests per method per lab.
373
+ group = sample.groupby('lab')['method'].value_counts().unstack().fillna(0)
+ group.sort_index(inplace=True)
+ group.plot(kind='bar', figsize=(14, 7), width=0.8)  # Grouped bars per lab, one color per method.
376
+ plt.title('Estimated Number of Tests per Method per Lab')
377
+ plt.xlabel('Lab')
378
+ plt.ylabel('Number of Tests')
379
+ plt.legend(title='Method')
380
+ plt.tight_layout()
381
+ plt.savefig(f'{assets_dir}/methods-by-lab.png', bbox_inches='tight', dpi=300, transparent=True)
382
+ plt.show()
383
+
384
+ # === Benford's Law Analysis ===
385
+
386
+
387
+ # Function to extract the first significant digit.
+ def first_significant_digit(number):
+     # Scientific notation exposes the leading digit directly, even for values
+     # below 1, e.g. 4300 -> 4 and 0.052 -> 5.
+     return int(f'{abs(number):e}'[0])
390
+
391
+ # 1. Extracting first significant digit from yeast_and_mold values
392
+ subsample = sample.dropna(subset=['yeast_and_mold'])
393
+ subsample = subsample.loc[
394
+ (subsample['yeast_and_mold'] <= 200_000) &
395
+ (subsample['yeast_and_mold'] > 0)
396
+ ]
397
+ subsample['first_digit'] = subsample['yeast_and_mold'].dropna().apply(first_significant_digit)
398
+
399
+ # 2. Generate Benford's Law distribution for the first significant digit
400
+ digits = range(1, 10)
401
+ benford = [np.log10(1 + 1/d) * 100 for d in digits]
402
+
403
+ # 3. Generate a random sample and extract first significant digit
404
+ np.random.seed(420)
405
+ random_sample = np.random.uniform(
406
+ 0,
407
+ 100000,
408
+ size=len(subsample['yeast_and_mold'].dropna()),
409
+ )
410
+ random_first_digit = [first_significant_digit(num) for num in random_sample]
411
+
412
+ # Frequency counts of the first digits
413
+ actual_counts = subsample['first_digit'].value_counts(normalize=True).sort_index() * 100
414
+ random_counts = pd.Series(random_first_digit).value_counts(normalize=True).sort_index() * 100
415
+
416
+ # Smooth line.
417
+ from scipy.interpolate import make_interp_spline
418
+ xnew = np.linspace(min(digits), max(digits), 100)
419
+ spl = make_interp_spline(digits, benford, k=2) # k is the degree of the spline
420
+ benford_smooth = spl(xnew)
421
+
422
+ # 4. Plot the distributions
423
+ plt.figure(figsize=(15, 8))
424
+ plt.plot(xnew, benford_smooth, '-', label='Benford\'s Law')
425
+ plt.plot(actual_counts.index, actual_counts, 's-', label='Yeast and Mold Counts')
426
+ plt.plot(random_counts.index, random_counts, 'd-', label='Random Sample')
427
+ plt.xticks(digits)
428
+ plt.xlabel('First Significant Digit')
429
+ plt.ylabel('Percentage')
430
+ plt.title('First Significant Digit Distribution Comparison')
431
+ plt.legend()
432
+ plt.grid(True)
433
+ plt.savefig(f'{assets_dir}/benford-ym.png', bbox_inches='tight', dpi=300, transparent=True)
434
+ plt.show()
435
+
436
+ # 5. Plot the first-digit distributions by method.
437
+ qpcr = subsample.loc[subsample['method'] == 'qPCR']
438
+ plating = subsample.loc[subsample['method'] == 'plating']
439
+ plating_counts = plating['first_digit'].value_counts(normalize=True).sort_index() * 100
440
+ qpcr_counts = qpcr['first_digit'].value_counts(normalize=True).sort_index() * 100
441
+ plt.figure(figsize=(15, 8))
442
+ plt.plot(digits, benford, 'o-', label='Benford\'s Law')
443
+ plt.plot(plating_counts.index, plating_counts, 's-', label='Plating')
444
+ plt.plot(qpcr_counts.index, qpcr_counts, 'd-', label='qPCR')
445
+ plt.xticks(digits)
446
+ plt.xlabel('First Significant Digit')
447
+ plt.ylabel('Percentage')
448
+ plt.title('First Significant Digit Distribution Comparison')
449
+ plt.legend()
450
+ plt.grid(True)
451
+ plt.savefig(f'{assets_dir}/benford-methods.png', bbox_inches='tight', dpi=300, transparent=True)
452
+ plt.show()
453
+
454
+ print(plating.sample(5, random_state=420)['yeast_and_mold'])
455
+ print(qpcr.sample(5, random_state=420)['yeast_and_mold'])
456
+
457
+
458
+ from scipy.stats import chisquare
459
+
460
+ # Convert percentages back to counts
461
+ total_qpcr = len(qpcr.dropna(subset=['first_digit']))
462
+ total_plating = len(plating.dropna(subset=['first_digit']))
463
+
464
+ # Reindex over digits 1-9 so observed and expected arrays align,
+ # even if a digit never occurs for one of the methods.
+ qpcr_observed_counts = (qpcr_counts / 100).reindex(range(1, 10), fill_value=0) * total_qpcr
+ plating_observed_counts = (plating_counts / 100).reindex(range(1, 10), fill_value=0) * total_plating
466
+
467
+ # Benford's expected percentages for the first digit
468
+ benford_percentages = np.array([np.log10(1 + 1/d) for d in range(1, 10)])
469
+
470
+ # Convert Benford's percentages to expected counts for each method
471
+ benford_expected_qpcr = benford_percentages * total_qpcr
472
+ benford_expected_plating = benford_percentages * total_plating
473
+
474
+ # Perform Chi-squared test for qPCR
475
+ chi2_stat_qpcr, p_val_qpcr = chisquare(f_obs=qpcr_observed_counts, f_exp=benford_expected_qpcr)
476
+
477
+ # Perform Chi-squared test for Plating
478
+ chi2_stat_plating, p_val_plating = chisquare(f_obs=plating_observed_counts, f_exp=benford_expected_plating)
479
+
480
+ print(f"qPCR Chi-squared Stat: {chi2_stat_qpcr}, p-value: {p_val_qpcr}")
481
+ print(f"Plating Chi-squared Stat: {chi2_stat_plating}, p-value: {p_val_plating}")
482
+
483
+ # Comparing the Chi-squared statistics and p-values
484
+ # A lower p-value indicates a higher statistical significance of deviation from Benford's Law
485
+ lower_deviation = 'Plating' if p_val_qpcr < p_val_plating else 'qPCR'
486
+ print(f"The method with lower deviation from Benford's Law is {lower_deviation}")
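+ # Chi-squared p-values scale with sample size, so also report the mean
+ # absolute deviation (MAD) from Benford's expected digit frequencies, a
+ # size-insensitive conformity measure (a supplemental check, not part of
+ # the original analysis).
+ benford_pct = benford_percentages * 100
+ mad_qpcr = np.mean(np.abs(qpcr_counts.reindex(range(1, 10), fill_value=0).values - benford_pct))
+ mad_plating = np.mean(np.abs(plating_counts.reindex(range(1, 10), fill_value=0).values - benford_pct))
+ print(f'MAD from Benford (qPCR): {mad_qpcr:.2f} percentage points')
+ print(f'MAD from Benford (plating): {mad_plating:.2f} percentage points')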
487
+
488
+
489
+ # === Summary Statistics ===
490
+
491
+ # TODO: Calculate the number of producers in each state dataset.
492
+
493
+
494
+ # TODO: Calculate the number of tests per producer per week / month / year in each state.
495
+
496
+
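+ # A minimal sketch for the two TODOs above; `producer_license_number` is a
+ # hypothetical column name here, so its presence is checked first.
+ if 'producer_license_number' in sample.columns:
+     sample['date_tested'] = pd.to_datetime(sample['date_tested'], errors='coerce')
+     print('Number of producers:', sample['producer_license_number'].nunique())
+     tests_per_producer = sample.groupby([
+         pd.Grouper(key='date_tested', freq='M'),
+         'producer_license_number',
+     ]).size()
+     print('Average tests per producer per month:')
+     print(tests_per_producer.groupby(level=0).mean())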
497
+
498
+ def plot_metric_over_time(metric, metric_name, y_label, color='skyblue'):
499
+ """
500
+ General function to plot any calculated metric over time.
501
+ """
502
+ plt.figure(figsize=(15, 8))
503
+ metric.plot(color=color)
504
+ plt.gca().yaxis.set_major_formatter(ticker.FuncFormatter(lambda x, _: f'{int(x):,}'))
505
+ plt.title(f'{metric_name} Over Time')
506
+ plt.xlabel('Date')
507
+ plt.ylabel(y_label)
508
+ plt.grid(True)
509
+ plt.tight_layout()
510
+ title = metric_name.replace(' ', '-').lower()
511
+ print(title)
512
+ plt.savefig(f'{assets_dir}/timeseries-{title}.png', bbox_inches='tight', dpi=300, transparent=True)
513
+ plt.show()
514
+
515
+ # Get a sample.
516
+ sample = pivot_df.copy()
517
+ sample['yeast_and_mold'] = pd.to_numeric(sample['yeast_and_mold'], errors='coerce')
518
+ sample['date_tested'] = pd.to_datetime(sample['date_tested'])
519
+ sample = sample.loc[sample['date_tested'] >= pd.to_datetime('2023-01-01')]
520
+
521
+ # Visualize the number of tests over time.
522
+ sample['count'] = 1
523
+ num_tests = sample.resample('M', on='date_tested')['count'].sum()
524
+ plot_metric_over_time(num_tests, 'Number of Tests', 'Number of Tests')
525
+
526
+ # Visualize the cost of tests over time (assuming $20 per test).
527
+ cost_of_tests = num_tests * 20
528
+ plot_metric_over_time(cost_of_tests, 'Cost of Tests', 'Cost ($)', 'green')
529
+
530
+ # Visualize the cost of failures over time (assuming $21,464 per failure).
+ # The sample is already restricted to tests on or after 2023-01-01 above.
+ sample['failure'] = sample['yeast_and_mold'] > 10_000
+ failures_per_month = sample.resample('M', on='date_tested')['failure'].sum()
+ cost_of_failures = failures_per_month * 21_464
534
+ plot_metric_over_time(cost_of_failures, 'Estimated Cost of Failures', 'Cost ($)', 'red')
535
+
536
+ # Estimate the cost of testing in 2023: the observed January-September total plus three months at the average monthly rate.
537
+ total_cost_of_tests = cost_of_tests.sum()
538
+ avg_monthly_cost = cost_of_tests.mean()
539
+ estimate_2023 = total_cost_of_tests + (avg_monthly_cost * 3)
540
+ print(f'Estimated cost of testing in 2023: ${estimate_2023 / 1_000_000:,.0f} million')
541
+
542
+ # Estimate the cost of total yeast and mold failures in 2023.
543
+ total_cost_of_failures = cost_of_failures.sum()
544
+ avg_monthly_cost = cost_of_failures.mean()
545
+ estimate_2023 = total_cost_of_failures + (avg_monthly_cost * 3)
546
+ print(f'Estimated cost of total yeast and mold failures in 2023: ${estimate_2023 / 1_000_000:,.0f} million')
547
+
548
+
549
+ # === Timeseries Analysis ===
550
+
551
+ def plot_timeseries(
552
+ df,
553
+ title,
554
+ x='date_tested',
555
+ y='total_thc',
556
+ y_label='Total THC (%)',
557
+ outfile=None,
558
+ y_min=0,
559
+ y_max=15_000,
560
+ ma=30,
561
+ dot_color='royalblue',
562
+ line_color='navy',
563
+ ):
564
+     """
+     Plot the timeseries data with dots for actual values and a moving-average
+     trend line.
+     """
568
+ plt.figure(figsize=(15, 8))
569
+ df[x] = pd.to_datetime(df[x])
570
+
571
+ # Plot actual THC values as dots
572
+ sns.scatterplot(
573
+ data=df,
574
+ x=x,
575
+ y=y,
576
+ color=dot_color,
577
+ s=75,
578
+ alpha=0.6,
579
+ )
580
+
581
+ # Plot weekly moving average.
582
+ df[f'{ma}_day_avg'] = df[y].rolling(
583
+ window=ma,
584
+ min_periods=1
585
+ ).mean()
586
+ sns.lineplot(
587
+ data=df,
588
+ x=x,
589
+ y=f'{ma}_day_avg',
590
+ color=line_color,
591
+ label=f'{ma}-day Moving Average'
592
+ )
593
+
594
+ # Calculate positions for the desired number of ticks (e.g., 5 ticks)
595
+ selected_dates = ['2023-01-01', '2023-04-01', '2023-07-01', '2023-10-01'] # Example dates
596
+ plt.xticks(ticks=pd.to_datetime(selected_dates), labels=selected_dates)
597
+
598
+ # Add title and labels
599
+ plt.title(title, pad=20)
600
+ plt.xlabel('Date')
601
+ plt.ylabel(y_label)
602
+ plt.legend(loc='lower left')
603
+ plt.tight_layout()
604
+ plt.ylim(y_min, y_max)
605
+ plt.gca().yaxis.set_major_formatter(ticker.FuncFormatter(lambda x, _: f'{int(x):,}'))
606
+ if outfile is None:
607
+ outfile = f'{assets_dir}/{y.replace("_", "-")}-timeseries.pdf'
608
+ plt.savefig(outfile, dpi=300, bbox_inches='tight', transparent=True)
609
+ plt.show()
610
+
611
+
612
+ # Get the data.
613
+ sample = pivot_df.copy()
614
+ sample['yeast_and_mold'] = pd.to_numeric(sample['yeast_and_mold'], errors='coerce')
615
+ sample['year'] = pd.to_datetime(sample['date_tested']).dt.year
616
+ sample = sample.loc[sample['year'] == 2023]
617
+
618
+ # Plot trend lines and timeseries
619
+ plot_timeseries(
620
+ sample.copy().loc[
621
+ (sample['yeast_and_mold'] > 100) &
622
+ (sample['yeast_and_mold'] < 10_000)
623
+ ],
624
+ title='Yeast and Mold Values Over Time in MA',
625
+ x='date_tested',
626
+ y='yeast_and_mold',
627
+ y_label='Yeast and Mold (CFU/g)',
628
+ outfile=f'{assets_dir}/timeseries-yeast-and-mold.png',
629
+ y_min=100,
630
+ y_max=10_000,
631
+ ma=30,
632
+ )
633
+
634
+ # Plot trend lines and timeseries
635
+ plot_timeseries(
636
+ sample.copy().loc[
637
+ (sample['yeast_and_mold'] > 10_000) &
638
+ (sample['yeast_and_mold'] < 500_000)
639
+ ],
640
+ title='Yeast and Mold Values Over Time in MA',
641
+ x='date_tested',
642
+ y='yeast_and_mold',
643
+ y_label='Yeast and Mold (CFU/g)',
644
+ outfile=f'{assets_dir}/timeseries-yeast-and-mold-above-10k.png',
645
+ y_min=10_000,
646
+ y_max=500_000,
647
+ ma=30,
648
+ dot_color='firebrick',
649
+ line_color='darkred',
650
+ )
651
+
652
+ # Plot trend lines and timeseries by lab.
653
+ labs = list(pivot_df['lab'].unique())
654
+ lab_colors = sns.color_palette('tab10', n_colors=len(labs))
655
+ for i, lab in enumerate(labs):
656
+ y_min, y_max = 100, 500_000
657
+ lab_sample = sample.copy().loc[
658
+ (sample['yeast_and_mold'] > y_min) &
659
+ (sample['yeast_and_mold'] < y_max) &
660
+ (sample['lab'] == lab)
661
+ ]
662
+ if len(lab_sample) < 100:
663
+ continue
664
+ print(len(lab_sample))
665
+ plot_timeseries(
666
+ lab_sample,
667
+ title='Yeast and Mold Values Over Time in MA',
668
+ x='date_tested',
669
+ y='yeast_and_mold',
670
+ y_label='Yeast and Mold (CFU/g)',
671
+ outfile=f'{assets_dir}/timeseries-yeast-and-mold-{lab}.png',
672
+ y_min=y_min,
673
+ y_max=y_max,
674
+ ma=30,
675
+ dot_color=lab_colors[i],
676
+ line_color=lab_colors[i],
677
+ )
678
+ print(f'timeseries-yeast-and-mold-{lab}.png')
679
+
680
+
681
+ # === Lab Failure Analysis ===
682
+
683
+ def calculate_failure_rate(df, threshold=10_000, period='W'):
684
+ """
685
+ Calculate the failure rate based on the 'yeast_and_mold' threshold.
686
+ """
687
+ df['failure'] = df['yeast_and_mold'] >= threshold
688
+ df['date_tested'] = pd.to_datetime(df['date_tested'])
689
+ return df.groupby(df['date_tested'].dt.to_period(period))['failure'].mean() * 100
690
+
691
+
692
+ def plot_failure_rates(df, color, threshold=10_000, period='W'):
693
+ """
694
+ Plot the failure rates over time with a moving average.
695
+ """
696
+ plt.figure(figsize=(15, 8))
697
+ failure_rate = calculate_failure_rate(df, threshold, period=period)
698
+ failure_rate.index = failure_rate.index.to_timestamp()
699
+
700
+ # Plot the failure rates
701
+ plt.plot(
702
+ failure_rate.index,
703
+ failure_rate,
704
+ # label=f'Lab {lab}',
705
+ color=color
706
+ )
707
+
708
+ # Calculate and plot horizontal lines for the mean, 25th percentile, and 75th percentile
709
+ mean_rate = failure_rate.mean()
710
+ percentile_25 = failure_rate.quantile(0.25)
711
+ percentile_75 = failure_rate.quantile(0.75)
712
+
713
+ plt.axhline(y=mean_rate, color='green', linestyle='--', label='Mean')
714
+ plt.axhline(y=percentile_25, color='blue', linestyle=':', label='25th Percentile')
715
+ plt.axhline(y=percentile_75, color='red', linestyle='-.', label='75th Percentile')
716
+
717
+ # Add title and labels.
718
+ plt.title('Failure Rates Over Time by Lab')
719
+ plt.xlabel('Date')
720
+ plt.ylabel('Failure Rate (%)')
721
+ plt.legend()
722
+ plt.tight_layout()
723
+ plt.show()
724
+
725
+ # Get the data.
726
+ sample = pivot_df.copy()
727
+ sample['yeast_and_mold'] = pd.to_numeric(sample['yeast_and_mold'], errors='coerce')
728
+ sample['year'] = pd.to_datetime(sample['date_tested']).dt.year
729
+ sample = sample.loc[sample['year'] == 2023]
730
+
731
+ # Plot failure rates by lab ('pivot_df' and 'sample' are prepared above).
732
+ labs = list(pivot_df['lab'].unique())
733
+ lab_colors = sns.color_palette('tab10', n_colors=len(labs))
734
+ for i, lab in enumerate(labs):
735
+ lab_sample = sample[(sample['lab'] == lab) & (sample['yeast_and_mold'].notna())]
736
+ if len(lab_sample) >= 1_000:
737
+ print('N = ', len(lab_sample))
738
+ plot_failure_rates(lab_sample.copy(), lab_colors[i])
739
+
740
+
741
+ # === Overall failure rate analysis ===
742
+
743
+ def plot_failure_rates(df, color, threshold=10_000, period='W'):
744
+ """
745
+ Plot the failure rates over time, segmenting the data to avoid drawing lines across gaps.
746
+ """
747
+ plt.figure(figsize=(15, 8))
748
+
749
+ # Define your data periods explicitly
750
+ periods = [
751
+ (pd.to_datetime('2021-04-01'), pd.to_datetime('2021-12-31')),
752
+ (pd.to_datetime('2023-01-01'), pd.to_datetime('2023-09-30')),
753
+ ]
754
+
755
+ for start_date, end_date in periods:
756
+ # Filter the dataframe for the current period
757
+ period_df = df[(df['date_tested'] >= start_date) & (df['date_tested'] <= end_date)]
758
+ failure_rate = calculate_failure_rate(period_df, threshold, period=period)
759
+ failure_rate.index = failure_rate.index.to_timestamp()
760
+
761
+ # Plot the failure rates for the current period
762
+ plt.plot(failure_rate.index, failure_rate, color=color)
763
+
764
+ # Calculate and plot horizontal lines for the mean, 25th percentile, and 75th percentile
765
+ mean_rate = failure_rate.mean()
766
+ percentile_25 = failure_rate.quantile(0.25)
767
+ percentile_75 = failure_rate.quantile(0.75)
768
+ plt.axhline(y=mean_rate, color='green', linestyle='--', label='Mean')
769
+ plt.axhline(y=percentile_25, color='blue', linestyle=':', label='25th Percentile')
770
+ plt.axhline(y=percentile_75, color='red', linestyle='-.', label='75th Percentile')
771
+
772
+ # Add title and labels.
773
+ plt.title('Failure Rates Over Time')
774
+ plt.xlabel('Date')
775
+ plt.ylabel('Failure Rate (%)')
776
+ plt.tight_layout()
777
+ outfile = f'{assets_dir}/failure-rates-over-time.png'
778
+ plt.savefig(outfile, dpi=300, bbox_inches='tight', transparent=True)
779
+ plt.show()
780
+
781
+ # Prepare the sample ('pivot_df' holds 'date_tested' and 'yeast_and_mold' columns).
782
+ sample = pivot_df.copy()
783
+ sample['date_tested'] = pd.to_datetime(sample['date_tested'])
784
+ sample['yeast_and_mold'] = pd.to_numeric(sample['yeast_and_mold'], errors='coerce')
785
+
786
+ # Plot overall failure rate.
787
+ plot_failure_rates(
788
+ sample.loc[
789
+ pd.to_datetime(sample['date_tested']) >= pd.to_datetime('2021-07-01')
790
+ ],
791
+ 'k'
792
+ )
793
+
794
+
795
+ # === Detection Rate Analysis ===
796
+
797
+ def calculate_detection_rate(df, threshold=100, period='W'):
798
+ """
799
+ Calculate the detection rate based on the 'yeast_and_mold' threshold.
800
+ """
801
+ df['detected'] = df['yeast_and_mold'] > threshold
802
+ df['date_tested'] = pd.to_datetime(df['date_tested'])
803
+ return df.groupby(df['date_tested'].dt.to_period(period))['detected'].mean() * 100
804
+
805
+
806
+ def plot_detection_rates(df, color, threshold=100, period='W'):
807
+ """
808
+ Plot the detection rates over time, segmenting the data to avoid drawing lines across gaps.
809
+ """
810
+ plt.figure(figsize=(15, 8))
811
+
812
+ periods = [
813
+ (pd.to_datetime('2021-04-01'), pd.to_datetime('2021-12-31')),
814
+ (pd.to_datetime('2023-01-01'), pd.to_datetime('2023-09-30')),
815
+ ]
816
+
817
+ for start_date, end_date in periods:
818
+ period_df = df[(df['date_tested'] >= start_date) & (df['date_tested'] <= end_date)]
819
+ detection_rate = calculate_detection_rate(period_df, threshold, period=period)
820
+ detection_rate.index = detection_rate.index.to_timestamp()
821
+
822
+ plt.plot(detection_rate.index, detection_rate, color=color)
823
+
824
+ # Calculate and add aesthetic lines for mean, 25th, and 75th percentiles
825
+ overall_rate = calculate_detection_rate(df, threshold, period)
826
+ mean_rate = overall_rate.mean()
827
+ percentile_25 = overall_rate.quantile(0.25)
828
+ percentile_75 = overall_rate.quantile(0.75)
829
+ plt.axhline(y=mean_rate, color='green', linestyle='--', label='Mean')
830
+ plt.axhline(y=percentile_25, color='blue', linestyle=':', label='25th Percentile')
831
+ plt.axhline(y=percentile_75, color='red', linestyle='-.', label='75th Percentile')
832
+
833
+ plt.title('Detection Rates Over Time')
834
+ plt.xlabel('Date')
835
+ plt.ylabel('Detection Rate (%)')
836
+ plt.legend()
837
+ plt.tight_layout()
838
+ plt.show()
839
+
840
+
841
+ # Visualize detection rates over time.
842
+ sample = pivot_df.copy()
843
+ sample['yeast_and_mold'] = pd.to_numeric(sample['yeast_and_mold'], errors='coerce')
844
+ sample['date_tested'] = pd.to_datetime(sample['date_tested'])
845
+ sample = sample.loc[sample['date_tested'] >= pd.to_datetime('2021-07-01')]
846
+ plot_detection_rates(sample.copy(), 'k')
847
+
848
+
849
+ def plot_detection_rates(df, color, lab_name, threshold=100, period='W'):
850
+ """
851
+ Plot the detection rates over time.
852
+ """
853
+ plt.figure(figsize=(15, 8))
854
+ detection_rate = calculate_detection_rate(df, threshold, period=period)
855
+ detection_rate.index = detection_rate.index.to_timestamp()
856
+
857
+ # Plot the detection rates
858
+ plt.plot(detection_rate.index, detection_rate, label=f'Lab {lab_name}', color=color)
859
+
860
+ # Calculate and plot horizontal lines for the mean, 25th percentile, and 75th percentile
861
+ mean_rate = detection_rate.mean()
862
+ percentile_25 = detection_rate.quantile(0.25)
863
+ percentile_75 = detection_rate.quantile(0.75)
864
+
865
+ plt.axhline(y=mean_rate, color='green', linestyle='--', label='Mean')
866
+ plt.axhline(y=percentile_25, color='blue', linestyle=':', label='25th Percentile')
867
+ plt.axhline(y=percentile_75, color='red', linestyle='-.', label='75th Percentile')
868
+
869
+ # Add title and labels
870
+ plt.title(f'Detection Rates Over Time: {lab_name}')
871
+ plt.xlabel('Date')
872
+ plt.ylabel('Detection Rate (%)')
873
+ plt.legend()
874
+ plt.tight_layout()
875
+ plt.show()
876
+
877
+ # Visualize detection rate by lab.
878
+ sample['date_tested'] = pd.to_datetime(sample['date_tested'])
879
+ sample['yeast_and_mold'] = pd.to_numeric(sample['yeast_and_mold'], errors='coerce')
880
+ labs = list(sample['lab'].unique())
881
+ lab_colors = sns.color_palette('tab10', n_colors=len(labs))
882
+ for i, lab in enumerate(labs):
883
+ lab_sample = sample[(sample['lab'] == lab) & (sample['yeast_and_mold'].notna())]
884
+ if len(lab_sample) >= 100:
885
+ plot_detection_rates(lab_sample, lab_colors[i], lab)
886
+
analysis/analyze_results_md.py ADDED
@@ -0,0 +1,9 @@
+ """
+ Analyze Cannabis Lab Results | Maryland
+ Copyright (c) 2024 Cannlytics
+
+ Authors: Keegan Skeate <https://github.com/keeganskeate>
+ Created: 2/1/2024
+ Updated: 2/1/2024
+ License: MIT License <https://github.com/cannlytics/cannabis-data-science/blob/main/LICENSE>
+ """
analysis/analyze_results_mi.py ADDED
@@ -0,0 +1,9 @@
+ """
+ Analyze Cannabis Lab Results | Michigan
+ Copyright (c) 2024 Cannlytics
+
+ Authors: Keegan Skeate <https://github.com/keeganskeate>
+ Created: 2/1/2024
+ Updated: 2/1/2024
+ License: MIT License <https://github.com/cannlytics/cannabis-data-science/blob/main/LICENSE>
+ """
analysis/analyze_results_nv.py ADDED
@@ -0,0 +1,165 @@
1
+ """
2
+ Analyze Cannabis Lab Results | Nevada
3
+ Copyright (c) 2024 Cannlytics
4
+
5
+ Authors: Keegan Skeate <https://github.com/keeganskeate>
6
+ Created: 5/30/2024
7
+ Updated: 5/30/2024
8
+ License: MIT License <https://github.com/cannlytics/cannabis-data-science/blob/main/LICENSE>
9
+ """
10
+
11
+ # Standard imports:
12
+ import os
13
+
14
+ # External imports:
15
+ from dotenv import dotenv_values
16
+ from matplotlib import pyplot as plt
17
+ import matplotlib.dates as mdates
18
+ import numpy as np
19
+ import pandas as pd
20
+ from sklearn.linear_model import LinearRegression
21
+
22
+ # Define where figures will be saved.
23
+ assets_dir = 'D://data/nevada/results/assets'
24
+
25
+ # Read curated Nevada lab result data.
26
+ stats_dir = 'D://data/nevada/results/datasets'
27
+ datafile = f'{stats_dir}/nv-results-latest.csv'
28
+ results = pd.read_csv(datafile)
29
+ print('Number of Nevada results:', len(results))
30
+
31
+ # Calculate lab market share by month.
+ # Parse dates before using the `.dt` accessor on the CSV-loaded strings.
+ results['date_tested'] = pd.to_datetime(results['date_tested'], errors='coerce')
+ results = results.dropna(subset=['date_tested'])
+ results['month'] = results['date_tested'].dt.to_period('M')
34
+ market_share = results.groupby(['month', 'lab']).size().unstack().fillna(0)
35
+ market_share = market_share.div(market_share.sum(axis=1), axis=0)
36
+
37
+ # TODO: Save the figure as an interactive HTML figure.
38
+ market_share.plot.area(
39
+ title='Market Share by Lab by Month in Nevada',
40
+ figsize=(13, 8),
41
+ )
42
+ plt.xlabel('')
43
+ plt.savefig(f'{assets_dir}/nv-market-share-by-lab-by-month.png', dpi=300, bbox_inches='tight', transparent=False)
44
+ plt.show()
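+ # A sketch for the interactive-HTML TODO above; assumes the `plotly`
+ # package is installed (it is not among this script's imports).
+ import plotly.express as px
+ share = market_share.copy()
+ share.index = share.index.to_timestamp()
+ fig = px.area(share, title='Market Share by Lab by Month in Nevada')
+ fig.write_html(f'{assets_dir}/nv-market-share-by-lab-by-month.html')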
45
+
46
+ # TODO: Calculate tests per capita by month.
47
+ nv_population = {
48
+ 2023: 3_194_176,
49
+ 2022: 3_177_421,
50
+ 2021: 3_146_632,
51
+ 2020: 3_115_840,
52
+ 2019: 3_090_771,
53
+ }
54
+ results['year'] = results['date_tested'].dt.year
55
+ results['population'] = results['year'].map(nv_population)
56
+ nv_tests_per_capita = results.groupby('month').size() / (results.groupby('month')['population'].first() / 100_000)
57
+
58
+ # TODO: Save the figure as an interactive HTML figure.
59
+ fig, ax = plt.subplots(figsize=(13, 8))
60
+ nv_tests_per_capita.plot(ax=ax, title='Cannabis Tests per 100,000 People by Month in Nevada')
61
+ ax.set_ylabel('Tests per 100,000 People')
62
+ plt.show()
63
+
64
+ # Visualize average total THC by month over time.
65
+ results['total_thc'] = results['total_thc'].astype(float)
66
+ average_total_thc = results.groupby('month')['total_thc'].mean()
67
+
68
+ # TODO: Save the figure as an interactive HTML figure.
69
+ fig, ax = plt.subplots(figsize=(13, 8))
70
+ average_total_thc.index = average_total_thc.index.to_timestamp()
71
+ ax.plot(average_total_thc.index, average_total_thc.values, label='Monthly Average Total THC', color='royalblue', lw=5)
72
+ ax.scatter(results['date_tested'], results['total_thc'], color='royalblue', s=10, alpha=0.5, label='Daily Individual Results')
73
+ ax.set_xlabel('')
74
+ ax.set_ylabel('Total THC (%)')
75
+ ax.set_title('Average Total THC by Month in Nevada')
76
+ ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
77
+ ax.xaxis.set_major_locator(mdates.MonthLocator((1,4,7,10)))
78
+ plt.xticks(rotation=45)
79
+ plt.ylim(10, 35)
80
+ plt.savefig(f'{assets_dir}/nv-total-thc.png', dpi=300, bbox_inches='tight', transparent=False)
81
+ plt.show()
82
+
83
+ # Visualize average total CBD by month over time.
84
+ results['total_cbd'] = results['total_cbd'].astype(float)
85
+ sample = results.loc[results['total_cbd'] < 1]
86
+ average_total_cbd = sample.groupby('month')['total_cbd'].mean()
87
+
88
+ # TODO: Save the figure as an interactive HTML figure.
89
+ fig, ax = plt.subplots(figsize=(13, 8))
90
+ average_total_cbd.index = average_total_cbd.index.to_timestamp()
91
+ ax.plot(average_total_cbd.index, average_total_cbd.values, label='Monthly Average Total CBD', color='royalblue', lw=5)
92
+ ax.scatter(sample['date_tested'], sample['total_cbd'], color='royalblue', s=10, alpha=0.5, label='Daily Individual Results')
93
+ ax.set_xlabel('')
94
+ ax.set_ylabel('Total CBD (%)')
95
+ ax.set_title('Average Total CBD by Month in Nevada in Low CBD Samples (<1%)')
96
+ ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
97
+ ax.xaxis.set_major_locator(mdates.MonthLocator((1,4,7,10)))
98
+ plt.xticks(rotation=45)
99
+ plt.ylim(0, 0.33)
100
+ plt.savefig(f'{assets_dir}/nv-total-cbd.png', dpi=300, bbox_inches='tight', transparent=False)
101
+ plt.show()
102
+
103
+ # Visualize average total terpenes by month over time.
104
+ results['total_terpenes'] = results['total_terpenes'].astype(float)
105
+ average_total_terpenes = results.groupby('month')['total_terpenes'].mean()
106
+
107
+ # TODO: Save the figure as an interactive HTML figure.
108
+ fig, ax = plt.subplots(figsize=(13, 8))
109
+ average_total_terpenes.index = average_total_terpenes.index.to_timestamp()
110
+ ax.plot(average_total_terpenes.index, average_total_terpenes.values, label='Monthly Average Total Terpenes', color='royalblue', lw=5)
111
+ ax.scatter(results['date_tested'], results['total_terpenes'], color='royalblue', s=10, alpha=0.5, label='Daily Individual Results')
112
+ ax.set_xlabel('')
113
+ ax.set_ylabel('Total Terpenes (%)')
114
+ ax.set_title('Average Total Terpenes by Month in Nevada')
115
+ ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
116
+ ax.xaxis.set_major_locator(mdates.MonthLocator((1,4,7,10)))
117
+ plt.xticks(rotation=45)
118
+ plt.ylim(0, 4.5)
119
+ plt.xlim(results['date_tested'].min(), pd.to_datetime('2021-04-01'))
120
+ plt.savefig(f'{assets_dir}/nv-total-terpenes.png', dpi=300, bbox_inches='tight', transparent=False)
121
+ plt.show()
122
+
123
+ # Optional: Save the figure as an interactive HTML figure.
124
+ # # Visualize total THC to total CBD in a scatter plot with a trend line.
125
+ # plt.scatter(results['total_thc'], results['total_cbd'])
126
+ # plt.xlabel('Total THC')
127
+ # plt.ylabel('Total CBD')
128
+ # plt.title('Total THC vs. Total CBD in Nevada')
129
+ # plt.show()
130
+
131
+ # TODO: Save the figure as an interactive HTML figure.
132
+ # Visualize total cannabinoids to total terpenes in a scatter plot with a trend line.
133
+ sample = results[
134
+ (results['total_cannabinoids'] <= 40) &
135
+ (results['total_cannabinoids'] > 0) &
136
+ (results['total_terpenes'] > 0) &
137
+ (results['total_terpenes'] <= 4.5)
138
+ ]
139
+ # Drop NaNs jointly so X and y stay aligned row-for-row.
+ fit_data = sample[['total_terpenes', 'total_cannabinoids']].dropna()
+ X = fit_data[['total_terpenes']].values
+ y = fit_data[['total_cannabinoids']].values
141
+ model = LinearRegression(fit_intercept=False).fit(X, y)
142
+ slope = model.coef_[0][0]
143
+ avg_total_terpenes = sample['total_terpenes'].mean()
144
+ avg_total_cannabinoids = sample['total_cannabinoids'].mean()
145
+ fig, ax = plt.subplots(figsize=(13, 8))
146
+ plt.scatter(X, y, label='Data points')
147
+ plt.plot(X, model.predict(X), color='red', linewidth=2, label='Fit (through origin)')
148
+ plt.ylabel('Total Cannabinoids (%)')
149
+ plt.xlabel('Total Terpenes (%)')
150
+ plt.title('Total Cannabinoids to Total Terpenes in Cannabis Flower in Nevada', pad=20)
151
+ plt.text(0.75, 0.75, f'Estimated Ratio: {slope:.0f} to 1', transform=plt.gca().transAxes, fontsize=24, verticalalignment='top')
152
+ plt.text(0.05, 0.95, f'Avg. Total Cannabinoids: {avg_total_cannabinoids:.2f}%', transform=plt.gca().transAxes, fontsize=24, verticalalignment='top')
153
+ plt.text(0.05, 0.875, f'Avg. Total Terpenes: {avg_total_terpenes:.2f}%', transform=plt.gca().transAxes, fontsize=24, verticalalignment='top')
154
+ plt.ylim(0, 50)
155
+ plt.xlim(0, 4.5)
156
+ plt.savefig(f'{assets_dir}/nv-total-cannabinoids-to-terpenes.png', dpi=300, bbox_inches='tight', transparent=False)
157
+ plt.show()
158
+
159
+
160
+ # TODO: Upload figures to Firebase Storage, keeping track of references.
161
+
162
+
163
+ # TODO: Upload statistics to Firestore, with references to figures.
164
+
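+ # A sketch for the two TODOs above, assuming a configured Firebase project,
+ # that `initialize_firebase` returns a Firestore client, and hypothetical
+ # bucket and collection paths:
+ # from cannlytics.firebase import initialize_firebase
+ # from firebase_admin import storage
+ # db = initialize_firebase()
+ # bucket = storage.bucket()
+ # for figure in os.listdir(assets_dir):
+ #     blob = bucket.blob(f'data/nevada/assets/{figure}')
+ #     blob.upload_from_filename(os.path.join(assets_dir, figure))
+ #     db.collection('data/nevada/figures').document(figure.split('.')[0]).set({
+ #         'figure_ref': blob.name,
+ #     })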
165
+
analysis/analyze_results_ny.py ADDED
@@ -0,0 +1,591 @@
1
+ """
2
+ Analyze Results | New York
3
+ Copyright (c) 2024 Cannlytics
4
+
5
+ Authors: Keegan Skeate <https://github.com/keeganskeate>
6
+ Created: 6/26/2024
7
+ Updated: 6/26/2024
8
+ License: MIT License <https://github.com/cannlytics/cannabis-data-science/blob/main/LICENSE>
9
+ """
10
+ # Standard imports:
11
+ import base64
12
+ from datetime import datetime
13
+ import json
14
+ import os
15
+ import shutil
16
+ import tempfile
17
+ from typing import List, Optional
18
+
19
+ # External imports:
20
+ from cannlytics.data.cache import Bogart
21
+ from cannlytics.data.coas import CoADoc
22
+ from cannlytics.data.coas import standardize_results
23
+ from cannlytics.data.coas.parsing import get_coa_files, parse_coa_pdfs
24
+ from cannlytics.firebase import initialize_firebase
25
+ from cannlytics.compounds import cannabinoids, terpenes
26
+ from dotenv import dotenv_values
27
+ import pandas as pd
28
+ import pdfplumber
29
+
30
+
31
+ #-----------------------------------------------------------------------
32
+ # Find all COA PDFs.
33
+ #-----------------------------------------------------------------------
34
+
35
+ # Constants:
36
+ pdf_dir = 'D://data/new-york'
37
+
38
+ # Get all of the PDFs.
39
+ pdfs = get_coa_files(pdf_dir)
40
+ pdfs.sort(key=os.path.getmtime)
41
+ print('Found %i PDFs.' % len(pdfs))
42
+
43
+ # Initialize COA parsing.
44
+ parser = CoADoc()
45
+ cache = Bogart('D://data/.cache/results-ny.jsonl')
46
+ verbose = True
47
+ all_results = []
48
+
49
+
50
+ #-----------------------------------------------------------------------
51
+ # DEV: Identify all labs
52
+ #-----------------------------------------------------------------------
53
+
54
+ # Extract text from all PDFs.
55
+ extracted_data = []
56
+ for pdf_file in pdfs:
+     try:
+         with pdfplumber.open(pdf_file) as pdf:
+             # `extract_text()` can return None for image-only pages.
+             text = (pdf.pages[0].extract_text() or '') + '\n'
+             extracted_data.append({'file': pdf_file, 'text': text})
+     except Exception:
+         pass
63
+
64
+ # Find all COAs from a specific lab.
65
+ coas = {}
66
+ unidentified_coas = []
67
+ labs = [
68
+ 'Phyto-farma Labs',
69
+ 'Phyto-Farma Labs',
70
+ 'Kaycha Labs',
71
+ 'Keystone State Testing',
72
+ 'Green Analytics',
73
+ ]
74
+ for data in extracted_data:
75
+ for lab in labs:
76
+ if lab in data['text']:
77
+ lab_coas = coas.get(lab, [])
78
+ lab_coas.append(data['file'])
79
+ coas[lab] = lab_coas
80
+ break
81
+ else:
82
+ unidentified_coas.append(data['file'])
83
+ print('Number of unidentified COAs:', len(unidentified_coas))
84
+
85
+ # DEV: Look at the first page of a PDF.
86
+ if unidentified_coas:
87
+ pdf = pdfplumber.open(unidentified_coas[0])
88
+ page = pdf.pages[0]
89
+ im = page.to_image(resolution=300)
90
+ im.debug_tablefinder()
91
+
92
+ # Count COAs per lab.
93
+ for lab, lab_coas in coas.items():
94
+ print(lab, len(lab_coas))
95
+
96
+
97
+ #-----------------------------------------------------------------------
98
+ # Parse Kaycha Labs COAs.
99
+ #-----------------------------------------------------------------------
100
+
101
+ from cannlytics.data.coas.algorithms.kaycha import parse_kaycha_coa
102
+
103
+ # Parse COAs.
104
+ lab_coas = coas['Kaycha Labs']
105
+ for pdf in lab_coas:
106
+ if not os.path.exists(pdf):
107
+ if verbose: print(f'PDF not found: {pdf}')
108
+ continue
109
+ pdf_hash = cache.hash_file(pdf)
110
+ if cache is not None:
111
+ if cache.get(pdf_hash):
112
+ if verbose: print('Cached:', pdf)
113
+ all_results.append(cache.get(pdf_hash))
114
+ continue
115
+ try:
116
+ coa_data = parse_kaycha_coa(parser, pdf)
117
+ all_results.append(coa_data)
118
+ if cache is not None: cache.set(pdf_hash, coa_data)
119
+ print('Parsed:', pdf)
120
+ except:
121
+ print('Error:', pdf)
122
+
123
+
124
+ #-----------------------------------------------------------------------
125
+ # Parse Keystone State Testing COAs.
126
+ #-----------------------------------------------------------------------
127
+
128
+ from cannlytics.data.coas.algorithms.keystone import parse_keystone_coa
129
+
130
+ lab_coas = coas['Keystone State Testing']
131
+ for pdf in lab_coas:
132
+ pdf_hash = cache.hash_file(pdf)
133
+ if cache is not None:
134
+ if cache.get(pdf_hash):
135
+ if verbose: print('Cached:', pdf)
136
+ all_results.append(cache.get(pdf_hash))
137
+ continue
138
+ try:
139
+ coa_data = parse_keystone_coa(parser, pdf)
140
+ all_results.append(coa_data)
141
+ if cache is not None: cache.set(pdf_hash, coa_data)
142
+ print('Parsed:', pdf)
143
+ except Exception as e:
144
+ print('Error:', pdf)
145
+ print(e)
146
+
147
+
148
+ #-----------------------------------------------------------------------
149
+ # Parse Phyto-farma Labs COAs.
150
+ #-----------------------------------------------------------------------
151
+
152
+ from cannlytics.data.coas.algorithms.phytofarma import parse_phyto_farma_coa
153
+
154
+ # Parse Phyto-Farma Labs COAs.
155
+ lab_coas = coas['Phyto-Farma Labs'] + coas['Phyto-farma Labs']
156
+ for pdf in lab_coas:
157
+ pdf_hash = cache.hash_file(pdf)
158
+ if cache is not None:
159
+ if cache.get(pdf_hash):
160
+ if verbose: print('Cached:', pdf)
161
+ all_results.append(cache.get(pdf_hash))
162
+ continue
163
+ try:
164
+ coa_data = parse_phyto_farma_coa(parser, pdf)
165
+ all_results.append(coa_data)
166
+ if cache is not None: cache.set(pdf_hash, coa_data)
167
+ print('Parsed:', pdf)
168
+ except Exception as e:
169
+ print('Error:', pdf)
170
+ print(e)
171
+
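+ # The three parsing loops above repeat the same cache-check/parse/cache-set
+ # pattern; a small helper using the same Bogart calls could consolidate it
+ # (a sketch, not wired into the loops above):
+ def parse_with_cache(parse_fn, pdf_path):
+     """Parse a COA PDF, reading from and writing to the cache by file hash."""
+     pdf_hash = cache.hash_file(pdf_path)
+     cached = cache.get(pdf_hash)
+     if cached:
+         return cached
+     coa_data = parse_fn(parser, pdf_path)
+     cache.set(pdf_hash, coa_data)
+     return coa_data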
172
+
173
+ #-----------------------------------------------------------------------
174
+ # TODO: Parse Green Analytics COAs.
175
+ #-----------------------------------------------------------------------
176
+
177
+ # lab_coas = coas['Green Analytics']
178
+ lab_coas = [
179
+ 'D://data/new-york\\NYSCannabis\\pdfs\\1c3sh5h-coa-1.pdf',
180
+ 'D://data/new-york\\NYSCannabis\\pdfs\\1cp4tdr-coa-1.pdf',
181
+ 'D://data/new-york\\NYSCannabis\\pdfs\\1c91onw-coa-1.pdf'
182
+ ]
183
+
184
+
185
+ #-----------------------------------------------------------------------
186
+ # Optional: Parse the COAs with AI.
187
+ #-----------------------------------------------------------------------
188
+
189
+ def encode_image(image_path):
190
+ """Encode an image as a base64 string."""
191
+ with open(image_path, 'rb') as image_file:
192
+ return base64.b64encode(image_file.read()).decode('utf-8')
193
+
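+ # A sketch of how `encode_image` could feed a page image to a vision model;
+ # the `openai` package, `OPENAI_API_KEY`, image path, model name, and prompt
+ # are all assumptions, not part of this script.
+ # from openai import OpenAI
+ # client = OpenAI()
+ # b64 = encode_image('coa-page.png')  # Hypothetical rendered page image.
+ # response = client.chat.completions.create(
+ #     model='gpt-4o',
+ #     messages=[{'role': 'user', 'content': [
+ #         {'type': 'text', 'text': 'Extract the cannabinoid results as JSON.'},
+ #         {'type': 'image_url', 'image_url': {'url': f'data:image/png;base64,{b64}'}},
+ #     ]}],
+ # )
+ # print(response.choices[0].message.content)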
194
+
195
+
196
+ #-----------------------------------------------------------------------
197
+ # TODO: Analyze results.
198
+ #-----------------------------------------------------------------------
199
+
200
+ from cannlytics.data.coas import standardize_results
201
+ from cannlytics.compounds import cannabinoids, terpenes
202
+ import matplotlib.pyplot as plt
203
+ from matplotlib.dates import MonthLocator, DateFormatter
204
+ import seaborn as sns
205
+
206
+ # Setup.
207
+ assets_dir = r'C:\Users\keega\Documents\cannlytics\cannabis-data-science\season-4\165-labels\presentation\images\figures'
208
+ plt.style.use('seaborn-v0_8-whitegrid')
209
+ plt.rcParams.update({
210
+ 'font.family': 'Times New Roman',
211
+ 'font.size': 24,
212
+ })
213
+
214
+
215
+ def format_date(x, pos):
216
+ try:
217
+ return pd.to_datetime(x).strftime('%b %d, %Y')
218
+ except ValueError:
219
+ return ''
220
+
221
+
222
+ # Read results.
223
+ cache = Bogart('D://data/.cache/results-ny.jsonl')
224
+ results = cache.to_df()
225
+ print('Number of results:', len(results))
226
+
227
+ # Standardize results.
228
+ compounds = list(cannabinoids.keys()) + list(terpenes.keys())
229
+ # # DEV:
230
+ # compounds = [
231
+ # 'delta_9_thc',
232
+ # 'thca',
233
+ # 'alpha_humulene',
234
+ # 'beta_caryophyllene',
235
+ # 'beta_pinene',
236
+ # 'd_limonene',
237
+ # ]
238
+ results['date'] = pd.to_datetime(results['date_tested'], format='mixed')
239
+ results['week'] = results['date'].dt.to_period('W').astype(str)
240
+ results['month'] = results['date'].dt.to_period('M').astype(str)
241
+ results = standardize_results(results, compounds)
242
+
243
+ # Sort the results by date.
244
+ results = results.sort_values('date')
245
+
246
+ # TODO: Look at values of any terpenes not yet observed.
247
+
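+ # A quick sketch for the TODO above: list terpenes with no observations in
+ # the standardized results.
+ unobserved = [
+     t for t in terpenes
+     if t in results.columns and results[t].notna().sum() == 0
+ ]
+ print('Terpenes with no observations:', unobserved)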
248
+
249
+ #-----------------------------------------------------------------------
250
+ # Lab analysis
251
+ #-----------------------------------------------------------------------
252
+
253
+ # # FIXME: Visualize the number of results by lab over time.
254
+ # weekly_tests = results.groupby(['week', 'lab']).size().reset_index(name='count')
255
+ # pivot_table = weekly_tests.pivot_table(values='count', index='week', columns='lab', aggfunc='sum').fillna(0)
256
+ # plt.figure(figsize=(15, 8))
257
+ # colors = sns.color_palette('tab20', n_colors=len(pivot_table.columns))
258
+ # bottom = pd.Series([0] * len(pivot_table.index), index=pivot_table.index)
259
+ # for lab, color in zip(pivot_table.columns, colors):
260
+ # plt.bar(
261
+ # pivot_table.index,
262
+ # pivot_table[lab],
263
+ # bottom=bottom,
264
+ # label=lab,
265
+ # color=color,
266
+ # edgecolor='grey', # Add border
267
+ # alpha=0.8, # Add transparency
268
+ # )
269
+ # bottom += pivot_table[lab]
270
+ # plt.title('Number of Lab Results by Lab', pad=10)
271
+ # plt.xlabel('Week')
272
+ # plt.ylabel('Number of Results')
273
+ # plt.xticks(rotation=45)
274
+ # ticks = plt.gca().get_xticks()
275
+ # plt.gca().set_xticks(ticks[::4]) # Show every 4th xtick
276
+ # plt.legend(loc='upper right', title='Lab', ncol=2)
277
+ # plt.tight_layout()
278
+ # plt.savefig(os.path.join(assets_dir, 'lab-timeseries.png'))
279
+ # plt.show()
280
+
281
+ # This one is good:
282
+ sample = results.dropna(subset=['date'])
283
+ plt.figure(figsize=(18, 8))
284
+ ax = sns.countplot(data=sample, x='week', hue='lab', palette='tab10')
285
+ plt.title('Number of Lab Results by Lab', pad=10)
286
+ plt.xlabel('')
287
+ plt.ylabel('Number of Results')
288
+ plt.xticks(rotation=45)
289
+ ticks = ax.get_xticks()
290
+ ax.set_xticks(ticks[::4])
291
+ ax.set_xticklabels([format_date(item.get_text(), None) for item in ax.get_xticklabels()])
292
+ plt.legend(loc='upper right')
293
+ plt.tight_layout()
294
+ plt.savefig(os.path.join(assets_dir, 'lab-timeseries.png'))
295
+ plt.show()
296
+
297
+
298
+ #-----------------------------------------------------------------------
299
+ # Producer analysis
300
+ #-----------------------------------------------------------------------
301
+
302
+ # Assign standard producer names.
303
+ producer_names = {
304
+ 'Hudson Valley Cannabis LLC': 'Hudson Cannabis',
305
+ 'MFNY Processor LLC': 'MFNY',
306
+ '': 'Unknown',
307
+ 'Hepworth Ag, INC': 'Hepworth Ag',
308
+ 'Processing': 'Unknown',
309
+ 'MFNY PROCESSOR LLC': 'MFNY',
310
+ 'Hudson Valley Hemp Company': 'Hudson Cannabis',
311
+ 'Hepworth Ag, Inc.': 'Hepworth Ag',
312
+ 'NYHO Labs LLC': 'NYHO Labs',
313
+ 'Hudson Valley Hemp Company, LLC': 'Hudson Cannabis',
314
+ 'Cirona Labs': 'Cirona Labs',
315
+ 'Hudson Cannabis c/o Hudson Valley Hemp Company, LLC': 'Hudson Cannabis',
316
+ 'Milton, NY, 12547, US': 'Unknown',
317
+ }
318
+ results['producer_dba'] = results['producer'].map(producer_names)
319
+
320
+ # FIXME: Visualize the number of results by producer over time.
321
+ # results['week'] = results['date'].dt.to_period('W').dt.start_time
322
+ # weekly_tests = results.groupby(['week', 'dba']).size().reset_index(name='count')
323
+ # pivot_table = weekly_tests.pivot_table(values='count', index='week', columns='dba', aggfunc='sum').fillna(0)
324
+ # plt.figure(figsize=(21, 9))
325
+ # colors = sns.color_palette('tab20', n_colors=len(pivot_table.columns))
326
+ # bottom = None
327
+ # for dba, color in zip(pivot_table.columns, colors):
328
+ # plt.bar(
329
+ # pivot_table.index,
330
+ # pivot_table[dba],
331
+ # bottom=bottom,
332
+ # label=dba,
333
+ # color=color,
334
+ # edgecolor='grey', # Add border
335
+ # alpha=0.8, # Add transparency
336
+ # )
337
+ # if bottom is None:
338
+ # bottom = pivot_table[dba]
339
+ # else:
340
+ # bottom += pivot_table[dba]
341
+ # plt.title('Number of Lab Results by Producer', pad=10)
342
+ # plt.xlabel('Week')
343
+ # plt.ylabel('Number of Results')
344
+ # plt.xticks(rotation=45)
345
+ # ticks = plt.gca().get_xticks()
346
+ # plt.gca().set_xticks(ticks[::4]) # Show every 4th xtick
347
+ # plt.legend(loc='upper right', title='Producer', ncol=2)
348
+ # plt.tight_layout()
349
+ # plt.savefig(os.path.join(assets_dir, 'producer-timeseries.png'))
350
+ # plt.show()
351
+
352
+ # This one is good.
353
+ sample = results.dropna(subset=['date'])
354
+ plt.figure(figsize=(18, 8))
355
+ ax = sns.countplot(data=sample, x='week', hue='producer_dba', palette='tab10')
356
+ plt.title('Number of Lab Results by Producer', pad=10)
357
+ plt.xlabel('')
358
+ plt.ylabel('Number of Results')
359
+ plt.xticks(rotation=45)
360
+ ticks = ax.get_xticks()
361
+ ax.set_xticks(ticks[::4])
362
+ ax.set_xticklabels([format_date(item.get_text(), None) for item in ax.get_xticklabels()])
363
+ plt.legend(loc='upper right')
364
+ plt.tight_layout()
365
+ plt.savefig(os.path.join(assets_dir, 'producer-timeseries.png'))
366
+ plt.show()
367
+
368
+
369
+ #-----------------------------------------------------------------------
370
+ # Product type analysis
371
+ #-----------------------------------------------------------------------
372
+
373
+ # Assign product types.
374
+ flower_types = [
375
+ 'Plant, Flower - Cured',
376
+ 'Flower',
377
+ ]
378
+ preroll_types = [
379
+ 'Plant, Preroll',
380
+ ]
381
+ infused_preroll_types = [
382
+ 'Plant, Enhanced Preroll',
383
+ ]
384
+ concentrate_types = [
385
+ 'Concentrate',
386
+ 'Derivative',
387
+ 'Concentrates & Extract, Vape Cartridge',
388
+ 'Concentrates & Extract, Live Rosin',
389
+ 'Concentrates & Extract, Concentrate',
390
+ 'Concentrates & Extract, Rosin',
391
+ 'Concentrates & Extract, Distillate'
392
+ ]
393
+ edible_types = [
394
+ 'Edible',
395
+ 'Ingestible, Gummy',
396
+ 'Ingestible, Edibles',
397
+ ]
398
+
399
+
400
+ def assign_product_type(x):
401
+ if x in flower_types:
402
+ return 'Flower'
403
+ if x in concentrate_types:
404
+ return 'Concentrate'
405
+ if x in edible_types:
406
+ return 'Edible'
407
+ if x in preroll_types:
408
+ return 'Preroll'
409
+ if x in infused_preroll_types:
410
+ return 'Infused Preroll'
411
+ return 'Other'
412
+
413
+
414
+ # Assign standard product type.
415
+ results['standard_product_type'] = results['product_type'].apply(assign_product_type)
416
+
417
+ # Define a consistent color palette.
418
+ product_type_palette = {
419
+ 'Flower': '#2ca02c', # green
420
+ 'Concentrate': '#ff7f0e', # orange
421
+ 'Edible': '#8c564b', # brown
422
+ 'Preroll': '#1f77b4', # blue
423
+ 'Infused Preroll': '#9467bd', # purple
424
+ 'Other': '#d62728' # red
425
+ }
426
+
427
+ # Visualize the number of results by product type over time
428
+ sample = results.dropna(subset=['date'])
429
+ sample.sort_values('date', inplace=True)
430
+ plt.figure(figsize=(18, 8))
431
+ ax = sns.countplot(data=sample, x='week', hue='standard_product_type', palette=product_type_palette)
432
+ plt.title('Number of Lab Results by Product Type', pad=10)
433
+ plt.xlabel('')
434
+ plt.ylabel('Number of Results')
435
+ plt.xticks(rotation=45)
436
+ ticks = ax.get_xticks()
437
+ ax.set_xticks(ticks[::4])
438
+ # FIXME:
439
+ ax.set_xticklabels([format_date(item.get_text(), None) for item in ax.get_xticklabels()])
440
+ plt.legend(loc='upper right')
441
+ plt.tight_layout()
442
+ plt.savefig(os.path.join(assets_dir, 'product-type-timeseries.png'))
443
+ plt.show()
444
+
445
+ # Visualize the proportions of product types in a pie chart
446
+ plt.figure(figsize=(12, 12))
447
+ results['standard_product_type'].value_counts().plot.pie(
448
+ autopct='%1.1f%%',
449
+ startangle=90,
450
+ colors=[product_type_palette[key] for key in results['standard_product_type'].value_counts().index]
451
+ )
452
+ plt.title('Proportions of Product Types')
453
+ plt.ylabel('')
454
+ plt.tight_layout()
455
+ plt.savefig(os.path.join(assets_dir, 'product-type-pie.png'))
456
+ plt.show()
457
+
458
+ # Visualize total cannabinoids and total terpenes in a scatter plot
459
+ sample = results.loc[results['standard_product_type'] != 'Other']
460
+ plt.figure(figsize=(18, 8))
461
+ ax = sns.scatterplot(
462
+ data=sample,
463
+ y='total_cannabinoids',
464
+ x='total_terpenes',
465
+ hue='standard_product_type',
466
+ palette=product_type_palette,
467
+ s=200
468
+ )
469
+ plt.title('Total Cannabinoids to Total Terpenes', pad=10)
470
+ plt.ylabel('Total Cannabinoids (%)')
471
+ plt.xlabel('Total Terpenes (%)')
472
+ plt.xlim(0, 10.5)
473
+ plt.ylim(0, 100)
474
+ legend = ax.legend(title='Product Type', bbox_to_anchor=(1.05, 1), loc='upper left')
475
+ for leg_entry in legend.legendHandles:
476
+ leg_entry.set_sizes([200])
477
+ plt.tight_layout()
478
+ plt.savefig(os.path.join(assets_dir, 'cannabinoids-to-terpenes.png'))
479
+ plt.show()
480
+
481
+
482
+ # Optional: Visualize total THC to CBD.
483
+
484
+
485
+ #-----------------------------------------------------------------------
486
+ # Timeseries analysis.
487
+ #-----------------------------------------------------------------------
488
+
489
+ import statsmodels.api as sm
490
+
491
+ # Look at the trend in THCA in flower.
492
+ compound = 'thca'
493
+ sample = results[results['standard_product_type'] == 'Flower']
494
+ avg = results.groupby(['month', 'standard_product_type'])[compound].mean().reset_index()
495
+ avg['month'] = pd.to_datetime(avg['month'], errors='coerce')
496
+ flower_data = avg[avg['standard_product_type'] == 'Flower']
497
+ flower_data = flower_data.dropna(subset=[compound, 'month'])
498
+ flower_data['month_num'] = range(len(flower_data))
499
+ X = sm.add_constant(flower_data['month_num'])
500
+ y = flower_data[compound]
501
+ model = sm.OLS(y, X).fit()
502
+ slope = model.params['month_num']
503
+ direction = '+' if slope > 0 else '-'
504
+ plt.figure(figsize=(13, 8))
505
+ plt.plot(flower_data['month'], flower_data[compound], 'bo-', label='Avg. THCA by month', linewidth=2)
506
+ plt.plot(flower_data['month'], model.predict(X), 'r-', label=f'Trend: {direction}{slope:.2f}% per month', linewidth=2)
507
+ plt.scatter(sample['date'], sample[compound], color='lightblue', s=80)
508
+ plt.title('Trend of THCA in Flower in New York', pad=10)
509
+ plt.xlabel('')
510
+ plt.ylabel('THCA')
511
+ plt.legend()
512
+ plt.xticks(rotation=45)
513
+ plt.tight_layout()
514
+ plt.savefig(os.path.join(assets_dir, 'average-thca-by-month.png'))
515
+ plt.show()
516
+
517
+
518
+ #-----------------------------------------------------------------------
519
+ # Visualize terpene ratios.
520
+ #-----------------------------------------------------------------------
521
+
522
+ # Function to create scatter plots
523
+ def create_scatter_plot(x_col, y_col, title, x_label, y_label, filename):
524
+ plt.figure(figsize=(18, 8))
525
+ ax = sns.scatterplot(
526
+ data=results,
527
+ x=x_col,
528
+ y=y_col,
529
+ hue='standard_product_type',
530
+ palette=product_type_palette,
531
+ s=200
532
+ )
533
+ plt.title(title, pad=10)
534
+ plt.xlabel(x_label)
535
+ plt.ylabel(y_label)
536
+ legend = ax.legend(title='Product Type', bbox_to_anchor=(1.05, 1), loc='upper left')
537
+ for leg_entry in legend.legendHandles:
538
+ leg_entry.set_sizes([200])
539
+ plt.tight_layout()
540
+ plt.savefig(os.path.join(assets_dir, filename))
541
+ plt.show()
542
+
543
+
544
+ # Visualize the ratio of `alpha_humulene` to `beta_caryophyllene`
545
+ create_scatter_plot(
546
+ y_col='alpha_humulene',
547
+ x_col='beta_caryophyllene',
548
+ title='Ratio of Alpha-Humulene to Beta-Caryophyllene by Product Type',
549
+ y_label='Alpha-Humulene',
550
+ x_label='Beta-Caryophyllene',
551
+ filename='alpha_humulene_to_beta_caryophyllene.png'
552
+ )
553
+
554
+ # Visualize the ratio of `beta_pinene` to `d_limonene`
555
+ create_scatter_plot(
556
+ y_col='beta_pinene',
557
+ x_col='d_limonene',
558
+ title='Ratio of Beta-Pinene to D-Limonene by Product Type',
559
+ y_label='Beta-Pinene',
560
+ x_label='D-Limonene',
561
+ filename='beta_pinene_to_d_limonene.png'
562
+ )
563
+
564
+
565
+ #-----------------------------------------------------------------------
566
+ # Regression analysis on THCA.
567
+ #-----------------------------------------------------------------------
568
+
569
+ from patsy import dmatrices
570
+
571
+ # Run a regression on THCA in flower.
572
+ compound = 'thca'
573
+ product_type = 'Flower'
574
+ sample = results[results['standard_product_type'] == product_type]
575
+ sample['month'] = pd.to_datetime(sample['month'], errors='coerce')
576
+ sample = sample.dropna(subset=['month'])
577
+ sample['month_num'] = sample['month'].rank(method='dense').astype(int) - 1
578
+ y, X = dmatrices('thca ~ month_num + C(lab) + C(producer_dba)', data=sample, return_type='dataframe')
579
+ model = sm.OLS(y, X).fit()
580
+ print(model.summary().as_latex())
581
+
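+ # With repeated observations per lab and producer, heteroskedasticity-robust
+ # standard errors are a safer default; a sketch of the same model refit with
+ # HC3 covariance (a supplemental check, not the original specification):
+ robust_model = sm.OLS(y, X).fit(cov_type='HC3')
+ print(robust_model.summary())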
582
+
583
+ #-----------------------------------------------------------------------
584
+ # TODO: Save the results.
585
+ #-----------------------------------------------------------------------
586
+
587
+ # Save the results.
588
+ last_test_date = results['date'].max().strftime('%Y-%m-%d')
589
+ outfile = f'D://data/new-york/ny-results-{last_test_date}.xlsx'
590
+ results.to_excel(outfile, index=False)
591
+ print('Saved:', outfile)
analysis/analyze_results_or.py ADDED
@@ -0,0 +1,67 @@
1
+
2
+
3
+
4
+ # === Analyze Oregon lab results ===
5
+
6
+ # # Visualize market share by lab by month as a timeseries.
7
+ # market_share = results.groupby(['month', 'lab_id']).size().unstack().fillna(0)
8
+ # market_share = market_share.div(market_share.sum(axis=1), axis=0)
9
+ # market_share.plot.area(
10
+ # title='Market Share by Lab by Month in Oregon',
11
+ # figsize=(13, 8),
12
+ # legend=None,
13
+ # )
14
+ # plt.xlabel('')
15
+ # plt.savefig(f'{assets_dir}/or-market-share-by-lab-by-month.png', dpi=300, bbox_inches='tight', transparent=False)
16
+ # plt.show()
17
+
18
+ # # Visualize tests per capita by month.
19
+ # or_population = {
20
+ # 2023: 4_233_358,
21
+ # 2022: 4_239_379,
22
+ # 2021: 4_256_465,
23
+ # 2020: 4_245_044,
24
+ # 2019: 4_216_116,
25
+ # }
26
+ # results['year'] = results['date'].dt.year
27
+ # results['population'] = results['year'].map(or_population)
28
+ # fig, ax = plt.subplots(figsize=(13, 8))
29
+ # or_tests_per_capita = results.groupby('month').size() / (results.groupby('month')['population'].first() / 100_000)
30
+ # or_tests_per_capita.plot(ax=ax, title='Cannabis Tests per 100,000 People by Month in Oregon')
31
+ # ax.set_ylabel('Tests per 100,000 People')
32
+ # plt.show()
+
+ # # Visualize average total THC by month over time.
+ # results['total_thc'] = results['total_thc'].astype(float)
+ # average_total_thc = results.groupby('month')['total_thc'].mean()
+ # fig, ax = plt.subplots(figsize=(13, 8))
+ # average_total_thc.index = average_total_thc.index.to_timestamp()
+ # ax.plot(average_total_thc.index, average_total_thc.values, label='Monthly Average Total THC', color='royalblue', lw=5)
+ # ax.scatter(results['date'], results['total_thc'], color='royalblue', s=10, alpha=0.5, label='Daily Individual Results')
+ # ax.set_xlabel('')
+ # ax.set_ylabel('Total THC (%)')
+ # ax.set_title('Average Total THC by Month in Oregon')
+ # ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
+ # ax.xaxis.set_major_locator(mdates.MonthLocator((1, 4, 7, 10)))
+ # plt.xticks(rotation=45)
+ # plt.ylim(0, 45)
+ # plt.savefig(f'{assets_dir}/or-total-thc.png', dpi=300, bbox_inches='tight', transparent=False)
+ # plt.show()
+
+ # # Visualize average total CBD by month over time.
+ # results['total_cbd'] = results['total_cbd'].astype(float)
+ # sample = results.loc[results['total_cbd'] < 1]
+ # average_total_cbd = sample.groupby('month')['total_cbd'].mean()
+ # fig, ax = plt.subplots(figsize=(13, 8))
+ # average_total_cbd.index = average_total_cbd.index.to_timestamp()
+ # ax.plot(average_total_cbd.index, average_total_cbd.values, label='Monthly Average Total CBD', color='royalblue', lw=5)
+ # ax.scatter(sample['date'], sample['total_cbd'], color='royalblue', s=10, alpha=0.5, label='Daily Individual Results')
+ # ax.set_xlabel('')
+ # ax.set_ylabel('Total CBD (%)')
+ # ax.set_title('Average Total CBD by Month in Oregon in Low CBD Samples (<1%)')
+ # ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
+ # ax.xaxis.set_major_locator(mdates.MonthLocator((1, 4, 7, 10)))
+ # plt.xticks(rotation=45)
+ # plt.ylim(0, 0.75)
+ # plt.savefig(f'{assets_dir}/or-total-cbd.png', dpi=300, bbox_inches='tight', transparent=False)
+ # plt.show()
analysis/analyze_results_ri.py ADDED
@@ -0,0 +1,134 @@
+
+ # === OLD ===
+
+ # data_dir = r'D:\data\public-records\Rhode Island\Rhode Island'
+ # data = []
+ # for root, dirs, files in os.walk(data_dir):
+ #     for file in files:
+ #         if 'no data' in file.lower():
+ #             continue
+ #         print('Reading:', file)
+ #         datafile = os.path.join(root, file)
+ #         if file.endswith('.csv'):
+ #             df = pd.read_csv(datafile, usecols=columns.keys(), encoding='latin1')
+ #         elif file.endswith('.xlsx'):
+ #             df = pd.read_excel(datafile, usecols=columns.keys())
+ #         else:
+ #             continue
+ #         df.rename(columns=columns, inplace=True)
+ #         data.append(df)
+ # data = pd.concat(data, ignore_index=True)
+ # print('Number of Rhode Island tests:', len(data))
+
+ # # Extract test_name, units, and product_type from test_type.
+ # data[['test_name', 'units', 'product_type']] = data['test_type'].str.extract(r'(.+?) \((.+?)\) (.+)')
+
+ # # Restrict to passed tests.
+ # data = data[data['status'] == True]
+
+ # # Pivot the data to get results for each sample.
+ # results = data.pivot_table(
+ #     index=['sample_id', 'producer_license_number', 'lab', 'label', 'date_tested', 'product_type'],
+ #     columns='test_name',
+ #     values='test_result',
+ #     aggfunc='first'
+ # ).reset_index()
+ # results['date_tested'] = pd.to_datetime(results['date_tested'], errors='coerce')
+ # results['month'] = results['date_tested'].dt.to_period('M')
+ # print('Number of Rhode Island samples:', len(results))
+
+ # # Calculate the total cannabinoids and total terpenes.
+ # ri_cannabinoids = [
+ #     'CBD',
+ #     'CBDA',
+ #     'Delta-9 THC',
+ #     'THCA',
+ # ]
+ # ri_terpenes = [
+ #     'Alpha-Bisabolol',
+ #     'Alpha-Humulene',
+ #     'Alpha-Pinene',
+ #     'Alpha-Terpinene',
+ #     'Beta-Caryophyllene',
+ #     'Beta-Myrcene',
+ #     'Beta-Pinene',
+ #     'Caryophyllene Oxide',
+ #     'Limonene',
+ #     'Linalool',
+ #     'Nerolidol',
+ # ]
+ # results['total_thc'] = results['Total THC']
+ # results['total_cbd'] = results['Total CBD']
+ # results['total_cannabinoids'] = results['total_thc'] + results['total_cbd']
+ # results['total_terpenes'] = results[ri_terpenes].sum(axis=1)
+
+ # # Calculate the total THC to total CBD ratio.
+ # results['thc_cbd_ratio'] = results['total_thc'] / results['total_cbd']
+
+ # # Calculate the total cannabinoids to total terpenes ratio.
+ # results['cannabinoids_terpenes_ratio'] = results['total_cannabinoids'] / results['total_terpenes']
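+ # A minimal sketch of safer ratios, assuming zero denominators should become
+ # missing values rather than infinities (note: `ri_cannabinoids` is defined
+ # above but unused; summing it measures raw cannabinoids, a different
+ # quantity than Total THC + Total CBD):
+ # results['thc_cbd_ratio'] = results['total_thc'] / results['total_cbd'].replace(0, float('nan'))
+ # results['total_cannabinoids_raw'] = results[ri_cannabinoids].sum(axis=1)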
+
+
+ # === Analyze Rhode Island lab results ===
+
+ # # Visualize market share by lab by month as a timeseries.
+ # market_share = results.groupby(['month', 'lab']).size().unstack().fillna(0)
+ # market_share = market_share.div(market_share.sum(axis=1), axis=0)
+ # market_share.plot.area(
+ #     title='Market Share by Lab by Month in Rhode Island',
+ #     figsize=(13, 8),
+ # )
+ # plt.xlabel('')
+ # plt.savefig(f'{assets_dir}/ri-market-share-by-lab-by-month.png', dpi=300, bbox_inches='tight', transparent=False)
+ # plt.show()
+
+ # # Visualize tests per capita by month.
+ # ri_population = {
+ #     2023: 1_095_962,
+ #     2022: 1_093_842,
+ #     2021: 1_097_092,
+ #     2020: 1_096_444,
+ #     2019: 1_058_158,
+ # }
+ # results['year'] = results['date_tested'].dt.year
+ # results['population'] = results['year'].map(ri_population)
+ # tests_per_capita = results.groupby('month').size() / (results.groupby('month')['population'].first() / 100_000)
+ # fig, ax = plt.subplots(figsize=(13, 8))
+ # tests_per_capita.plot(ax=ax, title='Cannabis Tests per 100,000 People by Month in Rhode Island')
+ # ax.set_ylabel('Tests per 100,000 People')
+ # plt.show()
+
+ # # Visualize average total THC by month over time.
+ # results['date_tested'] = pd.to_datetime(results['date_tested'])
+ # results['total_thc'] = results['total_thc'].astype(float)
+ # results['month'] = results['date_tested'].dt.to_period('M')
+ # average_total_thc = results.groupby('month')['total_thc'].mean()
+ # fig, ax = plt.subplots(figsize=(13, 8))
+ # average_total_thc.index = average_total_thc.index.to_timestamp()
+ # ax.plot(average_total_thc.index, average_total_thc.values, label='Monthly Average Total THC', color='royalblue', lw=5)
+ # ax.scatter(results['date_tested'], results['total_thc'], color='royalblue', s=10, alpha=0.5, label='Daily Individual Results')
+ # ax.set_xlabel('')
+ # ax.set_ylabel('Total THC (%)')
+ # ax.set_title('Average Total THC by Month in Rhode Island')
+ # ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
+ # ax.xaxis.set_major_locator(mdates.MonthLocator((1, 4, 7, 10)))
+ # plt.xticks(rotation=45)
+ # plt.ylim(5, 37.5)
+ # plt.savefig(f'{assets_dir}/ri-total-thc.png', dpi=300, bbox_inches='tight', transparent=False)
+ # plt.show()
+
+ # # Visualize average total CBD by month over time.
+ # results['total_cbd'] = results['total_cbd'].astype(float)
+ # sample = results.loc[results['total_cbd'] < 1]
+ # average_total_cbd = sample.groupby('month')['total_cbd'].mean()
+ # fig, ax = plt.subplots(figsize=(13, 8))
+ # average_total_cbd.index = average_total_cbd.index.to_timestamp()
+ # ax.plot(average_total_cbd.index, average_total_cbd.values, label='Monthly Average Total CBD', color='royalblue', lw=5)
+ # ax.scatter(sample['date_tested'], sample['total_cbd'], color='royalblue', s=10, alpha=0.5, label='Daily Individual Results')
+ # ax.set_xlabel('')
+ # ax.set_ylabel('Total CBD (%)')
+ # ax.set_title('Average Total CBD by Month in Rhode Island in Low CBD Samples (<1%)')
+ # ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
+ # ax.xaxis.set_major_locator(mdates.MonthLocator((1, 4, 7, 10)))
+ # plt.xticks(rotation=45)
+ # plt.ylim(0, 0.33)
+ # plt.savefig(f'{assets_dir}/ri-total-cbd.png', dpi=300, bbox_inches='tight', transparent=False)
+ # plt.show()
analysis/analyze_results_ut.py ADDED
@@ -0,0 +1,413 @@
+ """
+ Analyze Results | Utah
+ Copyright (c) 2023-2024 Cannlytics
+
+ Authors: Keegan Skeate <https://github.com/keeganskeate>
+ Created: 7/4/2024
+ Updated: 7/9/2024
+ License: MIT License <https://github.com/cannlytics/cannabis-data-science/blob/main/LICENSE>
+ """
+ # Standard imports:
+ import os
+ from typing import List
+ from zipfile import ZipFile
+
+ # External imports:
+ import matplotlib.pyplot as plt
+ import pandas as pd
+ import seaborn as sns
+
+ # Internal imports:
+ from cannlytics import __version__
+ from cannlytics.data.cache import Bogart
+ from cannlytics.data.coas.parsing import get_coa_files
+ from cannlytics.data.coas import CoADoc
+ from cannlytics.data.coas.algorithms.utah import parse_utah_coa
+ from cannlytics.data.coas import standardize_results
+ from cannlytics.data.coas.parsing import find_unique_analytes
+
+
+ #-----------------------------------------------------------------------
+ # Find all of the COA PDFs.
+ #-----------------------------------------------------------------------
+
+ def unzip_folder(folder, destination, remove=True):
+     """Unzip an archive.
+     Args:
+         folder (str): The path of the zip archive to extract.
+         destination (str): The directory to extract the contents into.
+         remove (bool): Whether to delete the archive after extraction.
+     """
+     os.makedirs(destination, exist_ok=True)
+     with ZipFile(folder) as zip_ref:
+         zip_ref.extractall(destination)
+     if remove:
+         os.remove(folder)
+
+
+ # Unzip all of the folders.
+ pdf_dir = r'D:\data\public-records\Utah'
+ folders = [os.path.join(pdf_dir, x) for x in os.listdir(pdf_dir) if x.endswith('.zip')]
+ for folder in folders:
+     unzip_folder(folder, pdf_dir)
+     print('Unzipped:', folder)
+
+ # Get all of the PDFs, sorted by modification time.
+ pdfs = get_coa_files(pdf_dir)
+ pdfs.sort(key=os.path.getmtime)
+ print('Found %i PDFs.' % len(pdfs))
+
+
+ #-----------------------------------------------------------------------
+ # Parse Utah Department of Agriculture and Food COAs.
+ #-----------------------------------------------------------------------
+
+ def parse_coa_pdfs(
+         pdfs,
+         algorithm=None,
+         parser=None,
+         cache=None,
+         data=None,
+         verbose=True,
+     ) -> List[dict]:
+     """Parse a list of COA PDFs.
+     Args:
+         pdfs (List[str]): A list of PDFs to parse.
+         algorithm (function): The parsing algorithm to use.
+         parser (object): The parser object to use.
+         cache (object): The cache object to use.
+         data (List[dict]): The data to append to.
+         verbose (bool): Whether to print verbose output.
+     Returns:
+         List[dict]: The parsed data.
+     """
+     if data is None:
+         data = []
+     if parser is None:
+         parser = CoADoc()
+     for pdf in pdfs:
+         if not os.path.exists(pdf):
+             if verbose: print(f'PDF not found: {pdf}')
+             continue
+         # Re-use any cached parse, keyed by the file's hash.
+         if cache is not None:
+             pdf_hash = cache.hash_file(pdf)
+             cached = cache.get(pdf_hash)
+             if cached:
+                 if verbose: print('Cached:', pdf)
+                 data.append(cached)
+                 continue
+         try:
+             if algorithm is not None:
+                 coa_data = algorithm(parser, pdf)
+             else:
+                 coa_data = parser.parse(pdf)
+             data.append(coa_data)
+             if cache is not None:
+                 cache.set(pdf_hash, coa_data)
+             if verbose: print('Parsed:', pdf)
+         except Exception:
+             if verbose: print('Error:', pdf)
+     return data
+
+
+ # Initialize COA parsing.
+ cache = Bogart('D://data/.cache/results-ut.jsonl')
+
+ # DEV: Uncomment to clear the cache and re-parse every PDF from scratch.
+ # cache.clear()
+
+ # Parse COAs.
+ all_results = parse_coa_pdfs(
+     pdfs,
+     algorithm=parse_utah_coa,
+     cache=cache,
+ )
+
+ # Read results.
+ results = cache.to_df()
+ print('Number of results:', len(results))
+
+ # Standardize time.
+ results['date'] = pd.to_datetime(results['date_tested'], format='mixed')
+ results['week'] = results['date'].dt.to_period('W').astype(str)
+ results['month'] = results['date'].dt.to_period('M').astype(str)
+ results = results.sort_values('date')
+
+ # Standardize compounds, removing nuisance "analytes" that are really
+ # boilerplate text extracted from the COAs, then alphabetize.
+ analytes = find_unique_analytes(results)
+ nuisance_analytes = [
+     'det_detected',
+     'global_shortages_of_laboratory_suppliesto',
+     'here_recorded_may_not_be_used_as_an_endorsement_for_a_product',
+     'information_see',
+     'information_see_https_totoag_utah_govto_2021_to_04_to_29_toudaf_temporarily_adjusts_medical_cannabis_testing_protocols_due_to',
+     'nd_not_detected',
+     'notes',
+     'notes_sample_was_tested_as_received_the_cannabinoid_results_were_not_adjusted_for_moisture_content',
+     'phtatpthso_togtoaegn_utetashti_nggo_vwto_2_a_0_s',
+     'recorded_the_results_here_recorded_may_not_be_used_as_an_endorsement_for_a_product',
+     'results_pertain_only_to_the_test_sample_listed_in_this_report',
+     'see_https_totoag_utah_govto_2021_to_04_to_29_toudaf_temporarily_adjusts_medical_cannabis_testing_protocols_due_to_global',
+     'shortages_of_laboratory_suppliesto',
+     'tac_2500000',
+     'tac_t',
+     'this_report_may_not_be_reproduced_except_in_its_entirety',
+     'total_cbd',
+     'total_thc',
+ ]
+ analytes = sorted(list(analytes - set(nuisance_analytes)))
+ results = standardize_results(results, analytes)
+
+ # Save the results, named by the date of the last test.
+ last_test_date = results['date'].max().strftime('%Y-%m-%d')
+ outfile = f'D://data/utah/ut-results-{last_test_date}.xlsx'
+ latest = 'D://data/utah/ut-results-latest.csv'
+ results.to_excel(outfile, index=False)
+ results.to_csv(latest, index=False)
+ print('Saved:', outfile)
+ print('Saved:', latest)
+
+ # Print out the features.
+ features = {x: 'string' for x in results.columns}
+ print('Number of features:', len(features))
+ print('Features:', features)
+
+
+ #-----------------------------------------------------------------------
+ # Analyze Utah results.
+ #-----------------------------------------------------------------------
+
+ def format_date(x, pos):
+     """Format an axis tick as a date (note: `%#d` is Windows-specific)."""
+     try:
+         return pd.to_datetime(x).strftime('%b %#d, %Y')
+     except ValueError:
+         return ''
+
+ # Setup.
+ assets_dir = r'C:\Users\keega\Documents\cannlytics\cannabis-data-science\season-4\165-labels\presentation\images\figures'
+ plt.style.use('seaborn-v0_8-whitegrid')
+ plt.rcParams.update({
+     'font.family': 'Times New Roman',
+     'font.size': 24,
+ })
+
+ # FIXME: Read the curated results instead of re-reading the cache.
+
+ # # Read results.
+ # results = cache.to_df()
+ # print('Number of results:', len(results))
+
+ # # Standardize time.
+ # results['date'] = pd.to_datetime(results['date_tested'], format='mixed')
+ # results['week'] = results['date'].dt.to_period('W').astype(str)
+ # results['month'] = results['date'].dt.to_period('M').astype(str)
+ # results = results.sort_values('date')
+
+ # # Standardize compounds.
+ # analytes = find_unique_analytes(results)
+ # results = standardize_results(results, analytes)
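+ # A minimal sketch of the intended read-back, assuming the latest curated
+ # CSV saved above ('D://data/utah/ut-results-latest.csv'):
+ # results = pd.read_csv('D://data/utah/ut-results-latest.csv')
+ # results['date'] = pd.to_datetime(results['date'])
+ # results['week'] = results['date'].dt.to_period('W').astype(str)
+ # results['month'] = results['date'].dt.to_period('M').astype(str)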
+
+
+
+ #-----------------------------------------------------------------------
+ # Lab analysis
+ #-----------------------------------------------------------------------
+
+ # Visualize the number of tests by month.
+ plt.figure(figsize=(18, 8))
+ ax = sns.countplot(data=results.dropna(subset=['month']), x='month', color='skyblue')
+ plt.title('Number of Tests by Date in Utah', pad=10)
+ plt.xlabel('')
+ plt.ylabel('Number of Tests')
+ plt.xticks(rotation=45)
+ ticks = ax.get_xticks()
+ ax.set_xticks(ticks[::4])  # Adjust the interval as needed.
+ ax.set_xticklabels([format_date(item.get_text(), None) for item in ax.get_xticklabels()])
+ plt.tight_layout()
+ plt.savefig(os.path.join(assets_dir, 'ut-lab-timeseries.png'))
+ plt.show()
+
+
+ #-----------------------------------------------------------------------
+ # Producer analysis
+ #-----------------------------------------------------------------------
+
+ # Assign standard producer names.
+ producer_names = {
+     'Harvest of Utah': 'Harvest',
+     'True North of Utah': 'True North',
+     'Tryke': 'Tryke',
+     'Standard Wellness of Utah': 'Standard Wellness',
+     'Tryke Companies of Utah': 'Tryke',
+     'Tryke Companies of\nUtah': 'Tryke',
+     'Pure Plan': 'Pure Plan',
+     'Riverside Farm': 'Riverside Farm',
+     'Riverside Farms': 'Riverside Farm',
+     'Wholesome Ag': 'Wholesome Ag',
+     'Zion Cultivars': 'Zion Cultivars',
+     'Zion Cultivators': 'Zion Cultivars',
+     'Dragonfly Greenhouse': 'Dragonfly',
+     'Dragonfly Processing': 'Dragonfly',
+     'UMC Program': 'UMC Program',
+     'Zion Alchemy': 'Zion Alchemy',
+     'Wasatch Extraction': 'Wasatch Extraction',
+     'Standard Wellness of Utah Great Salt Lake 1/8': 'Standard Wellness',
+     'Tryke Companies of Utah Who Dat Orange': 'Tryke',
+ }
+ results['producer_dba'] = results['producer'].map(producer_names)
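+ # A suggested sanity check: list producers that are missing a standardized
+ # name, so new spellings don't silently drop out of the plots below.
+ unmapped = results.loc[results['producer_dba'].isna(), 'producer'].unique()
+ print('Producers without a standardized name:', unmapped)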
+
+ # Aggregate the number of tests by month and producer DBA.
+ monthly_tests = results.groupby(['month', 'producer_dba']).size().reset_index(name='count')
+
+ # Plot the number of tests by producer by month as stacked bars.
+ colors = sns.color_palette('tab20', n_colors=len(monthly_tests['producer_dba'].unique()))
+ pivot_table = monthly_tests.pivot_table(values='count', index='month', columns='producer_dba', aggfunc='sum').fillna(0)
+ plt.figure(figsize=(21, 9))
+ bottom = None
+ for dba in pivot_table.columns:
+     plt.bar(
+         pivot_table.index,
+         pivot_table[dba],
+         bottom=bottom,
+         label=dba,
+         color=colors.pop(0),
+         edgecolor='grey',
+         alpha=0.8,
+     )
+     if bottom is None:
+         bottom = pivot_table[dba].copy()  # Copy to avoid mutating the pivot table via `+=`.
+     else:
+         bottom += pivot_table[dba]
+ plt.title('Number of Tests by Producer by Month in Utah', pad=10)
+ plt.xlabel('')
+ plt.ylabel('Number of Tests')
+ plt.xticks(rotation=45)
+ ticks = plt.gca().get_xticks()
+ plt.gca().set_xticks(ticks[::3])  # Show every 3rd xtick.
+ plt.legend(loc='upper left', title='Producer', ncol=2)
+ plt.tight_layout()
+ plt.savefig(os.path.join(assets_dir, 'ut-producer-timeseries.png'))
+ plt.show()
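+ # Note: pandas can draw the same stacked bars directly, e.g.
+ # `pivot_table.plot(kind='bar', stacked=True, figsize=(21, 9))`,
+ # though the manual loop above allows per-producer bar styling.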
+
+
+ #-----------------------------------------------------------------------
+ # Timeseries analysis.
+ #-----------------------------------------------------------------------
+
+ import statsmodels.api as sm
+
+ # Look at the trend in THCA in flower since the start of 2024.
+ compound = 'thca'
+ sample = results.loc[results['date'] >= pd.to_datetime('2024-01-01')]
+ avg = sample.groupby(['month'])[compound].mean().reset_index()
+ avg['month'] = pd.to_datetime(avg['month'], errors='coerce')
+ avg = avg.dropna(subset=[compound, 'month'])
+ avg['month_num'] = range(len(avg))
+ X = sm.add_constant(avg['month_num'])
+ y = avg[compound]
+ model = sm.OLS(y, X).fit()
+ slope = model.params['month_num']
+ direction = '+' if slope > 0 else '-'
+ plt.figure(figsize=(13, 8))
+ plt.plot(avg['month'], avg[compound], 'bo-', label='Avg. THCA by month', linewidth=2)
+ plt.plot(avg['month'], model.predict(X), 'r-', label=f'Trend: {direction}{abs(slope):.2f}% per month', linewidth=2)
+ plt.scatter(sample['date'], sample[compound], color='lightblue', s=80)
+ plt.title('Trend of THCA in Utah Cannabis Flower', pad=10)
+ plt.xlabel('')
+ plt.ylabel('THCA')
+ plt.legend()
+ plt.xticks(rotation=45)
+ plt.tight_layout()
+ plt.savefig(os.path.join(assets_dir, 'ut-average-thca-by-month.png'))
+ plt.show()
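+ # A suggested addition: report how well the monthly trend line fits,
+ # using the fitted `model` from above.
+ print(f'R-squared: {model.rsquared:.2f}')
+ print(f"Slope p-value: {model.pvalues['month_num']:.3f}")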
+
+
+ #-----------------------------------------------------------------------
+ # Visualize terpene ratios.
+ #-----------------------------------------------------------------------
+
+ from adjustText import adjust_text
+
+ # Read in NY data and compare to UT data.
+ ny_cache = Bogart('D://data/.cache/results-ny.jsonl')
+ ny_results = ny_cache.to_df()
+ ny_flower_types = [
+     'Plant, Flower - Cured',
+     'Flower',
+ ]
+ ny_flower = ny_results.loc[ny_results['product_type'].isin(ny_flower_types)].copy()
+ ny_flower = standardize_results(ny_flower, analytes)
+ ny_flower['state'] = 'NY'
+ ny_flower['date'] = pd.to_datetime(ny_flower['date_tested'], format='mixed')
+ ny_flower['week'] = ny_flower['date'].dt.to_period('W').astype(str)
+ ny_flower['month'] = ny_flower['date'].dt.to_period('M').astype(str)
+
+ # Create the combined UT/NY sample.
+ results['state'] = 'UT'
+ # sample = results.loc[results['date'] >= pd.to_datetime('2024-01-01')]
+ sample = results.copy()
+ sample = pd.concat([sample, ny_flower])
+ # sample = sample.loc[sample['date'] >= pd.to_datetime('2024-01-01')]
+
+ # Function to create state-vs-state scatter plots.
+ def create_scatter_plot(x_col, y_col, title, x_label, y_label, filename, annotate=False):
+     plt.figure(figsize=(18, 8))
+     ax = sns.scatterplot(
+         data=sample,
+         x=x_col,
+         y=y_col,
+         hue='state',
+         s=200
+     )
+     plt.title(title, pad=10)
+     plt.xlabel(x_label)
+     plt.ylabel(y_label)
+     legend = ax.legend(title='State', bbox_to_anchor=(1.05, 1), loc='upper left')
+     for leg_entry in legend.legendHandles:
+         leg_entry.set_sizes([200])
+
+     # Annotate a handful of samples with a beta-pinene to d-limonene
+     # ratio greater than 0.75.
+     if annotate:
+         texts = []
+         random_state = 420_000
+         high_ratio_samples = sample[sample['beta_pinene'] / sample['d_limonene'] > 0.75]
+         high_ratio_samples = high_ratio_samples.sample(
+             n=min(5, len(high_ratio_samples)),
+             random_state=random_state,
+         )
+         for i, row in high_ratio_samples.iterrows():
+             if pd.notna(row[x_col]) and pd.notna(row[y_col]):
+                 texts.append(ax.text(
+                     row[x_col],
+                     row[y_col],
+                     row['product_name'],
+                     fontsize=24,
+                     color='crimson',
+                 ))
+         adjust_text(texts, only_move={'points': 'xy', 'texts': 'xy'}, arrowprops=dict(arrowstyle='-', color='grey'))
+
+     plt.tight_layout()
+     plt.savefig(os.path.join(assets_dir, filename))
+     plt.show()
+
+
+ # Visualize the ratio of `beta_pinene` to `d_limonene`.
+ create_scatter_plot(
+     y_col='beta_pinene',
+     x_col='d_limonene',
+     title='Ratio of Beta-Pinene to D-Limonene in New York and Utah Cannabis Flower',
+     y_label='Beta-Pinene',
+     x_label='D-Limonene',
+     filename='ut-beta-pinene-to-d-limonene.png',
+     annotate=True,
+ )
+
+ # Visualize the ratio of `alpha_humulene` to `beta_caryophyllene`.
+ create_scatter_plot(
+     y_col='alpha_humulene',
+     x_col='beta_caryophyllene',
+     title='Ratio of Alpha-Humulene to Beta-Caryophyllene in New York and Utah Cannabis Flower',
+     y_label='Alpha-Humulene',
+     x_label='Beta-Caryophyllene',
+     filename='ut-alpha-humulene-to-beta-caryophyllene.png',
+     annotate=False,
+ )
analysis/analyze_results_wa.py ADDED
@@ -0,0 +1,9 @@
+ """
+ Analyze Cannabis Lab Results | Washington
+ Copyright (c) 2024 Cannlytics
+
+ Authors: Keegan Skeate <https://github.com/keeganskeate>
+ Created: 2/1/2024
+ Updated: 2/1/2024
+ License: MIT License <https://github.com/cannlytics/cannabis-data-science/blob/main/LICENSE>
+ """