diff --git a/.gitattributes b/.gitattributes
index a0b886925f3a4a763d8b85d86e58e16e873391d7..9f48abdaf7e3d028226fcb8fbe20fa30ce79d5ab 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -50,3 +50,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.jpeg filter=lfs diff=lfs merge=lfs -text
*.webp filter=lfs diff=lfs merge=lfs -text
*.xlsx filter=lfs diff=lfs merge=lfs -text
+*.csv filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
index e9d99ce0e0b8f8f7eb79102407640d19c0f4c897..389c02685b4f9952536fcd38ee399108e895b80e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,3 +12,6 @@
# Ignore VS Code settings.
*.vscode
+
+# Ignore PyCache
+*__pycache__
diff --git a/README.md b/README.md
index 53497626c289915a7516393b46a40df245c0213d..99d4ff227e17f8d76cfa9c5eefcfea2cf05a9487 100644
--- a/README.md
+++ b/README.md
@@ -14,10 +14,15 @@ tags:
- cannabis
- licenses
- licensees
+ - retail
---
# Cannabis Licenses, Curated by Cannlytics
+
+
+
+
## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
@@ -49,58 +54,55 @@ tags:
### Dataset Summary
-This dataset is a collection of cannabis license data for the licensees that have been permitted in the United States.
+**Cannabis Licenses** is a collection of cannabis license data for each state with permitted adult-use cannabis. The dataset also includes a sub-dataset, `all`, that includes all licenses.
## Dataset Structure
-The dataset is partitioned into subsets for each state.
-
-
-| State | Licenses |
-|-------|----------|
-| [Alaska](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/ak) | |
-| [Arizona](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/az) | |
-| [California](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/ca) | ✅ |
-| [Colorado](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/co) | |
-| [Connecticut](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/ct) | |
-| [Illinois](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/il) | |
-| [Maine](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/me) | ✅ |
-| [Massachusetts](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/ma) | |
-| [Michigan](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/mi) | |
-| [Montana](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/mt) | |
-| [Nevada](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/nv) | ✅ |
-| [New Jersey](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/nj) | ✅ |
-| [New Mexico](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/nm) | |
-| [Oregon](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/or) | ✅ |
-| [Rhode Island](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/ri) | |
-| [Vermont](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/vt) | |
-| [Washington](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/wa) | ✅ |
-
-Coming Soon (2):
+The dataset is partitioned into 18 subsets: one for each state with collected data, plus the `all` aggregate.
+
+| State | Code | Status |
+|-------|------|--------|
+| [All](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/all) | `all` | ✅ |
+| [Alaska](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/ak) | `ak` | ✅ |
+| [Arizona](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/az) | `az` | ✅ |
+| [California](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/ca) | `ca` | ✅ |
+| [Colorado](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/co) | `co` | ✅ |
+| [Connecticut](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/ct) | `ct` | ✅ |
+| [Illinois](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/il) | `il` | ✅ |
+| [Maine](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/me) | `me` | ✅ |
+| [Massachusetts](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/ma) | `ma` | ✅ |
+| [Michigan](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/mi) | `mi` | ✅ |
+| [Montana](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/mt) | `mt` | ✅ |
+| [Nevada](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/nv) | `nv` | ✅ |
+| [New Jersey](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/nj) | `nj` | ✅ |
+| New York | `ny` | ⏳ Expected 2022 Q4 |
+| [New Mexico](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/nm) | `nm` | ⚠️ Under development |
+| [Oregon](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/or) | `or` | ✅ |
+| [Rhode Island](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/ri) | `ri` | ✅ |
+| [Vermont](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/vt) | `vt` | ✅ |
+| Virginia | `va` | ⏳ Expected 2024 |
+| [Washington](https://huggingface.co/datasets/cannlytics/cannabis_licenses/tree/main/data/wa) | `wa` | ✅ |
+
+The following (18) states have issued medical cannabis licenses, but are not (yet) included in the dataset:
-- New York
-- Virginia
-
-Medical (18):
-
-- District of Columbia (D.C.)
-- Utah
-- Oklahoma
-- North Dakota
-- South Dakota
-- Minnesota
-- Missouri
+- Alabama
- Arkansas
+- Delaware
+- District of Columbia (D.C.)
+- Florida
- Louisiana
+- Maryland
+- Minnesota
- Mississippi
-- Alabama
-- Florida
+- Missouri
+- New Hampshire
+- North Dakota
- Ohio
-- West Virginia
+- Oklahoma
- Pennsylvania
-- Maryland
-- Delaware
-- New Hampshire
+- South Dakota
+- Utah
+- West Virginia
### Data Instances
@@ -122,33 +124,34 @@ Below is a non-exhaustive list of fields, used to standardize the various data t
| Field | Example | Description |
|-------|-----|-------------|
-| `id` | `"1046"` | |
-| `license_number` | `"C10-0000423-LIC"` | |
-| `license_status` | `"Active"` | |
-| `license_status_date` | `""` | |
-| `license_term` | `"Provisional"` | |
-| `license_type` | `"Commercial - Retailer"` | |
-| `license_designation` | `"Adult-Use and Medicinal"` | |
-| `issue_date` | `"2019-07-15T00:00:00"` | |
-| `expiration_date` | `"2023-07-14T00:00:00"` | |
-| `licensing_authority_id` | `"BCC"` | |
-| `licensing_authority` | `"Bureau of Cannabis Control (BCC)"` | |
-| `business_legal_name` | `"Movocan"` | |
-| `business_dba_name` | `"Movocan"` | |
-| `business_owner_name` | `"redacted"` | |
-| `business_structure` | `"Corporation"` | |
-| `activity` | `""` | |
-| `premise_street_address` | `"1632 Gateway Rd"` | |
-| `premise_city` | `"Calexico"` | |
-| `premise_state` | `"CA"` | |
-| `premise_county` | `"Imperial"` | |
-| `premise_zip_code` | `"92231"` | |
-| `business_email` | `"redacted@gmail.com"` | |
-| `business_phone` | `"(555) 555-5555"` | |
-| `parcel_number` | `""` | |
-| `premise_latitude` | `32.69035693` | |
-| `premise_longitude` | `-115.38987552` | |
-| `data_refreshed_date` | `"2022-09-21T12:16:33.3866667"` | |
+| `id` | `"1046"` | A state-unique ID for the license. |
+| `license_number` | `"C10-0000423-LIC"` | A unique license number. |
+| `license_status` | `"Active"` | The status of the license. Only licenses that are active are included. |
+| `license_status_date` | `"2022-04-20T00:00"` | The date the status was assigned, an ISO-formatted date if present. |
+| `license_term` | `"Provisional"` | The term for the license. |
+| `license_type` | `"Commercial - Retailer"` | The type of business license. |
+| `license_designation` | `"Adult-Use and Medicinal"` | A state-specific classification for the license. |
+| `issue_date` | `"2019-07-15T00:00:00"` | An issue date for the license, an ISO-formatted date if present. |
+| `expiration_date` | `"2023-07-14T00:00:00"` | An expiration date for the license, an ISO-formatted date if present. |
+| `licensing_authority_id` | `"BCC"` | A unique ID for the state licensing authority. |
+| `licensing_authority` | `"Bureau of Cannabis Control (BCC)"` | The state licensing authority. |
+| `business_legal_name` | `"Movocan"` | The legal name of the business that owns the license. |
+| `business_dba_name` | `"Movocan"` | The name the license is doing business as. |
+| `business_owner_name` | `"redacted"` | The name of the owner of the license. |
+| `business_structure` | `"Corporation"` | The structure of the business that owns the license. |
+| `activity` | `"Pending Inspection"` | Any relevant license activity. |
+| `premise_street_address` | `"1632 Gateway Rd"` | The street address of the business. |
+| `premise_city` | `"Calexico"` | The city of the business. |
+| `premise_state` | `"CA"` | The state abbreviation of the business. |
+| `premise_county` | `"Imperial"` | The county of the business. |
+| `premise_zip_code` | `"92231"` | The zip code of the business. |
+| `business_email` | `"redacted@gmail.com"` | The business email of the license. |
+| `business_phone` | `"(555) 555-5555"` | The business phone of the license. |
+| `business_website` | `"cannlytics.com"` | The business website of the license. |
+| `parcel_number` | `"A42"` | An ID for the business location. |
+| `premise_latitude` | `32.69035693` | The latitude of the business. |
+| `premise_longitude` | `-115.38987552` | The longitude of the business. |
+| `data_refreshed_date` | `"2022-09-21T12:16:33.3866667"` | An ISO-formatted time when the license data was updated. |
### Data Splits
@@ -176,12 +179,12 @@ Data about organizations operating in the cannabis industry for each state is va
| Alaska | |
| Arizona | |
| California | |
-| Colorado | |
+| Colorado | |
| Connecticut | |
-| Illinois | |
+| Illinois | |
| Maine | |
-| Massachusetts | |
-| Michigan | |
+| Massachusetts | |
+| Michigan | |
| Montana | |
| Nevada | |
| New Jersey | |
@@ -191,7 +194,7 @@ Data about organizations operating in the cannabis industry for each state is va
| Vermont | |
| Washington | |
-#### Data Collection and Normalization
+### Data Collection and Normalization
In the `algorithms` directory, you can find the algorithms used for data collection. You can use these algorithms to recreate the dataset. First, you will need to clone the repository:
@@ -241,7 +244,7 @@ The data is for adult-use cannabis licenses. It would be valuable to include med
### Dataset Curators
Curated by [🔥Cannlytics](https://cannlytics.com)
-
+
### License
@@ -267,7 +270,7 @@ Please cite the following if you use the code examples in your research:
```bibtex
@misc{cannlytics2022,
title={Cannabis Data Science},
- author={Skeate, Keegan},
+ author={Skeate, Keegan and O'Sullivan-Sutherland, Candace},
journal={https://github.com/cannlytics/cannabis-data-science},
year={2022}
}
@@ -275,4 +278,4 @@ Please cite the following if you use the code examples in your research:
### Contributions
-Thanks to [🔥Cannlytics](https://cannlytics.com), [@candy-o](https://github.com/candy-o), [@keeganskeate](https://github.com/keeganskeate), and the entire [Cannabis Data Science Team](https://meetup.com/cannabis-data-science/members) for their contributions.
+Thanks to [🔥Cannlytics](https://cannlytics.com), [@candy-o](https://github.com/candy-o), [@hcadeaux](https://huggingface.co/hcadeaux), [@keeganskeate](https://github.com/keeganskeate), and the entire [Cannabis Data Science Team](https://meetup.com/cannabis-data-science/members) for their contributions.
diff --git a/algorithms/get_licenses_ak.py b/algorithms/get_licenses_ak.py
index 30c7af4d553697a8ec637b57028c7875d5b15496..3a705233f7a62818e3b23f3517e00c118d56080d 100644
--- a/algorithms/get_licenses_ak.py
+++ b/algorithms/get_licenses_ak.py
@@ -6,7 +6,7 @@ Authors:
Keegan Skeate
Candace O'Sullivan-Sutherland
Created: 9/29/2022
-Updated: 9/29/2022
+Updated: 10/6/2022
License:
Description:
@@ -15,7 +15,230 @@ Description:
Data Source:
- - Alaska
+ - Department of Commerce, Community, and Economic Development
+ Alcohol and Marijuana Control Office
URL:
-"""
\ No newline at end of file
+"""
+# Standard imports.
+from datetime import datetime
+import os
+from time import sleep
+from typing import Optional
+
+# External imports.
+from cannlytics.data.gis import search_for_address
+from dotenv import dotenv_values
+import pandas as pd
+
+# Selenium imports.
+from selenium import webdriver
+from selenium.webdriver.chrome.options import Options
+from selenium.webdriver.common.by import By
+from selenium.webdriver.chrome.service import Service
+try:
+ import chromedriver_binary # Adds chromedriver binary to path.
+except ImportError:
+ pass # Otherwise, ChromeDriver should be in your path.
+
+
+# Specify where your data lives.
+DATA_DIR = '../data/ak'
+ENV_FILE = '../.env'
+
+# Specify state-specific constants.
+STATE = 'AK'
+ALASKA = {
+ 'licensing_authority_id': 'AAMCO',
+ 'licensing_authority': 'Alaska Alcohol and Marijuana Control Office',
+ 'licenses_url': 'https://www.commerce.alaska.gov/abc/marijuana/Home/licensesearch',
+ 'licenses': {
+ 'columns': {
+ 'License #': 'license_number',
+ 'Business License #': 'id',
+ 'Doing Business As': 'business_dba_name',
+ 'License Type': 'license_type',
+ 'License Status': 'license_status',
+ 'Physical Address': 'address',
+ },
+ },
+}
+
+
+def get_licenses_ak(
+ data_dir: Optional[str] = None,
+ env_file: Optional[str] = '.env',
+ ):
+ """Get Alaska cannabis license data."""
+
+ # Initialize Selenium and specify options.
+ service = Service()
+ options = Options()
+ options.add_argument('--window-size=1920,1200')
+
+ # DEV: Run with the browser open.
+ # options.headless = False
+
+ # PRODUCTION: Run with the browser closed.
+ options.add_argument('--headless')
+ options.add_argument('--disable-gpu')
+ options.add_argument('--no-sandbox')
+
+ # Initiate a Selenium driver.
+ driver = webdriver.Chrome(options=options, service=service)
+
+ # Load the license page.
+ driver.get(ALASKA['licenses_url'])
+
+ # Get the license type select.
+ license_types = []
+ options = driver.find_elements(by=By.TAG_NAME, value='option')
+ for option in options:
+ text = option.text
+ if text:
+ license_types.append(text)
+
+ # Iterate over all of the license types.
+ data = []
+ columns = list(ALASKA['licenses']['columns'].values())
+ for license_type in license_types:
+
+ # Set the text into the select.
+ select = driver.find_element(by=By.ID, value='SearchLicenseTypeID')
+ select.send_keys(license_type)
+
+ # Click search.
+ # TODO: There is probably an elegant way to wait for the table to load.
+ search_button = driver.find_element(by=By.ID, value='mariSearchBtn')
+ search_button.click()
+ sleep(2)
+
+ # Extract the table data.
+ table = driver.find_element(by=By.TAG_NAME, value='tbody')
+ rows = table.find_elements(by=By.TAG_NAME, value='tr')
+ for row in rows:
+ obs = {}
+ cells = row.find_elements(by=By.TAG_NAME, value='td')
+ for i, cell in enumerate(cells):
+ column = columns[i]
+ obs[column] = cell.text.replace('\n', ', ')
+ data.append(obs)
+
+ # End the browser session.
+ service.stop()
+
+ # Standardize the license data.
+ licenses = pd.DataFrame(data)
+ licenses = licenses.assign(
+ business_legal_name=licenses['business_dba_name'],
+ business_owner_name=None,
+ business_structure=None,
+ licensing_authority_id=ALASKA['licensing_authority_id'],
+ licensing_authority=ALASKA['licensing_authority'],
+ license_designation='Adult-Use',
+ license_status_date=None,
+ license_term=None,
+ premise_state=STATE,
+ parcel_number=None,
+ activity=None,
+ issue_date=None,
+ expiration_date=None,
+ )
+
+ # Restrict the license status to active.
+ active_license_types = [
+ 'Active-Operating',
+ 'Active-Pending Inspection',
+ 'Delegated',
+ 'Complete',
+ ]
+ licenses = licenses.loc[licenses['license_status'].isin(active_license_types)]
+
+ # Assign the city and zip code.
+ licenses['premise_city'] = licenses['address'].apply(
+ lambda x: x.split(', ')[1]
+ )
+ licenses['premise_zip_code'] = licenses['address'].apply(
+ lambda x: x.split(', ')[2].replace(STATE, '').strip()
+ )
+
+ # Search for address for each retail license.
+ # Only search for a query once, then re-use the response.
+ # Note: There is probably a much, much more efficient way to do this!!!
+ config = dotenv_values(env_file)
+ api_key = config['GOOGLE_MAPS_API_KEY']
+ queries = {}
+ fields = [
+ 'formatted_address',
+ 'formatted_phone_number',
+ 'geometry/location/lat',
+ 'geometry/location/lng',
+ 'website',
+ ]
+ licenses = licenses.reset_index(drop=True)
+ licenses = licenses.assign(
+ premise_street_address=None,
+ premise_county=None,
+ premise_latitude=None,
+ premise_longitude=None,
+ business_phone=None,
+ business_website=None,
+ )
+ for index, row in licenses.iterrows():
+
+ # Query Google Place API, if necessary.
+ query = ', '.join([row['business_dba_name'], row['address']])
+ gis_data = queries.get(query)
+ if gis_data is None:
+ try:
+ gis_data = search_for_address(query, api_key=api_key, fields=fields)
+ except:
+ gis_data = {}
+ queries[query] = gis_data
+
+ # Record the query.
+ licenses.iat[index, licenses.columns.get_loc('premise_street_address')] = gis_data.get('street')
+ licenses.iat[index, licenses.columns.get_loc('premise_county')] = gis_data.get('county')
+ licenses.iat[index, licenses.columns.get_loc('premise_latitude')] = gis_data.get('latitude')
+ licenses.iat[index, licenses.columns.get_loc('premise_longitude')] = gis_data.get('longitude')
+ licenses.iat[index, licenses.columns.get_loc('business_phone')] = gis_data.get('formatted_phone_number')
+ licenses.iat[index, licenses.columns.get_loc('business_website')] = gis_data.get('website')
+
+ # Clean-up after GIS.
+ licenses.drop(columns=['address'], inplace=True)
+
+ # Optional: Search for business website for email and a photo.
+ licenses['business_email'] = None
+ licenses['business_image_url'] = None
+
+ # Get the refreshed date.
+ licenses['data_refreshed_date'] = datetime.now().isoformat()
+
+ # Save and return the data.
+ if data_dir is not None:
+ if not os.path.exists(data_dir): os.makedirs(data_dir)
+ timestamp = datetime.now().isoformat()[:19].replace(':', '-')
+ retailers = licenses.loc[licenses['license_type'] == 'Retail Marijuana Store']
+ licenses.to_csv(f'{data_dir}/licenses-{STATE.lower()}-{timestamp}.csv', index=False)
+ retailers.to_csv(f'{data_dir}/retailers-{STATE.lower()}-{timestamp}.csv', index=False)
+ return licenses
+
+
+# === Test ===
+if __name__ == '__main__':
+
+ # Support command line usage.
+ import argparse
+ try:
+ arg_parser = argparse.ArgumentParser()
+ arg_parser.add_argument('--d', dest='data_dir', type=str)
+ arg_parser.add_argument('--data_dir', dest='data_dir', type=str)
+ arg_parser.add_argument('--env', dest='env_file', type=str)
+        args = vars(arg_parser.parse_args())
+ except SystemExit:
+ args = {'d': DATA_DIR, 'env_file': ENV_FILE}
+
+ # Get licenses, saving them to the specified directory.
+ data_dir = args.get('d', args.get('data_dir'))
+ env_file = args.get('env_file')
+ data = get_licenses_ak(data_dir, env_file=env_file)
diff --git a/algorithms/get_licenses_az.py b/algorithms/get_licenses_az.py
index 9d18eae4d88d24728789d2fafc825c9a4656eb62..45a35cdbd998087999cb72703345a3eeddb83581 100644
--- a/algorithms/get_licenses_az.py
+++ b/algorithms/get_licenses_az.py
@@ -6,7 +6,7 @@ Authors:
Keegan Skeate
Candace O'Sullivan-Sutherland
Created: 9/27/2022
-Updated: 9/30/2022
+Updated: 10/7/2022
License:
Description:
@@ -27,23 +27,15 @@ from time import sleep
from typing import Optional
# External imports.
-from bs4 import BeautifulSoup
from cannlytics.data.gis import geocode_addresses
-from cannlytics.utils import camel_to_snake
-from cannlytics.utils.constants import DEFAULT_HEADERS
-import matplotlib.pyplot as plt
import pandas as pd
-import requests
-import seaborn as sns
+import zipcodes
# Selenium imports.
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
-from selenium.common.exceptions import (
- TimeoutException,
-)
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
try:
@@ -54,193 +46,288 @@ except ImportError:
# Specify where your data lives.
DATA_DIR = '../data/az'
+ENV_FILE = '../.env'
# Specify state-specific constants.
STATE = 'AZ'
ARIZONA = {
'licensing_authority_id': 'ADHS',
'licensing_authority': 'Arizona Department of Health Services',
- 'retailers': {
- 'url': 'https://azcarecheck.azdhs.gov/s/?licenseType=null',
- },
+ 'licenses_url': 'https://azcarecheck.azdhs.gov/s/?licenseType=null',
}
-# def get_licenses_az(
-# data_dir: Optional[str] = None,
-# env_file: Optional[str] = '.env',
-# ):
-# """Get Arizona cannabis license data."""
-
-# DEV:
-data_dir = DATA_DIR
-env_file = '../.env'
-
-
-# Create directories if necessary.
-if not os.path.exists(data_dir): os.makedirs(data_dir)
-
-# Initialize Selenium.
-service = Service()
-options = Options()
-options.add_argument('--window-size=1920,1200')
-# DEV:
-options.headless = False
-# options.add_argument('--headless')
-options.add_argument('--disable-gpu')
-options.add_argument('--no-sandbox')
-driver = webdriver.Chrome(options=options, service=service)
-
-# Load the license page.
-url = ARIZONA['retailers']['url']
-driver.get(url)
-
-# Wait for the page to load by waiting to detect the image.
-try:
- el = (By.CLASS_NAME, 'slds-container_center')
- detect = EC.presence_of_element_located(el)
- WebDriverWait(driver, timeout=30).until(detect)
-except TimeoutException:
- print('Failed to load page within %i seconds.' % (30))
-
-# Get the map container.
-container = driver.find_element(by=By.CLASS_NAME, value='slds-container_center')
-
-# Click "Load more" until all of the licenses are visible.
-more = True
-while(more):
- button = container.find_element(by=By.TAG_NAME, value='button')
- driver.execute_script('arguments[0].scrollIntoView(true);', button)
- button.click()
- counter = container.find_element(by=By.CLASS_NAME, value='count-text')
- more = int(counter.text.replace(' more', ''))
-
-# Get license data for each retailer.
-retailers = []
-els = container.find_elements(by=By.CLASS_NAME, value='map-list__item')
-for i, el in enumerate(els):
-
- # Get a retailer's data.
- count = i + 1
- xpath = f'/html/body/div[3]/div[2]/div/div[2]/div[2]/div/div/c-azcc-portal-home/c-azcc-map/div/div[2]/div[2]/div[2]/div[{count}]/c-azcc-map-list-item/div'
- list_item = el.find_element(by=By.XPATH, value=xpath)
- body = list_item.find_element(by=By.CLASS_NAME, value='slds-media__body')
- divs = body.find_elements(by=By.TAG_NAME, value='div')
- name = divs[0].text
- legal_name = divs[1].text
- if not name:
- name = legal_name
- address = divs[3].text
- address_parts = address.split(',')
- parts = divs[2].text.split(' · ')
-
- # Get the retailer's link to get more details.
- link = divs[-1].find_element(by=By.TAG_NAME, value='a')
- href = link.get_attribute('href')
-
- # Record the retailer's data.
- obs = {
- 'address': address,
- 'details_url': href,
- 'business_legal_name': legal_name,
- 'business_dba_name': name,
- 'business_phone': parts[-1],
- 'license_status': parts[0],
- 'license_type': parts[1],
- 'premise_street_address': address_parts[0],
- 'premise_city': address_parts[1],
- 'premise_zip_code': address_parts[-1].replace('AZ ', ''),
- }
- retailers.append(obs)
-
-# Standardize the retailer data.
-retailers = pd.DataFrame(retailers)
-retailers['licensing_authority_id'] = ARIZONA['licensing_authority_id']
-retailers['licensing_authority'] = ARIZONA['licensing_authority']
-retailers['license_designation'] = 'Adult-Use'
-retailers['premise_state'] = STATE
-retailers['license_status_date'] = None
-retailers['license_term'] = None
-retailers['business_structure'] = None
-retailers['activity'] = None
-retailers['parcel_number'] = None
-
-# TODO: Get each retailer's details.
-for index, row in retailers.iterrows():
-
- # Load the retailer's details webpage.
- driver.get(row['details_url'])
- # https://azcarecheck.azdhs.gov/s/facility-details?facilityId=001t000000L0TAaAAN&activeTab=details
-
- # TODO: Get the `business_email`.
- # lightning-formatted-email
-
-
- # TODO: Get the `license_number`
-
-
- # TODO: Get `issue_date` and `expiration_date`
-
-
- # TODO: Get `business_owner_name`
-
-
- # TODO: Get `license_designation` ("Services").
-
-
- # TODO: Create entries for cultivations!
-
-
- # TODO: Get the `premise_latitude` and `premise_longitude`.
- # https://maps.google.com/maps?daddr=33.447334955594650,-111.991646657827630&ll=
-
-
-
-# TODO: Lookup counties for the retailers.
-
-
-# TODO: Geocode-cultivations.
-
-# Geocode licenses to get `premise_latitude` and `premise_longitude`.
-# config = dotenv_values(env_file)
-# google_maps_api_key = config['GOOGLE_MAPS_API_KEY']
-# retailers = geocode_addresses(
-# retailers,
-# api_key=google_maps_api_key,
-# address_field='address',
-# )
-# drop_cols = ['state', 'state_name', 'address', 'formatted_address']
-# retailers.drop(columns=drop_cols, inplace=True)
-# gis_cols = {
-# 'county': 'premise_county',
-# 'latitude': 'premise_latitude',
-# 'longitude': 'premise_longitude',
-# }
-# retailers.rename(columns=gis_cols, inplace=True)
-
-# TODO: Save and return the data.
-# if data_dir is not None:
-# timestamp = datetime.now().isoformat()[:19].replace(':', '-')
-# retailers.to_excel(f'{data_dir}/licenses-{STATE.lower()}-{timestamp}.xlsx')
-# return retailers
+def county_from_zip(x):
+ """Find a county given a zip code. Returns `None` if no match."""
+ try:
+ return zipcodes.matching(x)[0]['county']
+    except (KeyError, IndexError):
+ return None
+
+
+def get_licenses_az(
+ data_dir: Optional[str] = None,
+ env_file: Optional[str] = '.env',
+ ):
+ """Get Arizona cannabis license data."""
+
+ # Create directories if necessary.
+ if not os.path.exists(data_dir): os.makedirs(data_dir)
+
+ # Initialize Selenium and specify options.
+ service = Service()
+ options = Options()
+ options.add_argument('--window-size=1920,1200')
+
+ # DEV: Run with the browser open.
+ # options.headless = False
+
+ # PRODUCTION: Run with the browser closed.
+ options.add_argument('--headless')
+ options.add_argument('--disable-gpu')
+ options.add_argument('--no-sandbox')
+
+ # Initiate a Selenium driver.
+ driver = webdriver.Chrome(options=options, service=service)
+
+ # Load the license page.
+ driver.get(ARIZONA['licenses_url'])
+ detect = (By.CLASS_NAME, 'slds-container_center')
+ WebDriverWait(driver, 30).until(EC.presence_of_element_located(detect))
+
+ # Get the map container.
+ container = driver.find_element(by=By.CLASS_NAME, value='slds-container_center')
+
+ # Click "Load more" until all of the licenses are visible.
+ more = True
+ while(more):
+ button = container.find_element(by=By.TAG_NAME, value='button')
+ driver.execute_script('arguments[0].scrollIntoView(true);', button)
+ button.click()
+ counter = container.find_element(by=By.CLASS_NAME, value='count-text')
+ more = int(counter.text.replace(' more', ''))
+
+ # Get license data for each retailer.
+ data = []
+ els = container.find_elements(by=By.CLASS_NAME, value='map-list__item')
+ for i, el in enumerate(els):
+
+ # Get a retailer's data.
+ count = i + 1
+ xpath = f'/html/body/div[3]/div[2]/div/div[2]/div[2]/div/div/c-azcc-portal-home/c-azcc-map/div/div[2]/div[2]/div[2]/div[{count}]/c-azcc-map-list-item/div'
+ list_item = el.find_element(by=By.XPATH, value=xpath)
+ body = list_item.find_element(by=By.CLASS_NAME, value='slds-media__body')
+ divs = body.find_elements(by=By.TAG_NAME, value='div')
+ name = divs[0].text
+ legal_name = divs[1].text
+ if not name:
+ name = legal_name
+ address = divs[3].text
+ address_parts = address.split(',')
+ parts = divs[2].text.split(' · ')
+
+ # Get the retailer's link to get more details.
+ link = divs[-1].find_element(by=By.TAG_NAME, value='a')
+ href = link.get_attribute('href')
+
+ # Record the retailer's data.
+ obs = {
+ 'address': address,
+ 'details_url': href,
+ 'business_legal_name': legal_name,
+ 'business_dba_name': name,
+ 'business_phone': parts[-1],
+ 'license_status': parts[0],
+ 'license_type': parts[1],
+ 'premise_street_address': address_parts[0].strip(),
+ 'premise_city': address_parts[1].strip(),
+ 'premise_zip_code': address_parts[-1].replace('AZ ', '').strip(),
+ }
+ data.append(obs)
+
+ # Standardize the retailer data.
+ retailers = pd.DataFrame(data)
+ retailers = retailers.assign(
+ business_email=None,
+ business_owner_name=None,
+ business_structure=None,
+ business_image_url=None,
+ business_website=None,
+ id=retailers.index,
+ licensing_authority_id=ARIZONA['licensing_authority_id'],
+ licensing_authority=ARIZONA['licensing_authority'],
+ license_designation='Adult-Use',
+ license_number=None,
+ license_status_date=None,
+ license_term=None,
+ premise_latitude=None,
+ premise_longitude=None,
+ premise_state=STATE,
+ issue_date=None,
+ expiration_date=None,
+ parcel_number=None,
+ activity=None,
+ )
+
+ # Get each retailer's details.
+ cultivators = pd.DataFrame(columns=retailers.columns)
+ manufacturers = pd.DataFrame(columns=retailers.columns)
+ for index, row in retailers.iterrows():
+
+ # Load the licenses's details webpage.
+ driver.get(row['details_url'])
+ detect = (By.CLASS_NAME, 'slds-container_center')
+ WebDriverWait(driver, 30).until(EC.presence_of_element_located(detect))
+ container = driver.find_element(by=By.CLASS_NAME, value='slds-container_center')
+ sleep(4)
+
+ # Get the `business_email`.
+ links = container.find_elements(by=By.TAG_NAME, value='a')
+ for link in links:
+ href = link.get_attribute('href')
+ if href is None: continue
+ if href.startswith('mailto'):
+ business_email = href.replace('mailto:', '')
+ col = retailers.columns.get_loc('business_email')
+ retailers.iat[index, col] = business_email
+ break
+
+ # Get the `license_number`
+ for link in links:
+ href = link.get_attribute('href')
+ if href is None: continue
+ if href.startswith('https://azdhs-licensing'):
+ col = retailers.columns.get_loc('license_number')
+ retailers.iat[index, col] = link.text
+ break
+
+ # Get the `premise_latitude` and `premise_longitude`.
+ for link in links:
+ href = link.get_attribute('href')
+ if href is None: continue
+ if href.startswith('https://maps.google.com/'):
+ coords = href.split('=')[1].split('&')[0].split(',')
+ lat_col = retailers.columns.get_loc('premise_latitude')
+ long_col = retailers.columns.get_loc('premise_longitude')
+ retailers.iat[index, lat_col] = float(coords[0])
+ retailers.iat[index, long_col] = float(coords[1])
+ break
+
+ # Get the `issue_date`.
+ key = 'License Effective'
+        el = container.find_element(by=By.XPATH, value=f"//p[contains(text(),'{key}')]/following-sibling::lightning-formatted-text")
+ col = retailers.columns.get_loc('issue_date')
+ retailers.iat[index, col] = el.text
+
+ # Get the `expiration_date`.
+ key = 'License Expires'
+        el = container.find_element(by=By.XPATH, value=f"//p[contains(text(),'{key}')]/following-sibling::lightning-formatted-text")
+ col = retailers.columns.get_loc('expiration_date')
+ retailers.iat[index, col] = el.text
+
+ # Get the `business_owner_name`.
+ key = 'Owner / License'
+        el = container.find_element(by=By.XPATH, value=f"//p[contains(text(),'{key}')]/following-sibling::lightning-formatted-text")
+        col = retailers.columns.get_loc('business_owner_name')
+ retailers.iat[index, col] = el.text
+
+ # Get the `license_designation` ("Services").
+ key = 'Services'
+        el = container.find_element(by=By.XPATH, value=f"//p[contains(text(),'{key}')]/following-sibling::lightning-formatted-rich-text")
+ col = retailers.columns.get_loc('license_designation')
+ retailers.iat[index, col] = el.text
+
+ # Create entries for cultivations.
+ cultivator = retailers.iloc[index].copy()
+ key = 'Offsite Cultivation Address'
+ el = container.find_element_by_xpath(f"//p[contains(text(),'{key}')]/following-sibling::lightning-formatted-text")
+ address = el.text
+ if address:
+ parts = address.split(',')
+ cultivator['address'] = address
+ cultivator['premise_street_address'] = parts[0]
+ cultivator['premise_city'] = parts[1].strip()
+ cultivator['premise_zip_code'] = parts[-1].replace(STATE, '').strip()
+ cultivator['license_type'] = 'Offsite Cultivation'
+ cultivators.append(cultivator, ignore_index=True)
+
+ # Create entries for manufacturers.
+ manufacturer = retailers.iloc[index].copy()
+ key = 'Manufacture Address'
+ el = container.find_element_by_xpath(f"//p[contains(text(),'{key}')]/following-sibling::lightning-formatted-text")
+ address = el.text
+ if address:
+ parts = address.split(',')
+ manufacturer['address'] = address
+ manufacturer['premise_street_address'] = parts[0]
+ manufacturer['premise_city'] = parts[1].strip()
+ manufacturer['premise_zip_code'] = parts[-1].replace(STATE, '').strip()
+ manufacturer['license_type'] = 'Offsite Cultivation'
+ manufacturers.append(manufacturer, ignore_index=True)
+
+ # End the browser session.
+ service.stop()
+ retailers.drop(column=['address', 'details_url'], inplace=True)
+
+ # Lookup counties by zip code.
+ retailers['premise_county'] = retailers['premise_zip_code'].apply(county_from_zip)
+ cultivators['premise_county'] = cultivators['premise_zip_code'].apply(county_from_zip)
+ manufacturers['premise_county'] = manufacturers['premise_zip_code'].apply(county_from_zip)
+
+ # Setup geocoding
+ config = dotenv_values(env_file)
+ api_key = config['GOOGLE_MAPS_API_KEY']
+ drop_cols = ['state', 'state_name', 'county', 'address', 'formatted_address']
+ gis_cols = {'latitude': 'premise_latitude', 'longitude': 'premise_longitude'}
+
+ # # Geocode cultivators.
+ # cultivators = geocode_addresses(cultivators, api_key=api_key, address_field='address')
+ # cultivators.drop(columns=drop_cols, inplace=True)
+ # cultivators.rename(columns=gis_cols, inplace=True)
+
+ # # Geocode manufacturers.
+ # manufacturers = geocode_addresses(manufacturers, api_key=api_key, address_field='address')
+ # manufacturers.drop(columns=drop_cols, inplace=True)
+ # manufacturers.rename(columns=gis_cols, inplace=True)
+
+ # TODO: Lookup business website and image.
+
+ # Aggregate all licenses.
+ licenses = pd.concat([retailers, cultivators, manufacturers])
+
+ # Get the refreshed date.
+ timestamp = datetime.now().isoformat()
+ licenses['data_refreshed_date'] = timestamp
+ retailers['data_refreshed_date'] = timestamp
+ # cultivators['data_refreshed_date'] = timestamp
+ # manufacturers['data_refreshed_date'] = timestamp
+
+ # Save and return the data.
+ if data_dir is not None:
+ timestamp = timestamp[:19].replace(':', '-')
+ licenses.to_csv(f'{data_dir}/licenses-{STATE.lower()}-{timestamp}.csv', index=False)
+ retailers.to_csv(f'{data_dir}/retailers-{STATE.lower()}-{timestamp}.csv', index=False)
+ # cultivators.to_csv(f'{data_dir}/cultivators-{STATE.lower()}-{timestamp}.csv', index=False)
+ # manufacturers.to_csv(f'{data_dir}/manufacturers-{STATE.lower()}-{timestamp}.csv', index=False)
+ return licenses
# === Test ===
-# if __name__ == '__main__':
-
-# # Support command line usage.
-# import argparse
-# try:
-# arg_parser = argparse.ArgumentParser()
-# arg_parser.add_argument('--d', dest='data_dir', type=str)
-# arg_parser.add_argument('--data_dir', dest='data_dir', type=str)
-# arg_parser.add_argument('--env', dest='env_file', type=str)
-# args = arg_parser.parse_args()
-# except SystemExit:
-# args = {'d': DATA_DIR, 'env_file': ENV_FILE}
-
-# # Get licenses, saving them to the specified directory.
-# data_dir = args.get('d', args.get('data_dir'))
-# env_file = args.get('env_file')
-# data = get_licenses_az(data_dir, env_file=env_file)
if __name__ == '__main__':

    # Support command line usage.
    import argparse
    try:
        arg_parser = argparse.ArgumentParser()
        arg_parser.add_argument('--d', dest='data_dir', type=str)
        arg_parser.add_argument('--data_dir', dest='data_dir', type=str)
        arg_parser.add_argument('--env', dest='env_file', type=str)
        # FIX: Convert the `Namespace` to a `dict`; a `Namespace` has no
        # `.get` method, so the lookups below raised an AttributeError.
        args = vars(arg_parser.parse_args())
    except SystemExit:
        args = {'data_dir': DATA_DIR, 'env_file': ENV_FILE}

    # Get licenses, saving them to the specified directory,
    # falling back to the module defaults when arguments are omitted.
    data_dir = args.get('data_dir') or DATA_DIR
    env_file = args.get('env_file') or ENV_FILE
    data = get_licenses_az(data_dir, env_file=env_file)
diff --git a/algorithms/get_licenses_ca.py b/algorithms/get_licenses_ca.py
index af211b52fa2600d3ecd792a9d58560f02ec604d5..92d8dcb1867cb15838c8a1766e99adbb12121ae2 100644
--- a/algorithms/get_licenses_ca.py
+++ b/algorithms/get_licenses_ca.py
@@ -80,11 +80,18 @@ def get_licenses_ca(
columns = [camel_to_snake(x) for x in columns]
license_data.columns = columns
+ # TODO: Lookup business website and image.
+ license_data['business_image_url'] = None
+ license_data['business_website'] = None
+
+ # Restrict to only active licenses.
+ license_data = license_data.loc[license_data['license_status'] == 'Active']
+
# Save and return the data.
if data_dir is not None:
if not os.path.exists(data_dir): os.makedirs(data_dir)
timestamp = datetime.now().isoformat()[:19].replace(':', '-')
- license_data.to_excel(f'{data_dir}/licenses-ca-{timestamp}.xlsx')
+ license_data.to_csv(f'{data_dir}/licenses-ca-{timestamp}.csv', index=False)
return license_data
if __name__ == '__main__':
diff --git a/algorithms/get_licenses_co.py b/algorithms/get_licenses_co.py
index 545d37131d70aed645a016df52cba654283d0710..d54babcb5c229672f83b8149c49a2a623502ff37 100644
--- a/algorithms/get_licenses_co.py
+++ b/algorithms/get_licenses_co.py
@@ -6,7 +6,7 @@ Authors:
Keegan Skeate
Candace O'Sullivan-Sutherland
Created: 9/29/2022
-Updated: 9/29/2022
+Updated: 10/4/2022
License:
Description:
@@ -15,7 +15,207 @@ Description:
Data Source:
- - Colorado
- URL: <>
+ - Colorado Department of Revenue | Marijuana Enforcement Division
+ URL:
-"""
\ No newline at end of file
+"""
+# Standard imports.
+from datetime import datetime
+import os
+from time import sleep
+from typing import Optional
+
+# External imports.
+from bs4 import BeautifulSoup
+from cannlytics.data.data import load_google_sheet
+from cannlytics.data.gis import search_for_address
+from dotenv import dotenv_values
+import pandas as pd
+import requests
+
+
# Specify where your data lives.
DATA_DIR = '../data/co'
ENV_FILE = '../.env'

# Specify state-specific constants.
STATE = 'CO'
COLORADO = {
    'licensing_authority_id': 'MED',
    'licensing_authority': 'Colorado Marijuana Enforcement Division',
    'licenses_url': 'https://sbg.colorado.gov/med/licensed-facilities',
    'licenses': {
        # Map source spreadsheet headers to the standardized license schema.
        # NOTE: Some source headers carry trailing spaces — keep them as-is.
        'columns': {
            'LicenseNumber': 'license_number',
            'FacilityName': 'business_legal_name',
            'DBA': 'business_dba_name',
            'City': 'premise_city',
            'ZipCode': 'premise_zip_code',
            'DateUpdated': 'data_refreshed_date',
            'Licensee Name ': 'business_legal_name',
            'License # ': 'license_number',
            'City ': 'premise_city',
            'Zip': 'premise_zip_code',
        },
        # Source columns that are not part of the standardized schema.
        'drop_columns': [
            'FacilityType', # This causes an error with `license_type`.
            'Potency',
            'Solvents',
            'Microbial',
            'Pesticides',
            'Mycotoxin',
            'Elemental Impurities',
            'Water Activity'
        ]
    }
}
+
+
def get_licenses_co(
        data_dir: Optional[str] = None,
        env_file: Optional[str] = '.env',
    ):
    """Get Colorado cannabis license data.

    Scrapes the MED licensed-facilities page for links to the Google
    Sheets of licensees, downloads each "Medical" and "Retail" sheet,
    standardizes the data, and geocodes retail ("Stores") licenses.

    Args:
        data_dir: A directory where the data will be saved as CSVs.
        env_file: A `.env` file with a `GOOGLE_MAPS_API_KEY`.

    Returns:
        A DataFrame of all licenses (retailer GIS data is saved separately).
    """

    # Get the licenses webpage.
    url = COLORADO['licenses_url']
    response = requests.get(url)
    soup = BeautifulSoup(response.content, 'html.parser')

    # Get the Google Sheets for each license type.
    # FIX: Use `link.get` so links without an `href` are skipped, instead
    # of re-using the `href` of a prior iteration (or raising a NameError
    # if the very first link has no `href`).
    docs = {}
    links = soup.find_all('a')
    for link in links:
        href = link.get('href')
        if href and 'docs.google' in href:
            docs[link.text] = href

    # Download each "Medical" and "Retail" Google Sheet.
    licenses = pd.DataFrame()
    license_designations = ['Medical', 'Retail']
    columns = COLORADO['licenses']['columns']
    drop_columns = COLORADO['licenses']['drop_columns']
    for license_type, doc in docs.items():
        for license_designation in license_designations:
            license_data = load_google_sheet(doc, license_designation)
            license_data['license_type'] = license_type
            license_data['license_designation'] = license_designation
            license_data.rename(columns=columns, inplace=True)
            license_data.drop(columns=drop_columns, inplace=True, errors='ignore')
            licenses = pd.concat([licenses, license_data])
            sleep(0.22)  # Be respectful to the data host.

    # Standardize the license data.
    licenses = licenses.assign(
        id=licenses['license_number'],
        license_status=None,
        licensing_authority_id=COLORADO['licensing_authority_id'],
        licensing_authority=COLORADO['licensing_authority'],
        license_designation='Adult-Use',
        premise_state=STATE,
        license_status_date=None,
        license_term=None,
        issue_date=None,
        expiration_date=None,
        business_owner_name=None,
        business_structure=None,
        activity=None,
        parcel_number=None,
        business_phone=None,
        business_email=None,
        business_image_url=None,
    )

    # Fill empty DBA names and strip trailing whitespace.
    licenses.loc[licenses['business_dba_name'] == '', 'business_dba_name'] = licenses['business_legal_name']
    licenses.business_dba_name.fillna(licenses.business_legal_name, inplace=True)
    licenses.business_legal_name.fillna(licenses.business_dba_name, inplace=True)
    licenses = licenses.loc[~licenses.business_dba_name.isna()]
    licenses.business_dba_name = licenses.business_dba_name.apply(lambda x: x.strip())
    licenses.business_legal_name = licenses.business_legal_name.apply(lambda x: x.strip())

    # Optional: Turn all capital case to title case.

    # Clean zip code column (zips are read from the sheets as floats).
    licenses['premise_zip_code'] = licenses['premise_zip_code'].apply(
        lambda x: str(round(x)) if pd.notnull(x) else x
    )
    licenses.loc[licenses['premise_zip_code'].isnull(), 'premise_zip_code'] = ''

    # Search for an address for each retail license, re-using the
    # response for any duplicate query to limit API calls.
    config = dotenv_values(env_file)
    api_key = config['GOOGLE_MAPS_API_KEY']
    cols = ['business_dba_name', 'premise_city', 'premise_state', 'premise_zip_code']
    # FIX: Copy the slice so the assignments below act on an independent
    # frame instead of raising a `SettingWithCopyWarning`.
    retailers = licenses.loc[licenses['license_type'] == 'Stores'].copy()
    retailers['query'] = retailers[cols].apply(
        lambda row: ', '.join(row.values.astype(str)),
        axis=1,
    )
    queries = {}
    fields = [
        'formatted_address',
        'formatted_phone_number',
        'geometry/location/lat',
        'geometry/location/lng',
        'website',
    ]
    retailers = retailers.reset_index(drop=True)
    retailers = retailers.assign(
        premise_street_address=None,
        premise_county=None,
        premise_latitude=None,
        premise_longitude=None,
        business_website=None,
        business_phone=None,
    )
    for index, row in retailers.iterrows():
        query = row['query']
        gis_data = queries.get(query)
        if gis_data is None:
            try:
                gis_data = search_for_address(query, api_key=api_key, fields=fields)
            except Exception:
                # Geocoding is best-effort: leave GIS fields empty on failure.
                gis_data = {}
            queries[query] = gis_data
        retailers.iat[index, retailers.columns.get_loc('premise_street_address')] = gis_data.get('street')
        retailers.iat[index, retailers.columns.get_loc('premise_county')] = gis_data.get('county')
        retailers.iat[index, retailers.columns.get_loc('premise_latitude')] = gis_data.get('latitude')
        retailers.iat[index, retailers.columns.get_loc('premise_longitude')] = gis_data.get('longitude')
        retailers.iat[index, retailers.columns.get_loc('business_website')] = gis_data.get('website')
        retailers.iat[index, retailers.columns.get_loc('business_phone')] = gis_data.get('formatted_phone_number')

    # Clean-up after getting GIS data.
    retailers.drop(columns=['query'], inplace=True)

    # Save and return the data.
    if data_dir is not None:
        if not os.path.exists(data_dir): os.makedirs(data_dir)
        timestamp = datetime.now().isoformat()[:19].replace(':', '-')
        licenses.to_csv(f'{data_dir}/licenses-{STATE.lower()}-{timestamp}.csv', index=False)
        retailers.to_csv(f'{data_dir}/retailers-{STATE.lower()}-{timestamp}.csv', index=False)
    return licenses
+
+
# === Test ===
if __name__ == '__main__':

    # Support command line usage.
    import argparse
    try:
        arg_parser = argparse.ArgumentParser()
        arg_parser.add_argument('--d', dest='data_dir', type=str)
        arg_parser.add_argument('--data_dir', dest='data_dir', type=str)
        arg_parser.add_argument('--env', dest='env_file', type=str)
        # FIX: Convert the `Namespace` to a `dict`; a `Namespace` has no
        # `.get` method, so the lookups below raised an AttributeError.
        args = vars(arg_parser.parse_args())
    except SystemExit:
        args = {'data_dir': DATA_DIR, 'env_file': ENV_FILE}

    # Get licenses, saving them to the specified directory,
    # falling back to the module defaults when arguments are omitted.
    data_dir = args.get('data_dir') or DATA_DIR
    env_file = args.get('env_file') or ENV_FILE
    data = get_licenses_co(data_dir, env_file=env_file)
diff --git a/algorithms/get_licenses_ct.py b/algorithms/get_licenses_ct.py
index e7f340ce52724fc335e9572d3f3bb8a762fcf330..9a2cb48c5398e0c2e989b2bc8d24acbbf7d8be59 100644
--- a/algorithms/get_licenses_ct.py
+++ b/algorithms/get_licenses_ct.py
@@ -6,7 +6,7 @@ Authors:
Keegan Skeate
Candace O'Sullivan-Sutherland
Created: 9/29/2022
-Updated: 9/29/2022
+Updated: 10/3/2022
License:
Description:
@@ -15,7 +15,149 @@ Description:
Data Source:
- - Connecticut
+ - Connecticut State Department of Consumer Protection
URL:
-"""
\ No newline at end of file
+"""
+# Standard imports.
+from datetime import datetime
+import os
+from typing import Optional
+
+# External imports.
+from bs4 import BeautifulSoup
+from cannlytics.data.gis import geocode_addresses
+from dotenv import dotenv_values
+import pandas as pd
+import requests
+
+
# Specify where your data lives.
DATA_DIR = '../data/ct'
ENV_FILE = '../.env'

# Specify state-specific constants.
STATE = 'CT'
CONNECTICUT = {
    'licensing_authority_id': 'CSDCP',
    'licensing_authority': 'Connecticut State Department of Consumer Protection',
    'licenses_url': 'https://portal.ct.gov/DCP/Medical-Marijuana-Program/Connecticut-Medical-Marijuana-Dispensary-Facilities',
    'retailers': {
        # Expected column order of the dispensary HTML table on the page.
        'columns': [
            'business_legal_name',
            'address',
            'business_website',
            'business_email',
            'business_phone',
        ]
    }
}
+
+
def get_licenses_ct(
        data_dir: Optional[str] = None,
        env_file: Optional[str] = '.env',
    ):
    """Get Connecticut cannabis license data.

    Scrapes the dispensary table from the Department of Consumer
    Protection website, geocodes each address, and optionally saves
    the data as a CSV.

    Args:
        data_dir: A directory where the data will be saved as a CSV.
        env_file: A `.env` file with a `GOOGLE_MAPS_API_KEY`.

    Returns:
        A DataFrame of retailer licenses.
    """

    # Get the license webpage.
    url = CONNECTICUT['licenses_url']
    response = requests.get(url)
    soup = BeautifulSoup(response.content, 'html.parser')

    # Extract the license data, skipping the header row.
    # FIX: Pair cells with expected columns via `zip` so a row with extra
    # cells cannot raise an IndexError.
    data = []
    columns = CONNECTICUT['retailers']['columns']
    table = soup.find('table')
    rows = table.find_all('tr')
    for row in rows[1:]:
        cells = row.find_all('td')
        obs = {column: cell.text for column, cell in zip(columns, cells)}
        data.append(obs)

    # Standardize the license data.
    retailers = pd.DataFrame(data)
    retailers = retailers.assign(
        id=retailers.index,
        license_status=None,
        business_dba_name=retailers['business_legal_name'],
        license_number=None,
        licensing_authority_id=CONNECTICUT['licensing_authority_id'],
        licensing_authority=CONNECTICUT['licensing_authority'],
        license_designation='Adult-Use',
        premise_state=STATE,
        license_status_date=None,
        license_term=None,
        issue_date=None,
        expiration_date=None,
        business_owner_name=None,
        business_structure=None,
        activity=None,
        parcel_number=None,
        business_image_url=None,
        license_type=None,
    )

    # Get address parts (format: 'street, city, CT zip').
    retailers['premise_street_address'] = retailers['address'].apply(
        lambda x: x.split(',')[0]
    )
    retailers['premise_city'] = retailers['address'].apply(
        lambda x: x.split('CT')[0].strip().split(',')[-2]
    )
    retailers['premise_zip_code'] = retailers['address'].apply(
        lambda x: x.split('CT')[-1].replace('\xa0', '').replace(',', '').strip()
    )

    # Geocode the licenses.
    config = dotenv_values(env_file)
    google_maps_api_key = config['GOOGLE_MAPS_API_KEY']
    retailers = geocode_addresses(
        retailers,
        api_key=google_maps_api_key,
        address_field='address',
    )
    # Prefer the geocoded city whenever the formatted address is in-state.
    retailers['premise_city'] = retailers['formatted_address'].apply(
        lambda x: x.split(', ')[1].split(',')[0] if STATE in str(x) else x
    )
    drop_cols = ['state', 'state_name', 'address', 'formatted_address']
    retailers.drop(columns=drop_cols, inplace=True)
    gis_cols = {
        'county': 'premise_county',
        'latitude': 'premise_latitude',
        'longitude': 'premise_longitude'
    }
    retailers.rename(columns=gis_cols, inplace=True)

    # Record when the data was collected.
    retailers['data_refreshed_date'] = datetime.now().isoformat()

    # Save and return the data.
    if data_dir is not None:
        if not os.path.exists(data_dir): os.makedirs(data_dir)
        timestamp = datetime.now().isoformat()[:19].replace(':', '-')
        retailers.to_csv(f'{data_dir}/retailers-{STATE.lower()}-{timestamp}.csv', index=False)
    return retailers
+
+
# === Test ===
if __name__ == '__main__':

    # Support command line usage.
    import argparse
    try:
        arg_parser = argparse.ArgumentParser()
        arg_parser.add_argument('--d', dest='data_dir', type=str)
        arg_parser.add_argument('--data_dir', dest='data_dir', type=str)
        arg_parser.add_argument('--env', dest='env_file', type=str)
        # FIX: Convert the `Namespace` to a `dict`; a `Namespace` has no
        # `.get` method, so the lookups below raised an AttributeError.
        args = vars(arg_parser.parse_args())
    except SystemExit:
        args = {'data_dir': DATA_DIR, 'env_file': ENV_FILE}

    # Get licenses, saving them to the specified directory,
    # falling back to the module defaults when arguments are omitted.
    data_dir = args.get('data_dir') or DATA_DIR
    env_file = args.get('env_file') or ENV_FILE
    data = get_licenses_ct(data_dir, env_file=env_file)
diff --git a/algorithms/get_licenses_il.py b/algorithms/get_licenses_il.py
index 115dc5034978abc5e119b448e6f20803093f20a2..fbb2a4f8a14945253c1d4b7f04642e2002c7987d 100644
--- a/algorithms/get_licenses_il.py
+++ b/algorithms/get_licenses_il.py
@@ -6,7 +6,7 @@ Authors:
Keegan Skeate
Candace O'Sullivan-Sutherland
Created: 9/29/2022
-Updated: 9/29/2022
+Updated: 10/3/2022
License:
Description:
@@ -15,7 +15,180 @@ Description:
Data Source:
- - Illinois
- URL: <>
+ - Illinois Department of Financial and Professional Regulation
+ Licensed Adult Use Cannabis Dispensaries
+ URL:
-"""
\ No newline at end of file
+"""
+# Standard imports.
+from datetime import datetime
+import os
+from typing import Optional
+
+# External imports.
+from dotenv import dotenv_values
+from cannlytics.data.gis import geocode_addresses
+import pandas as pd
+import pdfplumber
+import requests
+
+
# Specify where your data lives.
DATA_DIR = '../data/il'
ENV_FILE = '../.env'

# Specify state-specific constants.
STATE = 'IL'
ILLINOIS = {
    'licensing_authority_id': 'IDFPR',
    'licensing_authority': 'Illinois Department of Financial and Professional Regulation',
    'retailers': {
        # Source PDF listing licensed adult-use dispensaries.
        'url': 'https://www.idfpr.com/LicenseLookup/AdultUseDispensaries.pdf',
        # Expected column order of the PDF's license table.
        'columns': [
            'business_legal_name',
            'business_dba_name',
            'address',
            'medical',
            'issue_date',
            'license_number',
        ],
    },
}
+
+
def get_licenses_il(
        data_dir: Optional[str] = None,
        env_file: Optional[str] = '.env',
        **kwargs,
    ):
    """Get Illinois cannabis license data.

    Downloads the IDFPR adult-use dispensaries PDF, parses the license
    table, geocodes each address, and optionally saves the data.

    Args:
        data_dir: A directory where the data will be saved as a CSV.
        env_file: A `.env` file with a `GOOGLE_MAPS_API_KEY`.
        **kwargs: Accepted for interface parity with the other states.

    Returns:
        A DataFrame of retailer licenses.
    """

    # Create necessary directories.
    # FIX: When `data_dir` is None, download the PDF to a temporary
    # directory instead of creating a literal './None/pdfs' directory.
    import tempfile
    if data_dir is None:
        pdf_dir = os.path.join(tempfile.gettempdir(), 'il-license-pdfs')
    else:
        if not os.path.exists(data_dir): os.makedirs(data_dir)
        pdf_dir = os.path.join(data_dir, 'pdfs')
    if not os.path.exists(pdf_dir): os.makedirs(pdf_dir)

    # Download the retailers PDF.
    retailers_url = ILLINOIS['retailers']['url']
    filename = os.path.join(pdf_dir, 'illinois_retailers.pdf')
    response = requests.get(retailers_url)
    with open(filename, 'wb') as f:
        f.write(response.content)

    # Read the retailers PDF.
    # FIX: Use a context manager so the PDF file handle is closed.
    with pdfplumber.open(filename) as pdf:

        # Get the table data, excluding the headers and removing empty cells.
        table_data = []
        for i, page in enumerate(pdf.pages):
            table = page.extract_table()
            if i == 0:
                table = table[4:]  # Skip the title rows on the first page.
            table = [c for row in table
                     if (c := [elem for elem in row if elem is not None])]
            table_data += table

        # Capture the PDF's modification date, e.g. "D:20221003120000-05'00'",
        # for use as `data_refreshed_date` after the file is closed.
        mod_date = pdf.metadata['ModDate'].replace('D:', '')

    # Standardize the data.
    licensee_columns = ILLINOIS['retailers']['columns']
    retailers = pd.DataFrame(table_data, columns=licensee_columns)
    retailers = retailers.assign(
        licensing_authority_id=ILLINOIS['licensing_authority_id'],
        licensing_authority=ILLINOIS['licensing_authority'],
        license_designation='Adult-Use',
        premise_state=STATE,
        license_status='Active',
        license_status_date=None,
        license_type='Commercial - Retailer',
        license_term=None,
        expiration_date=None,
        business_legal_name=retailers['business_dba_name'],
        business_owner_name=None,
        business_structure=None,
        business_email=None,
        activity=None,
        parcel_number=None,
        id=retailers['license_number'],
        business_image_url=None,
        business_website=None,
    )

    # Apply `medical` to `license_designation`.
    retailers.loc[retailers['medical'] == 'Yes', 'license_designation'] = 'Adult-Use and Medicinal'
    retailers.drop(columns=['medical'], inplace=True)

    # Clean the organization names.
    retailers['business_legal_name'] = retailers['business_legal_name'].str.replace('\n', '', regex=False)
    retailers['business_dba_name'] = retailers['business_dba_name'].str.replace('*', '', regex=False)

    # Separate address into 'street', 'city', 'state', 'zip_code', 'phone_number'.
    streets, cities, states, zip_codes, phone_numbers = [], [], [], [], []
    for index, row in retailers.iterrows():
        parts = row.address.split(' \n')
        streets.append(parts[0])
        phone_numbers.append(parts[-1])
        locales = parts[1]
        city_locales = locales.split(', ')
        state_locales = city_locales[-1].split(' ')
        cities.append(city_locales[0])
        states.append(state_locales[0])
        zip_codes.append(state_locales[-1])
    retailers['premise_street_address'] = pd.Series(streets)
    retailers['premise_city'] = pd.Series(cities)
    retailers['premise_state'] = pd.Series(states)
    retailers['premise_zip_code'] = pd.Series(zip_codes)
    retailers['business_phone'] = pd.Series(phone_numbers)

    # Convert the issue date to ISO format.
    retailers['issue_date'] = retailers['issue_date'].apply(
        lambda x: pd.to_datetime(x).isoformat()
    )

    # Format the refreshed date as an ISO timestamp.
    date = mod_date
    date = date[:4] + '-' + date[4:6] + '-' + date[6:8] + 'T' + date[8:10] + \
        ':' + date[10:12] + ':' + date[12:].replace("'", ':').rstrip(':')
    retailers['data_refreshed_date'] = date

    # Geocode licenses to get `premise_latitude` and `premise_longitude`.
    config = dotenv_values(env_file)
    google_maps_api_key = config['GOOGLE_MAPS_API_KEY']
    retailers['address'] = retailers['address'].str.replace('*', '', regex=False)
    retailers = geocode_addresses(
        retailers,
        api_key=google_maps_api_key,
        address_field='address',
    )
    drop_cols = ['state', 'state_name', 'address', 'formatted_address']
    retailers.drop(columns=drop_cols, inplace=True)
    gis_cols = {
        'county': 'premise_county',
        'latitude': 'premise_latitude',
        'longitude': 'premise_longitude'
    }
    retailers.rename(columns=gis_cols, inplace=True)

    # Save and return the data.
    if data_dir is not None:
        timestamp = datetime.now().isoformat()[:19].replace(':', '-')
        retailers.to_csv(f'{data_dir}/retailers-{STATE.lower()}-{timestamp}.csv', index=False)
    return retailers
+
+
# === Test ===
if __name__ == '__main__':

    # Support command line usage.
    import argparse
    try:
        arg_parser = argparse.ArgumentParser()
        arg_parser.add_argument('--d', dest='data_dir', type=str)
        arg_parser.add_argument('--data_dir', dest='data_dir', type=str)
        arg_parser.add_argument('--env', dest='env_file', type=str)
        # FIX: Convert the `Namespace` to a `dict`; a `Namespace` has no
        # `.get` method, so the lookups below raised an AttributeError.
        args = vars(arg_parser.parse_args())
    except SystemExit:
        args = {'data_dir': DATA_DIR, 'env_file': ENV_FILE}

    # Get licenses, saving them to the specified directory,
    # falling back to the module defaults when arguments are omitted.
    data_dir = args.get('data_dir') or DATA_DIR
    env_file = args.get('env_file') or ENV_FILE
    data = get_licenses_il(data_dir, env_file=env_file)
diff --git a/algorithms/get_licenses_ma.py b/algorithms/get_licenses_ma.py
index 161f81339ef921fbd82d43790efcb67d6ed44dd4..6d71672305226fa2d3e0bdb45cbd3be58313a294 100644
--- a/algorithms/get_licenses_ma.py
+++ b/algorithms/get_licenses_ma.py
@@ -6,7 +6,7 @@ Authors:
Keegan Skeate
Candace O'Sullivan-Sutherland
Created: 9/29/2022
-Updated: 9/29/2022
+Updated: 10/7/2022
License:
Description:
@@ -15,9 +15,132 @@ Description:
Data Source:
- - Massachusetts
- URL: <>
+ - Massachusetts Cannabis Control Commission Data Catalog
+ URL:
"""
+# Standard imports.
+from datetime import datetime
+import os
+from typing import Optional
+# External imports.
+from cannlytics.data.opendata import OpenData
+
# Specify where your data lives.
DATA_DIR = '../data/ma'

# Specify state-specific constants.
STATE = 'MA'
MASSACHUSETTS = {
    'licensing_authority_id': 'MACCC',
    'licensing_authority': 'Massachusetts Cannabis Control Commission',
    'licenses': {
        # Map CCC Open Data field names to the standardized license schema.
        'columns': {
            'license_number': 'license_number',
            'business_name': 'business_legal_name',
            'establishment_address_1': 'premise_street_address',
            'establishment_address_2': 'premise_street_address_2',
            'establishment_city': 'premise_city',
            'establishment_zipcode': 'premise_zip_code',
            'county': 'premise_county',
            'license_type': 'license_type',
            'application_status': 'license_status',
            'lic_status': 'license_term',
            'approved_license_type': 'license_designation',
            'commence_operations_date': 'license_status_date',
            'massachusetts_business': 'id',
            'dba_name': 'business_dba_name',
            'establishment_activities': 'activity',
            'cccupdatedate': 'data_refreshed_date',
            'establishment_state': 'premise_state',
            'latitude': 'premise_latitude',
            'longitude': 'premise_longitude',
        },
        # Source fields that are not part of the standardized schema.
        'drop': [
            'square_footage_establishment',
            'cooperative_total_canopy',
            'cooperative_cultivation_environment',
            'establishment_cultivation_environment',
            'abutters_count',
            'is_abutters_notified',
            'business_zipcode',
            'dph_rmd_number',
            'geocoded_county',
            'geocoded_address',
            'name_of_rmd',
            'priority_applicant_type',
            'rmd_priority_certification',
            'dba_registration_city',
            'county_lat',
            'county_long',
        ]
    },
}
+
+
def get_licenses_ma(
        data_dir: Optional[str] = None,
        **kwargs,
    ):
    """Get Massachusetts cannabis license data.

    Retrieves approved licensees from the Cannabis Control Commission's
    Open Data API, standardizes the fields, and optionally saves the data.

    Args:
        data_dir: A directory where the data will be saved as CSVs.
        **kwargs: Accepted for interface parity with the other states.

    Returns:
        A DataFrame of approved licenses.
    """

    # Get the licenses data from the CCC Open Data API.
    ccc = OpenData()
    licenses = ccc.get_licensees('approved')

    # Standardize the licenses data.
    constants = MASSACHUSETTS['licenses']
    licenses.drop(columns=constants['drop'], inplace=True)
    licenses.rename(columns=constants['columns'], inplace=True)
    licenses = licenses.assign(
        licensing_authority_id=MASSACHUSETTS['licensing_authority_id'],
        licensing_authority=MASSACHUSETTS['licensing_authority'],
        business_structure=None,
        business_email=None,
        business_owner_name=None,
        parcel_number=None,
        issue_date=None,
        expiration_date=None,
        business_image_url=None,
        business_website=None,
        business_phone=None,
    )

    # Append `premise_street_address_2` to `premise_street_address`.
    # FIX: Fill missing values first so a NaN (float) second line cannot
    # raise an AttributeError, and so the old `.replace('nan', '')` hack
    # can no longer corrupt addresses that contain the substring 'nan'.
    cols = ['premise_street_address', 'premise_street_address_2']
    licenses[cols] = licenses[cols].fillna('')
    licenses['premise_street_address'] = licenses[cols].apply(
        lambda x: ' '.join([str(x.iloc[0]).strip(), str(x.iloc[1]).strip()]).strip(),
        axis=1,
    )
    licenses.drop(columns=['premise_street_address_2'], inplace=True)

    # Optional: Look-up business websites for each license.

    # Save and return the data.
    if data_dir is not None:
        if not os.path.exists(data_dir): os.makedirs(data_dir)
        timestamp = datetime.now().isoformat()[:19].replace(':', '-')
        retailers = licenses.loc[licenses['license_type'].str.contains('Retailer')]
        retailers.to_csv(f'{data_dir}/retailers-{STATE.lower()}-{timestamp}.csv', index=False)
        licenses.to_csv(f'{data_dir}/licenses-{STATE.lower()}-{timestamp}.csv', index=False)
    return licenses
+
+
# === Test ===
if __name__ == '__main__':

    # Support command line usage.
    import argparse
    try:
        arg_parser = argparse.ArgumentParser()
        arg_parser.add_argument('--d', dest='data_dir', type=str)
        arg_parser.add_argument('--data_dir', dest='data_dir', type=str)
        # FIX: Convert the `Namespace` to a `dict`; a `Namespace` has no
        # `.get` method, so the lookups below raised an AttributeError.
        args = vars(arg_parser.parse_args())
    except SystemExit:
        args = {'data_dir': DATA_DIR}

    # Get licenses, saving them to the specified directory,
    # falling back to the module default when the argument is omitted.
    data_dir = args.get('data_dir') or DATA_DIR
    data = get_licenses_ma(data_dir)
diff --git a/algorithms/get_licenses_me.py b/algorithms/get_licenses_me.py
index 8906b2ea1980a4b2ec89938a1179fe71ae44dc6a..3d62714d5041864caeaa1ee1834622e941304ddc 100644
--- a/algorithms/get_licenses_me.py
+++ b/algorithms/get_licenses_me.py
@@ -6,7 +6,7 @@ Authors:
Keegan Skeate
Candace O'Sullivan-Sutherland
Created: 9/29/2022
-Updated: 9/30/2022
+Updated: 10/7/2022
License:
Description:
@@ -90,7 +90,7 @@ def get_licenses_me(
break
# Download the licenses workbook.
- filename = licenses_url.split('/')[-1]
+ filename = licenses_url.split('/')[-1].split('?')[0]
licenses_source_file = os.path.join(file_dir, filename)
response = requests.get(licenses_url)
with open(licenses_source_file, 'wb') as doc:
@@ -99,21 +99,24 @@ def get_licenses_me(
# Extract the data from the license workbook.
licenses = pd.read_excel(licenses_source_file)
licenses.rename(columns=MAINE['licenses']['columns'], inplace=True)
- licenses['licensing_authority_id'] = MAINE['licensing_authority_id']
- licenses['licensing_authority'] = MAINE['licensing_authority']
- licenses['license_designation'] = 'Adult-Use'
- licenses['premise_state'] = STATE
- licenses['license_status_date'] = None
- licenses['license_term'] = None
- licenses['issue_date'] = None
- licenses['expiration_date'] = None
- licenses['business_structure'] = None
- licenses['business_email'] = None
- licenses['business_phone'] = None
- licenses['activity'] = None
- licenses['parcel_number'] = None
- licenses['premise_street_address'] = None
- licenses['id'] = licenses['license_number']
+ licenses = licenses.assign(
+ licensing_authority_id=MAINE['licensing_authority_id'],
+ licensing_authority=MAINE['licensing_authority'],
+ license_designation='Adult-Use',
+ premise_state=STATE,
+ license_status_date=None,
+ license_term=None,
+ issue_date=None,
+ expiration_date=None,
+ business_structure=None,
+ business_email=None,
+ business_phone=None,
+ activity=None,
+ parcel_number=None,
+ premise_street_address=None,
+ id=licenses['license_number'],
+ business_image_url=None,
+ )
# Remove duplicates.
licenses.drop_duplicates(subset='license_number', inplace=True)
@@ -137,31 +140,30 @@ def get_licenses_me(
# Geocode licenses to get `premise_latitude` and `premise_longitude`.
config = dotenv_values(env_file)
- google_maps_api_key = config['GOOGLE_MAPS_API_KEY']
+ api_key = config['GOOGLE_MAPS_API_KEY']
cols = ['premise_city', 'premise_state']
licenses['address'] = licenses[cols].apply(
lambda row: ', '.join(row.values.astype(str)),
axis=1,
)
- licenses = geocode_addresses(
- licenses,
- api_key=google_maps_api_key,
- address_field='address',
- )
+ licenses = geocode_addresses(licenses, address_field='address', api_key=api_key)
drop_cols = ['state', 'state_name', 'address', 'formatted_address',
'contact_type', 'contact_city', 'contact_description']
- licenses.drop(columns=drop_cols, inplace=True)
gis_cols = {
'county': 'premise_county',
'latitude': 'premise_latitude',
'longitude': 'premise_longitude',
}
+ licenses['premise_zip_code'] = licenses['formatted_address'].apply(
+ lambda x: x.split(', ')[2].split(',')[0].split(' ')[-1] if STATE in str(x) else x
+ )
+ licenses.drop(columns=drop_cols, inplace=True)
licenses.rename(columns=gis_cols, inplace=True)
# Save and return the data.
if data_dir is not None:
timestamp = datetime.now().isoformat()[:19].replace(':', '-')
- licenses.to_excel(f'{data_dir}/licenses-{STATE.lower()}-{timestamp}.xlsx')
+ licenses.to_csv(f'{data_dir}/licenses-{STATE.lower()}-{timestamp}.csv', index=False)
return licenses
diff --git a/algorithms/get_licenses_mi.py b/algorithms/get_licenses_mi.py
index 9ca96acfaba48264e995f615055c05b40161a2ed..9e5f5748802bdf7d1af870e74cd973661681c2da 100644
--- a/algorithms/get_licenses_mi.py
+++ b/algorithms/get_licenses_mi.py
@@ -6,7 +6,7 @@ Authors:
Keegan Skeate
Candace O'Sullivan-Sutherland
Created: 9/29/2022
-Updated: 9/29/2022
+Updated: 10/8/2022
License:
Description:
@@ -15,7 +15,245 @@ Description:
Data Source:
- - Michigan
- URL: <>
+ - Michigan Cannabis Regulatory Agency
+ URL:
-"""
\ No newline at end of file
+"""
+# Standard imports.
+from datetime import datetime
+import os
+from time import sleep
+from typing import Optional
+
+# External imports.
+from cannlytics.data.gis import geocode_addresses
+from dotenv import dotenv_values
+import pandas as pd
+
+# Selenium imports.
+from selenium import webdriver
+from selenium.webdriver.chrome.options import Options
+from selenium.webdriver.common.by import By
+from selenium.webdriver.chrome.service import Service
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support.ui import Select
+try:
+ import chromedriver_binary # Adds chromedriver binary to path.
+except ImportError:
+ pass # Otherwise, ChromeDriver should be in your path.
+
+
+# Specify where your data lives.
+DATA_DIR = '../data/mi'
+ENV_FILE = '../.env'
+
+# Specify state-specific constants.
+STATE = 'MI'
+MICHIGAN = {
+ 'licensing_authority_id': 'CRA',
+ 'licensing_authority': 'Michigan Cannabis Regulatory Agency',
+ 'licenses_url': 'https://aca-prod.accela.com/MIMM/Cap/CapHome.aspx?module=Adult_Use&TabName=Adult_Use',
+ 'medicinal_url': 'https://aca-prod.accela.com/MIMM/Cap/CapHome.aspx?module=Licenses&TabName=Licenses&TabList=Home%7C0%7CLicenses%7C1%7CAdult_Use%7C2%7CEnforcement%7C3%7CRegistryCards%7C4%7CCurrentTabIndex%7C1',
+ 'licenses': {
+ 'columns': {
+ 'Record Number': 'license_number',
+ 'Record Type': 'license_type',
+ 'License Name': 'business_legal_name',
+ 'Address': 'address',
+ 'Expiration Date': 'expiration_date',
+ 'Status': 'license_status',
+ 'Action': 'activity',
+ 'Notes': 'license_designation',
+ 'Disciplinary Action': 'license_term',
+ },
+ },
+}
+
+
+def wait_for_id_invisible(driver, value, seconds=30):
+ """Wait for a given value to be invisible."""
+ WebDriverWait(driver, seconds).until(
+ EC.invisibility_of_element((By.ID, value))
+ )
+
+
+def get_licenses_mi(
+ data_dir: Optional[str] = None,
+ env_file: Optional[str] = '.env',
+ ):
+ """Get Michigan cannabis license data."""
+
+ # Initialize Selenium and specify options.
+ service = Service()
+ options = Options()
+ options.add_argument('--window-size=1920,1200')
+
+ # DEV: Run with the browser open.
+ options.headless = False
+
+ # PRODUCTION: Run with the browser closed.
+ # options.add_argument('--headless')
+ # options.add_argument('--disable-gpu')
+ # options.add_argument('--no-sandbox')
+
+ # Initiate a Selenium driver.
+ driver = webdriver.Chrome(options=options, service=service)
+
+ # Load the license page.
+ url = MICHIGAN['licenses_url']
+ driver.get(url)
+
+ # Get the various license types, excluding certain types without addresses.
+ select = Select(driver.find_element(by=By.TAG_NAME, value='select'))
+ license_types = []
+ options = driver.find_elements(by=By.TAG_NAME, value='option')
+ for option in options:
+ text = option.text
+ if text and '--' not in text:
+ license_types.append(text)
+
+ # Restrict certain license types.
+ license_types = license_types[1:-2]
+
+ # FIXME: Iterate over license types.
+ data = []
+ columns = list(MICHIGAN['licenses']['columns'].values())
+ for license_type in license_types:
+
+ # Select the various license types.
+ try:
+ select.select_by_visible_text(license_type)
+ except:
+ pass
+ wait_for_id_invisible(driver, 'divGlobalLoading')
+
+ # Click the search button.
+ search_button = driver.find_element(by=By.ID, value='ctl00_PlaceHolderMain_btnNewSearch')
+ search_button.click()
+ wait_for_id_invisible(driver, 'divGlobalLoading')
+
+ # Iterate over all of the pages.
+ iterate = True
+ while iterate:
+
+ # Get all of the license data.
+ grid = driver.find_element(by=By.ID, value='ctl00_PlaceHolderMain_dvSearchList')
+ rows = grid.find_elements(by=By.TAG_NAME, value='tr')
+ rows = [x.text for x in rows]
+ rows = [x for x in rows if 'Download results' not in x and not x.startswith('< Prev')]
+ cells = []
+ for row in rows[1:]: # Skip the header.
+ obs = {}
+ cells = row.split('\n')
+ for i, cell in enumerate(cells):
+ column = columns[i]
+ obs[column] = cell
+ data.append(obs)
+
+ # Keep clicking the next button until the next button is disabled.
+ next_button = driver.find_elements(by=By.CLASS_NAME, value='aca_pagination_PrevNext')[-1]
+ current_page = driver.find_element(by=By.CLASS_NAME, value='SelectedPageButton').text
+ next_button.click()
+ wait_for_id_invisible(driver, 'divGlobalLoading')
+ next_page = driver.find_element(by=By.CLASS_NAME, value='SelectedPageButton').text
+ if current_page == next_page:
+ iterate = False
+
+ # TODO: Also get all of the medical licenses!
+ # https://aca-prod.accela.com/MIMM/Cap/CapHome.aspx?module=Licenses&TabName=Licenses&TabList=Home%7C0%7CLicenses%7C1%7CAdult_Use%7C2%7CEnforcement%7C3%7CRegistryCards%7C4%7CCurrentTabIndex%7C1
+
+ # End the browser session.
+ service.stop()
+
+ # Standardize the data.
+ licenses = pd.DataFrame(data)
+ licenses = licenses.assign(
+ id=licenses.index,
+ licensing_authority_id=MICHIGAN['licensing_authority_id'],
+ licensing_authority=MICHIGAN['licensing_authority'],
+ premise_state=STATE,
+ license_status_date=None,
+ issue_date=None,
+ business_owner_name=None,
+ business_structure=None,
+ parcel_number=None,
+ business_phone=None,
+ business_email=None,
+ business_image_url=None,
+ license_designation=None,
+ business_website=None,
+ business_dba_name=licenses['business_legal_name'],
+ )
+
+ # Assign `license_term` if necessary.
+ try:
+ licenses['license_term']
+ except KeyError:
+ licenses['license_term'] = None
+
+ # Clean `license_type`.
+ licenses['license_type'] = licenses['license_type'].apply(
+ lambda x: x.replace(' - License', '')
+ )
+
+ # Format expiration date as an ISO formatted date.
+ licenses['expiration_date'] = licenses['expiration_date'].apply(
+ lambda x: pd.to_datetime(x).isoformat()
+ )
+
+ # Geocode the licenses.
+ config = dotenv_values(env_file)
+ google_maps_api_key = config['GOOGLE_MAPS_API_KEY']
+ licenses = geocode_addresses(
+ licenses,
+ api_key=google_maps_api_key,
+ address_field='address',
+ )
+ licenses['premise_street_address'] = licenses['formatted_address'].apply(
+ lambda x: x.split(',')[0] if STATE in str(x) else x
+ )
+ licenses['premise_city'] = licenses['formatted_address'].apply(
+ lambda x: x.split(', ')[1].split(',')[0] if STATE in str(x) else x
+ )
+ licenses['premise_zip_code'] = licenses['formatted_address'].apply(
+ lambda x: x.split(', ')[2].split(',')[0].split(' ')[-1] if STATE in str(x) else x
+ )
+ drop_cols = ['state', 'state_name', 'address', 'formatted_address']
+ gis_cols = {
+ 'county': 'premise_county',
+ 'latitude': 'premise_latitude',
+ 'longitude': 'premise_longitude'
+ }
+ licenses.drop(columns=drop_cols, inplace=True)
+ licenses.rename(columns=gis_cols, inplace=True)
+
+ # Get the refreshed date.
+ licenses['data_refreshed_date'] = datetime.now().isoformat()
+
+ # Save and return the data.
+ if data_dir is not None:
+ if not os.path.exists(data_dir): os.makedirs(data_dir)
+ timestamp = datetime.now().isoformat()[:19].replace(':', '-')
+ licenses.to_csv(f'{data_dir}/licenses-{STATE.lower()}-{timestamp}.csv', index=False)
+ return licenses
+
+
+# === Test ===
+if __name__ == '__main__':
+
+ # Support command line usage.
+ import argparse
+ try:
+ arg_parser = argparse.ArgumentParser()
+ arg_parser.add_argument('--d', dest='data_dir', type=str)
+ arg_parser.add_argument('--data_dir', dest='data_dir', type=str)
+ arg_parser.add_argument('--env', dest='env_file', type=str)
+ args = vars(arg_parser.parse_args())
+ except SystemExit:
+ args = {'d': DATA_DIR, 'env_file': ENV_FILE}
+
+ # Get licenses, saving them to the specified directory.
+ data_dir = args.get('d', args.get('data_dir'))
+ env_file = args.get('env_file')
+ data = get_licenses_mi(data_dir, env_file=env_file)
diff --git a/algorithms/get_licenses_mt.py b/algorithms/get_licenses_mt.py
index fad0c2a4380c7c78c0be1f8284e5214b920d1581..346ba8607e6205e6afec16ee5d030d8162f22892 100644
--- a/algorithms/get_licenses_mt.py
+++ b/algorithms/get_licenses_mt.py
@@ -6,7 +6,7 @@ Authors:
Keegan Skeate
Candace O'Sullivan-Sutherland
Created: 9/27/2022
-Updated: 9/30/2022
+Updated: 10/5/2022
License:
Description:
@@ -22,12 +22,13 @@ Data Source:
# Standard imports.
from datetime import datetime
import os
-from time import sleep
+from typing import Optional
# External imports.
-from bs4 import BeautifulSoup
-from cannlytics.utils import camel_to_snake
+from cannlytics.data.gis import search_for_address
from cannlytics.utils.constants import DEFAULT_HEADERS
+from dotenv import dotenv_values
+import pandas as pd
import pdfplumber
import requests
@@ -39,6 +40,32 @@ ENV_FILE = '../.env'
# Specify state-specific constants.
STATE = 'MT'
MONTANA = {
+ 'licensing_authority_id': 'MTCCD',
+ 'licensing_authority': 'Montana Cannabis Control Division',
+ 'licenses': {
+ 'columns': [
+ {
+ 'key': 'premise_city',
+ 'name': 'City',
+ 'area': [0, 0.25, 0.2, 0.95],
+ },
+ {
+ 'key': 'business_legal_name',
+ 'name': 'Location Name',
+ 'area': [0.2, 0.25, 0.6, 0.95],
+ },
+ {
+ 'key': 'license_designation',
+ 'name': 'Sales Type',
+ 'area': [0.6, 0.25, 0.75, 0.95],
+ },
+ {
+ 'key': 'business_phone',
+ 'name': 'Phone Number',
+ 'area': [0.75, 0.25, 1, 0.95],
+ },
+ ]
+ },
'retailers': {
'url': 'https://mtrevenue.gov/?mdocs-file=60245',
'columns': ['city', 'dba', 'license_type', 'phone']
@@ -49,104 +76,203 @@ MONTANA = {
'transporters': {'url': 'https://mtrevenue.gov/?mdocs-file=72489'},
}
-# DEV:
-data_dir = DATA_DIR
-pdf_dir = f'{data_dir}/pdfs'
-
-# Create directories if necessary.
-if not os.path.exists(data_dir): os.makedirs(data_dir)
-if not os.path.exists(pdf_dir): os.makedirs(pdf_dir)
-
-# Download the retailers PDF.
-timestamp = datetime.now().isoformat()[:19].replace(':', '-')
-outfile = f'{pdf_dir}/mt-retailers-{timestamp}.pdf'
-response = requests.get(MONTANA['retailers']['url'], headers=DEFAULT_HEADERS)
-with open(outfile, 'wb') as pdf:
- pdf.write(response.content)
-
-# FIXME: Extract text by section!
-# E.g.
-# doc = pdfplumber.open(outfile)
-# page = doc.pages[0]
-# img = page.to_image(resolution=150)
-# img.draw_rects(
-# [[0, 0.25 * page.height, 0.2 * page.width, 0.95 * page.height]]
-# )
-
-# Extract the data from the PDF.
-rows = []
-skip_lines = ['GOVERNOR ', 'DIRECTOR ', 'Cannabis Control Division',
-'Licensed Dispensary locations', 'Please note', 'registered ',
-'City Location Name Sales Type Phone Number', 'Page ']
-doc = pdfplumber.open(outfile)
-for page in doc.pages:
- text = page.extract_text()
- lines = text.split('\n')
- for line in lines:
- skip = False
- for skip_line in skip_lines:
- if line.startswith(skip_line):
- skip = True
+
+def get_licenses_mt(
+ data_dir: Optional[str] = None,
+ env_file: Optional[str] = '.env',
+ ):
+ """Get Montana cannabis license data."""
+
+ # Create directories if necessary.
+ pdf_dir = f'{data_dir}/pdfs'
+ if not os.path.exists(data_dir): os.makedirs(data_dir)
+ if not os.path.exists(pdf_dir): os.makedirs(pdf_dir)
+
+ # Download the retailers PDF.
+ timestamp = datetime.now().isoformat()[:19].replace(':', '-')
+ outfile = f'{pdf_dir}/mt-retailers-{timestamp}.pdf'
+ response = requests.get(MONTANA['retailers']['url'], headers=DEFAULT_HEADERS)
+ with open(outfile, 'wb') as pdf:
+ pdf.write(response.content)
+
+ # Read the PDF.
+ doc = pdfplumber.open(outfile)
+
+ # Get the table rows.
+ rows = []
+ front_page = doc.pages[0]
+ width, height = front_page.width, front_page.height
+ x0, y0, x1, y1 = tuple([0, 0.25, 1, 0.95])
+ page_area = (x0 * width, y0 * height, x1 * width, y1 * height)
+ for page in doc.pages:
+ crop = page.within_bbox(page_area)
+ text = crop.extract_text()
+ lines = text.split('\n')
+ for line in lines:
+ rows.append(line)
+
+ # Get cities from the first column, used to identify the city for each line.
+ cities = []
+ city_area = MONTANA['licenses']['columns'][0]['area']
+ x0, y0, x1, y1 = tuple(city_area)
+ column_area = (x0 * width, y0 * height, x1 * width, y1 * height)
+ for page in doc.pages:
+ crop = page.within_bbox(column_area)
+ text = crop.extract_text()
+ lines = text.split('\n')
+ for line in lines:
+ cities.append(line)
+
+ # Find all of the unique cities.
+ cities = list(set(cities))
+ cities = [x for x in cities if x != 'City']
+
+ # Get all of the license data.
+ data = []
+ rows = [x for x in rows if not x.startswith('City')]
+ for row in rows:
+
+ # Get all of the license observation data.
+ obs = {}
+ text = str(row)
+
+ # Identify the city and remove the city from the name (only once b/c of DBAs!).
+ for city in cities:
+ if city in row:
+ obs['premise_city'] = city.title()
+ text = text.replace(city, '', 1).strip()
break
- if skip:
- continue
- rows.append(line)
-
-# Collect licensee data.
-licensees = []
-for row in rows:
-
- # FIXME: Rows with double-line text get cut-off.
- if '(' not in row:
- continue
-
- obs = {}
- if 'Adult Use' in row:
- parts = row.split('Adult Use')
- obs['license_type'] = 'Adult Use'
- else:
- parts = row.split('Medical Only')
- obs['license_type'] = 'Medical Only'
- obs['dba'] = parts[0].strip()
- obs['phone'] = parts[-1].strip()
- licensees.append(obs)
-
-# Get a list of Montana cities.
-cities = []
-# response = requests.get('http://www.mlct.org/', headers=DEFAULT_HEADERS)
-# soup = BeautifulSoup(response.content, 'html.parser')
-# table = soup.find('table')
-# for tr in table.findAll('tr'):
-# if not tr.text.strip().replace('\n', ''):
-# continue
-# city = tr.find('td').text
-# if '©' in city or ',' in city or '\n' in city or city == 'Home' or city == 'City':
-# continue
-# cities.append(city)
-
-# remove_lines = ['RESOURCES', 'Official State Website', 'State Legislature',
-# 'Chamber of Commerce', 'Contact Us']
-# for ele in remove_lines:
-# cities.remove(ele)
-
-# FIXME:
-url = 'https://dojmt.gov/wp-content/uploads/2011/05/mvmtcitiescountieszips.pdf'
-
-# TODO: Separate `city` from `dba` using list of Montana cities.
-for i, licensee in enumerate(licensees):
- dba = licensee['dba']
- city_found = False
- for city in cities:
- city_name = city.upper()
- if city_name in dba:
- licensees[i]['dba'] = dba.replace(city_name, '').strip()
- licensees[i]['city'] = city
- city_found = True
- break
- if not city_found:
- print("Couldn't identify city:", dba)
-
-# TODO: Remove duplicates.
-
-
-# TODO: Lookup the address of the licenses?
+
+ # Identify the license designation.
+ if 'Adult Use' in row:
+ parts = text.split('Adult Use')
+ obs['license_designation'] = 'Adult Use'
+ else:
+ parts = text.split('Medical Only')
+ obs['license_designation'] = 'Medical Only'
+
+ # Skip rows with double-row text.
+ if len(row) == 1: continue
+
+ # Record the name.
+ obs['business_legal_name'] = parts[0]
+
+ # Record the phone number.
+ if '(' in text:
+ obs['business_phone'] = parts[-1].strip()
+
+ # Record the observation.
+ data.append(obs)
+
+ # Aggregate the data.
+ retailers = pd.DataFrame(data)
+ retailers = retailers.loc[~retailers['premise_city'].isna()]
+
+ # Convert certain columns from upper case title case.
+ cols = ['business_legal_name', 'premise_city']
+ for col in cols:
+ retailers[col] = retailers[col].apply(
+ lambda x: x.title().replace('Llc', 'LLC').replace("'S", "'s").strip()
+ )
+
+ # Standardize the data.
+ retailers['id'] = retailers.index
+ retailers['license_number'] = None # FIXME: It would be awesome to find these!
+ retailers['licensing_authority_id'] = MONTANA['licensing_authority_id']
+ retailers['licensing_authority'] = MONTANA['licensing_authority']
+ retailers['premise_state'] = STATE
+ retailers['license_status'] = 'Active'
+ retailers['license_status_date'] = None
+ retailers['license_type'] = 'Commercial - Retailer'
+ retailers['license_term'] = None
+ retailers['issue_date'] = None
+ retailers['expiration_date'] = None
+ retailers['business_owner_name'] = None
+ retailers['business_structure'] = None
+ retailers['activity'] = None
+ retailers['parcel_number'] = None
+ retailers['business_email'] = None
+ retailers['business_image_url'] = None
+
+ # Separate any `business_dba_name` from `business_legal_name`.
+ retailers['business_dba_name'] = retailers['business_legal_name']
+ criterion = retailers['business_legal_name'].str.contains('Dba')
+ retailers.loc[criterion, 'business_dba_name'] = retailers.loc[criterion] \
+ ['business_legal_name'].apply(lambda x: x.split('Dba')[-1].strip())
+ retailers.loc[criterion, 'business_legal_name'] = retailers.loc[criterion] \
+ ['business_legal_name'].apply(lambda x: x.split('Dba')[0].strip())
+
+ # Search for address for each retail license.
+ # Only search for a query once, then re-use the response.
+ # Note: There is probably a much, much more efficient way to do this!!!
+ config = dotenv_values(env_file)
+ api_key = config['GOOGLE_MAPS_API_KEY']
+ cols = ['business_dba_name', 'premise_city', 'premise_state']
+ retailers['query'] = retailers[cols].apply(
+ lambda row: ', '.join(row.values.astype(str)),
+ axis=1,
+ )
+ queries = {}
+ fields = [
+ 'formatted_address',
+ 'geometry/location/lat',
+ 'geometry/location/lng',
+ 'website',
+ ]
+ retailers = retailers.reset_index(drop=True)
+ retailers = retailers.assign(
+ premise_street_address=None,
+ premise_county=None,
+ premise_zip_code=None,
+ premise_latitude=None,
+ premise_longitude=None,
+ business_website=None,
+ )
+ for index, row in retailers.iterrows():
+ query = row['query']
+ gis_data = queries.get(query)
+ if gis_data is None:
+ try:
+ gis_data = search_for_address(query, api_key=api_key, fields=fields)
+ except:
+ gis_data = {}
+ queries[query] = gis_data
+ retailers.iat[index, retailers.columns.get_loc('premise_street_address')] = gis_data.get('street')
+ retailers.iat[index, retailers.columns.get_loc('premise_county')] = gis_data.get('county')
+ retailers.iat[index, retailers.columns.get_loc('premise_zip_code')] = gis_data.get('zipcode')
+ retailers.iat[index, retailers.columns.get_loc('premise_latitude')] = gis_data.get('latitude')
+ retailers.iat[index, retailers.columns.get_loc('premise_longitude')] = gis_data.get('longitude')
+ retailers.iat[index, retailers.columns.get_loc('business_website')] = gis_data.get('website')
+
+ # Clean-up after getting GIS data.
+ retailers.drop(columns=['query'], inplace=True)
+
+ # Get the refreshed date.
+ retailers['data_refreshed_date'] = datetime.now().isoformat()
+
+ # Save and return the data.
+ if data_dir is not None:
+ if not os.path.exists(data_dir): os.makedirs(data_dir)
+ timestamp = datetime.now().isoformat()[:19].replace(':', '-')
+ retailers.to_csv(f'{data_dir}/retailers-{STATE.lower()}-{timestamp}.csv', index=False)
+ return retailers
+
+
+# === Test ===
+if __name__ == '__main__':
+
+ # Support command line usage.
+ import argparse
+ try:
+ arg_parser = argparse.ArgumentParser()
+ arg_parser.add_argument('--d', dest='data_dir', type=str)
+ arg_parser.add_argument('--data_dir', dest='data_dir', type=str)
+ arg_parser.add_argument('--env', dest='env_file', type=str)
+ args = vars(arg_parser.parse_args())
+ except SystemExit:
+ args = {'d': DATA_DIR, 'env_file': ENV_FILE}
+
+ # Get licenses, saving them to the specified directory.
+ data_dir = args.get('d', args.get('data_dir'))
+ env_file = args.get('env_file')
+ data = get_licenses_mt(data_dir, env_file=env_file)
diff --git a/algorithms/get_licenses_nj.py b/algorithms/get_licenses_nj.py
index bc81a6fa5905d727bc76ddadbe7e0cfc42fd358e..3023488cc65df8aff353fb8600bb3d0898fda510 100644
--- a/algorithms/get_licenses_nj.py
+++ b/algorithms/get_licenses_nj.py
@@ -92,6 +92,11 @@ def get_licenses_nj(
data['business_email'] = None
data['activity'] = None
data['parcel_number'] = None
+ data['business_image_url'] = None
+ data['id'] = None
+ data['license_number'] = None
+ data['license_status'] = None
+ data['data_refreshed_date'] = datetime.now().isoformat()
# Convert certain columns from upper case title case.
cols = ['premise_city', 'premise_county', 'premise_street_address']
@@ -102,7 +107,7 @@ def get_licenses_nj(
if data_dir is not None:
if not os.path.exists(data_dir): os.makedirs(data_dir)
timestamp = datetime.now().isoformat()[:19].replace(':', '-')
- data.to_excel(f'{data_dir}/licenses-{STATE.lower()}-{timestamp}.xlsx')
+ data.to_csv(f'{data_dir}/licenses-{STATE.lower()}-{timestamp}.csv', index=False)
return data
diff --git a/algorithms/get_licenses_nm.py b/algorithms/get_licenses_nm.py
index cf1d24f09454b26da6b40154738946e91e6a6cdf..5b7d125b4d60574b089d65f2a50a2fca82822184 100644
--- a/algorithms/get_licenses_nm.py
+++ b/algorithms/get_licenses_nm.py
@@ -6,7 +6,7 @@ Authors:
Keegan Skeate
Candace O'Sullivan-Sutherland
Created: 9/29/2022
-Updated: 9/29/2022
+Updated: 10/6/2022
License:
Description:
@@ -15,7 +15,295 @@ Description:
Data Source:
- - New Mexico
- URL: <>
+ - New Mexico Regulation and Licensing Department | Cannabis Control Division
+ URL:
-"""
\ No newline at end of file
+"""
+# Standard imports.
+from datetime import datetime
+import os
+from time import sleep
+from typing import Optional
+
+# External imports.
+from cannlytics.data.gis import geocode_addresses, search_for_address
+from dotenv import dotenv_values
+import pandas as pd
+
+# Selenium imports.
+from selenium import webdriver
+from selenium.webdriver.chrome.options import Options
+from selenium.webdriver.common.by import By
+from selenium.webdriver.chrome.service import Service
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait
+try:
+ import chromedriver_binary # Adds chromedriver binary to path.
+except ImportError:
+ pass # Otherwise, ChromeDriver should be in your path.
+
+
+# Specify where your data lives.
+DATA_DIR = '../data/nm'
+ENV_FILE = '../.env'
+
+# Specify state-specific constants.
+STATE = 'NM'
+NEW_MEXICO = {
+ 'licensing_authority_id': 'NMCCD',
+ 'licensing_authority': 'New Mexico Cannabis Control Division',
+ 'licenses_url': 'https://nmrldlpi.force.com/bcd/s/public-search-license?division=CCD&language=en_US',
+}
+
+
+def get_licenses_nm(
+ data_dir: Optional[str] = None,
+ env_file: Optional[str] = '.env',
+ ):
+ """Get New Mexico cannabis license data."""
+
+ # Create directories if necessary.
+ if data_dir is not None and not os.path.exists(data_dir): os.makedirs(data_dir)
+
+ # Initialize Selenium and specify options.
+ service = Service()
+ options = Options()
+ options.add_argument('--window-size=1920,1200')
+
+ # DEV: Run with the browser open.
+ options.headless = False
+
+ # PRODUCTION: Run with the browser closed.
+ # options.add_argument('--headless')
+ # options.add_argument('--disable-gpu')
+ # options.add_argument('--no-sandbox')
+
+ # Initiate a Selenium driver.
+ driver = webdriver.Chrome(options=options, service=service)
+
+ # Load the license page.
+ driver.get(NEW_MEXICO['licenses_url'])
+
+ # FIXME: Wait for the page to load by waiting to detect the image.
+ # try:
+ # el = (By.CLASS_NAME, 'slds-radio--faux')
+ # WebDriverWait(driver, 15).until(EC.presence_of_element_located(el))
+ # except TimeoutException:
+ # print('Failed to load page within %i seconds.' % (30))
+ sleep(5)
+
+ # Get the main content and click "License Type" radio.
+ content = driver.find_element(by=By.CLASS_NAME, value='siteforceContentArea')
+ radio = content.find_element(by=By.CLASS_NAME, value='slds-radio--faux')
+ radio.click()
+ sleep(2)
+
+ # Select retailers.
+ # TODO: Also get "Cannabis Manufacturer", "Cannabis Producer", and
+ # "Cannabis Producer Microbusiness".
+ search = content.find_element(by=By.ID, value='comboboxId-40')
+ search.click()
+ choices = content.find_elements(by=By.CLASS_NAME, value='slds-listbox__item')
+ for choice in choices:
+ if choice.text == 'Cannabis Retailer':
+ choice.click()
+ sleep(2)
+ break
+
+ # Click the search button.
+ search = content.find_element(by=By.CLASS_NAME, value='vlocity-btn')
+ search.click()
+ sleep(2)
+
+ # Iterate over all of the pages.
+ # Wait for the table to load, then iterate over the pages.
+ sleep(5)
+ data = []
+ iterate = True
+ while(iterate):
+
+ # Get all of the licenses.
+ items = content.find_elements(by=By.CLASS_NAME, value='block-container')
+ for item in items[3:]:
+ text = item.text
+ if not text:
+ continue
+ values = text.split('\n')
+ data.append({
+ 'license_type': values[0],
+ 'license_status': values[1],
+ 'business_legal_name': values[2],
+ 'address': values[-1],
+ 'details_url': '',
+ })
+
+ # Get the page number and stop at the last page.
+ # FIXME: This doesn't correctly break!
+ par = content.find_elements(by=By.TAG_NAME, value='p')[-1].text
+ page_number = int(par.split(' ')[2])
+ total_pages = int(par.split(' ')[-2])
+ if page_number == total_pages:
+ iterate = False
+
+ # Otherwise, click the next button.
+ buttons = content.find_elements(by=By.TAG_NAME, value='button')
+ for button in buttons:
+ if button.text == 'Next Page':
+ button.click()
+ sleep(5)
+ break
+
+ # Search for each license name, 1 by 1, to get details.
+ retailers = pd.DataFrame(columns=['business_legal_name'])
+ for i, licensee in enumerate(data):
+
+ # Skip recorded rows.
+ if len(retailers.loc[retailers['business_legal_name'] == licensee['business_legal_name']]):
+ continue
+
+ # Click the "Business Name" search field.
+ content = driver.find_element(by=By.CLASS_NAME, value='siteforceContentArea')
+ radio = content.find_elements(by=By.CLASS_NAME, value='slds-radio--faux')[1]
+ radio.click()
+ sleep(1)
+
+ # Enter the `business_legal_name` into the search.
+ search_field = content.find_element(by=By.CLASS_NAME, value='vlocity-input')
+ search_field.clear()
+ search_field.send_keys(licensee['business_legal_name'])
+
+ # Click the search button.
+ search = content.find_element(by=By.CLASS_NAME, value='vlocity-btn')
+ search.click()
+
+ # FIXME: Wait for the table to load.
+ # WebDriverWait(content, 5).until(EC.presence_of_element_located((By.CLASS_NAME, 'slds-button_icon')))
+ sleep(1.5)
+
+ # Click the "Action" button to get to the details page.
+ # FIXME: There can be multiple search candidates!
+ action = content.find_element(by=By.CLASS_NAME, value='slds-button_icon')
+ try:
+ action.click()
+ except:
+ continue # FIXME: Formally check if "No record found!".
+
+ # FIXME: Wait for the details page to load.
+ el = (By.CLASS_NAME, 'body')
+ WebDriverWait(driver, 5).until(EC.presence_of_element_located(el))
+
+ # Get the page
+ page = driver.find_element(by=By.CLASS_NAME, value='body')
+
+ # FIXME: Wait for the details to load!
+ # el = (By.TAG_NAME, 'vlocity_ins-omniscript-step')
+ # WebDriverWait(page, 5).until(EC.presence_of_element_located(el))
+ sleep(1.5)
+
+ # Get all of the details!
+ fields = [
+ 'license_number',
+ 'license_status',
+ 'issue_date',
+ 'expiration_date',
+ 'business_owner_name',
+ ]
+ values = page.find_elements(by=By.CLASS_NAME, value='field-value')
+ if len(values) > 5:
+ for j, value in enumerate(values[:5]):
+ data[i][fields[j]] = value.text
+ for value in values[5:]:
+ data[i]['business_owner_name'] += f', {value.text}'
+ else:
+ for j, value in enumerate(values):
+ data[i][fields[j]] = value.text
+
+ # Create multiple entries for each address!!!
+ premises = page.find_elements(by=By.CLASS_NAME, value='block-header')
+ for premise in premises:
+ values = premise.text.split('\n')
+ licensee['address'] = values[0].replace(',', ', ')
+ licensee['license_number'] = values[2]
+ retailers = pd.concat([retailers, pd.DataFrame([licensee])])
+
+ # Click the "Back to Search" button.
+ back_button = page.find_element(by=By.CLASS_NAME, value='vlocity-btn')
+ back_button.click()
+ sleep(1)
+
+ # End the browser session.
+ service.stop()
+
+ # Standardize the data, restricting to "Approved" retailers.
+ retailers = retailers.loc[retailers['license_status'] == 'Active']
+ retailers = retailers.assign(
+ business_email=None,
+ business_structure=None,
+ licensing_authority_id=NEW_MEXICO['licensing_authority_id'],
+ licensing_authority=NEW_MEXICO['licensing_authority'],
+ license_designation='Adult-Use',
+ license_status_date=None,
+ license_term=None,
+ premise_state=STATE,
+ parcel_number=None,
+ activity=None,
+ business_image_url=None,
+ business_website=None,
+ business_phone=None,
+ id=retailers['license_number'],
+ business_dba_name=retailers['business_legal_name'],
+ )
+
+ # Get the refreshed date.
+ retailers['data_refreshed_date'] = datetime.now().isoformat()
+
+ # Geocode licenses.
+ # FIXME: This is not working as intended. Perhaps try `search_for_address`?
+ config = dotenv_values(env_file)
+ api_key = config['GOOGLE_MAPS_API_KEY']
+ retailers = geocode_addresses(retailers, api_key=api_key, address_field='address')
+ retailers['premise_street_address'] = retailers['formatted_address'].apply(
+ lambda x: x.split(',')[0] if STATE in str(x) else x
+ )
+ retailers['premise_city'] = retailers['formatted_address'].apply(
+ lambda x: x.split(', ')[1].split(',')[0] if STATE in str(x) else x
+ )
+ retailers['premise_zip_code'] = retailers['formatted_address'].apply(
+ lambda x: x.split(', ')[2].split(',')[0].split(' ')[-1] if STATE in str(x) else x
+ )
+ drop_cols = ['state', 'state_name', 'address', 'formatted_address',
+ 'details_url']
+ gis_cols = {
+ 'county': 'premise_county',
+ 'latitude': 'premise_latitude',
+ 'longitude': 'premise_longitude'
+ }
+ retailers.drop(columns=drop_cols, inplace=True)
+ retailers.rename(columns=gis_cols, inplace=True)
+
+ # Save and return the data.
+ if data_dir is not None:
+ if not os.path.exists(data_dir): os.makedirs(data_dir)
+ timestamp = datetime.now().isoformat()[:19].replace(':', '-')
+ retailers.to_csv(f'{data_dir}/retailers-{STATE.lower()}-{timestamp}.csv', index=False)
+ return retailers
+
+
+# === Test ===
+if __name__ == '__main__':
+
+ # Support command line usage.
+ import argparse
+ try:
+ arg_parser = argparse.ArgumentParser()
+ arg_parser.add_argument('--d', dest='data_dir', type=str)
+ arg_parser.add_argument('--data_dir', dest='data_dir', type=str)
+ arg_parser.add_argument('--env', dest='env_file', type=str)
+ args = vars(arg_parser.parse_args())
+ except SystemExit:
+ args = {'d': DATA_DIR, 'env_file': ENV_FILE}
+
+ # Get licenses, saving them to the specified directory.
+ data_dir = args.get('d', args.get('data_dir'))
+ env_file = args.get('env_file')
+ data = get_licenses_nm(data_dir, env_file=env_file)
diff --git a/algorithms/get_licenses_nv.py b/algorithms/get_licenses_nv.py
index 8aac1620adf2af2ce03a737cea21fbd29a2af498..15f79ec3e0ff748fe4727331a2063df2815a8f27 100644
--- a/algorithms/get_licenses_nv.py
+++ b/algorithms/get_licenses_nv.py
@@ -93,6 +93,7 @@ def get_licenses_nv(
# Extract and standardize the data from the workbook.
licenses = pd.read_excel(licenses_source_file, skiprows=1)
licenses.rename(columns=NEVADA['licenses']['columns'], inplace=True)
+ licenses['id'] = licenses['license_number']
licenses['licensing_authority_id'] = NEVADA['licensing_authority_id']
licenses['licensing_authority'] = NEVADA['licensing_authority']
licenses['license_designation'] = 'Adult-Use'
@@ -107,6 +108,9 @@ def get_licenses_nv(
licenses['business_email'] = None
licenses['activity'] = None
licenses['parcel_number'] = None
+ licenses['business_image_url'] = None
+ licenses['business_phone'] = None
+ licenses['business_website'] = None
# Convert certain columns from upper case title case.
cols = ['business_dba_name', 'premise_county']
@@ -123,7 +127,7 @@ def get_licenses_nv(
# Save the licenses
if data_dir is not None:
timestamp = datetime.now().isoformat()[:19].replace(':', '-')
- licenses.to_excel(f'{data_dir}/licenses-{STATE.lower()}-{timestamp}.xlsx')
+ licenses.to_csv(f'{data_dir}/licenses-{STATE.lower()}-{timestamp}.csv', index=False)
#--------------------------------------------------------------------------
# Get retailer data
@@ -168,6 +172,15 @@ def get_licenses_nv(
retailers['business_email'] = None
retailers['activity'] = None
retailers['parcel_number'] = None
+ retailers['business_website'] = None
+ retailers['business_image_url'] = None
+ retailers['business_phone'] = None
+
+ # FIXME: Merge `license_number`, `premise_county`, `data_refreshed_date`
+ # from licenses.
+ retailers['license_number'] = None
+ retailers['id'] = None
+ retailers['data_refreshed_date'] = datetime.now().isoformat()
# Geocode the retailers.
config = dotenv_values(env_file)
@@ -182,20 +195,22 @@ def get_licenses_nv(
api_key=google_maps_api_key,
address_field='address',
)
- drop_cols = ['state', 'state_name', 'county', 'address', 'formatted_address']
- retailers.drop(columns=drop_cols, inplace=True)
+ drop_cols = ['state', 'state_name', 'address', 'formatted_address']
gis_cols = {
+ 'county': 'premise_county',
'latitude': 'premise_latitude',
'longitude': 'premise_longitude'
}
+ retailers['premise_zip_code'] = retailers['formatted_address'].apply(
+ lambda x: x.split(', ')[2].split(',')[0].split(' ')[-1] if STATE in str(x) else x
+ )
+ retailers.drop(columns=drop_cols, inplace=True)
retailers.rename(columns=gis_cols, inplace=True)
- # Future work: Merge the retailers with the licenses data?
-
# Save the retailers
if data_dir is not None:
timestamp = datetime.now().isoformat()[:19].replace(':', '-')
- retailers.to_excel(f'{data_dir}/retailers-{STATE.lower()}-{timestamp}.xlsx')
+ retailers.to_csv(f'{data_dir}/retailers-{STATE.lower()}-{timestamp}.csv', index=False)
# Return all of the data.
return pd.concat([licenses, retailers])
diff --git a/algorithms/get_licenses_or.py b/algorithms/get_licenses_or.py
index d034b165a12c8af0c6537fc6372cfaa7faf6fab8..0d770ac72930271b71819daecdfd4db1fc4664b3 100644
--- a/algorithms/get_licenses_or.py
+++ b/algorithms/get_licenses_or.py
@@ -6,7 +6,7 @@ Authors:
Keegan Skeate
Candace O'Sullivan-Sutherland
Created: 9/28/2022
-Updated: 9/28/2022
+Updated: 10/7/2022
License:
Description:
@@ -53,6 +53,10 @@ OREGON = {
'Med Grade': 'medicinal',
'Delivery': 'delivery',
},
+ 'drop_columns': [
+ 'medicinal',
+ 'delivery',
+ ],
},
}
@@ -90,12 +94,26 @@ def get_licenses_or(
data['license_designation'] = 'Adult-Use'
data['premise_state'] = 'OR'
data.loc[data['medicinal'] == 'Yes', 'license_designation'] = 'Adult-Use and Medicinal'
-
- # Convert `medicinal` and `delivery` columns to boolean.
- data['medicinal'] = data['medicinal'].map(dict(Yes=1))
- data['delivery'] = data['delivery'].map(dict(Yes=1))
- data['medicinal'].fillna(0, inplace=True)
- data['delivery'].fillna(0, inplace=True)
+ data['business_image_url'] = None
+ data['license_status_date'] = None
+ data['license_term'] = None
+ data['issue_date'] = None
+ data['expiration_date'] = None
+ data['business_email'] = None
+ data['business_owner_name'] = None
+ data['business_structure'] = None
+ data['business_website'] = None
+ data['activity'] = None
+ data['business_phone'] = None
+ data['parcel_number'] = None
+ data['business_legal_name'] = data['business_dba_name']
+
+ # Optional: Convert `medicinal` and `delivery` columns to boolean.
+ # data['medicinal'] = data['medicinal'].map(dict(Yes=1))
+ # data['delivery'] = data['delivery'].map(dict(Yes=1))
+ # data['medicinal'].fillna(0, inplace=True)
+ # data['delivery'].fillna(0, inplace=True)
+ data.drop(columns=['medicinal', 'delivery'], inplace=True)
# Convert certain columns from upper case title case.
cols = ['business_dba_name', 'premise_city', 'premise_county',
@@ -138,6 +156,7 @@ def get_licenses_or(
}
data.rename(columns=columns, inplace=True)
data.drop(columns=['BUSINESS NAME', 'COUNTY'], inplace=True)
+ data['id'] = data['license_number']
# Geocode licenses to get `premise_latitude` and `premise_longitude`.
config = dotenv_values(env_file)
@@ -171,7 +190,7 @@ def get_licenses_or(
# Save the license data.
if data_dir is not None:
timestamp = datetime.now().isoformat()[:19].replace(':', '-')
- data.to_excel(f'{data_dir}/licenses-or-{timestamp}.xlsx')
+ data.to_csv(f'{data_dir}/licenses-or-{timestamp}.csv', index=False)
return data
diff --git a/algorithms/get_licenses_ri.py b/algorithms/get_licenses_ri.py
index 15ba4d2bc786852e309ce2ab63af481da72f5191..fea1ffa1517bab406884f04bbfe3083a824e0e93 100644
--- a/algorithms/get_licenses_ri.py
+++ b/algorithms/get_licenses_ri.py
@@ -6,7 +6,7 @@ Authors:
Keegan Skeate
Candace O'Sullivan-Sutherland
Created: 9/29/2022
-Updated: 9/29/2022
+Updated: 10/3/2022
License:
Description:
@@ -18,4 +18,162 @@ Data Source:
- Rhode Island
URL:
-"""
\ No newline at end of file
+"""
+# Standard imports.
+from datetime import datetime
+import os
+from typing import Optional
+
+# External imports.
+from bs4 import BeautifulSoup
+from cannlytics.data.gis import geocode_addresses
+from dotenv import dotenv_values
+import pandas as pd
+import requests
+
+
+# Specify where your data lives.
+DATA_DIR = '../data/ri'
+ENV_FILE = '../.env'
+
+# Specify state-specific constants.
+STATE = 'RI'
+RHODE_ISLAND = {
+ 'licensing_authority_id': 'RIDBH',
+ 'licensing_authority': 'Rhode Island Department of Business Regulation',
+ 'retailers': {
+ 'url': 'https://dbr.ri.gov/office-cannabis-regulation/compassion-centers/licensed-compassion-centers',
+ 'columns': [
+ 'license_number',
+ 'business_legal_name',
+ 'address',
+ 'business_phone',
+ 'license_designation',
+ ],
+ }
+}
+
+
+def get_licenses_ri(
+ data_dir: Optional[str] = None,
+ env_file: Optional[str] = '.env',
+ ):
+ """Get Rhode Island cannabis license data."""
+
+ # Get the licenses webpage.
+ url = RHODE_ISLAND['retailers']['url']
+ response = requests.get(url)
+ soup = BeautifulSoup(response.content, 'html.parser')
+
+ # Parse the table data.
+ data = []
+ columns = RHODE_ISLAND['retailers']['columns']
+ table = soup.find('table')
+ rows = table.find_all('tr')
+ for row in rows[1:]:
+ cells = row.find_all('td')
+ obs = {}
+ for i, cell in enumerate(cells):
+ column = columns[i]
+ obs[column] = cell.text
+ data.append(obs)
+
+ # Optional: It's possible to download the certificate to get it's `issue_date`.
+
+ # Standardize the license data.
+ retailers = pd.DataFrame(data)
+ retailers['id'] = retailers['license_number']
+ retailers['licensing_authority_id'] = RHODE_ISLAND['licensing_authority_id']
+ retailers['licensing_authority'] = RHODE_ISLAND['licensing_authority']
+ retailers['premise_state'] = STATE
+ retailers['license_type'] = 'Commercial - Retailer'
+ retailers['license_status'] = 'Active'
+ retailers['license_status_date'] = None
+ retailers['license_term'] = None
+ retailers['issue_date'] = None
+ retailers['expiration_date'] = None
+ retailers['business_owner_name'] = None
+ retailers['business_structure'] = None
+ retailers['business_email'] = None
+ retailers['activity'] = None
+ retailers['parcel_number'] = None
+ retailers['business_image_url'] = None
+ retailers['business_website'] = None
+
+ # Correct `license_designation`.
+ coding = dict(Yes='Adult Use and Cultivation', No='Adult Use')
+ retailers['license_designation'] = retailers['license_designation'].map(coding)
+
+ # Correct `business_dba_name`.
+ criterion = retailers['business_legal_name'].str.contains('D/B/A')
+ retailers['business_dba_name'] = retailers['business_legal_name']
+ retailers.loc[criterion, 'business_dba_name'] = retailers['business_legal_name'].apply(
+ lambda x: x.split('D/B/A')[1].strip() if 'D/B/A' in x else x
+ )
+ retailers.loc[criterion, 'business_legal_name'] = retailers['business_legal_name'].apply(
+ lambda x: x.split('D/B/A')[0].strip()
+ )
+ criterion = retailers['business_legal_name'].str.contains('F/K/A')
+ retailers.loc[criterion, 'business_dba_name'] = retailers['business_legal_name'].apply(
+ lambda x: x.split('F/K/A')[1].strip() if 'F/K/A' in x else x
+ )
+ retailers.loc[criterion, 'business_legal_name'] = retailers['business_legal_name'].apply(
+ lambda x: x.split('F/K/A')[0].strip()
+ )
+
+ # Get the refreshed date.
+ par = soup.find_all('p')[-1]
+ date = par.text.split('updated on ')[-1].split('.')[0]
+ retailers['data_refreshed_date'] = pd.to_datetime(date).isoformat()
+
+ # Geocode the licenses.
+ config = dotenv_values(env_file)
+ google_maps_api_key = config['GOOGLE_MAPS_API_KEY']
+ retailers = geocode_addresses(
+ retailers,
+ api_key=google_maps_api_key,
+ address_field='address',
+ )
+ retailers['premise_street_address'] = retailers['formatted_address'].apply(
+ lambda x: x.split(',')[0]
+ )
+ retailers['premise_city'] = retailers['formatted_address'].apply(
+ lambda x: x.split(', ')[1].split(',')[0]
+ )
+ retailers['premise_zip_code'] = retailers['formatted_address'].apply(
+ lambda x: x.split(', ')[2].split(',')[0].split(' ')[-1]
+ )
+ drop_cols = ['state', 'state_name', 'address', 'formatted_address']
+ retailers.drop(columns=drop_cols, inplace=True)
+ gis_cols = {
+ 'county': 'premise_county',
+ 'latitude': 'premise_latitude',
+ 'longitude': 'premise_longitude'
+ }
+ retailers.rename(columns=gis_cols, inplace=True)
+
+ # Save and return the data.
+ if data_dir is not None:
+ if not os.path.exists(data_dir): os.makedirs(data_dir)
+ timestamp = datetime.now().isoformat()[:19].replace(':', '-')
+ retailers.to_csv(f'{data_dir}/licenses-{STATE.lower()}-{timestamp}.csv', index=False)
+ return retailers
+
+
+if __name__ == '__main__':
+
+ # Support command line usage.
+ import argparse
+ try:
+ arg_parser = argparse.ArgumentParser()
+ arg_parser.add_argument('--d', dest='data_dir', type=str)
+ arg_parser.add_argument('--data_dir', dest='data_dir', type=str)
+ arg_parser.add_argument('--env', dest='env_file', type=str)
+ args = vars(arg_parser.parse_args())
+ except SystemExit:
+ args = {'d': DATA_DIR, 'env_file': ENV_FILE}
+
+ # Get licenses, saving them to the specified directory.
+ data_dir = args.get('d', args.get('data_dir'))
+ env_file = args.get('env_file')
+ data = get_licenses_ri(data_dir, env_file=env_file)
diff --git a/algorithms/get_licenses_vt.py b/algorithms/get_licenses_vt.py
index a09971f356bf981b90463993671a09e4b17b5a74..71e3142cb61270c123f0017a778208b031074706 100644
--- a/algorithms/get_licenses_vt.py
+++ b/algorithms/get_licenses_vt.py
@@ -6,7 +6,7 @@ Authors:
Keegan Skeate
Candace O'Sullivan-Sutherland
Created: 9/29/2022
-Updated: 9/29/2022
+Updated: 10/7/2022
License:
Description:
@@ -18,4 +18,236 @@ Data Source:
- Vermont
URL:
-"""
\ No newline at end of file
+"""
+# Standard imports.
+from datetime import datetime
+import os
+from typing import Optional
+
+# External imports.
+from bs4 import BeautifulSoup
+from cannlytics.data.gis import geocode_addresses
+from dotenv import dotenv_values
+import pandas as pd
+import requests
+
+
+# Specify where your data lives.
+DATA_DIR = '../data/vt'
+ENV_FILE = '../.env'
+
+# Specify state-specific constants.
+STATE = 'VT'
+VERMONT = {
+ 'licensing_authority_id': 'VTCCB',
+ 'licensing_authority': 'Vermont Cannabis Control Board',
+ 'licenses_url': 'https://ccb.vermont.gov/licenses',
+ 'licenses': {
+ 'licensedcultivators': {
+ 'columns': [
+ 'business_legal_name',
+ 'license_type',
+ 'address',
+ 'license_designation',
+ ],
+ },
+ 'outdoorcultivators': {
+ 'columns': [
+ 'business_legal_name',
+ 'license_type',
+ 'premise_city',
+ 'license_designation',
+ ],
+ },
+ 'mixedcultivators': {
+ 'columns': [
+ 'business_legal_name',
+ 'license_type',
+ 'premise_city',
+ 'license_designation',
+ ],
+ },
+ 'testinglaboratories': {
+ 'columns': [
+ 'business_legal_name',
+ 'license_type',
+ 'premise_city',
+ 'license_designation',
+ 'address'
+ ],
+ },
+ 'integrated': {
+ 'columns': [
+ 'business_legal_name',
+ 'license_type',
+ 'premise_city',
+ 'license_designation',
+ ],
+ },
+ 'retailers': {
+ 'columns': [
+ 'business_legal_name',
+ 'license_type',
+ 'address',
+ 'license_designation',
+ ],
+ },
+ 'manufacturers': {
+ 'columns': [
+ 'business_legal_name',
+ 'license_type',
+ 'premise_city',
+ 'license_designation',
+ ],
+ },
+ 'wholesalers': {
+ 'columns': [
+ 'business_legal_name',
+ 'license_type',
+ 'premise_city',
+ 'license_designation',
+ ],
+ },
+ },
+}
+
+
+def get_licenses_vt(
+ data_dir: Optional[str] = None,
+ env_file: Optional[str] = '.env',
+ ):
+ """Get Vermont cannabis license data."""
+
+ # Get the licenses from the webpage.
+ url = VERMONT['licenses_url']
+ response = requests.get(url)
+ soup = BeautifulSoup(response.content, 'html.parser')
+
+ # Parse the various table types.
+ data = []
+ for license_type, values in VERMONT['licenses'].items():
+ columns = values['columns']
+ table = soup.find(attrs={'id': f'block-{license_type}'})
+ rows = table.find_all('tr')
+ for row in rows[1:]:
+ cells = row.find_all('td')
+ obs = {}
+ for i, cell in enumerate(cells):
+ column = columns[i]
+ obs[column] = cell.text
+ data.append(obs)
+
+ # Standardize the licenses.
+ licenses = pd.DataFrame(data)
+ licenses['id'] = licenses.index
+ licenses['license_number'] = None # FIXME: It would be awesome to find these!
+ licenses['licensing_authority_id'] = VERMONT['licensing_authority_id']
+ licenses['licensing_authority'] = VERMONT['licensing_authority']
+ licenses['license_designation'] = 'Adult-Use'
+ licenses['premise_state'] = STATE
+ licenses['license_status'] = None
+ licenses['license_status_date'] = None
+ licenses['license_term'] = None
+ licenses['issue_date'] = None
+ licenses['expiration_date'] = None
+ licenses['business_owner_name'] = None
+ licenses['business_structure'] = None
+ licenses['activity'] = None
+ licenses['parcel_number'] = None
+ licenses['business_phone'] = None
+ licenses['business_email'] = None
+ licenses['business_image_url'] = None
+ licenses['business_website'] = None
+
+ # Separate the `license_designation` from `license_type` if (Tier x).
+ criterion = licenses['license_type'].str.contains('Tier ')
+ licenses.loc[criterion, 'license_designation'] = licenses.loc[criterion]['license_type'].apply(
+ lambda x: 'Tier ' + x.split('(Tier ')[1].rstrip(')')
+ )
+ licenses.loc[criterion, 'license_type'] = licenses.loc[criterion]['license_type'].apply(
+ lambda x: x.split('(Tier ')[0].strip()
+ )
+
+ # Separate labs' `business_email` and `business_phone` from the `address`.
+ criterion = licenses['license_type'] == 'Testing Lab'
+ licenses.loc[criterion, 'business_email'] = licenses.loc[criterion]['address'].apply(
+ lambda x: x.split('Email: ')[-1].rstrip('\n') if isinstance(x, str) else x
+ )
+ licenses.loc[criterion, 'business_phone'] = licenses.loc[criterion]['address'].apply(
+ lambda x: x.split('Phone: ')[-1].split('Email: ')[0].rstrip('\n') if isinstance(x, str) else x
+ )
+ licenses.loc[criterion, 'address'] = licenses.loc[criterion]['address'].apply(
+ lambda x: x.split('Phone: ')[0].replace('\n', ' ').strip() if isinstance(x, str) else x
+ )
+
+ # Split any DBA from the legal name.
+ splits = [';', 'DBA - ', '(DBA)', 'DBA ', 'dba ']
+ licenses['business_dba_name'] = licenses['business_legal_name']
+ for split in splits:
+ criterion = licenses['business_legal_name'].str.contains(split, regex=False)
+ licenses.loc[criterion, 'business_dba_name'] = licenses.loc[criterion]['business_legal_name'].apply(
+ lambda x: x.split(split)[1].replace(')', '').strip() if split in x else x
+ )
+ licenses.loc[criterion, 'business_legal_name'] = licenses.loc[criterion]['business_legal_name'].apply(
+ lambda x: x.split(split)[0].replace('(', '').strip()
+ )
+ licenses.loc[licenses['business_legal_name'] == '', 'business_legal_name'] = licenses['business_dba_name']
+
+ # Get the refreshed date.
+ licenses['data_refreshed_date'] = datetime.now().isoformat()
+
+ # Geocode the licenses.
+ # FIXME: There are some wonky addresses that are output!
+ config = dotenv_values(env_file)
+ google_maps_api_key = config['GOOGLE_MAPS_API_KEY']
+ licenses = geocode_addresses(
+ licenses,
+ api_key=google_maps_api_key,
+ address_field='address',
+ )
+ licenses['premise_street_address'] = licenses['formatted_address'].apply(
+ lambda x: x.split(',')[0] if STATE in str(x) else x
+ )
+ licenses['premise_city'] = licenses['formatted_address'].apply(
+ lambda x: x.split(', ')[1].split(',')[0] if STATE in str(x) else x
+ )
+ licenses['premise_zip_code'] = licenses['formatted_address'].apply(
+ lambda x: x.split(', ')[2].split(',')[0].split(' ')[-1] if STATE in str(x) else x
+ )
+ drop_cols = ['state', 'state_name', 'address', 'formatted_address']
+ licenses.drop(columns=drop_cols, inplace=True)
+ gis_cols = {
+ 'county': 'premise_county',
+ 'latitude': 'premise_latitude',
+ 'longitude': 'premise_longitude'
+ }
+ licenses.rename(columns=gis_cols, inplace=True)
+
+ # Save and return the data.
+ if data_dir is not None:
+ if not os.path.exists(data_dir): os.makedirs(data_dir)
+ timestamp = datetime.now().isoformat()[:19].replace(':', '-')
+ retailers = licenses.loc[licenses['license_type'] == 'Retail']
+ licenses.to_csv(f'{data_dir}/licenses-{STATE.lower()}-{timestamp}.csv', index=False)
+ retailers.to_csv(f'{data_dir}/retailers-{STATE.lower()}-{timestamp}.csv', index=False)
+ return licenses
+
+
+# === Test ===
+if __name__ == '__main__':
+
+ # Support command line usage.
+ import argparse
+ try:
+ arg_parser = argparse.ArgumentParser()
+ arg_parser.add_argument('--d', dest='data_dir', type=str)
+ arg_parser.add_argument('--data_dir', dest='data_dir', type=str)
+ arg_parser.add_argument('--env', dest='env_file', type=str)
+ args = vars(arg_parser.parse_args())
+ except SystemExit:
+ args = {'d': DATA_DIR, 'env_file': ENV_FILE}
+
+ # Get licenses, saving them to the specified directory.
+ data_dir = args.get('d', args.get('data_dir'))
+ env_file = args.get('env_file')
+ data = get_licenses_vt(data_dir, env_file=env_file)
diff --git a/algorithms/get_licenses_wa.py b/algorithms/get_licenses_wa.py
index 817cf2b1e9a7d1ce683ce2c0bfaa4d002dc54304..4ba2e75fb0111768c5e9b20f7ad720983d2cf716 100644
--- a/algorithms/get_licenses_wa.py
+++ b/algorithms/get_licenses_wa.py
@@ -6,7 +6,7 @@ Authors:
Keegan Skeate
Candace O'Sullivan-Sutherland
Created: 9/29/2022
-Updated: 9/29/2022
+Updated: 10/7/2022
License:
Description:
@@ -41,6 +41,49 @@ STATE = 'WA'
WASHINGTON = {
'licensing_authority_id': 'WSLCB',
'licensing_authority': 'Washington State Liquor and Cannabis Board',
+ 'licenses_urls': 'https://lcb.wa.gov/records/frequently-requested-lists',
+ 'labs': {
+ 'key': 'Lab-List',
+ 'columns': {
+ 'Lab Name': 'business_legal_name',
+ 'Lab #': 'license_number',
+ 'Address 1': 'premise_street_address',
+ 'Address 2': 'premise_street_address_2',
+ 'City': 'premise_city',
+ 'Zip': 'premise_zip_code',
+ 'Phone': 'business_phone',
+ 'Status': 'license_status',
+ 'Certification Date': 'issue_date',
+ },
+ 'drop_columns': [
+ 'Pesticides',
+ 'Heavy Metals',
+ 'Mycotoxins',
+ 'Water Activity',
+ 'Terpenes',
+ ],
+ },
+ 'medical': {
+ 'key': 'MedicalCannabisEndorsements',
+ 'columns': {
+ 'License': 'license_number',
+ 'UBI': 'id',
+ 'Tradename': 'business_dba_name',
+ 'Privilege': 'license_type',
+ 'Status': 'license_status',
+ 'Med Privilege Code': 'license_designation',
+ 'Termination Code': 'license_term',
+ 'Street Adress': 'premise_street_address',
+ 'Suite Rm': 'premise_street_address_2',
+ 'City': 'premise_city',
+ 'State': 'premise_state',
+ 'County': 'premise_county',
+ 'Zip Code': 'premise_zip_code',
+ 'Date Created': 'issue_date',
+ 'Day Phone': 'business_phone',
+ 'Email': 'business_email',
+ },
+ },
'retailers': {
'key': 'CannabisApplicants',
'columns': {
@@ -57,10 +100,27 @@ WASHINGTON = {
'Privilege Status': 'license_status',
'Day Phone': 'business_phone',
},
- }
+ },
}
+def download_file(url, dest='./', headers=None):
+ """Download a file from a given URL to a local destination.
+ Args:
+ url (str): The URL of the data file.
+ dest (str): The destination for the data file, `./` by default (optional).
+ headers (dict): HTTP headers, `None` by default (optional).
+ Returns:
+ (str): The location for the data file.
+ """
+ filename = url.split('/')[-1]
+ data_file = os.path.join(dest, filename)
+ response = requests.get(url, headers=headers)
+ with open(data_file, 'wb') as doc:
+ doc.write(response.content)
+ return data_file
+
+
def get_licenses_wa(
data_dir: Optional[str] = None,
env_file: Optional[str] = '.env',
@@ -74,52 +134,75 @@ def get_licenses_wa(
# Get the URLs for the license workbooks.
labs_url, medical_url, retailers_url = None, None, None
- url = 'https://lcb.wa.gov/records/frequently-requested-lists'
+ labs_key = WASHINGTON['labs']['key']
+ medical_key = WASHINGTON['medical']['key']
+ retailers_key = WASHINGTON['retailers']['key']
+ url = WASHINGTON['licenses_urls']
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
links = soup.find_all('a')
for link in links:
href = link['href']
- if 'Lab-List' in href:
+ if labs_key in href:
labs_url = href
- elif 'CannabisApplicants' in href:
+ elif retailers_key in href:
retailers_url = href
- elif 'MedicalCannabisEndorsements' in href:
+ elif medical_key in href:
medical_url = href
break
- # TODO: Also download and collect lab + medical dispensary data.
-
- # Download the licenses workbook.
- filename = retailers_url.split('/')[-1]
- retailers_source_file = os.path.join(file_dir, filename)
- response = requests.get(retailers_url)
- with open(retailers_source_file, 'wb') as doc:
- doc.write(response.content)
+ # Download the workbooks.
+ lab_source_file = download_file(labs_url, dest=file_dir)
+ medical_source_file = download_file(medical_url, dest=file_dir)
+ retailers_source_file = download_file(retailers_url, dest=file_dir)
# Extract and standardize the data from the workbook.
retailers = pd.read_excel(retailers_source_file)
retailers.rename(columns=WASHINGTON['retailers']['columns'], inplace=True)
- retailers['licensing_authority_id'] = WASHINGTON['licensing_authority_id']
- retailers['licensing_authority'] = WASHINGTON['licensing_authority']
retailers['license_designation'] = 'Adult-Use'
- retailers['premise_state'] = STATE
- retailers['license_status_date'] = None
- retailers['license_term'] = None
- retailers['issue_date'] = None
- retailers['expiration_date'] = None
- retailers['business_legal_name'] = retailers['business_dba_name']
- retailers['business_owner_name'] = None
- retailers['business_structure'] = None
- retailers['business_email'] = None
- retailers['activity'] = None
- retailers['parcel_number'] = None
+ retailers['license_type'] = 'Adult-Use Retailer'
+
+ labs = pd.read_excel(lab_source_file)
+ labs.rename(columns=WASHINGTON['labs']['columns'], inplace=True)
+ labs.drop(columns=WASHINGTON['labs']['drop_columns'], inplace=True)
+ labs['license_type'] = 'Lab'
+
+ medical = pd.read_excel(medical_source_file, skiprows=2)
+ medical.rename(columns=WASHINGTON['medical']['columns'], inplace=True)
+ medical['license_designation'] = 'Medicinal'
+ medical['license_type'] = 'Medical Retailer'
+
+ # Aggregate the licenses.
+ licenses = pd.concat([retailers, medical, labs])
+
+ # Standardize all of the licenses at once!
+ licenses = licenses.assign(
+ licensing_authority_id=WASHINGTON['licensing_authority_id'],
+ licensing_authority=WASHINGTON['licensing_authority'],
+ premise_state=STATE,
+ license_status_date=None,
+ expiration_date=None,
+ activity=None,
+ parcel_number=None,
+ business_owner_name=None,
+ business_structure=None,
+ business_image_url=None,
+ business_website=None,
+ )
+
+ # Fill legal and DBA names.
+ licenses['id'].fillna(licenses['license_number'], inplace=True)
+ licenses['business_legal_name'].fillna(licenses['business_dba_name'], inplace=True)
+ licenses['business_dba_name'].fillna(licenses['business_legal_name'], inplace=True)
+ cols = ['business_legal_name', 'business_dba_name']
+ for col in cols:
+ licenses[col] = licenses[col].apply(
+ lambda x: x.title().replace('Llc', 'LLC').replace("'S", "'s").strip()
+ )
# Keep only active licenses.
- retailers = retailers.loc[
- (retailers['license_status'] == 'ACTIVE (ISSUED)') |
- (retailers['license_status'] == 'ACTIVE TITLE CERTIFICATE')
- ]
+ license_statuses = ['Active', 'ACTIVE (ISSUED)', 'ACTIVE TITLE CERTIFICATE',]
+ licenses = licenses.loc[licenses['license_status'].isin(license_statuses)]
# Convert certain columns from upper case title case.
cols = ['business_dba_name', 'premise_city', 'premise_county',
@@ -130,38 +213,42 @@ def get_licenses_wa(
# Get the refreshed date.
date = retailers_source_file.split('\\')[-1].split('.')[0]
date = date.replace('CannabisApplicants', '')
- date = date[:2] + '-' + date[2:4] + '-' + date[4:]
- retailers['data_refreshed_date'] = pd.to_datetime(date).isoformat()
-
- # FIXME: Append `premise_street_address_2` to `premise_street_address`.
+ date = date[:2] + '-' + date[2:4] + '-' + date[4:8]
+ licenses['data_refreshed_date'] = pd.to_datetime(date).isoformat()
+ # Append `premise_street_address_2` to `premise_street_address`.
+ cols = ['premise_street_address', 'premise_street_address_2']
+ licenses['premise_street_address'] = licenses[cols].apply(
+ lambda x : '{} {}'.format(x[0].strip(), x[1]).replace('nan', '').strip().replace(' ', ' '),
+ axis=1,
+ )
+ licenses.drop(columns=['premise_street_address_2'], inplace=True)
# Geocode licenses to get `premise_latitude` and `premise_longitude`.
config = dotenv_values(env_file)
- google_maps_api_key = config['GOOGLE_MAPS_API_KEY']
+ api_key = config['GOOGLE_MAPS_API_KEY']
cols = ['premise_street_address', 'premise_city', 'premise_state',
'premise_zip_code']
- retailers['address'] = retailers[cols].apply(
+ licenses['address'] = licenses[cols].apply(
lambda row: ', '.join(row.values.astype(str)),
axis=1,
)
- retailers = geocode_addresses(
- retailers,
- api_key=google_maps_api_key,
- address_field='address',
- )
+ licenses = geocode_addresses(licenses, address_field='address', api_key=api_key)
drop_cols = ['state', 'state_name', 'county', 'address', 'formatted_address']
- retailers.drop(columns=drop_cols, inplace=True)
- gis_cols = {
- 'latitude': 'premise_latitude',
- 'longitude': 'premise_longitude'
- }
- retailers.rename(columns=gis_cols, inplace=True)
+ gis_cols = {'latitude': 'premise_latitude', 'longitude': 'premise_longitude'}
+ licenses.drop(columns=drop_cols, inplace=True)
+ licenses.rename(columns=gis_cols, inplace=True)
+
+ # TODO: Search for business website and image.
# Save and return the data.
if data_dir is not None:
timestamp = datetime.now().isoformat()[:19].replace(':', '-')
- retailers.to_excel(f'{data_dir}/licenses-{STATE.lower()}-{timestamp}.xlsx')
+ licenses.to_csv(f'{data_dir}/licenses-{STATE.lower()}-{timestamp}.csv', index=False)
+ retailers = licenses.loc[licenses['license_type'] == 'Adult-Use Retailer']
+ retailers.to_csv(f'{data_dir}/retailers-{STATE.lower()}-{timestamp}.csv', index=False)
+ labs = licenses.loc[licenses['license_type'] == 'Lab']
+ labs.to_csv(f'{data_dir}/labs-{STATE.lower()}-{timestamp}.csv', index=False)
return retailers
diff --git a/algorithms/main.py b/algorithms/main.py
index 4459846774069dfe940c20d44599ce0592914d6a..bada08779537690854f37596f69e3eacc6a8cbbb 100644
--- a/algorithms/main.py
+++ b/algorithms/main.py
@@ -6,43 +6,89 @@ Authors:
Keegan Skeate
Candace O'Sullivan-Sutherland
Created: 9/29/2022
-Updated: 9/30/2022
+Updated: 10/7/2022
License:
Description:
- Collect all cannabis license data from all states with permitted adult-use:
+ Collect all cannabis license data from states with permitted adult-use:
- - Alaska
- - Arizona
+ ✓ Alaska (Selenium)
+ ✓ Arizona (Selenium)
✓ California
- - Colorado
- - Connecticut
- - Illinois
+ ✓ Colorado
+ ✓ Connecticut
+ ✓ Illinois
✓ Maine
- - Massachusetts
- - Michigan
- - Montana
+ ✓ Massachusetts
+ ✓ Michigan (Selenium)
+ ✓ Montana
✓ Nevada
✓ New Jersey
- - New Mexico
- - New York
+ x New Mexico (Selenium) (FIXME)
✓ Oregon
- - Rhode Island
- - Vermont
+ ✓ Rhode Island
+ ✓ Vermont
✓ Washington
-
"""
-from .get_licenses_ca import get_licenses_ca
-from .get_licenses_me import get_licenses_me
-from .get_licenses_nj import get_licenses_nj
-from .get_licenses_nv import get_licenses_nv
-from .get_licenses_or import get_licenses_or
-from .get_licenses_wa import get_licenses_wa
+# Standard imports.
+from datetime import datetime
+import importlib
+import os
+
+# External imports.
+import pandas as pd
+
+
+# Specify state-specific algorithms.
+ALGORITHMS = {
+ 'ak': 'get_licenses_ak',
+ 'az': 'get_licenses_az',
+ 'ca': 'get_licenses_ca',
+ 'co': 'get_licenses_co',
+ 'ct': 'get_licenses_ct',
+ 'il': 'get_licenses_il',
+ 'ma': 'get_licenses_ma',
+ 'me': 'get_licenses_me',
+ 'mi': 'get_licenses_mi',
+ 'mt': 'get_licenses_mt',
+ 'nj': 'get_licenses_nj',
+ # 'nm': 'get_licenses_nm',
+ 'nv': 'get_licenses_nv',
+ 'or': 'get_licenses_or',
+ 'ri': 'get_licenses_ri',
+ 'vt': 'get_licenses_vt',
+ 'wa': 'get_licenses_wa',
+}
+DATA_DIR = '../data'
+
+
+def main(data_dir, env_file):
+ """Collect all cannabis license data from states with permitted adult-use,
+ dynamically importing modules and finding the entry point for each of the
+ `ALGORITHMS`."""
+ licenses = pd.DataFrame()
+ for state, algorithm in ALGORITHMS.items():
+ module = importlib.import_module(f'{algorithm}')
+ entry_point = getattr(module, algorithm)
+ try:
+ print(f'Getting license data for {state.upper()}.')
+ data = entry_point(data_dir, env_file=env_file)
+ if not os.path.exists(f'{DATA_DIR}/{state}'): os.makedirs(f'{DATA_DIR}/{state}')
+ timestamp = datetime.now().isoformat()[:19].replace(':', '-')
+ data.to_csv(f'{DATA_DIR}/{state}/licenses-{state}-{timestamp}.csv', index=False)
+ licenses = pd.concat([licenses, data])
+ except Exception:
+ print(f'Failed to collect {state.upper()} licenses.')
+
+ # Save all of the retailers.
+ timestamp = datetime.now().isoformat()[:19].replace(':', '-')
+ licenses.to_csv(f'{DATA_DIR}/all/licenses-{timestamp}.csv', index=False)
+ return licenses
# === Test ===
-if __name__ == '__main':
+if __name__ == '__main__':
# Support command line usage.
import argparse
@@ -60,9 +106,4 @@ if __name__ == '__main':
env_file = args.get('env_file')
# Get licenses for each state.
- get_licenses_ca(data_dir, env_file=env_file)
- get_licenses_me(data_dir, env_file=env_file)
- get_licenses_nj(data_dir, env_file=env_file)
- get_licenses_nv(data_dir, env_file=env_file)
- get_licenses_or(data_dir, env_file=env_file)
- get_licenses_wa(data_dir, env_file=env_file)
+ all_licenses = main(data_dir, env_file)
diff --git a/analysis/figures/cannabis-licenses-map.html b/analysis/figures/cannabis-licenses-map.html
index 5354a42717b70654477bcf8aa8d2c65490c3571b..47bef60a28582300934dca01c07be65c65b0df9d 100644
--- a/analysis/figures/cannabis-licenses-map.html
+++ b/analysis/figures/cannabis-licenses-map.html
@@ -23,7 +23,7 @@