"""
Get Results | New York
Copyright (c) 2024 Cannlytics
Authors: Keegan Skeate <https://github.com/keeganskeate>
Created: 6/24/2024
Updated: 6/24/2024
License: MIT License <https://github.com/cannlytics/cannlytics/blob/main/LICENSE>
Data Sources:
- [Jetty Extracts](https://jettyextracts.com/coa-new-york/)
    - [MFNY](https://www.mycoa.info/)
- [Hudson Cannabis](https://www.hudsoncannabis.co/coas)
- [NYSCannabis](https://www.reddit.com/r/NYSCannabis)
"""
# Standard imports:
from datetime import datetime
import json
import logging
import os
import shutil
import tempfile
from time import sleep
from urllib.parse import urljoin
# External imports:
from bs4 import BeautifulSoup
from cannlytics.data.cache import Bogart
from cannlytics.data.coas import CoADoc
from cannlytics.data.web import initialize_selenium, download_google_drive_file
from cannlytics.utils.constants import DEFAULT_HEADERS
from cannlytics.utils.utils import (
    download_file_with_selenium,
    remove_duplicate_files,
    kebab_case,
)
from dotenv import dotenv_values
import gdown
import pandas as pd
import praw
import requests
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
#-----------------------------------------------------------------------
# Setup.
#-----------------------------------------------------------------------
# Define where the Reddit data will be stored.
data_dir = r"D:\data\new-york\NYSCannabis"
# Create a directory to store the downloaded images.
images_directory = os.path.join(data_dir, 'images')
os.makedirs(images_directory, exist_ok=True)
# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
#-----------------------------------------------------------------------
# Get Jetty Extracts COAs.
#-----------------------------------------------------------------------
try:
# Define the URL.
url = 'https://jettyextracts.com/coa-new-york/'
# Define the PDF directory.
pdf_dir = 'D://data/new-york/jetty-extracts/pdfs'
os.makedirs(pdf_dir, exist_ok=True)
# TODO: Download the CSV programmatically.
datafile = r"D:\data\new-york\jetty-extracts\jetty-extracts-coas-2024-06-24.csv"
coas = pd.read_csv(datafile)
# Download the COAs from the CSV.
last_column = coas.columns[-1]
folder_urls = coas[last_column].values
for folder_url in reversed(folder_urls):
try:
gdown.download_folder(folder_url, output=pdf_dir, quiet=False)
sleep(3.33)
except:
print('Failed to download:', folder_url)
except:
print('Failed to download Jetty Extracts COAs.')
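# A minimal sketch for the TODO above: download the Jetty Extracts COA index
# CSV programmatically instead of reading a manually saved copy. The `csv_url`
# argument is hypothetical; the actual export URL would need to be identified
# on the Jetty Extracts COA page. This helper is not called here.
def download_jetty_coa_index(csv_url, outfile):
    """Download a COA index CSV to `outfile` and return the path (sketch)."""
    response = requests.get(csv_url, headers=DEFAULT_HEADERS)
    if response.status_code == 200:
        with open(outfile, 'wb') as f:
            f.write(response.content)
        return outfile
    print('Failed to download CSV:', csv_url)
    return None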
#-----------------------------------------------------------------------
# Get My COAs.
#-----------------------------------------------------------------------
try:
# Define the URL.
url = 'https://www.mycoa.info/'
# Define the PDF directory.
pdf_dir = 'D://data/new-york/my-coa/pdfs'
os.makedirs(pdf_dir, exist_ok=True)
# Get all of the PDF links.
driver = initialize_selenium(headless=False, download_dir=pdf_dir)
driver.get(url)
sleep(5)
pdf_links = driver.find_elements(By.XPATH, "//a[contains(@href, 'dropbox.com/s')]")
pdf_urls = [link.get_attribute('href') for link in pdf_links]
print(f'Found {len(pdf_links)} PDF links.')
# Download all of the PDFs from Dropbox.
for pdf_url in pdf_urls:
driver.get(pdf_url)
sleep(3.33)
wait = WebDriverWait(driver, 10)
download_button = wait.until(EC.presence_of_element_located((By.XPATH, "//button[@aria-label='Download']")))
download_button.click()
sleep(3.33)
print('Downloaded:', pdf_url)
print('All PDFs have been downloaded.')
# Close the Selenium driver
driver.quit()
except:
print('Failed to download My COAs.')
#-----------------------------------------------------------------------
# Get Hudson Cannabis COAs.
#-----------------------------------------------------------------------
try:
# Define the URL.
url = 'https://www.hudsoncannabis.co/coas'
# Define the PDF directory.
pdf_dir = 'D://data/new-york/hudson-cannabis/pdfs'
os.makedirs(pdf_dir, exist_ok=True)
# Find all of the PDF Links.
driver = initialize_selenium(headless=False, download_dir=pdf_dir)
driver.get(url)
wait = WebDriverWait(driver, 10)
wait.until(EC.presence_of_element_located((By.ID, "root")))
sleep(5)
pdf_links = driver.find_elements(By.XPATH, "//a[contains(@href, 'drive.google.com/file')]")
print(f'Found {len(pdf_links)} PDF links.')
# Download each PDF.
for link in pdf_links:
pdf_url = link.get_attribute('href')
pdf_name = pdf_url.split('/')[-2] + '.pdf'
save_path = os.path.join(pdf_dir, pdf_name)
print(f'Downloading {pdf_name} from {pdf_url}')
download_google_drive_file(pdf_url, save_path)
sleep(3.33)
print('All PDFs have been downloaded.')
# Close the Selenium driver
driver.quit()
except:
print('Failed to download Hudson Cannabis COAs.')
#-----------------------------------------------------------------------
# Get Reddit posts with Selenium.
#-----------------------------------------------------------------------
def get_reddit_posts(driver, data, recorded_posts):
"""Get the posts from the Reddit page."""
page_source = driver.page_source
soup = BeautifulSoup(page_source, 'html.parser')
posts = soup.find_all('shreddit-post')
for post in posts:
post_id = post.get('id')
if post_id in recorded_posts:
continue
recorded_posts.append(post_id)
title = post.get('post-title')
url = post.get('content-href')
created_timestamp = post.get('created-timestamp')
author_id = post.get('author-id')
author = post.get('author')
number_comments = post.get('comment-count')
subreddit_id = post.get('subreddit-id')
subreddit_name = post.get('subreddit-prefixed-name')
data.append({
'title': title,
'url': url,
'created_timestamp': created_timestamp,
'author_id': author_id,
'author': author,
'post_id': post_id,
'number_comments': number_comments,
'subreddit_id': subreddit_id,
'subreddit_name': subreddit_name,
})
print(f'Number of posts: {len(data)}')
return data, recorded_posts
# Get the Subreddit page.
# Note: This requires being logged in to Reddit in the browser.
driver = initialize_selenium(headless=False)
query = 'COA'
queries = [
'COA',
'COA attached',
'COA in', # e.g. in the comments, included, etc.
'Certificate',
'Certificate of Analysis',
'lab results',
'test results',
'results',
'effect',
'aroma',
'taste',
'smell',
'flavor',
]
sort_by = 'new'
subreddit = 'NYSCannabis'
driver.get(f"https://www.reddit.com/r/{subreddit}/search/?q={query}&sort={sort_by}")
sleep(5)
# Manually iterate over the queries above to collect post details
# (see the sketch below for an automated loop).
data, recorded_posts = [], []
data, recorded_posts = get_reddit_posts(driver, data, recorded_posts)
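# A minimal sketch of how the manual iteration above could be automated:
# search the subreddit for each query in `queries` and accumulate the posts
# with the same logged-in Selenium session. This helper is not invoked here;
# only the single `query` search above is actually used.
def collect_all_queries(driver, queries, subreddit, sort_by='new'):
    """Run every search query and accumulate the post details (sketch)."""
    data, recorded_posts = [], []
    for q in queries:
        driver.get(f"https://www.reddit.com/r/{subreddit}/search/?q={q}&sort={sort_by}")
        sleep(5)
        data, recorded_posts = get_reddit_posts(driver, data, recorded_posts)
    return data, recorded_posts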
# Close the driver.
driver.close()
driver.quit()
# Save the post data.
data_dir = r"D:\data\new-york\NYSCannabis"
timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
datafile = os.path.join(data_dir, f'ny-reddit-posts-{timestamp}.xlsx')
df = pd.DataFrame(data)
df.to_excel(datafile, index=False)
print('Saved post data:', datafile)
#-----------------------------------------------------------------------
# Get Reddit post data with the Reddit API.
#-----------------------------------------------------------------------
def initialize_reddit(config):
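    """Initialize a PRAW Reddit client using credentials from the .env config."""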
reddit = praw.Reddit(
client_id=config['REDDIT_CLIENT_ID'],
client_secret=config['REDDIT_SECRET'],
password=config['REDDIT_PASSWORD'],
user_agent=config['REDDIT_USER_AGENT'],
username=config['REDDIT_USERNAME'],
)
return reddit
# DEV:
# data = pd.read_excel(r"C:\Users\keega\Documents\cannlytics\cannabis-data-science\season-4\155-seed-to-smoke\data\fl-medical-trees-posts-2024-05-07-11-45-14.xlsx")
# data = data.to_dict(orient='records')
# recorded_posts = [x['post_id'] for x in data]
# # Read already collected posts.
# data_dir = r"D:\data\new-york\NYSCannabis"
# post_datafiles = [os.path.join(data_dir, x) for x in os.listdir(data_dir) if 'posts' in x and 'results' not in x]
# posts = pd.concat([pd.read_excel(x) for x in post_datafiles])
# posts.drop_duplicates(subset=['post_id', 'coa_url', 'redirect_url'], inplace=True)
# collected_posts = list(set(posts['post_id'].values) - set(recorded_posts))
# print('Total number of already collected posts:', len(collected_posts))
# print('Number of posts to collect:', len(data) - len(collected_posts))
def get_post_content(reddit, post_id, config):
"""Retrieve the post content."""
try:
submission = reddit.submission(id=post_id)
except:
try:
print('Failed to retrieve post:', post_id)
print('Waiting 60 seconds to retry...')
sleep(61)
reddit = initialize_reddit(config)
submission = reddit.submission(id=post_id)
except:
print('Failed to retrieve post:', post_id)
print('Waiting 60 seconds to retry...')
sleep(61)
reddit = initialize_reddit(config)
submission = reddit.submission(id=post_id)
return submission
def get_post_images(submission):
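    """Collect the image URLs attached to a submission, including gallery images."""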
images = []
if 'imgur.com' in submission.url or submission.url.endswith(('.jpg', '.jpeg', '.png', '.gif')):
images.append(submission.url)
try:
if submission.is_gallery:
image_dict = submission.media_metadata
for image_item in image_dict.values():
try:
largest_image = image_item['s']
image_url = largest_image['u']
images.append(image_url)
except KeyError:
pass
except AttributeError:
pass
return images
def download_post_images(post_id, images, images_directory):
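    """Download each of a post's images to the images directory, skipping cached files."""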
for i, image_url in enumerate(images, start=1):
file_extension = os.path.splitext(image_url)[-1].split('?')[0]
filename = f"{post_id}_image_{i}{file_extension}"
if file_extension not in ['.jpg', '.jpeg', '.png', '.gif']:
filename = f"{post_id}_image_{i}.jpg"
outfile = os.path.join(images_directory, filename)
if os.path.exists(outfile):
continue
try:
response = requests.get(image_url, headers={'User-agent': 'CannBot'})
except:
try:
print('Failed to download image:', image_url)
print('Waiting 60 seconds to retry...')
sleep(60)
response = requests.get(image_url, headers={'User-agent': 'CannBot'})
except:
print('Failed to download image:', image_url)
print('Waiting 60 seconds to retry...')
sleep(60)
response = requests.get(image_url, headers={'User-agent': 'CannBot'})
sleep(3.33)
if response.status_code != 200:
print('Unsuccessful request for image:', image_url)
continue
with open(outfile, 'wb') as file:
file.write(response.content)
print(f"Downloaded image: {outfile}")
def get_post_comments(submission):
"""Retrieve the post comments."""
comments = []
submission.comments.replace_more(limit=None)
for comment in submission.comments.list():
comments.append({
'comment_id': comment.id,
'comment_author': comment.author.name if comment.author else None,
'comment_body': comment.body,
'comment_created_utc': datetime.utcfromtimestamp(comment.created_utc).strftime('%Y-%m-%d %H:%M:%S')
})
return comments
def get_reddit_post_data(all_posts=None, collected_posts=None, data=None):
"""Get the data for each post."""
# Initialize Reddit.
config = dotenv_values('.env')
reddit = initialize_reddit(config)
# Get each post page and data for each post.
if all_posts is None: all_posts = []
if collected_posts is None: collected_posts = []
for n, post_data in enumerate(data[len(all_posts):]):
# Retrieve the post content.
post_id = post_data['post_id'].split('_')[-1]
if post_id in collected_posts:
print('Post already collected:', post_id)
continue
print('Getting data for post:', post_id)
submission = get_post_content(reddit, post_id, config)
post_content = submission.selftext
# Retrieve images.
images = get_post_images(submission)
# Download images.
download_post_images(post_id, images, images_directory)
# Retrieve comments.
comments = get_post_comments(submission)
# Update post_data with the retrieved information.
post_data['post_content'] = post_content
post_data['upvotes'] = submission.ups
post_data['downvotes'] = submission.downs
post_data['images'] = images
post_data['comments'] = comments
all_posts.append(post_data)
print('Post data retrieved:', submission.title)
        sleep(3.33)
    return all_posts
def save_post_data(all_posts, data_dir, namespace):
"""Save the post data."""
try:
df = pd.DataFrame(all_posts)
timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
datafile = os.path.join(data_dir, f'{namespace}-{timestamp}.xlsx')
df.to_excel(datafile, index=False)
print('Saved post data:', datafile)
except:
print('No posts to curate.')
# Get all of the post data.
all_posts = get_reddit_post_data(data=data)
# Save the post data.
save_post_data(all_posts, data_dir, 'ny-reddit-coa-posts')
#-----------------------------------------------------------------------
# Parse COA URLs from images.
#-----------------------------------------------------------------------
# FIXME: Read saved COA URLs.
coa_urls = {}
# text_file = r"C:\Users\keega\Documents\cannlytics\cannabis-data-science\season-4\158-reported-effects\data\scanned-images.txt"
# with open(text_file, 'r') as file:
# lines = file.readlines()
# coa_urls = {}
# for line in lines:
# if 'COA URL found for post' in line:
# post_id, coa_url = line.split(':', maxsplit=1)
# post_id = 't3_' + post_id.split(' ')[-1].strip()
# coa_url = coa_url.strip()
# post_urls = coa_urls.get(post_id, [])
# post_urls.append(coa_url)
# coa_urls[post_id] = list(set(post_urls))
# print('Number of COA URLs:', len(coa_urls))
# Initialize CoADoc.
parser = CoADoc()
temp_path = tempfile.mkdtemp()
# Scan all images for COA URLs.
image_files = os.listdir(images_directory)
image_files = [os.path.join(images_directory, x) for x in image_files]
print('Number of images:', len(image_files))
for image_file in image_files:
post_id = os.path.basename(image_file).split('_')[0]
if post_id in coa_urls:
continue
post_urls = coa_urls.get(post_id, [])
print('Scanning image:', image_file)
try:
coa_url = parser.scan(
image_file,
temp_path=temp_path,
)
except:
print('Failed to scan:', image_file)
continue
if coa_url:
print(f"COA URL found for post {post_id}: {coa_url}")
post_urls.append(coa_url)
coa_urls[post_id] = list(set(post_urls))
else:
print(f"No COA URL found for post {post_id}.")
# Clean up the temporary directory.
try:
shutil.rmtree(temp_path)
except:
pass
#-----------------------------------------------------------------------
# Download COA PDFs using the COA URLs.
#-----------------------------------------------------------------------
def download_kaycha_coa(url, outfile):
"""Download a Kaycha Labs COA."""
base = 'https://yourcoa.com'
sample_id = url.split('/')[-1].split('?')[0].split('&')[0]
if sample_id == 'coa-download':
sample_id = url.split('sample=')[-1]
try:
coa_url = f'{base}/coa/download?sample={sample_id}'
response = requests.get(coa_url, headers=DEFAULT_HEADERS)
if response.status_code == 200:
if len(response.content) < MIN_FILE_SIZE:
print('File size is small, retrying with Selenium:', url)
response = requests.get(url, allow_redirects=True)
if response.status_code == 200:
redirected_url = response.url
download_file_with_selenium(
redirected_url,
download_dir=pdf_dir,
)
print('Downloaded with Selenium:', redirected_url)
return redirected_url
else:
with open(outfile, 'wb') as pdf:
pdf.write(response.content)
                print('Downloaded:', outfile)
                return coa_url
else:
print('Failed to download, retrying with Selenium:', url)
response = requests.get(url, allow_redirects=True)
if response.status_code == 200:
redirected_url = response.url
download_file_with_selenium(
redirected_url,
download_dir=pdf_dir,
)
print('Downloaded with Selenium:', redirected_url)
return redirected_url
except:
coa_url = f'{base}/coa/coa-view?sample={sample_id}'
response = requests.get(coa_url, allow_redirects=True)
if response.status_code == 200:
redirected_url = response.url
download_file_with_selenium(
redirected_url,
download_dir=pdf_dir,
)
print('Downloaded with Selenium:', redirected_url)
return redirected_url
# Download all PDFs.
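# Note: responses smaller than MIN_FILE_SIZE are treated as failed downloads
# (e.g. error pages) and are retried with Selenium.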
MIN_FILE_SIZE = 21 * 1024
pdf_dir = r'D:\data\new-york\NYSCannabis\pdfs'
os.makedirs(pdf_dir, exist_ok=True)
redirect_urls = {}
for post_id, urls in coa_urls.items():
print(f"Downloading COA for post: {post_id}")
for i, url in enumerate(urls, start=1):
# Skip if the URL is already downloaded.
filename = f"{post_id}-coa-{i}.pdf"
outfile = os.path.join(pdf_dir, filename)
if os.path.exists(outfile):
print('Cached:', outfile)
redirect_urls[post_id] = url
continue
# Handle QBench COAs.
if 'qbench.net' in url and 'download' not in url:
download_file_with_selenium(
url,
download_dir=pdf_dir,
method='button',
el_id='qbenchDownloadPdfButton',
)
print('Downloaded with Selenium:', url)
# Download Kaycha Labs COAs.
elif 'yourcoa.com' in url:
url = download_kaycha_coa(url, outfile)
# Download regular PDFs.
elif url.startswith('http'):
response = requests.get(url, allow_redirects=True)
if response.status_code == 200:
# if len(response.content) < MIN_FILE_SIZE:
# redirected_url = response.url
# print('File size is small, retrying with Selenium:', redirected_url)
# download_file_with_selenium(
# redirected_url,
# download_dir=pdf_dir,
# )
# print('Downloaded with Selenium:', redirected_url)
# else:
with open(outfile, 'wb') as file:
file.write(response.content)
print(f"Downloaded COA: {outfile}")
else:
print('Failed to download:', url)
sleep(1)
# Skip invalid URLs.
else:
print('Invalid URL:', url)
continue
# Save the URL that was downloaded.
redirect_urls[post_id] = url
# Remove duplicate PDFs.
remove_duplicate_files(pdf_dir, verbose=True)
try:
# Tie the COA URLs to the posts.
df = pd.DataFrame(all_posts)
df['coa_url'] = df['post_id'].map(coa_urls)
df['redirect_url'] = df['post_id'].map(redirect_urls)
# Save the post data.
data_dir = r"D:\data\new-york\NYSCannabis"
timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
datafile = os.path.join(data_dir, f'ny-reddit-coa-posts-{timestamp}.xlsx')
df.to_excel(datafile, index=False)
print('Saved post data:', datafile)
except:
print('No posts to curate.')
#-----------------------------------------------------------------------
# Parse COA data from the PDFs.
#-----------------------------------------------------------------------
parser = CoADoc()
pdf_dir = r'D:\data\new-york\NYSCannabis\pdfs'
pdf_files = os.listdir(pdf_dir)
pdf_files = [os.path.join(pdf_dir, x) for x in pdf_files]
logging.info('Parsing %i COA PDFs...' % len(pdf_files))
all_coa_data = {}
failed = []
temp_path = tempfile.mkdtemp()
for pdf_file in pdf_files:
try:
coa_data = parser.parse_pdf(
pdf_file,
temp_path=temp_path,
verbose=True,
use_qr_code=False,
)
except:
logging.info('Failed to parse: %s' % pdf_file)
failed.append(pdf_file)
continue
if coa_data:
logging.info('Parsed: %s' % pdf_file)
coa_id = os.path.basename(pdf_file).split(' ')[0]
if isinstance(coa_data, list):
all_coa_data[coa_id] = coa_data[0]
elif isinstance(coa_data, dict):
all_coa_data[coa_id] = coa_data
else:
logging.info('Found no data: %s' % pdf_file)
try:
shutil.rmtree(temp_path)
except:
pass
# Compile the COA data and remove duplicates.
coa_df = pd.DataFrame(list(all_coa_data.values()))
coa_df['coa_id'] = [x.replace('.pdf', '') for x in list(all_coa_data.keys())]
coa_df.drop_duplicates(subset=['coa_id', 'sample_id', 'results_hash'], inplace=True)
# Save the COA data.
data_dir = r"D:\data\new-york\NYSCannabis"
timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
datafile = os.path.join(data_dir, f'ny-reddit-coa-data-{timestamp}.xlsx')
coa_df.to_excel(datafile, index=False)
logging.info('Saved %i COA data: %s' % (len(coa_df), datafile))
# Save all of the COAs that failed to be parsed.
with open(os.path.join(data_dir, f'unidentified-coas-{timestamp}.json'), 'w') as f:
json.dump(failed, f, indent=4)
logging.info('Saved list of failed PDFs.')
#-----------------------------------------------------------------------
# Create a sample of posts that have COA URLs.
#-----------------------------------------------------------------------
# Define where the data lives.
data_dir = r"D:\data\new-york\NYSCannabis"
# Read all of the posts.
post_datafiles = [os.path.join(data_dir, x) for x in os.listdir(data_dir) if 'posts' in x and 'results' not in x]
posts = pd.concat([pd.read_excel(x) for x in post_datafiles])
posts.drop_duplicates(subset=['post_id', 'coa_url', 'redirect_url'], inplace=True)
print('Number of posts:', len(posts))
# Read in all of the COA datafiles.
coa_datafiles = [os.path.join(data_dir, x) for x in os.listdir(data_dir) if 'coa-data' in x]
results = pd.concat([pd.read_excel(x) for x in coa_datafiles])
results.drop_duplicates(subset=['sample_id', 'results_hash'], inplace=True)
print('Number of COA results:', len(results))
# Find the post ID for the lab results.
post_coa_data = {}
for index, row in results.iterrows():
coa_id = row['coa_id'].replace('t3_', '').split('-coa')[0]
matches = posts.loc[posts['post_id'].str.contains(coa_id)]
try:
if matches.empty:
matches = posts.loc[posts['coa_url'].str.contains(coa_id)]
if matches.empty:
matches = posts.loc[posts['redirect_url'].str.contains(coa_id)]
except:
pass
if not matches.empty:
post_id = matches['post_id'].values[0]
post_coa_data[post_id] = row.to_dict()
# Merge the lab results with the post data.
coa_data_df = pd.DataFrame.from_dict(post_coa_data, orient='index')
coa_data_df.reset_index(inplace=True)
coa_data_df.rename(columns={'index': 'post_id'}, inplace=True)
merged_df = posts.merge(coa_data_df, on='post_id', how='left')
merged_df = merged_df.loc[~merged_df['coa_id'].isna()]
print('Number of posts with COA data:', len(merged_df))
# Save the updated post data.
timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
datafile = os.path.join(data_dir, f'ny-reddit-posts-with-results-{timestamp}.xlsx')
merged_df.to_excel(datafile, index=False)
print('Saved %i posts with COA data:' % len(merged_df), datafile)