"""
Get Results | New York
Copyright (c) 2024 Cannlytics

Authors: Keegan Skeate <https://github.com/keeganskeate>
Created: 6/24/2024
Updated: 6/24/2024
License: MIT License <https://github.com/cannlytics/cannlytics/blob/main/LICENSE>

Data Sources:

- [Jetty Extracts](https://jettyextracts.com/coa-new-york/)
- [MFNY](https://www.mycoa.info/)
- [Hudson Cannabis](https://www.hudsoncannabis.co/coas)
- [NYSCannabis](https://www.reddit.com/r/NYSCannabis)

"""
from datetime import datetime
import json
import os
import shutil
from time import sleep
from urllib.parse import urljoin

from bs4 import BeautifulSoup
from cannlytics.data.cache import Bogart
from cannlytics.data.coas import CoADoc
from cannlytics.data.web import initialize_selenium, download_google_drive_file
from cannlytics.utils.constants import DEFAULT_HEADERS
from cannlytics.utils.utils import (
    download_file_with_selenium,
    remove_duplicate_files,
    kebab_case,
)
from dotenv import dotenv_values
import gdown
import logging
import pandas as pd
import praw
import requests
import tempfile
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
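

# Setup: define data directories and logging.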
# Define where the Reddit data lives.
data_dir = r"D:\data\new-york\NYSCannabis"

# Define where to save post images.
images_directory = 'D://data/new-york/NYSCannabis/images'
os.makedirs(images_directory, exist_ok=True)

# Set up logging.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
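

# Download Jetty Extracts COAs.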
try:

    # Define the COA page URL.
    url = 'https://jettyextracts.com/coa-new-york/'

    # Define where to save the PDFs.
    pdf_dir = 'D://data/new-york/jetty-extracts/pdfs'
    os.makedirs(pdf_dir, exist_ok=True)

    # Read the list of COA Google Drive folder URLs.
    datafile = r"D:\data\new-york\jetty-extracts\jetty-extracts-coas-2024-06-24.csv"
    coas = pd.read_csv(datafile)

    # Download the PDFs from each Google Drive folder.
    last_column = coas.columns[-1]
    folder_urls = coas[last_column].values
    for folder_url in reversed(folder_urls):
        try:
            gdown.download_folder(folder_url, output=pdf_dir, quiet=False)
            sleep(3.33)
        except:
            print('Failed to download:', folder_url)

except:
    print('Failed to download Jetty Extracts COAs.')
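

# Download MFNY COAs from mycoa.info.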
try:

    # Define the URL where COA links are published.
    url = 'https://www.mycoa.info/'

    # Define where to save the PDFs.
    pdf_dir = 'D://data/new-york/my-coa/pdfs'
    os.makedirs(pdf_dir, exist_ok=True)

    # Find all of the Dropbox links on the page.
    driver = initialize_selenium(headless=False, download_dir=pdf_dir)
    driver.get(url)
    sleep(5)
    pdf_links = driver.find_elements(By.XPATH, "//a[contains(@href, 'dropbox.com/s')]")
    pdf_urls = [link.get_attribute('href') for link in pdf_links]
    print(f'Found {len(pdf_links)} PDF links.')

    # Download each PDF by clicking the Dropbox download button.
    for pdf_url in pdf_urls:
        driver.get(pdf_url)
        sleep(3.33)
        wait = WebDriverWait(driver, 10)
        download_button = wait.until(EC.presence_of_element_located((By.XPATH, "//button[@aria-label='Download']")))
        download_button.click()
        sleep(3.33)
        print('Downloaded:', pdf_url)
    print('All PDFs have been downloaded.')

    # Close the browser.
    driver.quit()

except:
    print('Failed to download My COAs.')
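

# Download Hudson Cannabis COAs.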
try:

    # Define the URL where COA links are published.
    url = 'https://www.hudsoncannabis.co/coas'

    # Define where to save the PDFs.
    pdf_dir = 'D://data/new-york/hudson-cannabis/pdfs'
    os.makedirs(pdf_dir, exist_ok=True)

    # Find all of the Google Drive links on the page.
    driver = initialize_selenium(headless=False, download_dir=pdf_dir)
    driver.get(url)
    wait = WebDriverWait(driver, 10)
    wait.until(EC.presence_of_element_located((By.ID, "root")))
    sleep(5)
    pdf_links = driver.find_elements(By.XPATH, "//a[contains(@href, 'drive.google.com/file')]")
    print(f'Found {len(pdf_links)} PDF links.')

    # Download each PDF from Google Drive.
    for link in pdf_links:
        pdf_url = link.get_attribute('href')
        pdf_name = pdf_url.split('/')[-2] + '.pdf'
        save_path = os.path.join(pdf_dir, pdf_name)
        print(f'Downloading {pdf_name} from {pdf_url}')
        download_google_drive_file(pdf_url, save_path)
        sleep(3.33)

    print('All PDFs have been downloaded.')

    # Close the browser.
    driver.quit()

except:
    print('Failed to download Hudson Cannabis COAs.')
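

# Get Reddit posts from r/NYSCannabis.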
def get_reddit_posts(driver, data, recorded_posts):
    """Get the posts from the Reddit page."""
    page_source = driver.page_source
    soup = BeautifulSoup(page_source, 'html.parser')
    posts = soup.find_all('shreddit-post')
    for post in posts:
        post_id = post.get('id')
        if post_id in recorded_posts:
            continue
        recorded_posts.append(post_id)
        title = post.get('post-title')
        url = post.get('content-href')
        created_timestamp = post.get('created-timestamp')
        author_id = post.get('author-id')
        author = post.get('author')
        number_comments = post.get('comment-count')
        subreddit_id = post.get('subreddit-id')
        subreddit_name = post.get('subreddit-prefixed-name')
        data.append({
            'title': title,
            'url': url,
            'created_timestamp': created_timestamp,
            'author_id': author_id,
            'author': author,
            'post_id': post_id,
            'number_comments': number_comments,
            'subreddit_id': subreddit_id,
            'subreddit_name': subreddit_name,
        })
    print(f'Number of posts: {len(data)}')
    return data, recorded_posts
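

# Search the subreddit for COA-related posts.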
driver = initialize_selenium(headless=False)
query = 'COA'
# Candidate search terms (only `query` above is used in the search below).
queries = [
    'COA',
    'COA attached',
    'COA in',
    'Certificate',
    'Certificate of Analysis',
    'lab results',
    'test results',
    'results',
    'effect',
    'aroma',
    'taste',
    'smell',
    'flavor',
]
sort_by = 'new'
subreddit = 'NYSCannabis'
driver.get(f"https://www.reddit.com/r/{subreddit}/search/?q={query}&sort={sort_by}")
sleep(5)

# Record the posts found on the search page.
data, recorded_posts = [], []
data, recorded_posts = get_reddit_posts(driver, data, recorded_posts)

# Close the browser.
driver.close()
driver.quit()

# Save the post data.
data_dir = r"D:\data\new-york\NYSCannabis"
timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
datafile = os.path.join(data_dir, f'ny-reddit-posts-{timestamp}.xlsx')
df = pd.DataFrame(data)
df.to_excel(datafile, index=False)
print('Saved post data:', datafile)
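

# Get each post's data: content, images, and comments.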
def initialize_reddit(config):
    """Initialize the Reddit client."""
    reddit = praw.Reddit(
        client_id=config['REDDIT_CLIENT_ID'],
        client_secret=config['REDDIT_SECRET'],
        password=config['REDDIT_PASSWORD'],
        user_agent=config['REDDIT_USER_AGENT'],
        username=config['REDDIT_USERNAME'],
    )
    return reddit


def get_post_content(reddit, post_id, config):
    """Retrieve the post content."""
    try:
        submission = reddit.submission(id=post_id)
    except:
        try:
            print('Failed to retrieve post:', post_id)
            print('Waiting 60 seconds to retry...')
            sleep(61)
            reddit = initialize_reddit(config)
            submission = reddit.submission(id=post_id)
        except:
            print('Failed to retrieve post:', post_id)
            print('Waiting 60 seconds to retry...')
            sleep(61)
            reddit = initialize_reddit(config)
            submission = reddit.submission(id=post_id)
    return submission


def get_post_images(submission):
    """Find image URLs attached to a post, including gallery images."""
    images = []
    if 'imgur.com' in submission.url or submission.url.endswith(('.jpg', '.jpeg', '.png', '.gif')):
        images.append(submission.url)
    try:
        if submission.is_gallery:
            image_dict = submission.media_metadata
            for image_item in image_dict.values():
                try:
                    largest_image = image_item['s']
                    image_url = largest_image['u']
                    images.append(image_url)
                except KeyError:
                    pass
    except AttributeError:
        pass
    return images


def download_post_images(post_id, images, images_directory):
    """Download a post's images to the images directory."""
    for i, image_url in enumerate(images, start=1):
        file_extension = os.path.splitext(image_url)[-1].split('?')[0]
        filename = f"{post_id}_image_{i}{file_extension}"
        if file_extension not in ['.jpg', '.jpeg', '.png', '.gif']:
            filename = f"{post_id}_image_{i}.jpg"
        outfile = os.path.join(images_directory, filename)
        if os.path.exists(outfile):
            continue
        try:
            response = requests.get(image_url, headers={'User-agent': 'CannBot'})
        except:
            try:
                print('Failed to download image:', image_url)
                print('Waiting 60 seconds to retry...')
                sleep(60)
                response = requests.get(image_url, headers={'User-agent': 'CannBot'})
            except:
                print('Failed to download image:', image_url)
                print('Waiting 60 seconds to retry...')
                sleep(60)
                response = requests.get(image_url, headers={'User-agent': 'CannBot'})
        sleep(3.33)
        if response.status_code != 200:
            print('Unsuccessful request for image:', image_url)
            continue
        with open(outfile, 'wb') as file:
            file.write(response.content)
        print(f"Downloaded image: {outfile}")


def get_post_comments(submission):
    """Retrieve the post comments."""
    comments = []
    submission.comments.replace_more(limit=None)
    for comment in submission.comments.list():
        comments.append({
            'comment_id': comment.id,
            'comment_author': comment.author.name if comment.author else None,
            'comment_body': comment.body,
            'comment_created_utc': datetime.utcfromtimestamp(comment.created_utc).strftime('%Y-%m-%d %H:%M:%S')
        })
    return comments


def get_reddit_post_data(all_posts=None, collected_posts=None, data=None):
    """Get the data for each post."""

    # Initialize the Reddit client.
    config = dotenv_values('.env')
    reddit = initialize_reddit(config)

    # Get the content, images, and comments for each post.
    if all_posts is None: all_posts = []
    if collected_posts is None: collected_posts = []
    for n, post_data in enumerate(data[len(all_posts):]):

        # Skip posts that have already been collected.
        post_id = post_data['post_id'].split('_')[-1]
        if post_id in collected_posts:
            print('Post already collected:', post_id)
            continue
        print('Getting data for post:', post_id)
        submission = get_post_content(reddit, post_id, config)
        post_content = submission.selftext

        # Find and download the post's images.
        images = get_post_images(submission)
        download_post_images(post_id, images, images_directory)

        # Get the post's comments.
        comments = get_post_comments(submission)

        # Record the post data.
        post_data['post_content'] = post_content
        post_data['upvotes'] = submission.ups
        post_data['downvotes'] = submission.downs
        post_data['images'] = images
        post_data['comments'] = comments
        all_posts.append(post_data)
        print('Post data retrieved:', submission.title)
        sleep(3.33)

    # Return the collected post data.
    return all_posts


def save_post_data(all_posts, data_dir, namespace):
    """Save the post data."""
    try:
        df = pd.DataFrame(all_posts)
        timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
        datafile = os.path.join(data_dir, f'{namespace}-{timestamp}.xlsx')
        df.to_excel(datafile, index=False)
        print('Saved post data:', datafile)
    except:
        print('No posts to curate.')


# Get the data for each post.
all_posts = get_reddit_post_data(data=data)

# Save the post data.
save_post_data(all_posts, data_dir, 'ny-reddit-coa-posts')
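

# Extract COA URLs from the downloaded images.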
# Keep track of the COA URLs found for each post.
coa_urls = {}

# Initialize a COA parser.
parser = CoADoc()
temp_path = tempfile.mkdtemp()

# Scan each image for a COA URL.
image_files = os.listdir(images_directory)
image_files = [os.path.join(images_directory, x) for x in image_files]
print('Number of images:', len(image_files))
for image_file in image_files:
    post_id = os.path.basename(image_file).split('_')[0]
    if post_id in coa_urls:
        continue
    post_urls = coa_urls.get(post_id, [])
    print('Scanning image:', image_file)
    try:
        coa_url = parser.scan(
            image_file,
            temp_path=temp_path,
        )
    except:
        print('Failed to scan:', image_file)
        continue
    if coa_url:
        print(f"COA URL found for post {post_id}: {coa_url}")
        post_urls.append(coa_url)
        coa_urls[post_id] = list(set(post_urls))
    else:
        print(f"No COA URL found for post {post_id}.")

# Clean up the temporary scan directory.
try:
    shutil.rmtree(temp_path)
except:
    pass
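

# Download the COA PDFs.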


def download_kaycha_coa(url, outfile):
    """Download a Kaycha Labs COA."""
    base = 'https://yourcoa.com'
    sample_id = url.split('/')[-1].split('?')[0].split('&')[0]
    if sample_id == 'coa-download':
        sample_id = url.split('sample=')[-1]
    try:
        coa_url = f'{base}/coa/download?sample={sample_id}'
        response = requests.get(coa_url, headers=DEFAULT_HEADERS)
        if response.status_code == 200:
            if len(response.content) < MIN_FILE_SIZE:
                print('File size is small, retrying with Selenium:', url)
                response = requests.get(url, allow_redirects=True)
                if response.status_code == 200:
                    redirected_url = response.url
                    download_file_with_selenium(
                        redirected_url,
                        download_dir=pdf_dir,
                    )
                    print('Downloaded with Selenium:', redirected_url)
                    return redirected_url
            else:
                with open(outfile, 'wb') as pdf:
                    pdf.write(response.content)
                print('Downloaded:', outfile)
        else:
            print('Failed to download, retrying with Selenium:', url)
            response = requests.get(url, allow_redirects=True)
            if response.status_code == 200:
                redirected_url = response.url
                download_file_with_selenium(
                    redirected_url,
                    download_dir=pdf_dir,
                )
                print('Downloaded with Selenium:', redirected_url)
                return redirected_url
    except:
        coa_url = f'{base}/coa/coa-view?sample={sample_id}'
        response = requests.get(coa_url, allow_redirects=True)
        if response.status_code == 200:
            redirected_url = response.url
            download_file_with_selenium(
                redirected_url,
                download_dir=pdf_dir,
            )
            print('Downloaded with Selenium:', redirected_url)
            return redirected_url


# Define the minimum file size for a valid download and where to save the PDFs.
MIN_FILE_SIZE = 21 * 1024
pdf_dir = r'D:\data\new-york\NYSCannabis\pdfs'
os.makedirs(pdf_dir, exist_ok=True)

# Download the COA PDF for each post.
redirect_urls = {}
for post_id, urls in coa_urls.items():
    print(f"Downloading COA for post: {post_id}")
    for i, url in enumerate(urls, start=1):

        # Skip COAs that have already been downloaded.
        filename = f"{post_id}-coa-{i}.pdf"
        outfile = os.path.join(pdf_dir, filename)
        if os.path.exists(outfile):
            print('Cached:', outfile)
            redirect_urls[post_id] = url
            continue

        # Download COAs hosted on QBench with Selenium.
        if 'qbench.net' in url and 'download' not in url:
            download_file_with_selenium(
                url,
                download_dir=pdf_dir,
                method='button',
                el_id='qbenchDownloadPdfButton',
            )
            print('Downloaded with Selenium:', url)

        # Download Kaycha Labs COAs from yourcoa.com.
        elif 'yourcoa.com' in url:
            url = download_kaycha_coa(url, outfile)

        # Download any other COA URL directly.
        elif url.startswith('http'):
            response = requests.get(url, allow_redirects=True)
            if response.status_code == 200:
                with open(outfile, 'wb') as file:
                    file.write(response.content)
                print(f"Downloaded COA: {outfile}")
            else:
                print('Failed to download:', url)
            sleep(1)

        # Otherwise, skip invalid URLs.
        else:
            print('Invalid URL:', url)
            continue

        # Record the final URL.
        redirect_urls[post_id] = url

# Remove any duplicate PDFs.
remove_duplicate_files(pdf_dir, verbose=True)

# Merge the COA URLs with the post data and save.
try:
    df = pd.DataFrame(all_posts)
    df['coa_url'] = df['post_id'].map(coa_urls)
    df['redirect_url'] = df['post_id'].map(redirect_urls)
    data_dir = r"D:\data\new-york\NYSCannabis"
    timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    datafile = os.path.join(data_dir, f'ny-reddit-coa-posts-{timestamp}.xlsx')
    df.to_excel(datafile, index=False)
    print('Saved post data:', datafile)
except:
    print('No posts to curate.')
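

# Parse the downloaded COA PDFs.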
parser = CoADoc()
pdf_dir = r'D:\data\new-york\NYSCannabis\pdfs'
pdf_files = os.listdir(pdf_dir)
pdf_files = [os.path.join(pdf_dir, x) for x in pdf_files]
logging.info('Parsing %i COA PDFs...' % len(pdf_files))
all_coa_data = {}
failed = []
temp_path = tempfile.mkdtemp()
for pdf_file in pdf_files:
    try:
        coa_data = parser.parse_pdf(
            pdf_file,
            temp_path=temp_path,
            verbose=True,
            use_qr_code=False,
        )
    except:
        logging.info('Failed to parse: %s' % pdf_file)
        failed.append(pdf_file)
        continue
    if coa_data:
        logging.info('Parsed: %s' % pdf_file)
        coa_id = os.path.basename(pdf_file).split(' ')[0]
        if isinstance(coa_data, list):
            all_coa_data[coa_id] = coa_data[0]
        elif isinstance(coa_data, dict):
            all_coa_data[coa_id] = coa_data
    else:
        logging.info('Found no data: %s' % pdf_file)
try:
    shutil.rmtree(temp_path)
except:
    pass

# Compile the parsed COA data.
coa_df = pd.DataFrame(list(all_coa_data.values()))
coa_df['coa_id'] = [x.replace('.pdf', '') for x in list(all_coa_data.keys())]
coa_df.drop_duplicates(subset=['coa_id', 'sample_id', 'results_hash'], inplace=True)

# Save the parsed COA data.
data_dir = r"D:\data\new-york\NYSCannabis"
timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
datafile = os.path.join(data_dir, f'ny-reddit-coa-data-{timestamp}.xlsx')
coa_df.to_excel(datafile, index=False)
logging.info('Saved %i COA data: %s' % (len(coa_df), datafile))

# Save the list of PDFs that could not be parsed.
with open(os.path.join(data_dir, f'unidentified-coas-{timestamp}.json'), 'w') as f:
    json.dump(failed, f, indent=4)
logging.info('Saved list of failed PDFs.')
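

# Merge the post data with the parsed COA data.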
# Define where the data lives.
data_dir = r"D:\data\new-york\NYSCannabis"

# Read the saved post data.
post_datafiles = [os.path.join(data_dir, x) for x in os.listdir(data_dir) if 'posts' in x and 'results' not in x]
posts = pd.concat([pd.read_excel(x) for x in post_datafiles])
posts.drop_duplicates(subset=['post_id', 'coa_url', 'redirect_url'], inplace=True)
print('Number of posts:', len(posts))

# Read the parsed COA data.
coa_datafiles = [os.path.join(data_dir, x) for x in os.listdir(data_dir) if 'coa-data' in x]
results = pd.concat([pd.read_excel(x) for x in coa_datafiles])
results.drop_duplicates(subset=['sample_id', 'results_hash'], inplace=True)
print('Number of COA results:', len(results))

# Match each COA to its post by post ID, COA URL, or redirect URL.
post_coa_data = {}
for index, row in results.iterrows():
    coa_id = row['coa_id'].replace('t3_', '').split('-coa')[0]
    matches = posts.loc[posts['post_id'].str.contains(coa_id)]
    try:
        if matches.empty:
            matches = posts.loc[posts['coa_url'].str.contains(coa_id)]
        if matches.empty:
            matches = posts.loc[posts['redirect_url'].str.contains(coa_id)]
    except:
        pass
    if not matches.empty:
        post_id = matches['post_id'].values[0]
        post_coa_data[post_id] = row.to_dict()

# Merge the COA data with the post data.
coa_data_df = pd.DataFrame.from_dict(post_coa_data, orient='index')
coa_data_df.reset_index(inplace=True)
coa_data_df.rename(columns={'index': 'post_id'}, inplace=True)
merged_df = posts.merge(coa_data_df, on='post_id', how='left')
merged_df = merged_df.loc[~merged_df['coa_id'].isna()]
print('Number of posts with COA data:', len(merged_df))

# Save the merged post and COA data.
timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
datafile = os.path.join(data_dir, f'ny-reddit-posts-with-results-{timestamp}.xlsx')
merged_df.to_excel(datafile, index=False)
print('Saved %i posts with COA data:' % len(merged_df), datafile)