"""
Get Reddit Results | Cannlytics
Copyright (c) 2023-2024 Cannlytics

Authors: Keegan Skeate <https://github.com/keeganskeate>
Created: 12/1/2023
Updated: 5/21/2024
License: MIT License <https://github.com/cannlytics/cannlytics/blob/main/LICENSE>

Description:

    This tool collects cannabis product reviews and associated product
    images for research purposes.

    Product data from the product label images, as well as natural
    language data from the reviews, such as a sentiment rating, can be
    used to analyze how product attributes affect how products are
    reviewed.

"""

# Standard imports:
from datetime import datetime
import json
import logging
import os
import shutil
import tempfile
from time import sleep

# External imports:
from bs4 import BeautifulSoup
from dotenv import dotenv_values
import pandas as pd
import praw
import requests

# Internal imports:
from cannlytics.data.coas import CoADoc
from cannlytics.data.web import initialize_selenium
from cannlytics.utils.constants import DEFAULT_HEADERS
from cannlytics.utils.utils import (
    download_file_with_selenium,
    remove_duplicate_files,
)

# Define where to save images.
images_directory = 'D://data/reddit/FLMedicalTrees/images'
os.makedirs(images_directory, exist_ok=True)

# Set up logging.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


#-----------------------------------------------------------------------
# Search for COA posts on Reddit with Selenium.
#-----------------------------------------------------------------------

# Open the subreddit's search page.
driver = initialize_selenium(headless=False)
query = 'COA'
# Alternative search queries (only `query` is used below; see the sketch
# after the parsing loop for how the full list could be cycled).
queries = [
    'COA',
    'COA attached',
    'COA in',
    'Certificate',
    'Certificate of Analysis',
    'lab results',
    'test results',
    'results',
    'effect',
    'aroma',
    'taste',
    'smell',
    'flavor',
]
sort_by = 'new'
subreddit = 'FLMedicalTrees'
driver.get(f"https://www.reddit.com/r/{subreddit}/search/?q={query}&sort={sort_by}")
sleep(5)
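
# Note: Reddit search results lazy-load as you scroll, so only the first
# batch of results is in the initial page source. A minimal sketch to load
# more results before parsing (the number of scrolls is an assumption, not
# part of the original run):
for _ in range(10):
    driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')
    sleep(2)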

# Parse post details from the search results.
data = []
recorded_posts = []
page_source = driver.page_source
soup = BeautifulSoup(page_source, 'html.parser')
posts = soup.find_all('faceplate-tracker', {'data-testid': 'search-post'})
for post in posts:
    context = post.get('data-faceplate-tracking-context')
    context = json.loads(context)
    post_context = context['post']
    post_id = post_context['id']
    if post_id in recorded_posts:
        continue
    recorded_posts.append(post_id)
    data.append({
        'title': post_context['title'],
        'url': 'https://www.reddit.com' + post_context['url'],
        'created_timestamp': post_context['created_timestamp'],
        'author_id': post_context['author_id'],
        'post_id': post_id,
        'number_comments': post_context['number_comments'],
        'subreddit_id': post_context['subreddit_id'],
        'subreddit_name': post_context['subreddit_name'],
    })
print(f'Number of posts: {len(data)}')
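
# Note: only the first query is searched above. A sketch of cycling the full
# `queries` list (an illustration, not part of the original run; it only
# tallies result counts per query):
for q in queries:
    driver.get(f"https://www.reddit.com/r/{subreddit}/search/?q={q}&sort={sort_by}")
    sleep(5)
    q_soup = BeautifulSoup(driver.page_source, 'html.parser')
    q_posts = q_soup.find_all('faceplate-tracker', {'data-testid': 'search-post'})
    print(f"Query '{q}': {len(q_posts)} posts")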

# Close the browser.
driver.close()
driver.quit()

# Save the post data.
data_dir = r"C:\Users\keega\Documents\cannlytics\cannabis-data-science\season-4\155-seed-to-smoke\data"
timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
datafile = os.path.join(data_dir, f'fl-medical-trees-posts-{timestamp}.xlsx')
df = pd.DataFrame(data)
df.to_excel(datafile, index=False)
print('Saved post data:', datafile)


#-----------------------------------------------------------------------
# Get post data with PRAW.
#-----------------------------------------------------------------------

# Read the saved post data.
data = pd.read_excel(r"C:\Users\keega\Documents\cannlytics\cannabis-data-science\season-4\155-seed-to-smoke\data\fl-medical-trees-posts-2024-06-25-18-59-50.xlsx")
data = data.to_dict(orient='records')
recorded_posts = [x['post_id'] for x in data]


def initialize_reddit(config):
    """Initialize a PRAW Reddit client from `.env` credentials."""
    reddit = praw.Reddit(
        client_id=config['REDDIT_CLIENT_ID'],
        client_secret=config['REDDIT_SECRET'],
        password=config['REDDIT_PASSWORD'],
        user_agent=config['REDDIT_USER_AGENT'],
        username=config['REDDIT_USERNAME'],
    )
    return reddit
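
# The `.env` file is expected to define the PRAW credentials used above,
# e.g. (all values below are placeholders):
#   REDDIT_CLIENT_ID=your-client-id
#   REDDIT_SECRET=your-client-secret
#   REDDIT_USERNAME=your-username
#   REDDIT_PASSWORD=your-password
#   REDDIT_USER_AGENT=script:your-app:v1.0 (by u/your-username)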

# Keep track of posts that have already been collected.
collected_posts = []

# Initialize a Reddit client.
config = dotenv_values('.env')
reddit = initialize_reddit(config)

# Get each post's content, images, and comments with PRAW.
# The slice allows a partially collected run to be resumed.
all_posts = []
for n, post_data in enumerate(data[len(all_posts):]):

    # Retrieve the post, re-initializing the client and retrying
    # up to two more times on failure.
    post_id = post_data['post_id'].split('_')[-1]
    if post_id in collected_posts:
        print('Post already collected:', post_id)
        continue
    print('Getting data for post:', post_id)
    for attempt in range(3):
        try:
            submission = reddit.submission(id=post_id)
            break
        except Exception:
            if attempt == 2:
                raise
            print('Failed to retrieve post:', post_id)
            print('Waiting 60 seconds to retry...')
            sleep(61)
            reddit = initialize_reddit(config)
    post_content = submission.selftext

    # Collect any image directly attached to the post.
    images = []
    if 'imgur.com' in submission.url or submission.url.endswith(('.jpg', '.jpeg', '.png', '.gif')):
        images.append(submission.url)

    # Collect gallery images, if present. Non-gallery posts raise
    # `AttributeError` when `is_gallery` is accessed.
    try:
        if submission.is_gallery:
            image_dict = submission.media_metadata
            for image_item in image_dict.values():
                try:
                    largest_image = image_item['s']
                    image_url = largest_image['u']
                    images.append(image_url)
                except KeyError:
                    pass
    except AttributeError:
        pass

    # Download each image, skipping images already on file.
    for i, image_url in enumerate(images, start=1):
        file_extension = os.path.splitext(image_url)[-1].split('?')[0]
        filename = f"{post_id}_image_{i}{file_extension}"
        if file_extension not in ['.jpg', '.jpeg', '.png', '.gif']:
            filename = f"{post_id}_image_{i}.jpg"
        outfile = os.path.join(images_directory, filename)
        if os.path.exists(outfile):
            continue
        for attempt in range(3):
            try:
                response = requests.get(image_url, headers={'User-agent': 'CannBot'})
                break
            except Exception:
                if attempt == 2:
                    raise
                print('Failed to download image:', image_url)
                print('Waiting 60 seconds to retry...')
                sleep(60)
        sleep(3.33)
        if response.status_code != 200:
            print('Unsuccessful request for image:', image_url)
            continue
        with open(outfile, 'wb') as file:
            file.write(response.content)
        print(f"Downloaded image: {outfile}")

    # Collect the post's comments.
    comments = []
    submission.comments.replace_more(limit=None)
    for comment in submission.comments.list():
        comments.append({
            'comment_id': comment.id,
            'comment_author': comment.author.name if comment.author else None,
            'comment_body': comment.body,
            'comment_created_utc': datetime.utcfromtimestamp(comment.created_utc).strftime('%Y-%m-%d %H:%M:%S')
        })

    # Record the post's data.
    post_data['post_content'] = post_content
    post_data['upvotes'] = submission.ups
    post_data['downvotes'] = submission.downs
    post_data['images'] = images
    post_data['comments'] = comments
    print('Post data retrieved:', submission.title)
    all_posts.append(post_data)
    sleep(3.33)
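
# The description mentions sentiment ratings. A minimal sketch of one way to
# score comment sentiment with NLTK's VADER (an assumption, not part of the
# original pipeline; requires `nltk.download('vader_lexicon')`):
try:
    from nltk.sentiment.vader import SentimentIntensityAnalyzer
    sia = SentimentIntensityAnalyzer()
    for scored_post in all_posts:
        scores = [
            sia.polarity_scores(c['comment_body'])['compound']
            for c in scored_post.get('comments', [])
        ]
        scored_post['avg_comment_sentiment'] = sum(scores) / len(scores) if scores else None
except (ImportError, LookupError):
    pass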

# Save the post data.
try:
    df = pd.DataFrame(all_posts)
    data_dir = r"C:\Users\keega\Documents\cannlytics\cannabis-data-science\season-4\155-seed-to-smoke\data"
    timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    datafile = os.path.join(data_dir, f'fl-medical-trees-coa-posts-{timestamp}.xlsx')
    df.to_excel(datafile, index=False)
    print('Saved post data:', datafile)
except Exception:
    print('No posts to curate.')


#-----------------------------------------------------------------------
# Scan images for COA URLs.
#-----------------------------------------------------------------------

# Read COA URLs from a log of previously scanned images.
text_file = r"C:\Users\keega\Documents\cannlytics\cannabis-data-science\season-4\158-reported-effects\data\scanned-images.txt"
with open(text_file, 'r') as file:
    lines = file.readlines()
coa_urls = {}
for line in lines:
    if 'COA URL found for post' in line:
        post_id, coa_url = line.split(':', maxsplit=1)
        post_id = 't3_' + post_id.split(' ')[-1].strip()
        coa_url = coa_url.strip()
        post_urls = coa_urls.get(post_id, [])
        post_urls.append(coa_url)
        coa_urls[post_id] = list(set(post_urls))
print('Number of COA URLs:', len(coa_urls))
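
# Each matching log line is expected to have the form written by the image
# scanner below, e.g. (hypothetical values):
#   COA URL found for post abc123: https://example.com/coa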

# Define where the images are saved.
images_directory = 'D://data/reddit/FLMedicalTrees/images'

# Initialize a COA parser.
parser = CoADoc()
temp_path = tempfile.mkdtemp()

# List all of the downloaded images.
image_files = os.listdir(images_directory)
image_files = [os.path.join(images_directory, x) for x in image_files]
print('Number of images:', len(image_files))

# Scan each image for a COA URL (e.g. from a QR code), skipping posts
# with already-known COA URLs. Note: `coa_urls` keys use the full Reddit
# name, e.g. `t3_abc123`, while image filenames do not.
for image_file in image_files:
    post_id = os.path.basename(image_file).split('_')[0]
    key = 't3_' + post_id
    if key in coa_urls:
        continue
    print('Scanning:', image_file)
    post_urls = coa_urls.get(key, [])
    try:
        coa_url = parser.scan(
            image_file,
            temp_path=temp_path,
        )
    except Exception:
        print('Failed to scan:', image_file)
        continue
    if coa_url:
        print(f"COA URL found for post {post_id}: {coa_url}")
        post_urls.append(coa_url)
        coa_urls[key] = list(set(post_urls))
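
# Note: the `COA URL found for post ...` messages printed above match the
# format parsed from `scanned-images.txt`, so saving this script's output
# to that log appears to be how scan results persist between runs.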

# Clean up the temporary directory.
try:
    shutil.rmtree(temp_path)
except Exception:
    pass


#-----------------------------------------------------------------------
# Download COA PDFs.
#-----------------------------------------------------------------------

# Treat any response smaller than this as a failed PDF download.
MIN_FILE_SIZE = 21 * 1024

# Download all of the COA URLs, using Selenium when direct requests fail.
pdf_dir = 'D://data/reddit/FLMedicalTrees/pdfs'
os.makedirs(pdf_dir, exist_ok=True)
redirect_urls = {}

for post_id, urls in coa_urls.items():
    print(f"Downloading COA for post {post_id}: {urls}")
    for i, url in enumerate(urls, start=1):

        # Skip PDFs that have already been downloaded.
        filename = f"{post_id}-coa-{i}.pdf"
        outfile = os.path.join(pdf_dir, filename)
        if os.path.exists(outfile):
            print('Cached:', outfile)
            continue

        # Handle `yourcoa.com` URLs: try the direct download endpoint
        # first, then fall back to Selenium on redirects or small files.
        if 'yourcoa.com' in url:
            base = 'https://yourcoa.com'
            sample_id = url.split('/')[-1].split('?')[0].split('&')[0]
            if sample_id == 'coa-download':
                sample_id = url.split('sample=')[-1]
            try:
                coa_url = f'{base}/coa/download?sample={sample_id}'
                response = requests.get(coa_url, headers=DEFAULT_HEADERS)
                if response.status_code == 200:
                    if len(response.content) < MIN_FILE_SIZE:
                        print('File size is small, retrying with Selenium:', url)
                        response = requests.get(url, allow_redirects=True)
                        if response.status_code == 200:
                            redirected_url = response.url
                            redirect_urls[post_id] = redirected_url  # Record for later merging.
                            download_file_with_selenium(
                                redirected_url,
                                download_dir=pdf_dir,
                            )
                            print('Downloaded with Selenium:', redirected_url)
                    else:
                        with open(outfile, 'wb') as pdf:
                            pdf.write(response.content)
                        print('Downloaded:', outfile)
                else:
                    print('Failed to download, retrying with Selenium:', url)
                    response = requests.get(url, allow_redirects=True)
                    if response.status_code == 200:
                        redirected_url = response.url
                        redirect_urls[post_id] = redirected_url
                        download_file_with_selenium(
                            redirected_url,
                            download_dir=pdf_dir,
                        )
                        print('Downloaded with Selenium:', redirected_url)
            except Exception:
                coa_url = f'{base}/coa/coa-view?sample={sample_id}'
                response = requests.get(coa_url, allow_redirects=True)
                if response.status_code == 200:
                    redirected_url = response.url
                    redirect_urls[post_id] = redirected_url
                    download_file_with_selenium(
                        redirected_url,
                        download_dir=pdf_dir,
                    )
                    print('Downloaded with Selenium:', redirected_url)

        # Download `mete.labdrive.net` URLs with Selenium.
        elif 'mete.labdrive.net' in url:
            download_file_with_selenium(
                url,
                download_dir=pdf_dir,
                method='a',
                tag_name='a',
                filename=f"{post_id}-coa-{i}.pdf",
            )
            print('Downloaded with Selenium:', url)

        # Download any other HTTP URL directly.
        elif url.startswith('http'):
            response = requests.get(url, allow_redirects=True)
            if response.status_code == 200:
                filename = f"{post_id}-coa-{i}.pdf"
                outfile = os.path.join(pdf_dir, filename)
                with open(outfile, 'wb') as file:
                    file.write(response.content)
                print(f"Downloaded COA: {outfile}")
            sleep(1)

        # Skip invalid URLs.
        else:
            print('Invalid URL:', url)
            continue

# Remove duplicate downloads.
remove_duplicate_files(pdf_dir, verbose=True)

# Save the post data with the COA URLs and redirects.
try:
    df = pd.DataFrame(all_posts)
    df['coa_url'] = df['post_id'].map(coa_urls)
    df['redirect_url'] = df['post_id'].map(redirect_urls)
    data_dir = r"C:\Users\keega\Documents\cannlytics\cannabis-data-science\season-4\155-seed-to-smoke\data"
    timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    datafile = os.path.join(data_dir, f'fl-medical-trees-coa-posts-{timestamp}.xlsx')
    df.to_excel(datafile, index=False)
    print('Saved post data:', datafile)
except Exception:
    print('No posts to curate.')


#-----------------------------------------------------------------------
# Parse COA data from the PDFs.
#-----------------------------------------------------------------------

# Parse each downloaded COA PDF with CoADoc.
parser = CoADoc()
pdf_dir = r'D:\data\reddit\FLMedicalTrees\pdfs'
pdf_files = os.listdir(pdf_dir)
pdf_files = [os.path.join(pdf_dir, x) for x in pdf_files]
logging.info('Parsing %i COA PDFs...' % len(pdf_files))
all_coa_data = {}
failed = []
temp_path = tempfile.mkdtemp()
for pdf_file in pdf_files:
    try:
        coa_data = parser.parse_pdf(
            pdf_file,
            temp_path=temp_path,
            verbose=True,
            use_qr_code=False,
        )
    except Exception:
        logging.info('Failed to parse: %s' % pdf_file)
        failed.append(pdf_file)
        continue
    if coa_data:
        logging.info('Parsed: %s' % pdf_file)
        coa_id = os.path.basename(pdf_file).split(' ')[0]
        if isinstance(coa_data, list):
            all_coa_data[coa_id] = coa_data[0]
        elif isinstance(coa_data, dict):
            all_coa_data[coa_id] = coa_data
    else:
        logging.info('Found no data: %s' % pdf_file)

# Clean up the temporary directory.
try:
    shutil.rmtree(temp_path)
except Exception:
    pass

# Compile the COA data, dropping duplicate results.
coa_df = pd.DataFrame(list(all_coa_data.values()))
coa_df['coa_id'] = [x.replace('.pdf', '') for x in list(all_coa_data.keys())]
coa_df.drop_duplicates(subset=['coa_id', 'sample_id', 'results_hash'], inplace=True)

# Save the COA data.
data_dir = r"C:\Users\keega\Documents\cannlytics\cannabis-data-science\season-4\155-seed-to-smoke\data"
timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
datafile = os.path.join(data_dir, f'fl-medical-trees-coa-data-{timestamp}.xlsx')
coa_df.to_excel(datafile, index=False)
logging.info('Saved %i COA results: %s' % (len(coa_df), datafile))

# Save the list of PDFs that failed to parse.
with open(os.path.join(data_dir, f'unidentified-coas-{timestamp}.json'), 'w') as f:
    json.dump(failed, f, indent=4)
logging.info('Saved list of failed PDFs.')


#-----------------------------------------------------------------------
# Merge the post data with the COA data.
#-----------------------------------------------------------------------

# Define where the data lives.
data_dir = r"C:\Users\keega\Documents\cannlytics\cannabis-data-science\season-4\155-seed-to-smoke\data"

# Read all of the saved post data.
post_datafiles = [os.path.join(data_dir, x) for x in os.listdir(data_dir) if 'posts' in x and 'results' not in x]
posts = pd.concat([pd.read_excel(x) for x in post_datafiles])
posts.drop_duplicates(subset=['post_id', 'coa_url', 'redirect_url'], inplace=True)
print('Number of posts:', len(posts))

# Read all of the saved COA data.
coa_datafiles = [os.path.join(data_dir, x) for x in os.listdir(data_dir) if 'coa-data' in x]
results = pd.concat([pd.read_excel(x) for x in coa_datafiles])
results.drop_duplicates(subset=['sample_id', 'results_hash'], inplace=True)
print('Number of COA results:', len(results))

# Match each COA result to its post, trying the post ID first, then the
# recorded COA and redirect URLs.
post_coa_data = {}
for index, row in results.iterrows():
    coa_id = row['coa_id'].replace('t3_', '').split('-coa')[0]
    matches = posts.loc[posts['post_id'].str.contains(coa_id)]
    try:
        if matches.empty:
            matches = posts.loc[posts['coa_url'].str.contains(coa_id)]
        if matches.empty:
            matches = posts.loc[posts['redirect_url'].str.contains(coa_id)]
    except Exception:
        pass
    if not matches.empty:
        post_id = matches['post_id'].values[0]
        post_coa_data[post_id] = row.to_dict()

# Merge the COA data with the post data, keeping only posts with results.
coa_data_df = pd.DataFrame.from_dict(post_coa_data, orient='index')
coa_data_df.reset_index(inplace=True)
coa_data_df.rename(columns={'index': 'post_id'}, inplace=True)
merged_df = posts.merge(coa_data_df, on='post_id', how='left')
merged_df = merged_df.loc[~merged_df['coa_id'].isna()]
print('Number of posts with COA data:', len(merged_df))

# Save the posts with their COA data.
timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
datafile = os.path.join(data_dir, f'fl-medical-trees-posts-with-results-{timestamp}.xlsx')
merged_df.to_excel(datafile, index=False)
print('Saved %i posts with COA data:' % len(merged_df), datafile)