|
""" |
|
Update Readme | Cannabis Results |
|
Copyright (c) 2024 Cannlytics |
|
|
|
Authors: |
|
Keegan Skeate <https://github.com/keeganskeate> |
|
Candace O'Sullivan-Sutherland <https://github.com/candy-o> |
|
Created: 8/14/2024 |
|
Updated: 8/14/2024 |
|
License: <https://github.com/cannlytics/cannlytics/blob/main/LICENSE> |
|
""" |
|
import os |
|
import pandas as pd |
|
|
|
|
|
# Directory containing the `get_results_*` algorithm scripts.
ALGORITHMS_PATH = '../algorithms'

# Directory containing per-state folders of `*-results-latest.csv` datafiles.
DATA_PATH = '../data'

# The README whose automated table is rewritten (single quotes for consistency).
README_PATH = '../readme.md'
|
|
|
|
|
def extract_data_sources(file_path):
    """Extract the list of data sources from an algorithm's docstring.

    Looks for a `Data Sources:` section inside the file's docstring and
    collects every `- ` bullet entry up to the closing triple quote.

    Args:
        file_path (str): Path to a Python algorithm file.

    Returns:
        list: The bullet entries found, stripped of the `- ` marker,
        or an empty list when no section is present.
    """
    with open(file_path, 'r', encoding='utf-8') as source_file:
        text = source_file.read()
    section_start = text.find('Data Sources:')
    section_end = text.find('"""', section_start)
    # Guard clause: no section or no closing triple quote means no sources.
    if section_start == -1 or section_end == -1:
        return []
    section = text[section_start:section_end]
    return [
        entry.strip()[2:].strip()
        for entry in section.split('\n')
        if entry.strip().startswith('- ')
    ]
|
|
|
|
|
def count_observations(file_path):
    """Count the number of observations (rows) in a lab results datafile.

    Tries to read the file as a CSV first; on any failure, falls back to
    an Excel workbook with the same base name and a `.xlsx` extension.

    Args:
        file_path (str): Path to a `.csv` results datafile.

    Returns:
        int: The number of rows, or 0 if neither file could be read.
    """
    try:
        df = pd.read_csv(file_path, low_memory=False)
        return len(df)
    except Exception:
        # Best-effort fallback: the file may actually be an Excel workbook.
        # FIX: swap only the extension — `str.replace('.csv', '.xlsx')`
        # would corrupt paths containing '.csv' elsewhere in the name.
        excel_path = os.path.splitext(file_path)[0] + '.xlsx'
        try:
            df = pd.read_excel(excel_path)
            return len(df)
        except Exception as error:
            print(f'Error reading {excel_path}: {error}')
            return 0
|
|
|
|
|
def update_readme():
    """Update the README.md file with the latest dataset information.

    Collects data sources from each algorithm script, counts observations
    in each state's `*-results-latest.csv` datafile, then rewrites the
    markdown table that follows the `<!-- Automated Table -->` marker.

    Raises:
        ValueError: If the README does not contain the table marker line.
    """

    # Map each state key to the data sources listed in its algorithm.
    # NOTE(review): `algorithms` is collected but not yet used below —
    # presumably intended for a future sources column; confirm.
    algorithms = {}
    for root, _, files in os.walk(ALGORITHMS_PATH):
        for file in files:
            if file.endswith('.py') and file not in ['main.py', 'parse_coas_ai.py']:
                key = file.replace('get_results_', '').replace('.py', '').replace('_', '-')
                file_path = os.path.join(root, file)
                sources = extract_data_sources(file_path)
                algorithms[key] = sources

    # Count the observations in each state's latest results file,
    # keyed by the name of the directory containing the file.
    observations = {}
    for root, _, files in os.walk(DATA_PATH):
        for file in files:
            if file.endswith('-results-latest.csv'):
                key = root.split(os.sep)[-1]
                file_path = os.path.join(root, file)
                count = count_observations(file_path)
                observations[key] = count
                print(f'{key}: {count}')

    # Build the markdown table rows, one per dataset.
    dataset_rows = []
    for key in sorted(observations.keys()):
        state_abbr = key.split('-')[0].upper()
        dataset = f'`{key}`'
        # FIX: look up the full key. The previous
        # `observations.get(key.split('-')[0], '')` missed hyphenated
        # keys, and its `''` default made `{count:,}` raise ValueError.
        count = observations[key]
        row = f'| {dataset} | {state_abbr} | {count:,} |'
        dataset_rows.append(row)

    # Assemble the replacement table (header, separator, then rows).
    new_table_content = (
        '| Subset | State | Observations |\n'
        '|--------|------|--------------|\n'
        + '\n'.join(dataset_rows) + '\n'
    )

    # Read the current README contents.
    with open(README_PATH, 'r', encoding='utf-8') as readme_file:
        readme_content = readme_file.readlines()

    # Locate the line after the marker; `.index` raises if it is missing,
    # which is the desired loud failure rather than silently appending.
    table_start_index = readme_content.index('<!-- Automated Table -->\n') + 1

    # Advance past the existing table (all consecutive '|' lines).
    table_end_index = table_start_index
    while table_end_index < len(readme_content) and readme_content[table_end_index].startswith('|'):
        table_end_index += 1

    # Splice the new table in place of the old one.
    readme_content = (
        readme_content[:table_start_index]
        + [new_table_content]
        + readme_content[table_end_index:]
    )

    # Write the updated README back to disk.
    with open(README_PATH, 'w', encoding='utf-8') as readme_file:
        readme_file.writelines(readme_content)

    print('README.md updated successfully.')
|
|
|
|
|
|
|
|
|
# Entry point: regenerate the README's automated dataset table when the
# script is run directly (no-op on import).
if __name__ == "__main__":

    update_readme()
|
|