File size: 3,848 Bytes
d1ae506 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 |
"""
Update Readme | Cannabis Results
Copyright (c) 2024 Cannlytics
Authors:
Keegan Skeate <https://github.com/keeganskeate>
Candace O'Sullivan-Sutherland <https://github.com/candy-o>
Created: 8/14/2024
Updated: 8/14/2024
License: <https://github.com/cannlytics/cannlytics/blob/main/LICENSE>
"""
import os
import pandas as pd
# Path constants
# Directory of the `get_results_*.py` collection algorithms, relative to this script.
ALGORITHMS_PATH = '../algorithms'
# Directory tree containing the `*-results-latest.csv` data files.
DATA_PATH = '../data'
# The README whose dataset table is rewritten below the `<!-- Automated Table -->` marker.
README_PATH = "../readme.md"
def extract_data_sources(file_path):
    """Collect the data sources listed in an algorithm file's docstring.

    Scans the file for a 'Data Sources:' section terminated by a closing
    triple quote and returns each entry written as a '- ' bullet line.

    Args:
        file_path (str): Path to the algorithm's Python file.

    Returns:
        list: The bullet entries, stripped of the '- ' prefix; empty when
        no 'Data Sources:' section is found.
    """
    with open(file_path, 'r', encoding='utf-8') as algorithm_file:
        text = algorithm_file.read()
    section_start = text.find('Data Sources:')
    section_end = text.find('"""', section_start)
    if section_start == -1 or section_end == -1:
        return []
    section = text[section_start:section_end]
    return [
        line.strip()[2:].strip()
        for line in section.split('\n')
        if line.strip().startswith('- ')
    ]
def count_observations(file_path):
    """Count the number of observations (rows) in a data file.

    Reads the file as a CSV first; if that fails for any reason, falls
    back to an Excel workbook with the same stem (`.csv` -> `.xlsx`).

    Args:
        file_path (str): Path to a `.csv` data file.

    Returns:
        int: The number of rows, or 0 if neither file could be read.
    """
    try:
        return len(pd.read_csv(file_path, low_memory=False))
    except Exception:
        # CSV read failed — try the Excel export with the same name.
        # (If the path has no '.csv' suffix, this retries the same path.)
        excel_path = file_path.replace('.csv', '.xlsx')
        try:
            return len(pd.read_excel(excel_path))
        except Exception as error:
            # Best-effort: report the failure and count the file as empty
            # rather than aborting the whole README update.
            print(f'Error reading {excel_path}: {error}')
            return 0
def update_readme():
    """Update the README.md file with the latest dataset information.

    Walks `ALGORITHMS_PATH` to collect each algorithm's data sources,
    counts observations in each `*-results-latest.csv` under `DATA_PATH`,
    and rewrites the dataset table that follows the
    `<!-- Automated Table -->` marker in the README.

    Raises:
        ValueError: If the README has no `<!-- Automated Table -->` marker.
    """
    # Extract data sources from algorithms.
    algorithms = {}
    for root, _, files in os.walk(ALGORITHMS_PATH):
        for file in files:
            if file.endswith('.py') and file not in ['main.py', 'parse_coas_ai.py']:
                # e.g. 'get_results_ca_results.py' -> 'ca-results'.
                key = file.replace('get_results_', '').replace('.py', '').replace('_', '-')
                file_path = os.path.join(root, file)
                algorithms[key] = extract_data_sources(file_path)

    # Count observations in data files, keyed by the containing directory.
    observations = {}
    for root, _, files in os.walk(DATA_PATH):
        for file in files:
            if file.endswith('-results-latest.csv'):
                key = root.split(os.sep)[-1]
                file_path = os.path.join(root, file)
                count = count_observations(file_path)
                observations[key] = count
                print(f'{key}: {count}')

    # Build the new table rows.
    dataset_rows = []
    for key in sorted(observations.keys()):
        state_abbr = key.split('-')[0].upper()
        dataset = f'`{key}`'
        # FIX: look up by the full key. The previous code looked up only
        # the state prefix (`key.split('-')[0]`), which missed every
        # hyphenated key, defaulted to '', and then crashed because the
        # ',' format spec is invalid for strings.
        count = observations[key]
        dataset_rows.append(f'| {dataset} | {state_abbr} | {count:,} |')
    new_table_content = (
        '| Subset | State | Observations |\n'
        '|--------|------|--------------|\n'
        + '\n'.join(dataset_rows) + '\n'
    )

    # Replace the old table in the README.
    with open(README_PATH, 'r', encoding='utf-8') as readme_file:
        readme_content = readme_file.readlines()
    # The table begins on the line after the marker.
    table_start_index = readme_content.index('<!-- Automated Table -->\n') + 1
    # Skip past the old table (contiguous lines starting with '|').
    table_end_index = table_start_index
    while table_end_index < len(readme_content) and readme_content[table_end_index].startswith('|'):
        table_end_index += 1
    readme_content = (
        readme_content[:table_start_index]
        + [new_table_content]
        + readme_content[table_end_index:]
    )
    with open(README_PATH, 'w', encoding='utf-8') as readme_file:
        readme_file.writelines(readme_content)
    print('README.md updated successfully.')
# === Tests ===
# [✓] Tested: 2024-08-14 by Keegan Skeate <keegan@cannlytics>
if __name__ == "__main__":
    # Script entry point: regenerate the README's automated dataset table.
    update_readme()
|