"""
Load document data from text files into a DuckDB database.

This script scans the data/document directory for text files and loads them
into a local DuckDB database. Documents are organized by type (sb/hb), congress,
and document number.

Directory structure: data/document/{type}/{congress}/{range}/{TYPE}-{number}.txt
Example: data/document/sb/20/00001-01000/SB-00002.txt
    - type: sb (senate bill) or hb (house bill)
    - congress: 20 (20th congress)
    - document_number: 2
"""

import sys
import argparse
from pathlib import Path
from typing import Optional
import duckdb
import logging
from datetime import datetime
import re

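# Make the project root importable when this script is run directly
# (e.g. "python scripts/load_documents_to_db.py"), so the schemas and config
# imports below can be resolved.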
sys.path.insert(0, str(Path(__file__).parent.parent))

from schemas.document import DOCUMENT_SCHEMA
from config import DATABASE_PATH, DOCUMENT_DATA_DIR


def parse_document_path(file_path: Path) -> dict:
    """
    Parse document file path to extract metadata.

    Args:
        file_path: Path to document file

    Returns:
        Dict with document_type, congress, document_number

    Example:
        data/document/sb/20/00001-01000/SB-00002.txt ->
        {
            'document_type': 'sb',
            'congress': 20,
            'document_number': 2
        }
    """
    parts = file_path.parts

    try:
        doc_index = parts.index('document')
        document_type = parts[doc_index + 1].lower()
        congress = int(parts[doc_index + 2])

        filename = file_path.stem
        match = re.match(r'^[A-Z]+-(\d+)$', filename)
        if match:
            document_number = int(match.group(1))
        else:
            raise ValueError(f"Cannot parse document number from filename: {filename}")

        return {
            'document_type': document_type,
            'congress': congress,
            'document_number': document_number
        }
    except (ValueError, IndexError) as e:
        raise ValueError(f"Cannot parse document path: {file_path}") from e


def get_document_files(data_dir: Path):
    """Recursively find all .txt files in the document data directory (generator)."""
    return data_dir.glob('**/*.txt')


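# Both the CREATE TABLE and INSERT statements are generated from DOCUMENT_SCHEMA
# (schemas/document.py), so the column definitions live in a single place.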
def create_documents_table(conn: duckdb.DuckDBPyConnection):
    """Create the documents table using the explicit schema."""
    create_sql = DOCUMENT_SCHEMA.get_create_table_sql()

    print(f"Creating table '{DOCUMENT_SCHEMA.table_name}' with {len(DOCUMENT_SCHEMA.schema)} columns:")
    print(f"  Fields: {', '.join(DOCUMENT_SCHEMA.field_order)}")

    conn.execute(create_sql)


def load_documents_to_db(
    data_dir: Path,
    db_path: Path,
    export_parquet: bool = False,
    parquet_path: Optional[Path] = None,
    batch_size: int = 1000,
    progress_interval: int = 100
):
    """Load all document text files into the DuckDB database with batch inserts."""

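    # Per-run error log next to the database; filemode='w' truncates it on each run.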
    error_log_path = db_path.parent / 'load_documents_errors.log'
    logging.basicConfig(
        filename=str(error_log_path),
        level=logging.ERROR,
        format='%(asctime)s - %(message)s',
        filemode='w'
    )

    print(f"Connecting to database: {db_path}")
    print(f"Error log: {error_log_path}")
    print(f"Batch size: {batch_size}")
    conn = duckdb.connect(str(db_path))

    create_documents_table(conn)

    insert_sql = DOCUMENT_SCHEMA.get_insert_sql()

    print("\nScanning for document files...")
    doc_files = list(get_document_files(data_dir))
    total_files = len(doc_files)
    print(f"Found {total_files} files to process")

    print("\nLoading document data...")
    loaded_count = 0
    error_count = 0
    processed_count = 0

    document_batch = []

    start_time = datetime.now()

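    # Rows are buffered in document_batch and committed in explicit transactions
    # of up to batch_size rows; flush_batch() writes the pending buffer and clears it.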
    def flush_batch():
        """Helper to insert accumulated batch and commit."""
        nonlocal loaded_count

        if not document_batch:
            return

        batch_size_to_commit = len(document_batch)
        print(f"  Committing batch of {batch_size_to_commit} records...", end='', flush=True)

        try:
            conn.execute("BEGIN TRANSACTION")
            conn.executemany(insert_sql, document_batch)
            conn.execute("COMMIT")
            loaded_count += len(document_batch)
            print(" done!")

        except Exception as e:
            conn.execute("ROLLBACK")
            logging.error(f"Batch insert failed: {e}")
            print("\n  Warning: Batch insert failed, see error log")

        document_batch.clear()

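    # Main load loop: a failure on a single file is logged and counted but does not
    # stop the run; Ctrl+C flushes any pending batch and exits with a partial summary.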
    try:
        for doc_file in doc_files:
            try:
                metadata = parse_document_path(doc_file)

                with open(doc_file, 'r', encoding='utf-8') as f:
                    content = f.read()

                doc_id = f"{metadata['document_type']}-{metadata['congress']}-{metadata['document_number']}"

                # Values must line up with the column order used by insert_sql
                # (assumed to follow DOCUMENT_SCHEMA.field_order).
                values = [
                    doc_id,
                    metadata['document_type'],
                    metadata['congress'],
                    metadata['document_number'],
                    str(doc_file),
                    content
                ]
                document_batch.append(values)

            except Exception as e:
                error_count += 1
                logging.error(f"{doc_file}: {e}")

            processed_count += 1

            if len(document_batch) >= batch_size:
                flush_batch()

            if processed_count % progress_interval == 0 or processed_count == total_files:
                pct = (processed_count / total_files * 100) if total_files > 0 else 0
                pending = len(document_batch)
                total_loaded = loaded_count + pending
                print(f"  [{pct:5.1f}%] {processed_count}/{total_files} files | "
                      f"{total_loaded} loaded ({loaded_count} committed, {pending} pending) | "
                      f"{error_count} errors")

    except KeyboardInterrupt:
        print("\n\n*** Interrupted by user (Ctrl+C) ***")
        print("Flushing any pending records to database...")
        flush_batch()

        end_time = datetime.now()
        total_seconds = (end_time - start_time).total_seconds()
        total_minutes = total_seconds / 60

        print("\nPartial load completed:")
        print(f"  Files processed: {processed_count}/{total_files}")
        print(f"  Records loaded: {loaded_count}")
        print(f"  Errors: {error_count}")
        print(f"  Time elapsed: {total_minutes:.1f} minutes ({total_seconds:.0f} seconds)")
        conn.close()
        sys.exit(1)

    flush_batch()

    end_time = datetime.now()
    total_seconds = (end_time - start_time).total_seconds()
    total_minutes = total_seconds / 60

    print(f"\n{'='*60}")
    print("Load complete!")
    print(f"  Total files processed: {processed_count}")
    print(f"  Successfully loaded: {loaded_count}")
    print(f"  Errors: {error_count}")
    print(f"  Total time: {total_minutes:.1f} minutes ({total_seconds:.0f} seconds)")
    if error_count > 0:
        print(f"  Error details logged to: {error_log_path}")

    documents_count = conn.execute("SELECT COUNT(*) as total FROM documents").fetchone()
    print(f"  Total documents in database: {documents_count[0]}")
    print(f"{'='*60}")

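    # Quick sanity check: print a small sample and per-type statistics for the
    # freshly loaded table.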
print("\nSample documents (first 5 rows):") |
|
|
sample = conn.execute(""" |
|
|
SELECT id, document_type, congress, document_number, LENGTH(content) as content_length |
|
|
FROM documents |
|
|
ORDER BY document_type, congress, document_number |
|
|
LIMIT 5 |
|
|
""").fetchall() |
|
|
|
|
|
for row in sample: |
|
|
print(f" {row[0]}: {row[1].upper()}-{row[3]} (Congress {row[2]}, {row[4]} chars)") |
|
|
|
|
|
|
|
|
print("\nDocument statistics by type:") |
|
|
stats = conn.execute(""" |
|
|
SELECT |
|
|
document_type, |
|
|
COUNT(*) as count, |
|
|
MIN(congress) as min_congress, |
|
|
MAX(congress) as max_congress, |
|
|
MIN(document_number) as min_doc_num, |
|
|
MAX(document_number) as max_doc_num |
|
|
FROM documents |
|
|
GROUP BY document_type |
|
|
ORDER BY document_type |
|
|
""").fetchall() |
|
|
|
|
|
for row in stats: |
|
|
print(f" {row[0].upper()}: {row[1]} documents (Congress {row[2]}-{row[3]}, " |
|
|
f"Doc# {row[4]}-{row[5]})") |
|
|
|
|
|
|
|
|
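    # Optional export: DuckDB's COPY documents TO ... (FORMAT PARQUET) writes the
    # entire documents table to a single Parquet file at parquet_path.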
    if export_parquet:
        print(f"\n{'='*60}")
        print("Exporting to Parquet format...")
        print(f"  Output: {parquet_path}")

        try:
            conn.execute(f"COPY documents TO '{parquet_path}' (FORMAT PARQUET)")
            if parquet_path.exists():
                file_size = parquet_path.stat().st_size
                file_size_mb = file_size / (1024 * 1024)
                print(f"  Successfully exported documents.parquet ({file_size_mb:.2f} MB)")
            else:
                print("  Warning: Export completed but file not found")
        except Exception as e:
            print(f"  Error exporting: {e}")

        print(f"{'='*60}")

    conn.close()
    print(f"\nDatabase saved to: {db_path}")


def main():
    """Main entry point."""
    parser = argparse.ArgumentParser(
        description='Load document data from text files into a DuckDB database',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Use the default database path (databases/data.duckdb)
  python scripts/load_documents_to_db.py

  # Specify a custom database path
  python scripts/load_documents_to_db.py --db-path /path/to/custom.duckdb

  # Use a different data directory
  python scripts/load_documents_to_db.py --data-dir /path/to/document/data

  # Export to Parquet for the Hugging Face dataset viewer
  python scripts/load_documents_to_db.py --export-parquet
        """
    )
    parser.add_argument(
        '--db-path',
        type=Path,
        default=DATABASE_PATH,
        help=f'Path to the DuckDB database (default: {DATABASE_PATH})'
    )
    parser.add_argument(
        '--data-dir',
        type=Path,
        default=DOCUMENT_DATA_DIR,
        help=f'Path to the document data directory (default: {DOCUMENT_DATA_DIR})'
    )
    parser.add_argument(
        '--export-parquet',
        action='store_true',
        help='Export the documents table to Parquet format after loading'
    )
    parser.add_argument(
        '--parquet-path',
        type=Path,
        default=Path(__file__).parent.parent / 'databases' / 'documents.parquet',
        help='Path for the exported Parquet file (default: databases/documents.parquet)'
    )
    parser.add_argument(
        '--batch-size',
        type=int,
        default=1000,
        help='Number of records to insert per batch/transaction (default: 1000)'
    )
    parser.add_argument(
        '--progress-interval',
        type=int,
        default=100,
        help='Show progress every N files (default: 100)'
    )

    args = parser.parse_args()

    if not args.data_dir.exists():
        print(f"Error: Data directory not found: {args.data_dir}")
        sys.exit(1)

    args.db_path.parent.mkdir(parents=True, exist_ok=True)

    load_documents_to_db(
        args.data_dir,
        args.db_path,
        args.export_parquet,
        args.parquet_path,
        args.batch_size,
        args.progress_interval
    )


if __name__ == '__main__':
    main()