import asyncio
import glob
import logging
import multiprocessing
import os
import xml.etree.ElementTree as ET
from typing import List, Optional, Set

import pandas as pd
from datasets import Dataset
from dotenv import load_dotenv
from monumenten import MonumentenClient

# Load environment variables (e.g. HF_TOKEN) from a local .env file.
load_dotenv()

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Input directory and output paths.
XML_DIRECTORY = "vbo_xmls/"
INTERMEDIATE_CSV_PATH = "verblijfsobjecten_ids.csv"
FINAL_CSV_PATH = "monumenten.csv"

# Hugging Face Hub settings.
HF_REPO_ID = "woonstadrotterdam/monumenten"
HF_TOKEN = os.getenv("HF_TOKEN")
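
# Pipeline overview:
#   1. Scan XML_DIRECTORY for BAG *.xml files.
#   2. Extract valid verblijfsobject identificaties in parallel.
#   3. Enrich them with monument data via MonumentenClient.
#   4. Save the result to CSV and push it to the Hugging Face Hub.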


def is_valid_identificatie(id_value: Optional[str]) -> bool:
    """
    Validate that the value is a proper verblijfsobject ID.
    Valid IDs must be 16 characters long, consist of digits,
    and have '01' at positions 4-5 (0-indexed).
    Example: 'xxxx01xxxxxxxxxx' where x is a digit (e.g., '0304010000269586').
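
    A quick doctest (the second ID is a made-up value whose object-type
    code is '02' rather than '01'):

    >>> is_valid_identificatie("0304010000269586")
    True
    >>> is_valid_identificatie("0304020000269586")
    False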
| """ |
| if id_value is None: |
| return False |
| return len(id_value) == 16 and id_value.isdigit() and id_value[4:6] == "01" |
|
|
|
|
| def extract_identificaties(xml_path: str) -> List[str]: |
| """ |
| Extract all valid identificatie values from a single XML file using iterative parsing. |
| """ |
| identificaties = [] |
| try: |
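        # iterparse streams parse events as the file is read, so large
        # BAG extracts can be scanned without building the full tree in memory.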
        context = ET.iterparse(xml_path, events=("end",))
        for _event, elem in context:
            if elem.tag.endswith("identificatie"):
                id_value = elem.text
                if is_valid_identificatie(id_value):
                    identificaties.append(id_value)
            # Clear each element once processed to limit memory use while streaming.
            elem.clear()

        if identificaties:
            logger.debug(
                f"Found {len(identificaties)} valid identificatie values in {xml_path}"
            )
        return identificaties
    except Exception as e:
        logger.error(f"Error parsing XML file {xml_path}: {e}")
        return []


def get_xml_files() -> List[str]:
| """ |
| Get list of XML files from the specified directory. |
| """ |
| xml_files = glob.glob(os.path.join(XML_DIRECTORY, "*.xml")) |
| if not xml_files: |
| logger.error(f"No XML files found in {XML_DIRECTORY}") |
| else: |
| logger.info(f"Found {len(xml_files)} XML files in {XML_DIRECTORY}") |
| return xml_files |
|
|
|
|
| def process_files_parallel(xml_files: List[str]) -> Set[str]: |
| """ |
| Process XML files in parallel using multiprocessing. |
| Returns a set of unique identificaties. |
| """ |
| unique_identificaties = set() |
|
|
| logger.info(f"Starting parallel processing of {len(xml_files)} XML files...") |
| with multiprocessing.Pool() as pool: |
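        # imap_unordered yields each file's result as soon as a worker
        # finishes, regardless of submission order, so progress is visible
        # without waiting for slow files.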
        results = pool.imap_unordered(extract_identificaties, xml_files)
        for i, file_identificaties in enumerate(results):
            unique_identificaties.update(file_identificaties)
            if (i + 1) % 100 == 0:
                logger.info(
                    f"Processed {i + 1}/{len(xml_files)} files. "
                    f"Current unique identificaties: {len(unique_identificaties)}"
                )

    logger.info(
        f"All files processed. Total unique identificaties found: {len(unique_identificaties)}"
    )
    return unique_identificaties


def create_identificaties_dataframe(unique_ids: Set[str]) -> Optional[pd.DataFrame]:
| """ |
| Create and save DataFrame from unique identificaties. |
| Returns the DataFrame or None if no valid identificaties found. |
| """ |
| if not unique_ids: |
| logger.info("No valid identificaties found.") |
| return None |
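
    # Sort the IDs so the CSV output is deterministic across runs
    # (set iteration order is not).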
    df = pd.DataFrame(sorted(unique_ids), columns=["bag_verblijfsobject_id"])
    logger.info(f"Created DataFrame with {len(df)} unique valid identificaties.")

    df.to_csv(INTERMEDIATE_CSV_PATH, index=False)
    logger.info(f"Saved DataFrame to {INTERMEDIATE_CSV_PATH}")

    print("\nFirst few rows of the extracted identificaties DataFrame:")
    print(df.head())
    print("\nIdentificaties DataFrame Info:")
    df.info()

    return df


async def process_with_monumenten_client(df: pd.DataFrame) -> Optional[pd.DataFrame]:
| """ |
| Process the DataFrame using MonumentenClient. |
| Returns processed DataFrame or None if processing fails. |
| """ |
| if df.empty: |
| logger.warning("Empty DataFrame provided to MonumentenClient.") |
| return None |
|
|
| logger.info(f"Processing {len(df)} identificaties with MonumentenClient...") |
| try: |
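        # MonumentenClient is used as an async context manager so any
        # resources it opens are released when the block exits.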
        async with MonumentenClient() as client:
            result_df = await client.process_from_df(
                df=df, verblijfsobject_id_col="bag_verblijfsobject_id"
            )
        logger.info("Finished processing with MonumentenClient.")
        return result_df
    except Exception as e:
        logger.error(f"Error processing with MonumentenClient: {e}")
        return None


def save_final_results(result_df: Optional[pd.DataFrame]) -> None:
| """ |
| Save the final results to CSV if valid data is present. |
| """ |
| if result_df is not None and not result_df.empty: |
| result_df.to_csv(FINAL_CSV_PATH, index=False) |
| logger.info(f"Successfully saved final monumenten data to {FINAL_CSV_PATH}") |
| print(f"\nFinal data saved to {FINAL_CSV_PATH}") |
| print(result_df.head()) |
| |
| if push_to_huggingface(result_df): |
| print(f"\nData successfully pushed to Hugging Face dataset: {HF_REPO_ID}") |
| else: |
| print("\nFailed to push data to Hugging Face. Check logs for details.") |
| elif result_df is not None and result_df.empty: |
| logger.info("Processing resulted in an empty DataFrame. Nothing to save.") |
| print("\nProcessing resulted in an empty DataFrame.") |
| else: |
| logger.warning("No valid data to save. Process did not complete successfully.") |
| print("\nProcess did not complete successfully or returned no data.") |
|
|
|
|
| def push_to_huggingface(result_df: pd.DataFrame) -> bool: |
| """ |
| Push the final results to Hugging Face datasets hub using datasets.push_to_hub |
| with a custom split name. |
| Returns True if successful, False otherwise. |
| """ |
| if not HF_TOKEN: |
| logger.error("No Hugging Face token found in environment variables (HF_TOKEN)") |
| return False |
|
|
| if result_df.empty: |
| logger.warning( |
| "Result DataFrame is empty. Skipping push of main dataset to Hugging Face." |
| ) |
| else: |
| logger.info( |
| f"Converting DataFrame with {len(result_df)} rows to Hugging Face Dataset." |
| ) |
|
|
| hf_dataset_single = Dataset.from_pandas(result_df) |
|
|
| hf_dataset_single.push_to_hub( |
| repo_id=HF_REPO_ID, |
| commit_message=f"Update monumenten dataset", |
| token=HF_TOKEN, |
| ) |
| logger.info(f"Successfully pushed dataset dictionary to {HF_REPO_ID}") |


async def main() -> Optional[pd.DataFrame]:
| """ |
| Main function orchestrating the entire process. |
| Returns the final processed DataFrame or None if processing fails. |
| """ |
| |
| xml_files = get_xml_files() |
| if not xml_files: |
| return None |
|
|
| |
| unique_identificaties = process_files_parallel(xml_files) |
|
|
| |
| df = create_identificaties_dataframe(unique_identificaties) |
| if df is None: |
| return None |
|
|
| |
| result_df = await process_with_monumenten_client(df) |
| return result_df |
|
|
|
|
| if __name__ == "__main__": |
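    # Run the async pipeline, then persist and publish whatever it returns.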
    result_dataframe = asyncio.run(main())

    save_final_results(result_dataframe)