import csv
import os
import re


def delete_existing_state_csv_files():
    """Delete any existing two-letter state CSV files (e.g. CA.csv) in the current directory."""
    for file in os.listdir('.'):
        if re.match(r'^[A-Z]{2}\.csv$', file, re.I):
            os.remove(file)


def clean_column_name(column_name):
    """Remove spaces and punctuation from a column name, keeping only letters."""
    return re.sub(r'[^a-zA-Z]', '', column_name)


def process_file(input_file_path):
    # Delete existing state CSV files before processing
    delete_existing_state_csv_files()

    # Open the input file for reading
    with open(input_file_path, mode='r', encoding='utf-8') as input_file:
        reader = csv.DictReader(input_file)
        # Prepare headers with cleaned column names, truncated to the first 107 fields
        headers = [clean_column_name(header) for header in reader.fieldnames[:107]]

        state_files = {}    # Open file handles, keyed by state
        state_writers = {}  # DictWriter for each open state file

        for row in reader:
            state = row['Provider Business Mailing Address State Name']
            # Skip rows whose state is not exactly two letters
            if not re.match(r'^[A-Z]{2}$', state, re.I):
                continue
            # Normalize case so e.g. 'ca' and 'CA' go to the same file
            state = state.upper()

            # Generate the output file name for the state
            file_name = f'{state}.csv'

            # The first time we see a state, open its file and write the header
            if state not in state_files:
                state_file = open(file_name, mode='w', newline='', encoding='utf-8')
                writer = csv.DictWriter(state_file, fieldnames=headers)
                writer.writeheader()
                state_files[state] = state_file
                state_writers[state] = writer

            # Clean the column names and truncate each row to the same 107 fields as the header
            cleaned_row = {clean_column_name(k): v for k, v in list(row.items())[:107]}
            state_writers[state].writerow(cleaned_row)

        # Close all open state files
        for file in state_files.values():
            file.close()


if __name__ == "__main__":
    input_file_path = 'npidata_pfile_20050523-20240107.csv'  # Replace with the path to your large file
    process_file(input_file_path)