awacke1 committed on
Commit
62dfb13
•
1 Parent(s): fdbd255

Upload 5 files

NPPES_Data_Dissemination_CodeValues.pdf ADDED
Binary file (557 kB).
 
NPPES_Data_Dissemination_Readme.pdf ADDED
Binary file (570 kB).
 
app.py ADDED
@@ -0,0 +1,59 @@
+ import streamlit as st
+ import csv
+ import os
+ import re
+
+ def delete_existing_state_csv_files():
+     """Delete any existing two-letter state CSV files (e.g. 'CA.csv') in the current directory."""
+     for file in os.listdir('.'):
+         if re.match(r'^[A-Z]{2}\.csv$', file, re.I):
+             os.remove(file)
+
+ def clean_column_name(column_name):
+     """Remove spaces and punctuation from a column name, keeping only alphabetic characters."""
+     return re.sub(r'[^a-zA-Z]', '', column_name)
+
+ def process_file(input_file_path):
+     # Delete existing state CSV files before processing
+     delete_existing_state_csv_files()
+
+     # Open the input file for reading
+     with open(input_file_path, mode='r', encoding='utf-8') as input_file:
+         reader = csv.DictReader(input_file)
+         # Prepare headers with cleaned column names, truncated to the first 107 fields
+         headers = [clean_column_name(header) for header in reader.fieldnames[:107]]
+
+         state_files = {}  # Open file handles, keyed by two-letter state code
+
+         for row in reader:
+             state = row['Provider Business Mailing Address State Name']
+
+             # Skip rows whose state is not exactly two letters
+             if not re.match(r'^[A-Z]{2}$', state, re.I):
+                 continue
+
+             # Generate the output file name from the state code
+             file_name = f'{state}.csv'
+
+             # The first time a state is seen, open its file and write the header
+             if state not in state_files:
+                 state_file = open(file_name, mode='w', newline='', encoding='utf-8')
+                 writer = csv.DictWriter(state_file, fieldnames=headers)
+                 writer.writeheader()
+                 state_files[state] = state_file
+             else:
+                 # Reuse the already opened file handle for this state
+                 writer = csv.DictWriter(state_files[state], fieldnames=headers)
+
+             # Clean and truncate the row to the same 107 fields used for the header, then write it
+             cleaned_row = {clean_column_name(k): v for k, v in list(row.items())[:107]}
+             writer.writerow(cleaned_row)
+
+         # Close all open state files
+         for file in state_files.values():
+             file.close()
+
+ if __name__ == "__main__":
+     input_file_path = 'npidata_pfile_20050523-20240107.csv'  # Replace with the path to your large file
+     process_file(input_file_path)
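
Note: app.py imports streamlit but the committed version never calls it. A minimal sketch (not part of the commit, assuming pandas is installed and the per-state files live in the working directory) of how the CSVs produced by process_file() could be browsed in a Streamlit UI:

import glob

import pandas as pd
import streamlit as st

state_csvs = sorted(glob.glob('[A-Z][A-Z].csv'))  # files written by process_file()
if state_csvs:
    choice = st.selectbox('State file', state_csvs)
    st.dataframe(pd.read_csv(choice))
else:
    st.info('No per-state CSV files found; run process_file() first.')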
nucc_taxonomy_240.csv ADDED
The diff for this file is too large to render.
 
requirements.txt ADDED
File without changes