|
import os |
|
import json |
|
import pandas as pd |
|
import re |
|
|
|
def find_json_files(directory):
    """Recursively collect the paths of all ``.json`` files under *directory*.

    Returns a list of full paths (directory component joined to filename).
    """
    return [
        os.path.join(dirpath, filename)
        for dirpath, _subdirs, filenames in os.walk(directory)
        for filename in filenames
        if filename.endswith(".json")
    ]
|
|
|
def clean_and_convert_data(json_data):
    """Normalize one press-release record in place.

    - Replaces each run of non-ASCII characters in the "text" field with a
      single space.
    - Coerces known count fields to ``int`` and known flag fields to ``bool``.

    Returns the same (mutated) dict for convenience.

    Raises ValueError if an int field holds a value ``int()`` cannot parse.
    """
    if "text" in json_data:
        # Collapse every run of non-ASCII characters into one space.
        json_data["text"] = re.sub(r'[^\x00-\x7F]+', ' ', json_data["text"])

    int_fields = ["minkilled", "mincaptured", "minleaderskilled", "minfacilitatorskilled", "minleaderscaptured", "minfacilitatorscaptured"]
    bool_fields = ["killq", "captureq", "killcaptureraid", "airstrike", "noshotsfired", "dataprocessed", "flagged", "glossarymeta", "leaderq"]

    for field in int_fields:
        if field in json_data:
            # int() handles both numeric strings ("3") and already-numeric values.
            json_data[field] = int(json_data[field])

    for field in bool_fields:
        if field in json_data:
            value = json_data[field]
            if isinstance(value, str):
                # String flags: only the (case-insensitive) literal "true" is truthy.
                json_data[field] = value.lower() == "true"
            else:
                # Bug fix: JSON true/false decode to Python bools, which have no
                # .lower(); coerce non-string values directly instead of crashing.
                json_data[field] = bool(value)

    return json_data
|
|
|
def load_json_to_dataframe(json_files):
    """Load and normalize a list of JSON files into a single DataFrame.

    Each file is parsed, passed through ``clean_and_convert_data``, and
    appended as one row. Files that fail JSON parsing are reported and
    skipped rather than aborting the run.

    Returns a tuple ``(df, skipped_files)`` where *skipped_files* is the
    list of paths that could not be decoded.
    """
    data = []
    skipped_files = []
    for file in json_files:
        try:
            # Fix: decode explicitly as UTF-8 — the default encoding is
            # platform-dependent and breaks non-ASCII JSON on some systems.
            with open(file, "r", encoding="utf-8") as f:
                json_data = json.load(f)
            json_data = clean_and_convert_data(json_data)
            data.append(json_data)
        except json.JSONDecodeError as e:
            print(f"Skipping file {file} due to JSON decoding error: {str(e)}")
            skipped_files.append(file)
    return pd.DataFrame(data), skipped_files
|
|
|
def main():
    """Export every JSON press release under the source directory to Parquet.

    Walks the hard-coded input directory, loads and normalizes each JSON
    file, writes the combined DataFrame to a Parquet file, and reports
    which files (if any) were skipped due to decoding errors.
    """
    source_dir = "../original_json_data"
    output_file = "../exported_press_releases_2024.parquet"

    json_files = find_json_files(source_dir)
    df, skipped_files = load_json_to_dataframe(json_files)
    df.to_parquet(output_file)

    exported_count = len(json_files) - len(skipped_files)
    print(f"Successfully exported {exported_count} JSON files to {output_file}")

    if skipped_files:
        print(f"Skipped {len(skipped_files)} files due to JSON decoding errors.")
        print("Skipped files:")
        for skipped in skipped_files:
            print(skipped)
|
|
|
# Run the export pipeline only when executed as a script, not on import.
if __name__ == "__main__":

    main()
|
|