# nynorsk_culturax/src/upload_culturax.py
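"""Clean, shard, and upload a CulturaX-derived JSONL dataset to the Hugging Face Hub.

Given a language name (e.g. "nynorsk"), the script reads ./{language}_culturax.jsonl,
normalises a few fields (timestamp, text, url, source), splits the cleaned JSONL into
shards of at most ~45 GB, converts the data to roughly 1 GB Parquet shards, and uploads
everything to the ScandLM/{language}_culturax dataset repository together with a README
and the helper scripts.
"""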
import argparse
import json
import math
import os
import shutil
from datetime import datetime

from datasets import Dataset
from huggingface_hub import HfApi, upload_file

def clean_jsonl_data(file_path):
    """Clean and validate JSONL file data."""
    cleaned_data = []
    with open(file_path, "r", encoding="utf-8") as f:
        for line_number, line in enumerate(f, start=1):
            try:
                data = json.loads(line)
                # Validate 'timestamp' field
                if "timestamp" in data:
                    if not data["timestamp"] or not isinstance(data["timestamp"], str):
                        data["timestamp"] = None
                    else:
                        try:
                            datetime_obj = datetime.fromisoformat(
                                data["timestamp"].replace("Z", "+00:00")
                            )
                            data["timestamp"] = datetime_obj.isoformat()
                        except ValueError:
                            data["timestamp"] = None
                # Ensure 'text' is a string
                if "text" in data and not isinstance(data["text"], str):
                    data["text"] = str(data["text"]) if data["text"] is not None else None
                # Validate 'url' and 'source'
                if "url" in data and not isinstance(data["url"], str):
                    data["url"] = str(data["url"]) if data["url"] is not None else None
                if "source" in data and not isinstance(data["source"], str):
                    data["source"] = str(data["source"]) if data["source"] is not None else None
                cleaned_data.append(data)
            except json.JSONDecodeError as e:
                print(f"JSON decode error at line {line_number}: {e}")
            except Exception as e:
                print(f"Error processing line {line_number}: {e}")
    return cleaned_data

def estimate_num_shards(file_path, target_shard_size_gb=1):
    """Estimate the number of shards needed based on file size."""
    file_size_gb = os.path.getsize(file_path) / (1024 ** 3)  # Bytes to GB
    num_shards = max(1, math.ceil(file_size_gb / target_shard_size_gb))
    return num_shards

def split_jsonl_file(input_file, output_prefix, max_size_gb=45):
    """Split large JSONL files into smaller shards."""
    file_size_gb = os.path.getsize(input_file) / (1024 ** 3)  # Bytes to GB
    if file_size_gb <= max_size_gb:
        return [input_file]  # No need to split if below the limit

    # Calculate lines per shard
    with open(input_file, "r", encoding="utf-8") as f:
        lines = f.readlines()
    num_lines = len(lines)
    num_shards = math.ceil(file_size_gb / max_size_gb)
    lines_per_shard = math.ceil(num_lines / num_shards)

    shard_files = []
    for i in range(num_shards):
        shard_file = f"{output_prefix}_part{i+1}.jsonl"
        with open(shard_file, "w", encoding="utf-8") as f:
            f.writelines(lines[i * lines_per_shard:(i + 1) * lines_per_shard])
        shard_files.append(shard_file)
    return shard_files

def upload_large_file(file_path, repo_id, path_in_repo, repo_type="dataset"):
    """Upload a file to the Hub; upload_file handles chunked (LFS) transfers for large files."""
    file_size_mb = os.path.getsize(file_path) / (1024 ** 2)  # Bytes to MB
    if file_size_mb > 5:
        # Large files: pass the path so huggingface_hub can stream the upload.
        upload_file(
            path_or_fileobj=file_path,
            path_in_repo=path_in_repo,
            repo_id=repo_id,
            repo_type=repo_type,
            token=True,  # use the locally cached Hugging Face token
        )
        print(f"Uploaded '{path_in_repo}' (large file upload).")
    else:
        # Small files: upload directly from an open file object.
        with open(file_path, "rb") as f:
            api = HfApi()
            api.upload_file(
                path_or_fileobj=f,
                path_in_repo=path_in_repo,
                repo_id=repo_id,
                repo_type=repo_type,
                token=True,
            )
        print(f"Uploaded '{path_in_repo}' (direct upload).")

def create_and_upload_dataset(language):
    # Define constants
    org_name = "ScandLM"
    dataset_name = f"{language}_culturax"
    repo_id = f"{org_name}/{dataset_name}"
    jsonl_file = f"{language}_culturax.jsonl"
    temp_folder = f"temp_{language}"
    jsonl_folder = os.path.join(temp_folder, "jsonl")
    data_folder = os.path.join(temp_folder, "data")
    src_folder = os.path.join(temp_folder, "src")

    # Language codes
    language_codes = {"danish": "da", "swedish": "sv", "norwegian": "no", "nynorsk": "nn"}
    language_code = language_codes.get(language, "unknown")

    # YAML front matter
    yaml_tags = (
        f"---\n"
        f"language: [{language_code}]\n"
        f"---\n\n"
        f"# {language.capitalize()} Culturax Dataset\n\n"
        f"This dataset is simply a reformatting of uonlp/CulturaX. "
        f"Some minor formatting errors have been corrected.\n\n"
        f"## Usage\n\n"
        f"```python\n"
        f"from datasets import load_dataset\n\n"
        f"dataset = load_dataset(\"ScandLM/{language}_culturax\")\n"
        f"```\n"
    )

    # Verify JSONL file
    if not os.path.exists(jsonl_file):
        raise FileNotFoundError(f"The file '{jsonl_file}' was not found.")

    # Clean data and create a temporary JSONL file
    cleaned_data = clean_jsonl_data(jsonl_file)
    os.makedirs(jsonl_folder, exist_ok=True)
    cleaned_jsonl_file = os.path.join(jsonl_folder, f"cleaned_{jsonl_file}")
    with open(cleaned_jsonl_file, "w", encoding="utf-8") as f:
        for entry in cleaned_data:
            json.dump(entry, f)
            f.write("\n")

    # Split JSONL if too large
    jsonl_shards = split_jsonl_file(cleaned_jsonl_file, os.path.join(jsonl_folder, language), max_size_gb=45)
    # Load data into a Dataset
    dataset = Dataset.from_json(cleaned_jsonl_file)

    # Estimate and create Parquet shards
    num_shards = estimate_num_shards(cleaned_jsonl_file, target_shard_size_gb=1)
    print(f"Number of Parquet shards: {num_shards}")
    os.makedirs(data_folder, exist_ok=True)
    parquet_files = []
    for shard_id in range(num_shards):
        shard = dataset.shard(num_shards=num_shards, index=shard_id)
        parquet_file = os.path.join(data_folder, f"train-{shard_id:05d}-of-{num_shards:05d}.parquet")
        shard.to_parquet(parquet_file)
        parquet_files.append(parquet_file)
        print(f"Parquet file created: {parquet_file}")
    # Connect to the Hugging Face Hub (uses the locally cached token)
    api = HfApi()

    # Create the dataset repo
    api.create_repo(repo_id=repo_id, repo_type="dataset", private=False, exist_ok=True)
    print(f"Dataset repository '{repo_id}' created successfully.")

    # Upload Parquet files
    for parquet_file in parquet_files:
        upload_large_file(
            file_path=parquet_file,
            repo_id=repo_id,
            path_in_repo=f"data/{os.path.basename(parquet_file)}",
        )

    # Upload JSONL shards
    for shard_file in jsonl_shards:
        upload_large_file(
            file_path=shard_file,
            repo_id=repo_id,
            path_in_repo=f"jsonl/{os.path.basename(shard_file)}",
        )

    # Upload README
    readme_path = os.path.join(temp_folder, "README.md")
    with open(readme_path, "w", encoding="utf-8") as f:
        f.write(yaml_tags)
    upload_file(
        path_or_fileobj=readme_path,
        path_in_repo="README.md",
        repo_id=repo_id,
        repo_type="dataset",
        token=True,
    )
    print("README.md uploaded successfully.")
    # Upload scripts
    os.makedirs(src_folder, exist_ok=True)
    for script in ["download_culturax.py", "upload_culturax.py"]:
        if os.path.exists(script):
            upload_large_file(
                file_path=script,
                repo_id=repo_id,
                path_in_repo=f"src/{script}",
            )

    # Clean up temporary files
    if os.path.exists(readme_path):
        os.remove(readme_path)

    # Remove temporary directories
    shutil.rmtree(temp_folder, ignore_errors=True)
    print("Dataset setup complete!")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Upload a CulturaX-derived dataset to the Hugging Face Hub.")
    parser.add_argument("language", type=str, help="The language for the dataset (e.g., danish, swedish, norwegian, nynorsk).")
    args = parser.parse_args()
    create_and_upload_dataset(args.language)
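
# Example invocation (assumes ./nynorsk_culturax.jsonl exists in the working directory
# and that a Hugging Face token is cached locally, e.g. via `huggingface-cli login`):
#   python src/upload_culturax.py nynorsk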