import argparse
import json
import math
import os
import shutil
from datetime import datetime

from datasets import Dataset
from huggingface_hub import HfApi, upload_file


def clean_jsonl_data(file_path):
    """Clean and validate JSONL file data."""
    cleaned_data = []
    with open(file_path, "r", encoding="utf-8") as f:
        for line_number, line in enumerate(f, start=1):
            try:
                data = json.loads(line)

                # Normalize timestamps to ISO 8601; drop values that cannot be parsed.
                if "timestamp" in data:
                    if not data["timestamp"] or not isinstance(data["timestamp"], str):
                        data["timestamp"] = None
                    else:
                        try:
                            datetime_obj = datetime.fromisoformat(
                                data["timestamp"].replace("Z", "+00:00")
                            )
                            data["timestamp"] = datetime_obj.isoformat()
                        except ValueError:
                            data["timestamp"] = None

                # Coerce non-string text, url, and source fields to strings (or None).
                if "text" in data and not isinstance(data["text"], str):
                    data["text"] = str(data["text"]) if data["text"] is not None else None

                if "url" in data and not isinstance(data["url"], str):
                    data["url"] = str(data["url"]) if data["url"] is not None else None

                if "source" in data and not isinstance(data["source"], str):
                    data["source"] = str(data["source"]) if data["source"] is not None else None

                cleaned_data.append(data)

            except json.JSONDecodeError as e:
                print(f"JSON decode error at line {line_number}: {e}")
            except Exception as e:
                print(f"Error processing line {line_number}: {e}")

    return cleaned_data


def estimate_num_shards(file_path, target_shard_size_gb=1):
    """Estimate the number of shards needed based on file size."""
    file_size_gb = os.path.getsize(file_path) / (1024 ** 3)
    num_shards = max(1, math.ceil(file_size_gb / target_shard_size_gb))
    return num_shards


def split_jsonl_file(input_file, output_prefix, max_size_gb=45):
    """Split large JSONL files into smaller shards."""
    file_size_gb = os.path.getsize(input_file) / (1024 ** 3)
    if file_size_gb <= max_size_gb:
        return [input_file]

    # Note: this reads the whole file into memory, so the JSONL must fit in RAM.
    with open(input_file, "r", encoding="utf-8") as f:
        lines = f.readlines()
    num_lines = len(lines)

    num_shards = math.ceil(file_size_gb / max_size_gb)
    lines_per_shard = math.ceil(num_lines / num_shards)

    shard_files = []
    for i in range(num_shards):
        shard_file = f"{output_prefix}_part{i + 1}.jsonl"
        with open(shard_file, "w", encoding="utf-8") as f:
            f.writelines(lines[i * lines_per_shard:(i + 1) * lines_per_shard])
        shard_files.append(shard_file)

    return shard_files


def upload_large_file(file_path, repo_id, path_in_repo, repo_type="dataset"):
    """Upload a file to the Hub, passing a path for larger files and an open file object for small ones."""
    file_size_mb = os.path.getsize(file_path) / (1024 ** 2)

    if file_size_mb > 5:
        # For larger files, pass the path and let huggingface_hub read it from disk.
        upload_file(
            path_or_fileobj=file_path,
            path_in_repo=path_in_repo,
            repo_id=repo_id,
            repo_type=repo_type,
            token=True,
        )
        print(f"Uploaded '{path_in_repo}' from path.")
    else:
        # For small files, upload from an open file object.
        with open(file_path, "rb") as f:
            api = HfApi()
            api.upload_file(
                path_or_fileobj=f,
                path_in_repo=path_in_repo,
                repo_id=repo_id,
                repo_type=repo_type,
                token=True,
            )
        print(f"Uploaded '{path_in_repo}' from file object.")


def create_and_upload_dataset(language):
    org_name = "ScandLM"
    dataset_name = f"{language}_culturax"
    repo_id = f"{org_name}/{dataset_name}"
    jsonl_file = f"{language}_culturax.jsonl"
    temp_folder = f"temp_{language}"
    jsonl_folder = os.path.join(temp_folder, "jsonl")
    data_folder = os.path.join(temp_folder, "data")
    src_folder = os.path.join(temp_folder, "src")

    language_codes = {"danish": "da", "swedish": "sv", "norwegian": "no", "nynorsk": "nn"}
    language_code = language_codes.get(language, "unknown")

    # README contents, including the YAML metadata block read by the Hub.
    yaml_tags = (
        f"---\n"
        f"language: [{language_code}]\n"
        f"---\n\n"
        f"# {language.capitalize()} CulturaX Dataset\n\n"
        f"This dataset is simply a reformatting of uonlp/CulturaX. "
        f"Some minor formatting errors have been corrected.\n\n"
        f"## Usage\n\n"
        f"```python\n"
        f"from datasets import load_dataset\n\n"
        f"dataset = load_dataset(\"ScandLM/{language}_culturax\")\n"
        f"```\n"
    )

    if not os.path.exists(jsonl_file):
        raise FileNotFoundError(f"The file '{jsonl_file}' was not found.")

    # Clean the raw JSONL and write the result into the temporary folder.
    cleaned_data = clean_jsonl_data(jsonl_file)
    os.makedirs(jsonl_folder, exist_ok=True)
    cleaned_jsonl_file = os.path.join(jsonl_folder, f"cleaned_{jsonl_file}")
    with open(cleaned_jsonl_file, "w", encoding="utf-8") as f:
        for entry in cleaned_data:
            json.dump(entry, f)
            f.write("\n")

    # Split the cleaned JSONL into shards that stay under the per-file size limit.
    jsonl_shards = split_jsonl_file(cleaned_jsonl_file, os.path.join(jsonl_folder, language), max_size_gb=45)

    # Load the cleaned JSONL as a Dataset and export it as ~1 GB Parquet shards.
    dataset = Dataset.from_json(cleaned_jsonl_file)

    num_shards = estimate_num_shards(cleaned_jsonl_file, target_shard_size_gb=1)
    print(f"Number of Parquet shards: {num_shards}")

    os.makedirs(data_folder, exist_ok=True)
    parquet_files = []
    for shard_id in range(num_shards):
        shard = dataset.shard(num_shards=num_shards, index=shard_id)
        parquet_file = os.path.join(data_folder, f"train-{shard_id:05d}-of-{num_shards:05d}.parquet")
        shard.to_parquet(parquet_file)
        parquet_files.append(parquet_file)
        print(f"Parquet file created: {parquet_file}")

    api = HfApi()

    # Create the dataset repository (a no-op if it already exists).
    api.create_repo(repo_id=repo_id, repo_type="dataset", private=False, exist_ok=True)
    print(f"Dataset repository '{repo_id}' created successfully.")

    # Upload the Parquet shards under data/.
    for parquet_file in parquet_files:
        upload_large_file(
            file_path=parquet_file,
            repo_id=repo_id,
            path_in_repo=f"data/{os.path.basename(parquet_file)}",
        )

    # Upload the JSONL shards under jsonl/.
    for shard_file in jsonl_shards:
        upload_large_file(
            file_path=shard_file,
            repo_id=repo_id,
            path_in_repo=f"jsonl/{os.path.basename(shard_file)}",
        )

    # Write and upload the README.
    readme_path = os.path.join(temp_folder, "README.md")
    with open(readme_path, "w", encoding="utf-8") as f:
        f.write(yaml_tags)

    upload_file(
        path_or_fileobj=readme_path,
        path_in_repo="README.md",
        repo_id=repo_id,
        repo_type="dataset",
        token=True,
    )
    print("README.md uploaded successfully.")

    # Upload the helper scripts under src/ if they are present locally.
    os.makedirs(src_folder, exist_ok=True)
    for script in ["download_culturax.py", "upload_culturax.py"]:
        if os.path.exists(script):
            upload_large_file(
                file_path=script,
                repo_id=repo_id,
                path_in_repo=f"src/{script}",
            )

    # Clean up local temporary files.
    if os.path.exists(readme_path):
        os.remove(readme_path)

    shutil.rmtree(temp_folder, ignore_errors=True)

    print("Dataset setup complete!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Upload a CulturaX dataset to Hugging Face.")
    parser.add_argument("language", type=str, help="The language for the dataset (e.g., danish, swedish, norwegian, nynorsk).")
    args = parser.parse_args()
    create_and_upload_dataset(args.language)
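
# Example invocation (a sketch; it assumes the corresponding raw dump, e.g.
# danish_culturax.jsonl, is in the working directory and that a Hugging Face
# token with write access to the ScandLM org is stored locally):
#
#     python upload_culturax.py danish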