Modalities: Text
Formats: parquet
Languages: Danish
Libraries: Datasets, Dask
pere committed
Commit d90d2fe
1 Parent(s): e09e74c

Upload src/upload_culturax.py with huggingface_hub

Files changed (1)
  1. src/upload_culturax.py  +228 -0
src/upload_culturax.py ADDED
@@ -0,0 +1,228 @@
import os
import json
import argparse
from datetime import datetime
from datasets import Dataset
from huggingface_hub import HfApi, upload_file
import shutil
import math

def clean_jsonl_data(file_path):
    """Clean and validate JSONL file data."""
    cleaned_data = []
    with open(file_path, "r", encoding="utf-8") as f:
        for line_number, line in enumerate(f, start=1):
            try:
                data = json.loads(line)

                # Validate 'timestamp' field
                if "timestamp" in data:
                    if not data["timestamp"] or not isinstance(data["timestamp"], str):
                        data["timestamp"] = None
                    else:
                        try:
                            datetime_obj = datetime.fromisoformat(
                                data["timestamp"].replace("Z", "+00:00")
                            )
                            data["timestamp"] = datetime_obj.isoformat()
                        except ValueError:
                            data["timestamp"] = None

                # Ensure 'text' is a string
                if "text" in data and not isinstance(data["text"], str):
                    data["text"] = str(data["text"]) if data["text"] is not None else None

                # Validate 'url' and 'source'
                if "url" in data and not isinstance(data["url"], str):
                    data["url"] = str(data["url"]) if data["url"] is not None else None

                if "source" in data and not isinstance(data["source"], str):
                    data["source"] = str(data["source"]) if data["source"] is not None else None

                cleaned_data.append(data)

            except json.JSONDecodeError as e:
                print(f"JSON decode error at line {line_number}: {e}")
            except Exception as e:
                print(f"Error processing line {line_number}: {e}")

    return cleaned_data

def estimate_num_shards(file_path, target_shard_size_gb=1):
    """Estimate the number of shards needed based on file size."""
    file_size_gb = os.path.getsize(file_path) / (1024 ** 3)  # Bytes to GB
    num_shards = max(1, math.ceil(file_size_gb / target_shard_size_gb))
    return num_shards

def split_jsonl_file(input_file, output_prefix, max_size_gb=45):
    """Split large JSONL files into smaller shards."""
    file_size_gb = os.path.getsize(input_file) / (1024 ** 3)  # Convert bytes to GB
    if file_size_gb <= max_size_gb:
        return [input_file]  # No need to split if below limit

    # Calculate lines per shard
    with open(input_file, "r", encoding="utf-8") as f:
        lines = f.readlines()
    num_lines = len(lines)

    num_shards = math.ceil(file_size_gb / max_size_gb)
    lines_per_shard = math.ceil(num_lines / num_shards)

    shard_files = []
    for i in range(num_shards):
        shard_file = f"{output_prefix}_part{i+1}.jsonl"
        with open(shard_file, "w", encoding="utf-8") as f:
            f.writelines(lines[i * lines_per_shard:(i + 1) * lines_per_shard])
        shard_files.append(shard_file)

    return shard_files

def upload_large_file(file_path, repo_id, path_in_repo, repo_type="dataset"):
    """Upload large files with multi-part upload handling."""
    file_size_mb = os.path.getsize(file_path) / (1024 ** 2)  # Convert bytes to MB
    # Use multi-part upload for files > 5MB
    if file_size_mb > 5:
        upload_file(
            path_or_fileobj=file_path,
            path_in_repo=path_in_repo,
            repo_id=repo_id,
            repo_type=repo_type,
            use_auth_token=True,
        )
        print(f"Uploaded '{path_in_repo}' with multi-part upload.")
    else:
        # Direct upload for smaller files
        with open(file_path, 'rb') as f:
            api = HfApi()
            api.upload_file(
                path_or_fileobj=f,
                path_in_repo=path_in_repo,
                repo_id=repo_id,
                repo_type=repo_type,
                use_auth_token=True,
            )
        print(f"Uploaded '{path_in_repo}' with direct upload.")

def create_and_upload_dataset(language):
    # Define constants
    org_name = "ScandLM"
    dataset_name = f"{language}_culturax"
    repo_id = f"{org_name}/{dataset_name}"
    jsonl_file = f"{language}_culturax.jsonl"
    temp_folder = f"temp_{language}"
    jsonl_folder = os.path.join(temp_folder, "jsonl")
    data_folder = os.path.join(temp_folder, "data")
    src_folder = os.path.join(temp_folder, "src")

    # Language codes
    language_codes = {"danish": "da", "swedish": "sv", "norwegian": "no", "nynorsk": "nn"}
    language_code = language_codes.get(language, "unknown")

    # YAML front matter
    yaml_tags = (
        f"---\n"
        f"language: [{language_code}]\n"
        f"---\n\n"
        f"# {language.capitalize()} Culturax Dataset\n\n"
        f"This dataset is simply a reformatting of uonlp/CulturaX. "
        f"Some minor formatting errors have been corrected.\n\n"
        f"## Usage\n\n"
        f"```python\n"
        f"from datasets import load_dataset\n\n"
        f"dataset = load_dataset(\"ScandLM/{language}_culturax\")\n"
        f"```\n"
    )

    # Verify JSONL file
    if not os.path.exists(jsonl_file):
        raise FileNotFoundError(f"The file '{jsonl_file}' was not found.")

    # Clean data and create a temporary JSONL file
    cleaned_data = clean_jsonl_data(jsonl_file)
    os.makedirs(jsonl_folder, exist_ok=True)
    cleaned_jsonl_file = os.path.join(jsonl_folder, f"cleaned_{jsonl_file}")
    with open(cleaned_jsonl_file, "w", encoding="utf-8") as f:
        for entry in cleaned_data:
            json.dump(entry, f)
            f.write("\n")

    # Split JSONL if too large
    jsonl_shards = split_jsonl_file(cleaned_jsonl_file, os.path.join(jsonl_folder, language), max_size_gb=45)

    # Load data into Dataset
    dataset = Dataset.from_json(cleaned_jsonl_file)

    # Estimate and create Parquet shards
    num_shards = estimate_num_shards(cleaned_jsonl_file, target_shard_size_gb=1)
    print(f"Number of Parquet shards: {num_shards}")

    os.makedirs(data_folder, exist_ok=True)
    parquet_files = []
    for shard_id in range(num_shards):
        shard = dataset.shard(num_shards=num_shards, index=shard_id)
        parquet_file = os.path.join(data_folder, f"train-{shard_id:05d}-of-{num_shards:05d}.parquet")
        shard.to_parquet(parquet_file)
        parquet_files.append(parquet_file)
        print(f"Parquet file created: {parquet_file}")

    # Authenticate with Hugging Face
    api = HfApi()

    # Create dataset repo
    api.create_repo(repo_id=repo_id, repo_type="dataset", private=False, exist_ok=True)
    print(f"Dataset repository '{repo_id}' created successfully.")

    # Upload Parquet files
    for parquet_file in parquet_files:
        upload_large_file(
            file_path=parquet_file,
            repo_id=repo_id,
            path_in_repo=f"data/{os.path.basename(parquet_file)}",
        )

    # Upload JSONL shards
    for shard_file in jsonl_shards:
        upload_large_file(
            file_path=shard_file,
            repo_id=repo_id,
            path_in_repo=f"jsonl/{os.path.basename(shard_file)}",
        )

    # Upload README
    readme_path = os.path.join(temp_folder, "README.md")
    with open(readme_path, "w", encoding="utf-8") as f:
        f.write(yaml_tags)

    upload_file(
        path_or_fileobj=readme_path,
        path_in_repo="README.md",
        repo_id=repo_id,
        repo_type="dataset",
        use_auth_token=True
    )
    print("README.md uploaded successfully.")

    # Upload scripts
    os.makedirs(src_folder, exist_ok=True)
    for script in ["download_culturax.py", "upload_culturax.py"]:
        if os.path.exists(script):
            upload_large_file(
                file_path=script,
                repo_id=repo_id,
                path_in_repo=f"src/{script}",
            )

    # Clean up temporary files
    if os.path.exists(readme_path):
        os.remove(readme_path)

    # Remove directories
    shutil.rmtree(temp_folder, ignore_errors=True)

    print("Dataset setup complete!")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Upload a cultural dataset to Hugging Face.")
    parser.add_argument("language", type=str, help="The language for the dataset (e.g., danish, swedish, norwegian, nynorsk).")
    args = parser.parse_args()
    create_and_upload_dataset(args.language)
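
As a rough sanity check after running the script (e.g. `python src/upload_culturax.py danish`), a sketch along the following lines can list what landed in the repository and reload the Parquet shards. It is not part of the commit; it assumes the upload completed and that the resulting `ScandLM/danish_culturax` repo is publicly readable.

```python
# Sanity-check sketch (assumes `python src/upload_culturax.py danish` has finished
# and that ScandLM/danish_culturax exists and is accessible).
from huggingface_hub import HfApi
from datasets import load_dataset

repo_id = "ScandLM/danish_culturax"

# List the files pushed by the script (data/, jsonl/, src/, README.md).
api = HfApi()
for path in sorted(api.list_repo_files(repo_id, repo_type="dataset")):
    print(path)

# Reload the Parquet shards and spot-check one record's text field.
dataset = load_dataset(repo_id, split="train")
print(dataset[0]["text"][:200])
```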