buttercutter committed on
Commit
48344db
·
verified ·
1 Parent(s): 98e9132

Upload 2 files

Files changed (2)
  1. model_metadata.parquet +3 -0
  2. postprocess_jsonl_latest.py +1626 -0
model_metadata.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0853f8b1a3286dbc3c4ca3da2f5a66c7922003a9f5b1d72036857de3c47188c6
+ size 628246962
postprocess_jsonl_latest.py ADDED
@@ -0,0 +1,1626 @@
+ import os
+ import json
+ import logging
+ import time
+ import traceback
+ from pathlib import Path
+ import shutil
+ import psutil
+ import glob
+ import gc
+ from datetime import datetime
+ from tqdm.auto import tqdm
+ from typing import Optional, Union, Set, Dict, List, Tuple
+
15
+ # Hugging Face related
16
+ from huggingface_hub import list_models, hf_hub_download, HfApi
17
+ from huggingface_hub.utils import EntryNotFoundError, RepositoryNotFoundError, HFValidationError
18
+
19
+ # Data handling and Parquet
20
+ import pandas as pd
21
+ import pyarrow as pa
22
+ import pyarrow.parquet as pq
23
+
24
+ # Embeddings
25
+ from sentence_transformers import SentenceTransformer
26
+ import torch
27
+
28
+ # --- IPFS CID Generation Code (from provided ipfs_multiformats.py) ---
29
+ import hashlib
30
+ from multiformats import CID, multihash
31
+ import tempfile
32
+ import sys
33
+
34
+ class ipfs_multiformats_py:
35
+ def __init__(self, resources=None, metadata=None):
36
+ self.multihash = multihash
37
+ # Added error handling for multihash version/import
38
+ if not hasattr(self.multihash, 'wrap') or not hasattr(self.multihash, 'decode'):
39
+ logging.warning("Multihash library structure might have changed. CID generation may fail.")
40
+ return None
41
+
42
+ def get_file_sha256(self, file_path):
43
+ hasher = hashlib.sha256()
44
+ try:
45
+ with open(file_path, 'rb') as f:
46
+ while chunk := f.read(8192):
47
+ hasher.update(chunk)
48
+ return hasher.digest()
49
+ except Exception as e:
50
+ logging.error(f"Error hashing file {file_path}: {e}")
51
+ return None
52
+
53
+ # Takes bytes input directly
54
+ def get_bytes_sha256(self, data_bytes: bytes):
55
+ hasher = hashlib.sha256()
56
+ hasher.update(data_bytes)
57
+ return hasher.digest()
58
+
59
+ def get_multihash_sha256(self, content_hash):
60
+ if content_hash is None:
61
+ return None
62
+ try:
63
+ # Try using multihash.digest instead of wrap
64
+ mh = self.multihash.digest(content_hash, 'sha2-256')
65
+ return mh
66
+ except Exception as e:
67
+ logging.error(f"Error creating multihash: {e}")
68
+ return None
69
+
70
+ def get_multihash_sha256_old(self, content_hash):
71
+ if content_hash is None:
72
+ return None
73
+ try:
74
+ # Use 'sha2-256' which corresponds to code 0x12
75
+ #mh = self.multihash.wrap(code='sha2-256', digest=content_hash)
76
+ mh = self.multihash.wrap('sha2-256', content_hash)
77
+ return mh
78
+ except Exception as e:
79
+ logging.error(f"Error wrapping hash in multihash: {e}")
80
+ return None
81
+
82
+ def get_cid_old(self, data):
83
+ """Generates CID v1 base32 for bytes data or file path."""
84
+ mh = None
85
+ try:
86
+ if isinstance(data, (str, Path)) and os.path.isfile(data):
87
+ # logging.debug(f"Calculating CID for file: {data}")
88
+ file_content_hash = self.get_file_sha256(data)
89
+ mh = self.get_multihash_sha256(file_content_hash)
90
+ elif isinstance(data, bytes):
91
+ # logging.debug(f"Calculating CID for bytes (length: {len(data)})")
92
+ bytes_hash = self.get_bytes_sha256(data)
93
+ mh = self.get_multihash_sha256(bytes_hash)
94
+ elif isinstance(data, str):
95
+ # logging.debug(f"Calculating CID for string (length: {len(data)})")
96
+ # Treat string as UTF-8 bytes
97
+ bytes_hash = self.get_bytes_sha256(data.encode('utf-8'))
98
+ mh = self.get_multihash_sha256(bytes_hash)
99
+ else:
100
+ logging.warning(f"Unsupported data type for CID generation: {type(data)}. Skipping CID.")
101
+ return None
102
+
103
+ if mh:
104
+ # CIDv1, base32, raw codec (0x55)
105
+ cid = CID(base='base32', version=1, codec='raw', multihash=mh)
106
+ return str(cid)
107
+ else:
108
+ return None
109
+ except Exception as e:
110
+ logging.error(f"Error generating CID: {e}", exc_info=False)
111
+ return None
112
+
113
+ def get_cid(self, data):
114
+ """Generates CID v1 base32 for bytes data or file path."""
115
+ try:
116
+ # Get the hash first
117
+ content_hash = None
118
+ if isinstance(data, (str, Path)) and os.path.isfile(data):
119
+ content_hash = self.get_file_sha256(data)
120
+ elif isinstance(data, bytes):
121
+ content_hash = self.get_bytes_sha256(data)
122
+ elif isinstance(data, str):
123
+ content_hash = self.get_bytes_sha256(data.encode('utf-8'))
124
+ else:
125
+ logging.warning(f"Unsupported data type for CID generation: {type(data)}. Skipping CID.")
126
+ return None
127
+
128
+ if not content_hash:
129
+ return None
130
+
131
+ # Try the new CID API format
132
+ try:
133
+ # Version 1 of multiformats may use from_digest or other method instead of passing multihash directly
134
+ from multiformats import multihash
135
+ digest = multihash.digest(content_hash, 'sha2-256')
136
+ cid = CID.from_digest(digest, 'raw') # Try this format first
137
+ return str(cid)
138
+ except (AttributeError, TypeError):
139
+ try:
140
+ # Try alternate creation method
141
+ mh = self.get_multihash_sha256(content_hash)
142
+ cid = CID(version=1, codec='raw', hash=mh) # Try with hash parameter
143
+ return str(cid)
144
+ except Exception:
145
+ # Fallback to simple base64 encoding if CID creation fails
146
+ import base64
147
+ b64_hash = base64.b64encode(content_hash).decode('ascii')
148
+ return f"sha256:{b64_hash}"
149
+
150
+ except Exception as e:
151
+ logging.error(f"Error generating CID: {e}", exc_info=False)
152
+ # Fallback to a simple hash representation
153
+ try:
154
+ if isinstance(data, (str, Path)) and os.path.isfile(data):
155
+ content_hash = self.get_file_sha256(data)
156
+ elif isinstance(data, bytes):
157
+ content_hash = self.get_bytes_sha256(data)
158
+ elif isinstance(data, str):
159
+ content_hash = self.get_bytes_sha256(data.encode('utf-8'))
160
+ else:
161
+ return None
162
+
163
+ import base64
164
+ return f"sha256:{base64.b64encode(content_hash).decode('ascii')}"
165
+ except Exception:
166
+ return None
167
+ # --- End IPFS CID Code ---
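+
+ # Minimal usage sketch for the helper above, kept as comments so nothing runs at
+ # import time; the exact identifier returned depends on which multiformats code
+ # path succeeds (falling back to a "sha256:<base64>" string). The file path is
+ # hypothetical.
+ #   cid_gen = ipfs_multiformats_py()
+ #   cid_for_text  = cid_gen.get_cid('{"model_type": "bert"}')  # hash of the UTF-8 bytes
+ #   cid_for_bytes = cid_gen.get_cid(b"raw bytes")
+ #   cid_for_file  = cid_gen.get_cid("/path/to/config.json")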
168
+
169
+
170
+ # --- Configuration ---
171
+ # --- Paths ---
172
+ GDRIVE_MOUNT_POINT = "/content/drive/MyDrive"
173
+ GDRIVE_FOLDER_NAME = "hf_metadata_dataset_collection"
174
+ LOCAL_FOLDER_NAME = "./hf_metadata_dataset_local_fallback"
175
+ LOCAL_WORK_DIR = Path(os.path.abspath("./hf_embedding_work"))
176
+
177
+ # Input JSONL File
178
+ INPUT_JSONL_FILENAME = "all_models_metadata.jsonl" # Assumed to be in the final output directory
179
+
180
+ # --- Output File Names ---
181
+ # Final Destination (Drive/Local Fallback)
182
+ FINAL_METADATA_PARQUET_FILENAME = "model_metadata.parquet" # Metadata + CIDs
183
+ FINAL_EMBEDDINGS_PARQUET_FILENAME = "model_embeddings.parquet" # CIDs + Embeddings
184
+ FINAL_LOG_FILENAME = "embedding_generator.log"
185
+
186
+ # Local Temporary Files (in LOCAL_WORK_DIR)
187
+ LOCAL_TEMP_METADATA_JSONL = "temp_model_metadata.jsonl"
188
+ LOCAL_TEMP_EMBEDDINGS_JSONL = "temp_model_embeddings.jsonl"
189
+ LOCAL_TEMP_LOG_FILENAME = "temp_embedding_generator.log"
190
+
191
+ # --- Batch Configuration ---
192
+ BATCH_SAVE_THRESHOLD = 1000 # Save after processing this many records
193
+ BATCH_SAVE_DIR_NAME = "batch_files" # Subdirectory for batch files
194
+ PERIODIC_MERGE_FREQUENCY = 5 # Merge to Google Drive every X batches (0 to disable)
195
+ CLEAN_AFTER_PERIODIC_MERGE = True # Whether to clean up batch files after periodic merge
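+
+ # Rough cadence implied by the defaults above (illustrative arithmetic only):
+ # a batch file is flushed every BATCH_SAVE_THRESHOLD = 1000 records and a merge
+ # runs every PERIODIC_MERGE_FREQUENCY = 5 batches, so the working JSONL files on
+ # disk are updated roughly every 1000 * 5 = 5000 processed records.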
196
+
197
+ # --- Memory Management Configuration ---
198
+ MEMORY_CLEANUP_THRESHOLD_MB = 1000 # Force extra cleanup if memory growth exceeds this
199
+
200
+ # --- Processing Config ---
201
+ MAX_RECORDS_TO_PROCESS = None # Limit records from JSONL (for testing), None for all
202
+ BATCH_SIZE = 1024
203
+ EMBEDDING_MODEL_NAME = 'all-MiniLM-L6-v2'
204
+ # Control what gets embedded and CID generated
205
+ PROCESS_CONFIG_JSON = True
206
+ PROCESS_README_CONTENT = True
207
+
208
+ # --- Hub Upload Config ---
209
+ UPLOAD_TO_HUB = True
210
+ TARGET_REPO_ID = "YourUsername/your-dataset-repo-name" # CHANGE THIS
211
+ TARGET_REPO_TYPE = "dataset"
212
+ METADATA_FILENAME_IN_REPO = "model_metadata.parquet"
213
+ EMBEDDINGS_FILENAME_IN_REPO = "model_embeddings.parquet"
214
+
215
+ # --- Setup Logging ---
216
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
217
+
218
+
219
+ # --- Helper Functions ---
220
+ def make_serializable(obj):
221
+ """Converts common non-serializable types found in ModelInfo."""
222
+ if hasattr(obj, 'isoformat'): return obj.isoformat()
223
+ if hasattr(obj, 'rfilename'): return obj.rfilename
224
+ try: return str(obj)
225
+ except Exception: return None
226
+
227
+ def safe_serialize_dict(data_dict):
228
+ """Attempts to serialize a dictionary, handling non-serializable items."""
229
+ # This function might not be needed if we read directly from JSONL,
230
+ # but keep it for potential future use or if handling raw ModelInfo objects.
231
+ serializable_dict = {}
232
+ if not isinstance(data_dict, dict): logging.warning(f"safe_serialize_dict non-dict input: {type(data_dict)}"); return {}
233
+ for key, value in data_dict.items():
234
+ if isinstance(value, (list, tuple)): serializable_dict[key] = [make_serializable(item) for item in value]
235
+ elif isinstance(value, dict): serializable_dict[key] = safe_serialize_dict(value)
236
+ elif isinstance(value, (str, int, float, bool, type(None))): serializable_dict[key] = value
237
+ else: serializable_dict[key] = make_serializable(value)
238
+ return {k: v for k, v in serializable_dict.items() if v is not None or (k in data_dict and data_dict[k] is None)}
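+
+ # Behaviour sketch for the two helpers above (example values are hypothetical):
+ #   make_serializable(datetime(2024, 1, 1))  -> "2024-01-01T00:00:00"  # isoformat()
+ #   make_serializable(obj_with_rfilename)    -> obj_with_rfilename.rfilename
+ #   make_serializable(anything_else)         -> str(anything_else), or None if that raises
+ # safe_serialize_dict() applies the same rules recursively to nested dicts and lists,
+ # keeping explicit None values from the input but dropping values it could not convert.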
239
+
240
+ # --- NEW: Generate Record CID Function ---
241
+ def generate_record_cid(cid_generator, model_id: str, config_cid: Optional[str] = None, readme_cid: Optional[str] = None) -> str:
242
+ """
243
+ Generate a primary record CID from model_id and available content CIDs.
244
+ This will be used as the primary key for both Parquet files.
245
+ """
246
+ # Create a base string that combines all available IDs
247
+ cid_parts = [f"model:{model_id}"]
248
+ if config_cid:
249
+ cid_parts.append(f"config:{config_cid}")
250
+ if readme_cid:
251
+ cid_parts.append(f"readme:{readme_cid}")
252
+
253
+ # Join all parts and generate a CID from the combined string
254
+ combined_string = "|".join(cid_parts)
255
+ return cid_generator.get_cid(combined_string)
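+
+ # Shape of the combined key that gets hashed (CID values below are placeholders):
+ #   "model:bert-base-uncased|config:<config_cid>|readme:<readme_cid>"
+ # so two records share a record_cid only when the model id and the CIDs of the
+ # config and README content all match.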
256
+
257
+ # --- Safe Parquet Saving Function ---
258
+ def save_dataframe_to_parquet_safely(df, filepath):
259
+ """Saves DataFrame to Parquet with explicit schema handling for mixed types."""
260
+ try:
261
+ # First attempt: Convert known problematic columns to string
262
+ df_safe = df.copy()
263
+
264
+ # Handle the 'gated' column specifically which caused the original error
265
+ if 'gated' in df_safe.columns:
266
+ df_safe['gated'] = df_safe['gated'].astype(str)
267
+
268
+ # Convert all object columns except model_id and record_cid to string to be safe
269
+ for col in df_safe.select_dtypes(include=['object']).columns:
270
+ if col not in ['model_id', 'record_cid', 'config_cid', 'readme_cid']: # Keep IDs as is
271
+ df_safe[col] = df_safe[col].astype(str)
272
+
273
+ # Try saving with pandas
274
+ df_safe.to_parquet(filepath, index=False, compression='gzip')
275
+ return True
276
+
277
+ except Exception as e:
278
+ logging.warning(f"First attempt to save Parquet failed: {e}")
279
+
280
+ try:
281
+ # Second attempt: Use PyArrow with explicit schema
282
+ schema = pa.Schema.from_pandas(df)
283
+ fields = list(schema)
284
+
285
+ # Convert all string/binary fields to string type except IDs
286
+ for i, field in enumerate(fields):
287
+ if (pa.types.is_string(field.type) or pa.types.is_binary(field.type)) and \
288
+ field.name not in ['model_id', 'record_cid', 'config_cid', 'readme_cid']:
289
+ fields[i] = pa.field(field.name, pa.string())
290
+
291
+ new_schema = pa.schema(fields)
292
+
293
+ # Force conversion of problematic columns
294
+ df_safe = df.copy()
295
+ for col in df_safe.select_dtypes(include=['object']).columns:
296
+ if col not in ['model_id', 'record_cid', 'config_cid', 'readme_cid']:
297
+ df_safe[col] = df_safe[col].astype(str)
298
+
299
+ # Convert to table with schema and write
300
+ table = pa.Table.from_pandas(df_safe, schema=new_schema)
301
+ pq.write_table(table, filepath)
302
+ logging.info(f"Successfully saved to {filepath} using PyArrow with schema conversion")
303
+ return True
304
+
305
+ except Exception as e2:
306
+ logging.error(f"Both Parquet saving attempts failed for {filepath}: {e2}")
307
+
308
+ # Last resort - save to CSV instead
309
+ try:
310
+ csv_filepath = filepath.with_suffix('.csv')
311
+ logging.warning(f"Falling back to CSV format: {csv_filepath}")
312
+ df.to_csv(csv_filepath, index=False)
313
+ logging.info(f"Saved as CSV instead: {csv_filepath}")
314
+ return False
315
+ except Exception as e3:
316
+ logging.error(f"Even CSV fallback failed: {e3}")
317
+ return False
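+
+ # Call-site sketch: True means a Parquet file was written (by either attempt),
+ # False means only the .csv fallback (or nothing) was produced.
+ #   ok = save_dataframe_to_parquet_safely(df, Path("model_metadata.parquet"))
+ #   if not ok:
+ #       logging.warning("Parquet save fell back to CSV or failed entirely")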
318
+
319
+ # --- UPDATED: Load Processed CIDs from EMBEDDINGS Parquet and Batch Files ---
320
+ def load_processed_cids_from_parquet(filepath: Path, batch_dir: Optional[Path] = None) -> set:
321
+ """
322
+ Reads the record_cid column from:
323
+ 1. The final EMBEDDINGS Parquet file
324
+ 2. Any batch files in the batch_dir, if provided
325
+ 3. Also checks for CSV fallback files
326
+
327
+ Returns a set of processed record_cids.
328
+ """
329
+ processed_cids = set()
330
+
331
+ # 1. Load from final Parquet if it exists
332
+ if filepath.is_file():
333
+ logging.info(f"Found existing EMBEDDINGS Parquet: {filepath}. Loading processed CIDs...")
334
+ try:
335
+ # Only load the record_cid column for efficiency
336
+ df_existing = pd.read_parquet(filepath, columns=['record_cid'])
337
+ file_cids = set(df_existing['record_cid'].tolist())
338
+ processed_cids.update(file_cids)
339
+ logging.info(f"Loaded {len(file_cids)} CIDs from existing Embeddings Parquet.")
340
+ except Exception as e:
341
+ logging.warning(f"Could not load 'record_cid' from '{filepath}': {e}. Will check for CSV fallback.")
342
+ # Check for CSV fallback
343
+ csv_filepath = filepath.with_suffix('.csv')
344
+ if csv_filepath.is_file():
345
+ try:
346
+ df_csv = pd.read_csv(csv_filepath, usecols=['record_cid'])
347
+ csv_cids = set(df_csv['record_cid'].tolist())
348
+ processed_cids.update(csv_cids)
349
+ logging.info(f"Loaded {len(csv_cids)} CIDs from CSV fallback: {csv_filepath}")
350
+ except Exception as csv_e:
351
+ logging.warning(f"Could not load CIDs from CSV fallback: {csv_e}")
352
+
353
+ # 2. Load from batch files if provided
354
+ if batch_dir and batch_dir.is_dir():
355
+ # Check both Parquet and CSV batch files
356
+ batch_files_parquet = list(batch_dir.glob("embeddings_batch_*.parquet"))
357
+ batch_files_csv = list(batch_dir.glob("embeddings_batch_*.csv"))
358
+
359
+ if batch_files_parquet:
360
+ logging.info(f"Found {len(batch_files_parquet)} embedding batch Parquet files.")
361
+ batch_cids_count = 0
362
+
363
+ for batch_file in batch_files_parquet:
364
+ try:
365
+ df_batch = pd.read_parquet(batch_file, columns=['record_cid'])
366
+ batch_cids = set(df_batch['record_cid'].tolist())
367
+ batch_cids_count += len(batch_cids)
368
+ processed_cids.update(batch_cids)
369
+ except Exception as e:
370
+ logging.warning(f"Error loading CIDs from batch file {batch_file}: {e}")
371
+
372
+ logging.info(f"Loaded {batch_cids_count} additional CIDs from Parquet batch files.")
373
+
374
+ if batch_files_csv:
375
+ logging.info(f"Found {len(batch_files_csv)} embedding batch CSV files.")
376
+ csv_batch_cids_count = 0
377
+
378
+ for batch_file in batch_files_csv:
379
+ try:
380
+ df_batch = pd.read_csv(batch_file, usecols=['record_cid'])
381
+ batch_cids = set(df_batch['record_cid'].tolist())
382
+ csv_batch_cids_count += len(batch_cids)
383
+ processed_cids.update(batch_cids)
384
+ except Exception as e:
385
+ logging.warning(f"Error loading CIDs from CSV batch file {batch_file}: {e}")
386
+
387
+ logging.info(f"Loaded {csv_batch_cids_count} additional CIDs from CSV batch files.")
388
+
389
+ total_cids = len(processed_cids)
390
+ if total_cids > 0:
391
+ logging.info(f"Total of {total_cids} unique record CIDs loaded for resume.")
392
+ else:
393
+ logging.info(f"No existing processed CIDs found. Will process all records.")
394
+
395
+ return processed_cids
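+
+ # Resume sketch; `final_dir` stands for wherever the final Parquet lives (Drive or
+ # local fallback), the other names come from the configuration section above:
+ #   processed = load_processed_cids_from_parquet(
+ #       final_dir / FINAL_EMBEDDINGS_PARQUET_FILENAME,
+ #       batch_dir=LOCAL_WORK_DIR / BATCH_SAVE_DIR_NAME)
+ #   # records whose record_cid is already in `processed` are skipped on the next run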
396
+
397
+
398
+ # Final conversion from JSONL to Parquet happens only once, at the end of all processing.
399
+ def convert_jsonl_to_parquet(
400
+ meta_jsonl_path: Path,
401
+ embed_jsonl_path: Path,
402
+ local_temp_metadata_path: Path,
403
+ local_temp_embeddings_path: Path,
404
+ chunk_size: int = 50000,
405
+ max_memory_mb: int = 2000 # Memory threshold for adaptive processing
406
+ ):
407
+ """
408
+ Convert very large JSONL files to Parquet using a streaming approach with minimal memory usage.
409
+
410
+ Args:
411
+ meta_jsonl_path: Path to metadata JSONL file
412
+ embed_jsonl_path: Path to embeddings JSONL file
413
+ local_temp_metadata_path: Output path for metadata Parquet file
414
+ local_temp_embeddings_path: Output path for embeddings Parquet file
415
+ chunk_size: Initial number of records to process at once (will adapt based on memory usage)
416
+ max_memory_mb: Maximum memory usage threshold in MB
417
+ """
418
+ import json
419
+ import os
420
+ import gc
421
+ import time
422
+ import pyarrow as pa
423
+ import pyarrow.parquet as pq
424
+ from tqdm import tqdm
425
+ import psutil
426
+
427
+ logging.info("Starting optimized streaming conversion from JSONL to Parquet")
428
+
429
+ def get_memory_usage_mb():
430
+ """Get current memory usage in MB"""
431
+ process = psutil.Process(os.getpid())
432
+ return process.memory_info().rss / (1024 * 1024)
433
+
434
+ def estimate_total_lines(file_path, sample_size=1000000):
435
+ """Estimate total lines in file without reading entire file"""
436
+ try:
437
+ # Get file size
438
+ file_size = os.path.getsize(file_path)
439
+
440
+ # If file is small enough, just count lines directly
441
+ if file_size < 100 * 1024 * 1024: # 100 MB
442
+ with open(file_path, 'r') as f:
443
+ return sum(1 for _ in f)
444
+
445
+ # Sample beginning of file to estimate line size
446
+ line_count = 0
447
+ bytes_read = 0
448
+ with open(file_path, 'r') as f:
449
+ for _ in range(sample_size):
450
+ line = f.readline()
451
+ if not line:
452
+ break
453
+ bytes_read += len(line.encode('utf-8'))
454
+ line_count += 1
455
+
456
+ if line_count == 0:
457
+ return 0
458
+
459
+ # Calculate average line size and estimate total
460
+ avg_line_size = bytes_read / line_count
461
+ estimated_lines = int(file_size / avg_line_size)
462
+
463
+ logging.info(f"Estimated lines in {file_path.name}: {estimated_lines:,} (based on avg line size: {avg_line_size:.1f} bytes)")
464
+ return estimated_lines
465
+
466
+ except Exception as e:
467
+ logging.error(f"Error estimating lines in file: {e}")
468
+ return 0
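+
+ # Illustrative arithmetic: a 10 GB file whose sampled lines average ~2 KB comes out
+ # at roughly 5.2 million lines; the estimate only feeds the tqdm total, so being
+ # off by a few percent is harmless.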
469
+
470
+ def infer_schema_from_samples(file_path, num_samples=1000):
471
+ """Infer schema by sampling from beginning, middle, and end of file"""
472
+ try:
473
+ file_size = os.path.getsize(file_path)
474
+ if file_size == 0:
475
+ return None
476
+
477
+ samples = []
478
+ with open(file_path, 'r') as f:
479
+ # Read samples from beginning
480
+ for _ in range(num_samples // 3):
481
+ line = f.readline()
482
+ if not line:
483
+ break
484
+ try:
485
+ samples.append(json.loads(line))
486
+ except json.JSONDecodeError:
487
+ continue
488
+
489
+ # Read samples from middle
490
+ middle_pos = file_size // 2
491
+ f.seek(middle_pos)
492
+ f.readline() # Skip partial line
493
+ for _ in range(num_samples // 3):
494
+ line = f.readline()
495
+ if not line:
496
+ break
497
+ try:
498
+ samples.append(json.loads(line))
499
+ except json.JSONDecodeError:
500
+ continue
501
+
502
+ # Read samples from end
503
+ end_pos = max(0, file_size - 100000) # 100 KB from end
504
+ f.seek(end_pos)
505
+ f.readline() # Skip partial line
506
+ for _ in range(num_samples // 3):
507
+ line = f.readline()
508
+ if not line:
509
+ break
510
+ try:
511
+ samples.append(json.loads(line))
512
+ except json.JSONDecodeError:
513
+ continue
514
+
515
+ if not samples:
516
+ logging.error(f"No valid JSON samples found in {file_path}")
517
+ return None
518
+
519
+ # Convert samples to pyarrow schema
520
+ import pandas as pd
521
+ sample_df = pd.DataFrame(samples)
522
+
523
+ # Convert all columns to string type to avoid type mismatches
524
+ for col in sample_df.columns:
525
+ if col != 'embedding': # Keep embedding as is since it's numeric
526
+ sample_df[col] = sample_df[col].astype(str)
527
+
528
+ # Handle embedding field specially if it exists
529
+ if 'embedding' in sample_df.columns:
530
+ # Ensure embedding is a list of float
531
+ if sample_df['embedding'].dtype != 'object':
532
+ # If not already a list, convert to string
533
+ sample_df['embedding'] = sample_df['embedding'].astype(str)
534
+
535
+ # Convert to PyArrow Table and extract schema
536
+ table = pa.Table.from_pandas(sample_df)
537
+ logging.info(f"Inferred schema with {len(table.schema.names)} fields")
538
+ return table.schema
539
+
540
+ except Exception as e:
541
+ logging.error(f"Error inferring schema: {e}", exc_info=True)
542
+ return None
543
+
544
+ def stream_jsonl_to_parquet(jsonl_path, parquet_path, file_type, initial_chunk_size):
545
+ """Process a JSONL file in a streaming fashion with adaptive chunk sizing"""
546
+ if not jsonl_path.exists():
547
+ logging.warning(f"{file_type} JSONL file not found: {jsonl_path}")
548
+ return False
549
+
550
+ logging.info(f"Starting streaming conversion of {file_type} JSONL: {jsonl_path} -> {parquet_path}")
551
+ start_time = time.time()
552
+
553
+ # Get schema by sampling
554
+ schema = infer_schema_from_samples(jsonl_path)
555
+ if schema is None:
556
+ logging.error(f"Failed to infer schema for {file_type}")
557
+ return False
558
+
559
+ # Estimate total for progress reporting
560
+ estimated_total = estimate_total_lines(jsonl_path)
561
+
562
+ # Track current chunk size - will adapt based on memory usage
563
+ current_chunk_size = initial_chunk_size
564
+ records_processed = 0
565
+ chunk_count = 0
566
+
567
+ try:
568
+ # Create parquet writer with inferred schema
569
+ with pq.ParquetWriter(parquet_path, schema) as writer:
570
+ # Process in chunks to limit memory usage
571
+ buffer = []
572
+
573
+ with tqdm(total=estimated_total, desc=f"Converting {file_type}") as pbar:
574
+ with open(jsonl_path, 'r') as f:
575
+ for line_num, line in enumerate(f, 1):
576
+ try:
577
+ record = json.loads(line)
578
+
579
+ # Convert all string fields to ensure type consistency
580
+ for key, value in record.items():
581
+ if key != 'embedding' and value is not None and not isinstance(value, (list, dict)):
582
+ record[key] = str(value)
583
+
584
+ buffer.append(record)
585
+
586
+ # When buffer reaches chunk size, write to parquet
587
+ if len(buffer) >= current_chunk_size:
588
+ # Convert buffer to PyArrow table
589
+ import pandas as pd
590
+ chunk_df = pd.DataFrame(buffer)
591
+
592
+ # Handle embedding field specially if it exists
593
+ if 'embedding' in chunk_df.columns:
594
+ # Ensure embedding is a list of float
595
+ if chunk_df['embedding'].dtype != 'object':
596
+ # If not already a list, convert to string
597
+ chunk_df['embedding'] = chunk_df['embedding'].astype(str)
598
+
599
+ # Convert non-embedding fields to string
600
+ for col in chunk_df.columns:
601
+ if col != 'embedding':
602
+ chunk_df[col] = chunk_df[col].astype(str)
603
+
604
+ # Write chunk
605
+ table = pa.Table.from_pandas(chunk_df, schema=schema)
606
+ writer.write_table(table)
607
+
608
+ # Update progress
609
+ records_processed += len(buffer)
610
+ pbar.update(len(buffer))
611
+ chunk_count += 1
612
+
613
+ # Clear buffer and force garbage collection
614
+ buffer = []
615
+ del chunk_df, table
616
+ gc.collect()
617
+
618
+ # Adaptive chunk sizing based on memory usage
619
+ current_memory = get_memory_usage_mb()
620
+ if current_memory > max_memory_mb:
621
+ # Reduce chunk size if memory usage is too high
622
+ new_chunk_size = max(1000, int(current_chunk_size * 0.8))
623
+ logging.info(f"Memory usage high ({current_memory:.1f} MB). Reducing chunk size from {current_chunk_size} to {new_chunk_size}")
624
+ current_chunk_size = new_chunk_size
625
+ elif current_memory < max_memory_mb * 0.5 and current_chunk_size < initial_chunk_size:
626
+ # Increase chunk size if memory usage is low
627
+ new_chunk_size = min(initial_chunk_size, int(current_chunk_size * 1.2))
628
+ logging.info(f"Memory usage low ({current_memory:.1f} MB). Increasing chunk size from {current_chunk_size} to {new_chunk_size}")
629
+ current_chunk_size = new_chunk_size
630
+
631
+ # Log progress periodically
632
+ if chunk_count % 10 == 0:
633
+ elapsed = time.time() - start_time
634
+ rate = records_processed / elapsed if elapsed > 0 else 0
635
+ logging.info(f"Processed {records_processed:,} records ({rate:.1f} records/sec), memory: {current_memory:.1f} MB")
636
+
637
+ except json.JSONDecodeError:
638
+ logging.warning(f"Invalid JSON at line {line_num}")
639
+ continue
640
+ except Exception as e:
641
+ logging.warning(f"Error processing line {line_num}: {e}")
642
+ continue
643
+
644
+ # Write any remaining records
645
+ if buffer:
646
+ try:
647
+ import pandas as pd
648
+ chunk_df = pd.DataFrame(buffer)
649
+
650
+ # Handle embedding field specially if it exists
651
+ if 'embedding' in chunk_df.columns:
652
+ # Ensure embedding is a list of float
653
+ if chunk_df['embedding'].dtype != 'object':
654
+ # If not already a list, convert to string
655
+ chunk_df['embedding'] = chunk_df['embedding'].astype(str)
656
+
657
+ # Convert non-embedding fields to string
658
+ for col in chunk_df.columns:
659
+ if col != 'embedding':
660
+ chunk_df[col] = chunk_df[col].astype(str)
661
+
662
+ # Write final chunk
663
+ table = pa.Table.from_pandas(chunk_df, schema=schema)
664
+ writer.write_table(table)
665
+
666
+ # Update progress
667
+ records_processed += len(buffer)
668
+ pbar.update(len(buffer))
669
+
670
+ except Exception as e:
671
+ logging.error(f"Error writing final chunk: {e}")
672
+
673
+ # Report final stats
674
+ elapsed = time.time() - start_time
675
+ rate = records_processed / elapsed if elapsed > 0 else 0
676
+ logging.info(f"Successfully converted {records_processed:,} {file_type} records in {elapsed:.1f} seconds ({rate:.1f} records/sec)")
677
+ logging.info(f"Created {file_type} Parquet file: {parquet_path} ({os.path.getsize(parquet_path) / (1024*1024):.1f} MB)")
678
+ return True
679
+
680
+ except Exception as e:
681
+ logging.error(f"Error during {file_type} conversion: {e}", exc_info=True)
682
+ return False
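+
+ # Adaptive sizing recap (per the logic above): the chunk shrinks by 20% whenever
+ # process RSS exceeds max_memory_mb (but never below 1,000 records) and grows by
+ # 20% again once usage drops under half the threshold, capped at the initial size.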
683
+
684
+ # Convert metadata file
685
+ meta_success = stream_jsonl_to_parquet(meta_jsonl_path, local_temp_metadata_path, "metadata", chunk_size)
686
+
687
+ # Force garbage collection before processing embeddings
688
+ gc.collect()
689
+
690
+ # Convert embeddings file
691
+ embed_success = stream_jsonl_to_parquet(embed_jsonl_path, local_temp_embeddings_path, "embeddings", chunk_size)
692
+
693
+ if meta_success and embed_success:
694
+ logging.info("JSONL to Parquet conversion completed successfully")
695
+ return True
696
+ else:
697
+ logging.error("JSONL to Parquet conversion encountered errors")
698
+ return False
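+
+ # End-of-run call sketch (the four paths are the temp JSONL/Parquet files named in
+ # the configuration section; the keyword values shown are the defaults):
+ #   convert_jsonl_to_parquet(meta_jsonl_path, embed_jsonl_path,
+ #                            local_temp_metadata_path, local_temp_embeddings_path,
+ #                            chunk_size=50000, max_memory_mb=2000)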
699
+
700
+
701
+
702
+ # --- Sync Local Files to Final Destination ---
703
+ def sync_local_files_to_final(
704
+ local_metadata_path: Path,
705
+ local_embeddings_path: Path,
706
+ local_log_path: Path,
707
+ final_metadata_path: Path,
708
+ final_embeddings_path: Path,
709
+ final_log_path: Path
710
+ ):
711
+ """
712
+ Copies local Parquet/log files to overwrite final destination files.
713
+ Returns True if all necessary copies succeeded.
714
+ """
715
+ success = True # Assume success initially
716
+
717
+ # Copy Metadata Parquet or CSV
718
+ if local_metadata_path.is_file():
719
+ try:
720
+ logging.info(f"Copying local Metadata '{local_metadata_path}' to '{final_metadata_path}'...")
721
+ final_metadata_path.parent.mkdir(parents=True, exist_ok=True)
722
+ shutil.copyfile(local_metadata_path, final_metadata_path)
723
+ logging.info("Metadata file copy successful.")
724
+ except Exception as e:
725
+ logging.error(f"Failed to copy Metadata file: {e}", exc_info=True)
726
+ success = False
727
+
728
+ # Also check for CSV fallback
729
+ csv_path = local_metadata_path.with_suffix('.csv')
730
+ if csv_path.is_file():
731
+ try:
732
+ csv_dest = final_metadata_path.with_suffix('.csv')
733
+ logging.info(f"Copying CSV fallback: {csv_path} to {csv_dest}")
734
+ shutil.copyfile(csv_path, csv_dest)
735
+ except Exception as e:
736
+ logging.error(f"Failed to copy CSV fallback: {e}")
737
+ # Don't affect overall success status for CSV fallback
738
+ else:
739
+ logging.debug("Local Metadata file non-existent. Skipping copy.")
740
+
741
+ # Copy Embeddings Parquet or CSV
742
+ if local_embeddings_path.is_file():
743
+ try:
744
+ logging.info(f"Copying local Embeddings '{local_embeddings_path}' to '{final_embeddings_path}'...")
745
+ final_embeddings_path.parent.mkdir(parents=True, exist_ok=True)
746
+ shutil.copyfile(local_embeddings_path, final_embeddings_path)
747
+ logging.info("Embeddings file copy successful.")
748
+ except Exception as e:
749
+ logging.error(f"Failed to copy Embeddings file: {e}", exc_info=True)
750
+ success = False
751
+
752
+ # Also check for CSV fallback
753
+ csv_path = local_embeddings_path.with_suffix('.csv')
754
+ if csv_path.is_file():
755
+ try:
756
+ csv_dest = final_embeddings_path.with_suffix('.csv')
757
+ logging.info(f"Copying CSV fallback: {csv_path} to {csv_dest}")
758
+ shutil.copyfile(csv_path, csv_dest)
759
+ except Exception as e:
760
+ logging.error(f"Failed to copy CSV fallback: {e}")
761
+ # Don't affect overall success status for CSV fallback
762
+ else:
763
+ logging.debug("Local Embeddings file non-existent. Skipping copy.")
764
+
765
+ # Copy Log File
766
+ if local_log_path.is_file() and local_log_path.stat().st_size > 0:
767
+ try:
768
+ logging.info(f"Copying local log '{local_log_path}' to overwrite '{final_log_path}'...")
769
+ final_log_path.parent.mkdir(parents=True, exist_ok=True)
770
+ shutil.copyfile(local_log_path, final_log_path)
771
+ logging.info("Log file copy successful.")
772
+ except Exception as e:
773
+ logging.error(f"Failed to copy log file: {e}", exc_info=True)
774
+ success = False # Log copy failure is less critical, but still flag it
775
+ else:
776
+ logging.debug("Local temp log empty/non-existent. Skipping log copy.")
777
+
778
+ return success
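+
+ # Note: this copy runs once at the very end of processing; during the run,
+ # perform_periodic_merge() below only syncs the log file to the final destination.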
779
+
780
+
781
+ # Track memory across perform_periodic_merge() function calls
782
+ last_merge_memory_usage = 0
783
+
784
+ def perform_periodic_merge(
785
+ batch_save_dir: Path,
786
+ merged_batch_tracker: Set[str],
787
+ local_temp_metadata_path: Path,
788
+ local_temp_embeddings_path: Path,
789
+ final_log_path: Path,
790
+ local_temp_log_path: Path
791
+ ):
792
+ """
793
+ 100% JSONL-only periodic merge with NO Parquet operations whatsoever.
794
+ Only merges to JSONL files, conversion to Parquet happens separately at the end.
795
+ """
796
+ global last_merge_memory_usage
797
+
798
+ # Track memory at function start
799
+ process = psutil.Process()
800
+ start_memory = process.memory_info().rss / (1024 * 1024)
801
+ logging.info(f"Starting JSONL-only periodic merge. Current memory: {start_memory:.2f} MB")
802
+
803
+ # Define paths for working JSONL files (strip .parquet suffix if present)
804
+ meta_jsonl_path = Path(str(local_temp_metadata_path).replace('.parquet', '.jsonl'))
805
+ embed_jsonl_path = Path(str(local_temp_embeddings_path).replace('.parquet', '.jsonl'))
806
+
807
+ # Find all JSONL batch files that haven't been merged yet
808
+ meta_batch_files = []
809
+ embed_batch_files = []
810
+
811
+ # Only look for JSONL batch files
812
+ for batch_file in batch_save_dir.glob("metadata_batch_*.jsonl"):
813
+ if batch_file.name not in merged_batch_tracker:
814
+ meta_batch_files.append(batch_file)
815
+
816
+ for batch_file in batch_save_dir.glob("embeddings_batch_*.jsonl"):
817
+ if batch_file.name not in merged_batch_tracker:
818
+ embed_batch_files.append(batch_file)
819
+
820
+ if not meta_batch_files and not embed_batch_files:
821
+ logging.info("No new JSONL batches to merge periodically.")
822
+ return 0
823
+
824
+ logging.info(f"Performing JSONL-only merge of {len(meta_batch_files)} metadata files and {len(embed_batch_files)} embedding files")
825
+
826
+ # --- Process metadata files ---
827
+ if meta_batch_files:
828
+ try:
829
+ # Load existing record CIDs from JSONL to avoid duplicates
830
+ existing_cids = set()
831
+
832
+ # Check if JSONL exists from previous run and load CIDs
833
+ if meta_jsonl_path.exists():
834
+ logging.info(f"Scanning existing JSONL for CIDs: {meta_jsonl_path}")
835
+ with open(meta_jsonl_path, 'r') as f:
836
+ for line in f:
837
+ try:
838
+ record = json.loads(line)
839
+ if 'record_cid' in record:
840
+ existing_cids.add(record['record_cid'])
841
+ except Exception:
842
+ pass
843
+ logging.info(f"Found {len(existing_cids)} existing CIDs in metadata JSONL")
844
+
845
+ # Open JSONL in append mode
846
+ with open(meta_jsonl_path, 'a') as jsonl_out:
847
+ # Process each batch file
848
+ for batch_file in meta_batch_files:
849
+ try:
850
+ logging.info(f"Processing metadata batch: {batch_file.name}")
851
+
852
+ # Process the JSONL batch file line by line
853
+ new_records_count = 0
854
+ total_records_count = 0
855
+
856
+ with open(batch_file, 'r') as batch_in:
857
+ for line in batch_in:
858
+ total_records_count += 1
859
+ try:
860
+ record = json.loads(line)
861
+ # Filter out records with CIDs that already exist
862
+ if 'record_cid' in record and record['record_cid'] not in existing_cids:
863
+ # Write new record to output JSONL
864
+ jsonl_out.write(line)
865
+ # Add to existing CIDs to avoid future duplicates
866
+ existing_cids.add(record['record_cid'])
867
+ new_records_count += 1
868
+ except json.JSONDecodeError:
869
+ logging.warning(f"Could not parse JSON line in {batch_file.name}")
870
+
871
+ # Log stats
872
+ logging.info(f"Batch has {total_records_count} records, {new_records_count} are new")
873
+
874
+ # Mark batch as processed
875
+ merged_batch_tracker.add(batch_file.name)
876
+
877
+ # Clean up batch file if enabled
878
+ if CLEAN_AFTER_PERIODIC_MERGE:
879
+ try:
880
+ batch_file.unlink()
881
+ logging.debug(f"Removed processed batch file: {batch_file}")
882
+ except Exception as e:
883
+ logging.warning(f"Could not remove batch file: {e}")
884
+
885
+ # Force memory cleanup after each batch
886
+ gc.collect()
887
+
888
+ except Exception as e:
889
+ logging.error(f"Error processing batch file {batch_file}: {e}")
890
+ except Exception as e:
891
+ logging.error(f"Error in metadata merge process: {e}", exc_info=True)
892
+
893
+ # Force memory cleanup between metadata and embeddings
894
+ gc.collect()
895
+
896
+ # --- Process embeddings files (similar approach) ---
897
+ if embed_batch_files:
898
+ try:
899
+ # Load existing record CIDs from JSONL to avoid duplicates
900
+ existing_cids = set()
901
+
902
+ # Check if JSONL exists from previous run and load CIDs
903
+ if embed_jsonl_path.exists():
904
+ logging.info(f"Scanning existing JSONL for CIDs: {embed_jsonl_path}")
905
+ with open(embed_jsonl_path, 'r') as f:
906
+ for line in f:
907
+ try:
908
+ record = json.loads(line)
909
+ if 'record_cid' in record:
910
+ existing_cids.add(record['record_cid'])
911
+ except Exception:
912
+ pass
913
+ logging.info(f"Found {len(existing_cids)} existing CIDs in embeddings JSONL")
914
+
915
+ # Open JSONL in append mode
916
+ with open(embed_jsonl_path, 'a') as jsonl_out:
917
+ # Process each batch file
918
+ for batch_file in embed_batch_files:
919
+ try:
920
+ logging.info(f"Processing embeddings batch: {batch_file.name}")
921
+
922
+ # Process the JSONL batch file line by line
923
+ new_records_count = 0
924
+ total_records_count = 0
925
+
926
+ with open(batch_file, 'r') as batch_in:
927
+ for line in batch_in:
928
+ total_records_count += 1
929
+ try:
930
+ record = json.loads(line)
931
+ # Filter out records with CIDs that already exist
932
+ if 'record_cid' in record and record['record_cid'] not in existing_cids:
933
+ # Write new record to output JSONL
934
+ jsonl_out.write(line)
935
+ # Add to existing CIDs to avoid future duplicates
936
+ existing_cids.add(record['record_cid'])
937
+ new_records_count += 1
938
+ except json.JSONDecodeError:
939
+ logging.warning(f"Could not parse JSON line in {batch_file.name}")
940
+
941
+ # Log stats
942
+ logging.info(f"Batch has {total_records_count} records, {new_records_count} are new")
943
+
944
+ # Mark batch as processed
945
+ merged_batch_tracker.add(batch_file.name)
946
+
947
+ # Clean up batch file if enabled
948
+ if CLEAN_AFTER_PERIODIC_MERGE:
949
+ try:
950
+ batch_file.unlink()
951
+ logging.debug(f"Removed processed batch file: {batch_file}")
952
+ except Exception as e:
953
+ logging.warning(f"Could not remove batch file: {e}")
954
+
955
+ # Force memory cleanup after each batch
956
+ gc.collect()
957
+
958
+ except Exception as e:
959
+ logging.error(f"Error processing batch file {batch_file}: {e}")
960
+ except Exception as e:
961
+ logging.error(f"Error in embeddings merge process: {e}", exc_info=True)
962
+
963
+ # -- Only sync the log file, no Parquet files during runtime --
964
+ try:
965
+ if local_temp_log_path.is_file() and local_temp_log_path.stat().st_size > 0:
966
+ final_log_path.parent.mkdir(parents=True, exist_ok=True)
967
+ shutil.copyfile(local_temp_log_path, final_log_path)
968
+ logging.info("Log file sync successful.")
969
+ except Exception as e:
970
+ logging.error(f"Failed to sync log file: {e}")
971
+
972
+ # Final cleanup
973
+ for _ in range(3):
974
+ gc.collect()
975
+
976
+ # Update memory tracking for next call
977
+ end_memory = process.memory_info().rss / (1024 * 1024)
978
+ logging.info(f"Memory at end of merge: {end_memory:.2f} MB (Change: {end_memory - start_memory:.2f} MB)")
979
+ last_merge_memory_usage = end_memory
980
+
981
+ return len(meta_batch_files) + len(embed_batch_files)
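+
+ # Merge-call sketch (argument names follow the signature above; the return value is
+ # the number of batch files folded into the working JSONL files this round):
+ #   n_merged = perform_periodic_merge(batch_save_dir, merged_batch_tracker,
+ #                                     local_temp_metadata_path, local_temp_embeddings_path,
+ #                                     final_log_path, local_temp_log_path)
+ # Re-merging a batch after a crash is safe: lines whose record_cid already appears
+ # in the target JSONL are skipped.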
982
+
983
+
984
+
985
+ # --- UPDATED: Main Embedding Generation Function with CID-based Primary Key ---
986
+ def create_embedding_dataset(
987
+ input_jsonl_filepath: Path,
988
+ final_metadata_parquet_path: Path,
989
+ final_embeddings_parquet_path: Path,
990
+ local_temp_metadata_path: Path,
991
+ local_temp_embeddings_path: Path,
992
+ local_temp_log_path: Path,
993
+ final_log_filepath: Path,
994
+ max_records: Optional[int] = None,
995
+ batch_size: int = 32,
996
+ embedding_model_name: str = EMBEDDING_MODEL_NAME,
997
+ process_config: bool = PROCESS_CONFIG_JSON,
998
+ process_readme: bool = PROCESS_README_CONTENT,
999
+ ):
1000
+ """
1001
+ JSONL-only workflow that reads metadata, generates CIDs & embeddings,
1002
+ and saves all outputs as JSONL until the very end.
1003
+ """
1004
+ # --- Setup batch directory ---
1005
+ batch_save_dir = LOCAL_WORK_DIR / BATCH_SAVE_DIR_NAME
1006
+ batch_save_dir.mkdir(parents=True, exist_ok=True)
1007
+
1008
+ # --- Define JSONL paths by converting Parquet paths ---
1009
+ meta_jsonl_path = Path(str(local_temp_metadata_path).replace('.parquet', '.jsonl'))
1010
+ embed_jsonl_path = Path(str(local_temp_embeddings_path).replace('.parquet', '.jsonl'))
1011
+
1012
+ # --- Configure logging to use the local temp log file ---
1013
+ log_file_handler = logging.FileHandler(local_temp_log_path)
1014
+ log_stream_handler = logging.StreamHandler()
1015
+ for handler in logging.root.handlers[:]: logging.root.removeHandler(handler)
1016
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', handlers=[log_file_handler, log_stream_handler])
1017
+ logging.getLogger('huggingface_hub.repocard_data').setLevel(logging.ERROR)
1018
+
1019
+ # --- Log configuration ---
1020
+ logging.info(f"--- Starting Embedding Generation with JSONL-only workflow ---")
1021
+ logging.info(f"Input JSONL: '{input_jsonl_filepath}'")
1022
+ logging.info(f"Metadata JSONL Output: '{meta_jsonl_path}'")
1023
+ logging.info(f"Embeddings JSONL Output: '{embed_jsonl_path}'")
1024
+ logging.info(f"Final Metadata Parquet Output (post-processing): '{final_metadata_parquet_path}'")
1025
+ logging.info(f"Final Embeddings Parquet Output (post-processing): '{final_embeddings_parquet_path}'")
1026
+ logging.info(f"Batch Save Directory: '{batch_save_dir}'")
1027
+ logging.info(f"Batch Save Threshold: {BATCH_SAVE_THRESHOLD}")
1028
+ logging.info(f"Periodic Merge Frequency: {PERIODIC_MERGE_FREQUENCY} batches")
1029
+ logging.info(f"Clean After Periodic Merge: {CLEAN_AFTER_PERIODIC_MERGE}")
1030
+ logging.info(f"Local Temp Log: '{local_temp_log_path}'")
1031
+ logging.info(f"Final Log Output: '{final_log_filepath}'")
1032
+ logging.info(f"Embedding Model: '{embedding_model_name}', Batch Size: {batch_size}")
1033
+ logging.info(f"Process Config: {process_config}, Process README: {process_readme}")
1034
+ logging.info(f"Max Records: {'All' if max_records is None else max_records}")
1035
+
1036
+ # --- Load Embedding Model ---
1037
+ try:
1038
+ logging.info(f"Loading embedding model: {embedding_model_name}")
1039
+
1040
+ # Check for MPS (Apple Silicon GPU) availability first, then CUDA, then fall back to CPU
1041
+ if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
1042
+ device = 'mps'
1043
+ logging.info(f"Using Apple Silicon GPU (MPS)")
1044
+ elif torch.cuda.is_available():
1045
+ device = 'cuda'
1046
+ logging.info(f"Using NVIDIA GPU (CUDA)")
1047
+ else:
1048
+ device = 'cpu'
1049
+ logging.info(f"Using CPU (no GPU acceleration available)")
1050
+
1051
+ model = SentenceTransformer(embedding_model_name, device=device)
1052
+ cid_generator = ipfs_multiformats_py() # Initialize CID generator
1053
+ logging.info("Embedding model & CID generator loaded.")
1054
+ except Exception as e:
1055
+ logging.error(f"Failed to load embedding model or init CID generator: {e}", exc_info=True)
1056
+ return None, None # Return None for both paths
1057
+
1058
+ # --- Load processed CIDs from existing JSONL files ---
1059
+ processed_cids = set()
1060
+
1061
+ # 1. Check the main embeddings JSONL file
1062
+ if embed_jsonl_path.exists():
1063
+ logging.info(f"Found existing embeddings JSONL: {embed_jsonl_path}. Loading processed CIDs...")
1064
+ try:
1065
+ with open(embed_jsonl_path, 'r') as f:
1066
+ for line in f:
1067
+ try:
1068
+ record = json.loads(line)
1069
+ if 'record_cid' in record:
1070
+ processed_cids.add(record['record_cid'])
1071
+ except Exception:
1072
+ pass
1073
+ logging.info(f"Loaded {len(processed_cids)} CIDs from existing embeddings JSONL.")
1074
+ except Exception as e:
1075
+ logging.warning(f"Could not load CIDs from '{embed_jsonl_path}': {e}")
1076
+
1077
+ # 2. Check batch files
1078
+ batch_files = list(batch_save_dir.glob("embeddings_batch_*.jsonl"))
1079
+ if batch_files:
1080
+ logging.info(f"Found {len(batch_files)} embedding batch JSONL files.")
1081
+ batch_cids_count = 0
1082
+
1083
+ for batch_file in batch_files:
1084
+ try:
1085
+ with open(batch_file, 'r') as f:
1086
+ for line in f:
1087
+ try:
1088
+ record = json.loads(line)
1089
+ if 'record_cid' in record:
1090
+ processed_cids.add(record['record_cid'])
1091
+ batch_cids_count += 1
1092
+ except Exception:
1093
+ pass
1094
+ except Exception as e:
1095
+ logging.warning(f"Error loading CIDs from batch file {batch_file}: {e}")
1096
+
1097
+ logging.info(f"Loaded {batch_cids_count} additional CIDs from JSONL batch files.")
1098
+
1099
+ initial_processed_count = len(processed_cids)
1100
+ logging.info(f"Resuming from {initial_processed_count} records already processed.")
1101
+
1102
+ # --- Batch saving and periodic merge setup ---
1103
+ batch_counter = 0
1104
+ records_since_last_save = 0
1105
+ merged_batch_tracker = set() # Track which batch files have been merged
1106
+
1107
+ # Keep a lookup of model_id to record_cid for this session
1108
+ model_id_to_record_cid = {}
1109
+
1110
+ # --- Process JSONL File ---
1111
+ metadata_records_list = [] # Holds dicts for metadata
1112
+ embeddings_records_list = [] # Holds dicts for embeddings
1113
+ batch_inputs = [] # Holds tuples for batch processing
1114
+ record_count_from_jsonl = 0; processed_count_this_run = 0; skipped_resume_count = 0; skipped_error_count = 0
1115
+ start_time = None
1116
+
1117
+ try:
1118
+ logging.info(f"Opening input JSONL file: {input_jsonl_filepath}")
1119
+ start_time = time.time()
1120
+ with input_jsonl_filepath.open('r', encoding='utf-8') as f_jsonl:
1121
+ pbar = tqdm(f_jsonl, desc="Processing JSONL", unit="record")
1122
+ for line in pbar:
1123
+ record_count_from_jsonl += 1
1124
+ if max_records is not None and processed_count_this_run >= max_records:
1125
+ logging.info(f"Reached max_records limit ({max_records}). Stopping.");
1126
+ break
1127
+
1128
+ try:
1129
+ line = line.strip()
1130
+ if not line: continue
1131
+ data = json.loads(line) # Original metadata dictionary
1132
+ model_id = data.get('id')
1133
+ if not model_id or not isinstance(model_id, str):
1134
+ logging.warning(f"Skip record {record_count_from_jsonl}: missing/invalid 'id'.");
1135
+ skipped_error_count += 1;
1136
+ continue
1137
+
1138
+ # --- Extract text for embedding & CID generation ---
1139
+ config_text = ""; config_cid = None; config_dict_or_str = data.get('config')
1140
+ if process_config and config_dict_or_str is not None:
1141
+ if isinstance(config_dict_or_str, dict):
1142
+ try:
1143
+ config_text = json.dumps(config_dict_or_str, separators=(',', ':'));
1144
+ config_cid = cid_generator.get_cid(config_text) # Use compact string for CID
1145
+ except TypeError:
1146
+ logging.warning(f"Cannot serialize config for {model_id}. Skip CID/embed.")
1147
+ elif isinstance(config_dict_or_str, str): # Handle if config is already a string
1148
+ config_text = config_dict_or_str;
1149
+ config_cid = cid_generator.get_cid(config_text)
1150
+ else:
1151
+ logging.warning(f"Config for {model_id} type {type(config_dict_or_str)}. Skip CID/embed.")
1152
+
1153
+ readme_text = ""; readme_cid = None
1154
+ if process_readme:
1155
+ card_data = data.get('cardData')
1156
+ if isinstance(card_data, dict):
1157
+ readme_text = card_data.get('text', '') or ''
1158
+ elif isinstance(card_data, str):
1159
+ readme_text = card_data # If cardData itself is the string
1160
+ if not readme_text and isinstance(data.get('description'), str):
1161
+ readme_text = data['description'] # Fallback
1162
+ if readme_text:
1163
+ readme_cid = cid_generator.get_cid(readme_text)
1164
+
1165
+ # --- Generate record_cid (primary key) ---
1166
+ record_cid = generate_record_cid(cid_generator, model_id, config_cid, readme_cid)
1167
+
1168
+ # Store in lookup for future reference
1169
+ model_id_to_record_cid[model_id] = record_cid
1170
+
1171
+ # Skip if this record_cid has already been processed
1172
+ if record_cid in processed_cids:
1173
+ skipped_resume_count += 1
1174
+ continue
1175
+
1176
+ processed_count_this_run += 1
1177
+ pbar.set_postfix_str(f"Batching: {model_id}", refresh=True)
1178
+
1179
+ # Add to batch for embedding
1180
+ batch_inputs.append((data, config_text, readme_text, config_cid, readme_cid, record_cid))
1181
+
1182
+ # --- Process Batch when full ---
1183
+ if len(batch_inputs) >= batch_size:
1184
+ pbar.set_postfix_str(f"Embedding batch ({len(batch_inputs)})...", refresh=True)
1185
+ try:
1186
+ original_data_batch = [item[0] for item in batch_inputs]
1187
+ config_texts_batch = [item[1] for item in batch_inputs]
1188
+ readme_texts_batch = [item[2] for item in batch_inputs]
1189
+ config_cids_batch = [item[3] for item in batch_inputs]
1190
+ readme_cids_batch = [item[4] for item in batch_inputs]
1191
+ record_cids_batch = [item[5] for item in batch_inputs]
1192
+
1193
+ # Generate embeddings
1194
+ config_embeddings = model.encode(config_texts_batch, batch_size=batch_size, show_progress_bar=False) if process_config else [None] * len(batch_inputs)
1195
+ readme_embeddings = model.encode(readme_texts_batch, batch_size=batch_size, show_progress_bar=False) if process_readme else [None] * len(batch_inputs)
1196
+
1197
+ # --- Create records for BOTH data formats ---
1198
+ for i, original_data in enumerate(original_data_batch):
1199
+ current_model_id = original_data.get('id')
1200
+ current_record_cid = record_cids_batch[i]
1201
+ if not current_model_id or not current_record_cid: continue
1202
+
1203
+ # 1. Metadata Record
1204
+ metadata_record = original_data.copy() # Start with all original metadata
1205
+ # Remove bulky/embedded fields if they exist, keep CIDs
1206
+ metadata_record.pop('config_embedding', None)
1207
+ metadata_record.pop('readme_embedding', None)
1208
+ # Add CIDs
1209
+ metadata_record['record_cid'] = current_record_cid # Primary key
1210
+ if process_config: metadata_record['config_cid'] = config_cids_batch[i]
1211
+ if process_readme: metadata_record['readme_cid'] = readme_cids_batch[i]
1212
+ metadata_records_list.append(metadata_record)
1213
+
1214
+ # 2. Embedding Record
1215
+ embedding_record = {
1216
+ 'record_cid': current_record_cid, # Primary key
1217
+ 'model_id': current_model_id # Keep model_id for reference
1218
+ }
1219
+ if process_config:
1220
+ embedding_record['config_embedding'] = config_embeddings[i].tolist() if config_texts_batch[i] else None
1221
+ if process_readme:
1222
+ embedding_record['readme_embedding'] = readme_embeddings[i].tolist() if readme_texts_batch[i] else None
1223
+ embeddings_records_list.append(embedding_record)
1224
+
1225
+ # Mark this record as processed to avoid reprocessing if script restarts
1226
+ processed_cids.add(current_record_cid)
1227
+
1228
+ # Increment counter for batch saving
1229
+ records_since_last_save += 1
1230
+
1231
+ logging.debug(f"Processed batch. Metadata size: {len(metadata_records_list)}, Embeddings size: {len(embeddings_records_list)}")
1232
+
1233
+ # --- Save batch if we've reached the threshold ---
1234
+ if records_since_last_save >= BATCH_SAVE_THRESHOLD:
1235
+ batch_counter += 1
1236
+ timestamp = int(time.time())
1237
+
1238
+ # Save metadata batch as JSONL
1239
+ meta_batch_file = batch_save_dir / f"metadata_batch_{batch_counter}_{timestamp}.jsonl"
1240
+ success_meta = True
1241
+ try:
1242
+ with open(meta_batch_file, 'w') as f:
1243
+ for record in metadata_records_list:
1244
+ f.write(json.dumps(safe_serialize_dict(record)) + '\n')
1245
+ logging.info(f"Saved metadata batch {batch_counter} as JSONL with {len(metadata_records_list)} records")
1246
+ except Exception as e:
1247
+ logging.error(f"Error saving metadata batch as JSONL: {e}")
1248
+ success_meta = False
1249
+
1250
+ # Save embeddings batch as JSONL
1251
+ embed_batch_file = batch_save_dir / f"embeddings_batch_{batch_counter}_{timestamp}.jsonl"
1252
+ success_embed = True
1253
+ try:
1254
+ with open(embed_batch_file, 'w') as f:
1255
+ for record in embeddings_records_list:
1256
+ f.write(json.dumps(safe_serialize_dict(record)) + '\n')
1257
+ logging.info(f"Saved embeddings batch {batch_counter} as JSONL with {len(embeddings_records_list)} records")
1258
+ except Exception as e:
1259
+ logging.error(f"Error saving embeddings batch as JSONL: {e}")
1260
+ success_embed = False
1261
+
1262
+ if success_meta and success_embed:
1263
+ logging.info(f"Saved batch {batch_counter} with {len(embeddings_records_list)} records")
1264
+ else:
1265
+ logging.warning(f"Batch {batch_counter} save had issues. Check logs.")
1266
+
1267
+ # Clear the lists to start a new batch and reset counter
1268
+ metadata_records_list = []
1269
+ embeddings_records_list = []
1270
+ records_since_last_save = 0
1271
+
1272
+ # --- Periodic merge to final JSONL files ---
1273
+ if PERIODIC_MERGE_FREQUENCY > 0 and batch_counter % PERIODIC_MERGE_FREQUENCY == 0:
1274
+ pbar.set_postfix_str(f"Periodic merge to JSONL...", refresh=True)
1275
+ batches_merged = perform_periodic_merge(
1276
+ batch_save_dir=batch_save_dir,
1277
+ merged_batch_tracker=merged_batch_tracker,
1278
+ local_temp_metadata_path=local_temp_metadata_path,
1279
+ local_temp_embeddings_path=local_temp_embeddings_path,
1280
+ final_log_path=final_log_filepath,
1281
+ local_temp_log_path=local_temp_log_path
1282
+ )
1283
+ pbar.set_postfix_str(f"Merged {batches_merged} batches to JSONL", refresh=True)
1284
+
1285
+ except Exception as e_embed:
1286
+ logging.error(f"Error embedding batch: {e_embed}", exc_info=True)
1287
+ skipped_error_count += len(batch_inputs) # Count whole batch as skipped
1288
+
1289
+ batch_inputs = [] # Clear batch
1290
+
1291
+ # Handle line processing errors
1292
+ except json.JSONDecodeError:
1293
+ logging.warning(f"Skip record {record_count_from_jsonl}: JSON decode error.");
1294
+ skipped_error_count += 1
1295
+ except Exception as e_line:
1296
+ logging.error(f"Skip record {record_count_from_jsonl}: Error - {e_line}", exc_info=False);
1297
+ skipped_error_count += 1
1298
+ # --- End reading JSONL file ---
1299
+
1300
+ # --- Process Final Remaining Batch ---
1301
+ if batch_inputs:
1302
+ pbar.set_postfix_str(f"Embedding final batch ({len(batch_inputs)})...", refresh=True)
1303
+ try:
1304
+ # Process just like the main batch
1305
+ original_data_batch = [item[0] for item in batch_inputs]
1306
+ config_texts_batch = [item[1] for item in batch_inputs]
1307
+ readme_texts_batch = [item[2] for item in batch_inputs]
1308
+ config_cids_batch = [item[3] for item in batch_inputs]
1309
+ readme_cids_batch = [item[4] for item in batch_inputs]
1310
+ record_cids_batch = [item[5] for item in batch_inputs]
1311
+
1312
+ config_embeddings = model.encode(config_texts_batch, batch_size=batch_size, show_progress_bar=False) if process_config else [None] * len(batch_inputs)
1313
+ readme_embeddings = model.encode(readme_texts_batch, batch_size=batch_size, show_progress_bar=False) if process_readme else [None] * len(batch_inputs)
1314
+
1315
+ for i, original_data in enumerate(original_data_batch):
1316
+ current_model_id = original_data.get('id')
1317
+ current_record_cid = record_cids_batch[i]
1318
+ if not current_model_id or not current_record_cid: continue
1319
+
1320
+ metadata_record = original_data.copy()
1321
+ metadata_record.pop('config_embedding', None)
1322
+ metadata_record.pop('readme_embedding', None)
1323
+ metadata_record['record_cid'] = current_record_cid # Primary key
1324
+ if process_config: metadata_record['config_cid'] = config_cids_batch[i]
1325
+ if process_readme: metadata_record['readme_cid'] = readme_cids_batch[i]
1326
+ metadata_records_list.append(metadata_record)
1327
+
1328
+ embedding_record = {
1329
+ 'record_cid': current_record_cid, # Primary key
1330
+ 'model_id': current_model_id # Keep model_id for reference
1331
+ }
1332
+ if process_config:
1333
+ embedding_record['config_embedding'] = config_embeddings[i].tolist() if config_texts_batch[i] else None
1334
+ if process_readme:
1335
+ embedding_record['readme_embedding'] = readme_embeddings[i].tolist() if readme_texts_batch[i] else None
1336
+ embeddings_records_list.append(embedding_record)
1337
+
1338
+ # Mark as processed
1339
+ processed_cids.add(current_record_cid)
1340
+ records_since_last_save += 1
1341
+
1342
+ logging.debug(f"Processed final batch. Metadata size: {len(metadata_records_list)}, Embeddings size: {len(embeddings_records_list)}")
1343
+ except Exception as e_embed_final:
1344
+ logging.error(f"Error embedding final batch: {e_embed_final}", exc_info=True)
1345
+ skipped_error_count += len(batch_inputs)
1346
+ # --- End processing batches ---
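+ # The encode calls above rely on sentence-transformers returning one vector per
+ # input text. A minimal, self-contained sketch (the model name here is
+ # hypothetical, not necessarily EMBEDDING_MODEL_NAME):
+ #
+ #     from sentence_transformers import SentenceTransformer
+ #     model = SentenceTransformer("all-MiniLM-L6-v2")  # hypothetical choice
+ #     vectors = model.encode(["config text", "readme text"],
+ #                            batch_size=2, show_progress_bar=False)
+ #     # vectors is a numpy array of shape (2, dim); .tolist() makes each row
+ #     # JSON-serializable, matching the embedding_record fields above.
+ #     print(len(vectors[0].tolist()))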
1347
+
1348
+ # --- Save any remaining records as a final batch ---
1349
+ if metadata_records_list:
1350
+ batch_counter += 1
1351
+ timestamp = int(time.time())
1352
+
1353
+ # Save final metadata batch as JSONL
1354
+ meta_batch_file = batch_save_dir / f"metadata_batch_{batch_counter}_{timestamp}.jsonl"
1355
+ success_meta = True
1356
+ try:
1357
+ with open(meta_batch_file, 'w') as f:
1358
+ for record in metadata_records_list:
1359
+ f.write(json.dumps(safe_serialize_dict(record)) + '\n')
1360
+ logging.info(f"Saved final metadata batch as JSONL with {len(metadata_records_list)} records")
1361
+ except Exception as e:
1362
+ logging.error(f"Error saving final metadata batch as JSONL: {e}")
1363
+ success_meta = False
1364
+
1365
+ # Save final embeddings batch as JSONL
1366
+ embed_batch_file = batch_save_dir / f"embeddings_batch_{batch_counter}_{timestamp}.jsonl"
1367
+ success_embed = True
1368
+ try:
1369
+ with open(embed_batch_file, 'w') as f:
1370
+ for record in embeddings_records_list:
1371
+ f.write(json.dumps(safe_serialize_dict(record)) + '\n')
1372
+ logging.info(f"Saved final embeddings batch as JSONL with {len(embeddings_records_list)} records")
1373
+ except Exception as e:
1374
+ logging.error(f"Error saving final embeddings batch as JSONL: {e}")
1375
+ success_embed = False
1376
+
1377
+ if success_meta and success_embed:
1378
+ logging.info(f"Saved final batch {batch_counter} with {len(embeddings_records_list)} records")
1379
+ else:
1380
+ logging.warning(f"Final batch {batch_counter} save had issues. Check logs.")
1381
+
1382
+ # Clear lists
1383
+ metadata_records_list = []
1384
+ embeddings_records_list = []
1385
+
1386
+ pbar.close()
1387
+ logging.info("Finished processing records from JSONL.")
1388
+
1389
+ # --- Merge all remaining batches into the final JSONL files ---
1390
+ # Process any remaining batches that haven't been merged
1391
+ if PERIODIC_MERGE_FREQUENCY > 0:
1392
+ logging.info("Performing final merge of any remaining batches...")
1393
+ batches_merged = perform_periodic_merge(
1394
+ batch_save_dir=batch_save_dir,
1395
+ merged_batch_tracker=merged_batch_tracker,
1396
+ local_temp_metadata_path=local_temp_metadata_path,
1397
+ local_temp_embeddings_path=local_temp_embeddings_path,
1398
+ final_log_path=final_log_filepath,
1399
+ local_temp_log_path=local_temp_log_path
1400
+ )
1401
+ logging.info(f"Final merge: processed {batches_merged} remaining batches")
1402
+
1403
+ # Return the JSONL paths for final conversion
1404
+ return meta_jsonl_path, embed_jsonl_path
1405
+
1406
+ # Handle file/main processing errors
1407
+ except FileNotFoundError:
1408
+ logging.error(f"CRITICAL: Input JSONL file not found: {input_jsonl_filepath}.");
1409
+ return None, None
1410
+ except Exception as e_main:
1411
+ logging.error(f"CRITICAL error: {e_main}", exc_info=True);
1412
+ return None, None
1413
+
1414
+ # --- Final Summary ---
1415
+ finally:
1416
+ total_processed_in_run = processed_count_this_run
1417
+ total_batches_saved = batch_counter
1418
+ total_batches_merged = len(merged_batch_tracker)
1419
+
1420
+ logging.info("--- Embedding Generation Summary ---")
1421
+ logging.info(f"Records read from JSONL: {record_count_from_jsonl}")
1422
+ logging.info(f"Records skipped (resume): {skipped_resume_count}")
1423
+ logging.info(f"Records processed this run: {total_processed_in_run}")
1424
+ logging.info(f"Records skipped (errors): {skipped_error_count}")
1425
+ logging.info(f"Total batches saved: {total_batches_saved}")
1426
+ logging.info(f"Total batches merged: {total_batches_merged}")
1427
+ logging.info(f"Total unique records processed (including previous runs): {len(processed_cids)}")
1428
+ if start_time:
1429
+ logging.info(f"Total processing time: {time.time() - start_time:.2f} seconds")
1430
+ logging.info("------------------------------------")
1431
+
1432
+
1433
+
1434
+ # --- Upload Function (Modified for two files) ---
1435
+ def upload_files_to_hub(
1436
+ local_metadata_path: Path,
1437
+ local_embeddings_path: Path,
1438
+ repo_id: str,
1439
+ repo_type: str = "dataset",
1440
+ metadata_path_in_repo: Optional[str] = None,
1441
+ embeddings_path_in_repo: Optional[str] = None,
1442
+ hf_token: Union[str, bool, None] = None
1443
+ ):
1444
+ """Uploads the generated Parquet files to the Hugging Face Hub."""
1445
+ api = HfApi(token=hf_token)
1446
+ uploaded_meta = False
1447
+ uploaded_embed = False
1448
+
1449
+ # Upload metadata file (suffix is reflected in the commit message)
1450
+ if local_metadata_path and local_metadata_path.exists():
1451
+ path_in_repo_meta = metadata_path_in_repo or local_metadata_path.name
1452
+ logging.info(f"Uploading Metadata: {local_metadata_path} to {repo_id} as {path_in_repo_meta}...")
1453
+ try:
1454
+ api.upload_file(
1455
+ path_or_fileobj=str(local_metadata_path), path_in_repo=path_in_repo_meta, repo_id=repo_id, repo_type=repo_type,
1456
+ commit_message=f"Update metadata ({local_metadata_path.suffix}) {time.strftime('%Y-%m-%d %H:%M:%S')}"
1457
+ ); logging.info("Metadata upload successful."); uploaded_meta = True
1458
+ except Exception as e: logging.error(f"Metadata upload failed: {e}", exc_info=True)
1459
+ else: logging.warning("Local metadata file not found or not specified. Skipping metadata upload.")
1460
+
1461
+ # Upload embeddings file (suffix is reflected in the commit message)
1462
+ if local_embeddings_path and local_embeddings_path.exists():
1463
+ path_in_repo_embed = embeddings_path_in_repo or local_embeddings_path.name
1464
+ logging.info(f"Uploading Embeddings: {local_embeddings_path} to {repo_id} as {path_in_repo_embed}...")
1465
+ try:
1466
+ api.upload_file(
1467
+ path_or_fileobj=str(local_embeddings_path), path_in_repo=path_in_repo_embed, repo_id=repo_id, repo_type=repo_type,
1468
+ commit_message=f"Update embeddings ({local_embeddings_path.suffix}) {time.strftime('%Y-%m-%d %H:%M:%S')}"
1469
+ ); logging.info("Embeddings upload successful."); uploaded_embed = True
1470
+ except Exception as e: logging.error(f"Embeddings upload failed: {e}", exc_info=True)
1471
+ else: logging.warning("Local embeddings file not found or not specified. Skipping embeddings upload.")
1472
+
1473
+ return uploaded_meta and uploaded_embed # Return overall success
1474
+
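+ # Example call (a sketch; the repo id and file names are hypothetical, and the
+ # token comes from a prior huggingface-cli login since hf_token defaults to None):
+ #
+ #     ok = upload_files_to_hub(
+ #         local_metadata_path=Path("model_metadata.parquet"),
+ #         local_embeddings_path=Path("model_embeddings.parquet"),
+ #         repo_id="some-user/some-dataset",
+ #         repo_type="dataset",
+ #     )
+ #     print("uploaded:", ok)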
1475
+ # --- Script Execution (`if __name__ == "__main__":`) ---
1476
+ if __name__ == "__main__":
1477
+ # --- Determine Paths ---
1478
+ print("--- Determining Output Paths ---")
1479
+ gdrive_base = Path(GDRIVE_MOUNT_POINT); gdrive_target_dir = gdrive_base / GDRIVE_FOLDER_NAME
1480
+ local_fallback_dir = Path(LOCAL_FOLDER_NAME); effective_final_dir = None;
1481
+ print(f"Checking GDrive: {gdrive_base}");
1482
+ if gdrive_base.is_dir() and gdrive_base.exists():
1483
+ print(f"Mount OK. Checking target: {gdrive_target_dir}");
1484
+
1485
+ if gdrive_target_dir.is_dir():
1486
+ print(f"Target Google Drive directory found. Using Google Drive.")
1487
+ effective_final_dir = gdrive_target_dir
1488
+ else:
1489
+ print(f"Target Google Drive directory '{gdrive_target_dir}' not found. Will attempt to create.")
1490
+ try:
1491
+ gdrive_target_dir.mkdir(parents=True, exist_ok=True)
1492
+ print(f"Successfully created Google Drive directory.")
1493
+ effective_final_dir = gdrive_target_dir
1494
+ except Exception as e:
1495
+ print(f"Error creating Google Drive directory '{gdrive_target_dir}': {e}")
1496
+ print("Falling back to local directory.")
1497
+ effective_final_dir = local_fallback_dir
1498
+
1499
+ else:
1500
+ local_fallback_dir.mkdir(parents=True, exist_ok=True)
1501
+ print(f"Mount not found. Using local fallback: {local_fallback_dir}")
1502
+ effective_final_dir = local_fallback_dir
1503
+
1504
+ effective_final_dir.mkdir(parents=True, exist_ok=True); LOCAL_WORK_DIR.mkdir(parents=True, exist_ok=True); print(f"Effective final destination directory: {effective_final_dir}");
1505
+
1506
+ # Define final destination paths
1507
+ final_metadata_filepath = effective_final_dir / FINAL_METADATA_PARQUET_FILENAME
1508
+ final_embeddings_filepath = effective_final_dir / FINAL_EMBEDDINGS_PARQUET_FILENAME
1509
+ final_log_filepath = effective_final_dir / FINAL_LOG_FILENAME
1510
+ input_jsonl_filepath = effective_final_dir / INPUT_JSONL_FILENAME # Assume input is also in final dir
1511
+
1512
+ # Define local working paths
1513
+ local_temp_metadata_path = LOCAL_WORK_DIR / LOCAL_TEMP_METADATA_JSONL
1514
+ local_temp_embeddings_path = LOCAL_WORK_DIR / LOCAL_TEMP_EMBEDDINGS_JSONL
1515
+ local_temp_log_path = LOCAL_WORK_DIR / LOCAL_TEMP_LOG_FILENAME
1516
+
1517
+ print(f"Input JSONL path: {input_jsonl_filepath}")
1518
+ print(f"Final Metadata Parquet path: {final_metadata_filepath}")
1519
+ print(f"Final Embeddings Parquet path: {final_embeddings_filepath}")
1520
+ print(f"Final log file path: {final_log_filepath}")
1521
+ print(f"Local temp Metadata path: {local_temp_metadata_path}")
1522
+ print(f"Local temp Embeddings path: {local_temp_embeddings_path}")
1523
+ print(f"Local temp log file path: {local_temp_log_path}")
1524
+ print("-" * 30)
1525
+
1526
+
1527
+ # Check for existing local temp files (for resumption)
1528
+ resuming_from_previous_run = False
1529
+ if local_temp_metadata_path.exists() and local_temp_embeddings_path.exists():
1530
+ file_size_meta = local_temp_metadata_path.stat().st_size
1531
+ file_size_embed = local_temp_embeddings_path.stat().st_size
1532
+
1533
+ if file_size_meta > 0 and file_size_embed > 0:
1534
+ print(f"Found existing temp files, will resume processing:")
1535
+ print(f" - Metadata file: {local_temp_metadata_path} ({file_size_meta} bytes)")
1536
+ print(f" - Embeddings file: {local_temp_embeddings_path} ({file_size_embed} bytes)")
1537
+ resuming_from_previous_run = True
1538
+ else:
1539
+ print(f"Found existing temp files but they're empty, removing them:")
1540
+ if file_size_meta == 0:
1541
+ print(f" - Removing empty metadata file: {local_temp_metadata_path}")
1542
+ local_temp_metadata_path.unlink()
1543
+ if file_size_embed == 0:
1544
+ print(f" - Removing empty embeddings file: {local_temp_embeddings_path}")
1545
+ local_temp_embeddings_path.unlink()
1546
+ else:
1547
+ print(f"No existing temp files found, starting fresh processing run.")
1548
+
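+ # When resuming, already-processed records can be recovered from the temp
+ # metadata JSONL by collecting their record CIDs. A minimal sketch (assumes the
+ # file layout written by this script; not the actual resume code):
+ #
+ #     processed_cids = set()
+ #     if local_temp_metadata_path.exists():
+ #         with open(local_temp_metadata_path) as f:
+ #             for line in f:
+ #                 try:
+ #                     processed_cids.add(json.loads(line)["record_cid"])
+ #                 except (json.JSONDecodeError, KeyError):
+ #                     continue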
1549
+
1550
+ # --- Run the Embedding Generation ---
1551
+ # Returns paths to the *local* temp parquet files if successful
1552
+ local_meta_path, local_embed_path = create_embedding_dataset(
1553
+ input_jsonl_filepath=input_jsonl_filepath,
1554
+ final_metadata_parquet_path=final_metadata_filepath, # Used when resuming a previous run
1555
+ final_embeddings_parquet_path=final_embeddings_filepath, # Used when resuming a previous run
1556
+ local_temp_metadata_path=local_temp_metadata_path, # Local save dest
1557
+ local_temp_embeddings_path=local_temp_embeddings_path, # Local save dest
1558
+ local_temp_log_path=local_temp_log_path, # Local log dest
1559
+ final_log_filepath=final_log_filepath, # Final log for logging clarity
1560
+ max_records=MAX_RECORDS_TO_PROCESS,
1561
+ batch_size=BATCH_SIZE,
1562
+ embedding_model_name=EMBEDDING_MODEL_NAME,
1563
+ process_config=PROCESS_CONFIG_JSON,
1564
+ process_readme=PROCESS_README_CONTENT,
1565
+ )
1566
+
1567
+ # --- Sync final local files to Drive/Destination ---
1568
+ if local_meta_path or local_embed_path: # Check if at least one file was created
1569
+ logging.info("Attempting to sync final local files to destination...")
1570
+
1571
+ # After all processing is complete
1572
+ meta_jsonl_path = LOCAL_WORK_DIR / LOCAL_TEMP_METADATA_JSONL
1573
+ embed_jsonl_path = LOCAL_WORK_DIR / LOCAL_TEMP_EMBEDDINGS_JSONL
1574
+
1576
+ # Define Parquet output paths
1577
+ local_temp_metadata_parquet = LOCAL_WORK_DIR / FINAL_METADATA_PARQUET_FILENAME
1578
+ local_temp_embeddings_parquet = LOCAL_WORK_DIR / FINAL_EMBEDDINGS_PARQUET_FILENAME
1579
+
1580
+ # One-time conversion from JSONL to Parquet at the very end
1581
+ convert_jsonl_to_parquet(
1582
+ meta_jsonl_path=meta_jsonl_path,
1583
+ embed_jsonl_path=embed_jsonl_path,
1584
+ local_temp_metadata_path=local_temp_metadata_parquet,
1585
+ local_temp_embeddings_path=local_temp_embeddings_parquet,
1586
+ chunk_size=50000, # Starting chunk size (will adapt)
1587
+ max_memory_mb=2000 # Memory threshold in MB
1588
+ )
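+ # The conversion helper streams the JSONL in chunks so the full dataset never
+ # has to fit in memory. A simplified, self-contained sketch of that idea using
+ # pandas + pyarrow (not the actual convert_jsonl_to_parquet implementation):
+ #
+ #     import pandas as pd
+ #     import pyarrow as pa
+ #     import pyarrow.parquet as pq
+ #     writer = None
+ #     for chunk in pd.read_json(meta_jsonl_path, lines=True, chunksize=50000):
+ #         table = pa.Table.from_pandas(chunk, preserve_index=False)
+ #         if writer is None:
+ #             writer = pq.ParquetWriter(local_temp_metadata_parquet, table.schema)
+ #         writer.write_table(table)
+ #     if writer is not None:
+ #         writer.close()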
1589
+
1590
+ sync_success = sync_local_files_to_final(
1591
+ local_metadata_path=local_temp_metadata_parquet, # Use the defined local path vars
1592
+ local_embeddings_path=local_temp_embeddings_parquet,
1593
+ local_log_path=local_temp_log_path,
1594
+ final_metadata_path=final_metadata_filepath,
1595
+ final_embeddings_path=final_embeddings_filepath,
1596
+ final_log_path=final_log_filepath
1597
+ )
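+ # sync_local_files_to_final is presumably defined earlier in this script;
+ # conceptually it copies each local temp file to its final destination. A
+ # minimal sketch of that idea (assumed behaviour, not the actual implementation):
+ #
+ #     import shutil
+ #     for src, dst in [(local_temp_metadata_parquet, final_metadata_filepath),
+ #                      (local_temp_embeddings_parquet, final_embeddings_filepath),
+ #                      (local_temp_log_path, final_log_filepath)]:
+ #         if src.is_file():
+ #             shutil.copy2(src, dst)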
1598
+
1599
+ if sync_success:
1600
+ logging.info("Final sync to destination successful.")
1601
+ # --- Upload final Parquet from Destination to Hub (Optional) ---
1602
+ if UPLOAD_TO_HUB:
1603
+ upload_files_to_hub(
1604
+ local_metadata_path=final_metadata_filepath, # Upload from final dest
1605
+ local_embeddings_path=final_embeddings_filepath,
1606
+ repo_id=TARGET_REPO_ID,
1607
+ repo_type=TARGET_REPO_TYPE,
1608
+ metadata_path_in_repo=METADATA_FILENAME_IN_REPO,
1609
+ embeddings_path_in_repo=EMBEDDINGS_FILENAME_IN_REPO,
1610
+ hf_token=None # Uses login
1611
+ )
1612
+ else: logging.info("Hub upload skipped by configuration.")
1613
+ else: logging.error("Final sync to destination FAILED. Cannot upload to Hub.")
1614
+ else: logging.warning("Local temp JSONL creation failed or no data processed. Skipping final sync and Hub upload.")
1615
+
1616
+ '''
1617
+ # --- Clean up local temp files ---
1618
+ logging.info("Attempting final cleanup of local temp files...")
1619
+ try:
1620
+ if local_temp_metadata_path.is_file(): local_temp_metadata_path.unlink(); logging.info(f"Cleaned {local_temp_metadata_path}")
1621
+ if local_temp_embeddings_path.is_file(): local_temp_embeddings_path.unlink(); logging.info(f"Cleaned {local_temp_embeddings_path}")
1622
+ if local_temp_log_path.is_file(): local_temp_log_path.unlink(); logging.info(f"Cleaned {local_temp_log_path}")
1623
+ except Exception as clean_e: logging.warning(f"Could not clean up local temp files: {clean_e}")
1624
+ '''
1625
+ logging.info("Script finished.")
1626
+