Upload download_CodeConvo.py
Browse files- download_CodeConvo.py +465 -0
download_CodeConvo.py
ADDED
|
@@ -0,0 +1,465 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Script to examine and download CodeConvo dataset from Hugging Face
|
| 3 |
+
Uses huggingface_hub library for inspection and download
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from huggingface_hub import (
|
| 7 |
+
list_repo_files,
|
| 8 |
+
hf_hub_download,
|
| 9 |
+
snapshot_download
|
| 10 |
+
)
|
| 11 |
+
from datasets import load_dataset
|
| 12 |
+
import os
|
| 13 |
+
import shutil
|
| 14 |
+
import traceback
|
| 15 |
+
from pathlib import Path
|
| 16 |
+
import argparse
|
| 17 |
+
|
| 18 |
+
def examine_repo_structure():
    """Print an overview of the CodeConvo repository layout on the Hub.

    Lists every file in the dataset repo, groups the paths by their leading
    folder ("root" for files with no folder component), and prints up to ten
    entries per folder.  Returns True on success, False if the listing failed.
    """
    print("\n" + "="*70)
    print("EXAMINING REPOSITORY STRUCTURE")
    print("="*70)

    repo_id = "jiebi/CodeConvo"
    repo_type = "dataset"

    try:
        print("\n1. Repository Information:")
        print("-" * 70)
        print(f"Repo ID: {repo_id}")
        print(f"Repo Type: {repo_type}")

        print("\n2. Files and Folders in Repository:")
        print("-" * 70)
        files = list_repo_files(repo_id=repo_id, repo_type=repo_type)
        print(f"Total items: {len(files)}\n")

        # Bucket every path under its top-level folder; bare files go to "root".
        grouped = {}
        for entry in sorted(files):
            bucket = entry.split("/")[0] if "/" in entry else "root"
            grouped.setdefault(bucket, []).append(entry)

        # Render each bucket, capping the per-folder listing at ten entries.
        for bucket in sorted(grouped):
            print(f"\n📁 {bucket}/")
            for entry in sorted(grouped[bucket])[:10]:
                marker = " [data file]" if entry.endswith((".parquet", ".jsonl", ".json", ".arrow")) else ""
                print(f" └─ {entry}{marker}")
            hidden = len(grouped[bucket]) - 10
            if hidden > 0:
                print(f" └─ ... and {hidden} more files")

        print("\n" + "="*70)
        return True

    except Exception as e:
        print(f"\n✗ Error examining repository: {type(e).__name__}")
        print(f"Message: {str(e)}")
        traceback.print_exc()
        return False
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def _filter_files_for_request(files, split=None, repo=None, direction=None):
|
| 75 |
+
"""Filter repository files based on requested split/repo/direction."""
|
| 76 |
+
if not split:
|
| 77 |
+
return files
|
| 78 |
+
|
| 79 |
+
split = split.lower()
|
| 80 |
+
|
| 81 |
+
# --split train: download train/{direction}/...
|
| 82 |
+
if split == "train":
|
| 83 |
+
direction = direction or "c2i"
|
| 84 |
+
prefix = f"train/{direction}/"
|
| 85 |
+
return [f for f in files if f.startswith(prefix)]
|
| 86 |
+
|
| 87 |
+
# --split dev|test: download {repo}/{direction}/{split}/... and optional flat jsonl files
|
| 88 |
+
if split in ["dev", "test"]:
|
| 89 |
+
prefix = f"{repo}/{direction}/{split}/"
|
| 90 |
+
flat_jsonl_prefix = f"{repo}/{repo}.{direction}.{split}"
|
| 91 |
+
return [
|
| 92 |
+
f for f in files
|
| 93 |
+
if f.startswith(prefix) or f.startswith(flat_jsonl_prefix)
|
| 94 |
+
]
|
| 95 |
+
|
| 96 |
+
return []
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def download_dataset(split=None, repo=None, direction=None):
    """Download the full dataset or a requested subset from Hugging Face.

    Files are fetched one at a time with ``hf_hub_download(force_download=True)``
    so real file contents land on disk (not cache symlinks or LFS pointers),
    then copied into ``./dataset/CodeConvo`` preserving the repo layout.

    Args:
        split: Optional split name ("train", "dev" or "test"); when omitted,
            every file in the repository is downloaded.
        repo: Repo folder name (used only for dev/test selections).
        direction: Retrieval direction ("i2c"/"c2i"); "c2i" is assumed for
            train when omitted.

    Returns:
        True if at least one file was downloaded, or a completion marker for
        this exact request already exists; False otherwise.
    """
    print("\n" + "="*70)
    print("DOWNLOADING DATASET")
    print("="*70)

    repo_id = "jiebi/CodeConvo"
    local_dir = "./dataset/CodeConvo"

    # One marker per distinct subset request, plus the legacy full-download marker.
    if split:
        scope = f"{split}_{repo or 'all'}_{direction or 'default'}"
        completion_marker = os.path.join(local_dir, f".download_complete_{scope}")
    else:
        completion_marker = os.path.join(local_dir, ".download_complete")

    # Fix: subset markers were written on success but never consulted, so
    # subset downloads repeated on every run.  Honor the marker for both
    # full and subset requests.
    if os.path.exists(completion_marker):
        print("\n✓ Dataset already downloaded (completion marker found)")
        print(f"Location: {local_dir}")
        return True

    # Create parent directory
    os.makedirs("./dataset", exist_ok=True)

    if split:
        print(f"\nDownloading subset to: {local_dir}")
        print(f"Requested split={split}, repo={repo or 'N/A'}, direction={direction or 'N/A'}")
    else:
        print(f"\nDownloading entire dataset to: {local_dir}")
    print("-" * 70)

    try:
        # Step 1: List all files in the repository
        print("\nStep 1: Listing all files in repository...")
        files = list_repo_files(repo_id=repo_id, repo_type="dataset")
        print(f"✓ Found {len(files)} files")

        # Filter out certain non-essential files
        skip_patterns = ['.gitattributes', 'README.md', '.huggingface']
        candidate_files = [f for f in files if not any(f.startswith(p) for p in skip_patterns)]
        files_to_download = _filter_files_for_request(
            candidate_files,
            split=split,
            repo=repo,
            direction=direction,
        )

        if not files_to_download:
            print("✗ No files matched the requested selection")
            return False

        print(f"  Will download {len(files_to_download)} files (after filtering)")

        # Step 2: Download each file individually with force_download=True
        print("\nStep 2: Downloading files (this may take a while)...")
        downloaded_count = 0
        failed_files = []
        # Fix: initialized outside the loop so the except clause below can
        # always read it, even if the very first iteration fails early.
        show_status = False

        for i, file_path in enumerate(files_to_download, 1):
            try:
                # Show progress every 10 files (and always for the last one).
                show_status = i % 10 == 1 or i == len(files_to_download)
                if show_status:
                    print(f"  [{i}/{len(files_to_download)}] Downloading {file_path}...", end=" ", flush=True)

                # Fix: dropped the deprecated `force_filename=None` argument;
                # it has been removed from huggingface_hub and passing it
                # raises TypeError on current releases.
                downloaded_path = hf_hub_download(
                    repo_id=repo_id,
                    filename=file_path,
                    repo_type="dataset",
                    cache_dir="./dataset/.cache",
                    force_download=True,  # Force actual download, not symlinks
                )

                # Create target directory structure
                target_file = os.path.join(local_dir, file_path)
                os.makedirs(os.path.dirname(target_file), exist_ok=True)

                # Copy into place (shutil is imported at module level; the
                # redundant in-loop `import shutil` was removed).
                shutil.copy2(downloaded_path, target_file)

                if show_status:
                    file_size = os.path.getsize(target_file)
                    size_str = f"{file_size/1024/1024:.1f}MB" if file_size > 1024*1024 else f"{file_size/1024:.1f}KB"
                    print(f"✓ ({size_str})")

                downloaded_count += 1

            except Exception as file_error:
                failed_files.append((file_path, str(file_error)[:50]))
                if show_status:
                    print(f"✗")

        print(f"\n✓ Downloaded {downloaded_count}/{len(files_to_download)} files")

        if failed_files:
            print(f"⚠ {len(failed_files)} files failed to download:")
            for fname, error in failed_files[:5]:
                print(f"  - {fname}: {error}")
            if len(failed_files) > 5:
                print(f"  ... and {len(failed_files) - 5} more")

        # Step 3: Clean up cache directory
        print("\nStep 3: Cleaning up cache...")
        cache_dir = "./dataset/.cache"
        if os.path.exists(cache_dir):
            shutil.rmtree(cache_dir)
            print("✓ Cleaned up cache")

        # Step 4: Create completion marker
        os.makedirs(local_dir, exist_ok=True)
        with open(completion_marker, 'w') as f:
            f.write("Download completed successfully\n")
        print(f"✓ Created completion marker")

        return downloaded_count > 0

    except Exception as e:
        print(f"\n✗ Download failed: {type(e).__name__}")
        print(f"Message: {str(e)}")
        traceback.print_exc()
        return False
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
def show_downloaded_structure():
    """Print a depth-limited directory tree of the downloaded dataset.

    Walks ./dataset/CodeConvo up to 3 levels deep, printing at most 10
    subdirectories and 10 files per level with human-readable sizes.
    Returns None; purely a console report.
    """
    print("\n" + "="*70)
    print("DOWNLOADED DATASET STRUCTURE")
    print("="*70)

    # Hard-coded to match the download target used by download_dataset().
    local_dir = "./dataset/CodeConvo"

    if not os.path.exists(local_dir):
        print(f"\n✗ Directory not found: {local_dir}")
        return

    print(f"\nLocation: {local_dir}\n")

    def show_tree(path, prefix="", max_depth=3, current_depth=0):
        # Recursive tree printer; `prefix` carries the accumulated branch
        # characters for this indentation level.
        if current_depth >= max_depth:
            return

        try:
            items = sorted(os.listdir(path))
            # Partition entries so directories are rendered before files.
            dirs = [i for i in items if os.path.isdir(os.path.join(path, i))]
            files = [i for i in items if os.path.isfile(os.path.join(path, i))]

            # Show directories (capped at 10 per level).
            for i, dir_name in enumerate(dirs[:10]):
                # A directory is the "last" branch only if no files follow it.
                is_last = (i == len(dirs) - 1) and len(files) == 0
                print(f"{prefix}{'└── ' if is_last else '├── '}{dir_name}/")

                # NOTE(review): continuation-prefix widths here come from a
                # whitespace-mangled render of the original source — confirm
                # they align with the 4-char branch glyphs above.
                new_prefix = prefix + (" " if is_last else "│ ")
                show_tree(os.path.join(path, dir_name), new_prefix, max_depth, current_depth + 1)

            if len(dirs) > 10:
                print(f"{prefix}├── ... and {len(dirs) - 10} more directories")

            # Show files (capped at 10 per level) with a size suffix.
            for i, file_name in enumerate(files[:10]):
                is_last = i == len(files) - 1
                size = os.path.getsize(os.path.join(path, file_name))
                # Pick MB / KB / B formatting by magnitude.
                size_str = f"({size/1024/1024:.1f}MB)" if size > 1024*1024 else f"({size/1024:.1f}KB)" if size > 1024 else f"({size}B)"
                print(f"{prefix}{'└── ' if is_last else '├── '}{file_name} {size_str}")

            if len(files) > 10:
                print(f"{prefix}└── ... and {len(files) - 10} more files")

        except PermissionError:
            # Unreadable directory: report it in place and keep walking siblings.
            print(f"{prefix}[Permission Denied]")

    show_tree(local_dir)
    print("\n" + "="*70)
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def resolve_data_path(base_dir, split=None, repo=None, direction=None):
    """Map a repo/direction/split request onto a dataset subfolder path.

    Args:
        base_dir: Base dataset directory (e.g., ./dataset/CodeConvo).
        split: Split name ("train", "dev", "test"); case-insensitive.
        repo: Repo folder name (e.g., ids, swe, kubernetes); required for
            dev/test.
        direction: Retrieval direction ("i2c"/"c2i"); defaults to "c2i" for
            train, required for dev/test.

    Returns:
        The resolved path string, `base_dir` itself when no split was given,
        or None for an invalid combination.

    Examples:
        - split=train, direction=c2i -> base_dir/train/c2i/
        - split=train, direction=i2c -> base_dir/train/i2c/
        - split=test, repo=kubernetes, direction=i2c -> base_dir/kubernetes/i2c/test
    """
    if not split:
        return base_dir

    kind = split.lower()

    # Train layout: train/{direction}, with direction defaulting to c2i.
    if kind == "train":
        return os.path.join(base_dir, "train", direction or "c2i")

    # Dev/test layout: {repo}/{direction}/{split}; both parts are mandatory.
    if kind in ("dev", "test"):
        if repo and direction:
            return os.path.join(base_dir, repo, direction, split)
        return None

    # Anything else is not a recognized split.
    return None
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
def parse_args():
    """Build the command-line interface for this script and parse sys.argv."""
    usage_examples = (
        "Examples:\n"
        " # Inspect structure only\n"
        " python download_CodeConvo.py --no-download\n\n"
        " # Download entire dataset\n"
        " python download_CodeConvo.py\n\n"
        " # Download only train files (defaults to c2i)\n"
        " python download_CodeConvo.py --split train\n"
        " python download_CodeConvo.py --split train --direction i2c\n\n"
        " # Download only dev/test files (requires --repo and --direction)\n"
        " python download_CodeConvo.py --split test --repo kubernetes --direction i2c\n"
    )
    cli = argparse.ArgumentParser(
        description="Inspect CodeConvo structure and download full dataset or a selected subset.",
        epilog=usage_examples,
        # Raw formatter keeps the example block's line breaks intact in --help.
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    cli.add_argument(
        "--split",
        type=str,
        choices=["train", "dev", "test"],
        help="Split name to resolve path for.",
    )
    cli.add_argument(
        "--repo",
        type=str,
        help="Repo folder name (only valid for dev/test splits).",
    )
    cli.add_argument(
        "--direction",
        type=str,
        choices=["i2c", "c2i"],
        help="Retrieval direction. For train: defaults to c2i. For dev/test: required.",
    )
    cli.add_argument(
        "--no-download",
        action="store_true",
        help="Skip downloading files and only inspect/resolve folder path.",
    )
    return cli.parse_args()
|
| 360 |
+
|
| 361 |
+
|
| 362 |
+
def main():
    """Orchestrate the full run: validate args, inspect, download, report.

    Returns True on success, False on validation failure, download failure,
    or any unexpected exception (which is printed with a traceback).
    """
    print("\n" + "="*70)
    print("CODECONVO DATASET DOWNLOADER")
    print("Repository: https://huggingface.co/datasets/jiebi/CodeConvo")
    print("="*70)

    args = parse_args()
    # Must match the hard-coded local_dir used inside download_dataset().
    base_dir = "./dataset/CodeConvo"

    # Validate arguments before touching the network.
    if args.split == "train":
        # For train split, only --direction is allowed (no --repo)
        if args.repo:
            print("\n✗ ERROR: --repo is not allowed when --split is 'train'")
            print(f"\nUsage: python download_CodeConvo.py --split train [--direction <i2c|c2i>]")
            print("Note: --direction defaults to 'c2i' if not specified")
            return False
    elif args.split in ["dev", "test"]:
        # For dev/test splits, require both --repo and --direction
        if not args.repo or not args.direction:
            print("\n✗ ERROR: --repo and --direction are required when --split is 'dev' or 'test'")
            print(f"\nUsage: python download_CodeConvo.py --split {args.split} --repo <repo_name> --direction <i2c|c2i>")
            print("\nAvailable repos: ids, ids-supp, swe, kubernetes")
            return False

    try:
        # Step 1: Examine repository structure (failure here is non-fatal;
        # the download is still attempted).
        if not examine_repo_structure():
            print("\n⚠ Could not examine repository, but attempting download anyway...")

        # Step 2: Download full dataset or requested subset (unless skipped)
        if not args.no_download:
            if not download_dataset(
                split=args.split,
                repo=args.repo,
                direction=args.direction,
            ):
                print("\n✗ Download failed!")
                return False

        # Step 3: Show downloaded structure
        show_downloaded_structure()

        # Step 4: Resolve and validate specific folder path if split is specified
        if args.split:
            resolved_path = resolve_data_path(
                base_dir,
                split=args.split,
                repo=args.repo,
                direction=args.direction
            )

            print("\n" + "="*70)
            print("RESOLVED FOLDER PATH")
            print("="*70)
            print(f"Requested split: {args.split}")
            print(f"Requested repo: {args.repo or 'N/A'}")
            print(f"Requested direction: {args.direction or 'N/A'}")
            print(f"\nResolved path: {resolved_path}")

            if resolved_path and os.path.exists(resolved_path):
                print(f"Status: ✓ EXISTS")

                # Show contents (first 10 entries, with sizes for files).
                try:
                    items = os.listdir(resolved_path)
                    print(f"\nContents ({len(items)} items):")
                    for item in sorted(items)[:10]:
                        item_path = os.path.join(resolved_path, item)
                        if os.path.isdir(item_path):
                            print(f"  📁 {item}/")
                        else:
                            size = os.path.getsize(item_path)
                            size_str = f"{size/1024/1024:.1f}MB" if size > 1024*1024 else f"{size/1024:.1f}KB"
                            print(f"  📄 {item} ({size_str})")
                    if len(items) > 10:
                        print(f"  ... and {len(items) - 10} more items")
                except Exception as e:
                    # Listing is best-effort; an unreadable dir is reported, not fatal.
                    print(f"  (Could not list contents: {e})")
            else:
                print(f"Status: ✗ DOES NOT EXIST")
                print(f"\nThe specified path was not found in the downloaded dataset.")
                print(f"Please verify the dataset structure and your arguments.")

        print("\n" + "="*70)
        print("✓ COMPLETED SUCCESSFULLY")
        print("="*70)
        return True

    except Exception as e:
        # Catch-all boundary for the script entry point: report and signal failure.
        print("\n" + "="*70)
        print("✗ UNEXPECTED ERROR")
        print("="*70)
        print(f"\nException: {type(e).__name__}")
        print(f"Message: {str(e)}")
        print("\nFull traceback:")
        traceback.print_exc()
        return False
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
if __name__ == "__main__":
    # Fix: use SystemExit instead of the `exit()` helper — `exit` is injected
    # by the `site` module and is not guaranteed to exist (e.g. under
    # `python -S` or in frozen/embedded interpreters).
    success = main()
    raise SystemExit(0 if success else 1)
|