project_name (string, 3 classes) | file_path (string, 26-44 chars) | code_data (string, 1-2.47k chars)
---|---|---
./src | ./src/dataprep.egg-info/PKG-INFO | Metadata-Version: 2.1
Name: dataprep
Version: 0.1.0
Summary: A small Dataset preparation toolkit
Requires-Python: >=3.10
Description-Content-Type: text/markdown
Requires-Dist: fastapi
Requires-Dist: uvicorn
Provides-Extra: dev
Requires-Dist: pytest; extra == "dev"
Requires-Dist: pytest-cov; extra == "dev"
Requires-Dist: ruff; extra == "dev"
Requires-Dist: watchdog; extra == "dev"
Requires-Dist: httpx; extra == "dev"
# Data Prep
A small Dataset preparation toolkit
## Development
Note: We use [uv](https://github.com/astral-sh/uv?tab=readme-ov-file#getting-started) to install dependencies, so make sure it is available.
1. Create a virtual environment (e.g., `python3 -m venv .env`)
2. Activate it (e.g., `source .env/bin/activate`)
3. Install the development dependencies:
```shell
(.env) $ make install-dev
```
4. Run the tests:
```shell
(.env) $ make test
```
5. Run the API:
```shell
(.env) $ make api
```
6. For more help:
```shell
(.env) $ make help
```
|
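The PKG-INFO metadata above is what `importlib.metadata` reads at runtime. A quick sanity check (a minimal sketch; it assumes the package is installed, e.g. via `make install-dev`):
```python
# Read the installed package metadata shown in PKG-INFO above
from importlib.metadata import metadata

md = metadata("dataprep")
print(md["Name"], md["Version"], md["Requires-Python"])
```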
./src | ./src/dataprep.egg-info/SOURCES.txt | README.md
pyproject.toml
src/dataprep/__init__.py
src/dataprep.egg-info/PKG-INFO
src/dataprep.egg-info/SOURCES.txt
src/dataprep.egg-info/dependency_links.txt
src/dataprep.egg-info/entry_points.txt
src/dataprep.egg-info/requires.txt
src/dataprep.egg-info/top_level.txt
src/dataprep/api/__init__.py
src/dataprep/api/main.py
src/dataprep/cli/__init__.py
src/dataprep/cli/data_prep.py
src/dataprep/cli/main.py
src/dataprep/common/__init__.py
src/dataprep/common/file_parser.py
src/dataprep/common/main.py
src/dataprep/common/repository_scanner.py
tests/test_api.py
tests/test_dataprep.py |
./src | ./src/dataprep.egg-info/entry_points.txt | [console_scripts]
eprep = dataprep.cli.main:main
|
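The `[console_scripts]` entry above means installing the package generates an `eprep` command that calls `dataprep.cli.main:main`. A minimal sketch of invoking it programmatically (the repo path and dataset ID are hypothetical placeholders, and this runs the full prepare-and-upload pipeline):
```python
import sys
from dataprep.cli.main import main

# Equivalent to the shell command:
#   eprep --repo ./my-repo --dataset user/my-dataset
sys.argv = ["eprep", "--repo", "./my-repo", "--dataset", "user/my-dataset"]
main()
```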
./src | ./src/dataprep.egg-info/requires.txt | fastapi
uvicorn
[dev]
pytest
pytest-cov
ruff
watchdog
httpx
|
./src | ./src/dataprep.egg-info/top_level.txt | dataprep
|
./src | ./src/dataprep.egg-info/dependency_links.txt | |
./src/dataprep | ./src/dataprep/cli/__init__.py | from dataprep.cli.main import main
if __name__ == "__main__":
main()
|
./src/dataprep | ./src/dataprep/cli/data_prep.py | import argparse
from datasets import Dataset
from dataprep.common.hub.repo import upload_to_hub
from dataprep.common.repository_scanner import RepositoryScanner
class DataPrepCLI:
def __init__(self):
self.parser = argparse.ArgumentParser(
description="CLI tool for preparing datasets from a local Git repository"
)
self._register_arguments()
def _register_arguments(self):
self.parser.add_argument(
"-r",
"--repo",
required=True,
help="Path to the local Git repository",
)
self.parser.add_argument(
"-o",
"--output",
default="dataset.csv",
help="Output file name for the prepared dataset (default: dataset.csv)",
)
        self.parser.add_argument(
            "-d",
            "--dataset",
            required=True,
            help="The Hub dataset repo ID to publish to",
        )
self.parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="Enable verbose output",
)
self.parser.add_argument(
"--version", action="version", version="DataPrep CLI 1.0"
)
def execute(self):
args = self.parser.parse_args()
self.prepare_dataset(args.repo, args.output, args.dataset, args.verbose)
    def prepare_dataset(self, repo_path: str, output_file: str, dataset: str, verbose=False):
        # Prepare the dataset from the local repository
        if verbose:
            print("Verbose mode enabled")
            print(f"Preparing dataset from repository at {repo_path}...")
        rs = RepositoryScanner(
            directory=repo_path,
            output_format="ftr",
        )
        df = rs.scan_repository()
        # Keep a local copy of the prepared dataset.
        # Note: scan_repository returns only the final, unserialized chunk;
        # the full dataset lives in the .ftr chunk files uploaded below.
        df.to_csv(output_file, index=False)
        upload_to_hub(file_format="ftr", repo_id=dataset)
        ds = Dataset.from_pandas(df)
        ds.push_to_hub(repo_id=dataset)
        print("ftr files uploaded to the Hub.")
        if verbose:
            print(f"Dataset saved to {output_file}")
        else:
            print("Dataset preparation complete.")
|
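`DataPrepCLI.prepare_dataset` can also be driven directly, bypassing argument parsing. A hedged sketch (the paths and Hub repo ID are placeholders; pushing requires Hub credentials):
```python
from dataprep.cli.data_prep import DataPrepCLI

cli = DataPrepCLI()
# Scan ./my-repo, save dataset.csv locally, and push to a hypothetical Hub repo
cli.prepare_dataset("./my-repo", "dataset.csv", "user/my-dataset", verbose=True)
```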
./src/dataprep | ./src/dataprep/cli/main.py | from dataprep.cli.data_prep import DataPrepCLI
def main():
cli = DataPrepCLI()
cli.execute()
|
./src/dataprep | ./src/dataprep/common/repository_scanner.py | import os
import pandas as pd
from tqdm import tqdm
from dataprep.common.constant import EXCLUDE_DIR, EXCLUDE_FORMATS
from dataprep.common.file_parser import FileParser
# ? Do we want to scan remote, or only local
class RepositoryScanner:
    def __init__(
        self,
        directory: str,
        excluded_formats: tuple = EXCLUDE_FORMATS,
        excluded_paths: tuple = EXCLUDE_DIR,
        chunk_size: int = 1000,
        output_format: str = "ftr",
    ):
self.directory = directory
self.excluded_formats = excluded_formats
self.excluded_paths = excluded_paths
self.chunk_size = chunk_size
self.output_format = output_format
self.file_paths = []
def get_all_file_paths(self):
for root, _, files in os.walk(self.directory):
for file in files:
file_path = os.path.join(root, file)
if not file_path.endswith(self.excluded_formats) and all(
exclusion not in file_path
for exclusion in self.excluded_paths
):
self.file_paths.append((os.path.dirname(root), file_path))
print(f"Total file paths: {len(self.file_paths)}.")
    def serialize_dataframe(self, df, chunk_flag):
        # Only feather serialization is implemented; output_format
        # supplies the file extension (default "ftr")
        df_path = f"df_chunk_{chunk_flag}_{len(df)}.{self.output_format}"
        print(f"Serializing dataframe to {df_path}...")
        df.reset_index(drop=True).to_feather(df_path)
def scan_repository(self) -> pd.DataFrame:
self.get_all_file_paths()
print("Reading file contents...")
df = pd.DataFrame(columns=["project_name", "file_path", "code_data"])
chunk_flag = 0
for directory_name, file_path in tqdm(self.file_paths):
file_content = FileParser.process_file_content(
directory_name, file_path
)
if file_content:
                temp_df = pd.DataFrame([file_content])
df = pd.concat([df, temp_df], ignore_index=True)
if self.chunk_size and len(df) >= self.chunk_size:
self.serialize_dataframe(df, chunk_flag)
df = pd.DataFrame(
columns=["project_name", "file_path", "code_data"]
)
chunk_flag += 1
# Serialize any remaining data in the final chunk
if not df.empty:
self.serialize_dataframe(df, chunk_flag)
return df
|
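`RepositoryScanner` walks the directory, filters by `EXCLUDE_FORMATS` and `EXCLUDE_DIR`, serializes every `chunk_size` rows to a feather file, and returns only the final partial chunk as a DataFrame. A usage sketch (the directory is a placeholder):
```python
from dataprep.common.repository_scanner import RepositoryScanner

# Chunks are written to the working directory as df_chunk_<n>_<rows>.ftr
scanner = RepositoryScanner(directory="./my-repo", chunk_size=500)
remainder = scanner.scan_repository()
print(remainder.head())
```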
./src/dataprep | ./src/dataprep/common/file_parser.py | from typing import Dict
class FileParser:
    @staticmethod
    def filter_code_cell(cell) -> bool:
        # Keep a notebook code cell unless it is only a shell command or a %%capture cell
        only_shell = cell["source"].startswith("!")
        only_magic = "%%capture" in cell["source"]
        return not (only_shell or only_magic)
@staticmethod
def process_file(directory_name: str, file_path: str) -> Dict[str, str]:
try:
with open(file_path, "r", encoding="utf-8") as file:
content = file.read()
except Exception:
content = ""
return {
"project_name": directory_name,
"file_path": file_path,
"code_data": content,
}
@staticmethod
def process_file_content(directory_name: str, file_path: str):
try:
            content = FileParser.process_file(directory_name, file_path)
if content["code_data"]:
return content
except Exception as e:
print(f"Error processing file {file_path}: {e}")
return None
|
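`process_file_content` returns a row dict for readable, non-empty files and `None` otherwise, which is what lets `scan_repository` skip unreadable files silently. A small sketch (the paths are placeholders):
```python
from dataprep.common.file_parser import FileParser

record = FileParser.process_file_content("./my-repo", "./my-repo/app.py")
if record:
    print(record["project_name"], record["file_path"], len(record["code_data"]))
```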
./src/dataprep | ./src/dataprep/common/constant.py | IMAGE = ["png", "jpg", "jpeg", "gif"]
VIDEO = ["mp4", "jfif"]
DOC = [
"key",
"PDF",
"pdf",
"docx",
"xlsx",
"pptx",
]
AUDIO = ["flac", "ogg", "mid", "webm", "wav", "mp3"]
ARCHIVE = ["jar", "aar", "gz", "zip", "bz2"]
MODEL = ["onnx", "pickle", "model", "neuron"]
OTHERS = [
"npy",
"index",
"inv",
"index",
"DS_Store",
"rdb",
"pack",
"idx",
"glb",
"gltf",
"len",
"otf",
"unitypackage",
"ttf",
"xz",
"pcm",
"opus",
"env"
]
# Dot-prefix so str.endswith matches real extensions (".env", not any name ending in "env")
EXCLUDE_FORMATS = tuple(
    f".{ext}" for ext in IMAGE + VIDEO + DOC + AUDIO + ARCHIVE + MODEL + OTHERS
)
EXCLUDE_DIR = (
".git",
"__pycache__",
"xcodeproj",
"node_modules",
"dist",
".firebase",
".nx",
".angular",
".idea",
".husky",
"build",
".yarn",
)
|
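Because `str.endswith` accepts a tuple, a single call checks every excluded extension. For example:
```python
from dataprep.common.constant import EXCLUDE_FORMATS

print("photo.png".endswith(EXCLUDE_FORMATS))   # True  -> file is skipped
print("scanner.py".endswith(EXCLUDE_FORMATS))  # False -> file is kept
```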
./src/dataprep | ./src/dataprep/common/main.py | def hello_world():
return "We are here"
|
./src/dataprep/common | ./src/dataprep/common/hub/repo.py | import glob
import shutil
import tempfile
from huggingface_hub import create_repo, upload_folder, Repository
def upload_to_hub(file_format: str, repo_id: str):
    try:
        repo = Repository(local_dir=f"./{repo_id}", clone_from=repo_id, repo_type="dataset")
    except Exception:
        create_repo(
            repo_id=repo_id,
            exist_ok=True,
            repo_type="dataset",
            private=True,
        )
        repo = Repository(local_dir=f"./{repo_id}", clone_from=repo_id, repo_type="dataset")
    with tempfile.TemporaryDirectory() as tmpdirname:
        files_to_move = glob.glob(f"*.{file_format}")
        if files_to_move:
            # Iterate over the glob results; a shell wildcard passed to
            # subprocess.run as a split list would never be expanded
            for file_path in files_to_move:
                shutil.move(file_path, tmpdirname)
        else:
            print(f"No files to move: {files_to_move}")
        print(f"Uploading contents of {tmpdirname} to {repo_id}")
        upload_folder(
            repo_id=repo_id, folder_path=tmpdirname, repo_type="dataset"
        )
|
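A hedged usage sketch of `upload_to_hub` (the repo ID is a placeholder; it assumes you are authenticated, e.g. via `huggingface-cli login`, and have `*.ftr` chunk files in the working directory):
```python
from dataprep.common.hub.repo import upload_to_hub

# Moves ./*.ftr into a temp dir and uploads it to the dataset repo
upload_to_hub(file_format="ftr", repo_id="user/my-dataset")
```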
./src/dataprep | ./src/dataprep/api/main.py | from importlib.metadata import version
from fastapi import FastAPI
from fastapi.responses import RedirectResponse
from dataprep.common.main import hello_world
app = FastAPI(
title="dataprep API",
version=version("dataprep"),
)
@app.get("/")
async def root():
return RedirectResponse("/docs")
@app.post("/hello")
async def hello():
return hello_world()
|
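With the API running (e.g. via `make api`), the endpoints can be exercised with `httpx`, which is already a dev dependency. A sketch assuming the default uvicorn host and port:
```python
import httpx

# POST /hello returns the hello_world() payload; "/" redirects to /docs
resp = httpx.post("http://127.0.0.1:8000/hello")
print(resp.json())  # "We are here"
```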