# zkml-github-repos / preprocess.py

import os
import json
import requests
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()
# Get the GitHub token from the environment variable
GITHUB_TOKEN = os.getenv("GITHUB_TOKEN")
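
# Fail fast when the token is missing; without it, the API calls below would
# all return 401s. (This guard is an addition, not part of the original script.)
if not GITHUB_TOKEN:
    raise RuntimeError("GITHUB_TOKEN is not set; add it to your .env file.")
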
# Set the GitHub repository URLs
repo_urls = [
    "https://github.com/gizatechxyz/orion",
    "https://github.com/gizatechxyz/Giza-Hub",
    "https://github.com/zkonduit/ezkl",
    "https://github.com/socathie/keras2circom",
    "https://github.com/socathie/circomlib-ml",
    "https://github.com/worldcoin/proto-neural-zkp",
    "https://github.com/Modulus-Labs/RockyBot",
    "https://github.com/ora-io/keras2circom",
    "https://github.com/zk-ml/tachikoma",
    "https://github.com/only4sim/ZK-DTP",
    "https://github.com/ddkang/zkml",
    "https://github.com/socathie/ZKaggleV2"
]

# Set the output file name
output_file = "dataset.json"
# Initialize an empty list to store the dataset
dataset = []

def retrieve_files(repo_url, path=""):
    repo_owner, repo_name = repo_url.split("/")[-2:]
    api_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/contents/{path}"
    headers = {
        "Authorization": f"Bearer {GITHUB_TOKEN}",
        "Accept": "application/vnd.github.v3+json"
    }
    response = requests.get(api_url, headers=headers)
    if response.status_code == 200:
        contents = response.json()
        for item in contents:
            # Skip issues, commits, and pull requests
            if "/issues/" in item["url"] or "/commits/" in item["url"] or "/pulls/" in item["url"]:
                continue
            # Skip directories and files starting with a dot
            if item["name"].startswith("."):
                continue
            if item["type"] == "file":
                if item["name"].endswith((".py", ".js", ".java", ".c", ".cpp", ".h", ".hpp", ".rs", ".cairo", ".zkey", ".sol", ".circom", ".ejs", ".ipynb")):
                    file_url = item["download_url"]
                    file_response = requests.get(file_url)
                    # Skip files whose raw download failed rather than storing an error page
                    if file_response.status_code != 200:
                        print("Failed to download", file_url)
                        continue
                    file_content = file_response.text
                    data_entry = {
                        "repo": repo_url,
                        "file_path": item["path"],
                        "content": file_content
                    }
                    dataset.append(data_entry)
                    print("Appended", item["path"])
            elif item["type"] == "dir":
                # Recurse into subdirectories
                retrieve_files(repo_url, item["path"])
    else:
        print(f"Failed to retrieve contents for path: {path} in repository: {repo_url}")
# Load existing dataset if the file exists
if os.path.exists(output_file):
    with open(output_file, "r") as file:
        existing_dataset = json.load(file)
    dataset.extend(existing_dataset)
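
# Because each run appends to the existing dataset, the same file can show up
# more than once. dedupe() is a sketch added for illustration (not called by
# the original pipeline); apply it to `dataset` before dumping if desired.
def dedupe(entries):
    # Keyed on (repo, file_path); later entries win, so a re-scraped file
    # replaces its older copy.
    unique = {(e["repo"], e["file_path"]): e for e in entries}
    return list(unique.values())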

# Iterate over each repository URL
for repo_url in repo_urls:
    print("Scraping", repo_url)
    retrieve_files(repo_url)

# Write the dataset to the output file in JSON format
with open(output_file, "w") as file:
    json.dump(dataset, file, indent=4)

print(f"Dataset created successfully. Saved to {output_file}.")