# Source: Datasets — specs/gnn_topology.yaml uploaded with huggingface_hub (ADDED, +64 -0)
# Research Spec: GNN Decoder Topology — Track 4
#
# Tests tree-aware decoding: giving the decoder the actual AST edge structure
# instead of sequential chain edges. Three modes:
#   - chain: Legacy baseline (0→1→2→…)
#   - teacher_forced: Ground-truth AST edges during GNN message passing
#   - iterative: Two-pass: chain→predict parents→rebuild tree→refine
#
# The hypothesis: the decoder's GNN never sees tree topology, so it cannot
# learn structure-sensitive generation. Providing real edges should improve
# parent prediction accuracy and node-type diversity.
#
# Launch:
#   ratiocinator research specs/gnn_topology.yaml

# What to research
topic: "Decoder topology for GNN code generation: does giving the decoder the real AST tree structure (instead of sequential chain edges) improve reconstruction? Compare chain baseline, teacher-forced ground-truth edges, and iterative predict-then-refine. Cross with GAT/GCN/GIN decoder architectures and improved/comprehensive loss functions."
goal_metric: syntactic_validity_pct
maximize: true

# Target codebase
repo_url: https://github.com/timlawrenz/jubilant-palm-tree.git
repo_branch: experiment/ratiocinator-gnn-study
runner_script: scripts/run_topology_arm.sh

# Infrastructure — ~850K params, moderate training
hardware:
  gpu: "RTX 4090"
  num_gpus: 1
  min_cpu_ram_gb: 32
  min_inet_down: 1000.0
  min_cuda_version: 12.0
  max_dph: 0.40
  disk_gb: 50.0
  image: pytorch/pytorch:2.7.0-cuda12.8-cudnn9-runtime

data:
  source: none  # Dataset is in the repo branch

deps:
  pre_install:
    - "apt-get update -qq && apt-get install -y -qq git-lfs > /dev/null 2>&1 || true"
    - "cd /workspace/experiment && git lfs install && git lfs pull"
    - "pip install torch-geometric torch-scatter torch-sparse -f https://data.pyg.org/whl/torch-2.7.0+cu128.html"
    - "pip install pandas tqdm sentence-transformers nltk scikit-learn numpy"
  requirements: requirements.txt
  exclude_from_requirements:
    - torch
    - torchvision
    - torch_geometric
  verify: "python -c \"import torch_geometric; print(f'PyG {torch_geometric.__version__}')\""

metrics:
  protocol: json_line
  json_prefix: "METRICS:"

# Budget — ~9 arms (3 edge modes × 3 conv types), ~10 min each
max_iterations: 2
max_dollars: 15.00
train_timeout_s: 2400
download_timeout_s: 600

# Output
paper_title: "What Graph Neural Networks Can and Cannot Learn About Code: A Systematic Empirical Study on Ruby AST Analysis"