Ewakaa commited on
Commit
c427f62
·
verified ·
1 Parent(s): a0db2b2

Initial public release: TRACE v1.0.0

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. .gitignore +61 -0
  3. CHANGELOG.md +30 -0
  4. CITATION.cff +43 -0
  5. LICENSE-CODE +26 -0
  6. LICENSE-DATA +38 -0
  7. README.md +149 -0
  8. configs/dtt/compatibility.yaml +41 -0
  9. configs/dtt/taxonomy.yaml +294 -0
  10. configs/dtt/template.yaml +90 -0
  11. configs/generation.yaml +44 -0
  12. configs/net/compatibility.yaml +51 -0
  13. configs/net/taxonomy.yaml +230 -0
  14. configs/net/template.yaml +84 -0
  15. configs/session_interpretation/compatibility.yaml +90 -0
  16. configs/session_interpretation/recommendations.yaml +155 -0
  17. configs/session_interpretation/taxonomy.yaml +318 -0
  18. configs/session_interpretation/template.yaml +68 -0
  19. configs/session_interpretation/trajectory_rules.yaml +137 -0
  20. configs/shared/learner_profiles.yaml +34 -0
  21. configs/shared/mastery_states.yaml +46 -0
  22. configs/shared/prompt_types.yaml +46 -0
  23. configs/task_analysis/compatibility.yaml +38 -0
  24. configs/task_analysis/taxonomy.yaml +413 -0
  25. configs/task_analysis/template.yaml +82 -0
  26. configs/task_analysis/template_toleration.yaml +91 -0
  27. data/splits/sanity.jsonl +0 -0
  28. data/splits/test.jsonl +0 -0
  29. data/splits/train.jsonl +3 -0
  30. data/splits/valid.jsonl +0 -0
  31. docs/curation/LEGEND.md +177 -0
  32. docs/curation/README.md +93 -0
  33. docs/data-statement.md +126 -0
  34. docs/dataset-card.md +358 -0
  35. docs/datasheet.md +220 -0
  36. docs/references.md +142 -0
  37. docs/schema-v1.md +756 -0
  38. docs/taxonomy-v1.md +608 -0
  39. pyproject.toml +54 -0
  40. requirements.txt +9 -0
  41. src/__init__.py +0 -0
  42. src/compile_curation.py +127 -0
  43. src/generate.py +124 -0
  44. src/generators/__init__.py +1 -0
  45. src/generators/aba_dtt.py +293 -0
  46. src/generators/aba_net.py +216 -0
  47. src/generators/aba_session_interp.py +931 -0
  48. src/generators/aba_task_analysis.py +277 -0
  49. src/generators/base.py +214 -0
  50. src/prepare_curation.py +151 -0
.gitattributes CHANGED
@@ -58,3 +58,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
58
  # Video files - compressed
59
  *.mp4 filter=lfs diff=lfs merge=lfs -text
60
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
58
  # Video files - compressed
59
  *.mp4 filter=lfs diff=lfs merge=lfs -text
60
  *.webm filter=lfs diff=lfs merge=lfs -text
61
+ data/splits/train.jsonl filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.egg-info/
6
+ build/
7
+ dist/
8
+ .eggs/
9
+ .venv/
10
+ venv/
11
+ env/
12
+
13
+ # Packaging
14
+ *.egg
15
+ *.wheel
16
+ pip-log.txt
17
+ pip-delete-this-directory.txt
18
+
19
+ # uv lockfile — not committed; runtime deps are spec'd in pyproject.toml +
20
+ # requirements.txt. Anyone running the generator only needs PyYAML, which is
21
+ # stable enough that a pinned lockfile would add noise without value.
22
+ uv.lock
23
+
24
+ # Testing
25
+ .pytest_cache/
26
+ .coverage
27
+ htmlcov/
28
+ .tox/
29
+
30
+ # Type checking
31
+ .mypy_cache/
32
+ .ruff_cache/
33
+
34
+ # Editors
35
+ .vscode/
36
+ .idea/
37
+ *.swp
38
+ *~
39
+ .DS_Store
40
+
41
+ # Jupyter
42
+ .ipynb_checkpoints/
43
+ *.ipynb
44
+
45
+ # Environment
46
+ .env
47
+ .env.local
48
+ .env.*.local
49
+
50
+ # Working artifacts — these are intentionally not committed.
51
+ # `data/splits/` IS committed (the release payload).
52
+ # But if anyone regenerates mid-review, they produce these:
53
+ data/splits/curation_pool.jsonl
54
+ data/splits/curation_pool_original.jsonl
55
+ data/splits/dtt.jsonl
56
+ data/splits/net.jsonl
57
+ data/splits/task_analysis.jsonl
58
+ data/splits/session_interpretation.jsonl
59
+
60
+ # Curation review artifact (regenerated; not part of the release)
61
+ docs/curation/review.md
CHANGELOG.md ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # CHANGELOG
2
+
3
+ All notable changes to TRACE are recorded here, starting with v1.0.0.
4
+ Future releases will follow semantic versioning:
5
+
6
+ - **Patch** (v1.0.x) — typo fixes, metadata tweaks, no change to the JSONL splits.
7
+ - **Minor** (v1.x.0) — additions to the taxonomy or new teaching methods; existing examples preserved; new examples appended to splits.
8
+ - **Major** (v2.0.0) — schema changes or re-generations that invalidate prior splits; prior versions remain accessible via git tags.
9
+
10
+ ---
11
+
12
+ ## v1.0.0 — 2026-04-25
13
+
14
+ Initial public release. 2,999 examples split across train (2,549) / valid (149) / test (281) / sanity (20). Paired with the dataset card, datasheet (Gebru et al. 2021), and data statement (Bender & Friedman 2018).
15
+
16
+ ### What's covered
17
+
18
+ - **Teaching programs** across three methods — DTT (800), NET (500), Task Analysis (500, including toleration programs).
19
+ - **Session interpretations** — 1,200 multi-session behavioral logs across 12 trajectory patterns with 13 target behaviors.
20
+ - **Provenance** — every example carries full `meta.provenance.taxonomy_cells` for auditable traceback.
21
+ - **Reproducibility** — the corpus regenerates byte-identically from `(configs, seed)` on any platform; every `example_id` is verifiable as `sha256(user_content + assistant_content)[:16]` directly from the published JSONL row.
22
+
23
+ ### Known limitations
24
+
25
+ See the dataset card section 6 and the data statement section H for the full list. Most notable:
26
+
27
+ - English-only, US clinical register.
28
+ - Pattern frequencies are uniform for learnability rather than epidemiologically weighted.
29
+ - VB-MAPP + AFLS only.
30
+ - Single-reviewer clinical validation.
CITATION.cff ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ cff-version: 1.2.0
2
+ message: "If you use TRACE in your research, please cite it as below."
3
+ title: "TRACE: Taxonomy-Referenced ABA Clinical Examples"
4
+ abstract: >
5
+ A 2,999-example synthetic instruction-tuning dataset for two clinical tasks
6
+ in Applied Behavior Analysis — teaching-program generation (DTT, NET,
7
+ Task Analysis) and multi-session behavioral interpretation (12 trajectory
8
+ patterns, 13 target behaviors). Every example is generated from a taxonomy
9
+ grounded in the canonical ABA literature and carries full sampling
10
+ provenance.
11
+ authors:
12
+ - family-names: Kahunla
13
+ given-names: Festus
14
+ affiliation: "Drexel University; Pombo Labs"
15
+ date-released: 2026-04-25
16
+ version: "1.0.0"
17
+ license: CC-BY-NC-4.0
18
+ repository-code: "https://github.com/Pombo-Labs/TRACE"
19
+ url: "https://huggingface.co/datasets/PomboLabs/TRACE"
20
+ type: dataset
21
+ keywords:
22
+ - applied-behavior-analysis
23
+ - ABA
24
+ - autism
25
+ - clinical-NLP
26
+ - synthetic-data
27
+ - instruction-tuning
28
+ - small-language-model
29
+ - taxonomy
30
+ - provenance
31
+ preferred-citation:
32
+ type: generic
33
+ title: >
34
+ TRACE: Taxonomy-Grounded Synthetic Data for Teaching Program Generation
35
+ and Session Interpretation in Applied Behavior Analysis
36
+ authors:
37
+ - family-names: Kahunla
38
+ given-names: Festus
39
+ affiliation: "Drexel University; Pombo Labs"
40
+ year: 2026
41
+ publisher:
42
+ name: "Pombo Labs"
43
+ url: "https://github.com/Pombo-Labs/TRACE"
LICENSE-CODE ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Pombo Labs
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
22
+
23
+ This license applies to all source code, configuration files, and scripts in
24
+ this repository (the contents of `configs/`, `src/`, the top-level `pyproject.toml`,
25
+ and this LICENSE-CODE file itself). The dataset files in `data/splits/` are
26
+ licensed separately under CC BY-NC 4.0 (see LICENSE-DATA).
LICENSE-DATA ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)
2
+
3
+ Copyright (c) 2026 Pombo Labs
4
+
5
+ The dataset files in `data/splits/` (train.jsonl, valid.jsonl, test.jsonl,
6
+ sanity.jsonl) — and any derivative dataset produced by running the generator
7
+ code in this repository — are licensed under the Creative Commons
8
+ Attribution-NonCommercial 4.0 International License.
9
+
10
+ You are free to:
11
+ - Share — copy and redistribute the material in any medium or format
12
+ - Adapt — remix, transform, and build upon the material
13
+
14
+ Under the following terms:
15
+ - Attribution — You must give appropriate credit, provide a link to the
16
+ license, and indicate if changes were made.
17
+ - NonCommercial — You may not use the material for commercial purposes.
18
+
19
+ Attribution should include:
20
+ - The dataset name (TRACE) and version (v1)
21
+ - Author: Kahunla, F.
22
+ - Publisher: Pombo Labs
23
+ - The repository URL: https://github.com/Pombo-Labs/TRACE
24
+
25
+ No warranty is given. TRACE is a research artifact and has not been
26
+ clinically validated. It is not a clinical tool. Any use of the dataset,
27
+ or any model derived from it, in a clinical setting is at the sole
28
+ responsibility of the user and their facility. The authors and Pombo Labs
29
+ make no representation of clinical suitability and accept no liability for
30
+ clinical outcomes.
31
+
32
+ Full license text:
33
+ https://creativecommons.org/licenses/by-nc/4.0/legalcode
34
+
35
+ Human-readable summary:
36
+ https://creativecommons.org/licenses/by-nc/4.0/
37
+
38
+ SPDX-License-Identifier: CC-BY-NC-4.0
README.md ADDED
@@ -0,0 +1,149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # TRACE
2
+
3
+ **T**axonomy-**R**eferenced **A**BA **C**linical **E**xamples
4
+
5
+ A 2,999-example synthetic instruction-tuning dataset for two clinical tasks in Applied Behavior Analysis: **teaching-program generation** and **behavioral session interpretation**. Every example is generated from a taxonomy grounded in the canonical ABA literature (Cooper, Heron, & Heward 2020; VB-MAPP; AFLS; BACB Ethics Code 2020) and carries full sampling provenance — the exact taxonomy cells that produced it.
6
+
7
+ | | |
8
+ |---|---|
9
+ | **Version** | v1.0.0 |
10
+ | **Examples** | 2,999 (2,549 train / 149 valid / 281 test / 20 sanity) |
11
+ | **Tasks** | Teaching-program generation (DTT, NET, Task Analysis); behavioral session interpretation (12 patterns, 13 target behaviors) |
12
+ | **Language** | English (US clinical register) |
13
+ | **Data license** | CC BY-NC 4.0 |
14
+ | **Code license** | MIT |
15
+ | **Maintainer** | Pombo Labs — [GitHub](https://github.com/Pombo-Labs) · [Hugging Face](https://huggingface.co/PomboLabs) |
16
+ | **Author** | Festus Kahunla |
17
+
18
+ ---
19
+
20
+ ## What this is for
21
+
22
+ ABA is a clinical discipline with high documentation workload. Board Certified Behavior Analysts (BCBAs) draft teaching programs and interpret multi-session behavioral logs constantly across their caseloads. TRACE is a **research** dataset for studying whether small language models can learn the structure of those documents well enough to produce useful first-pass drafts. It is a research artifact, not a clinical tool.
23
+
24
+ Real session data is HIPAA-protected and gated by BACB confidentiality rules; it cannot be reliably de-identified without losing clinical detail. TRACE avoids this constraint by construction: the data never represented a real person.
25
+
26
+ ## What makes TRACE different
27
+
28
+ 1. **Two tasks in one dataset.** Structured teaching-program generation across DTT, NET, and Task Analysis; multi-session interpretation across 12 clinical trajectory patterns.
29
+ 2. **Taxonomy-grounded.** Every category in the controlled vocabulary ties to a specific source — Cooper/Heron/Heward chapters, JABA papers, VB-MAPP/AFLS curricula, BACB Ethics Code 2020, ABAI 2010 Position Statement.
30
+ 3. **Full provenance per example.** `meta.provenance.taxonomy_cells` records the exact values sampled from every taxonomy dimension that produced the example. This is the property that makes clinical auditing tractable.
31
+ 4. **Practitioner-in-the-loop iteration.** Clinical accuracy was refined through targeted taxonomy edits rather than per-example rewrites — each flagged inaccuracy maps to a single cell whose fix then propagates across every example that sampled it.
32
+
33
+ ## Quick start
34
+
35
+ ```python
36
+ # Load with Hugging Face datasets
37
+ from datasets import load_dataset
38
+ ds = load_dataset("PomboLabs/TRACE")
39
+ print(ds["train"][0])
40
+ ```
41
+
42
+ Or from the raw JSONL splits:
43
+ ```python
44
+ import json
45
+ for line in open("data/splits/train.jsonl"):
46
+ example = json.loads(line)
47
+ system, user, assistant = example["messages"]
48
+ gold = example["meta"]["gold_labels"]
49
+ provenance = example["meta"]["provenance"]["taxonomy_cells"]
50
+ ```
51
+
52
+ ### Extend the generator
53
+
54
+ The taxonomy YAMLs under `configs/` are the fork point — drop in a new teaching method, a new clinical area, or a new behavioral pattern, regenerate, and you have a corpus tailored to your scope.
55
+
56
+ ```bash
57
+ uv pip install -e .
58
+ uv run python src/generate.py --all
59
+ uv run python src/split_data.py
60
+ uv run python src/prepare_curation.py
61
+ uv run python src/compile_curation.py
62
+ ```
63
+
64
+ (Without `uv`: `pip install -r requirements.txt` and drop the `uv run` prefix.)
65
+
66
+ The pipeline is deterministic, so the same configs + seed produce the same corpus — but the more interesting use is to make your own.
67
+
68
+ ## Repository structure
69
+
70
+ ```
71
+ .
72
+ ├── configs/ # taxonomy YAMLs (controlled vocabulary)
73
+ │ ├── shared/ # cross-area: learner profiles, mastery states
74
+ │ ├── dtt/ # DTT taxonomy + template + compatibility
75
+ │ ├── net/ # NET area
76
+ │ ├── task_analysis/ # chaining (independence + toleration)
77
+ │ └── session_interpretation/ # patterns, behaviors, trajectories, recommendations
78
+ ├── src/
79
+ │ ├── generators/ # per-area generator code
80
+ │ ├── generate.py # orchestrator
81
+ │ ├── split_data.py # train / valid / curation_pool
82
+ │ ├── prepare_curation.py # browseable review.md
83
+ │ └── compile_curation.py # test + sanity splits
84
+ ├── data/splits/ # the released JSONL splits
85
+ └── docs/
86
+ ├── dataset-card.md # HF-style user-facing card
87
+ ├── datasheet.md # Gebru et al. (2021) template
88
+ ├── data-statement.md # Bender & Friedman (2018) template
89
+ ├── taxonomy-v1.md # operational definitions + citations
90
+ ├── schema-v1.md # wire format + slot specifications
91
+ ├── references.md # the focused citation shortlist (~30 papers)
92
+ └── curation/
93
+ ├── README.md # review + compile workflow
94
+ └── LEGEND.md # session-log notation reference
95
+ ```
96
+
97
+ ## Documentation
98
+
99
+ | | |
100
+ |---|---|
101
+ | **Dataset card** | [docs/dataset-card.md](docs/dataset-card.md) |
102
+ | **Datasheet** (Gebru et al. 2021) | [docs/datasheet.md](docs/datasheet.md) |
103
+ | **Data statement** (Bender & Friedman 2018) | [docs/data-statement.md](docs/data-statement.md) |
104
+ | **Taxonomy reference** | [docs/taxonomy-v1.md](docs/taxonomy-v1.md) |
105
+ | **Schema reference** | [docs/schema-v1.md](docs/schema-v1.md) |
106
+ | **Curation workflow** | [docs/curation/README.md](docs/curation/README.md) |
107
+ | **Session-log reading guide** | [docs/curation/LEGEND.md](docs/curation/LEGEND.md) |
108
+
109
+ ## Ethics and intended use
110
+
111
+ - **For:** fine-tuning small models (recommended: Gemma 4 E2B, QLoRA, 4-bit) for on-device drafting assistants, evaluation baselines, research on clinical-NLP data pipelines.
112
+ - **Not for:** autonomous clinical decisions; writing final Behavior Intervention Plans without BCBA review; training models on real client data; medical diagnosis; insurance documentation.
113
+
114
+ Crisis-plan content is deliberately conservative: physical-intervention procedures are not specified because they vary by jurisdiction, training certification, and learner-specific contraindications. See [docs/dataset-card.md section 6.3](docs/dataset-card.md) for details.
115
+
116
+ **Responsibility.** TRACE is a research artifact. It is not a clinical tool, has not been clinically validated, and carries no clinical endorsement. Anyone who chooses to deploy TRACE — or any model derived from it — in a clinical setting does so entirely at their own responsibility and under their facility's own oversight. The authors and Pombo Labs make no representation of clinical suitability and accept no liability for clinical outcomes.
117
+
118
+ ## License
119
+
120
+ - **Data** — JSONL splits under `data/splits/` — Creative Commons Attribution-NonCommercial 4.0 (CC BY-NC 4.0). Research and non-commercial use permitted with attribution. See [LICENSE-DATA](LICENSE-DATA).
121
+ - **Code** — generator, scripts, configs — MIT. See [LICENSE-CODE](LICENSE-CODE).
122
+
123
+ ## Citation
124
+
125
+ ```bibtex
126
+ @dataset{trace_2026,
127
+ title = {TRACE: Taxonomy-Referenced ABA Clinical Examples},
128
+ author = {Kahunla, Festus},
129
+ year = {2026},
130
+ publisher = {Pombo Labs},
131
+ url = {https://github.com/Pombo-Labs/TRACE},
132
+ note = {Taxonomy-Grounded Synthetic Data for Teaching Program Generation
133
+ and Session Interpretation in Applied Behavior Analysis}
134
+ }
135
+ ```
136
+
137
+ Machine-readable metadata in [CITATION.cff](CITATION.cff).
138
+
139
+ ## Contributing
140
+
141
+ The taxonomy YAMLs under `configs/` are the intended extension surface. Adding a new skill target, a new target behavior, a new mastery criterion, or a new teaching method follows a documented pattern — see `docs/taxonomy-v1.md` for the existing vocabulary and grounding conventions. Pull requests welcome.
142
+
143
+ For clinical-accuracy corrections, open an issue with the flagged example's `example_id` and a description of the issue; we trace back to the offending taxonomy cell and apply a single-commit fix.
144
+
145
+ ## Scope of v1
146
+
147
+ 2,999 examples in total — teaching programs covering DTT, NET, and Task Analysis (including toleration programs) for the teaching-program task, plus 1,200 multi-session logs across 12 trajectory patterns for the session-interpretation task. What v1 does not cover is documented in the dataset card (section 6.5 Known limitations).
148
+
149
+ Version history in [CHANGELOG.md](CHANGELOG.md).
configs/dtt/compatibility.yaml ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # DTT clinical-consistency rules — TRACE v1
2
+ # Enforced by the DTT generator during sampling to avoid clinically-inconsistent combinations.
3
+
4
+ # Errorless error-correction procedure pairs exclusively with most-to-least prompt hierarchy
5
+ # Citation: CHH Ch. 21 — errorless learning follows the most-to-least prompting convention.
6
+ errorless_requires_hierarchy: most_to_least
7
+
8
+ # Mastery state -> acceptable reinforcement schedule IDs
9
+ # Citation: CHH Ch. 13, 28 — reinforcement thinning follows acquisition -> mastery -> maintenance.
10
+ mastery_to_reinforcement:
11
+ emerging: [crf]
12
+ developing: [crf]
13
+ approaching: [crf, fr2]
14
+ near: [fr2, vr3]
15
+ mastered: [fr2, vr3, token_economy]
16
+ generalization: [vr3, token_economy]
17
+
18
+ # VB-MAPP level -> compatible learner profile ids
19
+ # Citation: Sundberg 2008 (VB-MAPP manual) — level is anchored to developmental age.
20
+ level_to_learner_profiles:
21
+ L1: [early]
22
+ L2: [early, school_age]
23
+ L3: [school_age, adolescent]
24
+
25
+ # Level -> array size for stimulus field (DTT-specific)
26
+ array_size_by_level:
27
+ L1:
28
+ n: 3
29
+ text: "field of 3 (target + 2 distractors)"
30
+ L2:
31
+ n: 5
32
+ text: "field of 5 (target + 4 distractors)"
33
+ L3:
34
+ n: 6
35
+ text: "field of 6 (target + 5 distractors, mixed categories)"
36
+
37
+ # Sampling weights for level selection (DTT biased toward L1/L2 discrete skills)
38
+ level_sampling_weights:
39
+ L1: 0.45
40
+ L2: 0.40
41
+ L3: 0.15
configs/dtt/taxonomy.yaml ADDED
@@ -0,0 +1,294 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # DTT taxonomy — TRACE v1
2
+ # Self-contained: everything the DTT generator needs beyond shared primitives.
3
+ # Citation: Lovaas 1987 JCCP; Smith 2001 Focus on Autism; CHH 2020 Chs. 17, 21.
4
+ #
5
+ # DTT fits array-based discrete-response skills. Only VB-MAPP domains where
6
+ # the target is discrete selection / labeling / matching appear here.
7
+
8
+ method:
9
+ id: dtt
10
+ name: Discrete Trial Training
11
+ short: DTT
12
+ citation: "Lovaas, O. I. (1987). JCCP, 55(1), 3–9. https://doi.org/10.1037/0022-006X.55.1.3; Smith, T. (2001). Focus on Autism, 16(2), 86–92."
13
+ when_to_use: >
14
+ Acquisition of discrete, decontextualized skills (receptive labels, tacts,
15
+ matching, early imitation) — especially early in instruction when stimulus
16
+ control must be established.
17
+
18
+ # VB-MAPP domains appropriate for DTT (array-based discrete-response skills).
19
+ # Other domains (Mand, Echoic, Motor Imitation, Intraverbal, Social, etc.)
20
+ # belong to NET, PRT, or other method taxonomies.
21
+ skill_domains:
22
+ vbmapp:
23
+ levels:
24
+ - id: L1
25
+ name: Level 1
26
+ developmental_age: "0–18 months"
27
+ - id: L2
28
+ name: Level 2
29
+ developmental_age: "18–30 months"
30
+ - id: L3
31
+ name: Level 3
32
+ developmental_age: "30–48 months"
33
+
34
+ domains:
35
+ - id: tact
36
+ name: Tact
37
+ gloss: Labeling / naming
38
+ L1:
39
+ - tacts common objects (ball, cup, shoe)
40
+ - tacts familiar people by name
41
+ - tacts common actions (running, eating, sleeping)
42
+ - tacts body parts
43
+ - tacts common animals
44
+ L2:
45
+ - tacts colors of objects
46
+ - tacts shapes (circle, square, triangle)
47
+ - tacts adjectives (big/little, hot/cold)
48
+ - tacts prepositions (in, on, under)
49
+ - tacts emotions in self and others
50
+ L3:
51
+ - tacts community helpers and their roles
52
+ - tacts categories (fruits, vehicles, clothing)
53
+ - tacts features of objects (color, shape, function)
54
+ - tacts past-tense events
55
+ - tacts abstract concepts (same/different, first/last)
56
+
57
+ - id: listener_responding
58
+ name: Listener Responding
59
+ gloss: Receptive language
60
+ L1:
61
+ - follows 1-step motor instructions (sit down, stand up)
62
+ - selects correct item from an array of 2
63
+ - points to named body parts
64
+ - points to named common objects
65
+ - follows instructions involving objects (give me the cup)
66
+ L2:
67
+ - selects correct item from array of 4–6
68
+ - follows 2-step instructions
69
+ - selects items by feature (find something red)
70
+ - selects items by function (find something you eat with)
71
+ - selects items by class (find an animal)
72
+ L3:
73
+ - selects items by multiple features simultaneously
74
+ - follows conditional instructions (if X, then Y)
75
+ - follows instructions involving temporal concepts (before, after)
76
+
77
+ - id: vp_mts
78
+ name: Visual Perceptual Skills & Matching-to-Sample
79
+ gloss: VP-MTS
80
+ L1:
81
+ - matches identical objects
82
+ - matches identical pictures
83
+ - matches colors (identical chips)
84
+ - matches shapes (identical blocks)
85
+ - completes simple 3–4 piece puzzles
86
+ L2:
87
+ - matches non-identical items by category
88
+ - sorts items into 2–3 categories
89
+ - matches quantities (1–5)
90
+ - matches upper-case letters
91
+ - completes 8–12 piece puzzles
92
+ L3:
93
+ - matches associated items (sock-shoe, cup-plate)
94
+ - sorts by multiple attributes simultaneously
95
+ - matches upper to lower case letters
96
+ - sequences 3–4 step picture sequences
97
+ - reproduces block designs from model
98
+
99
+ - id: lrffc
100
+ name: LRFFC
101
+ gloss: Listener Responding by Feature, Function, Class
102
+ L1:
103
+ - selects items by function (What do you drink from?)
104
+ - selects items by feature (What is round?)
105
+ - selects items by class (Find the animal)
106
+ - selects by single function from array of 3
107
+ - selects by single visible feature from array of 3
108
+ L2:
109
+ - selects items by function from array of 5–8
110
+ - selects items by feature from array of 5–8
111
+ - selects items by class from array of 5–8
112
+ - selects items by multiple features (round and red)
113
+ - selects items by function when item not visible
114
+ L3:
115
+ - selects items given 2+ features/functions simultaneously
116
+ - selects items by class with exclusion (animal but not a pet)
117
+ - selects items by abstract features (something needed when cold)
118
+ - selects items by comparison (which is heavier)
119
+ - selects items by negative features (not round, not food)
120
+
121
+ - id: reading
122
+ name: Reading
123
+ L1:
124
+ - matches letters to identical letters
125
+ - identifies own name in print
126
+ - identifies 5–10 upper-case letters
127
+ - matches words to identical words
128
+ - tracks print left to right
129
+ L2:
130
+ - identifies all upper-case letters
131
+ - identifies all lower-case letters
132
+ - reads 10–20 sight words
133
+ - sounds out CVC words (cat, dog, run)
134
+ - reads simple 2–3 word phrases
135
+ L3:
136
+ - reads sentences with comprehension
137
+ - reads short passages and answers questions
138
+ - phonetic decoding for novel words
139
+
140
+ - id: writing
141
+ name: Writing
142
+ L1:
143
+ - traces horizontal and vertical lines
144
+ - traces basic shapes (circle, cross)
145
+ - copies basic shapes from model
146
+ - traces letters of own name
147
+ - writes own name from model
148
+ L2:
149
+ - writes own name independently
150
+ - copies all upper-case letters from model
151
+ - writes upper-case letters from dictation
152
+ - copies simple words from model
153
+ - writes numbers 1–10
154
+
155
+ - id: math
156
+ name: Math
157
+ L1:
158
+ - rote counts to 10
159
+ - counts objects 1–5 with 1:1 correspondence
160
+ - identifies numerals 1–5
161
+ - matches quantities to numerals 1–5
162
+ - identifies basic shapes in math context
163
+ L2:
164
+ - counts objects 1–20 with 1:1 correspondence
165
+ - identifies numerals 1–20
166
+ - compares quantities (more/less/same)
167
+ - solves single-digit addition with manipulatives
168
+ - identifies coins by name
169
+ L3:
170
+ - adds single-digit numbers without manipulatives
171
+ - subtracts single-digit numbers
172
+ - identifies place value (ones, tens)
173
+ - tells time to the hour and half-hour
174
+ - solves simple word problems
175
+
176
+ # Prompt hierarchies that can be used in DTT
177
+ prompt_hierarchies:
178
+ - id: most_to_least
179
+ name: Most-to-Least (errorless)
180
+ sequence: [full_physical, partial_physical, gestural, positional, independent]
181
+ description: "Start with maximum assistance; fade systematically to independence."
182
+ best_for: "Acquisition, early learners, safety-critical skills."
183
+
184
+ - id: least_to_most
185
+ name: Least-to-Most
186
+ sequence: [independent, gestural, positional, partial_physical, full_physical]
187
+ description: "Start with independent attempt; add assistance only as needed."
188
+ best_for: "Learner with partial repertoire; promotes independence."
189
+
190
+ - id: time_delay
191
+ name: Time Delay (progressive)
192
+ sequence: [0s_delay, 2s_delay, 4s_delay, 6s_delay, independent]
193
+ description: "Increase latency between SD and prompt delivery across sessions."
194
+ best_for: "Prompt-dependency prevention; ideal for fading."
195
+ citation: "Touchette & Howard 1984 JABA."
196
+
197
+ - id: graduated_guidance
198
+ name: Graduated Guidance
199
+ sequence: [hand_over_hand_full, hand_over_hand_light, shadow, independent]
200
+ description: "Hand-over-hand guidance with fading physical pressure."
201
+ best_for: "Motor skills, self-care chaining."
202
+
203
+ - id: stimulus_fading
204
+ name: Stimulus Fading
205
+ sequence: [exaggerated_stimulus, moderate_fade, minimal_fade, natural_stimulus]
206
+ description: "Modify the stimulus itself; fade cue salience over sessions."
207
+ best_for: "Receptive discrimination, early reading."
208
+
209
+ - id: stimulus_shaping
210
+ name: Stimulus Shaping
211
+ sequence: [modified_stimulus, intermediate_shape, target_stimulus]
212
+ description: "Change the stimulus topography to approximate the target."
213
+ best_for: "Complex visual discriminations."
214
+
215
+ # Error correction procedures available for DTT
216
+ # Citation: Cooper, Heron, Heward (2020), Ch. 21.
217
+ error_corrections:
218
+ - id: transfer_trial
219
+ name: Transfer trial
220
+ steps: >
221
+ Re-present SD -> provide effective-level prompt -> reinforce prompted
222
+ response -> distractor trial -> re-present SD independently.
223
+
224
+ - id: four_step
225
+ name: 4-step error correction
226
+ steps: >
227
+ Model correct response -> lead (do together) -> test (independent) ->
228
+ distractor -> retest.
229
+
230
+ - id: backstep
231
+ name: Backstep
232
+ steps: >
233
+ Return to previous prompt level that produced success ->
234
+ successful response -> re-attempt at target prompt level.
235
+
236
+ - id: simple_correction
237
+ name: Simple correction
238
+ steps: >
239
+ "No, watch me" -> model -> re-present SD -> differential reinforcement
240
+ for correct response.
241
+
242
+ - id: errorless
243
+ name: Errorless (preventive)
244
+ steps: >
245
+ Prevent errors by using high prompts from the start; fade gradually.
246
+ Paired with most-to-least prompt hierarchy.
247
+
248
+ # Reinforcement schedules relevant for DTT (acquisition and thinning)
249
+ # Citation: CHH (2020) Ch. 13, 22.
250
+ # Note: DRO/DRA/DRI are behavior-reduction schedules handled in FCT taxonomy.
251
+ reinforcement_schedules:
252
+ - id: crf
253
+ name: CRF (continuous reinforcement)
254
+ description: "Every correct response reinforced."
255
+ typical_phase: "Acquisition."
256
+
257
+ - id: fr2
258
+ name: FR-2 (fixed ratio 2)
259
+ description: "Every 2nd correct response reinforced."
260
+ typical_phase: "Thinning as accuracy stabilizes."
261
+
262
+ - id: vr3
263
+ name: VR-3 (variable ratio 3)
264
+ description: "Average of every 3rd correct response reinforced."
265
+ typical_phase: "Maintenance, resistance to extinction."
266
+
267
+ - id: token_economy
268
+ name: Token economy
269
+ description: "Tokens delivered per correct response; exchanged at session end for a back-up reinforcer appropriate to the learner (for children: candy, cookies, brief leisure-time access, preferred activity, screen time, or a favorite sensory item)."
270
+ typical_phase: "Group / classroom settings, mastered / generalization phase."
271
+
272
+ # Mastery criteria common for DTT programs
273
+ # Citation: CHH (2020) Ch. 28.
274
+ mastery_criteria:
275
+ - id: mc_80_2sessions
276
+ text: "80% accuracy across 2 consecutive sessions with at least 10 trials each."
277
+
278
+ - id: mc_90_3sessions
279
+ text: "90% accuracy across 3 consecutive sessions."
280
+
281
+ - id: mc_80_generalization
282
+ text: "80% accuracy with 2 different therapists and 2 different settings."
283
+
284
+ - id: mc_80_independent
285
+ text: "80% accuracy at independent prompt level across 3 sessions."
286
+
287
+ - id: mc_90_first_trial
288
+ text: "90% first-trial-correct across 5 consecutive sessions."
289
+
290
+ - id: mc_fluency
291
+ text: "Fluency criterion: correct responses within 3-second latency at 90% accuracy."
292
+
293
+ - id: mc_natural_env
294
+ text: "Demonstrated use in natural environment across 3 independent instances."
configs/dtt/template.yaml ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # DTT teaching program template — TRACE v1
2
+ # Filled by src/generate.py with values sampled from taxonomy.
3
+
4
+ template_id: task1_dtt_v1
5
+ task_type: teaching_program
6
+ method_id: dtt
7
+
8
+ # System prompt used for this task type
9
+ system_prompt: |
10
+ You are an expert ABA (Applied Behavior Analysis) clinical assistant. You help
11
+ Board Certified Behavior Analysts and staff design teaching programs for
12
+ individuals with autism. Your responses are clinically accurate, individualized
13
+ to the learner profile, follow BACB ethical guidelines, and reference no real
14
+ client data. Select the appropriate teaching method (DTT, NET, Task Analysis,
15
+ FCT, BST, PRT) based on the skill target and learner profile.
16
+
17
+ # User message template variants — one is sampled per example for phrasing diversity
18
+ user_variants:
19
+ - |
20
+ Generate a teaching program for the following target.
21
+
22
+ Skill Target: {skill_target}
23
+ Curriculum Reference: {curriculum_ref}
24
+ Learner Profile: {learner_profile_name}
25
+ Current Mastery: {mastery_state_name}
26
+ Teaching Method: DTT
27
+
28
+ Provide the full program structure appropriate to the selected method.
29
+
30
+ - |
31
+ I need a DTT program for this skill.
32
+
33
+ Target: {skill_target}
34
+ Curriculum: {curriculum_ref}
35
+ Learner: {learner_profile_name}
36
+ Mastery Status: {mastery_state_name}
37
+
38
+ Please include the SD, prompt hierarchy, stimulus array, error correction procedure, reinforcement schedule, and mastery criteria.
39
+
40
+ - |
41
+ Design a Discrete Trial Training program for the following.
42
+
43
+ Skill: {skill_target}
44
+ Curriculum: {curriculum_ref}
45
+ Learner Level: {learner_profile_name}
46
+ Baseline: {mastery_state_name}
47
+
48
+ Include the full DTT trial structure.
49
+
50
+ # Assistant message template — filled by generator
51
+ # All slots in {braces} are computed by the generator from taxonomy values
52
+ assistant_template: |
53
+ ## Program Overview
54
+ - Target skill: {skill_target}
55
+ - Curriculum: {curriculum_ref}
56
+ - Learner: {learner_profile_name}
57
+ - Current mastery: {mastery_state_name}
58
+
59
+ This DTT program establishes stimulus control over the target skill through structured discrete trials with a defined prompt hierarchy, stimulus array, and systematic error-correction procedure.
60
+
61
+ ## Discriminative Stimulus (SD)
62
+ Primary SD: "{primary_sd}"
63
+ Variations: {sd_variations}
64
+ Presentation: {sd_presentation}
65
+
66
+ ## Prompt Hierarchy
67
+ Strategy: {prompt_hierarchy_name}
68
+ Sequence: {prompt_sequence}
69
+ Current prompt level: At the {mastery_state_short} stage, {current_prompt_guidance}
70
+
71
+ ## Stimulus Array
72
+ Array size: {array_size}
73
+ Target stimuli: {target_stimuli}
74
+ {distractor_block}
75
+ Rotation: Randomize position of target across trials to prevent position bias.
76
+
77
+ ## Error Correction Procedure
78
+ {error_correction_steps}
79
+
80
+ ## Reinforcement Schedule
81
+ {reinforcement_schedule_name} — {reinforcement_description}. Provide differential reinforcement: higher-quality reinforcement for independent correct responses than for prompted correct responses.
82
+
83
+ ## Mastery Criteria
84
+ {mastery_criterion_text}
85
+
86
+ ## Data Collection
87
+ Trial-by-trial data recording prompt level and response accuracy. IOA scheduled every {ioa_frequency} sessions minimum; target agreement ≥ 80%.
88
+
89
+ ## Generalization & Maintenance Plan
90
+ Post-mastery: probe with novel stimuli, across {n_generalization_therapists} additional therapists, and in at least {n_generalization_settings} natural contexts. Maintenance probes weekly for 4 weeks, then monthly.
configs/generation.yaml ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # TRACE data generation config
2
+ # Controls per-area example counts, split ratios, and the global seed.
3
+
4
+ seed: 42
5
+ output_dir: data/splits
6
+
7
+ # Per-area generation counts and enabled flags.
8
+ # Total target: ~3,800 examples across all areas (~3,000 with currently-enabled areas); split into train + valid + curation-pool.
9
+ areas:
10
+ dtt:
11
+ enabled: true
12
+ n: 800
13
+ description: "DTT teaching programs — array-based discrete-response skills."
14
+ net:
15
+ enabled: true
16
+ n: 500
17
+ description: "NET teaching programs — motivation-based naturalistic teaching."
18
+ task_analysis:
19
+ enabled: true
20
+ n: 500
21
+ description: "Task Analysis / Chaining — multi-step routines (AFLS)."
22
+ fct:
23
+ enabled: false
24
+ n: 300
25
+ description: "FCT — replacement-behavior programs paired with deceleration targets."
26
+ bst:
27
+ enabled: false
28
+ n: 200
29
+ description: "BST — staff-facing training programs."
30
+ prt:
31
+ enabled: false
32
+ n: 300
33
+ description: "PRT — pivotal-behavior programs."
34
+ session_interpretation:
35
+ enabled: true
36
+ n: 1200
37
+ description: "Task 2 — behavioral session interpretation (paper's innovation task)."
38
+
39
+ # Split ratios applied after all enabled areas generate
40
+ # test + sanity are NOT generated here — they're curated by the user from curation_pool
41
+ splits:
42
+ train: 0.85
43
+ valid: 0.05
44
+ curation_pool: 0.10 # user reviews this pool and curates test.jsonl + sanity.jsonl
configs/net/compatibility.yaml ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # NET clinical-consistency rules — TRACE v1
2
+
3
+ # VB-MAPP level -> compatible learner profiles
4
+ level_to_learner_profiles:
5
+ L1: [early]
6
+ L2: [early, school_age]
7
+ L3: [school_age, adolescent]
8
+
9
+ # Sampling weights for level selection
10
+ # NET is used at all levels but slightly biased toward L2/L3 where mands grow
11
+ level_sampling_weights:
12
+ L1: 0.30
13
+ L2: 0.40
14
+ L3: 0.30
15
+
16
+ # Skill keyword -> MO category mapping
17
+ # Used to select the appropriate MO arrangement for a sampled skill.
18
+ skill_to_mo_category:
19
+ - keywords: [mand, item, preferred, 2-word]
20
+ category: mand_item
21
+ - keywords: [missing]
22
+ category: mand_missing
23
+ - keywords: [help]
24
+ category: mand_help
25
+ - keywords: [break]
26
+ category: mand_break
27
+ - keywords: ["all done", finished, completion]
28
+ category: mand_completion
29
+ - keywords: ["what"]
30
+ category: mand_info_what
31
+ - keywords: ["where"]
32
+ category: mand_info_where
33
+ - keywords: [attention, peers]
34
+ category: mand_attention
35
+ - keywords: [action]
36
+ category: mand_action
37
+ - keywords: [greet]
38
+ category: spontaneous_greeting
39
+ - keywords: [comment, describe, narrate, story]
40
+ category: spontaneous_comment
41
+ - keywords: [conversation, reciprocal, multi-turn]
42
+ category: intraverbal_multi_turn
43
+ - keywords: [turn-taking, turn]
44
+ category: social_turn_taking
45
+ - keywords: [initiate, peer]
46
+ category: social_initiation
47
+ - keywords: [routine]
48
+ category: intraverbal_routine
49
+
50
+ # Default category when no keyword matches
51
+ default_mo_category: mand_item
configs/net/taxonomy.yaml ADDED
@@ -0,0 +1,230 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # NET (Natural Environment Teaching) taxonomy — TRACE v1
2
+ # Self-contained: skills + MOs + natural contexts for naturalistic teaching.
3
+ # Citation: Hart & Risley 1975 JABA; McGee, Krantz, McClannahan 1985 JABA.
4
+
5
+ method:
6
+ id: net
7
+ name: Natural Environment Teaching
8
+ short: NET
9
+ citation: "Hart, B. & Risley, T. R. (1975). JABA, 8(4), 411–420. https://doi.org/10.1901/jaba.1975.8-411; McGee, G. G., Krantz, P. J., & McClannahan, L. E. (1985). JABA, 18(1), 17–31."
10
+ when_to_use: >
11
+ Mand / requesting; generalization phase; when motivation is the limiting
12
+ factor; when the skill's natural occasion is ecologically predictable
13
+ (e.g., requesting food during snack time).
14
+
15
+ # VB-MAPP domains appropriate for NET (motivation-driven skills).
16
+ skill_domains:
17
+ vbmapp:
18
+ levels:
19
+ - id: L1
20
+ name: Level 1
21
+ developmental_age: "0–18 months"
22
+ - id: L2
23
+ name: Level 2
24
+ developmental_age: "18–30 months"
25
+ - id: L3
26
+ name: Level 3
27
+ developmental_age: "30–48 months"
28
+
29
+ domains:
30
+ - id: mand
31
+ name: Mand
32
+ gloss: Requesting
33
+ L1:
34
+ - single-word mands for preferred items
35
+ - mands for missing items needed to complete an activity
36
+ - mands for actions
37
+ - mands for help
38
+ - mands using 2-word phrases
39
+ - mands for a break (e.g., "break please", AAC "I need a break")
40
+ - mands for activity completion ("all done", "finished")
41
+ - mands to use the bathroom (e.g., "bathroom", "potty", "I need to go", AAC bathroom icon)
42
+ L2:
43
+ - mands for information using "what"
44
+ - mands for information using "where"
45
+ - mands using adjectives (big, little, more)
46
+ - mands for attention from peers
47
+ - mands for others to stop an action
48
+ - mands for a break during a non-preferred task ("break please, I need a break")
49
+ - mands to indicate completion of a step or activity ("all done with this one")
50
+ L3:
51
+ - mands for information using "why"
52
+ - mands for information using "when"
53
+ - mands using complete sentences with correct grammar
54
+ - mands for future events or items not present
55
+ - mands using polite social conventions
56
+ - mands for a break with reason ("I need a break, I'm frustrated")
57
+ - mands to indicate completion in context ("I'm all done with math, can I start reading?")
58
+
59
+ - id: social
60
+ name: Social Behavior & Social Play
61
+ L1:
62
+ - makes eye contact during interactions
63
+ - responds to own name
64
+ - engages in social games (peek-a-boo, tickle)
65
+ - shows items to others spontaneously
66
+ - tolerates proximity of peers during parallel play
67
+ L2:
68
+ - initiates social interactions with peers
69
+ - takes turns during structured activities
70
+ - shares materials with peers when prompted
71
+ - engages in cooperative play with one peer
72
+ - responds to peer initiations appropriately
73
+ L3:
74
+ - maintains reciprocal conversations with peers
75
+ - demonstrates empathy and perspective-taking
76
+ - negotiates and compromises during group activities
77
+ - joins ongoing peer activities appropriately
78
+ - maintains friendships over time
79
+
80
+ - id: spontaneous_vocal
81
+ name: Spontaneous Vocal Behavior
82
+ L1:
83
+ - spontaneously vocalizes during preferred activities
84
+ - spontaneously names items in the environment
85
+ - spontaneously requests desired items without prompts
86
+ - spontaneously greets familiar people
87
+ - spontaneously comments on events
88
+ L2:
89
+ - spontaneously describes ongoing activities
90
+ - spontaneously asks questions about the environment
91
+ - spontaneously reports past events
92
+ - spontaneously uses social phrases appropriately
93
+ - spontaneously initiates conversation with peers
94
+ L3:
95
+ - spontaneously tells stories or narratives
96
+ - spontaneously makes relevant comments in conversations
97
+ - spontaneously adjusts language to different listeners
98
+ - spontaneously uses humor appropriately
99
+ - spontaneously provides explanations and reasons
100
+
101
+ - id: intraverbal
102
+ name: Intraverbal
103
+ gloss: Contextualized verbal-to-verbal responding
104
+ L2:
105
+ - answers WH-questions about familiar topics
106
+ - describes function of common objects
107
+ - names items in categories when given the category
108
+ - answers social questions (How are you?)
109
+ - describes recent events in sequence
110
+ L3:
111
+ - answers why- and how-questions
112
+ - engages in multi-turn conversations on a topic
113
+ - makes inferences from given information
114
+ - provides definitions of words
115
+ - answers hypothetical questions
116
+
117
+ # MO (Motivating Operation) arrangement templates by skill-type category.
118
+ # The generator picks one based on the skill target keywords.
119
+ mo_arrangements:
120
+ - id: deprivation_preferred_item
121
+ applies_to: [mand_item, mand_action]
122
+ text: "Briefly withhold or restrict access to the target preferred item/activity for 30–60 seconds to establish motivation before the teaching opportunity."
123
+ context_examples: [snack time, free play, leisure time]
124
+
125
+ - id: missing_item_completion
126
+ applies_to: [mand_help, mand_missing]
127
+ text: "Arrange an activity that requires a specific item to complete, then omit that item from the available materials. Wait for the learner to notice the gap."
128
+ context_examples: [art activity, snack preparation, puzzle]
129
+
130
+ - id: break_opportunity
131
+ applies_to: [mand_break]
132
+ text: "Present a brief non-preferred task or demand at a level the learner can tolerate but may wish to pause. Wait for a request-for-break topography; honor the request immediately on first acquisition (CRF), thin later."
133
+ context_examples: [work time, tabletop task, academic period]
134
+
135
+ - id: completion_opportunity
136
+ applies_to: [mand_completion]
137
+ text: "Set up a clearly-bounded activity with a visible endpoint (e.g., a worksheet, a puzzle, a sorting task). Wait for the learner to indicate completion verbally or via AAC; honor the indication immediately."
138
+ context_examples: [worksheet, puzzle, sorting task, meal]
139
+
140
+ - id: bathroom_opportunity
141
+ applies_to: [mand_bathroom]
142
+ text: "Arrange a scheduled toileting window (typically 30–60 minutes after fluid intake) and observe for pre-toileting signs (body position, vocalization, squirming). Model the mand topography (vocal, sign, or AAC icon) at the first sign of need; reinforce any approximation with immediate escort to the bathroom. Do not prompt after an accident has started."
143
+ context_examples: [post-snack window, post-drink window, scheduled mid-morning check, end-of-activity transition]
144
+
145
+ - id: unexpected_change
146
+ applies_to: [mand_info_what, mand_info_where, spontaneous_comment]
147
+ text: "Create a small environmental change (new item, familiar item in new location, surprise event) that invites a verbal comment or question."
148
+ context_examples: [morning greeting, room transition, activity setup]
149
+
150
+ - id: peer_presence
151
+ applies_to: [social_initiation, social_turn_taking, peer_mand]
152
+ text: "Arrange a shared activity with one or more peers where the target response is naturally occasioned (turn-taking game, shared snack, collaborative task)."
153
+ context_examples: [peer play, circle time, shared snack]
154
+
155
+ - id: reciprocal_conversation
156
+ applies_to: [intraverbal_multi_turn, social_reciprocal, spontaneous_narrative]
157
+ text: "Engage the learner in a naturalistic conversation on a topic of demonstrated interest. Pause at natural turn boundaries to invite the target response."
158
+ context_examples: [transition conversation, post-activity debrief, leisure chat]
159
+
160
+ - id: routine_lead_in
161
+ applies_to: [spontaneous_greeting, intraverbal_routine]
162
+ text: "Use the natural beginning/ending of an established routine (morning arrival, mealtime, departure) as the moment for the target response."
163
+ context_examples: [arrival, snack, departure]
164
+
165
+ # Natural-context examples drawn into prompts for variation
166
+ natural_contexts:
167
+ - snack time
168
+ - free play with preferred materials
169
+ - transition between activities
170
+ - morning arrival routine
171
+ - peer group game
172
+ - art / craft activity
173
+ - leisure / sensory break
174
+ - outdoor play
175
+ - shared meal preparation
176
+ - departure routine
177
+
178
+ # Prompt strategies used in NET (subset of full hierarchy — naturalistic)
179
+ prompt_strategies:
180
+ - id: time_delay
181
+ name: Time Delay
182
+ description: "Wait a brief interval (0s -> 2s -> 4s) for independent response before delivering a model or verbal prompt."
183
+ best_for: "Most NET mand and spontaneous vocal targets."
184
+
185
+ - id: model_then_fade
186
+ name: Model + immediate fade
187
+ description: "Deliver a vocal/gestural model if no response within the motivated moment; fade to expectant look over sessions."
188
+ best_for: "Acquisition of new mand topographies."
189
+
190
+ - id: expectant_look
191
+ name: Expectant look (pause)
192
+ description: "Deliver no prompt — maintain eye contact and hold the preferred item until the learner responds."
193
+ best_for: "Established mands; fading teacher-delivered prompts."
194
+
195
+ - id: verbal_fill_in
196
+ name: Verbal fill-in
197
+ description: "Deliver a partial phrase and pause; learner completes the vocal response."
198
+ best_for: "Song, rhyme, or scripted social-phrase targets."
199
+
200
+ # Natural reinforcers — always the functional outcome of the target response
201
+ natural_reinforcer_examples:
202
+ mand_item: "immediate access to the requested item for 30–60 seconds."
203
+ mand_action: "performance of the requested action."
204
+ mand_help: "adult delivers the requested help, task completed together."
205
+ mand_missing: "the missing item is provided, activity can continue."
206
+ mand_break: "immediate brief escape from the demand (30–60 seconds) to a neutral or preferred activity."
207
+ mand_completion: "the activity ends immediately and the learner transitions to the next preferred activity."
208
+ mand_bathroom: "immediate escort to the bathroom; the learner is not required to complete the current task before going. Pairing the mand with timely bathroom access is what teaches the learner that requesting works."
209
+ mand_info_what: "adult answers the question informatively."
210
+ mand_info_where: "adult answers the question or provides location."
211
+ mand_attention: "adult attends to the learner and engages briefly."
212
+ social_initiation: "peer engages back; activity continues with both."
213
+ social_turn_taking: "turn is delivered; game continues."
214
+ spontaneous_comment: "adult attends to the comment with matching affect."
215
+ spontaneous_greeting: "adult returns the greeting warmly."
216
+ intraverbal_multi_turn: "conversation continues with adult's naturally matched reply."
217
+ default: "natural social or tangible consequence matching the function of the response."
218
+
219
+ # Mastery criteria for NET programs (slightly different emphasis than DTT)
220
+ mastery_criteria:
221
+ - id: mc_indep_natural_env
222
+ text: "Independent use of target response across 3 natural opportunities per day for 3 consecutive days."
223
+ - id: mc_generalize_multi_therapist
224
+ text: "Independent use across 3 different adults and 2 different settings within 2 weeks."
225
+ - id: mc_peer_initiation
226
+ text: "Independent use with a peer in unstructured free play across 2 consecutive sessions."
227
+ - id: mc_spontaneous_rate
228
+ text: "Average rate of ≥ 3 spontaneous responses per 30-minute session across 3 consecutive sessions."
229
+ - id: mc_fluent_exchange
230
+ text: "Fluent exchange (response within 3 s of MO / opportunity) with appropriate topography, 80% of opportunities across 3 sessions."
configs/net/template.yaml ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # NET teaching program template — TRACE v1
2
+
3
+ template_id: task1_net_v1
4
+ task_type: teaching_program
5
+ method_id: net
6
+
7
+ system_prompt: |
8
+ You are an expert ABA (Applied Behavior Analysis) clinical assistant. You help
9
+ Board Certified Behavior Analysts and staff design teaching programs for
10
+ individuals with autism. Your responses are clinically accurate, individualized
11
+ to the learner profile, follow BACB ethical guidelines, and reference no real
12
+ client data. Select the appropriate teaching method (DTT, NET, Task Analysis,
13
+ FCT, BST, PRT) based on the skill target and learner profile.
14
+
15
+ user_variants:
16
+ - |
17
+ Generate a teaching program for the following target.
18
+
19
+ Skill Target: {skill_target}
20
+ Curriculum Reference: {curriculum_ref}
21
+ Learner Profile: {learner_profile_name}
22
+ Current Mastery: {mastery_state_name}
23
+ Teaching Method: NET (Natural Environment Teaching)
24
+
25
+ Provide the full program structure appropriate to the selected method.
26
+
27
+ - |
28
+ I need a NET program for this skill.
29
+
30
+ Target: {skill_target}
31
+ Curriculum: {curriculum_ref}
32
+ Learner: {learner_profile_name}
33
+ Mastery Status: {mastery_state_name}
34
+
35
+ Please include the MO arrangement, natural opportunity, prompt strategy, natural reinforcer, generalization tactics, and mastery criteria.
36
+
37
+ - |
38
+ Design a Natural Environment Teaching program for the following.
39
+
40
+ Skill: {skill_target}
41
+ Curriculum: {curriculum_ref}
42
+ Learner Level: {learner_profile_name}
43
+ Baseline: {mastery_state_name}
44
+
45
+ Include the full naturalistic teaching structure.
46
+
47
+ assistant_template: |
48
+ ## Program Overview
49
+ - Target skill: {skill_target}
50
+ - Curriculum: {curriculum_ref}
51
+ - Learner: {learner_profile_name}
52
+ - Current mastery: {mastery_state_name}
53
+
54
+ This NET program teaches the target skill through naturalistic embedding — capturing motivated moments within the learner's routine rather than using contrived massed trials.
55
+
56
+ ## Motivating Operation (MO) Arrangement
57
+ {mo_arrangement_text}
58
+ Primary natural context: {primary_natural_context}.
59
+
60
+ ## Natural Opportunity
61
+ {natural_opportunity_text}
62
+
63
+ ## Prompt Strategy
64
+ Strategy: {prompt_strategy_name}
65
+ {prompt_strategy_description}
66
+ Current prompt level: At the {mastery_state_short} stage, {current_prompt_guidance}
67
+
68
+ ## Natural Reinforcer
69
+ {natural_reinforcer_text}
70
+ Avoid contrived consequences — the functional outcome IS the reinforcer.
71
+
72
+ ## Generalization Tactics
73
+ - Program multiple exemplars across {n_exemplars} different stimuli or peers.
74
+ - Rotate between {n_settings} different natural settings.
75
+ - Rotate across {n_therapists} different adults during the teaching phase.
76
+
77
+ ## Mastery Criteria
78
+ {mastery_criterion_text}
79
+
80
+ ## Data Collection
81
+ Opportunity-based data recording: each naturally-occasioned trial is scored for independence and topography. IOA scheduled every {ioa_frequency} sessions minimum; target agreement ≥ 80%.
82
+
83
+ ## Generalization & Maintenance Plan
84
+ Post-mastery: probe across novel peers, settings, and materials. Maintenance probes weekly for 4 weeks, then monthly. Fade adult-delivered prompts before declaring mastery.
configs/session_interpretation/compatibility.yaml ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Session Interpretation compatibility rules — TRACE v1
2
+
3
+ # Pattern -> how many target behaviors are typically present in logs of this pattern
4
+ pattern_behavior_count_ranges:
5
+ mastery_progression: [0, 1]
6
+ regression: [1, 2]
7
+ plateau: [0, 1]
8
+ frustration_pattern: [1, 2]
9
+ variable_performance: [0, 2]
10
+ prompt_dependency: [0, 1]
11
+ rapid_acquisition: [0, 0]
12
+ generalization_failure: [0, 1]
13
+ extinction_burst: [1, 2]
14
+ skill_loss_after_break: [0, 1]
15
+ motivating_operation_shift: [0, 1]
16
+ setting_event_trigger: [0, 2]
17
+
18
+ # Pattern -> likely behavior functions when behavior is present
19
+ pattern_function_bias:
20
+ frustration_pattern: [escape, escape, attention] # escape dominant
21
+ extinction_burst: [escape, attention, tangible] # depends on what's being extinguished
22
+ regression: [escape, attention, automatic]
23
+ variable_performance: [escape, attention, tangible, automatic]
24
+ plateau: [escape, automatic]
25
+ generalization_failure: [escape, attention]
26
+ setting_event_trigger: [escape, automatic, attention]
27
+ motivating_operation_shift: [tangible, automatic]
28
+ prompt_dependency: [attention, escape]
29
+ skill_loss_after_break: [escape, automatic]
30
+ mastery_progression: [automatic] # if any behavior present, likely automatic/low-stakes
31
+ rapid_acquisition: [] # no behaviors
32
+
33
+ # Pattern -> behavioral indicator cluster to sample from when log includes indicators
34
+ pattern_behavioral_indicator_cluster:
35
+ frustration_pattern: frustration
36
+ regression: frustration
37
+ extinction_burst: frustration
38
+ variable_performance: disengagement
39
+ plateau: disengagement
40
+ prompt_dependency: disengagement
41
+ generalization_failure: disengagement
42
+ setting_event_trigger: disengagement
43
+ motivating_operation_shift: disengagement
44
+ mastery_progression: engagement
45
+ rapid_acquisition: engagement
46
+ skill_loss_after_break: disengagement
47
+
48
+ # Escalation rules — raise above pattern default when severe behaviors present
49
+ # Behaviors with typical_severity "high" escalate one level (capped at 4).
50
+ escalation_rules:
51
+ severe_behavior_ids: [sib, aggression, pica, elopement, fecal_smearing]
52
+ severe_behavior_escalation_bump: 1
53
+ safety_immediate_threshold_behaviors: [sib] # SIB at high frequency triggers 4
54
+ safety_immediate_sib_rate_per_session: 8 # freq >= this in final 2 sessions -> escalation 4
55
+
56
+ # Confidence inference rules
57
+ confidence_rules:
58
+ min_sessions_for_moderate: 5
59
+ min_sessions_for_high: 8
60
+ ioa_required_for_high: true
61
+ variance_threshold_low: 0.25 # if accuracy SD > this, cap at moderate
62
+
63
+ # Log length by pattern
64
+ log_length_by_pattern:
65
+ mastery_progression: [8, 12]
66
+ regression: [8, 12]
67
+ plateau: [8, 12]
68
+ frustration_pattern: [8, 12]
69
+ variable_performance: [8, 12]
70
+ prompt_dependency: [8, 12]
71
+ rapid_acquisition: [5, 8]
72
+ generalization_failure: [8, 12]
73
+ extinction_burst: [6, 10]
74
+ skill_loss_after_break: [6, 10]
75
+ motivating_operation_shift: [8, 12]
76
+ setting_event_trigger: [8, 12]
77
+
78
+ # ABC and IOA presence (fractions are probabilities)
79
+ abc_inclusion_probability: 0.3
80
+ ioa_inclusion_probability: 0.25
81
+ ioa_agreement_range: [0.82, 0.97]
82
+ ioa_low_probability: 0.05 # small chance of below-80% IOA
83
+ ioa_low_range: [0.68, 0.79]
84
+
85
+ # Session duration and per-program trial counts (ranges sampled per log; sessions-per-log is set by log_length_by_pattern above)
86
+ session_duration_minutes_range: [30, 60]
87
+ trials_per_program_per_session_range: [8, 15]
88
+
89
+ # Sampling of programs per log
90
+ programs_per_log_range: [3, 6]
configs/session_interpretation/recommendations.yaml ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Per-pattern recommendation template pools — TRACE v1
2
+ # Sampled by the generator to compose the "Programming Recommendations" section.
3
+ # Each pattern has bullets grouped under BIP-shaped sub-sections:
4
+ # antecedent / replacement / consequence / crisis
5
+ # Replacement and crisis sections only appear when target behaviors are in the log.
6
+ #
7
+ # Crisis-plan bullets are grounded in:
8
+ # - BACB Ethics Code for Behavior Analysts (2020), Section 3.05 (restrictive
9
+ # procedures) and Section 2.13 (referrals / coordination of services)
10
+ # - ABAI Position Statement on Restraint and Seclusion (2010)
11
+ # - Widely-used facility crisis-prevention training frameworks as examples only
12
+ # (Safety-Care, CPI, PMT, Therapeutic Crisis Intervention)
13
+ # Procedure specifics are intentionally left to the facility's training program
14
+ # because restraint / seclusion protocols vary by jurisdiction, training
15
+ # certification, and facility policy.
16
+
17
+ patterns:
18
+ mastery_progression:
19
+ antecedent:
20
+ - "Continue current teaching procedure — it is effective for this learner."
21
+ - "Maintain current stimulus arrangement; begin planning next skill in sequence."
22
+ replacement: []
23
+ consequence:
24
+ - "Begin thinning reinforcement from CRF to FR-2 as accuracy stabilizes above 85%."
25
+ - "Continue differential reinforcement for independent responses."
26
+ crisis: []
27
+
28
+ regression:
29
+ antecedent:
30
+ - "Conduct reinforcer assessment to identify current high-preference items."
31
+ - "Review recent environmental or schedule changes that may be affecting performance."
32
+ - "Return temporarily to a previously-mastered prompt level and re-fade systematically."
33
+ replacement:
34
+ - "If problem behavior accompanies regression, strengthen replacement-behavior training for the hypothesized function."
35
+ consequence:
36
+ - "Increase reinforcement magnitude and schedule temporarily (return to CRF if needed)."
37
+ - "Implement more frequent maintenance probes (at least weekly) after re-mastery."
38
+ crisis:
39
+ - "If regression co-occurs with severe behavior (SIB, aggression, property destruction) reaching the learner's safety threshold: pause all skill-acquisition programming, implement environmental safety measures (clear the area of hazards, move peers to safe distance), and contact the supervising BCBA."
40
+ - "Staff should first attempt verbal de-escalation and least-restrictive strategies; physical intervention is used only as a last resort and only by staff currently certified in the facility's crisis-prevention framework (e.g., Safety-Care, CPI, or equivalent)."
41
+ - "Physical intervention is applied only when specifically authorized in this learner's BIP. Many learners have contraindications (medical conditions, developmental or age-related factors, trauma history, or explicit BIP restrictions) that rule out restrictive procedures — in those cases the crisis plan defaults to verbal de-escalation, environmental safety measures, and immediate supervisor / emergency contact without physical intervention."
42
+ - "Document the incident per facility and BACB (Ethics Code 2020, Section 3.05) requirements within 24 hours; conduct post-incident debrief with all staff involved before resuming programming."
43
+
44
+ plateau:
45
+ antecedent:
46
+ - "Modify teaching procedure — switch prompt type (e.g., from most-to-least to time delay)."
47
+ - "Conduct preference assessment and update reinforcer menu."
48
+ - "Break the current skill into smaller component steps if possible."
49
+ - "Increase practice opportunities by embedding targets in natural-environment training."
50
+ replacement: []
51
+ consequence:
52
+ - "Check for inadvertent reinforcement of near-misses; tighten differential reinforcement."
53
+ crisis: []
54
+
55
+ frustration_pattern:
56
+ antecedent:
57
+ - "**Immediately** reduce task difficulty — drop back 1–2 prompt levels or present previously-mastered targets."
58
+ - "Intersperse easy (mastered) tasks with acquisition targets at a 3:1 or 4:1 ratio."
59
+ - "Shorten session length and increase break frequency."
60
+ replacement:
61
+ - "Teach a replacement response ('break please', 'wait please', 'all done') and reinforce it at the function of the problem behavior."
62
+ - "Pair the replacement response with short, predictable escape windows."
63
+ consequence:
64
+ - "Do NOT remove demand contingent on tantrum / escape behavior — this reinforces the escape function."
65
+ - "Provide brief break contingent on independent replacement response instead."
66
+ crisis:
67
+ - "If aggression toward staff, peer aggression, SIB, or property destruction exceeds the learner's safety threshold: first use verbal de-escalation and environmental safety measures (clear the area, move peers to safe distance, remove hazards)."
68
+ - "If behavior continues to pose imminent risk, follow the facility's tiered crisis-intervention protocol — only staff currently certified in the facility's training (e.g., Safety-Care, CPI, PMT, or equivalent) may implement physical intervention, and the least-restrictive option appropriate to the risk level is used. Physical intervention is applied only when specifically authorized in this learner's BIP; for learners with contraindications (medical, developmental, trauma history, or explicit BIP restrictions), the plan stops at de-escalation + environmental safety + supervisor contact."
69
+ - "Notify the supervising BCBA immediately; pause the skill-acquisition program until the BIP is reviewed. Document the incident (antecedent, topography, duration, staff response, post-incident state) per facility and BACB Ethics Code (2020, Section 3.05) requirements."
70
+ - "Conduct a post-incident debrief with all staff involved to review what worked, what did not, and whether the BIP needs revision."
71
+
72
+ variable_performance:
73
+ antecedent:
74
+ - "Check for environmental variables that differ across sessions (time of day, therapist, setting)."
75
+ - "Standardize session structure and reinforcement delivery across therapists."
76
+ replacement: []
77
+ consequence:
78
+ - "Implement inter-observer agreement probes to rule out measurement inconsistency."
79
+ crisis: []
80
+
81
+ prompt_dependency:
82
+ antecedent:
83
+ - "Switch to a time-delay procedure (0s -> 2s -> 4s -> 6s) to encourage independent responding."
84
+ - "Avoid inadvertent prompt delivery (therapist gaze, body positioning) that may function as a prompt."
85
+ replacement: []
86
+ consequence:
87
+ - "Ensure differential reinforcement — independent responses receive higher-quality reinforcement than prompted responses."
88
+ - "Consider stimulus fading instead of response prompting for this skill."
89
+ crisis: []
90
+
91
+ rapid_acquisition:
92
+ antecedent:
93
+ - "Advance to next skill in the sequence immediately."
94
+ - "Begin generalization probes across people, settings, and materials."
95
+ replacement: []
96
+ consequence:
97
+ - "Move to maintenance schedule (probe monthly) and redirect session time to acquisition targets."
98
+ crisis: []
99
+
100
+ generalization_failure:
101
+ antecedent:
102
+ - "Implement structured generalization training: vary therapists, settings, and materials systematically."
103
+ - "Use multiple-exemplar training — teach with 3–5 different stimulus sets."
104
+ - "Program common stimuli across training and generalization settings."
105
+ replacement: []
106
+ consequence:
107
+ - "Consider natural-environment training (NET) to supplement DTT for this skill."
108
+ crisis: []
109
+
110
+ extinction_burst:
111
+ antecedent:
112
+ - "Maintain current antecedent arrangements — do not add extra demand."
113
+ replacement:
114
+ - "Ensure replacement response is available and reinforced at high magnitude during the burst."
115
+ consequence:
116
+ - "**Maintain current protocol** — consistency during extinction bursts is critical."
117
+ - "Ensure all team members are trained on the procedure."
118
+ crisis:
119
+ - "If problem-behavior intensity or topography exceeds the learner's baseline safety threshold (SIB causing tissue damage, aggression causing injury, imminent elopement into hazard): pause the extinction procedure and implement the facility's tiered crisis-prevention protocol (e.g., Safety-Care, CPI, or equivalent)."
120
+ - "De-escalation and environmental safety first; physical intervention is last resort and only by staff currently certified in the facility's framework. Least-restrictive option appropriate to the risk level applies at every step. Physical intervention is used only when specifically authorized for this learner in the BIP — contraindications (medical, developmental, trauma history) may restrict the procedures available, in which case the plan is de-escalation + environmental safety + supervisor contact only."
121
+ - "Contact the supervising BCBA immediately; do not resume the extinction procedure without BCBA review. Document duration, intensity, antecedent, staff response, and post-incident state."
122
+ - "Conduct a post-incident debrief with staff. If the burst exceeds the BIP's anticipated duration or intensity, the BIP should be re-reviewed before resuming."
123
+
124
+ skill_loss_after_break:
125
+ antecedent:
126
+ - "Implement booster sessions at the most-recently-effective prompt level."
127
+ - "Increase session frequency temporarily until pre-break levels are recovered."
128
+ replacement: []
129
+ consequence:
130
+ - "Review and strengthen the maintenance schedule to prevent future skill loss."
131
+ - "Consider sending practice materials home during extended breaks."
132
+ crisis: []
133
+
134
+ motivating_operation_shift:
135
+ antecedent:
136
+ - "Conduct preference assessment to identify a currently-valued reinforcer."
137
+ - "Program MO-establishing operations (deprivation for target reinforcer) before teaching sessions."
138
+ - "Rotate reinforcers to prevent satiation."
139
+ replacement: []
140
+ consequence:
141
+ - "Adjust reinforcement density to match current MO level."
142
+ crisis: []
143
+
144
+ setting_event_trigger:
145
+ antecedent:
146
+ - "Identify and document the setting event (sleep, illness, schedule change, medication)."
147
+ - "Modify programming demand on days when setting event is present."
148
+ - "Coordinate with caregivers / medical team on setting-event factors outside the program."
149
+ replacement: []
150
+ consequence:
151
+ - "Track setting events alongside performance data to quantify their effect."
152
+ crisis:
153
+ - "If the setting event involves a medical concern (seizure-like activity, injury, illness): stop the program immediately, follow the facility's medical-incident protocol, and refer to the appropriate medical professional. Medical events are NOT behavioral targets and must not be treated as ABA programming issues."
154
+ - "If behavior escalates during the setting-event window: use verbal de-escalation and environmental safety first; do not add demand. Only staff currently certified in the facility's crisis-prevention framework may use physical intervention, and only if imminent safety risk warrants it AND the learner's BIP specifically authorizes it (contraindications and BIP restrictions may rule it out)."
155
+ - "Coordinate with caregivers, the medical team, and the supervising BCBA; document the setting event and the associated behavior for pattern analysis. Review the BIP in light of the setting-event data."
configs/session_interpretation/taxonomy.yaml ADDED
@@ -0,0 +1,318 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Session Interpretation taxonomy — TRACE v1
2
+ # The controlled vocabulary the Task 2 generator draws from. Self-contained.
3
+ #
4
+ # Citations for underlying concepts: see taxonomy-v1.md.
5
+
6
+ task:
7
+ id: session_interpretation
8
+ name: Behavioral Session Interpretation
9
+ citation: >
10
+ Multi-session interpretation framework is TRACE's operationalization.
11
+ Underlying concepts grounded in CHH (2020) Ch. 6–7 (analyzing behavior change),
12
+ Iwata 1982/1994 (functional analysis), Hanley-Iwata-McCord 2003 (FBA review),
13
+ Bijou-Peterson-Ault 1968 (ABC recording), Michael 1993 (MOs).
14
+
15
+ # 12 session patterns (each drives trajectory + interpretation)
16
+ patterns:
17
+ - id: mastery_progression
18
+ name: Mastery progression
19
+ description: "Steady improvement toward mastery criteria."
20
+ trend: ascending
21
+ concern_level: none
22
+ default_escalation: 1
23
+ citation: "CHH Ch. 6."
24
+
25
+ - id: regression
26
+ name: Regression
27
+ description: "Decline in performance after previous mastery or near-mastery."
28
+ trend: descending
29
+ concern_level: high
30
+ default_escalation: 3
31
+ citation: "CHH Ch. 26 (maintenance)."
32
+
33
+ - id: plateau
34
+ name: Plateau
35
+ description: "Accuracy stable but below mastery criteria for extended period."
36
+ trend: flat
37
+ concern_level: moderate
38
+ default_escalation: 2
39
+ citation: "CHH Ch. 7."
40
+
41
+ - id: frustration_pattern
42
+ name: Frustration pattern
43
+ description: "Declining accuracy accompanied by behavioral indicators of frustration and escape-maintained responding."
44
+ trend: descending
45
+ concern_level: high
46
+ default_escalation: 3
47
+ citation: "Iwata 1994 (escape function); CHH Ch. 27."
48
+
49
+ - id: variable_performance
50
+ name: Variable performance
51
+ description: "Inconsistent accuracy across sessions with no clear trend."
52
+ trend: variable
53
+ concern_level: moderate
54
+ default_escalation: 2
55
+ citation: "CHH Ch. 5 (measurement reliability)."
56
+
57
+ - id: prompt_dependency
58
+ name: Prompt dependency
59
+ description: "High accuracy with prompts but failure at independent level."
60
+ trend: flat_high_prompted
61
+ concern_level: moderate
62
+ default_escalation: 2
63
+ citation: "Touchette & Howard 1984 JABA (time delay)."
64
+
65
+ - id: rapid_acquisition
66
+ name: Rapid acquisition
67
+ description: "Quick skill acquisition exceeding expected timeline."
68
+ trend: sharply_ascending
69
+ concern_level: none
70
+ default_escalation: 1
71
+ citation: "CHH Ch. 6."
72
+
73
+ - id: generalization_failure
74
+ name: Generalization failure
75
+ description: "Strong performance in training but failure to generalize to novel conditions."
76
+ trend: context_dependent
77
+ concern_level: moderate
78
+ default_escalation: 2
79
+ citation: "Stokes & Baer 1977 JABA; CHH Ch. 26."
80
+
81
+ - id: extinction_burst
82
+ name: Extinction burst
83
+ description: "Temporary increase in problem behavior during behavior reduction."
84
+ trend: temporary_spike
85
+ concern_level: expected
86
+ default_escalation: 2
87
+ citation: "CHH Ch. 24."
88
+
89
+ - id: skill_loss_after_break
90
+ name: Skill loss after break
91
+ description: "Performance decline following extended absence or break, with recovery."
92
+ trend: drop_then_recovery
93
+ concern_level: moderate
94
+ default_escalation: 2
95
+ citation: "CHH Ch. 26 (maintenance)."
96
+
97
+ - id: motivating_operation_shift
98
+ name: Motivating operation shift
99
+ description: "Responding drops when MO changes (e.g., satiation); recovers when MO restored."
100
+ trend: mo_dip
101
+ concern_level: moderate
102
+ default_escalation: 2
103
+ citation: "Michael 1993 JEAB; CHH Ch. 16."
104
+
105
+ - id: setting_event_trigger
106
+ name: Setting event trigger
107
+ description: "Accuracy or behavior changes correlated with an external setting event (illness, sleep, schedule change)."
108
+ trend: setting_event
109
+ concern_level: moderate
110
+ default_escalation: 2
111
+ citation: "Smith & Iwata 1997 JABA; Bijou & Baer 1961."
112
+
113
+ # Behavior functions
114
+ functions:
115
+ - id: escape
116
+ name: escape
117
+ reinforcer: "Termination or avoidance of an aversive stimulus (demand, task, interaction)."
118
+ typical_antecedents: [demand presentation, difficult task, non-preferred activity, transition to work]
119
+ typical_consequences: [task removed, demand withdrawn, redirected to preferred activity]
120
+ - id: attention
121
+ name: attention
122
+ reinforcer: "Attention from others (adult or peer)."
123
+ typical_antecedents: [adult attention diverted, peer interaction, low-attention period]
124
+ typical_consequences: [adult attends, reprimand delivered, peer interaction initiated]
125
+ - id: tangible
126
+ name: tangible
127
+ reinforcer: "Access to an item or activity."
128
+ typical_antecedents: [preferred item removed, restricted access, another peer has item]
129
+ typical_consequences: [item returned, access granted, activity restored]
130
+ - id: automatic
131
+ name: automatic
132
+ reinforcer: "Self-produced reinforcement (sensory, proprioceptive)."
133
+ typical_antecedents: [low-stimulation period, alone time, waiting]
134
+ typical_consequences: [no social mediation; behavior self-terminates or continues]
135
+ - id: unknown
136
+ name: unknown
137
+ reinforcer: "Insufficient data to determine function."
138
+
139
+ # Target behaviors (deceleration targets)
140
+ # For full operational definitions + citations see taxonomy-v1.md.
141
+ # Used when the generator samples 0–3 behaviors per log.
142
+ target_behaviors:
143
+ - id: tantrum
144
+ name: Tantrum
145
+ operational: "Co-occurring cluster of two or more of: crying/screaming, dropping to floor, kicking, hitting, throwing objects, lasting ≥ 3 s."
146
+ plausible_functions: [escape, attention, tangible]
147
+ typical_severity: moderate
148
+ - id: aggression
149
+ name: Aggression
150
+ operational: "Attempted or completed forceful contact directed toward another person (hit, kick, bite, scratch, pinch, throw-at)."
151
+ plausible_functions: [escape, tangible, attention]
152
+ typical_severity: high
153
+ - id: sib
154
+ name: Self-injurious behavior (SIB)
155
+ operational: "Any response that produces tissue damage or has potential to (head-hit, self-bite, face-slap, head-bang, skin-pick, self-pinch)."
156
+ plausible_functions: [automatic, attention, escape]
157
+ typical_severity: high
158
+ - id: elopement
159
+ name: Elopement
160
+ operational: "Full body crossing a designated boundary without adult approval."
161
+ plausible_functions: [escape, tangible, automatic]
162
+ typical_severity: high
163
+ - id: property_destruction
164
+ name: Property destruction
165
+ operational: "Hitting/kicking furniture or walls; throwing objects not meant to be thrown; tearing clothing, books, materials."
166
+ plausible_functions: [escape, attention, tangible]
167
+ typical_severity: moderate
168
+ - id: motor_stereotypy
169
+ name: Motor stereotypy
170
+ operational: "Repetitive non-functional motor movements (hand-flap, body-rock, finger-flick, spin)."
171
+ plausible_functions: [automatic]
172
+ typical_severity: low
173
+ - id: vocal_stereotypy
174
+ name: Vocal stereotypy
175
+ operational: "Non-contextual, non-communicative vocalizations outside appropriate conversational context."
176
+ plausible_functions: [automatic]
177
+ typical_severity: low
178
+ - id: non_compliance
179
+ name: Non-compliance
180
+ operational: "Failure to initiate a requested response within 5 s of an instructional prompt, OR active refusal."
181
+ plausible_functions: [escape]
182
+ typical_severity: low
183
+ - id: mouthing
184
+ name: Mouthing
185
+ operational: "Placing hand, fingers, or non-food object into the mouth outside scheduled meal/snack times."
186
+ plausible_functions: [automatic]
187
+ typical_severity: low
188
+ - id: pica
189
+ name: Pica
190
+ operational: "Placement of any inedible item past the plane of the lips, including ingestion."
191
+ plausible_functions: [automatic, attention]
192
+ typical_severity: high
193
+ - id: verbal_aggression
194
+ name: Verbal aggression
195
+ operational: "Yelling, cursing, name-calling, or verbal threats toward another person."
196
+ plausible_functions: [attention, escape]
197
+ typical_severity: moderate
198
+ - id: fecal_smearing
199
+ name: Fecal smearing (scatolia)
200
+ operational: "Reaching into diaper, pull-up, or pants to retrieve feces and/or spreading feces onto skin, clothing, walls, furniture, or other surfaces."
201
+ plausible_functions: [automatic, attention]
202
+ typical_severity: high
203
+ citation: "Piazza et al. 1996 JABA; Matson et al. 2008 (scatolia in ASD)."
204
+ - id: toileting_accident
205
+ name: Toileting accident (urine or bowel)
206
+ operational: "Session toileting data tracks both successful voids in the toilet (urine and BM) and accidents (voiding outside the toilet onto self, clothing, furniture, or floor). The deceleration target is accident frequency; in-toilet voids are recorded as context for toileting progress. Medical rule-out required before treating accidents behaviorally."
207
+ plausible_functions: [automatic, escape]
208
+ typical_severity: moderate
209
+ citation: "Azrin & Foxx 1971 JABA; Cicero & Pfadt 2002; LeBlanc et al. 2005."
210
+
211
+ # Measurement types used in session logs
212
+ measurement_types:
213
+ - id: accuracy_pct
214
+ short: "accuracy"
215
+ description: "Trial-based percentage correct (X/N)."
216
+ used_for: [skill acquisition]
217
+ - id: frequency
218
+ short: "freq"
219
+ description: "Raw count per session."
220
+ used_for: [discrete behaviors, target behaviors]
221
+ - id: rate
222
+ short: "rate"
223
+ description: "Count per minute or hour."
224
+ used_for: [target behaviors]
225
+ - id: duration
226
+ short: "duration"
227
+ description: "Total elapsed time behavior occurred."
228
+ used_for: [tantrum, on-task engagement]
229
+ - id: latency
230
+ short: "latency"
231
+ description: "Time from SD to response initiation."
232
+ used_for: [skill responsivity]
233
+ - id: partial_interval
234
+ short: "PIR"
235
+ description: "Proportion of intervals in which behavior occurred at any point."
236
+ used_for: [stereotypy, low-intensity behavior]
237
+ - id: momentary_time_sampling
238
+ short: "MTS"
239
+ description: "Behavior sampled at end of each interval."
240
+ used_for: [group observation]
241
+
242
+ # Escalation levels (safety-critical structured label)
243
+ escalation_levels:
244
+ - id: 1
245
+ label: "Continue monitoring"
246
+ description: "Current programming is effective; no changes."
247
+ - id: 2
248
+ label: "Adjust next session"
249
+ description: "Implement a specific programming change at the next session."
250
+ - id: 3
251
+ label: "Supervisor review"
252
+ description: "Elevate to supervising BCBA within 24–48 hours."
253
+ - id: 4
254
+ label: "Safety immediate"
255
+ description: "Stop program; contact supervisor / family; consider crisis-plan activation."
256
+
257
+ # Confidence levels (calibration signal)
258
+ confidence_levels:
259
+ - id: high
260
+ label: high
261
+ description: "Pattern and recommendation well-supported by log data."
262
+ - id: moderate
263
+ label: moderate
264
+ description: "Data supports the hypothesis but alternatives cannot be fully ruled out."
265
+ - id: low
266
+ label: low
267
+ description: "Insufficient data to rule in a specific pattern; recommend data collection before programming changes."
268
+
269
+ # Behavioral indicator phrases (pasted verbatim into session logs)
270
+ # Organized by cluster; Task 2 generator samples from here.
271
+ behavioral_indicators:
272
+ frustration:
273
+ - "Increased response latency (3–5× baseline)"
274
+ - "Pushing materials away from the work area"
275
+ - "Turning away from task or staff"
276
+ - "Vocal refusal ('no', 'I don't want to')"
277
+ - "Self-stimulatory behavior increase during demand periods"
278
+ - "Crying during demand presentation"
279
+ - "Elopement from the work area"
280
+ - "Attempts at property destruction (throwing materials, swiping items off the surface)"
281
+ - "Aggression attempt directed at staff (hit, kick, or scratch toward adult)"
282
+ - "Peer aggression (hitting or pushing peers during group activities)"
283
+ - "Self-injurious behavior onset during task difficulty (self-biting, hair-pulling, head-hitting)"
284
+ engagement:
285
+ - "Eye contact maintained throughout trials"
286
+ - "Quick response latency (within 2–3 seconds)"
287
+ - "Reaching for materials proactively"
288
+ - "Smiling during reinforcement delivery"
289
+ - "Requesting more trials verbally"
290
+ - "Appropriate sitting posture maintained"
291
+ disengagement:
292
+ - "Looking away during SD presentation"
293
+ - "Delayed responding (>5 seconds consistently)"
294
+ - "Stereotypic behavior during inter-trial intervals"
295
+ - "Leaving seat without permission"
296
+ - "Playing with non-task materials"
297
+ - "Echolalic responding (repeating SD rather than answering)"
298
+
299
+ # Teaching method metadata (for referencing methods in log programs)
300
+ teaching_methods:
301
+ - id: dtt
302
+ name: DTT
303
+ default_structure: "structured discrete trials, massed"
304
+ - id: net
305
+ name: NET
306
+ default_structure: "natural-environment embedded opportunities"
307
+ - id: task_analysis
308
+ name: Task Analysis
309
+ default_structure: "multi-step chaining, {chain_type}"
310
+ - id: fct
311
+ name: FCT
312
+ default_structure: "functional communication training, replacement response = {replacement}"
313
+ - id: prt
314
+ name: PRT
315
+ default_structure: "pivotal response training, child-choice, attempt reinforcement"
316
+ - id: bst
317
+ name: BST
318
+ default_structure: "staff-facing training program"
configs/session_interpretation/template.yaml ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Session Interpretation template — TRACE v1
2
+ # Task 2 — given a multi-session behavioral log, produce structured interpretation.
3
+
4
+ template_id: task2_session_interp_v1
5
+ task_type: session_interpretation
6
+
7
+ system_prompt: |
8
+ You are an expert ABA (Applied Behavior Analysis) clinical assistant. You
9
+ analyze multi-session behavioral session logs for individuals with autism and
10
+ produce structured clinical interpretations that identify patterns, hypothesize
11
+ behavior functions when applicable, and recommend programming adjustments
12
+ structured along BIP lines (antecedent strategies, replacement behaviors,
13
+ consequence strategies, crisis plan). Your interpretation also includes an
14
+ escalation level and a confidence expression. Every recommendation is grounded
15
+ in the data provided. Follow BACB ethical guidelines and reference no real
16
+ client data.
17
+
18
+ # User message variants — one is sampled per example to vary phrasing
19
+ user_variants:
20
+ - |
21
+ Interpret the following behavioral session log. Provide clinical concerns, pattern classification, behavior function hypothesis (if applicable), programming recommendations (antecedent / replacement / consequence / crisis), escalation level, confidence, and data-supported rationale.
22
+
23
+ {session_log}
24
+
25
+ - |
26
+ Review this session log and produce a structured clinical interpretation.
27
+
28
+ {session_log}
29
+
30
+ Please include: (1) clinical concerns, (2) pattern classification, (3) function hypothesis for any target behaviors, (4) recommendations structured as antecedent / replacement / consequence / crisis, (5) escalation level and confidence, (6) data-supported rationale.
31
+
32
+ - |
33
+ Here is a multi-session behavioral log for a learner. Analyze it and provide your clinical interpretation using the structured format.
34
+
35
+ {session_log}
36
+
37
+ # Assistant template — filled section-by-section by the generator.
38
+ # Conditional sections (function hypothesis, replacement, crisis plan) are
39
+ # pre-computed by the generator as strings, possibly empty.
40
+ assistant_template: |
41
+ ## Clinical Concerns
42
+ {clinical_concerns_bullets}
43
+
44
+ ## Pattern Classification
45
+ {pattern_class}
46
+
47
+ {pattern_explanation}
48
+ {function_hypothesis_section}
49
+ ## Programming Recommendations
50
+
51
+ ### Antecedent strategies
52
+ {antecedent_bullets}
53
+ {replacement_section}
54
+ ### Consequence strategies
55
+ {consequence_bullets}
56
+ {crisis_section}
57
+ ## Escalation Level
58
+ {escalation_level} — {escalation_label}
59
+
60
+ {escalation_justification}
61
+
62
+ ## Confidence
63
+ {confidence_level}
64
+
65
+ {confidence_justification}
66
+
67
+ ## Data-Supported Rationale
68
+ {rationale_bullets}
configs/session_interpretation/trajectory_rules.yaml ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Per-pattern trajectory rules — TRACE v1
2
+ # Parameters that drive per-session accuracy and behavior-frequency generation.
3
+ # Each pattern ID matches configs/session_interpretation/taxonomy.yaml.
4
+ #
5
+ # Interpretation of fields:
6
+ # base_range: uniform-sample initial accuracy
7
+ # delta_per_session_range: per-session delta applied cumulatively
8
+ # noise: uniform ±noise added per session
9
+ # behavior_trajectory: how target-behavior frequencies evolve (optional)
10
+ # "stable_low" / "ascending" / "descending" / "spike" / etc.
11
+
12
+ rules:
13
+ mastery_progression:
14
+ accuracy:
15
+ base_range: [0.30, 0.50]
16
+ delta_per_session_range: [0.04, 0.08]
17
+ noise: 0.05
18
+ cap: 0.95
19
+ behavior_trajectory: stable_low
20
+
21
+ regression:
22
+ accuracy:
23
+ base_range: [0.70, 0.85]
24
+ delta_per_session_range: [-0.08, -0.04]
25
+ noise: 0.05
26
+ floor: 0.10
27
+ behavior_trajectory: ascending_moderate
28
+
29
+ plateau:
30
+ accuracy:
31
+ base_range: [0.45, 0.65]
32
+ delta_per_session_range: [-0.02, 0.02]
33
+ noise: 0.08
34
+ behavior_trajectory: stable_low
35
+
36
+ frustration_pattern:
37
+ accuracy:
38
+ base_range: [0.70, 0.85]
39
+ delta_per_session_range: [-0.08, -0.04]
40
+ noise: 0.05
41
+ floor: 0.10
42
+ behavior_trajectory: ascending_strong
43
+ behavioral_indicators: frustration
44
+
45
+ variable_performance:
46
+ accuracy:
47
+ base_range: [0.40, 0.70]
48
+ delta_per_session_range: [0.0, 0.0]
49
+ noise: 0.20
50
+ behavior_trajectory: variable
51
+
52
+ prompt_dependency:
53
+ accuracy:
54
+ base_range: [0.75, 0.95] # high but prompted
55
+ delta_per_session_range: [-0.02, 0.02]
56
+ noise: 0.04
57
+ behavior_trajectory: stable_low
58
+ prompted_share: [0.70, 0.90] # share of trials that are prompted (not independent)
59
+
60
+ rapid_acquisition:
61
+ accuracy:
62
+ base_range: [0.20, 0.40]
63
+ delta_per_session_range: [0.08, 0.12]
64
+ noise: 0.03
65
+ cap: 0.97
66
+ behavior_trajectory: stable_low
67
+
68
+ generalization_failure:
69
+ accuracy:
70
+ alternating:
71
+ training_range: [0.75, 0.90]
72
+ novel_range: [0.25, 0.45]
73
+ noise: 0.05
74
+ behavior_trajectory: stable_low
75
+
76
+ extinction_burst:
77
+ accuracy:
78
+ base_range: [0.50, 0.70]
79
+ delta_per_session_range: [0.0, 0.03]
80
+ noise: 0.05
81
+ behavior_trajectory: temporary_spike
82
+ burst_start_fraction: 0.33 # spike starts ~1/3 through log
83
+ burst_duration_sessions: 2
84
+ burst_magnitude: [2.0, 3.5] # multiplier on baseline frequency
85
+
86
+ skill_loss_after_break:
87
+ accuracy:
88
+ base_range: [0.70, 0.85]
89
+ initial_drop: [0.25, 0.40]
90
+ recovery_per_session_range: [0.05, 0.10]
91
+ noise: 0.03
92
+ behavior_trajectory: stable_low
93
+
94
+ motivating_operation_shift:
95
+ accuracy:
96
+ base_range: [0.70, 0.85]
97
+ mo_dip_sessions: [2, 4] # how many consecutive sessions show the dip
98
+ mo_dip_magnitude: [0.25, 0.40] # drop depth
99
+ recovery_per_session_range: [0.05, 0.08]
100
+ noise: 0.04
101
+ behavior_trajectory: stable_low
102
+
103
+ setting_event_trigger:
104
+ accuracy:
105
+ base_range: [0.55, 0.75]
106
+ setting_event_session_fraction: 0.40 # where the setting event hits
107
+ setting_event_magnitude: [0.20, 0.35]
108
+ recovery_per_session_range: [0.04, 0.08]
109
+ noise: 0.05
110
+ behavior_trajectory: coincident_spike
111
+
112
+ # Behavior-trajectory shorthand -> what it produces for frequency per session
113
+ behavior_trajectory_library:
114
+ stable_low:
115
+ base_freq_range: [0, 3]
116
+ delta_per_session: 0.0
117
+ noise: 1.0
118
+ ascending_moderate:
119
+ base_freq_range: [1, 3]
120
+ delta_per_session: 0.3
121
+ noise: 1.0
122
+ ascending_strong:
123
+ base_freq_range: [1, 3]
124
+ delta_per_session: 0.6
125
+ noise: 1.0
126
+ variable:
127
+ base_freq_range: [1, 5]
128
+ delta_per_session: 0.0
129
+ noise: 3.0
130
+ temporary_spike:
131
+ base_freq_range: [1, 3]
132
+ spike_multiplier_range: [2.5, 4.0]
133
+ noise: 1.0
134
+ coincident_spike:
135
+ base_freq_range: [1, 3]
136
+ spike_multiplier_range: [2.0, 3.5]
137
+ noise: 1.0
configs/shared/learner_profiles.yaml ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Learner profile taxonomy — TRACE v1
2
+ # Groups learners by developmental + chronological age + curriculum scope.
3
+
4
+ profiles:
5
+ - id: early
6
+ name: Early Learner
7
+ developmental_age: "0–48 months"
8
+ curricula: [vbmapp]
9
+ typical_vbmapp_levels: [L1, L2]
10
+ note: "Foundational skills; discrete + naturalistic teaching appropriate."
11
+
12
+ - id: school_age
13
+ name: School-Age Learner
14
+ developmental_age: "varies; often 24–60 months equivalent"
15
+ chronological_age: "6–12 years"
16
+ curricula: [vbmapp, afls]
17
+ typical_vbmapp_levels: [L2, L3]
18
+ typical_afls_modules: [school]
19
+ note: "Classroom skills + pre-academic + continuing language."
20
+
21
+ - id: adolescent
22
+ name: Adolescent Learner
23
+ chronological_age: "13–17 years"
24
+ curricula: [afls, vbmapp]
25
+ typical_vbmapp_levels: [L3]
26
+ typical_afls_modules: [basic_living, home, community, school, vocational]
27
+ note: "Functional-living + vocational focus; residual language gaps addressed."
28
+
29
+ - id: adult
30
+ name: Adult Learner
31
+ chronological_age: "18+ years"
32
+ curricula: [afls]
33
+ typical_afls_modules: [basic_living, home, community, vocational, independent_living]
34
+ note: "Independent-living + vocational focus; residential/day-program setting."
configs/shared/mastery_states.yaml ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Mastery states taxonomy — TRACE v1
2
+ # Where a skill sits in its learning trajectory.
3
+ # Citation: Cooper, Heron, Heward (2020), Ch. 26, Ch. 28.
4
+
5
+ states:
6
+ - id: emerging
7
+ name: Emerging
8
+ short: emerging
9
+ accuracy_range: [0.00, 0.30]
10
+ description: "Skill not yet acquired; high prompt levels needed."
11
+ prompt_level_guidance: "Begin at higher prompt levels (full physical / partial physical)."
12
+
13
+ - id: developing
14
+ name: Developing
15
+ short: developing
16
+ accuracy_range: [0.30, 0.50]
17
+ description: "Responding established; stimulus control unstable."
18
+ prompt_level_guidance: "Partial physical or modeling prompt with systematic fading."
19
+
20
+ - id: approaching
21
+ name: Approaching mastery
22
+ short: approaching mastery
23
+ accuracy_range: [0.50, 0.70]
24
+ description: "Moving toward independence; prompt fading in progress."
25
+ prompt_level_guidance: "Gestural or positional prompts; plan fade to independence."
26
+
27
+ - id: near
28
+ name: Near mastery
29
+ short: near mastery
30
+ accuracy_range: [0.70, 0.85]
31
+ description: "Stimulus control solid; occasional errors on complex stimuli."
32
+ prompt_level_guidance: "Minimal prompts; increase expectations for independence."
33
+
34
+ - id: mastered
35
+ name: Mastered at current level
36
+ short: mastered
37
+ accuracy_range: [0.85, 1.00]
38
+ description: "Meets mastery criterion for current step."
39
+ prompt_level_guidance: "Independent responding; move to generalization."
40
+
41
+ - id: generalization
42
+ name: Generalization phase
43
+ short: generalization
44
+ accuracy_range: [0.85, 1.00]
45
+ description: "Mastered in training; extending to novel settings / materials / therapists."
46
+ prompt_level_guidance: "NET-style opportunities; multiple exemplar probing."
configs/shared/prompt_types.yaml ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Canonical prompt types — TRACE shared taxonomy
2
+ # Citation: Cooper, Heron, & Heward (2020), Ch. 21 ("Prompting and Fading").
3
+ #
4
+ # Used across any teaching method that has a prompt hierarchy (DTT, Task Analysis, etc.).
5
+ # Each method's own taxonomy references these by id.
6
+
7
+ prompt_types:
8
+ - id: full_physical
9
+ short: FP
10
+ name: Full physical
11
+ description: "Hand-over-hand / complete manual guidance through the movement."
12
+
13
+ - id: partial_physical
14
+ short: PP
15
+ name: Partial physical
16
+ description: "Light guidance at a more proximal body part (wrist, elbow, shoulder)."
17
+
18
+ - id: model
19
+ short: M
20
+ name: Modeling
21
+ description: "Demonstrate the response; learner copies."
22
+
23
+ - id: gestural
24
+ short: G
25
+ name: Gestural
26
+ description: "Pointing or indicating without physical contact."
27
+
28
+ - id: verbal
29
+ short: V
30
+ name: Verbal
31
+ description: "Spoken hint or instruction."
32
+
33
+ - id: positional
34
+ short: Pos
35
+ name: Positional
36
+ description: "Target stimulus arranged closer to learner or more prominent."
37
+
38
+ - id: visual
39
+ short: Vis
40
+ name: Visual
41
+ description: "Pictures, arrows, written cues placed for learner to see."
42
+
43
+ - id: independent
44
+ short: I
45
+ name: Independent
46
+ description: "No prompt; learner responds without assistance."
configs/task_analysis/compatibility.yaml ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Task Analysis clinical-consistency rules — TRACE v1
2
+
3
+ # Module -> typical learner profiles (some modules only fit older learners)
4
+ module_to_learner_profiles:
5
+ basic_living: [school_age, adolescent, adult]
6
+ home: [adolescent, adult]
7
+ community: [adolescent, adult]
8
+ vocational: [adolescent, adult]
9
+ independent_living: [adult]
10
+
11
+ # Mastery state -> acceptable reinforcement schedules (chaining-specific)
12
+ mastery_to_reinforcement:
13
+ emerging: [crf_per_step]
14
+ developing: [crf_per_step]
15
+ approaching: [crf_per_step, differential_per_step]
16
+ near: [differential_per_step, token_economy]
17
+ mastered: [terminal_reinforcement, token_economy]
18
+ generalization: [terminal_reinforcement]
19
+
20
+ # Chain type tends to follow skill preference; fallback is total_task
21
+ use_preferred_chain_type_probability: 0.75
22
+
23
+ # Sampling across modules (weighted toward typical clinical frequency)
24
+ module_sampling_weights:
25
+ basic_living: 0.35
26
+ home: 0.20
27
+ community: 0.15
28
+ vocational: 0.15
29
+ independent_living: 0.15
30
+
31
+ # Prompt strategy bias by mastery state
32
+ mastery_to_prompt_strategies:
33
+ emerging: [graduated_guidance, most_to_least]
34
+ developing: [graduated_guidance, most_to_least]
35
+ approaching: [most_to_least, least_to_most]
36
+ near: [least_to_most]
37
+ mastered: [least_to_most]
38
+ generalization: [least_to_most]
configs/task_analysis/taxonomy.yaml ADDED
@@ -0,0 +1,413 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Task Analysis / Chaining taxonomy — TRACE v1
2
+ # Self-contained: AFLS skills broken into step lists, chain types, prompting.
3
+ # Citation: CHH 2020 Ch. 23; Slocum & Tiger 2011 JABA; Partington & Mueller 2012.
4
+
5
+ method:
6
+ id: task_analysis
7
+ name: Task Analysis / Chaining
8
+ short: Task Analysis
9
+ citation: "Cooper, Heron, Heward (2020) Applied Behavior Analysis (3rd ed.), Ch. 23; Slocum, S. K. & Tiger, J. H. (2011). JABA, 44(4), 793–805; Partington & Mueller 2012 (AFLS)."
10
+ when_to_use: >
11
+ Multi-step skills with a natural sequence — toileting, handwashing,
12
+ dressing, cooking, cleaning, classroom routines, community tasks,
13
+ vocational routines.
14
+
15
+ # Chain types
16
+ chain_types:
17
+ - id: forward
18
+ name: Forward chaining
19
+ description: "Teach step 1 to mastery, then 1->2, then 1->2->3, progressively adding later steps."
20
+ best_for: "Skills where early steps are simple and the chain grows in complexity."
21
+ - id: backward
22
+ name: Backward chaining
23
+ description: "Complete all steps for the learner except the last; teach the last step first, then work backward. The final reinforcer (task completion) is always present for the learner."
24
+ best_for: "Skills where the terminal reinforcer is salient (bathing, dressing, meal cleanup)."
25
+ - id: total_task
26
+ name: Total-task presentation
27
+ description: "Prompt the learner through the entire chain every trial; fade prompts across all steps simultaneously."
28
+ best_for: "Skills with few steps, or when all steps are similar in difficulty."
29
+
30
+ # Prompt hierarchies commonly used in chaining
31
+ prompt_hierarchies:
32
+ - id: graduated_guidance
33
+ name: Graduated Guidance
34
+ description: "Hand-over-hand physical guidance with fading pressure; move to shadow, then independent."
35
+ best_for: "Self-care and motor-dominant chains."
36
+ - id: most_to_least
37
+ name: Most-to-Least
38
+ description: "Start with full-physical prompt at each step; fade to partial physical, gestural, verbal, independent."
39
+ best_for: "Acquisition phase; new chains."
40
+ - id: least_to_most
41
+ name: Least-to-Most
42
+ description: "Allow independent attempt; add progressively more intrusive prompts only if learner does not initiate."
43
+ best_for: "Partial-repertoire chains; promoting independence."
44
+
45
+ # Error correction procedures appropriate for chaining
46
+ error_corrections:
47
+ - id: back_to_previous_step
48
+ name: Back-step + re-approach
49
+ steps: "Return to the previously-mastered step; re-execute that step correctly; then re-attempt the missed step with one prompt level above the last independent level."
50
+ - id: model_prompt_fade
51
+ name: Model -> prompt -> fade
52
+ steps: "Demonstrate the correct step; provide physical or gestural prompt as needed; fade prompt across subsequent trials."
53
+ - id: task_reset
54
+ name: Task reset
55
+ steps: "Pause the chain; reset environmental stimuli; restart the chain from the beginning with full prompts at the previously-missed step."
56
+
57
+ # Reinforcement schedules for chaining
58
+ reinforcement_schedules:
59
+ - id: crf_per_step
60
+ name: CRF per step (acquisition)
61
+ description: "Continuous reinforcement for each correctly-completed step during acquisition."
62
+ - id: terminal_reinforcement
63
+ name: Terminal reinforcement (mastered)
64
+ description: "Reinforcement delivered only at chain completion; no per-step reinforcement."
65
+ - id: differential_per_step
66
+ name: Differential per-step + terminal
67
+ description: "Small per-step reinforcement for independent responses; larger reinforcer at chain completion."
68
+ - id: token_economy
69
+ name: Token economy
70
+ description: "One token per correctly-completed step; token exchange at chain completion."
71
+
72
+ # Mastery criteria for chains (independence) vs toleration programs.
73
+ # Generator filters by applies_to when picking a criterion.
74
+ mastery_criteria:
75
+ - id: mc_all_steps_independent
76
+ applies_to: independence
77
+ text: "100% of steps completed independently (no prompts) across 3 consecutive trials of the full chain."
78
+ - id: mc_fluent_chain
79
+ applies_to: independence
80
+ text: "Full chain completed within {fluency_seconds}s across 3 consecutive sessions with independent responding on all steps."
81
+ - id: mc_generalization
82
+ applies_to: independence
83
+ text: "Full chain completed independently with 2 different adults in 2 different settings across 2 consecutive days."
84
+ - id: mc_self_initiation
85
+ applies_to: independence
86
+ text: "Chain self-initiated from the appropriate antecedent (no verbal prompt to begin) across 3 consecutive opportunities."
87
+ - id: mc_toleration_full_duration
88
+ applies_to: toleration
89
+ text: "End-goal duration tolerated across 3 consecutive sessions with no safety behaviors (refusal, crying, SIB, aggression, property destruction)."
90
+ - id: mc_toleration_caregivers
91
+ applies_to: toleration
92
+ text: "End-goal activity tolerated with 2 different caregivers (primary caregiver + one other) across 2 consecutive weeks."
93
+ - id: mc_toleration_settings
94
+ applies_to: toleration
95
+ text: "End-goal activity tolerated across 2 different settings (e.g., home and clinic) across 2 consecutive weeks without safety behaviors."
96
+
97
+ # AFLS skill domains — learner profile + module + skill + step definitions
98
+ afls:
99
+ modules:
100
+ - id: basic_living
101
+ name: Basic Living Skills
102
+ learner_profiles: [school_age, adolescent, adult]
103
+ # Two kinds of programs in this module:
104
+ # - "steps" field = independence-focused chaining (learner performs the skill with
105
+ # graduated prompts)
106
+ # - "shaping_steps" field = toleration program (learner allows staff or caregiver
107
+ # to perform the skill; duration-based shaping of tolerance)
108
+ # Some real skills have BOTH variants depending on learner profile and history.
109
+ skills:
110
+ - name: washing hands
111
+ chain_type_preferred: total_task
112
+ steps:
113
+ - Walk to the sink.
114
+ - Turn on the water.
115
+ - Wet hands under the water.
116
+ - Apply soap to hands.
117
+ - Rub hands together for 15–20 seconds, covering fronts, backs, and between fingers.
118
+ - Rinse hands thoroughly under the water.
119
+ - Turn off the water.
120
+ - Dry hands with a towel.
121
+ - name: brushing teeth
122
+ chain_type_preferred: total_task
123
+ steps:
124
+ - Collect toothbrush and toothpaste.
125
+ - Wet the toothbrush under water.
126
+ - Apply a pea-sized amount of toothpaste.
127
+ - Brush upper-front teeth in small circles for 30 seconds.
128
+ - Brush lower-front teeth in small circles for 30 seconds.
129
+ - Brush upper-back teeth (left and right).
130
+ - Brush lower-back teeth (left and right).
131
+ - Spit into the sink and rinse mouth.
132
+ - Rinse the toothbrush and return it to the holder.
133
+ - name: putting on a shirt (pullover)
134
+ chain_type_preferred: backward
135
+ steps:
136
+ - Hold the shirt with the front facing away.
137
+ - Place both arms into the sleeves.
138
+ - Pull the opening over the head.
139
+ - Push arms through the sleeves fully.
140
+ - Pull the bottom of the shirt down to straighten it.
141
+ - name: putting on pants
142
+ chain_type_preferred: backward
143
+ steps:
144
+ - Sit or stand securely.
145
+ - Hold pants with the front facing away.
146
+ - Place one leg into the corresponding pant leg.
147
+ - Place the other leg into the other pant leg.
148
+ - Pull pants up to the waist.
149
+ - Fasten the button / zipper / velcro.
150
+ - name: showering
151
+ chain_type_preferred: backward
152
+ steps:
153
+ - Enter the shower and close the curtain.
154
+ - Turn on the water and adjust temperature.
155
+ - Wet body completely.
156
+ - Apply soap to a washcloth.
157
+ - Wash body systematically (face -> torso -> arms -> legs -> feet).
158
+ - Rinse all soap off.
159
+ - Apply shampoo and rub into hair.
160
+ - Rinse hair completely.
161
+ - Turn off the water.
162
+ - Exit the shower and dry off with a towel.
163
+ - name: toileting routine
164
+ chain_type_preferred: forward
165
+ steps:
166
+ - Recognize the need and walk to the bathroom.
167
+ - Close the door for privacy.
168
+ - Lower clothing appropriately.
169
+ - Sit on the toilet.
170
+ - Complete the elimination.
171
+ - Use toilet paper correctly.
172
+ - Stand and pull clothing up.
173
+ - Flush the toilet.
174
+ - Wash hands.
175
+
176
+ # Toleration programs
177
+ # For learners who do not tolerate the activity being performed on them.
178
+ # Goal is NOT to do it independently — goal is to allow caregiver/staff to
179
+ # complete the activity without refusal, crying, SIB, or aggression, with
180
+ # duration shaped incrementally.
181
+ - name: tolerating tooth brushing by caregiver
182
+ program_type: toleration
183
+ target_activity: tooth brushing performed by caregiver
184
+ end_goal_description: "full 2-minute tooth-brushing routine completed by caregiver without refusal, crying, SIB, or aggression"
185
+ shaping_steps:
186
+ - "1-second toothbrush contact with teeth"
187
+ - "3 seconds of brushing"
188
+ - "5 seconds of brushing"
189
+ - "15 seconds of brushing"
190
+ - "30 seconds of brushing"
191
+ - "60 seconds of brushing"
192
+ - "Full 2-minute brushing routine"
193
+
194
+ - name: tolerating hair washing by caregiver
195
+ program_type: toleration
196
+ target_activity: hair washing / shampooing performed by caregiver
197
+ end_goal_description: "full hair-washing routine completed by caregiver without refusal or safety behaviors"
198
+ shaping_steps:
199
+ - "5 seconds of water on hair"
200
+ - "15 seconds of water on hair"
201
+ - "Shampoo applied without lathering"
202
+ - "30 seconds of gentle lathering"
203
+ - "60 seconds of lathering"
204
+ - "Full rinse"
205
+ - "Full routine: wet -> shampoo -> lather -> rinse"
206
+
207
+ - name: tolerating nail clipping
208
+ program_type: toleration
209
+ target_activity: nail clipping performed by caregiver
210
+ end_goal_description: "all 10 fingernails (and optionally toenails) clipped by caregiver without refusal, pulling hand away, or safety behaviors"
211
+ shaping_steps:
212
+ - "1 fingernail clipped"
213
+ - "2 fingernails clipped in one sitting"
214
+ - "5 fingernails clipped (one hand)"
215
+ - "All 10 fingernails clipped"
216
+ - "All 10 fingernails + 10 toenails clipped"
217
+
218
+ - name: tolerating haircut
219
+ program_type: toleration
220
+ target_activity: haircut by caregiver or stylist
221
+ end_goal_description: "full haircut (10+ minutes) completed without refusal, leaving the chair, or safety behaviors"
222
+ shaping_steps:
223
+ - "Sit in the chair with cape on for 30 seconds"
224
+ - "Tolerate 1 minute of quiet sitting with cape"
225
+ - "Tolerate 2 minutes of brief clipping"
226
+ - "Tolerate 5 minutes of clipping"
227
+ - "Tolerate 10 minutes of clipping"
228
+ - "Tolerate full haircut routine"
229
+
230
+ - name: tolerating showering assistance
231
+ program_type: toleration
232
+ target_activity: showering / bathing performed with caregiver assistance
233
+ end_goal_description: "full assisted shower routine completed without refusal, exiting the shower, or safety behaviors"
234
+ shaping_steps:
235
+ - "Stand in shower for 30 seconds with water off"
236
+ - "Tolerate 10 seconds of water on body"
237
+ - "Tolerate 30 seconds of water on body"
238
+ - "Tolerate soap application to torso"
239
+ - "Tolerate full body wash by caregiver"
240
+ - "Full assisted shower routine"
241
+
242
+ - name: tolerating medical exam
243
+ program_type: toleration
244
+ target_activity: routine medical exam by clinician
245
+ end_goal_description: "full routine exam (stethoscope, blood pressure cuff, otoscope, temperature) completed without refusal or safety behaviors"
246
+ shaping_steps:
247
+ - "Enter and sit in the exam room"
248
+ - "Tolerate stethoscope contact for 5 seconds"
249
+ - "Tolerate blood pressure cuff on arm"
250
+ - "Tolerate otoscope in ear"
251
+ - "Tolerate temperature reading"
252
+ - "Tolerate full routine exam"
253
+
254
+ - id: home
255
+ name: Home Skills
256
+ learner_profiles: [adolescent, adult]
257
+ skills:
258
+ - name: making the bed
259
+ chain_type_preferred: forward
260
+ steps:
261
+ - Strip any disheveled linens and lay them flat.
262
+ - Straighten the bottom sheet across the mattress.
263
+ - Place the top sheet evenly over the mattress.
264
+ - Tuck the top sheet in at the foot of the bed.
265
+ - Place the comforter over the top sheet.
266
+ - Place pillows at the head of the bed.
267
+ - Smooth the comforter surface.
268
+ - name: meal cleanup
269
+ chain_type_preferred: forward
270
+ steps:
271
+ - Stack used dishes on one side of the table.
272
+ - Carry dishes to the sink or dishwasher.
273
+ - Rinse food residue into the trash or disposal.
274
+ - Load dishes into the dishwasher (or wash by hand).
275
+ - Wipe the table with a damp cloth.
276
+ - Put away any leftover food into containers.
277
+ - Place containers in the refrigerator.
278
+ - name: dishwashing by hand
279
+ chain_type_preferred: forward
280
+ steps:
281
+ - Scrape food residue into the trash.
282
+ - Fill the sink with warm, soapy water.
283
+ - Wash each dish systematically with a sponge.
284
+ - Rinse each dish under clean water.
285
+ - Place each dish on the drying rack.
286
+ - Drain the sink and wipe it down.
287
+ - name: operating the dishwasher
288
+ chain_type_preferred: forward
289
+ steps:
290
+ - Collect dirty dishes from the table and sink area.
291
+ - Scrape any solid food residue into the trash.
292
+ - Rinse dishes briefly under water.
293
+ - Open the dishwasher door.
294
+ - Load plates and bowls into the bottom rack.
295
+ - Load cups, glasses, and small items into the top rack.
296
+ - Place utensils handle-down in the utensil basket.
297
+ - Add detergent to the detergent dispenser.
298
+ - Close the dishwasher door.
299
+ - Select the appropriate wash cycle and press Start.
300
+ - name: unloading the dishwasher
301
+ chain_type_preferred: forward
302
+ steps:
303
+ - Open the dishwasher once the cycle is complete.
304
+ - Unload the top rack first (cups, glasses, small items).
305
+ - Put cups and glasses away in the correct cabinet.
306
+ - Unload the utensil basket.
307
+ - Put utensils away in the drawer.
308
+ - Unload the bottom rack (plates, bowls, larger items).
309
+ - Put plates and bowls away in the correct cabinet.
310
+ - Close the dishwasher door.
311
+ - name: doing laundry
312
+ chain_type_preferred: total_task
313
+ steps:
314
+ - Sort laundry by color (lights / darks / whites).
315
+ - Load the washing machine with one color group.
316
+ - Add detergent according to instructions.
317
+ - Select the correct wash cycle and start the machine.
318
+ - When complete, move clothes to the dryer.
319
+ - Start the dryer on the correct cycle.
320
+ - When dry, fold clothes by category.
321
+ - Put folded clothes away in drawers or closet.
322
+ - name: simple meal preparation (sandwich)
323
+ chain_type_preferred: total_task
324
+ steps:
325
+ - Collect all ingredients (bread, filling, spread).
326
+ - Place two slices of bread on the plate.
327
+ - Apply spread to one slice.
328
+ - Place filling on the spread slice.
329
+ - Place the second slice on top.
330
+ - Cut the sandwich in half if desired.
331
+ - Place on a plate to serve.
332
+
333
+ - id: community
334
+ name: Community Participation Skills
335
+ learner_profiles: [adolescent, adult]
336
+ skills:
337
+ - name: crossing a street safely
338
+ chain_type_preferred: total_task
339
+ steps:
340
+ - Approach the crosswalk and stop at the curb.
341
+ - Look left, look right, look left again.
342
+ - Identify the walk signal or absence of traffic.
343
+ - Begin to cross while continuing to scan for vehicles.
344
+ - Cross directly to the opposite curb.
345
+ - Step up onto the sidewalk.
346
+ - name: ordering in a restaurant
347
+ chain_type_preferred: total_task
348
+ steps:
349
+ - Approach the counter or wait to be seated.
350
+ - Review the menu.
351
+ - Decide on a choice.
352
+ - Place the order verbally (or with an AAC device).
353
+ - Respond to clarifying questions.
354
+ - Pay using money or a card.
355
+ - Wait for the order and collect it.
356
+ - name: using public transportation
357
+ chain_type_preferred: forward
358
+ steps:
359
+ - Confirm the correct stop or station.
360
+ - Purchase or have the fare ready.
361
+ - Wait at the designated boarding area.
362
+ - Board the vehicle when it arrives.
363
+ - Pay the fare or present the pass.
364
+ - Sit or stand safely.
365
+ - Monitor the route for the correct stop.
366
+ - Exit at the correct stop.
367
+
368
+ - id: vocational
369
+ name: Vocational Skills
370
+ learner_profiles: [adolescent, adult]
371
+ skills:
372
+ - name: clocking in at start of shift
373
+ chain_type_preferred: total_task
374
+ steps:
375
+ - Arrive at the workplace by the scheduled time.
376
+ - Locate the time clock or digital system.
377
+ - Enter identification (ID card / badge / code).
378
+ - Confirm the clock-in timestamp.
379
+ - Notify the supervisor or check the day's task list.
380
+ - Proceed to the assigned workstation.
381
+ - name: completing assigned tasks in order
382
+ chain_type_preferred: forward
383
+ steps:
384
+ - Review the task list for the shift.
385
+ - Gather needed materials for the first task.
386
+ - Complete the first task to the specified standard.
387
+ - Mark the first task complete on the list.
388
+ - Return materials to storage.
389
+ - Move to the next task on the list and repeat.
390
+
391
+ - id: independent_living
392
+ name: Independent Living Skills
393
+ learner_profiles: [adult]
394
+ skills:
395
+ - name: taking medication as prescribed
396
+ chain_type_preferred: total_task
397
+ steps:
398
+ - Check the medication schedule at the scheduled time.
399
+ - Collect the correct medication bottle.
400
+ - Confirm the dose against the label.
401
+ - Measure or count the correct dose.
402
+ - Take the medication with water.
403
+ - Log the dose in the medication tracker.
404
+ - Return the bottle to its storage location.
405
+ - name: scheduling a medical appointment
406
+ chain_type_preferred: forward
407
+ steps:
408
+ - Identify the reason for the appointment.
409
+ - Locate the provider's contact information.
410
+ - Call or use the online system.
411
+ - Provide name, date of birth, and insurance information.
412
+ - Request an available appointment time.
413
+ - Confirm the appointment and save it to the calendar.
configs/task_analysis/template.yaml ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Task Analysis teaching program template — TRACE v1
2
+
3
+ template_id: task1_task_analysis_v1
4
+ task_type: teaching_program
5
+ method_id: task_analysis
6
+
7
+ system_prompt: |
8
+ You are an expert ABA (Applied Behavior Analysis) clinical assistant. You help
9
+ Board Certified Behavior Analysts and staff design teaching programs for
10
+ individuals with autism. Your responses are clinically accurate, individualized
11
+ to the learner profile, follow BACB ethical guidelines, and reference no real
12
+ client data. Select the appropriate teaching method (DTT, NET, Task Analysis,
13
+ FCT, BST, PRT) based on the skill target and learner profile.
14
+
15
+ user_variants:
16
+ - |
17
+ Generate a teaching program for the following target.
18
+
19
+ Skill Target: {skill_target}
20
+ Curriculum Reference: {curriculum_ref}
21
+ Learner Profile: {learner_profile_name}
22
+ Current Mastery: {mastery_state_name}
23
+ Teaching Method: Task Analysis / Chaining
24
+
25
+ Provide the full program structure appropriate to the selected method.
26
+
27
+ - |
28
+ I need a Task Analysis program for this multi-step skill.
29
+
30
+ Target: {skill_target}
31
+ Curriculum: {curriculum_ref}
32
+ Learner: {learner_profile_name}
33
+ Mastery Status: {mastery_state_name}
34
+
35
+ Please include the task analysis (numbered steps), chain type, prompt strategy per step, error correction, reinforcement, and mastery criteria.
36
+
37
+ - |
38
+ Design a chaining program for the following.
39
+
40
+ Skill: {skill_target}
41
+ Curriculum: {curriculum_ref}
42
+ Learner Level: {learner_profile_name}
43
+ Baseline: {mastery_state_name}
44
+
45
+ Include the full Task Analysis structure.
46
+
47
+ assistant_template: |
48
+ ## Program Overview
49
+ - Target skill: {skill_target}
50
+ - Curriculum: {curriculum_ref}
51
+ - Learner: {learner_profile_name}
52
+ - Current mastery: {mastery_state_name}
53
+
54
+ This Task Analysis program teaches the target skill as a chain of discrete steps, each taught and reinforced until the learner can complete the full sequence independently.
55
+
56
+ ## Task Analysis
57
+ Chain type: {chain_type_name}
58
+ {chain_type_description}
59
+
60
+ Steps:
61
+ {step_list}
62
+
63
+ ## Prompt Strategy Per Step
64
+ Strategy: {prompt_strategy_name}
65
+ {prompt_strategy_description}
66
+ Current prompt level: At the {mastery_state_short} stage, {current_prompt_guidance}
67
+
68
+ ## Error Correction Procedure
69
+ {error_correction_name}: {error_correction_steps}
70
+
71
+ ## Reinforcement
72
+ {reinforcement_schedule_name} — {reinforcement_description}.
73
+ Pair the terminal reinforcer with the natural consequence of the completed task (e.g., clean hands, clean dishes, completed routine).
74
+
75
+ ## Mastery Criteria
76
+ {mastery_criterion_text}
77
+
78
+ ## Data Collection
79
+ Per-step data: each step is scored for independence / prompt level on every trial. IOA scheduled every {ioa_frequency} sessions minimum; target agreement ≥ 80%.
80
+
81
+ ## Generalization & Maintenance Plan
82
+ Post-mastery: probe the full chain across {n_generalization_settings} different settings and with {n_generalization_therapists} different adults. Maintenance probes weekly for 4 weeks, then monthly.
configs/task_analysis/template_toleration.yaml ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Toleration / Systematic-Desensitization program template — TRACE v1
2
+ # Used for skills where the goal is NOT learner independence but rather the
3
+ # learner *allowing* caregiver/staff to perform the activity, with duration
4
+ # shaped incrementally across trials.
5
+
6
+ template_id: task1_toleration_v1
7
+ task_type: teaching_program
8
+ method_id: task_analysis # Administratively in the task_analysis area; clinically distinct approach
9
+
10
+ system_prompt: |
11
+ You are an expert ABA (Applied Behavior Analysis) clinical assistant. You help
12
+ Board Certified Behavior Analysts and staff design teaching programs for
13
+ individuals with autism. Your responses are clinically accurate, individualized
14
+ to the learner profile, follow BACB ethical guidelines, and reference no real
15
+ client data. Select the appropriate teaching method (DTT, NET, Task Analysis,
16
+ FCT, BST, PRT) based on the skill target and learner profile.
17
+
18
+ user_variants:
19
+ - |
20
+ Generate a teaching program for the following target.
21
+
22
+ Skill Target: {skill_target}
23
+ Curriculum Reference: {curriculum_ref}
24
+ Learner Profile: {learner_profile_name}
25
+ Current Mastery: {mastery_state_name}
26
+ Teaching Method: Toleration / Systematic Desensitization
27
+
28
+ The goal is for the learner to allow the activity to be performed (not to perform it independently). Provide the full program structure including shaping steps and refusal/safety procedures.
29
+
30
+ - |
31
+ I need a toleration program for this self-care / medical / caregiver-delivered skill.
32
+
33
+ Target: {skill_target}
34
+ Curriculum: {curriculum_ref}
35
+ Learner: {learner_profile_name}
36
+ Mastery Status: {mastery_state_name}
37
+
38
+ Please include the toleration end goal, shaping progression, antecedent strategies, reinforcement plan, refusal / safety procedures, and mastery criteria.
39
+
40
+ - |
41
+ Design a systematic desensitization / toleration program for the following.
42
+
43
+ Skill: {skill_target}
44
+ Curriculum: {curriculum_ref}
45
+ Learner Level: {learner_profile_name}
46
+ Baseline: {mastery_state_name}
47
+
48
+ The learner currently refuses or exhibits safety behaviors during the activity; the program should build tolerance gradually.
49
+
50
+ assistant_template: |
51
+ ## Program Overview
52
+ - Target skill: {skill_target}
53
+ - Curriculum: {curriculum_ref}
54
+ - Learner: {learner_profile_name}
55
+ - Current mastery: {mastery_state_name}
56
+ - Approach: Toleration / systematic desensitization (caregiver-delivered activity)
57
+
58
+ This program builds tolerance for {target_activity} through systematic shaping. The learner is not being taught to perform the skill independently — the goal is to allow the caregiver or staff to perform it across an incrementally increasing duration, without refusal, crying, self-injurious behavior, or aggression.
59
+
60
+ ## Toleration End Goal
61
+ {end_goal_description}
62
+
63
+ ## Shaping Progression
64
+ Teach by moving through these shaping steps in order. Advance to the next step only when the current step is tolerated for 3 consecutive sessions without safety behaviors.
65
+
66
+ {shaping_step_list}
67
+
68
+ Current shaping step: At the {mastery_state_short} stage, {current_step_guidance}
69
+
70
+ ## Antecedent Strategies
71
+ - Pre-warn the learner before the activity begins (verbal or visual schedule).
72
+ - Offer a preferred item or sensory tool to hold during the activity.
73
+ - Where possible, offer a choice of position, location, or starting point.
74
+ - Keep the environment calm and predictable; avoid other demands immediately before or after.
75
+
76
+ ## Reinforcement
77
+ Deliver high-magnitude, high-preference reinforcement contingent on successful toleration of the target duration. Pair with effusive praise. Reinforcement is continuous (CRF) during acquisition of each shaping step.
78
+
79
+ ## Refusal and Safety Procedures
80
+ - If the learner refuses or turns away: pause briefly (5–10 seconds), re-present the activity at the current or previous shaping step.
81
+ - If problem behavior (crying, property destruction, SIB, aggression, peer aggression) emerges: stop the activity immediately, implement the facility crisis plan, and document the incident.
82
+ - Do NOT remove the activity contingent on safety behaviors in a way that establishes an escape-maintained pattern — consult supervising BCBA for the specific escape-extinction protocol appropriate to this learner.
83
+
84
+ ## Mastery Criteria
85
+ {mastery_criterion_text}
86
+
87
+ ## Data Collection
88
+ Per-trial: duration tolerated before refusal, presence / absence of safety behaviors (crying, property destruction, SIB, aggression), staff support level, and shaping step attempted. IOA scheduled every {ioa_frequency} sessions minimum; target agreement ≥ 80%.
89
+
90
+ ## Generalization & Maintenance Plan
91
+ Post-mastery: probe the full routine across {n_generalization_settings} different settings and with {n_generalization_therapists} different adults (including the primary caregiver if relevant). Maintenance probes weekly for 4 weeks, then monthly.
data/splits/sanity.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/splits/test.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/splits/train.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3553e10b4b04249f047fb5c035677c1cef5e32e89e3aad8686aa09b7ac2b5ca8
3
+ size 16283798
data/splits/valid.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
docs/curation/LEGEND.md ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Session-Log Reading Guide
2
+
3
+ Reference for reading Task 2 (session-interpretation) examples. Keep this
4
+ open in a side pane while browsing `review.md`.
5
+
6
+ ---
7
+
8
+ ## Session header line
9
+
10
+ ```
11
+ Session 3 — 2026-10-10 — 45 min — 1 observer
12
+ ```
13
+
14
+ Or, for inter-observer agreement sessions:
15
+
16
+ ```
17
+ Session 5 — 2026-10-14 — IOA SESSION — 2 observers
18
+ ```
19
+
20
+ | Piece | Meaning |
21
+ |---|---|
22
+ | `Session N` | Nth observation session in this log |
23
+ | `2026-10-10` | Synthetic date (always in 2026 range) |
24
+ | `45 min` | Session duration |
25
+ | `1 observer` / `2 observers` | Single observer (primary) or IOA session |
26
+ | `IOA SESSION` | (If present) agreement check; behavior lines in this session show a trailing `IOA X% agreement` |
27
+
28
+ ---
29
+
30
+ ## Skill data line (acquisition programs)
31
+
32
+ ```
33
+ ordering in a restaurant: 9/13 correct (67%); latency 3.1s; prompts 5
34
+ ```
35
+
36
+ | Piece | Meaning |
37
+ |---|---|
38
+ | `9/13 correct` | Correct trials / total trials |
39
+ | `67%` | Accuracy |
40
+ | `latency 3.1s` | Mean response latency after SD presentation |
41
+ | `prompts 5` | Number of trials on which any prompt was delivered |
42
+
43
+ ---
44
+
45
+ ## Behavior data lines
46
+
47
+ Each target behavior gets its own measurement format matched to the
48
+ behavior's clinical shape. Generic `freq` lines apply to most behaviors;
49
+ behaviors with clinically distinctive shapes have behavior-specific
50
+ measurements.
51
+
52
+ ### Generic frequency behaviors
53
+
54
+ ```
55
+ Aggression: freq 3
56
+ Elopement: freq 2
57
+ SIB: freq 5
58
+ Property destruction: freq 1
59
+ Non-compliance: freq 4
60
+ Verbal aggression: freq 2
61
+ ```
62
+
63
+ `freq N` = N occurrences this session.
64
+
65
+ ### Tantrum (includes duration)
66
+
67
+ ```
68
+ Tantrum: freq 2, duration 7m total
69
+ ```
70
+
71
+ ### Stereotypy and mouthing (include partial-interval recording)
72
+
73
+ ```
74
+ Motor stereotypy: freq 8; PIR 18%
75
+ Vocal stereotypy: freq 5; PIR 12%
76
+ Mouthing: freq 6; PIR 15%
77
+ ```
78
+
79
+ `PIR X%` = partial-interval recording: % of intervals in which the behavior
80
+ occurred at any point.
81
+
82
+ ### Pica (attempts vs successful ingestion)
83
+
84
+ ```
85
+ Pica: attempts 3 (2 unsuccessful — staff retrieved item before ingestion; 1 successful — item ingested)
86
+ ```
87
+
88
+ Staff often intercept pica attempts; tracking attempts / successful
89
+ separately preserves the severity signal that a raw frequency loses.
90
+
91
+ ### Fecal smearing / scatolia (attempts vs completed smearing)
92
+
93
+ ```
94
+ Fecal smearing (scatolia): attempts 2 (1 intercepted — staff redirected before smearing; 1 completed — feces transferred to skin, clothing, or surface)
95
+ ```
96
+
97
+ Same intercepted / completed split as pica — clinically critical because
98
+ staff responsiveness directly shapes outcome severity.
99
+
100
+ ### Toileting (four-count voiding log)
101
+
102
+ ```
103
+ Toileting accident (urine or bowel): urine: 3 in-toilet / 2 accidents; BM: 0 in-toilet / 1 accidents
104
+ ```
105
+
106
+ Mirrors a standard clinical voiding log. The deceleration target is
107
+ **accidents** (urine + BM), but successful in-toilet voids are tracked
108
+ alongside for context:
109
+
110
+ - `urine: X in-toilet / Y accidents` — successful urinations vs. urine accidents
111
+ - `BM: P in-toilet / Q accidents` — successful bowel movements vs. BM accidents
112
+
113
+ ### IOA annotation
114
+
115
+ On sessions marked `IOA SESSION`, each behavior line ends with `; IOA X% agreement`:
116
+
117
+ ```
118
+ Aggression: freq 3; IOA 88% agreement
119
+ ```
120
+
121
+ ---
122
+
123
+ ## ABC line
124
+
125
+ ```
126
+ ABC (elopement): A = peer took toy; B = ran from room; C = staff retrieved learner
127
+ ```
128
+
129
+ - `A` = Antecedent (what happened immediately before the behavior)
130
+ - `B` = Behavior (operational description)
131
+ - `C` = Consequence (what happened immediately after)
132
+
133
+ ABC evidence feeds the behavior-function hypothesis in the assistant's
134
+ response (escape / attention / tangible / automatic, per Iwata et al. 1994
135
+ and Hanley, Iwata, & McCord 2003).
136
+
137
+ ---
138
+
139
+ ## Function-hypothesis line (log header)
140
+
141
+ Near the top of a log you'll see one line per tracked behavior:
142
+
143
+ ```
144
+ 1. Fecal smearing (scatolia) — function hypothesized: automatic
145
+ 2. Aggression — function hypothesized: escape
146
+ ```
147
+
148
+ These are the sampled (gold) functions that the interpretation response
149
+ should corroborate with the evidence in the log.
150
+
151
+ ---
152
+
153
+ ## Program list
154
+
155
+ ```
156
+ Programs tracked this session block:
157
+ 1. ordering in a restaurant (AFLS Community) — task_analysis
158
+ 2. mands for a break (VB-MAPP Mand L1) — net
159
+ 3. tacts colors of objects (VB-MAPP Tact L2) — dtt
160
+ ```
161
+
162
+ Each entry names the skill target, its curriculum location, and the
163
+ teaching method.
164
+
165
+ ---
166
+
167
+ ## Behavioral indicator block (across sessions)
168
+
169
+ ```
170
+ BEHAVIORAL OBSERVATIONS (across sessions)
171
+ - Increased response latency (3–5× baseline)
172
+ - Pushing materials away from the work area
173
+ - Vocal refusal ("no", "I don't want to")
174
+ ```
175
+
176
+ These are pattern-specific indicator clusters (frustration, engagement, or
177
+ disengagement) sampled from the `behavioral_indicators` block in `configs/session_interpretation/taxonomy.yaml`.
docs/curation/README.md ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Curation Workflow
2
+
3
+ The curation step turns the pipeline-held-out `curation_pool.jsonl` into the
4
+ two evaluation files shipped in `data/splits/`:
5
+
6
+ - **`test.jsonl`** — evaluation set. The paper's headline metrics come from this set.
7
+ - **`sanity.jsonl`** — 20-example smoke-test set for development.
8
+
9
+ In TRACE v1 the whole curation pool is promoted as the test corpus (no hand
10
+ curation). The **review + compile** scripts are still part of the pipeline
11
+ because (a) regenerating the test and sanity splits is deterministic from the
12
+ pool, and (b) anyone adapting TRACE to a new clinical domain can reuse the
13
+ same workflow with their own taxonomy.
14
+
15
+ ---
16
+
17
+ ## Two scripts
18
+
19
+ | Script | What it does |
20
+ |---|---|
21
+ | `src/prepare_curation.py` | Renders `curation_pool.jsonl` as a browseable Markdown document in `docs/curation/review.md` — one candidate per section, grouped by task × category, with gold labels and provenance visible. |
22
+ | `src/compile_curation.py` | Splits the curation pool into `data/splits/test.jsonl` (the remainder) and `data/splits/sanity.jsonl` (20 examples, largest-remainder stratified by category). Deterministic under `--seed`. |
23
+
24
+ ---
25
+
26
+ ## Commands
27
+
28
+ ```bash
29
+ # Regenerate the browseable review document.
30
+ uv run python src/prepare_curation.py
31
+
32
+ # Compile the test + sanity splits from the pool.
33
+ uv run python src/compile_curation.py
34
+
35
+ # Override the sanity size (default 20) or seed (default 42).
36
+ uv run python src/compile_curation.py --sanity-n 20 --seed 42
37
+ ```
38
+
39
+ After `compile_curation.py` runs you'll have:
40
+ - `data/splits/test.jsonl` — the evaluation set
41
+ - `data/splits/sanity.jsonl` — the smoke-test set
42
+
43
+ ---
44
+
45
+ ## Reading `review.md`
46
+
47
+ `review.md` renders each candidate with:
48
+ - A heading showing the task type, category, and `example_id`.
49
+ - Gold labels inline (method, domain, level, learner profile, mastery state;
50
+ or pattern class, behavior functions, escalation, confidence, crisis-plan flag).
51
+ - A short provenance line (for session interpretation: number of sessions,
52
+ number of behaviors, whether IOA is included, whether ABC is included).
53
+ - The full user message (the teaching-program prompt or the session log).
54
+ - The full assistant message (the structured response).
55
+
56
+ Use `LEGEND.md` as a side-pane reference for the session-log notation in
57
+ Task 2 examples.
58
+
59
+ ---
60
+
61
+ ## Flagging issues for re-generation
62
+
63
+ If you spot a clinical inaccuracy while browsing `review.md`:
64
+
65
+ 1. Note the candidate's `example_id` (shown in its heading).
66
+ 2. Look up the candidate's `meta.provenance.taxonomy_cells` in the JSONL to
67
+ identify which taxonomy dimension produced the issue.
68
+ 3. Edit the relevant YAML under `configs/` (or the renderer in `src/generators/`
69
+ if it's a rendering-logic issue) — *never* hand-edit individual JSONL
70
+ entries, because the fix should propagate to every example that sampled
71
+ the same cells.
72
+ 4. Regenerate the corpus with `src/generate.py --all`, re-split with
73
+ `src/split_data.py`, and re-compile with `src/compile_curation.py`.
74
+
75
+ Every clinical-review flag during v1 development landed as a single
76
+ targeted taxonomy edit plus a full regeneration — never a hand-edit of
77
+ the JSONL. This is the invariant the pipeline relies on: fixes must
78
+ propagate through the taxonomy to be systematic across the corpus.
79
+
80
+ ---
81
+
82
+ ## Adapting the workflow to a new clinical domain
83
+
84
+ The scripts are domain-agnostic. To port to a different dataset:
85
+
86
+ - `prepare_curation.py` just reads `data/splits/curation_pool.jsonl` and
87
+ renders it. Works on any JSONL with the same envelope shape.
88
+ - `compile_curation.py` stratifies on `category_of(example)` — a small
89
+ function near the top of the script. Change its logic to use whichever
90
+ field best represents your category (the `method` field for TRACE's
91
+ teaching programs; the `pattern_class` field for session interpretation).
92
+ - Everything else — largest-remainder allocation, deterministic splitting,
93
+ provenance preservation — carries over unchanged.
docs/data-statement.md ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Data Statement for TRACE (v1)
2
+
3
+ **TRACE** — **T**axonomy-**R**eferenced **A**BA **C**linical **E**xamples
4
+
5
+ Template: Bender, E. M., & Friedman, B. (2018). *Data Statements for Natural Language Processing: Toward Mitigating System Bias and Enabling Better Science.* Transactions of the Association for Computational Linguistics. https://aclanthology.org/Q18-1041/
6
+
7
+ ---
8
+
9
+ ## A. Curation Rationale
10
+
11
+ ### Why this data, and why this composition?
12
+ Applied Behavior Analysis (ABA) is a clinical discipline with high documentation workload — BCBAs produce teaching programs and interpret multi-session behavioral logs continuously across their caseloads. Existing general-purpose language assistants are both unspecialized to ABA's structured conventions (operational definitions, VB-MAPP / AFLS curricula, functional-analysis frameworks) and unvetted against its clinical standards (BACB Ethics Code 2020; ABAI 2010 Position Statement on Restraint). This dataset was curated to support fine-tuning a small on-device language model (Gemma 3n E2B) specifically for **drafting** — i.e., producing first-pass teaching programs and session interpretations that a BCBA then reviews and revises — without any ingestion of real client data.
13
+
14
+ Two tasks are represented because they are the two highest-frequency authorial tasks in an ABA clinical team's week: (1) drafting teaching programs for new or evolving skill targets, and (2) interpreting session-by-session behavioral data to adjust programming. They share a taxonomy (learner profiles, mastery states, teaching methods, target behaviors) but diverge in structure (teaching-program responses are single-program documents; session-interpretation responses are diagnostic summaries).
15
+
16
+ ### Why synthetic?
17
+ Real ABA session data contains 45 CFR 164 protected health information (PHI) under HIPAA and is additionally subject to BACB Ethics Code confidentiality rules (section 2.03, section 2.05). Any public dataset drawn from real client data would require individual consent, facility release, and de-identification that cannot reliably preserve the clinical detail required for training. Synthetic generation avoids these constraints by construction: the data never represented a real person in the first place. The trade-off — distributions may not match any one real caseload — is made explicit in the dataset card (section 6.5) and flagged to users.
18
+
19
+ ---
20
+
21
+ ## B. Language Variety
22
+
23
+ All text is in **English (en-US)**, written in standard American clinical register. The authorial style follows the conventions of:
24
+ - **Peer-reviewed ABA literature** — *Journal of Applied Behavior Analysis* (JABA), *Behavior Analysis in Practice* (BAP).
25
+ - **Reference textbook** — Cooper, Heron, & Heward (2020), *Applied Behavior Analysis* (3rd ed.).
26
+ - **Professional documentation** — BACB Ethics Code (2020), ABAI Position Statements.
27
+
28
+ Within that register, the dataset covers:
29
+ - **Teaching-program language** — imperative instructional prose (stimulus control description, prompt hierarchy specification, reinforcement-schedule specification, error-correction procedure, mastery criterion).
30
+ - **Session-log language** — telegraphic data-sheet notation (`accuracy 6/10 (60%); freq 2; duration 3m; IOA 88%`) mixed with ABC-entry prose.
31
+ - **Clinical-interpretation language** — structured prose matching the assistant template's headings (Clinical Concerns, Pattern Classification, Behavior Function Hypothesis, Programming Recommendations, Crisis Plan when applicable, Confidence).
32
+
33
+ There is no dialectal variation, no code-switching, no second-language speaker voice. Generalization to non-US clinical conventions (BCBA-D vs. international RBT frameworks, different curricula) is explicitly out of scope for v1.
34
+
35
+ ---
36
+
37
+ ## C. Speaker Demographic
38
+
39
+ **Not applicable.** The dataset has no speakers. All content is programmatically generated from taxonomy configs written by a single author with ABA domain fluency. The written conventions inherit from the sources listed in section B.
40
+
41
+ ---
42
+
43
+ ## D. Annotator Demographic
44
+
45
+ **Not applicable in the conventional sense.** No human annotators labeled instances after generation. Gold labels are emitted at generation time from the same taxonomy cells that produced the example — they are by-construction the ground truth of the sampling step.
46
+
47
+ **Quasi-annotator role: clinical-accuracy reviewer.** One individual (the dataset author) performed ad-hoc clinical-accuracy review of generated candidates during iteration. This reviewer:
48
+
49
+ - Has practitioner-adjacent exposure to ABA clinical settings (not a BCBA; not a BCaBA; not an RBT in active practice).
50
+ - Is a native English speaker, fluent in the English clinical-documentation register.
51
+ - Performed review by browsing full-text candidate renderings and flagging clinical implausibilities against published sources (CHH 2020; BACB Ethics Code 2020; JABA papers for operational definitions).
52
+
53
+ **What this means for the dataset:**
54
+ - Clinical-accuracy review was grounded in published sources, not in one reviewer's clinical judgment alone. Where reviewer uncertainty could not be resolved from a source, the item was retained with a flag rather than decided by individual preference.
55
+ - Full BCBA-level clinical validation (multi-reviewer, κ-scored) was not performed on v1.
56
+ - Bias from the reviewer's particular exposure (specific facility types, specific learner age ranges, specific behavioral topographies) is possible. The taxonomy's grounding in CHH and JABA is the mitigating control.
57
+
58
+ ---
59
+
60
+ ## E. Speech Situation
61
+
62
+ **Not applicable** (no speech). The imagined reader/user for the generated text:
63
+ - **Teaching programs** — a BCBA or behavior technician reading a first-pass program draft for clinical review, before it is finalized and implemented with a learner.
64
+ - **Session interpretations** — a BCBA or supervising analyst reading a summary of the most recent N sessions of a learner's data, as a decision-support document for programming adjustments.
65
+
66
+ The modality is written documentation, asynchronous, read in quiet clinical-office settings. The register is clinical-professional.
67
+
68
+ ---
69
+
70
+ ## F. Text Characteristics
71
+
72
+ ### Genre
73
+ - Clinical documentation (teaching programs, behavioral session interpretations).
74
+ - Instructional prose interleaved with structured data-sheet notation.
75
+
76
+ ### Structural Conventions
77
+ - Teaching programs follow a fixed section layout (Program Overview, Stimulus Control, Prompt Hierarchy, Reinforcement, Error Correction, Mastery Criteria, Generalization & Maintenance Plan). Variants exist for DTT, NET, and Task Analysis; Task Analysis has a distinct toleration-program layout.
78
+ - Session logs are rendered as pseudo-data-sheet blocks, one block per session, with per-program accuracy lines, per-behavior measurement lines, and optional ABC / IOA entries.
79
+ - Session interpretations follow a fixed layout (Clinical Concerns, Pattern Classification, Behavior Function Hypothesis, Programming Recommendations, Crisis Plan, Confidence).
80
+
81
+ ### Domain-specific notation
82
+ - ABA abbreviations used freely: SD (discriminative stimulus), MO (motivating operation), ABC (antecedent-behavior-consequence), IOA (interobserver agreement), PIR (partial-interval recording), CRF (continuous reinforcement), FR-2, VR-3, etc.
83
+ - Measurement notation follows the schema in `schema-v1.md section 2.3`.
84
+
85
+ ### Distribution characteristics
86
+ - Target behaviors: sampled per pattern (0–3 per log).
87
+ - Accuracy trajectories: driven by pattern-specific generators (ascending for mastery_progression, descending for regression, etc.).
88
+ - Behavior frequencies: pattern-trajectory-driven; bounded.
89
+ - Severity: per-behavior `typical_severity` flag (low/moderate/high) feeds escalation logic.
90
+
91
+ ### What the dataset *does not* contain
92
+ - Personal names.
93
+ - Real identifiers.
94
+ - Dates outside 2026-01-01 to 2026-12-31.
95
+ - Non-English text.
96
+ - Informal register (chat-style, SMS, social media).
97
+ - Off-topic content.
98
+ - Adversarial or jailbreak examples.
99
+ - Real session notes or documentation.
100
+
101
+ ---
102
+
103
+ ## G. Recording Quality
104
+
105
+ **Not applicable** (no recording).
106
+
107
+ ---
108
+
109
+ ## H. Other
110
+
111
+ ### Provenance guarantee
112
+ Every example carries full sampling provenance in `meta.provenance.taxonomy_cells`. This is the artifact that makes clinical auditing tractable: when a reviewer flags an implausible example, they can trace it to the exact cells that generated it and propose a taxonomy or compatibility-rule fix. This is a property of the dataset we consider essential and want future versions to preserve.
113
+
114
+ ### Known biases
115
+ - **Caseload-distribution bias.** Pattern frequencies are uniform across the 12 session-interpretation patterns for learnability, whereas real caseloads skew toward mastery_progression. Users training epidemiological models should reweight.
116
+ - **Curriculum bias.** VB-MAPP and AFLS are the only curricula covered. Practitioners using ABLLS-R, Essential for Living, PEAK, or other curricula should adapt.
117
+ - **Register bias.** All text is US clinical-professional. Non-US or non-clinical register is not represented.
118
+ - **Reviewer-exposure bias.** Clinical review was performed by a single reviewer. See section D.
119
+ - **Synthetic-distribution bias.** Sampling weights reflect clinical frequency where known but are approximate.
120
+
121
+ ### Transparency commitments
122
+ - Full generator code is public in the repository.
123
+ - Full taxonomy configs are public in the repository.
124
+ - Dataset regeneration from `(configs, seed)` is deterministic.
125
+ - Every version of the dataset is pinned to a specific repository commit hash.
126
+ - All citations used to ground taxonomy content are enumerated in `docs/research/citations-to-use.md`.
docs/dataset-card.md ADDED
@@ -0,0 +1,358 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ pretty_name: "TRACE — Taxonomy-Referenced ABA Clinical Examples"
3
+ license: cc-by-nc-4.0
4
+ language:
5
+ - en
6
+ size_categories:
7
+ - 1K<n<10K
8
+ task_categories:
9
+ - text-generation
10
+ tags:
11
+ - clinical
12
+ - applied-behavior-analysis
13
+ - autism
14
+ - small-language-model
15
+ - synthetic
16
+ - instruction-tuning
17
+ - taxonomy
18
+ - provenance
19
+ configs:
20
+ - config_name: default
21
+ data_files:
22
+ - split: train
23
+ path: data/splits/train.jsonl
24
+ - split: validation
25
+ path: data/splits/valid.jsonl
26
+ - split: test
27
+ path: data/splits/test.jsonl
28
+ - split: sanity
29
+ path: data/splits/sanity.jsonl
30
+ ---
31
+
32
+ # TRACE Dataset Card (v1)
33
+
34
+ **Name:** **TRACE** — **T**axonomy-**R**eferenced **A**BA **C**linical **E**xamples
35
+ **Version:** v1.0.0
36
+ **Date:** 2026-04-25
37
+ **Primary language:** English
38
+ **License (data):** CC BY-NC 4.0 · **License (code):** MIT
39
+ **Total examples:** 2,999
40
+ **Tasks:** 2 — (1) ABA teaching program generation, (2) behavioral session interpretation.
41
+ **Author:** Festus Kahunla (Drexel University).
42
+ **Publisher / maintained by:** [Pombo Labs](https://github.com/Pombo-Labs).
43
+ **Repository:** https://github.com/Pombo-Labs/TRACE
44
+
45
+ ---
46
+
47
+ ## 1. TL;DR
48
+
49
+ TRACE is a **synthetic instruction-tuning dataset** for two clinical tasks in Applied Behavior Analysis (ABA):
50
+
51
+ 1. **Teaching program generation** — given a learner profile and a skill target, produce a structured teaching program (DTT, NET, or Task Analysis/chaining) covering stimulus control, prompt hierarchy, reinforcement schedule, error correction, mastery criterion, and generalization plan.
52
+ 2. **Behavioral session interpretation** — given a multi-session behavioral log (accuracies, target behaviors with measurements, optional ABC and IOA data), produce clinical concerns, a pattern classification, a function hypothesis, programming recommendations, and (when applicable) a crisis plan.
53
+
54
+ The dataset was produced by a **taxonomy-driven generator** whose controlled vocabulary is grounded in the canonical ABA literature (Cooper, Heron, & Heward 2020; VB-MAPP; AFLS; key JABA papers). Every example carries full **provenance metadata** — the exact taxonomy cells that were sampled to produce it. Clinical accuracy was iterated via practitioner-in-the-loop ad-hoc review.
55
+
56
+ **Intended use.** Research. TRACE is designed for fine-tuning small language models (e.g., 4-bit Gemma 3n E2B with QLoRA) on ABA-flavored instruction-following, as a substrate for research into clinical-NLP data pipelines, taxonomy-driven synthetic generation, and small-LM evaluation.
57
+
58
+ **Not for:** autonomous clinical decisions; training on or combining with real client data; medical diagnosis; legal or insurance documentation. TRACE has not been clinically validated and is not a clinical tool. See section 6 for the responsibility disclaimer.
59
+
60
+ ---
61
+
62
+ ## 2. Dataset Composition
63
+
64
+ ### 2.1 Splits
65
+
66
+ | Split | Examples | Fraction | Purpose |
67
+ |---|---:|---:|---|
68
+ | `train.jsonl` | 2,549 | 85.0% | LoRA fine-tuning |
69
+ | `valid.jsonl` | 149 | 4.97% | Periodic validation loss during training |
70
+ | `test.jsonl` | 281 | 9.37% | Held-out evaluation (headline metrics) |
71
+ | `sanity.jsonl` | 20 | 0.67% | Training smoke-test (tiny stratified subset) |
72
+ | **Total** | **2,999** | 100% | |
73
+
74
+ Splits are **stratified by task × category** (method for teaching programs; pattern_class for session interpretation) so each split mirrors the corpus distribution. The test set is the full curation pool minus a 20-example stratified sanity carveout.
75
+
76
+ ### 2.2 Per-area breakdown
77
+
78
+ | Area | Count | Task type | Primary source |
79
+ |---|---:|---|---|
80
+ | DTT | 800 | teaching_program | VB-MAPP (array-based discrete-response domains) |
81
+ | NET | 500 | teaching_program | VB-MAPP (mand, social, spontaneous vocal, intraverbal) |
82
+ | Task Analysis | 500 | teaching_program | AFLS (basic_living, home, community, vocational, independent_living) |
83
+ | Session Interpretation | 1,200 | session_interpretation | 12 clinical trajectory patterns |
84
+
85
+ ### 2.3 DTT skill-domain distribution (800 total)
86
+
87
+ | VB-MAPP domain | Count |
88
+ |---|---:|
89
+ | Reading | 131 |
90
+ | LRFFC (Listener Responding by Feature/Function/Class) | 126 |
91
+ | Math | 120 |
92
+ | Visual Perceptual / Matching-to-Sample | 115 |
93
+ | Listener Responding | 111 |
94
+ | Tact | 106 |
95
+ | Writing | 91 |
96
+
97
+ Sampled across VB-MAPP Levels 1 (≈45%), 2 (≈40%), and 3 (≈15%).
98
+
99
+ ### 2.4 NET skill-domain distribution (500 total)
100
+
101
+ | VB-MAPP domain | Count |
102
+ |---|---:|
103
+ | Spontaneous Vocal | 143 |
104
+ | Mand (including bathroom, break, all-done) | 141 |
105
+ | Social Behavior & Play | 131 |
106
+ | Intraverbal | 85 |
107
+
108
+ Each NET program carries a Motivating Operation arrangement matched to the skill (deprivation, missing-item, break opportunity, completion opportunity, bathroom opportunity, peer presence, reciprocal conversation, routine lead-in) and is embedded in a natural context (snack, free play, transition, arrival, etc.).
109
+
110
+ ### 2.5 Task Analysis distribution (500 total)
111
+
112
+ | AFLS module | Count | | Program type | Count |
113
+ |---|---:|---|---|---:|
114
+ | Basic Living | 172 | | Independence | 413 |
115
+ | Home | 96 | | Toleration (systematic desensitization) | 87 |
116
+ | Community | 94 | | | |
117
+ | Vocational | 76 | | | |
118
+ | Independent Living | 62 | | | |
119
+
120
+ **Toleration programs** are a distinct program type for learners whose clinical goal is to *allow* a caregiver-delivered routine (tooth brushing, hair washing, nail clipping, haircuts, showering, medical exam) rather than to perform it independently. They use a shaping progression with duration targets rather than a step chain.
121
+
122
+ ### 2.6 Session Interpretation distribution (1,200 total)
123
+
124
+ Twelve trajectory patterns, roughly uniform (84–113 each):
125
+
126
+ | Pattern | Count |
127
+ |---|---:|
128
+ | regression | 113 |
129
+ | prompt_dependency | 109 |
130
+ | setting_event_trigger | 107 |
131
+ | rapid_acquisition | 106 |
132
+ | motivating_operation_shift | 104 |
133
+ | generalization_failure | 103 |
134
+ | mastery_progression | 100 |
135
+ | skill_loss_after_break | 100 |
136
+ | plateau | 96 |
137
+ | frustration_pattern | 91 |
138
+ | variable_performance | 87 |
139
+ | extinction_burst | 84 |
140
+
141
+ **Target behaviors** sampled into logs (presence per log; a log carries 0–3 behaviors):
142
+
143
+ | Behavior | Logs | Measurement shape |
144
+ |---|---:|---|
145
+ | Aggression | 95 | freq |
146
+ | Toileting (urine + BM, in-toilet + accidents) | 89 | `urine: X in-toilet / Y accidents; BM: P in-toilet / Q accidents` |
147
+ | Pica | 81 | `attempts N (X unsuccessful; Y successful)` |
148
+ | Self-Injurious Behavior (SIB) | 79 | freq |
149
+ | Verbal Aggression | 79 | freq |
150
+ | Fecal Smearing (scatolia) | 78 | `attempts N (X intercepted; Y completed)` |
151
+ | Property Destruction | 75 | freq |
152
+ | Elopement | 75 | freq |
153
+ | Non-compliance | 70 | freq |
154
+ | Tantrum | 66 | `freq N, duration Mm total` |
155
+ | Mouthing | 56 | `freq N; PIR P%` |
156
+ | Motor Stereotypy | 50 | `freq N; PIR P%` |
157
+ | Vocal Stereotypy | 48 | `freq N; PIR P%` |
158
+
159
+ ---
160
+
161
+ ## 3. Data Format
162
+
163
+ ### 3.1 Wire format
164
+
165
+ Each example is one JSONL line:
166
+
167
+ ```json
168
+ {
169
+ "messages": [
170
+ {"role": "system", "content": "<ABA clinical-assistant system prompt>"},
171
+ {"role": "user", "content": "<task-specific prompt>"},
172
+ {"role": "assistant", "content": "<structured clinical response>"}
173
+ ],
174
+ "meta": {
175
+ "task_type": "teaching_program" | "session_interpretation",
176
+ "example_id": "<deterministic hash, 16-hex>",
177
+ "gold_labels": { ... task-specific labels ... },
178
+ "provenance": {
179
+ "layer": 1,
180
+ "area": "dtt" | "net" | "task_analysis" | "session_interpretation",
181
+ "template_id": "...",
182
+ "taxonomy_cells": { ... exact sampled values ... },
183
+ "teacher_model": null,
184
+ "seed_tag": "...",
185
+ "generated_at": "..."
186
+ }
187
+ }
188
+ }
189
+ ```
190
+
191
+ Only `messages` is used for training (mask_prompt: true). `meta` is preserved for evaluation and provenance tracking.
192
+
193
+ ### 3.2 Gold labels per task
194
+
195
+ **Teaching program:**
196
+ ```
197
+ {
198
+ "method": "dtt" | "net" | "task_analysis",
199
+ "domain": "VB-MAPP.<domain>" | "AFLS.<module>",
200
+ "level": "L1" | "L2" | "L3" | ..., # VB-MAPP only
201
+ "learner_profile": "early" | "school_age" | "adolescent" | "adult",
202
+ "mastery_state": "emerging" | "developing" | "approaching" | "near" | "mastered" | "generalization",
203
+ "program_type": "independence" | "toleration", # Task Analysis only
204
+ "chain_type": "forward" | "backward" | "total_task" # Task Analysis only
205
+ }
206
+ ```
207
+
208
+ **Session interpretation:**
209
+ ```
210
+ {
211
+ "pattern_class": "<one of 12 patterns>",
212
+ "behavior_functions": { "<behavior_name>": "escape"|"attention"|"tangible"|"automatic"|"unknown" },
213
+ "escalation_level": 1 | 2 | 3 | 4,
214
+ "confidence": "high" | "moderate" | "low",
215
+ "crisis_plan_required": true | false
216
+ }
217
+ ```
218
+
219
+ ### 3.3 Provenance
220
+
221
+ Every sampled choice is recorded in `meta.provenance.taxonomy_cells`. For teaching programs this includes the skill target, prompt hierarchy, reinforcement schedule, error correction, and mastery criterion. For session interpretation it includes the pattern, number of sessions, behaviors sampled, functions inferred, and whether IOA and ABC data are present. This enables:
222
+
223
+ - **Traceability:** any clinical issue in a generated example can be traced back to the exact taxonomy cell that produced it.
224
+ - **Reproducibility:** the dataset can be regenerated deterministically from the configs and seed.
225
+ - **Stratification:** splits can be regenerated with different stratification keys without regenerating the corpus.
226
+ - **Ablation:** subsets can be constructed by filtering on provenance cells.
227
+
228
+ ---
229
+
230
+ ## 4. How It Was Built
231
+
232
+ ### 4.1 Architecture
233
+
234
+ ```
235
+ configs/
236
+ ├── shared/ # cross-area primitives (learner profiles, mastery states, prompt types)
237
+ ├── dtt/ # DTT area: taxonomy + template + compatibility
238
+ ├── net/ # NET area
239
+ ├── task_analysis/ # Chaining area (independence + toleration templates)
240
+ └── session_interpretation/ # Session interp area:
241
+ # taxonomy + compatibility + trajectory_rules
242
+ # + recommendations (per-pattern bullets) + template
243
+ src/generators/
244
+ ├── aba_dtt.py # per-area generator
245
+ ├── aba_net.py
246
+ ├── aba_task_analysis.py
247
+ └── aba_session_interp.py
248
+ src/generate.py # orchestrator
249
+ ```
250
+
251
+ ### 4.2 Generation loop
252
+
253
+ For each example:
254
+ 1. Sample a clinical configuration from the taxonomy, weighted by realistic clinical frequency (level weights, module weights, pattern weights).
255
+ 2. Apply compatibility rules (e.g., DTT errorless error correction pairs only with most-to-least prompting; certain patterns have bias toward certain behavior functions).
256
+ 3. Compute template slots — current-prompt-level guidance from mastery state; MO arrangement from skill keywords; mastery criterion; reinforcement schedule.
257
+ 4. Render `user_content` from a random user-variant.
258
+ 5. Render `assistant_content` by filling `{slots}` in the assistant template.
259
+ 6. Stamp `meta.gold_labels` and `meta.provenance`.
260
+ 7. Write as one JSONL line.
261
+
262
+ ### 4.3 Grounding
263
+
264
+ - **Skill curricula** — VB-MAPP milestones (Sundberg 2008) for verbal-behavior domains; AFLS (Partington & Mueller) for adaptive-living domains.
265
+ - **Teaching methods** — DTT (Lovaas 1987; Smith 2001); NET (Hart & Risley 1975 in CHH Ch. 18); Task Analysis / chaining (CHH Ch. 20).
266
+ - **Operational definitions of target behaviors** — Cooper/Heron/Heward Ch. 3, 27; key JABA papers (Iwata et al. 1994 for SIB and functional analysis; Carr & Durand 1985 for FCT; Hanley, Iwata, & McCord 2003 for FBA).
267
+ - **Session patterns** — derived from CHH Ch. 6–7 (analyzing behavior change) and Stokes & Baer 1977 (generalization).
268
+ - **Crisis plans** — BACB Ethics Code (2020) section 3.05; ABAI Position Statement on Restraint and Seclusion (2010). Physical-intervention procedures are left vague on purpose because they vary by training program (Safety-Care, CPI, PMT, TCI) and jurisdiction; the dataset emphasizes verbal de-escalation, environmental safety, BIP authorization, and contraindications.
269
+
270
+ ### 4.4 Clinical-accuracy pipeline
271
+
272
+ Initial generation produced the structural skeleton. The corpus was then iterated via **practitioner-in-the-loop review** — a full-text render of the held-out candidate pool was browsed by a reviewer with ABA practitioner exposure, and each flagged clinical inaccuracy was traced to the responsible taxonomy cell and fixed with a single targeted edit plus a full regeneration. Because every example records its sampling provenance, a single cell-level edit propagates to every example that sampled the affected cells — so flagging one example systematically corrects a class of examples.
273
+
274
+ ---
275
+
276
+ ## 5. Intended Uses
277
+
278
+ ### 5.1 Direct use
279
+ - Instruction-tuning a small language model (recommended: Gemma 4 E2B 4-bit with QLoRA) to draft ABA teaching programs and interpret session logs.
280
+ - Evaluation of task-specific competencies using the held-out `test.jsonl` (281 examples).
281
+ - Research on taxonomy-driven synthetic data generation for clinical decision support.
282
+
283
+ ### 5.2 Downstream use
284
+ - Research on small-LM drafting assistants in structured clinical-documentation domains.
285
+ - Comparison baselines for future ABA-specific LLM work (Kumar et al. 2024 "Personalized-ABA" is the closest direct predecessor; the present dataset extends to structured program generation and session-log interpretation).
286
+
287
+ ### 5.3 Out-of-scope (do NOT use for)
288
+ - Autonomous clinical decisions.
289
+ - Writing final Behavior Intervention Plans without BCBA review.
290
+ - Training models on or combined with real client records (the pipeline and schema are explicitly designed to avoid this).
291
+ - Legal or insurance documentation.
292
+ - Medical diagnosis.
293
+
294
+ ---
295
+
296
+ ## 6. Ethics & Risks
297
+
298
+ ### 6.1 Provenance
299
+ Every example is synthetic. No real client data, no real session notes, no real identifiers were used at any step. Learner references use synthetic IDs (`SYN-####`); dates fall in the range 2026-01-01 to 2026-12-31.
300
+
301
+ ### 6.2 Clinical-risk framing
302
+ The dataset is designed around a **draft-and-review** authoring pattern. Assistant responses are structured so that a reviewer can quickly see the method, the stimulus arrangement, the prompt hierarchy, the reinforcement plan, the error-correction procedure, the mastery criterion, and the generalization plan — each as a distinct, scannable section. Session-interpretation responses surface a confidence level (high / moderate / low) and an escalation level (1–4) as structured fields. These are design choices that support auditability; they are not clinical advice, and TRACE's responsibility disclaimer applies.
303
+
304
+ ### 6.3 Crisis-plan sensitivity
305
+ Crisis plans were written against the ABAI 2010 Position Statement on Restraint and Seclusion and BACB Ethics Code 2020 section 3.05. The dataset references facility crisis-prevention frameworks (Safety-Care, CPI, PMT, TCI) only as examples and **deliberately avoids specifying restraint procedures** because those procedures are (a) jurisdiction-dependent, (b) training-certification-gated, and (c) learner-specific (many learners have contraindications). The dataset embeds explicit text in every crisis-plan bullet that physical intervention is used only when specifically authorized in the learner's BIP and only by staff currently certified in the facility's training program.
306
+
307
+ ### 6.4 Population representation
308
+ Learner profiles are intentionally abstract (early / school-age / adolescent / adult). The dataset does not encode demographic categories (race, socioeconomic status, gender identity) and does not attempt to characterize clinical presentations by such categories. This is a deliberate choice for a first release; future versions may add representation if grounded in published demographic work.
309
+
310
+ ### 6.5 Known limitations
311
+ - **English only.** Teaching method terminology, curriculum targets, and session-log conventions are rendered in English.
312
+ - **Synthetic distributions.** Pattern frequencies are approximately uniform for learnability; real clinical practice has different frequencies (mastery-progression is far more common than frustration-pattern in a healthy caseload). The dataset is explicitly a teaching set, not an epidemiological sample.
313
+ - **VB-MAPP + AFLS only.** Other curricula (ABLLS-R, Essential for Living, PEAK) are not covered. Practitioners using those curricula should adapt.
314
+ - **No longitudinal data.** Sessions within a log are temporally ordered but the pipeline does not model real continuity over months or years.
315
+ - **Toleration covers hygiene only.** Other toleration programs (e.g., wearing glasses, riding in a car seat) are not represented.
316
+ - **Toilet-training acquisition is out of scope.** Accidents are tracked in session logs and bathroom-requesting is taught as a NET mand, but the full Azrin & Foxx 1971 rapid toilet-training acquisition protocol is not included as a task-analysis program.
317
+
318
+ ### 6.6 License
319
+ **Data:** CC BY-NC 4.0 — research and non-commercial use with attribution. **Code:** MIT.
320
+
321
+ **Responsibility.** TRACE is a research artifact. It is not a clinical tool, has not been clinically validated, and carries no clinical endorsement. Anyone who chooses to deploy TRACE — or any model derived from it — in a clinical setting does so entirely at their own responsibility and under their facility's own oversight. The authors and Pombo Labs make no representation of clinical suitability and accept no liability for clinical outcomes.
322
+
323
+ ---
324
+
325
+ ## 7. Reproducibility
326
+
327
+ The corpus is regenerated deterministically from:
328
+ - `configs/` (all YAMLs)
329
+ - `configs/generation.yaml` (seed and per-area counts)
330
+ - `src/generators/*.py` (generator code)
331
+
332
+ ```bash
333
+ uv run python src/generate.py --all # regenerate 3000 examples
334
+ uv run python src/split_data.py # stratified split
335
+ uv run python src/prepare_curation.py # browseable review.md
336
+ uv run python src/compile_curation.py # test.jsonl + sanity.jsonl
337
+ ```
338
+
339
+ The dataset version is `v1.0.0`; the matching git tag pins the exact configs and generator code that produced the published JSONL splits.
340
+
341
+ ---
342
+
343
+ ## 8. Citation
344
+
345
+ Please cite as:
346
+
347
+ > Kahunla, F. (2026). *TRACE: Taxonomy-Grounded Synthetic Data for Teaching Program Generation and Session Interpretation in Applied Behavior Analysis.* Pombo Labs. https://github.com/Pombo-Labs/TRACE
348
+
349
+ Machine-readable metadata: `CITATION.cff`.
350
+
351
+ ---
352
+
353
+ ## 9. Appendices
354
+
355
+ - **Datasheet** (Gebru et al. 2021 format): `datasheet.md`
356
+ - **Data statement** (Bender & Friedman 2018 format): `data-statement.md`
357
+ - **Taxonomy reference** (operational definitions + citations): `taxonomy-v1.md`
358
+ - **Schema reference** (wire format + slot specifications): `schema-v1.md`
docs/datasheet.md ADDED
@@ -0,0 +1,220 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Datasheet for TRACE (v1)
2
+
3
+ **TRACE** — **T**axonomy-**R**eferenced **A**BA **C**linical **E**xamples
4
+
5
+ Template: Gebru, T., Morgenstern, J., Vecchione, B., Wortman Vaughan, J., Wallach, H., Daumé III, H., & Crawford, K. (2021). *Datasheets for Datasets.* Communications of the ACM. https://arxiv.org/abs/1803.09010
6
+
7
+ ---
8
+
9
+ ## Motivation
10
+
11
+ ### Why was the dataset created?
12
+ To provide the first public, open-access synthetic instruction-tuning dataset for two tasks in Applied Behavior Analysis (ABA): (1) teaching program generation and (2) multi-session behavioral interpretation. Board Certified Behavior Analysts (BCBAs) draft large numbers of these documents weekly; current LLM assistants are both unspecialized to ABA and unvetted against its clinical standards. The dataset supports fine-tuning a small, on-device language model (Gemma 4 E2B at 2B parameters) for drafting support, preserving clinical oversight at the point of deployment.
13
+
14
+ ### Who funded the creation of the dataset?
15
+ No external funding. Authored by Festus Kahunla (Drexel University) and released publicly under the **Pombo Labs** organization (https://github.com/Pombo-Labs).
16
+
17
+ ### Any other comments?
18
+ The dataset's primary novelty is its **taxonomy-driven construction with complete provenance**: every example can be traced back to a specific set of taxonomy cells that produced it. This is a deliberate choice to make clinical validity auditable.
19
+
20
+ ---
21
+
22
+ ## Composition
23
+
24
+ ### What do the instances represent?
25
+ Two instance types:
26
+
27
+ 1. **Teaching-program instances** (1,800 total) — a user message describes a learner profile and a skill target; an assistant message produces a structured teaching program. Covers three methods: Discrete Trial Training (DTT, 800), Natural Environment Teaching (NET, 500), Task Analysis / chaining (500).
28
+
29
+ 2. **Session-interpretation instances** (1,200 total) — a user message contains a multi-session behavioral log (5–12 sessions, with per-session accuracies, target behaviors with measurements, and optional ABC and IOA data); an assistant message produces clinical concerns, a pattern classification, a function hypothesis, recommendations, and when applicable a crisis plan.
30
+
31
+ ### How many instances are there in total?
32
+ 2,999 (the nominal generation target is 3,000 — 1,800 teaching-program plus 1,200 session-interpretation instances; the released splits sum to 2,999).
33
+
34
+ ### Does the dataset contain all possible instances or is it a sample?
35
+ The dataset is a **programmatic sample** from a combinatorial space defined by the taxonomy. The space is formally enumerable (domains × levels × learner profiles × mastery states × prompt hierarchies × reinforcement schedules × error corrections × mastery criteria) but is far larger than 2,999 examples. Sampling weights reflect approximate clinical frequency where known (e.g., VB-MAPP Level 1 is sampled more heavily than Level 3 for DTT because early learners dominate the data space in practice).
36
+
37
+ ### What data does each instance consist of?
38
+ See `schema-v1.md` and `dataset-card.md section 3`. Each instance is one JSONL line with two top-level keys:
39
+ - `messages` — a three-message chat triple (system, user, assistant) that the fine-tuner trains on.
40
+ - `meta` — task type, example ID, gold labels, and full sampling provenance (ignored during training).
41
+
42
+ ### Is there a label or target associated with each instance?
43
+ Yes, stored in `meta.gold_labels`. For teaching programs: method, domain, level, learner profile, mastery state. For session interpretation: pattern class, behavior functions, escalation level, confidence, crisis-plan requirement.
44
+
45
+ ### Is any information missing from individual instances?
46
+ Some instances have optional data by design:
47
+ - **Task Analysis / toleration programs** have `shaping_steps` instead of `steps` and no `chain_type` (not applicable).
48
+ - **Session logs** carry ABC data on approximately 30% of sessions when behaviors are present; IOA data on approximately 25% of logs; behavioral-indicator clusters per pattern. Absence of these is intentional, not missingness.
49
+
50
+ ### Are relationships between individual instances made explicit?
51
+ Instances are independent by construction. No implicit links between examples.
52
+
53
+ ### Are there recommended data splits?
54
+ Yes. The dataset ships with four split files:
55
+
56
+ | Split | Count | Fraction | Purpose |
57
+ |---|---:|---:|---|
58
+ | `train.jsonl` | 2,549 | 85% | LoRA fine-tuning |
59
+ | `valid.jsonl` | 149 | 5% | Validation during training |
60
+ | `test.jsonl` | 281 | 9.4% | Held-out evaluation |
61
+ | `sanity.jsonl` | 20 | 0.7% | Training smoke-test |
62
+
63
+ All splits are stratified by area × category to preserve the corpus distribution.
64
+
65
+ ### Are there errors, sources of noise, or redundancies?
66
+ - **Template repetition.** Assistant responses follow a structured template; lexical patterns recur by design. This is an intentional trade-off for learnability.
67
+ - **Sampling drift.** Because the generator samples independently per example, sampling can occasionally land on improbable — but not impossible — combinations of cells (e.g., a "mastered" skill still running an error-correction procedure). Compatibility rules reduce this; ad-hoc practitioner review catches the rest.
68
+ - **Clinical accuracy.** Every example passed at least a syntactic quality check. A subset underwent ad-hoc practitioner review during iteration; not every example has been individually inspected.
69
+
70
+ ### Does the dataset contain data that might be considered confidential?
71
+ No. All data is synthetic. No real client information. No real session notes. No real names, dates, or IDs. Synthetic IDs use the `SYN-####` pattern from a fixed range; synthetic dates fall in 2026-01-01 to 2026-12-31.
72
+
73
+ ### Does the dataset contain data that might be offensive, insulting, threatening, or otherwise cause anxiety?
74
+ The session logs describe challenging behavior (aggression, self-injury, property destruction, pica, elopement, fecal smearing, toileting accidents, tantrum). These descriptions are clinical in register and grounded in the operational definitions used in the peer-reviewed JABA literature. They may be distressing to readers unfamiliar with ABA. Target behaviors appear only in the context of a session log being interpreted — they are never endorsed, glorified, or represented outside clinical framing.
75
+
76
+ ### Does the dataset relate to people?
77
+ Indirectly. Instances describe synthetic learners in clinical scenarios. No real people are identifiable. No real demographic distributions are encoded.
78
+
79
+ ### Does the dataset identify any sub-populations?
80
+ Learner profiles are:
81
+ - **Early Learner** (≈3–5 years; VB-MAPP L1 or L2 depending on skill)
82
+ - **School-Age** (≈6–10 years; VB-MAPP L2–L3, AFLS basic_living and home)
83
+ - **Adolescent** (≈11–17 years; AFLS community, vocational)
84
+ - **Adult** (≈18+ years; AFLS independent_living primary)
85
+
86
+ These are developmentally anchored; they do not encode race, ethnicity, socioeconomic status, or gender identity. No sub-populations are characterized on those axes.
87
+
88
+ ### Is it possible to identify individuals from the dataset?
89
+ No. The dataset is synthetic.
90
+
91
+ ### Does the dataset contain data that might be considered sensitive?
92
+ The subject matter is sensitive — autism-related clinical challenges, problem behavior, restraint-related crisis planning. The data itself is not sensitive because it is synthetic. Care was taken in the crisis-plan content to cite authoritative sources (BACB Ethics Code 2020 section 3.05; ABAI 2010 Position Statement) and to avoid prescribing restraint procedures directly.
93
+
94
+ ---
95
+
96
+ ## Collection Process
97
+
98
+ ### How was the data associated with each instance acquired?
99
+ Programmatic generation. The pipeline is a deterministic function of (a) the YAML taxonomy configs and (b) a random seed. Inputs to the generator come from published curricula (VB-MAPP; AFLS) and textbook / JABA / BACB Ethics Code content that was manually encoded into the taxonomy YAMLs by the authors. **No web scraping. No ingestion of patient records. No use of real session notes.**
100
+
101
+ ### What mechanisms or procedures were used?
102
+ Python scripts under `src/generators/` with taxonomy YAMLs under `configs/`. See `dataset-card.md section 4`.
103
+
104
+ ### If sampled, what was the sampling strategy?
105
+ Weighted random sampling on each taxonomy dimension, with compatibility rules applied afterward to reject clinically inconsistent combinations. Weights are human-specified (in `compatibility.yaml` files) to approximate clinical frequency.
106
+
107
+ ### Who was involved in the data collection process?
108
+ Festus Kahunla (Drexel; Pombo Labs) wrote the generator and the taxonomy, conducted clinical-accuracy review, and authored the dataset docs.
109
+
110
+ ### Over what timeframe was the data collected?
111
+ January 2026 to April 2026. The early months covered taxonomy research, literature grounding, and pipeline design; the corpus itself was generated and iterated in April 2026.
112
+
113
+ ### Were any ethical review processes conducted?
114
+ No IRB required — the dataset is entirely synthetic. The generator architecture is explicitly designed so no real client data can enter the pipeline. Clinical scope (what counts as a reasonable teaching program, what crisis procedures are appropriate) was grounded in published sources (BACB Ethics Code 2020; ABAI 2010 Position Statement; Cooper, Heron, & Heward 2020) rather than personal clinical experience.
115
+
116
+ ### Does the dataset relate to people? If so, did you collect data from them directly or from a third-party?
117
+ The dataset does not relate to any real person. No data was collected from any person.
118
+
119
+ ### Were the individuals in question notified about data collection?
120
+ Not applicable (no real individuals).
121
+
122
+ ### Did the individuals in question consent to data collection?
123
+ Not applicable (no real individuals).
124
+
125
+ ### If consent was obtained, were they given a mechanism to revoke consent?
126
+ Not applicable.
127
+
128
+ ### Has an analysis of the potential impact of the dataset and its use on data subjects been conducted?
129
+ The dataset has no data subjects. The intended model users (BCBAs and ABA technicians) are discussed in section 6.2 of the dataset card (clinical-risk framing) and section 6.3 (crisis-plan sensitivity).
130
+
131
+ ---
132
+
133
+ ## Preprocessing / Cleaning / Labeling
134
+
135
+ ### Was any preprocessing / cleaning / labeling of the data done?
136
+ - **Labeling.** Gold labels are emitted at generation time from the same taxonomy cells that produced the example. No post-hoc annotation. Labels are therefore by-construction correct for the taxonomy; they are only as valid as the taxonomy is (see section 4.3 of the dataset card for grounding).
137
+ - **Deduplication.** Example IDs are deterministic hashes of the message content; duplicates (same user + assistant content) would be detected across generations, but in practice duplicates do not occur because every render draws fresh random values for the slots.
138
+ - **Filtering.** Compatibility rules reject clinically inconsistent samples before rendering. No post-rendering filter is applied.
139
+
140
+ ### Was the "raw" data saved in addition to the preprocessed data?
141
+ The generator is a deterministic function of (configs, seed) and produces the same data on every run. There is no separate "raw" stage — every artifact is reproducible from source.
142
+
143
+ ### Is the software used to preprocess / clean / label the data available?
144
+ Yes, in the repository: `src/generators/`, `src/prepare_data.py`, `src/split_data.py`, `src/prepare_curation.py`, `src/compile_curation.py`.
145
+
146
+ ---
147
+
148
+ ## Uses
149
+
150
+ ### Has the dataset been used for any tasks already?
151
+ At time of v1 release, the dataset has been used for the associated research paper and for Pombo Labs's on-device small-LM fine-tuning experiments. Downstream fine-tuning + evaluation results will be referenced from this dataset card as they are published.
152
+
153
+ ### Is there a repository that links to any or all papers or systems that use the dataset?
154
+ Repository: https://github.com/Pombo-Labs/TRACE (v1 pinned to the initial release commit). Downstream artifacts — training logs, evaluation results, and any derived models — will be released separately and referenced from this dataset card as they become available.
155
+
156
+ ### What other tasks could the dataset be used for?
157
+ - Evaluation baselines for future ABA-specific LLM work.
158
+ - Research on taxonomy-driven synthetic data generation (the pipeline is domain-agnostic; only the configs are ABA-specific).
159
+ - Research on provenance-traceable data and auditable clinical AI.
160
+
161
+ ### Is there anything about the composition of the dataset or the way it was collected and preprocessed / cleaned / labeled that might impact future uses?
162
+ - **Pattern frequencies are uniform for learnability.** Real clinical caseloads are skewed toward mastery-progression; users who care about epidemiological plausibility should reweight.
163
+ - **The dataset is English-only and US-clinical-context-only.** Users in other jurisdictions or languages will need to adapt.
164
+ - **Target-behavior presence in logs is bounded** (0–3 per log by pattern); real logs may have more or fewer.
165
+ - **Crisis plans are deliberately vague on physical-intervention procedures** because those procedures are training- and jurisdiction-specific. Users building systems that need to recommend specific restraint procedures will need additional data curated to their jurisdiction.
166
+
167
+ ### Are there tasks for which the dataset should not be used?
168
+ TRACE is a research artifact and is not a clinical tool. Any clinical use is at the user's own risk. See dataset card section 5.3 for the full framing. Summarized:
169
+
170
+ - Not for autonomous clinical decisions.
171
+ - Not for final BIP writing without BCBA review.
172
+ - Not to be combined with real client data during training or inference.
173
+ - Not for medical diagnosis, legal documentation, or insurance reimbursement.
174
+
175
+ ---
176
+
177
+ ## Distribution
178
+
179
+ ### Will the dataset be distributed to third parties outside of the entity on behalf of which it was created?
180
+ Yes — the dataset is released publicly under CC BY-NC 4.0 for the data and MIT for the code (see "Will the dataset be distributed under a copyright or intellectual property license?" below).
181
+
182
+ ### How will the dataset be distributed?
183
+ Public GitHub repository and Hugging Face Hub.
184
+
185
+ ### When will the dataset be distributed?
186
+ 2026-04-25 for the v1 release.
187
+
188
+ ### Will the dataset be distributed under a copyright or intellectual property license?
189
+ Data: CC BY-NC 4.0. Code: MIT. TRACE is a research artifact; it has not been clinically validated and is not a clinical tool. Any use in a clinical setting is at the sole responsibility of the user and their facility.
190
+
191
+ ### Have any third parties imposed IP-based or other restrictions?
192
+ No third-party IP is embedded. Curriculum references are to published frameworks (VB-MAPP, AFLS); no proprietary items are reproduced. Citation-based references to the Cooper/Heron/Heward textbook and JABA papers are just that — citations, not text reproductions.
193
+
194
+ ### Do any export controls or other regulatory restrictions apply?
195
+ No.
196
+
197
+ ---
198
+
199
+ ## Maintenance
200
+
201
+ ### Who will be supporting / hosting / maintaining the dataset?
202
+ **Pombo Labs** (https://github.com/Pombo-Labs) is the release maintainer. Festus Kahunla is the primary author and active maintainer of the dataset.
203
+
204
+ ### How can the owner / curator / manager of the dataset be contacted?
205
+ Via the GitHub repository issue tracker.
206
+
207
+ ### Is there an erratum?
208
+ A CHANGELOG will be maintained in the repository for each dataset version.
209
+
210
+ ### Will the dataset be updated?
211
+ Yes, as ongoing research warrants. Future versions will be released as new git tags (semantic versioning) with full CHANGELOG entries. No public roadmap is committed to at v1.
212
+
213
+ ### If the dataset relates to people, are there applicable limits on retention?
214
+ Not applicable (no real people).
215
+
216
+ ### Will older versions continue to be supported?
217
+ Yes. Each dataset version is pinned to a specific commit hash; old versions remain reproducible from the corresponding configs and generator code.
218
+
219
+ ### If others want to extend / augment / build on / contribute to the dataset, is there a mechanism for them to do so?
220
+ Yes. The taxonomy YAMLs are the intended extension surface. Adding a new skill target, a new target behavior, a new mastery criterion, or a new teaching method follows a documented pattern (see `configs/` for structure). Pull requests to the repository are the intended contribution mechanism.
docs/references.md ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Citations to use — the focused shortlist
2
+
3
+ **Principle:** The paper's References section is lean and earned, not a literature dump. This shortlist is the ~30 papers we actually cite across the paper and the dataset documentation (dataset card, datasheet, data statement, taxonomy reference).
4
+
5
+ ---
6
+
7
+ ## A. ABA × NLP — direct predecessors and positioning (6)
8
+
9
+ 1. **Kumar, A., et al. (2024). Personalized-ABA.** *NLP4Science @ ACL.* https://aclanthology.org/2024.nlp4science-1.16/
10
+ *How we cite:* direct predecessor; benchmark to beat.
11
+
12
+ 2. **Cox, D. J., & Jennings, A. M. (2024). Promises and possibilities of AI in behavior analytic services.** *Behavior Analysis in Practice, 17*, 123–136. https://pmc.ncbi.nlm.nih.gov/articles/PMC10890993/
13
+ *How we cite:* motivates the application space, especially "NLP on session notes."
14
+
15
+ 3. **Jennings, A. M., & Cox, D. J. (2024). Starting the conversation around the ethical use of AI in ABA.** *Behavior Analysis in Practice, 17*, 107–122. https://pmc.ncbi.nlm.nih.gov/articles/PMC10891004/
16
+ *How we cite:* ethics spine — BACB 2.03/2.05, HIPAA, explainability criteria our stack satisfies by construction.
17
+
18
+ 4. **Peck, S., O'Brien, C., Bourret, J., & Agostinelli, D. (2025). ChatGPT versus clinician responses to questions in ABA.** *JABA.* https://onlinelibrary.wiley.com/doi/10.1002/jaba.70029
19
+ *How we cite:* BCBAs already prefer LLMs blind — raises hallucination stakes.
20
+
21
+ 5. **Garg, M., Raza, S., Rayana, S., Liu, X., & Sohn, S. (2025). The rise of small language models in healthcare: A comprehensive survey.** arXiv:2504.17119. https://arxiv.org/abs/2504.17119
22
+ *How we cite:* positions our work in SLM-clinical field; ABA is a named white-space in their taxonomy.
23
+
24
+ 6. **Gao, L. et al. (2025). Generative AI for assessment and treatment of autism spectrum disorders: A scoping review.** *Frontiers in Psychiatry.* https://pmc.ncbi.nlm.nih.gov/articles/PMC12322814/
25
+ *How we cite:* none of the 10 studies surveyed cover BCBA workflow / session logs / on-device — directly positions our contribution.
26
+
27
+ ## B. Stack precedents — small clinical on-device LMs (3)
28
+
29
+ 7. **Zhang, T., et al. (2025). Menta: On-device SLM for mental health.** arXiv:2512.02716. https://arxiv.org/abs/2512.02716
30
+ *How we cite:* direct recipe precedent (4B + LoRA r=16 α=32 + 4-bit on iPhone, beats 13B).
31
+
32
+ 8. **MedGemma Team, Google DeepMind (2025). MedGemma Technical Report.** arXiv:2507.05201. https://arxiv.org/abs/2507.05201
33
+ *How we cite:* Gemma family at 4B is clinically competent — direct stack-choice precedent.
34
+
35
+ 9. **Dettmers, T., et al. (2023). QLoRA: Efficient Finetuning of Quantized LLMs.** *NeurIPS.* https://arxiv.org/abs/2305.14314
36
+ *How we cite:* foundational method for our QLoRA fine-tuning.
37
+
38
+ ## C. Data methodology (4)
39
+
40
+ 10. **Wang, Y., et al. (2023). Self-Instruct.** *ACL.* https://arxiv.org/abs/2212.10560
41
+ *How we cite:* foundational instruction-bootstrapping pipeline; source of ROUGE-L<0.7 dedup.
42
+
43
+ 11. **Zhang, X., et al. (2023). AlpaCare.** arXiv:2310.14558. https://arxiv.org/abs/2310.14558
44
+ *How we cite:* clinician-seeded stratification recipe — the key quality lever for medical instruction tuning.
45
+
46
+ 12. **Gekhman, Z., et al. (2024). Does Fine-Tuning LLMs on New Knowledge Encourage Hallucinations?** *EMNLP.* https://arxiv.org/abs/2405.05904
47
+ *How we cite:* grounds our "teach format, not new facts" design.
48
+
49
+ 13. **Patel, M., et al. (2025). How to Design, Create, and Evaluate an Instruction-Tuning Dataset in Health Care.** *JMIR.* https://www.jmir.org/2025/1/e70481
50
+ *How we cite:* our validation protocol (4-dim rubric, κ with cooldown) follows this tutorial directly.
51
+
52
+ ## D. Evaluation methodology (7)
53
+
54
+ 14. **Singhal, K., et al. (2025). Toward expert-level medical question answering with LLMs (Med-PaLM 2).** *Nature Medicine.* https://www.nature.com/articles/s41591-024-03423-7
55
+ *How we cite:* rubric design (9 axes, pairwise ranking) — the clinical-NLG gold standard.
56
+
57
+ 15. **Arora, R. K., et al. (2025). HealthBench.** arXiv:2505.08775. https://arxiv.org/abs/2505.08775
58
+ *How we cite:* justifies our 3-point rubric scale over Likert-5/7.
59
+
60
+ 16. **Kim, S., et al. (2024). Prometheus 2.** arXiv:2405.01535. https://arxiv.org/abs/2405.01535
61
+ *How we cite:* the open, local LLM judge we run alongside GPT-4.1 and Claude.
62
+
63
+ 17. **Zheng, L., et al. (2023). Judging LLM-as-a-Judge (MT-Bench).** *NeurIPS 2023.* https://arxiv.org/abs/2306.05685
64
+ *How we cite:* source of the position / verbosity / self-enhancement biases; justifies our swap-augmentation and panel diversity.
65
+
66
+ 18. **Kulkarni et al. (2025). TN-Eval: Behavioral therapy note rubrics.** arXiv:2503.20648. https://arxiv.org/abs/2503.20648
67
+ *How we cite:* closest adjacent rubric; faithfulness warning (therapists preferred hallucinated notes).
68
+
69
+ 19. **Manakul, P., et al. (2023). SelfCheckGPT.** arXiv:2303.08896. https://arxiv.org/abs/2303.08896
70
+ *How we cite:* our zero-resource hallucination metric.
71
+
72
+ 20. **Reiter, E. (2018). A Structured Review of the Validity of BLEU.** *Computational Linguistics, 44*(3), 393–401. https://direct.mit.edu/coli/article/44/3/393/
73
+ *How we cite:* justifies NOT using BLEU as a primary metric (negation matters in evaluation claims).
74
+
75
+ ## E. Statistical grounding (3)
76
+
77
+ 21. **Cohen, J. (1968). Weighted kappa.** *Psychological Bulletin, 70*(4).
78
+ *How we cite:* quadratic-weighted κ for our ordinal escalation-level classifier.
79
+
80
+ 22. **Chicco, D., & Jurman, G. (2020). Advantages of MCC over F1 and accuracy.** *BMC Genomics.* https://link.springer.com/article/10.1186/s12864-019-6413-7
81
+ *How we cite:* justifies reporting MCC alongside macro-F1 on imbalanced classification heads.
82
+
83
+ 23. **Guo, C., et al. (2017). On Calibration of Modern Neural Networks.** *ICML.* https://arxiv.org/abs/1706.04599
84
+ *How we cite:* source of ECE + temperature-scaling for our confidence calibration.
85
+
86
+ ## F. ABA foundation (5)
87
+
88
+ 24. **Cooper, J. O., Heron, T. E., & Heward, W. L. (2020). *Applied Behavior Analysis* (3rd ed.).** Pearson. ISBN 978-0134752556.
89
+ *How we cite:* canonical ABA reference — chapter citations for DTT, task analysis, measurement, IOA, FBA, BIP components.
90
+
91
+ 25. **Iwata, B. A., Dorsey, M. F., Slifer, K. J., Bauman, K. E., & Richman, G. S. (1982/1994). Toward a functional analysis of self-injury.** *JABA, 27*(2), 197–209. https://doi.org/10.1901/jaba.1994.27-197
92
+ *How we cite:* bedrock of the four-function taxonomy we encode.
93
+
94
+ 26. **Carr, E. G., & Durand, V. M. (1985). Reducing behavior problems through functional communication training.** *JABA, 18*(2), 111–126. https://doi.org/10.1901/jaba.1985.18-111
95
+ *How we cite:* origin of FCT; replacement-behavior framework.
96
+
97
+ 27. **Hanley, G. P., Iwata, B. A., & McCord, B. E. (2003). Functional analysis of problem behavior: A review.** *JABA, 36*(2), 147–185. https://doi.org/10.1901/jaba.2003.36-147
98
+ *How we cite:* the definitive FBA review, which consolidated the four-function taxonomy.
99
+
100
+ 28. **Lovaas, O. I. (1987). Behavioral treatment and normal educational and intellectual functioning in young autistic children.** *JCCP, 55*(1), 3–9. https://doi.org/10.1037/0022-006X.55.1.3
101
+ *How we cite:* DTT / EIBI historical origin.
102
+
103
+ ## G. Dataset documentation (2)
104
+
105
+ 29. **Gebru, T., et al. (2021). Datasheets for Datasets.** *CACM.* https://arxiv.org/abs/1803.09010
106
+ *How we cite:* template for our dataset's datasheet appendix.
107
+
108
+ 30. **Bender, E., & Friedman, B. (2018). Data Statements for NLP.** *TACL.* https://aclanthology.org/Q18-1041/
109
+ *How we cite:* NLP-specific data disclosure — complements Gebru datasheet.
110
+
111
+ ---
112
+
113
+ ## What was intentionally dropped (and why)
114
+
115
+ Comprehensive background is in `literature-foundation.md`. The following categories do **not** earn spots in the paper's References:
116
+
117
+ - **Target-behavior JABA papers** (Kodak elopement, Piazza pica, Marcus aggression, Rapp & Vollmer stereotypy, etc.) — These ground the dataset's operational definitions and belong in the **dataset card / datasheet**, not the main paper. They'll appear in a supplementary `taxonomy-v1.md`.
118
+ - **Curriculum primary sources beyond VB-MAPP** (AFLS, ABLLS-R, Essential for Living, PEAK) — cite only if we actually use them as data sources. Current pipeline uses VB-MAPP only; others stay in background reading.
119
+ - **Secondary synthetic-data papers** (phi-1, Orca, LIMA, Baize, MedAlpaca, Clinical Camel, Persona Hub, Evol-Instruct, LAB, DataDreamer) — Self-Instruct + AlpaCare + Gekhman + Patel cover our method grounding. The rest are design inspiration, not required citations.
120
+ - **Secondary evaluation papers** (GPTScore, JudgeLM, BARTScore, Med-HALT, MedHallu, HELM critiques, Abacha MEDIQA, G-Eval) — Prometheus 2 + MT-Bench + TN-Eval + SelfCheckGPT + Reiter + Med-PaLM 2 cover our method grounding.
121
+ - **Older measurement / IOA JABA papers** (Powell 1975, Harrop & Daniels 1986, Kazdin 1977) — covered by Cooper/Heron/Heward textbook chapters.
122
+ - **Teaching-method origins beyond DTT** (Hart & Risley 1975 NET, Koegel 1987 PRT, Parsons 2012 BST, Tiger 2008 FCT review, Slocum & Tiger 2011 chaining) — covered by CHH textbook chapter citations. Only cite these JABA papers individually if our paper makes a specific claim about that teaching method.
123
+ - **Neuromnia blog / Meta-AI press** — not citable in an academic venue; mention in passing without reference.
124
+ - **Lanovaz & Hranchuk 2021** — different task (visual-inspection binary classification from graphs); only cite if we explicitly contrast against it. Likely drop.
125
+ - **Dataset documentation meta-papers** (Bender 2023 Data Statements v2, HF dataset card docs, NeurIPS 2025 D&B CfP, Giuffrè 2023 synthetic health, Pezoulas 2025 privacy, Mozilla/AI-Alliance 2024) — cite only where directly relevant in the datasheet, not in the main paper.
126
+
127
+ ---
128
+
129
+ ## Summary
130
+
131
+ | Category | Count |
132
+ |---|---|
133
+ | ABA × NLP | 6 |
134
+ | Stack precedents | 3 |
135
+ | Data methodology | 4 |
136
+ | Evaluation methodology | 7 |
137
+ | Statistical grounding | 3 |
138
+ | ABA foundation | 5 |
139
+ | Dataset documentation | 2 |
140
+ | **Total** | **30** |
141
+
142
+ Appropriate for a 4-page ACL paper (typical range 20–40 citations) plus a NeurIPS D&B dataset paper appendix. Add more only when the text actually needs them.
docs/schema-v1.md ADDED
@@ -0,0 +1,756 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # TRACE Dataset Schema v1
2
+
3
+ **Purpose.** This document defines the **wire format** for TRACE training examples — the precise shape each JSONL line takes, the structure of user messages and assistant responses, and the metadata fields used for evaluation label extraction.
4
+
5
+ **Scope.** Two tasks, each with its own input/output conventions but sharing a common chat-format wrapper. Every taxonomy category from `taxonomy-v1.md` maps to a field or sampling decision here.
6
+
7
+ **Status.** Version 1, 2026-04-23. Breaking changes require a version bump.
8
+
9
+ **Companion documents.**
10
+ - `taxonomy-v1.md` — the controlled vocabulary (what categories exist)
11
+ - `schema-v1.md` — this document (how examples are shaped)
12
+ - `datasheet.md` (pending) — dataset card
13
+ - `src/prepare_data.py` (pending rewrite) — implements this schema
14
+
15
+ ---
16
+
17
+ ## 0. Overview
18
+
19
+ Two tasks:
20
+
21
+ | Task | Input | Output | MLX-LM role mapping |
22
+ |---|---|---|---|
23
+ | **1. Teaching Program Generation** | Learner profile + skill target + method + context | Structured teaching program with method-specific fields | user -> assistant |
24
+ | **2. Behavioral Session Interpretation** | Multi-session behavioral log (8–12 sessions typical) | Structured clinical interpretation (concerns + pattern + function + recommendations + escalation + confidence + rationale) | user -> assistant |
25
+
26
+ Each training example is one JSONL line with:
27
+ - `messages` — the chat-format messages the model trains on (system + user + assistant)
28
+ - `meta` — evaluation metadata (gold labels, provenance) that is *not* shown to the model during training but is used by `src/evaluate.py` for metric computation
29
+
30
+ ---
31
+
32
+ ## 1. Wire format
33
+
34
+ ### 1.1 MLX-LM chat format
35
+
36
+ MLX-LM's `--data` expects JSONL files where each line has a `"messages"` array. Each message is `{"role": "system"|"user"|"assistant", "content": "..."}`. When `mask_prompt: true`, loss is computed only on assistant tokens.
37
+
38
+ ### 1.2 TRACE extended format
39
+
40
+ ```json
41
+ {
42
+ "messages": [
43
+ {"role": "system", "content": "..."},
44
+ {"role": "user", "content": "..."},
45
+ {"role": "assistant", "content": "..."}
46
+ ],
47
+ "meta": {
48
+ "task_type": "teaching_program" | "session_interpretation",
49
+ "example_id": "<sha256-hex-prefix-16-chars>",
50
+ "gold_labels": { ... task-specific ... },
51
+ "provenance": {
52
+ "layer": 1 | 2 | 3,
53
+ "template_id": "<string>",
54
+ "taxonomy_cells": { ... sampled values ... },
55
+ "teacher_model": "<string or null>",
56
+ "seed": <int>,
57
+ "generated_at": "<ISO-8601>"
58
+ }
59
+ }
60
+ }
61
+ ```
62
+
63
+ The `meta` field is ignored by MLX-LM during training but preserved for evaluation and provenance tracking. `example_id` is a deterministic hash of the user + assistant content so that duplicates can be detected across generations.
64
+
65
+ ### 1.3 Splits
66
+
67
+ Train / valid / test splits live in `data/splits/`:
68
+ - `train.jsonl` — 85% (used for LoRA training)
69
+ - `valid.jsonl` — 10% (used by MLX-LM for periodic validation loss)
70
+ - `test.jsonl` — 5% (held out; only used for final evaluation)
71
+
72
+ Stratified by `meta.task_type` and by `meta.gold_labels.pattern_class` (task 2) so each split is balanced.
73
+
74
+ ---
75
+
76
+ ## 2. Common conventions
77
+
78
+ ### 2.1 Synthetic identifiers
79
+
80
+ Every learner referenced in any example has a synthetic ID: `SYN-####` where `####` is a 4-digit number from a fixed range (1000–9999). Generator uses `random.Random(seed)` to draw these deterministically. **No real-world ID patterns, no initials, no names.**
81
+
82
+ ### 2.2 Dates
83
+
84
+ Synthetic dates always fall in the range `2026-01-01` -> `2026-12-31` (stable, non-leaking). Format: `YYYY-MM-DD`. Session timestamps within a day are not included (unnecessary detail).
85
+
86
+ ### 2.3 Measurement notation (within session logs)
87
+
88
+ | Measurement | Abbreviation / format |
89
+ |---|---|
90
+ | Accuracy (trial-based) | `X/N correct (PP%)` e.g., `6/10 correct (60%)` |
91
+ | Frequency (count) | `freq = N` |
92
+ | Rate | `rate N.NN/min` |
93
+ | Duration | `Nm` or `N m Ns` |
94
+ | Latency | `latency N.Ns` |
95
+ | Partial-interval | `PIR PP% of intervals` |
96
+ | Whole-interval | `WIR PP% of intervals` |
97
+ | Momentary time sampling | `MTS PP%` |
98
+ | Episode-based | `N episodes, mean duration Nm Ns` |
99
+ | IOA | `IOA PP%` (with "session marked IOA" header) |
100
+
101
+ ### 2.4 Prompt-level shorthand
102
+
103
+ Used inside session logs to describe trial-level prompting distribution:
104
+
105
+ - `FP` — full physical
106
+ - `PP` — partial physical
107
+ - `M` — model
108
+ - `G` — gestural
109
+ - `V` — verbal
110
+ - `Pos` — positional
111
+ - `Vis` — visual
112
+ - `I` — independent
113
+
114
+ Example: `prompts: FP×3, PP×2, G×1, I×4` means 3 full-physical, 2 partial-physical, 1 gestural, 4 independent trials.
115
+
116
+ ### 2.5 Markdown conventions (assistant output)
117
+
118
+ All assistant outputs use GitHub-flavored markdown. Section headers use `##` for top-level fields and `###` for sub-fields. Structured labels (pattern class, escalation, confidence) appear under their own `##` headers and contain a single canonical value in the first paragraph for regex extraction.
119
+
120
+ ---
121
+
122
+ ## 3. Task 1 — Teaching Program Generation
123
+
124
+ ### 3.1 System prompt (task 1)
125
+
126
+ ```
127
+ You are an expert ABA (Applied Behavior Analysis) clinical assistant. You help
128
+ Board Certified Behavior Analysts and staff design teaching programs for
129
+ individuals with autism. Your responses are clinically accurate, individualized
130
+ to the learner profile, follow BACB ethical guidelines, and reference no real
131
+ client data. Select the appropriate teaching method (DTT, NET, Task Analysis,
132
+ FCT, BST, PRT) based on the skill target and learner profile.
133
+ ```
134
+
135
+ ### 3.2 User message format (task 1)
136
+
137
+ Seven required fields filled from the taxonomy. The generator samples valid combinations per section 14 of `taxonomy-v1.md`.
138
+
139
+ ```
140
+ Generate a teaching program for the following target.
141
+
142
+ Skill Target: {skill_name}
143
+ Curriculum Reference: {vbmapp_domain or afls_module} — {level or none}
144
+ Learner Profile: {early | school-age | adolescent | adult}
145
+ Current Mastery: {emerging | developing | approaching | near | mastered | generalization | maintenance}
146
+ Teaching Method: {dtt | net | task_analysis | fct | bst | prt}
147
+ Program Context: {D | R | Both}
148
+
149
+ Provide the full program structure appropriate to the selected method.
150
+ ```
151
+
152
+ Template variations (~5 paraphrases) are sampled per Self-Instruct convention to avoid canonical-form overfitting.
153
+
154
+ ### 3.3 Assistant message format (method-agnostic sections)
155
+
156
+ Every task-1 output contains these top-level sections in order:
157
+
158
+ ```markdown
159
+ ## Program Overview
160
+ {1-2 sentence summary of what is being taught and to whom}
161
+
162
+ ## {method-specific sections — see section 3.4}
163
+
164
+ ## Mastery Criteria
165
+ {which of the 7 mastery conventions from taxonomy section 11.1}
166
+
167
+ ## Data Collection
168
+ {what measurement types will be used; when IOA will be scheduled}
169
+
170
+ ## Generalization & Maintenance Plan
171
+ {when to probe across therapists / settings / materials; maintenance schedule}
172
+ ```
173
+
174
+ ### 3.4 Method-specific output variants
175
+
176
+ The middle section of the output varies by method. Each has a fixed field structure.
177
+
178
+ #### 3.4.1 DTT
179
+
180
+ ```markdown
181
+ ## Discriminative Stimulus (SD)
182
+ Primary SD: "{sd_text}"
183
+ Variations: {list}
184
+ Presentation: {how stimulus is presented}
185
+
186
+ ## Prompt Hierarchy
187
+ Strategy: {most-to-least | least-to-most | time-delay | graduated-guidance | stimulus-fading | stimulus-shaping}
188
+ Sequence: {ordered prompt levels}
189
+ Current prompt level: {based on mastery state}
190
+
191
+ ## Stimulus Array
192
+ Array size: {field of N or no-array}
193
+ Target stimuli: {list}
194
+ Distractor stimuli: {list or N/A}
195
+ Rotation: {how position/order is varied}
196
+
197
+ ## Error Correction Procedure
198
+ {one of the 5 procedures from taxonomy section 10}
199
+
200
+ ## Reinforcement Schedule
201
+ {one of the 7 schedules from taxonomy section 9}
202
+ ```
203
+
204
+ #### 3.4.2 NET
205
+
206
+ ```markdown
207
+ ## Motivating Operation (MO) Arrangement
208
+ {how the environment is set up to establish value for the target}
209
+
210
+ ## Natural Opportunity
211
+ {when and where in the natural routine the teaching occurs}
212
+
213
+ ## Prompt Strategy
214
+ Strategy: {prompt hierarchy}
215
+ Delivery: {how prompts are embedded naturally}
216
+
217
+ ## Natural Reinforcer
218
+ {the functional reinforcer that follows the target behavior}
219
+
220
+ ## Generalization Tactics
221
+ {multiple exemplar training; programming common stimuli}
222
+ ```
223
+
224
+ #### 3.4.3 Task Analysis / Chaining
225
+
226
+ ```markdown
227
+ ## Task Analysis
228
+ Chain type: {forward | backward | total-task}
229
+ Steps:
230
+ 1. {step description}
231
+ 2. {step description}
232
+ ...
233
+ N. {step description}
234
+
235
+ ## Prompt Strategy Per Step
236
+ {prompt level used at each step, fading plan}
237
+
238
+ ## Error Correction
239
+ {procedure for when a step is missed}
240
+
241
+ ## Reinforcement
242
+ Per-step reinforcement: {yes/no, schedule}
243
+ Terminal reinforcement: {on chain completion}
244
+ ```
245
+
246
+ #### 3.4.4 FCT
247
+
248
+ ```markdown
249
+ ## Target Behavior (to reduce)
250
+ {operational definition, hypothesized function}
251
+
252
+ ## Replacement Response
253
+ Topography: {vocal phrase | sign | AAC icon | card exchange}
254
+ Training sequence: {how the replacement is taught}
255
+
256
+ ## Extinction Plan
257
+ {how the problem behavior is placed on extinction}
258
+
259
+ ## Reinforcement for Replacement
260
+ Schedule: {initially CRF; thinning plan}
261
+ Magnitude: {quality and duration matched to natural reinforcer for problem behavior}
262
+
263
+ ## Crisis Plan
264
+ {if extinction burst or safety concern, what staff do}
265
+ ```
266
+
267
+ #### 3.4.5 BST (staff-facing)
268
+
269
+ ```markdown
270
+ ## Training Target
271
+ {the program or skill the trainee will learn to implement}
272
+
273
+ ## Training Components
274
+ 1. Instruction: {what written/verbal instruction is given}
275
+ 2. Modeling: {trainer demonstrates; video or live}
276
+ 3. Rehearsal: {trainee practices with feedback}
277
+ 4. Feedback: {specific, behavior-specific, immediate}
278
+
279
+ ## Fidelity Checklist
280
+ {bulleted items that must be demonstrated, scored yes/no}
281
+
282
+ ## Mastery Criterion (trainee)
283
+ {e.g., 100% fidelity across 2 consecutive role-plays with novel scenarios}
284
+ ```
285
+
286
+ #### 3.4.6 PRT
287
+
288
+ ```markdown
289
+ ## Motivation Arrangement
290
+ {how child choice, preferred materials, and interspersal are set up}
291
+
292
+ ## Teaching Opportunities
293
+ {when target is presented; how multiple cues are programmed}
294
+
295
+ ## Reinforcement of Attempts
296
+ {what counts as an attempt; how attempts are reinforced differentially}
297
+
298
+ ## Natural Reinforcer
299
+ {the functional reinforcer for the target response}
300
+
301
+ ## Generalization
302
+ {multiple-exemplar plan; across people / settings}
303
+ ```
304
+
305
+ ### 3.5 Metadata fields (task 1)
306
+
307
+ ```json
308
+ "meta": {
309
+ "task_type": "teaching_program",
310
+ "example_id": "<sha256 prefix>",
311
+ "gold_labels": {
312
+ "method": "dtt",
313
+ "domain": "VB-MAPP.Tact",
314
+ "level": "L2",
315
+ "learner_profile": "early",
316
+ "mastery_state": "developing",
317
+ "context": "Both"
318
+ },
319
+ "provenance": {
320
+ "layer": 1,
321
+ "template_id": "tp-dtt-001",
322
+ "taxonomy_cells": {
323
+ "skill_target": "Tacts colors of objects",
324
+ "prompt_hierarchy": "most-to-least",
325
+ "reinforcement_schedule": "CRF",
326
+ "error_correction": "transfer_trial"
327
+ },
328
+ "teacher_model": null,
329
+ "seed": 20260423001,
330
+ "generated_at": "2026-04-23T14:22:11Z"
331
+ }
332
+ }
333
+ ```
334
+
335
+ ### 3.6 Complete task-1 example (abridged)
336
+
337
+ ```json
338
+ {
339
+ "messages": [
340
+ {
341
+ "role": "system",
342
+ "content": "You are an expert ABA clinical assistant..."
343
+ },
344
+ {
345
+ "role": "user",
346
+ "content": "Generate a teaching program for the following target.\n\nSkill Target: Tacts colors of objects\nCurriculum Reference: VB-MAPP Tact L2\nLearner Profile: early\nCurrent Mastery: developing\nTeaching Method: dtt\nProgram Context: Both\n\nProvide the full program structure appropriate to the selected method."
347
+ },
348
+ {
349
+ "role": "assistant",
350
+ "content": "## Program Overview\nThis program teaches {learner} to tact (label) the color of presented objects...\n\n## Discriminative Stimulus (SD)\nPrimary SD: \"What color?\"\nVariations: \"Tell me the color\", \"What color is this?\"\nPresentation: ...\n\n## Prompt Hierarchy\nStrategy: most-to-least\nSequence: full physical (hand guidance to point) -> partial physical (touch elbow) -> gestural (point to color) -> positional (color nearest learner) -> independent.\nCurrent prompt level: given current 40% accuracy, begin at partial-physical and fade to gestural across sessions.\n\n## Stimulus Array\nArray size: field of 3 (target + 2 distractors)\nTarget stimuli: red, blue, yellow\nDistractor stimuli: 2 non-target colors per trial\nRotation: randomize position of target across trials.\n\n## Error Correction Procedure\nTransfer trial: re-present SD -> provide effective-level prompt -> reinforce prompted response -> distractor trial -> re-present SD independently.\n\n## Reinforcement Schedule\nCRF (continuous reinforcement) during acquisition; fade to FR-2 once accuracy exceeds 80%.\n\n## Mastery Criteria\n80% accuracy across 2 consecutive sessions with at least 10 trials each.\n\n## Data Collection\nTrial-by-trial data recording prompt level and response accuracy. IOA scheduled every 4th session at minimum.\n\n## Generalization & Maintenance Plan\nPost-mastery: probe with novel color stimuli, across 2 additional therapists, and in at least 2 natural contexts. Maintenance probes weekly for 4 weeks, then monthly."
351
+ }
352
+ ],
353
+ "meta": {
354
+ "task_type": "teaching_program",
355
+ "example_id": "a3f2b8d14e9c7a0f",
356
+ "gold_labels": {
357
+ "method": "dtt",
358
+ "domain": "VB-MAPP.Tact",
359
+ "level": "L2",
360
+ "learner_profile": "early",
361
+ "mastery_state": "developing",
362
+ "context": "Both"
363
+ },
364
+ "provenance": {
365
+ "layer": 1,
366
+ "template_id": "tp-dtt-001",
367
+ "taxonomy_cells": {
368
+ "skill_target": "Tacts colors of objects",
369
+ "prompt_hierarchy": "most-to-least",
370
+ "reinforcement_schedule": "CRF",
371
+ "error_correction": "transfer_trial"
372
+ },
373
+ "teacher_model": null,
374
+ "seed": 20260423001,
375
+ "generated_at": "2026-04-23T14:22:11Z"
376
+ }
377
+ }
378
+ }
379
+ ```
380
+
381
+ ---
382
+
383
+ ## 4. Task 2 — Behavioral Session Interpretation
384
+
385
+ ### 4.1 System prompt (task 2)
386
+
387
+ ```
388
+ You are an expert ABA clinical assistant. You analyze multi-session behavioral
389
+ session logs for individuals with autism and produce structured clinical
390
+ interpretations that identify patterns, hypothesize behavior functions when
391
+ applicable, and recommend programming adjustments structured along BIP lines
392
+ (antecedent strategies, replacement behaviors, consequence strategies, crisis
393
+ plan). Your interpretation also includes an escalation level and a confidence
394
+ expression. Every recommendation is grounded in the data provided. Follow BACB
395
+ ethical guidelines and reference no real client data.
396
+ ```
397
+
398
+ ### 4.2 User message format — the session log
399
+
400
+ The session log is a plain-text block with a fixed top-level structure. The generator produces it deterministically from sampled taxonomy values + a hidden pattern label.
401
+
402
+ #### 4.2.1 Learner profile block
403
+
404
+ ```
405
+ LEARNER PROFILE
406
+ Synthetic ID: SYN-####
407
+ Profile: {Early Learner | School-Age Learner | Adolescent Learner | Adult Learner} ({chronological age} yr)
408
+ Curricula: {VB-MAPP L# | AFLS {module(s)} | combination}
409
+ Primary context: {D | R | Both}
410
+ Date range: Sessions 1–N across M days ({start date} to {end date})
411
+ ```
412
+
413
+ #### 4.2.2 Acceleration programs block
414
+
415
+ ```
416
+ ACCELERATION PROGRAMS
417
+ 1. {Skill Target} ({Curriculum Reference})
418
+ Method: {method}, {method-specific brief, e.g., "backward chaining, 8 steps"}
419
+ Context: {D | R | Both}
420
+ 2. ...
421
+ ```
422
+
423
+ Typically 3–6 acceleration programs per log.
424
+
425
+ #### 4.2.3 Deceleration targets block
426
+
427
+ ```
428
+ DECELERATION TARGETS
429
+ 1. {Target behavior} — function hypothesized: {escape | attention | tangible | automatic | unknown}
430
+ 2. ...
431
+ ```
432
+
433
+ Zero to three deceleration targets per log. When present, each has a hypothesized function used by the gold-label generator; the hypothesis may or may not be the correct answer in the interpretation (some examples deliberately encode ambiguity).
434
+
435
+ #### 4.2.4 Per-session data format
436
+
437
+ One block per session.
438
+
439
+ ```
440
+ Session {N} — {YYYY-MM-DD} ({context-tag}) — {duration} min — {# observers}
441
+ {Skill 1 name}: {measurement 1}; {measurement 2}; prompts {prompt distribution}
442
+ {Skill 2 name}: {measurement}
443
+ ...
444
+ {Behavior 1 name}: {measurement}
445
+ {Behavior 2 name}: {measurement}
446
+ {optional: ABC(behavior): A = {antecedent}; B = {behavior description}; C = {consequence}}
447
+ ```
448
+
449
+ ABC entries appear in ~30% of logs (taxonomy section 6.1), at a rate of approximately 1 ABC event per session that includes behavior occurrence.
450
+
451
+ #### 4.2.5 IOA session format
452
+
453
+ Approximately 25% of logs include one IOA session (sampled uniformly from the middle third of sessions in the log).
454
+
455
+ ```
456
+ Session {N} — IOA SESSION — 2 observers
457
+ {Skill 1} IOA: {percentage}% agreement
458
+ {Behavior 1} IOA: {percentage}% agreement
459
+ ...
460
+ ```
461
+
462
+ Agreements below 80% in generated IOA data are intentional in a minority of examples to test the model's ability to flag reliability concerns.
463
+
464
+ #### 4.2.6 Cross-session observations block
465
+
466
+ ```
467
+ BEHAVIORAL OBSERVATIONS (across sessions)
468
+ - {narrative observation 1}
469
+ - {narrative observation 2}
470
+ - ...
471
+ ```
472
+
473
+ 3–6 bullet observations that summarize trends visible in the session data. Generated deterministically from the hidden pattern label + the behavioral indicator pool from taxonomy section 7.
474
+
475
+ ### 4.3 Assistant message format — structured interpretation
476
+
477
+ Eight top-level sections in fixed order. Sections marked **required** always appear; sections marked **conditional** appear only when behavior data is present in the log.
478
+
479
+ ```markdown
480
+ ## Clinical Concerns (required — free-form prose)
481
+
482
+ ## Pattern Classification (required — structured label + 1–2 sentence evidence)
483
+
484
+ ## Behavior Function Hypothesis (conditional — only if deceleration targets observed)
485
+
486
+ ## Programming Recommendations (required — 4 BIP-structured subsections)
487
+ ### Antecedent strategies
488
+ ### Replacement behavior (conditional on behavior presence)
489
+ ### Consequence strategies
490
+ ### Crisis plan (conditional — escalation level ≥ 3)
491
+
492
+ ## Escalation Level (required — structured label + brief justification)
493
+
494
+ ## Confidence (required — structured label + brief justification)
495
+
496
+ ## Data-Supported Rationale (required — numeric grounding)
497
+ ```
498
+
499
+ #### 4.3.1 Clinical Concerns
500
+
501
+ Free-form prose, 2–5 bullets or short paragraphs. Each concern references specific data from the log (accuracy values, frequencies, trends).
502
+
503
+ #### 4.3.2 Pattern Classification
504
+
505
+ First paragraph contains exactly one canonical label (or two labels joined by `+` if co-occurring):
506
+
507
+ ```markdown
508
+ ## Pattern Classification
509
+ {pattern_label}
510
+
511
+ {1–2 sentence explanation of why this pattern was identified}
512
+ ```
513
+
514
+ Pattern labels drawn from taxonomy section 7 (12 patterns):
515
+ `mastery_progression | regression | plateau | frustration_pattern | variable_performance | prompt_dependency | rapid_acquisition | generalization_failure | extinction_burst | skill_loss_after_break | motivating_operation_shift | setting_event_trigger`
516
+
517
+ Co-occurring example: `regression + frustration_pattern`.
518
+
519
+ #### 4.3.3 Behavior Function Hypothesis (conditional)
520
+
521
+ Present only if deceleration targets appear in the log. One sub-entry per target behavior.
522
+
523
+ ```markdown
524
+ ## Behavior Function Hypothesis
525
+ {behavior name}: {escape | attention | tangible | automatic | unknown}
526
+ Evidence: {1–3 sentences grounded in log data, referencing ABC events if present}
527
+
528
+ {behavior name}: ...
529
+ ```
530
+
531
+ If the log lacks evidence to distinguish functions, the hypothesis is `unknown` and the interpretation notes what data would disambiguate.
532
+
533
+ #### 4.3.4 Programming Recommendations (BIP-structured)
534
+
535
+ Four subsections. `### Antecedent strategies` and `### Consequence strategies` are always present; `### Replacement behavior` appears when deceleration targets are present; `### Crisis plan` appears when escalation level ≥ 3.
536
+
537
+ ```markdown
538
+ ## Programming Recommendations
539
+
540
+ ### Antecedent strategies
541
+ - {specific, testable recommendation}
542
+ - ...
543
+
544
+ ### Replacement behavior (FCT)
545
+ - {teaching target, function-matched}
546
+ - {reinforcement plan for replacement}
547
+
548
+ ### Consequence strategies
549
+ - {how staff respond to target behavior}
550
+ - {what is reinforced; what is placed on extinction}
551
+
552
+ ### Crisis plan
553
+ - {safety procedures for escalation}
554
+ ```
555
+
556
+ #### 4.3.5 Escalation Level
557
+
558
+ ```markdown
559
+ ## Escalation Level
560
+ {1 | 2 | 3 | 4} — {short label}
561
+
562
+ {1–2 sentence justification}
563
+ ```
564
+
565
+ Labels: `1 — Continue monitoring`, `2 — Adjust next session`, `3 — Supervisor review`, `4 — Safety immediate`.
566
+
567
+ #### 4.3.6 Confidence
568
+
569
+ ```markdown
570
+ ## Confidence
571
+ {high | moderate | low}
572
+
573
+ {1 sentence explaining the data-quality basis for the confidence level}
574
+ ```
575
+
576
+ #### 4.3.7 Data-Supported Rationale
577
+
578
+ Numeric grounding for all claims made above. Bulleted list of specific accuracy / frequency / duration / trend statistics drawn from the session log.
579
+
580
+ ### 4.4 Metadata fields (task 2)
581
+
582
+ ```json
583
+ "meta": {
584
+ "task_type": "session_interpretation",
585
+ "example_id": "<sha256 prefix>",
586
+ "gold_labels": {
587
+ "pattern_class": "regression+frustration_pattern",
588
+ "behavior_functions": {
589
+ "Tantrum": "escape",
590
+ "Mouthing": "automatic"
591
+ },
592
+ "escalation_level": 3,
593
+ "confidence": "moderate",
594
+ "crisis_plan_required": true
595
+ },
596
+ "log_properties": {
597
+ "n_sessions": 10,
598
+ "n_acceleration_programs": 4,
599
+ "n_deceleration_targets": 2,
600
+ "has_abc_data": true,
601
+ "has_ioa_session": true,
602
+ "learner_profile": "adolescent",
603
+ "context_distribution": {"D": 0.3, "R": 0.7, "Both": 0.0}
604
+ },
605
+ "provenance": {
606
+ "layer": 1,
607
+ "template_id": "sess-regress-v1",
608
+ "pattern_seed": 20260423007,
609
+ "seed": 20260423007,
610
+ "generated_at": "2026-04-23T14:30:00Z"
611
+ }
612
+ }
613
+ ```
614
+
615
+ ### 4.5 Complete task-2 example (abridged user content, full assistant output)
616
+
617
+ ```json
618
+ {
619
+ "messages": [
620
+ {
621
+ "role": "system",
622
+ "content": "You are an expert ABA clinical assistant. You analyze multi-session behavioral session logs..."
623
+ },
624
+ {
625
+ "role": "user",
626
+ "content": "Interpret the following behavioral session log. Provide clinical concerns, pattern classification, behavior function hypothesis (if applicable), programming recommendations (antecedent / replacement / consequence / crisis), escalation level, confidence, and data-supported rationale.\n\nLEARNER PROFILE\nSynthetic ID: SYN-4721\nProfile: Adolescent Learner (16 yr)\nCurricula: AFLS Basic Living + AFLS Home Skills + VB-MAPP L3 (residual)\nPrimary context: Residential (R), partial Day (D)\nDate range: Sessions 1–10 across 14 days (2026-03-01 to 2026-03-14)\n\nACCELERATION PROGRAMS\n1. Self-Care: Washing Hands (AFLS Basic Living)\n Method: Task Analysis, backward chaining, 8 steps\n Context: Both\n2. Requesting with AAC (VB-MAPP Mand L3 adapted)\n Method: NET\n Context: Both\n3. Community Safety Signs (AFLS Community)\n Method: DTT\n Context: D\n4. Tolerating Denied Access (FCT replacement)\n Method: FCT, replacement response = \"wait please\"\n Context: Both\n\nDECELERATION TARGETS\n1. Tantrum — function hypothesized: escape\n2. Mouthing (non-pica) — function hypothesized: automatic\n\nSession 1 — 2026-03-01 (R) — 45 min — 1 observer\n Washing Hands: 3/8 steps independent (38%); prompts FP×3, PP×2, G×1\n AAC Requests: freq = 4 (rate 0.09/min); 2 independent, 2 prompted\n Safety Signs: 6/10 correct (60%); latency 4.2s\n Tolerating Denied Access: 2/3 trials successful\n Tantrum: freq 1, duration 4m\n Mouthing: freq 8; PIR 20%\n ABC (tantrum): A = iPad removed at 2:15pm; B = screaming + drop-to-floor; C = staff redirected to preferred activity after 4 min\n\nSession 2 — 2026-03-02 (R) — 50 min — 1 observer\n Washing Hands: 3/8 (38%); prompts FP×3, PP×2, G×1\n Safety Signs: 5/10 (50%); latency 4.8s\n Tantrum: freq 2, duration 7m\n Mouthing: freq 12; PIR 28%\n\n[... 
Sessions 3–9 ...]\n\nSession 7 — IOA SESSION — 2 observers\n Washing Hands IOA: 87% agreement\n Tantrum IOA: 91% agreement\n AAC Requests IOA: 95% agreement\n\nSession 10 — 2026-03-14 (R) — 45 min — 1 observer\n Washing Hands: 2/8 (25%); prompts FP×5, PP×2, G×1\n Safety Signs: 4/10 (40%); latency 6.1s\n Tolerating: 0/3 success\n Tantrum: freq 4, duration 12m\n Mouthing: freq 15; PIR 35%\n ABC (tantrum, occurrence 2): A = asked to finish worksheet; B = hit table + scream; C = task removed for 5 min, staff attention\n\nBEHAVIORAL OBSERVATIONS\n- Increasing response latency in Safety Signs\n- Increasing tantrum duration\n- Mouthing rate climbing\n- Worksheet and non-preferred tasks consistently precede tantrums"
627
+ },
628
+ {
629
+ "role": "assistant",
630
+ "content": "## Clinical Concerns\n- Regression pattern in Safety Signs and Washing Hands: accuracy has declined from 60->40% and 38->25% respectively across 10 sessions.\n- Tantrum frequency and duration are increasing, clustered around demand presentation.\n- Mouthing rate is increasing, consistent with automatic function but also temporally correlated with tantrum increases.\n\n## Pattern Classification\nregression + frustration_pattern\n\nConcurrent regression in previously-approaching skills alongside a frustration pattern characterized by escape-indicator behaviors (declining accuracy + increased tantrum duration + demand-triggered occurrences).\n\n## Behavior Function Hypothesis\nTantrum: escape\n Evidence: both documented ABC events show demand antecedent (iPad removal, worksheet task) followed by task/demand removal or redirection. Tantrum duration increases concurrent with demand difficulty.\nMouthing: automatic\n Evidence: persists across contexts, no clear social consequence, occurs during low-demand periods and throughout sessions.\n\n## Programming Recommendations\n\n### Antecedent strategies\n- Reduce worksheet task duration by 50% for next 3 sessions; introduce choice between two acceptable non-preferred tasks before any demand.\n- Pre-teach \"wait please\" response before predictable iPad-removal windows.\n- Schedule preferred activity access immediately before (not after) known demand windows to leverage MO.\n\n### Replacement behavior (FCT)\n- Strengthen FCT training for Tolerating Denied Access — current 2->1->0/3 rate across sessions suggests teaching is not yet effective. 
Consider 0-second time delay prompting and increased reinforcement magnitude for independent \"wait please.\"\n- Pair \"wait please\" with a brief, predictable delay (10–15s) followed by reinforcement access.\n\n### Consequence strategies\n- Do NOT remove demand contingent on tantrum (currently reinforcing escape function).\n- Provide brief (20s) break contingent on independent \"wait please\" or equivalent communication.\n- Place tantrum on escape extinction: continue presenting the task while ensuring learner and staff safety.\n\n### Crisis plan\n- If tantrum exceeds 10 minutes or escalates to property destruction or aggression, follow facility crisis protocol, document the incident, and notify supervising BCBA within 24 hours.\n\n## Escalation Level\n3 — Supervisor review\n\nRegression co-occurring with increasing problem behavior warrants BCBA review within 24–48 hours before implementing proposed FCT and extinction changes.\n\n## Confidence\nmoderate\n\nFunction hypothesis is well-supported by 2/2 documented ABC events, but single-observer sessions limit IOA coverage to one session out of 10. Recommend one additional IOA session before finalizing the intervention plan.\n\n## Data-Supported Rationale\n- Mean Safety Signs accuracy sessions 1–3: 58%; sessions 8–10: 42% (16 percentage point decrement, 27% relative decline).\n- Mean tantrum frequency sessions 1–3: 1.3/session; sessions 8–10: 3.3/session (2.5× increase). Mean duration 4 min -> 12 min.\n- 2/2 documented ABC events show demand antecedent -> tantrum behavior -> task removal consequence, consistent with escape-maintained hypothesis.\n- Mouthing PIR 20% -> 35% across the same period (75% relative increase).\n- IOA data (session 7) — 87–95% agreement — supports data reliability for tantrum and AAC measures; no IOA for Safety Signs or Mouthing."
631
+ }
632
+ ],
633
+ "meta": {
634
+ "task_type": "session_interpretation",
635
+ "example_id": "b5c9e2a14f8d7310",
636
+ "gold_labels": {
637
+ "pattern_class": "regression+frustration_pattern",
638
+ "behavior_functions": {
639
+ "Tantrum": "escape",
640
+ "Mouthing": "automatic"
641
+ },
642
+ "escalation_level": 3,
643
+ "confidence": "moderate",
644
+ "crisis_plan_required": true
645
+ },
646
+ "log_properties": {
647
+ "n_sessions": 10,
648
+ "n_acceleration_programs": 4,
649
+ "n_deceleration_targets": 2,
650
+ "has_abc_data": true,
651
+ "has_ioa_session": true,
652
+ "learner_profile": "adolescent",
653
+ "context_distribution": {"D": 0.2, "R": 0.8, "Both": 0.0}
654
+ },
655
+ "provenance": {
656
+ "layer": 1,
657
+ "template_id": "sess-regress-frust-v1",
658
+ "pattern_seed": 20260423007,
659
+ "seed": 20260423007,
660
+ "generated_at": "2026-04-23T14:30:00Z"
661
+ }
662
+ }
663
+ }
664
+ ```
665
+
666
+ ---
667
+
668
+ ## 5. Validation and parsing
669
+
670
+ ### 5.1 Required fields
671
+
672
+ For every example (both tasks):
673
+ - `messages` present with exactly 3 entries: system, user, assistant (in that order)
674
+ - `meta.task_type` in `{teaching_program, session_interpretation}`
675
+ - `meta.example_id` present and non-empty
676
+ - `meta.gold_labels` present with task-specific required keys
677
+ - `meta.provenance.seed` present
678
+
679
+ ### 5.2 Label extraction regex (for evaluator)
680
+
681
+ After model generation, labels are extracted from the assistant response using deterministic regex. If extraction fails, the example is scored as a parse failure (counts against the model).
682
+
683
+ **Pattern classification (task 2):**
684
+ ```python
685
+ r"##\s*Pattern\s*Classification\s*\n\s*([a-z_+]+)\s*\n"
686
+ ```
687
+
688
+ **Escalation level (task 2):**
689
+ ```python
690
+ r"##\s*Escalation\s*Level\s*\n\s*([1-4])\s*—"
691
+ ```
692
+
693
+ **Confidence (task 2):**
694
+ ```python
695
+ r"##\s*Confidence\s*\n\s*(high|moderate|low)\s*\n"
696
+ ```
697
+
698
+ **Behavior function (task 2, per behavior):**
699
+ ```python
700
+ r"^([A-Z][A-Za-z\s\-\(\)]+):\s*(escape|attention|tangible|automatic|unknown)\s*$"
701
+ ```
702
+
703
+ **Method (task 1, validated against user input):**
704
+ ```python
705
+ # No regex needed — method is sampled and stored in gold_labels; evaluator checks
706
+ # whether the assistant output contains the method-specific sections (section 3.4)
707
+ # corresponding to the expected method.
708
+ ```
709
+
710
+ ### 5.3 Schema validity checks (pre-training)
711
+
712
+ `src/prepare_data.py` applies these before writing to splits:
713
+
714
+ 1. All 3 messages present with correct roles
715
+ 2. System prompt matches canonical form for the task
716
+ 3. Assistant response contains all required section headers for its task
717
+ 4. Structured-label sections are regex-extractable
718
+ 5. No placeholder strings (`{...}`, `TODO`, empty `Provide:` blocks) in assistant content
719
+ 6. User message contains no real-world identifying patterns (regex scan for name-like tokens)
720
+ 7. Total length under `max_seq_length` (4096 tokens for training)
721
+ 8. No duplicate `example_id` across the full dataset (SimHash + exact-match dedup)
722
+
723
+ Examples failing any check are written to `data/processed/rejected.jsonl` with a `reason` field.
724
+
725
+ ---
726
+
727
+ ## 6. Extensibility
728
+
729
+ ### 6.1 Adding new session patterns
730
+
731
+ 1. Add entry to `taxonomy-v1.md` section 7 with citation.
732
+ 2. Add generator template in `src/prepare_data.py` with trajectory rules.
733
+ 3. Add label to regex enum in section 5.2.
734
+ 4. Add stratification target in split generator.
735
+ 5. Bump schema version.
736
+
737
+ ### 6.2 Adding new teaching methods
738
+
739
+ 1. Add entry to `taxonomy-v1.md` section 1.
740
+ 2. Add method-specific output variant to `schema-v1.md` section 3.4.
741
+ 3. Add generator template in `src/prepare_data.py`.
742
+ 4. Bump schema version.
743
+
744
+ ### 6.3 Versioning
745
+
746
+ Schema versions follow `major.minor`:
747
+ - **Minor** — additive changes (new pattern, new method, new optional field). Old data remains valid.
748
+ - **Major** — breaking changes (renamed fields, removed sections, changed required structure). Old data requires migration or a new dataset version.
749
+
750
+ Each generated example records the schema version it was generated under in `meta.provenance.schema_version`.
751
+
752
+ ---
753
+
754
+ ## 7. Changelog
755
+
756
+ - **v1.0 (2026-04-23)** — initial schema. Two tasks defined. Task 1 covers 6 teaching methods (DTT, NET, Task Analysis, FCT, BST, PRT); Task 2 covers 12 session patterns with structured labels for pattern class, behavior function, escalation level, and confidence.
docs/taxonomy-v1.md ADDED
@@ -0,0 +1,608 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # TRACE Dataset Taxonomy v1
2
+
3
+ **Purpose.** This document is the **controlled vocabulary** for the TRACE synthetic dataset. Every category our data-generation pipeline can draw from is defined here, with an operational definition and a canonical citation. If a category is not in this document, the generator will not produce it — and if it is here, its clinical validity is defensible against a peer-reviewed source.
4
+
5
+ **Who this serves.**
6
+ - The data-generation pipeline (`src/prepare_data.py`) consumes this taxonomy as its source of truth.
7
+ - The dataset paper's composition table is a direct rendering of this document.
8
+ - Reviewers verifying "no invented clinical categories" can trace each entry -> citation -> CHH chapter / JABA paper / curriculum manual.
9
+
10
+ **Status.** Version 1, 2026-04-22. Breaking changes require a bumped version and a changelog entry.
11
+
12
+ **Citation conventions.** Throughout this document:
13
+ - **CHH** = Cooper, Heron, & Heward (2020). *Applied Behavior Analysis* (3rd ed.). Pearson.
14
+ - **JABA** = *Journal of Applied Behavior Analysis*
15
+ - **BAP** = *Behavior Analysis in Practice*
16
+ - DOIs link to the authoritative source wherever possible.
17
+
18
+ ---
19
+
20
+ ## 0.A Program orientation — Acceleration vs Deceleration
21
+
22
+ Every ABA goal falls into one of two program orientations, a distinction that runs through virtually all clinical data systems:
23
+
24
+ | Orientation | Purpose | Covers |
25
+ |---|---|---|
26
+ | **Acceleration** | Behaviors / skills to **increase** | Skill-acquisition programs (VB-MAPP, AFLS targets); adaptive behaviors (toileting, communication including AAC); FCT-trained replacement behaviors; independent coping with denied access |
27
+ | **Deceleration** | Behaviors to **decrease** | Target problem behaviors (section 5): SIB, aggression, elopement, tantrum, stereotypy, pica, etc. |
28
+
29
+ **Citation.** Standard ABA terminology; see CHH Ch. 22 ("Differential Reinforcement" — acceleration via reinforcement of desired behavior) and Ch. 24 ("Extinction") together with Ch. 25 ("Antecedent Interventions") for deceleration.
30
+
31
+ **Dataset usage.**
32
+ - section 1 teaching methods DTT, NET, Task Analysis, PRT, Incidental Teaching apply to **acceleration** programs.
33
+ - section 1.4 FCT is the acceleration method paired with deceleration — it teaches a replacement behavior while reducing a target behavior.
34
+ - section 1.5 BST is meta-level (trains staff on any program).
35
+ - Task 2 session logs sample across both orientations per session — several acceleration targets and zero or more deceleration behaviors. Exact counts are pipeline hyperparameters.
36
+
37
+ ---
38
+
39
+ ## 0.B How taxonomy dimensions combine
40
+
41
+ Each **teaching program generation** example is produced by sampling one value from each of these dimensions:
42
+
43
+ | Dimension | # values | Section |
44
+ |---|---|---|
45
+ | Teaching method | 6 (optionally 8 if video modeling + script fading are approved) | section 1 |
46
+ | Skill domain | 16 (VB-MAPP) + 6 (AFLS) = 22 | section 2 |
47
+ | Developmental level | 5 (3 VB-MAPP + 2 AFLS) | section 2 |
48
+ | Skill target | ~250 | section 2 |
49
+ | Mastery state | 7 | section 3 |
50
+ | Prompt hierarchy | 6 | section 8 |
51
+ | Reinforcement schedule | 7 | section 9 |
52
+ | Error correction | 5 | section 10 |
53
+ | Mastery criteria | 7 | section 11 |
54
+
55
+ Each **behavioral session interpretation** example is produced by sampling:
56
+
57
+ | Dimension | # values | Section |
58
+ |---|---|---|
59
+ | Session pattern | 12 | section 7 |
60
+ | Primary skills observed | 3–8 per session | section 2 |
61
+ | Target behaviors observed (if any) | 0–3 per session | section 5 |
62
+ | Measurement types used | 2–5 per session | section 6 |
63
+ | Behavior function hypothesis | 4 + "not applicable" | section 4 |
64
+ | Escalation level | 4 | section 12 |
65
+ | Confidence level | 3 | section 13 |
66
+ | BIP component structure | 4 | section 11 |
67
+
68
+ ---
69
+
70
+ ## 1. Teaching methods
71
+
72
+ The first task (Teaching Program Generation) produces method-aware content. A "mand" goal is not automatically a DTT program — real clinical practice picks the method that fits the skill and learner. The model must reason about method selection.
73
+
74
+ ### 1.1 Discrete Trial Training (DTT)
75
+ - **Operational definition.** A structured teaching format consisting of clearly defined trial units: *discriminative stimulus (SD) -> learner response -> consequence (reinforcement or correction) -> inter-trial interval (ITI)*. Trials are massed in a contrived setting (usually table-based), with explicit prompt hierarchy and error-correction procedure.
76
+ - **When to use.** Acquisition of discrete, decontextualized skills (receptive labels, tacts, matching, early imitation) especially early in instruction when stimulus control must be established.
77
+ - **Canonical citation.** Lovaas, O. I. (1987). *JCCP, 55*(1), 3–9. https://doi.org/10.1037/0022-006X.55.1.3 — seminal EIBI trial establishing DTT-based programming. Modern definition: Smith, T. (2001). *Focus on Autism, 16*(2), 86–92.
78
+ - **Dataset usage.** Default teaching method for early-learner (VB-MAPP Level 1–2) discrete skills. Output includes explicit SD, prompt hierarchy, stimulus array, error-correction, mastery criteria.
79
+
80
+ ### 1.2 Natural Environment Teaching (NET)
81
+ - **Operational definition.** Teaching that occurs within the learner's natural context, capitalizing on child-initiated motivation and ongoing activities. The teaching opportunity is embedded in the environment, not contrived.
82
+ - **When to use.** Generalization phase, functional communication, requesting (mand) skills, when motivation is the limiting factor, or when the skill's natural occasion is ecologically predictable (e.g., requesting food during snack time).
83
+ - **Canonical citation.** Hart, B. & Risley, T. R. (1975). *JABA, 8*(4), 411–420. https://doi.org/10.1901/jaba.1975.8-411 (origin of incidental teaching). Autism extension: McGee, G. G., Krantz, P. J., & McClannahan, L. E. (1985). *JABA, 18*(1), 17–31.
84
+ - **Dataset usage.** Default teaching method for mand targets and generalization-phase goals. Output emphasizes environmental setup / MO arrangement over contrived SD.
85
+
86
+ ### 1.3 Task Analysis / Chaining
87
+ - **Operational definition.** Decomposition of a complex behavior into sequential component steps, followed by teaching each step and chaining them together. Three chain types:
88
+ - **Forward chaining** — teach step 1 to mastery, then 1->2, then 1->2->3, etc.
89
+ - **Backward chaining** — complete all steps except the last; teach last; move backward.
90
+ - **Total-task presentation** — prompt through entire chain every trial; fade prompts across all steps.
91
+ - **When to use.** Multi-step skills with a natural sequence: toileting, handwashing, dressing, cooking, social routines.
92
+ - **Canonical citation.** CHH Ch. 23 ("Chaining"). Empirical comparison: Slocum, S. K. & Tiger, J. H. (2011). *JABA, 44*(4), 793–805. https://doi.org/10.1901/jaba.2011.44-793
93
+ - **Dataset usage.** Default for self-care (AFLS Basic Living), domestic, community, and vocational domains. Output structure replaces "SD" with "task analysis steps" (numbered list) + chain type + prompt strategy per step.
94
+
95
+ ### 1.4 Functional Communication Training (FCT)
96
+ - **Operational definition.** Procedure for teaching a communication response that serves the *same function* as a target problem behavior, paired with extinction of the problem behavior. Example: a child who tantrums to escape demands is taught to say/sign "break please," which is reinforced; the tantrum is placed on extinction.
97
+ - **When to use.** Any target behavior with a clear social function (escape, attention, tangible). First-line strategy before consequence-based interventions.
98
+ - **Canonical citation.** Carr, E. G. & Durand, V. M. (1985). *JABA, 18*(2), 111–126. https://doi.org/10.1901/jaba.1985.18-111 (seminal). Modern practical review: Tiger, J. H., Hanley, G. P., & Bruzek, J. (2008). *BAP, 1*(1), 16–23.
99
+ - **Dataset usage.** Default teaching method in behavior-intervention-adjacent recommendations (task 2 interpretation output when behavior data is present). Output ties communication response -> hypothesized function -> reinforcement schedule.
100
+
101
+ ### 1.5 Behavior Skills Training (BST)
102
+ - **Operational definition.** Staff/caregiver training procedure with four components in sequence: **(1) Instruction**, **(2) Modeling**, **(3) Rehearsal**, **(4) Feedback**, iterated until the trainee meets mastery criteria on a performance checklist.
103
+ - **When to use.** Training RBTs on a new program, training caregivers on a home procedure, preparing staff for a BIP rollout.
104
+ - **Canonical citation.** Parsons, M. B., Rollyson, J. H., & Reid, D. H. (2012). *BAP, 5*(2), 2–11. https://doi.org/10.1007/BF03391819
105
+ - **Dataset usage.** Used in a minority of task 1 outputs when the target is a staff-facing training program (rather than a learner-facing teaching program). Output structure emphasizes trainer steps and fidelity checklist.
106
+
107
+ ### 1.6 Pivotal Response Training (PRT)
108
+ - **Operational definition.** Naturalistic teaching targeting "pivotal" behaviors that produce widespread collateral gains: **motivation**, **self-initiation**, **responsivity to multiple cues**, **self-management**. Uses child choice, interspersal of mastered/acquisition targets, natural reinforcers, and reinforcement of attempts (not just correct responses).
109
+ - **When to use.** Language acquisition in autism, social initiation, generalization across contexts.
110
+ - **Canonical citation.** Koegel, R. L., O'Dell, M. C., & Koegel, L. K. (1987). *JADD, 17*(2), 187–200. https://doi.org/10.1007/BF01495055 (origin of NLP -> PRT). JABA-indexed: Laski, K. E., Charlop, M. H., & Schreibman, L. (1988). *JABA, 21*(4), 391–400.
111
+ - **Dataset usage.** Used for a minority of early-language targets in naturalistic conditions. Output emphasizes motivation arrangement, child-choice, attempt-reinforcement.
112
+
113
+ ### Method-selection heuristics (encoded as prompt guidance, not hard rules)
114
+
115
+ | Skill context | Default method | Alternative |
116
+ |---|---|---|
117
+ | Early discrete skills (tacts, receptive labels, matching) | DTT | PRT (if motivation is limiting) |
118
+ | Mand / requesting | NET | FCT (if replacing problem behavior) |
119
+ | Multi-step routines (dressing, handwashing) | Task Analysis | — |
120
+ | Problem-behavior reduction | FCT | — |
121
+ | Generalization phase | NET | PRT |
122
+ | Staff-facing training | BST | — |
123
+ | Social-initiation skills | PRT | NET |
124
+
125
+ ---
126
+
127
+ ## 2. Skill curriculum (Acceleration targets)
128
+
129
+ All skill domains in this section are **acceleration** targets — goals we want the learner to perform MORE often / more independently. See section 0.A.
130
+
131
+ Two overlapping curricula are encoded: **VB-MAPP** (early learners, 0–48 months developmental age) and **AFLS** (adolescents / adults and functional living). Together they cover the full learner age span TRACE should handle.
132
+
133
+ ### 2.1 VB-MAPP (Verbal Behavior Milestones Assessment and Placement Program)
134
+
135
+ **Primary citation.** Sundberg, M. L. (2008). *Verbal Behavior Milestones Assessment and Placement Program*. AVB Press. Validation study: Barnes, C. S., Mellor, J. R., & Rehfeldt, R. A. (2014). *Analysis of Verbal Behavior, 30*(1), 36–47. https://doi.org/10.1007/s40616-013-0004-5
136
+
137
+ **Developmental levels:**
138
+ - **Level 1** — 0–18 months typical developmental age
139
+ - **Level 2** — 18–30 months
140
+ - **Level 3** — 30–48 months
141
+
142
+ **16 skill domains**, each expanded to 5 representative targets per developmental level (Sundberg 2008):
143
+
144
+ **1. Mand** — requesting.
145
+ - L1: single-word mands for preferred items; mands for missing items needed to complete an activity; mands for actions; mands for help; mands using 2-word phrases.
146
+ - L2: mands for information using "what"; mands for information using "where"; mands using adjectives (big, little, more); mands for attention from peers; mands for others to stop an action.
147
+ - L3: mands for information using "why"; mands for information using "when"; mands using complete sentences with correct grammar; mands for future events or items not present; mands using polite social conventions.
148
+
149
+ **2. Tact** — labeling / naming.
150
+ - L1: common objects (ball, cup, shoe); familiar people by name; common actions (running, eating, sleeping); body parts (nose, eyes, mouth); common animals (dog, cat, bird).
151
+ - L2: colors of objects; shapes (circle, square, triangle); adjectives (big/little, hot/cold); prepositions (in, on, under); emotions in self and others.
152
+ - L3: community helpers and their roles; categories (fruits, vehicles, clothing); features of objects (color, shape, function); past-tense events; abstract concepts (same/different, first/last).
153
+
154
+ **3. Echoic** — vocal imitation.
155
+ - L1: single vowel sounds; single consonant-vowel combinations; 1–2 syllable words; animal sounds; familiar words on request.
156
+ - L2: 2-word phrases; 3-word phrases; blended consonants; novel words with correct articulation; short sentences.
157
+ - L3: multi-syllable words accurately; full sentences with correct prosody; unfamiliar words from context; phrases with correct intonation patterns; complex instructions verbatim.
158
+
159
+ **4. Listener Responding** — receptive language.
160
+ - L1: 1-step motor instructions (sit down, stand up); select correct item from an array of 2; point to named body parts; point to named common objects; follow instructions involving objects.
161
+ - L2: select correct item from an array of 4–6; follow 2-step instructions; select items by feature (find something red); select items by function (find something you eat with); select items by class (find an animal).
162
+ - L3: follow multi-step instructions with qualifiers; select items by multiple features simultaneously; follow conditional instructions (if X, then Y); respond to questions about stories read aloud; follow instructions involving temporal concepts (before, after).
163
+
164
+ **5. Visual Perceptual Skills & Matching-to-Sample (VP-MTS).**
165
+ - L1: match identical objects; match identical pictures; match colors (identical chips); match shapes (identical blocks); complete simple 3–4 piece puzzles.
166
+ - L2: match non-identical items by category; sort items into 2–3 categories; match quantities (1–5); match upper-case letters; complete 8–12 piece puzzles.
167
+ - L3: match associated items (sock-shoe, cup-plate); sort by multiple attributes simultaneously; match upper to lower case letters; sequence 3–4 step picture sequences; reproduce block designs from model.
168
+
169
+ **6. Motor Imitation.**
170
+ - L1: gross motor actions (clap hands, stomp feet); actions with objects (bang drum, push car); 2-step motor sequences; fine motor actions (pinch, point); facial movements (open mouth, stick out tongue).
171
+ - L2: 3-step motor sequences; novel motor actions on first attempt; actions in songs and finger plays; actions involving bilateral coordination; motor actions after a delay.
172
+ - L3: complex multi-step sequences; motor actions from video models; handwriting strokes and letter forms; craft and art activities; sports-related motor sequences.
173
+
174
+ **7. Independent Play.**
175
+ - L1: cause-and-effect toys independently; sensory toys for sustained periods; construction toys (blocks, Duplo); vehicles (pushing, rolling); electronic learning toys.
176
+ - L2: simple pretend play sequences; art materials (coloring, playdough); age-appropriate puzzles independently; books independently; simple board or card games.
177
+ - L3: elaborate pretend play with storylines; creative construction activities; rule-based games independently; independently select and transition between activities; hobby / special-interest activities productively.
178
+
179
+ **8. Social Behavior & Social Play.**
180
+ - L1: eye contact during interactions; responds to own name; social games (peek-a-boo, tickle); shows items to others spontaneously; tolerates proximity of peers during parallel play.
181
+ - L2: initiates social interactions with peers; takes turns during structured activities; shares materials when prompted; cooperative play with one peer; responds to peer initiations appropriately.
182
+ - L3: maintains reciprocal conversations with peers; demonstrates empathy and perspective-taking; negotiates and compromises during group activities; joins ongoing peer activities appropriately; maintains friendships over time.
183
+
184
+ **9. Intraverbal** — fill-ins, WH answers without visual prompt.
185
+ - L1: fills in words in songs and nursery rhymes; answers "what's your name"; fills in missing words in familiar phrases; answers simple what-questions about visible items; completes verbal routines (ready, set, ___).
186
+ - L2: answers WH-questions about familiar topics; describes function of common objects; names items in categories when given the category; answers social questions (How are you?); describes recent events in sequence.
187
+ - L3: answers why- and how-questions; provides definitions of words; answers hypothetical questions; engages in multi-turn conversations on a topic; makes inferences from given information.
188
+
189
+ **10. LRFFC** — Listener Responding by Feature, Function, Class.
190
+ - L1: select items by function (What do you drink from?); select items by feature (What is round?); select items by class (Find the animal); select by single function from array of 3; select by single visible feature from array of 3.
191
+ - L2: select items by function from array of 5–8; select items by feature from array of 5–8; select items by class from array of 5–8; select items by multiple features (round and red); select items by function when item not visible.
192
+ - L3: select items given 2+ features/functions simultaneously; select items by class with exclusion (animal but not a pet); select items by abstract features (something needed when cold); select items by comparison (which is heavier); select items by negative features (not round, not food).
193
+
194
+ **11. Reading.**
195
+ - L1: match letters to identical letters; identify own name in print; identify 5–10 upper-case letters; match words to identical words; track print left to right.
196
+ - L2: identify all upper-case letters; identify all lower-case letters; read 10–20 sight words; sound out CVC words (cat, dog, run); read simple 2–3 word phrases.
197
+ - L3: read sentences with comprehension; read short passages and answer questions; phonetic decoding for novel words; read and follow written instructions; read grade-level text with fluency.
198
+
199
+ **12. Writing.**
200
+ - L1: trace horizontal and vertical lines; trace basic shapes (circle, cross); copy basic shapes from model; trace letters of own name; write own name from model.
201
+ - L2: write own name independently; copy all upper-case letters from model; write upper-case letters from dictation; copy simple words from model; write numbers 1–10.
202
+ - L3: write words from dictation; write simple sentences with spacing; write answers to questions; write short compositions (3–5 sentences); use basic punctuation and capitalization.
203
+
204
+ **13. Math.**
205
+ - L1: rote count to 10; count objects 1–5 with 1:1 correspondence; identify numerals 1–5; match quantities to numerals 1–5; identify basic shapes in math context.
206
+ - L2: count objects 1–20 with 1:1 correspondence; identify numerals 1–20; compare quantities (more/less/same); solve single-digit addition with manipulatives; identify coins by name.
207
+ - L3: add single-digit numbers without manipulatives; subtract single-digit numbers; identify place value (ones, tens); tell time to the hour and half-hour; solve simple word problems.
208
+
209
+ **14. Group & Classroom Skills.**
210
+ - L1: sits in designated area for 2–3 minutes; attends to teacher during group instruction; follows group instructions (everyone stand up); transitions between activities with prompts; waits in line briefly.
211
+ - L2: sits in group for 10–15 minutes; raises hand to request or respond; follows classroom routines independently; works independently at desk for 5–10 minutes; transitions between activities independently.
212
+ - L3: participates in group discussions; follows multi-step classroom instructions; works independently for 15+ minutes; self-monitors behavior using checklist; completes and turns in assignments independently.
213
+
214
+ **15. Linguistic Structure** — grammar.
215
+ - L1: single words to communicate; combines 2 words (agent + action or action + object); basic noun-verb combinations; simple negation (no, not); basic pronouns (I, me, my).
216
+ - L2: 3–4 word sentences; regular plurals (-s); present progressive (-ing); prepositions in sentences; regular past tense (-ed).
217
+ - L3: complex sentences with conjunctions; irregular past tense correctly; pronouns with correct referents; questions with correct word order; passive voice and embedded clauses.
218
+
219
+ **16. Spontaneous Vocal Behavior.**
220
+ - L1: spontaneous vocalizations during preferred activities; spontaneous naming of items; spontaneous requests without prompts; spontaneous greetings to familiar people; spontaneous comments on events.
221
+ - L2: spontaneous descriptions of ongoing activities; spontaneous questions about the environment; spontaneous reporting of past events; spontaneous use of social phrases; spontaneous initiation of conversation with peers.
222
+ - L3: spontaneous storytelling / narratives; spontaneous relevant comments in conversations; spontaneous adjustment of language to different listeners; spontaneous use of humor appropriately; spontaneous provision of explanations and reasons.
223
+
224
+ ### 2.2 AFLS (Assessment of Functional Living Skills)
225
+
226
+ **Primary citation.** Partington, J. W., & Mueller, M. M. (2012). *Assessment of Functional Living Skills*. Behavior Analysts, Inc. / Stimulus Publications.
227
+
228
+ **Developmental scope.** Late-childhood through adult; skills are functional rather than milestone-bound.
229
+
230
+ **6 skill modules** (we encode a representative subset of each):
231
+
232
+ 1. **Basic Living Skills** — toileting, eating, dressing, hygiene, bathing (chaining-intensive).
233
+ 2. **Home Skills** — meal preparation, cleaning, laundry, bed-making.
234
+ 3. **Community Participation Skills** — safety signs, money handling, transportation, shopping, restaurants.
235
+ 4. **School Skills** — classroom routines, assignments, social-academic interaction.
236
+ 5. **Vocational Skills** — task completion, workplace etiquette, punctuality.
237
+ 6. **Independent Living Skills** — budgeting, appointments, medication, self-advocacy.
238
+
239
+ **Dataset usage.** AFLS targets appear in task 1 examples for older learners (labeled "Adolescent" or "Adult" learner profile) and in task 2 session logs for those learner profiles. Teaching method defaults to **Task Analysis / Chaining**.
240
+
241
+ ### 2.3 Learner-profile age bands
242
+
243
+ | Profile label | Age band | Curricula |
244
+ |---|---|---|
245
+ | Early Learner | developmental age 0–48 mo | VB-MAPP |
246
+ | School-Age Learner | 6–12 yr chronological, varying developmental | VB-MAPP L2–3 + AFLS School |
247
+ | Adolescent Learner | 13–17 yr chronological, moderate-severe support needs | AFLS + VB-MAPP L3 for residual gaps |
248
+ | Adult Learner | 18+ yr in residential / day program | AFLS + Community + Vocational |
249
+
250
+ **Dataset usage.** Learner-profile label is a sampling dimension.
251
+
252
+ **On distribution.** No research source prescribes an ABA-population age-band distribution. The pipeline config sets a balanced default across the four profiles (rather than skewing toward any band) so the model trains on the full lifespan. Real-world caseload distributions vary by facility type (early-intervention centers skew Early; residential programs skew Adult). The exact ratio is a *pipeline hyperparameter*, not a clinical truth.
253
+
254
+ ### 2.4 Gaps explicitly not covered in v1
255
+
256
+ - **Essential for Living** (McGreevy, Fry, & Cornwall 2014) — skills for adults with severe support needs. Partially covered by AFLS; full EFL integration is not included in v1.
257
+ - **ABLLS-R** (Partington 2010) — heavily overlaps VB-MAPP for early learners; not separately encoded.
258
+ - **PEAK** (Dixon 2014+) — relational frame / advanced cognition; out of scope for baseline dataset.
259
+
260
+ ---
261
+
262
+ ## 3. Mastery states
263
+
264
+ Seven-state taxonomy describing where a skill is in its learning trajectory. Values are slot values in task 1 user prompts and targets in task 2 interpretation.
265
+
266
+ | State | Operational criterion | Clinical meaning |
267
+ |---|---|---|
268
+ | **Emerging** | < 30% accuracy across recent sessions | Skill not yet acquired; high prompt levels needed |
269
+ | **Developing** | 30–50% accuracy, inconsistent | Responding established; stimulus control unstable |
270
+ | **Approaching mastery** | 50–70% accuracy, prompt fading in progress | Moving toward independence |
271
+ | **Near mastery** | 70–85% accuracy, occasional errors | Stimulus control solid, complex stimuli still variable |
272
+ | **Mastered (current level)** | ≥ 85% accuracy across 3 consecutive sessions | Meets mastery criterion for current step |
273
+ | **Generalization phase** | Mastered in training but not in novel settings / with novel materials / across therapists | Ready for NET-style generalization probes |
274
+ | **Maintenance** | Previously mastered; periodic probes to check retention | Maintenance schedule (weekly->monthly) |
275
+
276
+ **Citation.** Mastery-criterion conventions follow CHH Ch. 26 ("Generalization and Maintenance of Behavior Change") and the programmatic mastery-criteria convention in CHH Ch. 28.
277
+
278
+ **Dataset usage.** Mastery state is a primary slot in every task 1 prompt. In task 2, the narrative "a previously mastered skill has regressed" vs "a developing skill has plateaued" is the basis for pattern detection (section 7).
279
+
280
+ ---
281
+
282
+ ## 4. Behavior functions
283
+
284
+ The **four-function taxonomy** of problem behavior is the spine of all behavior reasoning in ABA. Iwata's original functional analysis identified three (attention, escape, sensory); Hanley, Iwata & McCord formalized the fourth (tangible) as standard.
285
+
286
+ | Function | Reinforcer | Example context | Hypothesis test |
287
+ |---|---|---|---|
288
+ | **Escape** (negative reinforcement) | Termination / avoidance of an aversive stimulus (demand, task, interaction) | Child hits when presented with math worksheet -> demand is removed | Elevated rate in demand condition vs control |
289
+ | **Attention** (positive reinforcement, social) | Attention from others (adult or peer) | Child yells when parent on phone -> parent attends | Elevated rate in attention condition vs control |
290
+ | **Tangible** (positive reinforcement, material) | Access to an item or activity | Child tantrums when iPad is taken away -> iPad is returned | Elevated rate when preferred item is restricted |
291
+ | **Automatic** (non-social / sensory) | Self-produced reinforcement (proprioceptive, auditory, visual, gustatory) | Repetitive hand-flapping persists during alone condition | Elevated rate in alone condition; persists across conditions |
292
+
293
+ **Canonical citations.**
294
+ - Iwata, B. A., Dorsey, M. F., Slifer, K. J., Bauman, K. E., & Richman, G. S. (1982/1994). *JABA, 27*(2), 197–209. https://doi.org/10.1901/jaba.1994.27-197 — THE foundational functional-analysis paper.
295
+ - Hanley, G. P., Iwata, B. A., & McCord, B. E. (2003). *JABA, 36*(2), 147–185. https://doi.org/10.1901/jaba.2003.36-147 — definitive review; formalizes four-function taxonomy including tangible.
296
+ - Pre-FA conceptual roots: Carr, E. G. (1977). *Psychological Bulletin, 84*(4), 800–816.
297
+ - CHH Ch. 27 ("Functional Behavior Assessment").
298
+
299
+ **Clinical principle.** Same topography can serve different functions across individuals. A "tantrum" in one client may be escape-maintained; in another, attention-maintained. **Interventions must match function**, not topography.
300
+
301
+ **Dataset usage.**
302
+ - Task 2 interpretation output includes **Function Hypothesis** field when behavior data is present — one of the four functions or "Not Applicable" (skill-only sessions).
303
+ - When present, the hypothesis cites the evidence in the session log (e.g., "elevated rate during demand presentation -> escape-maintained").
304
+
305
+ ---
306
+
307
+ ## 5. Target behaviors — Deceleration targets (with operational definitions)
308
+
309
+ All behaviors in this section are **deceleration** targets — behaviors we want to reduce. See section 0.A.
310
+
311
+ Eighteen challenging behaviors with JABA-grounded operational definitions suitable for staff scoring. Behaviors without a canonical operational definition in the literature (e.g., disrobing, grabbing/snatching) are deliberately excluded from v1 rather than invented.
312
+
313
+ | # | Behavior | Operational definition | Citation |
314
+ |---|---|---|---|
315
+ | 1 | **Self-injurious behavior (SIB)** | Any response that produces tissue damage or has the potential to produce it, including head-hitting, self-biting, face-slapping, head-banging against objects, skin-picking, self-pinching, hair-pulling directed at self. | Iwata et al. 1982/1994 |
316
+ | 2 | **Head-directed SIB** (subtype) | Contact between the individual's hand and head, OR between the head and a stationary object, with audible impact. | Iwata et al. 1982/1994 |
317
+ | 3 | **Aggression** | Attempted or completed forceful contact directed toward another person: hitting, kicking, biting, hair-pulling, scratching, pinching, or throwing objects AT another person. | Marcus, B. A. et al. (2001). *Behavior Modification, 25*(2), 189–213. https://doi.org/10.1177/0145445501252002 |
318
+ | 4 | **Property destruction** | Hitting or kicking furniture/walls; throwing objects not intended to be thrown; tearing clothing, books, or materials; swiping items off surfaces; overturning furniture. | Piazza, C. C., Bowman, L. G., Contrucci, S. A., et al. (1999). *JABA, 32*(4), 437–449. https://doi.org/10.1901/jaba.1999.32-437 |
319
+ | 5 | **Elopement** | Full body (or pre-specified anatomical threshold) crossing a designated boundary (e.g., classroom door, yard fence) without adult approval. | Kodak, T., Grow, L., & Northup, J. (2004). *JABA, 37*(2), 229–232. https://doi.org/10.1901/jaba.2004.37-229; Lang, R. et al. (2010). *JABA, 43*(1), 113–118. |
320
+ | 6 | **Pica** | Placement of any inedible (non-food) item past the plane of the lips, including mouthing and ingestion of objects (paper, dirt, cigarette butts, fabric, small toys, hair). | Piazza, C. C., Fisher, W. W., Hanley, G. P., et al. (1998). *JABA, 31*(2), 165–189. https://doi.org/10.1901/jaba.1998.31-165 |
321
+ | 7 | **Motor stereotypy** | Repetitive, non-functional motor movements (hand-flapping, body-rocking, finger-flicking, spinning) occurring independent of social context and serving no apparent instrumental purpose. | Rapp, J. T. & Vollmer, T. R. (2005). *RIDD, 26*(6), 527–547. https://doi.org/10.1016/j.ridd.2004.11.005 |
322
+ | 8 | **Vocal stereotypy** | Non-contextual, non-communicative vocalization: repetitive sounds, words, phrases, humming, or echolalia outside appropriate conversational context. | Ahearn, W. H., Clark, K. M., MacDonald, R. P. F., & Chung, B. I. (2007). *JABA, 40*(2), 263–275. https://doi.org/10.1901/jaba.2007.30-06 |
323
+ | 9 | **Tantrum** | Co-occurring cluster of two or more of: crying/screaming, dropping to floor, kicking, hitting, throwing objects, lasting ≥ 3 s. Scored from onset of first component topography to 5 s without any component behavior. | Kurtz, P. F. et al. (2003). *JABA, 36*(2), 205–219. https://doi.org/10.1901/jaba.2003.36-205; topography adapted from Carr & Durand 1985. |
324
+ | 10 | **Non-compliance** | Failure to initiate a requested response within 5 s of an instructional prompt (vocal, model, or physical), OR active refusal (verbal "no," walking away). | Wilder, D. A. et al. (2012). *JABA, 45*(1), 121–126. https://doi.org/10.1901/jaba.2012.45-121 |
325
+ | 11 | **Mouthing (non-pica)** | Placement of the hand, fingers, saliva, or non-food object into the mouth past the lip plane, excluding eating/drinking during scheduled meals. | Piazza, C. C., Hanley, G. P., & Fisher, W. W. (1996). *JABA, 29*(4), 437–449. https://doi.org/10.1901/jaba.1996.29-437 |
326
+ | 12 | **Throwing** | Propelling an object through the air with force (excluding task-relevant throwing such as during a game). | Property destruction literature, specifically Piazza et al. 1999 — throwing is often coded as a subtype. |
327
+ | 13 | **Food refusal / selectivity** | Rejection of presented food by turning the head, closing the mouth, batting the utensil away, or expelling food once placed in the mouth. | Piazza, C. C. et al. (2003). *JABA, 36*(2), 187–204 — feeding-disorder literature. |
328
+ | 14 | **Rumination / regurgitation** | Voluntary regurgitation of previously ingested food into the mouth, followed by re-chewing, re-swallowing, or expulsion. Scored per regurgitation episode. | Lyons, E. A., Rue, H. C., Luiselli, J. K., & DiGennaro, F. D. (2007). *JABA, 40*(4), 743–747. https://doi.org/10.1901/jaba.2007.743-747. See also Kahng et al. 2003. |
329
+ | 15 | **Sleep resistance / disruption** | Non-compliance with bedtime routine (refusing to go to bed, leaving bed, repeated requests) OR night waking ≥ 5 min at a time. Scored as instances or total disrupted minutes per night. | Friman, P. C., Hoff, K. E., Schnoes, C., Freeman, K. A., Woods, D. W., & Blum, N. (1999). *JABA, 32*(4), 505–508. https://doi.org/10.1901/jaba.1999.32-505 |
330
+ | 16 | **Verbal aggression / threats** | Yelling, cursing, name-calling, or making verbal threats directed toward another person (e.g., "I'll hit you," "I hate you"). | Kelley, M. E., Lerman, D. C., & Van Camp, C. M. (2002). *JABA, 35*(1), 59–63. See also Hagopian & Boelter 2005. |
331
+ | 17 | **Food stealing** | Taking food items not offered to the individual — from another person's plate, a storage area, or during restricted-access periods — and/or placing such items in the mouth outside designated meal/snack times. | Maglieri, K. A., DeLeon, I. G., Rodriguez-Catter, V., & Sevin, B. M. (2000). Adjunctive delivery of noncontingent reinforcement to treat food stealing during sessions of DRO. *JABA, 33*(4), 615–618. https://doi.org/10.1901/jaba.2000.33-615 |
332
+ | 18 | **Inappropriate sexual behavior (ISB)** | Engagement in sexual self-stimulation (genital contact, rhythmic self-stimulatory movements) in public or semi-public contexts where such behavior is socially or institutionally inappropriate. Scored per episode with onset/offset criteria specified in the program. | Davis, T. N., Machalicek, W., Scalzo, R., Kobylecky, A., Campbell, V., Pinkelman, S., Chan, J., & Sigafoos, J. (2016). A review and treatment selection model for individuals with developmental disabilities who engage in inappropriate sexual behavior. *BAP, 9*(4), 389–402. https://doi.org/10.1007/s40617-016-0149-5. See also Fyffe, C. E., Kahng, S., Fittro, E., & Russell, D. (2004). *JABA, 37*(3), 401–404. |
333
+
334
+ **Dataset usage.**
335
+ - Task 2 session logs include 0–3 target behaviors per log. Sampling weight biased toward the common behaviors (tantrum, non-compliance, aggression, stereotypy, elopement).
336
+ - Each target behavior in a log has an associated function hypothesis (section 4) that drives the interpretation output.
337
+
338
+ ---
339
+
340
+ ## 6. Measurement types
341
+
342
+ Real session logs mix measurement types; task 2 inputs must reflect this heterogeneity.
343
+
344
+ | Type | Operational definition | Use case | Citation |
345
+ |---|---|---|---|
346
+ | **Frequency** (event recording) | Raw count of occurrences of a discrete, quickly-completing behavior during a session. "12 requests this session." | Discrete, countable behaviors with clear onset/offset. | CHH Ch. 4 |
347
+ | **Rate** | Frequency normalized to time: count ÷ session duration. "0.4 requests per minute." | Compare sessions of different durations. | CHH Ch. 4 |
348
+ | **Duration** | Total elapsed time a behavior lasts (sum across instances). "Tantrum total duration: 8 min." | Behaviors with meaningful length (tantrums, on-task engagement, stereotypy). | CHH Ch. 4 |
349
+ | **Latency** | Time from SD to initiation of response. "Mean latency: 3.2 s." | Responsivity / prompt dependency. | CHH Ch. 4 |
350
+ | **Partial-interval recording (PIR)** | Divide session into intervals (e.g., 10 s); mark each interval if behavior occurred *at any point*. Overestimates prevalence. | Hard-to-count behaviors (stereotypy). | Powell, Martindale & Kulp 1975; Harrop & Daniels 1986 |
351
+ | **Whole-interval recording** | Mark interval only if behavior occurred for the *entire* interval. Underestimates. | Behaviors to increase (on-task engagement). | CHH Ch. 4 |
352
+ | **Momentary time sampling (MTS)** | Check once at the end of each interval whether behavior is occurring. Efficient for groups. Approximates duration. | Multi-client settings; duration estimation. | Powell et al. 1975 |
353
+
354
+ **Inter-Observer Agreement (IOA).** Two observers independently record the same session; agreement expressed as percent or κ. **Minimum acceptable: ≥ 80%** across 33% of sessions for scientific validity.
355
+ - Citation: CHH Ch. 5 ("Improving and Assessing the Quality of Behavioral Measurement"); Kazdin, A. E. (1977). *JABA, 10*(1), 141–150. https://doi.org/10.1901/jaba.1977.10-141
356
+
357
+ **Dataset usage.**
358
+ - Every task 2 session log includes at least one **Primary Measurement Type** per target skill or behavior.
359
+ - ~25% of logs include an **IOA subset** (percentage agreement reported) to test the model's ability to interpret reliability.
360
+ - Accuracy % (trial-based correct/total) is the dominant measurement for skill-acquisition programs — this stays.
361
+
362
+ ### 6.1 Antecedent-Behavior-Consequence (ABC) data
363
+
364
+ ABC recording supplements behavior frequency counts with the immediate environmental context around each occurrence. A subset of task 2 session logs (~30%) include ABC entries for target behaviors, enabling function-hypothesis reasoning beyond what frequency data alone supports.
365
+
366
+ - **Antecedent** — what was happening immediately before (≤ 10 s) the behavior occurred.
367
+ - **Behavior** — the target behavior as operationally defined (section 5).
368
+ - **Consequence** — what happened immediately after (≤ 10 s), including any staff response.
369
+
370
+ **Citation.** Bijou, S. W., Peterson, R. F., & Ault, M. H. (1968). A method to integrate descriptive and experimental field studies at the level of data and empirical concepts. *JABA, 1*(2), 175–191. https://doi.org/10.1901/jaba.1968.1-175. See also CHH Ch. 27 ("Functional Behavior Assessment").
371
+
372
+ The remaining ~70% of logs include frequency-only behavior data, giving the model exposure to both enriched and minimal log formats.
373
+
374
+ ### 6.2 Program execution context
375
+
376
+ Each acceleration and deceleration program is associated with one or more settings in which it runs:
377
+
378
+ | Context tag | Setting |
379
+ |---|---|
380
+ | D | Day (school or day habilitation) |
381
+ | R | Residential (home, group home, dormitory) |
382
+ | Both | Runs across settings |
383
+
384
+ **Dataset usage.** Each synthetic task 1 program and task 2 session log carries a context tag. Default sampling ~40% D, ~30% R, ~30% Both. No research source prescribes exact proportions; this is a pipeline hyperparameter.
385
+
386
+ ---
387
+
388
+ ## 7. Session patterns (for Task 2)
389
+
390
+ Behavioral session interpretation requires recognizing **patterns across sessions**. Twelve clinically meaningful patterns are encoded.
391
+
392
+ | # | Pattern | Signature | Concern level | Extends prepare_data.py? |
393
+ |---|---|---|---|---|
394
+ | 1 | **Mastery progression** | Ascending accuracy, mastery criterion within reach | None | Yes |
395
+ | 2 | **Regression** | Previously mastered skill shows declining accuracy | High | Yes |
396
+ | 3 | **Plateau** | Flat accuracy below mastery for ≥ 5 sessions | Moderate | Yes |
397
+ | 4 | **Frustration pattern** | Declining accuracy + escape-function behavioral indicators | High | Yes |
398
+ | 5 | **Variable performance** | High session-to-session SD, no clear trend | Moderate | Yes |
399
+ | 6 | **Prompt dependency** | High prompted accuracy, low independent accuracy, prolonged | Moderate | Yes |
400
+ | 7 | **Rapid acquisition** | Accelerated mastery beyond expected timeline | None (good) | Yes |
401
+ | 8 | **Generalization failure** | Mastered in training, low accuracy in novel settings/stimuli | Moderate | Yes |
402
+ | 9 | **Extinction burst** | Temporary spike in problem behavior during reduction procedure | Expected | Yes |
403
+ | 10 | **Skill loss after break** | Performance drop after absence, recovering | Moderate | Yes |
404
+ | 11 | **Motivating operation shift** | Responding drops when MO changes (e.g., satiation of reinforcer); recovers when MO restored | Moderate | No — new in v1, grounded in Michael 1993 / CHH Ch. 16 |
405
+ | 12 | **Setting event trigger** | Accuracy or behavior changes correlated with an external setting event (illness, sleep disruption, schedule change) | Moderate | No — new in v1, grounded in Smith & Iwata 1997 / Bijou & Baer 1961 |
406
+
407
+ **Citations.**
408
+ - Patterns 1–10 draw from standard single-case-design interpretation literature (CHH Ch. 6, 7). Specific ones:
409
+ - Regression / skill loss: CHH Ch. 26 (maintenance).
410
+ - Extinction burst: CHH Ch. 24.
411
+ - Prompt dependency: Time-delay literature (Touchette 1971; CHH Ch. 21 prompting).
412
+ - Generalization failure: Stokes & Baer 1977 "An implicit technology of generalization" *JABA, 10*(2); CHH Ch. 26.
413
+ - Patterns 11–12 are TRACE additions with specific literature grounding:
414
+ - MO shift: CHH Ch. 16 ("Motivating Operations"); Michael, J. (1993). *Journal of the Experimental Analysis of Behavior, 59*(3), 533–552.
415
+ - Setting event: Bijou & Baer 1961; Smith, R. G. & Iwata, B. A. (1997). *JABA, 30*(2), 343–375.
416
+
417
+ **Honest framing.** This 12-pattern taxonomy is our *operationalization* for the dataset — no single paper proposes these as a canonical pattern set. Each underlying concept is well-grounded (citations above); the clustering into 12 discrete labels is a design choice that makes session interpretation a tractable classification task.
418
+
419
+ **Dataset usage.** Each task 2 example is generated from one of these 12 patterns as its hidden label. The interpretation output includes the pattern classification as a structured field (section 12).
420
+
421
+ ---
422
+
423
+ ## 8. Prompt hierarchies
424
+
425
+ Six prompt-fading strategies used in DTT and related methods.
426
+
427
+ | # | Strategy | Sequence | When to use |
428
+ |---|---|---|---|
429
+ | 1 | **Most-to-Least (errorless)** | Full physical -> partial physical -> gestural -> positional -> independent | Acquisition, early learner, safety-critical skills |
430
+ | 2 | **Least-to-Most** | Independent -> gestural -> positional -> partial physical -> full physical | Learner already has partial repertoire; promotes independence |
431
+ | 3 | **Graduated Guidance** | Hand-over-hand with fading pressure -> shadow -> independent | Motor skills, self-care chaining |
432
+ | 4 | **Time Delay** (progressive) | 0 s delay -> 2 s -> 4 s -> 6 s -> learner responds independently | Prompt-dependency prevention; ideal for fading |
433
+ | 5 | **Stimulus Fading** | Exaggerated stimulus -> gradually reduce salience -> natural stimulus | Receptive discrimination, early reading |
434
+ | 6 | **Stimulus Shaping** | Modified stimulus -> gradually reshape to target -> natural stimulus | Complex visual discriminations |
435
+
436
+ **Citations.** CHH Ch. 21 ("Imitation, Shaping, and Prompting") and Ch. 17 ("Stimulus Control"). Time delay: Touchette & Howard 1984 *JABA*.
437
+
438
+ **Dataset usage.** One of the six sampled per task 1 DTT / Task-Analysis example. Output describes the sequence and current prompt level based on mastery state (section 3).
439
+
440
+ ---
441
+
442
+ ## 9. Reinforcement schedules
443
+
444
+ Seven reinforcement arrangements — intentionally kept to the commonly-used set for cleanliness.
445
+
446
+ | # | Schedule | Description | Typical use |
447
+ |---|---|---|---|
448
+ | 1 | **CRF** (continuous reinforcement) | Every correct response reinforced | Acquisition phase |
449
+ | 2 | **FR-2** (fixed ratio 2) | Every 2nd correct response reinforced | Thinning as accuracy stabilizes |
450
+ | 3 | **VR-3** (variable ratio 3) | Average of every 3rd correct response | Maintenance, resistance to extinction |
451
+ | 4 | **DRO** (differential reinforcement of other behavior) | Reinforce any behavior *other than* the problem behavior during an interval | Behavior reduction |
452
+ | 5 | **DRA** (differential reinforcement of alternative) | Reinforce a specific alternative behavior, extinguish problem behavior | FCT pairing |
453
+ | 6 | **DRI** (differential reinforcement of incompatible) | Reinforce a behavior physically incompatible with the problem behavior | Stereotypy reduction |
454
+ | 7 | **Token economy** | Tokens delivered per correct response, exchanged later for a back-up reinforcer | Group / classroom settings |
455
+
456
+ **Citations.** CHH Ch. 13 ("Schedules of Reinforcement"), Ch. 22 ("Differential Reinforcement").
457
+
458
+ **Dataset usage.** One schedule sampled per task 1 example. Task 2 recommendations may suggest schedule changes (e.g., "thin from CRF to FR-2 as accuracy exceeds 85%").
459
+
460
+ ---
461
+
462
+ ## 10. Error correction procedures
463
+
464
+ Five error-correction procedures used in DTT and related formats.
465
+
466
+ | # | Procedure | Steps |
467
+ |---|---|---|
468
+ | 1 | **Transfer trial** | Re-present SD -> prompt at effective level -> reinforce prompted response -> distractor trial -> re-present SD independently |
469
+ | 2 | **4-step** | Model -> lead (do together) -> test (independent) -> distractor -> retest |
470
+ | 3 | **Backstep** | Return to previous prompt level that produced success -> successful response -> re-attempt at target prompt level |
471
+ | 4 | **Simple correction** | "No, watch me" -> model -> re-present SD -> differential reinforcement |
472
+ | 5 | **Error-free** | Prevent errors by using high prompts from start; fade gradually (paired with most-to-least) |
473
+
474
+ **Citations.** CHH Ch. 21. Specific: Heward, W. L. (1994) error-correction comparison.
475
+
476
+ **Dataset usage.** One procedure sampled per task 1 DTT example.
477
+
478
+ ---
479
+
480
+ ## 11. Mastery criteria options + BIP components
481
+
482
+ ### 11.1 Mastery criteria
483
+
484
+ Seven mastery criteria conventions common in ABA practice.
485
+
486
+ 1. 80% accuracy across 2 consecutive sessions with ≥ 10 trials each.
487
+ 2. 90% accuracy across 3 consecutive sessions.
488
+ 3. 80% accuracy with 2 different therapists and 2 different settings (generalization-inclusive).
489
+ 4. 80% accuracy at independent prompt level across 3 sessions.
490
+ 5. 90% first-trial-correct across 5 consecutive sessions.
491
+ 6. Fluency criterion: correct responses within 3-second latency at 90% accuracy.
492
+ 7. Demonstrated use in natural environment × 3 independent instances.
493
+
494
+ **Citation.** CHH Ch. 28 ("Developing Behavior-Change Programs").
495
+
496
+ ### 11.2 BIP components (for task 2 recommendations)
497
+
498
+ When a target behavior is present in a session log, the task 2 recommendation field is structured along four BIP dimensions.
499
+
500
+ | Component | Purpose | Example content |
501
+ |---|---|---|
502
+ | **Antecedent strategies** | Prevent the behavior before it occurs by modifying the environment | Shorten tasks; offer choice; pre-teach coping; non-contingent attention; reduce demand difficulty; provide warnings before transitions |
503
+ | **Replacement behavior (teach)** | A skill that produces the same function more appropriately; pair with FCT | Teach "break please" for escape-function tantrums; teach "look at me" for attention-function disruption |
504
+ | **Consequence strategies** | Staff response after the behavior | Do not reinforce (extinction); redirection; blocking; DRA/DRI; differential reinforcement of replacement behavior |
505
+ | **Crisis plan** | Safety procedure if behavior escalates | Staff call-tree; environmental safety (clear hazards); agreed restraint/escape procedures per facility policy; post-incident debrief |
506
+
507
+ **Citations.**
508
+ - Antecedent strategies: CHH Ch. 25 ("Antecedent Interventions"); Smith, R. G. & Iwata, B. A. (1997). *JABA, 30*(2), 343–375.
509
+ - Replacement behaviors: Carr & Durand 1985 (FCT).
510
+ - Consequence strategies: CHH Ch. 22 (differential reinforcement), Ch. 24 (extinction).
511
+ - Crisis plan: no canonical JABA paper — cite BACB 5th ed. Task List G-15 / H-8 and the BACB *Ethics Code for Behavior Analysts* (2020). Flagged in section 15.
512
+
513
+ ---
514
+
515
+ ## 12. Escalation level (structured output for Task 2)
516
+
517
+ Every task 2 interpretation emits an **escalation level** as a classification label. This is the safety-critical head.
518
+
519
+ | Level | Meaning | Trigger examples |
520
+ |---|---|---|
521
+ | **1. Continue monitoring** | Current programming is effective; no changes | Mastery progression; rapid acquisition |
522
+ | **2. Adjust next session** | Implement a specific programming change at next session | Plateau -> change prompt strategy; prompt dependency -> introduce time delay |
523
+ | **3. Supervisor review** | Elevate to supervising BCBA within 24–48 hr | Regression; new emergence of problem behavior; unclear function |
524
+ | **4. Safety — immediate** | Stop program; contact supervisor and/or family immediately; consider crisis-plan activation | SIB escalation; aggression resulting in injury; high-rate elopement; extinction burst exceeding safety threshold |
525
+
526
+ **Clinical principle.** Escalation level is **ordinal** — mistaking level 4 for level 1 is catastrophically worse than mistaking level 2 for level 1. Evaluation uses quadratic-weighted κ (section 13) to penalize distant mistakes accordingly.
527
+
528
+ **Dataset usage.** Every task 2 example has a gold-standard escalation level derived deterministically from the pattern (section 7), the target behaviors (section 5), and severity markers.
529
+
530
+ **On distribution.** No research source prescribes a specific class balance for escalation labels. The pipeline config (not this taxonomy) sets a default that keeps the minority class (level 4 "safety-immediate") present for training representation while reflecting that most sessions do not require immediate safety action. The exact ratio is a *pipeline hyperparameter*, not a clinical truth; see `configs/generation.yaml`.
531
+
532
+ **Framing as our contribution.** No canonical source specifies this 4-level escalation ordinal. It is TRACE's operationalization, designed to (a) make safety a first-class output and (b) support quadratic-weighted κ evaluation. We cite it as a project design choice, not a clinical taxonomy.
533
+
534
+ ---
535
+
536
+ ## 13. Confidence level (structured output for Task 2)
537
+
538
+ Three-level confidence expression acknowledging uncertainty.
539
+
540
+ | Level | Meaning | Example justifications |
541
+ |---|---|---|
542
+ | **High** | Pattern and recommendation well-supported by the data in this log | Multiple sessions of clear data; IOA present and ≥ 80%; single clear pattern |
543
+ | **Moderate** | Data supports the hypothesis but alternatives cannot be fully ruled out | No IOA; few sessions; multiple partially-matching patterns |
544
+ | **Low** | Insufficient data to rule in a specific pattern; recommend data collection before programming changes | < 3 sessions; no IOA; high variability; contradictory signals |
545
+
546
+ **Citations.** Calibrated uncertainty expression follows general clinical-LM principles (Med-PaLM 2, Singhal et al. 2025). No single ABA-specific source; flagged as project innovation.
547
+
548
+ **Dataset usage.** Confidence is sampled based on objective data-quality features of the log (number of sessions, IOA presence, variance). Evaluation computes calibration metrics (ECE, Brier — section 13 of `literature-foundation.md`).
549
+
550
+ ---
551
+
552
+ ## 14. Coverage matrix (dimension interactions)
553
+
554
+ Not every combination is valid. The pipeline enforces these constraints:
555
+
556
+ | Teaching method | Compatible skill domains | Compatible learner profiles |
557
+ |---|---|---|
558
+ | DTT | All discrete VB-MAPP domains (Mand, Tact, Listener Responding, Echoic, Motor Imitation, VP/MTS, LRFFC, Intraverbal, Math, Reading early) | Early, School-Age |
559
+ | NET | Mand, Tact, Social Behavior, Spontaneous Vocal Behavior, Independent Play | All |
560
+ | Task Analysis | AFLS modules, Writing (multi-step), complex Math, domestic/vocational | School-Age, Adolescent, Adult |
561
+ | FCT | Triggered by target behavior in session log, not by skill domain | All |
562
+ | BST | N/A — staff-facing | Training-program variant |
563
+ | PRT | Mand, Tact, Social Behavior, Spontaneous Vocal Behavior | Early, School-Age |
564
+
565
+ | Target behavior | Plausible functions | Typical severity / escalation |
566
+ |---|---|---|
567
+ | SIB | All four; most often automatic or escape | L3 – L4 |
568
+ | Aggression | Most often escape or tangible | L3 – L4 |
569
+ | Elopement | Escape, tangible, or automatic | L2 – L4 (depends on environmental risk) |
570
+ | Tantrum | Most often escape or tangible | L2 – L3 |
571
+ | Non-compliance | Primarily escape | L1 – L2 |
572
+ | Stereotypy | Primarily automatic | L1 – L2 (unless interfering with learning) |
573
+ | Pica | Automatic (oral) | L3 – L4 (safety) |
574
+
575
+ **Dataset usage.** Generators sample valid combinations only. Invalid combinations (e.g., DTT for a multi-step handwashing skill) are not produced, or are produced deliberately as negative-training examples (rare; labeled).
576
+
577
+ ---
578
+
579
+ ## 15. Gaps (explicitly acknowledged)
580
+
581
+ The following areas in the taxonomy have weaker citation grounding. We cite what exists, note the limitation in the dataset card, and flag them as future-work extensions.
582
+
583
+ 1. **Crisis plan components** — no canonical JABA paper. Cited: BACB 5th ed. Task List G-15 / H-8 and the 2020 Ethics Code. Acceptable because BIP crisis planning is a BACB-regulated practice area.
584
+ 2. **Patterns 11–12** (MO shift, setting event) — session-level operationalization is TRACE's contribution; underlying concepts grounded in Michael 1993, Smith & Iwata 1997, Bijou & Baer 1961.
585
+ 3. **Disrobing, grabbing/snatching, cascading drift, comorbid skill-behavior** — considered for v1 but excluded because no canonical operational definition exists. We do not invent what the literature has not defined.
586
+ 4. **Essential for Living / PEAK / ABLLS-R** — not encoded in v1. VB-MAPP + AFLS provide sufficient coverage for baseline dataset.
587
+ 5. **Severity index within target behaviors** — no encoded numeric severity scale. v1 treats severity as implicit in the behavior type + escalation level.
588
+ 6. **Multi-function (mixed-function) behaviors** — v1 assigns a single function hypothesis; real clinical reality includes mixed functions. Flagged as a known limitation.
589
+ 7. **Cultural / linguistic variation** — v1 is English-only, US/North-American ABA convention. Flagged in dataset card limitations.
590
+ 8. **Escalation ordinal (4 levels) and confidence ordinal (3 levels)** — TRACE design choices, not citation-grounded taxonomies. Framed as project contributions; their clinical validity will be evaluated empirically (quadratic-weighted κ vs BCBA raters).
591
+ 9. **Class distributions** (escalation levels, learner profiles, pattern frequencies) — no research source prescribes exact proportions. These are pipeline hyperparameters tuned for training balance, not clinical truths.
592
+
593
+ ---
594
+
595
+ ## 16. Changelog
596
+
597
+ - **v1.0 (2026-04-22)** — initial taxonomy, drawn from VB-MAPP (Sundberg 2008), AFLS (Partington & Mueller 2012), Cooper/Heron/Heward (2020), Iwata (1982/1994), Hanley/Iwata/McCord (2003), Carr & Durand (1985), and supporting JABA operational-definition literature.
598
+
599
+ ---
600
+
601
+ ## 17. How this taxonomy is consumed by the pipeline
602
+
603
+ - `src/generate.py` (with the per-area generators under `src/generators/`) loads this taxonomy as structured values from the YAML files under `configs/`, which mirror this document.
604
+ - Each training example is produced by (a) sampling a valid combination of taxonomy values, (b) running the relevant generation template, (c) applying quality + safety filters.
605
+ - A provenance record is written per example: taxonomy slot values, template ID, teacher model, seed, filter thresholds, timestamp.
606
+ - If this taxonomy changes, the pipeline version and dataset version both bump; old data is preserved as a previous version on HuggingFace.
607
+
608
+ See `schema-v1.md` (next) for the precise JSON structure of training examples.
pyproject.toml ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "trace"
3
+ version = "1.0.0"
4
+ description = "TRACE — Taxonomy-Referenced ABA Clinical Examples. Synthetic dataset + generator for Applied Behavior Analysis teaching-program generation and session interpretation."
5
+ readme = "README.md"
6
+ requires-python = ">=3.10"
7
+ license = { text = "MIT" }
8
+ authors = [
9
+ { name = "Festus Kahunla" },
10
+ ]
11
+ maintainers = [
12
+ { name = "Pombo Labs" },
13
+ ]
14
+ keywords = [
15
+ "applied-behavior-analysis",
16
+ "aba",
17
+ "autism",
18
+ "clinical-nlp",
19
+ "synthetic-data",
20
+ "instruction-tuning",
21
+ "taxonomy",
22
+ ]
23
+ classifiers = [
24
+ "Development Status :: 4 - Beta",
25
+ "Intended Audience :: Science/Research",
26
+ "License :: OSI Approved :: MIT License",
27
+ "Programming Language :: Python :: 3",
28
+ "Programming Language :: Python :: 3.10",
29
+ "Programming Language :: Python :: 3.11",
30
+ "Programming Language :: Python :: 3.12",
31
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
32
+ ]
33
+
34
+ dependencies = [
35
+ "pyyaml>=6.0",
36
+ ]
37
+
38
+ [project.optional-dependencies]
39
+ dataset-loading = [
40
+ "datasets>=2.18.0",
41
+ ]
42
+
43
+ [project.urls]
44
+ Homepage = "https://github.com/Pombo-Labs/TRACE"
45
+ Repository = "https://github.com/Pombo-Labs/TRACE"
46
+ Documentation = "https://github.com/Pombo-Labs/TRACE#readme"
47
+ Issues = "https://github.com/Pombo-Labs/TRACE/issues"
48
+
49
+ [build-system]
50
+ requires = ["hatchling"]
51
+ build-backend = "hatchling.build"
52
+
53
+ [tool.hatch.build.targets.wheel]
54
+ packages = ["src"]
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # Runtime dependencies for the TRACE generator + dataset loading.
2
+ #
3
+ # Core (required to regenerate the corpus from configs):
4
+ pyyaml>=6.0
5
+
6
+ # Optional — only needed if you load TRACE via the Hugging Face datasets
7
+ # library (`load_dataset("PomboLabs/TRACE")`). Skip if you only consume the
8
+ # raw JSONL files under data/splits/.
9
+ datasets>=2.18.0
src/__init__.py ADDED
File without changes
src/compile_curation.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Compile test.jsonl + sanity.jsonl from curation_pool.jsonl.
2
+
3
+ No hand-curation: the whole curation pool is promoted as the
4
+ evaluation corpus. A small stratified slice is carved out for
5
+ sanity.jsonl (training smoke-test); the remainder becomes test.jsonl.
6
+
7
+ Stratification is by category (method for teaching programs,
8
+ pattern_class for session interpretation) so both splits reflect the
9
+ pool's distribution.
10
+
11
+ Reads:
12
+ data/splits/curation_pool.jsonl
13
+
14
+ Writes:
15
+ data/splits/test.jsonl
16
+ data/splits/sanity.jsonl
17
+
18
+ Usage:
19
+ uv run python src/compile_curation.py
20
+ uv run python src/compile_curation.py --sanity-n 20 --seed 42
21
+ """
22
+
23
+ import argparse
24
+ import json
25
+ import random
26
+ import sys
27
+ from collections import Counter, defaultdict
28
+ from pathlib import Path
29
+
30
+ REPO_ROOT = Path(__file__).resolve().parent.parent
31
+ POOL_PATH = REPO_ROOT / "data" / "splits" / "curation_pool.jsonl"
32
+ TEST_JSONL = REPO_ROOT / "data" / "splits" / "test.jsonl"
33
+ SANITY_JSONL = REPO_ROOT / "data" / "splits" / "sanity.jsonl"
34
+
35
+
36
def category_of(example: dict) -> str:
    """Return the stratification category for one example.

    Teaching-program examples stratify by generation method;
    session-interpretation examples stratify by pattern class.
    A missing label maps to "?" so malformed rows still land in a bucket.
    """
    meta = example["meta"]
    labels = meta["gold_labels"]
    key = "method" if meta["task_type"] == "teaching_program" else "pattern_class"
    return labels.get(key, "?")
41
+
42
+
43
def stratified_sanity_sample(
    pool: list[dict], sanity_n: int, rng: random.Random
) -> tuple[list[dict], list[dict]]:
    """Carve sanity_n stratified examples out of the pool; return (test, sanity)."""
    # Bucket the pool by category, preserving first-seen category order
    # (this order also fixes the rng.sample call sequence below).
    buckets: dict[str, list[dict]] = defaultdict(list)
    for example in pool:
        buckets[category_of(example)].append(example)

    # Largest-remainder apportionment: each category gets sanity slots in
    # proportion to its share of the pool.
    pool_size = len(pool)
    exact = {cat: sanity_n * len(members) / pool_size for cat, members in buckets.items()}
    quotas = {cat: int(share) for cat, share in exact.items()}
    leftover = sanity_n - sum(quotas.values())
    # Hand remaining slots to the categories with the largest fractional parts.
    by_fraction = sorted(
        exact.items(), key=lambda kv: kv[1] - int(kv[1]), reverse=True
    )
    for cat, _ in by_fraction[:leftover]:
        quotas[cat] += 1

    sanity: list[dict] = []
    for cat, quota in quotas.items():
        if quota <= 0:
            continue
        # A quota can exceed a tiny category; cap at the bucket size.
        sanity.extend(rng.sample(buckets[cat], min(quota, len(buckets[cat]))))

    chosen_ids = {ex["meta"]["example_id"] for ex in sanity}
    remaining = [ex for ex in pool if ex["meta"]["example_id"] not in chosen_ids]
    return remaining, sanity
73
+
74
+
75
def tag_split(example: dict, split_name: str) -> dict:
    """Deep-copy and stamp the curation split label into meta.

    The JSON round-trip serves two purposes: it deep-copies the example
    (so the caller's object is untouched) and it fails fast if the
    example is not JSON-serializable before it reaches the output file.
    """
    stamped = json.loads(json.dumps(example))
    stamped["meta"]["curation"] = {"target_split": split_name}
    return stamped
80
+
81
+
82
def main() -> int:
    """Split curation_pool.jsonl into test.jsonl and sanity.jsonl.

    Reads the curation pool, carves out a stratified sanity slice of
    --sanity-n examples, and writes both files with a `curation` split
    stamp in each example's meta.

    Returns:
        Process exit code: 0 on success; 1 when the pool file is missing
        or --sanity-n would consume the whole pool.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--sanity-n", type=int, default=20)
    parser.add_argument("--seed", type=int, default=42)
    args = parser.parse_args()

    if not POOL_PATH.exists():
        print(f"Curation pool missing: {POOL_PATH}", file=sys.stderr)
        print("Run `uv run python src/split_data.py` first.", file=sys.stderr)
        return 1

    pool: list[dict] = []
    with open(POOL_PATH) as f:
        for line in f:
            # Robustness fix: tolerate blank/trailing lines in the JSONL
            # (json.loads("") would raise).
            if line.strip():
                pool.append(json.loads(line))

    if args.sanity_n >= len(pool):
        print(
            f"sanity_n ({args.sanity_n}) must be smaller than pool size ({len(pool)}).",
            file=sys.stderr,
        )
        return 1

    rng = random.Random(args.seed)
    test, sanity = stratified_sanity_sample(pool, args.sanity_n, rng)

    TEST_JSONL.parent.mkdir(parents=True, exist_ok=True)
    with open(TEST_JSONL, "w") as f:
        for ex in test:
            f.write(json.dumps(tag_split(ex, "test")) + "\n")
    with open(SANITY_JSONL, "w") as f:
        for ex in sanity:
            f.write(json.dumps(tag_split(ex, "sanity")) + "\n")

    def fmt_dist(rows: list[dict]) -> str:
        # Per-category counts for the summary line, e.g. "dtt=12, net=8".
        c = Counter(category_of(r) for r in rows)
        return ", ".join(f"{k}={v}" for k, v in sorted(c.items()))

    # Fix: was `print(f"Wrote:")` — an f-string with no placeholders (F541).
    print("Wrote:")
    print(f" {TEST_JSONL} {len(test)} examples ({fmt_dist(test)})")
    print(f" {SANITY_JSONL} {len(sanity)} examples ({fmt_dist(sanity)})")
    return 0
124
+
125
+
126
if __name__ == "__main__":
    # Propagate main()'s int return value as the process exit code.
    raise SystemExit(main())
src/generate.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """TRACE synthetic data generator — orchestrator.
2
+
3
+ Routes to per-area generators. Each area is self-contained under configs/<area>/
4
+ with its own taxonomy, template, and compatibility rules.
5
+
6
+ Usage:
7
+ # Generate one area (count from generation.yaml)
8
+ uv run python src/generate.py --area dtt
9
+
10
+ # Generate one area with explicit count
11
+ uv run python src/generate.py --area dtt --n 50
12
+
13
+ # Generate all enabled areas
14
+ uv run python src/generate.py --all
15
+
16
+ # Write to a specific file (single-area mode only)
17
+ uv run python src/generate.py --area dtt --n 10 --out data/samples/dtt-10.jsonl
18
+ """
19
+
20
+ import argparse
21
+ import hashlib
22
+ import json
23
+ import random
24
+ import sys
25
+ from pathlib import Path
26
+
27
+ # Path setup so we can run as a script
28
+ REPO_ROOT = Path(__file__).resolve().parent.parent
29
+ sys.path.insert(0, str(REPO_ROOT))
30
+
31
+ from src.generators.aba_dtt import DTTGenerator
32
+ from src.generators.aba_net import NETGenerator
33
+ from src.generators.aba_session_interp import SessionInterpretationGenerator
34
+ from src.generators.aba_task_analysis import TaskAnalysisGenerator
35
+ from src.generators.base import load_yaml
36
+
37
+ CONFIG_DIR = REPO_ROOT / "configs"
38
+
39
+ # Area registry — add new generators here as they come online.
40
+ GENERATORS = {
41
+ "dtt": DTTGenerator,
42
+ "net": NETGenerator,
43
+ "task_analysis": TaskAnalysisGenerator,
44
+ "session_interpretation": SessionInterpretationGenerator,
45
+ # "fct": FCTGenerator,
46
+ # "bst": BSTGenerator,
47
+ # "prt": PRTGenerator,
48
+ }
49
+
50
+
51
def deterministic_area_offset(area: str) -> int:
    """Stable per-area seed offset in [0, 10000).

    Built on SHA-256 rather than Python's built-in ``hash()``: the latter
    is salted per process (PYTHONHASHSEED randomization), which would make
    the generated dataset differ run-to-run. A cryptographic digest gives
    the same offset on every process and Python version.
    """
    digest_hex = hashlib.sha256(area.encode()).hexdigest()
    return int(digest_hex, 16) % 10000
60
+
61
+
62
def generate_area(area: str, n: int, seed: int, out_stream) -> int:
    """Generate `n` examples for `area`; write to `out_stream` as JSONL. Returns count written."""
    if area not in GENERATORS:
        raise ValueError(f"Generator for area '{area}' not registered. Available: {list(GENERATORS)}")
    area_generator = GENERATORS[area](CONFIG_DIR)
    area_rng = random.Random(seed)
    # Lazily render each example and stream it out one JSON line at a time.
    lines = (json.dumps(area_generator.render_example(area_rng)) + "\n" for _ in range(n))
    out_stream.writelines(lines)
    return n
73
+
74
+
75
def main():
    """CLI entry point: generate one area (--area) or all enabled areas (--all).

    In --all mode each area writes to <output_dir>/<area>.jsonl with a
    deterministic per-area seed offset; in single-area mode output goes
    to --out or stdout. CLI flags --n/--seed override generation.yaml.
    """
    parser = argparse.ArgumentParser(description="TRACE per-area generator orchestrator.")
    parser.add_argument("--area", default=None, help="Generate only this area.")
    parser.add_argument("--all", action="store_true", help="Generate all enabled areas from generation.yaml.")
    parser.add_argument("--n", type=int, default=None, help="Override example count.")
    parser.add_argument("--seed", type=int, default=None, help="Override global seed.")
    parser.add_argument("--out", type=str, default=None, help="Output JSONL file (single-area mode).")
    args = parser.parse_args()

    gen_config = load_yaml(CONFIG_DIR / "generation.yaml")
    # CLI --seed wins over the config's global seed.
    seed = args.seed if args.seed is not None else gen_config["seed"]

    if not args.area and not args.all:
        parser.error("Must specify --area <name> or --all.")

    # NOTE(review): when both --all and --area are given, --all wins and
    # --out is silently ignored — confirm that is intended.
    if args.all:
        # Multi-area: write one file per area under output_dir
        output_dir = REPO_ROOT / gen_config["output_dir"]
        output_dir.mkdir(parents=True, exist_ok=True)
        total = 0
        for area, area_cfg in gen_config["areas"].items():
            if not area_cfg.get("enabled"):
                print(f" - {area}: disabled, skipping")
                continue
            if area not in GENERATORS:
                print(f" - {area}: generator not yet built, skipping")
                continue
            n = area_cfg["n"]
            out_path = output_dir / f"{area}.jsonl"
            with open(out_path, "w") as f:
                # Per-area offset keeps each area's RNG stream independent
                # of the others while staying reproducible.
                generate_area(area, n, seed + deterministic_area_offset(area), f)
            print(f" - {area}: {n} examples -> {out_path}")
            total += n
        print(f"\nTotal generated: {total} examples.")
    else:
        # Single-area mode
        area = args.area
        n = args.n if args.n is not None else gen_config["areas"][area]["n"]
        # stdout is left open; only a user-supplied file is closed.
        out_stream = open(args.out, "w") if args.out else sys.stdout
        try:
            generate_area(area, n, seed, out_stream)
        finally:
            if args.out:
                out_stream.close()
        if args.out:
            print(f"Wrote {n} examples to {args.out}")
121
+
122
+
123
if __name__ == "__main__":
    # Script entry point; argparse handles usage errors via SystemExit.
    main()
src/generators/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ """Generator modules for TRACE synthetic data pipeline."""
src/generators/aba_dtt.py ADDED
@@ -0,0 +1,293 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """DTT area generator.
2
+
3
+ Loads configs/dtt/ + configs/shared/ and produces JSONL examples with
4
+ clinically-consistent combinations sampled from the DTT taxonomy.
5
+ """
6
+
7
+ import random
8
+ from pathlib import Path
9
+
10
+ from .base import (
11
+ article,
12
+ load_area,
13
+ load_shared,
14
+ render_prompt_sequence,
15
+ sample_stimuli,
16
+ strip_trailing_period,
17
+ make_example_envelope,
18
+ )
19
+
20
+ # Area identifier + task type
21
+ AREA = "dtt"
22
+ TASK_TYPE = "teaching_program"
23
+
24
+
25
+ # SD (Discriminative Stimulus) generation — clinically nuanced
26
+
27
+
28
def generate_sd(skill_target: str, domain_id: str, rng: random.Random) -> dict:
    """Generate primary SD, variations, and presentation description.

    Sub-domain aware: Tact-of-colors gets "What color?", not "What is this?".
    The first keyword rule matching the lower-cased skill wins. The `rng`
    parameter is part of the shared slot-generator interface; this
    generator is fully deterministic and does not draw from it.
    """
    skill = skill_target.lower()

    def pick(rules, fallback):
        # rules: ordered (keywords, (primary, variations, presentation))
        # pairs; earlier entries take precedence, mirroring an if/elif
        # keyword cascade. `fallback` is the domain default.
        chosen = fallback
        for keywords, sd_fields in rules:
            if any(kw in skill for kw in keywords):
                chosen = sd_fields
                break
        primary, variations, presentation = chosen
        return {
            "primary_sd": primary,
            "sd_variations": variations,
            "sd_presentation": presentation,
        }

    if domain_id == "tact":
        return pick(
            [
                (("color",),
                 ("What color?", "\"What color is this?\"; \"Tell me the color\"",
                  "Present a single-color object and ask while pointing to it.")),
                (("shape",),
                 ("What shape?", "\"What shape is this?\"; \"Tell me the shape\"",
                  "Present a clearly-shaped object or shape card and ask while pointing to it.")),
                (("action",),
                 ("What is he doing?",
                  "\"What action?\"; \"What is she doing?\"; \"What is happening here?\"",
                  "Present an action picture, video clip, or live demonstration.")),
                (("emotion", "feeling"),
                 ("How does he feel?", "\"What emotion?\"; \"How is she feeling?\"",
                  "Present a photograph or emoji card depicting the target emotion.")),
                (("body part",),
                 ("What is this?", "\"Tell me\"; \"Name this body part\"",
                  "Point to the target body part (on self, learner, or doll) and ask.")),
                (("animal",),
                 ("What animal is this?", "\"What is this?\"; \"Name this animal\"",
                  "Present an animal picture card and ask while pointing.")),
                (("preposition",),
                 ("Where is it?", "\"Where is the {object}?\"; \"Tell me where\"",
                  "Arrange two objects in a target spatial relation and ask about the position.")),
                (("adjective",),
                 ("What is it like?", "\"Describe it\"; \"Is it big or little?\"",
                  "Present two contrasting items and prompt a comparative description.")),
                (("past", "event"),
                 ("What happened?", "\"What did she do?\"; \"Tell me what you did\"",
                  "Reference a recently completed action or event familiar to the learner.")),
                (("categor",),
                 ("What category?",
                  "\"What group does this belong to?\"; \"What kind of thing?\"",
                  "Present a clearly categorizable item and prompt the category label.")),
            ],
            ("What is this?", "\"Tell me what you see\"; \"What's that?\"; \"Name this\"",
             "Present the target stimulus within visual field and ask while pointing to it."),
        )

    if domain_id == "vp_mts":
        return pick([], (
            "Match", "\"Find the same\"; \"Put with same\"",
            "Present the sample stimulus and an array of comparison stimuli simultaneously.",
        ))

    if domain_id in {"listener_responding", "lrffc"}:
        return pick([], (
            "Touch [target item]",
            "\"Point to [target item]\"; \"Give me [target item]\"; \"Show me [target item]\"",
            "Present an array of items on the table and deliver a clear verbal SD.",
        ))

    if domain_id == "reading":
        return pick(
            [
                (("sight word", "word"),
                 ("What word?", "\"Read this word\"; \"What does it say?\"",
                  "Present the sight-word card and deliver the SD.")),
                (("letter",),
                 ("What letter is this?", "\"Read this letter\"; \"What does this say?\"",
                  "Present the letter flashcard.")),
                (("sentence", "passage"),
                 ("Read this.", "\"Read it aloud\"; \"What does this sentence say?\"",
                  "Present the written sentence or passage at an appropriate reading level.")),
            ],
            ("What letter is this?", "\"Read this letter\"; \"What does this say?\"",
             "Present the letter or word on a flashcard."),
        )

    if domain_id == "writing":
        return pick([], (
            "Write the target.", "\"Trace this\"; \"Copy this\"; \"Write {word}\"",
            "Provide pencil and paper; deliver a clear verbal instruction naming the target letter or word.",
        ))

    if domain_id == "math":
        return pick(
            [
                (("numeral", "identif"),
                 ("What number is this?", "\"Which number?\"; \"Tell me the number\"",
                  "Present the numeral card.")),
                (("count",),
                 ("How many?", "\"Count these\"; \"How many are there?\"",
                  "Present the set of objects to count.")),
                (("add", "subtract"),
                 ("Solve this.", "\"What is the answer?\"; \"How much?\"",
                  "Present a written or spoken arithmetic problem at target level.")),
            ],
            ("How many?", "\"Count these\"; \"What number is this?\"",
             "Present the numeral card or a set of objects to count."),
        )

    return pick([], (
        "Show me", "\"Do this\"; \"Your turn\"",
        "Present relevant stimuli and deliver a clear verbal SD.",
    ))
110
+
111
+
112
+ def _sd(primary: str, variations: str, presentation: str) -> dict:
113
+ return {"primary_sd": primary, "sd_variations": variations, "sd_presentation": presentation}
114
+
115
+
116
def current_prompt_guidance(mastery_state_id: str) -> str:
    """Translate mastery state into current-prompt-level guidance.

    Early states get intrusive prompts with fading; late states get
    minimal prompts; unrecognized states fall back to a generic line.
    """
    intrusive = "begin at higher prompt levels (full-physical or partial-physical) with systematic fading."
    minimal = "use minimal prompts (positional or verbal hint) only when needed; expect independent responding."
    guidance_by_state = {
        "emerging": intrusive,
        "developing": intrusive,
        "approaching": "deliver gestural or positional prompts; fade toward independence.",
        "near": minimal,
        "mastered": minimal,
        "generalization": minimal,
    }
    return guidance_by_state.get(mastery_state_id, "prompt level calibrated to current performance.")
125
+
126
+
127
+ # DTTGenerator class
128
+
129
+
130
class DTTGenerator:
    """Generates teaching-program examples for the DTT area.

    Loads its self-contained taxonomy + template + compatibility rules from
    configs/dtt/ and uses shared primitives from configs/shared/.
    """

    def __init__(self, config_dir: Path):
        # All configs are loaded once at construction; sampling and
        # rendering afterwards are pure dict/RNG work.
        self.config_dir = Path(config_dir)
        self.shared = load_shared(self.config_dir)
        self.area = load_area(self.config_dir, AREA)
        self.template = self.area["template"]
        self.taxonomy = self.area["taxonomy"]
        self.compat = self.area["compatibility"]
        self.prompt_types = self.shared["prompt_types"]["prompt_types"]

    # sampling

    def sample_combination(self, rng: random.Random) -> dict:
        """Sample a clinically-valid DTT combo.

        Draws every taxonomy slot (level, domain, skill, learner profile,
        mastery state, prompt hierarchy, error correction, reinforcement
        schedule, mastery criterion) from `rng`, applying the compatibility
        rules so only valid combinations are produced. NOTE(review): the
        rng call order is fixed; reordering calls would change the dataset
        generated for a given seed.
        """
        # Sample level with weights from compatibility config
        level_weights = self.compat["level_sampling_weights"]
        levels = list(level_weights.keys())
        weights = [level_weights[k] for k in levels]
        level_id = rng.choices(levels, weights=weights)[0]

        # Sample domain — filter to only those that define this level (some
        # domains like Writing don't have L3 in the DTT taxonomy because
        # long-form writing is Task Analysis territory).
        all_domains = self.taxonomy["skill_domains"]["vbmapp"]["domains"]
        domains_with_level = [d for d in all_domains if level_id in d]
        if not domains_with_level:
            # Fall back: pick from all domains and re-sample a level they support.
            # Assumes every domain defines at least one weighted level —
            # rng.choice raises on an empty list otherwise; TODO confirm the
            # taxonomy guarantees this.
            domain = rng.choice(all_domains)
            available_levels = [lv for lv in levels if lv in domain]
            level_id = rng.choice(available_levels)
        else:
            domain = rng.choice(domains_with_level)
        skill_target = rng.choice(domain[level_id])

        # Learner profile filtered by level
        profiles = self.shared["learner_profiles"]["profiles"]
        allowed_profile_ids = set(self.compat["level_to_learner_profiles"][level_id])
        profile = rng.choice([p for p in profiles if p["id"] in allowed_profile_ids])

        mastery_state = rng.choice(self.shared["mastery_states"]["states"])

        # Prompt hierarchy then error-correction (filtered for errorless compat):
        # the "errorless" error-correction is only valid with one specific
        # hierarchy named by the compatibility config.
        prompt_hierarchy = rng.choice(self.taxonomy["prompt_hierarchies"])
        all_ecs = self.taxonomy["error_corrections"]
        required_hierarchy = self.compat.get("errorless_requires_hierarchy")
        if prompt_hierarchy["id"] != required_hierarchy:
            valid_ecs = [ec for ec in all_ecs if ec["id"] != "errorless"]
        else:
            valid_ecs = all_ecs
        error_correction = rng.choice(valid_ecs)

        # Reinforcement schedule filtered by mastery state
        all_schedules = self.taxonomy["reinforcement_schedules"]
        allowed_schedule_ids = set(self.compat["mastery_to_reinforcement"][mastery_state["id"]])
        valid_schedules = [s for s in all_schedules if s["id"] in allowed_schedule_ids]
        if not valid_schedules:
            # Safety net: continuous reinforcement is always a valid default.
            valid_schedules = [s for s in all_schedules if s["id"] == "crf"]
        reinforcement_schedule = rng.choice(valid_schedules)

        mastery_criterion = rng.choice(self.taxonomy["mastery_criteria"])

        return {
            "skill_target": skill_target,
            "domain_id": domain["id"],
            "domain_name": domain["name"],
            "level_id": level_id,
            "learner_profile": profile,
            "mastery_state": mastery_state,
            "prompt_hierarchy": prompt_hierarchy,
            "reinforcement_schedule": reinforcement_schedule,
            "error_correction": error_correction,
            "mastery_criterion": mastery_criterion,
        }

    # slot computation

    def compute_slots(self, combo: dict, rng: random.Random) -> dict:
        """Expand a sampled combo into the flat slot dict the templates consume.

        Draws a few extra slots (IOA frequency, generalization counts) from
        `rng`, so call order relative to sample_combination matters for
        reproducibility.
        """
        array_size_info = self.compat["array_size_by_level"][combo["level_id"]]
        array_size_n = array_size_info["n"]
        array_size_text = array_size_info["text"]

        sd = generate_sd(combo["skill_target"], combo["domain_id"], rng)
        stim = sample_stimuli(combo["skill_target"], array_size_n, rng)

        # Some SD formats have no stimulus array, hence no distractors.
        if stim["distractors"]:
            distractor_block = f"Distractor stimuli: {', '.join(stim['distractors'])}"
        else:
            distractor_block = "Distractor stimuli: N/A (no-array SD format)"

        mastery_state = combo["mastery_state"]

        return {
            "skill_target": combo["skill_target"],
            "curriculum_ref": f"{combo['domain_name']} {combo['level_id']}",
            "learner_profile_name": combo["learner_profile"]["name"],
            "learner_profile_article": article(combo["learner_profile"]["name"]),
            "mastery_state_name": mastery_state["name"],
            # Fall back to a lower-cased name when the state has no "short" form.
            "mastery_state_short": mastery_state.get("short", mastery_state["name"].lower()),
            "primary_sd": sd["primary_sd"],
            "sd_variations": sd["sd_variations"],
            "sd_presentation": sd["sd_presentation"],
            "prompt_hierarchy_name": combo["prompt_hierarchy"]["name"],
            "prompt_sequence": render_prompt_sequence(
                combo["prompt_hierarchy"]["sequence"], self.prompt_types
            ),
            "current_prompt_guidance": current_prompt_guidance(mastery_state["id"]),
            "array_size": array_size_text,
            "target_stimuli": ", ".join(stim["targets"]),
            "distractor_block": distractor_block,
            "error_correction_steps": combo["error_correction"]["steps"].strip(),
            "reinforcement_schedule_name": combo["reinforcement_schedule"]["name"],
            "reinforcement_description": strip_trailing_period(combo["reinforcement_schedule"]["description"]),
            "mastery_criterion_text": combo["mastery_criterion"]["text"],
            "ioa_frequency": rng.choice([3, 4, 5]),
            "n_generalization_therapists": rng.choice([2, 3]),
            "n_generalization_settings": rng.choice([2, 3]),
        }

    # rendering

    def render_example(self, rng: random.Random) -> dict:
        """Sample a combo, fill the templates, and return one chat-format example.

        The returned envelope carries gold labels for evaluation and a
        provenance record tying the example back to its taxonomy cells.
        """
        combo = self.sample_combination(rng)
        slots = self.compute_slots(combo, rng)

        user_variant = rng.choice(self.template["user_variants"])
        user_content = user_variant.format(**slots)
        assistant_content = self.template["assistant_template"].format(**slots)

        gold_labels = {
            "method": AREA,
            "domain": f"VB-MAPP.{combo['domain_id']}",
            "level": combo["level_id"],
            "learner_profile": combo["learner_profile"]["id"],
            "mastery_state": combo["mastery_state"]["id"],
        }
        provenance = {
            "layer": 1,
            "area": AREA,
            "template_id": self.template["template_id"],
            "taxonomy_cells": {
                "skill_target": combo["skill_target"],
                "prompt_hierarchy": combo["prompt_hierarchy"]["id"],
                "reinforcement_schedule": combo["reinforcement_schedule"]["id"],
                "error_correction": combo["error_correction"]["id"],
                "mastery_criterion": combo["mastery_criterion"]["id"],
            },
            # Templated examples have no LLM teacher.
            "teacher_model": None,
            # First word of the RNG's internal state, truncated — a cheap tag
            # tying the example to the RNG stream (not the user-supplied seed).
            "seed_tag": str(rng.getstate()[1][0] % 100000),
        }

        return make_example_envelope(
            system_content=self.template["system_prompt"],
            user_content=user_content,
            assistant_content=assistant_content,
            task_type=TASK_TYPE,
            gold_labels=gold_labels,
            provenance=provenance,
        )
src/generators/aba_net.py ADDED
@@ -0,0 +1,216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """NET area generator.
2
+
3
+ Loads configs/net/ + configs/shared/ and produces naturalistic teaching
4
+ program examples. Mand / Social / Spontaneous Vocal / some Intraverbal skills.
5
+ """
6
+
7
+ import random
8
+ from pathlib import Path
9
+
10
+ from .base import (
11
+ load_area,
12
+ load_shared,
13
+ make_example_envelope,
14
+ )
15
+
16
+ AREA = "net"
17
+ TASK_TYPE = "teaching_program"
18
+
19
+
20
def pick_mo_category(skill_target: str, domain_id: str, compat: dict) -> str:
    """Map a skill target to a motivating-operation (MO) category.

    The domain is checked first so shared keywords (e.g., 'preferred' appears
    in both mand skills and spontaneous-vocal skills) cannot cause cross-domain
    false matches.  Within a domain, keyword checks run in priority order: the
    first match wins, so more specific cues precede the catch-all default.

    Args:
        skill_target: Free-text skill description (matched case-insensitively).
        domain_id: Taxonomy domain id ('mand', 'social', 'spontaneous_vocal',
            'intraverbal'); any other value falls through to the configured
            default.
        compat: Compatibility config; only 'default_mo_category' is read, and
            only when the domain is unrecognized.

    Returns:
        An MO category id string.
    """
    s = skill_target.lower()

    if domain_id == "mand":
        if "bathroom" in s or "potty" in s or "toilet" in s or "restroom" in s:
            return "mand_bathroom"
        if "break" in s:
            return "mand_break"
        if "all done" in s or "completion" in s or "finished" in s:
            return "mand_completion"
        if "missing" in s:
            return "mand_missing"
        if "help" in s:
            return "mand_help"
        # The quoted form catches e.g. 'mands "what" questions' where the
        # token keeps its quotes; the bare-token form catches plain usage.
        if '"what"' in s or "what" in s.split():
            return "mand_info_what"
        if '"where"' in s or "where" in s.split():
            return "mand_info_where"
        if "attention" in s or "peers" in s:
            return "mand_attention"
        if "action" in s:
            return "mand_action"
        return "mand_item"

    if domain_id == "social":
        # "initiate" is a substring of "initiates", so one check covers both
        # (the original also tested "initiates", which could never fire).
        if "initiate" in s:
            return "social_initiation"
        if "turn" in s:
            return "social_turn_taking"
        if "reciprocal" in s or "conversation" in s:
            return "intraverbal_multi_turn"
        return "social_initiation"

    if domain_id == "spontaneous_vocal":
        if "greet" in s:
            return "spontaneous_greeting"
        return "spontaneous_comment"

    if domain_id == "intraverbal":
        if "multi-turn" in s or "conversation" in s:
            return "intraverbal_multi_turn"
        if "routine" in s:
            return "intraverbal_routine"
        return "intraverbal_multi_turn"

    return compat["default_mo_category"]
71
+
72
+
73
def pick_mo_arrangement(mo_category: str, mo_arrangements: list, rng: random.Random) -> dict:
    """Pick an MO arrangement applicable to *mo_category*.

    Chooses uniformly among arrangements whose ``applies_to`` list contains
    the category; if none apply, chooses uniformly from the full list instead.
    """
    applicable = [arr for arr in mo_arrangements if mo_category in arr["applies_to"]]
    pool = applicable if applicable else mo_arrangements
    return rng.choice(pool)
79
+
80
+
81
def current_prompt_guidance(mastery_state_id: str) -> str:
    """Return NET prompt-level guidance text for a mastery state.

    Earlier states receive fuller prompting guidance; later states rely on
    the MO alone.  Unrecognized states get a generic calibration note.
    """
    model_prompt = "begin with a model prompt delivered during the motivated moment; fade to expectant look across sessions."
    time_delay = "use time delay (0 s -> 2 s -> 4 s) before delivering any model prompt; reinforce independent responses differentially."
    expectant_only = "use expectant look only; avoid direct prompts. The MO itself should be sufficient to occasion the response."
    guidance_by_state = {
        "emerging": model_prompt,
        "developing": model_prompt,
        "approaching": time_delay,
        "near": expectant_only,
        "mastered": expectant_only,
        "generalization": expectant_only,
    }
    return guidance_by_state.get(mastery_state_id, "prompt level calibrated to current performance.")
90
+
91
+
92
class NETGenerator:
    """Generator for NET (naturalistic teaching) program examples.

    Loads the ``net`` area configs plus the shared configs at construction
    time, then renders one complete example per call to
    :meth:`render_example`.  All randomness flows through the ``rng``
    argument, so the exact sequence of draws is part of the reproducibility
    contract.
    """

    def __init__(self, config_dir: Path):
        self.config_dir = Path(config_dir)
        # Shared configs: learner profiles, mastery states, prompt types.
        self.shared = load_shared(self.config_dir)
        # Area bundle for "net": template + taxonomy + compatibility rules.
        self.area = load_area(self.config_dir, AREA)
        self.template = self.area["template"]
        self.taxonomy = self.area["taxonomy"]
        self.compat = self.area["compatibility"]

    def sample_combination(self, rng: random.Random) -> dict:
        """Sample one internally consistent taxonomy combination.

        Draws (in order): level, domain, skill target, learner profile,
        mastery state, then the MO/prompt/mastery taxonomy cells.
        """
        # Level with weights
        level_weights = self.compat["level_sampling_weights"]
        levels = list(level_weights.keys())
        weights = [level_weights[k] for k in levels]
        level_id = rng.choices(levels, weights=weights)[0]

        # Sample domain with a level match
        all_domains = self.taxonomy["skill_domains"]["vbmapp"]["domains"]
        # Membership test is against the domain dict's keys: a domain "has"
        # a level when it carries a skill list under that level's key.
        domains_with_level = [d for d in all_domains if level_id in d]
        if not domains_with_level:
            # Fallback: keep a random domain and resample a level it covers.
            # NOTE(review): assumes every domain carries at least one of the
            # configured levels — otherwise rng.choice below raises on an
            # empty list. Verify against configs/net/taxonomy.yaml.
            domain = rng.choice(all_domains)
            available_levels = [lv for lv in levels if lv in domain]
            level_id = rng.choice(available_levels)
        else:
            domain = rng.choice(domains_with_level)
        skill_target = rng.choice(domain[level_id])

        # Learner profile
        allowed = set(self.compat["level_to_learner_profiles"][level_id])
        profiles = self.shared["learner_profiles"]["profiles"]
        profile = rng.choice([p for p in profiles if p["id"] in allowed])

        mastery_state = rng.choice(self.shared["mastery_states"]["states"])

        mo_category = pick_mo_category(skill_target, domain["id"], self.compat)
        mo_arrangement = pick_mo_arrangement(mo_category, self.taxonomy["mo_arrangements"], rng)
        # Prefer arrangement-specific contexts; fall back to the global pool
        # when the arrangement defines none (missing key or empty list).
        natural_context = rng.choice(
            mo_arrangement.get("context_examples") or self.taxonomy["natural_contexts"]
        )
        prompt_strategy = rng.choice(self.taxonomy["prompt_strategies"])
        mastery_criterion = rng.choice(self.taxonomy["mastery_criteria"])

        return {
            "skill_target": skill_target,
            "domain_id": domain["id"],
            "domain_name": domain["name"],
            "level_id": level_id,
            "learner_profile": profile,
            "mastery_state": mastery_state,
            "mo_category": mo_category,
            "mo_arrangement": mo_arrangement,
            "natural_context": natural_context,
            "prompt_strategy": prompt_strategy,
            "mastery_criterion": mastery_criterion,
        }

    def compute_slots(self, combo: dict, rng: random.Random) -> dict:
        """Derive the flat slot dict used to fill the user/assistant templates."""
        mastery_state = combo["mastery_state"]
        reinforcer_map = self.taxonomy["natural_reinforcer_examples"]
        # Fall back to the "default" reinforcer when the MO category has no
        # dedicated entry in the taxonomy.
        natural_reinforcer = reinforcer_map.get(combo["mo_category"], reinforcer_map["default"])

        # Build a natural-opportunity description from the MO arrangement + natural context
        opportunity_text = (
            f"Embed the teaching opportunity within the learner's {combo['natural_context']} routine. "
            f"Deliver the opportunity when the MO is evident (learner oriented to the relevant stimulus, approaching the materials, or demonstrating interest)."
        )

        return {
            "skill_target": combo["skill_target"],
            "curriculum_ref": f"{combo['domain_name']} {combo['level_id']}",
            "learner_profile_name": combo["learner_profile"]["name"],
            "mastery_state_name": mastery_state["name"],
            # "short" is optional in the mastery-state config; default to the
            # lowercased full name.
            "mastery_state_short": mastery_state.get("short", mastery_state["name"].lower()),
            "mo_arrangement_text": combo["mo_arrangement"]["text"],
            "primary_natural_context": combo["natural_context"],
            "natural_opportunity_text": opportunity_text,
            "prompt_strategy_name": combo["prompt_strategy"]["name"],
            "prompt_strategy_description": combo["prompt_strategy"]["description"],
            "current_prompt_guidance": current_prompt_guidance(mastery_state["id"]),
            "natural_reinforcer_text": f"Natural reinforcer: {natural_reinforcer}",
            "n_exemplars": rng.choice([3, 4, 5]),
            "n_settings": rng.choice([2, 3]),
            "n_therapists": rng.choice([2, 3]),
            "mastery_criterion_text": combo["mastery_criterion"]["text"],
            "ioa_frequency": rng.choice([3, 4, 5]),
        }

    def render_example(self, rng: random.Random) -> dict:
        """Render one complete NET example envelope (system/user/assistant
        turns plus gold labels and provenance metadata)."""
        combo = self.sample_combination(rng)
        slots = self.compute_slots(combo, rng)

        user_variant = rng.choice(self.template["user_variants"])
        user_content = user_variant.format(**slots)
        assistant_content = self.template["assistant_template"].format(**slots)

        gold_labels = {
            "method": AREA,
            "domain": f"VB-MAPP.{combo['domain_id']}",
            "level": combo["level_id"],
            "learner_profile": combo["learner_profile"]["id"],
            "mastery_state": combo["mastery_state"]["id"],
        }
        provenance = {
            "layer": 1,
            "area": AREA,
            "template_id": self.template["template_id"],
            "taxonomy_cells": {
                "skill_target": combo["skill_target"],
                "mo_category": combo["mo_category"],
                "mo_arrangement": combo["mo_arrangement"]["id"],
                "prompt_strategy": combo["prompt_strategy"]["id"],
                "mastery_criterion": combo["mastery_criterion"]["id"],
            },
            "teacher_model": None,
            # Derived from the Mersenne Twister state so runs are traceable.
            "seed_tag": str(rng.getstate()[1][0] % 100000),
        }

        return make_example_envelope(
            system_content=self.template["system_prompt"],
            user_content=user_content,
            assistant_content=assistant_content,
            task_type=TASK_TYPE,
            gold_labels=gold_labels,
            provenance=provenance,
        )
+ )
src/generators/aba_session_interp.py ADDED
@@ -0,0 +1,931 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Session Interpretation generator (Task 2).
2
+
3
+ Generates multi-session behavioral logs and matching structured interpretations.
4
+ The log is produced deterministically from a sampled hidden pattern so the
5
+ interpretation's gold labels are known by construction.
6
+ """
7
+
8
+ import random
9
+ from datetime import date, timedelta
10
+ from pathlib import Path
11
+
12
+ from .base import (
13
+ load_area,
14
+ load_shared,
15
+ load_yaml,
16
+ strip_trailing_period,
17
+ make_example_envelope,
18
+ )
19
+
20
+ AREA = "session_interpretation"
21
+ TASK_TYPE = "session_interpretation"
22
+
23
+
24
+ # Trajectory math — generate per-session accuracy values following a pattern rule
25
+
26
+
27
+ def _clip(x: float, lo: float = 0.0, hi: float = 1.0) -> float:
28
+ return max(lo, min(hi, x))
29
+
30
+
31
def generate_accuracy_trajectory(pattern_id: str, rule: dict, n_sessions: int, rng: random.Random) -> list:
    """Return a list of accuracy values for n_sessions based on pattern rule.

    Each hidden pattern id selects a parametric curve; the curve's parameters
    are drawn from ranges in ``rule["accuracy"]``.  The exact sequence of rng
    draws is part of the reproducibility contract — do not reorder.

    Args:
        pattern_id: Hidden-pattern id (e.g. "regression", "plateau",
            "generalization_failure").
        rule: Trajectory rule config for the pattern; only the "accuracy"
            sub-dict is read here.  Expected keys vary by pattern.
        n_sessions: Number of sessions to generate.
        rng: Seeded random source.

    Returns:
        A list of n_sessions floats in [0, 1].
    """
    acc = rule["accuracy"]

    if pattern_id == "generalization_failure":
        # Alternating sessions: even indices probe training stimuli (high),
        # odd indices probe novel stimuli (low).
        alt = acc["alternating"]
        tr_lo, tr_hi = alt["training_range"]
        nov_lo, nov_hi = alt["novel_range"]
        noise = alt["noise"]
        vals = []
        training = rng.uniform(tr_lo, tr_hi)
        novel = rng.uniform(nov_lo, nov_hi)
        for i in range(n_sessions):
            base = training if i % 2 == 0 else novel
            vals.append(_clip(base + rng.uniform(-noise, noise)))
        return vals

    if pattern_id == "skill_loss_after_break":
        # Session 1 starts depressed by `drop`; each later session recovers
        # toward (but never above) the pre-break baseline.
        base = rng.uniform(*acc["base_range"])
        drop = rng.uniform(*acc["initial_drop"])
        rec = rng.uniform(*acc["recovery_per_session_range"])
        noise = acc["noise"]
        vals = [_clip(base - drop + rng.uniform(-noise, noise))]
        for _ in range(1, n_sessions):
            vals.append(_clip(vals[-1] + rec + rng.uniform(-noise, noise), 0.1, base))
        return vals

    if pattern_id == "motivating_operation_shift":
        # Stable baseline, a dip of `dip_len` sessions starting about a third
        # of the way in, then gradual recovery back toward baseline.
        base = rng.uniform(*acc["base_range"])
        dip_len = rng.randint(*acc["mo_dip_sessions"])
        dip_mag = rng.uniform(*acc["mo_dip_magnitude"])
        rec = rng.uniform(*acc["recovery_per_session_range"])
        noise = acc["noise"]
        dip_start = max(2, n_sessions // 3)
        vals = []
        for i in range(n_sessions):
            in_dip = dip_start <= i < dip_start + dip_len
            if in_dip:
                v = base - dip_mag + rng.uniform(-noise, noise)
            elif i >= dip_start + dip_len:
                prev = vals[-1]
                v = min(base, prev + rec + rng.uniform(-noise, noise))
            else:
                v = base + rng.uniform(-noise, noise)
            vals.append(_clip(v, 0.1, 0.98))
        return vals

    if pattern_id == "setting_event_trigger":
        # Single sharp drop at the setting-event session, then recovery.
        base = rng.uniform(*acc["base_range"])
        se_idx = max(1, int(acc["setting_event_session_fraction"] * n_sessions))
        mag = rng.uniform(*acc["setting_event_magnitude"])
        rec = rng.uniform(*acc["recovery_per_session_range"])
        noise = acc["noise"]
        vals = []
        for i in range(n_sessions):
            if i < se_idx:
                v = base + rng.uniform(-noise, noise)
            elif i == se_idx:
                v = base - mag + rng.uniform(-noise, noise)
            else:
                prev = vals[-1]
                v = min(base, prev + rec + rng.uniform(-noise, noise))
            vals.append(_clip(v, 0.1, 0.98))
        return vals

    if pattern_id == "extinction_burst":
        # Linear trend from base; the burst itself shows up in the behavior
        # frequencies, not the accuracy curve.  Clipped to [0, 1] (unlike the
        # default branch below, which uses configurable floor/cap).
        base = rng.uniform(*acc["base_range"])
        delta = rng.uniform(*acc["delta_per_session_range"])
        noise = acc["noise"]
        vals = [_clip(base + i * delta + rng.uniform(-noise, noise)) for i in range(n_sessions)]
        return vals

    # Default: linear delta from base
    base = rng.uniform(*acc["base_range"])
    delta = rng.uniform(*acc["delta_per_session_range"])
    noise = acc["noise"]
    cap = acc.get("cap", 0.98)
    floor_v = acc.get("floor", 0.05)
    return [_clip(base + i * delta + rng.uniform(-noise, noise), floor_v, cap) for i in range(n_sessions)]
110
+
111
+
112
def generate_behavior_frequencies(
    traj_name: str, library: dict, n_sessions: int, rule: dict, rng: random.Random
) -> list:
    """Return a list of integer behavior frequencies per session.

    Args:
        traj_name: Behavior-trajectory name; "temporary_spike" and
            "coincident_spike" have dedicated shapes, anything else uses the
            default linear-trend shape.
        library: Trajectory config library keyed by trajectory name.
        n_sessions: Number of sessions to generate.
        rule: The pattern's trajectory rule; read only for burst timing
            ("temporary_spike") or the setting-event index ("coincident_spike").
        rng: Seeded random source; the draw order must be preserved.

    Returns:
        A list of n_sessions non-negative ints.
    """
    cfg = library[traj_name]
    if traj_name == "temporary_spike":
        # Short burst of elevated frequency starting about a third of the way
        # through (configurable via the rule), back to baseline afterwards.
        base_lo, base_hi = cfg["base_freq_range"]
        mult_range = cfg["spike_multiplier_range"]
        noise = cfg["noise"]
        burst_start = max(2, int(rule.get("burst_start_fraction", 0.33) * n_sessions))
        burst_len = rule.get("burst_duration_sessions", 2)
        base = rng.uniform(base_lo, base_hi)
        mult = rng.uniform(*mult_range)
        vals = []
        for i in range(n_sessions):
            if burst_start <= i < burst_start + burst_len:
                v = base * mult + rng.uniform(-noise, noise)
            else:
                v = base + rng.uniform(-noise, noise)
            vals.append(max(0, round(v)))
        return vals
    if traj_name == "coincident_spike":
        # Spike coincides with setting event index
        base_lo, base_hi = cfg["base_freq_range"]
        mult_range = cfg["spike_multiplier_range"]
        noise = cfg["noise"]
        # Same index computation as the setting_event_trigger accuracy curve,
        # so the behavior spike lines up with the accuracy drop.
        se_idx = max(1, int(rule["accuracy"]["setting_event_session_fraction"] * n_sessions))
        base = rng.uniform(base_lo, base_hi)
        mult = rng.uniform(*mult_range)
        vals = []
        for i in range(n_sessions):
            if i in {se_idx, se_idx + 1}:
                v = base * mult + rng.uniform(-noise, noise)
            else:
                v = base + rng.uniform(-noise, noise)
            vals.append(max(0, round(v)))
        return vals
    # default: base_freq_range + cumulative delta + noise
    base_lo, base_hi = cfg["base_freq_range"]
    delta = cfg.get("delta_per_session", 0.0)
    noise = cfg.get("noise", 1.0)
    base = rng.uniform(base_lo, base_hi)
    return [max(0, round(base + i * delta + rng.uniform(-noise, noise))) for i in range(n_sessions)]
155
+
156
+
157
+ # Stimulus / profile sampling helpers
158
+
159
+
160
# (domain, skill description, teaching method) triples grouped by learner
# age band.  sample_programs() draws from these pools to assemble a
# realistic acceleration-program mix for a synthetic learner.
SKILL_POOL_BY_CATEGORY: dict[str, list[tuple[str, str, str]]] = {
    "early_language": [
        ("Mand", "single-word mands for preferred items (e.g., \"cookie\", \"iPad\")", "NET"),
        ("Mand", "mands for missing items needed to complete an activity", "NET"),
        ("Mand", "2-word mand phrases (e.g., \"more cracker\", \"open please\")", "NET"),
        ("Mand", "requesting a break ('break please' or AAC 'I need a break')", "NET"),
        ("Mand", "indicating completion ('all done', 'finished')", "NET"),
        ("Tact", "tacts common objects", "DTT"),
        ("Tact", "tacts colors of objects", "DTT"),
        ("Listener Responding", "follows 1-step motor instructions", "DTT"),
        ("Listener Responding", "selects items by feature", "DTT"),
        ("Echoic", "echoes 2-word phrases", "DTT variant"),
        ("Motor Imitation", "imitates 3-step motor sequences", "DTT variant"),
        ("Intraverbal", "answers WH-questions about familiar topics", "NET"),
        ("Tolerating Denied Access", "wait calmly when access to preferred item is denied", "FCT"),
    ],
    "school_age": [
        ("Reading", "reads 10–20 sight words", "DTT"),
        ("Reading", "sounds out CVC words", "DTT"),
        ("Writing", "copies simple words from model", "DTT"),
        ("Math", "identifies numerals 1–20", "DTT"),
        ("Math", "counts objects 1–20", "DTT"),
        ("Mand", "requesting preferred items using AAC device (e.g., \"I want cookie please\")", "NET"),
        ("Mand", "single-word requests for preferred items", "NET"),
        ("Mand", "requesting preferred items using a full sentence", "NET"),
        ("Mand", "requesting a break during non-preferred tasks", "NET"),
        ("Mand", "indicating 'all done' when an activity is complete", "NET"),
        ("Tolerating Denied Access", "wait calmly when access to preferred item is denied", "FCT"),
        ("Social Behavior", "takes turns during structured activities", "PRT"),
    ],
    "adolescent": [
        ("Self-Care", "washing hands independently", "Task Analysis"),
        ("Self-Care", "dressing (pants, shirt)", "Task Analysis"),
        ("Self-Care", "brushing teeth independently", "Task Analysis"),
        ("Self-Care", "tolerating tooth brushing by caregiver", "Task Analysis"),
        ("Self-Care", "tolerating haircut", "Task Analysis"),
        ("Home Skills", "making the bed", "Task Analysis"),
        ("Home Skills", "meal cleanup after eating", "Task Analysis"),
        ("Community", "identifying community safety signs", "DTT"),
        ("Community", "ordering in a restaurant", "Task Analysis"),
        ("Mand", "requesting preferred items using AAC device", "NET"),
        ("Mand", "requesting help when a task is difficult", "NET"),
        ("Mand", "requesting a break during non-preferred activities", "NET"),
        ("Mand", "indicating 'all done' with a step or activity", "NET"),
        ("Tolerating Denied Access", "wait calmly when access to preferred item is denied", "FCT"),
    ],
    "adult": [
        ("Independent Living", "scheduling medical appointments", "Task Analysis"),
        ("Independent Living", "managing a budget", "Task Analysis"),
        ("Independent Living", "self-advocacy statements", "BST"),
        ("Vocational", "clocking in at start of shift", "Task Analysis"),
        ("Vocational", "completing assigned tasks in order", "Task Analysis"),
        ("Home Skills", "doing laundry", "Task Analysis"),
        ("Home Skills", "simple meal preparation", "Task Analysis"),
        ("Community", "using public transportation", "Task Analysis"),
        ("Mand", "requesting assistance using AAC device", "NET"),
        ("Mand", "requesting a break during extended tasks", "NET"),
        ("Mand", "indicating 'all done' at task completion", "NET"),
        ("Tolerating Denied Access", "wait calmly when a preferred activity is denied", "FCT"),
    ],
}
221
+
222
+
223
def sample_programs(learner_profile_id: str, n: int, rng: random.Random) -> list:
    """Sample a realistic set of n acceleration programs for the learner profile.

    Draws from the profile's primary skill pool, mixes in up to three programs
    from an adjacent age band for realism, shuffles, and returns the first n.
    """
    primary_by_profile = {
        "early": "early_language",
        "school_age": "school_age",
        "adolescent": "adolescent",
        "adult": "adult",
    }
    neighbor_by_profile = {"early": "school_age", "school_age": "early_language", "adolescent": "school_age", "adult": "adolescent"}

    primary_key = primary_by_profile.get(learner_profile_id, "school_age")
    neighbor_key = neighbor_by_profile.get(learner_profile_id, "school_age")

    # Order matters: primary pool first, then the adjacent slice, so the
    # shuffle consumes rng state exactly as before.
    candidates = list(SKILL_POOL_BY_CATEGORY[primary_key])
    candidates.extend(SKILL_POOL_BY_CATEGORY[neighbor_key][:3])
    rng.shuffle(candidates)
    return candidates[:n]
238
+
239
+
240
+ # Session log rendering
241
+
242
+
243
+ def _render_measurements_for_program(
244
+ skill_domain: str, accuracy: float, rng: random.Random
245
+ ) -> str:
246
+ """Return a one-line measurement string for one program in one session."""
247
+ trials = rng.randint(8, 15)
248
+ correct = round(accuracy * trials)
249
+ fp = rng.randint(0, max(0, trials // 3))
250
+ pp = rng.randint(0, max(0, trials // 3))
251
+ g = rng.randint(0, max(0, trials // 3))
252
+ indep = max(0, trials - fp - pp - g)
253
+ prompts = f"FP×{fp}, PP×{pp}, G×{g}, I×{indep}"
254
+ latency = 1.5 + (1.0 - accuracy) * 4.5 + rng.uniform(-0.3, 0.3)
255
+ return f"{correct}/{trials} correct ({round(accuracy*100)}%); latency {latency:.1f}s; prompts {prompts}"
256
+
257
+
258
+ def _render_behavior_measurement(behavior_id: str, freq: int, rng: random.Random) -> str:
259
+ """Return a measurement string for one target behavior in one session."""
260
+ if behavior_id in {"tantrum"}:
261
+ if freq == 0:
262
+ return f"freq 0"
263
+ duration_total = freq * rng.randint(2, 5)
264
+ return f"freq {freq}, duration {duration_total}m total"
265
+ if behavior_id in {"motor_stereotypy", "vocal_stereotypy", "mouthing"}:
266
+ pir_pct = min(60, freq * rng.randint(3, 5) + rng.randint(0, 10))
267
+ return f"freq {freq}; PIR {pir_pct}%"
268
+ if behavior_id == "pica":
269
+ # Pica is measured as attempts (total) and successful ingestions (subset).
270
+ # Most pica events are caught by staff before ingestion — successful ≤ attempts.
271
+ if freq == 0:
272
+ return "attempts 0"
273
+ attempts = freq
274
+ # Successful ingestion is stochastic but bounded; most are intercepted.
275
+ successful = 0
276
+ for _ in range(attempts):
277
+ if rng.random() < 0.25:
278
+ successful += 1
279
+ unsuccessful = attempts - successful
280
+ return (
281
+ f"attempts {attempts} ({unsuccessful} unsuccessful — staff retrieved item before ingestion; "
282
+ f"{successful} successful — item ingested)"
283
+ )
284
+ if behavior_id == "fecal_smearing":
285
+ # Scatolia is measured as attempts (reaching for feces) and completed smearing (subset).
286
+ # Staff often intercept before smearing completes — completed ≤ attempts.
287
+ if freq == 0:
288
+ return "attempts 0"
289
+ attempts = freq
290
+ completed = 0
291
+ for _ in range(attempts):
292
+ if rng.random() < 0.35:
293
+ completed += 1
294
+ intercepted = attempts - completed
295
+ return (
296
+ f"attempts {attempts} ({intercepted} intercepted — staff redirected before smearing; "
297
+ f"{completed} completed — feces transferred to skin, clothing, or surface)"
298
+ )
299
+ if behavior_id == "toileting_accident":
300
+ # Toileting data tracks both successful voids (in toilet) and accidents.
301
+ # BM accidents scale with overall accident load but stay physiologically
302
+ # bounded — typical session window is 0–1 BMs; some learners with GI
303
+ # clustering (IBS, diet, medication-related) can reach 2.
304
+ if freq == 0:
305
+ bm_accidents = 0
306
+ elif freq == 1:
307
+ bm_accidents = 1 if rng.random() < 0.25 else 0
308
+ elif freq <= 4:
309
+ bm_accidents = rng.choices([0, 1], weights=[0.65, 0.35])[0]
310
+ else:
311
+ bm_accidents = rng.choices([0, 1, 2], weights=[0.50, 0.35, 0.15])[0]
312
+ urine_accidents = max(0, freq - bm_accidents)
313
+
314
+ # Successful voids scale inversely with accident load — a session full
315
+ # of accidents represents missed toileting opportunities.
316
+ if freq == 0:
317
+ urine_success = rng.randint(3, 6)
318
+ elif freq <= 2:
319
+ urine_success = rng.randint(2, 5)
320
+ elif freq <= 4:
321
+ urine_success = rng.randint(1, 3)
322
+ else:
323
+ urine_success = rng.randint(0, 2)
324
+
325
+ # BM successes: typically 0 per session window, sometimes 1. Learners
326
+ # with BM accidents can still have an earlier successful BM in the toilet
327
+ # (e.g., scheduled morning sit worked, later opportunities missed).
328
+ if bm_accidents > 0:
329
+ bm_success = 1 if rng.random() < 0.15 else 0
330
+ else:
331
+ bm_success = rng.choices([0, 1], weights=[0.75, 0.25])[0]
332
+
333
+ return (
334
+ f"urine: {urine_success} in-toilet / {urine_accidents} accidents; "
335
+ f"BM: {bm_success} in-toilet / {bm_accidents} accidents"
336
+ )
337
+ if behavior_id in {"elopement"}:
338
+ return f"freq {freq}"
339
+ return f"freq {freq}"
340
+
341
+
342
# Function-keyed ABC (Antecedent-Behavior-Consequence) narrative templates.
# Keyed by hypothesized behavioral function; each entry is an
# (antecedent, behavior, consequence) triple rendered into session-log
# ABC lines by _render_abc_entry().
ABC_TEMPLATES: dict[str, list[tuple[str, str, str]]] = {
    "escape": [
        ("worksheet task presented", "refused + pushed materials away", "task removed for 2 min"),
        ("demand to complete math problem", "threw materials and vocalized 'no'", "staff removed demand, redirected"),
        ("transition to non-preferred activity", "dropped to floor, cried", "transition delayed, preferred item offered"),
        ("asked to finish dressing task", "kicked at materials", "task paused for 3 min"),
    ],
    "attention": [
        ("adult attention diverted to another learner", "vocalized loudly + approached staff", "staff turned to address behavior"),
        ("staff engaged in paperwork nearby", "yelled name + tapped staff", "staff attended and redirected"),
    ],
    "tangible": [
        ("preferred iPad removed at end of break", "screamed + reached for device", "staff redirected to alternative"),
        ("peer took preferred toy", "grabbed toy back + cried", "staff mediated and returned item"),
    ],
    "automatic": [
        ("quiet unstructured moment between trials", "engaged in hand-flapping", "no social consequence; self-terminated after 30s"),
        ("waiting in line transition", "mouthed own hand", "staff redirected to task materials"),
    ],
}
362
+
363
+
364
# Behavior-specific ABC overrides — used in preference to the function-keyed
# templates above when the behavior has distinctive antecedents / topography.
# Same (antecedent, behavior, consequence) triple shape as ABC_TEMPLATES.
BEHAVIOR_ABC_TEMPLATES: dict[str, list[tuple[str, str, str]]] = {
    "fecal_smearing": [
        ("quiet moment in bathroom after bowel movement", "reached into diaper, retrieved feces", "staff redirected hands and initiated clean-up"),
        ("unstructured downtime, staff out of direct line of sight", "reached behind waistband of pants", "staff intercepted and guided to bathroom"),
        ("post-toileting, before hygiene steps completed", "transferred feces to arm and wall surface", "staff blocked further smearing, cleaned skin and surface"),
        ("lying down for rest period", "reached into pull-up, brought hand to face", "staff intercepted before oral contact; hygiene routine initiated"),
    ],
    "toileting_accident": [
        ("during demand sequence at the work table", "voided (urine) without signaling need", "staff paused programming, changed clothing, resumed after 10 min"),
        ("transition from preferred activity to non-preferred task", "voided (urine) en route to the work area", "staff redirected to bathroom, changed clothing"),
        ("end of 90-minute block without scheduled toileting", "soiled (bowel) at the work table", "staff initiated clean-up and hygiene routine; program paused"),
        ("during group activity without requesting bathroom", "voided (urine) in seat", "staff removed learner from group, changed clothing, re-entered after clean-up"),
    ],
}
380
+
381
+
382
def _render_abc_entry(behavior_id: str, behavior_name: str, function: str, rng: random.Random) -> str:
    """Render one ABC narrative line for a behavior.

    Template lookup order: behavior-specific overrides first, then the
    function-keyed templates, then the escape templates as a last resort.
    """
    candidates = BEHAVIOR_ABC_TEMPLATES.get(behavior_id)
    if not candidates:
        candidates = ABC_TEMPLATES.get(function)
    if not candidates:
        candidates = ABC_TEMPLATES["escape"]
    antecedent, behavior, consequence = rng.choice(candidates)
    return f"ABC ({behavior_name.lower()}): A = {antecedent}; B = {behavior}; C = {consequence}"
386
+
387
+
388
def render_session_log(
    *,
    synthetic_id: int,
    learner_profile: dict,
    curricula_line: str,
    programs: list,
    accuracies_per_program: list,
    behaviors: list,
    behavior_frequencies_per_behavior: list,
    behavior_functions: list,
    n_sessions: int,
    start_date: date,
    duration_per_session: int,
    ioa_session_idx: int | None,
    ioa_agreement: float | None,
    include_abc: bool,
    abc_sessions: list,
    behavioral_indicators_cluster: list,
    rng: random.Random,
) -> str:
    """Render the full multi-session log as text.

    Sections, in order: learner profile header, acceleration programs,
    deceleration targets (if any), per-session data lines, and an optional
    behavioral-observations footer.  Sessions are dated every other day from
    ``start_date``.

    Args (all keyword-only):
        synthetic_id: Numeric suffix for the synthetic learner id.
        learner_profile: Profile dict; "name" required, age fields optional.
        curricula_line: Pre-formatted curricula summary line.
        programs: List of (domain, skill, method) triples.
        accuracies_per_program: Parallel to `programs`; each entry is a list
            of per-session accuracies.
        behaviors: List of behavior dicts with "id" and "name".
        behavior_frequencies_per_behavior: Parallel to `behaviors`; each entry
            is a list of per-session frequencies.
        behavior_functions: Parallel to `behaviors`; hypothesized functions.
        n_sessions: Number of sessions to render.
        start_date: Date of session 1.
        duration_per_session: Minutes per (non-IOA) session line.
        ioa_session_idx: 0-based session index rendered as the IOA session,
            or None for no IOA session.
        ioa_agreement: Agreement proportion for the IOA session; must be
            non-None whenever ioa_session_idx is set.
        include_abc: Whether to embed ABC narrative entries at all.
        abc_sessions: 0-based session indices eligible for an ABC entry.
        behavioral_indicators_cluster: Optional footer bullet strings.
        rng: Seeded source for per-line measurement details.

    Returns:
        The complete log as a newline-joined string.
    """
    lines = []
    lines.append("LEARNER PROFILE")
    lines.append(f"Synthetic ID: SYN-{synthetic_id}")
    profile_text = f"{learner_profile['name']}"
    # Prefer chronological age; fall back to developmental age if present.
    if "chronological_age" in learner_profile:
        profile_text += f" ({learner_profile['chronological_age']})"
    elif "developmental_age" in learner_profile:
        profile_text += f" (dev age {learner_profile['developmental_age']})"
    lines.append(f"Profile: {profile_text}")
    lines.append(f"Curricula: {curricula_line}")
    # Sessions are spaced two calendar days apart.
    end_date = start_date + timedelta(days=n_sessions * 2)
    lines.append(f"Date range: Sessions 1–{n_sessions} across {(end_date - start_date).days} days ({start_date.isoformat()} to {end_date.isoformat()})")
    lines.append("")

    lines.append("ACCELERATION PROGRAMS")
    for i, (domain, skill, method) in enumerate(programs, 1):
        lines.append(f"{i}. {skill} ({domain})")
        lines.append(f"   Method: {method}")
    lines.append("")

    if behaviors:
        lines.append("DECELERATION TARGETS")
        for i, (beh, fn) in enumerate(zip(behaviors, behavior_functions), 1):
            lines.append(f"{i}. {beh['name']} — function hypothesized: {fn}")
        lines.append("")

    lines.append("SESSION DATA")
    for s_idx in range(n_sessions):
        session_num = s_idx + 1
        d = (start_date + timedelta(days=s_idx * 2)).isoformat()
        if ioa_session_idx == s_idx:
            # IOA session: condensed accuracy lines plus agreement figures
            # instead of the full trial-level measurement strings.
            lines.append(f"")
            lines.append(f"Session {session_num} — {d} — IOA SESSION — 2 observers")
            for i, (domain, skill, method) in enumerate(programs):
                acc = accuracies_per_program[i][s_idx]
                lines.append(f"  {skill}: {round(acc*100)}% (primary); IOA {round(ioa_agreement*100)}% agreement")
            for i, beh in enumerate(behaviors):
                freq = behavior_frequencies_per_behavior[i][s_idx]
                ms = _render_behavior_measurement(beh["id"], freq, rng)
                lines.append(f"  {beh['name']}: {ms}; IOA {round(ioa_agreement*100)}% agreement")
        else:
            lines.append(f"")
            lines.append(f"Session {session_num} — {d} — {duration_per_session} min — 1 observer")
            for i, (domain, skill, method) in enumerate(programs):
                acc = accuracies_per_program[i][s_idx]
                ms = _render_measurements_for_program(domain, acc, rng)
                lines.append(f"  {skill}: {ms}")
            for i, beh in enumerate(behaviors):
                freq = behavior_frequencies_per_behavior[i][s_idx]
                ms = _render_behavior_measurement(beh["id"], freq, rng)
                lines.append(f"  {beh['name']}: {ms}")
            # ABC entry for this session?
            # Only rendered when the randomly picked behavior actually
            # occurred (nonzero frequency) this session.
            if include_abc and s_idx in abc_sessions and behaviors:
                beh_idx = rng.randrange(len(behaviors))
                fn = behavior_functions[beh_idx]
                if behavior_frequencies_per_behavior[beh_idx][s_idx] > 0:
                    beh = behaviors[beh_idx]
                    lines.append(f"  {_render_abc_entry(beh['id'], beh['name'], fn, rng)}")

    if behavioral_indicators_cluster:
        lines.append("")
        lines.append("BEHAVIORAL OBSERVATIONS (across sessions)")
        for ind in behavioral_indicators_cluster:
            lines.append(f"- {ind}")

    return "\n".join(lines)
475
+
476
+
477
+ # Interpretation section assembly
478
+
479
+
480
def assemble_clinical_concerns(
    pattern_id: str,
    pattern: dict,
    accuracies_first_last: tuple,
    behaviors: list,
    behavior_frequencies_per_behavior: list,
    n_sessions: int,
) -> str:
    """Produce the clinical-concerns bullets for the detected pattern.

    ``pattern`` and ``behavior_frequencies_per_behavior`` are accepted for
    signature parity with the other assemblers; the rendered text depends only
    on ``pattern_id``, the first/last accuracy pair, ``behaviors`` and
    ``n_sessions``. Unknown pattern ids yield an empty string.
    """
    first, last = accuracies_first_last
    pct_first = round(first * 100)
    pct_last = round(last * 100)
    pct_mid = round(((first + last) / 2) * 100)

    bullets = []
    if pattern_id == "mastery_progression":
        bullets += [
            f"- Steady improvement: accuracy rose from {pct_first}% to {pct_last}% across {n_sessions} sessions.",
            "- No clinical concerns at this time; mastery criteria should be reachable within a few additional sessions.",
        ]
    elif pattern_id == "regression":
        bullets.append(f"- **Regression identified**: accuracy has declined from {pct_first}% to {pct_last}% across {n_sessions} sessions.")
        # Behavior data only strengthens the function hypothesis when present.
        if behaviors:
            bullets.append("- Accompanying behavior data suggests the decline may be function-related.")
    elif pattern_id == "plateau":
        bullets += [
            f"- **Plateau**: accuracy has remained near {pct_mid}% for {n_sessions} sessions without meaningful improvement.",
            "- Performance is below mastery criteria; current teaching procedure may not be effective for this learner.",
        ]
    elif pattern_id == "frustration_pattern":
        bullets += [
            f"- **Frustration pattern**: declining accuracy ({pct_first}% -> {pct_last}%) with escape-indicator behaviors.",
            "- Risk of learned helplessness and escape-maintained behavior if not addressed.",
        ]
    elif pattern_id == "variable_performance":
        bullets += [
            f"- **Variable performance**: accuracy fluctuating around {pct_mid}% with no clear trend.",
            "- Inconsistency may indicate stimulus-control issues or motivational variability across sessions.",
        ]
    elif pattern_id == "prompt_dependency":
        bullets += [
            f"- **Prompt dependency**: high accuracy maintained ({pct_first}%–{pct_last}%) but primarily at prompted levels.",
            "- Independent responding remains low despite extended training.",
        ]
    elif pattern_id == "rapid_acquisition":
        bullets += [
            f"- **Rapid acquisition**: accelerated mastery from {pct_first}% to {pct_last}% across {n_sessions} sessions.",
            "- Acquisition has exceeded expected timeline.",
        ]
    elif pattern_id == "generalization_failure":
        bullets += [
            "- **Generalization failure**: strong performance in training context but failure to generalize to novel conditions.",
            "- Skill may be under restricted stimulus control.",
        ]
    elif pattern_id == "extinction_burst":
        bullets += [
            "- **Extinction burst** observed: temporary increase in problem behavior consistent with an extinction procedure.",
            "- This is an expected phase of behavior change and typically indicates the intervention is working.",
        ]
    elif pattern_id == "skill_loss_after_break":
        bullets += [
            f"- **Skill loss following break**: performance dropped to {pct_first}% at return, recovering to {pct_last}%.",
            "- Maintenance schedule should be reviewed and strengthened.",
        ]
    elif pattern_id == "motivating_operation_shift":
        bullets += [
            "- **Motivating-operation shift**: mid-log dip in responding followed by recovery suggests reinforcer-value change.",
            "- Possible satiation with current reinforcer; preference reassessment indicated.",
        ]
    elif pattern_id == "setting_event_trigger":
        bullets += [
            "- **Setting event pattern**: observable change in performance correlated with an external event (illness, sleep, schedule).",
            "- Correlation between setting event and performance should be tracked explicitly.",
        ]
    return "\n".join(bullets)
530
+
531
+
532
def assemble_function_hypothesis_section(
    behaviors: list, behavior_functions: list, abc_events_summary: str
) -> str:
    """Produce the function hypothesis section, or empty string if no behaviors."""
    if not behaviors:
        return ""
    # Function-specific supporting evidence; the operational definition itself
    # is intentionally not repeated here.
    evidence_by_function = {
        "escape": "behavior occurs in the context of demand presentation and terminates contingent on task removal",
        "attention": "behavior occurs during low-attention periods and results in social attention contingently",
        "tangible": "behavior occurs when preferred items are restricted and is followed by access to the item",
        "automatic": "behavior persists across contexts without clear social mediation",
    }
    out = ["", "## Behavior Function Hypothesis"]
    for beh, fn in zip(behaviors, behavior_functions):
        out.append(f"{beh['name']}: {fn}")
        if fn == "unknown":
            out.append(" Evidence: insufficient data to disambiguate functions; recommend FBA / preference assessment before programming changes.")
        else:
            evidence = []
            if abc_events_summary:
                evidence.append(f"ABC events in the log support the {fn} function")
            if fn in evidence_by_function:
                evidence.append(evidence_by_function[fn])
            out.append(" Evidence: " + "; ".join(evidence) + ".")
        out.append("")
    return "\n".join(out)
559
+
560
+
561
def assemble_replacement_section(behaviors: list, behavior_functions: list, recs_pool: list) -> str:
    """Produce the replacement behavior (FCT) section — only if behaviors present.

    ``behavior_functions`` is accepted for signature parity; the section is
    driven by the presence of behaviors and the first three pooled
    recommendations.
    """
    if not (behaviors and recs_pool):
        return ""
    body = ["", "### Replacement behavior (FCT)"] + [f"- {rec}" for rec in recs_pool[:3]]
    return "\n".join(body) + "\n"
569
+
570
+
571
def assemble_crisis_section(recs_pool: list, escalation_level: int) -> str:
    """Produce the crisis plan section — only if escalation >= 3 and pool has items."""
    if escalation_level < 3 or not recs_pool:
        return ""
    body = ["", "### Crisis plan"] + [f"- {rec}" for rec in recs_pool[:3]]
    return "\n".join(body) + "\n"
579
+
580
+
581
+ def assemble_rationale(
582
+ accuracies_first_last: tuple,
583
+ mean_accuracy: float,
584
+ n_sessions: int,
585
+ behaviors: list,
586
+ behavior_frequencies_per_behavior: list,
587
+ ioa_present: bool,
588
+ ioa_agreement: float | None,
589
+ ) -> str:
590
+ """Produce the data-supported rationale bullets."""
591
+ first, last = accuracies_first_last
592
+ bullets = []
593
+ bullets.append(f"- Mean accuracy across {n_sessions} sessions: {round(mean_accuracy*100, 1)}%.")
594
+ direction = "upward" if last > first else "downward" if last < first else "flat"
595
+ bullets.append(f"- Trend direction: {direction} (first session: {round(first*100)}%, last session: {round(last*100)}%).")
596
+ if behaviors:
597
+ for beh, freqs in zip(behaviors, behavior_frequencies_per_behavior):
598
+ mean_f = sum(freqs) / len(freqs)
599
+ first_third = sum(freqs[:max(1, len(freqs)//3)]) / max(1, len(freqs)//3)
600
+ last_third = sum(freqs[-max(1, len(freqs)//3):]) / max(1, len(freqs)//3)
601
+ bullets.append(f"- {beh['name']}: mean {mean_f:.1f}/session (early-window mean {first_third:.1f} -> late-window mean {last_third:.1f}).")
602
+ if ioa_present and ioa_agreement is not None:
603
+ bullets.append(f"- IOA data: {round(ioa_agreement*100)}% agreement on IOA session — {'adequate' if ioa_agreement >= 0.80 else 'below threshold'}.")
604
+ else:
605
+ bullets.append("- No IOA session in this log; reliability of measurements cannot be independently verified.")
606
+ return "\n".join(bullets)
607
+
608
+
609
+ # Escalation + confidence inference
610
+
611
+
612
def infer_escalation(
    pattern: dict,
    behaviors: list,
    behavior_frequencies_per_behavior: list,
    rules: dict,
) -> int:
    """Compute escalation level from pattern + behavior severity.

    Starts from the pattern's default level, bumps (capped at 4) when any
    severe behavior is present, and forces level 4 when a safety-threshold
    behavior sustains a high rate across the final two sessions.
    """
    level = int(pattern["default_escalation"])

    severe_ids = set(rules.get("severe_behavior_ids", []))
    if any(beh["id"] in severe_ids for beh in behaviors):
        bump = rules.get("severe_behavior_escalation_bump", 1)
        level = min(4, level + bump)

    # Safety-immediate threshold: two consecutive closing sessions at/above
    # the configured rate for a watched behavior jumps straight to level 4.
    watch_ids = set(rules.get("safety_immediate_threshold_behaviors", []))
    rate_cutoff = rules.get("safety_immediate_sib_rate_per_session", 8)
    for beh, freqs in zip(behaviors, behavior_frequencies_per_behavior):
        if beh["id"] not in watch_ids:
            continue
        if len(freqs) >= 2 and min(freqs[-2:]) >= rate_cutoff:
            level = 4
    return level
631
+
632
+
633
def infer_confidence(
    n_sessions: int,
    ioa_present: bool,
    accuracies: list,
    rules: dict,
) -> str:
    """Compute confidence level from data-quality features.

    Rules:
    - low: very short log, OR short log with no IOA and non-trivial variance
    - moderate: default when criteria for high are not met
    - high: long log with IOA evidence and low variance
    """
    n = len(accuracies)
    avg = sum(accuracies) / n
    # Population standard deviation of the flattened accuracy values.
    sd = (sum((a - avg) ** 2 for a in accuracies) / n) ** 0.5

    # Explicit low triggers.
    too_short = n_sessions < rules["min_sessions_for_moderate"]
    shaky_short = n_sessions < 7 and not ioa_present and sd > 0.15
    very_noisy = sd > 0.35
    if too_short or shaky_short or very_noisy:
        return "low"

    # High requires long log + IOA (when demanded) + modest variance.
    long_enough = n_sessions >= rules["min_sessions_for_high"]
    ioa_ok = ioa_present or not rules["ioa_required_for_high"]
    if long_enough and ioa_ok and sd <= rules["variance_threshold_low"]:
        return "high"

    return "moderate"
666
+
667
+
668
+ # Main generator class
669
+
670
+
671
class SessionInterpretationGenerator:
    """Generator for session-interpretation examples.

    Samples a hidden clinical pattern, synthesizes a multi-session data log
    matching that pattern, then renders the gold interpretation (concerns,
    function hypothesis, recommendations, escalation, confidence, rationale)
    from the same sampled state.
    """

    def __init__(self, config_dir: Path):
        # Area config (taxonomy/template/compatibility) plus two extra
        # area-specific YAML files: trajectory rules and recommendations.
        self.config_dir = Path(config_dir)
        self.shared = load_shared(self.config_dir)
        self.area = load_area(self.config_dir, AREA)
        self.template = self.area["template"]
        self.taxonomy = self.area["taxonomy"]
        self.compat = self.area["compatibility"]
        self.trajectory_rules = load_yaml(self.config_dir / AREA / "trajectory_rules.yaml")
        self.recommendations = load_yaml(self.config_dir / AREA / "recommendations.yaml")

    def _pattern_by_id(self, pid: str) -> dict:
        """Look up a pattern dict by id; raises StopIteration if absent."""
        return next(p for p in self.taxonomy["patterns"] if p["id"] == pid)

    def _behavior_by_id(self, bid: str) -> dict:
        """Look up a target-behavior dict by id; raises StopIteration if absent."""
        return next(b for b in self.taxonomy["target_behaviors"] if b["id"] == bid)

    def _escalation_label(self, level: int) -> str:
        """Map a numeric escalation level to its taxonomy label."""
        return next(e["label"] for e in self.taxonomy["escalation_levels"] if e["id"] == level)

    def render_example(self, rng: random.Random) -> dict:
        """Sample one example; the draw order below is significant for seeding."""
        # Sample hidden pattern
        pattern = rng.choice(self.taxonomy["patterns"])
        pattern_id = pattern["id"]

        # Log length
        lo, hi = self.compat["log_length_by_pattern"][pattern_id]
        n_sessions = rng.randint(lo, hi)

        # Learner profile (any — patterns are profile-agnostic)
        profile = rng.choice(self.shared["learner_profiles"]["profiles"])

        # Acceleration programs
        n_programs = rng.randint(*self.compat["programs_per_log_range"])
        programs = sample_programs(profile["id"], n_programs, rng)
        curricula_names = sorted(set(p[0] for p in programs))
        curricula_line = " + ".join(curricula_names)

        # Trajectory per program (all programs follow the pattern's trajectory loosely)
        rule = self.trajectory_rules["rules"][pattern_id]
        accuracies_per_program = [
            generate_accuracy_trajectory(pattern_id, rule, n_sessions, rng)
            for _ in programs
        ]

        # Target behaviors (if any); candidates are filtered by the pattern's
        # function bias when one is configured.
        n_beh_lo, n_beh_hi = self.compat["pattern_behavior_count_ranges"][pattern_id]
        n_behaviors = rng.randint(n_beh_lo, n_beh_hi)
        behaviors = []
        behavior_functions = []
        if n_behaviors > 0:
            function_bias = self.compat["pattern_function_bias"].get(pattern_id, [])
            candidate_behs = [
                b for b in self.taxonomy["target_behaviors"]
                if any(f in b["plausible_functions"] for f in function_bias) or not function_bias
            ]
            behaviors = rng.sample(candidate_behs, min(n_behaviors, len(candidate_behs)))
            for beh in behaviors:
                if function_bias:
                    # Prefer a biased function the behavior can plausibly serve.
                    valid = [f for f in function_bias if f in beh["plausible_functions"]]
                    behavior_functions.append(rng.choice(valid) if valid else rng.choice(beh["plausible_functions"]))
                else:
                    behavior_functions.append(rng.choice(beh["plausible_functions"]))

        # Behavior frequencies per session
        behavior_freqs = []
        if behaviors:
            traj_name = self.trajectory_rules["rules"][pattern_id]["behavior_trajectory"]
            for _ in behaviors:
                freqs = generate_behavior_frequencies(
                    traj_name, self.trajectory_rules["behavior_trajectory_library"],
                    n_sessions, rule, rng,
                )
                behavior_freqs.append(freqs)

        # IOA decision — only logs with >= 4 sessions get an IOA session,
        # placed in the middle third of the log.
        include_ioa = rng.random() < self.compat["ioa_inclusion_probability"]
        ioa_session_idx = None
        ioa_agreement = None
        if include_ioa and n_sessions >= 4:
            ioa_session_idx = rng.randint(n_sessions // 3, 2 * n_sessions // 3)
            if rng.random() < self.compat["ioa_low_probability"]:
                ioa_agreement = rng.uniform(*self.compat["ioa_low_range"])
            else:
                ioa_agreement = rng.uniform(*self.compat["ioa_agreement_range"])

        # ABC decision — ABC entries only attach to sessions where some
        # behavior actually occurred.
        include_abc = bool(behaviors) and rng.random() < self.compat["abc_inclusion_probability"]
        abc_sessions = []
        if include_abc:
            candidate_sessions = [i for i in range(n_sessions) if any(bf[i] > 0 for bf in behavior_freqs)]
            n_abc = min(rng.randint(1, 3), len(candidate_sessions))
            abc_sessions = rng.sample(candidate_sessions, n_abc) if candidate_sessions else []

        # Behavioral indicators
        cluster = self.compat["pattern_behavioral_indicator_cluster"].get(pattern_id)
        indicators = []
        if cluster:
            pool = self.taxonomy["behavioral_indicators"][cluster]
            indicators = rng.sample(pool, min(rng.randint(2, 4), len(pool)))

        # Log dates
        start_date = date(2026, rng.randint(1, 10), rng.randint(1, 28))
        duration_per_session = rng.randint(*self.compat["session_duration_minutes_range"])

        # Synthetic ID
        synthetic_id = rng.randint(1000, 9999)

        # Render the user's session log
        session_log = render_session_log(
            synthetic_id=synthetic_id,
            learner_profile=profile,
            curricula_line=curricula_line,
            programs=programs,
            accuracies_per_program=accuracies_per_program,
            behaviors=behaviors,
            behavior_frequencies_per_behavior=behavior_freqs,
            behavior_functions=behavior_functions,
            n_sessions=n_sessions,
            start_date=start_date,
            duration_per_session=duration_per_session,
            ioa_session_idx=ioa_session_idx,
            ioa_agreement=ioa_agreement,
            include_abc=include_abc,
            abc_sessions=abc_sessions,
            behavioral_indicators_cluster=indicators,
            rng=rng,
        )

        # Compute gold labels
        escalation_level = infer_escalation(
            pattern, behaviors, behavior_freqs, self.compat["escalation_rules"]
        )
        # Flatten accuracies across all programs for variance check
        flat_accs = [v for traj in accuracies_per_program for v in traj]
        confidence_level = infer_confidence(
            n_sessions, ioa_session_idx is not None, flat_accs, self.compat["confidence_rules"]
        )

        # Primary-program accuracies for concerns/rationale (take first program)
        primary_accs = accuracies_per_program[0]
        mean_primary = sum(primary_accs) / len(primary_accs)

        # Assemble the assistant message sections
        clinical_concerns_bullets = assemble_clinical_concerns(
            pattern_id, pattern, (primary_accs[0], primary_accs[-1]),
            behaviors, behavior_freqs, n_sessions,
        )

        # One canned explanation sentence per pattern id; keys must cover
        # every pattern in the taxonomy or the lookup below raises KeyError.
        pattern_explanations = {
            "mastery_progression": "Accuracy is ascending cleanly across sessions with no interfering behaviors, consistent with a mastery-progression trajectory.",
            "regression": "Previously-established responding has declined across recent sessions, warranting review of environmental and reinforcement variables.",
            "plateau": "Responding has stabilized below mastery criterion without meaningful movement, indicating the current procedure is limiting further acquisition.",
            "frustration_pattern": "Declining accuracy co-occurs with escape-indicator behaviors, consistent with frustration and escape-maintained responding to demand.",
            "variable_performance": "High session-to-session variability with no clear trend suggests environmental or motivational inconsistency rather than a single programmatic issue.",
            "prompt_dependency": "Accuracy stays high only at prompted levels; independent responding remains low despite extended training.",
            "rapid_acquisition": "Accuracy ascended steeply beyond expected timeline for this learner's profile.",
            "generalization_failure": "Alternation between strong training performance and poor novel-condition performance indicates restricted stimulus control.",
            "extinction_burst": "Mid-log spike in problem-behavior frequency followed by recovery is consistent with a planned extinction procedure.",
            "skill_loss_after_break": "Sharp drop at the start of the log followed by gradual recovery indicates skill loss following an absence.",
            "motivating_operation_shift": "A mid-log dip followed by recovery maps onto a motivating-operation change rather than a skill issue.",
            "setting_event_trigger": "A discontinuous performance change at a specific session correlates with an external setting event.",
        }

        function_section = assemble_function_hypothesis_section(
            behaviors, behavior_functions,
            "ABC events present" if include_abc and abc_sessions else ""
        )

        # Recommendation pools fall back to a neutral default when empty.
        pattern_recs = self.recommendations["patterns"][pattern_id]
        antecedent_pool = pattern_recs["antecedent"] or ["Continue monitoring; reassess in 2 weeks."]
        consequence_pool = pattern_recs["consequence"] or ["Maintain current consequences."]
        ant_bullets = "\n".join(f"- {r}" for r in antecedent_pool[:4])
        con_bullets = "\n".join(f"- {r}" for r in consequence_pool[:3])

        rep_section = assemble_replacement_section(
            behaviors, behavior_functions, pattern_recs.get("replacement", [])
        )
        crisis_section = assemble_crisis_section(
            pattern_recs.get("crisis", []), escalation_level
        )

        escalation_justifications = {
            1: "Current programming is effective; no change needed.",
            2: "Programming adjustments indicated at next session; no immediate safety concern.",
            3: "Combination of performance decline and/or behavior data warrants supervising BCBA review within 24–48 hours.",
            4: "Safety-critical signals require immediate cessation of current program and direct BCBA contact; consider crisis-plan activation.",
        }

        confidence_justifications = {
            "high": f"Pattern is well-supported: {n_sessions} sessions of data, low variance, and IOA evidence confirm reliability.",
            "moderate": f"Pattern is supported but alternatives cannot be fully excluded: {n_sessions} sessions of data" + (" with IOA evidence." if ioa_session_idx is not None else "; no IOA session included."),
            "low": f"Insufficient data: only {n_sessions} sessions, high variance, and/or no IOA evidence. Recommend additional data collection before programming changes.",
        }

        rationale_bullets = assemble_rationale(
            (primary_accs[0], primary_accs[-1]),
            mean_primary,
            n_sessions,
            behaviors,
            behavior_freqs,
            ioa_session_idx is not None,
            ioa_agreement,
        )

        # Fill template
        user_variant = rng.choice(self.template["user_variants"])
        user_content = user_variant.format(session_log=session_log)

        assistant_content = self.template["assistant_template"].format(
            clinical_concerns_bullets=clinical_concerns_bullets,
            pattern_class=pattern_id,
            pattern_explanation=pattern_explanations[pattern_id],
            function_hypothesis_section=function_section,
            antecedent_bullets=ant_bullets,
            replacement_section=rep_section,
            consequence_bullets=con_bullets,
            crisis_section=crisis_section,
            escalation_level=escalation_level,
            escalation_label=self._escalation_label(escalation_level),
            escalation_justification=escalation_justifications[escalation_level],
            confidence_level=confidence_level,
            confidence_justification=confidence_justifications[confidence_level],
            rationale_bullets=rationale_bullets,
        )

        # Metadata
        gold_labels = {
            "pattern_class": pattern_id,
            "behavior_functions": {b["name"]: f for b, f in zip(behaviors, behavior_functions)},
            "escalation_level": escalation_level,
            "confidence": confidence_level,
            "crisis_plan_required": escalation_level >= 3,
        }
        provenance = {
            "layer": 1,
            "area": AREA,
            "template_id": self.template["template_id"],
            "taxonomy_cells": {
                "hidden_pattern_id": pattern_id,
                "learner_profile": profile["id"],
                "n_sessions": n_sessions,
                "n_programs": len(programs),
                "n_behaviors": len(behaviors),
                "behavior_ids": [b["id"] for b in behaviors],
                "behavior_functions": list(behavior_functions),
                "has_abc_data": include_abc and bool(abc_sessions),
                "has_ioa_session": ioa_session_idx is not None,
            },
            "teacher_model": None,
            # Pseudo-tag derived from the RNG's internal state; presumably a
            # reproducibility aid — TODO confirm intended semantics.
            "seed_tag": str(rng.getstate()[1][0] % 100000),
        }

        return make_example_envelope(
            system_content=self.template["system_prompt"],
            user_content=user_content,
            assistant_content=assistant_content,
            task_type=TASK_TYPE,
            gold_labels=gold_labels,
            provenance=provenance,
        )
src/generators/aba_task_analysis.py ADDED
@@ -0,0 +1,277 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Task Analysis / Chaining area generator.
2
+
3
+ Loads configs/task_analysis/ + configs/shared/ and produces chaining
4
+ teaching program examples for AFLS modules (Basic Living, Home, Community,
5
+ Vocational, Independent Living).
6
+ """
7
+
8
+ import random
9
+ from pathlib import Path
10
+
11
+ from .base import (
12
+ load_area,
13
+ load_shared,
14
+ load_yaml,
15
+ make_example_envelope,
16
+ )
17
+
18
+ AREA = "task_analysis"
19
+ TASK_TYPE = "teaching_program"
20
+
21
+
22
# Mastery state -> which shaping step the learner is currently at (as index range
# within the shaping_steps list). 0-indexed, end-exclusive.
TOLERATION_MASTERY_STEP_RANGES = {
    "emerging": (0, 2),
    "developing": (1, 3),
    "approaching": (2, 4),
    "near": (3, 5),
    "mastered": (4, 6),
    "generalization": (5, 7),
}


def current_shaping_step_guidance(mastery_state_id: str, shaping_steps: list) -> str:
    """Describe where the learner is currently sitting in the shaping progression."""
    total = len(shaping_steps)
    start, end = TOLERATION_MASTERY_STEP_RANGES.get(mastery_state_id, (0, 2))
    # Clamp the configured window to the steps actually available.
    start = max(0, min(start, total - 1))
    end = max(start + 1, min(end, total))
    return (
        f'begin at shaping step {start + 1} ("{shaping_steps[start]}") and advance through '
        f"step {end} across sessions as tolerance stabilizes."
    )
45
+
46
+
47
def render_shaping_steps(shaping_steps: list) -> str:
    """Render numbered shaping progression (one "N. step" line per step)."""
    numbered = (f"{num}. {text}" for num, text in enumerate(shaping_steps, start=1))
    return "\n".join(numbered)
50
+
51
+
52
def current_prompt_guidance(mastery_state_id: str, prompt_strategy_id: str) -> str:
    """Guidance for chaining prompt level at the current mastery state."""
    early_states = {"emerging", "developing"}
    late_states = {"near", "mastered", "generalization"}

    if mastery_state_id in early_states:
        # Early acquisition: physical prompting, with graduated guidance as a
        # special case.
        if prompt_strategy_id == "graduated_guidance":
            return "provide hand-over-hand guidance at each step; reduce physical pressure across sessions."
        return "use full-physical or partial-physical prompts at each step; fade systematically."
    if mastery_state_id == "approaching":
        return "use partial-physical or gestural prompts only at steps where errors occur; allow independence elsewhere."
    if mastery_state_id in late_states:
        return "deliver the least intrusive prompt that prevents an error; expect independence on previously-mastered steps."
    # Unknown mastery state: generic fallback.
    return "prompt level calibrated to current performance."
63
+
64
+
65
def render_steps(step_list: list, chain_type_id: str) -> str:
    """Render numbered step list, with callout of the learner-taught step for backward chaining."""
    total = len(step_list)
    rendered = []
    for num, step in enumerate(step_list, start=1):
        line = f"{num}. {step}"
        # Backward chaining teaches the last step first; forward chaining the
        # first. Backward wins when both apply (single-step list).
        if chain_type_id == "backward" and num == total:
            line += " <- Learner-taught step (backward chaining starts here)"
        elif chain_type_id == "forward" and num == 1:
            line += " <- Learner-taught step (forward chaining starts here)"
        rendered.append(line)
    return "\n".join(rendered)
76
+
77
+
78
def render_criterion_text(criterion: dict, rng: random.Random) -> str:
    """Fill any slots in the mastery-criterion text (e.g., fluency seconds)."""
    text = criterion["text"]
    slot = "{fluency_seconds}"
    if slot in text:
        # Only consume randomness when the slot is actually present, so the
        # RNG stream stays aligned for callers.
        seconds = rng.choice([60, 90, 120, 180])
        text = text.replace(slot, str(seconds))
    return text
84
+
85
+
86
class TaskAnalysisGenerator:
    """Generator for AFLS task-analysis / chaining teaching-program examples.

    Handles two program types with separate templates:
      * "independence" — chain-based programs (forward / backward / total-task)
      * "toleration"   — duration-based shaping programs
    """

    def __init__(self, config_dir: Path):
        self.config_dir = Path(config_dir)
        self.shared = load_shared(self.config_dir)
        self.area = load_area(self.config_dir, AREA)
        self.template = self.area["template"]  # chain-based (independence)
        # Toleration programs use their own template file in the area dir.
        self.template_toleration = load_yaml(
            self.config_dir / AREA / "template_toleration.yaml"
        )
        self.taxonomy = self.area["taxonomy"]
        self.compat = self.area["compatibility"]

    def sample_combination(self, rng: random.Random) -> dict:
        """Sample one compatible (module, skill, profile, procedure) combination.

        Returns a dict whose keys depend on the sampled program type; both
        variants share skill/module/profile/mastery fields.
        """
        # Weighted module selection
        module_weights = self.compat["module_sampling_weights"]
        modules = self.taxonomy["afls"]["modules"]
        module_ids = [m["id"] for m in modules]
        weights = [module_weights[mid] for mid in module_ids]
        module = rng.choices(modules, weights=weights)[0]

        # Sample a skill from the module
        skill_entry = rng.choice(module["skills"])
        skill_target = skill_entry["name"]
        is_toleration = skill_entry.get("program_type") == "toleration"

        # Learner profile — intersect module's allowed with the (shared) learner list
        profiles = self.shared["learner_profiles"]["profiles"]
        module_allowed = set(module["learner_profiles"])
        profile = rng.choice([p for p in profiles if p["id"] in module_allowed])

        mastery_state = rng.choice(self.shared["mastery_states"]["states"])

        # Filter mastery criteria by program type (independence vs toleration)
        program_type = "toleration" if is_toleration else "independence"
        eligible_criteria = [
            c for c in self.taxonomy["mastery_criteria"]
            if c.get("applies_to", "independence") == program_type
        ]
        if not eligible_criteria:
            # Fallback: no criterion tagged for this program type.
            eligible_criteria = self.taxonomy["mastery_criteria"]
        mastery_criterion = rng.choice(eligible_criteria)

        if is_toleration:
            # Toleration programs: duration-based shaping, no chain / prompt / EC fields needed.
            return {
                "skill_target": skill_target,
                "program_type": "toleration",
                "shaping_steps": skill_entry["shaping_steps"],
                "target_activity": skill_entry["target_activity"],
                "end_goal_description": skill_entry["end_goal_description"],
                "module_id": module["id"],
                "module_name": module["name"],
                "learner_profile": profile,
                "mastery_state": mastery_state,
                "mastery_criterion": mastery_criterion,
            }

        # Independence (chain-based) program — existing path
        steps = skill_entry["steps"]
        preferred_chain = skill_entry.get("chain_type_preferred", "total_task")

        # Use the skill's preferred chain type most of the time, otherwise any.
        if rng.random() < self.compat["use_preferred_chain_type_probability"]:
            chain_type = next(c for c in self.taxonomy["chain_types"] if c["id"] == preferred_chain)
        else:
            chain_type = rng.choice(self.taxonomy["chain_types"])

        # Prompt strategy constrained by mastery state (with fallback to all).
        allowed_strategy_ids = set(self.compat["mastery_to_prompt_strategies"][mastery_state["id"]])
        valid_strategies = [s for s in self.taxonomy["prompt_hierarchies"] if s["id"] in allowed_strategy_ids]
        if not valid_strategies:
            valid_strategies = self.taxonomy["prompt_hierarchies"]
        prompt_strategy = rng.choice(valid_strategies)

        # Reinforcement schedule likewise constrained; falls back to CRF.
        allowed_schedule_ids = set(self.compat["mastery_to_reinforcement"][mastery_state["id"]])
        valid_schedules = [s for s in self.taxonomy["reinforcement_schedules"] if s["id"] in allowed_schedule_ids]
        if not valid_schedules:
            valid_schedules = [s for s in self.taxonomy["reinforcement_schedules"] if s["id"] == "crf_per_step"]
        reinforcement_schedule = rng.choice(valid_schedules)

        error_correction = rng.choice(self.taxonomy["error_corrections"])

        return {
            "skill_target": skill_target,
            "program_type": "independence",
            "steps": steps,
            "module_id": module["id"],
            "module_name": module["name"],
            "chain_type": chain_type,
            "learner_profile": profile,
            "mastery_state": mastery_state,
            "prompt_strategy": prompt_strategy,
            "reinforcement_schedule": reinforcement_schedule,
            "error_correction": error_correction,
            "mastery_criterion": mastery_criterion,
        }

    def compute_slots(self, combo: dict, rng: random.Random) -> dict:
        """Turn a sampled combination into the template slot dict."""
        mastery_state = combo["mastery_state"]
        # Slots shared by both program types.
        common = {
            "skill_target": combo["skill_target"],
            "curriculum_ref": f"AFLS {combo['module_name']}",
            "learner_profile_name": combo["learner_profile"]["name"],
            "mastery_state_name": mastery_state["name"],
            "mastery_state_short": mastery_state.get("short", mastery_state["name"].lower()),
            "mastery_criterion_text": render_criterion_text(combo["mastery_criterion"], rng),
            "ioa_frequency": rng.choice([3, 4, 5]),
            "n_generalization_settings": rng.choice([2, 3]),
            "n_generalization_therapists": rng.choice([2, 3]),
        }

        if combo["program_type"] == "toleration":
            return {
                **common,
                "target_activity": combo["target_activity"],
                "end_goal_description": combo["end_goal_description"],
                "shaping_step_list": render_shaping_steps(combo["shaping_steps"]),
                "current_step_guidance": current_shaping_step_guidance(
                    mastery_state["id"], combo["shaping_steps"]
                ),
            }

        # Independence (chain-based)
        ec_steps_clean = combo["error_correction"]["steps"].strip()
        return {
            **common,
            "chain_type_name": combo["chain_type"]["name"],
            "chain_type_description": combo["chain_type"]["description"],
            "step_list": render_steps(combo["steps"], combo["chain_type"]["id"]),
            "prompt_strategy_name": combo["prompt_strategy"]["name"],
            "prompt_strategy_description": combo["prompt_strategy"]["description"],
            "current_prompt_guidance": current_prompt_guidance(mastery_state["id"], combo["prompt_strategy"]["id"]),
            "error_correction_name": combo["error_correction"]["name"],
            "error_correction_steps": ec_steps_clean.rstrip("."),
            "reinforcement_schedule_name": combo["reinforcement_schedule"]["name"],
            "reinforcement_description": combo["reinforcement_schedule"]["description"].rstrip("."),
        }

    def render_example(self, rng: random.Random) -> dict:
        """Sample a combination, fill the matching template, and wrap it in the envelope."""
        combo = self.sample_combination(rng)
        slots = self.compute_slots(combo, rng)

        # Pick the template matching the program type.
        template = (
            self.template_toleration
            if combo["program_type"] == "toleration"
            else self.template
        )
        user_variant = rng.choice(template["user_variants"])
        user_content = user_variant.format(**slots)
        assistant_content = template["assistant_template"].format(**slots)

        gold_labels = {
            "method": AREA,
            "domain": f"AFLS.{combo['module_id']}",
            "skill": combo["skill_target"],
            "program_type": combo["program_type"],
            "learner_profile": combo["learner_profile"]["id"],
            "mastery_state": combo["mastery_state"]["id"],
        }
        if combo["program_type"] == "independence":
            gold_labels["chain_type"] = combo["chain_type"]["id"]

        taxonomy_cells = {
            "skill_target": combo["skill_target"],
            "module": combo["module_id"],
            "mastery_criterion": combo["mastery_criterion"]["id"],
        }
        if combo["program_type"] == "independence":
            taxonomy_cells.update({
                "chain_type": combo["chain_type"]["id"],
                "prompt_strategy": combo["prompt_strategy"]["id"],
                "reinforcement_schedule": combo["reinforcement_schedule"]["id"],
                "error_correction": combo["error_correction"]["id"],
            })
        else:
            taxonomy_cells["shaping_n_steps"] = len(combo["shaping_steps"])

        provenance = {
            "layer": 1,
            "area": AREA,
            "template_id": template["template_id"],
            "taxonomy_cells": taxonomy_cells,
            "teacher_model": None,
            # Pseudo-tag derived from the RNG's internal state; presumably a
            # reproducibility aid — TODO confirm intended semantics.
            "seed_tag": str(rng.getstate()[1][0] % 100000),
        }

        return make_example_envelope(
            system_content=template["system_prompt"],
            user_content=user_content,
            assistant_content=assistant_content,
            task_type=TASK_TYPE,
            gold_labels=gold_labels,
            provenance=provenance,
        )
src/generators/base.py ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Shared base utilities for per-area generators.
2
+
3
+ This module is domain-agnostic. Area-specific logic lives in
4
+ `aba_<area>.py` modules.
5
+ """
6
+
7
+ import random
8
+ import re
9
+ from pathlib import Path
10
+
11
+ import yaml
12
+
13
+ # ============================================================
14
+ # YAML loading
15
+ # ============================================================
16
+
17
+
18
def load_yaml(path: Path) -> dict:
    """Parse the YAML document at *path* and return its contents."""
    with open(path) as handle:
        return yaml.safe_load(handle)
22
+
23
+
24
def load_shared(config_dir: Path) -> dict:
    """Load the shared primitives used across all areas.

    Returns a dict keyed by primitive name:
    learner_profiles, mastery_states, prompt_types
    """
    shared_dir = Path(config_dir) / "shared"
    primitives = ("learner_profiles", "mastery_states", "prompt_types")
    return {name: load_yaml(shared_dir / f"{name}.yaml") for name in primitives}
36
+
37
+
38
def load_area(config_dir: Path, area: str) -> dict:
    """Load an area's self-contained config: taxonomy + template + compatibility."""
    area_dir = Path(config_dir) / area
    if not area_dir.is_dir():
        raise FileNotFoundError(f"Area config directory not found: {area_dir}")
    sections = ("taxonomy", "template", "compatibility")
    return {section: load_yaml(area_dir / f"{section}.yaml") for section in sections}
48
+
49
+
50
+ # ============================================================
51
+ # Text / rendering utilities
52
+ # ============================================================
53
+
54
+
55
def article(word: str) -> str:
    """Return the indefinite article ('a' or 'an') suited to *word*."""
    # Simple orthographic heuristic: vowel-initial words take 'an'.
    if word and word[0].lower() in "aeiou":
        return "an"
    return "a"
60
+
61
+
62
def strip_trailing_period(text: str) -> str:
    """Strip trailing whitespace and any trailing periods from *text*.

    Note: ``str.rstrip(".")`` removes *all* trailing periods (so an
    ellipsis like ``"Wait..."`` becomes ``"Wait"``), not just one.
    This makes the result safe for mid-sentence embedding. The previous
    docstring claimed only a single period was removed, which did not
    match the implementation.
    """
    return text.rstrip().rstrip(".")
65
+
66
+
67
def render_prompt_sequence(hierarchy_sequence: list, prompt_types: list) -> str:
    """Render a prompt-hierarchy sequence as a human-readable arrow chain.

    Each id is resolved against the canonical prompt_types list; ids that
    are not found there (composite ids such as '0s_delay') fall back to an
    underscore-to-space conversion.
    """
    names = {entry["id"]: entry["name"] for entry in prompt_types}
    parts = [
        names[seq_id].lower() if seq_id in names else seq_id.replace("_", " ")
        for seq_id in hierarchy_sequence
    ]
    return " -> ".join(parts)
81
+
82
+
83
def parse_skill_numeric_range(skill_text: str):
    """Extract (min, max) numeric range from skill text.

    Recognises patterns such as '1-5', '1–5' (en-dash), 'to 10', '1-20'.
    Returns None when no range is present.
    """
    lowered = skill_text.lower()
    # Explicit "lo-hi" pair (hyphen or en-dash) wins over "to N".
    pair = re.search(r"(\d+)\s*[-–]\s*(\d+)", lowered)
    if pair:
        first, second = int(pair.group(1)), int(pair.group(2))
        return (first, second) if first <= second else (second, first)
    upper = re.search(r"to\s+(\d+)", lowered)
    if upper:
        return (1, int(upper.group(1)))
    return None
98
+
99
+
100
+ # ============================================================
101
+ # Shared stimulus pools (used by DTT-like array-based generators)
102
+ # ============================================================
103
+
104
# Canonical stimulus pools for array-based (DTT-style) trial generation.
# Keys are pool names; values are the full candidate item lists.
STIMULUS_POOLS = {
    "objects": [
        "ball", "cup", "shoe", "spoon", "book", "car", "block", "brush", "key",
        "phone", "hat", "sock", "apple", "crayon", "scissors", "glue", "pencil",
        "napkin", "plate", "towel", "jacket", "backpack", "bottle", "blanket",
        "toothbrush",
    ],
    "colors": ["red", "blue", "green", "yellow", "orange", "purple", "pink", "brown", "black", "white"],
    "shapes": ["circle", "square", "triangle", "rectangle", "star", "diamond", "oval", "heart"],
    "animals": [
        "dog", "cat", "bird", "fish", "horse", "cow", "pig", "sheep", "rabbit",
        "frog", "bear", "lion", "elephant", "monkey", "duck", "chicken",
    ],
    "actions": [
        "running", "jumping", "eating", "sleeping", "reading", "writing",
        "drawing", "singing", "clapping", "washing", "brushing", "pouring",
    ],
    "letters": list("ABCDEFGHIJKLMNOPQRSTUVWXYZ"),
    "numbers": [str(n) for n in range(1, 21)],
}

# Ordered (keyword, pool) mapping — first keyword found in the skill name
# wins, so more specific keywords are listed before broader ones.
SKILL_TO_POOL = [
    ("color", "colors"),
    ("shape", "shapes"),
    ("animal", "animals"),
    ("letter", "letters"),
    ("number", "numbers"),
    ("numeral", "numbers"),
    ("count", "numbers"),
    ("action", "actions"),
]


def choose_stimulus_pool(skill_target: str) -> str:
    """Pick the stimulus pool whose keyword appears in the skill name; default 'objects'."""
    lowered = skill_target.lower()
    return next(
        (pool for keyword, pool in SKILL_TO_POOL if keyword in lowered),
        "objects",
    )
144
+
145
+
146
def sample_stimuli(
    skill_target: str, array_size_n: int, rng: random.Random
) -> dict:
    """Pick target + distractor stimuli, respecting any numeric range in the skill text."""
    pool_name = choose_stimulus_pool(skill_target)
    pool = list(STIMULUS_POOLS[pool_name])

    bounds = parse_skill_numeric_range(skill_target)
    if bounds is not None:
        lo, hi = bounds
        if pool_name == "numbers":
            # Restrict the numeric pool to the skill's stated range.
            pool = [str(n) for n in range(lo, hi + 1)]
        elif pool_name == "letters":
            # Keep at least three letters so distractors remain possible.
            pool = pool[: max(hi, 3)]

    # Up to three targets, then distractors drawn from whatever remains.
    targets = rng.sample(pool, min(3, len(pool)))
    remaining = [item for item in pool if item not in targets]
    n_distractors = min(max(array_size_n - 1, 0), len(remaining))
    distractors = [] if n_distractors == 0 else rng.sample(remaining, n_distractors)
    return {"targets": targets, "distractors": distractors}
167
+
168
+
169
+ # ============================================================
170
+ # Example envelope helpers
171
+ # ============================================================
172
+
173
+
174
+ import hashlib
175
+
176
+
177
def make_example_envelope(
    *,
    system_content: str,
    user_content: str,
    assistant_content: str,
    task_type: str,
    gold_labels: dict,
    provenance: dict,
) -> dict:
    """Assemble the final JSONL example dict with id + meta envelope.

    The envelope is a pure function of its inputs: the same arguments
    always produce the same dict, including the example_id. That id is
    ``sha256(user_content + assistant_content)[:16]`` over the *published*
    (stripped) message text, so any consumer can re-derive it from the
    JSONL row itself. No wall-clock timestamps are embedded, which keeps
    repeated seeded generator runs byte-identical.
    """
    system_content = system_content.strip()
    user_content = user_content.strip()
    assistant_content = assistant_content.strip()
    digest = hashlib.sha256(
        (user_content + assistant_content).encode()
    ).hexdigest()
    messages = [
        {"role": role, "content": content}
        for role, content in (
            ("system", system_content),
            ("user", user_content),
            ("assistant", assistant_content),
        )
    ]
    return {
        "messages": messages,
        "meta": {
            "task_type": task_type,
            "example_id": digest[:16],
            "gold_labels": gold_labels,
            "provenance": provenance,
        },
    }
src/prepare_curation.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Render curation_pool.jsonl as a readable review document.
2
+
3
+ This used to drive a hand-curation workflow with decisions.csv; we now
4
+ use the whole curation pool as our test set (see compile_curation.py).
5
+ review.md is still produced as a human-readable browse of the test
6
+ corpus so clinical issues can be flagged ad hoc and fixed in the
7
+ generator configs.
8
+
9
+ Usage:
10
+ uv run python src/prepare_curation.py
11
+ """
12
+
13
+ import json
14
+ import sys
15
+ from pathlib import Path
16
+
17
# Repository root, resolved relative to this file (src/ -> repo root).
REPO_ROOT = Path(__file__).resolve().parent.parent
# Input: the held-out curation pool (one JSON example per line).
POOL_PATH = REPO_ROOT / "data" / "splits" / "curation_pool.jsonl"
# Output: human-readable review document rendered by main().
REVIEW_MD = REPO_ROOT / "docs" / "curation" / "review.md"
20
+
21
+
22
def category_of(example: dict) -> str:
    """Short category label for a candidate ('?' when the label is absent)."""
    meta = example["meta"]
    labels = meta["gold_labels"]
    # Teaching programs are categorised by method; everything else by pattern class.
    key = "method" if meta["task_type"] == "teaching_program" else "pattern_class"
    return labels.get(key, "?")
28
+
29
+
30
def gold_labels_markdown(example: dict) -> str:
    """Render gold labels inline for the review markdown."""

    def _fmt(key, value):
        # Nested dicts render as {k->v, ...}; booleans lowercase;
        # everything else in backticks.
        if isinstance(value, dict):
            inner = ", ".join(f"{ik}->{iv}" for ik, iv in value.items())
            return f"**{key}:** {{{inner}}}"
        if isinstance(value, bool):
            return f"**{key}:** {str(value).lower()}"
        return f"**{key}:** `{value}`"

    labels = example["meta"]["gold_labels"]
    return " · ".join(_fmt(k, v) for k, v in labels.items())
43
+
44
+
45
def provenance_markdown(example: dict) -> str:
    """Render a short provenance line (empty string when nothing applies)."""
    prov = example["meta"].get("provenance", {})
    # (key, renderer) pairs, in display order; only present keys are shown.
    renderers = [
        ("n_sessions", lambda v: f"{v} sessions"),
        ("n_behaviors", lambda v: f"{v} behaviors"),
        ("has_ioa_session", lambda v: "IOA ✓" if v else "no IOA"),
        ("has_abc_data", lambda v: "ABC ✓" if v else "no ABC"),
    ]
    pieces = [render(prov[key]) for key, render in renderers if key in prov]
    return " · ".join(pieces) if pieces else ""
58
+
59
+
60
def render_candidate(idx: int, example: dict) -> str:
    """Render one candidate as a markdown block.

    Produces a `### Candidate NNN` heading plus collapsible <details>
    sections for the user and assistant messages, so the review document
    stays browsable even with long transcripts.
    """
    task = example["meta"]["task_type"]
    cat = category_of(example)
    example_id = example["meta"]["example_id"]
    # messages[0] is the system prompt; only user/assistant are shown here.
    user_content = example["messages"][1]["content"]
    assistant_content = example["messages"][2]["content"]
    prov_str = provenance_markdown(example)
    # Italicised provenance line, or a bare newline when there is none.
    prov_line = f"\n*{prov_str}*\n" if prov_str else "\n"

    return f"""
---

### Candidate {idx:03d} | `{task}` | `{cat}` | id `{example_id}`

**Gold labels:** {gold_labels_markdown(example)}
{prov_line}
<details>
<summary><strong>User message (click to expand)</strong></summary>

```
{user_content}
```

</details>

<details>
<summary><strong>Assistant response (click to expand)</strong></summary>

```
{assistant_content}
```

</details>
"""
95
+
96
+
97
def main() -> int:
    """Render the curation pool as docs/curation/review.md.

    Returns a process exit code: 0 on success, 1 when the curation pool
    file is missing (in which case a hint is printed to stderr).
    """
    if not POOL_PATH.exists():
        print(f"Curation pool not found: {POOL_PATH}", file=sys.stderr)
        print("Run `uv run python src/split_data.py` first.", file=sys.stderr)
        return 1

    # Load every example (one JSON object per line).
    candidates: list[dict] = []
    with open(POOL_PATH) as f:
        for line in f:
            candidates.append(json.loads(line))

    # Group by category for diversity visibility.
    grouped_raw: dict[str, list[dict]] = {}
    for ex in candidates:
        key = f"{ex['meta']['task_type']} / {category_of(ex)}"
        grouped_raw.setdefault(key, []).append(ex)

    # Flatten into (display index, example, group key), ordered by group,
    # so candidate numbering is stable across runs.
    ordered: list[tuple[int, dict, str]] = []
    idx = 0
    for key in sorted(grouped_raw.keys()):
        for ex in grouped_raw[key]:
            idx += 1
            ordered.append((idx, ex, key))

    REVIEW_MD.parent.mkdir(parents=True, exist_ok=True)
    with open(REVIEW_MD, "w") as f:
        # Header + usage instructions.
        f.write("# Corpus Review — TRACE test corpus\n\n")
        f.write(f"**{len(candidates)} examples** held out as the evaluation corpus.\n\n")
        f.write("## How to use this file\n")
        f.write("- Browse candidates below to spot clinical-accuracy issues "
                "(wrong topography, unrealistic scenario, inconsistent data, etc.).\n")
        f.write("- When you find one, note the candidate id (shown next to the candidate title). "
                "Fixes are applied by editing the generator configs in `configs/` and regenerating.\n")
        f.write("- The test + sanity splits are compiled from this pool by "
                "`uv run python src/compile_curation.py` (no per-example decisions required).\n\n")
        # Per-category counts table.
        f.write("## Distribution\n\n")
        f.write("| Category | Count |\n|---|---|\n")
        for key in sorted(grouped_raw.keys()):
            f.write(f"| {key} | {len(grouped_raw[key])} |\n")
        f.write("\n---\n\n")
        # Candidate bodies, with a group heading each time the group changes.
        f.write("## Candidates\n")
        current_group = None
        for idx, ex, key in ordered:
            if key != current_group:
                f.write(f"\n### Group: {key}\n")
                current_group = key
            f.write(render_candidate(idx, ex))

    print(f"Wrote: {REVIEW_MD} ({len(candidates)} candidates)")
    print("Next: run `uv run python src/compile_curation.py` to produce test.jsonl + sanity.jsonl.")
    return 0
148
+
149
+
150
if __name__ == "__main__":
    # Propagate main()'s exit code to the shell.
    raise SystemExit(main())