codysnider committed on
Commit
25be136
·
0 Parent(s):

Initial FalseMemBench dataset

Browse files
.gitattributes ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ *.jsonl text eol=lf
2
+ *.json text eol=lf
3
+ *.md text eol=lf
4
+ *.py text eol=lf
.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ results/
2
+ __pycache__/
README.md ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # FalseMemBench
2
+
3
+ `FalseMemBench` is a benchmark project for evaluating memory retrieval systems in the presence of adversarial distractors.
4
+
5
+ The goal is to measure whether a system can retrieve the correct memory when many semantically similar but incorrect memories are present.
6
+
7
+ ## Focus
8
+
9
+ The benchmark is designed for memory systems used by LLM agents.
10
+
11
+ It emphasizes:
12
+
13
+ - entity confusion
14
+ - environment confusion
15
+ - time/version confusion
16
+ - stale facts vs current facts
17
+ - speaker confusion
18
+ - near-duplicate paraphrases
19
+
20
+ ## Layout
21
+
22
+ - `schema/case.schema.json`: benchmark case schema
23
+ - `data/cases.jsonl`: current benchmark cases
24
+ - `docs/`: benchmark design notes
25
+ - `scripts/validate.py`: schema validator for the JSONL dataset
26
+ - `scripts/run_benchmark.py`: simple keyword baseline
27
+ - `scripts/run_tagmem_benchmark.py`: run the benchmark against a real `tagmem` binary
28
+
29
+ ## Case format
30
+
31
+ Each case contains:
32
+
33
+ - a `query`
34
+ - a set of `entries`
35
+ - one or more `relevant_ids`
36
+ - a single `adversary_type`
37
+ - optional metadata for analysis
38
+
39
+ ## Example
40
+
41
+ ```json
42
+ {
43
+ "id": "env-001",
44
+ "query": "What database does staging use?",
45
+ "adversary_type": "environment_swap",
46
+ "entries": [
47
+ {
48
+ "id": "e1",
49
+ "text": "The staging environment uses db-staging.internal.",
50
+ "tags": ["staging", "database", "infra"],
51
+ "depth": 1
52
+ },
53
+ {
54
+ "id": "e2",
55
+ "text": "The production environment uses db-prod.internal.",
56
+ "tags": ["production", "database", "infra"],
57
+ "depth": 1
58
+ }
59
+ ],
60
+ "relevant_ids": ["e1"]
61
+ }
62
+ ```
63
+
64
+ ## Current adversary types
65
+
66
+ - `entity_swap`
67
+ - `environment_swap`
68
+ - `time_swap`
69
+ - `state_update`
70
+ - `speaker_swap`
71
+ - `near_duplicate_paraphrase`
72
+
73
+ Current dataset size:
74
+
75
+ - `573` cases
76
+
77
+ ## Intended use
78
+
79
+ The benchmark is intended to be:
80
+
81
+ - model-agnostic
82
+ - storage-agnostic
83
+ - metadata-friendly
84
+ - easy to publish to GitHub and Hugging Face
README_HF.md ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ pretty_name: FalseMemBench
3
+ license: mit
4
+ task_categories:
5
+ - text-retrieval
6
+ language:
7
+ - en
8
+ tags:
9
+ - retrieval
10
+ - memory
11
+ - llm-agents
12
+ - adversarial
13
+ size_categories:
14
+ - n<1K
15
+ ---
16
+
17
+ # FalseMemBench
18
+
19
+ This dataset contains adversarial distractor cases for evaluating memory retrieval systems used by LLM agents.
20
+
21
+ Each case contains:
22
+
23
+ - a query
24
+ - a small candidate corpus
25
+ - one or more relevant entry ids
26
+ - an adversary type label
27
+
28
+ The dataset is intended for evaluating retrieval under confusion pressure rather than open-ended generation.
29
+
30
+ Current adversary types:
31
+
32
+ - entity swap
33
+ - environment swap
34
+ - time swap
35
+ - state update
36
+ - speaker swap
37
+ - near-duplicate paraphrase
38
+
39
+ Current dataset size:
40
+
41
+ - `573` cases
data/cases.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/v0.1/cases.jsonl ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {"id":"env-001","query":"What database does staging use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The staging environment uses db-staging.internal.","tags":["staging","database","infra"],"depth":1},{"id":"e2","text":"The production environment uses db-prod.internal.","tags":["production","database","infra"],"depth":1},{"id":"e3","text":"The staging environment sends email through ses-staging.","tags":["staging","email","infra"],"depth":2}],"relevant_ids":["e1"],"notes":"Environment swap distractor between staging and production."}
2
+ {"id":"entity-001","query":"Which milk does Caroline prefer?","adversary_type":"entity_swap","entries":[{"id":"e1","text":"Caroline prefers oat milk in her coffee.","tags":["caroline","preference","coffee"],"depth":1},{"id":"e2","text":"Catherine prefers oat milk in her coffee.","tags":["catherine","preference","coffee"],"depth":1},{"id":"e3","text":"Caroline usually drinks black tea in the afternoon.","tags":["caroline","tea"],"depth":2}],"relevant_ids":["e1"],"notes":"Entity swap with near-identical surface form."}
3
+ {"id":"time-001","query":"When did we migrate auth to bearer tokens?","adversary_type":"time_swap","entries":[{"id":"e1","text":"We migrated auth to bearer tokens in March.","tags":["auth","migration"],"depth":1,"timestamp":"2026-03-03"},{"id":"e2","text":"We migrated auth to bearer tokens in May.","tags":["auth","migration"],"depth":1,"timestamp":"2026-05-04"},{"id":"e3","text":"We moved session cleanup jobs to nightly execution in March.","tags":["auth","jobs"],"depth":2,"timestamp":"2026-03-10"}],"relevant_ids":["e1"],"notes":"Time/value swap with same core event."}
4
+ {"id":"state-001","query":"What is the current production domain?","adversary_type":"state_update","entries":[{"id":"e1","text":"The current production domain is ecue.ai.","tags":["production","domain"],"depth":0,"timestamp":"2026-04-01"},{"id":"e2","text":"The production domain used to be hifidelityai.com.","tags":["production","domain"],"depth":2,"timestamp":"2025-10-01"},{"id":"e3","text":"The staging domain is hifidelityai.com.","tags":["staging","domain"],"depth":1,"timestamp":"2026-04-01"}],"relevant_ids":["e1"],"notes":"Tests stale fact vs current fact retrieval."}
5
+ {"id":"speaker-001","query":"What did you suggest for Terraform environments?","adversary_type":"speaker_swap","entries":[{"id":"e1","text":"You suggested separate Terraform states for shared, production, and staging.","tags":["terraform","envs","suggestion"],"depth":1,"speaker":"assistant"},{"id":"e2","text":"I suggested keeping one Terraform state with workspaces.","tags":["terraform","envs","suggestion"],"depth":1,"speaker":"user"},{"id":"e3","text":"We discussed Terraform imports for existing resources.","tags":["terraform","imports"],"depth":2}],"relevant_ids":["e1"],"notes":"Assistant/user speaker confusion."}
6
+ {"id":"paraphrase-001","query":"What timeout do API calls use?","adversary_type":"near_duplicate_paraphrase","entries":[{"id":"e1","text":"API calls time out after 30 seconds.","tags":["api","timeouts"],"depth":1},{"id":"e2","text":"The API timeout is 60 seconds.","tags":["api","timeouts"],"depth":1},{"id":"e3","text":"Background jobs retry for up to 30 seconds.","tags":["jobs","timeouts"],"depth":2}],"relevant_ids":["e1"],"notes":"Semantically close distractor with wrong value."}
data/v0.2/cases.jsonl ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {"id":"env-001","query":"What database does staging use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The staging environment uses db-staging.internal for its database.","tags":["staging","database","infra"],"depth":1},{"id":"e2","text":"The production environment uses db-prod.internal for its database.","tags":["production","database","infra"],"depth":1},{"id":"e3","text":"The development environment uses db-dev.internal for its database.","tags":["development","database","infra"],"depth":2},{"id":"e4","text":"The staging environment rotates credentials weekly for the database.","tags":["staging","database","ops"],"depth":2},{"id":"e5","text":"The staging email sender is ses-staging for notification traffic.","tags":["staging","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
2
+ {"id":"env-002","query":"What database does production use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The production environment uses db-prod.internal for its database.","tags":["production","database","infra"],"depth":1},{"id":"e2","text":"The staging environment uses db-staging.internal for its database.","tags":["staging","database","infra"],"depth":1},{"id":"e3","text":"The development environment uses db-dev.internal for its database.","tags":["development","database","infra"],"depth":2},{"id":"e4","text":"The production environment rotates credentials weekly for the database.","tags":["production","database","ops"],"depth":2},{"id":"e5","text":"The production email sender is ses-production for notification traffic.","tags":["production","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
3
+ {"id":"env-003","query":"What database does development use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The development environment uses db-dev.internal for its database.","tags":["development","database","infra"],"depth":1},{"id":"e2","text":"The production environment uses db-staging.internal for its database.","tags":["production","database","infra"],"depth":1},{"id":"e3","text":"The staging environment uses db-prod.internal for its database.","tags":["staging","database","infra"],"depth":2},{"id":"e4","text":"The development environment rotates credentials weekly for the database.","tags":["development","database","ops"],"depth":2},{"id":"e5","text":"The development email sender is ses-development for notification traffic.","tags":["development","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
4
+ {"id":"env-004","query":"What redis cache does staging use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The staging environment uses redis-staging.internal for its redis cache.","tags":["staging","redis-cache","infra"],"depth":1},{"id":"e2","text":"The production environment uses redis-prod.internal for its redis cache.","tags":["production","redis-cache","infra"],"depth":1},{"id":"e3","text":"The development environment uses redis-dev.internal for its redis cache.","tags":["development","redis-cache","infra"],"depth":2},{"id":"e4","text":"The staging environment rotates credentials weekly for the redis cache.","tags":["staging","redis-cache","ops"],"depth":2},{"id":"e5","text":"The staging email sender is ses-staging for notification traffic.","tags":["staging","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
5
+ {"id":"env-005","query":"What redis cache does production use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The production environment uses redis-prod.internal for its redis cache.","tags":["production","redis-cache","infra"],"depth":1},{"id":"e2","text":"The staging environment uses redis-staging.internal for its redis cache.","tags":["staging","redis-cache","infra"],"depth":1},{"id":"e3","text":"The development environment uses redis-dev.internal for its redis cache.","tags":["development","redis-cache","infra"],"depth":2},{"id":"e4","text":"The production environment rotates credentials weekly for the redis cache.","tags":["production","redis-cache","ops"],"depth":2},{"id":"e5","text":"The production email sender is ses-production for notification traffic.","tags":["production","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
6
+ {"id":"env-006","query":"What redis cache does development use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The development environment uses redis-dev.internal for its redis cache.","tags":["development","redis-cache","infra"],"depth":1},{"id":"e2","text":"The production environment uses redis-staging.internal for its redis cache.","tags":["production","redis-cache","infra"],"depth":1},{"id":"e3","text":"The staging environment uses redis-prod.internal for its redis cache.","tags":["staging","redis-cache","infra"],"depth":2},{"id":"e4","text":"The development environment rotates credentials weekly for the redis cache.","tags":["development","redis-cache","ops"],"depth":2},{"id":"e5","text":"The development email sender is ses-development for notification traffic.","tags":["development","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
7
+ {"id":"env-007","query":"What object storage bucket does staging use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The staging environment uses assets-staging for its object storage bucket.","tags":["staging","object-storage-bucket","infra"],"depth":1},{"id":"e2","text":"The production environment uses assets-prod for its object storage bucket.","tags":["production","object-storage-bucket","infra"],"depth":1},{"id":"e3","text":"The development environment uses assets-dev for its object storage bucket.","tags":["development","object-storage-bucket","infra"],"depth":2},{"id":"e4","text":"The staging environment rotates credentials weekly for the object storage bucket.","tags":["staging","object-storage-bucket","ops"],"depth":2},{"id":"e5","text":"The staging email sender is ses-staging for notification traffic.","tags":["staging","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
8
+ {"id":"env-008","query":"What object storage bucket does production use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The production environment uses assets-prod for its object storage bucket.","tags":["production","object-storage-bucket","infra"],"depth":1},{"id":"e2","text":"The staging environment uses assets-staging for its object storage bucket.","tags":["staging","object-storage-bucket","infra"],"depth":1},{"id":"e3","text":"The development environment uses assets-dev for its object storage bucket.","tags":["development","object-storage-bucket","infra"],"depth":2},{"id":"e4","text":"The production environment rotates credentials weekly for the object storage bucket.","tags":["production","object-storage-bucket","ops"],"depth":2},{"id":"e5","text":"The production email sender is ses-production for notification traffic.","tags":["production","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
9
+ {"id":"env-009","query":"What object storage bucket does development use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The development environment uses assets-dev for its object storage bucket.","tags":["development","object-storage-bucket","infra"],"depth":1},{"id":"e2","text":"The production environment uses assets-staging for its object storage bucket.","tags":["production","object-storage-bucket","infra"],"depth":1},{"id":"e3","text":"The staging environment uses assets-prod for its object storage bucket.","tags":["staging","object-storage-bucket","infra"],"depth":2},{"id":"e4","text":"The development environment rotates credentials weekly for the object storage bucket.","tags":["development","object-storage-bucket","ops"],"depth":2},{"id":"e5","text":"The development email sender is ses-development for notification traffic.","tags":["development","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
10
+ {"id":"env-010","query":"What email sender does staging use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The staging environment uses ses-staging for its email sender.","tags":["staging","email-sender","infra"],"depth":1},{"id":"e2","text":"The production environment uses ses-prod for its email sender.","tags":["production","email-sender","infra"],"depth":1},{"id":"e3","text":"The development environment uses ses-dev for its email sender.","tags":["development","email-sender","infra"],"depth":2},{"id":"e4","text":"The staging environment rotates credentials weekly for the email sender.","tags":["staging","email-sender","ops"],"depth":2},{"id":"e5","text":"The staging email sender is ses-staging for notification traffic.","tags":["staging","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
11
+ {"id":"env-011","query":"What email sender does production use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The production environment uses ses-prod for its email sender.","tags":["production","email-sender","infra"],"depth":1},{"id":"e2","text":"The staging environment uses ses-staging for its email sender.","tags":["staging","email-sender","infra"],"depth":1},{"id":"e3","text":"The development environment uses ses-dev for its email sender.","tags":["development","email-sender","infra"],"depth":2},{"id":"e4","text":"The production environment rotates credentials weekly for the email sender.","tags":["production","email-sender","ops"],"depth":2},{"id":"e5","text":"The production email sender is ses-production for notification traffic.","tags":["production","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
12
+ {"id":"env-012","query":"What email sender does development use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The development environment uses ses-dev for its email sender.","tags":["development","email-sender","infra"],"depth":1},{"id":"e2","text":"The production environment uses ses-staging for its email sender.","tags":["production","email-sender","infra"],"depth":1},{"id":"e3","text":"The staging environment uses ses-prod for its email sender.","tags":["staging","email-sender","infra"],"depth":2},{"id":"e4","text":"The development environment rotates credentials weekly for the email sender.","tags":["development","email-sender","ops"],"depth":2},{"id":"e5","text":"The development email sender is ses-development for notification traffic.","tags":["development","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
13
+ {"id":"env-013","query":"What nats cluster does staging use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The staging environment uses nats-staging.internal for its nats cluster.","tags":["staging","nats-cluster","infra"],"depth":1},{"id":"e2","text":"The production environment uses nats-prod.internal for its nats cluster.","tags":["production","nats-cluster","infra"],"depth":1},{"id":"e3","text":"The development environment uses nats-dev.internal for its nats cluster.","tags":["development","nats-cluster","infra"],"depth":2},{"id":"e4","text":"The staging environment rotates credentials weekly for the nats cluster.","tags":["staging","nats-cluster","ops"],"depth":2},{"id":"e5","text":"The staging email sender is ses-staging for notification traffic.","tags":["staging","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
14
+ {"id":"env-014","query":"What nats cluster does production use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The production environment uses nats-prod.internal for its nats cluster.","tags":["production","nats-cluster","infra"],"depth":1},{"id":"e2","text":"The staging environment uses nats-staging.internal for its nats cluster.","tags":["staging","nats-cluster","infra"],"depth":1},{"id":"e3","text":"The development environment uses nats-dev.internal for its nats cluster.","tags":["development","nats-cluster","infra"],"depth":2},{"id":"e4","text":"The production environment rotates credentials weekly for the nats cluster.","tags":["production","nats-cluster","ops"],"depth":2},{"id":"e5","text":"The production email sender is ses-production for notification traffic.","tags":["production","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
15
+ {"id":"env-015","query":"What nats cluster does development use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The development environment uses nats-dev.internal for its nats cluster.","tags":["development","nats-cluster","infra"],"depth":1},{"id":"e2","text":"The production environment uses nats-staging.internal for its nats cluster.","tags":["production","nats-cluster","infra"],"depth":1},{"id":"e3","text":"The staging environment uses nats-prod.internal for its nats cluster.","tags":["staging","nats-cluster","infra"],"depth":2},{"id":"e4","text":"The development environment rotates credentials weekly for the nats cluster.","tags":["development","nats-cluster","ops"],"depth":2},{"id":"e5","text":"The development email sender is ses-development for notification traffic.","tags":["development","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
16
+ {"id":"env-016","query":"What API base URL does staging use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The staging environment uses https://api-staging.ecue.ai for its API base URL.","tags":["staging","api-base-url","infra"],"depth":1},{"id":"e2","text":"The production environment uses https://api.ecue.ai for its API base URL.","tags":["production","api-base-url","infra"],"depth":1},{"id":"e3","text":"The development environment uses https://api-dev.ecue.ai for its API base URL.","tags":["development","api-base-url","infra"],"depth":2},{"id":"e4","text":"The staging environment rotates credentials weekly for the API base URL.","tags":["staging","api-base-url","ops"],"depth":2},{"id":"e5","text":"The staging email sender is ses-staging for notification traffic.","tags":["staging","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
17
+ {"id":"env-017","query":"What API base URL does production use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The production environment uses https://api.ecue.ai for its API base URL.","tags":["production","api-base-url","infra"],"depth":1},{"id":"e2","text":"The staging environment uses https://api-staging.ecue.ai for its API base URL.","tags":["staging","api-base-url","infra"],"depth":1},{"id":"e3","text":"The development environment uses https://api-dev.ecue.ai for its API base URL.","tags":["development","api-base-url","infra"],"depth":2},{"id":"e4","text":"The production environment rotates credentials weekly for the API base URL.","tags":["production","api-base-url","ops"],"depth":2},{"id":"e5","text":"The production email sender is ses-production for notification traffic.","tags":["production","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
18
+ {"id":"env-018","query":"What API base URL does development use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The development environment uses https://api-dev.ecue.ai for its API base URL.","tags":["development","api-base-url","infra"],"depth":1},{"id":"e2","text":"The production environment uses https://api-staging.ecue.ai for its API base URL.","tags":["production","api-base-url","infra"],"depth":1},{"id":"e3","text":"The staging environment uses https://api.ecue.ai for its API base URL.","tags":["staging","api-base-url","infra"],"depth":2},{"id":"e4","text":"The development environment rotates credentials weekly for the API base URL.","tags":["development","api-base-url","ops"],"depth":2},{"id":"e5","text":"The development email sender is ses-development for notification traffic.","tags":["development","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
19
+ {"id":"env-019","query":"What metrics backend does staging use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The staging environment uses vm-staging.internal for its metrics backend.","tags":["staging","metrics-backend","infra"],"depth":1},{"id":"e2","text":"The production environment uses vm-prod.internal for its metrics backend.","tags":["production","metrics-backend","infra"],"depth":1},{"id":"e3","text":"The development environment uses vm-dev.internal for its metrics backend.","tags":["development","metrics-backend","infra"],"depth":2},{"id":"e4","text":"The staging environment rotates credentials weekly for the metrics backend.","tags":["staging","metrics-backend","ops"],"depth":2},{"id":"e5","text":"The staging email sender is ses-staging for notification traffic.","tags":["staging","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
20
+ {"id":"env-020","query":"What metrics backend does production use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The production environment uses vm-prod.internal for its metrics backend.","tags":["production","metrics-backend","infra"],"depth":1},{"id":"e2","text":"The staging environment uses vm-staging.internal for its metrics backend.","tags":["staging","metrics-backend","infra"],"depth":1},{"id":"e3","text":"The development environment uses vm-dev.internal for its metrics backend.","tags":["development","metrics-backend","infra"],"depth":2},{"id":"e4","text":"The production environment rotates credentials weekly for the metrics backend.","tags":["production","metrics-backend","ops"],"depth":2},{"id":"e5","text":"The production email sender is ses-production for notification traffic.","tags":["production","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
21
+ {"id":"env-021","query":"What metrics backend does development use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The development environment uses vm-dev.internal for its metrics backend.","tags":["development","metrics-backend","infra"],"depth":1},{"id":"e2","text":"The production environment uses vm-staging.internal for its metrics backend.","tags":["production","metrics-backend","infra"],"depth":1},{"id":"e3","text":"The staging environment uses vm-prod.internal for its metrics backend.","tags":["staging","metrics-backend","infra"],"depth":2},{"id":"e4","text":"The development environment rotates credentials weekly for the metrics backend.","tags":["development","metrics-backend","ops"],"depth":2},{"id":"e5","text":"The development email sender is ses-development for notification traffic.","tags":["development","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
22
+ {"id":"env-022","query":"What auth issuer does staging use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The staging environment uses https://auth-staging.ecue.ai for its auth issuer.","tags":["staging","auth-issuer","infra"],"depth":1},{"id":"e2","text":"The production environment uses https://auth.ecue.ai for its auth issuer.","tags":["production","auth-issuer","infra"],"depth":1},{"id":"e3","text":"The development environment uses https://auth-dev.ecue.ai for its auth issuer.","tags":["development","auth-issuer","infra"],"depth":2},{"id":"e4","text":"The staging environment rotates credentials weekly for the auth issuer.","tags":["staging","auth-issuer","ops"],"depth":2},{"id":"e5","text":"The staging email sender is ses-staging for notification traffic.","tags":["staging","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
23
+ {"id":"env-023","query":"What auth issuer does production use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The production environment uses https://auth.ecue.ai for its auth issuer.","tags":["production","auth-issuer","infra"],"depth":1},{"id":"e2","text":"The staging environment uses https://auth-staging.ecue.ai for its auth issuer.","tags":["staging","auth-issuer","infra"],"depth":1},{"id":"e3","text":"The development environment uses https://auth-dev.ecue.ai for its auth issuer.","tags":["development","auth-issuer","infra"],"depth":2},{"id":"e4","text":"The production environment rotates credentials weekly for the auth issuer.","tags":["production","auth-issuer","ops"],"depth":2},{"id":"e5","text":"The production email sender is ses-production for notification traffic.","tags":["production","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
24
+ {"id":"env-024","query":"What auth issuer does development use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The development environment uses https://auth-dev.ecue.ai for its auth issuer.","tags":["development","auth-issuer","infra"],"depth":1},{"id":"e2","text":"The production environment uses https://auth-staging.ecue.ai for its auth issuer.","tags":["production","auth-issuer","infra"],"depth":1},{"id":"e3","text":"The staging environment uses https://auth.ecue.ai for its auth issuer.","tags":["staging","auth-issuer","infra"],"depth":2},{"id":"e4","text":"The development environment rotates credentials weekly for the auth issuer.","tags":["development","auth-issuer","ops"],"depth":2},{"id":"e5","text":"The development email sender is ses-development for notification traffic.","tags":["development","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
25
+ {"id":"env-025","query":"What artifact bucket does staging use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The staging environment uses artifacts-staging for its artifact bucket.","tags":["staging","artifact-bucket","infra"],"depth":1},{"id":"e2","text":"The production environment uses artifacts-prod for its artifact bucket.","tags":["production","artifact-bucket","infra"],"depth":1},{"id":"e3","text":"The development environment uses artifacts-dev for its artifact bucket.","tags":["development","artifact-bucket","infra"],"depth":2},{"id":"e4","text":"The staging environment rotates credentials weekly for the artifact bucket.","tags":["staging","artifact-bucket","ops"],"depth":2},{"id":"e5","text":"The staging email sender is ses-staging for notification traffic.","tags":["staging","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
26
+ {"id":"env-026","query":"What artifact bucket does production use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The production environment uses artifacts-prod for its artifact bucket.","tags":["production","artifact-bucket","infra"],"depth":1},{"id":"e2","text":"The staging environment uses artifacts-staging for its artifact bucket.","tags":["staging","artifact-bucket","infra"],"depth":1},{"id":"e3","text":"The development environment uses artifacts-dev for its artifact bucket.","tags":["development","artifact-bucket","infra"],"depth":2},{"id":"e4","text":"The production environment rotates credentials weekly for the artifact bucket.","tags":["production","artifact-bucket","ops"],"depth":2},{"id":"e5","text":"The production email sender is ses-production for notification traffic.","tags":["production","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
27
+ {"id":"env-027","query":"What artifact bucket does development use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The development environment uses artifacts-dev for its artifact bucket.","tags":["development","artifact-bucket","infra"],"depth":1},{"id":"e2","text":"The production environment uses artifacts-staging for its artifact bucket.","tags":["production","artifact-bucket","infra"],"depth":1},{"id":"e3","text":"The staging environment uses artifacts-prod for its artifact bucket.","tags":["staging","artifact-bucket","infra"],"depth":2},{"id":"e4","text":"The development environment rotates credentials weekly for the artifact bucket.","tags":["development","artifact-bucket","ops"],"depth":2},{"id":"e5","text":"The development email sender is ses-development for notification traffic.","tags":["development","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
28
+ {"id":"env-028","query":"What search index does staging use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The staging environment uses meili-staging.internal for its search index.","tags":["staging","search-index","infra"],"depth":1},{"id":"e2","text":"The production environment uses meili-prod.internal for its search index.","tags":["production","search-index","infra"],"depth":1},{"id":"e3","text":"The development environment uses meili-dev.internal for its search index.","tags":["development","search-index","infra"],"depth":2},{"id":"e4","text":"The staging environment rotates credentials weekly for the search index.","tags":["staging","search-index","ops"],"depth":2},{"id":"e5","text":"The staging email sender is ses-staging for notification traffic.","tags":["staging","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
29
+ {"id":"env-029","query":"What search index does production use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The production environment uses meili-prod.internal for its search index.","tags":["production","search-index","infra"],"depth":1},{"id":"e2","text":"The staging environment uses meili-staging.internal for its search index.","tags":["staging","search-index","infra"],"depth":1},{"id":"e3","text":"The development environment uses meili-dev.internal for its search index.","tags":["development","search-index","infra"],"depth":2},{"id":"e4","text":"The production environment rotates credentials weekly for the search index.","tags":["production","search-index","ops"],"depth":2},{"id":"e5","text":"The production email sender is ses-production for notification traffic.","tags":["production","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
30
+ {"id":"env-030","query":"What search index does development use?","adversary_type":"environment_swap","entries":[{"id":"e1","text":"The development environment uses meili-dev.internal for its search index.","tags":["development","search-index","infra"],"depth":1},{"id":"e2","text":"The production environment uses meili-staging.internal for its search index.","tags":["production","search-index","infra"],"depth":1},{"id":"e3","text":"The staging environment uses meili-prod.internal for its search index.","tags":["staging","search-index","infra"],"depth":2},{"id":"e4","text":"The development environment rotates credentials weekly for the search index.","tags":["development","search-index","ops"],"depth":2},{"id":"e5","text":"The development email sender is ses-development for notification traffic.","tags":["development","email","infra"],"depth":3}],"relevant_ids":["e1"],"notes":"Environment swap distractors with same service family."}
31
+ {"id":"entity-001","query":"What does Caroline prefer?","adversary_type":"entity_swap","entries":[{"id":"e1","text":"Caroline prefers oat milk for daily work.","tags":["caroline","preference"],"depth":1},{"id":"e2","text":"Catherine prefers oat milk for daily work.","tags":["catherine","preference"],"depth":1},{"id":"e3","text":"Caroline also likes black tea in some situations.","tags":["caroline","preference"],"depth":2},{"id":"e4","text":"The team discussed oat milk and black tea during planning.","tags":["team","planning"],"depth":3},{"id":"e5","text":"Caroline wrote a migration note about deployment safety.","tags":["caroline","deployment"],"depth":3}],"relevant_ids":["e1"],"notes":"Entity swap with near-name collision and shared preference value."}
32
+ {"id":"entity-002","query":"What does Jordan prefer?","adversary_type":"entity_swap","entries":[{"id":"e1","text":"Jordan prefers standing desk for daily work.","tags":["jordan","preference"],"depth":1},{"id":"e2","text":"Jordyn prefers standing desk for daily work.","tags":["jordyn","preference"],"depth":1},{"id":"e3","text":"Jordan also likes ergonomic chair in some situations.","tags":["jordan","preference"],"depth":2},{"id":"e4","text":"The team discussed standing desk and ergonomic chair during planning.","tags":["team","planning"],"depth":3},{"id":"e5","text":"Jordan wrote a migration note about deployment safety.","tags":["jordan","deployment"],"depth":3}],"relevant_ids":["e1"],"notes":"Entity swap with near-name collision and shared preference value."}
33
+ {"id":"entity-003","query":"What does Alicia prefer?","adversary_type":"entity_swap","entries":[{"id":"e1","text":"Alicia prefers Postgres for daily work.","tags":["alicia","preference"],"depth":1},{"id":"e2","text":"Alice prefers Postgres for daily work.","tags":["alice","preference"],"depth":1},{"id":"e3","text":"Alicia also likes MySQL in some situations.","tags":["alicia","preference"],"depth":2},{"id":"e4","text":"The team discussed Postgres and MySQL during planning.","tags":["team","planning"],"depth":3},{"id":"e5","text":"Alicia wrote a migration note about deployment safety.","tags":["alicia","deployment"],"depth":3}],"relevant_ids":["e1"],"notes":"Entity swap with near-name collision and shared preference value."}
34
+ {"id":"entity-004","query":"What does Mika prefer?","adversary_type":"entity_swap","entries":[{"id":"e1","text":"Mika prefers vim for daily work.","tags":["mika","preference"],"depth":1},{"id":"e2","text":"Mila prefers vim for daily work.","tags":["mila","preference"],"depth":1},{"id":"e3","text":"Mika also likes emacs in some situations.","tags":["mika","preference"],"depth":2},{"id":"e4","text":"The team discussed vim and emacs during planning.","tags":["team","planning"],"depth":3},{"id":"e5","text":"Mika wrote a migration note about deployment safety.","tags":["mika","deployment"],"depth":3}],"relevant_ids":["e1"],"notes":"Entity swap with near-name collision and shared preference value."}
35
+ {"id":"entity-005","query":"What does Darren prefer?","adversary_type":"entity_swap","entries":[{"id":"e1","text":"Darren prefers dark mode for daily work.","tags":["darren","preference"],"depth":1},{"id":"e2","text":"Dorian prefers dark mode for daily work.","tags":["dorian","preference"],"depth":1},{"id":"e3","text":"Darren also likes light mode in some situations.","tags":["darren","preference"],"depth":2},{"id":"e4","text":"The team discussed dark mode and light mode during planning.","tags":["team","planning"],"depth":3},{"id":"e5","text":"Darren wrote a migration note about deployment safety.","tags":["darren","deployment"],"depth":3}],"relevant_ids":["e1"],"notes":"Entity swap with near-name collision and shared preference value."}
36
+ {"id":"entity-006","query":"What does Riley prefer?","adversary_type":"entity_swap","entries":[{"id":"e1","text":"Riley prefers GraphQL for daily work.","tags":["riley","preference"],"depth":1},{"id":"e2","text":"Ryan prefers GraphQL for daily work.","tags":["ryan","preference"],"depth":1},{"id":"e3","text":"Riley also likes REST in some situations.","tags":["riley","preference"],"depth":2},{"id":"e4","text":"The team discussed GraphQL and REST during planning.","tags":["team","planning"],"depth":3},{"id":"e5","text":"Riley wrote a migration note about deployment safety.","tags":["riley","deployment"],"depth":3}],"relevant_ids":["e1"],"notes":"Entity swap with near-name collision and shared preference value."}
37
+ {"id":"entity-007","query":"What does Marina prefer?","adversary_type":"entity_swap","entries":[{"id":"e1","text":"Marina prefers daily standups for daily work.","tags":["marina","preference"],"depth":1},{"id":"e2","text":"Maria prefers daily standups for daily work.","tags":["maria","preference"],"depth":1},{"id":"e3","text":"Marina also likes weekly status docs in some situations.","tags":["marina","preference"],"depth":2},{"id":"e4","text":"The team discussed daily standups and weekly status docs during planning.","tags":["team","planning"],"depth":3},{"id":"e5","text":"Marina wrote a migration note about deployment safety.","tags":["marina","deployment"],"depth":3}],"relevant_ids":["e1"],"notes":"Entity swap with near-name collision and shared preference value."}
38
+ {"id":"entity-008","query":"What does Talia prefer?","adversary_type":"entity_swap","entries":[{"id":"e1","text":"Talia prefers Go modules for daily work.","tags":["talia","preference"],"depth":1},{"id":"e2","text":"Tanya prefers Go modules for daily work.","tags":["tanya","preference"],"depth":1},{"id":"e3","text":"Talia also likes Bazel workspaces in some situations.","tags":["talia","preference"],"depth":2},{"id":"e4","text":"The team discussed Go modules and Bazel workspaces during planning.","tags":["team","planning"],"depth":3},{"id":"e5","text":"Talia wrote a migration note about deployment safety.","tags":["talia","deployment"],"depth":3}],"relevant_ids":["e1"],"notes":"Entity swap with near-name collision and shared preference value."}
39
+ {"id":"entity-009","query":"What does Nolan prefer?","adversary_type":"entity_swap","entries":[{"id":"e1","text":"Nolan prefers CUDA containers for daily work.","tags":["nolan","preference"],"depth":1},{"id":"e2","text":"Noah prefers CUDA containers for daily work.","tags":["noah","preference"],"depth":1},{"id":"e3","text":"Nolan also likes CPU-only builds in some situations.","tags":["nolan","preference"],"depth":2},{"id":"e4","text":"The team discussed CUDA containers and CPU-only builds during planning.","tags":["team","planning"],"depth":3},{"id":"e5","text":"Nolan wrote a migration note about deployment safety.","tags":["nolan","deployment"],"depth":3}],"relevant_ids":["e1"],"notes":"Entity swap with near-name collision and shared preference value."}
40
+ {"id":"entity-010","query":"What does Elena prefer?","adversary_type":"entity_swap","entries":[{"id":"e1","text":"Elena prefers Linear for daily work.","tags":["elena","preference"],"depth":1},{"id":"e2","text":"Alina prefers Linear for daily work.","tags":["alina","preference"],"depth":1},{"id":"e3","text":"Elena also likes Jira in some situations.","tags":["elena","preference"],"depth":2},{"id":"e4","text":"The team discussed Linear and Jira during planning.","tags":["team","planning"],"depth":3},{"id":"e5","text":"Elena wrote a migration note about deployment safety.","tags":["elena","deployment"],"depth":3}],"relevant_ids":["e1"],"notes":"Entity swap with near-name collision and shared preference value."}
41
+ {"id":"entity-011","query":"What does Sonia prefer?","adversary_type":"entity_swap","entries":[{"id":"e1","text":"Sonia prefers dark roast for daily work.","tags":["sonia","preference"],"depth":1},{"id":"e2","text":"Sonya prefers dark roast for daily work.","tags":["sonya","preference"],"depth":1},{"id":"e3","text":"Sonia also likes green tea in some situations.","tags":["sonia","preference"],"depth":2},{"id":"e4","text":"The team discussed dark roast and green tea during planning.","tags":["team","planning"],"depth":3},{"id":"e5","text":"Sonia wrote a migration note about deployment safety.","tags":["sonia","deployment"],"depth":3}],"relevant_ids":["e1"],"notes":"Entity swap with near-name collision and shared preference value."}
42
+ {"id":"entity-012","query":"What does Maren prefer?","adversary_type":"entity_swap","entries":[{"id":"e1","text":"Maren prefers Nix for daily work.","tags":["maren","preference"],"depth":1},{"id":"e2","text":"Karen prefers Nix for daily work.","tags":["karen","preference"],"depth":1},{"id":"e3","text":"Maren also likes Docker Compose in some situations.","tags":["maren","preference"],"depth":2},{"id":"e4","text":"The team discussed Nix and Docker Compose during planning.","tags":["team","planning"],"depth":3},{"id":"e5","text":"Maren wrote a migration note about deployment safety.","tags":["maren","deployment"],"depth":3}],"relevant_ids":["e1"],"notes":"Entity swap with near-name collision and shared preference value."}
43
+ {"id":"time-001","query":"When did we migrate auth to bearer tokens?","adversary_type":"time_swap","entries":[{"id":"e1","text":"We migrate auth to bearer tokens in March.","tags":["timeline","migrate-auth-to-bearer-tokens"],"depth":1,"timestamp":"2026-03-03"},{"id":"e2","text":"We migrate auth to bearer tokens in May.","tags":["timeline","migrate-auth-to-bearer-tokens"],"depth":1,"timestamp":"2026-05-04"},{"id":"e3","text":"We discussed how to migrate auth to bearer tokens throughout March planning.","tags":["planning","migrate-auth-to-bearer-tokens"],"depth":2},{"id":"e4","text":"We completed documentation for that change in May.","tags":["docs","migrate-auth-to-bearer-tokens"],"depth":2}],"relevant_ids":["e1"],"notes":"Same event with swapped month distractor."}
44
+ {"id":"time-002","query":"When did we split staging and production domains?","adversary_type":"time_swap","entries":[{"id":"e1","text":"We split staging and production domains in April.","tags":["timeline","split-staging-and-production-domains"],"depth":1,"timestamp":"2026-04-03"},{"id":"e2","text":"We split staging and production domains in June.","tags":["timeline","split-staging-and-production-domains"],"depth":1,"timestamp":"2026-06-04"},{"id":"e3","text":"We discussed how to split staging and production domains throughout April planning.","tags":["planning","split-staging-and-production-domains"],"depth":2},{"id":"e4","text":"We completed documentation for that change in June.","tags":["docs","split-staging-and-production-domains"],"depth":2}],"relevant_ids":["e1"],"notes":"Same event with swapped month distractor."}
45
+ {"id":"time-003","query":"When did we move Redis into a private subnet?","adversary_type":"time_swap","entries":[{"id":"e1","text":"We move Redis into a private subnet in January.","tags":["timeline","move-redis-into-a-private-subnet"],"depth":1,"timestamp":"2026-01-03"},{"id":"e2","text":"We move Redis into a private subnet in February.","tags":["timeline","move-redis-into-a-private-subnet"],"depth":1,"timestamp":"2026-02-04"},{"id":"e3","text":"We discussed how to move Redis into a private subnet throughout January planning.","tags":["planning","move-redis-into-a-private-subnet"],"depth":2},{"id":"e4","text":"We completed documentation for that change in February.","tags":["docs","move-redis-into-a-private-subnet"],"depth":2}],"relevant_ids":["e1"],"notes":"Same event with swapped month distractor."}
46
+ {"id":"time-004","query":"When did we ship the reporting dashboard?","adversary_type":"time_swap","entries":[{"id":"e1","text":"We ship the reporting dashboard in July.","tags":["timeline","ship-the-reporting-dashboard"],"depth":1,"timestamp":"2026-07-03"},{"id":"e2","text":"We ship the reporting dashboard in August.","tags":["timeline","ship-the-reporting-dashboard"],"depth":1,"timestamp":"2026-08-04"},{"id":"e3","text":"We discussed how to ship the reporting dashboard throughout July planning.","tags":["planning","ship-the-reporting-dashboard"],"depth":2},{"id":"e4","text":"We completed documentation for that change in August.","tags":["docs","ship-the-reporting-dashboard"],"depth":2}],"relevant_ids":["e1"],"notes":"Same event with swapped month distractor."}
47
+ {"id":"time-005","query":"When did we enable GPU inference in Docker?","adversary_type":"time_swap","entries":[{"id":"e1","text":"We enable GPU inference in Docker in September.","tags":["timeline","enable-gpu-inference-in-docker"],"depth":1,"timestamp":"2026-09-03"},{"id":"e2","text":"We enable GPU inference in Docker in October.","tags":["timeline","enable-gpu-inference-in-docker"],"depth":1,"timestamp":"2026-10-04"},{"id":"e3","text":"We discussed how to enable GPU inference in Docker throughout September planning.","tags":["planning","enable-gpu-inference-in-docker"],"depth":2},{"id":"e4","text":"We completed documentation for that change in October.","tags":["docs","enable-gpu-inference-in-docker"],"depth":2}],"relevant_ids":["e1"],"notes":"Same event with swapped month distractor."}
48
+ {"id":"time-006","query":"When did we rotate the SES credentials?","adversary_type":"time_swap","entries":[{"id":"e1","text":"We rotate the SES credentials in November.","tags":["timeline","rotate-the-ses-credentials"],"depth":1,"timestamp":"2026-11-03"},{"id":"e2","text":"We rotate the SES credentials in December.","tags":["timeline","rotate-the-ses-credentials"],"depth":1,"timestamp":"2026-12-04"},{"id":"e3","text":"We discussed how to rotate the SES credentials throughout November planning.","tags":["planning","rotate-the-ses-credentials"],"depth":2},{"id":"e4","text":"We completed documentation for that change in December.","tags":["docs","rotate-the-ses-credentials"],"depth":2}],"relevant_ids":["e1"],"notes":"Same event with swapped month distractor."}
49
+ {"id":"time-007","query":"When did we switch the default embedded model to bge-small?","adversary_type":"time_swap","entries":[{"id":"e1","text":"We switch the default embedded model to bge-small in February.","tags":["timeline","switch-the-default-embedded-model-to-bge-small"],"depth":1,"timestamp":"2026-02-03"},{"id":"e2","text":"We switch the default embedded model to bge-small in April.","tags":["timeline","switch-the-default-embedded-model-to-bge-small"],"depth":1,"timestamp":"2026-04-04"},{"id":"e3","text":"We discussed how to switch the default embedded model to bge-small throughout February planning.","tags":["planning","switch-the-default-embedded-model-to-bge-small"],"depth":2},{"id":"e4","text":"We completed documentation for that change in April.","tags":["docs","switch-the-default-embedded-model-to-bge-small"],"depth":2}],"relevant_ids":["e1"],"notes":"Same event with swapped month distractor."}
50
+ {"id":"time-008","query":"When did we publish the public GHCR image?","adversary_type":"time_swap","entries":[{"id":"e1","text":"We publish the public GHCR image in May.","tags":["timeline","publish-the-public-ghcr-image"],"depth":1,"timestamp":"2026-05-03"},{"id":"e2","text":"We publish the public GHCR image in July.","tags":["timeline","publish-the-public-ghcr-image"],"depth":1,"timestamp":"2026-07-04"},{"id":"e3","text":"We discussed how to publish the public GHCR image throughout May planning.","tags":["planning","publish-the-public-ghcr-image"],"depth":2},{"id":"e4","text":"We completed documentation for that change in July.","tags":["docs","publish-the-public-ghcr-image"],"depth":2}],"relevant_ids":["e1"],"notes":"Same event with swapped month distractor."}
51
+ {"id":"time-009","query":"When did we replace the handwritten MCP server?","adversary_type":"time_swap","entries":[{"id":"e1","text":"We replace the handwritten MCP server in June.","tags":["timeline","replace-the-handwritten-mcp-server"],"depth":1,"timestamp":"2026-06-03"},{"id":"e2","text":"We replace the handwritten MCP server in August.","tags":["timeline","replace-the-handwritten-mcp-server"],"depth":1,"timestamp":"2026-08-04"},{"id":"e3","text":"We discussed how to replace the handwritten MCP server throughout June planning.","tags":["planning","replace-the-handwritten-mcp-server"],"depth":2},{"id":"e4","text":"We completed documentation for that change in August.","tags":["docs","replace-the-handwritten-mcp-server"],"depth":2}],"relevant_ids":["e1"],"notes":"Same event with swapped month distractor."}
52
+ {"id":"time-010","query":"When did we introduce depth-aware ranking?","adversary_type":"time_swap","entries":[{"id":"e1","text":"We introduce depth-aware ranking in October.","tags":["timeline","introduce-depth-aware-ranking"],"depth":1,"timestamp":"2026-10-03"},{"id":"e2","text":"We introduce depth-aware ranking in December.","tags":["timeline","introduce-depth-aware-ranking"],"depth":1,"timestamp":"2026-12-04"},{"id":"e3","text":"We discussed how to introduce depth-aware ranking throughout October planning.","tags":["planning","introduce-depth-aware-ranking"],"depth":2},{"id":"e4","text":"We completed documentation for that change in December.","tags":["docs","introduce-depth-aware-ranking"],"depth":2}],"relevant_ids":["e1"],"notes":"Same event with swapped month distractor."}
53
+ {"id":"time-011","query":"When did we remove the TUI surface?","adversary_type":"time_swap","entries":[{"id":"e1","text":"We remove the TUI surface in January.","tags":["timeline","remove-the-tui-surface"],"depth":1,"timestamp":"2026-01-03"},{"id":"e2","text":"We remove the TUI surface in March.","tags":["timeline","remove-the-tui-surface"],"depth":1,"timestamp":"2026-03-04"},{"id":"e3","text":"We discussed how to remove the TUI surface throughout January planning.","tags":["planning","remove-the-tui-surface"],"depth":2},{"id":"e4","text":"We completed documentation for that change in March.","tags":["docs","remove-the-tui-surface"],"depth":2}],"relevant_ids":["e1"],"notes":"Same event with swapped month distractor."}
54
+ {"id":"time-012","query":"When did we generalize the tagger pipeline?","adversary_type":"time_swap","entries":[{"id":"e1","text":"We generalize the tagger pipeline in April.","tags":["timeline","generalize-the-tagger-pipeline"],"depth":1,"timestamp":"2026-04-03"},{"id":"e2","text":"We generalize the tagger pipeline in September.","tags":["timeline","generalize-the-tagger-pipeline"],"depth":1,"timestamp":"2026-09-04"},{"id":"e3","text":"We discussed how to generalize the tagger pipeline throughout April planning.","tags":["planning","generalize-the-tagger-pipeline"],"depth":2},{"id":"e4","text":"We completed documentation for that change in September.","tags":["docs","generalize-the-tagger-pipeline"],"depth":2}],"relevant_ids":["e1"],"notes":"Same event with swapped month distractor."}
55
+ {"id":"state-001","query":"What is the current production domain?","adversary_type":"state_update","entries":[{"id":"e1","text":"The current production domain is ecue.ai.","tags":["current-production-domain","current"],"depth":0,"timestamp":"2026-04-01"},{"id":"e2","text":"The current production domain used to be hifidelityai.com.","tags":["current-production-domain","historical"],"depth":2,"timestamp":"2025-10-01"},{"id":"e3","text":"The staging or preview equivalent is preview.ecue.ai.","tags":["current-production-domain","preview"],"depth":2,"timestamp":"2026-04-01"},{"id":"e4","text":"We updated the runbook after changing the current production domain.","tags":["current-production-domain","runbook"],"depth":3}],"relevant_ids":["e1"],"notes":"Current fact vs stale fact and sibling environment distractor."}
56
+ {"id":"state-002","query":"What is the default embed model?","adversary_type":"state_update","entries":[{"id":"e1","text":"The default embed model is bge-small-en-v1.5.","tags":["default-embed-model","current"],"depth":0,"timestamp":"2026-04-01"},{"id":"e2","text":"The default embed model used to be all-MiniLM-L6-v2.","tags":["default-embed-model","historical"],"depth":2,"timestamp":"2025-10-01"},{"id":"e3","text":"The staging or preview equivalent is bge-base-en-v1.5.","tags":["default-embed-model","preview"],"depth":2,"timestamp":"2026-04-01"},{"id":"e4","text":"We updated the runbook after changing the default embed model.","tags":["default-embed-model","runbook"],"depth":3}],"relevant_ids":["e1"],"notes":"Current fact vs stale fact and sibling environment distractor."}
57
+ {"id":"state-003","query":"What is the current staging database?","adversary_type":"state_update","entries":[{"id":"e1","text":"The current staging database is db-staging.internal.","tags":["current-staging-database","current"],"depth":0,"timestamp":"2026-04-01"},{"id":"e2","text":"The current staging database used to be db-old-staging.internal.","tags":["current-staging-database","historical"],"depth":2,"timestamp":"2025-10-01"},{"id":"e3","text":"The staging or preview equivalent is db-preview.internal.","tags":["current-staging-database","preview"],"depth":2,"timestamp":"2026-04-01"},{"id":"e4","text":"We updated the runbook after changing the current staging database.","tags":["current-staging-database","runbook"],"depth":3}],"relevant_ids":["e1"],"notes":"Current fact vs stale fact and sibling environment distractor."}
58
+ {"id":"state-004","query":"What is the current metrics backend?","adversary_type":"state_update","entries":[{"id":"e1","text":"The current metrics backend is VictoriaMetrics.","tags":["current-metrics-backend","current"],"depth":0,"timestamp":"2026-04-01"},{"id":"e2","text":"The current metrics backend used to be Prometheus.","tags":["current-metrics-backend","historical"],"depth":2,"timestamp":"2025-10-01"},{"id":"e3","text":"The staging or preview equivalent is InfluxDB.","tags":["current-metrics-backend","preview"],"depth":2,"timestamp":"2026-04-01"},{"id":"e4","text":"We updated the runbook after changing the current metrics backend.","tags":["current-metrics-backend","runbook"],"depth":3}],"relevant_ids":["e1"],"notes":"Current fact vs stale fact and sibling environment distractor."}
59
+ {"id":"state-005","query":"What is the current mail sender?","adversary_type":"state_update","entries":[{"id":"e1","text":"The current mail sender is ses-prod.","tags":["current-mail-sender","current"],"depth":0,"timestamp":"2026-04-01"},{"id":"e2","text":"The current mail sender used to be ses-old.","tags":["current-mail-sender","historical"],"depth":2,"timestamp":"2025-10-01"},{"id":"e3","text":"The staging or preview equivalent is ses-preview.","tags":["current-mail-sender","preview"],"depth":2,"timestamp":"2026-04-01"},{"id":"e4","text":"We updated the runbook after changing the current mail sender.","tags":["current-mail-sender","runbook"],"depth":3}],"relevant_ids":["e1"],"notes":"Current fact vs stale fact and sibling environment distractor."}
60
+ {"id":"state-006","query":"What is the current OpenCode MCP name?","adversary_type":"state_update","entries":[{"id":"e1","text":"The current OpenCode MCP name is tagmem.","tags":["current-opencode-mcp-name","current"],"depth":0,"timestamp":"2026-04-01"},{"id":"e2","text":"The current OpenCode MCP name used to be tagmem_active.","tags":["current-opencode-mcp-name","historical"],"depth":2,"timestamp":"2025-10-01"},{"id":"e3","text":"The staging or preview equivalent is mempalace_active.","tags":["current-opencode-mcp-name","preview"],"depth":2,"timestamp":"2026-04-01"},{"id":"e4","text":"We updated the runbook after changing the current OpenCode MCP name.","tags":["current-opencode-mcp-name","runbook"],"depth":3}],"relevant_ids":["e1"],"notes":"Current fact vs stale fact and sibling environment distractor."}
61
+ {"id":"state-007","query":"What is the current published image?","adversary_type":"state_update","entries":[{"id":"e1","text":"The current published image is ghcr.io/codysnider/tagmem.","tags":["current-published-image","current"],"depth":0,"timestamp":"2026-04-01"},{"id":"e2","text":"The current published image used to be ghcr.io/codysnider/tagmem-opencode.","tags":["current-published-image","historical"],"depth":2,"timestamp":"2025-10-01"},{"id":"e3","text":"The staging or preview equivalent is ghcr.io/codysnider/tagmem-preview.","tags":["current-published-image","preview"],"depth":2,"timestamp":"2026-04-01"},{"id":"e4","text":"We updated the runbook after changing the current published image.","tags":["current-published-image","runbook"],"depth":3}],"relevant_ids":["e1"],"notes":"Current fact vs stale fact and sibling environment distractor."}
62
+ {"id":"state-008","query":"What is the current local data root?","adversary_type":"state_update","entries":[{"id":"e1","text":"The current local data root is $HOME/.local/share/tagmem.","tags":["current-local-data-root","current"],"depth":0,"timestamp":"2026-04-01"},{"id":"e2","text":"The current local data root used to be /data/tagmem.","tags":["current-local-data-root","historical"],"depth":2,"timestamp":"2025-10-01"},{"id":"e3","text":"The staging or preview equivalent is /srv/tagmem.","tags":["current-local-data-root","preview"],"depth":2,"timestamp":"2026-04-01"},{"id":"e4","text":"We updated the runbook after changing the current local data root.","tags":["current-local-data-root","runbook"],"depth":3}],"relevant_ids":["e1"],"notes":"Current fact vs stale fact and sibling environment distractor."}
63
+ {"id":"state-009","query":"What is the current vector backend?","adversary_type":"state_update","entries":[{"id":"e1","text":"The current vector backend is chromem-go.","tags":["current-vector-backend","current"],"depth":0,"timestamp":"2026-04-01"},{"id":"e2","text":"The current vector backend used to be ChromaDB.","tags":["current-vector-backend","historical"],"depth":2,"timestamp":"2025-10-01"},{"id":"e3","text":"The staging or preview equivalent is SQLite FTS.","tags":["current-vector-backend","preview"],"depth":2,"timestamp":"2026-04-01"},{"id":"e4","text":"We updated the runbook after changing the current vector backend.","tags":["current-vector-backend","runbook"],"depth":3}],"relevant_ids":["e1"],"notes":"Current fact vs stale fact and sibling environment distractor."}
64
+ {"id":"state-010","query":"What is the current graph package name?","adversary_type":"state_update","entries":[{"id":"e1","text":"The current graph package name is taggraph.","tags":["current-graph-package-name","current"],"depth":0,"timestamp":"2026-04-01"},{"id":"e2","text":"The current graph package name used to be topicgraph.","tags":["current-graph-package-name","historical"],"depth":2,"timestamp":"2025-10-01"},{"id":"e3","text":"The staging or preview equivalent is memorygraph.","tags":["current-graph-package-name","preview"],"depth":2,"timestamp":"2026-04-01"},{"id":"e4","text":"We updated the runbook after changing the current graph package name.","tags":["current-graph-package-name","runbook"],"depth":3}],"relevant_ids":["e1"],"notes":"Current fact vs stale fact and sibling environment distractor."}
65
+ {"id":"speaker-001","query":"What did you suggest for Terraform environments?","adversary_type":"speaker_swap","entries":[{"id":"e1","text":"You suggested separate Terraform states for shared, production, and staging.","tags":["terraform-environments","assistant","suggestion"],"depth":1,"speaker":"assistant"},{"id":"e2","text":"I suggested one Terraform state with workspaces.","tags":["terraform-environments","user","suggestion"],"depth":1,"speaker":"user"},{"id":"e3","text":"We discussed Terraform environments implementation details later.","tags":["terraform-environments","discussion"],"depth":2},{"id":"e4","text":"The team wrote follow-up notes about Terraform environments.","tags":["terraform-environments","notes"],"depth":3}],"relevant_ids":["e1"],"notes":"Assistant/user suggestion swap with same subject area."}
66
+ {"id":"speaker-002","query":"What did you suggest for tagging pipeline?","adversary_type":"speaker_swap","entries":[{"id":"e1","text":"You suggested use deterministic extraction first and embedding ranking second.","tags":["tagging-pipeline","assistant","suggestion"],"depth":1,"speaker":"assistant"},{"id":"e2","text":"I suggested let the model invent tags directly.","tags":["tagging-pipeline","user","suggestion"],"depth":1,"speaker":"user"},{"id":"e3","text":"We discussed tagging pipeline implementation details later.","tags":["tagging-pipeline","discussion"],"depth":2},{"id":"e4","text":"The team wrote follow-up notes about tagging pipeline.","tags":["tagging-pipeline","notes"],"depth":3}],"relevant_ids":["e1"],"notes":"Assistant/user suggestion swap with same subject area."}
67
+ {"id":"speaker-003","query":"What did you suggest for OpenCode integration?","adversary_type":"speaker_swap","entries":[{"id":"e1","text":"You suggested run the image directly with the mcp subcommand.","tags":["opencode-integration","assistant","suggestion"],"depth":1,"speaker":"assistant"},{"id":"e2","text":"I suggested wrap everything in a shell pipeline.","tags":["opencode-integration","user","suggestion"],"depth":1,"speaker":"user"},{"id":"e3","text":"We discussed OpenCode integration implementation details later.","tags":["opencode-integration","discussion"],"depth":2},{"id":"e4","text":"The team wrote follow-up notes about OpenCode integration.","tags":["opencode-integration","notes"],"depth":3}],"relevant_ids":["e1"],"notes":"Assistant/user suggestion swap with same subject area."}
68
+ {"id":"speaker-004","query":"What did you suggest for release packaging?","adversary_type":"speaker_swap","entries":[{"id":"e1","text":"You suggested publish ghcr.io/codysnider/tagmem as the generic image.","tags":["release-packaging","assistant","suggestion"],"depth":1,"speaker":"assistant"},{"id":"e2","text":"I suggested ship only a local Docker build.","tags":["release-packaging","user","suggestion"],"depth":1,"speaker":"user"},{"id":"e3","text":"We discussed release packaging implementation details later.","tags":["release-packaging","discussion"],"depth":2},{"id":"e4","text":"The team wrote follow-up notes about release packaging.","tags":["release-packaging","notes"],"depth":3}],"relevant_ids":["e1"],"notes":"Assistant/user suggestion swap with same subject area."}
69
+ {"id":"speaker-005","query":"What did you suggest for depth model?","adversary_type":"speaker_swap","entries":[{"id":"e1","text":"You suggested treat depth as a secondary ranking bias.","tags":["depth-model","assistant","suggestion"],"depth":1,"speaker":"assistant"},{"id":"e2","text":"I suggested replace tags with rigid hierarchical folders.","tags":["depth-model","user","suggestion"],"depth":1,"speaker":"user"},{"id":"e3","text":"We discussed depth model implementation details later.","tags":["depth-model","discussion"],"depth":2},{"id":"e4","text":"The team wrote follow-up notes about depth model.","tags":["depth-model","notes"],"depth":3}],"relevant_ids":["e1"],"notes":"Assistant/user suggestion swap with same subject area."}
70
+ {"id":"speaker-006","query":"What did you suggest for README benchmarks?","adversary_type":"speaker_swap","entries":[{"id":"e1","text":"You suggested keep a compact benchmark table in the main README.","tags":["readme-benchmarks","assistant","suggestion"],"depth":1,"speaker":"assistant"},{"id":"e2","text":"I suggested put the full benchmark suite in the top section.","tags":["readme-benchmarks","user","suggestion"],"depth":1,"speaker":"user"},{"id":"e3","text":"We discussed README benchmarks implementation details later.","tags":["readme-benchmarks","discussion"],"depth":2},{"id":"e4","text":"The team wrote follow-up notes about README benchmarks.","tags":["readme-benchmarks","notes"],"depth":3}],"relevant_ids":["e1"],"notes":"Assistant/user suggestion swap with same subject area."}
71
+ {"id":"speaker-007","query":"What did you suggest for Docker data root?","adversary_type":"speaker_swap","entries":[{"id":"e1","text":"You suggested use HOME-local defaults with an env override.","tags":["docker-data-root","assistant","suggestion"],"depth":1,"speaker":"assistant"},{"id":"e2","text":"I suggested hardcode everything to /data/tagmem.","tags":["docker-data-root","user","suggestion"],"depth":1,"speaker":"user"},{"id":"e3","text":"We discussed Docker data root implementation details later.","tags":["docker-data-root","discussion"],"depth":2},{"id":"e4","text":"The team wrote follow-up notes about Docker data root.","tags":["docker-data-root","notes"],"depth":3}],"relevant_ids":["e1"],"notes":"Assistant/user suggestion swap with same subject area."}
72
+ {"id":"speaker-008","query":"What did you suggest for MCP compatibility?","adversary_type":"speaker_swap","entries":[{"id":"e1","text":"You suggested use the official Go SDK.","tags":["mcp-compatibility","assistant","suggestion"],"depth":1,"speaker":"assistant"},{"id":"e2","text":"I suggested hand-roll every transport detail.","tags":["mcp-compatibility","user","suggestion"],"depth":1,"speaker":"user"},{"id":"e3","text":"We discussed MCP compatibility implementation details later.","tags":["mcp-compatibility","discussion"],"depth":2},{"id":"e4","text":"The team wrote follow-up notes about MCP compatibility.","tags":["mcp-compatibility","notes"],"depth":3}],"relevant_ids":["e1"],"notes":"Assistant/user suggestion swap with same subject area."}
73
+ {"id":"speaker-009","query":"What did you suggest for adversarial benchmark repo?","adversary_type":"speaker_swap","entries":[{"id":"e1","text":"You suggested split it into a standalone project.","tags":["adversarial-benchmark-repo","assistant","suggestion"],"depth":1,"speaker":"assistant"},{"id":"e2","text":"I suggested bury it inside the main app repo.","tags":["adversarial-benchmark-repo","user","suggestion"],"depth":1,"speaker":"user"},{"id":"e3","text":"We discussed adversarial benchmark repo implementation details later.","tags":["adversarial-benchmark-repo","discussion"],"depth":2},{"id":"e4","text":"The team wrote follow-up notes about adversarial benchmark repo.","tags":["adversarial-benchmark-repo","notes"],"depth":3}],"relevant_ids":["e1"],"notes":"Assistant/user suggestion swap with same subject area."}
74
+ {"id":"speaker-010","query":"What did you suggest for OpenCode command naming?","adversary_type":"speaker_swap","entries":[{"id":"e1","text":"You suggested use remember instead of mine.","tags":["opencode-command-naming","assistant","suggestion"],"depth":1,"speaker":"assistant"},{"id":"e2","text":"I suggested keep mine because users will figure it out.","tags":["opencode-command-naming","user","suggestion"],"depth":1,"speaker":"user"},{"id":"e3","text":"We discussed OpenCode command naming implementation details later.","tags":["opencode-command-naming","discussion"],"depth":2},{"id":"e4","text":"The team wrote follow-up notes about OpenCode command naming.","tags":["opencode-command-naming","notes"],"depth":3}],"relevant_ids":["e1"],"notes":"Assistant/user suggestion swap with same subject area."}
75
+ {"id":"paraphrase-001","query":"What timeout do API calls use?","adversary_type":"near_duplicate_paraphrase","entries":[{"id":"e1","text":"API calls time out after 30 seconds.","tags":["config","limits"],"depth":1},{"id":"e2","text":"The API timeout is 60 seconds.","tags":["config","limits"],"depth":1},{"id":"e3","text":"Background jobs retry for up to 30 seconds.","tags":["jobs","limits"],"depth":2},{"id":"e4","text":"We reviewed the policy related to: API calls time out after 30 seconds.","tags":["policy"],"depth":3}],"relevant_ids":["e1"],"notes":"Near-duplicate paraphrase with one wrong value distractor."}
76
+ {"id":"paraphrase-002","query":"How long do production cookies last?","adversary_type":"near_duplicate_paraphrase","entries":[{"id":"e1","text":"Production cookies expire after 12 hours.","tags":["config","limits"],"depth":1},{"id":"e2","text":"Production cookies expire after 24 hours.","tags":["config","limits"],"depth":1},{"id":"e3","text":"Staging cookies expire after 12 hours.","tags":["jobs","limits"],"depth":2},{"id":"e4","text":"We reviewed the policy related to: Production cookies expire after 12 hours.","tags":["policy"],"depth":3}],"relevant_ids":["e1"],"notes":"Near-duplicate paraphrase with one wrong value distractor."}
77
+ {"id":"paraphrase-003","query":"What is the upload limit?","adversary_type":"near_duplicate_paraphrase","entries":[{"id":"e1","text":"The upload limit is 25 megabytes.","tags":["config","limits"],"depth":1},{"id":"e2","text":"The upload limit is 50 megabytes.","tags":["config","limits"],"depth":1},{"id":"e3","text":"The attachment limit is 25 megabytes.","tags":["jobs","limits"],"depth":2},{"id":"e4","text":"We reviewed the policy related to: The upload limit is 25 megabytes.","tags":["policy"],"depth":3}],"relevant_ids":["e1"],"notes":"Near-duplicate paraphrase with one wrong value distractor."}
78
+ {"id":"paraphrase-004","query":"How often does auth refresh run?","adversary_type":"near_duplicate_paraphrase","entries":[{"id":"e1","text":"Auth refresh runs every 10 minutes.","tags":["config","limits"],"depth":1},{"id":"e2","text":"Auth refresh runs every 15 minutes.","tags":["config","limits"],"depth":1},{"id":"e3","text":"Metrics scraping runs every 10 minutes.","tags":["jobs","limits"],"depth":2},{"id":"e4","text":"We reviewed the policy related to: Auth refresh runs every 10 minutes.","tags":["policy"],"depth":3}],"relevant_ids":["e1"],"notes":"Near-duplicate paraphrase with one wrong value distractor."}
79
+ {"id":"paraphrase-005","query":"When does the reporting batch run?","adversary_type":"near_duplicate_paraphrase","entries":[{"id":"e1","text":"The reporting batch runs at 02:00 UTC.","tags":["config","limits"],"depth":1},{"id":"e2","text":"The reporting batch runs at 03:00 UTC.","tags":["config","limits"],"depth":1},{"id":"e3","text":"The cleanup batch runs at 02:00 UTC.","tags":["jobs","limits"],"depth":2},{"id":"e4","text":"We reviewed the policy related to: The reporting batch runs at 02:00 UTC.","tags":["policy"],"depth":3}],"relevant_ids":["e1"],"notes":"Near-duplicate paraphrase with one wrong value distractor."}
80
+ {"id":"paraphrase-006","query":"What port does the public API use?","adversary_type":"near_duplicate_paraphrase","entries":[{"id":"e1","text":"The public API port is 8443.","tags":["config","limits"],"depth":1},{"id":"e2","text":"The public API port is 9443.","tags":["config","limits"],"depth":1},{"id":"e3","text":"The internal admin port is 8443.","tags":["jobs","limits"],"depth":2},{"id":"e4","text":"We reviewed the policy related to: The public API port is 8443.","tags":["policy"],"depth":3}],"relevant_ids":["e1"],"notes":"Near-duplicate paraphrase with one wrong value distractor."}
81
+ {"id":"paraphrase-007","query":"What is the Docker data root default?","adversary_type":"near_duplicate_paraphrase","entries":[{"id":"e1","text":"The Docker data root defaults to HOME-local storage.","tags":["config","limits"],"depth":1},{"id":"e2","text":"The Docker data root defaults to /data/tagmem.","tags":["config","limits"],"depth":1},{"id":"e3","text":"The benchmark root defaults to HOME-local storage.","tags":["jobs","limits"],"depth":2},{"id":"e4","text":"We reviewed the policy related to: The Docker data root defaults to HOME-local storage.","tags":["policy"],"depth":3}],"relevant_ids":["e1"],"notes":"Near-duplicate paraphrase with one wrong value distractor."}
82
+ {"id":"paraphrase-008","query":"What prefix do the MCP tools use?","adversary_type":"near_duplicate_paraphrase","entries":[{"id":"e1","text":"The MCP tools use the tagmem_ prefix.","tags":["config","limits"],"depth":1},{"id":"e2","text":"The MCP tools use the tiered_memory_ prefix.","tags":["config","limits"],"depth":1},{"id":"e3","text":"The CLI binary is named tagmem.","tags":["jobs","limits"],"depth":2},{"id":"e4","text":"We reviewed the policy related to: The MCP tools use the tagmem_ prefix.","tags":["policy"],"depth":3}],"relevant_ids":["e1"],"notes":"Near-duplicate paraphrase with one wrong value distractor."}
83
+ {"id":"paraphrase-009","query":"What is the default GPU model?","adversary_type":"near_duplicate_paraphrase","entries":[{"id":"e1","text":"The default GPU model is bge-small-en-v1.5.","tags":["config","limits"],"depth":1},{"id":"e2","text":"The default GPU model is bge-base-en-v1.5.","tags":["config","limits"],"depth":1},{"id":"e3","text":"The CPU fallback model is all-MiniLM-L6-v2.","tags":["jobs","limits"],"depth":2},{"id":"e4","text":"We reviewed the policy related to: The default GPU model is bge-small-en-v1.5.","tags":["policy"],"depth":3}],"relevant_ids":["e1"],"notes":"Near-duplicate paraphrase with one wrong value distractor."}
84
+ {"id":"paraphrase-010","query":"What is the runtime image name?","adversary_type":"near_duplicate_paraphrase","entries":[{"id":"e1","text":"The runtime image is ghcr.io/codysnider/tagmem.","tags":["config","limits"],"depth":1},{"id":"e2","text":"The runtime image is ghcr.io/codysnider/tagmem-opencode.","tags":["config","limits"],"depth":1},{"id":"e3","text":"The repo path is github.com/codysnider/tagmem.","tags":["jobs","limits"],"depth":2},{"id":"e4","text":"We reviewed the policy related to: The runtime image is ghcr.io/codysnider/tagmem.","tags":["policy"],"depth":3}],"relevant_ids":["e1"],"notes":"Near-duplicate paraphrase with one wrong value distractor."}
85
+ {"id":"paraphrase-011","query":"How is the MCP server implemented?","adversary_type":"near_duplicate_paraphrase","entries":[{"id":"e1","text":"The MCP server is implemented with the official Go SDK.","tags":["config","limits"],"depth":1},{"id":"e2","text":"The MCP server is still handwritten.","tags":["config","limits"],"depth":1},{"id":"e3","text":"The transport runs over stdio.","tags":["jobs","limits"],"depth":2},{"id":"e4","text":"We reviewed the policy related to: The MCP server is implemented with the official Go SDK.","tags":["policy"],"depth":3}],"relevant_ids":["e1"],"notes":"Near-duplicate paraphrase with one wrong value distractor."}
86
+ {"id":"paraphrase-012","query":"What is the graph package name?","adversary_type":"near_duplicate_paraphrase","entries":[{"id":"e1","text":"The local graph package is named taggraph.","tags":["config","limits"],"depth":1},{"id":"e2","text":"The local graph package is named topicgraph.","tags":["config","limits"],"depth":1},{"id":"e3","text":"The benchmark package is named tagbench.","tags":["jobs","limits"],"depth":2},{"id":"e4","text":"We reviewed the policy related to: The local graph package is named taggraph.","tags":["policy"],"depth":3}],"relevant_ids":["e1"],"notes":"Near-duplicate paraphrase with one wrong value distractor."}
dataset_infos.json ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "default": {
3
+ "description": "Adversarial distractor benchmark cases for memory retrieval systems.",
4
+ "features": {
5
+ "id": {"dtype": "string", "_type": "Value"},
6
+ "query": {"dtype": "string", "_type": "Value"},
7
+ "adversary_type": {"dtype": "string", "_type": "Value"},
8
+ "entries": {
9
+ "feature": {
10
+ "id": {"dtype": "string", "_type": "Value"},
11
+ "text": {"dtype": "string", "_type": "Value"},
12
+ "tags": {"feature": {"dtype": "string", "_type": "Value"}, "_type": "Sequence"},
13
+ "depth": {"dtype": "int32", "_type": "Value"}
14
+ },
15
+ "_type": "Sequence"
16
+ },
17
+ "relevant_ids": {"feature": {"dtype": "string", "_type": "Value"}, "_type": "Sequence"},
18
+ "notes": {"dtype": "string", "_type": "Value"}
19
+ },
20
+ "splits": {
21
+ "train": {
22
+ "name": "train",
23
+ "num_bytes": 0,
24
+ "num_examples": 573,
25
+ "dataset_name": "adversarial-memory-bench"
26
+ }
27
+ },
28
+ "download_size": 0,
29
+ "dataset_size": 0
30
+ }
31
+ }
docs/DESIGN.md ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Design Notes
2
+
3
+ ## Purpose
4
+
5
+ Most memory benchmarks measure semantic recall in benign settings.
6
+
7
+ This benchmark targets retrieval failure modes that matter in agent memory systems:
8
+
9
+ - retrieving the wrong person
10
+ - retrieving the wrong environment
11
+ - retrieving an outdated fact instead of the current one
12
+ - retrieving something semantically close but operationally wrong
13
+
14
+ ## Benchmark principles
15
+
16
+ - retrieval-focused, not generation-focused
17
+ - one query, many plausible distractors
18
+ - exact relevant entry ids are known in advance
19
+ - metadata such as tags, depth, speaker, and timestamp may be present but are optional
20
+ - cases should remain small enough to inspect by hand
21
+
22
+ ## Scoring
23
+
24
+ Suggested scoring:
25
+
26
+ - `Recall@1`
27
+ - `Recall@5`
28
+ - `MRR`
29
+ - error bucket counts by `adversary_type`
30
+
31
+ ## Expansion ideas
32
+
33
+ - more software-specific adversaries
34
+ - benchmark splits by domain
35
+ - fact-update and contradiction-specific suites
36
+ - Hugging Face dataset packaging
schema/case.schema.json ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
3
+ "$id": "https://example.com/adversarial-memory-bench/case.schema.json",
4
+ "title": "AdversarialMemoryBenchmarkCase",
5
+ "type": "object",
6
+ "required": ["id", "query", "adversary_type", "entries", "relevant_ids"],
7
+ "properties": {
8
+ "id": {"type": "string", "minLength": 1},
9
+ "query": {"type": "string", "minLength": 1},
10
+ "adversary_type": {
11
+ "type": "string",
12
+ "enum": [
13
+ "entity_swap",
14
+ "environment_swap",
15
+ "time_swap",
16
+ "state_update",
17
+ "speaker_swap",
18
+ "near_duplicate_paraphrase"
19
+ ]
20
+ },
21
+ "entries": {
22
+ "type": "array",
23
+ "minItems": 2,
24
+ "items": {
25
+ "type": "object",
26
+ "required": ["id", "text"],
27
+ "properties": {
28
+ "id": {"type": "string", "minLength": 1},
29
+ "text": {"type": "string", "minLength": 1},
30
+ "tags": {
31
+ "type": "array",
32
+ "items": {"type": "string"}
33
+ },
34
+ "depth": {"type": "integer", "minimum": 0},
35
+ "timestamp": {"type": "string"},
36
+ "speaker": {"type": "string"},
37
+ "metadata": {"type": "object"}
38
+ },
39
+ "additionalProperties": false
40
+ }
41
+ },
42
+ "relevant_ids": {
43
+ "type": "array",
44
+ "minItems": 1,
45
+ "items": {"type": "string", "minLength": 1}
46
+ },
47
+ "notes": {"type": "string"},
48
+ "metadata": {"type": "object"}
49
+ },
50
+ "additionalProperties": false
51
+ }
scripts/generate_cases.py ADDED
@@ -0,0 +1,511 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ import json
3
+ from pathlib import Path
4
+
5
+
6
+ OUT = Path(__file__).resolve().parents[1] / "data" / "cases.jsonl"
7
+
8
+
9
def env_cases():
    """Build environment_swap benchmark cases.

    Each case asks which value one environment (staging/production/development)
    uses for a given service. The correct entry (e1) states the target
    environment's value; e2/e3 state the sibling environments' values as
    distractors; e4/e5 are topical filler at greater depth.

    Returns:
        list[dict]: case dicts matching schema/case.schema.json.
    """
    services = [
        ("database", "db-staging.internal", "db-prod.internal", "db-dev.internal"),
        ("redis cache", "redis-staging.internal", "redis-prod.internal", "redis-dev.internal"),
        ("object storage bucket", "assets-staging", "assets-prod", "assets-dev"),
        ("email sender", "ses-staging", "ses-prod", "ses-dev"),
        ("nats cluster", "nats-staging.internal", "nats-prod.internal", "nats-dev.internal"),
        ("API base URL", "https://api-staging.ecue.ai", "https://api.ecue.ai", "https://api-dev.ecue.ai"),
        ("metrics backend", "vm-staging.internal", "vm-prod.internal", "vm-dev.internal"),
        ("auth issuer", "https://auth-staging.ecue.ai", "https://auth.ecue.ai", "https://auth-dev.ecue.ai"),
        ("artifact bucket", "artifacts-staging", "artifacts-prod", "artifacts-dev"),
        ("search index", "meili-staging.internal", "meili-prod.internal", "meili-dev.internal"),
    ]
    env_queries = [
        ("staging", "production", "development"),
        ("production", "staging", "development"),
        ("development", "production", "staging"),
    ]
    cases = []
    idx = 1
    for service, a, b, c in services:
        # Single source of truth for which environment owns which value.
        env_values = {"staging": a, "production": b, "development": c}
        for target, distractor, other in env_queries:
            correct = env_values[target]
            # BUG FIX: the previous hand-written lookup tables mislabeled the
            # "development" target -- e2 claimed production used the staging
            # value, and e3 claimed staging used the production value, which
            # contradicts what the staging/production cases assert for the same
            # service. Looking both values up in env_values keeps every case
            # internally consistent; staging/production targets are unchanged.
            wrong = env_values[distractor]
            other_val = env_values[other]
            cases.append({
                "id": f"env-{idx:03d}",
                "query": f"What {service} does {target} use?",
                "adversary_type": "environment_swap",
                "entries": [
                    entry("e1", f"The {target} environment uses {correct} for its {service}.", [target, normalize(service), "infra"], 1),
                    entry("e2", f"The {distractor} environment uses {wrong} for its {service}.", [distractor, normalize(service), "infra"], 1),
                    entry("e3", f"The {other} environment uses {other_val} for its {service}.", [other, normalize(service), "infra"], 2),
                    entry("e4", f"The {target} environment rotates credentials weekly for the {service}.", [target, normalize(service), "ops"], 2),
                    # NOTE(review): e5 always mentions the email sender, so for the
                    # "email sender" service it nearly duplicates e1 -- confirm this
                    # filler entry is intentional.
                    entry("e5", f"The {target} email sender is ses-{target} for notification traffic.", [target, "email", "infra"], 3),
                ],
                "relevant_ids": ["e1"],
                "notes": "Environment swap distractors with same service family.",
            })
            idx += 1
    return cases
50
+
51
+
52
def env_cases_extra():
    """Extra environment_swap cases with explicit per-row values.

    Each row spells out the target environment, the service, and the values
    for the target / distractor / sibling environments, so no cross-row
    derivation is needed.

    Returns:
        list[dict]: case dicts matching schema/case.schema.json.
    """
    rows = [
        ("staging", "feature flag service", "flags-staging.internal", "flags-prod.internal", "flags-dev.internal"),
        ("production", "feature flag service", "flags-prod.internal", "flags-staging.internal", "flags-dev.internal"),
        ("staging", "artifact registry", "registry-staging.internal", "registry-prod.internal", "registry-dev.internal"),
        ("production", "artifact registry", "registry-prod.internal", "registry-staging.internal", "registry-dev.internal"),
        ("development", "artifact registry", "registry-dev.internal", "registry-prod.internal", "registry-staging.internal"),
        ("staging", "workflow queue", "queue-staging.internal", "queue-prod.internal", "queue-dev.internal"),
        ("production", "workflow queue", "queue-prod.internal", "queue-staging.internal", "queue-dev.internal"),
        ("development", "workflow queue", "queue-dev.internal", "queue-prod.internal", "queue-staging.internal"),
        ("staging", "analytics endpoint", "analytics-staging.internal", "analytics-prod.internal", "analytics-dev.internal"),
        ("production", "analytics endpoint", "analytics-prod.internal", "analytics-staging.internal", "analytics-dev.internal"),
        ("development", "analytics endpoint", "analytics-dev.internal", "analytics-prod.internal", "analytics-staging.internal"),
        ("staging", "auth callback domain", "callback-staging.ecue.ai", "callback.ecue.ai", "callback-dev.ecue.ai"),
        ("production", "auth callback domain", "callback.ecue.ai", "callback-staging.ecue.ai", "callback-dev.ecue.ai"),
        ("development", "auth callback domain", "callback-dev.ecue.ai", "callback.ecue.ai", "callback-staging.ecue.ai"),
        ("staging", "payments webhook host", "payments-staging.internal", "payments-prod.internal", "payments-dev.internal"),
        ("production", "payments webhook host", "payments-prod.internal", "payments-staging.internal", "payments-dev.internal"),
        ("development", "payments webhook host", "payments-dev.internal", "payments-prod.internal", "payments-staging.internal"),
        ("staging", "logs bucket", "logs-staging", "logs-prod", "logs-dev"),
        ("production", "logs bucket", "logs-prod", "logs-staging", "logs-dev"),
        ("development", "logs bucket", "logs-dev", "logs-prod", "logs-staging"),
    ]
    cases = []
    case_num = 0
    for target, service, correct_val, wrong_val, other_val in rows:
        case_num += 1
        # Pick the named distractor/sibling environments; production falls
        # back to staging since it cannot distract from itself.
        if target != "production":
            distractor = "production"
        else:
            distractor = "staging"
        if target != "development":
            sibling = "development"
        else:
            sibling = "staging"
        entries = [
            entry("e1", f"The {target} environment uses {correct_val} for the {service}.", [target, normalize(service), "infra"], 1),
            entry("e2", f"The {distractor} environment uses {wrong_val} for the {service}.", [distractor, normalize(service), "infra"], 1),
            entry("e3", f"The {sibling} environment uses {other_val} for the {service}.", [sibling, normalize(service), "infra"], 2),
            entry("e4", f"The {target} runbook mentions how to rotate secrets for the {service}.", [target, normalize(service), "runbook"], 3),
        ]
        cases.append({
            "id": "envx-%03d" % case_num,
            "query": f"What {service} does {target} use?",
            "adversary_type": "environment_swap",
            "entries": entries,
            "relevant_ids": ["e1"],
            "notes": "Extra environment confusion case.",
        })
    return cases
93
+
94
+
95
def entity_cases():
    """Entity-swap cases built from near-identical name pairs.

    The distractor (e2) attributes the exact same preference to a person whose
    name nearly collides with the target's, so lexical retrieval alone cannot
    separate them.

    Returns:
        list[dict]: case dicts matching schema/case.schema.json.
    """
    pairs = [
        ("Caroline", "Catherine", "oat milk", "black tea"),
        ("Jordan", "Jordyn", "standing desk", "ergonomic chair"),
        ("Alicia", "Alice", "Postgres", "MySQL"),
        ("Mika", "Mila", "vim", "emacs"),
        ("Darren", "Dorian", "dark mode", "light mode"),
        ("Riley", "Ryan", "GraphQL", "REST"),
        ("Marina", "Maria", "daily standups", "weekly status docs"),
        ("Talia", "Tanya", "Go modules", "Bazel workspaces"),
        ("Nolan", "Noah", "CUDA containers", "CPU-only builds"),
        ("Elena", "Alina", "Linear", "Jira"),
        ("Sonia", "Sonya", "dark roast", "green tea"),
        ("Maren", "Karen", "Nix", "Docker Compose"),
    ]
    cases = []
    for idx, (person, lookalike, favored, fallback) in enumerate(pairs, 1):
        cases.append({
            "id": f"entity-{idx:03d}",
            "query": f"What does {person} prefer?",
            "adversary_type": "entity_swap",
            "entries": [
                entry("e1", f"{person} prefers {favored} for daily work.", [normalize(person), "preference"], 1),
                entry("e2", f"{lookalike} prefers {favored} for daily work.", [normalize(lookalike), "preference"], 1),
                entry("e3", f"{person} also likes {fallback} in some situations.", [normalize(person), "preference"], 2),
                entry("e4", f"The team discussed {favored} and {fallback} during planning.", ["team", "planning"], 3),
                entry("e5", f"{person} wrote a migration note about deployment safety.", [normalize(person), "deployment"], 3),
            ],
            "relevant_ids": ["e1"],
            "notes": "Entity swap with near-name collision and shared preference value.",
        })
    return cases
129
+
130
+
131
def entity_cases_extra():
    """Additional entity-swap cases with shorter entry sets.

    Same construction as entity_cases: a look-alike name (e2) shares the target
    person's preference value, plus two lower-relevance fillers.

    Returns:
        list[dict]: case dicts matching schema/case.schema.json.
    """
    rows = [
        ("Colin", "Collin", "sourdough bread", "rice noodles"),
        ("Amira", "Amara", "linear issue tracker", "spreadsheet backlog"),
        ("Devon", "Devin", "kubernetes", "nomad"),
        ("Selena", "Sienna", "elm", "react"),
        ("Tristan", "Tristen", "standing meetings", "async updates"),
        ("Nadia", "Naomi", "matcha", "espresso"),
        ("Harper", "Harold", "tailscale", "wireguard"),
        ("Leona", "Lenora", "postgresql", "sqlite"),
        ("Mason", "Marlon", "weekly demos", "monthly demos"),
        ("Tessa", "Teresa", "dark theme", "solarized light"),
        ("Gideon", "Gillian", "helm charts", "terraform modules"),
        ("Priya", "Priyanka", "quiet keyboards", "clicky keyboards"),
        ("Evan", "Ivan", "gpu builds", "cpu builds"),
        ("Lena", "Lina", "shortbread cookies", "ginger cookies"),
        ("Marco", "Marek", "clickhouse", "bigquery"),
        ("Keira", "Kiera", "rss feeds", "email newsletters"),
        ("Brennan", "Brendan", "obsidian", "logseq"),
        ("Farah", "Fiona", "blueberry yogurt", "plain yogurt"),
        ("Noelle", "Noel", "go test", "pytest"),
        ("Soren", "Sorin", "daily walks", "stationary bike"),
    ]
    cases = []
    counter = 0
    for person, twin, pick, backup in rows:
        counter += 1
        distractor_set = [
            entry("e1", f"{person} prefers {pick} for everyday use.", [normalize(person), "preference"], 1),
            entry("e2", f"{twin} prefers {pick} for everyday use.", [normalize(twin), "preference"], 1),
            entry("e3", f"{person} sometimes uses {backup} instead.", [normalize(person), "preference"], 2),
            entry("e4", f"The team discussed {pick} and {backup} during planning.", ["team", "planning"], 3),
        ]
        cases.append({
            "id": f"entityx-{counter:03d}",
            "query": f"What does {person} prefer?",
            "adversary_type": "entity_swap",
            "entries": distractor_set,
            "relevant_ids": ["e1"],
            "notes": "Extra entity confusion case.",
        })
    return cases
170
+
171
+
172
def time_cases():
    """Time-swap cases: the same event dated with two different months.

    e1 carries the correct month and e2 the wrong one; both get ISO-ish
    timestamps via month_num so timestamp-aware rankers have a signal.

    Returns:
        list[dict]: case dicts matching schema/case.schema.json.
    """
    events = [
        ("migrate auth to bearer tokens", "March", "May"),
        ("split staging and production domains", "April", "June"),
        ("move Redis into a private subnet", "January", "February"),
        ("ship the reporting dashboard", "July", "August"),
        ("enable GPU inference in Docker", "September", "October"),
        ("rotate the SES credentials", "November", "December"),
        ("switch the default embedded model to bge-small", "February", "April"),
        ("publish the public GHCR image", "May", "July"),
        ("replace the handwritten MCP server", "June", "August"),
        ("introduce depth-aware ranking", "October", "December"),
        ("remove the TUI surface", "January", "March"),
        ("generalize the tagger pipeline", "April", "September"),
    ]
    cases = []
    for idx, (milestone, good_month, bad_month) in enumerate(events, 1):
        cases.append({
            "id": f"time-{idx:03d}",
            "query": f"When did we {milestone}?",
            "adversary_type": "time_swap",
            "entries": [
                entry("e1", f"We {milestone} in {good_month}.", ["timeline", normalize(milestone)], 1, timestamp=f"2026-{month_num(good_month)}-03"),
                entry("e2", f"We {milestone} in {bad_month}.", ["timeline", normalize(milestone)], 1, timestamp=f"2026-{month_num(bad_month)}-04"),
                entry("e3", f"We discussed how to {milestone} throughout {good_month} planning.", ["planning", normalize(milestone)], 2),
                entry("e4", f"We completed documentation for that change in {bad_month}.", ["docs", normalize(milestone)], 2),
            ],
            "relevant_ids": ["e1"],
            "notes": "Same event with swapped month distractor.",
        })
    return cases
205
+
206
+
207
def time_cases_extra():
    """Additional time-swap cases with a trimmed entry set (no docs filler).

    Returns:
        list[dict]: case dicts matching schema/case.schema.json.
    """
    rows = [
        ("publish the first public image", "January", "March"),
        ("rename the graph package to taggraph", "February", "April"),
        ("switch OpenCode to the official MCP SDK", "May", "July"),
        ("remove the terminal UI", "June", "August"),
        ("set bge-small as the default embedded model", "September", "November"),
        ("introduce proper-noun tag extraction", "October", "December"),
        ("generalize the Docker runtime image", "March", "May"),
        ("fix anonymous GHCR pulls", "April", "June"),
        ("drop tier terminology from the interface", "July", "September"),
        ("move from topicgraph to taggraph", "August", "October"),
        ("add benchmark charts", "November", "January"),
        ("publish the adversarial benchmark skeleton", "December", "February"),
        ("introduce depth-aware ranking bias", "January", "March"),
        ("refocus the README benchmark section", "February", "April"),
        ("switch the package image name to generic tagmem", "May", "July"),
        ("replace the shell-wrapper MCP path", "June", "August"),
        ("add scripted ingest integration tests", "September", "November"),
        ("add benchmark raw artifact publishing", "October", "December"),
        ("make the public repo visible", "March", "May"),
        ("publish the refreshed runtime image", "April", "June"),
    ]
    cases = []
    for offset, (milestone, real_month, decoy_month) in enumerate(rows):
        case_entries = [
            entry("e1", f"We {milestone} in {real_month}.", [normalize(milestone), "timeline"], 1, timestamp=f"2026-{month_num(real_month)}-03"),
            entry("e2", f"We {milestone} in {decoy_month}.", [normalize(milestone), "timeline"], 1, timestamp=f"2026-{month_num(decoy_month)}-04"),
            entry("e3", f"We planned to {milestone} throughout {real_month}.", [normalize(milestone), "planning"], 2),
        ]
        cases.append({
            "id": f"timex-{offset + 1:03d}",
            "query": f"When did we {milestone}?",
            "adversary_type": "time_swap",
            "entries": case_entries,
            "relevant_ids": ["e1"],
            "notes": "Extra time-swap case.",
        })
    return cases
245
+
246
+
247
def state_cases():
    """State-update cases: a current fact versus a stale one.

    e1 holds the current value (depth 0, newest timestamp), e2 the superseded
    value (older timestamp), e3 a sibling/preview value, and e4 runbook filler.

    Returns:
        list[dict]: case dicts matching schema/case.schema.json.
    """
    facts = [
        ("current production domain", "ecue.ai", "hifidelityai.com", "preview.ecue.ai"),
        ("default embed model", "bge-small-en-v1.5", "all-MiniLM-L6-v2", "bge-base-en-v1.5"),
        ("current staging database", "db-staging.internal", "db-old-staging.internal", "db-preview.internal"),
        ("current metrics backend", "VictoriaMetrics", "Prometheus", "InfluxDB"),
        ("current mail sender", "ses-prod", "ses-old", "ses-preview"),
        ("current OpenCode MCP name", "tagmem", "tagmem_active", "mempalace_active"),
        ("current published image", "ghcr.io/codysnider/tagmem", "ghcr.io/codysnider/tagmem-opencode", "ghcr.io/codysnider/tagmem-preview"),
        ("current local data root", "$HOME/.local/share/tagmem", "/data/tagmem", "/srv/tagmem"),
        ("current vector backend", "chromem-go", "ChromaDB", "SQLite FTS"),
        ("current graph package name", "taggraph", "topicgraph", "memorygraph"),
    ]
    cases = []
    for idx, (fact_label, now_value, stale_value, sibling_value) in enumerate(facts, 1):
        cases.append({
            "id": f"state-{idx:03d}",
            "query": f"What is the {fact_label}?",
            "adversary_type": "state_update",
            "entries": [
                entry("e1", f"The {fact_label} is {now_value}.", [normalize(fact_label), "current"], 0, timestamp="2026-04-01"),
                entry("e2", f"The {fact_label} used to be {stale_value}.", [normalize(fact_label), "historical"], 2, timestamp="2025-10-01"),
                entry("e3", f"The staging or preview equivalent is {sibling_value}.", [normalize(fact_label), "preview"], 2, timestamp="2026-04-01"),
                entry("e4", f"We updated the runbook after changing the {fact_label}.", [normalize(fact_label), "runbook"], 3),
            ],
            "relevant_ids": ["e1"],
            "notes": "Current fact vs stale fact and sibling environment distractor.",
        })
    return cases
278
+
279
+
280
def state_cases_extra():
    """Build additional state_update cases (current vs. stale vs. alternative).

    Each row provides a fact label, its current value, a stale value, and a
    plausible alternative; only the current-value entry (e1) is relevant.
    """
    rows = [
        ("current benchmark default model", "bge-small-en-v1.5", "all-MiniLM-L6-v2", "bge-base-en-v1.5"),
        ("current OpenCode MCP name", "tagmem", "tagmem_active", "mempalace_active"),
        ("current Dockerfile name", "Dockerfile.runtime", "Dockerfile.opencode", "Dockerfile.dev"),
        ("current GHCR image", "ghcr.io/codysnider/tagmem", "ghcr.io/codysnider/tagmem-opencode", "ghcr.io/codysnider/tagmem-preview"),
        ("current benchmark data root", "$HOME/.local/share/tagmem", "/data/tagmem", "/srv/tagmem"),
        ("current public MCP prefix", "tagmem_", "tiered_memory_", "memory_"),
        ("current graph package name", "taggraph", "topicgraph", "memorygraph"),
        ("current default acceleration mode", "auto", "cuda", "cpu"),
        ("current install preference", "Docker", "local go build", "manual source edits"),
        ("current CLI primary surface", "CLI and MCP", "TUI and MCP", "TUI only"),
        ("current default runtime image", "tagmem:latest", "tagmem-opencode:latest", "tagmem-dev:latest"),
        ("current published benchmark file path", "benchmarks/raw/bge-small-en-v1.5", "bench-results-live", "results/live"),
        ("current package host", "ghcr.io", "docker.io", "quay.io"),
        ("current repo owner", "codysnider", "lhl", "openai"),
        ("current benchmark category count", "5", "4", "6"),
        ("current benchmark default device", "GPU", "CPU", "TPU"),
        ("current memory grouping model", "tags plus depth", "rooms plus wings", "folders only"),
        ("current benchmark standalone project", "adversarial-memory-bench", "benchmarks", "tagmem-bench"),
        ("current install wrapper root", "~/.local/share/tagmem/install", "~/.local/share/tagmem/opencode-install", "~/.cache/tagmem/install"),
        ("current doctor image behavior", "generic runtime image", "OpenCode-only image", "source-only binary"),
    ]
    cases = []
    for idx, row in enumerate(rows, 1):
        label, current, old, sibling = row
        case = {
            "id": f"statex-{idx:03d}",
            "query": f"What is the {label}?",
            "adversary_type": "state_update",
            "entries": [
                entry("e1", f"The {label} is {current}.", [normalize(label), "current"], 0, timestamp="2026-04-10"),
                entry("e2", f"The {label} used to be {old}.", [normalize(label), "historical"], 2, timestamp="2025-12-01"),
                entry("e3", f"A related alternative is {sibling}.", [normalize(label), "alternative"], 2, timestamp="2026-04-10"),
            ],
            "relevant_ids": ["e1"],
            "notes": "Extra state-update case.",
        }
        cases.append(case)
    return cases
318
+
319
+
320
def speaker_cases():
    """Build speaker_swap cases: the assistant's suggestion vs. the user's.

    For each subject, e1 is what the assistant suggested (the answer to
    "What did you suggest ...?"), e2 the user's competing suggestion, and
    e3/e4 are lower-signal discussion/notes distractors. Only e1 is relevant.

    Returns:
        list[dict]: benchmark cases with ids ``speaker-001`` .. ``speaker-010``.
    """
    topics = [
        # (subject, assistant suggestion, user suggestion)
        ("Terraform environments", "separate Terraform states for shared, production, and staging", "one Terraform state with workspaces"),
        ("tagging pipeline", "use deterministic extraction first and embedding ranking second", "let the model invent tags directly"),
        ("OpenCode integration", "run the image directly with the mcp subcommand", "wrap everything in a shell pipeline"),
        ("release packaging", "publish ghcr.io/codysnider/tagmem as the generic image", "ship only a local Docker build"),
        ("depth model", "treat depth as a secondary ranking bias", "replace tags with rigid hierarchical folders"),
        ("README benchmarks", "keep a compact benchmark table in the main README", "put the full benchmark suite in the top section"),
        ("Docker data root", "use HOME-local defaults with an env override", "hardcode everything to /data/tagmem"),
        ("MCP compatibility", "use the official Go SDK", "hand-roll every transport detail"),
        ("adversarial benchmark repo", "split it into a standalone project", "bury it inside the main app repo"),
        ("OpenCode command naming", "use remember instead of mine", "keep mine because users will figure it out"),
    ]
    cases = []
    # enumerate(..., 1) keeps the numbering style consistent with the
    # *_extra builders instead of a hand-maintained idx counter.
    for idx, (subject, assistant_text, user_text) in enumerate(topics, 1):
        cases.append({
            "id": f"speaker-{idx:03d}",
            "query": f"What did you suggest for {subject}?",
            "adversary_type": "speaker_swap",
            "entries": [
                entry("e1", f"You suggested {assistant_text}.", [normalize(subject), "assistant", "suggestion"], 1, speaker="assistant"),
                entry("e2", f"I suggested {user_text}.", [normalize(subject), "user", "suggestion"], 1, speaker="user"),
                entry("e3", f"We discussed {subject} implementation details later.", [normalize(subject), "discussion"], 2),
                entry("e4", f"The team wrote follow-up notes about {subject}.", [normalize(subject), "notes"], 3),
            ],
            "relevant_ids": ["e1"],
            "notes": "Assistant/user suggestion swap with same subject area.",
        })
    return cases
351
+
352
+
353
def speaker_cases_extra():
    """Build additional speaker_swap cases.

    Same shape as ``speaker_cases``: e1 holds the assistant's suggestion
    (the relevant entry), e2 the user's competing suggestion, e3 a
    follow-up-discussion distractor.
    """
    rows = [
        ("adversarial benchmark design", "use exact relevant ids and plausible distractors", "just score generated answers"),
        ("Docker image naming", "make the image generic, not OpenCode-specific", "keep the image named tagmem-opencode forever"),
        ("OpenCode commands", "rename mine to remember", "keep mine because users already know it"),
        ("tagging pipeline", "use deterministic extraction before model ranking", "let the model invent every tag from scratch"),
        ("README benchmarks", "keep the main README comparison compact", "put every benchmark detail at the top of the README"),
        ("public package visibility", "verify anonymous GHCR pulls from another machine", "assume package visibility from the maintainer machine"),
        ("OpenCode MCP implementation", "use the official Go SDK", "keep hand-rolling the protocol indefinitely"),
        ("TUI removal", "remove it completely for this phase", "leave the broken TUI in place because it might improve later"),
        ("depth model", "treat depth as a ranking bias, not the main organizer", "replace tags with depth-only buckets"),
        ("GPU support", "allow GPU if available and fall back to CPU", "ban GPU because purity matters more than user experience"),
        ("docs cleanup", "remove personal machine paths from public docs", "leave local paths because maintainers can mentally translate them"),
        ("fresh install testing", "test from another machine over SSH", "assume local success implies public install success"),
        ("image publish process", "publish generic ghcr.io/codysnider/tagmem", "ship only a local image and tell users to rebuild it"),
        ("memory terminology", "use entries, tags, depth, facts, diary", "invent new metaphor-heavy nouns"),
        ("public benchmark reporting", "include methodology, machine specs, and raw JSON", "quote one headline number and hide everything else"),
        ("OpenAI-compatible support", "keep it generic rather than naming one specific server", "hard-code it as an Ollama feature"),
        ("CLI install docs", "prefer Docker-first instructions", "assume everyone wants go install first"),
        ("graph naming", "rename topicgraph to taggraph", "keep topicgraph because it is only internal"),
        ("CPU fallback verification", "prove it with real add/search on a remote machine", "just trust the doctor output"),
        ("benchmark expansion", "add adversarial distractor cases as a standalone project", "bury the benchmark data inside the main repo forever"),
    ]
    cases = []
    for idx, row in enumerate(rows, 1):
        subject, assistant_text, user_text = row
        case = {
            "id": f"speakerx-{idx:03d}",
            "query": f"What did you suggest for {subject}?",
            "adversary_type": "speaker_swap",
            "entries": [
                entry("e1", f"You suggested {assistant_text}.", [normalize(subject), "assistant", "suggestion"], 1, speaker="assistant"),
                entry("e2", f"I suggested {user_text}.", [normalize(subject), "user", "suggestion"], 1, speaker="user"),
                entry("e3", f"We discussed {subject} again in follow-up planning.", [normalize(subject), "discussion"], 2),
            ],
            "relevant_ids": ["e1"],
            "notes": "Extra speaker confusion case.",
        }
        cases.append(case)
    return cases
391
+
392
+
393
def paraphrase_cases():
    """Build near_duplicate_paraphrase cases.

    Each item gives the correct fact (e1), a near-duplicate with a wrong
    value (e2), a neighboring fact sharing surface vocabulary (e3), and a
    low-signal policy note (e4). Only e1 is relevant.

    Returns:
        list[dict]: benchmark cases with ids ``paraphrase-001`` .. ``paraphrase-012``.
    """
    items = [
        # (correct fact, wrong-value paraphrase, neighboring fact, query)
        ("API calls time out after 30 seconds.", "The API timeout is 60 seconds.", "Background jobs retry for up to 30 seconds.", "What timeout do API calls use?"),
        ("Production cookies expire after 12 hours.", "Production cookies expire after 24 hours.", "Staging cookies expire after 12 hours.", "How long do production cookies last?"),
        ("The upload limit is 25 megabytes.", "The upload limit is 50 megabytes.", "The attachment limit is 25 megabytes.", "What is the upload limit?"),
        ("Auth refresh runs every 10 minutes.", "Auth refresh runs every 15 minutes.", "Metrics scraping runs every 10 minutes.", "How often does auth refresh run?"),
        ("The reporting batch runs at 02:00 UTC.", "The reporting batch runs at 03:00 UTC.", "The cleanup batch runs at 02:00 UTC.", "When does the reporting batch run?"),
        ("The public API port is 8443.", "The public API port is 9443.", "The internal admin port is 8443.", "What port does the public API use?"),
        ("The Docker data root defaults to HOME-local storage.", "The Docker data root defaults to /data/tagmem.", "The benchmark root defaults to HOME-local storage.", "What is the Docker data root default?"),
        ("The MCP tools use the tagmem_ prefix.", "The MCP tools use the tiered_memory_ prefix.", "The CLI binary is named tagmem.", "What prefix do the MCP tools use?"),
        ("The default GPU model is bge-small-en-v1.5.", "The default GPU model is bge-base-en-v1.5.", "The CPU fallback model is all-MiniLM-L6-v2.", "What is the default GPU model?"),
        ("The runtime image is ghcr.io/codysnider/tagmem.", "The runtime image is ghcr.io/codysnider/tagmem-opencode.", "The repo path is github.com/codysnider/tagmem.", "What is the runtime image name?"),
        ("The MCP server is implemented with the official Go SDK.", "The MCP server is still handwritten.", "The transport runs over stdio.", "How is the MCP server implemented?"),
        ("The local graph package is named taggraph.", "The local graph package is named topicgraph.", "The benchmark package is named tagbench.", "What is the graph package name?"),
    ]
    cases = []
    # enumerate(..., 1) keeps the numbering style consistent with the
    # *_extra builders instead of a hand-maintained idx counter.
    for idx, (correct, wrong, neighbor, query) in enumerate(items, 1):
        cases.append({
            "id": f"paraphrase-{idx:03d}",
            "query": query,
            "adversary_type": "near_duplicate_paraphrase",
            "entries": [
                entry("e1", correct, ["config", "limits"], 1),
                entry("e2", wrong, ["config", "limits"], 1),
                entry("e3", neighbor, ["jobs", "limits"], 2),
                entry("e4", f"We reviewed the policy related to: {correct}", ["policy"], 3),
            ],
            "relevant_ids": ["e1"],
            "notes": "Near-duplicate paraphrase with one wrong value distractor.",
        })
    return cases
426
+
427
+
428
def paraphrase_cases_extra():
    """Build additional near_duplicate_paraphrase cases.

    Same shape as ``paraphrase_cases`` but with three entries: correct fact
    (e1, relevant), wrong-value paraphrase (e2), and a contextual neighbor (e3).
    """
    rows = [
        ("The benchmark data root defaults to the home-local tagmem directory.", "The benchmark data root defaults to /data/tagmem.", "The benchmark output root defaults to the home-local tagmem directory.", "What is the benchmark data root default?"),
        ("The embedded model fallback uses the CPU runtime.", "The embedded model fallback aborts when CUDA fails.", "The doctor command reports the current runtime path.", "What happens when CUDA is unavailable?"),
        ("The MCP server runs over stdio.", "The MCP server only runs over HTTP.", "The MCP client uses a local Docker command.", "How does the MCP server communicate?"),
        ("The install wrapper clones the public GitHub repository first.", "The install wrapper only copies a local checkout.", "The runtime wrapper uses the installed repo path.", "What does the install wrapper do first?"),
        ("The README benchmark section now uses a compact comparison table.", "The README benchmark section now embeds the full benchmark methodology.", "Detailed benchmark docs live in the benchmarks folder.", "How is the benchmark section presented in the README?"),
        ("The OpenCode config should launch the generic image with the mcp subcommand.", "The OpenCode config should launch an OpenCode-specific image name.", "The tagmem image is generic and can run doctor or bench too.", "How should OpenCode launch the image?"),
        ("The graph browser should use tags instead of topics.", "The graph browser should keep topic terminology because users expect it.", "The graph package is now named taggraph.", "What should the graph browser use?"),
        ("The public image should be published to ghcr.io/codysnider/tagmem.", "The public image should stay local-only.", "The install flow can fall back to a local build if pull fails.", "Where should the public image be published?"),
        ("The main command with no arguments prints help.", "The main command with no arguments opens a TUI.", "The TUI was removed from the project surface.", "What happens when tagmem runs without a command?"),
        ("The standalone adversarial benchmark should live in its own repository.", "The adversarial benchmark should stay buried under the main repo only.", "The standalone benchmark can later be pushed to Hugging Face.", "Where should the adversarial benchmark live?"),
        ("The improved tagger uses deterministic extraction plus embedding ranking.", "The improved tagger relies entirely on a generative model.", "The improved tagger is aware of proper nouns and code symbols.", "How does the improved tagger work?"),
        ("The default GPU model is bge-small because it is the best overall tradeoff.", "The default GPU model is bge-base because it wins every benchmark decisively.", "MiniLM remains the throughput-first fallback.", "Why is bge-small the default GPU model?"),
        ("The MCP tool names now use the tagmem_ prefix.", "The MCP tool names still use the tiered_memory_ prefix for compatibility.", "The README lists the current tool names explicitly.", "What MCP tool prefix does the project use?"),
        ("The Docker data root can be overridden with TAGMEM_DATA_ROOT.", "The Docker data root is hardcoded to /data/tagmem.", "The default data root lives under the user's home directory.", "How do you override the Docker data root?"),
        ("The doctor command should report the execution device and runtime library.", "The doctor command only reports whether the process started.", "The doctor command is useful for checking CPU fallback.", "What should the doctor command report?"),
        ("The benchmark package should include raw JSON outputs and machine specs.", "The benchmark package should only include a headline score.", "The methodology file lists exact commands and dataset hashes.", "What belongs in the benchmark package?"),
        ("The CLI command for bringing data in is ingest, not mine.", "The CLI command for bringing data in should be called mine because it sounds more creative.", "The naming should stay boring and well-understood.", "What is the correct import command name?"),
        ("Depth is secondary and tags are primary in the memory model.", "Depth should replace tags entirely as the primary organizer.", "Search may still use depth as a bias.", "How do depth and tags relate?"),
        ("The image should be generic enough to run mcp, doctor, or benchmarks.", "The image should be named specifically for OpenCode forever.", "The same image can be reused by multiple agent runtimes.", "Why is the image generic?"),
        ("A fresh-user install test should be run from another machine, not assumed from local success.", "A local maintainer machine is enough to validate public installation.", "Anonymous GHCR pulls are part of the install story.", "How should install validation be done?"),
    ]
    cases = []
    for idx, row in enumerate(rows, 1):
        correct, wrong, neighbor, query = row
        case = {
            "id": f"paraphrasex-{idx:03d}",
            "query": query,
            "adversary_type": "near_duplicate_paraphrase",
            "entries": [
                entry("e1", correct, ["docs", "retrieval"], 1),
                entry("e2", wrong, ["docs", "retrieval"], 1),
                entry("e3", neighbor, ["docs", "context"], 2),
            ],
            "relevant_ids": ["e1"],
            "notes": "Extra near-duplicate paraphrase case.",
        }
        cases.append(case)
    return cases
466
+
467
+
468
def entry(entry_id, text, tags, depth, timestamp=None, speaker=None):
    """Build one benchmark entry dict.

    The optional ``timestamp`` and ``speaker`` fields are included only when
    they are truthy, keeping the JSONL output free of null fields.
    """
    item = {"id": entry_id, "text": text, "tags": tags, "depth": depth}
    for key, value in (("timestamp", timestamp), ("speaker", speaker)):
        if value:
            item[key] = value
    return item
475
+
476
+
477
def normalize(text: str) -> str:
    """Lowercase *text* and map spaces and slashes to hyphens (tag form)."""
    return text.lower().translate(str.maketrans(" /", "--"))
479
+
480
+
481
def month_num(name: str) -> str:
    """Return the zero-padded month number ("01".."12") for an English month name.

    Raises KeyError for names outside January..December, matching the
    original dict-lookup behavior.
    """
    names = (
        "January", "February", "March", "April", "May", "June",
        "July", "August", "September", "October", "November", "December",
    )
    lookup = {month: f"{number:02d}" for number, month in enumerate(names, 1)}
    return lookup[name]
487
+
488
+
489
def main() -> int:
    """Run every case builder and write all cases to OUT as compact JSONL.

    Returns 0 so the caller can pass the value to SystemExit.
    """
    builders = (
        env_cases, env_cases_extra,
        entity_cases, entity_cases_extra,
        time_cases, time_cases_extra,
        state_cases, state_cases_extra,
        speaker_cases, speaker_cases_extra,
        paraphrase_cases, paraphrase_cases_extra,
    )
    cases = [case for builder in builders for case in builder()]

    OUT.parent.mkdir(parents=True, exist_ok=True)
    with OUT.open("w") as f:
        for case in cases:
            # Compact separators keep the JSONL lines small.
            f.write(json.dumps(case, separators=(",", ":")) + "\n")

    print(f"wrote {len(cases)} cases to {OUT}")
    return 0
508
+
509
+
510
# Script entry point: propagate main()'s integer status to the shell.
if __name__ == "__main__":
    raise SystemExit(main())
scripts/run_benchmark.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ import argparse
3
+ import json
4
+ from pathlib import Path
5
+
6
+
7
def load_cases(path: Path):
    """Read benchmark cases from a JSONL file, skipping blank lines."""
    with path.open() as f:
        return [json.loads(stripped) for line in f if (stripped := line.strip())]
16
+
17
+
18
def keyword_score(query: str, text: str) -> float:
    """Count query terms (length >= 3, case-insensitive) found in *text*.

    '?' and ',' are treated as whitespace before tokenizing; each matching
    term contributes 1.0 to the score (substring match, not word match).
    """
    haystack = text.lower()
    cleaned = query.replace("?", " ").replace(",", " ")
    terms = (word.lower() for word in cleaned.split() if len(word) >= 3)
    return float(sum(1 for term in terms if term in haystack))
26
+
27
+
28
def run_keyword_baseline(cases):
    """Score every case with the keyword baseline.

    Entries are ranked by descending keyword score with the entry id as a
    deterministic tie-break. Reports recall@1, recall@5, and MRR, mirroring
    the metrics produced by the other benchmark runners in this repo.

    Returns:
        dict with "cases", "recall_at_1", "recall_at_5", "mrr", and per-case
        "results". The added MRR fields are additive and backward compatible.
    """
    results = []
    hits_at_1 = 0
    hits_at_5 = 0
    total_mrr = 0.0
    for case in cases:
        scored = [(keyword_score(case["query"], entry["text"]), entry["id"]) for entry in case["entries"]]
        scored.sort(key=lambda item: (-item[0], item[1]))
        ranked_ids = [entry_id for _, entry_id in scored]
        relevant = set(case["relevant_ids"])
        # bool(ranked_ids) instead of the original `ranked_ids[:1] and ...`,
        # which evaluated to an empty *list* (not False) for empty rankings.
        hit1 = bool(ranked_ids) and ranked_ids[0] in relevant
        hit5 = any(item in relevant for item in ranked_ids[:5])
        # Reciprocal rank of the first relevant id; 0.0 when none is ranked.
        mrr = next((1.0 / (pos + 1) for pos, item in enumerate(ranked_ids) if item in relevant), 0.0)
        hits_at_1 += 1 if hit1 else 0
        hits_at_5 += 1 if hit5 else 0
        total_mrr += mrr
        results.append({
            "id": case["id"],
            "adversary_type": case["adversary_type"],
            "query": case["query"],
            "relevant_ids": case["relevant_ids"],
            "ranked_ids": ranked_ids,
            "hit_at_1": hit1,
            "hit_at_5": hit5,
            "mrr": mrr,
        })
    total = len(cases) or 1  # avoid division by zero on an empty dataset
    return {
        "cases": len(cases),
        "recall_at_1": hits_at_1 / total,
        "recall_at_5": hits_at_5 / total,
        "mrr": total_mrr / total,
        "results": results,
    }
60
+
61
+
62
def main() -> int:
    """CLI entry point: run the keyword baseline, print a summary, optionally save the full report."""
    parser = argparse.ArgumentParser(description="Run a simple adversarial-memory-bench baseline")
    parser.add_argument("--data", default=str(Path(__file__).resolve().parents[1] / "data" / "v0.1" / "cases.jsonl"))
    parser.add_argument("--out", default="")
    args = parser.parse_args()

    report = run_keyword_baseline(load_cases(Path(args.data)))
    summary = {key: report[key] for key in ("cases", "recall_at_1", "recall_at_5")}
    print(json.dumps(summary, indent=2))
    if args.out:
        out_path = Path(args.out)
        out_path.parent.mkdir(parents=True, exist_ok=True)
        out_path.write_text(json.dumps(report, indent=2))
    return 0
76
+
77
+
78
# Script entry point: propagate main()'s integer status to the shell.
if __name__ == "__main__":
    raise SystemExit(main())
scripts/run_mempalace_benchmark.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ import argparse
3
+ import json
4
+ import shutil
5
+ import tempfile
6
+ from pathlib import Path
7
+
8
+ import chromadb
9
+
10
+
11
def load_cases(path: Path):
    """Read benchmark cases from a JSONL file, skipping blank lines."""
    with path.open() as f:
        return [json.loads(stripped) for line in f if (stripped := line.strip())]
20
+
21
+
22
def run_case(case: dict):
    """Run one benchmark case against a fresh on-disk ChromaDB collection.

    The case's entries are stored with MemPalace-style metadata (wing/room/
    hall), the case query is issued, and the returned ranking is scored with
    hit@1, hit@5, and MRR. The temporary database directory is always removed.
    """
    root = Path(tempfile.mkdtemp(prefix="mempalace-adv-"))
    try:
        client = chromadb.PersistentClient(path=str(root))
        col = client.get_or_create_collection("mempalace_drawers")
        ids = []
        docs = []
        metas = []
        for entry in case["entries"]:
            ids.append(entry["id"])
            docs.append(entry["text"])
            # The entry id doubles as source_file so it can be recovered
            # from the metadata of each query hit below.
            metas.append({
                "wing": "bench",
                "room": case["adversary_type"],
                "hall": "hall_facts",
                "source_file": entry["id"],
            })
        col.add(ids=ids, documents=docs, metadatas=metas)
        results = col.query(query_texts=[case["query"]], n_results=len(case["entries"]), include=["documents", "metadatas", "distances"])
        # NOTE(review): assumes Chroma returns the single query's hits
        # best-match-first in results["metadatas"][0] -- confirm against
        # the chromadb query API.
        ranked_ids = [meta["source_file"] for meta in results["metadatas"][0]]
        relevant = set(case["relevant_ids"])
        hit_at_1 = bool(ranked_ids[:1] and ranked_ids[0] in relevant)
        hit_at_5 = any(item in relevant for item in ranked_ids[:5])
        # Reciprocal rank of the first relevant id; stays 0.0 if none ranked.
        mrr = 0.0
        for i, item in enumerate(ranked_ids):
            if item in relevant:
                mrr = 1.0 / float(i + 1)
                break
        return {
            "id": case["id"],
            "adversary_type": case["adversary_type"],
            "query": case["query"],
            "relevant_ids": case["relevant_ids"],
            "ranked_ids": ranked_ids,
            "hit_at_1": hit_at_1,
            "hit_at_5": hit_at_5,
            "mrr": mrr,
        }
    finally:
        # Best-effort cleanup even when the client or query raises.
        shutil.rmtree(root, ignore_errors=True)
62
+
63
+
64
def main() -> int:
    """CLI entry point: run every case through ChromaDB retrieval and report metrics."""
    parser = argparse.ArgumentParser(description="Run adversarial benchmark against MemPalace raw-style retrieval")
    parser.add_argument("--data", default=str(Path(__file__).resolve().parents[1] / "data" / "cases.jsonl"))
    parser.add_argument("--out", default="")
    args = parser.parse_args()

    report = summarize([run_case(case) for case in load_cases(Path(args.data))])
    summary = {key: report[key] for key in ("cases", "recall_at_1", "recall_at_5", "mrr")}
    print(json.dumps(summary, indent=2))
    if args.out:
        out = Path(args.out)
        out.parent.mkdir(parents=True, exist_ok=True)
        out.write_text(json.dumps(report, indent=2))
    return 0
79
+
80
+
81
def summarize(results):
    """Aggregate per-case results into recall@1/@5, MRR, and per-category hit@5 rates."""
    total = len(results)
    hits1 = sum(1 for item in results if item["hit_at_1"])
    hits5 = sum(1 for item in results if item["hit_at_5"])
    total_mrr = sum(item["mrr"] for item in results)
    per_category = {}
    for item in results:
        bucket = per_category.setdefault(item["adversary_type"], {"hit": 0, "total": 0})
        bucket["total"] += 1
        if item["hit_at_5"]:
            bucket["hit"] += 1
    return {
        "cases": total,
        "recall_at_1": hits1 / total if total else 0,
        "recall_at_5": hits5 / total if total else 0,
        "mrr": total_mrr / total if total else 0,
        "per_category": {key: value["hit"] / value["total"] for key, value in per_category.items()},
        "results": results,
    }
100
+
101
+
102
# Script entry point: propagate main()'s integer status to the shell.
if __name__ == "__main__":
    raise SystemExit(main())
scripts/run_tagmem_benchmark.py ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ import argparse
3
+ import json
4
+ import os
5
+ import shutil
6
+ import subprocess
7
+ import tempfile
8
+ from datetime import datetime
9
+ from pathlib import Path
10
+
11
+
12
def load_cases(path: Path):
    """Read benchmark cases from a JSONL file, skipping blank lines."""
    with path.open() as f:
        return [json.loads(stripped) for line in f if (stripped := line.strip())]
21
+
22
+
23
def base_env(root: Path):
    """Return a copy of the process environment sandboxed under *root*.

    HOME and the XDG directories are redirected into *root* so the tagmem CLI
    cannot touch the real user profile; the embed provider defaults to the
    deterministic embedded-hash backend unless the caller already set it.
    """
    env = dict(os.environ)
    env.update({
        "HOME": str(root),
        "XDG_CONFIG_HOME": str(root / ".config"),
        "XDG_DATA_HOME": str(root / ".local" / "share"),
        "XDG_CACHE_HOME": str(root / ".cache"),
    })
    env.setdefault("TAGMEM_EMBED_PROVIDER", "embedded-hash")
    return env
31
+
32
+
33
def load_case_entries(tagmem_bin: str, env: dict, case: dict):
    """Ingest every entry of *case* into the tagmem store via `tagmem add`.

    When an entry carries a timestamp, it is injected through the
    TAGMEM_IMPORT_CREATED_AT / TAGMEM_IMPORT_UPDATED_AT environment
    variables; those variables are cleared again for entries without one
    so earlier values do not leak between entries.
    """
    for item in case["entries"]:
        cmd = [
            tagmem_bin,
            "add",
            "--depth", str(item.get("depth", 1)),
            "--title", item["id"],
            "--body", item["text"],
        ]
        tags = item.get("tags")
        if tags:
            cmd += ["--tags", ",".join(tags)]
        speaker = item.get("speaker")
        if speaker:
            cmd += ["--source", speaker]
        created = parse_timestamp(item.get("timestamp"))
        if created:
            env["TAGMEM_IMPORT_CREATED_AT"] = created
            env["TAGMEM_IMPORT_UPDATED_AT"] = created
        else:
            env.pop("TAGMEM_IMPORT_CREATED_AT", None)
            env.pop("TAGMEM_IMPORT_UPDATED_AT", None)
        subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, env=env)
54
+
55
+
56
def score_case_result(case: dict, ranked_ids):
    """Score one retrieval ranking against the case's relevant ids.

    Produces hit@1, hit@5, and the reciprocal rank of the first relevant
    id (0.0 when no relevant id was retrieved).
    """
    relevant = set(case["relevant_ids"])
    hit_at_1 = bool(ranked_ids) and ranked_ids[0] in relevant
    hit_at_5 = any(item in relevant for item in ranked_ids[:5])
    mrr = next((1.0 / (pos + 1) for pos, item in enumerate(ranked_ids) if item in relevant), 0.0)
    return {
        "id": case["id"],
        "adversary_type": case["adversary_type"],
        "query": case["query"],
        "relevant_ids": case["relevant_ids"],
        "ranked_ids": ranked_ids,
        "hit_at_1": hit_at_1,
        "hit_at_5": hit_at_5,
        "mrr": mrr,
    }
75
+
76
+
77
def run_case(tagmem_bin: str, case: dict):
    """Run one benchmark case against a real tagmem binary.

    Creates a throwaway sandboxed HOME, initializes a fresh store, ingests
    the case's entries, runs `tagmem search`, parses the ranking from the
    CLI output, and scores it. The sandbox is removed afterwards.
    """
    root = Path(tempfile.mkdtemp(prefix="tagmem-adv-"))
    env = base_env(root)
    try:
        subprocess.run([tagmem_bin, "init"], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, env=env)
        load_case_entries(tagmem_bin, env, case)
        proc = subprocess.run([tagmem_bin, "search", "--limit", str(len(case["entries"])), case["query"]], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, env=env)
        ranked_ids = []
        # NOTE(review): assumes result rows start with "[" and that the third
        # whitespace-separated field is the title -- which load_case_entries
        # set to the entry id. Confirm against tagmem's search output format.
        for line in proc.stdout.splitlines():
            line = line.strip()
            if not line.startswith("["):
                continue
            parts = line.split()
            if len(parts) >= 3:
                ranked_ids.append(parts[2])
        return score_case_result(case, ranked_ids)
    finally:
        # Always remove the sandbox, even when a subprocess call failed.
        shutil.rmtree(root, ignore_errors=True)
95
+
96
+
97
def main() -> int:
    """CLI entry point: run every case against the tagmem CLI and report metrics."""
    parser = argparse.ArgumentParser(description="Run adversarial benchmark against tagmem CLI")
    parser.add_argument("--data", default=str(Path(__file__).resolve().parents[1] / "data" / "v0.2" / "cases.jsonl"))
    parser.add_argument("--tagmem-bin", default="tagmem")
    parser.add_argument("--out", default="")
    args = parser.parse_args()

    report = summarize([run_case(args.tagmem_bin, case) for case in load_cases(Path(args.data))])
    summary = {key: report[key] for key in ("cases", "recall_at_1", "recall_at_5", "mrr")}
    print(json.dumps(summary, indent=2))
    if args.out:
        out = Path(args.out)
        out.parent.mkdir(parents=True, exist_ok=True)
        out.write_text(json.dumps(report, indent=2))
    return 0
113
+
114
+
115
def summarize(results):
    """Aggregate per-case results into recall@1/@5, MRR, and per-category hit@5 rates."""
    total = len(results)
    hits1 = sum(1 for item in results if item["hit_at_1"])
    hits5 = sum(1 for item in results if item["hit_at_5"])
    total_mrr = sum(item["mrr"] for item in results)
    per_category = {}
    for item in results:
        bucket = per_category.setdefault(item["adversary_type"], {"hit": 0, "total": 0})
        bucket["total"] += 1
        if item["hit_at_5"]:
            bucket["hit"] += 1
    return {
        "cases": total,
        "recall_at_1": hits1 / total if total else 0,
        "recall_at_5": hits5 / total if total else 0,
        "mrr": total_mrr / total if total else 0,
        "per_category": {key: value["hit"] / value["total"] for key, value in per_category.items()},
        "results": results,
    }
134
+
135
+
136
+ def parse_timestamp(value: str | None) -> str | None:
137
+ if not value:
138
+ return None
139
+ try:
140
+ if len(value) == 10:
141
+ dt = datetime.strptime(value, "%Y-%m-%d")
142
+ return dt.strftime("%Y-%m-%dT00:00:00Z")
143
+ dt = datetime.fromisoformat(value.replace("Z", "+00:00"))
144
+ return dt.astimezone().strftime("%Y-%m-%dT%H:%M:%SZ")
145
+ except Exception:
146
+ return None
147
+
148
+
149
# Script entry point: propagate main()'s integer status to the shell.
if __name__ == "__main__":
    raise SystemExit(main())
scripts/validate.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ import argparse
3
+ import json
4
+ from pathlib import Path
5
+
6
+
7
def main() -> int:
    """Validate the benchmark JSONL: unique case ids and resolvable relevant_ids.

    Exits with an error message (via SystemExit) on the first duplicate case
    id or on the first case whose relevant_ids reference missing entries.
    """
    parser = argparse.ArgumentParser(description="Validate adversarial-memory-bench JSONL data")
    parser.add_argument("--data", default=str(Path(__file__).resolve().parents[1] / "data" / "v0.2" / "cases.jsonl"))
    args = parser.parse_args()
    dataset = Path(args.data)
    seen_ids = set()
    with dataset.open() as f:
        for line_no, raw in enumerate(f, 1):
            raw = raw.strip()
            if not raw:
                continue
            record = json.loads(raw)
            case_id = record["id"]
            if case_id in seen_ids:
                raise SystemExit(f"duplicate case id at line {line_no}: {case_id}")
            seen_ids.add(case_id)
            entry_ids = {entry["id"] for entry in record["entries"]}
            missing = [rid for rid in record["relevant_ids"] if rid not in entry_ids]
            if missing:
                raise SystemExit(f"case {case_id} has missing relevant ids: {missing}")
    print(f"validated {len(seen_ids)} cases from {dataset}")
    return 0
29
+
30
+
31
# Script entry point: propagate main()'s integer status to the shell.
if __name__ == "__main__":
    raise SystemExit(main())