import sys
# Ensure the working directory is on sys.path so the local `src` package imports.
sys.path.insert(0, ".")
|
|
import uuid

from src.state import ResearchState, Paper, Claim, Verdict, SessionContext, SessionUpdate
from src.memory import init_db, load_session, save_turn, export_session_md, delete_session
|
|
print("=== Phase 2: State + Memory ===")
|
|
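# Paper dataclass: construct a record and echo its fields.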
p = Paper(
    title="Test Paper",
    abstract="This is a test abstract.",
    year=2024,
    citation_count=100,
    paper_id="abc123"
)
print(f"✓ Paper dataclass: {p.title} ({p.year})")
|
|
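# Claim dataclass: text, source, year, and a confidence label.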
c = Claim(text="Test claim", source_title="Test Paper", source_year=2024, confidence="high")
print(f"✓ Claim dataclass: [{c.confidence}] {c.text}")
|
|
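# Verdict constants: PASS / STALE / CONTRADICTED.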
print(f"✓ Verdict constants: {Verdict.PASS} / {Verdict.STALE} / {Verdict.CONTRADICTED}")
|
|
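# Memory layer: initialize the backing database.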
init_db()
print("✓ Database initialized")
|
|
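# Fresh session id for this test run.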
session_id = str(uuid.uuid4())
|
|
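# A brand-new session id should load as an empty context.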
ctx = load_session(session_id)
print(f"✓ Empty session loaded: {len(ctx.prior_positions)} prior positions")
|
|
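# Save one research turn: query, position, claims, and any contradictions found.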
update = SessionUpdate(
    query="What is the state of KV cache compression?",
    position="KV cache compression has advanced significantly with methods like H2O and StreamingLLM.",
    claim_confidences=[
        Claim("H2O reduces KV cache size by 20x", "H2O Paper", 2023, "high"),
        Claim("StreamingLLM enables infinite context", "StreamingLLM", 2023, "medium"),
    ],
    contradictions_found=["StreamingLLM contradicted by later infinite attention work (2024)"]
)
save_turn(session_id, update)
print("✓ Turn saved to database")
|
|
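# Reload the session and confirm the saved turn is present.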
ctx2 = load_session(session_id)
print(f"✓ Session reloaded: {len(ctx2.prior_positions)} prior position(s)")
print(f"  Prior query: {ctx2.prior_queries[0][:60]}...")
|
|
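# Export the session as Markdown.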
md = export_session_md(session_id)
print(f"✓ Markdown export: {len(md)} characters")
print(f"  Preview: {md[:120].strip()}")
|
|
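# Clean up the test session.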
delete_session(session_id)
print("✓ Session deleted")
|
|
print("\n✓ Phase 2 complete")