| """ |
| Memory Builder |
| Stage 1: Semantic Structured Compression (Section 3.1) |
| & Stage 2: Online Semantic Synthesis (Section 3.2) |
| Implements: |
| - Implicit semantic density gating: Φ_gate(W) → {m_k} (filters low-density windows) |
| - Sliding window processing for dialogue segmentation |
| - Generates compact memory units with resolved coreferences and absolute timestamps |
| """ |
from typing import List, Optional
from models.memory_entry import MemoryEntry, Dialogue
from utils.llm_client import LLMClient
from database.vector_store import VectorStore
import config
import concurrent.futures


class MemoryBuilder:
    """
    Memory Builder - Semantic Structured Compression (Section 3.1)

    Core Functions:
    1. Sliding-window segmentation
    2. Implicit semantic density gating: Φ_gate(W) → {m_k}
    3. Multi-view indexing: I(m_k) = {s_k, l_k, r_k}
    4. Intra-session consolidation at write time (Section 3.2): generates enough
       memory entries to ensure all information in the window is captured
    """
    def __init__(
        self,
        llm_client: LLMClient,
        vector_store: VectorStore,
        window_size: Optional[int] = None,
        overlap_size: Optional[int] = None,
        enable_parallel_processing: Optional[bool] = None,
        max_parallel_workers: Optional[int] = None,
        fail_on_extraction_error: Optional[bool] = None,
    ):
        self.llm_client = llm_client
        self.vector_store = vector_store
        self.window_size = window_size if window_size is not None else config.WINDOW_SIZE
        self.overlap_size = overlap_size if overlap_size is not None else getattr(config, 'OVERLAP_SIZE', 0)

        # Consecutive windows start step_size apart, so adjacent windows share
        # overlap_size dialogues.
        self.step_size = max(1, self.window_size - self.overlap_size)
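        # Worked example (values are illustrative, not from config):
        # window_size=10, overlap_size=2 gives step_size=8, so windows cover
        # dialogues [0:10], [8:18], [16:26], ... and each window repeats the
        # last two dialogues of its predecessor for context continuity.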

        self.enable_parallel_processing = (
            enable_parallel_processing
            if enable_parallel_processing is not None
            else getattr(config, 'ENABLE_PARALLEL_PROCESSING', True)
        )
        self.max_parallel_workers = (
            max_parallel_workers
            if max_parallel_workers is not None
            else getattr(config, 'MAX_PARALLEL_WORKERS', 4)
        )

        self.dialogue_buffer: List[Dialogue] = []
        self.processed_count = 0
        self.fail_on_extraction_error = (
            fail_on_extraction_error
            if fail_on_extraction_error is not None
            else getattr(config, "FAIL_ON_EXTRACTION_ERROR", False)
        )

        # Entries from the most recent window, passed into the next extraction
        # prompt to discourage duplicate entries.
        self.previous_entries: List[MemoryEntry] = []

    def add_dialogue(self, dialogue: Dialogue, auto_process: bool = True):
        """
        Add a dialogue to the buffer; once a full window has accumulated,
        process it immediately (when auto_process is True).
        """
        self.dialogue_buffer.append(dialogue)

        if auto_process and len(self.dialogue_buffer) >= self.window_size:
            self.process_window()

    def add_dialogues(self, dialogues: List[Dialogue], auto_process: bool = True):
        """
        Batch-add dialogues, with optional parallel window processing.
        """
        if auto_process and self.enable_parallel_processing and len(dialogues) > self.window_size * 2:
            # Large batch: segment into windows and process them concurrently.
            self.add_dialogues_parallel(dialogues)
        else:
            # Small batch: buffer now, process any full windows afterwards.
            for dialogue in dialogues:
                self.add_dialogue(dialogue, auto_process=False)

            if auto_process:
                while len(self.dialogue_buffer) >= self.window_size:
                    self.process_window()

    def add_dialogues_parallel(self, dialogues: List[Dialogue]):
        """
        Segment the incoming dialogues into windows and process them in parallel.
        Falls back to sequential processing if the parallel path fails.
        """
        # Snapshot the buffer so the sequential fallback can restore it.
        pre_existing = list(self.dialogue_buffer)
        windows_to_process = []
        try:
            self.dialogue_buffer.extend(dialogues)

            # Build overlapping windows, mirroring the sequential path:
            # each window starts step_size after the previous one.
            pos = 0
            while pos + self.window_size <= len(self.dialogue_buffer):
                window = self.dialogue_buffer[pos:pos + self.window_size]
                windows_to_process.append(window)
                pos += self.step_size

            # Process the partial tail as its own batch instead of leaving it
            # buffered (the sequential path leaves it for process_remaining).
            remaining = self.dialogue_buffer[pos:]
            if remaining:
                windows_to_process.append(remaining)
            self.dialogue_buffer = []

            if windows_to_process:
                print(f"\n[Parallel Processing] Processing {len(windows_to_process)} batches in parallel with {self.max_parallel_workers} workers")
                print(f"Batch sizes: {[len(w) for w in windows_to_process]}")

                self._process_windows_parallel(windows_to_process)

        except Exception as e:
            print(f"[Parallel Processing] Failed: {e}. Falling back to sequential processing...")

            # Restore the buffer only if it was already cleared above; otherwise
            # it still holds pre_existing + dialogues from the extend() call.
            # Note: if _process_windows_parallel stored some entries before
            # raising, this fallback may reprocess those dialogues.
            if not self.dialogue_buffer:
                self.dialogue_buffer = pre_existing + list(dialogues)

            while len(self.dialogue_buffer) >= self.window_size:
                self.process_window()

    def process_window(self):
        """
        Process the current window of dialogues - core compression step.
        """
        if not self.dialogue_buffer:
            return

        # Take one window; advancing by step_size keeps the last overlap_size
        # dialogues in the buffer as context for the next window.
        window = self.dialogue_buffer[:self.window_size]
        self.dialogue_buffer = self.dialogue_buffer[self.step_size:]

        print(f"\nProcessing window: {len(window)} dialogues (processed {self.processed_count} so far)")

        entries = self._generate_memory_entries(window)
        self.processed_count += len(window)

        if entries:
            self.vector_store.add_entries(entries)
            self.previous_entries = entries

        print(f"Generated {len(entries)} memory entries")

    def process_remaining(self):
        """
        Process any dialogues left in the buffer. Used by the sequential path;
        the parallel path already handles its own tail batch.
        """
        if self.dialogue_buffer:
            print(f"\nProcessing remaining dialogues: {len(self.dialogue_buffer)} (fallback mode)")
            entries = self._generate_memory_entries(self.dialogue_buffer)
            self.processed_count += len(self.dialogue_buffer)
            if entries:
                self.vector_store.add_entries(entries)
            self.dialogue_buffer = []
            print(f"Generated {len(entries)} memory entries")

    def _generate_memory_entries(self, dialogues: List[Dialogue]) -> List[MemoryEntry]:
        """
        Implicit semantic density gating (Section 3.1):
        Φ_gate(W) → {m_k}, generating compact memory units from a dialogue window.
        """
        dialogue_text = "\n".join([str(d) for d in dialogues])
        dialogue_ids = [d.dialogue_id for d in dialogues]

        # Show a few entries from the previous window so the LLM can avoid
        # re-extracting facts it has already captured.
        context = ""
        if self.previous_entries:
            context = "\n[Previous Window Memory Entries (for reference to avoid duplication)]\n"
            for entry in self.previous_entries[:3]:
                context += f"- {entry.lossless_restatement}\n"

        prompt = self._build_extraction_prompt(dialogue_text, dialogue_ids, context)

        messages = [
            {
                "role": "system",
                "content": "You are a professional information extraction assistant, skilled at extracting structured, unambiguous information from conversations. You must output valid JSON format."
            },
            {
                "role": "user",
                "content": prompt
            }
        ]
        max_retries = 3
        last_exception = None
        response = None
        for attempt in range(max_retries):
            try:
                # Request strict JSON output when the backend supports it
                # (OpenAI-style response_format).
                response_format = None
                if getattr(config, 'USE_JSON_FORMAT', False):
                    response_format = {"type": "json_object"}

                response = self.llm_client.chat_completion(
                    messages,
                    temperature=0.1,
                    response_format=response_format,
                    max_tokens=2600,
                )

                entries = self._parse_llm_response(response, dialogue_ids)
                return entries

            except Exception as e:
                last_exception = e
                if attempt < max_retries - 1:
                    print(f"Attempt {attempt + 1}/{max_retries} failed to parse LLM response: {e}")
                    print("Retrying...")
                else:
                    print(f"All {max_retries} attempts failed to parse LLM response: {e}")
                    print(f"Raw response: {response[:500] if response else 'No response'}")

        if self.fail_on_extraction_error:
            raise RuntimeError(
                f"Failed to generate memory entries for dialogue ids {dialogue_ids}"
            ) from last_exception
        return []

    def _build_extraction_prompt(
        self,
        dialogue_text: str,
        dialogue_ids: List[int],  # currently unused; kept for traceability
        context: str
    ) -> str:
        """
        Build the LLM extraction prompt.
        """
| return f""" |
| Your task is to extract all valuable information from the following dialogues and convert them into structured memory entries. |
| |
| {context} |
| |
| [Current Window Dialogues] |
| {dialogue_text} |
| |
| [Requirements] |
| 1. **Complete Coverage With Concise Synthesis**: Preserve important information from the dialogues while merging tightly related facts into semantically dense entries instead of many small entries |
| 2. **Force Disambiguation**: Absolutely PROHIBIT using pronouns (he, she, it, they, this, that) and relative time (yesterday, today, last week, tomorrow) |
| 3. **Lossless Information**: Each entry's lossless_restatement must be a complete, independent, understandable sentence |
| 4. **Precise Extraction**: |
| - keywords: Core keywords (names, places, entities, topic words) |
| - timestamp: Absolute time in ISO 8601 format (if explicit time mentioned in dialogue) |
| - location: Specific location name (if mentioned) |
| - persons: All person names mentioned |
| - entities: Companies, products, organizations, etc. |
| - topic: The topic of this information |
| |
| [Output Format] |
| Return a JSON object with an "entries" array, where each element is a memory entry: |
| |
| ```json |
| {{ |
| "entries": [ |
| {{ |
| "lossless_restatement": "Complete unambiguous restatement (must include all subjects, objects, time, location, etc.)", |
| "keywords": ["keyword1", "keyword2", ...], |
| "timestamp": "YYYY-MM-DDTHH:MM:SS or null", |
| "location": "location name or null", |
| "persons": ["name1", "name2", ...], |
| "entities": ["entity1", "entity2", ...], |
| "topic": "topic phrase" |
| }}, |
| ... |
| ] |
| }} |
| ``` |
| |
| [Example] |
| Dialogues: |
| [2025-11-15T14:30:00] Alice: Bob, let's meet at Starbucks tomorrow at 2pm to discuss the new product |
| [2025-11-15T14:31:00] Bob: Okay, I'll prepare the materials |
| |
| Output: |
| ```json |
| {{ |
| "entries": [ |
| {{ |
| "lossless_restatement": "Alice suggested at 2025-11-15T14:30:00 to meet with Bob at Starbucks on 2025-11-16T14:00:00 to discuss the new product.", |
| "keywords": ["Alice", "Bob", "Starbucks", "new product", "meeting"], |
| "timestamp": "2025-11-16T14:00:00", |
| "location": "Starbucks", |
| "persons": ["Alice", "Bob"], |
| "entities": ["new product"], |
| "topic": "Product discussion meeting arrangement" |
| }}, |
| {{ |
| "lossless_restatement": "Bob agreed to attend the meeting and committed to prepare relevant materials.", |
| "keywords": ["Bob", "prepare materials", "agree"], |
| "timestamp": null, |
| "location": null, |
| "persons": ["Bob"], |
| "entities": [], |
| "topic": "Meeting preparation confirmation" |
| }} |
| ] |
| }} |
| ``` |
| |
| Now process the above dialogues. Return ONLY the JSON object, no other explanations. |
| """ |
|
|

    def _parse_llm_response(
        self,
        response: str,
        dialogue_ids: List[int]  # currently unused; kept for traceability
    ) -> List[MemoryEntry]:
        """
        Parse the LLM response into a list of MemoryEntry objects.
        Accepts either an object with an "entries" key or a bare JSON array.
        """
        data = self.llm_client.extract_json(response)

        if isinstance(data, dict):
            data = data.get("entries")

        if not isinstance(data, list):
            raise ValueError(f"Expected JSON array or object with 'entries' but got: {type(data)}")

        entries = []
        for item in data:
            # lossless_restatement is mandatory; a KeyError here propagates to
            # the caller's retry loop. All other fields are optional.
            entry = MemoryEntry(
                lossless_restatement=item["lossless_restatement"],
                keywords=item.get("keywords", []),
                timestamp=item.get("timestamp"),
                location=item.get("location"),
                persons=item.get("persons", []),
                entities=item.get("entities", []),
                topic=item.get("topic")
            )
            entries.append(entry)

        return entries

    def _process_windows_parallel(self, windows: List[List[Dialogue]]):
        """
        Process multiple windows in parallel using a ThreadPoolExecutor.
        """
        all_entries = []

        with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_parallel_workers) as executor:
            # Submit one extraction task per window.
            future_to_window = {}
            for i, window in enumerate(windows):
                dialogue_ids = [d.dialogue_id for d in window]
                future = executor.submit(self._generate_memory_entries_worker, window, dialogue_ids, i + 1)
                future_to_window[future] = (window, i + 1)

            # Collect results as they finish; a failed window is logged and
            # skipped rather than aborting the whole batch.
            for future in concurrent.futures.as_completed(future_to_window):
                window, window_num = future_to_window[future]
                try:
                    entries = future.result()
                    all_entries.extend(entries)
                    print(f"[Parallel Processing] Window {window_num} completed: {len(entries)} entries")
                except Exception as e:
                    print(f"[Parallel Processing] Window {window_num} failed: {e}")

        if all_entries:
            print(f"\n[Parallel Processing] Storing {len(all_entries)} entries to database...")
            self.vector_store.add_entries(all_entries)
            # Keep a recent slice as deduplication context for the next batch.
            self.previous_entries = all_entries[-10:]

        # Count dialogues as processed whether or not entries were produced,
        # matching the sequential path.
        self.processed_count += sum(len(window) for window in windows)

        print(f"[Parallel Processing] Completed processing {len(windows)} windows")

    def _generate_memory_entries_worker(self, window: List[Dialogue], dialogue_ids: List[int], window_num: int) -> List[MemoryEntry]:
        """
        Worker for parallel processing of a single batch (a full window or the
        remaining tail). Mirrors _generate_memory_entries, but returns an empty
        list on failure instead of raising, so one bad batch cannot abort the pool.
        """
        batch_size = len(window)
        batch_type = "full window" if batch_size == self.window_size else "remaining batch"
        print(f"[Worker {window_num}] Processing {batch_type} with {batch_size} dialogues")

        dialogue_text = "\n".join([str(d) for d in window])

        # Same deduplication context as the sequential path. previous_entries
        # reflects the batch that preceded this parallel run, since it is only
        # updated after all workers finish.
        context = ""
        if self.previous_entries:
            context = "\n[Previous Window Memory Entries (for reference to avoid duplication)]\n"
            for entry in self.previous_entries[:3]:
                context += f"- {entry.lossless_restatement}\n"

        prompt = self._build_extraction_prompt(dialogue_text, dialogue_ids, context)

        messages = [
            {
                "role": "system",
                "content": "You are a professional information extraction assistant, skilled at extracting structured, unambiguous information from conversations. You must output valid JSON format."
            },
            {
                "role": "user",
                "content": prompt
            }
        ]

        max_retries = 3
        for attempt in range(max_retries):
            try:
                # Request strict JSON output when the backend supports it.
                response_format = None
                if getattr(config, 'USE_JSON_FORMAT', False):
                    response_format = {"type": "json_object"}

                response = self.llm_client.chat_completion(
                    messages,
                    temperature=0.1,
                    response_format=response_format,
                    max_tokens=2600,
                )

                entries = self._parse_llm_response(response, dialogue_ids)
                print(f"[Worker {window_num}] Generated {len(entries)} entries")
                return entries

            except Exception as e:
                if attempt < max_retries - 1:
                    print(f"[Worker {window_num}] Attempt {attempt + 1}/{max_retries} failed: {e}. Retrying...")
                else:
                    print(f"[Worker {window_num}] All {max_retries} attempts failed: {e}")
        return []
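

if __name__ == "__main__":
    # Minimal, self-contained sketch of the sliding-window segmentation used by
    # add_dialogues_parallel above. Pure arithmetic only: no LLM, vector store,
    # or Dialogue objects are needed, and the parameter values are illustrative.
    window_size, overlap_size = 5, 2
    step_size = max(1, window_size - overlap_size)
    items = list(range(12))  # stand-ins for buffered dialogues

    pos = 0
    while pos + window_size <= len(items):
        print("window:   ", items[pos:pos + window_size])
        pos += step_size

    # The partial tail becomes its own batch, as in add_dialogues_parallel.
    if items[pos:]:
        print("remaining:", items[pos:])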