Ayona committed · Commit e8ea072 · 1 Parent(s): 240f6e7
extend provider-specific prompt caching to analysis path
main.py
CHANGED
@@ -112,11 +112,48 @@ def _build_openai_reasoning_params(
 # ============ End OpenAI Reasoning Helpers ============
 
 # ============ Prompt Assembly Helpers ============
-_DYNAMIC_PLACEHOLDERS = ("{court_decision_text}", "{comment}")
+_GENERATION_DYNAMIC_PLACEHOLDERS = ("{court_decision_text}", "{comment}")
+_ANALYSIS_DYNAMIC_TAGS = ("<new_decision>", "<clarifying_question>", "<legal_positions>")
 _DEFAULT_COMMENT_TEXT = "Коментар відсутній"
 
 
-def _ensure_dynamic_placeholders(lp_prompt: str) -> str:
+def _split_prompt_by_placeholders(prompt_text: str, placeholders: Tuple[str, ...]) -> Tuple[str, str]:
+    """Split prompt into static prefix and dynamic suffix starting from first dynamic placeholder."""
+    positions = [prompt_text.find(p) for p in placeholders if p in prompt_text]
+    if not positions:
+        return prompt_text.strip(), ""
+
+    split_at = min(positions)
+    static_part = prompt_text[:split_at].strip()
+    dynamic_part = prompt_text[split_at:].strip()
+    return static_part, dynamic_part
+
+
+def _compile_prompt_blocks(
+    system_prompt: str,
+    prompt_template: str,
+    format_values: Dict[str, str],
+    placeholders: Tuple[str, ...],
+) -> Dict[str, str]:
+    """Compile provider-ready prompt blocks without changing UI-facing prompt settings."""
+    static_part, dynamic_part = _split_prompt_by_placeholders(prompt_template, placeholders)
+    merged_system_prompt = system_prompt.strip()
+    if static_part:
+        merged_system_prompt = f"{merged_system_prompt}\n\n{static_part}" if merged_system_prompt else static_part
+
+    full_user_prompt = prompt_template.format(**format_values)
+    dynamic_payload = dynamic_part.format(**format_values) if dynamic_part else full_user_prompt
+
+    return {
+        "system_prompt": merged_system_prompt,
+        "user_prompt": dynamic_payload,
+        "full_user_prompt": full_user_prompt,
+        "static_prompt": static_part,
+        "dynamic_template": dynamic_part,
+    }
+
+
+def _ensure_generation_placeholders(lp_prompt: str) -> str:
     """Ensure dynamic placeholders exist in the legal-position prompt."""
     updated = lp_prompt
     if "{court_decision_text}" not in updated:
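Note on the new splitter: the split lands at the first placeholder itself, not at the tag that wraps it, so an opening XML tag immediately before a placeholder stays in the static prefix. A minimal check, assuming main.py is importable and using an invented template:

from main import _split_prompt_by_placeholders

template = (
    "You are a legal assistant. Follow the rules below.\n\n"
    "<court_decision>\n{court_decision_text}\n</court_decision>\n\n"
    "<comment>\n{comment}\n</comment>"
)
static, dynamic = _split_prompt_by_placeholders(
    template, ("{court_decision_text}", "{comment}")
)
# static  == "You are a legal assistant. Follow the rules below.\n\n<court_decision>"
# dynamic == "{court_decision_text}\n</court_decision>\n\n<comment>\n{comment}\n</comment>"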
@@ -127,52 +164,42 @@ def _ensure_dynamic_placeholders(lp_prompt: str) -> str:
     return updated
 
 
-def _split_legal_position_prompt(lp_prompt: str) -> Tuple[str, str]:
-    """Split prompt into static prefix and dynamic suffix starting from first placeholder."""
-    positions = [lp_prompt.find(p) for p in _DYNAMIC_PLACEHOLDERS if p in lp_prompt]
-    if not positions:
-        return lp_prompt.strip(), ""
-
-    split_at = min(positions)
-    static_part = lp_prompt[:split_at].strip()
-    dynamic_part = lp_prompt[split_at:].strip()
-    return static_part, dynamic_part
-
-
 def _compile_generation_prompt_blocks(
     system_prompt: str,
     lp_prompt: str,
     court_decision_text: str,
     comment: str,
 ) -> Dict[str, str]:
-    """Compile provider-ready prompt blocks …"""
-    prepared_lp_prompt = _ensure_dynamic_placeholders(lp_prompt)
-    static_lp, dynamic_lp = _split_legal_position_prompt(prepared_lp_prompt)
-
-    if not dynamic_lp:
-        dynamic_lp = "<court_decision>\n{court_decision_text}\n</court_decision>\n\n<comment>\n{comment}\n</comment>"
-
+    """Compile provider-ready prompt blocks for legal position generation."""
+    prepared_lp_prompt = _ensure_generation_placeholders(lp_prompt)
     final_comment = comment if comment else _DEFAULT_COMMENT_TEXT
-    …
-    …
-    …
+    return _compile_prompt_blocks(
+        system_prompt=system_prompt,
+        prompt_template=prepared_lp_prompt,
+        format_values={
+            "court_decision_text": court_decision_text,
+            "comment": final_comment,
+        },
+        placeholders=_GENERATION_DYNAMIC_PLACEHOLDERS,
     )
 
-    merged_system_prompt = system_prompt.strip()
-    if static_lp:
-        merged_system_prompt = f"{merged_system_prompt}\n\n{static_lp}" if merged_system_prompt else static_lp
 
-    …
-    …
-    …
-    …
+def _compile_analysis_prompt_blocks(
+    system_prompt: str,
+    full_prompt: str,
+) -> Dict[str, str]:
+    """Compile provider-ready prompt blocks for precedent analysis from the already formatted prompt."""
+    static_part, dynamic_part = _split_prompt_by_placeholders(full_prompt, _ANALYSIS_DYNAMIC_TAGS)
+    merged_system_prompt = system_prompt.strip()
+    if static_part:
+        merged_system_prompt = f"{merged_system_prompt}\n\n{static_part}" if merged_system_prompt else static_part
 
     return {
         "system_prompt": merged_system_prompt,
-        "user_prompt": …
-        "full_user_prompt": …
-        "static_prompt": …
-        "dynamic_template": …
+        "user_prompt": dynamic_part if dynamic_part else full_prompt,
+        "full_user_prompt": full_prompt,
+        "static_prompt": static_part,
+        "dynamic_template": dynamic_part,
     }
 # ============ End Prompt Assembly Helpers ============
 
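The generation path now delegates to the shared compiler. A sketch of the resulting block shape, with invented argument values (an empty comment falls back to _DEFAULT_COMMENT_TEXT, Ukrainian for "no comment provided"):

from main import _compile_generation_prompt_blocks

blocks = _compile_generation_prompt_blocks(
    system_prompt="Base system instructions.",
    lp_prompt=(
        "Formatting rules for the legal position.\n\n"
        "<court_decision>\n{court_decision_text}\n</court_decision>\n\n"
        "<comment>\n{comment}\n</comment>"
    ),
    court_decision_text="Decision text ...",
    comment="",  # empty, so the compiled prompt gets _DEFAULT_COMMENT_TEXT instead
)
# blocks["system_prompt"]    == base instructions + "\n\n" + static prefix (ends at "<court_decision>")
# blocks["user_prompt"]      == the dynamic tail with the real values substituted
# blocks["full_user_prompt"] == the entire template, fully formatted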
@@ -454,13 +481,20 @@ class LLMAnalyzer:
         # Determine model name and if it's a reasoning model
         model_val = self.model_name.value if hasattr(self.model_name, "value") else str(self.model_name)
         is_reasoning_model = any(m in model_val.lower() for m in ["gpt-4.1", "gpt-4.5", "gpt-5", "o1", "o3"])
-
+
+        analysis_blocks = _compile_analysis_prompt_blocks(
+            system_prompt=SYSTEM_PROMPT,
+            full_prompt=prompt,
+        )
+        compiled_system_prompt = analysis_blocks["system_prompt"]
+        final_user_prompt = analysis_blocks["user_prompt"]
+
         # Use developer role for newer models
         role = "developer" if is_reasoning_model else "system"
-
+
         messages = [
-            ChatMessage(role=role, content=SYSTEM_PROMPT),
-            ChatMessage(role="user", content=prompt)
+            ChatMessage(role=role, content=compiled_system_prompt),
+            ChatMessage(role="user", content=final_user_prompt)
         ]
 
         response_format = {
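Unlike the generation path, the analysis prompt arrives here already formatted, so the split keys on the literal tags in _ANALYSIS_DYNAMIC_TAGS rather than on {} placeholders. A sketch with invented prompt text:

from main import _compile_analysis_prompt_blocks

full_prompt = (
    "Static analysis instructions that never change between requests.\n\n"
    "<new_decision>\nDecision text ...\n</new_decision>\n\n"
    "<legal_positions>\n[ ... ]\n</legal_positions>"
)
blocks = _compile_analysis_prompt_blocks(
    system_prompt="Base system prompt.",
    full_prompt=full_prompt,
)
# blocks["system_prompt"] == "Base system prompt.\n\nStatic analysis instructions that never change between requests."
# blocks["user_prompt"]   == everything from "<new_decision>" onward

Keeping the static half in the system/developer message keeps the request prefix byte-identical across calls, which, as I understand OpenAI's automatic prompt caching, is what lets sufficiently long stable prefixes register cache hits.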
@@ -489,7 +523,7 @@ class LLMAnalyzer:
         )
 
         # Log full prompts in debug mode
-        _log_prompt("openai-analyzer", model_val, SYSTEM_PROMPT, prompt)
+        _log_prompt("openai-analyzer", model_val, compiled_system_prompt, final_user_prompt)
 
         # Retry logic for OpenAI analysis
         max_retries = 3
@@ -580,14 +614,21 @@ class LLMAnalyzer:
     async def _analyze_with_anthropic(self, prompt: str, response_schema: dict) -> str:
         """Analyze text using Anthropic."""
        try:
-            …
+            analysis_blocks = _compile_analysis_prompt_blocks(
+                system_prompt=SYSTEM_PROMPT,
+                full_prompt=prompt,
+            )
+            compiled_system_prompt = analysis_blocks["system_prompt"]
+            final_user_prompt = analysis_blocks["user_prompt"]
+
+            _log_prompt("anthropic-analyzer", str(self.model_name), compiled_system_prompt, final_user_prompt)
 
             message_params = {
                 "model": self.model_name,
                 "max_tokens": self.max_tokens or MAX_TOKENS_ANALYSIS,
                 "temperature": self.temperature,
-                "system": [{"type": "text", "text": SYSTEM_PROMPT}],
-                "messages": [{"role": "user", "content": prompt}]
+                "system": [{"type": "text", "text": compiled_system_prompt, "cache_control": {"type": "ephemeral"}}],
+                "messages": [{"role": "user", "content": final_user_prompt}]
             }
 
             if self.thinking_enabled and "claude" in str(self.model_name).lower():
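On the Anthropic side the caching is explicit: the cache_control marker on the system block is what opts the stable prefix into provider-side caching. A standalone sketch of the same request shape (model id and texts are placeholders; assumes the anthropic SDK with ANTHROPIC_API_KEY set):

import anthropic

client = anthropic.Anthropic()
response = client.messages.create(
    model="claude-3-7-sonnet-latest",  # placeholder model id
    max_tokens=1024,
    temperature=0.0,
    system=[
        {
            "type": "text",
            "text": "Long, stable analysis instructions ...",  # must stay identical across calls
            "cache_control": {"type": "ephemeral"},
        }
    ],
    messages=[{"role": "user", "content": "Dynamic decision text ..."}],
)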
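Continuing the sketch above, one way to confirm the extended caching actually takes effect is the usage block on the response: the first call should report tokens written to the cache, and repeat calls with the same prefix should report tokens read from it. Anthropic only caches prefixes above a per-model minimum length (on the order of 1024 tokens, from memory, so worth re-checking), meaning a short compiled system prompt will silently skip the cache.

print("cache write:", getattr(response.usage, "cache_creation_input_tokens", None))
print("cache read: ", getattr(response.usage, "cache_read_input_tokens", None))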