DocUA committed
Commit d0c3228 · 1 Parent(s): b434018

feat: Update system prompt structure to include cache control in LLMAnalyzer and generate_legal_position functions

Files changed (1)
  1. main.py +4 -6
main.py CHANGED
@@ -472,9 +472,8 @@ class LLMAnalyzer:
             model=self.model_name,
             max_tokens=self.max_tokens or MAX_TOKENS_ANALYSIS,
             temperature=self.temperature,
-            system=SYSTEM_PROMPT,
-            messages=[{"role": "user", "content": prompt}],
-            cache_control={"type": "ephemeral"}
+            system=[{"type": "text", "text": SYSTEM_PROMPT, "cache_control": {"type": "ephemeral"}}],
+            messages=[{"role": "user", "content": prompt}]
         )
         response_text = response.content[0].text

@@ -986,10 +985,9 @@ def generate_legal_position(
     message_params = {
         "model": model_name,
         "max_tokens": max_tokens or MAX_TOKENS_CONFIG["anthropic"],
-        "system": system_prompt,
+        "system": [{"type": "text", "text": system_prompt, "cache_control": {"type": "ephemeral"}}],
         "messages": messages,
-        "temperature": temperature,
-        "cache_control": {"type": "ephemeral"}
+        "temperature": temperature
     }

     # Add thinking config if enabled
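The fix matters because the Anthropic Messages API accepts `cache_control` only inside individual content blocks; passed as a top-level keyword to `messages.create()`, it is rejected. Below is a minimal sketch of the corrected call shape using the official `anthropic` SDK; the model name, prompt text, and `SYSTEM_PROMPT` value are placeholders, not the repo's actual constants:

```python
import anthropic

client = anthropic.Anthropic()  # reads ANTHROPIC_API_KEY from the environment

# Placeholder stand-in for the repo's SYSTEM_PROMPT constant.
SYSTEM_PROMPT = "You are a legal analysis assistant."

response = client.messages.create(
    model="claude-3-5-sonnet-20241022",  # placeholder model name
    max_tokens=1024,
    temperature=0.2,
    # cache_control is attached per content block; marking the system block
    # "ephemeral" lets subsequent calls reuse the cached system prompt prefix.
    system=[
        {
            "type": "text",
            "text": SYSTEM_PROMPT,
            "cache_control": {"type": "ephemeral"},
        }
    ],
    messages=[{"role": "user", "content": "Summarize the dispute."}],
)
print(response.content[0].text)
```

Note that caching only engages once the cacheable prefix exceeds a model-specific minimum length (on the order of a thousand tokens); shorter system prompts are simply sent uncached without error.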