# core/llm_agent.py
# Defines the LLMAgent class for chatroom LLM agents: active listening
# (deciding whether to speak) and public speech generation, with per-call
# logging of every LLM interaction. Based on the design documents under
# docs/detailed_design/.

import asyncio
import json
import re # Added for parsing speech output
import uuid
from datetime import datetime, timezone
from typing import Optional, List, Dict, Any

from services.llm_service import LLMService
from services.db_service import DBService
from core.data_models import LLMLogEntry, ChatMessage
from core import logic_executor # Added for run_in_executor

# --- Prompt Templates ---
# Based on docs/detailed_design/llm_agent_interaction_design.md
SYSTEM_MESSAGE_COMMON_INTRO = """You are an AI agent in a chatroom. Your persona is:
{persona_prompt}"""

USER_LISTENER_TASK_TEMPLATE = """Analyze the recent conversation context provided below.
First, write down your thinking process and analysis of the conversation.
Then, if you decide you want to make a contribution, append a JSON object on a new line or at the end of your thought process, formatted as:
{{"desire_to_speak": true}}

If you decide you do not want to speak, you can either append:
{{"desire_to_speak": false}}
OR you can simply not include any JSON block after your thoughts.

Your primary output should be your natural language thinking process. The JSON object is only for signaling your intent to speak or not. Do not include a "reason" field in the JSON.

Example of output if you want to speak:
I've analyzed the user's question about X and I have a relevant perspective to share regarding Y.
{{"desire_to_speak": true}}

Example of output if you do not want to speak (option 1: with JSON):
The current discussion is on topic Z, which is outside my primary expertise.
{{"desire_to_speak": false}}

Example of output if you do not want to speak (option 2: without JSON):
I'm following the conversation but have nothing to add at this point.

Current conversation context:
{context_messages_json}

Based on this, please provide your thinking process and, if applicable, your JSON intent:
"""

USER_SPEAKER_TASK_TEMPLATE = """You have decided to speak. Your previous reasoning for wanting to speak was:
"{thought_process}"

Now, generate your public message. Your message MUST be formatted as follows:
1.  A JSON code block starting with ```json_tags_speak, containing a list of relevant "tags" for your message.
2.  The actual message content immediately after the closing ``` of the JSON block.

Example of your output format:
```json_tags_speak
{{
  "tags": ["project_update", "milestone_achieved"]
}}
```
The project is progressing well and we've just hit a key milestone!

Current conversation context:
{context_messages_json}

Based on this, please provide your structured message:
"""

# --- Helper Functions ---
def chat_message_to_dict(message):
    """Convert ChatMessage objects to serializable dictionaries."""
    if isinstance(message, ChatMessage):
        return {
            "message_id": message.message_id,
            "participant_id": message.participant_id,
            "sender_nickname": message.sender_nickname,
            "content": message.content,
            "timestamp": str(message.timestamp),
            "tags": message.tags
        }
    elif isinstance(message, dict):
        # Already a dict (e.g., from LLMService format)
        return message
    else:
        # Try to convert any other object to string representation
        return str(message)
        
def prepare_context_for_serialization(context: List[Any]) -> List[Dict]:
    """Convert a list of messages (which may be ChatMessage objects) to serializable dicts."""
    return [chat_message_to_dict(msg) for msg in context]

class LLMAgent:
    def __init__(self, agent_id: str, nickname: str, persona_prompt: str,
                 llm_config_listening: dict, llm_config_speaking: dict,
                 llm_service: LLMService, db_service: DBService):
        self.agent_id = agent_id
        self.nickname = nickname
        self.persona_prompt = persona_prompt
        self.llm_config_listening = llm_config_listening
        self.llm_config_speaking = llm_config_speaking
        self.llm_service = llm_service
        self.db_service = db_service
        self.desire_to_speak: bool = False
        self.current_thought_process: Optional[str] = None # For internal logging or debugging
        print(f"LLMAgent {self.nickname} ({self.agent_id}) initialized with LLM and DB services.")

    async def active_listen(self, llm_messages: List[Dict[str, str]]) -> dict:
        print(f"Agent {self.nickname} is actively listening.")
        
        log_entry_id = str(uuid.uuid4())
        timestamp_call_start = datetime.now(timezone.utc)
        
        log_entry = LLMLogEntry(
            log_entry_id=log_entry_id,
            agent_id=self.agent_id,
            timestamp_call_start=timestamp_call_start,
            llm_call_type="active_listening",
            raw_llm_input_context_summary=json.dumps(llm_messages, ensure_ascii=False), # Log full pre-built context
            timestamp_call_end=timestamp_call_start, # Placeholder
            raw_llm_output="", # Placeholder
            parsed_thinking_process=None,
            parsed_desire_to_speak=None,
            parsed_spoken_tags=None,
            parsed_spoken_message=None,
            associated_public_message_id=None
        )

        parsed_desire_to_speak = False
        parsed_thinking_process = ""

        try:
            # llm_messages are now passed directly from AgentManager.
            # Old prompt construction logic is removed.
            # raw_llm_input_context_summary is already set with the passed llm_messages.

            raw_llm_output_from_service = await self.llm_service.call_llm_api(
                messages=llm_messages,
                model_config=self.llm_config_listening
            )

            if raw_llm_output_from_service is None:
                print(f"WARNING: LLMService returned None for agent {self.nickname}, model {self.llm_config_listening.get('model')}. Treating as empty response.")
                raw_llm_output = "" # Treat None as empty string to prevent .strip() error later
            else:
                raw_llm_output = str(raw_llm_output_from_service) # Ensure it's a string

            log_entry.raw_llm_output = raw_llm_output # Log the (potentially modified) raw_llm_output

            # Initialize with defaults (desire_to_speak=False, thinking_process="")
            # These will be updated based on parsing.

            if isinstance(raw_llm_output, str) and raw_llm_output.startswith("Error:"):
                # Direct error from LLM service
                parsed_thinking_process = f"LLM Service Error: {raw_llm_output}"
                # parsed_desire_to_speak remains False
            elif not raw_llm_output.strip():
                # Empty or whitespace-only output from LLM
                parsed_thinking_process = raw_llm_output # Keep the raw (empty/whitespace) string
                # parsed_desire_to_speak remains False
            else:
                # Attempt to parse LLM's actual response content
                # The new format expects natural language thoughts, optionally followed by a JSON block.
                
                # Try to find a JSON block at the end of the output.
                # Regex looks for { ... } possibly wrapped in ```json ... ``` or ``` ... ``` at the end of the string.
                # It captures the thinking part (group 1) and the JSON part (group 3 or 5).
                
                # Regex to find JSON at the end, potentially with markdown fences
                # Group 1: Thinking process (everything before the JSON block)
                # Group 2: Optional markdown fence (```json or ```)
                # Group 3: JSON content if fenced
                # Group 4: JSON content if not fenced (must be at the end)
                # This regex attempts to find the *last* occurrence of a JSON-like structure.
                
                # Revised regex strategy:
                # 1. Look for a JSON object pattern: \{.*?\}
                # 2. Find the last occurrence of this pattern in the string.
                # 3. Everything before that last JSON object is thinking_process.
                # 4. The last JSON object is the potential intent.

                potential_json_str = None
                thinking_text = raw_llm_output.strip() # Default thinking process is the whole output
                explicit_intent_parsed_successfully = False # Flag to track if a valid desire_to_speak JSON was parsed

                # Find all occurrences of JSON objects
                json_matches = list(re.finditer(r"(\{.*?\})", raw_llm_output, re.DOTALL))

                if json_matches:
                    last_match = json_matches[-1]
                    potential_json_str = last_match.group(1).strip()
                    # Thinking text is everything before the start of the last matched JSON object
                    thinking_text = raw_llm_output[:last_match.start()].strip()
                    
                    # Check if the identified JSON is wrapped in markdown fences that span *only* that JSON
                    # This is a bit complex to do perfectly with regex in one go for all fence types.
                    # For simplicity, we'll first try to parse `potential_json_str`.
                    # If that fails, we'll check if the `raw_llm_output` ends with a fenced version of it.
                    
                    # Attempt to clean common JSON issues from potential_json_str
                    if potential_json_str:
                        # Replace various non-standard quotes with standard double quotes
                        corrected_json_str = potential_json_str.replace("‘", "\"").replace("’", "\"")
                        corrected_json_str = corrected_json_str.replace("“", "\"").replace("”", "\"")
                        # Handle simple cases of Python-style booleans if not quoted
                        corrected_json_str = re.sub(r':\s*True(?=[\s,}])', ': true', corrected_json_str)
                        corrected_json_str = re.sub(r':\s*False(?=[\s,}])', ': false', corrected_json_str)
                        # Be careful with unescaped quotes if we are trying to fix them globally.
                        # This part might need more sophisticated handling if LLMs produce very broken JSON.
                        # For now, focusing on quote types and Python bools.
                        
                        try:
                            parsed_output = json.loads(corrected_json_str)
                            if isinstance(parsed_output, dict):
                                desire_val = parsed_output.get("desire_to_speak")
                                if isinstance(desire_val, bool):
                                    parsed_desire_to_speak = desire_val
                                    explicit_intent_parsed_successfully = True
                                elif isinstance(desire_val, str):
                                    desire_val_lower = desire_val.lower()
                                    if desire_val_lower == 'true':
                                        parsed_desire_to_speak = True
                                        explicit_intent_parsed_successfully = True
                                    elif desire_val_lower == 'false':
                                        parsed_desire_to_speak = False
                                        explicit_intent_parsed_successfully = True
                                    # If string is not 'true' or 'false', intent remains ambiguous (default False)
                                
                                # If desire_to_speak was NOT successfully parsed as a clear boolean intent,
                                # then this JSON was not a valid intent signal.
                                # It's part of the thinking process.
                                if not explicit_intent_parsed_successfully:
                                    thinking_text = raw_llm_output.strip() # Reset thinking to full output
                                    # parsed_desire_to_speak remains its default (False) or prior state if somehow set
                            else:
                                # Valid JSON, but not a dictionary. Treat as part of thinking.
                                thinking_text = raw_llm_output.strip()
                                # parsed_desire_to_speak remains False
                        except json.JSONDecodeError:
                            # JSON parsing failed. The potential_json_str was not valid JSON.
                            # It's part of the thinking process.
                            thinking_text = raw_llm_output.strip()
                            # parsed_desire_to_speak remains False
                
                # The final parsed_thinking_process is the thinking_text derived above.
                parsed_thinking_process = thinking_text
                
                # If thinking_process is empty AND desire is False, AND no explicit valid intent was parsed,
                # it means LLM might have only outputted a non-intent JSON or nothing useful.
                # Use raw_llm_output for thinking process in such edge cases.
                if not parsed_thinking_process.strip() and not parsed_desire_to_speak and not explicit_intent_parsed_successfully:
                    parsed_thinking_process = raw_llm_output.strip()


            self.desire_to_speak = parsed_desire_to_speak
            self.current_thought_process = parsed_thinking_process # This is now the natural language part

            log_entry.parsed_thinking_process = self.current_thought_process
            log_entry.parsed_desire_to_speak = self.desire_to_speak
            
            # The returned "reason" is now the parsed_thinking_process from the LLM's natural language output
            return {"desire_to_speak": self.desire_to_speak, "reason": self.current_thought_process}

        except Exception as e:
            # This catches errors in the agent's own logic (e.g., prompt formatting)
            # or unexpected errors from llm_service.call_llm_api itself if it raises.
            error_message = f"Error during active_listen for agent {self.nickname}: {type(e).__name__} - {e}"
            print(error_message)
            log_entry.raw_llm_output = log_entry.raw_llm_output or f"Agent-side error: {str(e)}"
            # Ensure parsed_thinking_process reflects the error if not already set by LLM error handling
            current_raw_output = log_entry.raw_llm_output if log_entry.raw_llm_output else ""
            if not (isinstance(current_raw_output, str) and current_raw_output.startswith("Error:")):
                 parsed_thinking_process = f"Agent-side error: {str(e)}"

            self.desire_to_speak = False # Default on error
            self.current_thought_process = parsed_thinking_process
            
            # Update log entry with error state
            log_entry.parsed_thinking_process = self.current_thought_process
            log_entry.parsed_desire_to_speak = self.desire_to_speak

            return {"desire_to_speak": False, "reason": self.current_thought_process}
        finally:
            log_entry.timestamp_call_end = datetime.now(timezone.utc)
            try:
                await logic_executor.run_in_executor(self.db_service.add_llm_agent_log_entry, log_entry)
            except Exception as db_err:
                print(f"WARNING: Failed to log agent activity for {self.agent_id} via run_in_executor: {db_err}")
            print(f"Agent {self.nickname} active_listen finished. Log {log_entry_id} saved. Desire to speak: {self.desire_to_speak}, Reason: {self.current_thought_process[:100]}...")


    async def generate_public_speech(self, llm_messages: List[Dict[str, str]]) -> dict:
        print(f"Agent {self.nickname} is generating public speech.")
        
        log_entry_id = str(uuid.uuid4())
        timestamp_call_start = datetime.now(timezone.utc)

        log_entry = LLMLogEntry(
            log_entry_id=log_entry_id,
            agent_id=self.agent_id,
            timestamp_call_start=timestamp_call_start,
            llm_call_type="public_speech",
            raw_llm_input_context_summary=json.dumps(llm_messages, ensure_ascii=False), # Log full pre-built context
            timestamp_call_end=timestamp_call_start, # Placeholder
            raw_llm_output="", # Placeholder
            parsed_thinking_process=self.current_thought_process,
            parsed_desire_to_speak=None,
            parsed_spoken_tags=None,
            parsed_spoken_message=None,
            associated_public_message_id=None
        )

        # Initialize defaults for parsed content outside try, in case of early exit from try.
        # These will be updated if LLM call is successful and parsing occurs.
        parsed_tags = ["error_agent_logic_default"] # Default error tag
        parsed_message = "Error: Default message before LLM call attempt." # Default error message

        try:
            # llm_messages are now passed directly from AgentManager.
            # Old prompt construction logic is removed.
            # raw_llm_input_context_summary is already set with the passed llm_messages.
    
            raw_llm_output = await self.llm_service.call_llm_api(
                messages=llm_messages,
                model_config=self.llm_config_speaking
            )
            log_entry.raw_llm_output = raw_llm_output # Log raw output immediately
    
            # Reset defaults here, as we are now processing a potentially valid LLM output
            parsed_tags = []
            parsed_message = f"Error: Default message after LLM call, before parsing. Raw: {str(raw_llm_output)[:100]}..."


            # Step 1: Check for direct LLM Service errors first
            if isinstance(raw_llm_output, str) and raw_llm_output.startswith("Error:"):
                parsed_message = f"LLM Service Error: {raw_llm_output}"
                parsed_tags = ["error_llm_service"]
            else:
                # Step 2: Attempt to parse the structured ```json_tags_speak ... ``` format
                # Regex to find the json_tags_speak block and the message after it.
                # - (?s) is equivalent to re.DOTALL for the whole pattern.
                # - (?:json_tags_speak|json) allows for either "json_tags_speak" or just "json".
                # - \s* handles optional whitespace.
                # - (\{.*?\}) captures the JSON content non-greedily.
                # - (.*) captures the rest as the message.
                # - The regex tries to be flexible with newlines around the JSON block and message.
                match = re.search(r"(?s)```(?:json_tags_speak|json)\s*(\{.*?\})\s*```\s*(.*)", raw_llm_output)

                if match:
                    tags_json_str = match.group(1).strip()
                    # The message is everything after the JSON block, stripped of leading/trailing whitespace.
                    # If the LLM only returns the JSON block and nothing after, group(2) might be empty or just whitespace.
                    parsed_message = match.group(2).strip()
                    
                    try:
                        tags_data = json.loads(tags_json_str)
                        if isinstance(tags_data, dict) and "tags" in tags_data and isinstance(tags_data["tags"], list):
                            parsed_tags = [str(tag) for tag in tags_data["tags"] if tag is not None] # Ensure tags are strings and filter out None
                            if not parsed_message and not parsed_tags: # If message is empty and tags are empty after parsing
                                 parsed_message = f"Warning: LLM provided valid JSON tags block but no subsequent message text and no tags. Raw JSON: {tags_json_str}"
                            elif not parsed_message and parsed_tags: # If message is empty but tags are present
                                 parsed_message = f"Warning: LLM provided valid JSON tags but no subsequent message text. Tags: {parsed_tags}"
                        else:
                            # JSON was valid, but the structure for tags was not as expected (e.g., not a dict, or "tags" key missing/not a list)
                            parsed_tags = ["error_parsing_tags_json_structure"]
                            # Keep the parsed_message from match.group(2) if it exists, otherwise indicate structure error.
                            if parsed_message:
                                parsed_message = f"Warning: Tags JSON structure incorrect. Message: {parsed_message}"
                            else:
                                parsed_message = f"Error: Tags JSON structure incorrect and no message text found. Raw JSON: {tags_json_str}"
                    except json.JSONDecodeError:
                        # The content within the ```json_tags_speak ... ``` block was not valid JSON.
                        parsed_tags = ["error_decoding_tags_json"]
                        # Keep the parsed_message from match.group(2) if it exists, otherwise indicate JSON decode error.
                        if parsed_message:
                             parsed_message = f"Warning: Failed to decode tags JSON. Message: {parsed_message}"
                        else:
                            parsed_message = f"Error: Failed to decode tags JSON and no message text found. Bad JSON: {tags_json_str[:100]}..."
                else:
                    # Step 3: Fallback if the specific ```json_tags_speak``` block is not found.
                    # Treat the entire raw_llm_output as the message and assign no tags.
                    # This handles cases where the LLM fails to follow the format strictly.
                    parsed_message = raw_llm_output.strip()
                    parsed_tags = [] # No tags could be parsed
                    if not parsed_message: # If raw output was empty or just whitespace
                        parsed_message = "Error: LLM output was empty or whitespace, and did not match expected format."
                        parsed_tags = ["error_empty_llm_output"]
                    else:
                        print(f"Agent {self.nickname} speaker output did not match expected json_tags_speak format. Treating raw output as message.")

            log_entry.parsed_spoken_tags = parsed_tags
            log_entry.parsed_spoken_message = parsed_message
            
            return {"tags": parsed_tags, "message": parsed_message, "log_entry_id": log_entry_id}

        except Exception as e:
            error_message = f"Error during generate_public_speech for agent {self.nickname}: {type(e).__name__} - {e}"
            print(error_message)
            log_entry.raw_llm_output = log_entry.raw_llm_output or f"Agent-side error: {str(e)}"
            
            parsed_message = f"Agent-side error generating speech: {str(e)}"
            parsed_tags = ["error_agent_logic"]

            log_entry.parsed_spoken_message = parsed_message
            log_entry.parsed_spoken_tags = parsed_tags
            return {"tags": parsed_tags, "message": parsed_message, "log_entry_id": log_entry_id}
        finally:
            log_entry.timestamp_call_end = datetime.now(timezone.utc)
            try:
                await logic_executor.run_in_executor(self.db_service.add_llm_agent_log_entry, log_entry)
            except Exception as db_err:
                print(f"WARNING: Failed to log agent activity for {self.agent_id} via run_in_executor: {db_err}")
            print(f"Agent {self.nickname} generate_public_speech finished. Log {log_entry_id} saved. Message: {log_entry.parsed_spoken_message[:100]}... Tags: {log_entry.parsed_spoken_tags}")

