# core/agent_manager.py
# Manages all LLM Agent instances and coordinates their "active listening"
# (private deliberation) and "public speaking" (turn selection) phases.

import asyncio
import random
import json # Added for serializing context parts
from typing import Dict, List, TYPE_CHECKING

from core.llm_agent import LLMAgent
from core.data_models import ParticipantConfig, ChatMessage, WhiteboardItem # Added ChatMessage, WhiteboardItem
from services.db_service import DBService
from services.llm_service import LLMService
from services.file_service import FileService # Added FileService

if TYPE_CHECKING:
    from core.data_models import ChatroomState # Forward declaration for type hinting

class AgentManager:
    """Owns all LLMAgent instances and coordinates the two phases of a turn.

    Phase 1 ("active listening"): every agent privately analyzes the
    conversation and sets its own ``desire_to_speak`` flag.
    Phase 2 ("public speaking"): one willing agent is selected at random;
    the caller then builds its context and asks it to generate speech.
    """

    def __init__(self, db_service: DBService, llm_service: LLMService, file_service: FileService):
        """Store service handles and eagerly load agent configs from the DB.

        Args:
            db_service: Persistence layer for participant configurations.
            llm_service: Client the agents use for LLM completions.
            file_service: Used to read whiteboard files into the LLM context.
        """
        self.agents: Dict[str, LLMAgent] = {}
        self.db_service = db_service
        self.llm_service = llm_service
        self.file_service = file_service
        print("AgentManager initialized with DB, LLM, and File services.")
        self.load_agents_from_db()

    def load_agents_from_db(self):
        """Loads LLM agent configurations from the database and instantiates them.

        Non-LLM participants (e.g. humans) are ignored. LLM-agent configs that
        are missing a persona prompt or either LLM config are skipped with a
        warning rather than instantiated half-configured.
        """
        print("AgentManager: Loading agent configurations from DB...")
        # Synchronous DB call is acceptable here: this runs during construction,
        # before any event loop is started.
        all_participant_configs = self.db_service.get_all_participant_configs()

        loaded_agents_count = 0
        for config in all_participant_configs:
            if config.role != "llm_agent":
                continue
            if not config.llm_persona_prompt or not config.llm_config_listening or not config.llm_config_speaking:
                print(f"Warning: Skipping LLM agent {config.nickname} ({config.participant_id}) due to missing persona or LLM configs.")
                continue

            agent = LLMAgent(
                agent_id=config.participant_id,
                nickname=config.nickname,
                persona_prompt=config.llm_persona_prompt,
                llm_config_listening=config.llm_config_listening,  # type: ignore
                llm_config_speaking=config.llm_config_speaking,  # type: ignore
                llm_service=self.llm_service,
                db_service=self.db_service
            )
            self.agents[agent.agent_id] = agent
            loaded_agents_count += 1
        print(f"AgentManager: Loaded {loaded_agents_count} LLM agents from DB.")

    def get_agent_by_id(self, agent_id: str) -> LLMAgent | None:
        """Return the managed agent with the given id, or None if unknown."""
        return self.agents.get(agent_id)

    async def _build_llm_context(self, agent: LLMAgent, chatroom_state: 'ChatroomState', task_specific_prompt: str, recent_history_for_context: List[ChatMessage]) -> List[Dict[str, str]]:
        """Assemble the message list sent to the LLM for one agent.

        The context is always five messages, in order:
          1. system — the agent's persona plus fixed output-format rules
             (JSON with 'thought_process', 'desire_to_speak', etc.);
          2. system — chatroom state summary (participants and global tags);
          3. system — whiteboard file contents (or an "empty" notice);
          4. system — recent conversation history, most recent last;
          5. user — the task-specific prompt (listening vs. speaking).

        Args:
            agent: The agent whose persona seeds the context.
            chatroom_state: Current room state (participants, tags, whiteboard).
            task_specific_prompt: The final user-role instruction.
            recent_history_for_context: Messages to include as history.

        Returns:
            A list of ``{"role": ..., "content": ...}`` dicts.
        """
        llm_messages: List[Dict[str, str]] = []

        # Message 1 (System): persona and fixed rules.
        # TODO: Add more sophisticated fixed rules if needed
        persona_rules_prompt = f"{agent.persona_prompt}\n\nGeneral Rules:\n- Provide your response in JSON format with fields: 'thought_process', 'desire_to_speak' (boolean), 'spoken_message_content' (if speaking), 'spoken_message_tags' (list of strings, if speaking)."
        llm_messages.append({"role": "system", "content": persona_rules_prompt})

        # Message 2 (System): chatroom state (participants and global tags).
        participants_info = [{"nickname": p.nickname, "role": p.role, "id": p.participant_id} for p in chatroom_state.participants]
        global_tags_info = [{"tag_name": gt.tag_name, "count": gt.occurrence_count} for gt in chatroom_state.global_tags]
        chatroom_state_summary = {
            "participants": participants_info,
            "global_tags": global_tags_info
        }
        llm_messages.append({"role": "system", "content": f"Current chatroom state:\n{json.dumps(chatroom_state_summary, ensure_ascii=False, indent=2)}"})

        # Message 3 (System): whiteboard items.
        if chatroom_state.whiteboard_items:
            # Fetch the loop once, outside the item loop. get_running_loop()
            # is the non-deprecated form inside a coroutine.
            loop = asyncio.get_running_loop()
            whiteboard_content_list = []
            for item in chatroom_state.whiteboard_items:
                # File reads are blocking; run them on the default executor so
                # the event loop stays responsive.
                content, error = await loop.run_in_executor(
                    None, self.file_service.read_file_content, item.full_local_path
                )
                entry: Dict[str, str] = {
                    "file_name": item.display_name,
                    "path": item.full_local_path,
                }
                if error:
                    entry["error"] = error
                else:
                    # MVP: truncate long content so the context stays bounded.
                    entry["content_preview"] = content[:2000] + "..." if content and len(content) > 2000 else content
                whiteboard_content_list.append(entry)
            llm_messages.append({"role": "system", "content": f"Whiteboard items:\n{json.dumps(whiteboard_content_list, ensure_ascii=False, indent=2)}"})
        else:
            llm_messages.append({"role": "system", "content": "Whiteboard is currently empty."})

        # Message 4 (System): recent history.
        formatted_history = [
            {
                "sender_nickname": msg.sender_nickname,
                "content": msg.content,
                "timestamp": msg.timestamp.isoformat(),
                "tags": msg.tags,
            }
            for msg in recent_history_for_context
        ]
        llm_messages.append({"role": "system", "content": f"Recent conversation history (most recent last):\n{json.dumps(formatted_history, ensure_ascii=False, indent=2)}"})

        # Message 5 (User): task-specific prompt.
        llm_messages.append({"role": "user", "content": task_specific_prompt})

        return llm_messages

    async def trigger_active_listening_for_all(self, chatroom_state: 'ChatroomState', recent_history: List[ChatMessage]):
        """Run the "active listening" phase for every managed agent concurrently.

        Each agent receives its own freshly-built context and is expected to
        update its ``desire_to_speak`` flag as a side effect of
        ``active_listen``; no public messages are produced in this phase.
        """
        print("AgentManager triggering active listening for all agents.")
        listening_task_prompt = "Analyze the recent conversation, your persona, and the provided context. Decide if you want to speak. If so, formulate your thoughts but do not generate a public message yet."

        active_listen_tasks = []
        for agent_id, agent in self.agents.items():
            # Build context for this specific agent.
            llm_context = await self._build_llm_context(
                agent=agent,
                chatroom_state=chatroom_state,
                task_specific_prompt=listening_task_prompt,
                recent_history_for_context=recent_history
            )
            active_listen_tasks.append(agent.active_listen(llm_messages=llm_context))
            print(f"  Prepared listening task for {agent.nickname}")

        # Run all active_listen tasks concurrently.
        await asyncio.gather(*active_listen_tasks)
        print("AgentManager: All active listening tasks completed.")

    async def orchestrate_speaking_turn(self, chatroom_state: 'ChatroomState | None' = None, recent_history: 'List[ChatMessage] | None' = None) -> LLMAgent | None:
        """Select one agent that wants to speak, or None if nobody does.

        This is selection only: the caller is responsible for building the
        speaking context (via ``_build_llm_context``) and calling
        ``speaker.generate_public_speech(...)``, and for resetting the
        speaker's ``desire_to_speak`` after a successful speech.

        Args:
            chatroom_state: Accepted for backward compatibility; unused (the
                previous implementation built a context here and discarded it).
            recent_history: Accepted for backward compatibility; unused.

        Returns:
            The selected agent, or None when no agent's ``desire_to_speak``
            flag is set.
        """
        print("AgentManager orchestrating speaking turn.")

        willing_agents = [agent for agent in self.agents.values() if agent.desire_to_speak]

        if not willing_agents:
            print("  No agent currently wants to speak.")
            return None

        # Simple fairness policy for the MVP: uniform random among the willing.
        selected_agent = random.choice(willing_agents)
        print(f"  Agent {selected_agent.nickname} selected to speak.")
        return selected_agent


if __name__ == '__main__':
    # Smoke test: exercises agent loading, listening, and speaker selection
    # against a throwaway SQLite DB and a dummy whiteboard file.
    import os  # for test-file and DB cleanup below

    # Set up DBService against a dedicated test database.
    db_service_instance = DBService(db_name="test_agent_manager.db")
    db_service_instance.connect()
    db_service_instance.create_tables_if_not_exist()

    # Seed the test DB with two LLM agents and one human participant; the
    # human must be skipped by load_agents_from_db.
    agent_configs_for_db = [
        ParticipantConfig(
            participant_id="agent_001_am_test",
            nickname="TestEchoBot",
            role="llm_agent",
            llm_persona_prompt="You echo things.",
            llm_config_listening={"model": "test_listen_model"},
            llm_config_speaking={"model": "test_speak_model"}
        ),
        ParticipantConfig(
            participant_id="human_001_am_test",
            nickname="TestHuman",
            role="human"
        ),
        ParticipantConfig(
            participant_id="agent_002_am_test",
            nickname="TestThinker",
            role="llm_agent",
            llm_persona_prompt="You think deeply.",
            llm_config_listening={"model": "think_listen"},
            llm_config_speaking={"model": "think_speak"}
        )
    ]
    for cfg in agent_configs_for_db:
        db_service_instance.add_or_update_participant_config(cfg)

    llm_service_instance = LLMService()
    file_service_instance = FileService()

    manager = AgentManager(db_service=db_service_instance, llm_service=llm_service_instance, file_service=file_service_instance)

    print(f"\nAgents loaded in manager: {list(manager.agents.keys())}")
    retrieved = manager.get_agent_by_id("agent_001_am_test")
    if retrieved:
        print(f"Retrieved agent: {retrieved.nickname}, Persona: {retrieved.persona_prompt}")

    async def run_async_test_ops(manager_instance: AgentManager):
        """Drive one listening round and one speaker-selection round."""
        print("\n--- Running Async Test Operations ---")
        # In a real run, active_listen would set desire_to_speak; for the
        # test we force it on one agent so selection is deterministic enough.
        test_agent_id_to_speak = "agent_002_am_test"
        if test_agent_id_to_speak in manager_instance.agents:
            manager_instance.agents[test_agent_id_to_speak].desire_to_speak = True
            print(f"Manually set {manager_instance.agents[test_agent_id_to_speak].nickname} desire_to_speak to True for testing.")

        # Lightweight stand-ins for the real data models, carrying only the
        # attributes _build_llm_context actually reads.
        class MockParticipant:
            def __init__(self, id, nickname, role):
                self.participant_id = id
                self.nickname = nickname
                self.role = role

        class MockGlobalTag:
            def __init__(self, name, count):
                self.tag_name = name
                self.occurrence_count = count

        class MockWhiteboardItem:
            def __init__(self, id, display_name, full_local_path, uploader_id):
                self.reference_id = id
                self.display_name = display_name
                self.full_local_path = full_local_path  # must be a real path for the test
                self.uploader_participant_id = uploader_id

        class MockChatroomState:
            def __init__(self):
                self.participants = [
                    MockParticipant("human_001_am_test", "TestHuman", "human"),
                    MockParticipant("agent_001_am_test", "TestEchoBot", "llm_agent"),
                    MockParticipant("agent_002_am_test", "TestThinker", "llm_agent")
                ]
                self.global_tags = [MockGlobalTag("weather", 1), MockGlobalTag("discussion", 1)]

                # Create a real on-disk file so the whiteboard read path is exercised.
                self.test_wb_file_path = "test_am_wb_file.txt"
                with open(self.test_wb_file_path, "w", encoding="utf-8") as f:
                    f.write("Content of the test whiteboard file for AgentManager.")

                self.whiteboard_items = [
                    MockWhiteboardItem("wb_item_1", "Test Document", self.test_wb_file_path, "human_001_am_test")
                ]

            def cleanup_test_files(self):
                if os.path.exists(self.test_wb_file_path):
                    os.remove(self.test_wb_file_path)

        mock_chatroom_state = MockChatroomState()

        test_recent_history = [
            ChatMessage(participant_id="human_001_am_test", sender_nickname="TestHuman", content="Hello everyone, any thoughts on the weather?"),
            ChatMessage(participant_id="agent_001_am_test", sender_nickname="TestEchoBot", content="The weather is indeed a topic.")
        ]

        print(f"Triggering active listening with mock chatroom_state and recent_history")
        await manager_instance.trigger_active_listening_for_all(
            chatroom_state=mock_chatroom_state,  # type: ignore
            recent_history=test_recent_history
        )

        print("Orchestrating speaking turn...")
        # BUGFIX: the previous version called orchestrate_speaking_turn() with
        # no arguments, which raises TypeError against its declared signature.
        speaker_agent = await manager_instance.orchestrate_speaking_turn(
            chatroom_state=mock_chatroom_state,  # type: ignore
            recent_history=test_recent_history
        )

        if speaker_agent:
            print(f"Next speaker: {speaker_agent.nickname} (ID: {speaker_agent.agent_id})")
            # The actual generate_public_speech call (with a context built via
            # _build_llm_context) is the main loop's responsibility.
            print(f"  {speaker_agent.nickname} would be the next speaker.")
        else:
            print("No agent was selected to speak.")

        mock_chatroom_state.cleanup_test_files()  # remove dummy whiteboard file
        print("--- Async Test Operations Complete ---")

    asyncio.run(run_async_test_ops(manager))

    db_service_instance.close()
    if os.path.exists("test_agent_manager.db"):
        os.remove("test_agent_manager.db")  # clean up test DB
    print("\nAgentManager example run complete.")