import asyncio # Added import for asyncio.create_task
import uuid
import os
import logging
from datetime import datetime, timezone
from typing import List, Dict, Any, Optional
from functools import partial # Changed import

from core.data_models import ChatMessage, GlobalTag, LLMLogEntry, WhiteboardItem, DEFAULT_HOST_USER_ID # Import constant
from services.db_service import DBService
from services.llm_service import LLMService
from core.chatroom_state import ChatroomState
from core.agent_manager import AgentManager
from core.llm_agent import LLMAgent # Added import for LLMAgent type hint
from core import logic_executor # For run_in_executor

logger = logging.getLogger(__name__)

class InternalApiLogic:
    def __init__(self, db_service: DBService, agent_manager: AgentManager, chatroom_state: ChatroomState, llm_service: LLMService):
        """Wire up the injected core services used by all API handlers."""
        # Pure dependency injection — this class holds no state of its own.
        self.llm_service = llm_service
        self.db_service = db_service
        self.chatroom_state = chatroom_state
        self.agent_manager = agent_manager
        logger.info("InternalApiLogic initialized with core services.")

    async def _process_new_message_common_async(self, message: ChatMessage) -> None:
        """
        Shared pipeline for any new ChatMessage (human or agent):
          1. Persist the message to the DB.
          2. Append it to the in-memory public chat history.
          3. Normalize and upsert each of its tags (DB first, then the
             in-memory tag cache).
        """
        # Persist first so the DB remains the source of truth.
        await logic_executor.run_in_executor(self.db_service.add_chat_message, message)

        # Mirror the message into the in-memory history.
        self.chatroom_state.public_chat_history.append(message)

        # Upsert every non-empty, normalized tag carried by the message.
        for raw_tag in (message.tags or []):
            tag = raw_tag.lower().strip()
            if not tag:
                continue

            # Update (or create) the tag record in the DB ...
            await logic_executor.run_in_executor(
                self.db_service.add_or_update_global_tag,
                tag,
                message.timestamp
            )
            # ... then re-read it so the in-memory cache matches the DB.
            refreshed: Optional[GlobalTag] = await logic_executor.run_in_executor(
                self.db_service.get_global_tag,
                tag
            )

            if refreshed:
                self.chatroom_state.global_tags[tag] = refreshed
            else:
                # DB round-trip did not return the tag; synthesize a
                # best-effort in-memory record so the UI stays consistent.
                logger.warning(f"Tag '{tag}' was not found in DB after update attempt for message {message.message_id}, creating in-memory.")
                self.chatroom_state.global_tags[tag] = GlobalTag(
                    tag_name=tag,
                    occurrence_count=1,
                    first_seen_timestamp=message.timestamp,
                    last_seen_timestamp=message.timestamp
                )
        logger.info(f"Common processing for message '{message.message_id}' by '{message.sender_nickname}' complete.")

    async def _handle_agent_speaking_turn_async(self, speaking_agent: LLMAgent, recent_history: List[ChatMessage]) -> Optional[Dict[str, Any]]:
        """
        Handles the process of an agent speaking:
        1. Build LLM context for the speaking agent.
        2. Call agent.generate_public_speech.
        3. Process and save the agent's message using _process_new_message_common_async.
        4. Link LLM log with the public message ID.
        Returns a dictionary formatted for UI if agent spoke, else None.

        Args:
            speaking_agent: The agent selected to take this speaking turn.
            recent_history: Recent chat messages. NOTE(review): currently
                unused in this method — the context builder reads history from
                self.chatroom_state instead; confirm whether this parameter
                can be dropped or should be threaded into _build_llm_context.

        Returns:
            A UI-ready message dict if the agent produced content, else None.
        """
        logger.info(f"Handling speaking turn for agent {speaking_agent.nickname}.")
        
        # 1. Build context for the speaking agent
        speaking_task_prompt = "Based on your persona, previous thoughts (if any from listening phase), the current chatroom state, whiteboard items, and recent conversation history, formulate your public message."
        speaking_llm_context = await self.agent_manager._build_llm_context(
            agent=speaking_agent,
            chatroom_state=self.chatroom_state,
            task_specific_prompt=speaking_task_prompt
        )
        
        # 2. Agent generates speech
        agent_speech_output = await speaking_agent.generate_public_speech(llm_messages=speaking_llm_context)
        
        agent_message_for_ui = None
        # Only proceed when the agent actually produced a non-empty message.
        if agent_speech_output and agent_speech_output.get("message"):
            agent_message_content = agent_speech_output["message"]
            agent_message_tags = agent_speech_output.get("tags", [])
            
            agent_chat_message = ChatMessage(
                message_id=str(uuid.uuid4()),
                participant_id=speaking_agent.agent_id,
                sender_nickname=speaking_agent.nickname,
                content=agent_message_content,
                timestamp=datetime.now(timezone.utc),
                tags=agent_message_tags,
                raw_llm_output_if_agent=None  # filled in below if a log entry exists
            )
            
            # Attach the raw LLM output from the corresponding log entry (if any)
            # so the chat message carries full provenance.
            if agent_speech_output.get("log_entry_id"):
                log_entry_for_speech: Optional[LLMLogEntry] = await logic_executor.run_in_executor(
                    self.db_service.get_llm_log_entry_by_id,
                    agent_speech_output["log_entry_id"]
                )
                if log_entry_for_speech:
                    agent_chat_message.raw_llm_output_if_agent = log_entry_for_speech.raw_llm_output
            
            # 3. Process and save agent's message (DB persist, in-memory
            # history, tag upserts — see _process_new_message_common_async)
            await self._process_new_message_common_async(agent_chat_message)
            
            logger.info(f"Agent {speaking_agent.nickname} spoke: {agent_message_content[:50]}...")
            agent_message_for_ui = {
                "message_id": agent_chat_message.message_id,
                "sender_nickname": agent_chat_message.sender_nickname,
                "content": agent_chat_message.content,
                "timestamp": agent_chat_message.timestamp.isoformat(),
                "tags": agent_chat_message.tags
            }
            
            # 4. Link LLM log entry with the public message ID (done after the
            # message has been saved in step 3)
            if agent_speech_output.get("log_entry_id"):
                await logic_executor.run_in_executor(
                    self.db_service.update_log_entry_with_public_message_id,
                    agent_speech_output["log_entry_id"],
                    agent_chat_message.message_id
                )
        else:
            logger.warning(f"Agent {speaking_agent.nickname} was selected but produced no valid message content.")
        
        return agent_message_for_ui

    async def _trigger_agent_interaction_background_task_async(self) -> None:
        """
        Background task to handle agent listening and potential speaking turn
        after a human message has been processed.

        Runs independently of the human-message request/response cycle; every
        failure is caught and logged so nothing propagates into the caller's
        event loop.
        """
        logger.info("Starting background task for agent interaction.")
        try:
            # Snapshot the tail of the conversation for the speaking turn.
            recent_history_count = 10
            if not self.chatroom_state.public_chat_history:
                logger.warning("Public chat history is empty, cannot get recent history for agent interaction.")
                recent_history = []
            else:
                recent_history = self.chatroom_state.public_chat_history[-recent_history_count:]

            # Let every agent update its internal state from the latest messages.
            await self.agent_manager.trigger_active_listening_for_all(
                chatroom_state=self.chatroom_state
            )
            logger.info("Agent active listening triggered in background.")

            # Ask the manager to pick (at most) one agent to speak this turn.
            speaking_agent = await self.agent_manager.orchestrate_speaking_turn(
                chatroom_state=self.chatroom_state
            )

            if speaking_agent:
                agent_message_data = await self._handle_agent_speaking_turn_async(
                    speaking_agent=speaking_agent,
                    recent_history=recent_history
                )
                if agent_message_data:
                    # Fix: removed the stray `print("DEV_NOTE: ...")` that
                    # duplicated this log line and bypassed the logging subsystem.
                    logger.info(f"Agent {speaking_agent.nickname} spoke in background. Message data: {agent_message_data}")
                    # TODO: Implement mechanism to push agent_message_data to the UI.
                    # This could involve calling a method on PywebviewOuterApi or an
                    # event system, e.g.:
                    #   self.ui_update_callback(agent_message_data)
                    #   get_core_service('ui_bridge').push_agent_message(agent_message_data)
                else:
                    logger.info(f"Agent {speaking_agent.nickname} decided not to speak or speech failed in background.")
            else:
                logger.info("No agent decided to speak this turn in background.")
        except Exception as e:
            logger.error(f"Error in agent interaction background task: {e}", exc_info=True)


    async def handle_human_message_submission_async(self, message_text: str, tags: List[str]) -> Dict[str, Any]:
        """
        Validate and persist a message typed by the human host, returning a
        UI-ready payload describing either the saved message or the failure.
        """
        logger.debug(f"InternalApiLogic: Async handling human message: '{message_text}' with tags: {tags}.")

        # The human host is identified by a well-known constant ID.
        sender_id = DEFAULT_HOST_USER_ID
        sender = self.chatroom_state.get_participant_by_id(sender_id)

        if not sender:
            error_msg = f"Sender participant '{sender_id}' not found."
            logger.error(error_msg)
            return {"success": False, "error": error_msg}

        new_message = ChatMessage(
            message_id=str(uuid.uuid4()),
            participant_id=sender_id,
            sender_nickname=sender.nickname,
            content=message_text,
            timestamp=datetime.now(timezone.utc),
            tags=tags if tags else []
        )

        try:
            # Shared pipeline: DB persist, in-memory history, tag upserts.
            await self._process_new_message_common_async(new_message)

            # Agent interaction is intentionally deferred for now:
            # asyncio.create_task(self._trigger_agent_interaction_background_task_async())
            # TODO: decide whether to notify each LLM agent directly or have
            # the agents poll on a schedule.

            # Echo the saved message back immediately; any agent reply will be
            # pushed asynchronously if one occurs.
            return {
                "success": True,
                "message": {
                    "message_id": new_message.message_id,
                    "sender_nickname": new_message.sender_nickname,
                    "content": new_message.content,
                    "timestamp": new_message.timestamp.isoformat(),
                    "tags": new_message.tags
                }
            }

        except Exception as e:
            error_msg = f"Error processing message asynchronously: {e}"
            logger.error(error_msg, exc_info=True)
            return {"success": False, "error": error_msg}

    async def _get_whiteboard_items_for_ui_async(self) -> List[Dict[str, Any]]:
        # This helper can remain largely synchronous if chatroom_state.whiteboard_items is directly usable
        # If chatroom_state.whiteboard_items itself needed async loading, this would change.
        # For now, assuming chatroom_state is managed within the same async context.
        items_for_ui = []
        for item in self.chatroom_state.whiteboard_items: # Accessing state directly
            items_for_ui.append({
                "reference_id": item.reference_id,
                "display_name": item.display_name,
                "full_local_path": item.full_local_path,
                "uploader_participant_id": item.uploader_participant_id,
                "timestamp_added": item.timestamp_added.isoformat()
            })
        return items_for_ui

    async def add_whiteboard_item_async(self, file_path: str) -> Dict[str, Any]:
        """
        Add a local file reference to the whiteboard (DB first, then in-memory
        state) and return the refreshed item list for the UI.

        Args:
            file_path: Path of the file to reference on the whiteboard.

        Returns:
            {"success": bool, "whiteboard_items": [...]} plus an "error" key
            on failure.
        """
        logger.debug(f"InternalApiLogic: Async adding whiteboard item: {file_path}")
        try:
            if not file_path or not isinstance(file_path, str):
                return {"success": False, "error": "Invalid file path provided.", "whiteboard_items": await self._get_whiteboard_items_for_ui_async()}

            display_name = os.path.basename(file_path)
            # Fix: use the shared host-user constant instead of the hardcoded
            # "human_user_main" literal, keeping the uploader ID consistent
            # with handle_human_message_submission_async.
            uploader_participant_id = DEFAULT_HOST_USER_ID

            new_item = WhiteboardItem(
                display_name=display_name,
                full_local_path=file_path,
                uploader_participant_id=uploader_participant_id
            )

            # Persist first, then mirror into the in-memory state so the DB
            # remains the source of truth.
            await logic_executor.run_in_executor(self.db_service.add_whiteboard_item, new_item)
            self.chatroom_state.whiteboard_items.append(new_item)

            logger.info(f"Whiteboard item '{new_item.reference_id}' for '{file_path}' added asynchronously.")
            return {"success": True, "whiteboard_items": await self._get_whiteboard_items_for_ui_async()}
        except Exception as e:
            error_msg = f"Error adding whiteboard item asynchronously: {e}"
            logger.error(error_msg, exc_info=True)
            return {"success": False, "error": error_msg, "whiteboard_items": await self._get_whiteboard_items_for_ui_async()}

    async def remove_whiteboard_item_async(self, reference_id: str) -> Dict[str, Any]:
        """
        Remove a whiteboard item by its reference ID from both the DB and the
        in-memory state, returning the refreshed item list for the UI.
        """
        logger.debug(f"InternalApiLogic: Async removing whiteboard item: {reference_id}")
        try:
            if not reference_id or not isinstance(reference_id, str):
                return {"success": False, "error": "Invalid reference_id provided.", "whiteboard_items": await self._get_whiteboard_items_for_ui_async()}

            # The DB is authoritative: attempt the DB delete first.
            db_removed = await logic_executor.run_in_executor(self.db_service.remove_whiteboard_item, reference_id)

            if not db_removed:
                logger.warning(f"Failed to remove whiteboard item '{reference_id}' from DB. It might not exist.")
                return {"success": False, "error": "Item not found in DB or failed to remove.", "whiteboard_items": await self._get_whiteboard_items_for_ui_async()}

            # Drop the item from the in-memory list as well.
            count_before = len(self.chatroom_state.whiteboard_items)
            self.chatroom_state.whiteboard_items = [
                entry for entry in self.chatroom_state.whiteboard_items if entry.reference_id != reference_id
            ]

            if len(self.chatroom_state.whiteboard_items) < count_before:
                logger.info(f"Whiteboard item '{reference_id}' removed asynchronously.")
                return {"success": True, "whiteboard_items": await self._get_whiteboard_items_for_ui_async()}

            # DB delete succeeded but the item was never cached in memory —
            # report the inconsistency instead of hiding it.
            logger.warning(f"Whiteboard item '{reference_id}' removed from DB but not found in memory state.")
            return {"success": True, "warning": "Removed from DB, but was not in memory. State might be inconsistent.", "whiteboard_items": await self._get_whiteboard_items_for_ui_async()}
        except Exception as e:
            error_msg = f"Error removing whiteboard item asynchronously: {e}"
            logger.error(error_msg, exc_info=True)
            return {"success": False, "error": error_msg, "whiteboard_items": await self._get_whiteboard_items_for_ui_async()}

    async def get_initial_chatroom_data_async(self) -> Dict[str, Any]:
        """
        Assemble the snapshot the UI needs at startup: participants, chat
        history, whiteboard items, and global tags.

        All data is read from the in-memory ChatroomState, assumed to be in
        sync with the DB after the initial load.
        """
        logger.debug("InternalApiLogic: Async getting initial chatroom data.")

        participants_ui = [
            {
                "participant_id": pid,
                "nickname": participant.nickname,
                "role": participant.role
            }
            for pid, participant in self.chatroom_state.participants.items()
        ]

        chat_history_ui = [
            {
                "message_id": msg.message_id,
                "sender_nickname": msg.sender_nickname,
                "content": msg.content,
                "timestamp": msg.timestamp.isoformat(),
                "tags": msg.tags
            }
            for msg in self.chatroom_state.public_chat_history
        ]

        global_tags_ui = [
            {"tag": tag_name, "count": tag_obj.occurrence_count}
            for tag_name, tag_obj in self.chatroom_state.global_tags.items()
        ]

        return {
            "chat_history": chat_history_ui,
            "participants": participants_ui,
            "whiteboard_items": await self._get_whiteboard_items_for_ui_async(),
            "global_tags": global_tags_ui
        }

    async def get_agent_logs_async(self, agent_id: str, limit: int = 20) -> List[Dict[str, Any]]:
        """
        Fetch recent LLM log entries for one agent, summarized for the UI.
        On failure, returns a single-element list containing an error dict.
        """
        logger.debug(f"InternalApiLogic: Async getting agent logs for {agent_id}, limit {limit}.")
        try:
            entries: List[LLMLogEntry] = await logic_executor.run_in_executor(
                self.db_service.get_logs_for_agent,
                agent_id,
                limit
            )

            logs_for_ui = []
            for entry in entries:
                # Prefer the parsed thinking process; fall back to raw output.
                summary = entry.parsed_thinking_process or entry.raw_llm_output
                if summary and len(summary) > 150:
                    # Keep UI rows short: 147 chars plus an ellipsis.
                    summary = summary[:147] + "..."
                logs_for_ui.append({
                    "log_entry_id": entry.log_entry_id,
                    "timestamp_call_start": entry.timestamp_call_start.isoformat(),
                    "llm_call_type": entry.llm_call_type,
                    "summary": summary or ""
                })

            logger.debug(f"Returning {len(logs_for_ui)} logs for agent {agent_id}.")
            return logs_for_ui

        except Exception as e:
            error_msg = f"Error fetching agent logs asynchronously for agent {agent_id}: {e}"
            logger.error(error_msg, exc_info=True)
            return [{"error": error_msg, "details": str(e)}]

    async def get_active_tags_for_filter_async(self, limit: int = 10) -> List[Dict[str, Any]]:
        """
        Return the most recently active global tags as {"name", "count"} dicts
        for the UI's tag-filter widget. On failure, returns a single-element
        list containing an error dict.
        """
        logger.debug(f"InternalApiLogic: Async getting active tags for filter, limit {limit}.")
        try:
            # partial() carries the keyword argument through run_in_executor.
            fetch = partial(self.db_service.get_recent_active_tags, limit=limit)
            active_tags: List[GlobalTag] = await logic_executor.run_in_executor(fetch)

            tags_for_ui = [
                {"name": tag.tag_name, "count": tag.occurrence_count}
                for tag in active_tags
            ]

            logger.debug(f"Returning {len(tags_for_ui)} active tags for filter.")
            return tags_for_ui

        except Exception as e:
            error_msg = f"Error fetching active tags for filter asynchronously: {e}"
            logger.error(error_msg, exc_info=True)
            return [{"error": error_msg, "details": str(e)}]
    async def filter_chat_history_by_tags_async(self, tags: List[str], limit: int = 100) -> List[Dict[str, Any]]:
        """
        Return chat messages matching any of the given tags (OR logic),
        formatted for the UI. An empty tag list short-circuits to an empty
        result; DB failures yield a single-element list with an error dict.
        """
        logger.debug(f"InternalApiLogic: Async filtering chat history by tags: {tags}, limit {limit}.")
        if not tags:
            logger.debug("No tags provided for filtering, returning empty list.")
            return []

        try:
            # partial() carries the keyword arguments through run_in_executor.
            query = partial(
                self.db_service.get_chat_messages_by_tags,
                tags_to_filter=tags,
                logic="OR",
                limit=limit
            )
            matches: List[ChatMessage] = await logic_executor.run_in_executor(query)

            history_for_ui = [
                {
                    "message_id": msg.message_id,
                    "sender_nickname": msg.sender_nickname,
                    "content": msg.content,
                    "timestamp": msg.timestamp.isoformat(),
                    "tags": msg.tags if msg.tags else []
                }
                for msg in matches
            ]

            logger.debug(f"Returning {len(history_for_ui)} messages filtered by tags: {tags}.")
            return history_for_ui

        except Exception as e:
            error_msg = f"Error filtering chat history by tags asynchronously: {e}"
            logger.error(error_msg, exc_info=True)
            return [{"error": error_msg, "details": str(e)}]
        
    # Placeholder for other methods that might be needed from the old API or new requirements
    async def get_agent_details_async(self, agent_id: str) -> Optional[Dict[str, Any]]:
        """
        Fetch detail fields for a specific agent, or None if it is unknown.

        Exceptions are logged and re-raised so the outer API bridge can format
        them for the caller.
        """
        logger.debug(f"InternalApiLogic: Async getting agent details for agent_id: {agent_id}")
        try:
            agent = self.agent_manager.get_agent_by_id(agent_id)
            if not agent:
                return None
            # NOTE(review): field shape mirrors the UI placeholder this method
            # replaced; confirm against the actual LLMAgent structure.
            return {
                "participant_id": agent.participant_config.participant_id,
                "nickname": agent.participant_config.nickname,
                "role": agent.participant_config.role,
                "persona_summary": agent.persona.get_summary() if agent.persona else "N/A",
                "status": agent.current_status.value if hasattr(agent, 'current_status') else "UNKNOWN"
            }
        except Exception as e:
            logger.error(f"Error getting agent details for {agent_id}: {e}", exc_info=True)
            raise

    async def get_all_participants_async(self) -> List[Dict[str, Any]]:
        """
        Return every participant from the chatroom state as a list of
        UI-ready dicts. Exceptions are logged and re-raised for the bridge.
        """
        logger.debug("InternalApiLogic: Async getting all participants.")
        try:
            return [
                {
                    "participant_id": pid,
                    "nickname": participant.nickname,
                    "role": participant.role
                }
                for pid, participant in self.chatroom_state.participants.items()
            ]
        except Exception as e:
            logger.error(f"Error getting all participants: {e}", exc_info=True)
            raise

    async def get_all_tags_async(self) -> List[Dict[str, Any]]:
        """
        Return every global tag from the in-memory state with its occurrence
        count and last-seen timestamp. Exceptions are logged and re-raised
        for the bridge.
        """
        logger.debug("InternalApiLogic: Async getting all tags.")
        try:
            # Iterate values directly — the dict key duplicates tag_obj.tag_name.
            return [
                {
                    "tag_name": tag_obj.tag_name,
                    "occurrence_count": tag_obj.occurrence_count,
                    "last_seen_timestamp": tag_obj.last_seen_timestamp.isoformat()
                }
                for tag_obj in self.chatroom_state.global_tags.values()
            ]
        except Exception as e:
            logger.error(f"Error getting all tags: {e}", exc_info=True)
            raise