import json
from pathlib import Path
from typing import Any, Dict, List

from core.managers.prompt_manager import CharacterConfig
from core.prompts.system_template import get_character_template
from core.prompts.user_template import get_user_template
from core.utils.log import mylogger


class DialogueManager:
    """Manages dialogue generation logic for a single character.

    Handles character configuration, prompt templating, conversation
    history, and LLM interaction.
    """

    # Maximum number of history entries (user + assistant messages) retained.
    HISTORY_LIMIT = 20
    # Language directive injected into the system prompt: forces Chinese-only,
    # strictly in-character replies.
    ZH_LANGUAGE_PROMPT = "重要提示：你必须严格使用中文回答，禁止使用任何其他语言（包括日文、英文等）。对话场景：中文环境。你的人设必须严格保持，不能跳脱角色，也不能以AI身份自称。"

    def __init__(self, character_name: str, llm, model_type: str = "default"):
        """Initialize the manager and load the character configuration.

        Args:
            character_name: Name of the character to role-play.
            llm: Async LLM client exposing a ``chat(messages, **params)`` coroutine.
            model_type: Model identifier forwarded to ``CharacterConfig``.
        """
        self.character_name = character_name
        self.llm = llm
        self.model_type = model_type
        self.history: List[Dict[str, str]] = []

        # Load the character profile and pre-build the system template once,
        # so per-turn generation only needs to fill in parameters.
        self.character_config = CharacterConfig(character_name, model_name=model_type)
        self.character_data = self._load_character_data()
        self.character_template, self.template_params = get_character_template(
            self.character_data
        )

        # Lazy %-style args: formatting is skipped when the level is disabled.
        mylogger.info(
            "DialogueManager initialized for %s with model %s",
            character_name,
            model_type,
        )

    def _load_character_data(self) -> Dict[str, Any]:
        """Load character data from configuration."""
        return self.character_config.load_character_data()

    def _load_scenario_data(self) -> List[Dict[str, Any]]:
        """Load scenario data from data/scenario.json.

        Returns:
            A list of scenario dicts. A single-object file is wrapped in a
            list; any read or parse failure yields an empty list so callers
            can fall back to default scene/status values.
        """
        scenario_file_path = (
            Path(__file__).parent.parent.parent / "data" / "scenario.json"
        )

        try:
            with open(scenario_file_path, "r", encoding="utf-8") as f:
                scenario_data = json.load(f)
            mylogger.info("Successfully loaded scenario file: %s", scenario_file_path)
            return scenario_data if isinstance(scenario_data, list) else [scenario_data]
        # OSError covers FileNotFoundError plus permission/IO errors, keeping
        # the documented "fall back to empty" contract for all read failures.
        except (OSError, json.JSONDecodeError) as e:
            mylogger.error("Failed to read scenario file %s: %s", scenario_file_path, e)
            return []

    def _create_system_message(self) -> str:
        """Create the system prompt for dialogue generation.

        Returns:
            The character template rendered with the current template params.
        """
        self.template_params["language_prompt"] = self.ZH_LANGUAGE_PROMPT
        # Fall back to the legacy long-term memory file only when
        # memory_context has not been set (or is empty).
        if not self.template_params.get("memory_context"):
            self.template_params["memory_context"] = (
                self.character_config.load_long_term_memory()
            )
        return self.character_template.format(**self.template_params)

    def _create_user_message(self, user_input: str, scenario_index: int = 0) -> str:
        """Create the user prompt for dialogue generation.

        Args:
            user_input: User's input message.
            scenario_index: Index into the scenario list; out-of-range values
                fall back to the first scenario.

        Returns:
            The rendered user prompt string.
        """
        scenario_data = self._load_scenario_data()

        # Defaults used when no scenario file is available.
        scene = "默认场景"
        status = "默认状态"
        extra_info = ""

        # Select scenario based on index (empty list keeps the defaults).
        if scenario_data:
            if 0 <= scenario_index < len(scenario_data):
                selected_scenario = scenario_data[scenario_index]
            else:
                selected_scenario = scenario_data[0]
                mylogger.warning(
                    "Scenario index %s out of range, using first scenario",
                    scenario_index,
                )

            scene = selected_scenario.get("scene", "默认场景")
            status = selected_scenario.get("status", "默认状态")
            extra_info = selected_scenario.get("extra", "")

            mylogger.info("Using scenario index %s: %s...", scenario_index, scene[:50])

        # Build user prompt data
        user_data = {
            "user_input": user_input,
            "character_name": self.character_name,
            "scene_data": {"scene_description": scene},
            "status_data": {"character_status": status},
            "short_memory": self.character_config.load_short_term_memory(),
            "response_guidance": f"\n\n# 额外信息\n{extra_info}" if extra_info else "",
        }
        return get_user_template(user_data)

    async def generate_response(
        self,
        user_input: str,
        scenario_index: int = 0,
        temperature: float = 0.8,
        frequency_penalty: float = 0.1,
        presence_penalty: float = 0.0,
    ) -> str:
        """Generate dialogue response.

        Args:
            user_input: User's input message
            scenario_index: Index of scenario to use
            temperature: LLM temperature
            frequency_penalty: LLM frequency penalty
            presence_penalty: LLM presence penalty

        Returns:
            Generated response
        """
        system_message = self._create_system_message()
        user_message = self._create_user_message(user_input, scenario_index)

        messages = [
            {"role": "system", "content": system_message},
            {"role": "user", "content": user_message},
        ]

        mylogger.info("System message: %s", system_message)
        mylogger.info("User message: %s", user_message)

        # Call LLM
        response = await self.llm.chat(
            messages,
            temperature=temperature,
            frequency_penalty=frequency_penalty,
            presence_penalty=presence_penalty,
        )

        # Update conversation history (stored for callers via get_history;
        # NOTE(review): history is not included in the LLM messages above —
        # presumably short_memory covers context; confirm this is intended.
        self.history.append({"role": "user", "content": user_input})
        self.history.append({"role": "assistant", "content": response})

        # Keep only the most recent HISTORY_LIMIT entries.
        if len(self.history) > self.HISTORY_LIMIT:
            self.history = self.history[-self.HISTORY_LIMIT :]

        mylogger.info("LLM response: %s", response)
        return response

    def get_history(self) -> List[Dict[str, str]]:
        """Get conversation history."""
        return self.history
