from __future__ import annotations

import asyncio
import re
import uuid
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, Sequence
from enum import Enum

import httpx
import structlog

from agent_society.memory.store import MemoryDocument, MemoryStore
from agent_society.memory.event_log import events_to_documents, read_events
from agent_society.telemetry.logging import configure_logging

logger = structlog.get_logger(__name__)


@dataclass
class PersonaManifest:
    """Static identity and configuration for one agent persona."""

    # Human-readable agent identifier (e.g. "atlas" in the demo flow).
    name: str
    # Functional role the agent plays (e.g. "research"); quoted in responses.
    role: str
    # Ordered goals; downstream handlers read objectives[0] as the primary goal.
    objectives: list[str]
    # Ledger account name charged for this agent's compute usage.
    compute_account: str
    # Optional free-form traits; appended to generated responses when non-empty.
    traits: dict[str, str] = field(default_factory=dict)


class LedgerClient:
    """Async HTTP client for a single compute-credit ledger account.

    Every operation opens a short-lived ``httpx.AsyncClient`` (or whatever the
    injected ``client_factory`` produces, which is how tests substitute fakes)
    and closes it once the request completes.
    """

    def __init__(self, base_url: str, account: str, *, client_factory: Callable[[], httpx.AsyncClient] | None = None) -> None:
        self._base_url = base_url.rstrip("/")
        self._account = account
        self._client_factory = client_factory

    def _client(self) -> httpx.AsyncClient:
        # Prefer the injected factory; otherwise build a fresh client with a
        # conservative request timeout.
        factory = self._client_factory
        return factory() if factory else httpx.AsyncClient(timeout=10.0)

    async def charge(self, amount: int, reason: str, correlation_id: str | None = None) -> None:
        """Debit ``amount`` credits from the account.

        Raises ``RuntimeError`` when the service answers HTTP 402 (insufficient
        credits); any other error status propagates via ``raise_for_status``.
        """
        body = {
            "amount": amount,
            "reason": reason,
            "correlation_id": correlation_id,
        }
        async with self._client() as http:
            response = await http.post(f"{self._base_url}/accounts/{self._account}/charge", json=body)
            if response.status_code == 402:
                raise RuntimeError("Insufficient credits")
            response.raise_for_status()

    async def balance(self) -> int:
        """Return the account's current credit balance."""
        async with self._client() as http:
            response = await http.get(f"{self._base_url}/accounts/{self._account}")
            response.raise_for_status()
            return response.json()["balance"]


class TaskType(Enum):
    """Closed set of task categories a prompt can be classified into."""

    QUERY = "query"  # Information seeking
    ANALYSIS = "analysis"  # Data analysis or pattern recognition
    SYNTHESIS = "synthesis"  # Combining information
    PLANNING = "planning"  # Creating action plans
    EXECUTION = "execution"  # Performing specific actions


class ReasoningEngine:
    """Enhanced reasoning engine with context-aware response generation.

    Classifies each prompt into a :class:`TaskType` via weighted regex
    patterns, then routes it to a type-specific handler that assembles a
    templated response from the persona manifest and any retrieved memory
    documents.
    """

    def __init__(self, manifest: PersonaManifest, memory: MemoryStore) -> None:
        self._manifest = manifest
        self._memory = memory
        self._reasoning_patterns = self._initialize_patterns()

    def _initialize_patterns(self) -> dict[TaskType, list[tuple[str, float]]]:
        """Return (regex, weight) scoring patterns for each task type."""
        return {
            TaskType.QUERY: [
                (r"\b(what|how|when|where|why|who)\b", 0.8),
                (r"\b(explain|describe|tell)\b", 0.7),
                (r"\b(is|are|was|were)\b.*\?", 0.6),
            ],
            TaskType.ANALYSIS: [
                (r"\b(analyze|examine|investigate|assess)\b", 0.9),
                (r"\b(compare|contrast|evaluate)\b", 0.8),
                (r"\b(pattern|trend|correlation)\b", 0.7),
            ],
            TaskType.SYNTHESIS: [
                (r"\b(combine|integrate|synthesize|merge)\b", 0.9),
                (r"\b(summarize|consolidate)\b", 0.8),
                (r"\b(create|generate|produce)\b.*\b(from|using)\b", 0.7),
            ],
            TaskType.PLANNING: [
                (r"\b(plan|strategy|approach|roadmap)\b", 0.9),
                (r"\b(steps|process|procedure)\b", 0.8),
                (r"\b(implement|deploy|execute)\b.*\b(how|plan)\b", 0.7),
            ],
            TaskType.EXECUTION: [
                (r"\b(do|perform|execute|run|start)\b", 0.8),
                (r"\b(update|modify|change|fix)\b", 0.7),
                (r"\b(create|build|make)\b(?!.*\b(plan|strategy)\b)", 0.6),
            ],
        }

    def _classify_task(self, prompt: str) -> TaskType:
        """Return the task type whose highest-weighted pattern matches.

        Falls back to ``TaskType.QUERY`` when no pattern matches at all.
        """
        prompt_lower = prompt.lower()
        best_type = TaskType.QUERY
        best_weight = 0.0
        for task_type, patterns in self._reasoning_patterns.items():
            for pattern, weight in patterns:
                # A match can only win if its weight beats the current best,
                # so test the cheap weight comparison before the regex search.
                if weight > best_weight and re.search(pattern, prompt_lower, re.IGNORECASE):
                    best_type, best_weight = task_type, weight
        return best_type

    def _extract_key_concepts(self, text: str) -> list[str]:
        """Extract key concepts from text for better reasoning.

        Uses simple heuristics (in production, use an NLP library):
        capitalized phrases, quoted terms, and hyphen/underscore compounds.
        Duplicates are removed while preserving first-seen order — the
        previous plain ``set`` made ``concepts[0]`` (used by several response
        templates) depend on hash randomization, so otherwise identical runs
        produced different responses.
        """
        concepts: list[str] = []
        # Capitalized words/phrases (likely important concepts).
        concepts.extend(re.findall(r'\b[A-Z][a-z]+(?:\s+[A-Z][a-z]+)*\b', text))
        # Quoted terms.
        concepts.extend(re.findall(r'["\']([^"\']+)["\']', text))
        # Technical terms (containing hyphens or underscores).
        concepts.extend(re.findall(r'\b\w+[-_]\w+\b', text))
        # dict.fromkeys dedupes deterministically, keeping insertion order.
        return list(dict.fromkeys(concepts))

    def _primary_objective(self) -> str:
        """First manifest objective, with a safe fallback for an empty list.

        Guards the ``objectives[0]`` reads in the handlers below, which
        previously raised IndexError for a manifest with no objectives.
        """
        objectives = self._manifest.objectives
        return objectives[0] if objectives else "the agent's objectives"

    def _generate_contextual_response(self, task_type: TaskType, prompt: str,
                                     retrieved_docs: Iterable[MemoryDocument]) -> str:
        """Generate a response based on task type and context."""
        doc_list = list(retrieved_docs)
        key_concepts = self._extract_key_concepts(prompt)

        # Dispatch table instead of an if/elif chain; unknown types fall back
        # to the default handler (which takes no concepts argument).
        handlers = {
            TaskType.QUERY: self._handle_query,
            TaskType.ANALYSIS: self._handle_analysis,
            TaskType.SYNTHESIS: self._handle_synthesis,
            TaskType.PLANNING: self._handle_planning,
            TaskType.EXECUTION: self._handle_execution,
        }
        handler = handlers.get(task_type)
        if handler is None:
            return self._handle_default(prompt, doc_list)
        return handler(prompt, doc_list, key_concepts)

    def _handle_query(self, prompt: str, docs: list[MemoryDocument], concepts: list[str]) -> str:
        """Handle information-seeking queries."""
        response = f"Based on my role as {self._manifest.role} and available knowledge:\n\n"

        if docs:
            # Surface only the top documents that mention an extracted concept.
            relevant_info = [
                doc.content
                for doc in docs[:3]  # top 3 most relevant
                if any(concept.lower() in doc.content.lower() for concept in concepts)
            ]
            if relevant_info:
                response += "Key findings:\n"
                for i, info in enumerate(relevant_info, 1):
                    response += f"{i}. {info}\n"
                response += "\n"

        response += "Answer: Based on the analysis, "
        # Canned answers for the two themes this prototype cares about.
        if "credits" in prompt.lower():
            response += "compute credits are the fundamental resource governing agent operations, enabling task execution and resource allocation."
        elif "governance" in prompt.lower():
            response += "governance mechanisms ensure coordinated decision-making and resource management across the agent society."
        else:
            response += f"the {concepts[0] if concepts else 'requested information'} relates to {self._primary_objective()}."

        return response

    def _handle_analysis(self, prompt: str, docs: list[MemoryDocument], concepts: list[str]) -> str:
        """Handle analysis tasks."""
        response = f"Analysis by {self._manifest.name} ({self._manifest.role}):\n\n"

        response += "Examining factors:\n"
        for i, doc in enumerate(docs[:3], 1):
            response += f"{i}. {doc.content[:100]}...\n"

        response += "\nKey observations:\n"
        response += f"- Pattern identified: Resource allocation impacts {concepts[0] if concepts else 'system behavior'}\n"
        response += "- Correlation detected between credit usage and task completion rates\n"
        response += f"- Trend analysis suggests optimization opportunities in {self._primary_objective()}\n"

        return response

    def _handle_synthesis(self, prompt: str, docs: list[MemoryDocument], concepts: list[str]) -> str:
        """Handle synthesis tasks."""
        response = f"Synthesis Report - Agent: {self._manifest.name}\n\n"

        response += "Integrated findings from available knowledge:\n\n"

        if docs:
            response += "Combined insights:\n"
            combined = " ".join(doc.content for doc in docs[:2])
            response += f"• {combined[:200]}...\n\n"

        response += "Synthesized conclusion:\n"
        response += f"The integration of {', '.join(concepts[:2]) if concepts else 'available information'} "
        response += f"reveals opportunities for {self._primary_objective()} through coordinated action.\n"

        return response

    def _handle_planning(self, prompt: str, docs: list[MemoryDocument], concepts: list[str]) -> str:
        """Handle planning tasks."""
        response = f"Strategic Plan - Agent: {self._manifest.name}\n\n"

        response += "Objective: " + (concepts[0] if concepts else "Task execution") + "\n\n"

        response += "Proposed approach:\n"
        response += "1. Assessment Phase:\n"
        response += f"   - Evaluate current {self._manifest.role} capabilities\n"
        response += "   - Analyze resource requirements (compute credits)\n\n"

        response += "2. Preparation Phase:\n"
        response += "   - Gather necessary information from memory store\n"
        response += f"   - Coordinate with governance for {concepts[0] if concepts else 'task'} approval\n\n"

        response += "3. Execution Phase:\n"
        response += f"   - Implement {self._primary_objective()}\n"
        response += "   - Monitor credit consumption\n"
        response += "   - Log outcomes to event system\n\n"

        response += "4. Evaluation Phase:\n"
        response += "   - Assess results against objectives\n"
        response += "   - Update memory with learned patterns\n"

        return response

    def _handle_execution(self, prompt: str, docs: list[MemoryDocument], concepts: list[str]) -> str:
        """Handle execution tasks."""
        response = f"Execution Plan - Agent: {self._manifest.name}\n\n"

        response += f"Task: {prompt[:50]}...\n\n"
        response += "Actions to perform:\n"

        response += f"• Allocating compute credits for {concepts[0] if concepts else 'task execution'}\n"
        response += f"• Initiating {self._manifest.role} protocols\n"
        response += "• Recording transaction in ledger\n"
        response += "• Updating event log with execution status\n\n"

        response += "Expected outcome: Task completion with resource optimization.\n"

        return response

    def _handle_default(self, prompt: str, docs: list[MemoryDocument]) -> str:
        """Default handler for unclassified tasks."""
        doc_summary = "\n".join(f"- {doc.content}" for doc in docs)
        objectives = ", ".join(self._manifest.objectives)
        response = (
            f"Agent {self._manifest.name} ({self._manifest.role}) processing: '{prompt}'.\n"
            f"Objectives: {objectives}.\n"
        )
        if doc_summary:
            response += f"Relevant knowledge:\n{doc_summary}\n"
        response += "Proposed action: Analyze requirements and proceed with appropriate strategy."
        return response

    def run(self, prompt: str, retrieved_docs: Iterable[MemoryDocument]) -> str:
        """Main entry point: classify the prompt and build a response."""
        task_type = self._classify_task(prompt)
        logger.debug("reasoning.task_classified", task_type=task_type.value, prompt=prompt[:50])

        response = self._generate_contextual_response(task_type, prompt, retrieved_docs)

        # Append role-specific trait annotations when the manifest defines any.
        if self._manifest.traits:
            response += "\n\nAgent traits applied: " + ", ".join(
                f"{k}={v}" for k, v in self._manifest.traits.items()
            )

        return response


class AgentRuntime:
    """Orchestrates one agent: memory, reasoning engine, and ledger charges."""

    def __init__(
        self,
        manifest: PersonaManifest,
        ledger_client: LedgerClient,
        memory: MemoryStore | None = None,
        *,
        event_log_dir: str | Path | None = None,
        event_owner: str | None = None,
        event_actors: Sequence[str] | None = None,
    ) -> None:
        self._manifest = manifest
        self._ledger = ledger_client
        self._memory = memory or MemoryStore()
        self._engine = ReasoningEngine(manifest, self._memory)
        # Seed memory from the persisted event log only when both a directory
        # and an owner were supplied.
        if event_log_dir and event_owner:
            events = read_events(Path(event_log_dir), event_owner, event_actors)
            docs = events_to_documents(events)
            if docs:
                self._memory.bulk_add(docs)

    async def perform_task(self, prompt: str, credit_cost: int = 1) -> dict[str, Any]:
        """Charge the ledger, run the reasoning engine, and return an artifact."""
        correlation_id = str(uuid.uuid4())
        logger.info(
            "agent.task.start",
            agent=self._manifest.name,
            prompt=prompt,
            correlation_id=correlation_id,
        )
        # Charge before doing any work so an exhausted balance aborts the task.
        await self._ledger.charge(credit_cost, reason="task_execution", correlation_id=correlation_id)
        retrieved = self._memory.search(prompt)
        artifact = {
            "agent": self._manifest.name,
            "prompt": prompt,
            "result": self._engine.run(prompt, retrieved),
            "retrieved": [doc.content for doc in retrieved],
            "correlation_id": correlation_id,
        }
        logger.info("agent.task.complete", **artifact)
        return artifact

    def ingest_memory(self, texts: Iterable[str], tags: dict[str, str] | None = None) -> None:
        """Store each text as a memory document carrying the given tags."""
        for entry in texts:
            self._memory.add(MemoryDocument(content=entry, tags=tags or {}))

    async def heartbeat(self) -> dict[str, Any]:
        """Report the agent's name, current credit balance, and objectives."""
        return {
            "agent": self._manifest.name,
            "balance": await self._ledger.balance(),
            "objectives": self._manifest.objectives,
        }


async def demo_flow() -> None:
    """Wire up a demo agent, seed its memory, and run one credit-charged task."""
    configure_logging()
    manifest = PersonaManifest(
        name="atlas",
        role="research",
        objectives=["synthesize knowledge", "support experiments"],
        compute_account="atlas",
    )
    ledger = LedgerClient("http://localhost:8080", manifest.compute_account)
    runtime = AgentRuntime(manifest, ledger)
    seed_facts = [
        "Compute credits regulate agent survival in the Agent-Society prototype.",
        "Fusion events require governance approval and sufficient credit balances.",
        "Telemetry dashboards track token consumption and task outcomes.",
    ]
    runtime.ingest_memory(seed_facts)
    await runtime.perform_task("How do compute credits influence survival strategies?", credit_cost=1)


# Script entry point: run the demo flow (expects a ledger service reachable
# at http://localhost:8080, as configured in demo_flow).
if __name__ == "__main__":
    asyncio.run(demo_flow())
