import itertools
import logging
from datetime import datetime
from pathlib import Path
from typing import Mapping, Iterator

from chromadb import PersistentClient
from chromadb.config import Settings
from chromadb.utils.embedding_functions import SentenceTransformerEmbeddingFunction

from a_mem.memory_note import MemoryNote, serialize, serialize_to_document, deserialize_from_document
from a_mem.llm import evolve_llm, analyze_content
from .prompt import evolve_prompt


# Chroma-compatible metadata record: string keys mapped to scalar values
# (Chroma only persists str/float/int/bool/None metadata values).
type meta = Mapping[str, str | float | int | bool | None]
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)


class AgenticMemorySystem:
    """Agentic memory store backed by a persistent Chroma collection.

    Each note added is analyzed by an LLM, linked to its nearest-neighbor
    memories, and optionally "evolved" (links/tags updated, neighbor
    summaries rewritten) based on an LLM decision. Retrieval bumps
    per-record access bookkeeping (``last_accessed``, ``retrieval_count``).
    """

    def __init__(self, memory_storage_path: str):
        """Open (or create) the persistent memory collection at *memory_storage_path*."""
        self.memory_storage_path = memory_storage_path
        self.chroma_client = PersistentClient(
            path=self.memory_storage_path,
            settings=Settings(
                allow_reset=True,  # required so clear() may call reset()
                anonymized_telemetry=False,
            ),
        )
        self.embedding_function = SentenceTransformerEmbeddingFunction(
            model_name="all-MiniLM-L6-v2",
            device="cuda",  # NOTE(review): hard-coded GPU device; fails on CPU-only hosts
        )
        self.collection = self.chroma_client.create_collection(
            name="memories",
            embedding_function=self.embedding_function,
            get_or_create=True,
        )

    def add_note(self, content: str, time: str = ""):
        """Analyze *content*, evolve it against related memories, and persist it.

        :param content: raw note text.
        :param time: optional timestamp string; when empty, MemoryNote's own
            default timestamp is used.
        """
        analysis = analyze_content(content)
        # Build the note once; pass `timestamp` only when the caller supplied
        # one so MemoryNote's default applies otherwise (removes the duplicated
        # constructor call of the original).
        extra = {"timestamp": time} if time else {}
        note = MemoryNote(
            content=content,
            keywords=analysis.keywords,
            summary=analysis.summary,
            tags=analysis.tags,
            **extra,
        )

        # Link generation and note evolution against nearest neighbors.
        note = self._process_memory(note)
        logger.info("Note added: %s", note)  # lazy %-formatting for logging
        ids, document, metadata = serialize(note)
        self.collection.add(ids=ids, documents=document, metadatas=metadata)

    def find_related_memories(self, query: str, k: int = 5) \
            -> tuple[list[str], list[str], list[meta]]:
        """Return (ids, documents, metadatas) of the *k* memories nearest to *query*.

        All three lists are empty when the collection has no matches.
        """
        result = self.collection.query(query_texts=query, n_results=k)
        indices = result["ids"][0]
        if not indices:
            return [], [], []
        return indices, result["documents"][0], result["metadatas"][0]

    def search(self, query: str, k: int = 5) -> tuple[list[str], list[str]]:
        """Return up to *k* (document, timestamp) pairs for *query*, following links.

        Side effect: bumps ``last_accessed``/``retrieval_count`` on every
        record touched (both direct hits and linked records).
        """
        results = self.collection.query(query_texts=query, n_results=k)
        indices = results["ids"][0]
        if not indices:
            return [], []
        documents, metadatas = results["documents"][0], results["metadatas"][0]

        current_time = datetime.now().strftime("%Y%m%d%H%M%S")
        self._update_metadatas(current_time, indices, metadatas)

        top_k_documents: list[str] = []
        top_k_datetime: list[str] = []
        for document, document_datetime in self._top_k_documents(documents, metadatas):
            top_k_documents.append(document)
            top_k_datetime.append(document_datetime)
            if len(top_k_documents) == k:
                break
        return top_k_documents, top_k_datetime

    def clear(self):
        """Delete every stored memory and recreate an empty collection."""
        if Path(self.memory_storage_path).exists():
            self.chroma_client.reset()  # wipes all data; needs allow_reset=True
            self.collection = self.chroma_client.create_collection(
                name="memories",
                embedding_function=self.embedding_function,
                get_or_create=True,
            )

    def _process_memory(self, note: MemoryNote) -> MemoryNote:
        """Link *note* to its nearest neighbors and let the LLM evolve it.

        Returns the (possibly updated) note. May also rewrite neighbor
        documents in the collection as a side effect ("update_neighbor").
        """
        query = serialize_to_document(note)
        indices, documents, metadatas = self.find_related_memories(query)

        # BUG FIX: the original read `if indices: return note`, bailing out
        # precisely when neighbors WERE found — so evolution only ever ran
        # against an empty neighborhood. Evolution needs neighbors; skip it
        # only when there are none.
        if not indices:
            return note

        # One tab-separated line per neighbor for the evolution prompt.
        flattened = [d.replace("\n", "\t", 3) for d in documents]
        neighbors_str = "\n".join(
            f"id: {neighbor_id}\t{d}" for neighbor_id, d in zip(indices, flattened)
        )

        prompt = evolve_prompt.format(
            summary=note.summary,
            content=note.content,
            keywords=note.keywords,
            nearest_neighbors_memories=neighbors_str,
            neighbor_number=len(indices),
        )
        evolution = evolve_llm.invoke(prompt)
        if not evolution.should_evolve:
            return note

        for action in evolution.actions:
            if action == "strengthen":
                # Attach suggested links and refreshed tags to this note.
                note = note.model_copy(update={
                    "links": note.links + evolution.suggested_connections,
                    "tags": evolution.tags_to_update,
                })
            elif action == "update_neighbor":
                new_documents = []
                for document, new_summary, new_tags in zip(
                        documents,
                        evolution.new_summary_neighborhood,
                        evolution.new_tags_neighborhood,
                ):
                    document_dict = deserialize_from_document(document)
                    document_dict["summary"] = new_summary
                    document_dict["tags"] = new_tags
                    new_documents.append(serialize_to_document(document_dict))

                # Pass embeddings explicitly so stored vectors track the
                # rewritten documents (prevents stale embeddings).
                self.collection.update(
                    ids=indices,
                    embeddings=self.embedding_function(new_documents),
                    documents=new_documents,
                    metadatas=metadatas,
                )

        return note

    def _update_metadatas(self, current_time: str, indices: list[str], metadatas: list[meta]) -> None:
        """Bump access bookkeeping on *metadatas* and persist them to the collection."""
        for metadata in metadatas:
            metadata["last_accessed"] = current_time
            # Tolerate records written before `retrieval_count` existed
            # (the original `+= 1` raised KeyError on a missing key).
            metadata["retrieval_count"] = metadata.get("retrieval_count", 0) + 1

        self.collection.update(ids=indices, metadatas=metadatas)

    def _get_links(self, ids: list[str]) -> tuple[list[str], list[meta]]:
        """Fetch linked documents by id, bumping their access metadata.

        Returns ([], []) when *ids* is empty or nothing matches.
        """
        # Guard the common "no links" case up front rather than handing an
        # empty id list to Chroma's get().
        if not ids:
            return [], []

        results = self.collection.get(ids=ids)
        indices, documents, metadatas = results["ids"], results["documents"], results["metadatas"]
        if not indices:
            return [], []

        current_time = datetime.now().strftime("%Y%m%d%H%M%S")
        self._update_metadatas(current_time, indices, metadatas)
        return documents, metadatas

    def _top_k_documents(
            self,
            documents: list[str],
            metadatas: list[meta],
    ) -> Iterator[tuple[str, str]]:
        """Yield unique (document, timestamp) pairs: each direct hit first, then its links."""
        seen: set[str] = set()
        for document, metadata in zip(documents, metadatas):
            linked_documents, linked_metadatas = self._get_links(metadata.get("links", []))
            candidates = itertools.chain(
                [(document, metadata.get("timestamp"))],
                zip(
                    linked_documents,
                    (linked_meta.get("timestamp") for linked_meta in linked_metadatas),
                ),
            )
            for candidate, candidate_timestamp in candidates:
                if candidate not in seen:
                    seen.add(candidate)
                    yield candidate, candidate_timestamp
