from ast import List
import os
import json
import numpy as np
from .memoryitem import MemoryItem
from rank_bm25 import BM25Okapi
from typing import List, Dict, Optional
from sklearn.feature_extraction.text import TfidfVectorizer
from memory.importance import ImportanceCalculator

# 动态导入 faiss
try:
    import faiss
    FAISS_AVAILABLE = True
except ImportError:
    FAISS_AVAILABLE = False
    print("警告: faiss 模块未安装，部分功能可能受限。")


class AgentMemory:
    """Layered memory store (sensory / short-term / long-term) for one agent,
    with hybrid BM25 + vector (faiss/TF-IDF) retrieval and JSON persistence.
    """

    def __init__(self, agent_id: str):
        """Create an empty memory for *agent_id* and set up retrieval indices.

        Args:
            agent_id: Identifier used to derive the on-disk file names.
        """
        self.agent_id = agent_id
        self.sensory_memory: List[str] = []            # last raw inputs, FIFO, max 10
        self.short_term_memory: List[MemoryItem] = []  # rolling window, max 100
        self.long_term_memory: List[MemoryItem] = []   # items promoted when importance > 2.0
        # NOTE(review): 364 looks like a typo for the common 384-dim setting —
        # confirm before changing; persisted faiss indices depend on this value.
        self.embedding_dim = 364
        self.bm25: Optional[BM25Okapi] = None
        self.importance_calc = ImportanceCalculator()
        self.vectorizer = TfidfVectorizer(max_features=self.embedding_dim)

        # Initialise the faiss index when the library is available.
        if FAISS_AVAILABLE:
            self.faiss_index = faiss.IndexFlatL2(self.embedding_dim)
        else:
            self.faiss_index = None
            print("警告: faiss 不可用，将使用 BM25 作为主要检索方式。")

        self._init_bm25()

    def loadMemory(self):
        """Load persisted memories (and faiss index, if present) from disk.

        Fix: the BM25 index is rebuilt afterwards so retrieval reflects the
        loaded items — the original left it stale.
        """
        json_file_path = f"{self.agent_id}_memory.json"
        vector_file_path = f"{self.agent_id}_faiss.index"
        if os.path.exists(json_file_path):
            with open(json_file_path, "r", encoding="utf-8") as f:
                data = json.load(f)
            self.short_term_memory = [MemoryItem.from_dict(item) for item in data.get("short_term", [])]
            self.long_term_memory = [MemoryItem.from_dict(item) for item in data.get("long_term", [])]
        if FAISS_AVAILABLE and os.path.exists(vector_file_path):
            self.faiss_index = faiss.read_index(vector_file_path)
        # Keep lexical retrieval consistent with the freshly loaded memories.
        self._init_bm25()

    def saveMemory(self):
        """Persist short/long-term memories as JSON and the faiss index beside it."""
        data = {
            "short_term": [item.toDict() for item in self.short_term_memory],
            "long_term": [item.toDict() for item in self.long_term_memory]
        }
        json_file_path = f"{self.agent_id}_memory.json"
        vector_file_path = f"{self.agent_id}_faiss.index"
        # ensure_ascii=False keeps Chinese memory text human-readable on disk.
        with open(json_file_path, 'w', encoding="utf-8") as f:
            json.dump(data, f, ensure_ascii=False)
        if FAISS_AVAILABLE and self.faiss_index is not None:
            faiss.write_index(self.faiss_index, vector_file_path)

    def addMemory(self, text: str, recent_messages: List[str], keywords: Optional[List[str]] = None):
        """Ingest one new piece of text into the memory hierarchy.

        Args:
            text: The new memory content.
            recent_messages: Recent conversation, used for importance scoring.
            keywords: Optional keywords passed through to the importance scorer.
        """
        # Sensory buffer keeps only the 10 most recent raw inputs.
        self.sensory_memory.append(text)
        if len(self.sensory_memory) > 10:
            self.sensory_memory.pop(0)
        item = MemoryItem(text)
        self.short_term_memory.append(item)
        if len(self.short_term_memory) > 100:
            self.short_term_memory.pop(0)
        item.importance = self.importance_calc.calculate(text, recent_messages, self.long_term_memory, keywords)
        # Sufficiently important items are additionally promoted to long-term memory.
        if item.importance > 2.0:
            self.long_term_memory.append(item)
        self._updateIndices()

    def _init_bm25(self):
        """(Re)build the BM25 index over all current memory texts.

        Leaves ``self.bm25`` untouched when there are no memories, because
        BM25Okapi cannot be constructed over an empty corpus.
        """
        texts = [m.text for m in self.short_term_memory + self.long_term_memory]
        if not texts:
            return
        self.bm25 = BM25Okapi([text.split() for text in texts])

    def _updateIndices(self):
        """Rebuild the TF-IDF/faiss vector index and the BM25 index.

        Fix: guards the empty-corpus case — ``fit_transform([])`` raises
        ValueError in the original.
        """
        texts = [m.text for m in self.short_term_memory + self.long_term_memory]
        if not texts:
            return
        vectors = self.vectorizer.fit_transform(texts).toarray().astype('float32')
        if FAISS_AVAILABLE:
            # The fitted vocabulary can be smaller than max_features, so the
            # vector dimension may differ from the current index dimension.
            if vectors.shape[1] != self.faiss_index.d:
                self.faiss_index = faiss.IndexFlatL2(vectors.shape[1])
            self.faiss_index.reset()
            self.faiss_index.add(vectors)
        self._init_bm25()

    @staticmethod
    def generateSummary(text: str, top_n: int = 2) -> str:
        """Extractive summary: keep the *top_n* highest TF-IDF-scored sentences.

        Fix: the original definition had neither ``self`` nor ``@staticmethod``,
        so calling it on an instance bound the instance object to *text*.
        Sentences are split on the Chinese full stop "。" and re-emitted in
        their original order.
        """
        sentences = [s for s in text.split("。") if s.strip()]
        if len(sentences) <= 1:
            return text
        vectorizer = TfidfVectorizer()
        matrix = vectorizer.fit_transform(sentences)
        scores = np.array(matrix.sum(axis=1)).flatten()
        top_indices = np.argsort(scores)[-top_n:][::-1]
        return '。'.join(sentences[i] for i in sorted(top_indices)) + '。'

    def hybridSearch(self, query: str, k: int = 5) -> List[str]:
        """Retrieve up to *k* memory texts, combining BM25 and vector search.

        Fix: faiss pads its result with -1 when the index holds fewer than
        *k* vectors; the original used those as Python indices, silently
        returning the wrong (last) item. Invalid indices are now dropped,
        and BM25 ranking order is preserved instead of an unordered set union.
        """
        if not self.bm25:
            return []
        bm25_scores = self.bm25.get_scores(query.split())
        bm25_indices = np.argsort(bm25_scores)[-k:][::-1]
        candidates = [int(i) for i in bm25_indices]

        if FAISS_AVAILABLE and self.faiss_index is not None:
            query_vec = self.vectorizer.transform([query]).toarray().astype('float32')
            _, indices = self.faiss_index.search(query_vec, k)
            for idx in indices.flatten().tolist():
                if idx >= 0 and idx not in candidates:
                    candidates.append(int(idx))

        all_memories = self.short_term_memory + self.long_term_memory
        return [all_memories[i].text for i in candidates[:k]
                if 0 <= i < len(all_memories)]

    def buildContext(self, query: str, k: int = 5) -> str:
        """Build a textual context block from the most relevant memories.

        Fix: ``hybridSearch`` returns plain strings, but the original indexed
        them like dicts (``m["importance"]``), raising TypeError. Each text is
        mapped back to its MemoryItem so importance/recency can be read.
        """
        texts = self.hybridSearch(query, k)
        by_text: Dict[str, MemoryItem] = {
            m.text: m for m in self.short_term_memory + self.long_term_memory
        }
        items = [by_text[t] for t in texts if t in by_text]
        # assumes MemoryItem carries a last_mentioned recency field — TODO confirm
        items.sort(
            key=lambda m: (m.importance, getattr(m, "last_mentioned", 0)),
            reverse=True,
        )
        filtered = [m for m in items if m.importance > 0.5]
        context = "用户历史记忆\n"
        for i, mem in enumerate(filtered):
            context += f"{i+1}. {mem.text}(重要性：{mem.importance:.1f})\n"
        return context