#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
最小可运行示例：为“智能体”对话添加短期记忆（STM）与简单向量库长期记忆（LTM）。
特点：零外部依赖，仅用标准库。
运行：
  python agent_memory_min_cn.py

示例展示：
- 短期记忆：按“词数近似 token 预算”保留最近回合，超限触发简要“摘要”，并裁剪旧消息。
- 长期记忆：用“哈希向量 + 余弦相似度”的极简实现做相似检索，支持 JSON 持久化。
- 写入策略：基于“新颖度 + 提示关键词（如：记住/偏好）”的 salience 打分决定是否写入 LTM。
- 检索融合：查询会拼上 STM 摘要后检索 LTM，返回命中片段与分数，再与 STM 一起组装上下文。
- 响应策略：为了无依赖可跑，演示使用模板响应（可替换为任意 LLM 调用）。
"""

from __future__ import annotations
from dataclasses import dataclass, field
from typing import List, Dict, Any, Tuple, Optional
import re
import math
import time
import json
import os
from collections import defaultdict, deque
from heapq import nlargest

# -----------------------------
# 工具函数
# -----------------------------

def now_ts() -> float:
    """Return the current Unix timestamp in seconds (float)."""
    return time.time()

def tokenize(text: str) -> List[str]:
    """Minimal tokenizer: lowercase, then split into runs of letters, digits, or CJK ideographs."""
    pattern = r"[a-zA-Z0-9\u4e00-\u9fff]+"
    return re.findall(pattern, text.lower())

def cosine(a: List[float], b: List[float]) -> float:
    """Cosine similarity over the zipped pairs of a and b.

    Returns 0.0 when either (paired) norm is zero; like the original, only
    elements up to the shorter vector's length contribute.
    """
    pairs = list(zip(a, b))
    dot = sum(x * y for x, y in pairs)
    norm_a = sum(x * x for x, _ in pairs)
    norm_b = sum(y * y for _, y in pairs)
    if norm_a == 0.0 or norm_b == 0.0:
        return 0.0
    return dot / math.sqrt(norm_a * norm_b)

# -----------------------------
# 哈希向量嵌入（无依赖）
# -----------------------------

class HashingEmbedding:
    """Dependency-free embedder: a bag-of-words vector built with the hashing trick."""

    def __init__(self, dim: int = 512, seed: int = 1315423911):
        self.dim = dim
        self.seed = seed

    def _hash(self, token: str) -> int:
        """Simple rolling string hash, clamped to 31 bits."""
        h = self.seed
        for ch in token:
            h ^= ((h << 5) + ord(ch) + (h >> 2)) & 0xFFFFFFFF
        return h & 0x7FFFFFFF

    def embed(self, text: str) -> List[float]:
        """Tokenize *text*, bucket token counts by hash, and L2-normalize the result."""
        counts = [0.0] * self.dim
        tokens = tokenize(text)
        if not tokens:
            return counts
        for tok in tokens:
            counts[self._hash(tok) % self.dim] += 1.0
        # L2 normalization; `or 1.0` guards the (unreachable here) zero-norm case.
        norm = math.sqrt(sum(c * c for c in counts)) or 1.0
        return [c / norm for c in counts]

# -----------------------------
# 向量库（内存版）
# -----------------------------

@dataclass
class VSItem:
    """One vector-store entry: raw text plus its embedding and metadata."""
    id: str  # store-assigned identifier, e.g. "vs_3"
    text: str  # original text that was embedded
    meta: Dict[str, Any]  # arbitrary metadata (e.g. source, salience)
    vec: List[float]  # embedding vector for `text`
    ts: float = field(default_factory=now_ts)  # creation timestamp (Unix seconds)

class SimpleVectorStore:
    """In-memory vector store: brute-force cosine search plus optional JSON persistence."""

    def __init__(self, embedder: HashingEmbedding):
        self.embedder = embedder
        self.items: Dict[str, VSItem] = {}
        self._id_counter = 0  # monotonically increasing suffix for "vs_<n>" ids

    def add(self, text: str, meta: Optional[Dict[str, Any]] = None) -> str:
        """Embed *text*, store it with *meta*, and return the generated id ("vs_<n>")."""
        if meta is None:
            meta = {}
        self._id_counter += 1
        _id = f"vs_{self._id_counter}"
        vec = self.embedder.embed(text)
        self.items[_id] = VSItem(id=_id, text=text, meta=meta, vec=vec)
        return _id

    def delete(self, _id: str) -> bool:
        """Remove the item with *_id*; return True if it existed."""
        return self.items.pop(_id, None) is not None

    def search(self, query: str, k: int = 5) -> List[Tuple[VSItem, float]]:
        """Return up to k (item, cosine-score) pairs, best first."""
        qv = self.embedder.embed(query)
        scored = ((it, cosine(qv, it.vec)) for it in self.items.values())
        return nlargest(k, scored, key=lambda p: p[1])

    # Optional: JSON persistence (demo purposes only).
    def save(self, path: str) -> None:
        """Dump all items to *path* as a JSON array."""
        data = [
            {"id": it.id, "text": it.text, "meta": it.meta, "vec": it.vec, "ts": it.ts}
            for it in self.items.values()
        ]
        with open(path, "w", encoding="utf-8") as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

    def load(self, path: str) -> None:
        """Replace current items with those stored at *path*; no-op if the file is missing."""
        if not os.path.exists(path):
            return
        with open(path, "r", encoding="utf-8") as f:
            data = json.load(f)
        self.items.clear()
        for obj in data:
            self.items[obj["id"]] = VSItem(
                id=obj["id"],
                text=obj["text"],
                meta=obj.get("meta", {}),
                vec=obj["vec"],
                ts=obj.get("ts", now_ts())
            )
        # Re-seed the auto-increment counter so freshly added ids never collide
        # with ids loaded from disk.
        max_id = 0
        for key in self.items:
            try:
                max_id = max(max_id, int(key.split("_")[-1]))
            except ValueError:
                # Was a bare `except:`; only a non-numeric id suffix is expected here.
                pass
        self._id_counter = max_id

# -----------------------------
# 短期记忆（STM）
# -----------------------------

@dataclass
class Message:
    """One chat turn held in short-term memory."""
    role: str   # "user" / "assistant" / "system"
    content: str  # raw message text
    ts: float = field(default_factory=now_ts)  # creation timestamp (Unix seconds)

class ShortTermMemory:
    """Rolling window of recent turns, bounded by turn count and a word budget
    (word count is used as a rough proxy for a token budget)."""

    def __init__(self, max_turns: int = 12, max_words: int = 800):
        self.turns: deque[Message] = deque()
        self.max_turns = max_turns
        self.max_words = max_words
        self._summary: Optional[str] = None

    def append(self, role: str, content: str):
        """Add a turn, enforce the turn cap, then trim by word budget if needed."""
        self.turns.append(Message(role=role, content=content))
        while len(self.turns) > self.max_turns:
            self.turns.popleft()
        self._trim_if_needed()

    def _trim_if_needed(self):
        """When the rendered window exceeds the word budget, summarize and drop the oldest half."""
        word_count = len(tokenize(self.render(include_summary=False)))
        if word_count <= self.max_words:
            return
        # Naive "summary": first sentences of the most recent turns, one line.
        self._summary = self._naive_summary()
        for _ in range(len(self.turns) // 2):
            self.turns.popleft()

    def _naive_summary(self) -> str:
        """Join the first sentence of each of the last few messages into one line."""
        recent = list(self.turns)[-min(6, len(self.turns)):]
        first_sentences = []
        for msg in recent:
            pieces = re.split(r"[。.!?！？]", msg.content.strip())
            if pieces and pieces[0]:
                first_sentences.append(f"{msg.role}: {pieces[0]}")
        return " | ".join(first_sentences[:8])

    def render(self, include_summary: bool = True) -> str:
        """Render the window as text, optionally prefixed by the running summary."""
        lines = []
        if include_summary and self._summary:
            lines.append(f"[STM-SUMMARY] {self._summary}")
        lines.extend(f"{msg.role}: {msg.content}" for msg in self.turns)
        return "\n".join(lines)

    @property
    def summary(self) -> str:
        """The last computed summary, or "" if none exists yet."""
        return self._summary or ""

# -----------------------------
# 长期记忆（LTM）写入策略
# -----------------------------

def estimate_novelty(vecstore: SimpleVectorStore, text: str) -> float:
    """Novelty in [0, 1]: one minus the best similarity found in the store.

    An empty store means everything is maximally novel (1.0).
    """
    if not vecstore.items:
        return 1.0
    top = vecstore.search(text, k=1)
    best_score = top[0][1] if top else 0.0
    return max(0.0, 1.0 - best_score)

def important_hint(text: str) -> float:
    """Return 1.0 if *text* contains a 'please remember this' cue, else 0.0."""
    cues = (
        "记住", "我的偏好", "我喜欢", "不要忘",
        "prefer", "i like", "my preference", "remember",
    )
    lowered = text.lower()
    for cue in cues:
        if cue in lowered:
            return 1.0
    return 0.0

def compute_salience(vecstore: SimpleVectorStore, user_msg: str, agent_msg: str) -> float:
    """Minimal salience: weighted blend of novelty (0.7) and memory cues (0.3), clipped to [0, 1].

    NOTE: agent_msg is currently unused; it is kept in the signature for interface stability.
    """
    blended = 0.7 * estimate_novelty(vecstore, user_msg) + 0.3 * important_hint(user_msg)
    return min(1.0, max(0.0, blended))

# -----------------------------
# 记忆管理器（粘合 STM + LTM）
# -----------------------------

class MemoryManager:
    """Glue layer combining short-term memory (STM) with the vector-store LTM."""

    def __init__(self, embedder: HashingEmbedding, vecstore: SimpleVectorStore,
                 stm_max_turns: int = 12, stm_max_words: int = 800):
        self.embedder = embedder
        self.vs = vecstore
        self.stm = ShortTermMemory(max_turns=stm_max_turns, max_words=stm_max_words)

    def observe_user(self, text: str):
        """Record a user utterance in STM."""
        self.stm.append("user", text)

    def observe_agent(self, text: str):
        """Record an assistant utterance in STM."""
        self.stm.append("assistant", text)

    def maybe_persist(self, user_msg: str, agent_msg: str, threshold: float = 0.55) -> Optional[str]:
        """Write user_msg to LTM when salience clears *threshold*; return the new id or None."""
        score = compute_salience(self.vs, user_msg, agent_msg)
        if score < threshold:
            return None
        return self.vs.add(
            text=user_msg,
            meta={"source": "chat", "salience": score, "ts": now_ts()},
        )

    def retrieve_context(self, query: str, k: int = 4) -> Dict[str, Any]:
        """Search LTM (query augmented with the STM summary, which helps recall) and
        return the rendered STM plus scored hits above a small noise floor."""
        augmented = f"{query} || {self.stm.summary}" if self.stm.summary else query
        matches = self.vs.search(augmented, k=k)
        hits = [
            {"id": item.id, "text": item.text, "score": float(score), "meta": item.meta}
            for item, score in matches
            if score > 0.05
        ]
        return {"stm": self.stm.render(), "ltm_hits": hits}

# -----------------------------
# 极简“Agent”——用记忆增强的模板式回复（可替换为 LLM）
# -----------------------------

def agent_reply(query: str, context: Dict[str, Any]) -> str:
    """Demo reply: a template response shaped by LTM hits and STM text.

    In a real system, replace this function with a call to any LLM.
    """
    hits = context.get("ltm_hits", [])
    stm_text = context.get("stm", "")

    # Simple rule: surface likes / dislikes found in retrieved memories.
    # A memory text may match both marker lists, matching the original behavior.
    like_markers = ["我喜欢", "i like", "my preference"]
    dislike_markers = ["不喜欢", "不要", "dislike"]
    likes = [h["text"] for h in hits if any(m in h["text"] for m in like_markers)]
    dislikes = [h["text"] for h in hits if any(m in h["text"] for m in dislike_markers)]

    lines = [f"你问：{query}"]
    if likes or dislikes:
        lines.append("我记得你的偏好：")
        if likes:
            lines.append(" - 偏好：" + "；".join(likes[:2]))
        if dislikes:
            lines.append(" - 忌口：" + "；".join(dislikes[:2]))

    asks_for_drink = "饮料" in query or "喝" in query
    recalls_tea = "昨天" in query and ("喜欢什么茶" in query or "偏好" in query)
    if asks_for_drink:
        # Drink question: let remembered likes / dislikes drive the suggestion.
        if any("不喜欢太甜" in x for x in dislikes) or "不喜欢太甜" in stm_text:
            lines.append("建议：乌龙茶/无糖绿茶/美式咖啡，避免含糖饮品。")
        elif any("乌龙茶" in x for x in likes):
            lines.append("建议：乌龙茶或半糖青茶。")
        else:
            lines.append("建议：尝试清爽的茶饮或黑咖啡。")
    elif recalls_tea:
        # Simulated "recall" of a previously stated tea preference.
        if any("乌龙茶" in x for x in likes):
            lines.append("你说过喜欢乌龙茶。")
        else:
            lines.append("我还没记录到具体的茶类偏好。")
    else:
        lines.append("（如需更智能回答，可用 LLM 替换本函数。）")

    # Transparency: show the top-2 retrieved memories with their scores.
    if hits:
        lines.append("参考记忆：")
        for h in hits[:2]:
            lines.append(f"  • [{h['score']:.2f}] {h['text']}")

    return "\n".join(lines)

# -----------------------------
# 演示对话
# -----------------------------

def run_demo(save_path: Optional[str] = None):
    """Scripted demo: wire embedder + vector store + memory manager, replay a dialogue.

    When save_path is given, existing LTM is loaded from it first and the
    (possibly extended) LTM is written back at the end.
    """
    print("== 最小可运行的记忆增强 Agent 演示 ==")
    embedder = HashingEmbedding(dim=512)
    store = SimpleVectorStore(embedder)
    manager = MemoryManager(embedder, store, stm_max_turns=10, stm_max_words=200)

    # Reload previously persisted long-term memory, if any.
    if save_path and os.path.exists(save_path):
        store.load(save_path)
        print(f"[LTM] 已加载：{save_path}，条目数：{len(store.items)}")

    # Demo script; `None` assistant turns are generated by the program.
    script = [
        ("user", "你好！"),
        ("assistant", "你好，我是你的智能助理。"),
        ("user", "我喜欢乌龙茶，不喜欢太甜的饮料，请记住。"),
        ("assistant", "好的，我已记录你的饮品偏好。"),
        ("user", "推荐一杯饮料？"),
        ("assistant", None),
        ("user", "昨天我说我喜欢什么茶？"),
        ("assistant", None),
        ("user", "给个不含糖的下午茶建议。"),
        ("assistant", None),
    ]

    for role, content in script:
        if role == "user":
            print(f"\n[user] {content}")
            manager.observe_user(content)
            continue
        if content is not None:
            print(f"\n[assistant] {content}")
            manager.observe_agent(content)
            continue
        # Generated turn: build context -> reply -> update STM -> maybe write LTM.
        query = manager.stm.turns[-1].content if manager.stm.turns else ""
        ctx = manager.retrieve_context(query, k=4)
        reply = agent_reply(query, ctx)
        print(f"[assistant]\n{reply}")
        manager.observe_agent(reply)
        # Treat the preceding user message as the candidate memory.
        prev_user = manager.stm.turns[-2].content if len(manager.stm.turns) >= 2 else ""
        new_id = manager.maybe_persist(prev_user, reply, threshold=0.55)
        if new_id:
            print(f"[LTM] 已写入 id={new_id}")

    # Persist LTM (optional).
    if save_path:
        store.save(save_path)
        print(f"\n[LTM] 已保存到 {save_path}（当前条目数={len(store.items)}）")

if __name__ == "__main__":
    # By default, persist the vector store to a JSON file in the current directory.
    run_demo(save_path="vectorstore_demo.json")
