﻿from __future__ import annotations

from typing import Any, AsyncIterator, Iterable

import structlog
from langchain_core.messages import AIMessage, AIMessageChunk, HumanMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from pathlib import Path

from app.core.config import settings

try:
  from langchain_community.chat_models.tongyi import ChatTongyi
except ImportError:  # pragma: no cover
  ChatTongyi = None

logger = structlog.get_logger('love_notes.langchain')


class LoveNotesAI:
  def __init__(self) -> None:
    """Build all prompt templates and prepare the lazy LLM cache.

    System prompt texts are loaded from the sibling ``prompts`` directory
    when the corresponding file exists; otherwise the inline Chinese
    defaults below are used. LLM clients themselves are created on demand
    by ``get_llm`` and cached per provider.
    """
    self.default_provider = settings.ai_provider
    # Provider name -> LLM instance (or None when that provider is unconfigured).
    self._llm_cache: dict[str, Any] = {}
    self._prompts_dir = Path(__file__).resolve().parent / 'prompts'

    def _load(name: str, default_text: str) -> str:
      # Prefer an on-disk prompt override; fall back to the inline default.
      p = self._prompts_dir / name
      try:
        return p.read_text(encoding='utf-8')
      except FileNotFoundError:
        return default_text

    def _esc(text: str) -> str:
      # Escape braces so ChatPromptTemplate doesn't treat JSON braces as variables
      return text.replace('{', '{{').replace('}', '}}')

    # Diary summary: gentle <=80-char summary with trailing emotion labels.
    self.summary_prompt = ChatPromptTemplate.from_messages(
      [
        (
          'system',
          _esc(_load('diary_summary_system.md', '你是一名恋爱关系记录教练，请以温柔积极的语气用不超过 80 字总结输入日记，并在末尾括号列出情绪标签。')),
        ),
        ('human', '日记内容：{content}\n心情：{mood}\n标签：{tags}\n请输出摘要：'),
      ]
    )
    # Next-step suggestion: one concrete, actionable tip (<=60 chars).
    self.suggestion_prompt = ChatPromptTemplate.from_messages(
      [
        (
          'system',
          _esc(_load('suggestion_system.md', '你是一名贴心的情侣生活助理，根据当前上下文给出具体、可执行且不超过 60 字的下一步建议。')),
        ),
        ('human', '上下文：{context}\n当前心情：{mood}\n关键词：{tags}\n请给出一个建议：'),
      ]
    )
    # Free-form coaching chat; prior turns are injected via the placeholder slot.
    self.chat_prompt = ChatPromptTemplate.from_messages(
      [
        (
          'system',
          _esc(_load('chat_system_prompt.md', '你是一名长期陪伴情侣的情感教练，会记住既往互动，总结偏好，提供具有共情且具体的建议，请使用中文回答。')),
        ),
        ('placeholder', '{history}'),
        ('human', '{message}'),
      ]
    )
    # Rolling conversation summary (<=120 chars) folding new turns into the old one.
    self.history_summary_prompt = ChatPromptTemplate.from_messages(
      [
        (
          'system',
          _esc(_load('history_summary_system.md', '你是恋爱关系档案员，将过去的对话凝练成不超过 120 字的摘要，保留双方偏好、语气和约定。')),
        ),
        ('human', '已有摘要：{previous}\n新增对话：\n{conversation}\n请输出新的摘要：'),
      ]
    )
    self.output_parser = StrOutputParser()
    # Stable preference-profile extraction; the model must answer with strict JSON.
    self.pref_extract_prompt = ChatPromptTemplate.from_messages(
      [
        (
          'system',
          _esc(_load(
            'pref_extract_system.md',
            '你是“稳定偏好画像”整理助手。仅提取明确而长期有效的偏好（喜好/禁忌/礼物/宠物/口味等），忽略临时情绪与模糊表述。\n'
            '输出严格 JSON（UTF-8，无注释、无多余文字）。\n'
            '字段：{"likes":[],"dislikes":[],"topics":[],"tone_preference":[],"activities":[],"food":[],"pets":[],"gifts":[],"travel":[],"schedule":[],"boundaries":[],"nicknames":[],"other":[],"food_detail":{},"gifts_detail":{},"boundaries_detail":[]}\n'
            '规则：1) 追加不重复条目；2) 处理否定与冲突，最近表达优先；3) 值用精简中文短语；4) 只保留重要信息。',
          )),
        ),
        (
          'human',
          '已有画像：\n{previous}\n\n对话片段（角色:内容，每行一条，按时间排序）：\n{conversation}\n\n只输出合并后的 JSON：',
        ),
      ]
    )
    # Long-term memory extraction; the system text is expected to live on disk.
    self.memory_extract_prompt = ChatPromptTemplate.from_messages(
      [
        (
          'system',
          _esc(_load('memory_extract_system.md', '请按要求输出 JSON')),
        ),
        (
          'human',
          '已有记忆：\n{previous}\n\n对话片段（角色:内容，每行一条，按时间排序）：\n{conversation}\n\n只输出合并后的 JSON：',
        ),
      ]
    )
    # Mood/tag extraction from diary text; strict JSON response expected.
    self.extract_prompt = ChatPromptTemplate.from_messages(
      [
        (
          'system',
          '从日记文本中提取“心情(单个中文词或短语)”与“标签(最多6个中文短语)”，用严格的 JSON 返回：{{"mood":"…","tags":["…"]}}。不要输出多余文字。',
        ),
        ('human', '日记内容：\n{content}\n若文本无明显线索，你可以给出合理的温和心情（如“平静”）与 1-3 个通用标签。'),
      ]
    )

  def _build_llm(self, provider: str) -> Any | None:
    """Instantiate the chat model for *provider*.

    Returns None when the provider's API key is missing, when the optional
    DashScope dependency is not installed, or when the provider name is
    unknown.
    """
    temperature = settings.langchain_temperature
    if provider == 'openai':
      if not settings.openai_api_key:
        return None
      return ChatOpenAI(
        model=settings.langchain_model,
        api_key=settings.openai_api_key,
        temperature=temperature,
      )
    if provider == 'deepseek':
      # DeepSeek exposes an OpenAI-compatible API: reuse ChatOpenAI with a
      # custom base URL.
      if not settings.deepseek_api_key:
        return None
      return ChatOpenAI(
        model=settings.langchain_model,
        api_key=settings.deepseek_api_key,
        temperature=temperature,
        base_url=settings.deepseek_api_base,
      )
    if provider == 'dashscope':
      if ChatTongyi is None:
        logger.warning('DashScope model unavailable: please install tongyi extra package')
        return None
      if not settings.dashscope_api_key:
        return None
      return ChatTongyi(model_name=settings.dashscope_model, api_key=settings.dashscope_api_key, temperature=temperature)
    # Use a structured key=value field, consistent with the other structlog
    # calls in this module, instead of %-style positional interpolation.
    logger.warning('Unknown AI provider', provider=provider)
    return None

  def get_llm(self, provider: str | None = None) -> Any | None:
    provider_key = provider or self.default_provider
    if provider_key not in self._llm_cache:
      self._llm_cache[provider_key] = self._build_llm(provider_key)
      if not self._llm_cache[provider_key]:
        logger.warning('LangChain disabled: missing provider config', provider=provider_key)
    return self._llm_cache.get(provider_key)

  async def summarize_diary(
    self,
    *,
    content: str,
    mood: str | None,
    tags: list[str],
    provider: str | None = None,
  ) -> str | None:
    llm = self.get_llm(provider)
    if not llm:
      return None
    chain = self.summary_prompt | llm | self.output_parser
    result = await chain.ainvoke({'content': content, 'mood': mood or '未知', 'tags': ','.join(tags)})
    return result.strip()

  async def extract_mood_and_tags(
    self,
    *,
    content: str,
    provider: str | None = None,
  ) -> dict | None:
    llm = self.get_llm(provider)
    if not llm:
      return None
    chain = self.extract_prompt | llm | self.output_parser
    text = await chain.ainvoke({'content': content})
    try:
      data = text.strip()
      # Ensure valid JSON substring
      start = data.find('{')
      end = data.rfind('}')
      if start != -1 and end != -1:
        import json
        obj = json.loads(data[start : end + 1])
        mood = obj.get('mood')
        tags = obj.get('tags') or []
        if isinstance(mood, str) and isinstance(tags, list):
          tags = [str(t).strip() for t in tags if str(t).strip()]
          return {'mood': mood.strip(), 'tags': tags[:6]}
    except Exception:
      logger.warning('extract_mood_and_tags parse failed', raw=text)
    return None

  async def suggest_next_step(
    self,
    *,
    context: str,
    mood: str | None,
    tags: list[str],
    provider: str | None = None,
  ) -> str | None:
    llm = self.get_llm(provider)
    if not llm:
      return None
    chain = self.suggestion_prompt | llm | self.output_parser
    result = await chain.ainvoke(
      {
        'context': context,
        'mood': mood or '平静',
        'tags': ','.join(tags) if tags else '无',
      }
    )
    return result.strip()

  async def chat(
    self,
    *,
    history: list[dict[str, str]],
    message: str,
    provider: str | None = None,
    summary: str | None = None,
  ) -> str | None:
    llm = self.get_llm(provider)
    if not llm:
      return None
    messages = self._convert_history(history, summary)
    chain = self.chat_prompt | llm | self.output_parser
    result = await chain.ainvoke({'history': messages, 'message': message})
    return result.strip()

  async def chat_stream(
    self,
    *,
    history: list[dict[str, str]],
    message: str,
    provider: str | None = None,
    summary: str | None = None,
  ) -> AsyncIterator[str]:
    """Yield the assistant reply incrementally as text chunks.

    Yields nothing when no LLM is configured. When the prompt|llm pipeline
    exposes ``astream``, its native chunk stream is forwarded; otherwise
    the full non-streaming reply is yielded as one chunk.
    """
    llm = self.get_llm(provider)
    if not llm:
      # Bare return ends the async generator without yielding anything.
      return

    messages = self._convert_history(history, summary)
    pipeline = self.chat_prompt | llm

    # Prefer native streaming if supported by the LLM expression.
    stream_method = getattr(pipeline, 'astream', None)
    if callable(stream_method):
      async for chunk in stream_method({'history': messages, 'message': message}):
        # Chunks may arrive as AIMessageChunk, plain dicts, or raw strings
        # depending on the provider integration; normalize to text here.
        content: str | None = None
        if isinstance(chunk, AIMessageChunk):
          content = chunk.content
        elif isinstance(chunk, dict):
          content = chunk.get('content')
        elif isinstance(chunk, str):
          content = chunk
        if content:
          # Empty/None chunks are silently skipped.
          yield content
      return

    # Fallback to non-streaming mode.
    result = await self.chat(history=history, message=message, provider=provider, summary=summary)
    if result:
      yield result

  async def extract_and_merge_user_prefs(
    self,
    *,
    previous: dict | None,
    messages: Iterable[dict[str, str]],
    provider: str | None = None,
  ) -> dict | None:
    llm = self.get_llm(provider)
    # Fallback: 无 LLM 时启用启发式抽取，保证基本可用
    if not llm:
      return self._heuristic_merge_prefs(previous or {}, list(messages))
    text = '\n'.join(f"{m.get('role','user')}: {m.get('content','')}" for m in messages)
    chain = self.pref_extract_prompt | llm | self.output_parser
    raw = await chain.ainvoke({'previous': (previous or {}), 'conversation': text})
    try:
      data = raw.strip()
      start = data.find('{')
      end = data.rfind('}')
      if start != -1 and end != -1:
        import json as _json
        obj = _json.loads(data[start : end + 1])
        if isinstance(obj, dict):
          obj = self._normalize_prefs(obj)
          obj = self._resolve_prefs_conflicts(obj, list(messages))
          return obj
    except Exception:
      logger.warning('extract_user_prefs parse failed', raw=raw)
    # 回退到启发式抽取，尽量不丢失关键信息
    return self._heuristic_merge_prefs(previous or {}, list(messages))

  async def extract_and_merge_user_memories(
    self,
    *,
    previous: dict | None,
    messages: Iterable[dict[str, str]],
    provider: str | None = None,
  ) -> dict | None:
    llm = self.get_llm(provider)
    if not llm:
      return self._heuristic_memories(previous or {}, list(messages))
    text = '\n'.join(f"{m.get('role','user')}: {m.get('content','')}" for m in messages)
    chain = self.memory_extract_prompt | llm | self.output_parser
    raw = await chain.ainvoke({'previous': (previous or {}), 'conversation': text})
    try:
      data = raw.strip()
      start = data.find('{')
      end = data.rfind('}')
      if start != -1 and end != -1:
        import json as _json
        obj = _json.loads(data[start : end + 1])
        if isinstance(obj, dict):
          return self._normalize_memories(obj)
    except Exception:
      logger.warning('extract_user_memories parse failed', raw=raw)
    return self._heuristic_memories(previous or {}, list(messages))

  @staticmethod
  def _normalize_prefs(prefs: dict) -> dict:
    def _norm_list(val):
      items = []
      if isinstance(val, list):
        for it in val:
          s = str(it).strip()
          if s and len(s) <= 20:  # 丢弃过长/啰嗦项
            items.append(s)
      # 去重并保持顺序
      seen = set()
      unique = []
      for s in items:
        if s not in seen:
          seen.add(s)
          unique.append(s)
      return unique
    keys = [
      'likes','dislikes','topics','tone_preference','activities','food','pets','gifts','travel','schedule','boundaries','nicknames','other'
    ]
    out = {k: _norm_list(prefs.get(k, [])) for k in keys}
    # 细粒度字段：允许部分字典/列表结构透传并做简单裁剪
    fd = prefs.get('food_detail')
    if isinstance(fd, dict):
      out['food_detail'] = {
        'taste': _norm_list(fd.get('taste', [])),
        'allergies': _norm_list(fd.get('allergies', [])),
        'avoids': _norm_list(fd.get('avoids', [])),
      }
    gd = prefs.get('gifts_detail')
    if isinstance(gd, dict):
      pr = str(gd.get('price_range') or '').strip()
      cats = _norm_list(gd.get('categories', []))
      out['gifts_detail'] = {'price_range': pr[:32], 'categories': cats}
    bd = prefs.get('boundaries_detail')
    if isinstance(bd, list):
      norm_bd = []
      for it in bd[:10]:
        if isinstance(it, dict):
          nm = str(it.get('item') or '').strip()[:20]
          lv = str(it.get('level') or '').strip()[:12]
          if nm:
            norm_bd.append({'item': nm, 'level': lv})
      out['boundaries_detail'] = norm_bd
    return out

  @staticmethod
  def _resolve_prefs_conflicts(prefs: dict, messages: list[dict[str, str]]) -> dict:
    # 简单中文极性判别：最近提及优先
    positive_words = ('喜欢','爱','偏好','想要','要','想吃','爱吃','适合')
    negative_words = ('不喜欢','不爱','讨厌','不要','拒绝','不吃','忌','过敏','避免')
    def last_polarity(term: str) -> int:
      for m in reversed(messages):
        text = str(m.get('content',''))
        if term in text:
          if any(w in text for w in negative_words):
            return -1
          if any(w in text for w in positive_words):
            return 1
      return 0
    likes = set(prefs.get('likes', []))
    dislikes = set(prefs.get('dislikes', []))
    both = likes & dislikes
    for term in list(both):
      pol = last_polarity(term)
      if pol >= 0:
        # 偏好或未知 -> 移出禁忌
        dislikes.discard(term)
      else:
        # 否定 -> 移出喜欢
        likes.discard(term)
    prefs['likes'] = list(likes)
    prefs['dislikes'] = list(dislikes)
    return prefs

  @staticmethod
  def _tokenize_cn(text: str) -> list[str]:
    # 粗粒度中文分词：按标点与空白切分
    import re
    parts = re.split(r"[，。、“”\s,;.!?]+", text)
    return [p.strip() for p in parts if p.strip()]

  def _heuristic_merge_prefs(self, previous: dict, messages: list[dict[str, str]]) -> dict:
    import re
    prefs = self._normalize_prefs(previous or {})
    likes = set(prefs.get('likes', []))
    dislikes = set(prefs.get('dislikes', []))
    food = set(prefs.get('food', []))

    like_patterns = [
      r"喜欢吃(?P<obj>[^，。,.\s]{1,8})",
      r"爱吃(?P<obj>[^，。,.\s]{1,8})",
      r"喜欢(?P<obj>[^，。,.\s]{1,6})",
      r"爱(?P<obj>[^，。,.\s]{1,6})",
    ]
    dislike_patterns = [
      r"不喜欢吃(?P<obj>[^，。,.\s]{1,8})",
      r"不吃(?P<obj>[^，。,.\s]{1,8})",
      r"不要(?P<obj>[^，。,.\s]{1,6})",
      r"不喜欢(?P<obj>[^，。,.\s]{1,6})",
      r"讨厌(?P<obj>[^，。,.\s]{1,6})",
      r"对(?P<obj>[^，。,.\s]{1,6})过敏",
    ]

    def _match_any(patterns: list[str], text: str) -> list[str]:
      hits: list[str] = []
      for pat in patterns:
        for m in re.finditer(pat, text):
          obj = (m.group('obj') or '').strip()
          if obj and obj not in hits:
            hits.append(obj)
      return hits

    for m in messages:
      content = str(m.get('content') or '')
      if not content:
        continue
      pos = _match_any(like_patterns, content)
      neg = _match_any(dislike_patterns, content)
      for obj in pos:
        likes.add(obj)
        # 食物相关常见词：吃/口味直接纳入 food
        food.add(obj)
      for obj in neg:
        dislikes.add(obj)
        food.add(obj)

    prefs['likes'] = list(likes)
    prefs['dislikes'] = list(dislikes)
    prefs['food'] = list(food)
    # 再做一次冲突消解（最近表达优先）
    prefs = self._resolve_prefs_conflicts(prefs, messages)
    return prefs

  @staticmethod
  def summarize_prefs_text(prefs: dict) -> str:
    if 'likes' not in prefs and isinstance(prefs.get('prefs'), dict):
      prefs = prefs['prefs']
    def _fmt(key: str, title: str) -> str | None:
      vals = [str(x).strip() for x in prefs.get(key, []) if str(x).strip()]
      if not vals:
        return None
      return f"- {title}：" + '、'.join(vals[:8])
    parts = [
      _fmt('likes', '喜好'),
      _fmt('dislikes', '不喜欢'),
      _fmt('topics', '常聊话题'),
      _fmt('tone_preference', '偏好语气'),
      _fmt('activities', '偏好活动'),
      _fmt('food', '口味/餐饮'),
      _fmt('pets', '宠物偏好'),
      _fmt('gifts', '礼物偏好'),
      _fmt('travel', '出行/旅行'),
      _fmt('schedule', '作息/时间'),
      _fmt('boundaries', '边界/禁忌'),
      _fmt('nicknames', '昵称/称呼'),
      _fmt('other', '其他'),
    ]
    # 细粒度字段
    fd = prefs.get('food_detail') or {}
    if isinstance(fd, dict):
      taste = [str(x).strip() for x in (fd.get('taste') or []) if str(x).strip()]
      if taste:
        parts.append(f"- 口味偏好：{'、'.join(taste[:8])}")
      allerg = [str(x).strip() for x in (fd.get('allergies') or []) if str(x).strip()]
      if allerg:
        parts.append(f"- 过敏/忌口：{'、'.join(allerg[:8])}")
    gd = prefs.get('gifts_detail') or {}
    if isinstance(gd, dict):
      pr = str(gd.get('price_range') or '').strip()
      cats = [str(x).strip() for x in (gd.get('categories') or []) if str(x).strip()]
      if pr:
        parts.append(f"- 礼物价位：{pr}")
      if cats:
        parts.append(f"- 礼物类别：{'、'.join(cats[:8])}")
    bd = prefs.get('boundaries_detail') or []
    if isinstance(bd, list) and bd:
      bounds = []
      for item in bd[:6]:
        if isinstance(item, dict):
          nm = str(item.get('item') or '').strip()
          lv = str(item.get('level') or '').strip()
          if nm:
            bounds.append(f"{nm}{'('+lv+')' if lv else ''}")
      if bounds:
        parts.append(f"- 明确边界：{'、'.join(bounds)}")
    lines = [p for p in parts if p]
    return '\n'.join(lines) if lines else '暂无偏好画像'

  @staticmethod
  def summarize_memories_text(mem: dict) -> str:
    parts: list[str] = []
    if isinstance(mem.get('facts'), dict):
      facts = mem['facts']
      name = str(facts.get('name') or '').strip()
      if name:
        parts.append(f"- 姓名：{name}")
      birthday = str(facts.get('birthday') or '').strip()
      if birthday:
        parts.append(f"- 生日：{birthday}")
      nicks = facts.get('nicknames') or []
      if nicks:
        parts.append(f"- 昵称：{'、'.join([str(x) for x in nicks[:6]])}")
    ags = mem.get('agreements') or []
    if isinstance(ags, list) and ags:
      active = [a for a in ags if isinstance(a, dict) and (a.get('status') in (None, 'active'))]
      if active:
        parts.append(f"- 进行中的约定：{'、'.join([str(a.get('text')) for a in active[:6] if a.get('text')])}")
    rems = mem.get('reminders') or []
    if isinstance(rems, list) and rems:
      parts.append(f"- 提醒事项：{'、'.join([str(r.get('text')) for r in rems[:6] if r.get('text')])}")
    bds = mem.get('boundaries') or []
    if isinstance(bds, list) and bds:
      parts.append(f"- 边界/禁忌：{'、'.join([str(b.get('item')) for b in bds[:6] if b.get('item')])}")
    return '\n'.join(parts) if parts else '暂无重要记忆'

  @staticmethod
  def _normalize_memories(mem: dict) -> dict:
    out: dict = {}
    facts_in = mem.get('facts') if isinstance(mem.get('facts'), dict) else {}
    out['facts'] = {
      'name': str(facts_in.get('name') or '').strip()[:64],
      'birthday': str(facts_in.get('birthday') or '').strip()[:32],
      'timezone': str(facts_in.get('timezone') or '').strip()[:64],
      'nicknames': [str(x).strip() for x in (facts_in.get('nicknames') or []) if str(x).strip()][:10],
    }
    def _norm_list_of_obj(items, keys):
      res = []
      if isinstance(items, list):
        for it in items[:20]:
          if isinstance(it, dict):
            obj = {}
            for k in keys:
              obj[k] = str(it.get(k) or '').strip()
            res.append(obj)
      return res
    out['agreements'] = _norm_list_of_obj(mem.get('agreements'), ['text', 'status'])
    out['reminders'] = _norm_list_of_obj(mem.get('reminders'), ['text', 'due'])
    out['boundaries'] = _norm_list_of_obj(mem.get('boundaries'), ['item', 'level'])
    out['notes'] = mem.get('notes') if isinstance(mem.get('notes'), list) else []
    # preferences 规范化
    prefs = []
    raw_prefs = mem.get('preferences')
    if isinstance(raw_prefs, list):
      for it in raw_prefs[:50]:
        if not isinstance(it, dict):
          continue
        item = str(it.get('item') or '').strip()[:32]
        pol = str(it.get('polarity') or '').strip().lower()
        sub = str(it.get('subject') or '').strip().lower()
        if item and pol in ('like','dislike') and sub in ('self','partner'):
          prefs.append({'item': item, 'polarity': pol, 'subject': sub})
    out['preferences'] = prefs
    return out

  def _heuristic_memories(self, previous: dict, messages: list[dict[str, str]]) -> dict:
    import re
    mem = self._normalize_memories(previous or {})
    facts = mem.get('facts') or {}
    agreements = mem.get('agreements') or []
    reminders = mem.get('reminders') or []
    boundaries = mem.get('boundaries') or []

    name_patterns = [r"我叫(?P<name>[^，。,.\s]{1,12})", r"我的名字叫(?P<name>[^，。,.\s]{1,12})"]
    bday_patterns = [r"(生日|生辰)[是为在于]?[:：\s]?(?P<date>\d{4}[年/-]\d{1,2}[月/-]\d{1,2}日?|\d{1,2}月\d{1,2}日)"]
    agree_patterns = [r"我们(约好|约定|说好|答应)(?P<text>[^，。,.]{2,30})"]
    remind_patterns = [r"记得(?P<text>[^，。,.]{2,30})", r"提醒我(?P<text>[^，。,.]{2,30})"]
    bound_patterns = [r"(不要|禁止|别|不可以)(?P<item>[^，。,.]{1,16})"]
    pref_like_patterns_self = [r"我喜欢(?P<item>[^，。,.\s]{1,12})", r"我爱吃(?P<item>[^，。,.\s]{1,12})"]
    pref_dislike_patterns_self = [r"我不喜欢(?P<item>[^，。,.\s]{1,12})", r"我不吃(?P<item>[^，。,.\s]{1,12})"]
    pref_like_patterns_partner = [r"(女朋友|他|她)喜欢(?P<item>[^，。,.\s]{1,12})", r"(女朋友|他|她)爱吃(?P<item>[^，。,.\s]{1,12})"]
    pref_dislike_patterns_partner = [r"(女朋友|他|她)不喜欢(?P<item>[^，。,.\s]{1,12})", r"(女朋友|他|她)不吃(?P<item>[^，。,.\s]{1,12})"]

    def _first(patterns, text, group):
      for pat in patterns:
        m = re.search(pat, text)
        if m:
          return (m.group(group) or '').strip()
      return ''

    for msg in messages:
      content = str(msg.get('content') or '')
      if not content:
        continue
      nm = _first(name_patterns, content, 'name')
      if nm:
        facts['name'] = nm
      bd = _first(bday_patterns, content, 'date')
      if bd:
        facts['birthday'] = bd
      ag = _first(agree_patterns, content, 'text')
      if ag:
        agreements.append({'text': ag, 'status': 'active'})
      rm = _first(remind_patterns, content, 'text')
      if rm:
        reminders.append({'text': rm, 'due': ''})
      b = _first(bound_patterns, content, 'item')
      if b:
        boundaries.append({'item': b, 'level': 'medium'})
      # 偏好（主体 + 极性）
      for pat in pref_like_patterns_self:
        m = re.search(pat, content)
        if m:
          it = (m.group('item') or '').strip()
          if it:
            prefs = mem.get('preferences') or []
            prefs.append({'subject': 'self', 'polarity': 'like', 'item': it})
            mem['preferences'] = prefs
            break
      for pat in pref_dislike_patterns_self:
        m = re.search(pat, content)
        if m:
          it = (m.group('item') or '').strip()
          if it:
            prefs = mem.get('preferences') or []
            prefs.append({'subject': 'self', 'polarity': 'dislike', 'item': it})
            mem['preferences'] = prefs
            break
      for pat in pref_like_patterns_partner:
        m = re.search(pat, content)
        if m:
          it = (m.group('item') or '').strip()
          if it:
            prefs = mem.get('preferences') or []
            prefs.append({'subject': 'partner', 'polarity': 'like', 'item': it})
            mem['preferences'] = prefs
            break
      for pat in pref_dislike_patterns_partner:
        m = re.search(pat, content)
        if m:
          it = (m.group('item') or '').strip()
          if it:
            prefs = mem.get('preferences') or []
            prefs.append({'subject': 'partner', 'polarity': 'dislike', 'item': it})
            mem['preferences'] = prefs
            break

    mem['facts'] = facts
    mem['agreements'] = agreements[-30:]
    mem['reminders'] = reminders[-30:]
    mem['boundaries'] = boundaries[-30:]
    return mem

  def _convert_history(
    self,
    history: list[dict[str, str]],
    summary: str | None = None,
  ) -> list[HumanMessage | AIMessage]:
    """Map role/content dicts to LangChain messages.

    An optional rolling summary is prepended as assistant context; any role
    other than 'assistant' is treated as the human side.
    """
    messages: list[HumanMessage | AIMessage] = []
    if summary:
      messages.append(AIMessage(content=f'累计对话摘要：{summary}'))
    for entry in history:
      factory = AIMessage if entry.get('role') == 'assistant' else HumanMessage
      messages.append(factory(content=entry.get('content', '')))
    return messages

  async def summarize_history(
    self,
    *,
    previous: str | None,
    new_messages: Iterable[dict[str, str]],
    provider: str | None = None,
  ) -> str | None:
    llm = self.get_llm(provider)
    if not llm:
      return previous
    text = '\n'.join(f"{msg.get('role', 'user')}: {msg.get('content', '')}" for msg in new_messages)
    chain = self.history_summary_prompt | llm | self.output_parser
    result = await chain.ainvoke({'previous': previous or '无', 'conversation': text})
    return result.strip()


# Shared module-level singleton. Import-time construction builds the prompt
# templates (and reads optional prompt files); LLM clients are created
# lazily on first use via get_llm().
langchain_service = LoveNotesAI()
