﻿from __future__ import annotations

import json
import calendar
from datetime import date, datetime, timedelta, timezone
from pathlib import Path
from typing import Any, AsyncIterator

from sqlalchemy import and_, or_, select
from sqlalchemy.ext.asyncio import AsyncSession

from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import HumanMessage, AIMessage
from langchain_core.tools import tool

from app.models.anniversary import Anniversary
from app.models.cycle import CycleLog, CyclePrediction
from app.models.diary import DiaryEntry
from app.models.user import User
from app.models.memory_node import MemoryNode
from app.models.memory_edge import MemoryEdge
from app.services.langchain_service import langchain_service
from app.core.config import settings
import structlog
import asyncio
from duckduckgo_search import DDGS  # type: ignore
from app.services.lunar_utils import parse_lunar_str, next_solar_from_lunar


class LoveNotesAgentService:
  """Tool-calling agent for the Love Notes assistant ("小蛋").

  Builds a LangChain chat prompt from a markdown system prompt on disk and
  exposes per-request tools (diary, anniversaries, cycle, search, memory)
  via ``_build_tools``.
  """

  def __init__(self) -> None:
    system_prompt = self._load_system_prompt()

    # Standard tool-calling layout: system prompt, prior turns, the new
    # user input, then the agent scratchpad for intermediate tool steps.
    self.prompt = ChatPromptTemplate.from_messages(
      [
        ('system', system_prompt),
        MessagesPlaceholder(variable_name='chat_history'),
        ('human', '{input}'),
        MessagesPlaceholder(variable_name='agent_scratchpad'),
      ]
    )

  @staticmethod
  def _load_system_prompt() -> str:
    """Read the system prompt from ``prompts/ai_agent_system_prompt.md``.

    Returns the built-in minimal Chinese prompt when the file is missing
    or unreadable, so service construction never fails on a bad path.
    """
    prompt_path = Path(__file__).resolve().parent / 'prompts' / 'ai_agent_system_prompt.md'
    try:
      return prompt_path.read_text(encoding='utf-8')
    except OSError:
      # Broader than FileNotFoundError: also covers permission errors and
      # the path unexpectedly being a directory.
      return '你是小蛋（Love Notes AI），请保持温柔、结构化的中文回答。'

  async def _build_tools(self, session: AsyncSession, user: User):
    """Build the per-request LangChain tools bound to ``session``/``user``.

    Every tool takes a single string argument that may be empty, plain
    text, or a JSON object; ``_parse_tool_args`` normalizes it into a
    dict. The tool docstrings are deliberately kept in Chinese because
    LangChain forwards them to the model as the tool descriptions.

    Returns the list of tool callables to hand to the agent executor.
    """
    relationship_id = user.active_relationship_id

    def _parse_tool_args(raw: str) -> dict[str, Any]:
      # Accept a JSON object/array/scalar, or fall back to treating the
      # raw text as a search query.
      raw = (raw or '').strip()
      if not raw:
        return {}
      try:
        parsed = json.loads(raw)
        if isinstance(parsed, dict):
          return parsed
        if isinstance(parsed, list):
          return {'items': parsed}
        return {'value': parsed}
      except json.JSONDecodeError:
        return {'query': raw}

    def _parse_bool(value: Any) -> bool:
      # Lenient truthiness for LLM-supplied flags ("true", "1", "on", ...).
      if isinstance(value, bool):
        return value
      if isinstance(value, str):
        return value.strip().lower() in {'1', 'true', 'yes', 'y', 'on'}
      if isinstance(value, (int, float)):
        return bool(value)
      return False

    def _ensure_tz(value: datetime | None) -> datetime | None:
      # Treat naive datetimes as UTC so comparisons never mix naive/aware.
      if not value:
        return None
      if value.tzinfo is None:
        return value.replace(tzinfo=timezone.utc)
      return value

    def _coerce_iso(value: Any) -> datetime | None:
      # Parse an ISO-8601 string into an aware datetime; None if invalid.
      if not value or not isinstance(value, str):
        return None
      try:
        parsed = datetime.fromisoformat(value)
        return _ensure_tz(parsed)
      except ValueError:
        return None

    @tool('get_datetime', return_direct=False)
    async def get_datetime(_: str = "") -> str:
      """获取当前的本地日期时间（UTC+8）以及对应的星期。"""
      tz_local = timezone(timedelta(hours=8))
      now_local = datetime.now(tz_local)
      week_map = ['周一', '周二', '周三', '周四', '周五', '周六', '周日']
      weekday = week_map[now_local.weekday()]
      return (
        f"- 当前日期：{now_local.strftime('%Y-%m-%d')} ({weekday})\n"
        f"- 当前时间：{now_local.strftime('%H:%M')}"
      )

    @tool('get_diary', return_direct=False)
    async def get_diary(input_text: str = "") -> str:
      """获取情侣共享日记，默认最近 3 篇。支持 limit(1-20)、since/until(ISO8601)。"""
      params = _parse_tool_args(input_text)
      limit = int(params.get('limit', 3))
      limit = max(1, min(limit, 20))
      filters = [DiaryEntry.deleted_at.is_(None)]
      if relationship_id:
        # Couple view: include the relationship's entries plus the user's
        # own legacy rows written before relationship_id existed.
        filters.append(
          or_(
            DiaryEntry.relationship_id == relationship_id,
            DiaryEntry.user_id == user.id,
          )
        )
      else:
        filters.append(DiaryEntry.user_id == user.id)
      since = _coerce_iso(params.get('since'))
      until = _coerce_iso(params.get('until'))
      if since:
        filters.append(DiaryEntry.created_at >= since)
      if until:
        filters.append(DiaryEntry.created_at <= until)
      result = await session.scalars(
        select(DiaryEntry)
        .where(*filters)
        .order_by(DiaryEntry.created_at.desc())
        .limit(limit)
      )
      entries = result.all()
      if not entries:
        return '暂无日记记录'
      lines = []
      for item in entries:
        lines.append(
          f"- {item.created_at:%Y-%m-%d}: {item.title}（心情：{item.mood or '未知'}）\n  摘要：{item.ai_summary or item.content_md[:120]}"
        )
      return '\n'.join(lines)

    @tool('get_anniversary', return_direct=False)
    async def get_anniversary(input_text: str = "") -> str:
      """获取纪念日，默认列出未来 5 项。支持 limit/from/to/include_past。"""
      params = _parse_tool_args(input_text)
      limit = int(params.get('limit', 5))
      limit = max(1, min(limit, 20))
      include_past = _parse_bool(params.get('include_past', False))
      from_dt = _coerce_iso(params.get('from'))
      to_dt = _coerce_iso(params.get('to'))
      # datetime.now(timezone.utc) replaces the deprecated datetime.utcnow();
      # same UTC calendar date.
      reference_start = (from_dt or datetime.now(timezone.utc)).date()
      reference_today = datetime.now(timezone.utc).date()
      window_start = reference_start if include_past else max(reference_start, reference_today)
      window_end = to_dt.date() if to_dt else None

      def _normalize_day(year: int, month: int, day: int) -> date:
        # Clamp e.g. day 31 in a 30-day month to the last valid day.
        last_day = calendar.monthrange(year, month)[1]
        return date(year, month, min(day, last_day))

      def _next_occurrence(base: date, recurrence: str, reference: date) -> date | None:
        # Next occurrence of `base` on/after `reference` for the given
        # recurrence; None when a one-off date is already in the past.
        if recurrence == 'once':
          return base if base >= reference else None
        if recurrence == 'monthly':
          year, month = reference.year, reference.month
          candidate = _normalize_day(year, month, base.day)
          if candidate < reference:
            if month == 12:
              year += 1
              month = 1
            else:
              month += 1
            candidate = _normalize_day(year, month, base.day)
          return candidate
        # 'yearly' and 'custom' both recur annually by default.
        year = reference.year
        candidate = _normalize_day(year, base.month, base.day)
        if candidate < reference:
          candidate = _normalize_day(year + 1, base.month, base.day)
        return candidate

      async def _fetch_anniversaries() -> list[Anniversary]:
        filters = [Anniversary.deleted_at.is_(None)]
        if relationship_id:
          filters.append(
            or_(
              Anniversary.user_id == user.id,
              Anniversary.relationship_id == relationship_id,
            )
          )
        else:
          filters.append(Anniversary.user_id == user.id)
        stmt = (
          select(Anniversary)
          .where(*filters)
          .order_by(Anniversary.date.asc())
          # Over-fetch so recurrence expansion still yields `limit` items.
          .limit(limit * 5)
        )
        result = await session.scalars(stmt)
        return result.all()

      anniversaries = await _fetch_anniversaries()
      occurrences: list[tuple[date, Anniversary]] = []
      for ann in anniversaries:
        occurrence: date | None = None
        if ann.is_lunar and ann.lunar_date_str:
          # Lunar anniversaries: project the next solar-calendar date.
          patt = parse_lunar_str(ann.lunar_date_str)
          if patt:
            occurrence = next_solar_from_lunar(patt, window_start)
        if not occurrence:
          base = ann.date
          occurrence = _next_occurrence(base, ann.recurrence or 'yearly', window_start)
        if not occurrence:
          continue
        if not include_past and occurrence < reference_today:
          continue
        if window_end and occurrence > window_end:
          continue
        occurrences.append((occurrence, ann))

      if not occurrences:
        return '暂无即将到来的纪念日'

      occurrences.sort(key=lambda x: x[0])
      limited = occurrences[:limit]
      lines = [
        f"- {occ.isoformat()}: {ann.name} · 提前 {ann.reminder_offset_days} 天提醒"
        for occ, ann in limited
      ]
      return '\n'.join(lines)

    @tool('get_cycle', return_direct=False)
    async def get_cycle(input_text: str = "") -> str:
      """汇总最近的生理期记录（含日历）并可附带预测结果，支持 limit/include_predictions。"""
      params = _parse_tool_args(input_text)
      limit = int(params.get('limit', 3))
      limit = max(1, min(limit, 10))
      # Bug fix: run the flag through _parse_bool so LLM-supplied strings
      # like "false" are not treated as truthy (the raw value was used
      # directly before, unlike include_past in get_anniversary).
      include_predictions = _parse_bool(params.get('include_predictions', True))

      filters = [CycleLog.deleted_at.is_(None)]
      if relationship_id:
        # Cycle logs are shared within a couple; also match the user's own
        # historical rows written without relationship_id.
        filters.append(
          or_(
            CycleLog.relationship_id == relationship_id,
            CycleLog.user_id == user.id,
          )
        )
      else:
        filters.append(CycleLog.user_id == user.id)
      result = await session.scalars(
        select(CycleLog).where(*filters).order_by(CycleLog.start_date.desc()).limit(limit)
      )
      logs = result.all()
      lines = []
      if logs:
        for log in logs:
          lines.append(
            f"- {log.start_date} ~ {log.end_date or '进行中'} · 症状：{log.symptoms_json or {}}"
          )
      else:
        # Fallback: derive an overview from day-level rows (cycle_day_logs).
        import sqlalchemy as sa
        where_parts = ["deleted_at IS NULL"]
        params_sql: dict[str, object] = {}
        if relationship_id:
          where_parts.append("(relationship_id=:rid OR user_id=:uid)")
          params_sql['rid'] = relationship_id
          params_sql['uid'] = user.id
        else:
          where_parts.append("user_id=:uid")
          params_sql['uid'] = user.id
        where_clause = " AND ".join(where_parts)
        day_rows = await session.execute(
          sa.text(
            f"SELECT date, flow_level FROM cycle_day_logs WHERE {where_clause} ORDER BY date DESC LIMIT :lim"
          ),
          {**params_sql, 'lim': limit * 10},
        )
        # NOTE(review): raw SQL may yield `date` as str on some drivers;
        # the date arithmetic below assumes date objects — confirm dialect.
        days = [(row[0], (row[1] or '').lower()) for row in day_rows]
        if not days:
          return '暂无生理期记录'
        # Keep only dates that actually have flow recorded.
        flow_dates = sorted([d for (d, level) in days if level and level != 'none'], reverse=True)
        if not flow_dates:
          last_date = days[0][0]
          lines.append(f"- 最近打点 {len(days)} 天（日历），最近日期：{last_date}")
        else:
          # Walk backwards from each most-recent flow day over consecutive
          # days; each run approximates one period.
          used = set(flow_dates)
          cycles: list[tuple] = []
          seen = set()
          for start_candidate in flow_dates:
            if start_candidate in seen:
              continue
            end = start_candidate
            cur = start_candidate - timedelta(days=1)
            while cur in used:
              seen.add(cur)
              end = cur
              cur = cur - timedelta(days=1)
            # (earliest day of run, latest day of run)
            cycles.append((end, start_candidate))
            if len(cycles) >= limit:
              break
          latest_start, latest_end = cycles[0]
          span = (latest_start, latest_end)
          days_count = (span[1] - span[0]).days + 1
          lines.append(f"- 最近一次经期：{span[0]} ~ {span[1]} · 天数 {days_count}")
      prediction_filters = [CyclePrediction.deleted_at.is_(None), _prediction_filter(user)]
      if include_predictions:
        prediction_limit = int(params.get('prediction_limit', limit))
        prediction_limit = max(1, min(prediction_limit, 10))
        future = await session.scalars(
          select(CyclePrediction)
          .where(
            and_(
              *prediction_filters,
              # Also surface predictions that started up to 3 days ago.
              CyclePrediction.start_date >= datetime.now(timezone.utc).date() - timedelta(days=3),
            )
          )
          .order_by(CyclePrediction.start_date)
          .limit(prediction_limit)
        )
        predictions = future.all()
        if predictions:
          lines.append('预测：')
          for pred in predictions:
            lines.append(
              f"- {pred.start_date} ~ {pred.end_date} · 阶段：{pred.phase} · 置信度：{pred.confidence:.2f}"
            )
        elif lines:
          lines.append('预测：可前往“生理期关怀”查看基于历史数据的阶段推测。')
      return '\n'.join(lines)

    @tool('get_tags', return_direct=False)
    async def get_tags(input_text: str = "") -> str:
      """统计最近日记中的高频标签，默认取最近 10 篇，返回最多 6 个标签及出现次数。"""
      params = _parse_tool_args(input_text)
      limit = int(params.get('limit', 10))
      limit = max(1, min(limit, 20))
      filters = [DiaryEntry.deleted_at.is_(None)]
      if relationship_id:
        filters.append(DiaryEntry.relationship_id == relationship_id)
      else:
        filters.append(DiaryEntry.user_id == user.id)
      result = await session.scalars(
        select(DiaryEntry).where(*filters).order_by(DiaryEntry.created_at.desc()).limit(limit)
      )
      items = result.all()
      from collections import Counter
      counter: Counter[str] = Counter()
      for it in items:
        for t in (it.tags_json or []):
          counter[t] += 1
      if not counter:
        return '暂无标签'
      tops = ', '.join([f"{k}×{v}" for k, v in counter.most_common(6)])
      return f"Top 标签：{tops}"

    @tool('duck_search', return_direct=False)
    async def duck_search(input_text: str = "") -> str:
      """DuckDuckGo 搜索。参数示例：{"query":"周末上海活动", "count":5}。无需 API Key。"""
      params = _parse_tool_args(input_text)
      query = str(params.get('query') or params.get('q') or '').strip()
      count = int(params.get('count', 5))
      count = max(1, min(count, 10))
      if not query:
        return '请提供 query 关键词，例如 {"query":"上海赏花路线", "count":5}'
      try:
        # Honor configured HTTP(S) proxies when present.
        proxies = None
        if settings.https_proxy or settings.http_proxy:
          proxies = {}
          if settings.http_proxy:
            proxies['http'] = settings.http_proxy
          if settings.https_proxy:
            proxies['https'] = settings.https_proxy
        def _search():
          with DDGS(proxies=proxies, timeout=15) as ddgs:
            return list(ddgs.text(query, max_results=count, region='cn-zh', safesearch='moderate'))
        # DDGS is synchronous; run it off the event loop.
        items = await asyncio.to_thread(_search)
        if not items:
          return '未找到相关网页'
        lines = []
        for it in items[:count]:
          title = it.get('title') or it.get('href')
          desc = it.get('body') or ''
          link = it.get('href')
          lines.append(f"- {title}\n  {desc}\n  {link}")
        return '\n'.join(lines)
      except Exception as e:
        # Best-effort tool: report the failure instead of crashing the agent.
        hint = '（可在后端 .env 设置 http_proxy/https_proxy 以通过代理访问）'
        return f'网络检索暂时不可用，原因：{e}。{hint}'

    @tool('get_user_prefs', return_direct=False)
    async def get_user_prefs(_: str = "") -> str:
      """获取当前用户的偏好画像（如喜好/禁忌/礼物/宠物等）。无需入参。"""
      rid = user.active_relationship_id
      # Graph aggregation: the person/self node -> prefers/avoids/mentions
      # edges to preference/topic nodes.
      person = await session.scalar(
        select(MemoryNode).where(
          MemoryNode.user_id == user.id,
          MemoryNode.relationship_id == rid,
          MemoryNode.type == 'person',
          MemoryNode.label.in_(['我','self']),
        )
      )
      likes: list[str] = []
      dislikes: list[str] = []
      topics: list[str] = []
      if person:
        edges = (await session.scalars(
          select(MemoryEdge).where(
            MemoryEdge.user_id == user.id,
            MemoryEdge.relationship_id == rid,
            MemoryEdge.src_id == person.id,
          )
        )).all()
        # NOTE(review): per-edge session.get is an N+1 pattern; acceptable
        # at current graph sizes, revisit if edge counts grow.
        for e in edges:
          if e.type in ('prefers','avoids','mentions'):
            node = await session.get(MemoryNode, e.dst_id)
            if not node:
              continue
            if e.type == 'prefers':
              likes.append(node.label)
            elif e.type == 'avoids':
              dislikes.append(node.label)
            elif e.type == 'mentions':
              topics.append(node.label)
      # dict.fromkeys de-duplicates while preserving first-seen order.
      agg = {'likes': list(dict.fromkeys(likes)), 'dislikes': list(dict.fromkeys(dislikes)), 'topics': list(dict.fromkeys(topics))}
      data = {'prefs': agg}
      text = langchain_service.summarize_prefs_text(data)
      return text

    @tool('get_user_memories', return_direct=False)
    async def get_user_memories(_: str = "") -> str:
      """获取当前用户的重要记忆（如姓名/昵称、约定、提醒、边界等）。无需入参。"""
      rid = user.active_relationship_id
      person = await session.scalar(
        select(MemoryNode).where(
          MemoryNode.user_id == user.id,
          MemoryNode.relationship_id == rid,
          MemoryNode.type == 'person',
          MemoryNode.label.in_(['我','self']),
        )
      )
      agreements: list[dict] = []
      reminders: list[dict] = []
      boundaries: list[dict] = []
      facts: dict = {}
      if person:
        edges = (await session.scalars(
          select(MemoryEdge).where(
            MemoryEdge.user_id == user.id,
            MemoryEdge.relationship_id == rid,
            MemoryEdge.src_id == person.id,
          )
        )).all()
        # Bucket edges by type into agreements/reminders/boundaries.
        for e in edges:
          node = await session.get(MemoryNode, e.dst_id)
          if not node:
            continue
          if e.type == 'agreed_with' and node.type == 'agreement':
            agreements.append({'text': node.label, 'status': (node.props_json or {}).get('status')})
          if e.type == 'reminds' and node.type == 'reminder':
            reminders.append({'text': node.label, **(node.props_json or {})})
          if e.type == 'has_boundary' and node.type == 'boundary':
            boundaries.append({'item': node.label, 'level': (node.props_json or {}).get('level')})
      mem = {'facts': facts, 'agreements': agreements, 'reminders': reminders, 'boundaries': boundaries, 'notes': []}
      text = langchain_service.summarize_memories_text(mem if isinstance(mem, dict) else {})
      return text

    return [
      get_datetime,
      get_diary,
      get_anniversary,
      get_cycle,
      get_tags,
      duck_search,
      get_user_prefs,
      get_user_memories,
    ]

  async def arun(
    self,
    *,
    session: AsyncSession,
    user: User,
    message: str,
    provider: str | None = None,
    history: list[dict[str, str]] | None = None,
    summary: str | None = None,
  ) -> str:
    """Run the agent once for ``message`` and return the final answer text."""
    logger = structlog.get_logger('love_notes.agent')
    llm = langchain_service.get_llm(provider)
    logger.info('agent.arun.llm', provider=provider or 'default', llm_type=type(llm).__name__ if llm else None)
    if not llm:
      return 'AI 服务未配置，暂时无法调用智能助手。'

    tools = await self._build_tools(session, user)
    executor = AgentExecutor(
      agent=create_tool_calling_agent(llm, tools, self.prompt),
      tools=tools,
      verbose=False,
      handle_parsing_errors=True,
    )

    # Conversational context: optional rolling summary first, then the
    # most recent turns converted into LangChain messages.
    chat_history: list[HumanMessage | AIMessage] = []
    if summary:
      chat_history.append(AIMessage(content=f'累计对话摘要：{summary}'))
    for turn in (history or [])[-20:]:
      text = turn.get('content', '')
      if not isinstance(text, str):
        text = str(text)
      msg_cls = AIMessage if turn.get('role', 'user') == 'assistant' else HumanMessage
      chat_history.append(msg_cls(content=text))

    result = await executor.ainvoke({'input': message, 'chat_history': chat_history})
    return result.get('output', '未获取到 AI 回复。')

  async def astream(
    self,
    *,
    session: AsyncSession,
    user: User,
    message: str,
    provider: str | None = None,
    history: list[dict[str, str]] | None = None,
    summary: str | None = None,
  ) -> AsyncIterator[str]:
    """Stream the agent's answer as text tokens plus tool-status events.

    Yields plain text chunks for the model's final answer and JSON strings
    (``{"type": "tool", ...}``) for tool start/end notifications; the SSE
    layer above is responsible for framing.
    """
    logger = structlog.get_logger('love_notes.agent')
    llm = langchain_service.get_llm(provider)
    logger.info('agent.astream.llm', provider=provider or 'default', llm_type=type(llm).__name__ if llm else None)
    if not llm:
      yield 'AI 服务未配置，暂时无法调用智能助手。'
      return

    tools = await self._build_tools(session, user)
    agent = create_tool_calling_agent(llm, tools, self.prompt)
    executor = AgentExecutor(agent=agent, tools=tools, verbose=False, handle_parsing_errors=True)

    # Collect streamed tokens so the on_chain_end fallback knows whether
    # anything was already emitted.
    buffer: list[str] = []

    # Map tool names to human-readable progress labels.
    def _tool_cname(event: dict[str, Any]) -> str:
      name: str | None = None
      if isinstance(event, dict):
        raw_name = event.get('name') or event.get('tool')
        if isinstance(raw_name, str):
          name = raw_name
        elif isinstance(raw_name, dict):
          name = raw_name.get('name') if isinstance(raw_name.get('name'), str) else None
        if not name:
          serialized = event.get('serialized')
          if isinstance(serialized, dict):
            s_name = serialized.get('name')
            if isinstance(s_name, str):
              name = s_name
      mapping = {
        'get_datetime': '获取当前时间',
        'get_diary': '查询日记',
        'get_anniversary': '查询纪念日',
        'get_cycle': '汇总生理期',
        'get_tags': '统计标签',
        'duck_search': 'DuckDuckGo 搜索',
        # Fix: these two registered tools were missing from the mapping
        # and surfaced as raw English names in the streamed tool labels.
        'get_user_prefs': '查询用户偏好',
        'get_user_memories': '查询重要记忆',
      }
      if name:
        return mapping.get(name, name)
      return '工具'

    # Conversational context mirrors `arun`: summary first, then turns.
    chat_history: list[HumanMessage | AIMessage] = []
    if summary:
      chat_history.append(AIMessage(content=f'累计对话摘要：{summary}'))
    if history:
      for item in history[-20:]:
        role = item.get('role', 'user')
        content = item.get('content', '')
        if not isinstance(content, str):
          content = str(content)
        chat_history.append(AIMessage(content=content) if role == 'assistant' else HumanMessage(content=content))

    async for event in executor.astream_events(
        {'input': message, 'chat_history': chat_history},
        version='v1',
      ):
      etype = event.get('event')
      data = event.get('data') or {}
      # Emit tool phase notifications as bare JSON strings.
      if etype == 'on_tool_start':
        payload = json.dumps({'type': 'tool', 'label': _tool_cname(data), 'status': 'running'}, ensure_ascii=False)
        yield payload
        continue
      if etype == 'on_tool_end':
        payload = json.dumps({'type': 'tool', 'label': _tool_cname(data), 'status': 'done'}, ensure_ascii=False)
        yield payload
        continue
      if etype == 'on_chat_model_stream':
        chunk = data.get('chunk')
        content: str | None = None
        # Chunk shape varies by implementation: object / dict / plain str.
        if chunk is None:
          content = None
        elif isinstance(chunk, str):
          content = chunk
        else:
          # langchain_core chunks expose .content; dicts carry 'content'.
          content = getattr(chunk, 'content', None)
          if content is None and isinstance(chunk, dict):
            content = chunk.get('content')
        if content:
          buffer.append(content)
          yield content
      elif etype == 'on_chain_end':
        # Fallback: if no token was streamed, emit the final output once
        # when the chain finishes.
        output = data.get('output') if isinstance(data, dict) else None
        if isinstance(output, dict):
          text = output.get('output')
          if isinstance(text, str) and not buffer:
            buffer.append(text)
            yield text
    # If nothing was emitted at all, the caller handles the empty stream.
    return


def _prediction_filter(user: User):
  """Build the SQLAlchemy predicate scoping cycle predictions to `user`.

  With an active relationship the filter matches either the user's own
  predictions or the relationship's; otherwise only the user's.
  """
  rid = user.active_relationship_id
  if not rid:
    return CyclePrediction.user_id == user.id
  return or_(
    CyclePrediction.user_id == user.id,
    CyclePrediction.relationship_id == rid,
  )


love_notes_agent = LoveNotesAgentService()
