from langchain_deepseek import ChatDeepSeek
from langchain_core.output_parsers import StrOutputParser
from langchain.prompts import ChatPromptTemplate
from dotenv import load_dotenv
from pathlib import Path
import os, time
from typing import Optional, Dict, Any, List, Union

from .prompts import (
    create_base_chat_prompt,
    create_structured_output_prompt,
    create_classification_prompt,
)
from .tools import (
    validate_input, clean_text, parse_query_params,  # helper stubs in utils.helpers
    get_current_time, get_weather_info, search_knowledge_base,
)
from config.settings import settings
from utils.memory import ConversationMemory, ContextManager
from utils.helpers import setup_logging, ResponseFormatter, extract_json_from_text

class SmartAssistant:
    """Conversational assistant backed by a DeepSeek chat model.

    Wires together the LLM client, conversation memory/context managers,
    and a base chat prompt chain. Only the minimal ``chat`` entry point
    is exposed for use by the recommender service.
    """

    def __init__(self, model: Optional[str] = None, temperature: float = 0.7):
        # Load environment variables from the project-root .env file.
        env_path = Path(__file__).resolve().parents[1] / '.env'
        load_dotenv(env_path)

        self.logger = setup_logging()
        self.llm = ChatDeepSeek(
            model=model or settings.default_model,
            temperature=temperature,
            api_key=settings.siliconflow_api_key,
            base_url=settings.siliconflow_api_base,
        )
        self.memory = ConversationMemory()
        self.context = ContextManager()
        self.output_parser = StrOutputParser()
        # LCEL pipeline: prompt -> LLM -> plain-string parser.
        self.chat_chain = create_base_chat_prompt() | self.llm | self.output_parser

    # Only the most basic chat capability is kept here, for the recommender service.
    def chat(self, text: str) -> str:
        """Run one chat turn: validate input, invoke the chain, record history.

        Returns the model's reply, or a fixed rejection string when the
        input fails validation.
        """
        if not validate_input(text):
            return "输入不合法"
        payload = {
            "user_input": clean_text(text),
            "chat_history": self.memory.get_formatted_history(),
        }
        reply = self.chat_chain.invoke(payload)
        # Persist the turn only after a successful model call.
        self.memory.add_message("user", text)
        self.memory.add_message("assistant", reply)
        return reply