from typing import List, Optional
from langchain.chains import create_history_aware_retriever, create_retrieval_chain, LLMChain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain.memory import ConversationBufferWindowMemory
from langchain_core.chat_history import BaseChatMessageHistory
from app.core.factories import create_llm_instance, get_tools
from app.core.prompts import (
    agent_prompt,
    rephrase_question_prompt,
    get_rag_answer_prompt,
    rag_answer_prompt,
    SIMPLE_CHAT_PROMPT
)
from app.core.vector_store import get_vector_store

def get_rag_chain(style: Optional[str] = None):
    """Build a history-aware retrieval-augmented generation (RAG) chain.

    The chain first rewrites the latest user question into a standalone
    query using the conversation history, retrieves matching documents
    from the vector store, then stuffs them into the answer prompt.

    Args:
        style: Optional answer-style key forwarded to
            ``get_rag_answer_prompt`` to select the answer prompt variant.

    Returns:
        A runnable retrieval chain produced by ``create_retrieval_chain``.
    """
    vector_store = get_vector_store()
    retriever = vector_store.as_retriever()
    llm = create_llm_instance()
    answer_prompt = get_rag_answer_prompt(style)

    # Condenses the latest question + chat history into a standalone query
    # before it hits the retriever.
    history_aware_retriever = create_history_aware_retriever(
        llm, retriever, rephrase_question_prompt
    )
    # Reuse the prompt built above (the original called get_rag_answer_prompt
    # a second time here and left `answer_prompt` unused).
    question_answer_chain = create_stuff_documents_chain(llm, answer_prompt)
    rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)
    return rag_chain

def get_conversation_chain(
    tool_names: List[str],
    chat_history_backend: Optional[BaseChatMessageHistory] = None,
    memory_window_size: int = 5,
    style: Optional[str] = None
):
    """Build a conversational chain: a tool-calling agent if tools resolve,
    otherwise a plain LLM chat chain.

    Args:
        tool_names: Names resolved to tool instances via ``get_tools``.
        chat_history_backend: Optional persistent chat-history store; when
            omitted the memory falls back to its default in-memory history.
        memory_window_size: Number of past exchanges kept in the rolling
            conversation window.
        style: Accepted for interface parity with ``get_rag_chain`` but not
            currently used here.  # NOTE(review): confirm whether style should
            # influence SIMPLE_CHAT_PROMPT / agent_prompt selection.

    Returns:
        An ``AgentExecutor`` when any tools are available, else an ``LLMChain``.
    """
    llm = create_llm_instance()
    tools = get_tools(tool_names)

    # Only pass chat_memory when a backend was actually supplied: explicitly
    # passing chat_memory=None overrides the memory class's default
    # in-memory history factory and yields a broken memory object.
    memory_kwargs = {
        "k": memory_window_size,
        "memory_key": "chat_history",
        "return_messages": True,
    }
    if chat_history_backend is not None:
        memory_kwargs["chat_memory"] = chat_history_backend
    memory = ConversationBufferWindowMemory(**memory_kwargs)

    if tools:
        print("--- Creating Agent chain with tools ---")
        agent = create_tool_calling_agent(llm, tools, agent_prompt)
        return AgentExecutor(
            agent=agent,
            tools=tools,
            memory=memory,
            verbose=True
        )
    else:
        print("--- No tools provided, creating simple LLM chain ---")
        return LLMChain(
            llm=llm,
            prompt=SIMPLE_CHAT_PROMPT,
            memory=memory,
            verbose=True
        )