import os
from typing import List, Literal
from dotenv import load_dotenv
import getpass
from langchain_openai import ChatOpenAI
from pydantic import SecretStr
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_core.embeddings import Embeddings
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.vectorstores import VectorStore
from langchain_core.vectorstores import (
    VectorStore,
    InMemoryVectorStore,
)
from langchain_core.documents import Document
import json
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_core.tools import tool
from langchain.agents import create_agent

def load_langsmith_tracing(is_debugging: Literal["true", "false"]) -> None:
    """Enable LangSmith tracing when debugging is requested.

    Prompts for LANGSMITH_API_KEY if it is not already in the environment,
    then sets LANGSMITH_TRACING. Does nothing when *is_debugging* is "false".

    Args:
        is_debugging: "true" to turn tracing on, "false" to leave it off.
    """
    # BUG FIX: the original truth-tested the string, so the literal "false"
    # (a non-empty string, hence truthy) also enabled tracing. Compare
    # against the exact literal instead.
    if is_debugging == "true":
        if not os.environ.get("LANGSMITH_API_KEY"):
            os.environ["LANGSMITH_API_KEY"] = getpass.getpass("Enter LANGSMITH_API_KEY: ")
        os.environ["LANGSMITH_TRACING"] = is_debugging

def load_openrouter_model() -> BaseChatModel:
    """Return the default chat model, served via OpenRouter.

    Prompts interactively for OPENROUTER_API_KEY when it is absent from the
    environment, then constructs a ``ChatOpenAI`` client pointed at the
    OpenRouter endpoint.
    """
    if not os.environ.get("OPENROUTER_API_KEY"):
        os.environ["OPENROUTER_API_KEY"] = getpass.getpass("Enter OPENROUTER_API_KEY: ")

    api_key = SecretStr(os.environ["OPENROUTER_API_KEY"])
    return ChatOpenAI(
        api_key=api_key,
        base_url="https://openrouter.ai/api/v1",
        model="nvidia/nemotron-nano-9b-v2:free",
    )

def load_hg_embedding_model() -> Embeddings:
    """Create the HuggingFace sentence-transformer embedding model.

    Forces TOKENIZERS_PARALLELISM off unless the caller already configured
    it, which avoids the tokenizers fork warning.
    """
    parallelism = os.environ.get("TOKENIZERS_PARALLELISM")
    if not parallelism:
        os.environ["TOKENIZERS_PARALLELISM"] = "false"
    return HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")

def get_vector_store(embedding_model: Embeddings) -> VectorStore:
    """Return a fresh in-memory vector store backed by *embedding_model*."""
    return InMemoryVectorStore(embedding_model)

def load_and_chunk(
    grpc_json_data: str,
    chunk_size: int = 1000,
    chunk_overlap: int = 200,
) -> List[Document]:
    """Parse a JSON payload and split its content into retrieval-sized chunks.

    Args:
        grpc_json_data: JSON string; "content" and "title" keys are read,
            and each degrades gracefully to "" when missing.
        chunk_size: Maximum characters per chunk (default matches the
            previous hard-coded value, so existing callers are unaffected).
        chunk_overlap: Characters shared between adjacent chunks.

    Returns:
        The list of chunked ``Document`` objects.

    Raises:
        json.JSONDecodeError: If *grpc_json_data* is not valid JSON.
    """
    parsed = json.loads(grpc_json_data)
    doc = Document(
        page_content=parsed.get("content", ""),
        metadata={"title": parsed.get("title", "")},
    )
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
    )
    return text_splitter.split_documents([doc])

def run_rag_agent(user_query: str, post_content: str):
    """Answer *user_query* with a RAG agent grounded in *post_content*.

    Builds an in-memory vector index over the post, exposes it to the LLM as
    a retrieval tool, and streams the agent's messages back to the caller.

    Args:
        user_query: The user's question.
        post_content: JSON string of the blog post (see ``load_and_chunk``).

    Yields:
        A pretty-printed string for each message emitted by the agent stream.
    """
    # In dev, load secrets from a local .env before anything reads them.
    if os.getenv("ENV", "dev") == "dev":
        load_dotenv()
    load_langsmith_tracing("true")
    llm = load_openrouter_model()
    embedding_model = load_hg_embedding_model()
    vector_store = get_vector_store(embedding_model)
    all_splits = load_and_chunk(post_content)
    _ = vector_store.add_documents(documents=all_splits)

    @tool(response_format="content_and_artifact")
    def retrieve_context(query: str):
        """Retrieve information to help answer a query."""
        retrieved_docs = vector_store.similarity_search(query, k=2)
        serialized = "\n\n".join(
            (f"Source: {doc.metadata}\nContent: {doc.page_content}")
            for doc in retrieved_docs
        )
        # content_and_artifact: the string goes to the model, the raw docs
        # ride along as the artifact.
        return serialized, retrieved_docs

    tools = [retrieve_context]
    # BUG FIX: the original prompt misspelled "that" as "taht", and the two
    # adjacent string literals concatenated with no separating space
    # ("blog post.Use"), garbling the instruction text sent to the model.
    prompt = (
        "You have access to a tool that retrieves context from a blog post. "
        "Use the tool to help answer user queries."
    )
    agent = create_agent(llm, tools, prompt=prompt)

    for event in agent.stream(
        {"messages": [{"role": "user", "content": user_query}]},
        stream_mode="values",
    ):
        yield event["messages"][-1].pretty_repr(html=False)
