from langchain_openai import OpenAIEmbeddings
from langchain_chroma import Chroma
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
import rss_config

def query_news(question, *, k=5, verbose=True):
    """Answer *question* using RAG over the news articles stored in Chroma.

    Embeds the question, retrieves the ``k`` most similar documents from the
    persistent Chroma collection configured in ``rss_config``, and asks the
    configured OpenAI chat model to answer using only that retrieved context.

    Args:
        question: Natural-language question to answer.
        k: Number of similar documents to retrieve as context (default 5).
        verbose: When True (default), also print the answer to stdout.

    Returns:
        The model's answer as a string.
    """
    api_key = rss_config.openai_api_key
    embeddings = OpenAIEmbeddings(
        base_url=rss_config.openai_api_url,
        model=rss_config.openai_embedding_model,
        api_key=api_key,
        dimensions=1536,  # must match the dimensionality used at index time
    )

    vector_store = Chroma(
        collection_name=rss_config.chromadb_name,
        embedding_function=embeddings,
        persist_directory=rss_config.chromadb_path,  # Where to save data locally, remove if not necessary
    )

    retriever = vector_store.as_retriever(
        search_type="similarity",
        search_kwargs={"k": k},
    )

    llm = ChatOpenAI(
        model=rss_config.openai_chat_model,
        base_url=rss_config.openai_api_url,
        api_key=api_key,
    )
    message = """
    Answer this question using the provided context only.
    
    {question}
    
    Context:
    {context}
    """

    prompt = ChatPromptTemplate.from_messages([("human", message)])
    # The retriever fills {context}; the raw question passes through to {question}.
    rag_chain = {"context": retriever, "question": RunnablePassthrough()} | prompt | llm
    response = rag_chain.invoke(question)

    if verbose:
        print(response.content)
    return response.content