import os
from pathlib import Path
from typing import List, Optional, Tuple

from llama_index.core import (
    Settings,
    SimpleDirectoryReader,
    StorageContext,
    SummaryIndex,
    VectorStoreIndex,
    load_index_from_storage,
)
from llama_index.core.agent import AgentRunner, FunctionCallingAgentWorker
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.objects import ObjectIndex
from llama_index.core.tools import FunctionTool, QueryEngineTool
from llama_index.core.vector_stores import FilterCondition, MetadataFilters
from llama_index.embeddings.ollama import OllamaEmbedding
from llama_index.llms.ollama import Ollama

# Local models served by Ollama: a Qwen2.5 instruct model for reasoning
# (generous timeout for slow local inference) and bge-m3 for embeddings.
llm = Ollama(model="qwen2.5:7b-instruct-q4_0", request_timeout=120.0)
embed_model = OllamaEmbedding(
    model_name="bge-m3",
    base_url="http://127.0.0.1:11434",
)

# Register both as the llama-index process-wide defaults.
Settings.llm = llm
Settings.embed_model = embed_model


def get_doc_tools(
        file_path: str,
        name: str,
) -> Tuple[FunctionTool, QueryEngineTool]:
    """Build a vector-search tool and a summary tool for one document.

    Indexes are persisted under ``./storage/<name>`` on first use and
    reloaded from disk on later runs, so each document is embedded only
    once.

    Args:
        file_path (str): Path of the document file to index.
        name (str): Unique name used both for the persist directory and
            as the suffix of the generated tool names.

    Returns:
        Tuple[FunctionTool, QueryEngineTool]: ``(vector_query_tool,
        summary_tool)`` for targeted Q&A and summarization respectively.
    """
    storage_path = f"./storage/{name}"
    vector_dir = f"{storage_path}/vector"
    summary_dir = f"{storage_path}/summary"
    if os.path.exists(storage_path):
        # Reload previously persisted indexes instead of re-embedding.
        print(f"Loading index from {storage_path}")
        vector_index = load_index_from_storage(
            StorageContext.from_defaults(persist_dir=vector_dir)
        )
        summary_index = load_index_from_storage(
            StorageContext.from_defaults(persist_dir=summary_dir)
        )
    else:
        print(f"Creating index at {storage_path}")
        # Load, chunk, index, and persist the document.
        documents = SimpleDirectoryReader(input_files=[file_path]).load_data()
        splitter = SentenceSplitter(chunk_size=1024)
        nodes = splitter.get_nodes_from_documents(documents)
        vector_index = VectorStoreIndex(nodes)
        vector_index.storage_context.persist(persist_dir=vector_dir)
        summary_index = SummaryIndex(nodes)
        summary_index.storage_context.persist(persist_dir=summary_dir)

    def vector_query(
            query: str,
            page_numbers: Optional[List[str]] = None,
    ) -> str:
        """Use to answer questions over a given paper.

        Useful if you have specific questions over the paper.
        Always leave page_numbers as None UNLESS there is a specific page you want to search for.

        Args:
            query (str): the string query to be embedded.
            page_numbers (Optional[List[str]]): Filter by set of pages. Leave as NONE
                if we want to perform a vector search
                over all pages. Otherwise, filter by the set of specified pages.

        """
        # Only build metadata filters when pages were actually requested:
        # an empty OR-filter list can match nothing in some vector stores.
        filters = None
        if page_numbers:
            filters = MetadataFilters.from_dicts(
                [{"key": "page_label", "value": p} for p in page_numbers],
                condition=FilterCondition.OR,
            )

        query_engine = vector_index.as_query_engine(
            similarity_top_k=2,
            filters=filters,
        )
        response = query_engine.query(query)
        # The annotation promises a str; Response objects stringify to the answer.
        return str(response)

    vector_query_tool = FunctionTool.from_defaults(
        name=f"vector_tool_{name}",
        fn=vector_query,
    )

    summary_query_engine = summary_index.as_query_engine(
        response_mode="tree_summarize",
        use_async=True,
    )
    summary_tool = QueryEngineTool.from_defaults(
        name=f"summary_tool_{name}",
        query_engine=summary_query_engine,
        description=(
            f"Useful for summarization questions related to {name}"
        ),
    )

    return vector_query_tool, summary_tool


# Papers to index; one pair of tools is built per file.
papers = [
    "../examples/pdf/llm/DeepSeek_V3.pdf",
    "../examples/pdf/llm/Llama3.pdf",
    "../examples/pdf/llm/Qwen2.5.pdf",
]

paper_to_tools_dict = {}
for paper in papers:
    print(f"Getting tools for paper: {paper}")
    tools = get_doc_tools(paper, Path(paper).stem)
    # Summary tools are deliberately excluded from the retrievable set for
    # now; keep only the vector tool (index 0 of the returned pair).
    paper_to_tools_dict[paper] = [tools[0]]

# Flatten the per-paper tool lists, preserving paper order.
all_tools = [
    tool
    for paper in papers
    for tool in paper_to_tools_dict[paper]
]

# define an "object" index and retriever over these tools

# Index the tools themselves so the agent can retrieve only the relevant
# tools per query instead of being handed the full tool list at once.
obj_index = ObjectIndex.from_objects(all_tools, index_cls=VectorStoreIndex)
obj_retriever = obj_index.as_retriever(similarity_top_k=3)

# Worker + runner pair: the worker fetches candidate tools via the
# retriever at each step, the runner drives the reasoning loop.
agent_worker = FunctionCallingAgentWorker.from_tools(
    tool_retriever=obj_retriever,
    llm=llm,
    system_prompt=""" \
    You are an agent designed to answer queries over a set of given papers.
    Please always use the tools provided to answer a question. Do not rely on prior knowledge.\

    """,
    verbose=True,
)
agent = AgentRunner(agent_worker)

# Run the demo questions in order, printing each answer.
questions = [
    "Tell me about the evaluation dataset used in DeepSeek V3",
    "Tell me about the evaluation dataset used in DeepSeek V3, Qwen 2.5 and Llama 3",
]
for question in questions:
    answer = agent.query(question)
    print(str(answer))

