# Agent connector: configures embedding/LLM settings at import time and wraps a FunctionAgent.
from llama_index.core import StorageContext, load_index_from_storage
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.agent.workflow import FunctionAgent
from llama_index.llms.openai import OpenAI
import asyncio
import os
from llama_index.llms.openai_like import OpenAILike
from llama_index.embeddings.openai_like import OpenAILikeEmbedding
from llama_index.core import Settings
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from utils.api_connector import ModelConnector
from utils.doc_indexer import SemanticDocumentSplitter
from config.config import *

# Embedding model used for all llama_index operations in this process.
# NOTE(review): a commented-out OpenAILikeEmbedding configuration containing a
# hardcoded API key was removed here — credentials must never be committed to
# source control; load them from config/environment instead.
Settings.embed_model = HuggingFaceEmbedding(
    model_name="BAAI/bge-small-en-v1.5"
)

# Chat-capable, function-calling LLM endpoint; all connection details come
# from the project-level ModelConfig (imported via config.config).
llm = OpenAILike(
    model=ModelConfig.NAME,
    api_base=ModelConfig.API_BASE,
    api_key=ModelConfig.TOKEN,
    is_chat_model=True,
    is_function_calling_model=True,
)
# Register the LLM globally so llama_index components pick it up by default.
Settings.llm = llm




class AgentConnector(ModelConnector):
    """
    AgentConnector class to handle LLM requests and responses.

    Wraps a llama_index ``FunctionAgent`` (built on the module-level ``llm``)
    and exposes both an async entry point (:meth:`run`) and a blocking one
    (:meth:`send_request`) for callers that are not running an event loop.
    """

    def __init__(self, tools=None,
                 system_prompt="You are an excellent testing engineer."):
        """
        Build the underlying FunctionAgent.

        Args:
            tools: Optional list of tool callables/specs for the agent.
                Falsy values (None, empty list) are normalized to ``[]``.
            system_prompt: System prompt passed to the agent.
        """
        # NOTE(review): super().__init__() is not called; confirm whether
        # ModelConnector requires initialization.
        self.llm = llm  # module-level OpenAILike instance
        if not tools:
            tools = []
        self.agent = FunctionAgent(
            tools=tools,
            llm=llm,
            system_prompt=system_prompt
        )

    async def run(self, prompt: str) -> str:
        """
        Run the agent with the given prompt and return the response.

        Args:
            prompt: Non-empty user prompt.

        Returns:
            The agent's response, stringified.

        Raises:
            ValueError: If ``prompt`` is empty.
        """
        if not prompt:
            raise ValueError("Prompt cannot be empty.")

        # Use the agent to run the prompt
        response = await self.agent.run(prompt)
        return str(response)

    def send_request(self, prompt, verbose=False) -> "str | None":
        """
        Send a request to the LLM and return the response (blocking).

        Drives the async :meth:`run` via ``asyncio.run``; must not be called
        from within a running event loop.

        Args:
            prompt: Non-empty user prompt.
            verbose: If True, print the response (or a no-response notice).

        Returns:
            The response string, or None if the agent produced an empty
            response.  (Annotation fixed: the original claimed ``-> str``
            but this branch returns None.)
        """
        response = asyncio.run(self.run(prompt))
        if not response:
            if verbose:
                print("No response received from the LLM.")
            return None
        if verbose:
            print("Response from LLM:")
            print(response)
        # run() already returns str; no need to convert again.
        return response

# Manual smoke test: build a default connector and print one answer.
if __name__ == "__main__":
    connector = AgentConnector()
    answer = connector.send_request("What is the purpose of this project?")
    print(answer)