from llama_cpp import Llama
from llama_cpp_agent.providers import LlamaCppPythonProvider
from llama_cpp_agent import LlamaCppAgent

# Path to the local GGUF model file; adjust to your environment.
MODEL_PATH = "/home/spike/code/deepseek-r1-distill-qwen-7b-q4_k_m.gguf"


def main() -> None:
    """Load the local GGUF model, ask one question, and print the reply."""
    # Load the model with batching, CPU threads, and GPU offload configured.
    llama_model = Llama(MODEL_PATH, n_batch=1024, n_threads=10, n_gpu_layers=40)

    # Wrap the Llama instance in the provider expected by LlamaCppAgent.
    provider = LlamaCppPythonProvider(llama_model)
    agent = LlamaCppAgent(provider)

    # Single-turn query; fixed typo ("minumum" -> "minimum").
    agent_output = agent.get_chat_response("give me formula of minimum curvature")
    print(f"Agent: {agent_output.strip()}")


if __name__ == "__main__":
    main()