File size: 3,277 Bytes
cf51709
 
 
968a67a
cf51709
968a67a
9ff7774
ca6fbc3
cf51709
 
 
 
968a67a
cf51709
 
 
ae14774
968a67a
 
 
 
 
 
 
cf51709
 
 
 
 
 
968a67a
 
 
 
 
 
 
 
 
 
 
cf51709
 
 
 
 
 
 
 
ae14774
968a67a
cf51709
 
ae14774
 
 
 
968a67a
 
cf51709
 
 
 
 
 
 
968a67a
 
 
 
 
cf51709
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0d91eab
cf51709
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
from smolagents import (
    CodeAgent,
    LiteLLMModel,
    Tool,
)
from typing import Callable


class MyAgent:
    """Thin wrapper around a smolagents ``CodeAgent`` backed by a LiteLLM model."""

    def __init__(
        self,
        provider: str = "litellm",
        model_id: str = "gemini/gemini-2.0-flash-lite",
        api_base: str | None = None,
        api_key: str | None = None,
        planning_interval: int = 3,
        num_ctx: int = 8192,
        tools: list[Tool] | None = None,
        add_base_tools: bool = True,
        temperature: float = 0.2,
        additional_authorized_imports: list[str] | None = None,
        step_callbacks: list[Callable] | None = None,
        max_steps: int = 20,
        verbosity_level: int = 2,
    ):
        """
        Initializes the agent depending on the provider and model ID.

        Args:
            provider (str): The provider of the model (currently only "litellm").
            model_id (str): The ID of the model to be used.
            api_base (str | None): The base URL of the API.
            api_key (str | None): The API key.
            planning_interval (int): The interval (in steps) between planning phases.
            num_ctx (int): The number of context tokens.
            tools (list[Tool] | None): The tools to be used by the agent
                (defaults to an empty list).
            add_base_tools (bool): Whether the agent adds its built-in base tools.
            temperature (float): The sampling temperature for the model.
            additional_authorized_imports (list[str] | None): Additional imports
                the CodeAgent is authorized to execute (defaults to empty).
            step_callbacks (list[Callable] | None): Callbacks invoked on each
                agent step (defaults to empty).
            max_steps (int): The maximum number of agent steps.
            verbosity_level (int): The verbosity level.

        Raises:
            ValueError: If ``provider`` is not supported.
        """
        # Guard clause: fail fast on an unsupported provider, before doing
        # any model construction work.
        if provider != "litellm":
            raise ValueError(f"Unsupported provider: {provider}")

        self.provider = provider
        self.model_id = model_id
        self.api_base = api_base
        self.api_key = api_key
        self.planning_interval = planning_interval
        self.num_ctx = num_ctx
        self.temperature = temperature

        # BUG FIX: the original signature used mutable default arguments
        # ([] shared across every call). Use None sentinels and build a
        # fresh list per instance instead.
        tools = [] if tools is None else tools
        additional_authorized_imports = (
            [] if additional_authorized_imports is None
            else additional_authorized_imports
        )
        step_callbacks = [] if step_callbacks is None else step_callbacks

        model = LiteLLMModel(
            model_id=self.model_id,
            api_base=self.api_base,
            api_key=self.api_key,
            num_ctx=self.num_ctx,
            temperature=self.temperature,
        )

        # BUG FIX: ``add_base_tools`` is a CodeAgent/MultiStepAgent parameter,
        # not a LiteLLMModel one. Previously it was passed to the model, where
        # it leaked into the LLM completion kwargs, and the agent never
        # received its base toolbox.
        self.agent = CodeAgent(
            model=model,
            tools=tools,
            add_base_tools=add_base_tools,
            planning_interval=self.planning_interval,
            additional_authorized_imports=additional_authorized_imports,
            step_callbacks=step_callbacks,
            max_steps=max_steps,
            verbosity_level=verbosity_level,
        )

        print(f"Agent initialized with provider: {provider}, model ID: {model_id}")

    def __call__(self, question: str) -> str:
        """
        Given a question, run the agent and return the answer.

        Args:
            question (str): The question to be answered.

        Returns:
            str: The answer to the question.
        """
        # BUG FIX: log the incoming question BEFORE the (potentially slow)
        # agent run, so the log order reflects execution order; also drop the
        # stale "fixed answer" wording left over from a template.
        print(f"Agent received question (last 50 chars): {question[-50:]}...")
        final_answer = self.agent.run(question)
        print(f"Agent returning answer: {final_answer}")
        return final_answer