File size: 2,177 Bytes
82ccb03
 
 
 
 
 
 
 
 
971c1eb
218536f
 
 
82ccb03
218536f
 
 
 
 
 
 
 
 
 
 
82ccb03
218536f
 
82ccb03
218536f
82ccb03
 
 
 
971c1eb
 
82ccb03
 
971c1eb
82ccb03
971c1eb
82ccb03
971c1eb
82ccb03
 
971c1eb
 
82ccb03
 
218536f
 
82ccb03
218536f
 
 
82ccb03
 
971c1eb
82ccb03
 
971c1eb
82ccb03
218536f
 
 
 
 
 
 
971c1eb
82ccb03
 
971c1eb
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
from doctest import debug

import yaml
from smolagents import (
    load_tool,
    CodeAgent,
    HfApiModel,
    VisitWebpageTool,
)

from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor

from openinference.instrumentation.smolagents import SmolagentsInstrumentor
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

from gradio_ui import GradioUI

# OTLP/HTTP endpoint of the local trace collector (e.g. a Phoenix instance).
endpoint = "http://0.0.0.0:6006/v1/traces"

# Export each span synchronously as it ends; fine for local debugging.
exporter = OTLPSpanExporter(endpoint)
trace_provider = TracerProvider()
trace_provider.add_span_processor(SimpleSpanProcessor(exporter))

# Auto-instrument smolagents so every agent step is emitted as a span.
SmolagentsInstrumentor().instrument(tracer_provider=trace_provider)

# Model served through the Hugging Face Inference API.
model_id = 'Qwen/Qwen2.5-Coder-32B-Instruct'
# model_id='meta-llama/Llama-3.3-70B-Instruct'

# BUG FIX: the original script built this configured model and then
# immediately overwrote it with a bare `HfApiModel(model_id)`, silently
# discarding max_tokens, temperature and custom_role_conversions.
# Construct the model exactly once with the intended configuration.
model = HfApiModel(
    max_tokens=200,
    temperature=0.5,
    model_id=model_id,
    custom_role_conversions=None,
)

# Import tool from Hub (executes the tool's remote code — trusted source only).
image_generation_tool = load_tool("m-ric/text-to-image", trust_remote_code=True)

visitWebpageTool = VisitWebpageTool()

# Load the agent's prompt templates from the YAML file beside this script.
with open("prompts.yaml") as fh:
    prompt_templates = yaml.safe_load(fh)


# Sample questions surfaced in the UI; each entry is wrapped in the
# [[{"text": ...}]] shape that the Gradio examples widget expects.
_questions = (
    "what are AI agents ?",
    "how are tools used and how do they fit in ?",
    "What do you know about tokenization",
    "What do you know about ReAct ?",
    "What are special tokens ?",
    "What are actions ?",
    "What is the role of an LLM ?",
    "What are some of the topics covered in the course ?",
)
examples = [[{"text": question}] for question in _questions]

# Wire the agent with the web-visit and image-generation tools defined above.
agent = CodeAgent(
    tools=[visitWebpageTool, image_generation_tool],
    model=model,
    prompt_templates=prompt_templates,
)

# Start the Gradio chat interface; launch() blocks until the server stops.
gr = GradioUI(agent).launch()
# Loop over each example and call agent.run() with the text
# for prompt in prompts:
#     text = prompt[0]["text"]
#     response = agent.run(text)
#     print(f"Input: {text}\nResponse: {response}\n")



# messages = gr.State([])
# messages.append(gr.ChatMessage(role="assistant", content="testing"))