import yaml
from smolagents import (
    load_tool,
    CodeAgent,
    HfApiModel,
    GradioUI,
    VisitWebpageTool,
)

# Optional OpenTelemetry / OpenInference tracing (disabled by default).
# from opentelemetry import trace
# from opentelemetry.sdk.trace import TracerProvider
# from opentelemetry.sdk.trace.export import BatchSpanProcessor
#
# from openinference.instrumentation.smolagents import SmolagentsInstrumentor
# from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
# from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor
#
# endpoint = "http://0.0.0.0:6006/v1/traces"
# trace_provider = TracerProvider()
# trace_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter(endpoint)))
#
# SmolagentsInstrumentor().instrument(tracer_provider=trace_provider)

# Model served through the Hugging Face Inference API.
model_id = "Qwen/Qwen2.5-Coder-32B-Instruct"
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id=model_id,
    custom_role_conversions=None,
)

# Import the text-to-image tool from the Hub.
image_generation_tool = load_tool("m-ric/text-to-image", trust_remote_code=True)

# Built-in tool for fetching and reading web pages.
visit_webpage_tool = VisitWebpageTool()

# Load the prompt templates used by the agent.
with open("prompts.yaml", "r") as stream:
    prompt_templates = yaml.safe_load(stream)

# Example questions for the assistant (currently not wired into the UI).
examples = [
    [{"text": "What are AI agents?"}],
    [{"text": "What do you know about ReAct?"}],
    [{"text": "What are some of the topics covered in the course?"}],
]

# Initialize the agent with the web-browsing and image generation tools.
agent = CodeAgent(
    tools=[visit_webpage_tool, image_generation_tool],
    model=model,
    prompt_templates=prompt_templates,
)

# Launch the Gradio chat interface for the agent.
GradioUI(agent).launch()
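
# A minimal sketch of running the agent headlessly instead of through the UI.
# It assumes the same `agent` defined above; `CodeAgent.run(task)` executes the
# agent loop and returns its final answer. Comment out the
# `GradioUI(agent).launch()` call above before trying this, since `launch()` blocks.
# answer = agent.run("What are AI agents?")
# print(answer)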