import os

import yaml
from huggingface_hub import login
from smolagents import CodeAgent, HfApiModel, load_tool, tool

from customfunctions import current_time  # necessary for scanning the custom functions
from Gradio_UI import GradioUI
from local_model import LocalApiModel
from tools.final_answer import FinalAnswerTool

USE_HF_API = False
USE_LOCAL_MODEL_API = True


if not USE_HF_API and not USE_LOCAL_MODEL_API:
    raise Exception("What api should use?")
if USE_HF_API:
    login("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")

final_answer = FinalAnswerTool()

# If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud' 

model = None
if USE_HF_API:
    model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
        token="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",
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
    custom_role_conversions=None,
    )
elif USE_LOCAL_MODEL_API:
    model = LocalApiModel()


# Import tool from Hub
if USE_HF_API:
    image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)
    
agent = CodeAgent(
    model=model,
    tools=[final_answer,current_time], ## add your tools here (don't remove final answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
    additional_authorized_imports=["requests"]
)


GradioUI(agent).launch()