from smolagents import Model, ChatMessage, Tool
from typing import Optional, Dict, List, Union
import requests
import json
from openai import OpenAI

class LocalApiModel(Model):
    """smolagents ``Model`` backed by a local Ollama server.

    Two request paths are supported:

    * ``use_ollama=True``  -- Ollama's OpenAI-compatible chat endpoint
      (``/v1``), driven through the ``openai`` client.
    * ``use_ollama=False`` -- the raw ``/api/generate`` endpoint; the
      ``context`` token list returned by the first call is replayed on
      every later call so the conversation stays in-context.
    """

    # Raw generate endpoint used when use_ollama is False.
    local_model_url = "http://localhost:11434/api/generate"
    # Single place to change the served model name (used by both paths).
    model_name = "llama3"

    def __init__(
        self,
        provider: Optional[str] = None,
        use_ollama: bool = True,
        custom_role_conversions: Optional[Dict[str, str]] = None,
        **kwargs,
    ):
        """Store configuration; no network traffic happens here.

        Args:
            provider: Opaque provider label, stored but not used by this class.
            use_ollama: Select the OpenAI-compatible path (True) or the raw
                ``/api/generate`` path (False).
            custom_role_conversions: Optional role-name remapping, stored for
                callers; not consulted by this class directly.
            **kwargs: Forwarded to ``Model.__init__``.
        """
        super().__init__(**kwargs)
        self.use_ollama = use_ollama
        self.provider = provider
        self.custom_role_conversions = custom_role_conversions
        # Set after the first /api/generate call: the context token list
        # Ollama returned, replayed on subsequent requests.
        self.have_learned = False
        self.learned_context = None

    def __call__(
        self,
        messages: List[Dict[str, str]],
        stop_sequences: Optional[List[str]] = None,
        grammar: Optional[str] = None,
        tools_to_call_from: Optional[List[Tool]] = None,
        **kwargs,
    ) -> ChatMessage:
        """Run one generation and return it as a ``ChatMessage``.

        Note: each message's ``content`` is expected to be a list of
        ``{"text": ...}`` parts (see the indexing below), despite the
        ``Dict[str, str]`` annotation inherited from the original API.
        ``stop_sequences`` and ``grammar`` are accepted for interface
        compatibility but not forwarded to the backend.
        """
        if self.use_ollama:
            response = self.__request_ollama(messages)
            self.last_input_token_count = response.usage.prompt_tokens
            self.last_output_token_count = response.usage.completion_tokens
            message = ChatMessage.from_hf_api(response.choices[0].message, raw=response)
            if tools_to_call_from is not None:
                return parse_tool_args_if_needed(message)
            return message
        else:
            if not self.have_learned:
                # Prime the model with the first (task) message and keep the
                # returned context for every subsequent request.
                learned_json = self.__request_local_model(messages[0]["content"][0]["text"])
                self.have_learned = True
                self.learned_context = learned_json.get("context")
            response_json = self.__request_local_model(messages[-1]["content"][0]["text"])
            # Token counts can be absent from the response; default to 0.
            # (The original indexed the keys directly, which raised KeyError
            # before its None-guard could ever run.)
            self.last_input_token_count = response_json.get("prompt_eval_count") or 0
            self.last_output_token_count = response_json.get("eval_count") or 0
            repack_message = PackMessage(response_json["response"], "user")
            return ChatMessage.from_hf_api(repack_message, raw=response_json)

    def __request_local_model(self, question_text):
        """POST ``question_text`` to the raw /api/generate endpoint.

        Sends the learned context (when available) so the model keeps the
        earlier conversation state. Returns the decoded JSON response dict.
        Raises ``requests.HTTPError`` on a non-2xx status.
        """
        params = {
            "model": LocalApiModel.model_name,
            "prompt": question_text,
            "stream": False,
        }
        if self.have_learned:
            params["context"] = self.learned_context
        rsp = requests.post(LocalApiModel.local_model_url, json=params)
        # Fail loudly on HTTP errors instead of surfacing a JSON decode error.
        rsp.raise_for_status()
        return rsp.json()

    def __request_ollama(self, messages):
        """Send ``messages`` through Ollama's OpenAI-compatible chat endpoint."""
        client = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')
        return client.chat.completions.create(
            model=LocalApiModel.model_name, messages=messages
        )
    
class PackMessage:
    """Minimal message container with the ``content``/``role`` attribute
    shape that ``ChatMessage.from_hf_api`` expects."""

    def __init__(self, content, role):
        # Store the payload text and its chat role verbatim.
        self.content, self.role = content, role

def parse_json_if_needed(arguments: Union[str, dict]) -> Union[str, dict]:
    """Decode *arguments* from JSON when it is a JSON-encoded string.

    Dicts are returned untouched. Strings that are not valid JSON are
    returned unchanged (tool arguments are sometimes plain text), as are
    non-decodable values.
    """
    if isinstance(arguments, dict):
        return arguments
    try:
        return json.loads(arguments)
    except (TypeError, ValueError):
        # Narrowed from the original bare ``except Exception``:
        # json.JSONDecodeError subclasses ValueError; TypeError covers
        # non-str/bytes input. Anything else should propagate.
        return arguments

def parse_tool_args_if_needed(message: ChatMessage) -> ChatMessage:
    """Decode JSON-encoded tool-call arguments on *message*, in place.

    Each tool call's ``function.arguments`` is run through
    ``parse_json_if_needed``. Returns the same message object for chaining.
    A message whose ``tool_calls`` is None or empty is returned unchanged
    (the original iterated ``None`` and raised TypeError for messages
    without tool calls).
    """
    for tool_call in message.tool_calls or ():
        tool_call.function.arguments = parse_json_if_needed(tool_call.function.arguments)
    return message