from inference.models import api_model
import requests
import json

class llama_factory(api_model):
    """Client for a LLaMA-Factory OpenAI-compatible chat-completions endpoint.

    Sends each sample's question as a single user message to
    ``http://{ip}:8000/v1/chat/completions`` and returns the model's reply text.
    """

    def __init__(self, workers=10, **kwargs):
        """Configure the endpoint URL and generation defaults.

        Args:
            workers: Number of parallel workers (forwarded to ``api_model``).
            **kwargs: ``ip`` — server host (default ``""``);
                ``model_id`` — model name sent in the payload (default ``"gpt-4"``).
        """
        super().__init__(workers)
        host = kwargs.get("ip", "")
        model_id = kwargs.get("model_id", "gpt-4")
        self.workers = workers
        self.model_name = model_id
        self.temperature = 0.7  # default sampling temperature; per-sample override allowed
        self.url = f"http://{host}:8000/v1/chat/completions"

    def get_api_result(self, sample):
        """Query the chat endpoint for one sample and return the reply text.

        Args:
            sample: Dict with a required ``"question"`` key and an optional
                ``"temperature"`` key overriding the instance default.

        Returns:
            The assistant message content on success; on failure, best-effort:
            the raw response body if JSON parsing failed, otherwise the
            exception message as a string. Never raises.
        """
        question = sample["question"]
        temperature = sample.get("temperature", self.temperature)

        payload = {
            # Bug fix: was hard-coded to the placeholder "string", so the
            # model_id configured in __init__ was never actually sent.
            "model": self.model_name,
            "messages": [
                {"role": "user", "content": question}
            ],
            "temperature": temperature,
            "n": 1,
            "max_tokens": 2048,
            "stream": False,
            # Server-specific fields — kept as-is; presumably required by the
            # LLaMA-Factory API schema (TODO confirm against server docs).
            "callback": False,
            "callback_url": "string",
        }

        headers = {"Content-Type": "application/json"}
        res = None
        try:
            response = requests.post(self.url, json=payload, headers=headers, timeout=120)
            # Keep the raw body first so a parse failure still returns something useful.
            res = response.text
            res = json.loads(res)["choices"][0]["message"]["content"]
        except Exception as e:
            # Deliberate best-effort: callers always get a string, never an exception.
            if res is None:
                res = str(e)

        return res
