import os
import requests
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Optional,
    Union,
    cast,
)
import logging

logger = logging.getLogger(__name__)

ENV_GPT_API = "AI_SERVER_BASE_URL"
ENV_GPT_KEY = "AI_API_KEY"
ENV_GPT_MODEL = "AI_MODEL"
API_PATH = "/chat/completions"
MAX_TOKENS = 3000


def parse_llm_response(response_json: Any) -> Optional[str]:
    """Extract the assistant message text from an OpenAI-style response payload.

    Args:
        response_json: Decoded JSON body of a /chat/completions response.

    Returns:
        The ``choices[0].message.content`` string, or ``None`` when the
        payload does not have the expected structure.
    """
    try:
        return response_json["choices"][0]["message"]["content"]
    except (KeyError, IndexError, TypeError) as err:
        # Narrow exceptions: only malformed-payload errors are expected here.
        # Report through the module logger (not print) so the message reaches
        # the configured logging handlers, consistent with AiApi.call.
        logger.warning("parse_llm_response error: %s", err)
        return None


class AiApi:
    """Minimal client for an OpenAI-compatible chat-completions endpoint.

    The server base URL and API key are read from the environment
    (``AI_SERVER_BASE_URL`` / ``AI_API_KEY``) at construction time; the
    model name is read per request so it can change at runtime.
    """

    def __init__(self, system_message: str) -> None:
        # System prompt sent as the first message of every request.
        self.system_message = system_message
        # NOTE(review): either value may be None if the env var is unset —
        # that surfaces later as a malformed URL / auth header. Confirm
        # whether failing fast here would be preferable.
        self.api_url = os.getenv(ENV_GPT_API)
        self.api_key = os.getenv(ENV_GPT_KEY)

    def call(self, prompt: str, stop=None, timeout: float = 60.0) -> Optional[str]:
        """Send *prompt* to the chat-completions API and return the reply text.

        Args:
            prompt: User message, sent after the system message.
            stop: Kept for interface compatibility; currently unused and
                not forwarded to the API.
            timeout: Seconds before the HTTP request is aborted. Added so a
                stalled server cannot hang the caller indefinitely.

        Returns:
            The assistant's message content, or ``None`` if the response
            payload could not be parsed.

        Raises:
            requests.HTTPError: If the server returns a non-2xx status.
            requests.Timeout: If the request exceeds *timeout* seconds.
        """
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }
        data = {
            "messages": [
                {"role": "system", "content": self.system_message},
                {"role": "user", "content": prompt},
            ],
            "max_tokens": MAX_TOKENS,
            # Resolved per call so the env var can change between requests.
            "model": os.getenv(ENV_GPT_MODEL),
        }
        response = requests.post(
            f"{self.api_url}{API_PATH}",
            json=data,
            headers=headers,
            timeout=timeout,  # requests has no default timeout; without one this can block forever
        )
        response.raise_for_status()
        result = parse_llm_response(response.json())
        # Lazy %-formatting: message is only built if INFO logging is enabled.
        logger.info("call result:\n%s", result)
        return result
