import os
import time
from typing import Dict, Iterable, List, Optional, Union

from openai import OpenAI

from .common import GenerationResult

__all__ = ["instruct", "instruct_with_profile"]


# Module-level OpenAI client, configured from the environment at import time.
# NOTE: a missing OPENAI_API_KEY raises KeyError immediately on import
# (deliberate fail-fast); OPENAI_API_BASE is optional and falls back to the
# SDK's default endpoint when unset.
client = OpenAI(
    api_key=os.environ["OPENAI_API_KEY"],
    base_url=os.environ.get("OPENAI_API_BASE", None),
)


def instruct(
    user_input,
    system_prompt="",
    prefix="",
    historys: Optional[List[Dict[str, str]]] = None,
    streaming=False,
    failing_default="",
) -> Union[str, Iterable[str]]:
    """Send a single chat-completion request and return the model's reply.

    Args:
        user_input: The user message to send.
        system_prompt: Optional system message prepended to the conversation.
        prefix: Optional assistant-message prefix appended last, nudging the
            model to continue from it.
        historys: Prior ``{"role": ..., "content": ...}`` messages inserted
            between the system prompt and the new user message. ``None``
            (default) means no history.
        streaming: When True, return a generator of text deltas instead of a
            single string.
        failing_default: Value returned if the API call raises.

    Returns:
        The reply text, or a generator of text chunks when ``streaming``.
        On any API error, ``failing_default`` is returned instead.
    """
    messages = []
    if system_prompt:
        messages.append(dict(role="system", content=system_prompt))
    messages.extend(historys or [])
    messages.append(dict(role="user", content=user_input))

    if prefix:
        messages.append(dict(role="assistant", content=prefix))
    try:
        if streaming:
            response = client.chat.completions.create(
                model=os.environ.get("OPENAI_MODEL_ID"),
                messages=messages,
                temperature=0.0,
                stream=True,
            )
            # Chunks may carry a None delta (e.g. the final chunk); map to "".
            return (chunk.choices[0].delta.content or "" for chunk in response)

        response = client.chat.completions.create(
            # Fixed: was `os.envirom`, an AttributeError silently swallowed
            # below, so this branch always returned `failing_default`.
            model=os.environ.get("OPENAI_MODEL_ID"),
            messages=messages,
            temperature=0.0,
        )
        # Fixed: message is an object, not a dict — `message["content"]`
        # raised TypeError and also forced the fallback return.
        return response.choices[0].message.content
    except Exception:
        return failing_default


def instruct_with_profile(
    user_input,
    system_prompt="",
    prefix="",
    historys: Optional[List[Dict[str, str]]] = None,
    failing_default="",
) -> GenerationResult:
    """Stream a chat completion and profile prefill/generation performance.

    Args:
        user_input: The user message to send.
        system_prompt: Optional system message prepended to the conversation.
        prefix: Optional assistant-message prefix appended last.
        historys: Prior messages inserted before the new user message;
            ``None`` (default) means no history.
        failing_default: Text used as the response if streaming fails mid-way.

    Returns:
        A ``GenerationResult`` with the response text, prefill/generation
        wall-clock times, and token counts (``-1`` when the server does not
        report usage; per-token times are ``-1.0`` in that case rather than
        dividing by a non-positive count).
    """
    messages = []
    if system_prompt:
        messages.append(dict(role="system", content=system_prompt))
    messages.extend(historys or [])
    messages.append(dict(role="user", content=user_input))

    if prefix:
        messages.append(dict(role="assistant", content=prefix))

    start_time = time.time()
    response = client.chat.completions.create(
        model=os.environ.get("OPENAI_MODEL_ID"),
        messages=messages,
        temperature=0.0,
        stream=True,
    )

    prefilling_time = 0.0
    raw_text = ""
    token_usage = None
    try:
        for i, chunk in enumerate(response):
            if i == 0:
                # Time to first streamed chunk approximates prefill latency.
                prefilling_time = time.time() - start_time
            raw_text += chunk.choices[0].delta.content or ""
            # When the server reports usage it arrives on the final chunk;
            # keep the last non-None value seen. (Avoids the original's
            # unbound-variable NameError on an empty stream.)
            token_usage = getattr(chunk, "usage", None) or token_usage
    except Exception:
        # Best-effort: a mid-stream failure yields the fallback text.
        raw_text = failing_default

    # Generation time excludes prefill. Fixed: the original ADDED
    # prefilling_time here, double-counting the prefill phase.
    generation_time = time.time() - start_time - prefilling_time

    try:
        prefilling_tokens = token_usage.prompt_tokens
        generation_tokens = token_usage.completion_tokens
    except AttributeError:
        # Usage missing (token_usage is None): sentinel counts.
        prefilling_tokens = -1
        generation_tokens = -1

    return GenerationResult(
        prefilling_time=prefilling_time,
        generation_time=generation_time,
        # Guarded divisions: the original divided by -1 (or risked /0),
        # producing meaningless negative per-token times.
        time_per_token_generation=(
            generation_time / generation_tokens if generation_tokens > 0 else -1.0
        ),
        total_tokens_generation=generation_tokens,
        time_per_token_prefilling=(
            prefilling_time / prefilling_tokens if prefilling_tokens > 0 else -1.0
        ),
        total_tokens_prefilling=prefilling_tokens,
        llm_response=raw_text,
    )
