import os
import time
from typing import Iterable, List, Union

from pydantic import BaseModel
from sparkai.llm.llm import ChatMessage, ChatSparkLLM

__all__ = ["spark", "spark_streaming", "instruct", "instruct_with_profile"]


# Select Spark API credentials at import time based on the SPARKAI_MODEL
# environment variable.  Every deployment needs the same five settings, but
# some variants share credentials (e.g. "v4pre" reuses the V4 app id/keys,
# "pre" reuses the default app id/keys), so each entry maps a model name to
# the exact environment-variable names to read — a uniform prefix scheme
# would not work here.
_MODEL = os.environ.get("SPARKAI_MODEL")

# model name -> (url, app_id, api_key, api_secret, domain) env-var names
_ENV_KEYS = {
    "v4": (
        "SPARKAI_V4_URL",
        "SPARKAI_V4_APP_ID",
        "SPARKAI_V4_API_KEY",
        "SPARKAI_V4_API_SECRET",
        "SPARKAI_V4_DOMAIN",
    ),
    "v4pre": (
        "SPARKAI_V4_PRE_URL",
        "SPARKAI_V4_APP_ID",
        "SPARKAI_V4_API_KEY",
        "SPARKAI_V4_API_SECRET",
        "SPARKAI_V4_PRE_DOMAIN",
    ),
    "pre": (
        "SPARKAI_PRE_URL",
        "SPARKAI_APP_ID",
        "SPARKAI_API_KEY",
        "SPARKAI_API_SECRET",
        "SPARKAI_PRE_DOMAIN",
    ),
    "backup": (
        "SPARKAI_BACKUP_URL",
        "SPARKAI_BACKUP_APP_ID",
        "SPARKAI_BACKUP_API_KEY",
        "SPARKAI_BACKUP_API_SECRET",
        "SPARKAI_BACKUP_DOMAIN",
    ),
    "xingchen": (
        "SPARKAI_XINGCHEN_URL",
        "SPARKAI_XINGCHEN_APP_ID",
        "SPARKAI_XINGCHEN_API_KEY",
        "SPARKAI_XINGCHEN_API_SECRET",
        "SPARKAI_XINGCHEN_DOMAIN",
    ),
}

# Unknown or unset SPARKAI_MODEL falls back to the default deployment,
# mirroring the final `else` of the original if/elif chain.
_url_key, _app_id_key, _api_key_key, _secret_key, _domain_key = _ENV_KEYS.get(
    _MODEL,
    (
        "SPARKAI_URL",
        "SPARKAI_APP_ID",
        "SPARKAI_API_KEY",
        "SPARKAI_API_SECRET",
        "SPARKAI_DOMAIN",
    ),
)

# Missing variables raise KeyError at import, same as the original code.
params = dict(
    spark_api_url=os.environ[_url_key],
    spark_app_id=os.environ[_app_id_key],
    spark_api_key=os.environ[_api_key_key],
    spark_api_secret=os.environ[_secret_key],
    spark_llm_domain=os.environ[_domain_key],
)


# Shared non-streaming client: one-shot completions via `generate`.
spark = ChatSparkLLM(
    **params,
    request_timeout=60,
    streaming=False,
)

# Shared streaming client: incremental chunks via `stream`.
spark_streaming = ChatSparkLLM(
    **params,
    request_timeout=60,
    streaming=True,
)


def instruct(
    user_input,
    system_prompt="",
    prefix="",
    historys: Union[List[ChatMessage], None] = None,
    streaming=False,
    failing_default="",
    temperature=0.0,
) -> str:
    """Send one instruction to Spark and return the response text.

    Args:
        user_input: Content of the new user message.
        system_prompt: Optional system message prepended to the conversation.
        prefix: Optional assistant-role text appended after the user turn
            (nudges the model to continue from this prefix).
        historys: Prior ``ChatMessage`` turns inserted between the system
            prompt and the new user message.  ``None`` means no history.
        streaming: If True, use the streaming client; the chunks are still
            concatenated, so the return value is a full string either way.
        failing_default: Value returned when the API call raises.
        temperature: Sampling temperature passed to the model.

    Returns:
        The model's response text, or ``failing_default`` on any error.
    """
    messages: List[ChatMessage] = []
    if system_prompt:
        messages.append(ChatMessage(role="system", content=system_prompt))
    if historys:
        messages.extend(historys)
    messages.append(ChatMessage(role="user", content=user_input))
    if prefix:
        messages.append(ChatMessage(role="assistant", content=prefix))

    try:
        if streaming:
            chunks = spark_streaming.stream(messages, temperature=temperature)
            # Single join instead of quadratic `+=` concatenation.
            return "".join(chunk.content for chunk in chunks)
        response = spark.generate([messages], temperature=temperature)
        return response.generations[0][0].message.content
    except Exception:
        # Deliberate best-effort: any API or parsing failure degrades to
        # the caller-supplied default instead of propagating.
        return failing_default


class GenerationResult(BaseModel):
    """Timing and token-usage profile for one streamed LLM generation.

    Populated by ``instruct_with_profile``.  Token counts are ``-1`` when
    the underlying API call failed.
    """

    # Seconds from request start until the first streamed chunk arrived.
    prefilling_time: float = 0.0
    # Wall-clock seconds attributed to generating the completion.
    generation_time: float = 0.0
    # generation_time divided by the completion token count.
    time_per_token_generation: float = 0.0
    # Completion tokens as reported by the API's token_usage payload.
    total_tokens_generation: int = 0
    # prefilling_time divided by the prompt token count.
    time_per_token_prefilling: float = 0.0
    # Prompt tokens as reported by the API's token_usage payload.
    total_tokens_prefilling: int = 0
    # The generated text (or the caller's failing_default on error).
    llm_response: str = ""


def instruct_with_profile(
    user_input,
    system_prompt="",
    prefix="",
    historys: Union[List[ChatMessage], None] = None,
    failing_default="",
    temperature=0.0,
) -> GenerationResult:
    """Stream a completion from Spark and profile its latency and tokens.

    Args:
        user_input: Content of the new user message.
        system_prompt: Optional system message prepended to the conversation.
        prefix: Optional assistant-role text appended after the user turn.
        historys: Prior ``ChatMessage`` turns inserted between the system
            prompt and the new user message.  ``None`` means no history.
        failing_default: Text used for ``llm_response`` when the call fails.
        temperature: Sampling temperature passed to the model.

    Returns:
        A ``GenerationResult`` with timing and token statistics; on failure
        the token counts are ``-1`` and the per-token rates are ``0.0``.
    """
    messages: List[ChatMessage] = []
    if system_prompt:
        messages.append(ChatMessage(role="system", content=system_prompt))
    if historys:
        messages.extend(historys)
    messages.append(ChatMessage(role="user", content=user_input))
    if prefix:
        messages.append(ChatMessage(role="assistant", content=prefix))

    start_time = time.time()
    prefilling_time = 0.0
    chunks: List[str] = []
    try:
        response = spark_streaming.stream(messages, temperature=temperature)
        message = None
        for i, message in enumerate(response):
            if i == 0:
                # Latency to the first chunk approximates prompt prefilling.
                prefilling_time = time.time() - start_time
            chunks.append(message.content)
        # The API attaches usage stats to the final streamed chunk.
        token_usage = message.additional_kwargs["token_usage"]
        raw_text = "".join(chunks)
    except Exception:
        # Best-effort: degrade to the default text and sentinel token counts
        # (an empty stream also lands here via the attribute access above).
        raw_text = failing_default
        token_usage = {
            "prompt_tokens": -1,
            "completion_tokens": -1,
        }

    # Time after the first chunk is attributed to token generation.
    # NOTE(review): the original ADDED prefilling_time to the total elapsed
    # time, double-counting the prefill phase; it is subtracted here.
    generation_time = time.time() - start_time - prefilling_time
    prefilling_tokens = token_usage["prompt_tokens"]
    generation_tokens = token_usage["completion_tokens"]
    return GenerationResult(
        prefilling_time=prefilling_time,
        generation_time=generation_time,
        # Guard zero/sentinel counts: avoids ZeroDivisionError and the
        # meaningless negative rates the -1 failure sentinel produced.
        time_per_token_generation=(
            generation_time / generation_tokens if generation_tokens > 0 else 0.0
        ),
        total_tokens_generation=generation_tokens,
        time_per_token_prefilling=(
            prefilling_time / prefilling_tokens if prefilling_tokens > 0 else 0.0
        ),
        total_tokens_prefilling=prefilling_tokens,
        llm_response=raw_text,
    )
