# Copyright 2024 the LlamaFactory team.
# Copyright (c) 2024 Huawei Technologies Co., Ltd.
#
# This code is inspired by the LLaMA-Factory.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import os
import asyncio
import concurrent.futures
from threading import Thread
from typing import Any, AsyncGenerator, List, Dict, Callable
import torch
from transformers import (
    PreTrainedModel,
    PreTrainedTokenizer,
    TextIteratorStreamer,
    GenerationConfig,
    LogitsProcessorList,
    InfNanRemoveLogitsProcessor,
)

from openmind.flow.chat.base_engine import BaseEngine
from openmind.flow.datasets.template import Template
from openmind.flow.model import get_model, get_tokenizer
from openmind.flow.datasets import get_template
from openmind.utils import logging

logger = logging.get_logger()
logging.set_verbosity_info()


def get_logits_processor() -> "LogitsProcessorList":
    r"""
    Build the logits processor list used for generation.

    Contains a single :class:`InfNanRemoveLogitsProcessor`, which replaces
    NaN/Inf logits so sampling never sees invalid probabilities.
    """
    # LogitsProcessorList is a list subclass, so it accepts an iterable
    # of processors directly.
    return LogitsProcessorList([InfNanRemoveLogitsProcessor()])


class HfEngine(BaseEngine):
    """Chat engine backed by a local Hugging Face ``transformers`` model.

    ``model.generate`` runs in a background daemon thread with a
    ``TextIteratorStreamer``; :meth:`stream_chat` re-exposes the streamed
    text pieces as an async generator.  An :class:`asyncio.Semaphore`
    bounds the number of concurrent generations.
    """

    def __init__(self, args) -> None:
        # `args` carries the generation hyperparameters (temperature, top_p,
        # max_new_tokens, ...) read later in process_generation_args.
        self.args = args
        self.tokenizer = get_tokenizer()
        # Left padding: required for decoder-only models so that generated
        # tokens follow directly after the prompt in batched inputs.
        self.tokenizer.padding_side = "left"
        self.template = get_template()
        self.model = get_model()

        # Make sure the current thread has an event loop before creating the
        # semaphore (asyncio.Semaphore binds to the running/current loop on
        # older Python versions).
        try:
            asyncio.get_event_loop()
        except RuntimeError:
            logger.warning("No current event loop found, creating a new one.")
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

        # Caps concurrent generate() calls; defaults to 1 (fully serialized).
        self.semaphore = asyncio.Semaphore(int(os.environ.get("OPENMIND_MAX_CONCURRENT", "1")))

    def process_generation_args(
        self,
        model: "PreTrainedModel",
        tokenizer: "PreTrainedTokenizer",
        template: "Template",
        messages_context: List[Dict[str, str]],
    ) -> Dict[str, Any]:
        """Encode the chat history and assemble the kwargs for ``model.generate``.

        Returns a dict with ``inputs``, ``attention_mask``, a
        ``GenerationConfig`` built from ``self.args``, and the Inf/NaN-removing
        logits processor.
        """
        # Append an empty assistant turn so the template closes with the
        # assistant prompt prefix, ready for the model to continue.
        paired_messages_context = messages_context + [{"role": "assistant", "content": ""}]
        _, prompt_ids = template.encode_oneturn(tokenizer=tokenizer, messages=paired_messages_context, system="")

        # NOTE(review): `prompt_ids[0]` assumes encode_oneturn returns a
        # batch-like (nested) structure; if it returned a flat token list this
        # would take a single token — confirm against encode_oneturn's contract.
        inputs = torch.tensor([prompt_ids[0]], device=model.device)
        # Prompt contains no padding, so the mask is all ones.
        attention_mask = torch.ones_like(inputs, dtype=torch.bool)

        generation_args = {
            "do_sample": self.args.do_sample,
            "temperature": self.args.temperature,
            "top_p": self.args.top_p,
            "top_k": self.args.top_k,
            "num_beams": self.args.num_beams,
            "max_length": self.args.max_length,
            "max_new_tokens": self.args.max_new_tokens,
            "repetition_penalty": self.args.repetition_penalty,
            "length_penalty": self.args.length_penalty,
            "num_return_sequences": 1,
            # Stop on EOS or on any additional special token (e.g. turn ends).
            "eos_token_id": [tokenizer.eos_token_id] + tokenizer.additional_special_tokens_ids,
            "pad_token_id": tokenizer.pad_token_id,
            "use_cache": self.args.use_cache,
        }

        # A falsy temperature (0/None) means greedy decoding.
        if not self.args.temperature:
            generation_args["do_sample"] = False

        # Drop sampling-only knobs when sampling is disabled.
        # NOTE(review): this checks self.args.do_sample, not the possibly
        # just-overridden generation_args["do_sample"] — so with
        # do_sample=True and temperature=0, temperature/top_p are kept while
        # do_sample is False. Verify this is intentional.
        if not self.args.do_sample:
            generation_args.pop("temperature")
            generation_args.pop("top_p")

        gen_kwargs = dict(
            inputs=inputs,
            attention_mask=attention_mask,
            generation_config=GenerationConfig(**generation_args),
            logits_processor=get_logits_processor(),
        )

        return gen_kwargs

    async def stream_chat(self, messages_context: List[Dict[str, str]]) -> AsyncGenerator[str, None]:
        """Asynchronously yield generated text pieces for the given chat history.

        Acquires the concurrency semaphore, starts generation in a background
        thread, and pulls each streamed piece off the blocking streamer via
        ``run_in_executor`` so the event loop is never blocked.
        """
        loop = asyncio.get_running_loop()

        input_args = (
            self.args,
            self.model,
            self.tokenizer,
            self.template,
            messages_context,
        )

        async with self.semaphore:
            with concurrent.futures.ThreadPoolExecutor() as pool:
                # `stream` is a blocking no-arg callable returning the next piece.
                stream = self._stream_chat(*input_args)
                while True:
                    try:
                        # Each call blocks in the pool thread until the next
                        # piece arrives from the generation thread.
                        yield await loop.run_in_executor(pool, stream)
                    except StopAsyncIteration:
                        # Sentinel raised by `stream` when generation finishes.
                        break

    def _stream_chat(
        self,
        args,
        model: "PreTrainedModel",
        tokenizer: "PreTrainedTokenizer",
        template: "Template",
        messages_context: List[Dict[str, str]],
    ) -> Callable[[], str]:
        """Start generation in a daemon thread; return a blocking next-piece callable.

        The returned callable raises :class:`StopAsyncIteration` when the
        streamer is exhausted, so ``stream_chat`` can use it as its end marker.
        """
        gen_kwargs = self.process_generation_args(model, tokenizer, template, messages_context)
        streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
        gen_kwargs["streamer"] = streamer

        # Daemon thread: generation won't keep the process alive if the
        # consumer abandons the stream.
        thread = Thread(target=model.generate, kwargs=gen_kwargs, daemon=True)
        thread.start()

        def stream():
            # Translate the streamer's StopIteration into the async-side sentinel.
            try:
                return streamer.__next__()
            except StopIteration:
                raise StopAsyncIteration()

        return stream
