import onnxruntime
import numpy as np
import torch
from transformers import Qwen2TokenizerFast,PretrainedConfig
from typing import List
import os
import logging
import gc

from .base_interface import BaseLLMInterface

from ChatApp.app_modules.utils import (
    is_stop_word_or_prefix,
    convert_to_markdown,
    shared_state,
)


class Tokenizer:
    """Thin wrapper around ``Qwen2TokenizerFast`` loaded from a tokenizer.json file.

    Exposes encode/decode helpers plus :meth:`get_input_para`, which builds
    the ``input_ids`` / ``attention_mask`` / ``position_ids`` tensors that the
    ONNX model expects.
    """

    def __init__(self, json_path: str):
        # Validate with a real exception: ``assert`` is stripped under
        # ``python -O`` and would let a missing file through.
        if not os.path.isfile(json_path):
            raise FileNotFoundError(json_path)
        self.fast_tokenizer = Qwen2TokenizerFast(tokenizer_file=json_path)

        # BOS / EOS token IDs.
        # NOTE(review): Qwen tokenizers may report ``bos_token_id`` as None;
        # callers that pass bos=True would then prepend None -- confirm the
        # tokenizer file actually defines a BOS token.
        self.bos_id: int = self.fast_tokenizer.bos_token_id
        self.eos_id: int = self.fast_tokenizer.eos_token_id

    def encode(self, s: str, bos: bool, eos: bool) -> List[int]:
        """Encode *s* to token ids, optionally wrapping with BOS/EOS ids."""
        if not isinstance(s, str):
            raise TypeError(f"expected str, got {type(s).__name__}")
        t = self.fast_tokenizer.encode(s)
        if bos:
            t = [self.bos_id] + t
        if eos:
            t = t + [self.eos_id]
        return t

    def decode(self, t: List[int]) -> str:
        """Decode token ids back into text."""
        return self.fast_tokenizer.decode(t)

    def get_tokenizer(self) -> Qwen2TokenizerFast:
        """Return the wrapped Hugging Face fast tokenizer."""
        return self.fast_tokenizer

    def get_input_para(self, text):
        """Tokenize *text* and return ``(input_ids, attention_mask, position_ids)``.

        All three are int64 tensors of shape ``(1, seq_len)``.  ``position_ids``
        is the cumulative sum of the attention mask minus one, clamped at zero
        (the standard construction for decoder position ids).
        """
        encodings_dict = self.fast_tokenizer([text])
        input_ids = torch.tensor(encodings_dict["input_ids"], dtype=torch.int64)
        attention_mask = torch.tensor(encodings_dict["attention_mask"], dtype=torch.int64)
        position_ids = attention_mask.long().cumsum(-1) - 1
        position_ids.masked_fill_(position_ids < 0, 0)
        position_ids = position_ids.to(torch.int64)

        return input_ids, attention_mask, position_ids




class QwenOnnxInterface(BaseLLMInterface):
    """Chat interface that runs a Qwen model exported to ONNX on CPU.

    ``predict`` builds a "[|Human|] ... [|AI|] ..." style prompt from the
    chat history and streams back a reply generated token-by-token by
    ``greedy_search`` (which, despite its name, samples with top-p).
    """

    def __init__(self, onnx_file="",  tokenizer_path="", config_path=""):
        super().__init__()

        # Only paths are stored here; the session, config and tokenizer are
        # loaded lazily in initialize().
        self.onnx_file = onnx_file
        self.tokenizer_path = tokenizer_path
        self.config_path = config_path
        self.device = torch.device("cpu")
        self.total_count = 0  # number of predict() calls served
        # Per-layer KV-cache tensors, rebuilt on every greedy_search call.
        self.past_key = []
        self.past_value = []
        

    def initialize(self):
        """Create the ONNX session, read the model config, load the tokenizer."""
        # Create the ONNX session
        logging.info(f"Creating ONNX session for [{self.onnx_file}]")
        options = onnxruntime.SessionOptions()
        self.llm_session = onnxruntime.InferenceSession(
            self.onnx_file,
            sess_options=options,
            providers=[
                "CPUExecutionProvider"
            ],
        )

        # Load the model config
        logging.info(f"Loading config from [{self.config_path}]")
        config = PretrainedConfig.from_json_file(self.config_path)

        # get the data type used by the model
        # NOTE(review): assumes session input index 3 is a float tensor
        # (presumably one of the past_key_values inputs) -- confirm against
        # the exported model's input ordering.
        data_type_str = self.llm_session.get_inputs()[3].type
        if data_type_str == "tensor(float16)":
            self.data_type = np.float16
        elif data_type_str == "tensor(float32)":
            self.data_type = np.float32
        elif data_type_str == "tensor(float)":
            self.data_type = np.float32
        # elif data_type_str == "tensor(int64)":
        #     self.data_type = np.int64
        else:
            raise Exception(f"Unknown data type {data_type_str}")

        logging.info(f"Detected Data Type [{self.data_type}]")

        # Model geometry used later to shape the KV-cache tensors.
        self.hidden_size = config.hidden_size
        self.max_seq_len = config.max_position_embeddings
        self.n_layers = config.num_hidden_layers
        self.n_heads = config.num_attention_heads

        # Initialize the tokenizer and produce the initial tokens.
        self.tokenizer = Tokenizer(json_path=self.tokenizer_path)


    def shutdown(self):
        # Nothing to release explicitly; the ONNX session is freed with the
        # object.
        pass

    def generate_prompt_with_history(self, text, history, tokenizer, max_length=2048):
        """Build the full prompt string from *text* plus prior chat turns.

        Walks *history* newest-to-oldest, keeping as many turns as still fit
        within *max_length* tokens (measured by re-encoding the candidate
        prompt with *tokenizer*).  Returns the prompt string, or None when
        not even the newest turn fits.
        """
        prompt = "[|Human|]Hey there I am a human that would like to have \
a conversation with you.\n[|AI|]Sure, I am happy to answer most questions\
\n[|Human|]Great, I insist that we take turns.\n[|AI|]I agree, we should\
 take turns.\n[|Human|]Great, can we also keep answers short\n[|AI|]Yes, \
short answers are usually best"

        history = ["\n[|Human|]{}\n[|AI|]{}".format(x[0], x[1]) for x in history]
        history.append("\n[|Human|]{}\n[|AI|]".format(text))
        history_text = ""
        flag = False
        for x in history[::-1]:
            # tokens = self.tokenizer.encode(text, bos=True, eos=False)
            if (
                len(
                    # self.tokenizer.encode(
                    #     prompt + history_text + x, bos=True, eos=False
                    # )
                    tokenizer.encode(
                        prompt + history_text + x, bos=True, eos=False
                    )
                )
                <= max_length
            ):
                # Turn still fits: prepend it (we are walking backwards).
                history_text = x + history_text
                flag = True
            else:
                break
        if flag:
            # return prompt + history_text, torch.tensor(
            #     self.tokenizer.encode(prompt + history_text, bos=True, eos=False)
            # ).unsqueeze(0)
            return prompt + history_text
        else:
            return None
    

        

    def sample_logits(
        self,
        logits: np.ndarray,
        sampling_method: str = "greedy",
        sampling_value: float = None,
        temperature: float = 1.0,
    ) -> np.ndarray:
        """Pick the next token id from *logits*.

        *sampling_method* is "greedy" (argmax; also forced when temperature
        is 0), "top_k" (*sampling_value* = k) or "top_p" (*sampling_value* =
        p, nucleus sampling).  Returns an int64 ndarray holding the chosen id.

        NOTE(review): the softmax and the final renormalisation sum over the
        whole array rather than per row, and the top-p cut-off only inspects
        row 0 -- correct only for batch size 1, which is what greedy_search
        passes.
        """
        if temperature == 0 or sampling_method == "greedy":
            next_token = np.argmax(logits, axis=-1).astype(np.int64)

        elif sampling_method == "top_k" or sampling_method == "top_p":
            assert sampling_value is not None

            # temperature, converting to probabilities and sorting are common to both top-k and top-p
            # convert logits to 32-bit float to avoid numerical issues with np.exp
            logits = logits.astype(np.float32)
            # Scale the logits by the temperature
            logits /= temperature
            # Convert logits to probabilities
            probs = np.exp(logits) / np.sum(np.exp(logits))
            
            # Descending order along the vocab axis.
            sorted_probs = np.sort(probs)[:, ::-1]
            sorted_indices = np.argsort(probs)[:, ::-1]

            # find the index of interest for each of the methods.
            if sampling_method == "top_k":
                index_of_interest = int(sampling_value)
            elif sampling_method == "top_p":
                p = sampling_value
                cumulative_probs = np.cumsum(sorted_probs, axis=-1)
                # find the value of the first cumalitive probability that exceeds p
                # logging.info(f"cumulative_probs = {cumulative_probs}")
                for index_of_interest, cumulative_prob in enumerate(
                    cumulative_probs[0]
                    # cumulative_probs
                ):
                    if cumulative_prob > p:
                        break

            # Keep the nucleus (or top-k) slice only.
            probs_of_interest = sorted_probs[:, : index_of_interest + 1]
            indices_of_interest = sorted_indices[:, : index_of_interest + 1]
            
            # Normalize the probabilities and select the next token
            probs_of_interest /= np.sum(probs_of_interest)
            next_token = np.array(
                [np.random.choice(indices_of_interest[0], p=probs_of_interest[0])]
            )
        else:
            raise Exception(f"Unknown sampling method {sampling_method}")

        return next_token

    def greedy_search(
        self,
        input_text,
        session,
        tokenizer,
        stop_words: list,
        max_length: int,
        temperature: float = 1.0,
        top_p: float = 1.0,
        top_k: int = 25,
    ):
        """Generate a reply for *input_text*, yielding the decoded text so far.

        Runs *session* step by step: the first step consumes the whole prompt
        with an empty KV cache, then each later step feeds only the newly
        sampled token plus the per-layer past key/value tensors returned by
        the previous run.  Stops on EOS, after *max_length* steps, or once
        any of *stop_words* appears in the generated text.

        NOTE(review): despite the name, sampling here is top-p (see the
        sample_logits call below); the *top_k* parameter is accepted but
        never used.
        """
        generated_tokens = []

        input_ids, attention_mask, position_ids = tokenizer.get_input_para(input_text)
        batch_size = input_ids.size(0)
        sequence_length = input_ids.size(1)
        logging.info(f"Batch size = {batch_size}, sequence length = {sequence_length}, hidden_size = {self.hidden_size}, n_heads = {self.n_heads}, n_layers = {self.n_layers}")
        
        self.past_key = []
        self.past_value = []
        ort_inputs = {}
        
        # Empty KV cache: the sequence dimension starts at length 0.
        # NOTE(review): head count uses num_attention_heads; a model with
        # grouped-query attention would need num_key_value_heads -- confirm
        # against the exported model's config.
        # self.past_shape = [batch_size, self.n_heads, sequence_length, self.hidden_size // self.n_heads]
        self.past_shape = [batch_size, self.n_heads, 0, self.hidden_size // self.n_heads]
        logging.info(f"{self.past_shape}")
        for i in range(self.n_layers):
            self.past_key.append(torch.empty(self.past_shape).type(torch.float32).to(self.device))
        for i in range(self.n_layers):
            self.past_value.append(torch.empty(self.past_shape).type(torch.float32).to(self.device))
            
            
        # Seed the session inputs with the (empty) per-layer caches.
        for j, key in enumerate(self.past_key):
            ort_inputs[f"past_key_values.{j}.key"] = np.ascontiguousarray(key.cpu().numpy())
        for k, value in enumerate(self.past_value):
            ort_inputs[f"past_key_values.{k}.value"] = np.ascontiguousarray(value.cpu().numpy())
        
        # NOTE(review): redundant -- these three tensors were already computed
        # from the same input_text above.
        input_ids, attention_mask, position_ids = tokenizer.get_input_para(input_text)
        
        all_token_ids = input_ids.clone()
        
        
        
        for i in range(max_length):
            logging.info(f"Input text : {input_text}")

            
            ort_inputs["input_ids"] = np.ascontiguousarray(input_ids.cpu().numpy())
            ort_inputs["attention_mask"] = np.ascontiguousarray(attention_mask.cpu().numpy())
            ort_inputs["position_ids"] = np.ascontiguousarray(position_ids.cpu().numpy())    
        
            # logging.info(f"{input_ids}")
            # logging.info(f"{attention_mask}")
            # logging.info(f"{position_ids}")

            logging.info(f"{ort_inputs['input_ids']}")
            logging.info(f"{ort_inputs['attention_mask']}")
            logging.info(f"{ort_inputs['position_ids']}")
            # logging.info(f"{ort_inputs['past_key_values.1.key']}")
            # logging.info(f"Eos id in tokenizer is {tokenizer.eos_id}")
            # logging.info(f"Bos id in tokenizer is {tokenizer.bos_id}")
            
            # Expected output layout: results[0] = logits, then the present
            # key/value tensors interleaved per layer (see the 1+2k / 2+2k
            # indexing further down).
            results = session.run(None, ort_inputs)
            # logits, k_out, v_out = results[:3]
            logging.info(f"Logit shape: {results[0].shape}")
            logging.info(f"Key shape: {results[1].shape}")
            logging.info(f"Value shape: {results[2].shape}")
            logits = results[0]
            # Only the last position's logits matter for the next token.
            predictions = logits[:, -1, :]
            
            
            next_token = self.sample_logits(predictions, "top_p", top_p, temperature)
            # next_token = self.sample_logits(predictions, "greedy", top_p, temperature)
            next_token = next_token.reshape(1, -1)
            next_token_str = tokenizer.decode(next_token[0].item())
            logging.info(f"Generated token id {next_token}, which is {next_token_str}")
            

            # Stop if/when we get an ENDOFTEXT token before reaching maximum sequence length
            # NOTE(review): 151645 is presumably Qwen's <|im_end|> token id --
            # confirm against the tokenizer's special-token table.
            if next_token[0] == tokenizer.eos_id or next_token[0] == 151645:
                logging.info(f"Inference sequence end.")
                del logits
                gc.collect()
                return

            # input_ids = torch.cat((input_ids, torch.tensor(next_token)), dim=-1)
            input_text = input_text + next_token_str
            
            has_eos = torch.zeros(batch_size, dtype=torch.bool)
            has_eos = has_eos | (next_token == tokenizer.eos_id)
            token_to_add = torch.from_numpy(next_token).masked_fill(has_eos.bool(), tokenizer.eos_id)
            
            
            all_token_ids = torch.cat([all_token_ids, torch.from_numpy(next_token[0]).unsqueeze(-1)], dim=-1)
            generated_tokens.append(next_token[0].item())
            text = tokenizer.decode(generated_tokens)
            
            logging.info(f"Generated text : {text}")
            
            # Incremental decoding: feed only the new token, advance the
            # position id by one, and extend the attention mask by one column.
            input_ids = token_to_add.clone().detach().reshape([batch_size, 1]).to(self.device)
            position_ids = (position_ids[:, -1] + 1).reshape(batch_size, 1)
            attention_mask = torch.cat([attention_mask, torch.ones([batch_size, 1]).type_as(attention_mask)], 1).to(self.device)
            
            # Feed this step's present key/values back as next step's past.
            for k in range(self.n_layers):
                self.past_key[k]=results[1+2*k]
                # logging.info(f"Past key {k} shape: {self.past_key[k].shape}")
                self.past_value[k]=results[2+2*k]
                # logging.info(f"Past value {k} shape: {self.past_value[k].shape}")
            for j, key in enumerate(self.past_key):
                ort_inputs[f"past_key_values.{j}.key"] = key
            for k, value in enumerate(self.past_value):
                ort_inputs[f"past_key_values.{k}.value"] = value

            # Stream the text decoded so far to the caller.
            yield text

            if any([x in text for x in stop_words]):
                logging.info(f"Inference sequence end.")
                del logits
                gc.collect()
                return

    def predict(
        self,
        text,
        chatbot,
        history,
        top_p,
        temperature,
        max_length_tokens,
        max_context_length_tokens,
    ):
        """Stream ``(chatbot, history, status)`` triples for a user message.

        Builds the prompt from *history* + *text*, then yields updated UI
        state as tokens arrive from greedy_search, truncating the reply at
        the first stop word.
        """
        if text == "":
            yield chatbot, history, "Empty context."
            return
        # Probe that initialize() created a session.
        # NOTE(review): a missing attribute raises AttributeError, which is
        # not in the caught tuple below -- confirm the intended failure mode.
        try:
            self.llm_session
        except (ValueError, RuntimeError, TypeError):
            yield [[text, "No Model Found"]], [], "No Model Found"
            return

        inputs = self.generate_prompt_with_history(
            text, history, self.tokenizer, max_length=max_context_length_tokens
        )

        if inputs is None:
            yield chatbot, history, "Input too long."
            return
        else:
            prompt = inputs


        # global total_count
        self.total_count += 1
        logging.info(self.total_count)


        # NOTE(review): `a` and `b` are only bound inside the first branch of
        # this loop; if it never executes, the `yield a, b, ...` statements
        # below raise NameError.
        for x in self.greedy_search(
            prompt,
            self.llm_session,
            self.tokenizer,
            stop_words=["[|Human|]", "[|AI|]", "<|im_end|>"],
            max_length=max_length_tokens,
            temperature=temperature,
            top_p=top_p,
        ):
            # logging.info(f"x class: {type(x).__name__}, x: {x}")
            if is_stop_word_or_prefix(x, ["[|Human|]", "[|AI|]", "<|im_end|>"]) is False:
                # Cut the reply at the first stop marker, if any leaked in.
                if "[|Human|]" in x:
                    x = x[: x.index("[|Human|]")].strip()
                if "[|AI|]" in x:
                    x = x[: x.index("[|AI|]")].strip()
                if "<|im_end|>" in x:
                    x = x[: x.index("<|im_end|>")].strip()
                x = x.strip()
                # logging.info(f"Striped x: {x}")
                # a: markdown-rendered chat for the UI; b: raw history.
                a, b = [[y[0], convert_to_markdown(y[1])] for y in history] + [
                    [text, convert_to_markdown(x)]
                ], history + [[text, x]]
                # logging.info(f"a: {a}, b: {b}")
                yield a, b, "Generating..."
            if shared_state.interrupted:
                # User pressed stop: reset the flag and emit the final state.
                shared_state.recover()
                try:
                    yield a, b, "Stop: Success"
                    return
                except Exception as e:
                    print(type(e).__name__, e)
                    pass

        # del input_ids
        del prompt
        gc.collect()
        # torch.cuda.empty_cache()
        # logging.info(f"Search ended.a: {a}, b: {b}")

        try:
            yield a, b, "Generate: Success"
        except Exception as e:
            print(type(e).__name__, e)
            pass

        return

    def retry(
        self,
        text,
        chatbot,
        history,
        top_p,
        temperature,
        max_length_tokens,
        max_context_length_tokens,
    ):
        """Drop the last exchange and regenerate a reply for its user message."""
        logging.info("Retry...")
        if len(history) == 0:
            yield chatbot, history, "Empty context"
            return
        # Remove the last turn from the UI and history, then re-run predict
        # with that turn's user message.
        chatbot.pop()
        inputs = history.pop()[0]
        for x in self.predict(
            inputs,
            chatbot,
            history,
            top_p,
            temperature,
            max_length_tokens,
            max_context_length_tokens,
        ):
            yield x
