import numpy as np
import os
import sys
from ais_bench.infer.interface import InferSession
from sentencepiece import SentencePieceProcessor
from typing import Generator, List, Tuple
import gc
from enum import Enum
from threading import Lock

from session import Session
from config import InferenceConfig
from janus.models import VLChatProcessor
from janus.utils.io import load_pil_images
from einops import rearrange
import numpy as np
import torch
import time

class LlamaInterface:
    """Multimodal (image + text) chat inference wrapper.

    Runs a Janus-style pipeline on Ascend NPU via ais_bench: a vision
    encoder OM model, a token-embedding OM model, and a KV-cached decoder
    driven by ``Session``. Generation state is exposed through
    ``getState()`` and guarded by ``self.lock``.
    """

    def __init__(self, config: "InferenceConfig") -> None:
        """Load the OM models, tokenizer/processor and decoder session.

        Args:
            config: inference settings (max_length, sampling, prompt,
                max_cache_size, ...).
        """
        self.vision_model = InferSession(device_id=0, model_path="models/vision.om")
        self.embedding_model = InferSession(device_id=0, model_path="models/Janus-Embedding_linux_aarch64.om")
        self.max_length = config.max_length
        self.vl_chat_processor = VLChatProcessor.from_pretrained("tokenizer")
        self.tokenizer = self.vl_chat_processor.tokenizer
        self.sampling_method = config.sampling_method
        self.sampling_value = config.sampling_value
        self.temperature = config.temperature
        self.session = Session.fromConfig(config)
        self.prompt = config.prompt
        self.max_cache_size = config.max_cache_size
        # Shared generation state read by getState(); written under self.lock.
        self.state: dict[str, object] = {"code": 200, "isEnd": False, "message": ""}
        self.reset()
        self.lock = Lock()
        self.first = True
        self.prefill = True
        print("init success")

    def generate_cache(self, prompt: str):
        """Prime the decoder KV cache with ``prompt``.

        Returns None for an empty prompt; otherwise a tuple of
        (sampled next-token id array, full logits from the prefill run).
        """
        if len(prompt) == 0:
            return
        self.first = False
        input_ids = np.asarray(self.tokenizer.encode(prompt), dtype=np.int64).reshape(1, -1)
        logits = self.session.run(input_ids)[0]
        return self.sample_logits(logits[0][-1:], self.sampling_method, self.sampling_value, self.temperature), logits

    def sample_logits(
        self,
        logits: np.ndarray,
        sampling_method: str = "greedy",
        sampling_value: float = None,
        temperature: float = 1.0,
    ) -> np.ndarray:
        """Select the next token id from a (1, vocab) logits row.

        Args:
            logits: logits for the last position, shape (1, vocab).
            sampling_method: "greedy", "top_k" or "top_p".
            sampling_value: k (top_k) or p (top_p); required for those modes.
            temperature: softmax temperature; 0 forces greedy decoding.

        Returns:
            np.ndarray of shape (1,) holding the chosen token id.

        Raises:
            Exception: for an unknown ``sampling_method``.
        """
        if temperature == 0 or sampling_method == "greedy":
            next_token = np.argmax(logits, axis=-1).astype(np.int64)
        elif sampling_method in ("top_k", "top_p"):
            assert sampling_value is not None
            logits = logits.astype(np.float32) / temperature
            # Subtract the row max before exp() for numerical stability:
            # the previous exp(logits) overflowed to inf for large logits,
            # producing NaN probabilities that crash np.random.choice.
            logits -= np.max(logits, axis=-1, keepdims=True)
            exp_logits = np.exp(logits)
            probs = exp_logits / np.sum(exp_logits)
            sorted_probs = np.sort(probs)[:, ::-1]
            sorted_indices = np.argsort(probs)[:, ::-1]

            if sampling_method == "top_k":
                index_of_interest = int(sampling_value)
            else:  # top_p (nucleus) sampling
                p = sampling_value
                cumulative_probs = np.cumsum(sorted_probs, axis=-1)
                # Default to the whole vocabulary if no prefix exceeds p
                # (also guards against an unbound loop variable).
                index_of_interest = cumulative_probs.shape[-1] - 1
                for i, cumulative_prob in enumerate(cumulative_probs[0]):
                    if cumulative_prob > p:
                        index_of_interest = i
                        break

            # Copy before the in-place renormalisation so we never mutate
            # a view of sorted_probs.
            probs_of_interest = sorted_probs[:, : index_of_interest + 1].copy()
            indices_of_interest = sorted_indices[:, : index_of_interest + 1]
            probs_of_interest /= np.sum(probs_of_interest)
            next_token = np.array(
                [np.random.choice(indices_of_interest[0], p=probs_of_interest[0])]
            )
        else:
            raise Exception(f"Unknown sampling method {sampling_method}")

        return next_token

    def predict(self, text, image):
        """Run one image+text chat turn and return the decoded answer.

        Args:
            text: user question; an empty string aborts and returns None.
            image: image path inserted at the <image_placeholder> slot.

        Returns:
            The decoded answer string ("" if generation stopped before
            producing any token), or None for empty ``text``.
        """
        with self.lock:
            self.state['isEnd'], self.state['message'] = False, ""
        if text == "":
            return

        conversation = [
            {
                "role": "<|User|>",
                "content": f"<image_placeholder>\n{text}",
                "images": [image],
            },
            {"role": "<|Assistant|>", "content": ""},
        ]

        print("准备执行加载图片")
        pil_images = load_pil_images(conversation)

        prepare_inputs = self.vl_chat_processor(
            conversations=conversation, images=pil_images, force_batchify=True
        )

        input_ids = prepare_inputs["input_ids"]

        # Encode image(s) on the NPU vision model in half precision.
        images = rearrange(prepare_inputs["pixel_values"], "b n c h w -> (b n) c h w")
        image_embeds = torch.from_numpy(self.vision_model.infer(images.half())[0]).half()

        images_emb_mask = rearrange(prepare_inputs["images_emb_mask"], "b n t -> b (n t)")
        # Image-slot positions carry negative ids; zero them so the embedding
        # model accepts them, then overwrite those slots with vision features.
        input_ids[input_ids < 0] = 0

        input_embeds = torch.from_numpy(self.embedding_model.infer([input_ids], mode="dymshape", custom_sizes=10000000)[0]).half()
        input_embeds[prepare_inputs["images_seq_mask"]] = image_embeds[images_emb_mask]

        self.first = False
        ids_list = []
        count = 0
        # Fix: text_out was previously unbound if the loop broke before the
        # first decode (e.g. an immediate cache reset), crashing the return.
        text_out = ""
        for i in range(self.max_length):
            # Reset the session once its run count exceeds the cache budget.
            if self.session.run_times >= self.max_cache_size:
                self.reset()
                break
            if self.prefill:
                # First step: feed the full multimodal embedding sequence.
                logits = self.session.run_prefill(input_embeds.numpy(), prepare_inputs["images_seq_mask"])[0]
                self.prefill = False
            else:
                # Subsequent steps: embed only the last sampled token.
                input_embeds = self.embedding_model.infer([input_ids], mode="dymshape", custom_sizes=10000000)[0].astype(np.half)
                logits = self.session.run(input_embeds)[0]

            input_ids = self.sample_logits(logits[0][-1:], self.sampling_method, self.sampling_value, self.temperature)
            input_ids = input_ids.reshape(1, -1)
            count += 1
            if input_ids[0] == self.tokenizer.eos_token_id:
                text_out = self.tokenizer.decode(ids_list)
                sys.stdout.write(f'{text_out}\n')
                sys.stdout.write(f'count: {count}\n')
                sys.stdout.flush()
                with self.lock:
                    self.state['message'], self.state['isEnd'] = text_out.strip(), True
                del logits
                gc.collect()
                break
            ids_list.append(input_ids[0][0])
            text_out = self.tokenizer.decode(ids_list)
        return text_out

    def reset(self):
        """Clear the decoder session and re-prime it with the system prompt."""
        self.first = True
        self.session.run_times = 0
        self.session.reset()
        self.generate_cache(self.prompt)

    def getState(self):
        """Return a thread-safe snapshot of the generation state dict."""
        with self.lock:
            return self.state.copy()


def preprocess(text: str) -> str:
    """Wrap *text* in the chat prompt template expected by the model."""
    prefix = "<|user|>\n"
    suffix = "</s>\n<|assistant|>"
    return "".join((prefix, text, suffix))

def is_stop_word_or_prefix(s: str, stop_words: list) -> str:
    """Return the first stop word that *s* ends with, or "" if none match.

    Fix: the return annotation said ``int`` but the function has always
    returned a string (the matched stop word or the empty string).
    """
    for stop_word in stop_words:
        if s.endswith(stop_word):
            return stop_word
    return ""
