import os
# Must be exported before the native inference library is loaded below.
# NOTE(review): presumably tells libllm to unload/stream weights — confirm
# against the library's documentation.
os.environ['UNLOAD_WEIGHT'] = '1'

import time
from typing import List, Iterator
import ctypes
from ctypes import (
    c_int,
    POINTER,
    c_char,
    c_char_p,
    c_long,
)

import numpy as np
import numpy.ctypeslib as npct
from transformers import AutoTokenizer, Qwen2Model

from llm_server.types import CreateCompletionResponse

# Native inference library. NOTE(review): hard-coded absolute path — consider
# making it configurable (env var / argument) before deploying elsewhere.
_lib = ctypes.CDLL("/home/xzl/llm/libllm.so.1")

_lib.init_model.argtypes = [c_char_p]
_lib.init_model.restype = c_int


def init_model(model_url: str) -> int:
    """Load the model located at *model_url* into the native library.

    Returns the integer status reported by the C side.
    """
    encoded_url = model_url.encode('utf-8')
    return _lib.init_model(encoded_url)

def init_session() -> int:
    """Open a new native inference session and return its handle."""
    return _lib.init_session()

_lib.init_embedding.argtypes = [c_char_p]
_lib.init_embedding.restype = POINTER(c_char)


def init_embedding_table(emb_url: str):
    """Load the token-embedding table at *emb_url*.

    Returns the raw ``char*`` pointer handed back by the native library.
    """
    encoded_url = emb_url.encode("utf-8")
    return _lib.init_embedding(encoded_url)
_lib.run.argtypes = [
    c_long, c_int, c_int, POINTER(c_char), c_int,
    npct.ndpointer(dtype=np.float32, flags='C_CONTIGUOUS'),
]
_lib.run.restype = None


def llm_run(handle: int, index: int, pos: int, embedd_ptr: POINTER, emb_dim: int, vocab_size: int):
    """Run one decode step: feed token *index* at position *pos*.

    Returns a float32 logits vector of length *vocab_size*; the native
    library fills the buffer in place.
    """
    logits = np.zeros(vocab_size, dtype=np.float32)
    _lib.run(handle, index, pos, embedd_ptr, emb_dim, logits)
    return logits

_lib.pre_fill.argtypes = [
    c_long, POINTER(c_int), c_int, POINTER(c_char), c_int,
    npct.ndpointer(dtype=np.float32, flags='C_CONTIGUOUS'),
]
_lib.pre_fill.restype = None


def llm_prefill(handle: int, tokens: List[int], embedd_ptr: POINTER, emb_dim: int, vocab_size: int):
    """Prefill the session's KV cache with the prompt *tokens*.

    Returns the float32 logits for the final prompt position, filled in
    place by the native library.
    """
    logits = np.zeros(vocab_size, dtype=np.float32)
    token_array = (c_int * len(tokens))(*tokens)
    _lib.pre_fill(handle, token_array, len(tokens), embedd_ptr, emb_dim, logits)
    return logits

class LLM:
    """ctypes-backed text-generation model.

    Wraps a native inference session plus a HuggingFace tokenizer and
    exposes OpenAI-style streaming completion / chat-completion APIs.
    """

    # End-of-turn token id that stops generation.
    # NOTE(review): 151645 is Qwen2's <|im_end|> id — subclasses for other
    # model families (e.g. Llama3) likely need to override this; confirm.
    EOS_TOKEN_ID = 151645

    def __init__(self, model_url: str, emb_dim: int, vocab_size: int):
        """Load the model at *model_url* and open an inference session.

        *emb_dim* and *vocab_size* must match the checkpoint's embedding
        width and vocabulary size.
        """
        self.model_name = model_url
        init_model(model_url)
        self.handler = init_session()
        # The embedding table lives in a separate token_embd* file inside
        # the model directory; raises StopIteration if none is found.
        emb_file = next(x for x in os.listdir(model_url) if x.startswith('token_embd'))
        self.emb_data = init_embedding_table(f'{model_url}/{emb_file}')
        self.emb_dim = emb_dim
        self.vocab_size = vocab_size
        # NOTE(review): tokenizer path is hard-coded to Qwen2 regardless of
        # subclass — verify this is intended for non-Qwen models.
        self.tokenizer = AutoTokenizer.from_pretrained('/home/xzl/llm/qwen2_tokenizer')

    def run(self, token: int, pos: int):
        """Decode one step: feed *token* at position *pos*, return logits."""
        assert token >= 0 and pos >= 0
        return llm_run(self.handler, token, pos, self.emb_data,
                       self.emb_dim, self.vocab_size)

    def _prefill(self, tokens: List[int]):
        """Prefill the session with the prompt; return last-position logits."""
        assert len(tokens) >= 1
        return llm_prefill(self.handler, tokens, self.emb_data,
                           self.emb_dim, self.vocab_size)

    def generate(self, tokens: List[int], total_len=512, step=100, tempture=0.0):
        """Greedily generate token ids until EOS or position *total_len*.

        *step* and *tempture* are accepted for interface compatibility but
        currently unused: decoding is pure argmax (temperature 0).

        Fix: the first token produced by the prefill pass was previously
        computed but never yielded (it sat in an unused list) and was never
        EOS-checked; it is now emitted and checked like every other token.
        """
        print(f'doing prefill about {len(tokens)/100:.2}s')
        t = time.time()
        logits = self._prefill(tokens)
        print(f"prefill cost about {time.time()-t:.2}s")
        next_token = int(np.argmax(logits))

        for cur_pos in range(len(tokens), total_len):
            if next_token == self.EOS_TOKEN_ID:
                break
            yield next_token
            logits = self.run(next_token, pos=cur_pos)
            next_token = int(np.argmax(logits))

    def __call__(
        self, prompt: str,
        total_len: int = 512,
        step: int = 100,
        temperature: float = 0.0, **kwargs):
        """Wrap a single user *prompt* in the chat template and stream
        raw text_completion chunks."""
        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt}
        ]

        prompt = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )

        chunks: Iterator[CreateCompletionResponse] = self.create_completion(
            prompt, total_len, step, temperature)
        return self._convert_completion(chunks)

    def _convert_completion(self, chunks):
        """Pass completion chunks through unchanged (debug print removed)."""
        for chunk in chunks:
            yield chunk

    def create_completion(
        self,
        prompt: str,
        total_len: int = 512,
        step: int = 100,
        temperature: float = 0.0, **kwargs):
        """Tokenize *prompt* and stream OpenAI-style text_completion chunks.

        Fix: *total_len*, *step* and *temperature* were previously accepted
        but silently dropped; they are now forwarded to generate().
        """
        tokens = self.tokenizer([prompt], return_tensors="np")['input_ids']
        tokens = tokens.flatten().tolist()

        for token in self.generate(tokens, total_len=total_len,
                                   step=step, tempture=temperature):
            text = self.tokenizer.decode(token)

            yield {
                "object": "text_completion",
                "model": self.model_name,
                "choices": [
                    {
                        "text": text,
                        "index": 0,
                        "logprobs": None,
                        "finish_reason": None,
                    }
                ],
            }

    def create_chat_completion(
        self, messages,
        total_len: int = 512,
        step: int = 100,
        temperature: float = 0.0, **kwargs):
        """OpenAI-style chat endpoint: template *messages*, stream
        chat.completion.chunk dicts."""
        prompt = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )

        chunks: Iterator[CreateCompletionResponse] = self.create_completion(
            prompt, total_len, step, temperature)
        return self._convert_completion_to_chat(chunks)

    def _convert_completion_to_chat(self, chunks):
        """Convert text_completion chunks into chat.completion.chunk dicts.

        The very first chunk is preceded by a role-only delta, matching the
        OpenAI streaming protocol.
        """
        for i, chunk in enumerate(chunks):
            if i == 0:
                yield {
                    "model": chunk["model"],
                    "object": "chat.completion.chunk",
                    "choices": [
                        {
                            "index": 0,
                            "delta": {
                                "role": "assistant",
                            },
                            "logprobs": None,
                            "finish_reason": None,
                        }
                    ],
                }
            yield {
                "model": chunk["model"],
                "object": "chat.completion.chunk",
                "choices": [
                    {
                        "index": 0,
                        "delta": (
                            {
                                "content": chunk["choices"][0]["text"],
                            }
                            if chunk["choices"][0]["finish_reason"] is None
                            else {}
                        ),
                        "logprobs": chunk["choices"][0]["logprobs"],
                        "finish_reason": chunk["choices"][0]["finish_reason"],
                    }
                ],
            }


class Qwen2(LLM):
    """Preset for the Qwen2-7B bf16 HF export."""

    def __init__(
        self,
        model_url='/opt/llm/model/qwen2-7B-bf16-hf',
        emb_dim=3584,
        vocab_size=152064,
    ):
        # Defaults match the Qwen2-7B checkpoint layout.
        super().__init__(model_url, emb_dim, vocab_size)


class Llama3(LLM):
    """Preset for the Llama-3 8B bf16 export."""

    def __init__(
        self,
        model_url='/opt/llm/model/llama-8B-bf16',
        emb_dim=4096,
        vocab_size=128256,
    ):
        # Defaults match the Llama-3-8B checkpoint layout.
        super().__init__(model_url, emb_dim, vocab_size)


if __name__ == '__main__':
    # Fix: previous code called undefined names `generate` and `tokenizer`
    # (NameError). Stream a demo completion through the model instead.
    llm = Qwen2()
    for chunk in llm("Hello, who are you?"):
        print(chunk["choices"][0]["text"], end="", flush=True)
    print()