import argparse
import asyncio
import contextlib
import ctypes
import random
from pathlib import Path
from typing import Optional

import numpy as np
import torch
import torch.nn.functional as F
from pydantic import BaseModel
from sentencepiece import SentencePieceProcessor

from config import settings
from gemma import config
from gemma import model as gemma_model
from model import *

# Module-level paths and handles shared by the model wrappers below.
tokenizer_path = "./tinyllm.tokenizer.model"
checkpoint_path = settings.tiny_llm_ckpt
InferLLM_lib_path = "./libchatglm.so"
device = "cpu"
# Loaded eagerly at import time; generate_paragraph() reads this global.
tokenizer = SentencePieceProcessor(model_file=tokenizer_path)


@contextlib.contextmanager
def _set_default_tensor_type(dtype: torch.dtype):
    """Sets the default torch dtype to the given dtype."""
    torch.set_default_dtype(dtype)
    yield
    torch.set_default_dtype(torch.float)


class PromptData(BaseModel):
    """Request payload for a single generation call."""

    prompt: str
    # Backend selector; "gemma" is the default route. Other accepted values
    # presumably match the wrapper classes below — confirm against the caller.
    model: str = "gemma"
    max_tokens: int = 128
    # Sampling temperature; None means "use the backend's default".
    # Was annotated `int` with a None default — wrong on both counts.
    temperature: Optional[float] = None

    # Disable pydantic's protected "model_" namespace so the `model` field
    # raises no warnings. The original `{"config": ()}` is not a valid
    # pydantic config key and had no effect.
    model_config = {"protected_namespaces": ()}


class GeneratedData(BaseModel):
    """One chunk of streamed generation output."""

    # Text of this chunk.
    gen_text: str
    # Sequence number of the chunk within the stream.
    seq_: int
    # True when this is the final chunk of the stream.
    is_last_: bool
    # NOTE(review): presumably True means gen_text replaces the accumulated
    # text instead of appending — confirm against the consumer.
    full_update: bool = False


class Llm:
    """Fake model that streams canned text; for testing the plumbing only."""

    def __init__(self):
        print("Llm initialized")

    async def generate(self, prompt_data: PromptData):
        """Echo the prompt word-by-word, then stream lorem-ipsum filler."""
        filler = """Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."""

        # Echo the prompt back first, one word per tick.
        for token in prompt_data.prompt.split(" "):
            await asyncio.sleep(0.05)
            yield token

        # Then stream filler words (space-prefixed) until the limit is hit.
        for idx, token in enumerate(filler.split(" ")):
            await asyncio.sleep(0.05)
            yield f" {token}"
            if idx >= prompt_data.max_tokens:
                break


class Gemma:
    """Wrapper around the 2B Gemma causal LM loaded from settings.ckpt."""

    _SEED = 12345  # fixed seed for reproducible sampling

    def __init__(self):
        cfg = config.get_model_config("2b")
        cfg.dtype = "float32"
        cfg.quant = settings.quant

        # Seed every RNG source so generations are reproducible.
        random.seed(self._SEED)
        np.random.seed(self._SEED)
        torch.manual_seed(self._SEED)

        target_device = torch.device(settings.device)
        with _set_default_tensor_type(cfg.get_dtype()):
            llm = gemma_model.GemmaForCausalLM(cfg)
            llm.load_weights(settings.ckpt)
            llm = llm.to(target_device).eval()
        print("Gemma model loading done")

        self.model = llm
        self.device = target_device
        self.reply_size = settings.reply_size

    async def generate(self, prompt_data: PromptData):
        """Stream reply pieces from the model's async generator."""
        stream = self.model.generate_async(
            prompt_data.prompt, self.device, self.reply_size
        )
        async for piece in stream:
            yield piece
        print("Gemma Generate finish!")

    async def generate_sync(self, prompt_data: PromptData):
        """Generate the full reply up front, then re-yield it word by word."""
        full_reply = self.model.generate(
            prompt_data.prompt, self.device, self.reply_size
        )
        for idx, piece in enumerate(full_reply.split(" ")):
            yield f" {piece}"
            if idx >= prompt_data.max_tokens:
                break


def remove_unwanted_prefix_from_state_dict(state_dict, unwanted_prefix):
    """Strip ``unwanted_prefix`` from every matching key of ``state_dict``.

    Mutates ``state_dict`` in place and returns it. The original iterated
    ``.items()`` without ever using the values; iterate a key snapshot
    instead (a snapshot is required because keys are re-inserted mid-loop).
    """
    for key in list(state_dict):
        if key.startswith(unwanted_prefix):
            # pop() yields the value, which is re-inserted under the new key.
            state_dict[key[len(unwanted_prefix):]] = state_dict.pop(key)
    return state_dict


def load_model(checkpoint_path, device, unwanted_prefix="_orig_mod"):
    """Load a Transformer checkpoint and return ``(model, checkpoint)``.

    Applies LoRA adapters when the checkpoint was LoRA-finetuned, strips
    ``unwanted_prefix`` from state-dict keys (e.g. torch.compile's
    ``_orig_mod``), and puts the model in eval mode on ``device``.

    NOTE(review): ``torch.load`` unpickles arbitrary objects — only load
    checkpoints from trusted sources.
    """
    print(f"Loading model from {checkpoint_path}")
    checkpoint = torch.load(checkpoint_path, map_location=device)
    # Renamed from `config` so it no longer shadows the module-level
    # `from gemma import config` import inside this function.
    raw_args = checkpoint["model_args"]
    model_args = raw_args if isinstance(raw_args, ModelArgs) else ModelArgs(**raw_args)
    model = Transformer(model_args)
    if checkpoint.get("lora_finetune"):
        apply_lora(
            model,
            targets=checkpoint["lora_targets"],
            rank=checkpoint["lora_rank"],
            dropout=checkpoint["lora_dropout"],
            alpha=checkpoint["lora_alpha"],
        )
    # Generator expression — no need to materialize a list just to sum.
    print(f"Number of parameters: {sum(p.nelement() for p in model.parameters())}")
    state_dict = remove_unwanted_prefix_from_state_dict(
        state_dict=checkpoint["model"], unwanted_prefix=unwanted_prefix
    )
    model.load_state_dict(state_dict, strict=True)
    model.eval()
    model.to(device)
    return model, checkpoint


async def generate_paragraph(
    model, prompt, max_new_tokens=400, temperature=0.1, top_k=10
):
    """Autoregressively sample up to ``max_new_tokens`` tokens from ``model``.

    Yields the decoded text-so-far every second token so callers can stream
    partial output. Stops early on the tokenizer's EOS token or when the
    last three tokens decode to "The end.". Uses the module-level
    ``tokenizer`` and ``device`` globals.

    NOTE(review): despite being ``async def`` this never awaits, so each
    forward pass blocks the event loop.
    """
    # BOS + encoded prompt, shaped (1, seq_len).
    tokenized_prompt = [tokenizer.bos_id()] + tokenizer.encode(prompt)
    context_tokens = torch.tensor(tokenized_prompt, dtype=torch.long, device=device)[
        None, ...
    ]

    paragraph = []
    for i in range(max_new_tokens):
        # Keep at most the last max_seq_len tokens as context.
        context_tokens = context_tokens[
            :, -min(model.params.max_seq_len, context_tokens.size(1)) :
        ]
        # Inference only: without no_grad() each forward pass would extend
        # an autograd graph, growing memory across hundreds of steps.
        with torch.no_grad():
            output = model(context_tokens)
        logits = output[:, -1, :] / temperature
        # Top-k filtering: mask every logit below the k-th largest.
        v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
        logits[logits < v[:, [-1]]] = -float("Inf")
        probs = F.softmax(logits, dim=-1)
        next_token = torch.multinomial(probs, num_samples=1)
        context_tokens = torch.cat((context_tokens, next_token), dim=1)
        paragraph.append(next_token.item())
        # Stream a partial decode every second token (decode doesn't mutate,
        # so the defensive paragraph[:] copy was unnecessary).
        if i % 2 == 1:
            yield tokenizer.decode(paragraph)
        if (
            next_token.item() == tokenizer.eos_id()
            or tokenizer.decode(paragraph[-3:]) == "The end."
        ):
            break


class TinyLLM:
    """Tiny LLM model loaded from the module-level checkpoint path."""

    def __init__(self):
        # Checkpoint keys carry no prefix here, hence the empty string.
        model, _checkpoint = load_model(
            checkpoint_path=checkpoint_path,
            device=device,
            unwanted_prefix="",
        )
        self.model = model
        self.device = device
        self.reply_size = settings.reply_size * 4

    async def generate(self, prompt_data: PromptData):
        """Stream partial paragraphs produced by generate_paragraph()."""
        stream = generate_paragraph(
            model=self.model,
            prompt=prompt_data.prompt,
            max_new_tokens=self.reply_size,
            temperature=0.1,
            top_k=10,
        )
        async for chunk in stream:
            yield chunk
        print("TinyLLM Generate finish!")

# Load the native InferLLM shared library at import time; `lib` is the
# handle used by all InferLLM helpers below.
ll = ctypes.cdll.LoadLibrary
lib = ll(InferLLM_lib_path)
def load_inferllm(argc, argv):
    """Initialize the native InferLLM runtime with C-style argc/argv."""
    # Build a ctypes char* array from the Python string list.
    encoded = [arg.encode("utf-8") for arg in argv]
    c_argv = (ctypes.c_char_p * argc)(*encoded)
    lib.init(argc, c_argv)


def get_remain_token():
    """Return how many tokens the native decoder can still produce."""
    remain_fn = lib.get_remain_token
    remain_fn.restype = ctypes.c_int
    return remain_fn()

def generate_inferllm_paragraph(prompt_data):
    """Yield decoded text chunks from the native InferLLM decoder.

    ``prompt_data`` is the raw prompt string (not a PromptData instance).
    The first yield is the decoder's response to the full prompt; further
    chunks are pulled one at a time until the token budget is exhausted or
    the end-of-text token appears.
    """
    lib.decode.restype = ctypes.c_char_p
    output = lib.decode(prompt_data.encode('utf-8')).decode('utf-8')
    yield output

    lib.decode_iter.restype = ctypes.c_char_p
    while get_remain_token() > 0:
        # Stop as soon as the decoder reaches its end-of-text token.
        if lib.get_token() == lib.get_etoken():
            print("")
            break

        # Fetch the raw bytes exactly ONCE. The original code called
        # decode_iter() a second time inside an `except UnicodeDecodeError`
        # handler, which consumed and silently dropped an extra token
        # whenever a chunk was not valid UTF-8. errors="ignore" matches the
        # original fallback while decoding the same chunk that failed.
        raw = lib.decode_iter()
        yield raw.decode('utf-8', errors='ignore')


class InferLLM:
    """InferLLM model backed by the native shared library."""

    def __init__(self):
        # Build a C-style argv: empty program-name slot, model path, threads.
        thread_num = 8
        argv = [
            "",
            "-m",
            settings.infer_llm_model_path,
            "-t",
            str(thread_num),
        ]
        load_inferllm(len(argv), argv)

    async def generate(self, prompt_data: PromptData):
        """Stream decoded chunks; the underlying decode is synchronous."""
        for chunk in generate_inferllm_paragraph(prompt_data.prompt):
            yield chunk
        print("Infer LLM Generate finish!")