from osc_transformers import registry, Config
from osc_transformers.tokenizer import Tokenizer
from lightning import Fabric
from lightning.fabric.plugins import BitsandbytesPrecision
from typing import Optional, Any, Literal, Iterator, Tuple
import torch._dynamo
# Fall back to eager execution instead of raising when torch.compile /
# dynamo hits a graph break or error it cannot handle.
torch._dynamo.config.suppress_errors = True
import torch
import time
import sys
from jsonargparse import CLI


def multinomial_sample_one_no_sync(probs_sort):
    """Draw one categorical sample from *probs_sort* without a CUDA sync.

    Equivalent in distribution to ``torch.multinomial(probs_sort, 1)``:
    dividing each probability by an i.i.d. Exponential(1) draw and taking
    the argmax (the exponential-race trick) avoids the device
    synchronization that ``multinomial`` would trigger.
    """
    noise = torch.empty_like(probs_sort).exponential_(1)
    winner = torch.argmax(probs_sort / noise, dim=-1, keepdim=True)
    return winner.to(dtype=torch.int)


def logits_to_probs(logits, temperature: float = 1.0, top_k: Optional[int] = None):
    """Turn raw logits into a probability distribution.

    Applies temperature scaling (temperature is clamped away from zero) and,
    when *top_k* is given, masks every logit below the k-th largest to -inf
    before the softmax.
    """
    scaled = logits / max(temperature, 1e-5)
    if top_k is not None:
        k = min(top_k, scaled.size(-1))
        # Value of the k-th largest logit, broadcast along the last dim.
        kth_value = torch.topk(scaled, k).values.select(-1, -1).unsqueeze(-1)
        scaled = scaled.masked_fill(scaled < kth_value, -float("Inf"))
    return torch.nn.functional.softmax(scaled, dim=-1)


def sample(logits, temperature: float = 1.0, top_k: Optional[int] = None):
    """Sample the next token id from the final position of *logits*.

    Only ``logits[0, -1]`` is used — assumes batch size 1 with the last
    time step holding the next-token distribution (TODO confirm shape
    contract with the model).

    Returns:
        Tuple of (sampled token id tensor, full probability distribution).
    """
    last_probs = logits_to_probs(logits[0, -1], temperature, top_k)
    next_token = multinomial_sample_one_no_sync(last_probs)
    return next_token, last_probs


def prefill(model, input_ids: torch.Tensor, input_pos: torch.Tensor, **sampling_kwargs) -> torch.Tensor:
    """Run the whole prompt through the model and sample the first new token.

    The prompt is flattened to a single batch row before the forward pass.
    Only the sampled token id is returned; the probabilities are discarded.
    """
    batched_prompt = input_ids.view(1, -1)
    logits = model(batched_prompt, input_pos)
    next_token, _ = sample(logits, **sampling_kwargs)
    return next_token

def decode_one_token(model, input_ids, input_pos, **sampling_kwargs):
    """Advance generation by exactly one position.

    *input_pos* must contain a single position index (one decode step).
    Returns the (token, probs) pair from :func:`sample`.
    """
    # Guard the single-step contract (note: stripped under ``python -O``).
    assert input_pos.shape[-1] == 1
    step_logits = model(input_ids, input_pos)
    return sample(step_logits, **sampling_kwargs)

def decode_n_tokens(model, tokenizer, input_ids, input_pos, n, **sampling_kwargs):
    """Autoregressively decode up to *n* tokens, streaming text to stdout.

    Each sampled token is decoded and printed immediately; generation stops
    early when the tokenizer's EOS id is sampled (the EOS token is counted
    but not printed). The math SDP backend is forced for every step —
    flash / memory-efficient kernels are disabled here, presumably for
    torch.compile compatibility (TODO confirm).

    Returns:
        Number of tokens sampled, including a terminating EOS if hit.
    """
    num_new_tokens = 0
    for _ in range(n):
        with torch.backends.cuda.sdp_kernel(enable_flash=False, enable_mem_efficient=False, enable_math=True):
            next_token, _ = decode_one_token(
                model=model,
                input_ids=input_ids.view(1, -1),
                input_pos=input_pos,
                **sampling_kwargs,
            )
            num_new_tokens += 1
            if next_token == tokenizer.eos_id:
                print("\n", flush=True, end='')
                break
            print(tokenizer.decode(next_token), flush=True, end='')
            input_pos += 1
        # The freshly sampled token becomes the next step's input.
        input_ids = next_token
    return num_new_tokens
        

@torch.inference_mode()
def generate(
    input_ids: torch.Tensor,
    model: torch.nn.Module,
    max_length: int,
    tokenizer: Tokenizer,
    temperature: float = 1.0,
    top_k: int = 1,
):
    """Generate up to *max_length* new tokens from a prompt, streaming to stdout.

    Args:
        input_ids (torch.Tensor): 1-D tensor of prompt token ids (seq_len,).
        model (torch.nn.Module): decoder called as ``model(ids, positions)``.
        max_length (int): maximum number of tokens to decode after prefill.
        tokenizer (Tokenizer): used for EOS detection and decoding to text.
        temperature (float, optional): sampling temperature. Defaults to 1.0.
        top_k (int, optional): restrict sampling to the k most likely tokens.
            Defaults to 1 (greedy).

    Returns:
        Number of tokens produced by the decode loop.

    NOTE(review): the token sampled during prefill is fed back into the
    decode loop but is never printed or checked against EOS — verify that
    this is intended.
    """
    prompt_len = input_ids.shape[0]
    prompt_positions = torch.arange(0, prompt_len, device=input_ids.device)
    first_token = prefill(model, input_ids, prompt_positions, temperature=temperature, top_k=top_k)
    next_position = torch.tensor([prompt_len], device=first_token.device)
    return decode_n_tokens(
        model=model,
        tokenizer=tokenizer,
        input_ids=first_token,
        input_pos=next_position,
        n=max_length,
        temperature=temperature,
        top_k=top_k,
    )

def load_checkpoint(fabric: Fabric, checkpoint: str):
    """Load a model + tokenizer bundle from *checkpoint* via *fabric*.

    The checkpoint dict is expected to provide 'config', 'tokenizer' and
    'model' entries. The module is materialized empty on the fabric device,
    KV caches are built for batch size 1, and the saved weights are loaded.
    Timing for both phases is reported to stderr.

    Returns:
        Tuple of (model in eval mode, tokenizer).
    """
    t_load_start = time.perf_counter()
    ckpt = fabric.load(checkpoint)
    t_load_end = time.perf_counter()
    fabric.print(f"Checkpoint loaded in {t_load_end - t_load_start:.02f} seconds", file=sys.stderr)

    config = Config(ckpt['config'])
    tokenizer = ckpt['tokenizer']

    t_build_start = time.perf_counter()
    with fabric.init_module(empty_init=True):
        llm = registry.resolve(config)['model']
        llm.build_caches(batch_size=1)
    llm.load_state_dict(ckpt['model'])
    t_build_end = time.perf_counter()
    fabric.print(f"Model state loaded in {t_build_end - t_build_start:.02f} seconds", file=sys.stderr)
    llm.eval()
    return llm, tokenizer
    

@torch.inference_mode()
def main(
    checkpoint: str = './chinese-alpaca-2-7B.ckpt',
    prompt: str = "帮我写首关于春天的作文,要尽可能多的生成",
    device: int = 7,
    temperature: float = 1.0,
    top_k: int = 1,
    max_length: int = 512,
    compile: bool = False,
    quantize: Optional[Literal["bnb.nf4", "bnb.nf4-dq", "bnb.fp4", "bnb.fp4-dq", "bnb.int8"]] = None,
    precision: Optional[str] = '16-true',
):
    """Load a checkpoint and stream a chat-formatted completion to stdout.

    Args:
        checkpoint: path to a fabric checkpoint with 'config'/'tokenizer'/'model'.
        prompt: user message, wrapped in the Llama-2 chat template below.
        device: CUDA device index to run on.
        temperature: sampling temperature.
        top_k: top-k sampling cutoff (1 = greedy).
        max_length: maximum number of new tokens to generate.
        compile: compile the model with torch.compile (adds a warmup pass).
        quantize: bitsandbytes quantization mode, e.g. "bnb.nf4"; None disables.
        precision: fabric precision string used when *quantize* is None.
    """
    # FIX: previously BitsandbytesPrecision(mode='nf4') was hard-coded and the
    # `quantize`/`precision` CLI options were silently ignored.
    if quantize is not None:
        # "bnb.nf4" -> BitsandbytesPrecision mode "nf4", etc.
        plugin = BitsandbytesPrecision(mode=quantize[len("bnb."):])
        fabric = Fabric(devices=[device], accelerator='cuda', plugins=plugin)
    else:
        fabric = Fabric(devices=[device], accelerator='cuda', precision=precision)

    llm, tokenizer = load_checkpoint(fabric, checkpoint)

    # Llama-2 chat template wrapping the user prompt.
    b_inst, e_inst = "[INST]", "[/INST]"
    b_sys, e_sys = "<<SYS>>\n", "\n<</SYS>>\n\n"
    system_prompt = (
        f"{b_inst} {b_sys}You are a helpful assistant, 你是一个乐于助人的助手.{e_sys} {{prompt}} {e_inst} "
    )
    prompt = system_prompt.format(prompt=prompt)

    if compile:
        fabric.print("Compiling model")
        llm = torch.compile(llm, mode="reduce-overhead", fullgraph=True)
        llm = fabric.setup_module(llm)
        # One untimed pass so compilation cost is excluded from the benchmark.
        fabric.print("Warmup model")
        input_ids = tokenizer.encode(prompt, device=fabric.device)
        _ = generate(input_ids=input_ids, model=llm, max_length=max_length, tokenizer=tokenizer, temperature=temperature, top_k=top_k)
    else:
        # FIX: previously setup_module() only ran in the compile branch, so the
        # uncompiled model was never set up by fabric.
        llm = fabric.setup_module(llm)

    input_ids = tokenizer.encode(prompt, device=fabric.device)
    time0 = time.perf_counter()
    num_new_tokens = generate(input_ids=input_ids, model=llm, max_length=max_length, tokenizer=tokenizer, temperature=temperature, top_k=top_k)
    time1 = time.perf_counter()
    t = time1 - time0
    fabric.print(f"Generated {num_new_tokens} tokens in {t:.02f} seconds, {num_new_tokens / t} tokens/second", file=sys.stderr)
    fabric.print(f"Memory used: {torch.cuda.max_memory_allocated() / 1e9:.02f} GB", file=sys.stderr)


if __name__ == "__main__":
    
    # Allow TF32 matmuls for faster float32 GEMMs on Ampere+ GPUs.
    torch.set_float32_matmul_precision('high')
    
    # jsonargparse exposes main()'s keyword arguments as CLI flags.
    CLI(main)
