import torch
from typing import List, Union, Optional, Tuple
from pathlib import Path
from osc_transformers import registry, Config, Tokenizer
import torch.nn as nn
import time
import torch
import torch._dynamo.config
import torch._inductor.config
import itertools
from jsonargparse import CLI 

def device_sync(device):
    """Block the host until all queued work on *device* has completed.

    Needed so wall-clock timings measure actual GPU work. CPU requires no
    synchronization; any other device string only triggers a warning so
    the benchmark keeps running.
    """
    if "cuda" in device:
        torch.cuda.synchronize(device)
    elif "cpu" in device:
        pass
    else:
        # Fixed typo: "suppported" -> "supported"
        print(f"device={device} is not yet supported")


# Inductor/Dynamo tuning knobs: faster generated kernels and cached graphs.
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.triton.unique_kernel_names = True
torch._inductor.config.fx_graph_cache = True # Experimental feature to reduce compilation times, will be on by default in future
torch._dynamo.config.automatic_dynamic_shapes = True


def load_model(checkpoint_dir: Union[str, Path], precision: torch.dtype, device: str) -> torch.nn.Module:
    """Build the model from its saved config and load its checkpoint weights.

    Args:
        checkpoint_dir: directory containing ``config.cfg`` and ``osc_model.pth``.
        precision: target dtype the weights are cast to (callers pass
            ``torch.bfloat16``; the previous ``str`` annotation was wrong).
        device: device string (e.g. ``"cuda:0"``) the model is moved to.

    Returns:
        The model in eval mode, on ``device``, with dtype ``precision``.
    """
    config_path = Path(checkpoint_dir) / 'config.cfg'
    config = Config().from_disk(config_path)
    # with torch.device('meta'):
    model: torch.nn.Module = registry.resolve(config)['model']
    checkpoint_dir = Path(checkpoint_dir)
    # mmap + assign=True avoids materializing a second full copy of the weights.
    checkpoint = torch.load(str(checkpoint_dir / 'osc_model.pth'), mmap=True, weights_only=True)
    model.load_state_dict(checkpoint, assign=True)
    model = model.to(device=device, dtype=precision)
    return model.eval()


def multinomial_sample_one_no_sync(probs_sort):
    """Sample one index from a categorical distribution without the CUDA
    synchronization that ``torch.multinomial`` would incur, using the
    exponential-noise race trick: argmax(p / Exp(1)) ~ Categorical(p)."""
    exp_noise = torch.empty_like(probs_sort).exponential_(1)
    scores = probs_sort / exp_noise
    return torch.argmax(scores, dim=-1, keepdim=True).to(dtype=torch.int)

def logits_to_probs(logits, temperature: float = 1.0, top_k: Optional[int] = None):
    """Scale logits by temperature, optionally mask everything outside the
    top-k entries to -inf, and return softmax probabilities."""
    # Clamp temperature away from zero so the division stays finite.
    scaled = logits / max(temperature, 1e-5)
    if top_k is not None:
        k = min(top_k, scaled.size(-1))
        kth_value = torch.topk(scaled, k).values.select(-1, -1).unsqueeze(-1)
        scaled = torch.where(scaled < kth_value, -float("Inf"), scaled)
    return torch.nn.functional.softmax(scaled, dim=-1)

def sample(logits, temperature: float = 1.0, top_k: Optional[int] = None):
    """Sample the next token id from the last-position logits of batch row 0.

    Returns (next_token, probs)."""
    last_probs = logits_to_probs(logits[0, -1], temperature, top_k)
    next_token = multinomial_sample_one_no_sync(last_probs)
    return next_token, last_probs

def prefill(model: nn.Module, x: torch.Tensor, input_pos: torch.Tensor, **sampling_kwargs) -> torch.Tensor:
    """Process the whole prompt in one forward pass and sample the first
    generated token.

    input_pos: [B, S]
    """
    next_token, _ = sample(model(x, input_pos), **sampling_kwargs)
    return next_token

def decode_one_token(model: nn.Module, x: torch.Tensor, input_pos: torch.Tensor, **sampling_kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
    """Advance generation by exactly one position.

    input_pos: [B, 1]
    """
    assert input_pos.shape[-1] == 1
    return sample(model(x, input_pos), **sampling_kwargs)

def decode_n_tokens(model: nn.Module, cur_token: torch.Tensor, input_pos: torch.Tensor, num_new_tokens: int, callback=lambda _: _, **sampling_kwargs):
    """Autoregressively sample ``num_new_tokens`` tokens, one model call each.

    Returns two parallel lists: the sampled token tensors and their
    probability tensors. ``callback`` is invoked with each new token.
    """
    tokens, probabilities = [], []
    for _ in range(num_new_tokens):
        # Force math-only SDPA: Inductor codegens attention better here than
        # the fused flash / mem-efficient kernels.
        with torch.backends.cuda.sdp_kernel(enable_flash=False, enable_mem_efficient=False, enable_math=True):
            token, prob = decode_one_token(
                model, cur_token, input_pos, **sampling_kwargs
            )
            input_pos += 1
            tokens.append(token.clone())
            callback(tokens[-1])
            probabilities.append(prob.clone())
            cur_token = token.view(1, -1)

    return tokens, probabilities


def model_forward(model, x, input_pos):
    """Thin wrapper around the model call (a convenient torch.compile target)."""
    logits = model(x, input_pos)
    return logits


def warmup(prompt: torch.Tensor, model: nn.Module):
    """Pay torch.compile's one-time compilation cost up front.

    torch.compile only compiles on the first forward pass, so run a tiny
    2-token generation here; its output is discarded.
    """
    generate(model=model, prompt=prompt, max_new_tokens=2, temperature=1.0, top_k=1)


@torch.no_grad()
def generate(
    model: nn.Module,
    prompt: torch.Tensor,
    max_new_tokens: int,
    *,
    callback = lambda x: x,
    **sampling_kwargs
) -> torch.Tensor:
    """
    Takes a conditioning sequence (prompt) as input and continues to generate
    as many tokens as requested.

    Args:
        model: decoder exposing ``build_caches(batch_size=...)`` and a
            ``model(tokens, input_pos)`` forward returning logits.
        prompt: 1-D tensor of prompt token ids.
        max_new_tokens: number of tokens to append after the prompt (>= 1).
        callback: invoked with each token decoded after the first.
        **sampling_kwargs: forwarded to ``sample`` (temperature, top_k).

    Returns:
        (seq, stats): full token sequence (prompt + generated) and a dict of
        generation statistics.
    """

    T = prompt.size(0)
    T_new = T + max_new_tokens

    device, dtype = prompt.device, prompt.dtype

    with torch.device(device):
        model.build_caches(batch_size=1)

    # create an empty tensor of the expected final shape and fill in the current tokens
    empty = torch.empty(T_new, dtype=dtype, device=device)
    empty[:T] = prompt
    seq = empty
    input_pos = torch.arange(0, T, device=device)

    # First new token comes from the prompt-wide prefill pass.
    next_token = prefill(model, prompt.view(1, -1), input_pos, **sampling_kwargs)
    seq[T] = next_token

    input_pos = torch.tensor([T], device=device, dtype=torch.long)

    generated_tokens, _ = decode_n_tokens(model, next_token.view(1, -1), input_pos, max_new_tokens - 1, callback=callback, **sampling_kwargs)
    # Bug fix: when max_new_tokens == 1 there are no extra tokens and
    # torch.cat([]) raises; only concatenate when something was decoded.
    if generated_tokens:
        seq[T + 1:] = torch.cat(generated_tokens)

    generate_stats = {
        'accept_counts': 0
    }
    return seq, generate_stats



def main(
    prompt: str = "帮我写一篇赞美春天的作文",
    num_samples: int = 5,
    max_new_tokens: int = 500,
    top_k: int = 200,
    temperature: float = 0.8,
    checkpoint_dir: Path = Path("/home/wangmengdi/osc-llm/checkpoints/hfl/chinese-alpaca-2-1.3b"),
    compile: bool = True,
    compile_prefill: bool = True,
    device: str = 'cuda:4',
) -> None:
    """Generates text samples based on a pre-trained Transformer model and tokenizer.

    Args:
        prompt: user prompt; wrapped below in a Llama-2 chat template.
        num_samples: number of independent generations to run and time.
        max_new_tokens: tokens to generate per sample.
        top_k: top-k truncation applied when sampling.
        temperature: sampling temperature.
        checkpoint_dir: directory with config.cfg, osc_model.pth and tokenizer files.
        compile: torch.compile the per-token decode step. NOTE(review): the
            name shadows the builtin ``compile``; kept for CLI compatibility.
        compile_prefill: additionally compile the prefill step.
        device: torch device string the model runs on.
    """

    print(f"Using device={device}")
    precision = torch.bfloat16

    print("Loading model ...")
    t0 = time.time()
    model = load_model(checkpoint_dir, device=device, precision=precision)

    device_sync(device=device) # MKG
    print(f"Time to load model: {time.time() - t0:.02f} seconds")

    
    # Llama-2 chat template markers; the system prompt wraps the user prompt.
    b_inst, e_inst = "[INST]", "[/INST]"
    b_sys, e_sys = "<<SYS>>\n", "\n<</SYS>>\n\n"
    system_prompt = (
        f"{b_inst} {b_sys}You are a helpful assistant, 你是一个乐于助人的助手.{e_sys} {{prompt}} {e_inst} "
    )
    prompt = system_prompt.format(prompt=prompt)
    
    tokenizer = Tokenizer(checkpoint_dir=checkpoint_dir)
    
    encoded = tokenizer.encode(prompt, device=device)
    prompt_length = len(encoded)
    print('prompt length: ', prompt_length)
    

    torch.manual_seed(1234)
    # Total model size in bytes (params + buffers), used for bandwidth stats.
    model_size = sum([p.numel() * p.dtype.itemsize for p in itertools.chain(model.parameters(), model.buffers())])

    if compile:
        # Rebind the module-level functions to their compiled versions so the
        # generation loop picks them up.
        global decode_one_token, prefill
        decode_one_token = torch.compile(decode_one_token, mode="reduce-overhead", fullgraph=True, dynamic=True)

        # Uncomment to squeeze more perf out of prefill
        if compile_prefill:
            prefill = torch.compile(prefill, fullgraph=True, dynamic=True)
        
        # Trigger compilation once up front so timed runs measure inference only.
        warmup_prompt = tokenizer.encode('你好', device=device)
        t = time.perf_counter()
        warmup(warmup_prompt, model)
        print(f"Time for warmup: {time.perf_counter() - t:.2f} seconds")


    aggregate_metrics = {
        'tokens_per_sec': [],
        'accept_counts': [],
    }
    start = 0

    for i in range(start, num_samples):
        device_sync(device=device) # MKG
        callback = lambda x : x
        
        t0 = time.perf_counter()
        y, metrics = generate(
            model,
            encoded,
            max_new_tokens,
            callback=callback,
            temperature=temperature,
            top_k=top_k,
        )
        aggregate_metrics['accept_counts'].append(metrics['accept_counts'])
        # if i == -1:
        #     print(f"Compilation time: {time.perf_counter() - t0:.2f} seconds")
        #     continue
        device_sync(device=device) # MKG
        t = time.perf_counter() - t0

        print(tokenizer.decode(y))
        
        tokens_generated = y.size(0) - prompt_length
        tokens_sec = tokens_generated / t
        aggregate_metrics['tokens_per_sec'].append(tokens_sec)
        print(f"Time for inference {i + 1}: {t:.02f} sec total, {tokens_sec:.02f} tokens/sec")
        print(f"Bandwidth achieved: {model_size * tokens_sec / 1e9:.02f} GB/s")
    print("==========")
    print(f"Average tokens/sec: {torch.mean(torch.tensor(aggregate_metrics['tokens_per_sec'])).item():.2f}")
    print(f"Memory used: {torch.cuda.max_memory_reserved() / 1e9:.02f} GB")


if __name__ == "__main__":
    # jsonargparse builds the command-line interface from main's signature.
    CLI(main)