# =============================================================================
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

import json
import os
import random
from typing import List

import torch
import torch.distributed as dist
import torch_npu
from tqdm import tqdm
from transformers import AutoTokenizer, TextStreamer

from model import ModelArgs, PanguForCausalLM
from quant_tool import (
    kv_collect,
    save_comm_scaling_factors,
    save_kv_scaling_factors,
)
from utils import (
    ARGS,
    InferenceParams,
    batch_padding,
    compile_graph,
    generate_attention,
    init_parallel_group,
    load_ckpt_and_quant_scale,
    print_model,
    print_rank_0,
    set_args,
    setup_seed,
    use_internal_format_weight,
    update_model_args_from_config
)


def calculate_prefill_len(prompt_lens: List[int]):
    """Return the sequence length to use for the prefill pass.

    Invariant elsewhere: model_max_seq_len > en_sequence_len + max_new_tokens
    (the KV-cache length), so inputs are truncated to align with
    en_sequence_len.  On 300I hardware the prefill length must be a
    multiple of 128, so the longest prompt length is rounded up to the
    next 128 boundary; on other hardware the longest prompt length is
    used as-is.

    Args:
        prompt_lens: token counts of the prompts in the current batch.

    Returns:
        int: prefill sequence length for the batch.
    """
    longest = max(prompt_lens)
    if ARGS.hardware != '300I':
        return longest
    align_seq_len = 128
    # Round up to the next multiple of align_seq_len (ceiling division).
    return (longest + align_seq_len - 1) // align_seq_len * align_seq_len


@torch.inference_mode()
def generate(
    model: PanguForCausalLM,
    prompt_tokens: List[List[int]],
    streamer: TextStreamer,
    max_new_tokens: int,
    eos_id: int,
) -> List[List[int]]:
    """Autoregressively generate completions for a batch of prompts.

    Performs one prefill step over the (right-padded) prompt region, then
    decodes one token per sequence per step until every sequence has
    emitted ``eos_id`` or ``max_new_tokens`` steps have run.  All book-
    keeping tensors live on CPU; per-step inputs are moved to NPU via
    ``.npu()`` just before the forward call.

    Args:
        model: causal LM whose forward returns the next token id for each
            sequence (given tokens, positions, start positions, mask and
            ``inference_params``).
        prompt_tokens: one list of prompt token ids per sequence.
        streamer: optional TextStreamer; when given, tokens are echoed as
            they are produced (used with batch size 1 on rank 0).
        max_new_tokens: generation budget per sequence.
        eos_id: token id that terminates a sequence.

    Returns:
        One list of generated token ids per input sequence, with the
        prompt stripped and everything from EOS onward removed.
    """

    batch_size = len(prompt_tokens)
    prompt_lens = [len(t) for t in prompt_tokens]
    # Total KV-cache length: fixed prompt budget (en_sequence_len) plus the
    # generation budget.  Prompts are assumed to fit in en_sequence_len.
    total_len = max_new_tokens + ARGS.en_sequence_len

    # prepare token id & position id & attention mask on CPU
    device = torch.device("cpu")
    tokens = torch.full((batch_size, total_len), 0, dtype=torch.long, device=device)
    position_ids = torch.arange(total_len, dtype=torch.long, device=device).expand(batch_size, -1)

    # Right-pad each prompt into the shared token buffer.
    for i, t in enumerate(prompt_tokens):
        tokens[i, :len(t)] = torch.tensor(t, dtype=torch.long, device=device)

    # NOTE(review): indexing below ([..., :n, :n] and [0, 0, :, :]) implies
    # generate_attention returns a 4-D mask, presumably
    # (batch-or-1, 1, total_len, total_len) — confirm against utils.
    attention_mask_full = generate_attention(tokens)
    # finished: (batch, 1) bool; finished_seq_len: per-sequence stop position,
    # initialized to the longest prompt length.
    finished = torch.tensor([False] * len(prompt_tokens), device=device).unsqueeze(1)
    finished_seq_len = torch.ones(batch_size, dtype=torch.long, device=device) * max(prompt_lens)

    inference_params = InferenceParams(
        temperature=ARGS.temperature,
        top_k=ARGS.top_k,
        top_p=ARGS.top_p,
    )

    # Per-sequence write cursor: index of the next token slot to fill.
    batch_context_lengths = torch.tensor(prompt_lens, dtype=torch.long, device=device)
    max_prompt_len = max(prompt_lens)

    # Redundant under @torch.inference_mode(), but harmless.
    with (torch.no_grad()):
        for cur_pos in range(max_prompt_len, min(max_prompt_len + max_new_tokens, total_len)):
            if cur_pos == max(prompt_lens):
                # prefill: runs exactly once, on the first loop iteration.
                prefill_seq_len = calculate_prefill_len(prompt_lens)
                attention_mask2use = attention_mask_full[..., :prefill_seq_len, :prefill_seq_len].npu()
                tokens2use = tokens[:, :prefill_seq_len].npu()
                positions2use = position_ids[:, :prefill_seq_len].npu()
                inference_params.is_prefill = True
                if streamer is not None:
                    print_rank_0("\n" + "=" * 80)
                    streamer.put(tokens)
            else:
                # decode: feed only the most recently written token per sequence.
                gen_idxs = (batch_context_lengths - 1)[:, None]
                tokens2use = torch.gather(tokens, 1, gen_idxs).npu()
                positions2use = torch.gather(position_ids, 1, gen_idxs).npu()
                # Select each sequence's mask row for its current position.
                attention_mask2use = torch.index_select(attention_mask_full[0,0,:,:], dim=0, index=gen_idxs.squeeze()).unsqueeze(1).unsqueeze(2).npu()
                inference_params.is_prefill = False

            # move input to npu
            batch_context_lengths2use = batch_context_lengths.npu()

            # forward; result is pulled back to CPU for bookkeeping.
            # NOTE(review): scatter below requires next_token to be
            # (batch, 1) — confirm against the model's sampling head.
            next_token = model(
                tokens=tokens2use,
                position_ids=positions2use,
                start_pos=batch_context_lengths2use,
                attention_mask=attention_mask2use,
                inference_params=inference_params
            ).cpu()

            if streamer is not None:
                streamer.put(next_token)

            # Write each sequence's new token at its cursor, then advance.
            # Already-finished sequences keep generating into the buffer,
            # but their output is cut at finished_seq_len afterwards.
            tokens = torch.scatter(tokens, 1, batch_context_lengths[:, None], next_token)
            batch_context_lengths += 1

            # Record the stop position for sequences that emitted EOS on
            # this step (boolean-mask assignment).
            done_token = (next_token == eos_id).bool()
            just_finished = (done_token & ~finished).bool()
            finished_seq_len[just_finished.view(-1)[:batch_size]] = cur_pos + 1
            finished = finished | done_token

            if finished.all():
                break

    # Sequences that never emitted EOS stop at the last generated position.
    # NOTE(review): cur_pos is unbound if max_new_tokens == 0 (loop never
    # runs) — callers are assumed to pass max_new_tokens >= 1.
    finished_seq_len[(~finished).view(-1)[:batch_size]] = cur_pos + 1

    # kv calibrate: collect KV statistics for quantization scale export.
    if ARGS.kv_quant_calibrate:
        kv_collect(model, cur_pos)

    # Slice out the completion: drop the prompt prefix and cut at EOS.
    completion_tokens = []
    for i, toks in enumerate(tokens.tolist()):
        toks = toks[prompt_lens[i]:finished_seq_len[i]]
        if eos_id in toks:
            toks = toks[:toks.index(eos_id)]
        completion_tokens.append(toks)

    if streamer is not None:
        streamer.end()
        print_rank_0("\n" + "=" * 80)

    return completion_tokens


def main(
    model,
    prompts: List,
    prompt_tokens: List,
    streamer: TextStreamer,
) -> None:
    """Drive batched generation over all prompts.

    Iterates over ``prompt_tokens`` in chunks of ``ARGS.global_batch_size``,
    padding any short (final) chunk up to
    ``ARGS.global_batch_size_after_padding`` with EOS-only prompts so the
    batch shape stays fixed.  The first batch is optionally replayed under
    the torch_npu profiler, and quantization scaling factors collected
    during generation are saved at the end when calibration is enabled.

    Args:
        model: loaded PanguForCausalLM instance (already on NPU).
        prompts: raw prompt strings (or token-id lists in dummy mode),
            used only for printing alongside completions.
        prompt_tokens: tokenized prompts, one token-id list per prompt.
        streamer: TextStreamer for incremental output, or None.
    """
    # Optional NPU profiling: schedule is 1 warmup step + 1 active step.
    prof = None
    if ARGS.profiling_path is not None:
        print_rank_0("-------- start profiling -------")
        experimental_config = torch_npu.profiler._ExperimentalConfig(
            profiler_level=torch_npu.profiler.ProfilerLevel.Level1,
            aic_metrics=torch_npu.profiler.AiCMetrics.PipeUtilization)
        prof = torch_npu.profiler.profile(
            activities=[
                torch_npu.profiler.ProfilerActivity.NPU,
                torch_npu.profiler.ProfilerActivity.CPU],
            with_stack=False, record_shapes=True, profile_memory=True,
            experimental_config=experimental_config,
            schedule=torch_npu.profiler.schedule(wait=0, warmup=1,
                                                 active=1, repeat=1, skip_first=0),
            on_trace_ready=torch_npu.profiler.tensorboard_trace_handler(ARGS.profiling_path))
        prof.start()

    profile_steps = 2  # warmup + active, matching the schedule above
    do_profile = True
    max_count = len(prompt_tokens)
    count = 0
    while count < max_count:
        cur_prompt_tokens = prompt_tokens[count: count + ARGS.global_batch_size]
        cur_prompts = prompts[count: count + ARGS.global_batch_size]
        # Pad a short batch with EOS-only prompts to keep the batch shape fixed.
        cur_bs = len(cur_prompt_tokens)
        num_padding_bs = ARGS.global_batch_size_after_padding - cur_bs
        if num_padding_bs > 0:
            cur_prompt_tokens.extend(ARGS.tokenizer([ARGS.tokenizer.eos_token for _ in range(num_padding_bs)])['input_ids'])
            # Fix: report the actual pre-padding batch size; the last batch
            # may be smaller than ARGS.global_batch_size.
            print_rank_0(f"Padding batch from #{cur_bs} to #{ARGS.global_batch_size_after_padding}".center(80))

        if ARGS.profiling_path is not None and do_profile:
            # Replay the same batch so the profiler's warmup/active schedule
            # is satisfied, then stop profiling for the rest of the run.
            for _ in range(profile_steps):
                completion_tokens = generate(
                    model,
                    cur_prompt_tokens,
                    streamer,
                    ARGS.max_new_tokens,
                    ARGS.tokenizer.eos_token_id
                    )
                prof.step()
            prof.stop()
            do_profile = False
        else:
            completion_tokens = generate(
                model,
                cur_prompt_tokens,
                streamer,
                ARGS.max_new_tokens,
                ARGS.tokenizer.eos_token_id
                )

        count += ARGS.global_batch_size

        # Decode and print on rank 0 only (streaming mode already printed).
        completions = ARGS.tokenizer.batch_decode(completion_tokens, skip_special_tokens=True)
        if not ARGS.streaming and (not dist.is_initialized() or dist.get_rank() == 0):
            for prompt, completion in zip(cur_prompts, completions):
                print_rank_0("Prompt:", prompt)
                print_rank_0("Completion:", completion)

    # Persist calibration results collected during generation.
    if ARGS.kv_quant_calibrate:
        save_kv_scaling_factors(ARGS.kv_scale_path)
    if ARGS.comm_quant_calibrate:
        save_comm_scaling_factors(ARGS.comm_scale_path)

    # Fix: the original read the module-global `world_size`, which is only
    # defined when this file runs as a script (NameError if imported).
    # Query torch.distributed directly instead.
    if dist.is_initialized() and dist.get_world_size() > 1:
        dist.destroy_process_group()


if __name__ == "__main__":
    # ------------------------------------------------------------------ #
    # Distributed environment / device setup                             #
    # ------------------------------------------------------------------ #
    world_size = int(os.getenv("WORLD_SIZE", "1"))
    rank = int(os.getenv("RANK", "0"))
    torch.npu.set_device(rank)
    dist.init_process_group("hccl", rank=rank, world_size=world_size)

    set_args()

    # Parameter checks: 300I needs a 128-aligned prompt budget, and
    # prompt + generation must fit within the model's max sequence length.
    if ARGS.hardware == '300I' and ARGS.en_sequence_len % 128 != 0:
        raise ValueError("Input must be aligned to 128 for 300I.")
    if ARGS.max_new_tokens + ARGS.en_sequence_len > ModelArgs().model_max_seq_len:
        raise ValueError("Input + output length exceeds model's max sequence length.")

    init_parallel_group()
    print_rank_0(ARGS)
    setup_seed(42)

    # Tokenizer with left truncation so over-long prompts keep their tail.
    tokenizer = AutoTokenizer.from_pretrained(
        ARGS.ckpt_path,
        truncation_side='left',
        trust_remote_code=True,
        local_files_only=True
    )
    ARGS.tokenizer = tokenizer

    # Exactly one input source must be selected.  Fix: count truthy
    # selections explicitly — list.count(True) only matches values that
    # compare equal to True, so a truthy non-bool (e.g. a list of texts
    # supplied for --input-texts) would be miscounted.
    input_selected = [
        bool(ARGS.input_texts),
        ARGS.input_file is not None,
        bool(ARGS.input_dummy),
    ]
    if sum(input_selected) != 1:
        raise ValueError("You must provide one and only one type of input: --input-texts, --input-file, or --input-dummy.")

    if ARGS.input_texts:
        # NOTE(review): the prompt below is hard-coded; --input-texts is
        # currently treated as an on/off switch.
        texts = ["如果今天我只做一件让自己开心的小事，你会建议我做什么？"]

        if not texts:
            raise ValueError("In 'input_texts' mode, you should provide non-empty texts here.")

    elif ARGS.input_file:
        if not ARGS.input_file.endswith(".json"):
            raise ValueError("Only JSON format is supported for --input-file")

        print_rank_0(f"[INFO] Reading JSON file: {ARGS.input_file} ...")
        with open(ARGS.input_file, 'r', encoding='utf-8') as f:
            lines = [line.strip() for line in tqdm(f.readlines(), desc="Reading lines", leave=False)]
        try:
            # Expect JSON-lines input with a "prompt" field on every line.
            texts = [json.loads(line)["prompt"] for line in tqdm(lines, desc="Parsing JSON", leave=False)]
        except (json.JSONDecodeError, KeyError) as e:
            raise ValueError(f"Error parsing input file: {e}")

        if not texts:
            raise ValueError("Parsed prompt from input file are empty.")

    elif ARGS.input_dummy:
        # Synthesize token ids directly; no text or chat template involved.
        vocab_size = tokenizer.vocab_size
        total_tokens = ARGS.global_batch_size * ARGS.en_sequence_len

        if ARGS.deterministic:
            ordered_token_ids = torch.arange(total_tokens) % vocab_size
            ordered_token_ids = ordered_token_ids.reshape((ARGS.global_batch_size, ARGS.en_sequence_len))
            prompt_tokens = ordered_token_ids.tolist()
        else:
            prompt_tokens = [
                [random.randint(0, vocab_size - 1) for _ in range(ARGS.en_sequence_len)]
                for _ in range(ARGS.global_batch_size)
            ]
        prompts = prompt_tokens  # In dummy case, texts are already token IDs

    else:
        # Reachable despite the check above, e.g. when --input-file is an
        # empty string (not None, but falsy).
        raise ValueError("You must provide one type of input: --input-texts, --input-file, or --input-dummy.")

    # Apply chat template and tokenize, only if not dummy
    if not ARGS.input_dummy:
        prompts = []
        for text in texts:
            messages = [
                {"role": "system", "content": ""},  # Optionally customize system content
                {"role": "user", "content": text}
            ]
            prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
            prompts.append(prompt)

        prompt_tokens = tokenizer(prompts, max_length=ARGS.en_sequence_len, truncation=True)['input_ids']

    if ARGS.global_batch_size > len(prompt_tokens):
        raise ValueError(
            f"--global-batch-size ({ARGS.global_batch_size}) exceeds number of available prompts ({len(prompt_tokens)})."
        )

    # Streaming output only makes sense for a single sequence and dp=1.
    if getattr(ARGS, "streaming", False):
        if ARGS.atten_dp_size > 1:
            raise ValueError("Streaming mode does not support data parallelism (dp > 1). Please set dp=1.")

        if ARGS.global_batch_size != 1:
            print("Warning: Streaming mode requires global_batch_size=1. Overriding to 1.")
            ARGS.global_batch_size = 1

    batch_padding(ARGS.global_batch_size)

    # Build the model, load weights/quant scales, and convert weights to
    # the NPU-internal format.
    model_args = ModelArgs()
    model_args = update_model_args_from_config(model_args, ARGS.ckpt_path)
    model = PanguForCausalLM(model_args).npu()

    load_ckpt_and_quant_scale(model, tokenizer)
    use_internal_format_weight(model)

    # Optional graph compilation.
    if ARGS.compile:
        model = compile_graph(model)

    # Stream tokens only on rank 0 and only for batch size 1.
    if dist.get_rank() == 0 and ARGS.global_batch_size == 1:
        streamer = TextStreamer(
            tokenizer,
            skip_prompt=False,
            skip_special_tokens=True
        )
    else:
        streamer = None

    main(model, prompts, prompt_tokens, streamer)
