import acl

import mindspore as ms
from mindspore import Model, Tensor
from mindspore.common import initializer

from mindformers import MindFormerConfig
from mindformers import build_context
from mindformers.tools.logger import logger
from mindformers.trainer.utils import transform_and_load_checkpoint
from mindformers.core.parallel_config import build_parallel_config

from deepseek3_model import DeepseekV3ForCausalLM
from deepseek3_config import DeepseekV3Config
from mindformers.models.llama.llama_tokenizer_fast import LlamaTokenizerFast

# Input: one long prompt read from a local text file, truncated to 10400 chars.
# NOTE(review): the hard-coded fallback question below is immediately
# overwritten by the file contents; kept so it is easy to switch back to
# the short prompt manually while debugging.
input_questions = ["生抽和老抽的区别是什么？"]
text = "xiyou.txt"
# Specify the encoding explicitly: the file contains Chinese text and the
# platform-default codec (e.g. GBK on some systems) would mis-decode it.
# Assumes the file is UTF-8 — TODO confirm.
with open(text, 'r', encoding='utf-8') as f:
    # f.read() is the idiomatic equivalent of "".join(f.readlines()).
    info = f.read()
    input_questions = [info[:10400]]

# set model config
# Load the YAML inference configuration, then initialize the MindSpore
# runtime context and the model-parallel configuration from it.
yaml_file = "./predict_deepseek3_671B.yaml"
config = MindFormerConfig(yaml_file)
build_context(config)
build_parallel_config(config)
# Graft the parallel and MoE sections onto the model sub-config before
# materializing it as a typed DeepseekV3Config object.
model_config = config.model.model_config
model_config.parallel_config = config.parallel_config
model_config.moe_config = config.moe_config
model_config = DeepseekV3Config(**model_config)

# build tokenizer
# Build the fast (Rust-backed) LLaMA tokenizer from the files named in the
# YAML config. NOTE(review): the second positional argument is assumed to be
# `tokenizer_file`, and `fast_tokenizer=True` is assumed to be an accepted
# kwarg — confirm against LlamaTokenizerFast's actual signature.
tokenizer = LlamaTokenizerFast(config.processor.tokenizer.vocab_file,
                                config.processor.tokenizer.tokenizer_file,
                                unk_token=config.processor.tokenizer.unk_token,
                                bos_token=config.processor.tokenizer.bos_token,
                                eos_token=config.processor.tokenizer.eos_token,
                                fast_tokenizer=True, trust_remote_code=True)
# No dedicated pad token is configured; reuse EOS for padding.
tokenizer.pad_token = tokenizer.eos_token

# build model from config
logger.info("build model.")
network = DeepseekV3ForCausalLM(model_config)
ms_model = Model(network)
# BUG FIX(review): the condition previously read
# `if config.load_checkpoint and False:` — the `and False` (a debug leftover)
# unconditionally skipped weight loading, so the model ran with randomly
# initialized parameters. Restore normal checkpoint loading.
if config.load_checkpoint:
    logger.info("----------------Transform and load checkpoint----------------")
    seq_length = model_config.seq_length
    # Dummy all-ones input used only to trace the predict layout so the
    # (possibly sharded) checkpoint can be transformed and loaded for inference.
    input_ids = Tensor(shape=(model_config.batch_size, seq_length), dtype=ms.int32, init=initializer.One())
    infer_data = network.prepare_inputs_for_predict_layout(input_ids)
    transform_and_load_checkpoint(config, ms_model, network, infer_data, do_predict=True)

logger.info("start generate.")
# Benchmark grid: each triple (batch size, prompt length, generated length)
# is exercised by the loop below. The full sweep is kept for reference:
#batch_sizes = [1, 4, 16, 64, 1, 4 ,16, 64, 1, 4, 16, 64]
#input_len = [256] * 4 + [512] * 4 + [1024] * 4
#output_len = [256] * 4 + [512] * 4 + [1024] * 4
batch_sizes = [192]
input_len = [256]
output_len = [256]

# Broadcast a single-element length list across all batch sizes.
# BUG FIX(review): the old code multiplied the list by len(batch_sizes)
# whenever it was merely shorter, which only yields matching lengths when
# the list has exactly one element (e.g. 4 entries * 12 batch sizes -> 48
# entries, tripping the assert below). Replicate length-1 lists only.
if len(input_len) == 1 and len(batch_sizes) > 1:
    input_len = input_len * len(batch_sizes)
if len(output_len) == 1 and len(batch_sizes) > 1:
    output_len = output_len * len(batch_sizes)
assert len(batch_sizes) == len(input_len) == len(output_len)

# Run every (batch size, prompt length, output length) configuration.
# FIX(review): the loop variables were named input_len/output_len, shadowing
# the module-level lists of the same names; this only worked because zip()
# had already captured the lists — renamed to avoid the fragile shadowing.
for bs, in_len, out_len in zip(batch_sizes, input_len, output_len):
    logger.info(f"hhh now test: [{bs}, {in_len}, {out_len}]")
    # Replicate the single question to fill the batch, then tokenize.
    input_questions_new = input_questions * bs
    inputs = tokenizer(input_questions_new)["input_ids"]
    # Truncate every prompt to the target input length.
    inputs = [seq[:in_len] for seq in inputs]
    logger.info(f"hhh inputs: {len(inputs)} inputs[0]:{len(inputs[0])}")
    # Repeat each configuration 4 times (the first run typically pays the
    # graph-compilation cost).
    for i in range(4):
        outputs = network.generate(inputs,
                                   max_length=8192,
                                   do_sample=False,
                                   top_k=5,
                                   top_p=1,
                                   max_new_tokens=out_len)
        # NOTE(review): outputs is a batch of sequences; batch_decode may be
        # the intended call — confirm tokenizer.decode accepts a list of lists.
        answer = tokenizer.decode(outputs)
        print(f"idx: {i} shape: {bs} x [{in_len}, {out_len}]")
        print("answer", answer)

'''
logger.info("start generate.")
inputs = tokenizer(input_questions)["input_ids"]
bs = len(inputs)
for i in range(len(inputs)):
    inputs[i] = inputs[i][:256]
logger.info(f"hhh inputs: {len(inputs)} inputs[0]:{len(inputs[0])}")
for i in range(3):
    """
    free_mem_1, total_mem_1, _ = acl.rt.get_mem_info(1)
    peak_mem_1 = total_mem_1 - free_mem_1
    free_mem, total_mem, _ = acl.rt.get_mem_info(4)
    peak_mem = total_mem - free_mem
    logger.info(f'{i}-->hhh after forward acl, peak_mem: {peak_mem}, free_mem: {free_mem}, total_memory: {total_mem}')
    logger.info(f'{i}-->hhh after forward acl_all, peak_mem: {peak_mem_1}, free_mem: {free_mem_1}, total_memory: {total_mem_1}')
    msmax_allocmem = ms.hal.max_memory_allocated(device_target='Ascend')
    msmax_resmem   = ms.hal.max_memory_reserved(device_target='Ascend')
    logger.info(f'{i}-->hhh after forward ms, msmax_allocmem:{msmax_allocmem} msmax_resmem:{msmax_resmem}')
    """
    outputs = network.generate(inputs,
                               max_length=8192,
                               do_sample=False,
                               top_k=5,
                               top_p=1,
                               max_new_tokens=256)
    answer = tokenizer.decode(outputs)
    print("answer: ", answer)