import os
import time
import numpy as np
import pandas as pd

import mindspore as ms
from mindspore.train import Model
from mindspore import load_checkpoint, load_param_into_net
from mindspore.parallel import set_algo_parameters
from mindspore.parallel._cost_model_context import _set_multi_subgraphs

from mindformers import pipeline
from mindformers import LlamaForCausalLM, LlamaConfig, AutoTokenizer, LlamaTokenizer
from mindformers import init_context
from mindformers.modules import TransformerOpParallelConfig
from mindformers.trainer.utils import get_last_checkpoint
from mindformers.tools import logger

# Maximum sequence length used both for compilation shape and generation cap.
SEQ_LENGTH = 2048
# Directory holding per-rank sharded checkpoints (rank_0/, rank_1/, ...).
# Empty string (the default) disables distributed checkpoint loading in chat().
DISTRIBUTED_CKPT_PATH = os.getenv("DISTRIBUTED_CKPT_PATH", "")


# Set up the MindSpore execution and parallel context.
# NOTE(review): "mode": 0 and "parallel_mode": 1 are raw enum values —
# presumably GRAPH_MODE and semi-auto-parallel; confirm against the
# mindformers init_context API before changing.
context_config = {"device_target": "Ascend", "mode": 0,  "max_device_memory": "31GB"}
parallel_context_config = {"parallel_mode": 1, "gradients_mean": False, "full_batch": True}
# init_context must run first: it initializes the distributed communication
# group and returns this process's rank id and the total device count.
rank_id, device_num = init_context(use_parallel=True, context_config=context_config, parallel_config=parallel_context_config)
# Auto-parallel search knobs: follow elementwise op strategies and use all devices.
set_algo_parameters(elementwise_op_strategy_follow=True, fully_use_devices=True)
# Allow the cost model to handle multiple subgraphs (required for inference graphs here).
_set_multi_subgraphs()


# Model configuration.
# NOTE(review): the original comment said "bloom 7.1b", but these dimensions
# (hidden_size=5120, 40 layers, 40 heads, vocab 32000) are Llama-13B-style —
# verify which checkpoint this is meant to match.
config = LlamaConfig(
    use_parallel= True,
    embedding_init_type="float32",
    # Empty: weights are loaded manually from DISTRIBUTED_CKPT_PATH in chat(),
    # not auto-downloaded by name.
    checkpoint_name_or_path="",
    seq_length=SEQ_LENGTH,
    hidden_size=5120,
    num_layers=40,
    num_heads=40,
    vocab_size=32000,
    multiple_of=256,
    # pad_token_id equals vocab_size — one past the last valid token id;
    # presumably intentional for this tokenizer, but worth confirming.
    pad_token_id=32000,
    max_decode_length=1024,
    hidden_dropout_rate=0.0,
    attention_dropout_rate=0.0,
    # Sampling settings: top-k of 3 with top-p disabled (p=1).
    top_k=3,
    top_p=1,
    do_sample=True,
    # Single-device sharding here; actual sharding comes from the auto-parallel
    # context configured above.
    parallel_config=TransformerOpParallelConfig(
        data_parallel=1,
        model_parallel=1,
        pipeline_stage=1,
        vocab_emb_dp=True
        )
    )

# Test prompts loaded at import time; expected columns (Chinese headers) are
# used in chat() below.
path = 'testcase.csv'
test_case = pd.read_csv(path)


def chat():
    """Run batch chat inference over ``test_case`` and save the results.

    For each row of the module-level ``test_case`` DataFrame, builds a prompt
    from the persona and dialog-history columns, generates a completion with
    the Llama model, and finally writes the table (with a new output column)
    to ``output_testcase.csv``.

    If ``DISTRIBUTED_CKPT_PATH`` is set, loads this rank's latest sharded
    checkpoint after compiling the model under auto-parallel.
    """
    # TODO: change tokenizer model path — hard-coded to the current environment.
    tokenizer = LlamaTokenizer("/home/ma-user/work/chat_old/tokenizer.model")
    llama = LlamaForCausalLM(config)
    llama.set_train(False)  # inference only
    # Debug output: confirm the effective config and lm_head sharding.
    print(llama.config)
    print("*********************************************")
    print(llama.lm_head.weight.shape)
    print("*********************************************")
    print(llama.config.parallel_config.vocab_emb_dp)
    if DISTRIBUTED_CKPT_PATH:
        # Find the latest sharded checkpoint file for this rank.
        ckpt_path = os.path.join(DISTRIBUTED_CKPT_PATH, "rank_{}".format(rank_id))
        ckpt_path = get_last_checkpoint(ckpt_path)
        logger.info("ckpt path: %s", str(ckpt_path))

        # Compile the network under auto-parallel so parameters are sharded
        # before the (sharded) checkpoint is loaded into them.
        infer_data=(ms.Tensor(np.ones(shape=(1, SEQ_LENGTH)), ms.int32),)
        llama.set_auto_parallel()
        llama.compile(*infer_data)
        print(llama.lm_head.weight.shape)
        print("*******************************************")
        checkpoint_dict = load_checkpoint(ckpt_path)
        not_load_network_params = load_param_into_net(llama, checkpoint_dict)
        # Params listed here were present in the net but missing from the ckpt.
        logger.info("Network parameters are not loaded: %s", str(not_load_network_params))

    output_text = []
    for idx, row in test_case.iterrows():
        # Prompt = persona info + dialog history (Chinese column headers).
        question = f"{row['人设信息']}\n{row['对话历史']}"
        t1=time.time()
        inputs = tokenizer.encode(question)
        inputs = np.array([inputs]).astype(np.int32) # add batch dim
        outputs = llama.generate(inputs, max_length=SEQ_LENGTH, do_sample=True, eos_token_id=2)
        outputs = outputs[0] # remove batch dim
        print(f"Question {idx}:")
        text = tokenizer.decode(outputs)
        print(text)
        output_text.append(text)
        print("chat time :",time.time()-t1)
    test_case["输出"] = output_text
    test_case.to_csv("output_testcase.csv", index=False)

if __name__ == "__main__":
    chat()