import os
import time
import numpy as np
from pprint import pprint

import mindspore as ms
from mindspore.train import Model
from mindspore import load_checkpoint, load_param_into_net
from mindspore.parallel import set_algo_parameters
from mindspore.parallel._cost_model_context import _set_multi_subgraphs

from mindformers import pipeline
from mindformers import LlamaConfig,LlamaForCausalLM, LlamaTokenizer
from mindformers import init_context
from mindformers.modules import TransformerOpParallelConfig
from mindformers.trainer.utils import get_last_checkpoint
from mindformers.tools import logger
from mindformers.tools.register import MindFormerConfig

# from fastapi import FastAPI
# from fastapi.middleware.cors import CORSMiddleware
# from bo.schemas import PromptRequest
# from utils.prompter import Prompter

# Directory holding per-rank sharded checkpoints ("rank_0/", "rank_1/", ...).
# Empty string (default) means: run without loading a distributed checkpoint.
DISTRIBUTED_CKPT_PATH = os.getenv("DISTRIBUTED_CKPT_PATH", "")
# NOTE: this script is only for strategy-file conversion for evaluation/inference.

# Maximum sequence length used to build the dummy input for layout inference.
SEQ_LENGTH = 16000

# MindSpore context: Ascend backend, graph mode (mode=0), graph kernel disabled.
context_config = {
    "device_target": "Ascend",
    "mode": 0,
    "enable_graph_kernel": False,
    "save_graphs": False,
    "max_device_memory": "31GB",
    "graph_kernel_flags":
    "--disable_expand_ops=Softmax,Dropout --enable_parallel_fusion=true --reduce_fuse_depth=8 --enable_auto_tensor_inplace=true"}
# parallel_mode=1 (semi-auto parallel); strategy file is written next to the script.
parallel_context_config = {
    "parallel_mode": 1,
    "gradients_mean": False,
    "full_batch": True,
    "strategy_ckpt_save_file": "./llama_7b_stragety.ckpt",
    "enable_alltoall": False}
rank_id, device_num = init_context(use_parallel=True,
                                   context_config=context_config,
                                   parallel_config=parallel_context_config)
set_algo_parameters(elementwise_op_strategy_follow=True, fully_use_devices=True)
_set_multi_subgraphs()


# Model config. The YAML path is overridable via the CONFIG_PATH environment
# variable (same pattern as DISTRIBUTED_CKPT_PATH above); the default keeps
# the original hard-coded location so existing usage is unchanged.
CONFIG_PATH = os.getenv(
    "CONFIG_PATH",
    "/home/zhangsenzhen/2023Q2/mindformers/configs/llama/run_llama_13b_ziya_predict_multi.yaml")
config = MindFormerConfig(CONFIG_PATH)
config.model.model_config.parallel_config = TransformerOpParallelConfig(**config.parallel_config)
pprint(config.model.model_config)
model_config = LlamaConfig(**config.model.model_config)

api_prefix = "/api/v1"
base_model = 'llama_7b'
prompt_templates = "wenxiu_template"

def chat():
    """Load the Llama tokenizer and network, optionally restore a sharded
    checkpoint for this rank, then answer a fixed list of questions while
    printing the generated text and a tokens-per-second figure.

    Relies on module-level state: ``config``, ``model_config``, ``rank_id``,
    ``SEQ_LENGTH`` and ``DISTRIBUTED_CKPT_PATH``.
    """
    tokenizer = LlamaTokenizer(vocab_file=config.processor.tokenizer.vocab_file)
    network = LlamaForCausalLM(model_config)
    network.set_train(False)

    if DISTRIBUTED_CKPT_PATH:
        # Resolve the latest checkpoint inside this rank's shard directory.
        rank_dir = os.path.join(DISTRIBUTED_CKPT_PATH, f"rank_{rank_id}")
        ckpt_file = get_last_checkpoint(rank_dir)
        logger.info("ckpt path: %s", str(ckpt_file))

        # Infer the parallel layout with incremental inference temporarily
        # disabled, then restore the use_past flag before loading weights.
        wrapper = Model(network)
        if model_config.use_past:
            wrapper._network.add_flags_recursive(use_past=False)
        wrapper.infer_predict_layout(ms.Tensor(np.ones(shape=(1, SEQ_LENGTH)), ms.int32))
        if model_config.use_past:
            wrapper._network.add_flags_recursive(use_past=True)

        params = load_checkpoint(ckpt_file)
        missing = load_param_into_net(network, params)
        logger.info("Network parameters are not loaded: %s", str(missing))

    question_list = [
        "你是谁",
        "当地时间7月19日，中国驻美国大使谢锋参加阿斯彭安全论坛时表示，中国不希望发生贸易战或者科技战，如果美国对中国的芯片行业实施更多限制，中国肯定会做出回应。谢锋说，中国并不回避竞争，但美国定义竞争的方式并不公平。谢锋提到美国正考虑建立对外投资审查机制并进一步禁止向中国出口人工智能芯片的话题时表示，中国政府不会袖手旁观，中方不会主动挑衅，但也不会因挑衅而退缩。",
        "江西省水利厅7月20日下午发布消息称，当日11时，鄱阳湖代表站星子站水位退至11.99米，为1951年有纪录以来同期最低水位，鄱阳湖提前进入枯水期，2023年成为有纪录以来最早进入枯水期的年份。",
        "江西省水利厅7月20日表示，根据气象预测，未来十天江西省降雨仍偏少，加之五河及长江来水偏少，鄱阳湖水位将维持波动退水态势，建议关注鄱阳湖水位持续下降对湖区生产生活生态带来的不利影响，做好用水管理及抗旱保水工作。"
        ]

    for question in question_list:
        token_ids = tokenizer.encode(question, add_special_tokens=False)
        batch = np.array([token_ids]).astype(np.int32)  # add batch dim
        started = time.time()
        generated = network.generate(batch, max_length=None, do_sample=False, eos_token_id=2)
        elapsed = time.time() - started
        sequence = generated[0]  # remove batch dim
        token_count = len(sequence)
        print(tokenizer.decode(sequence))
        print(f'速度：{token_count/elapsed}')

if __name__ == "__main__":
    # Script entry point: module-level setup above has already initialized the
    # parallel context and configs; this runs the generation demo.
    chat()