from os.path import abspath, join
import numpy as np
from mindspore.common.tensor import Tensor
import mindspore.common.dtype as mstype
from mindspore import load_checkpoint, load_param_into_net
from mindspore.train.model import Model
from mindspore.context import set_context
import mindspore as ms
from threading import Lock

from backend.experiment.framework.lmmodel import GenerateLMModel
from backend.experiment.pangu.pangu_dropout_recompute_eos_fp16 \
    import EvalNet_p, \
    PANGUALPHA as PANGUALPHA_fp16
from backend.experiment.pangu.pangu_dropout_recompute_eos import \
    PANGUALPHA as PANGUALPHA_fp32
from backend.experiment.pangu.pangu_wrapcell_gradient_scale_eos import \
    VirtualDatasetOneInputCell
from backend.experiment.pangu.utils_fix import PANGUALPHAConfig
from backend.experiment.args import model_type

# Remember to run the following commands before starting:
"""
export PATH=/usr/local/cuda-10.2/bin/:$PATH
export LD_LIBRARY_PATH=/home/linyongliang/miniconda3/envs/pangu/lib 
"""

# Maximum number of tokens the model accepts as context (sequence length).
context_limit = 1024
model_name = model_type
vocab_name = 'default_vocab'
# Vocabulary size of the model's embedding table.
vocabulary = 40000
# CUDA device index passed to set_context() when building the model.
gpu_id = 1

# Pangu does not appear to support multi-threaded invocation, so all
# model calls are serialized through this lock.
model_lock = Lock()


def get_model_13b_fp16(load_ckpt_path):
    """Build the 13B fp16 PanguAlpha inference network and load a checkpoint.

    The network is configured for single-device inference (no model or
    data parallelism) and wrapped in a MindSpore ``Model``.

    Args:
        load_ckpt_path: Path to the ``.ckpt`` file holding the weights.

    Returns:
        A ``mindspore.train.model.Model`` ready for ``predict`` calls.
    """
    model_parallel_num = 1
    # NOTE: with model_parallel_num > 1 this integer division would be 0;
    # the formula is only valid for the single-device setup used here.
    data_parallel_num = int(1 / model_parallel_num)
    per_batch_size = 1
    batch_size = per_batch_size * data_parallel_num
    config = PANGUALPHAConfig(
        data_parallel_num=data_parallel_num,
        model_parallel_num=model_parallel_num,
        batch_size=batch_size,
        seq_length=1024,
        vocab_size=vocabulary,  # module constant; keeps this in sync with get_model_2b6
        embedding_size=5120,
        num_layers=40,
        num_heads=40,
        expand_ratio=4,
        post_layernorm_residual=False,
        dropout_rate=0.1,
        compute_dtype=ms.float16,
        use_past=False,
        self_layernorm=True,
        forward_reduce_scatter=True,
        word_emb_dp=True,
        eod_reset=False)
    print("===config is: ", config, flush=True)
    set_context(device_id=gpu_id)
    pangu = PANGUALPHA_fp16(config)
    pangu_ = VirtualDatasetOneInputCell(pangu)
    eval_pangu = EvalNet_p(pangu_, generate=True)
    eval_pangu.set_train(False)  # inference mode: disable dropout etc.
    _model = Model(eval_pangu)

    param_dict = load_checkpoint(load_ckpt_path)
    load_param_into_net(eval_pangu, param_dict)

    print('#### Load ckpt success!!! ####')
    return _model


def get_model_2b6(load_ckpt_path):
    """Construct the 2.6B PanguAlpha evaluation network and load a checkpoint.

    Built for single-device inference (no model/data parallelism) and
    wrapped in a MindSpore ``Model`` for ``predict`` calls.

    Args:
        load_ckpt_path: Path to the ``.ckpt`` weight file.

    Returns:
        The wrapped ``Model`` instance.
    """
    model_parallel_num = 1
    data_parallel_num = int(1 / model_parallel_num)
    per_batch_size = 1

    # Hyper-parameters of the 2.6B configuration.
    config = PANGUALPHAConfig(
        data_parallel_num=data_parallel_num,
        model_parallel_num=model_parallel_num,
        batch_size=per_batch_size * data_parallel_num,
        seq_length=1024,
        vocab_size=vocabulary,
        embedding_size=2560,
        num_layers=32,
        num_heads=32,
        expand_ratio=4,
        post_layernorm_residual=False,
        dropout_rate=0.1,
        compute_dtype=ms.float16,
        use_past=False,
        self_layernorm=True,
        forward_reduce_scatter=True,
        word_emb_dp=True,
        eod_reset=False)
    print("===config is: ", config, flush=True)

    set_context(device_id=gpu_id)
    backbone = PANGUALPHA_fp32(config)
    virtual_ds = VirtualDatasetOneInputCell(backbone)
    eval_net = EvalNet_p(virtual_ds, generate=True)
    eval_net.set_train(False)
    wrapped_model = Model(eval_net)

    load_param_into_net(eval_net, load_checkpoint(load_ckpt_path))

    print('#### Load ckpt success!!! ####')
    return wrapped_model


class PanguLMModel(GenerateLMModel):
    """GenerateLMModel implementation backed by a PanguAlpha network.

    The MindSpore model is built lazily on first use (see
    ``__get_model``).  All predict/generate calls are serialized through
    the module-level ``model_lock`` because Pangu does not appear to
    support concurrent invocation.
    """

    @property
    def output_logits(self) -> bool:
        # Contract defined by GenerateLMModel; presumably tells consumers
        # whether per-token logits are exposed — TODO confirm against the
        # base class, since predict() does return per-token scores.
        return False

    def __init__(self, pad_id: int):
        # pad_id: token id used to right-pad inputs up to the context limit.
        super().__init__(context_limit)
        self.__context_length = context_limit
        self.__pad_id = pad_id
        # Lazily-built MindSpore Model; created on first __get_model() call.
        self.__model = None

    def predict(self, inputs: np.ndarray, *args, **kwargs) -> np.ndarray:
        """Run one forward pass and return scores for the valid positions.

        inputs: int token ids of shape (bs, valid_length); right-padded
        with the configured pad id up to the context length before the
        model call.

        Returns a (valid_length, vocab) array of scores.
        NOTE(review): only batch element 0 is returned even when bs > 1 —
        confirm this is intentional for batched callers.
        """
        with model_lock:
            bs, valid_length = inputs.shape
            pad_length = self.context_length - inputs.shape[-1]
            input_ids = np.pad(
                inputs, ((0, 0), (0, pad_length)), 'constant',
                constant_values=(0, self.__pad_id))
            inputs = Tensor(input_ids, mstype.int32)
            logits = self.__get_model().predict(inputs).asnumpy()
            # Model output comes back flat; restore (batch, seq, vocab).
            logits = logits.reshape(bs, self.context_length, -1)
            # Drop the padded tail; keep only the original positions.
            logits = logits[0, 0: valid_length, :]
        return logits

    def generate(self, inputs: np.ndarray, generate_count: int,
                 top_k: int = 10, **kwargs) -> np.ndarray:
        """Autoregressively sample up to ``generate_count`` new tokens.

        inputs: 1-D array of prompt token ids.
        generate_count: maximum number of tokens to append.
        top_k: sample only among the k highest-scoring candidates.

        Returns the prompt plus generated tokens as a 1-D array.
        Generation stops at the end token (50256), when the context
        window fills up, or once ``generate_count`` tokens were emitted.
        """
        with model_lock:
            pad_id = 6
            end_token = 50256
            seq_length = self.context_length
            valid_length = len(inputs)
            bs = 1
            pad_length = seq_length - inputs.shape[-1]
            # Right-pad the prompt to the full context window.
            input_ids = np.pad(
                inputs.reshape((1, -1)),
                ((0, 0), (0, pad_length)),
                'constant',
                constant_values=(0, pad_id)
            )
            cnt = 0
            outputs = input_ids
            while valid_length < seq_length:
                inputs = Tensor(input_ids, mstype.int32)
                logits = self.__get_model().predict(inputs).asnumpy()
                logits = logits.reshape(bs, seq_length, -1)
                # Scores predicting the token at position ``valid_length``.
                probs = logits[0, valid_length - 1, :]
                # Top-k candidate token ids, highest score first.
                p_args = probs.argsort()[::-1][:top_k]

                p = probs[p_args]
                # Renormalize over the top-k candidates and sample one.
                # NOTE(review): assumes the scores are non-negative (e.g.
                # already softmaxed) — np.random.choice requires a valid
                # probability vector; confirm upstream of the model.
                p = p / sum(p)
                target_index = np.random.choice(len(p), p=p)
                if p_args[target_index] == end_token or \
                        valid_length == seq_length - 1 \
                        or cnt >= generate_count:
                    outputs = input_ids
                    break
                # Write the sampled token in place and advance.
                input_ids[0][valid_length] = p_args[target_index]
                valid_length += 1
                cnt += 1

            # Trim trailing padding.  NOTE(review): this counts every
            # non-pad token, so a legitimate occurrence of token 6 inside
            # the sequence would shorten the result — confirm 6 never
            # appears as a real token.
            length = np.sum(outputs != pad_id)
            outputs = outputs[0][:length]
        return outputs

    def __get_model(self):
        """Build (once) and return the underlying MindSpore Model."""
        if self.__model is None:
            ms.context.set_context(
                save_graphs=False, mode=ms.context.GRAPH_MODE,
                device_target="GPU")
            # NOTE(review): both branches load the same 13B fp16
            # checkpoint; the 'pangu-2.6b' case looks like it should call
            # get_model_2b6 with a 2.6B checkpoint — confirm intent.
            if model_type == 'pangu-2.6b':
                self.__model = get_model_13b_fp16(
                    abspath(join(__file__, '../PanguAlpha_13b_fp16.ckpt')))
            elif model_type == 'pangu-13b-fp16':
                self.__model = get_model_13b_fp16(
                    abspath(join(__file__, '../PanguAlpha_13b_fp16.ckpt')))
            else:
                raise ValueError(f'model_type: {model_type}')
        return self.__model


# Module-level singleton; pad token id 6 matches the pad_id hard-coded
# inside PanguLMModel.generate().
model = PanguLMModel(6)
