# coding:utf-8

"""Classify-bert with lstm-predict

Author:
    name: reeseimk
    email: reeseimk@163.com

Homepage: https://gitee.com/reeseimk/mindspore_bert
"""

import sys
import os

# Make this script's directory and its parent importable so the local
# `src`/`utils` packages resolve regardless of the caller's working directory.
# BUG FIX: the original passed the string "__file__" to dirname(), which
# returns "" and silently anchored both paths to the current working
# directory instead of the script's location.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "./")))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))

import mindspore as ms
from mindspore.train.model import Model
from mindspore.train.serialization import load_checkpoint, load_param_into_net
import src.generate_mindrecord.tokenization as tokenization
import mindspore.nn as nn
from mindspore.common.initializer import TruncatedNormal
from mindspore.ops import operations as P
import mindspore.ops as ops
from mindspore import context

from src.bert_model import BertModel
from utils.set_config import SetConfig


class BertCLSModel(nn.Cell):
    """BERT encoder with a bidirectional-LSTM head for sentence classification.

    The full BERT sequence output (one vector per token) is fed through a
    single-layer bidirectional LSTM; the final forward and backward hidden
    states are concatenated and projected to ``num_labels`` logits.  The
    returned log-softmax values are order-equivalent to softmax
    probabilities, so an argmax over them yields the predicted class.
    Intended for tasks such as XNLI (num_labels=3), LCQMC (num_labels=2),
    Chnsenti (num_labels=2).

    Args:
        args: parsed config namespace (kept for interface compatibility;
            not read inside this class).
        config: BERT model configuration (hidden sizes, dropout rates, dtypes).
        is_training (bool): when False, dropout inside BERT is forced to 0.
        num_labels (int): number of target classes. Default: 2.
        dropout_prob (float): dropout applied before the LSTM and before the
            final dense layer. Default: 0.0.
        use_one_hot_embeddings (bool): forwarded to BertModel. Default: False.
        assessment_method (str): when "spearman_correlation", raw logits are
            returned without log-softmax. Default: "".
    """

    def __init__(self, args, config, is_training, num_labels=2, dropout_prob=0.0, use_one_hot_embeddings=False,
                 assessment_method=""):
        super(BertCLSModel, self).__init__()
        if not is_training:
            config.hidden_dropout_prob = 0.0
            # NOTE(review): standard BERT configs name this attribute
            # `attention_probs_dropout_prob`; if that is the case here, this
            # assignment only creates an unused attribute and attention
            # dropout stays enabled at eval time -- confirm against the
            # config class in src.bert_model.
            config.hidden_probs_dropout_prob = 0.0

        self.bert = BertModel(config, is_training, use_one_hot_embeddings)

        self.cast = P.Cast()
        self.weight_init = TruncatedNormal(config.initializer_range)
        self.log_softmax = P.LogSoftmax(axis=-1)
        self.dtype = config.dtype
        self.num_labels = num_labels
        # assumes BERT hidden size is 768 -- TODO confirm against
        # config.hidden_size rather than hard-coding.
        self.lstm = nn.LSTM(768,
                            128,
                            num_layers=1,
                            bidirectional=True,
                            batch_first=True)
        # The dense input width must be 2x the LSTM hidden size because the
        # bidirectional forward and backward states are concatenated.
        self.dense_1 = nn.Dense(256, self.num_labels, weight_init=self.weight_init, has_bias=True).to_float(config.compute_type)
        self.dropout = nn.Dropout(p=dropout_prob)
        self.assessment_method = assessment_method

    def construct(self, input_ids, input_mask, token_type_id):
        """Return per-class (log-softmax) logits of shape (batch, num_labels)."""
        sequence_output, pooled_output, _ = self.bert(input_ids, token_type_id, input_mask)
        # sequence_output is the raw per-token output (rank 3) -- required
        # when stacking an LSTM on top of BERT.  pooled_output is the [CLS]
        # vector (rank 2), used when a Dense head follows BERT directly.
        cls = self.cast(sequence_output, self.dtype)

        cls = self.dropout(cls)
        # For classification we use the LSTM's final hidden states, not the
        # per-timestep outputs.
        _, (hidden, _) = self.lstm(cls)
        # Concatenate the last forward (index -2) and backward (index -1)
        # hidden states into a single (batch, 256) feature vector.
        hidden = ops.concat((hidden[-2, :, :], hidden[-1, :, :]), axis=1)
        cls = self.dropout(hidden)
        logits = self.dense_1(cls)
        logits = self.cast(logits, self.dtype)
        if self.assessment_method != "spearman_correlation":
            logits = self.log_softmax(logits)
        return logits

# (ground-truth label, sentence) pairs used as a small smoke-test set for
# inference; the printed label lets a human compare against the prediction.
examples = [
    ("news_entertainment", "江疏影甜甜圈自拍，迷之角度竟这么好看，美吸引一切事物"),
    ("news_military", "以色列大规模空袭开始！伊朗多个军事目标遭遇打击，誓言对等反击"),
    ("news_finance", "出栏一头猪亏损300元，究竟谁能笑到最后！"),
    ("news_culture", "走进荀子的世界 触摸二千年前的心灵温度"),
    ("news_finance", "区块链投资心得，能做到就不会亏钱"),]

# Same label ordering as used when the training set was built, with the
# key/value direction reversed: class id -> label name.
label_map = {
        0: "news_military",
        1: "news_stock",
        2: "news_tech",
        3: "news_sports",
        4: "news_edu",
        5: "news_story",
        6: "news_culture",
        7: "news_game",
        8: "news_agriculture",
        9: "news_travel",
        10: "news_finance",
        11: "news_house",
        12: "news_entertainment",
        13: "news_world",
        14: "news_car"
    }

def convert_single_example(args, text, max_seq_length, tokenizer):
    """Convert one sentence into padded BERT input tensors of batch size 1.

    The text is tokenized, truncated, wrapped in [CLS]/[SEP], converted to
    vocabulary ids, and zero-padded to ``max_seq_length``.

    Args:
        args: config namespace providing ``vocab_file_path``.
        text (str): the raw input sentence.
        max_seq_length (int): fixed sequence length expected by the model.
        tokenizer: a FullTokenizer instance used for word-piece tokenization.

    Returns:
        tuple: ``(input_ids, input_mask, segment_ids)`` int32 Tensors, each
        of shape (1, max_seq_length).
    """
    pieces = tokenizer.tokenize(text)
    # Reserve two positions for the [CLS] and [SEP] markers.
    pieces = pieces[:max_seq_length - 2]

    all_tokens = ["[CLS]"] + pieces + ["[SEP]"]
    # Single-sentence input: every position belongs to segment 0.
    segment_ids = [0] * len(all_tokens)

    input_ids = tokenization.convert_tokens_to_ids(args.vocab_file_path, all_tokens)
    input_mask = [1] * len(input_ids)

    # Zero-pad ids, mask and segments out to the fixed sequence length.
    pad = max_seq_length - len(input_ids)
    input_ids = input_ids + [0] * pad
    input_mask = input_mask + [0] * pad
    segment_ids = segment_ids + [0] * pad

    return (ms.Tensor([input_ids], dtype=ms.int32),
            ms.Tensor([input_mask], dtype=ms.int32),
            ms.Tensor([segment_ids], dtype=ms.int32))

def main(ckpt_path="/usr/local/teamwork/mindspore_dir/my_bert/cache_ckpt/cls_bert_lstm.ckpt"):
    """Load the fine-tuned BERT+LSTM classifier and predict the example set.

    Builds the model from the project config, loads fine-tuned weights and
    prints sentence / gold label / predicted label for each entry in
    ``examples``.

    Args:
        ckpt_path (str): path to the fine-tuned checkpoint.  Defaults to the
            previously hard-coded location so existing callers are unaffected.
    """
    set_config = SetConfig()
    args, base_bert_cfg = set_config.get_config()
    is_training = False
    cls_bert = BertCLSModel(args, base_bert_cfg, is_training, num_labels=15, dropout_prob=0.0)

    # Load the fine-tuned weights into the full classifier network.
    _param_dict = load_checkpoint(ckpt_path)
    load_param_into_net(cls_bert, _param_dict)

    cls_bert.set_train(False)
    model = Model(cls_bert)

    argmax = P.Argmax()
    tokenizer = tokenization.FullTokenizer(vocab_file=args.vocab_file_path, do_lower_case=True)
    for label, text in examples:
        input_ids, input_mask, segment_ids = convert_single_example(args, text, base_bert_cfg.seq_length, tokenizer)
        logit = model.predict(input_ids, input_mask, segment_ids)

        print("sentence: {}".format(text))
        print("label: {}".format(label))
        # Argmax over the (1, num_labels) log-softmax output gives the
        # predicted class id for the single-sentence batch.
        val_logit = int(argmax(logit)[0].asnumpy())
        print("prediction: {}\n".format(label_map[val_logit]))


if __name__ == "__main__":
    main()