# coding:utf-8

"""Classify-bert with lstm-trainer

Author:
    name: reeseimk
    email: reeseimk@163.com

Homepage: https://gitee.com/reeseimk/mindspore_bert
"""

import sys
import os

# Make this package and its parent importable when the script is run directly.
# NOTE: __file__ must NOT be quoted — os.path.dirname("__file__") returns ""
# (dirname of the literal string), which silently resolves the paths against
# the current working directory instead of this file's directory.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "./")))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))

import numpy as np
from tqdm import tqdm

import mindspore as ms
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor, context
from mindspore.common.initializer import TruncatedNormal
from mindspore.ops import operations as P
from mindspore.train.model import Model
from mindspore.train.serialization import load_checkpoint, load_param_into_net

import src.generate_mindrecord.tokenization as tokenization

# Run on CPU; adjust to 'GPU'/'Ascend' for other targets.
context.set_context(device_target='CPU')

from src.bert_model import BertModel
from utils.set_config import SetConfig
from utils.deal_data import load_data_cls

class BertCLSModel(nn.Cell):
    """BERT encoder with a bidirectional-LSTM head for sequence classification.

    Intended for classification task evaluation, e.g. XNLI (num_labels=3),
    LCQMC (num_labels=2), Chnsenti (num_labels=2). The returned output is the
    log_softmax of the logits, which is monotonic in the softmax, so argmax
    predictions are unaffected.
    """

    def __init__(self, args, config, is_training, num_labels=2, dropout_prob=0.0, use_one_hot_embeddings=False,
                 assessment_method=""):
        """
        Args:
            args: runtime arguments; must expose ``pretrain_load_ckpt_path``.
            config: BERT config; must expose ``initializer_range``, ``dtype``
                and ``compute_type``.
            is_training (bool): forwarded to ``BertModel``.
            num_labels (int): number of output classes.
            dropout_prob (float): dropout applied before the LSTM and before
                the final dense layer.
            use_one_hot_embeddings (bool): forwarded to ``BertModel``.
            assessment_method (str): if ``"spearman_correlation"``, raw logits
                are returned instead of log-softmax (see ``construct``).
        """
        super(BertCLSModel, self).__init__()
        # NOTE: __file__ must not be quoted — dirname("__file__") is "" and
        # would resolve these directories against the CWD, not this file.
        self.currentDir = os.path.abspath(os.path.join(os.path.dirname(__file__), "./"))
        self.parentDir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))

        # Build the BERT backbone and load the pretrained checkpoint into it.
        self.bert = BertModel(config, is_training, use_one_hot_embeddings)
        self._param_dict = load_checkpoint(args.pretrain_load_ckpt_path)
        # Checkpoint parameters are named 'bert.bert.*'; strip one level of
        # the prefix so the names match this model's 'bert.*' namespace.
        param_dict = {}
        for k, v in self._param_dict.items():
            param_dict[k.replace('bert.bert.', 'bert.')] = v
        load_param_into_net(self.bert, param_dict)

        self.cast = P.Cast()
        self.weight_init = TruncatedNormal(config.initializer_range)
        self.log_softmax = P.LogSoftmax(axis=-1)
        self.dtype = config.dtype
        self.num_labels = num_labels
        # assumes the BERT hidden size is 768 — TODO confirm against config
        self.lstm = nn.LSTM(768,
                            128,
                            num_layers=1,
                            bidirectional=True,
                            batch_first=True)
        # The dense input size must be 2x the LSTM hidden size because the
        # bidirectional LSTM concatenates forward and backward states.
        self.dense_1 = nn.Dense(256, self.num_labels, weight_init=self.weight_init, has_bias=True).to_float(config.compute_type)
        self.dropout = nn.Dropout(p=dropout_prob)
        self.assessment_method = assessment_method

    def construct(self, input_ids, input_mask, token_type_id):
        """Return classification logits for a batch.

        Output is log-softmax over ``num_labels`` unless
        ``assessment_method == "spearman_correlation"``, in which case the raw
        dense logits are returned.
        """
        # sequence_output: per-token hidden states (rank 3) — required when an
        # LSTM is stacked on top of BERT.
        # pooled_output: the [CLS] representation (rank 2) — would be used if
        # a Dense head followed BERT directly; unused here.
        sequence_output, pooled_output, _ = self.bert(input_ids, token_type_id, input_mask)
        cls = self.cast(sequence_output, self.dtype)

        cls = self.dropout(cls)
        # For classification we need the final hidden state, not the full
        # output sequence.
        _, (hidden, _) = self.lstm(cls)
        # Concatenate the last forward (-2) and backward (-1) hidden states.
        hidden = ops.concat((hidden[-2, :, :], hidden[-1, :, :]), axis=1)
        cls = self.dropout(hidden)
        logits = self.dense_1(cls)
        logits = self.cast(logits, self.dtype)
        if self.assessment_method != "spearman_correlation":
            logits = self.log_softmax(logits)
        return logits

    def get_abs_path(self, path_relative: str = ""):
        """Resolve ``path_relative`` against the project's parent directory."""
        return os.path.join(self.parentDir, path_relative)
    
    
if __name__ == "__main__":
    set_config = SetConfig()
    args, base_bert_cfg = set_config.get_config()
    is_training = True
    cls_bert = BertCLSModel(args, base_bert_cfg, is_training, num_labels=15, dropout_prob=0.5)
    # print(cls_bert)
    
    dataset_dev = load_data_cls(data_path="data/cls_data/dev_1.json", batch_size=8)
    dataset_train = load_data_cls(data_path="data/cls_data/dev_1.json", batch_size=32)
    
    lr = 0.00001
    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False, reduction='mean')  # sparse=False，label采用one_hot编码
    optimizer = nn.Adam(cls_bert.trainable_params(), learning_rate=lr)
    
    def forward_fn(input_ids, input_mask, token_type_id, label):
        logits = cls_bert(input_ids, input_mask, token_type_id)
        label = label.asnumpy()
        # nn.SoftmaxCrossEntropyWithLogits，softmax交叉熵损失函数在sparse=False的时候，需要把label转成one_hot编码
        labels_np = np.eye(15)[label].astype(np.float32).reshape(-1, 15)  # 对label降一维
        labels = Tensor(labels_np)

        loss = loss_fn(logits, labels)
        return loss

    grad_fn = ms.value_and_grad(forward_fn, None, optimizer.parameters)

    def train_step(input_ids, input_mask, token_type_id, label):
        loss, grads = grad_fn(input_ids, input_mask, token_type_id, label)
        optimizer(grads)
        return loss

    def train_one_epoch(model, dataset, epoch=0):
        model.set_train(True)
        total = dataset.get_dataset_size()
        # print(total)
        loss_total = 0
        step_total = 0
        
        with tqdm(total=total) as t:
            t.set_description('Epoch %i' % epoch)
            for item in dataset.create_tuple_iterator():
                # print(item)
                input_ids, input_mask, segment_ids, label_ids = item
                loss = train_step(input_ids, input_mask, segment_ids, label_ids)
                loss_total += loss.asnumpy()
                step_total += 1
                loss_mean = loss_total/step_total
                t.set_postfix(loss=loss_mean)
                t.update(1)
        return loss_mean

    best_valid_acc = 0.0
    best_valid_loss = 100.0
    
    cache_dir = cls_bert.get_abs_path(path_relative="cache_ckpt")
    ckpt_file_name = os.path.join(cache_dir, 'cls_bert_lstm.ckpt')
    epoch_num = 30
    
    for epoch in range(epoch_num): 
        train_loss = train_one_epoch(cls_bert, dataset_train, epoch=epoch)
        
        cls_bert.set_train(False)
        # 训练效果评估
        dev_model = Model(cls_bert)
        acc_num = 0
        total_num = 0
        loss_total = 0
        step_total = 0
        for dev_item in dataset_dev.create_tuple_iterator():
            input_ids, input_mask, segment_ids, label = dev_item
            val_logit = dev_model.predict(input_ids, input_mask, segment_ids)  # 预测，返回值为模型construct函数的返回值
            
            # 计算预测loss值
            label = label.asnumpy()
            labels_np = np.eye(15)[label].astype(np.float32).reshape(-1, 15)  # 对label进行one_hot编码，并降一维，这个操作是为了满足loss_fn的输入条件
            labels = Tensor(labels_np)
            loss = loss_fn(val_logit, labels)
            loss_total += loss.asnumpy()
            step_total += 1
            
            # 计算预测acc值
            label = np.reshape(label, -1)  # 对非one_hot编码的label进行拉平处理
            logits = val_logit.asnumpy()
            logit_id = np.argmax(logits, axis=-1)  # 挑选模型预测的softmax最大值作为预测分类
            acc_num += np.sum(label == logit_id)
            total_num += len(label)
        acc = acc_num / total_num
        val_loss=loss_total/step_total
        # print(acc, val_loss)
        print(f"acc={acc}", f"val_loss={val_loss}")
        
        # if acc > best_valid_acc:
        #     best_valid_acc = acc
        #     ms.save_checkpoint(cls_bert, ckpt_file_name)
            
        if train_loss < best_valid_loss:
            best_valid_loss = train_loss
            ms.save_checkpoint(cls_bert, ckpt_file_name)
            