import mindspore
from mindformers import GPT2LMHeadModel, GPT2Tokenizer
import mindspore.common.dtype as mstype
from mindformers.models.gpt2  import GPT2Config
from mindformers.core.loss import CrossEntropyLoss
from mindformers.modules.transformer import AttentionMask, VocabEmbedding
import copy
from mindspore.ops import operations as P
from mindspore.ops import functional as F
import mindspore.nn as nn

class MyGPT2LMHeadModel(GPT2LMHeadModel):
    """GPT-2 LM head model with a switchable training-phase output.

    Behaves like ``GPT2LMHeadModel`` except that during training the
    cross-entropy loss is computed only when ``is_opt`` is enabled via
    :meth:`set_isopt`; otherwise the (shifted) logits are returned so a
    caller can apply its own objective.
    """

    def __init__(self, config: GPT2Config = None):
        config = config if config is not None else GPT2Config()
        super().__init__(config)

        # Token id used both as the pad sentinel when building the default
        # attention mask and as end-of-sequence.
        self.eos_token_id = self.config.eos_token_id
        parallel_config = self.config.parallel_config

        # Primitives sharded along (data_parallel, 1).
        self.stridedslice = P.StridedSlice().shard(((parallel_config.data_parallel, 1),))
        self.not_equal = P.NotEqual().shard(((parallel_config.data_parallel, 1), ()))

        # Expands a 2-D padding mask into the causal attention mask.
        self.get_attention_mask = AttentionMask(seq_length=config.seq_length,
                                                parallel_config=parallel_config.dp_mp_config)

        # Deep-copy so the loss's parallel settings cannot mutate the
        # model-wide parallel_config.
        loss_parallel_config = copy.deepcopy(parallel_config)
        self.loss = CrossEntropyLoss(parallel_config=loss_parallel_config)

        self.reshape = P.Reshape()
        self.cast = P.Cast()
        self.load_checkpoint(config)
        self.add = P.Add().shard(((parallel_config.data_parallel, 1), ()))

        # When False (default), the training phase returns logits, not loss.
        self.is_opt = False

    def construct(self, input_ids, attention_mask=None):
        """Forward pass.

        Args:
            input_ids: integer tensor of shape (batch_size, seq_length).
            attention_mask: optional 2-D padding mask; when None it is
                derived as ``input_ids != eos_token_id``.

        Returns:
            * non-train phase: ``(logits, tokens, loss_mask)`` with logits
              reshaped to (batch_size, seq_length, vocab_size).
            * train phase, ``is_opt`` True: scalar cross-entropy loss.
            * train phase, ``is_opt`` False: logits of shape
              (batch_size, seq_length - 1, vocab_size).
        """
        if attention_mask is None:
            # Mask out pad/eos positions.
            attention_mask = self.not_equal(input_ids, self.eos_token_id)

        batch_size, seq_length = input_ids.shape
        attention_mask = self.cast(attention_mask, mstype.float32)
        loss_mask = attention_mask

        if self.phase != "train":
            tokens = input_ids
        else:
            # Teacher forcing: feed tokens[:-1]; targets are tokens[1:].
            tokens = self.stridedslice(input_ids, (0, 0), (batch_size, seq_length - 1), (1, 1))
            attention_mask = self.stridedslice(attention_mask, (0, 0), (batch_size, seq_length - 1), (1, 1))

        attention_mask = self.get_attention_mask(attention_mask)

        # output_states: [batch_size, seq_length, hidden]; head ties the
        # embedding table to produce vocab logits.
        output_states, embedding_table = self.backbone(tokens, attention_mask)
        logits = self.head(output_states, embedding_table)
        logits = logits.astype(mindspore.float32)

        if self.phase != 'train':
            logits = self.reshape(logits, (batch_size, seq_length, -1))

            # makes cast effective to avoid allgather issue in Mindspore1.10
            loss_mask = self.add(loss_mask, 1)
            return logits, tokens, loss_mask

        if self.is_opt:
            # Shift labels/mask right by one and flatten for the loss op.
            loss_mask = self.stridedslice(loss_mask, (0, 1), (batch_size, seq_length), (1, 1))
            labels = self.stridedslice(input_ids, (0, 1), (batch_size, seq_length), (1, 1))
            labels = self.reshape(labels, (-1,))
            loss_mask = self.reshape(loss_mask, (-1,))
            loss = self.loss(logits, labels, loss_mask)
            return loss

        # Training without the built-in objective: hand back shifted logits.
        logits = self.reshape(logits, (batch_size, seq_length - 1, -1))
        return logits

    def set_isopt(self, is_opt: bool = False):
        """Choose the training-phase output: loss when True, logits when False."""
        self.is_opt = is_opt
