import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddlenlp.transformers.t5.modeling import T5ForConditionalGeneration


class CopyGenerator(nn.Layer):
    """Pointer-generator head: mixes the vocabulary distribution with a
    copy distribution built from the decoder's cross-attention.

    The returned values are probabilities (both branches are softmaxed and
    gated), not raw logits.
    """

    def __init__(self, d_model):
        super().__init__()
        # Projects [attention context ; decoder state] -> scalar copy gate.
        self.prob_proj = nn.Linear(d_model * 2, 1)

    def forward(self, src, decode_output, decode_attn, memory, gen_logits):
        """Blend generation and copy distributions.

        Args:
            src: source token ids used as scatter targets.
            decode_output: decoder hidden states.
            decode_attn: per-head cross-attention weights.
            memory: encoder hidden states.
            gen_logits: raw vocabulary logits from the LM head.
        """
        # Average the cross-attention over heads.
        attn = paddle.mean(decode_attn, 1)
        n_batch, n_steps, src_len = attn.shape

        # Scatter each sample's attention mass onto its source token ids,
        # producing a vocab-sized copy score tensor per sample.
        blanks = paddle.zeros_like(gen_logits)
        pieces = []
        for blank, ids, weights in zip(blanks.chunk(n_batch, 0),
                                       src.chunk(n_batch, 0),
                                       attn.chunk(n_batch, 0)):
            pieces.append(simple_scatter(blank, ids, weights))
        copy_scores = paddle.concat(pieces, 0)

        # Attention-weighted encoder context feeds the generation gate.
        context = paddle.matmul(attn, memory)
        gate_in = paddle.concat([context, decode_output], -1)
        p_gen = F.sigmoid(self.prob_proj(gate_in))

        # final = p_gen * softmax(generate) + (1 - p_gen) * softmax(copy)
        gen_probs = p_gen * F.softmax(gen_logits, -1)
        copy_probs = (1 - p_gen) * F.softmax(copy_scores, -1)
        return gen_probs + copy_probs


class T5Copy(T5ForConditionalGeneration):
    """T5 conditional generation extended with a pointer/copy mechanism.

    A ``CopyGenerator`` head mixes the decoder's vocabulary distribution
    with a distribution over source tokens derived from cross-attention,
    so the model can copy tokens directly from the input (``src``).
    """

    def __init__(self, t5):
        super().__init__(t5)
        # Copy head sized to the backbone's hidden width.
        self.generator = CopyGenerator(self.t5.config["d_model"])
        self.init_weights()

    def forward(self,
                input_ids=None,
                attention_mask=None,
                decoder_input_ids=None,
                decoder_attention_mask=None,
                encoder_output=None,
                cache=None,
                labels=None,
                use_cache=True,
                output_attentions=False,
                output_hidden_states=False,
                **kwargs):
        """Encode/decode, apply the copy head, optionally compute the loss.

        Returns ``(loss?, probs, *decoder_extras, *encoder_output)`` where
        ``probs`` is the copy-mixed token distribution (already a
        probability, not raw logits). ``kwargs['src']`` must carry the
        source token ids whenever ``input_ids`` is None (e.g. during
        incremental decoding inside ``generate``).
        """
        # Encode if needed (training, first prediction pass).
        if encoder_output is None:
            encoder_output = self.t5.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states)

        hidden_states = encoder_output[0]

        if labels is not None and decoder_input_ids is None:
            # Get decoder inputs from shifting lm labels to the right.
            decoder_input_ids = self._shift_right(labels)

        # If decoding with past key value states, only the last token
        # should be given as an input.
        if cache is not None:
            assert (
                    labels is None
            ), "Decoder should not use cached key value states when training."
            if decoder_input_ids is not None:
                decoder_input_ids = decoder_input_ids[:, -1:]

        # Decode. Cross-attentions are always requested because the copy
        # head needs them to build the copy distribution.
        decoder_outputs = self.t5.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            cache=cache,
            encoder_hidden_states=hidden_states,
            encoder_attention_mask=attention_mask,
            use_cache=use_cache,
            output_attentions=True,
            output_hidden_states=output_hidden_states)

        sequence_output = decoder_outputs[0]

        if self.t5.config["tie_word_embeddings"]:
            # Rescale output before projecting on vocab
            # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
            sequence_output = sequence_output * (self.t5.config["d_model"]
                                                 ** -0.5)
            lm_logits = paddle.matmul(
                sequence_output, self.t5.shared.weight, transpose_y=True)
        else:
            lm_logits = self.lm_head(sequence_output)

        # BUGFIX: a batched Paddle tensor has no unambiguous truth value,
        # so `input_ids if input_ids else ...` was broken; test against None.
        src = input_ids if input_ids is not None else kwargs['src']
        # BUGFIX: the copy head is stored as `self.generator`; the original
        # called the nonexistent `self.copy`. decoder_outputs[-1][-1] is
        # presumably the last layer's cross-attention — TODO confirm against
        # the paddlenlp T5 decoder output ordering.
        lm_probs = self.generator(src, sequence_output,
                                  decoder_outputs[-1][-1], hidden_states,
                                  lm_logits)

        loss = None
        if labels is not None:
            # BUGFIX: the loss is now computed on the copy-mixed
            # probabilities (NLL over log-probs) instead of the pre-copy
            # logits; the original applied CrossEntropyLoss to `lm_logits`
            # before the copy step, so `CopyGenerator` never received any
            # gradient and stayed untrained. The small epsilon guards
            # log(0) for tokens with zero total mass.
            loss_fct = nn.NLLLoss(ignore_index=-100)
            loss = loss_fct(
                paddle.log(
                    lm_probs.reshape(shape=[-1, lm_probs.shape[-1]]) + 1e-12),
                labels.flatten())

        output = (lm_probs,) + decoder_outputs[1:] + encoder_output
        return ((loss,) + output) if loss is not None else output

    def prepare_encoder_decoder_kwargs_for_generation(self, input_ids,
                                                      model_kwargs):
        """Run the encoder once before generation.

        Drops decoder-/cross-attention-specific kwargs and the
        copy-specific ``src`` (the encoder does not accept it).
        """
        if "encoder_output" not in model_kwargs:
            # Retrieve encoder hidden states.
            encoder = self.get_encoder()
            encoder_kwargs = {
                argument: value
                for argument, value in model_kwargs.items()
                if not (argument.startswith("decoder_") or argument.startswith(
                    "cross_attn"))
            }
            # BUGFIX: tolerate a missing 'src' instead of raising KeyError.
            encoder_kwargs.pop('src', None)
            model_kwargs["encoder_output"] = encoder(input_ids,
                                                     **encoder_kwargs)

        return model_kwargs

    def prepare_inputs_for_generation(self,
                                      input_ids,
                                      cache=None,
                                      attention_mask=None,
                                      use_cache=None,
                                      encoder_output=None,
                                      **kwargs):
        """Assemble per-step decoder inputs, threading 'src' through for
        the copy head."""
        # Cut decoder_input_ids if past key/values are used.
        if cache is not None:
            input_ids = input_ids[:, -1:]
        return {
            "decoder_input_ids": input_ids,
            "cache": cache,
            "encoder_output": encoder_output,
            "attention_mask": attention_mask,
            "use_cache": use_cache,
            'src': kwargs['src']
        }

    @staticmethod
    def expand_inputs_for_generation(input_ids,
                                     expand_size,
                                     attention_mask=None,
                                     **model_kwargs):
        """Tile every batch-indexed kwarg (including 'src') `expand_size`
        times along the batch axis, as required by beam search / sampling."""
        index = paddle.tile(
            paddle.arange(input_ids.shape[0]).unsqueeze(-1),
            [1, expand_size]).reshape([-1])
        input_ids = paddle.index_select(input_ids, index)

        if attention_mask is not None:
            model_kwargs["attention_mask"] = paddle.index_select(attention_mask,
                                                                 index)

        if "token_type_ids" in model_kwargs:
            token_type_ids = model_kwargs["token_type_ids"]
            model_kwargs["token_type_ids"] = paddle.index_select(token_type_ids,
                                                                 index)

        if "position_ids" in model_kwargs:
            position_ids = model_kwargs["position_ids"]
            model_kwargs["position_ids"] = paddle.index_select(position_ids,
                                                               index)

        if "seq_len" in model_kwargs:
            seq_len = model_kwargs["seq_len"]
            model_kwargs["seq_len"] = paddle.index_select(seq_len, index)

        if "encoder_output" in model_kwargs:
            encoder_output = model_kwargs["encoder_output"]
            if isinstance(encoder_output, tuple):
                model_kwargs["encoder_output"] = (paddle.index_select(
                    encoder_output[0], index),) + encoder_output[1:]
            else:
                model_kwargs["encoder_output"] = paddle.index_select(
                    encoder_output, index)
        # ROBUSTNESS: only expand 'src' when it was actually supplied.
        if "src" in model_kwargs:
            model_kwargs['src'] = paddle.index_select(model_kwargs['src'],
                                                      index)
        return input_ids, model_kwargs


def simple_scatter(x, index, src):
    """Scatter-add attention weights into vocabulary slots.

    Args:
        x: zero tensor of shape (1, steps, vocab) receiving the mass.
        index: source token ids selecting vocab positions.
        src: attention weights of shape (1, steps, src_len).

    Returns:
        Tensor shaped like ``x`` with ``src`` accumulated at ``index``.
    """
    # Move vocab to the leading axis so scatter_nd_add indexes it directly.
    vocab_major = x.transpose([2, 0, 1])
    flat_ids = index.reshape([-1, 1])
    updates = src.transpose([2, 0, 1])
    scattered = paddle.scatter_nd_add(vocab_major, flat_ids, updates)
    # Restore the original (batch, steps, vocab) layout.
    return scattered.transpose([1, 2, 0])


if __name__ == '__main__':
    # Smoke test: load pretrained T5 weights into the copy-augmented model
    # and run beam-search generation on a tiny batch.
    model = T5Copy.from_pretrained('t5-small')
    from paddlenlp.transformers import T5Tokenizer

    tokenizer = T5Tokenizer.from_pretrained('t5-small')
    inputs = "PaddleNLP is a powerful NLP library with Awesome pre-trained models and easy-to-use interface, supporting wide-range of NLP tasks from research to industrial applications."
    input_ids = tokenizer(inputs)["input_ids"]
    # Duplicate the sequence to form a batch of 2, exercising batched decoding.
    input_ids = paddle.to_tensor(input_ids, dtype='int64').unsqueeze(0).tile([2, 1])

    # `src` carries the source token ids that the copy head scatters over.
    # NOTE(review): `use_faster=True` may dispatch to fused generation
    # kernels that bypass this class's custom forward (and hence the copy
    # mechanism) — TODO confirm with the installed paddlenlp version.
    outputs, _ = model.generate(
        input_ids=input_ids,
        forced_bos_token_id=102,
        decode_strategy="beam_search",
        num_beams=4,
        max_length=50,
        use_faster=True,
        src=input_ids
    )
    print(outputs)
    print(input_ids)
