import logging
import oneflow as flow
import oneflow.nn as nn
import oneflow.nn.functional as F
from oasr.module.norm import LayerNorm
from oasr.module.pos import PositionalEncoding, MixedPositionalEncoding
from oasr.module.ffn import PositionwiseFeedForward
from oasr.module.mha import MultiHeadedAttention, MultiHeadedSelfAttention, MultiHeadedCrossAttention
from oasr.data import PAD
from oasr.decoder.utils import get_transformer_decoder_mask

logger = logging.getLogger(__name__)



class TransformerDecoderLayer(nn.Module):
    """One Transformer decoder block: masked self-attention, encoder-decoder
    (source) cross-attention, then a position-wise feed-forward network, each
    sub-layer wrapped in a residual connection with LayerNorm and dropout.

    Supports both the pre-norm (``normalize_before=True``) and post-norm
    layouts, plus the ``concat_after`` variant that concatenates the attention
    input and output and projects them back to ``d_model`` instead of applying
    residual dropout to the attention output.
    """

    def __init__(self, n_heads, d_model, d_ff, slf_attn_dropout_rate, src_attn_dropout_rate, 
                 ffn_dropout_rate, residual_dropout_rate, normalize_before=True, concat_after=False, activation='relu',
                 apply_initialization=False):
        """
        :param int n_heads: number of attention heads
        :param int d_model: model (hidden) dimension
        :param int d_ff: inner dimension of the feed-forward sub-layer
        :param float slf_attn_dropout_rate: dropout inside self-attention
        :param float src_attn_dropout_rate: dropout inside cross-attention
        :param float ffn_dropout_rate: dropout inside the feed-forward network
        :param float residual_dropout_rate: dropout on each sub-layer output
            before the residual addition
        :param bool normalize_before: apply LayerNorm before each sub-layer
            (pre-norm) instead of after it (post-norm)
        :param bool concat_after: concat+linear instead of dropout+residual
            for the two attention sub-layers
        :param str activation: activation name for the feed-forward network
        :param bool apply_initialization: call init_parameters() on build
        """
        super(TransformerDecoderLayer, self).__init__()

        self.apply_initialization = apply_initialization

        self.self_attn = MultiHeadedAttention(n_heads, d_model, slf_attn_dropout_rate)
        self.src_attn = MultiHeadedAttention(n_heads, d_model, src_attn_dropout_rate)
        self.feed_forward = PositionwiseFeedForward(d_model, d_ff, ffn_dropout_rate, activation)

        # One LayerNorm per sub-layer: self-attn / cross-attn / FFN.
        self.norm1 = LayerNorm(d_model)
        self.norm2 = LayerNorm(d_model)
        self.norm3 = LayerNorm(d_model)

        # Residual dropouts, one per sub-layer.
        self.dropout1 = nn.Dropout(residual_dropout_rate)
        self.dropout2 = nn.Dropout(residual_dropout_rate)
        self.dropout3 = nn.Dropout(residual_dropout_rate)

        self.normalize_before = normalize_before
        self.concat_after = concat_after

        if self.concat_after:
            # Project [attn_input ; attn_output] (2 * d_model) back to d_model.
            self.concat_linear1 = nn.Linear(d_model * 2, d_model)
            self.concat_linear2 = nn.Linear(d_model * 2, d_model)

        if self.apply_initialization:
            self.init_parameters()

    def forward(self, tgt, tgt_mask, memory, memory_mask):
        """Compute decoded features

        :param flow.Tensor tgt: decoded previous target features (batch, max_time_out, size)
        :param flow.Tensor tgt_mask: mask for x (batch, max_time_out)
        :param flow.Tensor memory: encoded source features (batch, max_time_in, size)
        :param flow.Tensor memory_mask: mask for memory (batch, max_time_in)
        :return: tuple of (decoded features, tgt_mask passed through unchanged)
        """

        # --- Sub-layer 1: masked self-attention over the target sequence ---
        residual = tgt
        if self.normalize_before:
            tgt = self.norm1(tgt)
        if self.concat_after:
            # Note: the concat path applies no residual dropout by design.
            tgt_concat = flow.cat((tgt, self.self_attn(tgt, tgt, tgt, tgt_mask)), dim=-1)
            x = residual + self.concat_linear1(tgt_concat)
        else:
            x = residual + self.dropout1(self.self_attn(tgt, tgt, tgt, tgt_mask))
        if not self.normalize_before:
            x = self.norm1(x)

        # --- Sub-layer 2: cross-attention over the encoder memory ---
        residual = x
        if self.normalize_before:
            x = self.norm2(x)
        if self.concat_after:
            x_concat = flow.cat((x, self.src_attn(x, memory, memory, memory_mask)), dim=-1)
            x = residual + self.concat_linear2(x_concat)
        else:
            x = residual + self.dropout2(self.src_attn(x, memory, memory, memory_mask))
        if not self.normalize_before:
            x = self.norm2(x)

        # --- Sub-layer 3: position-wise feed-forward ---
        residual = x
        if self.normalize_before:
            x = self.norm3(x)
        x = residual + self.dropout3(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm3(x)

        return x, tgt_mask

    def inference(self, x, x_mask, memory, memory_mask=None, cache=None):
        """Decode one step (or a prefix) with a per-layer cache.

        :param flow.Tensor x: decoder input; callers pass either the full
            prefix or a single step — see TransformerDecoder.inference vs
            inference_with_cache
        :param flow.Tensor x_mask: self-attention mask (may be None)
        :param dict cache: {'slf': ..., 'src': ...} from the previous step
        :return: (output features, new cache dict)
        """
        # x: [batch_size, 1, model_size]
        # memory: [batch_size, ...] — original comment was truncated;
        # presumably [batch_size, time_steps, model_size] per the callers.

        residual = x
        if self.normalize_before:
            x = self.norm1(x)

        # NOTE(review): slf_cache is read here but never refreshed — the
        # self-attention below is the plain forward call, so the 'slf' entry
        # is returned unchanged. Confirm whether a caching self-attention
        # path (analogous to inference_src) was intended.
        slf_cache = cache['slf'] if cache is not None else None

        slf_out = self.self_attn(x, x, x, x_mask)
        if self.concat_after:
            x_concat = flow.cat((x, slf_out), dim=-1)
            x = residual + self.concat_linear1(x_concat)
        else:
            x = residual + self.dropout1(slf_out)
        if not self.normalize_before:
            x = self.norm1(x)

        residual = x
        if self.normalize_before:
            x = self.norm2(x)
        # Cross-attention keys/values over the (fixed) memory are cached.
        src_cache = cache['src'] if cache is not None else None
        src_out, src_cache = self.src_attn.inference_src(x, memory, memory, memory_mask, cache=src_cache)

        if self.concat_after:
            x_concat = flow.cat((x, src_out), dim=-1)
            x = residual + self.concat_linear2(x_concat)
        else:
            x = residual + self.dropout2(src_out)
        if not self.normalize_before:
            x = self.norm2(x)

        residual = x
        if self.normalize_before:
            x = self.norm3(x)
        x = residual + self.dropout3(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm3(x)

        cache = {
            'slf': slf_cache,
            'src': src_cache
        }
        return x, cache

    def init_parameters(self):
        """Initialize the attention and feed-forward sub-modules."""
        self.self_attn.init_parameters()
        self.src_attn.init_parameters()
        self.feed_forward.init_parameters()
        logger.debug('===== Initialize %s =====' % self.__class__.__name__)


class TransformerDecoder(nn.Module):
    """Standard Transformer decoder: token embedding + positional encoding,
    a stack of TransformerDecoderLayer blocks, and a linear projection onto
    the output vocabulary.
    """

    def __init__(self, output_size, d_model=256, n_heads=4, d_ff=2048, n_blocks=6,
                 pos_dropout_rate=0.0, slf_attn_dropout_rate=0.0, src_attn_dropout_rate=0.0, ffn_dropout_rate=0.0,
                 residual_dropout_rate=0.1, activation='relu', normalize_before=True, concat_after=False,
                 share_embedding=False, apply_initialization=False):
        """
        :param int output_size: vocabulary size (embedding rows / output units)
        :param int d_model: model dimension
        :param int n_heads: attention heads per block
        :param int d_ff: feed-forward inner dimension
        :param int n_blocks: number of decoder blocks
        :param bool normalize_before: pre-norm layout (adds a final LayerNorm)
        :param bool share_embedding: tie the output projection weight to the
            embedding weight
        :param bool apply_initialization: explicitly initialize parameters
        """
        super(TransformerDecoder, self).__init__()

        self.decoder_type = 'transformer'
        self.normalize_before = normalize_before
        self.apply_initialization = apply_initialization

        self.d_model = d_model

        self.embedding = nn.Embedding(output_size, d_model)
        self.pos_encoding = PositionalEncoding(d_model, pos_dropout_rate)

        self.blocks = nn.ModuleList([
            TransformerDecoderLayer(
                n_heads, d_model, d_ff, slf_attn_dropout_rate, src_attn_dropout_rate,
                ffn_dropout_rate, residual_dropout_rate, normalize_before=normalize_before, concat_after=concat_after,
                activation=activation) for _ in range(n_blocks)
        ])

        if self.normalize_before:
            # A pre-norm stack needs one final LayerNorm on the block output.
            self.after_norm = LayerNorm(d_model)

        self.output_layer = nn.Linear(d_model, output_size)

        if self.apply_initialization:
            self.init_parameters()

        if share_embedding:
            # Tying is done after init_parameters so the tie is not clobbered.
            assert self.embedding.weight.size() == self.output_layer.weight.size()
            self.output_layer.weight = self.embedding.weight
            logger.info('Tie the weights between the embedding and output layer.')

    def _decode(self, tokens, memory, memory_mask):
        """Shared trunk of forward()/recognize(): embed, add positional
        encoding, run every decoder block, then apply the final LayerNorm when
        in pre-norm mode.

        :return: (hidden states [batch, tgt_len, d_model], decoder mask)
        """
        dec_output = self.embedding(tokens)
        dec_output = self.pos_encoding(dec_output)
        dec_mask = get_transformer_decoder_mask(tokens)

        for block in self.blocks:
            dec_output, dec_mask = block(dec_output, dec_mask, memory, memory_mask)

        if self.normalize_before:
            dec_output = self.after_norm(dec_output)
        return dec_output, dec_mask

    def forward(self, targets, memory, memory_mask):
        """Teacher-forced decoding.

        :param flow.Tensor targets: target token ids [batch, tgt_len]
        :param flow.Tensor memory: encoder output [batch, src_len, d_model]
        :param flow.Tensor memory_mask: encoder padding mask
        :return: (logits [batch, tgt_len, output_size], decoder mask)
        """
        dec_output, dec_mask = self._decode(targets, memory, memory_mask)
        logits = self.output_layer(dec_output)
        return logits, dec_mask

    def recognize(self, preds, memory, memory_mask, last=True):
        """Score a hypothesis prefix.

        :param bool last: if True, return log-probs for the final position
            only; otherwise for all positions
        :return: log-probabilities over the vocabulary
        """
        dec_output, _ = self._decode(preds, memory, memory_mask)
        logits = self.output_layer(dec_output)
        return F.log_softmax(logits[:, -1] if last else logits, dim=-1)

    def inference(self, preds, memory, memory_mask=None, cache=None):
        """One search step that re-runs the full prefix each call.

        :param flow.Tensor preds: full token prefix [batch_size, label_length]
        :param flow.Tensor memory: encoder output [batch_size, time_steps, model_size]
        :param flow.Tensor memory_mask: [batch_size, 1, time_steps]
        :param dict cache: per-block cache {'decoder_i': ...} from prior step
        :return: (log-probs of the last position, updated cache)
        """
        assert preds.dim() == 2
        dec_output = self.embedding(preds)
        dec_output = self.pos_encoding(dec_output)
        mask = get_transformer_decoder_mask(preds)

        new_cache = {}
        for i, block in enumerate(self.blocks):
            block_cache = cache['decoder_%d' % i] if cache is not None else None
            dec_output, block_cache = block.inference(dec_output, mask, memory, memory_mask, cache=block_cache)
            new_cache['decoder_%d' % i] = block_cache

        if self.normalize_before:
            dec_output = self.after_norm(dec_output)

        logits = self.output_layer(dec_output)  # [batch_size, label_length, output_size]

        log_probs = F.log_softmax(logits[:, -1, :], dim=-1)

        return log_probs, new_cache

    def inference_with_cache(self, preds, memory, memory_mask=None, cache=None, step=0):
        """One search step that feeds only the most recent token.

        NOTE(review): the self-attention mask is None and only the last token
        is embedded, so correctness relies on the blocks caching earlier
        self-attention state internally — TODO confirm the attention modules
        actually do this (see TransformerDecoderLayer.inference).

        :param flow.Tensor preds: token prefix [batch_size, label_length]
        :param int step: decoding step index for the positional encoding
        :return: (log-probs of the last position, updated cache)
        """
        assert preds.dim() == 2
        dec_output = self.embedding(preds[:, -1].unsqueeze(-1))
        dec_output = self.pos_encoding.inference(dec_output, step=step)
        mask = None

        new_cache = {}
        for i, block in enumerate(self.blocks):
            block_cache = cache['decoder_%d' % i] if cache is not None else None
            dec_output, block_cache = block.inference(dec_output, mask, memory, memory_mask, cache=block_cache)
            new_cache['decoder_%d' % i] = block_cache

        if self.normalize_before:
            dec_output = self.after_norm(dec_output)

        logits = self.output_layer(dec_output)  # [batch_size, 1, output_size]

        log_probs = F.log_softmax(logits[:, -1, :], dim=-1)

        return log_probs, new_cache

    def init_parameters(self):
        """Initialize the embedding (scaled normal with the PAD row zeroed),
        the output layer (Xavier uniform, zero bias), and every block."""
        nn.init.normal_(self.embedding.weight, mean=0., std=self.d_model ** -0.5)
        nn.init.constant_(self.embedding.weight[PAD], 0.)
        nn.init.xavier_uniform_(self.output_layer.weight)
        nn.init.constant_(self.output_layer.bias, 0.)

        for block in self.blocks:
            block.init_parameters()

        logger.info('===== Initialized TransformerDecoder =====')


class TransformerDecoderLayerV2(nn.Module):
    """Transformer decoder block (V2) built on attention modules that also
    return their attention weights; the cross-attention memory may have a
    different feature dimension (``source_dim``) than the decoder model.

    NOTE(review): in pre-norm mode this layer takes the residual AFTER
    LayerNorm (residual = normalized input), unlike TransformerDecoderLayer
    which keeps the un-normalized input as the residual — confirm this is
    intentional.
    """

    def __init__(self, n_heads, d_model, d_ff, source_dim, slf_attn_dropout_rate=0.0, src_attn_dropout_rate=0.0,
                 ffn_dropout_rate=0.0, residual_dropout_rate=0.1, normalize_before=True,
                 concat_after=False, share_slf_qvk_proj=False, share_src_vk_proj=False, activation='relu'):
        """
        :param int source_dim: feature dimension of the encoder memory
        :param bool share_slf_qvk_proj: share Q/K/V projections in self-attn
        :param bool share_src_vk_proj: share K/V projections in cross-attn
        (remaining parameters as in TransformerDecoderLayer)
        """
        super(TransformerDecoderLayerV2, self).__init__()

        self.slf_attn = MultiHeadedSelfAttention(n_heads, d_model, slf_attn_dropout_rate, share_qvk_proj=share_slf_qvk_proj)
        self.src_attn = MultiHeadedCrossAttention(n_heads, d_model, source_dim, src_attn_dropout_rate, share_vk_proj=share_src_vk_proj)
        self.feed_forward = PositionwiseFeedForward(d_model, d_ff, ffn_dropout_rate, activation)

        # One LayerNorm per sub-layer: self-attn / cross-attn / FFN.
        self.norm1 = LayerNorm(d_model)
        self.norm2 = LayerNorm(d_model)
        self.norm3 = LayerNorm(d_model)

        # Residual dropouts, one per sub-layer.
        self.dropout1 = nn.Dropout(residual_dropout_rate)
        self.dropout2 = nn.Dropout(residual_dropout_rate)
        self.dropout3 = nn.Dropout(residual_dropout_rate)

        self.normalize_before = normalize_before
        self.concat_after = concat_after

        if self.concat_after:
            # Project [attn_input ; attn_output] (2 * d_model) back to d_model.
            self.concat_linear1 = nn.Linear(d_model * 2, d_model)
            self.concat_linear2 = nn.Linear(d_model * 2, d_model)

    def forward(self, tgt, tgt_mask, memory, memory_mask):
        """Compute decoded features

        :param flow.Tensor tgt: decoded previous target features (batch, max_time_out, size)
        :param flow.Tensor tgt_mask: mask for x (batch, max_time_out)
        :param flow.Tensor memory: encoded source features (batch, max_time_in, size)
        :param flow.Tensor memory_mask: mask for memory (batch, max_time_in)
        :return: (decoded features, dict of self/src attention weights)
        """

        # --- Sub-layer 1: masked self-attention ---
        if self.normalize_before:
            tgt = self.norm1(tgt)
        residual = tgt
        slf_attn_out, slf_attn_weights = self.slf_attn(tgt, tgt_mask)
        if self.concat_after:
            x = residual + self.concat_linear1(flow.cat((tgt, slf_attn_out), dim=-1))
        else:
            x = residual + self.dropout1(slf_attn_out)
        if not self.normalize_before:
            x = self.norm1(x)

        # --- Sub-layer 2: cross-attention over the encoder memory ---
        if self.normalize_before:
            x = self.norm2(x)
        residual = x
        slf_attn_out_input = x  # kept for the concat path below
        src_attn_out, src_attn_weights = self.src_attn(x, memory, memory_mask.unsqueeze(1))
        if self.concat_after:
            x = residual + self.concat_linear2(flow.cat((slf_attn_out_input, src_attn_out), dim=-1))
        else:
            x = residual + self.dropout2(src_attn_out)
        if not self.normalize_before:
            x = self.norm2(x)

        # --- Sub-layer 3: position-wise feed-forward ---
        if self.normalize_before:
            x = self.norm3(x)
        residual = x
        x = residual + self.dropout3(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm3(x)

        return x, {'slf_attn_weights': slf_attn_weights, 'src_attn_weights': src_attn_weights}

    def inference(self, x, xmask, memory, memory_mask=None, cache=None):
        """Cached single-step decoding.

        :param dict cache: {'slf': ..., 'src': ...} from the previous step,
            or None on the first step
        :return: (output, attention-weight dict, updated cache dict)
        """
        if cache is None:
            # Replaces the original mutable default argument; a fresh dict is
            # built per call so state can never leak between invocations.
            cache = {'slf': None, 'src': None}

        if self.normalize_before:
            x = self.norm1(x)
        residual = x
        # BUG FIX: the original passed the literal list ['slf'] as the cache;
        # it must be the 'slf' entry of the cache dict.
        slf_attn_out, slf_attn_weight, slf_cache = self.slf_attn.inference(x, xmask, cache=cache['slf'])
        if self.concat_after:
            x = residual + self.concat_linear1(flow.cat((x, slf_attn_out), dim=-1))
        else:
            x = residual + self.dropout1(slf_attn_out)
        if not self.normalize_before:
            x = self.norm1(x)

        if self.normalize_before:
            x = self.norm2(x)
        residual = x
        src_attn_out, src_attn_weight, src_cache = self.src_attn.inference(x, memory, memory_mask.unsqueeze(1), cache['src'])
        if self.concat_after:
            x = residual + self.concat_linear2(flow.cat((x, src_attn_out), dim=-1))
        else:
            x = residual + self.dropout2(src_attn_out)
        if not self.normalize_before:
            x = self.norm2(x)

        if self.normalize_before:
            x = self.norm3(x)
        residual = x
        x = residual + self.dropout3(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm3(x)

        return x, {'slf_attn_weight': slf_attn_weight, 'src_attn_weight': src_attn_weight}, {'slf': slf_cache, 'src': src_cache}


class TransformerDecoderV2(nn.Module):
    """Transformer decoder (V2): embedding + mixed positional encoding, a
    stack of TransformerDecoderLayerV2 blocks (which expose their attention
    weights), and a linear projection onto the vocabulary.
    """

    def __init__(self, vocab_size, d_model=256, n_heads=4, d_ff=2048, source_dim=2048, n_blocks=6,
                 pos_dropout_rate=0.0, slf_attn_dropout_rate=0.0, src_attn_dropout_rate=0.0, ffn_dropout_rate=0.0,
                 residual_dropout_rate=0.1, activation='relu', normalize_before=True, concat_after=False, share_slf_qvk_proj=False,
                 share_src_vk_proj=False, share_embedding=False):
        """
        :param int vocab_size: vocabulary size (embedding rows / output units)
        :param int source_dim: feature dimension of the encoder memory
        :param bool share_embedding: tie the output projection weight to the
            embedding weight
        (remaining parameters are forwarded to TransformerDecoderLayerV2)
        """
        super(TransformerDecoderV2, self).__init__()

        self.decoder_type = 'transformer'
        self.normalize_before = normalize_before
        self.d_model = d_model

        self.embedding = nn.Embedding(vocab_size, d_model)
        self.pos_encoding = MixedPositionalEncoding(d_model, pos_dropout_rate)

        layers = []
        for _ in range(n_blocks):
            layers.append(TransformerDecoderLayerV2(
                n_heads, d_model, d_ff, source_dim, slf_attn_dropout_rate, src_attn_dropout_rate,
                ffn_dropout_rate, residual_dropout_rate, normalize_before=normalize_before,
                concat_after=concat_after, share_slf_qvk_proj=share_slf_qvk_proj,
                share_src_vk_proj=share_src_vk_proj, activation=activation))
        self.blocks = nn.ModuleList(layers)

        if self.normalize_before:
            # A pre-norm stack needs one final LayerNorm on the block output.
            self.after_norm = LayerNorm(d_model)

        self.output_layer = nn.Linear(d_model, vocab_size)

        if share_embedding:
            assert self.embedding.weight.size() == self.output_layer.weight.size()
            self.output_layer.weight = self.embedding.weight
            logger.info('Tie the weights between the embedding and output layer.')

    def forward(self, targets, memory, memory_mask):
        """Teacher-forced decoding.

        :param flow.Tensor targets: target token ids [batch, tgt_len]
        :param flow.Tensor memory: encoder output [batch, src_len, source_dim]
        :param flow.Tensor memory_mask: encoder padding mask
        :return: (logits, per-block attention-weight dict)
        """
        hidden, _ = self.pos_encoding(self.embedding(targets))
        tgt_mask = get_transformer_decoder_mask(targets)

        weights = {}
        for idx, layer in enumerate(self.blocks):
            hidden, layer_weights = layer(hidden, tgt_mask, memory, memory_mask)
            weights['dec_block_%d' % idx] = layer_weights

        if self.normalize_before:
            hidden = self.after_norm(hidden)

        return self.output_layer(hidden), weights

    def inference(self, preds, memory, memory_mask=None, cache=None):
        """One search step over the current prefix with per-block caches.

        :param flow.Tensor preds: token prefix [batch_size, label_length]
        :param list cache: per-block cache dicts from the previous step
        :return: (log-probs of the last position, new caches, attention weights)
        """
        assert preds.dim() == 2
        hidden, _ = self.pos_encoding.inference(self.embedding(preds))
        tgt_mask = get_transformer_decoder_mask(preds)

        next_caches = []
        weights = {}
        for idx, layer in enumerate(self.blocks):
            layer_cache = {'slf': None, 'src': None} if cache is None else cache[idx]
            hidden, layer_weights, layer_cache = layer.inference(hidden, tgt_mask, memory, memory_mask, cache=layer_cache)
            weights['dec_block_%d' % idx] = layer_weights
            next_caches.append(layer_cache)

        if self.normalize_before:
            hidden = self.after_norm(hidden)

        logits = self.output_layer(hidden)
        log_probs = F.log_softmax(logits[:, -1, :], dim=-1)

        return log_probs, next_caches, weights


