import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import math
import re
import paddle.nn.initializer as init
from paddlenlp.embeddings import TokenEmbedding
from paddlenlp.transformers import AutoModel


class MyTokenEmbedding(TokenEmbedding):
    """TokenEmbedding whose vocabulary is filtered down to multi-character
    words containing at least one CJK character (plus ``[PAD]`` at index 0),
    shrinking the embedding table accordingly."""

    def __init__(self, embedding_name):
        # trainable=False: the pretrained vectors are meant to stay frozen.
        super().__init__(embedding_name, trainable=False)
        self.filter_useless()

    def filter_useless(self):
        """Drop every vocab entry that is not a multi-char CJK word and
        rebuild the weight matrix and vocab maps to match."""
        keep = [self._word_to_idx['[PAD]']]
        cjk = re.compile(r'[\u4e00-\u9fa5]')
        for i, word in enumerate(self._idx_to_word):
            if len(word) > 1 and cjk.search(word):
                keep.append(i)
        new_weight = paddle.index_select(self.weight, paddle.to_tensor(keep))
        del self.weight
        self.weight = paddle.create_parameter(new_weight.shape, 'float32')
        # BUG FIX: was `stop_gradinet` (typo) — the attribute assignment was a
        # silent no-op, leaving the rebuilt parameter trainable even though the
        # embedding was constructed with trainable=False.
        self.weight.stop_gradient = True
        self.weight.set_value(new_weight)
        self._padding_idx = 0
        self._idx_to_word = [self._idx_to_word[x] for x in keep]
        # BUG FIX: rebuild the word->index map so it stays consistent with the
        # filtered vocabulary; previously it retained stale pre-filter indices.
        self._word_to_idx = {w: i for i, w in enumerate(self._idx_to_word)}
        print('Now vocab size: {}'.format(self.weight.shape[0]))


class RelPositionEmbedding(nn.Layer):
    """Fixed sinusoidal embedding table for relative offsets in
    ``[-(max_len-1), max_len-1]`` (2*max_len - 1 entries of size ``dim``)."""

    def __init__(self, max_len, dim):
        super(RelPositionEmbedding, self).__init__()
        self.max_len = max_len
        num_embedding = max_len * 2 - 1
        half_dim = int(dim // 2)
        # Standard transformer sinusoid frequency schedule (base 10000).
        emb = math.log(10000) / (half_dim - 1)
        emb = paddle.exp(paddle.arange(half_dim, dtype=paddle.float32) * -emb)
        emb = paddle.arange(-max_len + 1, max_len, dtype=paddle.float32).unsqueeze(1) * emb.unsqueeze(0)
        emb = paddle.concat([paddle.sin(emb), paddle.cos(emb)], 1).reshape([num_embedding, -1])
        if dim % 2 == 1:
            print('embedding dim is odd')
            # BUG FIX: paddle.zeros takes the shape as a list/tuple; the old
            # call paddle.zeros(num_embedding, 1) passed 1 as the *dtype*
            # argument and raised whenever dim was odd.
            emb = paddle.concat([emb, paddle.zeros([num_embedding, 1])], 1)
        self.register_buffer('emb', emb)
        self.dim = dim

    def forward(self, pos):
        """Look up embeddings for integer offsets ``pos``.

        Returns a tensor of shape ``pos.shape + [dim]``.
        """
        # Shift offsets from [-(max_len-1), max_len-1] to non-negative
        # table indices [0, 2*max_len-2].
        pos = pos + (self.max_len - 1)
        pos_shape = pos.shape
        pos_emb = self.emb[pos.flatten()]
        pos_emb = pos_emb.reshape(list(pos_shape) + [self.dim])
        return pos_emb


class MultiHeadAttentionRel(nn.Layer):
    """Multi-head self-attention with relative position embeddings.

    The attention logits combine a content term ``A_C`` (query + learned
    bias ``u``, against keys) and a position term ``B_D`` (query + learned
    bias ``v``, against per-pair relative position embeddings) — the
    decomposition used by Transformer-XL / FLAT-style models.
    """

    def __init__(self, input_size, num_heads, scaled=True, attn_dropout=0.1):
        # input_size: total model width, split evenly across num_heads.
        # scaled: when True, divide attention logits by sqrt(per_head_size).
        super().__init__()
        self.num_heads = num_heads
        self.scaled = scaled
        self.dropout = nn.Dropout(attn_dropout)
        self.w_k = nn.Linear(input_size, input_size)
        self.w_q = nn.Linear(input_size, input_size)
        self.w_v = nn.Linear(input_size, input_size)
        # w_r projects the relative position embeddings into head space.
        self.w_r = nn.Linear(input_size, input_size)

        # NOTE: assumes input_size is divisible by num_heads — not checked here.
        self.per_head_size = input_size // self.num_heads

        # Learned per-head global query biases: u for the content term (A_C),
        # v for the position term (B_D).
        self.u = paddle.create_parameter([self.num_heads, self.per_head_size], 'float32',
                                         default_initializer=init.XavierUniform())
        self.v = paddle.create_parameter([self.num_heads, self.per_head_size], 'float32',
                                         default_initializer=init.XavierUniform())

    def forward(self, key, query, value, pos, key_mask):
        """Compute relative-position attention.

        Args:
            key/query/value: (batch, seq_len, input_size) tensors.
            pos: relative position embeddings; the reshape below implies a
                4-D layout (batch, q_len, k_len, input_size) — confirm at caller.
            key_mask: (batch, key_len); 1 = attend, 0 = masked out.

        Returns:
            (batch, q_len, input_size) attention output.
        """
        key = self.w_k(key)
        query = self.w_q(query)
        value = self.w_v(value)
        rel_pos_embedding = self.w_r(pos)

        batch, _, hidden_size = key.shape

        # batch * seq_len * n_head * d_head
        key = paddle.reshape(key, [batch, -1, self.num_heads, self.per_head_size])
        query = paddle.reshape(query, [batch, -1, self.num_heads, self.per_head_size])
        value = paddle.reshape(value, [batch, -1, self.num_heads, self.per_head_size])
        # Split the last dim into heads: (batch, q_len, k_len, n_head, d_head).
        rel_pos_embedding = paddle.reshape(rel_pos_embedding,
                                           list(rel_pos_embedding.shape[:3]) + [self.num_heads, self.per_head_size])

        # batch * n_head * seq_len * d_head
        # (key goes straight to (batch, n_head, d_head, key_len) so it is
        # already transposed for the matmul below)
        key = key.transpose([0, 2, 3, 1])
        query = query.transpose([0, 2, 1, 3])
        value = value.transpose([0, 2, 1, 3])

        # batch * n_head * d_head * key_len
        # key = key.transpose(-1, -2)

        # Content term: (query + u) @ key -> (batch, n_head, q_len, k_len).
        u_for_c = self.u.unsqueeze(0).unsqueeze(-2)
        query_and_u_for_c = query + u_for_c
        A_C = paddle.matmul(query_and_u_for_c, key)

        # Position term: per-pair matmul of (query + v) with the relative
        # position embedding. rel_pos_embedding_for_b is
        # (batch, n_head, q_len, d_head, k_len); query_for_b adds a singleton
        # axis so the matmul contracts d_head, yielding (batch, n_head, q_len, 1, k_len).
        rel_pos_embedding_for_b = rel_pos_embedding.transpose([0, 3, 1, 4, 2])
        query_for_b = query.reshape([batch, self.num_heads, query.shape[2], 1, self.per_head_size])
        query_for_b_and_v_for_d = query_for_b + self.v.reshape([1, self.num_heads, 1, 1, self.per_head_size])
        B_D = paddle.matmul(query_for_b_and_v_for_d, rel_pos_embedding_for_b).squeeze(-2)

        attn_score_raw = A_C + B_D

        if self.scaled:
            attn_score_raw = attn_score_raw / math.sqrt(self.per_head_size)

        # Broadcast key_mask over heads and query positions; masked keys get
        # a large negative logit so softmax drives them to ~0.
        mask = key_mask.unsqueeze(1).unsqueeze(1)
        attn_score_raw_masked = attn_score_raw - (1 - mask) * 1e15
        attn_score = F.softmax(attn_score_raw_masked, -1)

        attn_score = self.dropout(attn_score)

        value_weighted_sum = paddle.matmul(attn_score, value)

        # Merge heads back: (batch, q_len, hidden_size).
        result = value_weighted_sum.transpose([0, 2, 1, 3]).reshape([batch, -1, hidden_size])

        return result


class FlatEncoderLayer(nn.TransformerEncoderLayer):
    """Transformer encoder layer whose stock self-attention is swapped for
    MultiHeadAttentionRel (relative-position-aware attention)."""

    def __init__(self,
                 d_model=160,
                 nhead=8,
                 dim_feedforward=512,
                 dropout=0.1,
                 activation="relu"):
        super().__init__(d_model, nhead, dim_feedforward, dropout, activation)
        # Replace the parent's attention module with the relative variant
        # (unscaled: scaled=False).
        del self.self_attn
        self.self_attn = MultiHeadAttentionRel(d_model, nhead, False, dropout)

    def forward(self, src, src_mask, pos_embedding):
        """Residual block: relative self-attention, then the feed-forward
        network, each wrapped in pre- or post-layernorm per ``normalize_before``."""
        # --- self-attention sub-layer ---
        attn_in = self.norm1(src) if self.normalize_before else src
        attn_out = self.self_attn(attn_in, attn_in, attn_in, pos_embedding, src_mask)
        src = src + self.dropout1(attn_out)
        if not self.normalize_before:
            src = self.norm1(src)

        # --- feed-forward sub-layer ---
        ffn_in = self.norm2(src) if self.normalize_before else src
        ffn_out = self.linear2(self.dropout(self.activation(self.linear1(ffn_in))))
        src = src + self.dropout2(ffn_out)
        if not self.normalize_before:
            src = self.norm2(src)
        return src


class FlatEmbedding(nn.Layer):
    """Fuses BERT character vectors with pretrained word (lattice) vectors,
    projecting both into a shared ``flat_hidden_size`` space."""

    def __init__(self, args):
        super().__init__()
        self.hidden_size = args.flat_hidden_size
        self.bert = AutoModel.from_pretrained(args.model_path)
        self.w2v = MyTokenEmbedding(args.embedding_name)
        # Add a projection only when the source dimension differs from the
        # target hidden size; otherwise pass through unchanged.
        self.w2v_linear = (nn.Identity()
                           if self.w2v.embedding_dim == self.hidden_size
                           else nn.Linear(self.w2v.embedding_dim, self.hidden_size))
        bert_dim = self.bert.config['hidden_size']
        self.bert_linear = (nn.Identity()
                            if bert_dim == self.hidden_size
                            else nn.Linear(bert_dim, self.hidden_size))

        self.dropout = nn.Dropout(0.1)
        self.layer_norm = nn.LayerNorm(self.hidden_size)

    def forward(self, char_ids, word_ids, char_mask, word_mask):
        """Return fused char+word embeddings aligned to the word sequence length."""
        char_vec = self.bert_linear(self.bert(char_ids)[0]) * char_mask[..., None]
        word_vec = self.w2v_linear(self.w2v(word_ids)) * word_mask[..., None]

        batch_size, word_len, embedding_size = word_vec.shape

        # Right-pad the char stream with zeros so both streams share word_len.
        pad = paddle.zeros((batch_size, word_len - char_vec.shape[1], embedding_size))
        char_vec = paddle.concat([char_vec, pad], 1)

        fused = self.layer_norm(self.dropout(char_vec + word_vec))
        return fused


class FlatTransformer(nn.Layer):
    """FLAT-style lattice transformer: relative position features built from
    span starts/ends feed a single relative-attention encoder layer."""

    def __init__(self, args):
        super().__init__()
        hidden_size = 160
        args.flat_hidden_size = hidden_size
        self.config = {'hidden_size': hidden_size}

        self.pe = RelPositionEmbedding(args.max_lattice_len, hidden_size)
        self.pos_dense = nn.Sequential(nn.Linear(hidden_size * 4, hidden_size), nn.ReLU())
        self.embeddings = FlatEmbedding(args)
        self.encoder = nn.LayerList([FlatEncoderLayer()])

    def forward(self, input_ids, word_ids, word_mask, char_word_mask, char_word_s, char_word_e, attention_mask):
        max_len = attention_mask.sum(1).max()

        # Four relative distances between span boundaries: ss, se, es, ee.
        boundary_pairs = ((char_word_s, char_word_s),
                          (char_word_s, char_word_e),
                          (char_word_e, char_word_s),
                          (char_word_e, char_word_e))
        rel_embs = [self.pe(a.unsqueeze(2) - b.unsqueeze(1)) for a, b in boundary_pairs]
        pos_embedding = self.pos_dense(paddle.concat(rel_embs, -1))

        hidden = self.embeddings(input_ids, word_ids, attention_mask, word_mask)
        for layer in self.encoder:
            hidden = layer(hidden, char_word_mask, pos_embedding)

        # Keep only character positions and zero out padded ones.
        char_vec = hidden[:, :max_len] * attention_mask[..., None]
        return (char_vec, )

