import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np



class WordEmbedding(nn.Module):
    """Word embedding layer initialized from a pretrained vector file, an
    explicit weight tensor, or random uniform values.

    Exactly one initialization source should be supplied:
      * pretrain_file: path to a whitespace-separated "token v1 v2 ..." file;
      * weight: a (vocab_size, embedding_size) tensor;
      * embedding_size + vocab_size: random uniform initialization.

    The matrix is frozen by default (require_grad=False); use start_grad()
    to re-enable training.
    """

    def __init__(self, pretrain_file=None, embedding_size=None, vocab_size=None, weight=None, require_grad=False):
        super().__init__()
        self.pretrain_file = pretrain_file
        self.embedding_size = embedding_size
        self.vocab_size = vocab_size

        if pretrain_file is not None:
            self.weight = nn.Parameter(self.load_pretrain(pretrain_file))
        elif weight is not None:
            self.weight = nn.Parameter(weight)
            self.vocab_size, self.embedding_size = self.weight.shape
        elif embedding_size and vocab_size:
            # BUG FIX: torch.FloatTensor([vocab_size, embedding_size]) built a
            # 1-D tensor holding those two values; we need a (vocab, dim) matrix.
            self.weight = nn.Parameter(torch.empty(vocab_size, embedding_size))
            nn.init.uniform_(self.weight)
        else:
            # Fail early with a clear message instead of an AttributeError
            # from stop_grad() below when self.weight was never created.
            raise ValueError('WordEmbedding requires pretrain_file, weight, or embedding_size+vocab_size')
        if not require_grad:
            self.stop_grad()

    def load_pretrain(self, pretrain_file=None):
        """Load vectors from `pretrain_file` (defaults to self.pretrain_file).

        Each line is a token followed by its float components. Records
        vocab_size / embedding_size on self and returns a float32 tensor of
        shape (vocab_size, embedding_size).
        """
        # BUG FIX: this method previously accepted no argument but was called
        # with one from __init__; accept an optional path for both call styles.
        path = pretrain_file if pretrain_file is not None else self.pretrain_file
        embeddings = []
        # Use a context manager so the file handle is closed deterministically.
        with open(path, encoding='utf-8') as fin:
            for line in fin:
                vocab, *vector = line.strip().split()
                embeddings.append(np.array(vector).astype('float32'))
        embeddings = np.vstack(embeddings)
        weight = torch.from_numpy(embeddings)
        self.vocab_size, self.embedding_size = embeddings.shape
        print('Embedding shape: {}'.format(embeddings.shape))
        return weight

    def forward(self, input_ids):
        """Look up embeddings for `input_ids` (integer tensor of any shape)."""
        return F.embedding(input_ids, self.weight)

    def stop_grad(self):
        """Freeze the embedding matrix."""
        self.weight.requires_grad = False

    def start_grad(self):
        """Unfreeze the embedding matrix."""
        self.weight.requires_grad = True

    @classmethod
    def from_pretrained(cls, pretrain_file):
        """Alternate constructor: build from a pretrained vector file."""
        return cls(pretrain_file)

class RelPositionEmbedding(nn.Module):
    """Fixed sinusoidal embedding table for relative offsets.

    Builds a (2*max_len - 1, dim) table covering offsets in
    [-(max_len-1), max_len-1]; forward() indexes it with (possibly negative)
    relative positions and appends a trailing `dim` axis to the input shape.
    """

    def __init__(self, max_len, dim):
        super(RelPositionEmbedding, self).__init__()
        self.max_len = max_len
        self.dim = dim
        table_size = 2 * max_len - 1
        half = int(dim // 2)
        # standard sinusoidal frequency schedule over half the channels
        scale = math.log(10000) / (half - 1)
        inv_freq = torch.exp(torch.arange(half, dtype=torch.float) * -scale)
        offsets = torch.arange(-max_len + 1, max_len, dtype=torch.float)
        angles = offsets.unsqueeze(1) * inv_freq.unsqueeze(0)
        table = torch.cat([torch.sin(angles), torch.cos(angles)], dim=1).view(table_size, -1)
        if dim % 2 == 1:
            print('embedding dim is odd')
            # pad one zero channel so the table width matches `dim`
            table = torch.cat([table, torch.zeros(table_size, 1)], dim=1)
        # non-trainable buffer-like parameter (kept as Parameter for state dict)
        self.emb = nn.Parameter(table, requires_grad=False)

    def forward(self, pos):
        # shift offsets so the most negative one maps to row 0
        shifted = pos + (self.max_len - 1)
        rows = self.emb[shifted.view(-1)]
        return rows.reshape(list(shifted.shape) + [self.dim])


class MultiHeadAttentionRel(nn.Module):
    def __init__(self, input_size, num_heads, scaled=True, attn_dropout=0.1):
        super().__init__()
        self.num_heads = num_heads
        self.scaled = scaled
        self.dropout = nn.Dropout(attn_dropout)
        self.w_k = nn.Linear(input_size, input_size)
        self.w_q = nn.Linear(input_size, input_size)
        self.w_v = nn.Linear(input_size, input_size)
        self.w_r = nn.Linear(input_size, input_size)

        self.per_head_size = input_size // self.num_heads

        u = nn.init.xavier_normal_(
            torch.randn(self.num_heads, self.per_head_size)
        )
        v = nn.init.xavier_normal_(
            torch.randn(self.num_heads, self.per_head_size))
        self.register_parameter('u', nn.Parameter(u, requires_grad=True))
        self.register_parameter('v', nn.Parameter(v, requires_grad=True))

    def forward(self, key, query, value, pos, key_mask):
        key = self.w_k(key)
        query = self.w_q(query)
        value = self.w_v(value)
        rel_pos_embedding = self.w_r(pos)

        batch, _, hidden_size = key.shape

        # batch * seq_len * n_head * d_head
        key = torch.reshape(key, [batch, -1, self.num_heads, self.per_head_size])
        query = torch.reshape(query, [batch, -1, self.num_heads, self.per_head_size])
        value = torch.reshape(value, [batch, -1, self.num_heads, self.per_head_size])
        rel_pos_embedding = torch.reshape(rel_pos_embedding,
                                          list(rel_pos_embedding.shape[:3]) + [self.num_heads, self.per_head_size])

        # batch * n_head * seq_len * d_head
        key = key.transpose(1, 2)
        query = query.transpose(1, 2)
        value = value.transpose(1, 2)

        # batch * n_head * d_head * key_len
        key = key.transpose(-1, -2)

        u_for_c = self.u.unsqueeze(0).unsqueeze(-2)
        query_and_u_for_c = query + u_for_c
        A_C = torch.matmul(query_and_u_for_c, key)

        rel_pos_embedding_for_b = rel_pos_embedding.permute(0, 3, 1, 4, 2)
        query_for_b = query.view([batch, self.num_heads, query.size(2), 1, self.per_head_size])
        query_for_b_and_v_for_d = query_for_b + self.v.view(1, self.num_heads, 1, 1, self.per_head_size)
        B_D = torch.matmul(query_for_b_and_v_for_d, rel_pos_embedding_for_b).squeeze(-2)

        attn_score_raw = A_C + B_D

        if self.scaled:
            attn_score_raw = attn_score_raw / math.sqrt(self.per_head_size)

        mask = key_mask.unsqueeze(1).unsqueeze(1)
        attn_score_raw_masked = attn_score_raw - (1 - mask) * 1e15
        attn_score = F.softmax(attn_score_raw_masked, dim=-1)

        attn_score = self.dropout(attn_score)

        value_weighted_sum = torch.matmul(attn_score, value)

        result = value_weighted_sum.transpose(1, 2).contiguous(). \
            reshape(batch, -1, hidden_size)

        return result

class FeedForward(nn.Module):
    """Position-wise two-layer MLP with GELU activation.

    Expands input_size -> intermediate_size -> input_size, so the output
    shape matches the input shape.
    """

    def __init__(self, input_size, intermediate_size=1024):
        super().__init__()
        layers = [
            nn.Linear(input_size, intermediate_size),
            nn.GELU(),
            nn.Linear(intermediate_size, input_size),
        ]
        self.dense = nn.Sequential(*layers)

    def forward(self, x):
        out = self.dense(x)
        return out

class RelTransformerEncoderLayer(nn.Module):
    """One post-norm transformer encoder layer with relative-position attention.

    Sub-layer order: self-attention -> dropout -> residual -> LayerNorm,
    then feed-forward -> dropout -> residual -> LayerNorm.
    """

    def __init__(self, input_size, num_heads, scaled=True, dropout=0.1, intermediate_size=1024):
        super().__init__()
        self.attn = MultiHeadAttentionRel(input_size, num_heads, scaled, dropout)
        self.ff = FeedForward(input_size, intermediate_size=intermediate_size)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.layer_norm1 = nn.LayerNorm(input_size)
        self.layer_norm2 = nn.LayerNorm(input_size)

    def forward(self, hidden, mask, pos_embedding):
        # attention sub-layer (query = key = value = hidden)
        attended = self.dropout1(self.attn(hidden, hidden, hidden, pos_embedding, mask))
        normed = self.layer_norm1(hidden + attended)
        # feed-forward sub-layer
        transformed = self.dropout2(self.ff(normed))
        return self.layer_norm2(normed + transformed)

class FlatEmbedding(nn.Module):
    """Fuses character embeddings from a BERT-style encoder with projected
    word2vec word embeddings into a single flat-lattice representation.

    The char sequence is zero-padded up to the (longer) word sequence length
    so the two tensors can be summed elementwise.
    """

    def __init__(self, model_path, w2v_file, model_class, dropout=0.1):
        super().__init__()
        self.bert = model_class.from_pretrained(model_path)
        self.w2v = WordEmbedding(w2v_file)
        self.w2v_linear = nn.Linear(self.w2v.embedding_size, self.bert.config.hidden_size)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(self.bert.config.hidden_size)

    def forward(self, char_ids, word_ids, char_mask, word_mask):
        # contextual char vectors, zeroed at padded positions
        char_vec = self.bert(char_ids, char_mask)[0] * char_mask[..., None]

        # static word vectors, projected to BERT's hidden size; masked after
        # both the lookup and the projection (the linear layer has a bias)
        word_vec = self.w2v(word_ids) * word_mask[..., None]
        word_vec = self.w2v_linear(word_vec) * word_mask[..., None]
        word_vec = self.layer_norm(self.dropout(word_vec))

        batch_size, word_len, embedding_size = word_vec.size()

        # pad chars to word_len so the sum below is shape-compatible
        padding = torch.zeros((batch_size, word_len - char_vec.size(1), embedding_size)).to(char_vec)
        char_vec = torch.cat([char_vec, padding], dim=1)
        return char_vec + word_vec
class FLatTransformer(nn.Module):
    """Stack of relative-position transformer encoder layers over a flat
    lattice of character and word nodes.

    Relative positions are derived from each node's span start/end indices;
    the output keeps only the character positions (the leading part of the
    sequence, up to the longest char mask).
    """

    def __init__(self, input_size, max_len, hidden_size, num_heads, num_layers=1, scaled=False, dropout=0.1):
        super().__init__()
        self.max_len = max_len
        self.hidden_size = hidden_size
        self.pe = RelPositionEmbedding(max_len, hidden_size)
        self.adapter = nn.Linear(input_size, hidden_size)
        self.pos_dense = nn.Sequential(nn.Linear(hidden_size * 4, hidden_size),
                                       nn.ReLU())
        self.encoder_layers = nn.ModuleList(
            RelTransformerEncoderLayer(hidden_size, num_heads, scaled=scaled, dropout=dropout)
            for _ in range(num_layers)
        )

    def forward(self, char_word_vec, char_word_mask, start, end, attention_mask):
        char_len = attention_mask.sum(1).max()
        hidden = self.adapter(char_word_vec)

        # pairwise relative distances between span boundaries: the four
        # combinations of (start, end) x (start, end)
        def rel(a, b):
            return self.pe(a.unsqueeze(dim=2) - b.unsqueeze(dim=1))

        pos_embedding = self.pos_dense(
            torch.cat([rel(start, start), rel(start, end), rel(end, start), rel(end, end)], -1))

        for layer in self.encoder_layers:
            hidden = layer(hidden, char_word_mask, pos_embedding)
        # drop the trailing word-node positions, keep char positions only
        return hidden[:, :char_len]
