# -*- coding: utf-8 -*-
"""
Created on Sun Apr 17 15:37:19 2022

@author: 11325
"""

import re
import pandas as pd
import numpy as np
import random as rd
import math
import copy
import tqdm 
import os
import torch
import torch.nn as nn
'''
# BERT Parameters
maxlen = 30
batch_size = 6
max_pred = 5 # max tokens of prediction
n_layers = 6
n_heads = 12
model_dimension = 768
d_ff = 768*4 # 4*d_model, FeedForward dimension
d_k = d_v = 64  # dimension of K(=Q), V
num_segments = 2  # number of segments (sentences) per input
'''
def get_attn_pad_mask(seq_q, seq_k):
    """Build a boolean attention mask that hides PAD positions in the keys.

    Args:
        seq_q: LongTensor [batch_size, len_q] of query token ids.
        seq_k: LongTensor [batch_size, len_k] of key token ids; id 0 is PAD.

    Returns:
        BoolTensor [batch_size, len_q, len_k], True where the key token is
        PAD and the attention score should be masked out.

    Bug fix: the original ignored ``seq_k`` and derived the mask from
    ``seq_q`` for both axes, which is wrong whenever Q and K differ
    (cross-attention). Self-attention behavior is unchanged.
    """
    batch_size, len_q = seq_q.size()
    _, len_k = seq_k.size()
    # eq(0) marks PAD tokens in the *key* sequence.
    pad_attn_mask = seq_k.data.eq(0).unsqueeze(1)  # [batch_size, 1, len_k]
    # Broadcast the per-key mask over every query position.
    return pad_attn_mask.expand(batch_size, len_q, len_k)

def gelu(x):
    """Exact (erf-based) GELU activation: x * Phi(x), where Phi is the
    standard normal CDF."""
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf


class Embedding(nn.Module):
    """BERT input embedding: token + learned position + segment embeddings,
    followed by LayerNorm.

    Fixes over the original:
      * removed dead debug locals (``test_1``/``test_2``) that forced
        ``.cpu()`` synchronizations on every forward;
      * the position-id tensor is now created on ``x.device`` so the module
        works on GPU inputs (it was always built on CPU before).
    """

    def __init__(self, vocab_size, model_dimension, num_segments, maxlen):
        super(Embedding, self).__init__()
        self.tok_embed = nn.Embedding(vocab_size, model_dimension)   # token embedding
        self.pos_embed = nn.Embedding(maxlen, model_dimension)       # learned position embedding
        self.seg_embed = nn.Embedding(num_segments, model_dimension) # segment (token type) embedding
        self.norm = nn.LayerNorm(model_dimension)

    def forward(self, x, seg):
        """Embed token ids and segment ids.

        Args:
            x:   LongTensor [batch_size, seq_len] of token ids.
            seg: LongTensor [batch_size, seq_len] of segment ids.

        Returns:
            FloatTensor [batch_size, seq_len, model_dimension].
        """
        seq_len = x.size(1)
        # Position ids live on the same device as the input batch.
        pos = torch.arange(seq_len, dtype=torch.long, device=x.device)
        pos = pos.unsqueeze(0).expand_as(x)  # [seq_len] -> [batch_size, seq_len]
        embedding = self.tok_embed(x) + self.pos_embed(pos) + self.seg_embed(seg)
        return self.norm(embedding)
    
    
class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) V."""

    def __init__(self):
        super(ScaledDotProductAttention, self).__init__()

    def forward(self, Q, K, V, d_k, d_v, attn_mask):
        # Similarity scores, scaled by sqrt(d_k) to keep softmax gradients
        # well-behaved. Shape: [batch_size, n_heads, seq_len, seq_len].
        scores = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(d_k)
        # Positions where attn_mask is True are suppressed with a large
        # negative value before the softmax.
        scores.masked_fill_(attn_mask, -1e9)
        weights = nn.Softmax(dim=-1)(scores)
        # NOTE: d_v is accepted for interface compatibility but not used here.
        context = torch.matmul(weights, V)
        return context

class MultiHeadAttention(nn.Module):
    """Multi-head attention with a residual connection and LayerNorm.

    Bug fix: the output projection (``nn.Linear``) and the ``nn.LayerNorm``
    were previously constructed *inside* ``forward``, so their weights were
    randomly re-initialized on every call and never registered as trainable
    parameters. They are now created once in ``__init__``.
    """

    def __init__(self, model_dimension, d_k, d_v, n_heads):
        super(MultiHeadAttention, self).__init__()
        self.n_heads = n_heads
        self.d_k = d_k
        self.d_v = d_v
        self.model_dimension = model_dimension
        self.W_Q = nn.Linear(model_dimension, d_k * n_heads)
        self.W_K = nn.Linear(model_dimension, d_k * n_heads)
        self.W_V = nn.Linear(model_dimension, d_v * n_heads)
        # Output projection + LayerNorm registered once so they are trained
        # and produce deterministic results across calls.
        self.fc = nn.Linear(n_heads * d_v, model_dimension)
        self.norm = nn.LayerNorm(model_dimension)

    def forward(self, Q, K, V, attn_mask):
        """Q/K/V: [batch_size, seq_len, model_dimension];
        attn_mask: BoolTensor [batch_size, seq_len, seq_len].
        Returns [batch_size, seq_len, model_dimension]."""
        residual, batch_size = Q, Q.size(0)
        # (B, S, D) -proj-> (B, S, H*W) -split-> (B, S, H, W) -trans-> (B, H, S, W)
        q_s = self.W_Q(Q).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2)  # [B, H, S, d_k]
        k_s = self.W_K(K).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2)  # [B, H, S, d_k]
        v_s = self.W_V(V).view(batch_size, -1, self.n_heads, self.d_v).transpose(1, 2)  # [B, H, S, d_v]

        # Replicate the mask across every head: [B, H, S, S].
        attn_mask = attn_mask.unsqueeze(1).repeat(1, self.n_heads, 1, 1)

        # context: [batch_size, n_heads, seq_len, d_v]
        context = ScaledDotProductAttention()(q_s, k_s, v_s, self.d_k, self.d_v, attn_mask)
        # Re-assemble heads: [batch_size, seq_len, n_heads * d_v].
        context = context.transpose(1, 2).contiguous().view(batch_size, -1, self.n_heads * self.d_v)
        output = self.fc(context)
        # Residual connection then LayerNorm: [batch_size, seq_len, model_dimension].
        return self.norm(output + residual)

class PoswiseFeedForwardNet(nn.Module):
    """Position-wise feed-forward network: Linear -> GELU -> Linear."""

    def __init__(self, model_dimension, d_ff):
        super(PoswiseFeedForwardNet, self).__init__()
        self.fc1 = nn.Linear(model_dimension, d_ff)  # expand to d_ff
        self.fc2 = nn.Linear(d_ff, model_dimension)  # project back to d_model

    def forward(self, x):
        # [batch, seq, d_model] -> [batch, seq, d_ff] -> [batch, seq, d_model]
        hidden = self.fc1(x)
        return self.fc2(gelu(hidden))

class EncoderLayer(nn.Module):
    """One Transformer encoder layer: multi-head self-attention followed by a
    position-wise feed-forward network."""

    def __init__(self, model_dimension, d_k, d_v, n_heads, d_ff):
        super(EncoderLayer, self).__init__()
        self.enc_self_attn = MultiHeadAttention(model_dimension, d_k, d_v, n_heads)
        self.pos_ffn = PoswiseFeedForwardNet(model_dimension, d_ff)

    def forward(self, enc_inputs, enc_self_attn_mask):
        # Self-attention: the same tensor serves as Q, K and V.
        attended = self.enc_self_attn(enc_inputs, enc_inputs, enc_inputs, enc_self_attn_mask)
        # Feed-forward keeps the shape [batch_size, seq_len, d_model].
        return self.pos_ffn(attended)