import torch
import math
import numpy as np
from transformers import BertModel


# Load the pretrained torch BERT as the reference implementation.
# return_dict=False makes bert(...) return a (sequence_output, pooler_output) tuple.
# NOTE(review): the raw string keeps the doubled backslashes literally;
# Windows tolerates "D:\\..\\.." paths, but r"D:\BAIDU\..." was likely intended — confirm.
bert = BertModel.from_pretrained(r"D:\\BAIDU\\week6\\bert-base-chinese\\bert-base-chinese", return_dict=False)
# print(bert.state_dict())
state_dict = bert.state_dict()  # weights copied from here by MyBert.load_weights below
bert.eval() # inference mode: disables dropout (the original comment said "start training", which is wrong)
a = np.array([2450, 15486, 102, 2110])  # toy input: four vocabulary ids
torch_x = torch.LongTensor([a])  # add a batch dimension -> shape (1, 4)

# print(bert.state_dict().keys())  # list all weight-matrix names in the state dict
# softmax normalization
def softmax(x):
    """Numerically stable softmax over the last axis.

    Subtracting the per-row maximum before exponentiating prevents
    overflow for large logits; softmax is shift-invariant, so the
    result is unchanged.
    """
    shifted = x - np.max(x, axis=-1, keepdims=True)
    e = np.exp(shifted)
    return e / np.sum(e, axis=-1, keepdims=True)

# GELU activation function (tanh approximation)
def gelu(x):
    """GELU activation using the tanh approximation from the BERT paper."""
    coeff = math.sqrt(2 / math.pi)
    inner = coeff * (x + 0.044715 * np.power(x, 3))
    return 0.5 * x * (1 + np.tanh(inner))
class MyBert():
    """Pure-numpy re-implementation of the BERT encoder forward pass.

    Weights are copied out of a pretrained Hugging Face ``BertModel``
    state_dict (the module-level ``state_dict``) so the numpy output can
    be compared against the torch reference.
    """

    def __init__(self):
        self.num_attention_heads = 12  # bert-base uses 12 attention heads
        self.hidden_size = 768         # bert-base hidden width
        # NOTE(review): bert-base-chinese has 12 encoder layers; with only 6
        # the output cannot match the torch reference printed below —
        # confirm whether this truncation is intentional.
        self.num_layers = 6
        self.load_weights(state_dict)  # relies on the module-level state_dict

    def load_weights(self, state_dict):
        """Copy every tensor the forward pass needs into numpy arrays.

        Per encoder layer the 16 arrays are stored in a fixed-order list so
        single_transformer_layer_forward can unpack them positionally.
        """
        def to_np(key):
            # Pull one torch tensor out of the state dict as a numpy array.
            return state_dict[key].numpy()

        # Embedding-table parameters.
        self.word_embeddings = to_np("embeddings.word_embeddings.weight")
        self.position_embeddings = to_np("embeddings.position_embeddings.weight")  # absolute positions (512 rows)
        self.token_type_embeddings = to_np("embeddings.token_type_embeddings.weight")  # segment (sentence A/B) table
        self.emb_layer_weight = to_np("embeddings.LayerNorm.weight")  # LayerNorm applied to the summed embeddings
        self.emb_layer_bias = to_np("embeddings.LayerNorm.bias")

        self.weight = []
        for i in range(self.num_layers):
            prefix = "encoder.layer.%d." % i
            # Self-attention projections.
            q_w = to_np(prefix + "attention.self.query.weight")
            q_b = to_np(prefix + "attention.self.query.bias")
            k_w = to_np(prefix + "attention.self.key.weight")
            k_b = to_np(prefix + "attention.self.key.bias")
            v_w = to_np(prefix + "attention.self.value.weight")
            v_b = to_np(prefix + "attention.self.value.bias")
            attention_w = to_np(prefix + "attention.output.dense.weight")
            attention_b = to_np(prefix + "attention.output.dense.bias")
            # LayerNorm after the attention residual connection.
            layerNorm_w = to_np(prefix + "attention.output.LayerNorm.weight")
            layerNorm_b = to_np(prefix + "attention.output.LayerNorm.bias")
            # Feed-forward (intermediate + output) parameters.
            intermediate_w = to_np(prefix + "intermediate.dense.weight")
            intermediate_b = to_np(prefix + "intermediate.dense.bias")
            ff_out_w = to_np(prefix + "output.dense.weight")
            ff_out_b = to_np(prefix + "output.dense.bias")
            # LayerNorm after the feed-forward residual connection.
            ln_ff_w = to_np(prefix + "output.LayerNorm.weight")
            ln_ff_b = to_np(prefix + "output.LayerNorm.bias")
            self.weight.append([q_w, q_b, k_w, k_b, v_w, v_b, attention_w, attention_b,
                                layerNorm_w, layerNorm_b, intermediate_w, intermediate_b,
                                ff_out_w, ff_out_b, ln_ff_w, ln_ff_b])
        # Pooler head that maps the [CLS] vector to the sentence embedding.
        self.pooler_dense_weight = to_np("pooler.dense.weight")
        self.pooler_dense_bias = to_np("pooler.dense.bias")

    def get_embdding(self, embedding_matrix, x):
        """Look up the embedding rows for a sequence of token indices.

        Vectorized fancy indexing replaces the original per-token Python
        loop; the returned array is identical.
        """
        return embedding_matrix[np.asarray(x)]

    def transpose_for_scores(self, x, attention_head_size, num_attention_heads):
        """Split (max_len, hidden) into (num_heads, max_len, head_size)."""
        max_len, hidden_size = x.shape
        x = x.reshape(max_len, num_attention_heads, attention_head_size)
        return x.swapaxes(1, 0)

    def normal_layezation(self, x, w, b, eps=1e-12):
        """LayerNorm over the last axis: standardize, then scale and shift.

        eps matches Hugging Face BERT's LayerNorm epsilon and guards
        against division by zero on a constant row (the original divided
        by the raw std with no epsilon).
        """
        mean = np.mean(x, axis=-1, keepdims=True)
        var = np.var(x, axis=-1, keepdims=True)
        x = (x - mean) / np.sqrt(var + eps)
        return x * w + b

    def embedding_forward(self, x):
        """Sum word + position + segment embeddings, then LayerNorm."""
        word_embeddings = self.get_embdding(self.word_embeddings, x)
        position_embeddings = self.get_embdding(self.position_embeddings, np.arange(len(x)))
        # All tokens belong to segment 0 (single-sentence input).
        token_type_embeddings = self.get_embdding(self.token_type_embeddings, np.zeros(len(x), dtype=int))
        return self.normal_layezation(word_embeddings + position_embeddings + token_type_embeddings,
                                      self.emb_layer_weight, self.emb_layer_bias)

    def all_transformer_layer_forward(self, x):
        """Run the input through every loaded encoder layer in order."""
        for i in range(self.num_layers):
            x = self.single_transformer_layer_forward(x, i)
        return x

    def self_attention(self, x, q_w, q_b, k_w, k_b, v_w, v_b, attention_w, attention_b):
        """Multi-head scaled dot-product self-attention plus output projection.

        x is (max_len, hidden) and the return has the same shape. torch
        Linear weights are stored (out, in), hence the .T on every matmul.
        """
        q = np.dot(x, q_w.T) + q_b
        k = np.dot(x, k_w.T) + k_b
        v = np.dot(x, v_w.T) + v_b
        attention_head_size = self.hidden_size // self.num_attention_heads
        # Split the hidden dim into heads: (num_heads, max_len, head_size).
        q = self.transpose_for_scores(q, attention_head_size, self.num_attention_heads)
        k = self.transpose_for_scores(k, attention_head_size, self.num_attention_heads)
        v = self.transpose_for_scores(v, attention_head_size, self.num_attention_heads)
        qk = np.matmul(q, k.swapaxes(1, 2))
        qk /= np.sqrt(attention_head_size)  # scale before softmax
        qk = softmax(qk)
        qkv = np.matmul(qk, v)
        # Merge the heads back into (max_len, hidden).
        qkv = qkv.swapaxes(0, 1).reshape(-1, self.hidden_size)
        return np.dot(qkv, attention_w.T) + attention_b

    def feed_forward(self, x, intermediate_w, intermediate_b, ff_out_w, ff_out_b):
        """Position-wise feed-forward block: dense -> GELU -> dense."""
        x = np.dot(x, intermediate_w.T) + intermediate_b
        x = gelu(x)
        return np.dot(x, ff_out_w.T) + ff_out_b

    def single_transformer_layer_forward(self, x, i):
        """One encoder layer: attention + residual + LN, then FF + residual + LN."""
        (q_w, q_b, k_w, k_b, v_w, v_b, attention_w, attention_b,
         layerNorm_w, layerNorm_b, intermediate_w, intermediate_b,
         ff_out_w, ff_out_b, ln_ff_w, ln_ff_b) = self.weight[i]
        attention_x = self.self_attention(x, q_w, q_b, k_w, k_b, v_w, v_b, attention_w, attention_b)
        x = self.normal_layezation(x + attention_x, layerNorm_w, layerNorm_b)
        feed_forward_x = self.feed_forward(x, intermediate_w, intermediate_b, ff_out_w, ff_out_b)
        x = self.normal_layezation(x + feed_forward_x, ln_ff_w, ln_ff_b)
        return x

    def pooler_output_layer(self, x):
        """Pooler head applied to the [CLS] vector: dense then tanh."""
        x = np.dot(x, self.pooler_dense_weight.T) + self.pooler_dense_bias
        x = np.tanh(x)
        return x

    def forward(self, torch_x):
        """Full forward pass; returns (sequence_output, pooler_output)."""
        embedding_x = self.embedding_forward(torch_x)
        x = self.all_transformer_layer_forward(embedding_x)
        pooler_output = self.pooler_output_layer(x[0])  # row 0 is the [CLS] token
        return x, pooler_output
# Reference forward pass through the torch model (batch of one sentence).
torch_sequence, torch_pooler = bert(torch_x)
print(torch_sequence)
# Same sentence through the numpy re-implementation, for side-by-side comparison.
diy_model = MyBert()
diy_sequence, diy_pooler = diy_model.forward(a)
print(diy_sequence)
