# -*- coding: utf-8 -*-
"""
Created on Sat Apr 16 13:58:19 2022

@author: 11325
"""

import re
import math
import torch
import numpy as np
from random import *
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as Data
import Transformer_encoder
import dataloader

'''
# BERT Parameters
maxlen = 30
batch_size = 6
max_pred = 5 # max tokens of prediction
n_layers = 6
n_heads = 12
model_dimension = 768
d_ff = 768*4 # 4*d_model, FeedForward dimension
d_k = d_v = 64  # dimension of K(=Q), V
num_segments = 2  # number of sentences per input pair
vocab_size  # total number of distinct tokens (set from the data)
'''
def gelu(x):
    """Gaussian Error Linear Unit: x * Phi(x), exact erf formulation.

    Phi is the standard normal CDF, computed as 0.5 * (1 + erf(x / sqrt(2))).
    """
    phi = (1.0 + torch.erf(x / math.sqrt(2.0))) * 0.5
    return x * phi

class bert(nn.Module):
    """BERT pre-training model: a stack of Transformer encoder layers with
    two heads — NSP (next-sentence prediction, binary) and MLM (masked
    language modeling over the vocabulary).

    Args:
        maxlen: maximum sequence length (positions in the embedding table).
        batch_size: unused here; kept for interface compatibility.
        max_pred: unused here; max number of masked tokens per sample.
        num_layers: number of stacked encoder layers.
        n_heads: attention heads per layer.
        model_dimension: hidden size of the model.
        d_ff: feed-forward inner dimension.
        d_k, d_v: per-head dimensions of K(=Q) and V.
        num_segments: number of segment (sentence) ids.
        vocab_size: total number of distinct tokens.
    """
    def __init__(self,maxlen,batch_size,max_pred,num_layers,n_heads,model_dimension,d_ff,d_k,d_v,num_segments,vocab_size):
        super(bert,self).__init__()
        self.model_dimension=model_dimension
        # token + segment + position embedding layer
        self.embedding=Transformer_encoder.Embedding(vocab_size, model_dimension, num_segments, maxlen)
        # encoder: num_layers stacked Transformer encoder layers
        self.layers=nn.ModuleList([Transformer_encoder.EncoderLayer(model_dimension, d_k, d_v, n_heads, d_ff) for _ in range(num_layers)])
        # pooler (fully connected + tanh) applied to the [CLS] token
        self.fc=nn.Linear(model_dimension, model_dimension)
        self.activ1=nn.Tanh()
        # MLM head transform: linear -> gelu -> layer norm
        self.linear=nn.Linear(model_dimension, model_dimension)
        self.activ2=gelu
        self.norm=nn.LayerNorm(model_dimension)
        # NSP classifier: 2-way (is-next / not-next)
        self.classfier=nn.Linear(model_dimension, 2)

        # MLM decoder shares (ties) its weight with the token embedding,
        # as in the original BERT; only the bias is a fresh parameter.
        embed_weight=self.embedding.tok_embed.weight
        self.deconder=nn.Linear(model_dimension,vocab_size,bias=False)
        # fix: embed_weight was computed but never used — tie the weights
        self.deconder.weight=embed_weight
        # fix: bias must have vocab_size entries (was model_dimension) and
        # is now actually added to the decoder output in forward()
        self.deconder_bias=nn.Parameter(torch.zeros(vocab_size))

    def forward(self,input_id,segment_id,maked_pos):
        # NOTE: `maked_pos` is a typo for `masked_pos`, kept for
        # backward compatibility with keyword callers.
        # embed ids -> [batch_size, maxlen, model_dimension]
        output=self.embedding(input_id,segment_id)
        # padding mask over attention scores: [batch_size, maxlen, maxlen]
        enc_self_attn_mask = Transformer_encoder.get_attn_pad_mask(input_id, input_id)
        # each layer consumes the previous layer's output plus the mask
        for layer in self.layers:
            output=layer(output,enc_self_attn_mask)

        # NSP head: pool the [CLS] token (position 0) through fc + tanh.
        # fix: activ1 was defined but never applied.
        h_pooled=self.activ1(self.fc(output[:,0]))  # [batch_size, model_dimension]
        los_cls=self.classfier(h_pooled)            # NSP logits [batch_size, 2]

        # MLM head: gather hidden states at the masked positions.
        masked_pos=maked_pos[:,:,None].expand(-1,-1,self.model_dimension)  # [batch_size, max_pred, model_dimension]
        h_masked = torch.gather(output, 1, masked_pos)
        h_masked=self.norm(self.activ2(self.linear(h_masked)))
        # fix: add the decoder bias (previously created but never used)
        los_mlm=self.deconder(h_masked)+self.deconder_bias  # MLM logits [batch_size, max_pred, vocab_size]

        return los_cls,los_mlm
    
    
def train(model,loader,num_epoch,learning_rate,vocab_size):
    """Pre-train a BERT-style model on the joint NSP + MLM objective.

    Args:
        model: module whose forward(input_ids, segment_ids, masked_pos)
            returns (nsp_logits [B, 2], mlm_logits [B, max_pred, vocab_size]).
        loader: iterable yielding (input_ids, segment_ids, masked_tokens,
            masked_pos, is_next) batches.
        num_epoch: number of passes over `loader`.
        learning_rate: Adadelta learning rate.
        vocab_size: vocabulary size used to flatten the MLM logits.
    """
    optimizer=torch.optim.Adadelta(model.parameters(), lr=learning_rate)
    criterion = nn.CrossEntropyLoss()
    # fix: removed unused `loss_sum=torch.cuda.FloatTensor([0.0])`, which
    # crashed on CPU-only machines and was never read.

    for i in range(num_epoch):
        for input_ids,segment_ids,masked_tokens,masked_pos,is_next in loader:
            loss_cls,loss_mlm=model(input_ids,segment_ids,masked_pos)
            # MLM loss over every masked position (flattened to [B*max_pred])
            loss_lm=criterion(loss_mlm.view(-1,vocab_size),masked_tokens.view(-1))
            loss_lm=(loss_lm.float()).mean()
            # NSP loss (2-way classification)
            loss_cl=criterion(loss_cls,is_next)
            loss=loss_lm+loss_cl
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # fix: report once per qualifying epoch instead of once per batch
        if (i+1)%5==0:
            print('Epoch: {} loss:{}'.format(i+1, loss))
        
    
        
        
if __name__ == '__main__':
    # BERT hyper-parameters
    maxlen = 30              # maximum sequence length
    batch_size = 6
    max_pred = 5             # max tokens of prediction (MLM)
    num_layers = 6
    num_heads = 12
    model_dimension = 768
    d_ff = 768 * 4           # feed-forward dimension (4 * model_dimension)
    d_k = 64                 # dimension of K (= Q)
    d_v = 64                 # dimension of V
    num_segments = 2         # number of sentences per input pair
    # vocab_size is derived from the data below

    # Build the pre-training corpus and wrap it in a DataLoader.
    data = dataloader.data_processing(dataloader.test_path, max_pred, maxlen, batch_size)
    data.get_sentens()
    batch = data.make_batch()
    dataSet = dataloader.dataset(batch)
    dataSet.processing_batch()
    # fix: pass shuffle by keyword — a bare positional `True` is obscure
    dataLoader = torch.utils.data.DataLoader(dataSet, batch_size=batch_size, shuffle=True)
    vocab_size = data.vocab_size  # total number of distinct tokens

    Bert = bert(maxlen, batch_size, max_pred, num_layers, num_heads,
                model_dimension, d_ff, d_k, d_v, num_segments, vocab_size)
    train(Bert, dataLoader, 100, 0.001, vocab_size)
    # fix: removed dead trailing statement `i=1`
   
        
        
