# -*- coding: utf-8 -*-
"""
Created on Tue Apr 19 19:26:12 2022

@author: 11325
"""

import re
import math
import torch
import numpy as np
from random import *
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as Data
import Transformer_encoder
import dataloader
import model


def train(model, loader, num_epoch, learning_rate, vocab_size):
    """Train a BERT-style model on masked-LM + next-sentence objectives.

    Args:
        model: network called as ``model(input_ids, segment_ids, masked_pos)``
            returning ``(nsp_logits, mlm_logits)`` — NSP logits of shape
            (batch, 2) and MLM logits of shape (batch, n_pred, vocab_size).
        loader: iterable yielding batches of
            (input_ids, segment_ids, masked_tokens, masked_pos, is_next).
        num_epoch: number of full passes over ``loader``.
        learning_rate: learning rate for the Adadelta optimizer.
        vocab_size: vocabulary size; used to flatten MLM logits for the loss.
    """
    optimizer = torch.optim.Adadelta(model.parameters(), lr=learning_rate)
    criterion = nn.CrossEntropyLoss()
    # NOTE: the original created an unused `torch.cuda.FloatTensor` here,
    # which crashes on CPU-only machines; it has been removed.

    for epoch in range(num_epoch):
        for input_ids, segment_ids, masked_tokens, masked_pos, is_next in loader:
            nsp_logits, mlm_logits = model(input_ids, segment_ids, masked_pos)
            # Masked-LM loss: flatten (batch, n_pred, vocab) -> (batch*n_pred, vocab).
            loss_lm = criterion(mlm_logits.view(-1, vocab_size), masked_tokens.view(-1))
            loss_lm = loss_lm.float().mean()
            # Next-sentence-prediction loss on the (batch, 2) logits.
            loss_cl = criterion(nsp_logits, is_next)
            loss = loss_lm + loss_cl
            if (epoch + 1) % 5 == 0:
                print('Epoch: {} loss:{}'.format(epoch + 1, loss))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        
def test(model, data, batch):
    """Qualitative spot-check on the first pre-built batch.

    Prints the de-padded input sentence, the true vs. predicted masked
    tokens, and the true vs. predicted next-sentence flag.
    """
    input_ids, segment_ids, masked_tokens, masked_pos, is_next = batch[0]
    # Show the input sentence, dropping padding tokens.
    print([data.idx_word_dic[w] for w in input_ids if data.idx_word_dic[w] != '[PAD]'])

    def _tensorize(seq):
        # Wrap a python list as a (1, len) LongTensor batch.
        return torch.unsqueeze(torch.LongTensor(seq), 0)

    cls_logits, mlm_logits = model(
        _tensorize(input_ids), _tensorize(segment_ids), _tensorize(masked_pos))
    # argmax over the class / vocab dimension gives the predictions.
    pred_next = cls_logits.data.max(1)[1].data.numpy()
    pred_tokens = mlm_logits.data.max(2)[1][0].data.numpy()

    print('masked tokens list : ', [pos for pos in masked_tokens if pos != 0])
    print('predict masked tokens list : ', [pos for pos in pred_tokens if pos != 0])
    print('isNext : ', True if is_next else False)
    print('predict isNext : ', True if pred_next else False)


if __name__ == '__main__':
    # BERT hyper-parameters
    maxlen = 30                # maximum sequence length
    batch_size = 6
    max_pred = 5               # max tokens of prediction
    num_layers = 6
    num_heads = 12
    model_dimension = 768
    d_ff = 768 * 4             # 4*d_model, FeedForward dimension
    d_k = 64
    d_v = 64                   # dimension of K(=Q), V
    num_segments = 2           # number of sentence segments

    print("Loading train datas and initialize dataloader......")
    data = dataloader.data_processing(dataloader.test_path, max_pred, maxlen, batch_size)
    data.get_sentens()
    batch = data.make_batch()
    dataSet = dataloader.dataset(batch)
    dataSet.processing_batch()
    # Use the `Data` alias already imported at the top of the file;
    # third positional argument enables shuffling.
    dataLoader = Data.DataLoader(dataSet, batch_size, True)

    print('Intialize model...')
    vocab_size = data.vocab_size
    # Bind to a distinct name so the imported `model` module is not shadowed
    # (the original `model = model.bert(...)` made the module unreachable).
    bert_model = model.bert(maxlen, batch_size, max_pred, num_layers, num_heads,
                            model_dimension, d_ff, d_k, d_v, num_segments, vocab_size)

    print('Start training...')
    train(bert_model, dataLoader, 1, 0.001, vocab_size)
    print('Start testing...')
    test(bert_model, data, batch)
