from tensorflow import  keras
import tensorflow as tf
import os


from pretraining import TrainingDatasetRoBERTa
from pretraining import build_transformer_model_for_pretraining


# --- Paths -------------------------------------------------------------------
# Directory holding the pre-tokenized pretraining corpus as TFRecord shards.
tf_record_path = "/home/qiufengfeng/nlp/competition/天池/CCKS2021中文NLP地址要素解析/data/tfrecord"
# Checkpoints are written next to the TFRecord directory, under "train_model".
model_saved_path = os.path.join(os.path.dirname(tf_record_path),"train_model")
# Chinese BERT-base release (12 layers, hidden 768, 12 heads) used to
# initialize weights before continued pretraining.
pretrain_model_path = r'/home/qiufengfeng/nlp/pre_trained_model/chinese_L-12_H-768_A-12/chinese_L-12_H-768_A-12/'
model_vocab_path = os.path.join(pretrain_model_path,'vocab.txt')
model_config_path = os.path.join(pretrain_model_path,'bert_config.json')
model_ckpt = os.path.join(pretrain_model_path,"bert_model.ckpt")

# --- Training hyperparameters ------------------------------------------------
model = 'roberta'        # pretraining flavour; selects the dataset loader below
sequence_length = 512    # max tokens per example
batch_size = 4096        # effective (global) batch size before accumulation
num_warmup_steps = 3125
num_train_steps = 125000
steps_per_epoch = 10000
grad_accum_steps = 16  # values > 1 enable gradient accumulation
# Total epochs so that epochs * steps_per_epoch covers
# num_train_steps * grad_accum_steps micro-batches.
epochs = num_train_steps * grad_accum_steps // steps_per_epoch


# Build the pretraining model inside a MirroredStrategy scope so its variables
# are mirrored across all visible GPUs for synchronous data-parallel training.
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    train_model = build_transformer_model_for_pretraining(model,model_config_path,model_ckpt)
    train_model.summary()


class ModelCheckpoint(keras.callbacks.Callback):
    """Callback that persists the latest model weights after every epoch.

    The checkpoint at ``model_saved_path`` is overwritten each time, so only
    the most recent weights are kept on disk.
    """

    def on_epoch_end(self, epoch, logs=None):
        # Save in TF checkpoint format, replacing the previous files in place.
        self.model.save_weights_as_checkpoint(model_saved_path, overwrite=True)


# Callbacks: save the latest weights each epoch, and append per-epoch metrics
# to a CSV training log.
checkpoint = ModelCheckpoint()
csv_logger = keras.callbacks.CSVLogger('training.log')

# Collect every TFRecord shard found in the corpus directory.
corpus_paths = [
    os.path.join(tf_record_path, file_name)
    for file_name in os.listdir(tf_record_path)
]


# Build the input pipeline. Each step consumes micro-batches of
# batch_size // grad_accum_steps examples; gradient accumulation restores the
# effective global batch size at the optimizer step.
if model == 'roberta':
    dataset = TrainingDatasetRoBERTa.load_tfrecord(
        record_names=corpus_paths,
        sequence_length=sequence_length,
        batch_size=batch_size // grad_accum_steps,
    )
else:
    # Previously an unsupported value fell through and crashed later with a
    # NameError on `dataset`; fail fast with a clear message instead.
    raise ValueError(f"Unsupported pretraining model type: {model!r}")

# Train. The original call hard-coded steps_per_epoch=1 (a debugging
# leftover), which would run only `epochs` batches in total; use the
# configured steps_per_epoch so the schedule actually covers
# num_train_steps * grad_accum_steps micro-steps.
train_model.fit(
    dataset,
    steps_per_epoch=steps_per_epoch,
    epochs=epochs,
    callbacks=[checkpoint, csv_logger],
)
