def train(model, pre_train=False):
    """Train `model` with AMP (O2) mixed precision and gradient accumulation.

    Args:
        model: the network to train; must expose ``excloud_amp_layer()``
            returning the layers to exclude from AMP decoration.
        pre_train: when True, resume from the checkpoint files under
            ``model_check_point_dir`` (model params, optimizer state, and
            the final-checkpoint metadata pickle).

    Relies on module-level globals: ``lr``, ``batch_size``, ``total_epoch``,
    ``accumulate_batchs_num``, ``model_check_point_dir``, ``SmileDataSet``,
    ``predict`` and ``save_model``.
    """
    opt = paddle.optimizer.Adam(learning_rate=lr, parameters=model.parameters())

    start_epoch = 0
    if pre_train:
        # Load model parameters, optimizer state and the last saved checkpoint.
        layer_state_dict = paddle.load(model_check_point_dir + "model.pdparams")
        opt_state_dict = paddle.load(model_check_point_dir + "opt.pdopt")
        final_checkpoint_dict = paddle.load(model_check_point_dir + "final_checkpoint.pkl")
        # Bind the loaded state back onto the model and optimizer.
        model.set_state_dict(layer_state_dict)
        opt.set_state_dict(opt_state_dict)
        # NOTE(review): resuming at the saved epoch re-runs that epoch;
        # confirm whether the checkpoint represents a completed epoch (if so,
        # this should be final_checkpoint_dict["epoch"] + 1).
        start_epoch = final_checkpoint_dict["epoch"]
        # Echo the restored checkpoint info.
        print(
            "Loaded Final Checkpoint. Epoch : {}, Loss : {}".format(
                final_checkpoint_dict["epoch"], final_checkpoint_dict["loss"].numpy()
            )
        )

    # Decorate model/optimizer for AMP level O2, excluding the layers the
    # model itself marks as numerically unsafe for fp16.
    model, opt = paddle.amp.decorate(
        models=model,
        level='O2',
        optimizers=opt,
        excluded_layers=model.excloud_amp_layer(),
    )
    scaler = paddle.amp.GradScaler(init_loss_scaling=1024)

    train_dataset = SmileDataSet()
    train_loader = paddle.io.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=0,
        drop_last=True,
    )

    # drop_last=True above makes this the exact number of steps per epoch.
    total_step = len(train_dataset) // batch_size
    print("epoch_size=%s,total_step=%s,batch_size=%s" % (total_epoch, total_step, batch_size))

    for epoch in range(start_epoch, total_epoch):
        pending_steps = 0  # micro-batches accumulated since the last opt step
        for batch_id, data in enumerate(train_loader()):
            scr, tgt, lbl = data
            with paddle.amp.auto_cast(level='O2'):
                pre = model(scr, tgt)
                loss = paddle.nn.functional.cross_entropy(pre, lbl).mean()
            # Fix: average the loss over the accumulation window so the
            # accumulated gradient matches one large batch. The original
            # summed gradients, inflating their magnitude by
            # accumulate_batchs_num. The printed `loss` stays un-divided.
            scaled = scaler.scale(loss / accumulate_batchs_num)
            scaled.backward()
            pending_steps += 1

            if (batch_id + 1) % accumulate_batchs_num == 0:
                scaler.step(opt)
                scaler.update()
                opt.clear_grad(set_to_zero=False)
                pending_steps = 0

            if batch_id % 10 == 0:
                print("epoch:[%s/%s] step[%s/%s] loss is :%.16f" % (epoch, total_epoch, batch_id, total_step, loss))

        # Fix: flush leftover gradients when steps-per-epoch is not a
        # multiple of accumulate_batchs_num, so stale gradients do not
        # leak into the next epoch.
        if pending_steps:
            scaler.step(opt)
            scaler.update()
            opt.clear_grad(set_to_zero=False)

        if epoch % 30 == 0:
            # Lightweight evaluation pass (predict=False) every 30 epochs.
            with paddle.amp.auto_cast(level='O2'):
                predict(model, predict=False)
        if epoch % 100 == 0 and epoch - start_epoch > 99:
            # Full prediction dump and checkpoint every 100 epochs, skipping
            # the first window right after a (re)start.
            with paddle.amp.auto_cast(level='O2'):
                predict(model, predict=True, epoch_num=epoch)
            save_model(epoch, loss, model, opt)