def predict(model, predict=False, epoch_num=9999):
    """Decode SMILES sequences from `model` over the prediction dataset.

    Args:
        model: seq2seq model; invoked as ``model(src, tgt)`` where ``tgt`` is a
            zero int64 tensor of shape ``[pred_batch_size, max_len]``.
        predict: when True, decode the entire dataset and write the results to
            a CSV file; when False, decode and print a single batch as a
            sanity check, then return.  (NOTE(review): this flag shadows the
            function's own name — kept for interface compatibility.)
        epoch_num: tag embedded in the output CSV filename.
    """
    # NOTE(review): model.eval() was commented out in the original — dropout /
    # batch-norm layers stay in training mode during inference; confirm intent.
    # model.eval()

    test_dataset = SmilePredDataSet()
    # NOTE(review): shuffle=True randomizes prediction order and drop_last=True
    # silently discards up to pred_batch_size-1 trailing samples — confirm
    # these are intended for an inference pass.
    test_loader = paddle.io.DataLoader(
        test_dataset,
        batch_size=pred_batch_size,
        shuffle=True,
        num_workers=0,
        drop_last=True,
    )

    if predict:
        # Collect rows in a plain list and build the DataFrame once at the
        # end.  The original per-row pd.concat was O(n^2) and, because every
        # one-row frame carried index 0, produced a DataFrame whose index was
        # 0 for all rows (the commented-out append(..., ignore_index=True)
        # shows the intended behavior).
        rows = []
        for batch_id, data in enumerate(test_loader()):
            scr = data
            output = model(scr, paddle.zeros([pred_batch_size, max_len]).astype('int64'))
            for i in range(len(output)):
                input_smile = test_dataset.get_vocabs(scr[i])
                # Greedy decode: take the argmax token at each position, then
                # map token ids back to vocabulary strings.
                pre_result = np.argmax(output[i], axis=-1)
                pre_result = test_dataset.get_vocabs(pre_result)
                # Strip special tokens and spacing from the decoded strings.
                input_smile = input_smile.replace('<eos>', '')
                pre_result = pre_result.replace('<eos>', '')
                pre_result = pre_result.replace('<bos>', '')
                pre_result = pre_result.replace(' ', '')
                rows.append({'smiles': input_smile, 'pred': pre_result})
        pred_df = pd.DataFrame(rows, columns=['smiles', 'pred'])
        pred_output_file = pred_output_file_path + str(epoch_num) + '_pred.csv'
        print('saving predict result : ' + pred_output_file)
        pred_df.to_csv(pred_output_file)
    else:
        # Sanity-check mode: decode and print exactly one batch, then stop.
        for batch_id, data in enumerate(test_loader()):
            scr = data
            output = model(scr, paddle.zeros([pred_batch_size, max_len]).astype('int64'))
            for i in range(len(output)):
                input_smile = test_dataset.get_vocabs(scr[i])
                pre_result = np.argmax(output[i], axis=-1)
                pre_result = test_dataset.get_vocabs(pre_result)
                print('input smile num : %s,output label : %s' % (input_smile, pre_result))
            break
    # Removed dead code: the original re-created test_dataset/test_loader here
    # into locals that were never used before the function returned.