package com.sh.data.engine.domain.integration.online.service;

import com.alibaba.fastjson.JSONObject;
import com.sh.data.engine.common.enumDefinition.DSType;
import com.sh.data.engine.common.exception.BusinessException;
import com.sh.data.engine.common.util.FlinkSqlUtil;
import com.sh.data.engine.domain.integration.datasource.model.domain.DataSourceDetailDomain;
import com.sh.data.engine.domain.integration.datasource.service.DataSourceService;
import com.sh.data.engine.domain.integration.online.model.FlinkConstant;
import com.sh.data.engine.domain.integration.online.model.domain.TableConfigDomain;
import com.sh.data.engine.domain.integration.online.model.domain.TableConfigDomain.FieldInfo;
import com.sh.data.engine.repository.dao.integration.online.entity.FlinkDataSyncTaskEntity;
import com.sh.data.engine.repository.dao.integration.online.entity.FlinkDataSyncTaskEntity.FieldMapping;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.util.CollectionUtils;

import java.util.List;
import java.util.Objects;
import java.util.StringJoiner;
import java.util.stream.Collectors;

/**
 * @author: zigui.zdf
 * @description:
 * @date: 2021/7/13 11:35
 */
@Service
@Slf4j
public class FlinkDataSyncExecService {

    @Autowired
    private DataSourceService dataSourceService;

    // -------------------------------------flinksql--------------------------------------------------//

    /**
     * Entry point for table DDL generation: when the table is a CDC source the
     * CDC-specific builder handles every supported type; otherwise the builder
     * is chosen per data-source type (JDBC, Hive, Kafka, HBase).
     */
    public String buildFlinkSqlTable(TableConfigDomain tableConfigDomain) {
        DSType dsType = DSType.valueOf(tableConfigDomain.getDsType());

        boolean cdcSource = tableConfigDomain.getIsSource() && tableConfigDomain.getUseCdc();
        if (cdcSource) {
            switch (dsType) {
                case MySQL:
                case PostgreSQL:
                case Oracle:
                case SQLServer:
                case Mongodb:
                case Kafka:
                case TiDB:
                    return buildCdcSourceFlinkSqlTable(tableConfigDomain);
                default:
                    throw new UnsupportedOperationException("cdc 源表不支持该数据源种类:" + dsType);
            }
        }

        // Non-CDC source, or a target table.
        switch (dsType) {
            case MySQL:
            case PostgreSQL:
            case Oracle:
            case SQLServer:
            case TiDB:
                return buildJdbcFlinkSqlTable(tableConfigDomain);
            case Hive:
                return buildHiveFlinkSqlTable(tableConfigDomain);
            case Kafka:
                return buildKafkaFlinkSqlTable(tableConfigDomain);
            case HBase:
                return buildHbaseFlinkSqlTable(tableConfigDomain);
            default:
                throw new UnsupportedOperationException("不支持的数据库种类:" + dsType);
        }
    }

    /**
     * Builds the Flink SQL CREATE TABLE DDL for a CDC source table.
     *
     * <p>The Flink-side table is named "&lt;table&gt;_source". Column types are
     * converted via {@link FlinkSqlUtil#convertDbToFlink}; primary-key columns
     * get a "primary key (...) not enforced" clause. Connector properties are
     * then emitted per data-source type.
     */
    private String buildCdcSourceFlinkSqlTable(TableConfigDomain tableConfigDomain) {
        Long dsId = tableConfigDomain.getDsId();
        DSType dsType = DSType.valueOf(tableConfigDomain.getDsType());
        String tableName = tableConfigDomain.getTableName();
        String schema = tableConfigDomain.getSchema();
        // Qualify the table name with its schema when not already qualified.
        if (!tableName.contains(".") && StringUtils.isNotBlank(schema)) {
            tableName = schema + "." + tableName;
        }
        List<FieldInfo> fieldInfoList = tableConfigDomain.getFieldInfoList();

        DataSourceDetailDomain datasource = dataSourceService.getDataSourceDetailById(dsId, false);

        StringBuilder tableBuilder = new StringBuilder();

        // Accumulates the "primary key (`id`) not enforced" clause(s).
        StringBuilder notEnforcedBuilder = new StringBuilder();

        switch (dsType) {
            // Oracle/PostgreSQL/SQLServer table names carry a schema prefix;
            // only the bare table part is used for the Flink table name.
            case Oracle:
            case PostgreSQL:
            case SQLServer:
                tableBuilder
                    .append("create table ")
                    .append("`")
                    .append(tableName.split("\\.")[1] + "_source")
                    .append("`")
                    .append(" ( ");
                break;
            default:
                tableBuilder
                    .append("create table ")
                    .append("`")
                    .append(tableName + "_source")
                    .append("`")
                    .append(" ( ");
        }

        // Column definitions.
        for (FieldInfo fieldInfo : fieldInfoList) {
            tableBuilder
                .append("`")
                .append(fieldInfo.getName())
                .append("`")
                .append(" ")
                .append(FlinkSqlUtil.convertDbToFlink(tableConfigDomain.getDsType(), fieldInfo.getType()))
                .append(",");

            // Primary-key columns additionally get a not-enforced PK clause.
            if (fieldInfo.getIsPk()) {
                notEnforcedBuilder
                    .append("primary key (")
                    .append("`")
                    .append(fieldInfo.getName())
                    .append("`")
                    .append(") not enforced,");
            }
        }
        tableBuilder.append(notEnforcedBuilder);
        // Drop the trailing comma left by the last column / PK clause.
        tableBuilder.deleteCharAt(tableBuilder.length() - 1);
        String startUpMode = "";
        if (Objects.nonNull(tableConfigDomain.getStart())) {
            // start == 0: snapshot then changelog ("initial"); otherwise only new changes.
            startUpMode = tableConfigDomain.getStart() == 0 ? "initial" : "latest-offset";
        }
        // Connector properties.
        tableBuilder.append(" ) WITH ( ");
        // Per-type CDC connector.
        switch (dsType) {
            case MySQL:
                tableBuilder.append("'connector'='mysql-cdc',");
                // NOTE(review): 'server-id' is deliberately not set; the mysql-cdc
                // docs recommend a distinct server-id (range) per job.
                // TODO verify whether one must be configured here.
                if (StringUtils.isNotBlank(startUpMode)) {
                    tableBuilder.append("'scan.startup.mode'='").append(startUpMode).append("',");
                }
                break;
            case PostgreSQL:
                tableBuilder.append("'connector'='postgres-cdc',");
                // NOTE(review): a per-table 'slot.name' is recommended to avoid
                // "replication slot ... is active for PID" errors, but setting it
                // here reportedly failed on Flink 1.14; a fixed 'slot.name'='flink'
                // is appended further below. TODO re-verify.
                break;
            case Oracle:
                tableBuilder.append("'connector'='oracle-cdc',");
                if (StringUtils.isNotBlank(startUpMode)) {
                    tableBuilder.append("'scan.startup.mode'='").append(startUpMode).append("',");
                }
                break;
            case Mongodb:
                tableBuilder.append("'connector'='mongodb-cdc',");
                break;
            case SQLServer:
                tableBuilder.append("'connector'='sqlserver-cdc',");
                if (StringUtils.isNotBlank(startUpMode)) {
                    tableBuilder.append("'scan.startup.mode'='").append(startUpMode).append("',");
                }
                break;
            case TiDB:
                tableBuilder.append("'connector'='tidb-cdc',");
                if (StringUtils.isNotBlank(startUpMode)) {
                    tableBuilder.append("'scan.startup.mode'='").append(startUpMode).append("',");
                }
                break;
            case Kafka:
                tableBuilder.append("'connector'='kafka',");
                // BUGFIX: this case previously fell through into default and always
                // threw, even though Kafka is accepted for CDC sources by the router.
                break;
            default:
                throw new UnsupportedOperationException("cdc 源表不支持该数据源种类:" + dsType);
        }

        switch (dsType) {
            // Intentional fall-through: MySQL/PostgreSQL/Oracle/SQLServer emit
            // hostname/port, then share TiDB's database/username/password block.
            case MySQL:
            case PostgreSQL:
            case Oracle:
            case SQLServer:
                tableBuilder
                    .append("'hostname'='")
                    .append(datasource.getRdbmsConfig().getServer())
                    .append("',");
                tableBuilder.append("'port'='").append(datasource.getRdbmsConfig().getPort()).append("',");
            case TiDB:
                tableBuilder.append("'database-name'='").append(datasource.getDbName()).append("',");
                tableBuilder.append("'username'='").append(datasource.getUsername()).append("',");
                tableBuilder.append("'password'='").append(datasource.getPassword()).append("',");

                // Oracle and PostgreSQL additionally need an explicit schema.
                switch (dsType) {
                    case PostgreSQL:
                        tableBuilder.append("'schema-name'='").append(tableName.split("\\.")[0]).append("',");
                        tableBuilder.append("'slot.name'='flink',");
                        tableBuilder.append("'decoding.plugin.name'='pgoutput',");
                        tableBuilder.append("'scan.incremental.snapshot.enabled'='true',");
                        break;
                    // The Oracle table name carries its schema prefix; the user
                    // name is NOT a reliable schema name.
                    case Oracle:
                        tableBuilder.append("'url'='").append(datasource.getDsLink()).append("',");
                        tableBuilder.append("'schema-name'='").append(tableName.split("\\.")[0]).append("',");
                        break;
                }

                switch (dsType) {
                    // Oracle/PostgreSQL: use only the bare table part.
                    case Oracle:
                    case PostgreSQL:
                        tableBuilder.append("'table-name'='").append(tableName.split("\\.")[1]).append("'");
                        break;
                    default:
                        tableBuilder.append("'table-name'='").append(tableName).append("'");
                }
                break;
            case Mongodb:
                tableBuilder
                    .append("'hosts'='")
                    .append(
                        datasource.getMongoConfig().getServer()
                            + ":"
                            + datasource.getMongoConfig().getPort())
                    .append("',");
                tableBuilder.append("'username'='").append(datasource.getUsername()).append("',");
                tableBuilder.append("'password'='").append(datasource.getPassword()).append("',");
                tableBuilder.append("'database'='").append(datasource.getDbName()).append("',");
                tableBuilder.append("'collection'='").append(tableName).append("',");
                tableBuilder
                    .append("'connection.options'='")
                    .append("authSource=" + datasource.getMongoConfig().getAuthDbName())
                    .append("'");
                break;
            case Kafka:
                tableBuilder.append("'topic'='").append(tableName).append("',");
                // BUGFIX: "='" was missing after the property name, producing
                // malformed DDL like 'properties.bootstrap.servers'host:9092',
                tableBuilder
                    .append("'properties.bootstrap.servers'='")
                    .append(datasource.getDsLink())
                    .append("',");
                tableBuilder.append("'value.format' = 'debezium-json'");
        }

        tableBuilder.append(");");

        return tableBuilder.toString();
    }

    /**
     * Builds the Flink SQL CREATE TABLE DDL backed by the JDBC connector
     * (non-CDC source tables and relational target tables).
     *
     * <p>A qualified name ("schema.table") is split: the bare table part names
     * the Flink-side table (plus a "_source"/"_target" suffix), while the
     * double-quoted qualified form is used for the connector's 'table-name'.
     */
    private String buildJdbcFlinkSqlTable(TableConfigDomain tableConfigDomain) {

        Long dsId = tableConfigDomain.getDsId();
        DSType dsType = DSType.valueOf(tableConfigDomain.getDsType());
        String tableName = tableConfigDomain.getTableName();
        String tableNameTemp = null;
        if (tableName.contains(".")) {
            String[] split = tableName.split("\\.");
            tableNameTemp = split[1];
            tableName = "\"" + split[0] + "\"." + "\"" + split[1] + "\"";
        }

        if (Objects.isNull(tableNameTemp)) {
            tableNameTemp = tableName;
        }
        List<FieldInfo> fieldInfoList = tableConfigDomain.getFieldInfoList();

        DataSourceDetailDomain dataSource = dataSourceService.getDataSourceDetailById(dsId, false);

        StringBuilder tableBuilder = new StringBuilder();

        // Accumulates the "primary key (`id`) not enforced" clause(s).
        StringBuilder notEnforcedBuilder = new StringBuilder();

        String suffix = tableConfigDomain.getIsSource() ? "_source" : "_target";
        tableBuilder.append("create table `").append(tableNameTemp + suffix).append("` ( ");

        // Column definitions.
        for (FieldInfo fieldInfo : fieldInfoList) {
            tableBuilder
                .append("`")
                .append(fieldInfo.getName())
                .append("`")
                .append(" ")
                .append(FlinkSqlUtil.convertDbToFlink(tableConfigDomain.getDsType(), fieldInfo.getType()))
                .append(",");

            // Primary-key columns additionally get a not-enforced PK clause.
            if (fieldInfo.getIsPk()) {
                notEnforcedBuilder
                    .append("primary key (")
                    .append("`")
                    .append(fieldInfo.getName())
                    .append("`")
                    .append(") not enforced,");
            }
        }
        tableBuilder.append(notEnforcedBuilder);
        // Drop the trailing comma left by the last column / PK clause.
        tableBuilder.deleteCharAt(tableBuilder.length() - 1);

        // Connector properties.
        tableBuilder.append(" ) WITH ( ");
        tableBuilder.append("'connector'='jdbc',");
        tableBuilder.append("'url'='").append(dataSource.getDsLink()).append("',");
        tableBuilder.append("'username'='").append(dataSource.getUsername()).append("',");
        tableBuilder.append("'password'='").append(dataSource.getPassword()).append("',");

        switch (dsType) {
            case TiDB:
            case MySQL:
                tableBuilder.append("'table-name'='").append(tableNameTemp).append("',");
                tableBuilder.append("'driver'='").append("com.mysql.cj.jdbc.Driver").append("'");
                break;
            case PostgreSQL:
                tableBuilder.append("'table-name'='").append(tableName).append("',");
                tableBuilder.append("'driver'='").append("org.postgresql.Driver").append("'");
                break;
            case Oracle:
                tableBuilder.append("'table-name'='").append(tableName).append("',");
                tableBuilder.append("'driver'='").append("oracle.jdbc.driver.OracleDriver").append("'");
                break;
            case SQLServer:
                // BUGFIX: SQLServer is routed here by buildFlinkSqlTable but had no
                // case, leaving a dangling comma and no table-name/driver in the DDL.
                // TODO confirm the quoted "schema"."table" form works with the
                // SQLServer JDBC dialect (QUOTED_IDENTIFIER).
                tableBuilder.append("'table-name'='").append(tableName).append("',");
                tableBuilder
                    .append("'driver'='")
                    .append("com.microsoft.sqlserver.jdbc.SQLServerDriver")
                    .append("'");
                break;
            default:
                // Defensive: fail loudly instead of emitting malformed DDL.
                throw new UnsupportedOperationException("不支持的数据库种类:" + dsType);
        }

        tableBuilder.append(");");

        return tableBuilder.toString();
    }

    /**
     * Builds the Flink SQL statements for a Hive table; currently this is only
     * ever the target table of a CDC-source task.<br>
     * The real Hive table is created elsewhere (offlineTaskServiceImpl.getCreateSql);
     * here a Hive catalog is registered, the SQL dialect is switched to hive, a
     * suffixed table is declared IF NOT EXISTS, and TBLPROPERTIES are set.
     */
    private String buildHiveFlinkSqlTable(TableConfigDomain tableConfigDomain) {

        Long dsId = tableConfigDomain.getDsId();
        DSType dsType = DSType.valueOf(tableConfigDomain.getDsType());
        String tableName = tableConfigDomain.getTableName();
        List<FieldInfo> fieldInfoList = tableConfigDomain.getFieldInfoList();

        DataSourceDetailDomain dataSource = dataSourceService.getDataSourceDetailById(dsId, false);

        StringBuilder tableBuilder = new StringBuilder();

        String catalogName = "hiveCatalog";
        // String dbName = "default";
        String dbName = dataSource.getHiveConfig().getDbName();

        // Register the Hive catalog.
        tableBuilder.append("CREATE CATALOG `").append(catalogName).append("`  WITH (");
        tableBuilder.append("'type'='hive',");
        tableBuilder.append("'default-database' = '").append(dbName).append("',");
        // TODO confirm whether this hive-conf dir is the same value as hive-site-address
        tableBuilder
            .append("'hive-conf-dir'='")
            .append(dataSource.getHiveConfig().getHiveSiteAddress())
            .append("'");
        tableBuilder.append(");");

        // Switch to the Hive SQL dialect for the statements below.
        tableBuilder.append(FlinkConstant.ENGINE_FLINK_SQL_ESCAPE);
        tableBuilder.append("SET table.sql-dialect=hive;");
        tableBuilder.append(FlinkConstant.ENGINE_FLINK_SQL_ESCAPE);

        // Hive table DDL: partition columns go into PARTITIONED BY, not the column list.
        {
            tableBuilder.append(FlinkConstant.ENGINE_FLINK_SQL_ESCAPE);
            tableBuilder.append(" CREATE TABLE IF NOT EXISTS ");
            tableBuilder.append("`").append(catalogName).append("`");
            tableBuilder.append(".");
            tableBuilder.append("`").append(dbName).append("`");
            tableBuilder.append(".");
            String suffix = tableConfigDomain.getIsSource() ? "_source" : "_target";
            tableBuilder.append("`").append(tableName + suffix).append("`");
            tableBuilder.append(" ( ");
            for (FieldInfo fieldInfo : fieldInfoList) {
                // Skip partition fields here; they are emitted in PARTITIONED BY below.
                if (fieldInfo.getIsPartition()) {
                    continue;
                }
                tableBuilder.append("`").append(fieldInfo.getName()).append("`");
                tableBuilder.append(" ");
                tableBuilder.append(fieldInfo.getChangeType()).append(",");
            }
            // Drop the trailing comma left by the last column.
            tableBuilder.deleteCharAt(tableBuilder.length() - 1);
            tableBuilder.append(" ) ");

            // Partition columns.
            tableBuilder.append(" PARTITIONED BY ( ");
            for (FieldInfo fieldInfo : fieldInfoList) {
                // Skip non-partition fields.
                if (!fieldInfo.getIsPartition()) {
                    continue;
                }

                tableBuilder
                    .append("`")
                    .append(fieldInfo.getName())
                    .append("`")
                    .append(" ")
                    .append(fieldInfo.getChangeType())
                    .append(",");
            }
            // Drop the trailing comma; NOTE(review): if there are no partition fields
            // this deletes the "( " space and yields "PARTITIONED BY ()" — presumably
            // a partition field is always configured for Hive targets; confirm.
            tableBuilder.deleteCharAt(tableBuilder.length() - 1);
            tableBuilder.append(")");

            // TBLPROPERTIES
            tableBuilder.append(
                " TBLPROPERTIES('sink.partition-commit.policy.kind'='metastore,success-file');");
            tableBuilder.append(FlinkConstant.ENGINE_FLINK_SQL_ESCAPE);
        }

        // NOTE(review): this ALTER targets `tableName` WITHOUT the _source/_target
        // suffix used by the CREATE above — presumably the real Hive table created by
        // offlineTaskServiceImpl; confirm this mismatch is intentional. Example:
        // ALTER TABLE `hiveCatalog`.`default`.`hive_table_par_mongo` SET TBLPROPERTIES
        // ('sink.partition-commit.policy.kind'='metastore,success-file');
        tableBuilder.append(FlinkConstant.ENGINE_FLINK_SQL_ESCAPE);
        tableBuilder.append("ALTER TABLE ");
        tableBuilder.append("`").append(catalogName).append("`");
        tableBuilder.append(".");
        tableBuilder.append("`").append(dbName).append("`");
        tableBuilder.append(".");
        tableBuilder.append("`").append(tableName).append("`");
        tableBuilder.append(
            " SET TBLPROPERTIES ('sink.partition-commit.policy.kind'='metastore,success-file');");
        tableBuilder.append(FlinkConstant.ENGINE_FLINK_SQL_ESCAPE);

        // Restore the default dialect.
        tableBuilder.append(FlinkConstant.ENGINE_FLINK_SQL_ESCAPE);
        tableBuilder.append("SET table.sql-dialect=default;");
        tableBuilder.append(FlinkConstant.ENGINE_FLINK_SQL_ESCAPE);

        return tableBuilder.toString();
    }

    /**
     * Placeholder for generating the real (non-Flink) Hive CREATE TABLE sql.
     * NOTE(review): unimplemented stub — per the comment on buildHiveFlinkSqlTable,
     * the real table is currently created via offlineTaskServiceImpl.getCreateSql;
     * implement this or remove it.
     */
    private void getCreateHiveTableSql() {
    }

    /**
     * Builds the Flink SQL DDL for a Kafka table. In the current design a Kafka
     * source is handled by flinkx, so this is only ever the TARGET table of a
     * CDC-source task: the upsert-kafka connector is used, which requires a
     * primary key and a 'key.format', and the columns are taken directly from
     * the source table (no field mapping needed).
     */
    private String buildKafkaFlinkSqlTable(TableConfigDomain tableConfigDomain) {
        Long dsId = tableConfigDomain.getDsId();
        // valueOf also validates that the configured type string is a known DSType.
        DSType dsType = DSType.valueOf(tableConfigDomain.getDsType());
        String tableName = tableConfigDomain.getTableName();
        List<FieldInfo> fieldInfoList = tableConfigDomain.getFieldInfoList();
        DataSourceDetailDomain dataSource = dataSourceService.getDataSourceDetailById(dsId, false);

        String suffix = tableConfigDomain.getIsSource() ? "_source" : "_target";

        StringBuilder ddl = new StringBuilder();
        ddl.append("create table ").append("`").append(tableName + suffix).append("`").append(" ( ");

        // Accumulates the "primary key (`id`) not enforced" clause(s).
        StringBuilder pkClause = new StringBuilder();
        for (FieldInfo field : fieldInfoList) {
            ddl.append("`").append(field.getName()).append("`");
            ddl.append(" ").append(field.getChangeType()).append(",");
            if (field.getIsPk()) {
                pkClause
                    .append("primary key (")
                    .append("`")
                    .append(field.getName())
                    .append("`")
                    .append(") not enforced,");
            }
        }
        ddl.append(pkClause);
        // Drop the trailing comma left by the last column / PK clause.
        ddl.deleteCharAt(ddl.length() - 1);

        // Connector properties — only the CDC-target (upsert) case exists today.
        ddl.append(" ) WITH ( ");
        ddl.append("'connector'='upsert-kafka',");
        ddl.append("'topic'='").append(tableConfigDomain.getTableName()).append("',");
        ddl.append("'properties.bootstrap.servers'='").append(dataSource.getDsLink()).append("',");
        ddl.append("'key.format'='json',");
        ddl.append("'value.format'='json',");
        ddl.append("'value.json.fail-on-missing-field'='false',");
        ddl.append("'value.json.ignore-parse-errors'='true'");
        ddl.append(");");

        return ddl.toString();
    }

    /**
     * Builds the Flink SQL CREATE TABLE DDL for an HBase target table: the
     * first primary-key field becomes the row key (STRING) and all remaining
     * fields are packed into a single "detailInfo" ROW of STRING columns.
     *
     * @throws BusinessException when the HBase connection config lacks
     *     hbase.rootdir / hbase.zookeeper.quorum, or no field is a primary key
     */
    private String buildHbaseFlinkSqlTable(TableConfigDomain tableConfigDomain) {
        Long dsId = tableConfigDomain.getDsId();
        String tableName = tableConfigDomain.getTableName();
        List<FieldInfo> fieldInfoList = tableConfigDomain.getFieldInfoList();
        DataSourceDetailDomain dataSource = dataSourceService.getDataSourceDetailById(dsId, false);
        String connectionProperty = dataSource.getConnectionProperty();
        JSONObject hbaseConfig = JSONObject.parseObject(connectionProperty);
        Object rootdir = hbaseConfig.get("hbase.rootdir");
        Object quorm = hbaseConfig.get("hbase.zookeeper.quorum");
        if (Objects.isNull(rootdir) || Objects.isNull(quorm)) {
            throw new BusinessException("Hbase配置有误，缺少hbase.rootdir或hbase.zookeeper.quorum信息");
        }

        StringBuilder tableBuilder = new StringBuilder();

        String suffix = tableConfigDomain.getIsSource() ? "_source" : "_target";
        tableBuilder
            .append("create table ")
            .append("`")
            .append(tableName + suffix)
            .append("`")
            .append(" ( ");

        List<FieldInfo> pk =
            fieldInfoList.stream().filter(i -> i.getIsPk()).collect(Collectors.toList());
        List<FieldInfo> fields =
            fieldInfoList.stream().filter(i -> !i.getIsPk()).collect(Collectors.toList());
        if (CollectionUtils.isEmpty(pk)) {
            throw new BusinessException("目标表为Hbase时，源表写入字段必须存在主键");
        }
        FieldInfo pkInfo = pk.get(0);
        tableBuilder.append(pkInfo.getName()).append(" STRING,").append("detailInfo ROW(");
        fields.forEach(
            field -> {
                tableBuilder.append(field.getName()).append(" STRING,");
            });
        // Drop the trailing comma left by the last ROW field.
        tableBuilder.deleteCharAt(tableBuilder.length() - 1);
        // TODO distinguish hbase-1.4 / hbase-2.2 connector versions
        tableBuilder
            // BUGFIX: two closing parens are needed here — one for ROW(...) and one
            // for the column list opened at " ( "; previously only ROW was closed.
            .append(")) WITH (")
            .append("'connector.type' = 'hbase-2.2',")
            .append("'connector.table-name' = '")
            .append(tableName)
            // BUGFIX: the comma separating table-name from zookeeper.quorum was
            // missing, producing '...''connector.zookeeper.quorum'...
            .append("',")
            .append("'connector.zookeeper.quorum' = '")
            .append(quorm)
            .append("'")
            .append(");");
        return tableBuilder.toString();
    }

    /**
     * Builds the transform DML, e.g.
     * "insert into target (a) select d from source".
     *
     * <p>For an HBase target the first primary-key source field becomes the row
     * key and the remaining source fields are wrapped into ROW(...) as
     * detailInfo, mirroring the schema produced by buildHbaseFlinkSqlTable.
     *
     * @throws BusinessException when the target is HBase and no source field is a primary key
     */
    public String buildTransformSql(FlinkDataSyncTaskEntity tableMapping) {
        // Oracle table names carry a schema prefix; keep only the table part.
        String sourceTableName = tableMapping.getSourceTableOrTopic();
        if (sourceTableName.contains(".")) {
            sourceTableName = sourceTableName.split("\\.")[1];
        }

        String targetTableName = tableMapping.getTargetTableOrTopic();
        if (targetTableName.contains(".")) {
            targetTableName = targetTableName.split("\\.")[1];
        }

        StringBuilder stringBuilder = new StringBuilder();
        stringBuilder.append("insert into `").append(targetTableName + "_target").append("`");

        if (DSType.from(tableMapping.getTargetDsType()).equals(DSType.HBase)) {
            List<FieldMapping> fildsMappingPk =
                tableMapping.getFieldMappings().stream()
                    .filter(i -> i.isSourceFieldPk())
                    .collect(Collectors.toList());
            if (CollectionUtils.isEmpty(fildsMappingPk)) {
                throw new BusinessException("来源表同步字段无主键，无法进行实时同步");
            }
            // BUGFIX: previously EVERY mapping (pk included) was added to the field
            // list up front and the non-pk mappings were then added AGAIN, so
            // ROW(...) contained the row key plus duplicated columns. Only the
            // non-pk source fields belong in the detailInfo ROW.
            StringJoiner rowFields = new StringJoiner(",");
            for (FieldMapping fieldMapping : tableMapping.getFieldMappings()) {
                if (!fieldMapping.isSourceFieldPk()) {
                    rowFields.add("`" + fieldMapping.getSourceField() + "`");
                }
            }
            FieldMapping pk = fildsMappingPk.get(0);
            stringBuilder
                .append(" select ")
                .append(pk.getSourceField())
                .append(",")
                .append("ROW(")
                .append(rowFields)
                .append(")")
                .append(" as detailInfo")
                .append(" from ");
        } else {
            StringJoiner sourceFields = new StringJoiner(",");
            StringJoiner targetFields = new StringJoiner(",");
            for (FieldMapping fieldMapping : tableMapping.getFieldMappings()) {
                sourceFields.add("`" + fieldMapping.getSourceField() + "`");
                targetFields.add("`" + fieldMapping.getTargetField() + "`");
            }
            stringBuilder.append(" (").append(targetFields).append(") ");
            stringBuilder.append(" select ").append(sourceFields).append("  from ");
        }
        stringBuilder.append("`").append(sourceTableName + "_source").append("`");
        // A where-clause hook (tableConfigDomainSource.getWhereClause()) used to
        // live here; re-add filtering once the source where clause is wired through.

        stringBuilder.append(";");

        return stringBuilder.toString();
    }

    // --------------------------------------flinksql-----------------------------------------------//

    /**
     * Ad-hoc smoke check: verifies that a schema-qualified name is detected.
     */
    public static void main(String[] args) {
        String tablename = "public.test";
        // BUGFIX: String.contains takes a literal, not a regex — "\\." searched
        // for a backslash followed by a dot and always printed false.
        boolean contains = tablename.contains(".");
        System.out.println(contains);
    }
}
