package com.navi.service.impl;

import com.alibaba.fastjson.JSONObject;
import com.navi.entity.ColumnEntity;
import com.navi.entity.TableEntity;
import com.navi.service.IGploaderService;
import com.navi.util.LogUtil;
import com.navi.util.TableJdbcUtil;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import redis.clients.jedis.Jedis;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.sql.*;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

import static com.navi.constant.GenericDef.*;


/**
 * Created by lin.xin on 2017/10/25.
 */
@Service
public class GploaderService implements IGploaderService {

    // Schema that holds the temporary "stage" tables used during load/merge.
    @Value("${stagetable.schema}")
    private String stageTableSchema;

    // Local directory where the "^"-delimited data files are written before
    // being served to Greenplum via gpfdist.
    @Value("${local.csv.path}")
    private String localCsvPath;
    // Host/port of the gpfdist daemon that serves the external-table files.
    @Value("${gpfdist.host}")
    private String gpfdistHost;
    @Value("${gpfdist.port}")
    private String gpfdistPort;
    // Schema that holds the Greenplum external tables.
    @Value("${external.table.schema}")
    private String extTableSchema;
    // Comma-separated table-name lists that select the special merge branches
    // in mergeProc (ope_his / eda pds / eda mds families).
    @Value("${app.conf.table.sht_ope_his}")
    private String shtOpeHisTables;
    @Value("${app.conf.table.eda_pds_glass_summary}")
    private String edaPdsGlassSummary;
    @Value("${app.conf.table.eda_mds_glass_summary}")
    private String edaMdsGlassSummary;

    // Collaborating services: external-table DDL, stage-table DML, table metadata.
    @Autowired
    private ExtTableOperationService extTableOperationService;
    @Autowired
    private StageTableOperationService stageTableOperationService;
    @Autowired
    private TableMetaService tableMetaService;


    // Instance logger; non-static so the runtime class name is picked up.
    private Logger logger = LoggerFactory.getLogger(this.getClass());


    /**
     * Loads a batch of JSON records into {@code tableSchema.tableName} via a
     * stage table: write the records to a local file, expose them through an
     * external table, copy external -> stage, then insert stage -> target.
     *
     * @param connection            open JDBC connection (caller owns it; not closed here)
     * @param tableSchema           schema of the target table
     * @param tableName             target table name
     * @param stageTableName        stage table name; when empty a default
     *                              "templates11_&lt;tableName&gt;" name is used
     * @param extTableName          external table name
     * @param jsonObjectList        records to load
     * @param stageAlreadyCreateFlg when {@code true} the file/external/stage
     *                              preparation steps are skipped and only the
     *                              final stage -> target insert runs
     * @return number of rows inserted into the target table
     * @throws SQLException on any database error
     */
    @Override
    public int insertProc(Connection connection, String tableSchema, String tableName, String stageTableName, String extTableName, List<JSONObject> jsonObjectList, boolean stageAlreadyCreateFlg) throws SQLException {
        // try-with-resources guarantees the Statement is closed even on failure;
        // the old catch-and-rethrow added nothing and is removed.
        try (Statement statement = connection.createStatement()) {
            if (StringUtils.isEmpty(stageTableName)) {
                stageTableName = String.format("templates11_%s", tableName);
            }
            final TableEntity tableEntity = tableMetaService.getTableMetaInfo(tableSchema, tableName, statement);

            if (!stageAlreadyCreateFlg) {
                writeFile(jsonObjectList, tableEntity);
                extTableOperationService.createExtTable(statement, extTableName, tableEntity);
                stageTableOperationService.createStageTable(statement, stageTableName, tableEntity);
                stageTableOperationService.doInsertStageTable(statement, extTableName, stageTableName, tableEntity, jsonObjectList);
            }

            return doInsert(statement, stageTableSchema, stageTableName, tableSchema, tableName, tableEntity);
        }
    }


    /**
     * Inserts every row of the stage table into the qualified target table:
     * {@code insert into <schema>.<table>(cols) (select cols from <stageSchema>.<stageTable>)}.
     *
     * @param statement        statement used to run the insert
     * @param stageTableSchema schema of the stage table
     * @param stageTableName   stage table name
     * @param tableSchema      target table schema
     * @param tableName        target table name
     * @param tableEntity      column metadata of the target table
     * @return number of rows inserted
     * @throws SQLException on any database error
     */
    private int doInsert(Statement statement, String stageTableSchema, String stageTableName, String tableSchema, String tableName, TableEntity tableEntity) throws SQLException {
        String columnString = tableEntity.getColumnEntityList().stream().map(ColumnEntity::getColumnName).collect(Collectors.joining(","));
        // StringBuilder instead of StringBuffer: this builder is method-local,
        // so no synchronization is needed. NOTE: identifiers come from trusted
        // config/metadata, not user input; they cannot be bound as JDBC parameters.
        StringBuilder sqlBuffer = new StringBuilder("insert into ").append(tableSchema).append(".").append(tableName);
        sqlBuffer.append("(").append(columnString).append(")");
        sqlBuffer.append("(select ").append(columnString).append(" from ").append(stageTableSchema).append(".").append(stageTableName).append(")");

        final String sql = sqlBuffer.toString();
        logger.info("insert into qualified table, sql :{} ", sql);

        return statement.executeUpdate(sql);
    }


    /**
     * Merge logic:
     *  1. write the records to a local txt file
     *  2. create the external table over that file
     *  3. create the stage table
     *  4. insert external -> stage (the stage table acts as the "from" table)
     *  5. update into_table set into_table.column_name = from_table.column_name, ...
     *     from from_table where into_table.column_name = from_table.column_name and ...
     *  6. insert into into_table (select * from stage_table where records not exists in into_table)
     * Several table families (sht_ope_his / chamber / mds glass / eda) take
     * special branches below instead of the generic update+insert path.
     *
     * @param connection     open JDBC connection (caller owns it; not closed here)
     * @param tableSchema    schema of the target table
     * @param tableName      target table name
     * @param stageTableName stage table name
     * @param extTableName   external table name
     * @param jsonObjectList records to merge
     * @param limit_time     time bound forwarded to the generic update step
     * @param deleteInsert   when equal to TRUE the delete+insert fast path is taken
     * @return number of rows effectively written to the target table
     * @throws SQLException on any database error
     */
    @Override
    public int mergeProc(Connection connection, String tableSchema, String tableName,
                         String stageTableName, String extTableName, List<JSONObject> jsonObjectList,
                         String limit_time,
                         String deleteInsert) throws SQLException {
        final int messageCnt = jsonObjectList.size();
        final String messageCntLog = "message count: " + messageCnt;

        long beginTime = System.currentTimeMillis();

        //create stage table
        Statement statement = connection.createStatement();
        try {
            beginTime = LogUtil.printCost(LOG_MERGE_PROC_CS, tableName, messageCntLog, beginTime);

            final TableEntity tableEntity = tableMetaService.getTableMetaInfo(tableSchema, tableName, statement);
            LogUtil.printPerformance(LOG_MERGE_PROC_GET_MATA, tableName, -1, beginTime);
            beginTime = LogUtil.printCost(LOG_MERGE_PROC_GET_MATA, tableName, messageCntLog, beginTime);

            // serialize the batch into the delimiter-separated file read by the external table
            writeFile(jsonObjectList, tableEntity);
            LogUtil.printPerformance(LOG_MERGE_PROC_WF, tableName, jsonObjectList.size(), beginTime);
            beginTime = LogUtil.printCost(LOG_MERGE_PROC_WF, tableName, messageCntLog, beginTime);

            extTableOperationService.createExtTable(statement, extTableName, tableEntity);
            LogUtil.printPerformance(LOG_MERGE_PROC_EXT, tableName, -1, beginTime);
            beginTime = LogUtil.printCost(LOG_MERGE_PROC_EXT, tableName, messageCntLog, beginTime);

            stageTableOperationService.createStageTable(statement, stageTableName, tableEntity);
            LogUtil.printPerformance(LOG_MERGE_PROC_STAGE, tableName, -1, beginTime);
            beginTime = LogUtil.printCost(LOG_MERGE_PROC_STAGE, tableName, messageCntLog, beginTime);

            //insert into stage table
            int stageTableRows = stageTableOperationService.doInsertStageTable(statement, extTableName, stageTableName, tableEntity, jsonObjectList);
            LogUtil.printPerformance(LOG_MERGE_PROC_INSERT_STAGE, tableName, stageTableRows, beginTime);
            beginTime = LogUtil.printCost(LOG_MERGE_PROC_INSERT_STAGE, tableName, "Ext [" + messageCnt + "] rows -> stage [" + stageTableRows + "] rows.", beginTime);


            // real_flg logic:
            // for the WPP_SHT_OPE_HIS_TABLE family (and chamber/mds-glass tables)
            // extra stage-table processing is applied before the merge.
            boolean newOpeHis=Arrays.asList((WPP_SHT_OPE_HIS_TABLE).split(",")).stream().filter(element -> stageTableName.contains(element)).count() > 0;
            boolean chamberHis=Arrays.asList((CHAMBER_HST_TABLE).split(",")).stream().filter(element -> stageTableName.contains(element)).count() > 0;
            boolean mdsGlass=Arrays.asList((MDS_GLASS_TABLE).split(",")).stream().filter(element -> stageTableName.contains(element)&&!stageTableName.contains("summary")).count() > 0;

            if(newOpeHis||chamberHis||mdsGlass){
                // copy the matching rows from the target table into the stage table
                int targetTostageTableRows =stageTableOperationService.doInsertStageTableFromTarget( statement, stageTableName, tableSchema, tableName,  tableEntity);
                LogUtil.printPerformance(LOG_MERGE_PROC_INSERT_STAGE, tableName, targetTostageTableRows, beginTime);
                beginTime = LogUtil.printCost(LOG_MERGE_PROC_INSERT_STAGE, tableName, "targe [" + messageCnt + "] rows -> stage [" + targetTostageTableRows + "] rows.", beginTime);
                //update stage table
                int updateStageTableRows =stageTableOperationService.updateStageTable(statement, stageTableName, tableEntity);
                LogUtil.printPerformance(LOG_MERGE_PROC_INSERT_STAGE, tableName, updateStageTableRows, beginTime);
                beginTime = LogUtil.printCost(LOG_MERGE_PROC_INSERT_STAGE, tableName, "stage [" + messageCnt + "] rows -> stage [" + updateStageTableRows + "] rows.", beginTime);
            }


            /**
             * This branch requires the table to have the columns evt_Timestamp & ope_id,
             * i.e. it is tailored for the defect tables.
             * Replaying the ERROR queue is not started per-queue, so that flow
             * must not enter this branch.
             * TODO: drive the 'deleteInsert' flag per table from redis config
             * instead of relying on a startup parameter.
             */
            if (TRUE.equals(deleteInsert)) {
                // [delete count , insert count]
//            int[] processCount = stageData2TargetTable(statement,
//                    stageTableSchema, stageTableName, tableSchema, tableName, tableEntity, stageTableRows);

                int[] processCount = stageTableOperationService.stageData2TargetTableFast(statement,
                        stageTableSchema, stageTableName, tableSchema, tableName, tableEntity);

                LogUtil.printPerformance(LOG_MERGE_PROC_DELETE_INSERT, tableName, jsonObjectList.size(), beginTime);
                LogUtil.printCost(LOG_MERGE_PROC_DELETE_INSERT, tableName,
                        messageCntLog + ", Delete [" + processCount[0] + "], insert [" + processCount[1] + "]", beginTime);
                return processCount[1];
            } else {
                // special handling: the mds_glass family does not take this branch
                boolean pds=Arrays.asList(edaPdsGlassSummary.split(",")).stream().filter(ele -> tableName.startsWith(ele)).count() > 0;
                boolean mdsSummaryAndResult=Arrays.asList(edaMdsGlassSummary.split(",")).stream().filter(ele -> tableName.startsWith(ele)&&(tableName.contains("summary")||tableName.contains("result"))).count() > 0;
                if (pds||mdsSummaryAndResult) {
                    int[] processCount = prepareData6OpeHis(statement, stageTableSchema, stageTableName, tableSchema, tableName, tableEntity);

                    LogUtil.printPerformance(LOG_MERGE_PROC_DELETE_INSERT, tableName, jsonObjectList.size(), beginTime);
                    LogUtil.printCost(LOG_MERGE_PROC_DELETE_INSERT, tableName,
                            messageCntLog + ", Delete [" + processCount[0] + "], insert [" + processCount[1] + "]", beginTime);
                    return processCount[1];
                }  else if (shtOpeHisTables.contains(tableName)) {
                    //delete from stage table
                    int deleteStageCount, deleteSorCount, insertSorCount;
                    int[] processCount = new int[3];
                    //if (shtOpeHisTables.contains(tableName)) {
                    processCount = prepareData4OpeHis(statement, stageTableSchema, stageTableName, tableSchema, tableName, tableEntity, stageTableRows);
                    //}else {
                    //    processCount = prepareData5OpeHis(statement, stageTableSchema, stageTableName, tableSchema, tableName, tableEntity, stageTableRows);
                    //}
                    deleteStageCount = processCount[0];
                    deleteSorCount = processCount[1];
                    insertSorCount = processCount[2];
                    LogUtil.printPerformance(LOG_MERGE_PROC_PROCESS_FOR_OPE_HIS, stageTableName, deleteStageCount + deleteSorCount + insertSorCount, beginTime);
                    LogUtil.printCost(LOG_MERGE_PROC_PROCESS_FOR_OPE_HIS, stageTableName,
                            "delete [" + deleteStageCount + "] from stage, delete [" + deleteSorCount + "] from sor, insert [" + insertSorCount + "] to sor.", beginTime);

                    if (deleteStageCount + insertSorCount != stageTableRows) {
                        logger.error("{} stage count {} != delete stage count {} + insert sor count {} .",
                                LOG_MERGE_PROC_PROCESS_FOR_OPE_HIS, stageTableRows, deleteStageCount, insertSorCount);
                        // TODO: the default return value was changed from 0 to -1;
                        // this branch needs the corresponding update as well.
                    }

                    return insertSorCount;
                }else if(newOpeHis||mdsGlass||chamberHis){
                    int[] processCount = stageTableOperationService.stageData2TargetTableForOpeHis(statement,
                            stageTableSchema, stageTableName, tableSchema, tableName, tableEntity);

                    LogUtil.printPerformance(LOG_MERGE_PROC_DELETE_INSERT, tableName, jsonObjectList.size(), beginTime);
                    LogUtil.printCost(LOG_MERGE_PROC_DELETE_INSERT, tableName,
                            messageCntLog + ", Delete [" + processCount[0] + "], insert [" + processCount[1] + "]", beginTime);
                    return processCount[1];
                } else {
                    //insert into into_table
                    final int insertedRows = stageTableOperationService.do_insert_records_not_existed(statement,
                            stageTableSchema, stageTableName, tableSchema, tableName, tableEntity);
                    LogUtil.printPerformance(LOG_MERGE_PROC_INSERT, tableName, insertedRows, beginTime);
                    beginTime = LogUtil.printCost(LOG_MERGE_PROC_INSERT, tableName, messageCntLog, beginTime);

                    // perf: if every message followed the insert path and all
                    // succeeded, the update step can be skipped entirely.
                    if (insertedRows == stageTableRows) {
                        logger.info("{}, 当前所有消息已经insert完成，不再执行update. 消息数量:{} , insert数量 {}", LOG_MERGE_PROC_INSERT, stageTableRows, insertedRows);
                        return insertedRows;
                    }
                    //update into_table
                    // TODO: rows already inserted should first be deleted from the
                    // stage table to shrink the number of rows the update touches.
                    final int updatedRows = stageTableOperationService.doUpdate(statement, stageTableName, tableSchema, tableName, tableEntity, limit_time);
                    LogUtil.printPerformance(LOG_MERGE_PROC_UPDATE, tableName, updatedRows, beginTime);
                    LogUtil.printCost(LOG_MERGE_PROC_UPDATE, tableName, messageCntLog, beginTime);
                    return insertedRows + updatedRows;
                }
            }
        }catch (SQLException e){
            throw e;
        }finally {
            statement.close();
        }
    }


    /**
     * Conditionally deletes stale rows from the stage/sor tables, then inserts
     * everything remaining in the stage table into the sor (target) table.
     * For sht_ope_his tables: the row with the newest logoff_time wins;
     * matching sor rows are deleted before the re-insert so the INSERT gets a
     * fresh db_timestamp (downstream ETL syncs on that column).
     *
     * @param statement        statement used to run all SQL
     * @param stageTableSchema schema of the stage table
     * @param stageTableName   stage table name
     * @param tableSchema      schema of the sor (target) table
     * @param intoTableName    sor (target) table name
     * @param tableEntity      column metadata of the target table
     * @param stageTableRows   number of rows currently in the stage table
     * @return {deleteStageCount, deleteSorCount, insertSorCount}; unexecuted steps stay -1
     * @throws SQLException on any database error
     */
    public int[] prepareData4OpeHis(Statement statement, String stageTableSchema, String stageTableName,
                                    String tableSchema, String intoTableName, TableEntity tableEntity,
                                    int stageTableRows) throws SQLException {

        // deleteStageCount, deleteSorCount, insertSorCount
        int[] processCount = {-1, -1, -1};
        int deleteStageCount, deleteSorCount, insertSorCount;

        // PK match excludes logof_timestamp so rows differing only in it still collide
        final List<ColumnEntity> columnEntityList = tableEntity.getColumnEntityList();
        final List<ColumnEntity> primaryColumnList = columnEntityList.stream().filter(ColumnEntity::getPrimaryKeyFlg)
                .filter(entity -> !"logof_timestamp".equals(entity.getColumnName()))
                .filter(entity -> !"logofTimestamp".equals(entity.getColumnName()))
                .collect(Collectors.toList());

        // delete old data from stage table
        StringBuilder delSqlBuffer = new StringBuilder("delete from ");
        delSqlBuffer.append(stageTableSchema).append(".").append(stageTableName).append(" stage_table using ")
                .append(tableSchema).append(".").append(intoTableName).append(" sor_table ");

        final String onConditionString = TableJdbcUtil.getOnConditionStringByPk(primaryColumnList, "stage_table", "sor_table");
        delSqlBuffer.append(" where ").append(onConditionString)
                .append(" and stage_table.").append("logof_timestamp < ")
                .append(" sor_table.logof_timestamp ");

        String delSql = delSqlBuffer.toString();
        logger.info("do delete from stage table, sql: {}", delSql);
        deleteStageCount = statement.executeUpdate(delSql);
        logger.info("delete records from stage table rows {}", deleteStageCount);

        processCount[0] = deleteStageCount;
        if (deleteStageCount == stageTableRows) {
            logger.info("All message in stage table {} is delete，pass delete from sor table. stageTableRows:{} ",
                    stageTableName, stageTableRows);
            return processCount;
        }

        // delete same PK row from sor table
        // deleting from sor ensures the following INSERT carries a fresh
        // db_timestamp, which other ETL jobs rely on to pick up new data
        String delSorSql = "delete from " + tableSchema + "." + intoTableName + " sor_table using " +
                stageTableSchema + "." + stageTableName + " stage_table " +
                " where " + onConditionString;
        logger.info("do delete from sor table, sql: {}", delSorSql);
        deleteSorCount = statement.executeUpdate(delSorSql);
        logger.info("delete records from sor table rows {}", deleteSorCount);
        processCount[1] = deleteSorCount;

        //insert all row
        final String columnString = TableJdbcUtil.getColumnString(columnEntityList);
        insertSorCount = stageTableOperationService.insertStage2TargetTable(statement, stageTableSchema, stageTableName, tableSchema, intoTableName, columnString);
        processCount[2] = insertSorCount;

        return processCount;
    }


    /**
     * Special-case merge for the EDA queues (eda_pds_glass_summary,
     * da_pds_result, eda_result, edc_data) — logic change requested in the
     * 2018-09-26 afternoon meeting (Xu Fan).
     * Strategy: optimistically insert all stage rows into the target; if that
     * hits a unique-key violation, delete the conflicting target rows and
     * retry the insert exactly once. Any other SQL error propagates.
     *
     * @return {deleteRows, insertRows}; deleteRows stays -1 when no conflict occurred
     */
    public int[] prepareData6OpeHis(Statement statement, String stageTableSchema, String stageTableName,
                                    String tableSchema, String intoTableName, TableEntity tableEntity) throws SQLException {
        final List<ColumnEntity> allColumns = tableEntity.getColumnEntityList();
        final String columnString = TableJdbcUtil.getColumnString(allColumns);
        final List<ColumnEntity> pkColumns = allColumns.stream()
                .filter(ColumnEntity::getPrimaryKeyFlg)
                .collect(Collectors.toList());
        // collected up front: needed to locate conflicting target rows on retry
        final Map<String, String> stageInfoMap =
                stageTableOperationService.edaCollectStageTableDataInfo(statement, intoTableName, stageTableSchema, stageTableName);

        int deletedRows = -1;
        int insertedRows;
        try {
            // optimistic path: insert every stage row
            insertedRows = stageTableOperationService.insertStage2TargetTable(statement, stageTableSchema, stageTableName, tableSchema, intoTableName, columnString);
        } catch (SQLException e) {
            if (!SQL_STATE_UNIQUE_VIOLATION.equals(e.getSQLState())) {
                throw e;
            }
            // conflict path: remove duplicates from the target, then retry once
            deletedRows = stageTableOperationService.deleteEdaTargetTableByStage(statement, stageTableSchema, stageTableName, tableSchema, intoTableName, stageInfoMap, pkColumns);
            insertedRows = stageTableOperationService.insertStage2TargetTable(statement, stageTableSchema, stageTableName, tableSchema, intoTableName, columnString);
        }
        return new int[]{deletedRows, insertedRows};
    }

    /**
     * Column-update flow (e.g. real_flg):
     * <p>
     * 1. sync the update messages into a stage table
     * 2. join-delete the matching rows from the target table
     * 3. insert the stage rows back into the target table
     * <p>
     * Example message:
     * {
     * "update_flg" : "true",
     * "ope_tbl_name": "wpp_fdefect_f",
     * "query_conditions" : {
     * "glass_id" :"C182Q00PAM",
     * "ope_id" :"C5963",
     * "jge_cnt" :"2",
     * "evt_timestamp" :"2018-03-25 10:11:16"
     * },
     * "update_columns":{
     * "etl_timestamp" : "2018-03-31 10:43:38.0",
     * "real_flg" : "N"
     * }
     * }
     *
     * @param connection     open JDBC connection (caller owns it; not closed here)
     * @param jedis          redis client passed through to the stage-table update
     * @param updateList     update messages (see example above)
     * @param tableSchema    schema of the target table
     * @param tableName      target table name
     * @param stageTableName stage table name
     * @return number of rows affected in the target table
     * @throws SQLException on any database error
     */
    @Override
    public int updateProc(Connection connection, Jedis jedis, List<JSONObject> updateList,
                          String tableSchema, String tableName, String stageTableName) throws SQLException {
        long beginTime = System.currentTimeMillis();

        // try-with-resources closes the Statement; the old catch-and-rethrow
        // and the boxed Integer accumulator added nothing.
        try (Statement statement = connection.createStatement()) {
            final TableEntity tableEntity = tableMetaService.getTableMetaInfo(tableSchema, tableName, statement);
            beginTime = LogUtil.printCost(LOG_UPDATE_REAL_FLG_GET_MATA, tableName, " ", beginTime);

            stageTableOperationService.createStageTable(statement, stageTableName, tableEntity);
            beginTime = LogUtil.printCost(LOG_UPDATE_REAL_FLG_STAGE, tableName, " ", beginTime);

            final int effectRows = stageTableOperationService.updateTargetTableByStageTable(jedis, connection, statement, tableSchema, tableName, stageTableName, tableEntity, updateList);
            LogUtil.printCost(LOG_UPDATE_REAL_FLG_DELETE_INSERT, tableName, " ", beginTime);

            return effectRows;
        }
    }

    /**
     * real_flg update flow: each message carries a ready-made SQL statement
     * under the "real_flg" key; execute each one and sum the affected rows.
     *
     * @param connection     open JDBC connection (caller owns it; not closed here)
     * @param jsonObjectList messages, each with a "real_flg" entry holding the SQL text
     * @return total number of rows affected across all statements
     * @throws SQLException on any database error
     */
    @Override
    public int updateProc(Connection connection, List<JSONObject> jsonObjectList) throws SQLException {
        int effectRows = 0;
        // try-with-resources closes the Statement even when a statement fails
        try (Statement statement = connection.createStatement()) {
            for (JSONObject jsonObject : jsonObjectList) {
                // NOTE(security): the payload carries the full SQL text and it is
                // executed verbatim — upstream producers must be trusted; a missing
                // "real_flg" key fails fast with an NPE, as before.
                final String updateSql = jsonObject.get("real_flg").toString();
                logger.info("update sql {}", updateSql);
                effectRows += statement.executeUpdate(updateSql);
            }
            return effectRows;
        }
    }


    /**
     * Serialises the JSON records into the "^"-delimited text file
     * ({@code <localCsvPath>/<tableName>.txt}) read by the external table.
     * Column order follows the table metadata; each value is looked up first
     * by the snake_case column name, then by its camelCase variant.
     * Values are sanitised so they cannot break the loader: "^" (the column
     * delimiter) becomes "~" and "\" becomes "/".
     *
     * @param jsonObjectList records to serialise, one line per record
     * @param tableEntity    column metadata defining name lookup and column order
     * @throws RuntimeException wrapping any IOException from the file write
     */
    private void writeFile(List<JSONObject> jsonObjectList, TableEntity tableEntity) {
        long beginTime = System.currentTimeMillis();
        final List<String> fileContent = new ArrayList<>();
        final List<ColumnEntity> columnEntityList = tableEntity.getColumnEntityList();

        // pre-compute snake_case -> camelCase once per column instead of per row
        Map<String, String> colCameNameMap = new HashMap<>();
        columnEntityList.forEach(c -> colCameNameMap.put(c.getColumnName(), underline2Camel(c.getColumnName(), true)));

        jsonObjectList.forEach(jsonObject -> {
            String line = columnEntityList.stream().map(columnEntity -> {
                String columnName = columnEntity.getColumnName();
                String value = jsonObject.getString(columnName);

                if (StringUtils.isEmpty(value)) {
                    // fall back to the camelCase key
                    String colCamelName = colCameNameMap.get(columnName);
                    if (null == colCamelName) {
                        // defensive: the map is built from this same list, so this should not happen
                        colCamelName = underline2Camel(columnName, true);
                    }
                    value = jsonObject.getString(colCamelName);
                }
                if (StringUtils.isEmpty(value)) {
                    return "";
                }
                // FIX: sanitise BOTH special characters. The previous else-if meant a
                // value containing both "^" and "\" only had the "^" replaced, leaving
                // a backslash that breaks the load.
                return value.replace("^", "~").replace("\\", "/");

            }).collect(Collectors.joining("^"));

            fileContent.add(line);
        });
        final long dataCount = jsonObjectList.size();
        final String tableName = tableEntity.getTableName();
        String logMsg = "处理Json数量:" + dataCount;
        LogUtil.printPerformance(LOG_WRITE_FILE_PROC_PROCESS_JSON, tableName, dataCount, beginTime);
        beginTime = LogUtil.printCost(LOG_WRITE_FILE_PROC_PROCESS_JSON, tableName, logMsg, beginTime);

        //write content into file each line
        String compFilePath = String.format("%s/%s.txt", localCsvPath, tableName);
        try {
            Files.write(Paths.get(compFilePath), fileContent, StandardCharsets.UTF_8);
        } catch (IOException e) {
            LogUtil.pringStackTrace(e);
            throw new RuntimeException(e);
        }
        LogUtil.printPerformance(LOG_WRITE_FILE_PROC_WRITE_FILE, tableName, dataCount, beginTime);
        LogUtil.printCost(LOG_WRITE_FILE_PROC_WRITE_FILE, tableName, logMsg, beginTime);
        logger.info("success write {} lines into  template file : {}", fileContent.size(), compFilePath);
    }


    // Compiled once: the word-splitting pattern never changes, and recompiling
    // a Pattern per call is wasted work on the hot path (one call per column).
    private static final Pattern UNDERLINE_WORD_PATTERN = Pattern.compile("([A-Za-z\\d]+)(_)?");

    /**
     * Converts an underscore-separated name to camelCase, e.g.
     * {@code glass_id -> glassId} (smallCamel) or {@code glass_id -> GlassId}.
     * Note: input that is already camelCase is flattened to lower case
     * ({@code glassId -> glassid}) — callers rely on this for fallback lookups.
     *
     * @param line       the name to convert; null or empty yields ""
     * @param smallCamel when true the first character is lower-cased
     * @return the camel-case form of {@code line}
     */
    public static String underline2Camel(String line, boolean smallCamel) {
        if (line == null || line.isEmpty()) {
            return "";
        }
        // StringBuilder: method-local, no synchronization needed
        StringBuilder sb = new StringBuilder();
        Matcher matcher = UNDERLINE_WORD_PATTERN.matcher(line);
        while (matcher.find()) {
            String word = matcher.group();
            // first word keeps/loses its capital depending on smallCamel
            sb.append(smallCamel && matcher.start() == 0 ? Character.toLowerCase(word.charAt(0)) : Character.toUpperCase(word.charAt(0)));
            int index = word.lastIndexOf('_');
            if (index > 0) {
                // strip the trailing underscore captured by the pattern
                sb.append(word.substring(1, index).toLowerCase());
            } else {
                sb.append(word.substring(1).toLowerCase());
            }
        }
        return sb.toString();
    }


    /** Quick manual check of the underscore -&gt; camelCase conversion. */
    public static void main(String[] args) throws IOException {
        final String fromSnakeCase = underline2Camel("glass_id", true);
        final String fromCamelCase = underline2Camel("glassId", true);
        System.out.println(fromSnakeCase);
        System.out.println(fromCamelCase);
    }












    /**
     * Copies rows from the target table back into the stage table for every
     * key combination already present in the stage table, so the subsequent
     * stage-side update/merge sees the current target state.
     *
     * @param stmt           statement used to run the insert
     * @param stageTableName stage table name (schema taken from stageTableSchema field)
     * @param tableSchema    schema of the target table
     * @param tableName      target table name
     * @param tableEntity    column metadata of the target table
     * @return number of rows copied into the stage table
     * @throws SQLException on any database error
     */
    public int doInsertStageTableFromTarget(Statement stmt, String stageTableName,String tableSchema,String tableName, TableEntity tableEntity) throws SQLException {
        long lBeginTime = System.currentTimeMillis();
        logger.info(" do Insert Stage Table From Target" );

        // build the column lists from the table metadata
        final List<ColumnEntity> columnEntityList = tableEntity.getColumnEntityList();
        final List<String> columnNameList = columnEntityList.stream().map(ColumnEntity::getColumnName).collect(Collectors.toList());
        final String columnString = columnNameList.stream().collect(Collectors.joining(","));
        List<ColumnEntity> primaryColumnList = columnEntityList.stream().filter(ColumnEntity::getPrimaryKeyFlg).collect(Collectors.toList());
        // drop logof_timestamp from the key match so rows that differ only in it still match
        primaryColumnList = primaryColumnList.stream()
                .filter(entity -> !"logof_timestamp".equals(entity.getColumnName()))
                .filter(entity -> !"logofTimestamp".equals(entity.getColumnName()))
                .collect(Collectors.toList());
        //if (Arrays.asList(edaPdsGlassSummary.split(",")).stream().filter(element -> stageTableName.contains(element)).count() > 0) {
        //    primaryColumnList = primaryColumnList.stream()
        //            .filter(entity -> !"glass_start_time".equals(entity.getColumnName()))
        //            .filter(entity -> !"glassStartTime".equals(entity.getColumnName()))
        //            .collect(Collectors.toList());
        //}

        final String matchString = primaryColumnList.stream().map(ColumnEntity::getColumnName).collect(Collectors.joining(","));

        final String fromColumnString = TableJdbcUtil.getColumnStringWithTableAlias(columnEntityList, "from_table");

        // insert into stage: select target rows whose (PK columns) tuple appears in the stage table
        StringBuilder insertSqlBuilder = new StringBuilder("insert into ");
        insertSqlBuilder.append(stageTableSchema).append(".").append(stageTableName).append("(").append(columnString).append(")");
        insertSqlBuilder.append(" select ").append(fromColumnString).append(" from ")
                .append(" (select * ")
                .append(" from ").append(tableSchema).append(".").append(tableName)
                .append( " where (").append(matchString) .append(") in(").append(" select distinct ").append(matchString).append(" from ")
                .append(stageTableSchema).append(".").append(stageTableName)
                .append(")")
                .append(")").append(" as from_table");

        final String sql = insertSqlBuilder.toString();
        logger.info("targe insert stage sql : {}", sql);
        int effectRows = stmt.executeUpdate(sql);
        logger.info("executeBatch insert {} rows into stage table {} costs {} millis", effectRows, stageTableName, (System.currentTimeMillis() - lBeginTime));

        return effectRows;
    }
}
