package com.hexinfo.dmpro.sparing.service.impl;

import cn.hutool.core.date.BetweenFormater;
import cn.hutool.core.date.DateTime;
import cn.hutool.core.date.DateUnit;
import cn.hutool.core.date.DateUtil;
import cn.hutool.core.io.file.FileAppender;
import cn.hutool.core.io.file.FileReader;
import cn.hutool.core.io.file.FileWriter;
import cn.hutool.core.text.StrSpliter;
import cn.hutool.core.util.ObjectUtil;
import cn.hutool.core.util.StrUtil;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.core.toolkit.Wrappers;
import com.hexinfo.dmpro.common.model.ClusterSparingConf;
import com.hexinfo.dmpro.common.utils.ScanCommonConstants;
import com.hexinfo.dmpro.component.comparison.entity.TableContrastParamEntity;
import com.hexinfo.dmpro.component.comparison.entity.TableContrastResultEntity;
import com.hexinfo.dmpro.component.comparison.enums.TableContrastEnums;
import com.hexinfo.dmpro.component.comparison.service.ITableContrastService;
import com.hexinfo.dmpro.sparing.async.AsyncTableDistcp;
import com.hexinfo.dmpro.sparing.dto.DistCPDTO;
import com.hexinfo.dmpro.sparing.dto.StateDataDTO;
import com.hexinfo.dmpro.sparing.dto.TableDistCPDTO;
import com.hexinfo.dmpro.sparing.dto.WhereScriptDTO;
import com.hexinfo.dmpro.sparing.model.*;
import com.hexinfo.dmpro.sparing.service.*;
import com.hexinfo.dmpro.sparing.util.Logger;
import com.xqfunds.job.core.log.XxlJobLogger;
import liquibase.pro.packaged.S;
import lombok.extern.slf4j.Slf4j;
import org.apache.derby.vti.IFastPath;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Lazy;
import org.springframework.scheduling.annotation.Async;
import org.springframework.scheduling.annotation.AsyncResult;
import org.springframework.stereotype.Service;

import javax.el.ELClass;
import java.io.File;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Future;
import java.util.stream.Collectors;

/**
 * Per-source LS filtering service: asynchronously runs HDFS LS commands to collect and
 * filter each table's HDFS paths, then drives the backup/sync steps that follow.
 * (The original class comment was truncated mid-sentence — verify intended wording.)
 */
@Service
@Slf4j
public class LsFilterSouServiceImpl implements LsFilterSouService {

    // ---- configuration injected from application properties (hdfs.*) ----
    @Value("${hdfs.threshold}")
    private int threshold;        //hdfs path-count threshold (the time-window check in backupData logs it)
    @Value("${hdfs.conHour}")
    private int conHour;          //hour window size — not referenced in the visible part of this file; confirm meaning
    @Value("${hdfs.dbNames}")
    private String dbNames;       //comma-separated database names (split on "," in dataChange)
    @Value("${hdfs.duHdfsFiles}")
    private String duHdfsFiles;   //command/script invoked as "<cmd> <hdfsPath> <outFile>" to dump "hdfs dfs -du" output
    @Value("${hdfs.duDbFilePath}")
    private String duDbFilePath;  //directory prefix where the du dump files are written
    @Value("${hdfs.tarPrefix}")
    private String tarPrefix;     //prefix prepended to a source db name to build the target db name

    // ---- collaborating services ----
    @Lazy
    @Autowired
    private BackupSourceService backupSourceService;   //task-state bookkeeping; @Lazy presumably breaks a dependency cycle — confirm
    @Autowired
    private DataFilterService dataFilterService;       //performs the actual full/incremental LS filtering
    @Autowired
    private DistCPService distCPService;               //runs the distcp shell copies
    @Autowired
    private DataRefreshService dataRefreshService;     //refreshes target tables after a copy
    @Autowired
    private ITableContrastService iTableContrastService;   //not referenced in the visible part of this file
    @Autowired
    private UatSyncLogsService uatSyncLogsService;     //persistence of UAT sync log rows
    @Autowired
    private HotStandbyLogSourceService hotStandbyLogSourceService;  //source-level hot-backup log records
    @Autowired
    private HotStandbyLogTableService hotStandbyLogTableService;    //table-level hot-backup log records
    @Autowired
    private ImpalaService impalaService;               //executes create-table statements
    @Autowired
    private FutureService futureService;               //aggregates async Future completion states
    @Autowired
    private AsyncTableDistcp asyncTableDistcp;         //async per-table distcp entry point
    // Per-source cache holding, for each table that survived the LS filter, its HDFS details.
    private Map<String, List<TableDistCPDTO>> tableDistCPDTOMap = new ConcurrentHashMap<>();

    /** Replaces the cached LS-filter result for the given source. */
    private synchronized void updateTableDistCPDTOMap(String souName, List<TableDistCPDTO> tableDistCPDTOs) {
        this.tableDistCPDTOMap.put(souName, tableDistCPDTOs);
    }

    /** Returns the cached LS-filter result for the given source, or {@code null} when absent. */
    @Override
    public synchronized List<TableDistCPDTO> getTableDistCPDTOMap(String souName) {
        return this.tableDistCPDTOMap.get(souName);
    }

    // Post-LS-filter sync status per source: running (in progress), continue (keep going), heatEnd (finished).
    private Map<String, String> lsStatusMap = new ConcurrentHashMap<>();

    /** Records the post-LS-filter status for the given source. */
    @Override
    public synchronized void updateLsStatusMap(String souName, String status) {
        this.lsStatusMap.put(souName, status);
    }

    /** Returns the recorded status for the given source, or {@code null} when none exists. */
    @Override
    public String getLsStatus(String souName) {
        return this.lsStatusMap.get(souName);
    }

    /** Removes the status entry for the given source. */
    @Override
    public synchronized void delLsStatus(String souName) {
        this.lsStatusMap.remove(souName);
    }

    /**
     * Async LS-filter for a single table: classifies it as full-sync (no time-partition,
     * syncRange null/0) or incremental, runs the matching filter, and appends the DTO to
     * the shared result list when any HDFS path survives the filter.
     * <p>NOTE(review): {@code tableDistCPDTOs} and {@code wholeTableDistCPDTOs} are mutated
     * from this async thread — confirm the caller supplies thread-safe lists.
     *
     * @param tableDistCPDTOs      shared output: tables that still have paths to sync
     * @param tableDistCPDTO       the table to filter
     * @param heatlog              source-level log buffer
     * @param tableLog             per-table log buffer
     * @param wholeTableDistCPDTOs shared output: full-sync tables (for the later deletion check)
     * @return a completed future holding {@code true} (completion signal only)
     */
    @Override
    @Async("threadPoolTaskExecutor")
    public Future<Boolean> allLsFilter(List<TableDistCPDTO> tableDistCPDTOs, TableDistCPDTO tableDistCPDTO,
                                       FileAppender heatlog, FileAppender tableLog,
                                       List<TableDistCPDTO> wholeTableDistCPDTOs) {
        String dbTableName = tableDistCPDTO.getDbTableName();
        try {
            int size = tableDistCPDTO.getHdfsHeatBackups().size();     //hdfs path count before filtering
            tableLog.append(logTime(dbTableName+"表LS过滤开始--hdfs数量初始："+size));
            //full sync has no time-partition field; it may have other partition fields or just one hdfs path
            if (tableDistCPDTO.getSyncRange() == null || tableDistCPDTO.getSyncRange() == 0){
                //remember the table for the later full-sync deletion check
                wholeTableDistCPDTOs.add(tableDistCPDTO);
                TableDistCPDTO tableDistCPDTO1 = copyTableDistCPDTO(tableDistCPDTO);
                //full sync: filter a shallow copy so the original DTO stays intact for the deletion check
                dataFilterService.allLsFilter(tableDistCPDTO1,heatlog,tableLog);
                if (ObjectUtil.isEmpty(tableDistCPDTO1.getHdfsHeatBackups())){
                    tableLog.append(logTime("【全量】无符合文件，不进行热备操作  "+dbTableName));
                }else {
                    logLog(heatlog,tableLog,"【全量】hdfs数量初始："+size+"  过滤后hdfs数量:"+tableDistCPDTO1.getHdfsHeatBackups().size()+"  "+dbTableName,"1");
                    tableDistCPDTOs.add(tableDistCPDTO1);
                }
            }else {
                //incremental: filter the DTO in place
                dataFilterService.addLsFilter(tableDistCPDTO,heatlog,tableLog);
                if (ObjectUtil.isEmpty(tableDistCPDTO.getHdfsHeatBackups())){
                    tableLog.append(logTime("【增量】无符合文件，不进行热备操作  "+dbTableName));
                }else {
                    logLog(heatlog,tableLog,"【增量】hdfs数量初始："+size+"  过滤后hdfs数量:"+tableDistCPDTO.getHdfsHeatBackups().size()+"  "+dbTableName,"1");
                    tableDistCPDTOs.add(tableDistCPDTO);
                }
            }
        }catch (Exception e){
            logLog(heatlog,tableLog,dbTableName+"--LS过滤【异常】："+e,"1");
            //pass the throwable as the last argument so SLF4J records the stack trace
            //(the old string concatenation dropped it)
            log.error("{}--LS过滤【异常】：", dbTableName, e);
            backupSourceService.updateTaskState(tableDistCPDTO.getSourceDataName(),false);
        }
        tableLog.append(logTime("-------------------------【LS过滤结束】"));
        tableLog.flush();     //flush buffered lines into the table log file
        logLog(heatlog,tableLog,"","2"); //flush both buffers via the shared helper
        return new AsyncResult<>(true);
    }

    /**
     * Produces a shallow copy of the given DTO: every field value is carried over as-is,
     * so mutable members (the hdfs backup list, the where-script list, the task reference)
     * remain shared between the original and the copy.
     *
     * @param tableDistCPDTO the instance to duplicate
     * @return a new DTO holding the same field values
     */
    private TableDistCPDTO copyTableDistCPDTO(TableDistCPDTO tableDistCPDTO) {
        TableDistCPDTO copy = new TableDistCPDTO();
        // source / backup / target naming
        copy.setSourceDataName(tableDistCPDTO.getSourceDataName());
        copy.setDbName(tableDistCPDTO.getDbName());
        copy.setTableName(tableDistCPDTO.getTableName());
        copy.setDbNameBack(tableDistCPDTO.getDbNameBack());
        copy.setTableNameBack(tableDistCPDTO.getTableNameBack());
        copy.setDbNameTar(tableDistCPDTO.getDbNameTar());
        copy.setTableNameTar(tableDistCPDTO.getTableNameTar());
        // sync configuration
        copy.setDate(tableDistCPDTO.getDate());
        copy.setSyncRange(tableDistCPDTO.getSyncRange());
        copy.setPkeyName(tableDistCPDTO.getPkeyName());
        copy.setWhereScriptDTOS(tableDistCPDTO.getWhereScriptDTOS());
        // hdfs and logging details
        copy.setHdfsFileSize(tableDistCPDTO.getHdfsFileSize());
        copy.setTableHdfsPath(tableDistCPDTO.getTableHdfsPath());
        copy.setHdfsHeatBackups(tableDistCPDTO.getHdfsHeatBackups());
        copy.setTbHdfsNum(tableDistCPDTO.getTbHdfsNum());
        copy.setLogPath(tableDistCPDTO.getLogPath());
        // bookkeeping references
        copy.setTableId(tableDistCPDTO.getTableId());
        copy.setUatSyncLogsTask(tableDistCPDTO.getUatSyncLogsTask());
        return copy;
    }

    /**
     * Copies ("hot-backs up") every HDFS path of the table from WG to UAT, then creates
     * and refreshes the corresponding UAT tables; each step is persisted as a UatSyncLogs row.
     *
     * @param tableDistCPDTO descriptor whose hdfsHeatBackups list drives the per-path copies
     * @return the SB (failure) state constant when any step failed, otherwise "成功"
     */
    @Override
    public String allLsFilterWgToUat(TableDistCPDTO tableDistCPDTO) {
        String str = "";
        Set<String> tableList = new HashSet<>();      //tables whose copy succeeded -> create/refresh candidates
        Set<String> tableListUnc = new HashSet<>();   //tables under a non-standard location -> no auto create-table
        for (HdfsHeatBackup hdfsHeatBackup : tableDistCPDTO.getHdfsHeatBackups()){
            UatSyncLogs uatSyncLogs = new UatSyncLogs();
            //start timestamp, used for the elapsed-time ("standbyTime") column
            Date staDate = DateUtil.date();
            uatSyncLogs.setTaskId(tableDistCPDTO.getUatSyncLogsTask().getId());
            uatSyncLogs.setTaskName(tableDistCPDTO.getUatSyncLogsTask().getTaskName());
            uatSyncLogs.setSourceName(tableDistCPDTO.getSourceDataName());
            uatSyncLogs.setLibraryTableName(hdfsHeatBackup.getTargetName());
            uatSyncLogs.setTaskState(ScanCommonConstants.ClusterName.JXZ.value); //JXZ = presumably "in progress" — confirm constant semantics
            uatSyncLogsService.save(uatSyncLogs);
            DistCPDTO distCPDTO = new DistCPDTO(
                    tableDistCPDTO.getSourceDataName(),
                    hdfsHeatBackup,
                    tableDistCPDTO.getHdfsHeatBackups().size(),
                    null,
                    null);
            try {
                //Step 1: hot-backup this path to UAT
                log.warn("======================热备到UAT");
                String i = distCPService.execShellWgToUAT(distCPDTO,uatSyncLogs);
                //end timestamp of the copy
                Date endDate = DateUtil.date();
                uatSyncLogs.setStandbyTime(DateUtil.formatBetween(staDate,endDate, BetweenFormater.Level.SECOND));
                if(i.contains("失败")){ //the shell helper reports failure inside its result message
                    uatSyncLogs.setTaskState(ScanCommonConstants.ClusterName.SB.value);
                    str = ScanCommonConstants.ClusterName.SB.value;
                    uatSyncLogsService.updateById(uatSyncLogs);
                    continue;
                }
                uatSyncLogs.setTaskState(ScanCommonConstants.ClusterName.YWC.value); //YWC = presumably "completed" — confirm constant semantics
                uatSyncLogsService.updateById(uatSyncLogs);
                tableList.add(uatSyncLogs.getLibraryTableName());
                //tables stored outside the standard warehouse layout get no auto create-table
                String unc = "/user/hive/warehouse/"+hdfsHeatBackup.getSourceDbName()+".db/";
                if(!hdfsHeatBackup.getSourceAddress().contains(unc)){
                    tableListUnc.add(uatSyncLogs.getLibraryTableName());
                }
            } catch (Exception e) {
                if(StrUtil.isNotBlank(uatSyncLogs.getLog())&&StrUtil.isNotBlank(e.getMessage())){
                    uatSyncLogs.setLog(uatSyncLogs.getLog()+"\n["+DateUtil.now()+"]"+e.getMessage());
                }
                uatSyncLogs.setTaskState(ScanCommonConstants.ClusterName.SB.value);
                str = ScanCommonConstants.ClusterName.SB.value;
                uatSyncLogsService.updateById(uatSyncLogs);
            }
        }
        if(ObjectUtil.isNotEmpty(tableList)){
            //phase 2: create + refresh every successfully copied table, tracked in one shared log row
            UatSyncLogs uatSyncLogsJs = new UatSyncLogs();
            //start timestamp of the create/refresh phase
            Date staDateJs = DateUtil.date();
            uatSyncLogsJs.setTaskId(tableDistCPDTO.getUatSyncLogsTask().getId());
            uatSyncLogsJs.setTaskName(tableDistCPDTO.getUatSyncLogsTask().getTaskName());
            uatSyncLogsJs.setSourceName(tableDistCPDTO.getSourceDataName());
            uatSyncLogsJs.setLibraryTableName("【建表、刷表操作】");
            uatSyncLogsJs.setLog("["+DateUtil.now()+"]"+"【自动化建表、刷表】");
            uatSyncLogsJs.setTaskState(ScanCommonConstants.ClusterName.JXZ.value);
            uatSyncLogsService.save(uatSyncLogsJs);
            int x = 0;
            for (String table : tableList) {
                x++;
                try {
                    //NOTE(review): replaceAll takes a regex and strips EVERY "uat_" occurrence,
                    //not only a leading prefix — confirm table names cannot contain "uat_" elsewhere
                    String createSql = "create table if not exists "+ table +" like "+ table.replaceAll("uat_","");
                    String createErr = "";
                    //table lives under a Location-specified (non-standard) path: skip auto creation
                    if(tableListUnc.contains(table)){
                        uatSyncLogsJs.setLog(uatSyncLogsJs.getLog()+"\n["+DateUtil.now()+"]【Location指定路径的表不做建表操作，请人工建表！】");
                        createErr = "非常规表";
                    }else{
                        uatSyncLogsJs.setLog(uatSyncLogsJs.getLog()+"\n["+DateUtil.now()+"]【建表sql】"+createSql);
                        createErr = impalaService.create(createSql);
                    }

                    //refresh the elapsed time after the create attempt
                    Date endDate = DateUtil.date();
                    uatSyncLogsJs.setStandbyTime(DateUtil.formatBetween(staDateJs,endDate, BetweenFormater.Level.SECOND));
                    if(createErr.contains("异常")){ //impalaService.create signals errors inside its message
                        uatSyncLogsJs.setTaskState(ScanCommonConstants.ClusterName.SB.value);
                        uatSyncLogsJs.setLog(uatSyncLogsJs.getLog()+"\n["+DateUtil.now()+"]"+createErr);
                        str = ScanCommonConstants.ClusterName.SB.value;
                        uatSyncLogsService.updateById(uatSyncLogsJs);
                        continue;
                    }else{
                        //only regular tables (actually created here) log a creation-success line
                        if(!tableListUnc.contains(table)) {
                            uatSyncLogsJs.setLog(uatSyncLogsJs.getLog() + "\n[" + DateUtil.now() + "]" + "【建表成功】" + table);
                        }
                        uatSyncLogsService.updateById(uatSyncLogsJs);
                        //Step 2: refresh the table
                        log.warn("======================表刷新");
                        String state = dataRefreshService.souTableIsFinishWgToUat(table,uatSyncLogsJs);
                        //refresh the elapsed time after the refresh attempt
                        Date endDateJs = DateUtil.date();
                        uatSyncLogsJs.setStandbyTime(DateUtil.formatBetween(staDateJs,endDateJs, BetweenFormater.Level.SECOND));
                        if(!state.contains("异常")){
                            //only the last table flips the shared row to the completed state
                            if(x==tableList.size()) {
                                uatSyncLogsJs.setTaskState(ScanCommonConstants.ClusterName.YWC.value);
                            }
                        }else{
                            uatSyncLogsJs.setTaskState(ScanCommonConstants.ClusterName.SB.value);
                            str = ScanCommonConstants.ClusterName.SB.value;
                        }
                        uatSyncLogsService.updateById(uatSyncLogsJs);
                    }
                } catch (Exception e) {
                    //record the elapsed time even on failure
                    Date endDate = DateUtil.date();
                    uatSyncLogsJs.setStandbyTime(DateUtil.formatBetween(staDateJs,endDate, BetweenFormater.Level.SECOND));
                    if(StrUtil.isNotBlank(uatSyncLogsJs.getLog())&&StrUtil.isNotBlank(e.getMessage())){
                        uatSyncLogsJs.setLog(uatSyncLogsJs.getLog()+"\n["+DateUtil.now()+"]"+e.getMessage());
                    }
                    uatSyncLogsJs.setTaskState(ScanCommonConstants.ClusterName.SB.value);
                    str = ScanCommonConstants.ClusterName.SB.value;
                    uatSyncLogsService.updateById(uatSyncLogsJs);
                }
            }
        }
        if(StrUtil.isNotBlank(str)&&ScanCommonConstants.ClusterName.SB.value.equals(str)){
            return str;
        }else{
            str = "成功";
            return str;
        }
    }

    /**
     * Data backup/sync driver for one source: applies the time-window threshold check,
     * runs the du-based data-change check for full-sync tables, creates per-table log
     * records, then fires the async distcp for each table and waits for all of them.
     *
     * @param tableDistCPDTOs      tables that survived the LS filter
     * @param souName              source name ("无源" means no source-partition field)
     * @param heatlog              source-level log buffer
     * @param logId                id of the source-level log-analysis record
     * @param wholeTableDistCPDTOs full-sync tables collected during LS filtering
     * @param syncRuleConfSet      source-partition naming rules
     * @return the result list from the future-status aggregation (empty when nothing to sync)
     */
    @Override
    public List<String> backupData(List<TableDistCPDTO> tableDistCPDTOs,String souName, FileAppender heatlog,String logId,
                           List<TableDistCPDTO> wholeTableDistCPDTOs, Set<String> syncRuleConfSet) {
        int sizesStart = tableDistCPDTOs.size();   //table count after the LS filter
        //source-partition classification: "无源" = no source-partition field (time-window based);
        //a window above the threshold falls back to table-level full sync
        if ("无源".equals(souName)){
            heatlog.append(logTime("--------------------------------【时间窗口阈值判断--阈值:"+threshold+"】----------------------------------------"));
            timeBackup(tableDistCPDTOs,heatlog);
        }

        int hdfsSize1 = 0;
        for (TableDistCPDTO tableDistCPDTO : tableDistCPDTOs) {
            hdfsSize1 += tableDistCPDTO.getHdfsHeatBackups().size();
        }
        heatlog.append(logTime("【LS初步过滤后表数量："+sizesStart+"--hdfs总数量："+hdfsSize1+"】"));

        heatlog.append(logTime("--------------------------------【表数据变更判断开始】----------------------------------------"));
        if (ObjectUtil.isNotEmpty(syncRuleConfSet)){
            heatlog.append(logTime("源分区规则："+syncRuleConfSet.toString()));
        }else {
            heatlog.append(logTime("源分区规则：无"));
        }
        List<TableDistCPDTO> finalTableDistCPDTOs = new ArrayList<>();
        if (ObjectUtil.isEmpty(wholeTableDistCPDTOs)){
            heatlog.append(logTime("规则过滤后没有表同步"));
        }else {
            //use "hdfs dfs -du" to find full-sync tables whose source data was deleted
            List<TableDistCPDTO> tableDistCPDTOS = dataChange(wholeTableDistCPDTOs, heatlog, souName, syncRuleConfSet);
            finalTableDistCPDTOs.addAll(tableDistCPDTOS);
            Map<String, TableDistCPDTO> map = tableDistCPDTOS.stream()
                    .collect(Collectors.toMap(TableDistCPDTO::getDbTableName, a -> a, (a1, a2) -> a1));
            //add the LS-filter tables that are not already present in the data-change result
            for (TableDistCPDTO tableDistCPDTO : tableDistCPDTOs) {
                TableDistCPDTO tableDistCPDTO1 = map.get(tableDistCPDTO.getDbTableName());
                if (ObjectUtil.isEmpty(tableDistCPDTO1)){
                    finalTableDistCPDTOs.add(tableDistCPDTO);
                }
            }
        }
        heatlog.append(logTime("--------------------------------【表数据变更判断结束】----------------------------------------"));

        int sizes = finalTableDistCPDTOs.size();   //final table count after all filtering
        int hdfsSize = 0;
        //count total filtered HDFS paths and create the per-table log-analysis rows
        for (TableDistCPDTO tableDistCPDTO : finalTableDistCPDTOs) {
            hdfsSize += tableDistCPDTO.getHdfsHeatBackups().size();
            //insert the table-level hot-backup log row in "pending" state
            String tableId = hotStandbyLogTableService.addHotStandbyLogTable(tableDistCPDTO, logId);
            tableDistCPDTO.setTableId(tableId);
        }
        heatlog.append(logTime("【全源的表LS过滤完成，过滤后表数量："+sizes+"--过滤后hdfs总数量："+hdfsSize+"】"));
        heatlog.flush(); //flush buffered lines into the log file
        hotStandbyLogSourceService.upHotLog(logId, DateUtil.format(DateUtil.date(), "yyyy-MM-dd HH:mm:ss")+"\n");
        //nothing qualifies for sync: mark the source finished and bail out
        if(sizes == 0){
            heatlog.append(logTime("没有表符合同步要求，直接退出"));
            heatlog.flush(); //flush buffered lines into the log file
            updateLsStatusMap(souName, ScanCommonConstants.ClusterName.HEATEND.value);
            return new ArrayList<>();
        }
        //cache the filtered per-table hdfs info for this source
        updateTableDistCPDTOMap(souName,finalTableDistCPDTOs);
        updateLsStatusMap(souName, ScanCommonConstants.ClusterName.CONTINUE.value);

        //update the log-analysis record: elapsed detail plus "tables/HDFS paths" counters
        hotStandbyLogSourceService.upSyncTableHdfsNumber(logId,sizes+"/"+hdfsSize,
                "【数据同步】"+ DateUtil.format(DateUtil.date(), "yyyy-MM-dd HH:mm:ss")+"--");

        heatlog.append(logTime("--------------------------------② 数据同步----------------------------------------"));
        Map<String,Future<Boolean>> futureMap = new HashMap<>();
        for (TableDistCPDTO distCPDTO : finalTableDistCPDTOs) {
            //per-table log file buffer
            FileAppender tableLog1 = new FileAppender(new File(distCPDTO.getLogPath()), 100, true);
            tableLog1.append(logTime("-------------------------【数据同步开始】"));
            tableLog1.flush();
            try{
                //flip the table-level log row to "in progress"
                hotStandbyLogTableService.upByIdExec(distCPDTO.getTableId(),ScanCommonConstants.ClusterName.JXZ.value,
                        "【数据同步】"+DateUtil.format(DateUtil.date(), "yyyy-MM-dd HH:mm:ss")+"--");
                //fire the async distcp for this table (full vs incremental is decided downstream)
                Future<Boolean> booleanFuture = asyncTableDistcp.allQuantity(distCPDTO, tableLog1, heatlog, sizes);
                futureMap.put(distCPDTO.getDbTableName(),booleanFuture);
            }catch (Exception e){
                log.error(distCPDTO.getDbTableName()+"新增表级日志分析记录异常："+e);
                logLog(heatlog,tableLog1,distCPDTO.getDbTableName()+"新增表级日志分析记录异常："+e,"1");
                heatlog.flush(); //flush buffered lines into the log file
                backupSourceService.updateTaskState(souName,false);
            }
        }
        //block until every table's future reports completion
        StateDataDTO stateDataDTO = futureService.futureStatusMap(futureMap, heatlog,"数据同步");
        heatlog.append(logTime("【全部表同步完成】"));
        heatlog.flush();
        return stateDataDTO.getDataList();
    }

    /**
     * Full-sync data-change check: dumps "hdfs dfs -du" for every configured database on
     * both clusters, then returns the full-sync tables whose source data was deleted.
     *
     * @param wholeTableDistCPDTOs full-sync table descriptors; element 0 seeds the cluster path prefixes
     * @param heatlog              source-level log buffer
     * @param souName              source name, used to flag the task as failed on errors
     * @param syncRuleConfSet      source-partition naming rules (forwarded to the per-table check)
     * @return the tables needing re-sync; empty list on any shell or parse failure
     */
    private List<TableDistCPDTO> dataChange(List<TableDistCPDTO> wholeTableDistCPDTOs,
                                            FileAppender heatlog, String souName, Set<String> syncRuleConfSet) {
        try{
            TableDistCPDTO tableDistCPDTO = wholeTableDistCPDTOs.get(0);
            HdfsHeatBackup hdfsHeatBackup = tableDistCPDTO.getHdfsHeatBackups().get(0);
            //derive the hdfs prefix that sits in front of "<db>.db" on each cluster
            String targetDbPath = StrSpliter.split(hdfsHeatBackup.getTargetAddress(), "/"+hdfsHeatBackup.getTargetTableName(),true, true)
                    .get(0);
            String targetHdfsPath = targetDbPath.substring(0, targetDbPath.lastIndexOf("/")+1);
            String sourceDbPath = tableDistCPDTO.getTableHdfsPath().substring(0, tableDistCPDTO.getTableHdfsPath().lastIndexOf("/"));
            String sourceHdfsPath = sourceDbPath.substring(0, sourceDbPath.lastIndexOf("/")+1);
            String[] splits = dbNames.split(",");
            Map<String, String> sourceMap = new HashMap<>();
            Map<String, String> targetMap = new HashMap<>();
            for (String split : splits) {
                String targetDb = tarPrefix+split;
                String sourceFilePath = duDbFilePath + souName +"_"+ split + "_source.txt";
                String targetFilePath = duDbFilePath + souName +"_"+ targetDb + "_target.txt";
                String sourceShell = duHdfsFiles+" "+sourceHdfsPath + split+".db"+" "+sourceFilePath; //shell command: du the source db into a file
                String targetShell = duHdfsFiles+" "+targetHdfsPath + targetDb+".db"+" "+targetFilePath; //shell command: du the target db into a file
                int sourceCode = execShellCode(sourceShell,heatlog);
                if (sourceCode == 999){
                    backupSourceService.updateTaskState(souName,false);
                    return new ArrayList<>();
                }else {
                    int targetCode = execShellCode(targetShell,heatlog);
                    if (targetCode == 999){
                        backupSourceService.updateTaskState(souName,false);
                        return new ArrayList<>();
                    }else {
                        sourceMap.putAll(readTimeFile(sourceFilePath, split));
                        targetMap.putAll(readTimeFile(targetFilePath, targetDb));
                    }
                }
            }
            //full sync: collect the tables whose source-cluster data was deleted
            //TODO(review): an incremental check (dateDataChange) was sketched here but never enabled
            return getDeleteTableDistCP(sourceMap, targetMap, wholeTableDistCPDTOs,
                    heatlog, souName, syncRuleConfSet);
        }catch (Exception e){
            //pass the throwable separately so SLF4J keeps the stack trace
            log.error("全量表数据变更判断异常：", e);
            heatlog.append(logTime("全量表数据变更判断异常："+e));
            heatlog.flush();
            backupSourceService.updateTaskState(souName,false);
            return new ArrayList<>();
        }
    }

    /**
     * Incremental-table data-change check: for each candidate, decides via du sizes whether
     * a delete happened on the source (dropped table / dropped source partition / dropped
     * date partition) and the table therefore needs a re-sync.
     * <p>NOTE(review): the only visible call site of this method (in dataChange) is
     * commented out — confirm whether it is still meant to be wired in before removing.
     *
     * @param sourceMap           source cluster "db.table" -> du size
     * @param targetMap           target cluster "db.table" -> du size
     * @param dateTableDistCPDTOs incremental-table candidates
     * @param heatlog             source-level log buffer
     * @param souName             source name for failure-state updates
     * @param syncRuleConfSet     source-partition naming rules
     * @return the tables that need syncing
     */
    private List<TableDistCPDTO> dateDataChange(Map<String, String> sourceMap,Map<String, String> targetMap,
                                                List<TableDistCPDTO> dateTableDistCPDTOs, FileAppender heatlog,
                                                String souName, Set<String> syncRuleConfSet) {
        List<TableDistCPDTO> tableDistCPDTOS = new ArrayList<>();
        for (TableDistCPDTO dateTableDistCPDTO : dateTableDistCPDTOs) {
            //per-table log file buffer
            FileAppender tableLog = new FileAppender(new File(dateTableDistCPDTO.getLogPath()), 100, true);
            tableLog.append(logTime("-------------------------【增量表数据变更判断开始】"));
            try{
                TableDistCPDTO tableDistCPDTO = getDateTableDistCPDTO(sourceMap,targetMap,dateTableDistCPDTO,heatlog,tableLog,syncRuleConfSet);
                if (ObjectUtil.isNotEmpty(tableDistCPDTO)){
                    tableDistCPDTOS.add(tableDistCPDTO);
                }
            }catch (Exception e){
                log.error("获取是否需要同步的增量表 {} 异常：{}",dateTableDistCPDTO.getDbTableName(),e.toString());
                logLog(heatlog,tableLog,"获取是否需要同步的增量表 "+dateTableDistCPDTO.getDbTableName()+"异常："+e,"1");
//                Logger.log("获取是否需要同步的增量表 "+dateTableDistCPDTO.getDbTableName()+"异常："+e);
                backupSourceService.updateTaskState(souName,false);
             }
            tableLog.append(logTime("-------------------------【增量表数据变更判断结束】"));
            tableLog.flush();
            heatlog.flush();
        }
        return tableDistCPDTOS;
    }

    /**
     * Per-table incremental check: compares source/target du sizes (and, when source
     * partition rules exist, per-partition du dumps) to decide whether the table must be
     * re-synced because data was deleted on the source.
     *
     * @param sourceMap          source cluster "db.table" -> du size
     * @param targetMap          target cluster "db.table" -> du size
     * @param dateTableDistCPDTO the incremental table under inspection
     * @param heatlog            source-level log buffer
     * @param tableLog           per-table log buffer
     * @param syncRuleConfSet    source-partition naming rules
     * @return the DTO when the table needs syncing, otherwise {@code null}
     */
    private TableDistCPDTO getDateTableDistCPDTO(Map<String, String> sourceMap,Map<String, String> targetMap,
                                                   TableDistCPDTO dateTableDistCPDTO,FileAppender heatlog,
                                                   FileAppender tableLog, Set<String> syncRuleConfSet) {
        TableDistCPDTO tableDistCPDTO = null;
        String sourceSize = sourceMap.get(dateTableDistCPDTO.getDbTableName());
        String targetSize = targetMap.get(dateTableDistCPDTO.getTargetName());
        tableLog.append(logTime("源表："+dateTableDistCPDTO.getDbTableName()+"，du总大小："+sourceSize));
        tableLog.append(logTime("目标表："+dateTableDistCPDTO.getTargetName()+"，du总大小："+targetSize));
        //a side missing from the du dump means the table's hdfs path does not exist there
        if (StrUtil.isBlank(sourceSize) || StrUtil.isBlank(targetSize)){
            if (StrUtil.isBlank(sourceSize)){
                logLog(heatlog,tableLog,"【源表："+dateTableDistCPDTO.getDbTableName()+"的表hdfs路径不存在】","1");
            }
            if (StrUtil.isBlank(targetSize)){
                logLog(heatlog,tableLog,"【目标表："+dateTableDistCPDTO.getTargetName()+"的表hdfs路径不存在】","1");
            }
            //exactly one side missing is an inconsistency: mark the source task failed
            if ((StrUtil.isBlank(sourceSize) && StrUtil.isNotBlank(targetSize)) || (StrUtil.isNotBlank(sourceSize) && StrUtil.isBlank(targetSize))){
                backupSourceService.updateTaskState(dateTableDistCPDTO.getSourceDataName(),false);
            }
            return tableDistCPDTO;
        }else {
            //check whether the incremental table's hdfs path points at the table level
            List<HdfsHeatBackup> hdfsHeatBackups = dateTableDistCPDTO.getHdfsHeatBackups();
            HdfsHeatBackup hdfsHeatBackup = hdfsHeatBackups.get(0);
            String sourceAddress = hdfsHeatBackup.getSourceAddress();
            String targetAddress = hdfsHeatBackup.getTargetAddress(); //NOTE(review): assigned but never read afterwards
            int count = StrUtil.count(sourceAddress, "/");
            //"<= 7" slashes presumably identifies a table-level path (no partition subdirs) — confirm the layout assumption
            if (count <= 7 && (!sourceSize.equals(targetSize))){
                tableDistCPDTO = dateTableDistCPDTO;
                logLog(heatlog,tableLog,"【增量】表需要同步："+dateTableDistCPDTO.getDbTableName(),"1");
            }else if (ObjectUtil.isNotEmpty(syncRuleConfSet)){
                String sourceTableAddress = dateTableDistCPDTO.getTableHdfsPath();
                //table-level hdfs path on the target cluster
                String targetTableAddress = StrSpliter.split(hdfsHeatBackup.getTargetAddress(), hdfsHeatBackup.getTargetTableName(),true, true)
                        .get(0) + hdfsHeatBackup.getTargetTableName();
                String sourceFilePath = duDbFilePath + dateTableDistCPDTO.getName() + "_source.txt";
                String targetFilePath = duDbFilePath + dateTableDistCPDTO.getTarName() + "_target.txt";
                String sourceShell = duHdfsFiles+" "+sourceTableAddress+" "+sourceFilePath; //shell command: du the source table into a file
                String targetShell = duHdfsFiles+" "+targetTableAddress+" "+targetFilePath; //shell command: du the target table into a file
                int sourceCode = execShellCodeTable(sourceShell,heatlog,tableLog);
                if (sourceCode == 999){
                    backupSourceService.updateTaskState(dateTableDistCPDTO.getSourceDataName(),false);
                }else {
                    int targetCode = execShellCodeTable(targetShell,heatlog,tableLog);
                    if (targetCode == 999){
                        backupSourceService.updateTaskState(dateTableDistCPDTO.getSourceDataName(),false);
                    }else {
                        Map<String, String> souMap = readTimeFileTable(sourceFilePath, syncRuleConfSet);
                        Map<String, String> tarMap = readTimeFileTable(targetFilePath, syncRuleConfSet);
                        //every source partition present on the target must still exist on the source side
                        boolean containsAllKeys = souMap.keySet().containsAll(tarMap.keySet());
                        if (!containsAllKeys){
                            logLog(heatlog,tableLog,"【增量】表需要同步："+dateTableDistCPDTO.getDbTableName()+"（源表的源分区被删除）","1");
                            tableDistCPDTO = tableSync(hdfsHeatBackup,sourceTableAddress,targetTableAddress,
                                    dateTableDistCPDTO,tableLog);
                        }
                    }
                }
            }
        }
        return tableDistCPDTO;
    }

    /**
     * Runs a shell command (or script path) and returns its exit code.
     * <p>The child's stdout/stderr are drained and discarded so the process cannot block
     * on a full pipe buffer — the previous {@code Runtime.exec} call never consumed them.
     * The log line now reports success only for a zero exit code (it used to say "成功"
     * unconditionally).
     *
     * @param pathOrCommand whitespace-separated command line or script path
     * @param heatlog       source-level log buffer
     * @return the process exit code, or 999 when launching/waiting fails
     */
    private int execShellCode(String pathOrCommand, FileAppender heatlog) {
        int exitValue = 0;
        try {
            //split on whitespace, matching Runtime.exec(String) tokenization;
            //arguments that themselves contain spaces are (still) not supported
            Process ps = new ProcessBuilder(pathOrCommand.trim().split("\\s+"))
                    .redirectErrorStream(true)
                    .start();
            //drain the combined output so the child never stalls on a full pipe
            try (java.io.InputStream in = ps.getInputStream()) {
                byte[] buf = new byte[4096];
                while (in.read(buf) != -1) {
                    //output intentionally discarded
                }
            }
            exitValue = ps.waitFor();
            String verdict = exitValue == 0 ? "（成功）" : "（失败）";
            heatlog.append(logTime("【du命令】"+pathOrCommand+"---执行结果："+exitValue+verdict));
        } catch (Exception e) {
            if (e instanceof InterruptedException) {
                Thread.currentThread().interrupt(); //preserve the interrupt status
            }
            //single placeholder so SLF4J treats the trailing throwable as an exception (keeps the stack trace)
            log.error("执行shell脚本失败命令：{}，执行shell脚本失败报错：", pathOrCommand, e);
            heatlog.append(logTime("【du命令】"+pathOrCommand+"---执行异常："+e));
            return 999;
        }
        return exitValue;
    }

    /**
     * Parses an "hdfs dfs -du" dump file into a map of "db.table" -> size.
     * Each useful line is expected to look like {@code "<size> ... <prefix>/<db>.db/<table>"}.
     * <p>Fix: the old {@code split(db+"/")} passed an unquoted regex (the '.' in the db
     * name matched any character) and threw on lines missing the delimiter or a space;
     * such lines are now skipped with a literal search instead.
     *
     * @param filePath path of the du output file to read
     * @param dbName   database name (without the ".db" suffix)
     * @return map of "dbName.tableName" to the size token at the start of each line
     */
    private Map<String,String> readTimeFile(String filePath,String dbName) {
        String db = dbName+".db";
        String delimiter = db + "/";
        Map<String,String> map = new HashMap<>();
        FileReader fileReader = new FileReader(filePath);
        List<String> hdfsPaths = fileReader.readLines();
        for (String hdfsPath : hdfsPaths) {
            if (StrUtil.isNotBlank(hdfsPath)){
                int tableIdx = hdfsPath.indexOf(delimiter);
                int sizeEnd = hdfsPath.indexOf(" ");
                if (tableIdx < 0 || sizeEnd < 0){
                    continue; //line does not carry a "<size> .../<db>.db/<table>" entry
                }
                String tableName = hdfsPath.substring(tableIdx + delimiter.length());
                map.put(dbName+"."+tableName, hdfsPath.substring(0, sizeEnd));
            }
        }
        return map;
    }

    /**
     * Runs the per-table deletion check over every full-sync table that was not hot-backed
     * up, collecting those whose source-cluster data was deleted (and so must be re-synced).
     *
     * @param sourceMap            source cluster "db.table" -> du file size
     * @param targetMap            target cluster "db.table" -> du file size
     * @param wholeTableDistCPDTOs full-sync tables of this source that were not hot-backed up
     * @param heatlog              source-level log buffer
     * @param souName              source name for failure-state updates
     * @param syncRuleConfSet      source-partition naming rules
     * @return the tables that need syncing
     */
    private List<TableDistCPDTO> getDeleteTableDistCP(Map<String, String> sourceMap,Map<String, String> targetMap,
                                                      List<TableDistCPDTO> wholeTableDistCPDTOs,FileAppender heatlog,
                                                      String souName, Set<String> syncRuleConfSet) {
        List<TableDistCPDTO> tableDistCPDTOS = new ArrayList<>();
        for (TableDistCPDTO wholeTableDistCPDTO : wholeTableDistCPDTOs) {
            //per-table log file buffer
            FileAppender tableLog = new FileAppender(new File(wholeTableDistCPDTO.getLogPath()), 100, true);
            tableLog.append(logTime("-------------------------【全量表数据变更判断开始】"));
            try{
                TableDistCPDTO tableDistCPDTO = getDeleteTableDistCPDTO(sourceMap,targetMap,wholeTableDistCPDTO,heatlog,tableLog,syncRuleConfSet);
                if (ObjectUtil.isNotEmpty(tableDistCPDTO)){
                    tableDistCPDTOS.add(tableDistCPDTO);
                }
            }catch (Exception e){
                log.error("获取是否需要同步的全量表 {} 异常：{}",wholeTableDistCPDTO.getDbTableName(),e.toString());
                logLog(heatlog,tableLog,"获取是否需要同步的全量表 "+wholeTableDistCPDTO.getDbTableName()+"异常："+e,"1");
//                Logger.log("获取是否需要同步的全量表 "+wholeTableDistCPDTO.getDbTableName()+"异常："+e);
                backupSourceService.updateTaskState(souName,false);
            }
            tableLog.append(logTime("-------------------------【全量表数据变更判断结束】"));
            tableLog.flush();
            heatlog.flush();
        }
        return tableDistCPDTOS;
    }

    /**
     * Decides whether a single full-volume table (one not under hot backup) needs a
     * re-sync because its data was changed or deleted on the source cluster.
     * @return the table's DistCP info when a sync is needed, otherwise {@code null}
     */
    private TableDistCPDTO getDeleteTableDistCPDTO(Map<String, String> sourceMap,Map<String, String> targetMap,
                                                   TableDistCPDTO wholeTableDistCPDTO,FileAppender heatlog,
                                                   FileAppender tableLog, Set<String> syncRuleConfSet) {
        // Stays null unless this table is found to need a re-sync.
        TableDistCPDTO tableDistCPDTO = null;
        HdfsHeatBackup hdfsHeatBackup = wholeTableDistCPDTO.getHdfsHeatBackups().get(0);
        int size = wholeTableDistCPDTO.getHdfsHeatBackups().size();
        // du sizes keyed by table name; blank/absent means the table's hdfs path was not in the du output.
        String sourceSize = sourceMap.get(wholeTableDistCPDTO.getDbTableName());
        String targetSize = targetMap.get(wholeTableDistCPDTO.getTargetName());
        tableLog.append(logTime("源表："+wholeTableDistCPDTO.getDbTableName()+"，du总大小："+sourceSize));
        tableLog.append(logTime("目标表："+wholeTableDistCPDTO.getTargetName()+"，du总大小："+targetSize));
        // Existence check: if either side's hdfs path is missing, log it and bail out without syncing.
        if (StrUtil.isBlank(sourceSize) || StrUtil.isBlank(targetSize)){
            if (StrUtil.isBlank(sourceSize)){
                logLog(heatlog,tableLog,"【源表："+wholeTableDistCPDTO.getDbTableName()+"的表hdfs路径不存在】","1");
            }
            if (StrUtil.isBlank(targetSize)){
                logLog(heatlog,tableLog,"【目标表："+wholeTableDistCPDTO.getTargetName()+"的表hdfs路径不存在】","1");
            }
            // Exactly one side missing => the clusters disagree, so flag the whole source task as failed.
            if ((StrUtil.isBlank(sourceSize) && StrUtil.isNotBlank(targetSize)) || (StrUtil.isNotBlank(sourceSize) && StrUtil.isBlank(targetSize))){
                backupSourceService.updateTaskState(wholeTableDistCPDTO.getSourceDataName(),false);
            }
            return tableDistCPDTO;
        }
        // Sizes differ, or more than one hdfs path is configured for this table.
        if (!sourceSize.equals(targetSize) || size > 1){
            String sourceAddress = wholeTableDistCPDTO.getHdfsHeatBackups().get(0).getSourceAddress();
            String targetAddress = wholeTableDistCPDTO.getHdfsHeatBackups().get(0).getTargetAddress();
            int count = StrUtil.count(sourceAddress, "/");
            // NOTE(review): more than 7 slashes appears to distinguish partition-level paths from
            // table-level ones — confirm against the cluster's hdfs path layout.
            if (count > 7){
                String sourceTableAddress = wholeTableDistCPDTO.getTableHdfsPath();
                // Target-cluster path down to the table directory (prefix before the table name + table name).
                String targetTableAddress = StrSpliter.split(hdfsHeatBackup.getTargetAddress(), hdfsHeatBackup.getTargetTableName(),true, true)
                        .get(0) + hdfsHeatBackup.getTargetTableName();
                String sourceFilePath = duDbFilePath + wholeTableDistCPDTO.getName() + "_source.txt";
                String targetFilePath = duDbFilePath + wholeTableDistCPDTO.getTarName() + "_target.txt";
                String sourceShell = duHdfsFiles+" "+sourceTableAddress+" "+sourceFilePath; // du command for the source table
                String targetShell = duHdfsFiles+" "+targetTableAddress+" "+targetFilePath; // du command for the target table
                int sourceCode = execShellCodeTable(sourceShell,heatlog,tableLog);
                // 999 is the sentinel execShellCodeTable returns when the shell invocation threw.
                if (sourceCode == 999){
                    backupSourceService.updateTaskState(wholeTableDistCPDTO.getSourceDataName(),false);
                }else {
                    int targetCode = execShellCodeTable(targetShell,heatlog,tableLog);
                    if (targetCode == 999){
                        backupSourceService.updateTaskState(wholeTableDistCPDTO.getSourceDataName(),false);
                    }else {
                        Map<String, String> souMap = readTimeFileTable(sourceFilePath, syncRuleConfSet);
                        Map<String, String> tarMap = readTimeFileTable(targetFilePath, syncRuleConfSet);
                        // Do all partitions present on the target still exist on the source?
                        boolean containsAllKeys = souMap.keySet().containsAll(tarMap.keySet());
                        if (!containsAllKeys){
                            logLog(heatlog,tableLog,"【全量】表需要同步："+wholeTableDistCPDTO.getDbTableName()+"（源表的源分区被删除）","1");
                            // A source partition was deleted: fall back to a table-level full sync.
                            tableDistCPDTO = tableSync(hdfsHeatBackup,sourceTableAddress,targetTableAddress,
                                    wholeTableDistCPDTO,tableLog);
                        }else {
                            // Partition sets match — compare per-partition du sizes.
                            for (String key : souMap.keySet()) {
                                String souSize = souMap.get(key);
                                String tarSize = tarMap.get(key);
                                tableLog.append(logTime("源分区："+key+"，源du总大小："+souSize+"，目标du总大小："+tarSize));
                                // NOTE(review): tarSize may be null when the source has a partition the target
                                // lacks; souSize.equals(null) is false, so the table is (correctly) flagged.
                                if(!souSize.equals(tarSize)){
                                    tableDistCPDTO = wholeTableDistCPDTO;
                                }
                            }
                            if (ObjectUtil.isNotEmpty(tableDistCPDTO)){
                                logLog(heatlog,tableLog,"【全量】表需要同步："+wholeTableDistCPDTO.getDbTableName(),"1");
                            }
                        }
                    }
                }
            }else {
                // Path is already table-level and the sizes differ: sync the whole table as configured.
                tableDistCPDTO = wholeTableDistCPDTO;
                logLog(heatlog,tableLog,"【全量】表需要同步："+wholeTableDistCPDTO.getDbTableName(),"1");
            }
        }
        return tableDistCPDTO;
    }

    /**
     * 改为表级同步
     */
    private TableDistCPDTO tableSync(HdfsHeatBackup hdfsHeatBackup,String sourceTableAddress,String targetTableAddress,
                                     TableDistCPDTO wholeTableDistCPDTO, FileAppender tableLog) {
        // Point the backup entry at the table-root hdfs paths instead of partition paths.
        hdfsHeatBackup.setSourceAddress(sourceTableAddress);
        hdfsHeatBackup.setTargetAddress(targetTableAddress);
        // Keep only this single table-level hdfs hot-backup object.
        List<HdfsHeatBackup> singleBackup = new ArrayList<>();
        singleBackup.add(hdfsHeatBackup);
        wholeTableDistCPDTO.setHdfsHeatBackups(singleBackup);
        tableLog.append(logTime("源hdfs同步路径变更为："+sourceTableAddress));
        tableLog.append(logTime("目标hdfs同步路径变更为："+targetTableAddress));
        return wholeTableDistCPDTO;
    }

    /**
     * 执行shell脚本，只返回执行结果
     * @param pathOrCommand 脚本路径或者命令
     * @return
     */
    private int execShellCodeTable(String pathOrCommand, FileAppender heatlog, FileAppender tableLog) {
        int exitValue = 0;
        try {
            // NOTE(review): the child's stdout/stderr are not drained; if the du script writes
            // enough output to fill the pipe buffer, waitFor() can block forever — confirm the
            // script redirects its own output (it appears to write into the given file path).
            Process ps = Runtime.getRuntime().exec(pathOrCommand);
            exitValue = ps.waitFor();
            // Only label a zero exit code as success; the old code logged "（成功）" unconditionally.
            String verdict = exitValue == 0 ? "（成功）" : "（失败）";
            tableLog.append(logTime("【du命令】"+pathOrCommand+"---执行结果："+exitValue+verdict));
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers/executors can observe the interruption.
            Thread.currentThread().interrupt();
            log.error("执行shell脚本失败命令：{}，执行shell脚本失败报错：{}",pathOrCommand,e);
            logLog(heatlog,tableLog,"【du命令】"+pathOrCommand+"---执行异常："+e,"1");
            return 999;
        } catch (Exception e) {
            log.error("执行shell脚本失败命令：{}，执行shell脚本失败报错：{}",pathOrCommand,e);
            logLog(heatlog,tableLog,"【du命令】"+pathOrCommand+"---执行异常："+e,"1");
            // 999 is the sentinel callers check to mean "shell invocation threw".
            return 999;
        }
        return exitValue;
    }

    /**
     * Reads a table's "du" output and returns the size of every partition path
     * that matches one of the sync-rule patterns.
     * filePath: path of the "du" output file to read
     * syncRuleConfSet: partition/sync-rule substrings to look for
     * @return map of matched pattern -> du size string
     */
    private Map<String,String> readTimeFileTable(String filePath, Set<String> syncRuleConfSet) {
        Map<String,String> map = new HashMap<>();
        FileReader fileReader = new FileReader(filePath);
        List<String> hdfsPaths = fileReader.readLines();
        // For each sync-rule pattern, record the size column of the first du line containing it.
        for (String syncRuleConf : syncRuleConfSet) {   // renamed from the typo "yncRuleConf"
            for (String hdfsPath : hdfsPaths) {
                if (StrUtil.isBlank(hdfsPath)) {
                    continue;
                }
                if (hdfsPath.contains(syncRuleConf)) {
                    // du lines are "<size> <path>"; guard against lines with no space, which
                    // previously made substring(0, -1) throw StringIndexOutOfBounds.
                    int sizeEnd = hdfsPath.indexOf(' ');
                    if (sizeEnd < 0) {
                        continue;
                    }
                    map.put(syncRuleConf, hdfsPath.substring(0, sizeEnd));
                    break;
                }
            }
        }
        return map;
    }

    /**
     * 时间窗口--阈值判断
     */
    private void timeBackup(List<TableDistCPDTO> tableDistCPDTOs,FileAppender heatlog) {
        for (TableDistCPDTO distCp : tableDistCPDTOs) {
            // Per-table log buffer.
            FileAppender tblLog = new FileAppender(new File(distCp.getLogPath()), 100, true);
            tblLog.append(logTime("-------------------------【时间窗口阈值判断开始】"));
            tblLog.append(logTime("阈值:"+threshold));
            int hdfsCount = distCp.getHdfsHeatBackups().size();
            List<HdfsHeatBackup> replacement = new ArrayList<HdfsHeatBackup>();
            HdfsHeatBackup first = distCp.getHdfsHeatBackups().get(0);
            if (hdfsCount > threshold){
                // Too many hdfs paths: collapse to a single table-level full sync.
                logLog(heatlog,tblLog,distCp.getDbTableName()+"LS过滤后表hdfs数量："+hdfsCount+"【超过阈值--走表级全量】","1");
                String souTablePath = distCp.getTableHdfsPath();
                String tarTablePath = StrSpliter.split(first.getTargetAddress(), first.getTargetTableName(),true, true)
                        .get(0) + first.getTargetTableName();
                first.setSourceAddress(souTablePath);
                first.setTargetAddress(tarTablePath);
                logLog(heatlog,tblLog,"源hdfs路径："+souTablePath+"--目标hdfs路径："+tarTablePath,"1");
                replacement.add(first);
                // Replace the original path list with the single table-level entry.
                distCp.setHdfsHeatBackups(replacement);
                // Sync range 0 = full table.
                distCp.setSyncRange(0);
            }else {
                logLog(heatlog,tblLog,distCp.getDbTableName()+"LS过滤后表hdfs数量："+hdfsCount+"未超过阈值","1");
            }
            tblLog.append(logTime("-------------------------【时间窗口阈值判断结束】"));
            tblLog.flush();
        }
    }

    /**
     * 刷新hive表+刷新impala表
     */
    @Override
    public List<String> dataRefresh(String sourceDataName, String backupSort, FileAppender heatlog) {
        // Futures keyed by "db.table" so completion can be tracked per table.
        Map<String,Future<Boolean>> refreshFutures = new HashMap<>();
        // Filtered per-table hdfs info cached for this source.
        List<TableDistCPDTO> distCpList = tableDistCPDTOMap.get(sourceDataName);
        int tableCount = distCpList.size();
        for (TableDistCPDTO distCp : distCpList) {
            // Record the refresh start timestamp on the table-level log-analysis row.
            hotStandbyLogTableService.upHotLog(distCp.getTableId(),"【刷目标表】"+DateUtil.format(DateUtil.date(), "yyyy-MM-dd HH:mm:ss")+"--");
            // Per-table log buffer.
            FileAppender tblLog = new FileAppender(new File(distCp.getLogPath()), 100, true);
            tblLog.append(logTime("-------------------------【刷新目标表开始】"));
            Future<Boolean> future = dataRefreshService.execShell(distCp, backupSort, tableCount, heatlog, tblLog);
            refreshFutures.put(distCp.getDbTableName(), future);
        }
        // Block until every async refresh future reports done.
        StateDataDTO stateDataDTO = futureService.futureStatusMap(refreshFutures, heatlog,"LS过滤");
        heatlog.append(logTime("【全部表刷新完成】"));
        heatlog.flush();
        return stateDataDTO.getDataList();
    }

    /**
     * 数据比对
     */
    @Override
    public void dataContrast(String sourceDataName, FileAppender heatlog, List<WhereScriptDTO> whereScriptDTOS) {
        heatLogLog(heatlog,"【初始】比对where条件数量："+whereScriptDTOS.size(),"1");

        // Collect the where-conditions of every table that was synced for this source.
        List<WhereScriptDTO> whereScriptDTOs = new ArrayList<WhereScriptDTO>();
        List<TableDistCPDTO> tableDistCPDTOs = tableDistCPDTOMap.get(sourceDataName);
        String logPath = tableDistCPDTOs.get(0).getLogPath();
        String tbLogPath = logPath.substring(0, logPath.lastIndexOf("/"))+"/";
        // Tables with 2+ where-conditions; value = remaining condition count (decremented in upStateHotLog).
        Map<String,Integer> tbNameMap = new HashMap<>();
        for (TableDistCPDTO tableDistCPDTO : tableDistCPDTOs) {
            whereScriptDTOs.addAll(tableDistCPDTO.getWhereScriptDTOS());
            int size = tableDistCPDTO.getWhereScriptDTOS().size();
            if (size > 1){
                tbNameMap.put(tableDistCPDTO.getDbTableName(),size);
            }
        }

        int sizes = whereScriptDTOs.size();   // number of where-conditions after filtering
        heatLogLog(heatlog,"----------------------------【过滤】后比对where条件------------------------------------","1");
        heatLogLog(heatlog,"【过滤】后比对where条件数量："+sizes,"1");
        heatLogLog(heatlog,"","2");     // flush buffered log lines to file
        // Kick off one asynchronous comparison task per where-condition.
        whereScriptDTOs.forEach(whereScriptDTO -> {
            // NOTE(review): writer is constructed but never written to — it appears to exist only
            // to (re)create/truncate the per-table log file; confirm before removing.
            FileWriter writer = new FileWriter(tbLogPath + whereScriptDTO.getDbTableName() + ".txt");
            iTableContrastService.createTableContrastTask(createModel(whereScriptDTO));
        });

        // Flags (tableFlag) of comparisons already observed as finished.
        Set<String> stringSet = new HashSet<>();
        // Per-table accumulated status used to decide the final ignored/failed/success state.
        Map<String,String> errMap = new HashMap<>();
        DateTime date1 = DateUtil.date();
        // Poll every 2s until all comparisons finish, or the time budget runs out.
        while (sizes != stringSet.size()){
            // Bail out after conHour hours to avoid an infinite loop.
            long betweenDay = DateUtil.between(date1, DateUtil.date(), DateUnit.HOUR);
            if (betweenDay >= conHour){
                heatLogLog(heatlog,"【比对时间超过"+conHour+"小时，强制结束】","1");
                backupSourceService.updateTaskState(sourceDataName,false);
                break;
            }
            for (WhereScriptDTO whereScriptDTO : whereScriptDTOs) {
                if (!stringSet.contains(whereScriptDTO.getTableFlag())){
                    // Fetch this where-condition's comparison result.
                    TableContrastResultEntity tableContrastResultEntity = iTableContrastService
                            .queryTableContrastResult(whereScriptDTO.getTableFlag());
                    if (!tableContrastResultEntity.getStatus().equals(TableContrastEnums.RUNNING.getValue())){
                        stringSet.add(whereScriptDTO.getTableFlag());
                        // Per-table log buffer.
                        FileAppender tableLog = new FileAppender(new File(tbLogPath + whereScriptDTO.getDbTableName() + ".txt"), 100, true);
                        tableLog.append(logTime(whereScriptDTO.getDbTableName()+"表sql条件："+whereScriptDTO.getSourceQueryCriteria()));
                        heatLogLog(heatlog,whereScriptDTO.getDbTableName()+"表sql条件："+whereScriptDTO.getSourceQueryCriteria(),"1");
                        // Look up the id of the table-level log-analysis record for this table.
                        String tableId = "";
                        for (TableDistCPDTO tableDistCPDTO : tableDistCPDTOs) {
                            if (whereScriptDTO.getDbTableName().equals(tableDistCPDTO.getDbTableName())){
                                tableId = tableDistCPDTO.getTableId();
                                break;
                            }
                        }
                        // First completed condition for this table: seed its status as "in progress"
                        // (JXZ — presumably 进行中; confirm against ScanCommonConstants).
                        if(!errMap.containsKey(whereScriptDTO.getDbTableName())){
                            log.warn("=========触发表信息添加进行中状态");
                            errMap.put(whereScriptDTO.getDbTableName(),ScanCommonConstants.ClusterName.JXZ.value);
                        }
                        // Did this comparison task fail?
                        if (tableContrastResultEntity.getStatus().equals(TableContrastEnums.FAIL.getValue())){
                            // Is the failure message one that may be ignored (errHD)?
                            if(!errHD(tableContrastResultEntity.getFalseMessage())){
                                // Real failure: record the failed state (SB — presumably 失败) for the table.
                                errMap.put(whereScriptDTO.getDbTableName(),ScanCommonConstants.ClusterName.SB.value);
                                tableLog.append(logTime("比对完成--状态："+ tableContrastResultEntity.getStatus()+"--"+stringSet.size()+"（完成顺序）"));
                                tableLog.append(logTime("信息："+ tableContrastResultEntity.getFalseMessage()));
                                heatLogLog(heatlog,"【"+whereScriptDTO.getDbTableName()+"表比对完成--状态："+
                                        tableContrastResultEntity.getStatus()+"--信息："+tableContrastResultEntity.getFalseMessage()+"】--"+stringSet.size()+"（完成顺序）","1");
                                // Update the table-level record (handles tables with more than one where-condition).
                                upStateHotLog(tbNameMap,whereScriptDTO.getDbTableName(), tableId,
                                        ScanCommonConstants.ClusterName.BDSB.value,
                                        "【数据比对】" + tableContrastResultEntity.getContrastStartTime() + "--" + DateUtil.format(DateUtil.date(), "yyyy-MM-dd HH:mm:ss"),
                                        tableContrastResultEntity.getTableDataTotal(),
                                        tableContrastResultEntity.getTargetTableDataTotal(),errMap.get(whereScriptDTO.getDbTableName()),tableLog);
                                backupSourceService.updateTaskState(sourceDataName,false);
                            }else{
                                String errHDY = "";
                                // Ignorable SQL error: mark the ignore state as "abnormal" (YC — presumably 异常)
                                // only if the table was still "in progress".
                                if(ScanCommonConstants.ClusterName.JXZ.value.equals(errMap.get(whereScriptDTO.getDbTableName()))){
                                    log.warn("=========触发忽略状态异常");
                                    errHDY = ScanCommonConstants.ClusterName.YC.value;
                                }
                                tableLog.append(logTime("比对完成--状态："+ tableContrastResultEntity.getStatus()+"--"+stringSet.size()+"（完成顺序）"));
                                tableLog.append(logTime("信息："+ tableContrastResultEntity.getFalseMessage()+ScanCommonConstants.ClusterName.DBTX.value));
                                heatLogLog(heatlog,"【"+whereScriptDTO.getDbTableName()+"表比对完成--状态："+
                                        tableContrastResultEntity.getStatus()+"--信息："+tableContrastResultEntity.getFalseMessage()+ScanCommonConstants.ClusterName.DBTX.value+"】--"+stringSet.size()+"（完成顺序）","1");
                                // Update the table-level record (handles tables with more than one where-condition).
                                upStateHotLog(tbNameMap,whereScriptDTO.getDbTableName(), tableId,
                                        ScanCommonConstants.ClusterName.CG.value,
                                        "【数据比对】" + tableContrastResultEntity.getContrastStartTime() + "--" + DateUtil.format(DateUtil.date(), "yyyy-MM-dd HH:mm:ss"),
                                        tableContrastResultEntity.getTableDataTotal(),
                                        tableContrastResultEntity.getTargetTableDataTotal(),errHDY,tableLog);
                                HotStandbyLogTable tableFlag = hotStandbyLogTableService.getById(tableId);
                                // If an earlier condition already put the record into the compare-failed
                                // state (BDSB), propagate the failure to the whole source task.
                                if(tableFlag.getTaskState().equals(ScanCommonConstants.ClusterName.BDSB.value)){
                                    backupSourceService.updateTaskState(sourceDataName,false);
                                }
                            }
                        }else {
                            // Success: record the success state (CG — presumably 成功) for the table.
                            errMap.put(whereScriptDTO.getDbTableName(),ScanCommonConstants.ClusterName.CG.value);
                            tableLog.append(logTime("比对完成--状态："+ tableContrastResultEntity.getStatus()+"--"+stringSet.size()+"（完成顺序）"));
                            tableLog.append(logTime("count总数："+ tableContrastResultEntity.getTableDataTotal()));
                            heatLogLog(heatlog,whereScriptDTO.getDbTableName()+"表比对完成--状态："+
                                    tableContrastResultEntity.getStatus()+"--"+stringSet.size()+"（完成顺序）","1");
                            // Update the table-level record (handles tables with more than one where-condition).
                            upStateHotLog(tbNameMap,whereScriptDTO.getDbTableName(), tableId,
                                    ScanCommonConstants.ClusterName.CG.value,
                                    "【数据比对】"+tableContrastResultEntity.getContrastStartTime()+"--"+ tableContrastResultEntity.getTableContrastEndTime(),
                                    tableContrastResultEntity.getTableDataTotal(),
                                    tableContrastResultEntity.getTargetTableDataTotal(),errMap.get(whereScriptDTO.getDbTableName()),tableLog);
                        }
                        tableLog.flush();
                        heatLogLog(heatlog,"","2");     // flush buffered log lines to file
                    }
                }
            }
            try {
                Thread.sleep(2000);
            } catch (InterruptedException e) {
                // NOTE(review): interrupt flag is not restored here (printStackTrace only) —
                // consider Thread.currentThread().interrupt().
                e.printStackTrace();
            }
        }
        log.warn("【累计是否异常信息量】"+errMap.size());
        log.warn("【比对完成数据量】"+stringSet.size());
        heatLogLog(heatlog,"【全部表比对完成】","1"); // buffer final line
        heatLogLog(heatlog,"","2");     // flush buffered log lines to file
        tableDistCPDTOMap.remove(sourceDataName);  // drop the cached table list for this source
    }

    /**
     * 判断表的where是否有1个以上
     * @return
     */
    private void upStateHotLog(Map<String,Integer> tbNameMap,String dbTableName,String tableId,String state,String str,
                               Long souCountNum,Long tarCountNum,String errHD,FileAppender tableLog) {
        Integer remaining = tbNameMap.get(dbTableName);
        // A table with more than one pending where-condition only gets its counts updated.
        if (tbNameMap.containsKey(dbTableName) && remaining > 1) {
            hotStandbyLogTableService.upByIdStateHotLog(tableId, state, str, souCountNum, tarCountNum);
            tbNameMap.put(dbTableName, remaining - 1);
        } else {
            // Last (or only) condition: close the table log and write the final state.
            tableLog.append(logTime("-------------------------【数据比对结束】"));
            hotStandbyLogTableService.upStateHotLog(tableId, state, str, souCountNum, tarCountNum, errHD);
        }
    }

    /**
     * 源库和目标库比对信息
     * @return
     */
    private TableContrastParamEntity createModel(WhereScriptDTO whereScriptDTOS) {
        // Build the comparison-task parameters from the where-condition DTO.
        TableContrastParamEntity param = new TableContrastParamEntity();
        param.setContrastType(TableContrastEnums.WARM.getValue());
        param.setSourceDbName(whereScriptDTOS.getSourceDbName());
        param.setSourceTableName(whereScriptDTOS.getSourceTableName());
        param.setSourceQueryCriteria(whereScriptDTOS.getSourceQueryCriteria());
        param.setTargetDbName(whereScriptDTOS.getTargetDbName());
        param.setTargetTableName(whereScriptDTOS.getTargetTableName());
        param.setTargetQueryCriteria(whereScriptDTOS.getTargetQueryCriteria());
        return param;
    }

    /**
     * 加锁，防止并发问题
     * @param heatlog
     * @param logStr
     * @param str   1、添加日志 2、把日志刷入文件
     */
    private synchronized void heatLogLog(FileAppender heatlog,String logStr,String str) {
        switch (str) {
            case "1":
                // Buffer a timestamped line into the source-level log.
                heatlog.append(logTime(logStr));
                break;
            default:
                // Anything else flushes the buffered lines to the file.
                heatlog.flush();
                break;
        }
    }

    /**
     * 加锁，防止并发问题
     * @param heatlog
     * @param tableLog
     * @param logStr
     * @param str   1、添加日志 2、把日志刷入文件
     */
    private synchronized void logLog(FileAppender heatlog,FileAppender tableLog,String logStr,String str) {
        switch (str) {
            case "1":
                // Buffer a timestamped line into both the source-level and table-level logs.
                heatlog.append(logTime(logStr));
                tableLog.append(logTime(logStr));
                break;
            default:
                // Anything else flushes only the source-level log (matches existing behavior).
                heatlog.flush();
                break;
        }
    }

    /**
     * 日志加日期时间
     * @param str
     * @return
     */
    private String logTime(String str) {
        // Prefix the message with a "[yyyy-MM-dd HH:mm:ss]" timestamp.
        String stamp = DateUtil.format(DateUtil.date(), "yyyy-MM-dd HH:mm:ss");
        return "[" + stamp + "]" + str;
    }

    /**
     * 获取第几个斜杆所在位置
     * @return
     */
    public int getSlashIndex(String url,int num){
        if (num <= 0) {
            // Fewer than one slash requested: never found (matches the original loop).
            return -1;
        }
        int pos = -1;
        // Jump from slash to slash with indexOf instead of scanning char by char.
        for (int found = 0; found < num; found++) {
            pos = url.indexOf('/', pos + 1);
            if (pos < 0) {
                // The url contains fewer than num slashes.
                return -1;
            }
        }
        return pos;
    }

    /**
     * 判断是否可忽略报错
     * @param err
     * @return
     */
    public boolean errHD(String err){
        // Compare leniently: the ignorable-SQL constant must appear in the error text
        // after both are normalized the same way.
        String normalizedErr = normalizeForErrCompare(err);
        String ignorableSql = normalizeForErrCompare(ScanCommonConstants.ClusterName.DBSQLERR.value);
        return normalizedErr.contains(ignorableSql);
    }

    /** Drops spaces, colons and slashes, then upper-cases (Locale.ROOT) for lenient matching. */
    private String normalizeForErrCompare(String s) {
        return s.replace(" ", "").replace(":", "").replace("/", "").toUpperCase(Locale.ROOT);
    }
}
