package com.hexinfo.dmpro.sparing.service.impl;

import cn.hutool.core.date.DateUtil;
import cn.hutool.core.io.file.FileAppender;
import cn.hutool.core.util.ObjectUtil;
import com.hexinfo.dmpro.common.model.MessageCenter;
import com.hexinfo.dmpro.common.utils.ScanCommonConstants;
import com.hexinfo.dmpro.component.comparison.entity.TableContrastParamEntity;
import com.hexinfo.dmpro.component.comparison.entity.TableContrastResultEntity;
import com.hexinfo.dmpro.component.comparison.enums.TableContrastEnums;
import com.hexinfo.dmpro.component.comparison.service.ITableContrastService;
import com.hexinfo.dmpro.sparing.dto.DistCPDTO;
import com.hexinfo.dmpro.sparing.dto.RuleToDistCPDTO;
import com.hexinfo.dmpro.sparing.dto.TableDistCPDTO;
import com.hexinfo.dmpro.sparing.dto.WhereScriptDTO;
import com.hexinfo.dmpro.sparing.model.HdfsHeatBackup;
import com.hexinfo.dmpro.sparing.service.*;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Service;

import javax.sql.DataSource;
import java.io.File;
import java.sql.Connection;
import java.sql.Statement;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;

/**
 * Sourceless (time-window) hot-backup pipeline.
 *
 * <p>Asynchronously runs the "LS" filter for each table of a source, waits until
 * every table has been filtered, then drives the follow-up phases: data sync
 * (distcp), hive/impala refresh, and source-vs-target table comparison.
 */
@Service
@Slf4j
public class LsFilterServiceImpl implements LsFilterService {

    @Autowired
    private DataFilterService dataFilterService;
    // NOTE(review): only referenced from commented-out calls below; kept so
    // Spring bean wiring stays unchanged — confirm before removing.
    @Autowired
    private DistCPService distCPService;
    @Autowired
    private DistCPTimeService distCPTimeService;
    @Autowired
    private DataRefreshService dataRefreshService;
    @Autowired
    private ITableContrastService iTableContrastService;

    // NOTE(review): not used anywhere in this class; kept so Spring bean
    // wiring stays unchanged — confirm before removing.
    @Autowired
    @Qualifier("connPoolHiveSource")
    private DataSource connPoolSource;

    // Number of tables whose LS filtering has completed, keyed by source name.
    private final Map<String, Integer> statusMap = new ConcurrentHashMap<>();

    /**
     * Atomically increments the completed-table counter for the given source.
     *
     * @return the counter value after the increment. Returning the merged value
     *         lets {@link #backupData} decide "all tables done" without a
     *         separate read that could race with another worker's removal.
     */
    private int updateStatus(String sourceDataName) {
        return statusMap.merge(sourceDataName, 1, Integer::sum);
    }

    // Filtered HDFS information for each table, keyed by source name.
    private final Map<String, List<TableDistCPDTO>> tableDistCPDTOMap = new ConcurrentHashMap<>();

    /** Publishes the filtered per-table HDFS info for a source. */
    private void updateTableDistCPDTOMap(String souName, List<TableDistCPDTO> tableDistCPDTOs) {
        tableDistCPDTOMap.put(souName, tableDistCPDTOs);
    }

    // Post-LS-filter sync status per source.
    // running: in progress; continue: proceed to next phase; heatEnd: finished.
    private final Map<String, String> lsStatusMap = new ConcurrentHashMap<>();

    @Override
    public void updateLsStatusMap(String souName, String status) {
        lsStatusMap.put(souName, status);
    }

    @Override
    public String getLsStatus(String souName) {
        return lsStatusMap.get(souName);
    }

    @Override
    public void delLsStatus(String souName) {
        lsStatusMap.remove(souName);
    }

    /**
     * Runs the LS filter for one table asynchronously and, once the last table
     * of the source finishes, triggers the backup phase via {@link #backupData}.
     *
     * @param tableDistCPDTOs shared result list the filtered table is appended
     *                        to; shared across all async workers of one source
     * @param tableDistCPDTO  the table to filter
     * @param heatlog         source-level log appender
     * @param tableLog        per-table log appender
     * @param tableSize       total number of tables being filtered for the source
     */
    @Override
    @Async("threadPoolTaskExecutor")
    public void allLsFilter(List<TableDistCPDTO> tableDistCPDTOs, TableDistCPDTO tableDistCPDTO,
                            FileAppender heatlog, FileAppender tableLog, Integer tableSize) {
        String dbTableName = tableDistCPDTO.getDbTableName();
        try {
            int size = tableDistCPDTO.getHdfsHeatBackups().size();   // initial hdfs entry count
            tableLog.append(logTime(dbTableName+"表LS过滤开始--hdfs数量初始："+size));
            // Full vs. incremental classification: a full backup has no
            // time-partition column (it may have other partition columns, or no
            // partitions at all and just a single hdfs path).
            if (tableDistCPDTO.getSyncRange() == null || tableDistCPDTO.getSyncRange() == 0){
                // Full: filter this table's hdfs entries in place.
                dataFilterService.allLsFilter(tableDistCPDTO, heatlog, tableLog);
                if (ObjectUtil.isEmpty(tableDistCPDTO.getHdfsHeatBackups())){
                    tableLog.append(logTime(dbTableName+"--全量--无符合文件，不进行热备操作"));
                }else {
                    logLog(heatlog,tableLog,"【"+dbTableName+"--全量--hdfs数量初始："+size+"--过滤后hdfs数量:"+tableDistCPDTO.getHdfsHeatBackups().size() +"】","1");
                    addFiltered(tableDistCPDTOs, tableDistCPDTO);
                }
            }else {
                // Incremental: filter this table's hdfs entries in place.
                dataFilterService.addLsFilter(tableDistCPDTO, heatlog, tableLog);
                if (ObjectUtil.isEmpty(tableDistCPDTO.getHdfsHeatBackups())){
                    tableLog.append(logTime(dbTableName+"--增量--无符合文件，不进行热备操作"));
                }else {
                    logLog(heatlog,tableLog,"【"+dbTableName+"--增量--hdfs数量初始："+size+"--过滤后hdfs数量:"+tableDistCPDTO.getHdfsHeatBackups().size()
                            +"--过滤后hdfs文件总大小："+tableDistCPDTO.getHdfsFileSize()+"】","1");
                    addFiltered(tableDistCPDTOs, tableDistCPDTO);
                }
            }
        }catch (Exception e){
            logLog(heatlog,tableLog,dbTableName+"--LS过滤异常："+e,"1");
            // Parameterized logging with the throwable preserves the stack trace.
            log.error("{}--LS过滤异常", dbTableName, e);
        }
        backupData(tableDistCPDTOs, tableDistCPDTO.getSourceDataName(), tableLog, heatlog, tableSize);
    }

    /**
     * Appends a filtered table to the shared result list. The list is mutated by
     * every async worker of the source concurrently, so guard the structural
     * modification (the list type is chosen by the caller and may not be
     * thread-safe).
     */
    private void addFiltered(List<TableDistCPDTO> tableDistCPDTOs, TableDistCPDTO tableDistCPDTO) {
        synchronized (tableDistCPDTOs) {
            tableDistCPDTOs.add(tableDistCPDTO);
        }
    }

    /**
     * Called after each table's LS filter. Only the worker that completes the
     * last of {@code tableSize} tables proceeds: it records the filtered tables,
     * updates the source status and launches the hot-backup step per table.
     *
     * @param tableLog  per-table log appender (flushed here)
     * @param heatlog   source-level log appender (flushed here)
     * @param tableSize total number of tables expected for the source
     */
    private void backupData(List<TableDistCPDTO> tableDistCPDTOs, String souName,
                            FileAppender tableLog, FileAppender heatlog, Integer tableSize) {
        // Atomic increment-and-read avoids the check-then-act race the old
        // compareTo/equals/== triple check had.
        int completed = updateStatus(souName);
        tableLog.flush();                   // flush the per-table log
        logLog(heatlog, tableLog, "", "2"); // flush the source log
        if (completed >= tableSize){
            statusMap.remove(souName);            // all done: drop the counter
            int sizes = tableDistCPDTOs.size();   // table count after filtering
            heatlog.append(logTime("【全源的表LS过滤完成，过滤后表数量："+sizes+"】"));
            logLog(heatlog, tableLog, "", "2");
            // No table qualifies for syncing: finish immediately.
            if (sizes == 0){
                updateLsStatusMap(souName, ScanCommonConstants.ClusterName.HEATEND.value);
                return;
            }
            // Publish the filtered per-table hdfs info and continue the pipeline.
            updateTableDistCPDTOMap(souName, tableDistCPDTOs);
            updateLsStatusMap(souName, ScanCommonConstants.ClusterName.CONTINUE.value);

            tableDistCPDTOs.forEach(distCPDTO -> {
                // Full vs. incremental hot backup (see allLsFilter for the rule).
                if (distCPDTO.getSyncRange() == null || distCPDTO.getSyncRange() == 0){
                    allQuantity(distCPDTO, tableLog, heatlog, sizes);
                }else {
                    addQuantity(distCPDTO, tableLog, heatlog, sizes);
                }
            });
        }
    }

    /** Sourceless (time window) -- no time-partition column -- full hot backup. */
    private void allQuantity(TableDistCPDTO tableDistCPDTO, FileAppender tableLog, FileAppender heatlog, int sizes) {
        buildDistCP(tableDistCPDTO, tableLog, heatlog, sizes);
    }

    /** Sourceless (time window) -- time-partition column -- incremental hot backup. */
    private void addQuantity(TableDistCPDTO tableDistCPDTO, FileAppender tableLog, FileAppender heatlog, int sizes) {
        buildDistCP(tableDistCPDTO, tableLog, heatlog, sizes);
    }

    /**
     * Shared body of {@link #allQuantity} and {@link #addQuantity} (they were
     * identical). Currently only builds the DistCPDTO per hdfs entry; the actual
     * shell execution is disabled (commented out in the original).
     */
    private void buildDistCP(TableDistCPDTO tableDistCPDTO, FileAppender tableLog, FileAppender heatlog, int sizes) {
        for (HdfsHeatBackup hdfsHeatBackup : tableDistCPDTO.getHdfsHeatBackups()){
            DistCPDTO distCPDTO = new DistCPDTO(
                    tableDistCPDTO.getSourceDataName(),
                    hdfsHeatBackup,
                    tableDistCPDTO.getHdfsHeatBackups().size(),
                    heatlog,
                    tableLog);
            // Step 2: hot backup to backup cluster -> backup to target.
            //distCPService.execShell(distCPDTO,sizes);
        }
    }

    /**
     * Phase ①: data sync — runs the distcp step for every filtered table of the
     * source recorded by {@link #backupData}.
     */
    @Override
    public void dataSync(String sourceDataName, FileAppender heatlog) {
        // Fetch the filtered per-table hdfs info of this source.
        List<TableDistCPDTO> tableDistCPDTOs = tableDistCPDTOMap.get(sourceDataName);
        int sizes = tableDistCPDTOs.size();   // table count after filtering
        tableDistCPDTOs.forEach(distCPDTO -> {
            // Per-table log appender.
            FileAppender tableLog = new FileAppender(new File(distCPDTO.getLogPath()), 100, true);
            // Full vs. incremental (both currently run the same distcp step).
            if (distCPDTO.getSyncRange() == null || distCPDTO.getSyncRange() == 0){
                allQuantitySync(distCPDTO, tableLog, heatlog, sizes);
            }else {
                addQuantitySync(distCPDTO, tableLog, heatlog, sizes);
            }
        });
    }

    /** Sourceless (time window) -- no time-partition column -- full hot backup sync. */
    private void allQuantitySync(TableDistCPDTO tableDistCPDTO, FileAppender tableLog, FileAppender heatlog, int sizes) {
        execDistCPSync(tableDistCPDTO, tableLog, heatlog, sizes);
    }

    /** Sourceless (time window) -- time-partition column -- incremental hot backup sync. */
    private void addQuantitySync(TableDistCPDTO tableDistCPDTO, FileAppender tableLog, FileAppender heatlog, int sizes) {
        execDistCPSync(tableDistCPDTO, tableLog, heatlog, sizes);
    }

    /**
     * Shared body of {@link #allQuantitySync} and {@link #addQuantitySync}
     * (they were identical): runs the distcp shell per hdfs entry of the table.
     */
    private void execDistCPSync(TableDistCPDTO tableDistCPDTO, FileAppender tableLog, FileAppender heatlog, int sizes) {
        for (HdfsHeatBackup hdfsHeatBackup : tableDistCPDTO.getHdfsHeatBackups()){
            DistCPDTO distCPDTO = new DistCPDTO(
                    tableDistCPDTO.getSourceDataName(),
                    hdfsHeatBackup,
                    tableDistCPDTO.getHdfsHeatBackups().size(),
                    heatlog,
                    tableLog);
            // Step 2: hot backup to backup cluster -> backup to target.
            distCPTimeService.execShell(distCPDTO, sizes);
        }
    }

    /**
     * Refreshes the hive tables and the impala tables for every filtered table
     * of the source.
     *
     * @param backupSort refresh ordering/mode token passed through to the
     *                   refresh service — semantics defined there
     */
    @Override
    public void dataRefresh(String sourceDataName, String backupSort, FileAppender heatlog) {
        // Fetch the filtered per-table hdfs info of this source.
        List<TableDistCPDTO> tableDistCPDTOs = tableDistCPDTOMap.get(sourceDataName);
        int sizes = tableDistCPDTOs.size();   // table count after filtering
        tableDistCPDTOs.forEach(tableDistCPDTO -> {
            // Per-table log appender.
            FileAppender tableLog = new FileAppender(new File(tableDistCPDTO.getLogPath()), 100, true);
            dataRefreshService.execShell(tableDistCPDTO, backupSort, sizes, heatlog, tableLog);
        });
    }

    /**
     * Compares source and target tables: launches one comparison task per
     * where-script whose backup "db.table" was actually hot-backed-up, then
     * polls every 2s until all comparisons leave the RUNNING state.
     */
    @Override
    public void dataContrast(String sourceDataName, FileAppender heatlog, List<WhereScriptDTO> whereScriptDTOS) {
        // Exclude tables that were never hot-backed-up: keep only where-scripts
        // whose backup "db.table" appears among the filtered tables. A Set makes
        // the membership test O(1) instead of the old per-element anyMatch scan.
        List<TableDistCPDTO> tableDistCPDTOs = tableDistCPDTOMap.get(sourceDataName);
        Set<String> dbTableFilters = tableDistCPDTOs.stream()
                .map(TableDistCPDTO::getBackupName)
                .collect(Collectors.toSet());
        List<WhereScriptDTO> whereScriptDTOs = whereScriptDTOS.stream()
                .filter(whereScr -> dbTableFilters.contains(whereScr.getDbTableName()))
                .collect(Collectors.toList());
        int sizes = whereScriptDTOs.size();   // table count after filtering
        heatLogLog(heatlog, "比对where条件数量："+sizes, "1");
        heatLogLog(heatlog, "", "2");
        // Launch one comparison task per remaining where-script.
        whereScriptDTOs.forEach(whereScriptDTO ->
                iTableContrastService.createTableContrastTask(createModel(whereScriptDTO)));

        // Poll until every comparison has finished.
        Set<String> finished = new HashSet<>();
        while (sizes != finished.size()){
            for (WhereScriptDTO whereScriptDTO : whereScriptDTOs) {
                if (!finished.contains(whereScriptDTO.getTableFlag())){
                    // Fetch the comparison result for this table.
                    TableContrastResultEntity tableContrastResultEntity = iTableContrastService
                            .queryTableContrastResult(whereScriptDTO.getTableFlag());
                    if (!tableContrastResultEntity.getStatus().equals(TableContrastEnums.RUNNING.getValue())){
                        finished.add(whereScriptDTO.getTableFlag());
                        heatLogLog(heatlog,"【"+whereScriptDTO.getDbTableName()+"表比对完成--状态：【"+
                                tableContrastResultEntity.getStatus()+"】--信息："+tableContrastResultEntity.getFalseMessage()+"】--"+finished.size(),"1");
                        heatLogLog(heatlog, "", "2");
                    }
                }
            }
            try {
                Thread.sleep(2000);
            } catch (InterruptedException e) {
                // Restore the interrupt flag and stop waiting instead of
                // swallowing the interruption with printStackTrace().
                Thread.currentThread().interrupt();
                log.warn("{}--数据比对等待被中断", sourceDataName, e);
                return;
            }
        }
        heatLogLog(heatlog, "【全部表比对完成】", "1");
        heatLogLog(heatlog, "", "2");
        tableDistCPDTOMap.remove(sourceDataName);  // drop the per-source cache
    }

    /**
     * Builds the source-vs-target comparison parameters from a where-script.
     *
     * @return comparison parameters for {@code createTableContrastTask}
     */
    private TableContrastParamEntity createModel(WhereScriptDTO whereScriptDTOS) {
        TableContrastParamEntity tableContrastParam = new TableContrastParamEntity();
        tableContrastParam.setContrastType(TableContrastEnums.WARM.getValue());
//        tableContrastParam.setTableFlag(whereScriptDTOS.getTableFlag());
        tableContrastParam.setSourceDbName(whereScriptDTOS.getSourceDbName());
        tableContrastParam.setSourceTableName(whereScriptDTOS.getSourceTableName());
        tableContrastParam.setSourceQueryCriteria(whereScriptDTOS.getSourceQueryCriteria());
        tableContrastParam.setTargetDbName(whereScriptDTOS.getTargetDbName());
        tableContrastParam.setTargetTableName(whereScriptDTOS.getTargetTableName());
        tableContrastParam.setTargetQueryCriteria(whereScriptDTOS.getTargetQueryCriteria());
        return tableContrastParam;
    }

    /**
     * Serialized log helper for both appenders.
     *
     * @param str "1" appends {@code logStr} to both logs; anything else flushes
     *            the source log to disk
     */
    private synchronized void logLog(FileAppender heatlog, FileAppender tableLog, String logStr, String str) {
        if ("1".equals(str)){
            heatlog.append(logTime(logStr));
            tableLog.append(logTime(logStr));
        }else {
            heatlog.flush();
        }
    }

    /**
     * Serialized log helper for the source log only.
     *
     * @param str "1" appends {@code logStr}; anything else flushes to disk
     */
    private synchronized void heatLogLog(FileAppender heatlog, String logStr, String str) {
        if ("1".equals(str)){
            heatlog.append(logTime(logStr));
        }else {
            heatlog.flush();
        }
    }

    /**
     * Prefixes a log line with the current "[yyyy-MM-dd HH:mm:ss]" timestamp.
     */
    private String logTime(String str) {
        return "["+ DateUtil.format(DateUtil.date(), "yyyy-MM-dd HH:mm:ss")+"]" +str;
    }

}
