
package com.hexinfo.dmpro.sparing.service.impl;

import cn.hutool.core.date.DateField;
import cn.hutool.core.date.DateUtil;
import cn.hutool.core.text.StrSpliter;
import cn.hutool.core.util.ObjectUtil;
import cn.hutool.core.util.StrUtil;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.core.toolkit.Wrappers;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import com.hexinfo.dmpro.common.model.ClusterSparingConf;
import com.hexinfo.dmpro.common.model.MessageCenter;
import com.hexinfo.dmpro.common.service.ClusterSparingConfService;
import com.hexinfo.dmpro.common.service.MessageCenterService;
import com.hexinfo.dmpro.common.utils.CommonConstants;
import com.hexinfo.dmpro.common.utils.ScanCommonConstants;
import com.hexinfo.dmpro.sparing.dao.ScanMetadataMapper;
import com.hexinfo.dmpro.sparing.dto.BatchSyncTypeDTO;
import com.hexinfo.dmpro.sparing.dto.LsHdfsDTO;
import com.hexinfo.dmpro.sparing.dto.ScanMetadataDTO;
import com.hexinfo.dmpro.sparing.dto.ScanSyncRuleDTO;
import com.hexinfo.dmpro.sparing.model.ScanMetadata;
import com.hexinfo.dmpro.sparing.service.HiveToOracleService;
import com.hexinfo.dmpro.sparing.service.ScanMetadataService;
import lombok.AllArgsConstructor;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;


/**
 * Metadata-scan table service.
 *
 * <p>Loads Hive table metadata into the scan table (full and incremental loads),
 * exposes rule-based queries over it, and maintains per-row sync state and
 * sync-rule assignments used by the hot-standby data-sync configuration.
 *
 * @author yemw
 * @date 2023-08-28 15:44:04
 */
@Service
@AllArgsConstructor
public class ScanMetadataServiceImpl extends ServiceImpl<ScanMetadataMapper, ScanMetadata> implements ScanMetadataService {

    // Label recorded in the message subject when no trigger source is supplied.
    private static final String NO_SOURCE = "无源";

    // Rows per batch for the first-time full metadata load.
    private static final int FULL_LOAD_BATCH = 10000;

    // Rows per batch for the incremental metadata load.
    private static final int INCREMENT_BATCH = 5000;

    private final HiveToOracleService hiveToOracleService;

    private final MessageCenterService messageCenterService;

    private final ClusterSparingConfService clusterSparingConfService;

    // NOTE(review): duplicates the inherited baseMapper; kept because
    // queryTimePartitionTable() calls it explicitly.
    private final ScanMetadataMapper scanMetadataMapper;

    /**
     * Pages through non-deleted scan-metadata rows, optionally filtered by
     * database name (fuzzy), table name (fuzzy) and sync type (exact).
     *
     * @param page         MyBatis-Plus page request
     * @param scanMetadata filter holder; blank fields are ignored
     * @return the populated page
     */
    @Override
    public Page queryList(Page page, ScanMetadata scanMetadata) {
        LambdaQueryWrapper<ScanMetadata> wrapper = Wrappers.lambdaQuery();
        wrapper.like(StrUtil.isNotBlank(scanMetadata.getTblDatabaseName()), ScanMetadata::getTblDatabaseName, scanMetadata.getTblDatabaseName())
                .like(StrUtil.isNotBlank(scanMetadata.getTblName()), ScanMetadata::getTblName, scanMetadata.getTblName())
                .eq(StrUtil.isNotBlank(scanMetadata.getSyncType()), ScanMetadata::getSyncType, scanMetadata.getSyncType())
                .eq(ScanMetadata::getDel, CommonConstants.STATUS_NORMAL);
        return this.baseMapper.selectPage(page, wrapper);
    }

    /**
     * Creates and persists the "metadata acquisition" message-center record
     * shared by the full and incremental loads.
     *
     * @param source trigger source appended to the message subject
     * @return the saved message (its id is generated by the save)
     */
    private MessageCenter createScanMessage(String source) {
        MessageCenter mess = new MessageCenter();
        mess.setMenu(ScanCommonConstants.ClusterName.SJRB.value);
        mess.setOperator("admin");
        mess.setDataType(ScanCommonConstants.ClusterName.JXZ.value);
        mess.setMessageSubject(ScanCommonConstants.ClusterName.YSJHQ.value + source);
        mess.setMessageWeight(ScanCommonConstants.ClusterName.ONE.value);
        messageCenterService.save(mess);
        return mess;
    }

    /**
     * First-time full load: copies Hive metadata into the scan table (and the
     * hot-standby sync config) in fixed-size batches, each row entering in the
     * "pending confirmation" sync state.
     *
     * @param source trigger source for the message subject; defaults to 无源
     * @return id of the message-center record created for this run
     */
    @Override
    public String hiveMetadataToOracle(String source) {
        if (StrUtil.isBlank(source)) {
            source = NO_SOURCE;
        }
        // Total row count, used only to size the batch loop. The Hive SDS table can
        // report more rows than the paging SQL actually returns, so we page by a
        // fixed window (deliberately running one extra window) rather than trusting
        // the count exactly.
        Integer total = hiveToOracleService.selectHiveTableCount();
        MessageCenter mess = createScanMessage(source);
        int batches = total / FULL_LOAD_BATCH;
        for (int i = 0; i <= batches; i++) {
            int start = i * FULL_LOAD_BATCH + 1;
            int end = FULL_LOAD_BATCH * (i + 1);
            // Upsert this window into the scan table (sync state: pending confirmation).
            this.saveOrUpdateBatch(hiveToOracleService.selectHiveTableInfo(start, end), FULL_LOAD_BATCH);
        }
        // NOTE(review): the message is never flipped to the "completed" state here
        // (that update was commented out upstream) — confirm the caller handles it.
        return mess.getId();
    }

    /**
     * Incremental load: copies Hive metadata changed since the previous run
     * (timestamp of the last acquisition message) into the scan table, in
     * fixed-size batches, de-duplicating before and after.
     *
     * @param source trigger source for the message subject; defaults to 无源
     * @return id of the message-center record created for this run
     */
    @Override
    public String hiveSqlOracleIncrement(String source) {
        if (StrUtil.isBlank(source)) {
            source = NO_SOURCE;
        }
        MessageCenter mess = createScanMessage(source);
        // Drop duplicate rows before scanning.
        int x = this.baseMapper.delRepeatData();
        log.warn("================删除重复数据" + x + "条======Q@Q");
        // Previous acquisition message supplies the low watermark timestamp.
        MessageCenter times = messageCenterService.orderToCreateTimeONE(ScanCommonConstants.ClusterName.YSJHQ.value);
        log.warn("================上一次时间" + times.getCreateTime() + "======Q@Q");
        // Both bounds in epoch seconds, as expected by the Hive-side queries.
        long staTime = times.getCreateTime().getTime() / 1000;
        long endTime = System.currentTimeMillis() / 1000;
        try {
            // Changed-row count sizes the batch loop (upstream note: this SQL
            // could use optimisation).
            Integer total = hiveToOracleService.hiveSqlOracleIncrementCount(staTime, endTime);
            if (ObjectUtil.isNotEmpty(total) && total > 0) {
                int eg = total;
                int batches = eg / INCREMENT_BATCH;
                log.warn("================更新和修改总数" + eg + "Q@Q");
                for (int i = 0; i <= batches; i++) {
                    log.warn("================" + INCREMENT_BATCH + "一次/第" + (i + 1) + "Q@Q");
                    int start = i * INCREMENT_BATCH + 1;
                    int end = INCREMENT_BATCH * (i + 1);
                    // Fetch one window; an empty window means we are done early.
                    List<ScanMetadata> metadataList = hiveToOracleService.hiveSqlOracleIncrement(staTime, endTime, start, end);
                    if (metadataList != null && !metadataList.isEmpty()) {
                        log.warn("================第" + (i + 1) + "批次");
                        log.warn("================元数据扫描表数据" + metadataList.size() + "条");
                        this.saveOrUpdateBatch(metadataList, INCREMENT_BATCH);
                    } else {
                        break;
                    }
                }
            } else {
                log.warn("================更新和修改总数0条Q@Q");
            }
        } catch (Exception e) {
            // FIX: preserve the cause — the original logged only a fixed message
            // and swallowed the stack trace.
            log.error("================查询hive数据量条数异常", e);
        }
        // Drop any duplicates introduced by the batched upserts.
        x = this.baseMapper.delRepeatData();
        log.warn("================删除重复数据" + x + "条======Q@Q");
        return mess.getId();
    }

    /**
     * Rule-based query (excludes rows already in the "filtered" state).
     *
     * @param dto sync-rule filter
     * @return matching scan-metadata rows
     */
    @Override
    public List<ScanMetadata> qyScanMetadataWhere(ScanSyncRuleDTO dto) {
        return this.baseMapper.qyScanMetadataWhere(dto);
    }

    /**
     * Rule-based query (GROUP BY variant).
     *
     * @param dto sync-rule filter
     * @return matching scan-metadata rows
     */
    @Override
    public List<ScanMetadata> qyScanMetadataWhereWord(ScanSyncRuleDTO dto) {
        return this.baseMapper.qyScanMetadataWhereWord(dto);
    }

    /**
     * Manual-sync rule-based query (GROUP BY variant).
     *
     * @param dto sync-rule filter
     * @return matching scan-metadata rows
     */
    @Override
    public List<ScanMetadata> qyScanMetadataTable(ScanSyncRuleDTO dto) {
        return this.baseMapper.qyScanMetadataTable(dto);
    }

    /**
     * Rule-based query (excludes rows in the "confirmed" and "filtered" states).
     *
     * @param dto sync-rule filter
     * @return matching scan-metadata rows
     */
    @Override
    public List<ScanMetadata> qyScanMetadataWhereYQR(ScanSyncRuleDTO dto) {
        return this.baseMapper.qyScanMetadataWhereYQR(dto);
    }

    /**
     * Finds the single non-deleted cluster-sparing config whose cluster name
     * matches {@code clusterName} (fuzzy).
     *
     * @param wrapper     caller-supplied query wrapper the conditions are added to
     * @param clusterName cluster name fragment to match
     * @return the matching config row
     */
    @Override
    @Transactional
    public ClusterSparingConf wrapperClusterSparingConf(LambdaQueryWrapper<ClusterSparingConf> wrapper, String clusterName) {
        wrapper.select().like(ClusterSparingConf::getClusterName, clusterName).eq(ClusterSparingConf::getDel, CommonConstants.STATUS_NORMAL);
        return clusterSparingConfService.getOne(wrapper);
    }

    /**
     * Returns today's rows whose HDFS location did not exist in yesterday's rows
     * (today = last 24h window, yesterday = the 24h window before that).
     *
     * @param wrapper caller-supplied base conditions applied to both windows
     * @return rows created today whose location is new compared to yesterday
     */
    @Override
    public List<ScanMetadata> wrapperScanMetadata(LambdaQueryWrapper<ScanMetadata> wrapper) {
        Date today = DateUtil.date();
        Date yesterday = DateUtil.yesterday();
        Date before = DateUtil.offset(today, DateField.DAY_OF_MONTH, -2);
        // FIX: the original reused one wrapper for both queries, so the second
        // selectList ANDed both createTime ranges together and could match almost
        // nothing. Clone the caller's wrapper so each query gets an independent
        // copy of the base conditions.
        List<ScanMetadata> todayRows = this.baseMapper.selectList(wrapper.clone()
                .between(ScanMetadata::getCreateTime, yesterday, today)
                .eq(ScanMetadata::getDel, CommonConstants.STATUS_NORMAL));
        List<ScanMetadata> priorRows = this.baseMapper.selectList(wrapper.clone()
                .between(ScanMetadata::getCreateTime, before, yesterday)
                .eq(ScanMetadata::getDel, CommonConstants.STATUS_NORMAL));
        // Set-based difference on HDFS location: O(n) instead of the original
        // nested-stream O(n^2) scan.
        Set<String> knownLocations = priorRows.stream()
                .map(ScanMetadata::getLocation)
                .collect(Collectors.toSet());
        return todayRows.stream()
                .filter(item -> !knownLocations.contains(item.getLocation()))
                .collect(Collectors.toList());
    }

    /**
     * Updates sync state and sync-rule id for rows matching partition name and
     * location.
     *
     * @param syncType   target sync state
     * @param syncRuleId sync rule to attach
     * @param partName   partition name filter
     * @param location   HDFS location filter
     * @return number of rows updated
     */
    @Override
    public int updateToSyncType(String syncType, String syncRuleId, String partName, String location) {
        return this.baseMapper.updateToSyncType(syncType, syncRuleId, partName, location);
    }

    /**
     * Batch filter / batch confirm: moves every row in the DTO to the target
     * sync state.
     *
     * @param dto ids plus the target sync state
     * @return true if the batch upsert succeeded
     */
    @Override
    public boolean updateBatchSyncType(BatchSyncTypeDTO dto) {
        List<ScanMetadata> scanMetadataList = new ArrayList<>();
        dto.getScanMetadataDTO().forEach(li -> {
            ScanMetadata scanMetadata = new ScanMetadata();
            scanMetadata.setId(li.getId());
            // Keep the sync-rule id only when moving to the YGL state; any other
            // target state clears it.
            // NOTE(review): the original comment said the *confirmed* state clears
            // the rule id, which matches the else-branch — confirm YGL vs YQR
            // semantics against ScanCommonConstants.
            if (ScanCommonConstants.ClusterName.YGL.value.equals(dto.getSyncType())) {
                scanMetadata.setSyncRuleId(li.getSyncRuleId());
            } else {
                scanMetadata.setSyncRuleId(null);
            }
            scanMetadata.setSyncType(dto.getSyncType());
            scanMetadataList.add(scanMetadata);
        });
        return this.saveOrUpdateBatch(scanMetadataList);
    }

    /**
     * Batch change of the sync-rule id.
     *
     * <p>Currently disabled: always returns {@code false} (the original batch
     * implementation was removed upstream).
     *
     * @param dto requested rule change (ignored)
     * @return always {@code false}
     */
    @Override
    public boolean syncConfIdChange(ScanMetadataDTO dto) {
        return false;
    }

    /**
     * Lists HDFS files for each database in the comma-separated {@code baseName}
     * via the ls_hdfs_files.sh script on the 金桥 cluster.
     *
     * @param baseName comma-separated Hive database names; blank entries ignored
     * @return sum of the per-database script results
     */
    @Override
    public int lsHdfs(String baseName) {
        int total = 0;
        List<String> baseNames = StrSpliter.split(baseName, StrUtil.COMMA, true, true);
        if (ObjectUtil.isNotEmpty(baseNames)) {
            // Resolve connection info for the 金桥 cluster.
            LambdaQueryWrapper<ClusterSparingConf> jQWrapper = Wrappers.lambdaQuery();
            ClusterSparingConf jQCluster = this.wrapperClusterSparingConf(jQWrapper, "金桥");
            for (String base : baseNames) {
                LsHdfsDTO dto = new LsHdfsDTO();
                dto.setSh("sh /opt/script/ls_hdfs_files.sh");
                dto.setHdfs(jQCluster.getHdfsPrefixPath() + base + ".db/");
                dto.setHdfsPath("/opt/script/hdfs/ls_hdfs_" + base + ".txt");
                dto.setBaseName(base);
                total += hiveToOracleService.lsHdfs(dto);
            }
        }
        return total;
    }

    /**
     * @Method queryTimePartitionTable
     * @Param
     * @Return java.util.List<java.lang.String>
     * @Description Returns the tables partitioned by an execution-time partition field.
     * @Author Wang zhihao
     * @Date 2023/11/7 20:17
     * @Version V1.0
     */
    @Override
    public List<String> queryTimePartitionTable() {
        return scanMetadataMapper.queryTimePartitionTable();
    }

}
