package com.pingan.haofang.searchcloud.index.service.impl;

import com.alibaba.fastjson.JSON;
import com.google.common.collect.Lists;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.JsonElement;
import com.google.gson.JsonNull;
import com.google.gson.JsonObject;
import com.google.gson.JsonPrimitive;
import com.pingan.haofang.framework.common.functional.lang.control.base.Enums;
import com.pingan.haofang.searchcloud.api.IndexDataPacket;
import com.pingan.haofang.searchcloud.api.IndexRowData;
import com.pingan.haofang.searchcloud.api.OperateEnum;
import com.pingan.haofang.searchcloud.api.StorageCluster;
import com.pingan.haofang.searchcloud.api.constants.EngineType;
import com.pingan.haofang.searchcloud.api.facade.IndexDataPushFacade;
import com.pingan.haofang.searchcloud.api.matedata.FieldMeta;
import com.pingan.haofang.searchcloud.api.matedata.IndexMeta;
import com.pingan.haofang.searchcloud.api.matedata.constant.MetadataFieldType;
import com.pingan.haofang.searchcloud.common.constants.QueryOptimizeEnum;
import com.pingan.haofang.searchcloud.common.constants.Status;
import com.pingan.haofang.searchcloud.common.dto.FetchDataRetryDTO;
import com.pingan.haofang.searchcloud.common.dto.FetchDatasDTO;
import com.pingan.haofang.searchcloud.common.dto.PageDTO;
import com.pingan.haofang.searchcloud.common.exception.SystemException;
import com.pingan.haofang.searchcloud.common.fetch.IStreamFetchIterator;
import com.pingan.haofang.searchcloud.common.fetch.MultiThreadStreamFetchIterator;
import com.pingan.haofang.searchcloud.common.fetch.StreamFetchGatherInfo;
import com.pingan.haofang.searchcloud.common.fetch.StreamFetchIterator;
import com.pingan.haofang.searchcloud.common.fetch.StreamFetcher;
import com.pingan.haofang.searchcloud.common.redis.RedisLuaScript;
import com.pingan.haofang.searchcloud.common.rpc.RPC;
import com.pingan.haofang.searchcloud.common.rpc.RPCConfigProperties;
import com.pingan.haofang.searchcloud.common.rpc.RPCResultData;
import com.pingan.haofang.searchcloud.common.utils.DateUtils;
import com.pingan.haofang.searchcloud.index.constants.IndexBuildConstants;
import com.pingan.haofang.searchcloud.index.constants.IndexBuildOperation;
import com.pingan.haofang.searchcloud.index.constants.IndexBuildRunType;
import com.pingan.haofang.searchcloud.index.constants.IndexBuildStatus;
import com.pingan.haofang.searchcloud.index.constants.IndexBuildType;
import com.pingan.haofang.searchcloud.index.constants.IndexDataSourceType;
import com.pingan.haofang.searchcloud.index.dto.IndexBuildDataFetchRequest;
import com.pingan.haofang.searchcloud.index.dto.IndexBuildIncDTO;
import com.pingan.haofang.searchcloud.index.dto.IndexBuildProgressDTO;
import com.pingan.haofang.searchcloud.index.dto.IndexBuildRunningProgressDTO;
import com.pingan.haofang.searchcloud.index.exception.PauseException;
import com.pingan.haofang.searchcloud.index.service.IndexBuildProgressService;
import com.pingan.haofang.searchcloud.index.service.IndexBuildService;
import com.pingan.haofang.searchcloud.index.service.IndexService;
import com.pingan.haofang.searchcloud.indexdatasource.dto.IndexDatasourceDTO;
import com.pingan.haofang.searchcloud.indexdatasource.dto.RpcIndexDataSourceDTO;
import com.pingan.haofang.searchcloud.indexdatasource.service.IndexDatasourceService;
import com.pingan.haofang.searchcloud.mail.service.MailService;
import com.pingan.haofang.searchcloud.quartz.dto.ScheduleTask;
import com.pingan.haofang.searchcloud.register.metadata.DBIndexMetadataRegister;
import com.pingan.haofang.searchcloud.solr.constants.SolrConstant;
import com.pingan.haofang.searchcloud.user.dao.SearchProjectDao;
import com.pingan.haofang.searchcloud.user.service.SearchUserService;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.quartz.JobDetail;
import org.quartz.JobExecutionContext;
import org.quartz.Scheduler;
import org.quartz.SchedulerException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.BeanUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.http.HttpHeaders;
import org.springframework.jmx.export.annotation.ManagedOperation;
import org.springframework.jmx.export.annotation.ManagedResource;
import org.springframework.stereotype.Service;
import org.springframework.util.StopWatch;
import org.springframework.web.client.RestTemplate;

import java.util.ArrayList;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.function.BiFunction;

import static com.pingan.haofang.searchcloud.index.constants.IndexBuildConstants.EXPIRE_IN_DAYS;
import static com.pingan.haofang.searchcloud.index.constants.IndexBuildConstants.HF_SEARCHCLOUD_PROGRESS_ID;

@Service
@ManagedResource
public class IndexBuildServiceImpl implements IndexBuildService {

    // NOTE(review): should ideally be private static final; left as-is because the
    // package-private visibility may be relied on elsewhere.
    static Logger LOG = LoggerFactory.getLogger(IndexBuildServiceImpl.class);

    // Gson instance (thread-safe) used to (de)serialize ScheduleTask payloads stored
    // in quartz job data and Redis. NOTE(review): could be final.
    private static Gson GSON = new GsonBuilder().setDateFormat("yyyy-MM-dd HH:mm:ss").create();

    private static final int MIN_BATCH_WRITE_SIZE = 50;

    @Autowired
    protected StringRedisTemplate redisTemplate;

    // Minimum import-success percentage (0-100) for a full build to be considered successful.
    @Value("${indexBuild.successProportion}")
    protected float successProportion;

    // Maximum number of historical collections to keep per index; older ones are cleaned up.
    @Value("${indexBuild.maxCollectionCount}")
    private int maxCollectionCount;

    @Value("${indexBuild.indexWriteTimeout}")
    private int indexWriteTimeout;

/*    @Value("${dashboard.report.env}")
    private String env;

    @Value("${dashboard.report.rpc.proportion:80}")
    private int rpcProportion;*/

    /**
     * Maximum number of retries allowed for a single query while fetching data.
     */
    @Value("${indexBuild.singleQueryMaxRetryTime}")
    private int singleQueryMaxRetryTime;

    /**
     * Maximum cumulative number of retries allowed across all queries while fetching data.
     */
    @Value("${indexBuild.allQueryMaxRetryTime}")
    private int allQueryMaxRetryTime;

    /**
     * Retry interval (in milliseconds) between fetch attempts after a failure.
     */
    @Value("${indexBuild.retryTimeInterval}")
    private long retryTimeInterval;

    @Autowired
    private IndexService indexService;

    @Autowired
    private IndexDataPushFacade indexDataPushFacade;

    @Autowired
    private IndexDatasourceService indexDatasourceService;

    @Autowired
    private RestTemplate restTemplate;

    @Autowired
    private IndexBuildProgressService indexBuildProgressService;

    @Autowired
    private DBIndexMetadataRegister dBIndexMetadataRegister;

    @Autowired
    private SearchProjectDao searchProjectDao;

    @Autowired
    private SearchUserService searchUserService;

    @Autowired
    private MailService mailService;

/*    @Autowired
    private MailReceiverProperties mailReceiverProperties;*/

    @Autowired
    private Scheduler scheduler;

    /**
     * Full (baseline) index build.
     *
     * Acquires a per-index Redis lock (unless recovering/resuming), resolves the
     * target collection, streams data from each datasource into the index, and on
     * success registers the new collection and cleans up expired ones.
     *
     * @param progressDTO         build progress record; updated throughout with status/statistics
     * @param ignoreError         whether single-row import errors are tolerated
     * @param indexDatasourceList datasources to fetch and index
     * @param jobExecutionContext quartz execution context (used for recovery detection)
     * @param isResumeJob         true when resuming a previously paused build
     */
    @Override
    public void executeIndexBuild(IndexBuildProgressDTO progressDTO, boolean ignoreError, List<IndexDatasourceDTO>
            indexDatasourceList, JobExecutionContext jobExecutionContext, boolean isResumeJob) {

        final Long progressId = progressDTO.getId();

        // Whether this invocation owns the lock (and must release it in finally).
        boolean run = true;

        final boolean isRecoveringJob;

        isRecoveringJob = jobExecutionContext.isRecovering() || isResumeJob;

        try {
            // If this is a recovering/resumed job, skip the lock check and acquisition
            // (the original run already holds the lock).
            if (!isRecoveringJob) {
                // Check the lock: another build for the same index means we must abort.
                if (checkLock(progressDTO.getProjectCode(), progressDTO.getIndexName())) {
                    progressDTO.setStatus(IndexBuildStatus.FAIL.value);
                    progressDTO.setFailReason("another index build progress for this index is running, now terminate");
                    LOG.info("another index build progress for this index is running, now terminate");
                    indexBuildProgressService.updateProgress(progressDTO);
                    run = false;
                    return;
                }
                // Acquire the lock.
                lock(progressDTO.getStartTime(), progressDTO.getProjectCode(), progressDTO.getIndexName());
            }

            // Persist the progress id into quartz job data so recovery can find it.
            updateProgressIdToQuartz(progressDTO, jobExecutionContext);

            progressDTO.setExecuteStartTime(new Date());
            progressDTO.setStatus(IndexBuildStatus.RUNNING.value);
            indexBuildProgressService.updateProgress(progressDTO);

            // Resolve the target collection. A manually triggered build scoped to a
            // single datasource reuses the current collection; otherwise a new one
            // may be created.
            String collectionName = null;
            if (progressDTO.getRunType() == IndexBuildRunType.HANDING.value
                    && StringUtils.isNotBlank(progressDTO.getDatasourceName())) {
                collectionName =
                        indexService.getCollectionOfStorage(progressDTO.getProjectCode(), progressDTO.getIndexName());
            } else {
                collectionName = indexService.getOrCreateCollectionOfStorage(progressDTO.getProjectCode(),
                        progressDTO.getIndexName());
            }

            // A recovering job must continue writing into the collection chosen by the
            // original run (stored in Redis); a fresh run records its choice instead.
            if (isRecoveringJob) {
                collectionName = (String) redisTemplate.opsForHash().get(IndexBuildConstants.SOLR_PROGRESS_ID_PREFIX
                                + progressId,
                        IndexBuildConstants.COLLECTION_NAME);
            } else {
                redisTemplate.opsForHash().put(IndexBuildConstants.SOLR_PROGRESS_ID_PREFIX + progressId,
                        IndexBuildConstants.COLLECTION_NAME, collectionName);
            }

            progressDTO.setCollectionName(collectionName);

            // Index metadata (engine host/type, fields) for the chosen collection.
            IndexMeta meta = dBIndexMetadataRegister.findIndexMetaByCollection(progressDTO.getIndexName(), collectionName);

            // Accumulated counters for this run (fetch/import totals and timings).
            IndexBuildRunningProgressDTO runningProgressInfo = new IndexBuildRunningProgressDTO();
            runningProgressInfo.setId(progressDTO.getId());

            // Iterate over the datasources and run the build for each.
            for (IndexDatasourceDTO dto : indexDatasourceList) {

                int fetchSize = dto.getFullDatasourceRpc().getPageSize();

                // Whether query optimization is enabled: 1 = yes, 0 = no.
                QueryOptimizeEnum queryOptimize = QueryOptimizeEnum.of(Optional.of(dto)
                        .map(IndexDatasourceDTO::getFullDatasourceRpc)
                        .map(RpcIndexDataSourceDTO::getQueryOptimize).orElse(QueryOptimizeEnum.DISABLE.code()));

                // Redis key prefix under which per-datasource import progress is stored.
                String indexSolrProgressKeyPrefix = spliceIndexSolrProgressKeyPrefix(dto.getFullDatasourceRpc());

                final long before = getBefore(progressDTO);

                IStreamFetchIterator<FetchDatasDTO<IndexRowData>> iter =
                        buildStreamFetchIterator(before, dto, meta,
                                indexSolrProgressKeyPrefix, progressDTO.getId());

                // Perform the actual fetch-and-import loop.
                build(runningProgressInfo, iter, collectionName, fetchSize, ignoreError, meta,
                        indexSolrProgressKeyPrefix, queryOptimize);
            }

            // Compute the success percentage (imported / fetched * 100).
            float currentProportion = 0f;
            if (runningProgressInfo.getFetchTotalCount() > 0) {
                currentProportion = (float) runningProgressInfo.getImportSuccessCount() * 100
                        / runningProgressInfo.getFetchTotalCount();
            }

            boolean success = currentProportion >= successProportion;
            // ES uses version numbers, so rows silently filtered by identical versions
            // cannot be distinguished from failures; treat ES builds as successful.
            if(EngineType.ES.getValue() == meta.getEngineType()){
                success = true;
            }

            // Record the final statistics on the progress record.
            progressDTO.setExecuteEndTime(new Date());
            progressDTO.setFetchSuccessCount(runningProgressInfo.getFetchSuccessCount());
            progressDTO.setFetchTotalCount(runningProgressInfo.getFetchTotalCount());
            progressDTO.setImportSuccessCount(runningProgressInfo.getImportSuccessCount());
            progressDTO.setImportTotalCount(runningProgressInfo.getImportTotalCount());
            progressDTO.setExecuteCostTime(DateUtils.getBetween(progressDTO.getExecuteStartTime(),
                    progressDTO.getExecuteEndTime(), DateUtils.SECOND_RETURN));
            progressDTO.setRpcCostTime(runningProgressInfo.getRpcCostTime().get() / SolrConstant.THOUSAND_UNIT);
            progressDTO.setDataImportCostTime(runningProgressInfo.getDataImportCostTime().get() / SolrConstant.THOUSAND_UNIT);

            if (success) {
                progressDTO.setStatus(IndexBuildStatus.SUCCESS.value);
                // Register the new collection as the live one for this index.
                indexService.registerCollectionOfStorage(progressDTO.getProjectCode(), progressDTO.getIndexName(),
                        collectionName);

                // Send the import-success mail (currently disabled).
                /*this.sendSuccessFullBuildMail(progressDTO);*/

                // Clean up expired collections beyond the retention count.
                dBIndexMetadataRegister.clearOldConfigAndCollection(progressDTO.getIndexName(), maxCollectionCount);
            } else {
                    progressDTO.setStatus(IndexBuildStatus.FAIL.value);
                    progressDTO.setFailReason("success proportion is too low, actual:[" + currentProportion + "], expect:["
                            + successProportion + "]");
                    // Send the failure-notification mail (currently disabled).
                    /*sendFullBuildNotifyMail(progressDTO, MailConstants.FULL_BUILD_FAIL_NOTIFY, Constants.FULL_IMPORT_FAIL_REPORT_MAIL_TITLE);*/
            }

            indexBuildProgressService.updateProgress(progressDTO);

        } catch (PauseException e) {
            LOG.error(e.getMessage(), e);
            progressDTO.setStatus(e.getStastus().getValue());
            progressDTO.setFailReason(e.getMessage());
            indexBuildProgressService.updateProgress(progressDTO);
        } catch (Throwable e) {
            LOG.error("execute index build fail, progressId: {}", progressDTO.getId(), e);
            progressDTO.setStatus(IndexBuildStatus.FAIL.value);
            progressDTO.setFailReason(e.getMessage());
            indexBuildProgressService.updateProgress(progressDTO);
            LOG.error("start to send warning mail ...");
//            sendFullBuildNotifyMail(progressDTO, MailConstants.FULL_BUILD_FAIL_NOTIFY, Constants.FULL_IMPORT_FAIL_REPORT_MAIL_TITLE);
        } finally {
            unInterrupt(progressDTO.getId());
            // When paused, keep the progress data in Redis so the build can resume.
            if (!checkPause(progressDTO.getId())) {
                // After a completed baseline build, delete the import progress from Redis.
                deleteProgressAfterIndexBuild(indexDatasourceList, progressId);
                if (run) {
                    unLock(progressDTO.getProjectCode(), progressDTO.getIndexName());
                }

            }

        }
    }

    /**
     * Persists the freshly created progress id into the quartz job data maps and
     * Redis so that a recovered or resumed job can locate the same progress record.
     *
     * @param progressDTO         progress record supplying the id
     * @param jobExecutionContext quartz context whose job data is updated
     * @throws SystemException if re-registering the job with the scheduler fails
     */
    private void updateProgressIdToQuartz(IndexBuildProgressDTO progressDTO, JobExecutionContext jobExecutionContext) {

        final Long progressId = progressDTO.getId();
        final JobDetail jobDetail = jobExecutionContext.getJobDetail();

        // Deserialize the scheduled task, inject the progress id, then serialize it back.
        final String originalArgs = (String) jobDetail.getJobDataMap().get(IndexBuildConstants.ARGS);
        final ScheduleTask task = GSON.fromJson(originalArgs, ScheduleTask.class);
        task.getArgs().setProgressId(progressId);
        final String updatedArgs = GSON.toJson(task, ScheduleTask.class);

        // Propagate the updated args to every job-data map quartz may read from.
        jobDetail.getJobDataMap().put(IndexBuildConstants.ARGS, updatedArgs);
        jobExecutionContext.getTrigger().getJobDataMap().put(IndexBuildConstants.ARGS, updatedArgs);
        jobExecutionContext.getMergedJobDataMap().put(IndexBuildConstants.ARGS, updatedArgs);

        // Keep a copy in Redis keyed by progress id for job recovery.
        redisTemplate.opsForHash().put(IndexBuildConstants.SOLR_PROGRESS_ID_PREFIX + progressId,
                IndexBuildConstants.SCHEDULE_TASK, updatedArgs);

        try {
            // Replace the stored job definition so the new args survive restarts.
            scheduler.addJob(jobDetail, true);
            LOG.info("update progressId :{} to quartz.", progressId);
        } catch (SchedulerException e) {
            throw new SystemException(e);
        }
    }

    /**
     * Resolves the "before" bound (epoch seconds) for data fetching.
     *
     * On the first run it is derived from the progress start time and cached in
     * Redis (with an expiry), so that a recovered/resumed job reuses exactly the
     * same bound instead of recomputing it.
     *
     * @param progressDTO progress record supplying the id and start time
     * @return epoch seconds; data modified before this instant is fetched
     */
    private long getBefore(IndexBuildProgressDTO progressDTO) {

        final Long progressId = progressDTO.getId();
        final String redisKey = IndexBuildConstants.SOLR_PROGRESS_ID_PREFIX + progressId;
        final String beforeStr = (String) redisTemplate.opsForHash().get(redisKey, IndexBuildConstants.BEFORE);

        if (beforeStr != null) {
            // Recovered/resumed job: reuse the bound persisted by the original run.
            return Long.parseLong(beforeStr);
        }

        final long before = progressDTO.getStartTime().getTime() / SolrConstant.THOUSAND_UNIT;
        redisTemplate.opsForHash().put(redisKey, IndexBuildConstants.BEFORE, Long.toString(before));
        redisTemplate.expire(redisKey, EXPIRE_IN_DAYS, TimeUnit.DAYS);
        return before;
    }

    /**
     * 发送导入成功邮件
     *
     * @param progressDTO
     */
   /* private void sendSuccessFullBuildMail(IndexBuildProgressDTO progressDTO) {

        if (progressDTO.getExecuteCostTime() == BigInteger.ZERO.longValue()) {
            return;
        }
        Long proportion = progressDTO.getRpcCostTime() * SolrConstant.HUNDRED_UNIT / progressDTO.getExecuteCostTime();
        if (proportion < rpcProportion) {
            return;
        }
        //发送导入成功的邮件,发送失败不能导致构建失败
        try {
            sendFullBuildNotifyMail(progressDTO, MailConstants.FULL_BUILD_SUCCESS_NOTIFY, Constants.FULL_IMPORT_SUCCESS_REPORT_MAIL_TITLE);
        } catch (Exception e) {
            LOG.error("full build success,but mail send failed：{}", e.getMessage());
            e.printStackTrace();
        }
    }*/

    /**
     * 發送提醒郵件
     *
     * @param progressDTO
     */
  /*  public void sendFullBuildNotifyMail(IndexBuildProgressDTO progressDTO, String templateName, String mailTitle) {
        LOG.warn("start to send warning mail ...");
        Objects.requireNonNull(templateName, "mail templateName is null");
        Objects.requireNonNull(mailTitle, "mail title is null");

        SearchProject project = searchProjectDao.findByProjectCode(progressDTO.getProjectCode());
        Objects.requireNonNull(project, "project is null");
        SearchUserDto user = searchUserService.getUserById(project.getMasterId());

        MailDto<FailedProcessForMailDto> mailDto = new MailDto<>();
        mailDto.setTemplateName(templateName);

        List<String> mailTos = new ArrayList<>(mailReceiverProperties.getMailList());
        if (StringUtils.isNotBlank(project.getMailTo())) {
            mailTos.addAll(0, Arrays.stream(project.getMailTo().split(Constants.SPLITOR)).collect(Collectors.toList()));
        }
        mailTos.add(0, user.getEmail());
        mailDto.setEmailTo(mailTos);

        mailDto.setSubject(String.format(Constants.REPORT_TEMPLATE_MAIL_TITLE, env, mailTitle,
                DateUtils.formatDate(new Date())));

        FailedProcessForMailDto failedProcessForMailDto = BeanCopy.of(progressDTO, new FailedProcessForMailDto())
                .copy(BeanUtils::copyProperties).get();
        //转换时间
        String startTime = Optional.ofNullable(progressDTO.getStartTime()).map(DateUtils::formatDate).orElse
                (StringUtils.EMPTY);
        String execStartTime = Optional.ofNullable(progressDTO.getExecuteStartTime()).map(DateUtils::formatDate)
                .orElse(StringUtils.EMPTY);
        String execEndTime = Optional.ofNullable(progressDTO.getExecuteEndTime()).map(DateUtils::formatDate).orElse
                (StringUtils.EMPTY);

        failedProcessForMailDto.setStartTime(startTime);
        failedProcessForMailDto.setExecuteStartTime(execStartTime);
        failedProcessForMailDto.setExecuteEndTime(execEndTime);
        failedProcessForMailDto.setProjectName(project.getProjectName());
        failedProcessForMailDto.setRunType(IndexBuildRunType.get(progressDTO.getRunType()).getDesc());
        failedProcessForMailDto.setRpcCostTime(Optional.ofNullable(progressDTO.getRpcCostTime()).orElse(0L));
        failedProcessForMailDto.setDataImportCostTime(Optional.ofNullable(progressDTO.getDataImportCostTime()).orElse(0L));

        failedProcessForMailDto.setMaster(user.getUserName());
        mailDto.setModel(failedProcessForMailDto);
        mailService.sendMail(mailDto);
        LOG.warn("success to send warning mail");
    }*/

    /**
     * Routes an incremental message: when the datasource has message buffering
     * enabled, the message is merged into the Redis buffer for later batched
     * consumption; otherwise it is written to the index immediately.
     *
     * @param message incremental build message
     */
    @Override
    public void mergeOrExecuteIndexInc(IndexBuildIncDTO message) {
        // Look up the datasource configuration for this message.
        LOG.debug("mergeOrExecuteIndexInc start:" + System.currentTimeMillis());
        final IndexDatasourceDTO datasource = indexDatasourceService.findDatasource(message.getProjectCode(),
                message.getIndexName(), message.getDataSourceName());
        final RpcIndexDataSourceDTO incRpc = datasource.getIncDatasourceRpc();

        final boolean bufferingEnabled =
                incRpc != null && Status.NORMAL.getValue() == incRpc.getBufferStatus();
        if (bufferingEnabled) {
            // Buffering enabled: merge into the Redis queue instead of writing now.
            incMsgMerge(message, incRpc.getMaxWaitTime());
        } else {
            executeIndexInc(message);
        }
    }

    /**
     * Incremental index write.
     *
     * Creates or loads the incremental progress record, then waits for any running
     * baseline build to release its lock before writing. Messages older than the
     * last baseline's lock time are discarded. Supports three modes: id-based
     * deletion, direct content import, and a scheduled catch-up that pages data
     * newer than the index's current max version from the datasource.
     *
     * @param message incremental build message (operation, ids, content, timestamp)
     */
    @Override
    public void executeIndexInc(IndexBuildIncDTO message) {
        LOG.debug("executeIndexInc start:" + System.currentTimeMillis());
        IndexBuildProgressDTO progress = indexBuildProgressService.get(message.getProjectCode(), message.getIndexName(),
                message.getDataSourceName(), IndexBuildType.INC);

        // Lazily create the incremental progress record on first use.
        if (progress == null) {
            progress = new IndexBuildProgressDTO();
            progress.setIndexBuildType(IndexBuildType.INC.value);
            progress.setIndexName(message.getIndexName());
            progress.setProjectCode(message.getProjectCode());
            progress.setRunType(IndexBuildRunType.HANDING.value);
            progress.setDatasourceName(message.getDataSourceName());
            progress.setStatus(IndexBuildStatus.RUNNING.value);
            progress.setStartTime(new Date());
            progress.setExecuteStartTime(new Date());
            indexBuildProgressService.updateProgress(progress);
        }
        // Running counters (fetch/import totals) accumulated across messages.
        IndexBuildRunningProgressDTO runningInfo = indexBuildProgressService.getRunningInfo(progress.getId());

        if (runningInfo == null) {
            runningInfo = new IndexBuildRunningProgressDTO();
            runningInfo.setId(progress.getId());
        }

        // Account for the rows referenced by this message up front.
        runningInfo.setFetchTotalCount(runningInfo.getFetchTotalCount() + (message.getIdList() == null ? 0 : message.getIdList().size()));
        indexBuildProgressService.updateRunningProgress(runningInfo);

        while (true) {
            boolean lock = checkLock(message.getProjectCode(), message.getIndexName());

            // No lock means no baseline build is running; the increment may proceed.
            if (!lock) {

                // Time of the last baseline build.
                long lockTime = getLockTime(message.getProjectCode(), message.getIndexName());
                // If absent or older than the message, do not filter the message out.
                LOG.info("index build lock not exists, current lockTime: {}, message lockTime: {}", lockTime, message.getTimestamp());
                if (lockTime <= 0 || lockTime < message.getTimestamp()) {
                    try {
                        // Look up the datasource configuration.
                        IndexDatasourceDTO dto = indexDatasourceService.findDatasource(message.getProjectCode(),
                                message.getIndexName(), message.getDataSourceName());
                        if (dto == null) {
                            break;
                        }
                        // Resolve the live collection for this index.
                        String collectionName =
                                indexService.getCollectionOfStorage(message.getProjectCode(), message.getIndexName());

                        // Index metadata (engine host/type, version field, ...).
                        IndexMeta meta = dBIndexMetadataRegister.findIndexMetaByCollection(progress.getIndexName(), collectionName);

                        // Delete operation: remove the rows identified by idList.
                        if (message.getOperation() == IndexBuildOperation.DELETE.value) {
                            List<IndexRowData> indexDatas = dataConvertForDelete(message.getIdList(), meta);
                            if (CollectionUtils.isEmpty(indexDatas)) {
                                break;
                            }
                            this.handleImport(new StorageCluster(meta.getHost(), meta.getEngineType()), indexDatas, collectionName,
                                    true, false);
                        } else {
                            // One-off increment: the message carries the rows directly.
                            if (message.getContent() != null && message.getContent().size() != 0) {
                                this.incMsgImport(message.getContent(), meta, runningInfo, collectionName);
                            } else {
                                // Scheduled increment: the Redis message-queue side already
                                // checked emptiness, so an empty content means a timed task.
                                // Page data newer than the index's current max version.
                                Long maxVersion = indexDataPushFacade.getMaxValue(new StorageCluster(meta.getHost(), meta.getEngineType())
                                        , collectionName, meta.getVersionField().getName());
                                if (maxVersion == null) {
                                    LOG.error("executeIndexInc get max data version error , indexName:{}", collectionName);
                                    break;
                                }
                                LOG.info("executeIndexInc get max data version:{},indexName:{}", maxVersion, collectionName);
                                long startPage = 1;
                                while (true) {
                                    List<RPCResultData> resultDatas = this.incDataFetch(maxVersion, dto, startPage);
                                    if (resultDatas == null || resultDatas.isEmpty()) {
                                        break;
                                    }
                                    try {
                                        this.incMsgImport(resultDatas, meta, runningInfo, collectionName);
                                    } catch (Exception e) {
                                        // Keep paging even if a single page fails to import.
                                        LOG.error(e.getMessage(), e);
                                    }
                                    startPage++;
                                }
                            }
                        }
                    } catch (Exception e) {
                        LOG.error(e.getMessage(), e);
                    }
                    break;
                } else {
                    // Message predates the last baseline build: discard it.
                    LOG.info("message expire, ignore it, current lockTime: {}, message lockTime: {}", lockTime, message.getTimestamp());
                    break;
                }
            } else {
                // A baseline build holds the lock; block until it is released.
                LOG.info("index build lock exists, waiting for release");
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    // FIX: restore the interrupt flag and stop waiting instead of
                    // printStackTrace() + silently swallowing the interrupt, which
                    // left the consumer thread un-interruptible on shutdown.
                    Thread.currentThread().interrupt();
                    LOG.warn("interrupted while waiting for index build lock release, dropping message", e);
                    break;
                }
            }
        }
    }

    /**
     * Converts one batch of RPC rows and writes them into the index, updating the
     * running progress counters before and after the import.
     *
     * @param resultDatas    raw RPC rows fetched from the datasource
     * @param meta           index metadata supplying engine host/type and fields
     * @param runningInfo    mutable progress counters, persisted after each step
     * @param collectionName target collection to write into
     * @return true if at least one row was converted and an import was attempted
     */
    private boolean incMsgImport(List<RPCResultData> resultDatas, IndexMeta meta, IndexBuildRunningProgressDTO runningInfo, String collectionName) {
        // Convert raw RPC rows into index row data.
        LOG.debug("executeIndexInc indexDatas start:" + System.currentTimeMillis());
        final List<IndexRowData> rows = dataConvert(resultDatas, meta);
        LOG.debug("executeIndexInc indexDatas end:" + System.currentTimeMillis());
        if (CollectionUtils.isEmpty(rows)) {
            return false;
        }

        final int batchSize = rows.size();
        runningInfo.setFetchSuccessCount(runningInfo.getFetchSuccessCount() + batchSize);
        runningInfo.setImportTotalCount(runningInfo.getImportTotalCount() + batchSize);
        indexBuildProgressService.updateRunningProgress(runningInfo);

        LOG.debug("executeIndexInc handleImport start:" + System.currentTimeMillis());
        final int importedCount = handleImport(new StorageCluster(meta.getHost(), meta.getEngineType()),
                rows, collectionName, true, false);
        LOG.debug("executeIndexInc handleImport end:" + System.currentTimeMillis());

        runningInfo.setImportSuccessCount(runningInfo.getImportSuccessCount() + importedCount);
        indexBuildProgressService.updateRunningProgress(runningInfo);
        return true;
    }

    /**
     * Acquires the Redis index-build lock for the given project/index pair and
     * records the build start time (epoch seconds) alongside it.
     *
     * @param indexBuildDate start time of the build, used as the lock timestamp
     * @param projectCode    project code
     * @param indexName      index name
     */
    @Override
    public void lock(Date indexBuildDate, String projectCode, String indexName) {
        final String hashKey = getKey(projectCode, indexName);
        final long lockSeconds = indexBuildDate.getTime() / 1000;

        LOG.info("index build lock: [{}] [{}]", hashKey, lockSeconds);

        // LOCK_TIME survives unLock so later messages can be filtered by build time.
        redisTemplate.opsForHash().put(IndexBuildConstants.LOCK_TIME, hashKey, Long.toString(lockSeconds));
        redisTemplate.opsForHash().put(IndexBuildConstants.LOCK, hashKey, "lock");
    }

    /**
     * Releases the index-build lock for the given project/index pair. Only the
     * LOCK entry is removed; the LOCK_TIME entry remains readable via
     * {@code getLockTime} for message filtering.
     *
     * @param projectCode project code
     * @param indexName   index name
     */
    @Override
    @ManagedOperation
    public void unLock(String projectCode, String indexName) {
        final String hashKey = getKey(projectCode, indexName);
        LOG.info("index build unlock: [{}]", hashKey);
        redisTemplate.opsForHash().delete(IndexBuildConstants.LOCK, hashKey);
    }

    /** JMX operation: snapshot of every index-build lock currently held in Redis. */
    @ManagedOperation
    public Map<Object, Object> allLocks() {
        final Map<Object, Object> heldLocks = redisTemplate.opsForHash().entries(IndexBuildConstants.LOCK);
        return heldLocks;
    }

    /**
     * @return true if an index-build lock currently exists for the given
     *         project/index pair
     */
    @Override
    @ManagedOperation
    public boolean checkLock(String projectCode, String indexName) {
        final String hashKey = getKey(projectCode, indexName);
        final boolean locked = redisTemplate.opsForHash().hasKey(IndexBuildConstants.LOCK, hashKey);
        return locked;
    }


    /**
     * Reads the last build's lock time (epoch seconds) for the given
     * project/index pair.
     *
     * @param projectCode project code
     * @param indexName   index name
     * @return the recorded lock time, or -1 when absent or unparseable
     */
    @Override
    public long getLockTime(String projectCode, String indexName) {
        String key = getKey(projectCode, indexName);
        String val = (String) redisTemplate.opsForHash().get(IndexBuildConstants.LOCK_TIME, key);
        // FIX: handle null explicitly instead of relying on an NPE caught by a
        // broad catch (Exception); only parse failures fall through to -1.
        if (val == null) {
            return -1L;
        }
        try {
            return Long.parseLong(val);
        } catch (NumberFormatException e) {
            LOG.warn("invalid lock time value [{}] for key [{}]", val, key, e);
            return -1L;
        }
    }

    /**
     * Buffers an incremental message into the Redis merge queue via a Lua script,
     * recording the deadline (message timestamp + maxTime) after which the
     * buffered messages must be consumed.
     *
     * @param message incremental message to buffer
     * @param maxTime maximum wait time added to the message timestamp
     */
    @Override
    @Deprecated
    public void incMsgMerge(IndexBuildIncDTO message, long maxTime) {
        final String key = getKey(message.getProjectCode(), message.getIndexName());
        final String msgListKey = key + IndexBuildConstants.INC_MERGE_REDIS_KEY_SUFFIX;
        final String deadlineKey = key + IndexBuildConstants.INC_MERGE_MAX_TIME_REDIS_KEY_SUFFIX;

        final String deadline = String.valueOf(message.getTimestamp() + maxTime);
        redisTemplate.execute(RedisLuaScript.msgMergeScript(),
                Lists.newArrayList(msgListKey, deadlineKey), JSON.toJSONString(message), deadline);
        LOG.info("inc msg merged,key:{},msg:{}", key, message.toString());
    }

    @Override
    @Deprecated
    public void incMsgConsume(String key, long maxSize) {
        // Drains the merged incremental-message queue for the given index key and applies
        // the batched increments, once either the merge deadline has passed or the queue
        // has reached maxSize entries. Deprecated together with incMsgMerge above.
        String msgkey = key + IndexBuildConstants.INC_MERGE_REDIS_KEY_SUFFIX;
        String maxTimeKey = key + IndexBuildConstants.INC_MERGE_MAX_TIME_REDIS_KEY_SUFFIX;

        String maxWaitTimeStr = redisTemplate.opsForValue().get(maxTimeKey);
        LOG.info("merged index consume msgkey:{} maxTimeKey:{} maxWaitTimeStr:{}", msgkey, maxTimeKey,
                maxWaitTimeStr);
        // No deadline recorded: nothing has been merged for this key, nothing to consume.
        if (null == maxWaitTimeStr) {
            return;
        }

        long maxWaitTime = Long.parseLong(maxWaitTimeStr);
        // NOTE(review): size(msgkey) returns a Long that is unboxed here; it may be null
        // if the list key disappears between the two reads — confirm whether that can happen.
        long size = redisTemplate.opsForList().size(msgkey);

        LOG.info("merged index consume maxWaitTime:{} size:{}", maxWaitTime, size);

        // Consume when the deadline (in seconds) has passed or the backlog reached maxSize.
        // NOTE(review): `0 <= size` is always true for a list length; `0 < size` was
        // probably intended to skip empty queues — confirm.
        if ((System.currentTimeMillis() / 1000 >= maxWaitTime && 0 <= size) || maxSize <= size) {
            List<String> keys = Lists.newArrayList(msgkey, maxTimeKey);
            // Lua script atomically pops all queued messages and clears the deadline key.
            List list = redisTemplate.execute(RedisLuaScript.msgConsumeScript(),
                    keys);

            // Page through the drained messages, `limit` entries at a time.
            int offset = 0;
            int limit = 1000;
            int count = list.size();
            while (count > offset) {
                LOG.info("inc msg consuming,key:{},page:{}", key, (offset / limit) + 1);
                int fromIndex = offset;
                int toIndex = limit + offset;
                // Clamp the final (partial) page to the end of the list.
                if (count < toIndex) {
                    toIndex = offset + count % limit;
                }
                List fragment = list.subList(fromIndex, toIndex);
                offset += limit;

                // Merge consecutive messages that share the same operation into one DTO,
                // flushing (executeIndexInc) each time the operation changes.
                IndexBuildIncDTO incDTO = new IndexBuildIncDTO();
                int op = -1;
                for (Object o : fragment) {
                    IndexBuildIncDTO dto = JSON.parseObject(o.toString(), IndexBuildIncDTO.class);
                    // set op and merge inc
                    if (0 > op) {
                        op = dto.getOperation();
                        incDTO = incMsgMerge(incDTO, dto);
                    } else {
                        // change op and build merged inc then reset it
                        if (op != dto.getOperation()) {
                            try {
                                executeIndexInc(incDTO);
                                LOG.info("inc msg consumed,merged consume key:{},data:{}", key, incDTO.toString());
                            } catch (Exception e) {
                                LOG.error("inc msg consume failed,merged consume key:{},data:{}", key, incDTO
                                                .toString(),
                                        e);
                            }
                            incDTO = new IndexBuildIncDTO();
                            op = dto.getOperation();
                            // NOTE(review): unlike the other branches the return value is
                            // discarded here; incMsgMerge mutates incDTO in place so this
                            // still works — confirm the asymmetry is intentional.
                            incMsgMerge(incDTO, dto);
                        } else {
                            incDTO = incMsgMerge(incDTO, dto);
                        }
                    }
                }
                // in the end,build merged inc
                try {
                    executeIndexInc(incDTO);
                    LOG.info("inc msg consumed,merged consume key:{},data:{}", key, incDTO.toString());
                } catch (Exception e) {
                    LOG.error("inc msg consume failed,merged consume key:{},data:{}", key, incDTO.toString(),
                            e);
                }
            }
        }
    }

    /**
     * Merge one incremental message into the accumulating merge DTO.
     * <p>
     * Blocks while the baseline-build lock for the message's index is held. Once the
     * lock is free, either merges the message's id list into {@code mergeDTO}
     * (copying the message's other properties over it) or drops the message when it
     * predates the last baseline build's lock time.
     *
     * @param mergeDTO accumulator holding the ids merged so far; mutated and returned
     * @param dto      the incoming incremental message
     * @return the (possibly updated) accumulator
     */
    @Deprecated
    private IndexBuildIncDTO incMsgMerge(IndexBuildIncDTO mergeDTO, IndexBuildIncDTO dto) {
        while (true) {
            boolean lock = checkLock(dto.getProjectCode(), dto.getIndexName());

            // Lock absent: no baseline build is running, the increment may proceed.
            if (!lock) {

                // Time of the previous baseline build.
                long lockTime = getLockTime(dto.getProjectCode(), dto.getIndexName());
                // When absent or not expired, do not filter the message.
                LOG.info("index build lock not exists, current lockTime: {}, message lockTime: {}", lockTime,
                        dto.getTimestamp());
                if (lockTime <= 0 || lockTime < dto.getTimestamp()) {
                    try {
                        // Copy the latest message's properties over the accumulator, then
                        // append its ids to those merged so far.
                        List<String> mergedID = Optional.ofNullable(mergeDTO.getIdList()).orElse(new ArrayList<>());
                        List<String> id = dto.getIdList();
                        BeanUtils.copyProperties(dto, mergeDTO);
                        mergedID.addAll(id);
                        mergeDTO.setIdList(mergedID);

                    } catch (Exception e) {
                        LOG.error(e.getMessage(), e);
                    }
                    break;
                } else {
                    // Message is older than the last baseline — drop it.
                    LOG.info("message expire, ignore it, current lockTime: {}, message lockTime: {}", lockTime,
                            dto.getTimestamp());
                    break;
                }
            } else {
                // Baseline build in progress: block until the lock is released.
                LOG.info("index build lock exists, waiting for release");
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    // Fix: restore the interrupt flag and stop waiting. The previous
                    // code called printStackTrace() and kept looping, which swallowed
                    // the interrupt and busy-spun once the thread was interrupted
                    // (every subsequent sleep throws immediately).
                    Thread.currentThread().interrupt();
                    LOG.error("interrupted while waiting for index build lock release", e);
                    return mergeDTO;
                }
            }
        }
        return mergeDTO;
    }

    /**
     * Full (baseline) index build loop: drains the fetch iterator, imports every
     * batch into the storage engine, and keeps the running-progress counters updated.
     *
     * @param runningProgressInfo        progress record updated after every batch
     * @param iter                       stream iterator producing fetched/converted batches
     * @param collectionName             target collection
     * @param batchWriteSize             batch write size (not referenced in this method)
     * @param ignoreError                whether import errors are skipped instead of bisected
     * @param meta                       index metadata (host, engine type)
     * @param indexSolrProgressKeyPrefix redis key prefix for the solr import progress table
     * @param queryOptimize              whether query optimisation is enabled
     */
    private void build(IndexBuildRunningProgressDTO runningProgressInfo,
                       IStreamFetchIterator<FetchDatasDTO<IndexRowData>> iter,
                       String collectionName, int batchWriteSize, boolean ignoreError, IndexMeta meta,
                       String indexSolrProgressKeyPrefix, QueryOptimizeEnum queryOptimize) {

        iter.setIgnoreError(ignoreError);

        final Long progressId = runningProgressInfo.getId();

        // Poll the iterator until the datasource is exhausted.
        while (iter.hasNext()) {
            // Honour externally-raised interrupt/pause flags between batches; both stop
            // the iterator and escape via PauseException with the matching status.
            if (checkInterrupt(progressId)) {
                LOG.info("job interrupted,progressId:{}", progressId);
                iter.stop();
                throw new PauseException(IndexBuildStatus.STOP, "job interrupted,progressId:" + progressId);
            }

            if (checkPause(progressId)) {
                LOG.info("job pause,progressId:{}", progressId);
                iter.stop();
                throw new PauseException(IndexBuildStatus.PAUSE, "job pause,progressId:" + progressId);
            }

            // Fetch and convert the next batch.
            FetchDatasDTO<IndexRowData> fetchDatasDTO = iter.next();

            if (fetchDatasDTO == null) {
                continue;
            }

            List<IndexRowData> indexDatas = fetchDatasDTO.getIndexDatas();

            // Counts carried over from batches the iterator skipped.
            final Integer skipedImportSuccessCount = Optional.of(fetchDatasDTO).map
                    (FetchDatasDTO::getSkipedImportSuccessCount).orElse(0);

            final Integer skipedIndexDatasSize = Optional.of(fetchDatasDTO).map
                    (FetchDatasDTO::getSkipedIndexDatasSize).orElse(0);

            int successCount = 0;

            final int importTotalCount = indexDatas == null ? 0 : indexDatas.size();

            if (CollectionUtils.isNotEmpty(indexDatas)) {
                // Import into solr/es and record this batch's progress.
                successCount = handleImport(new StorageCluster(meta.getHost(), meta.getEngineType()), fetchDatasDTO,
                        collectionName, ignoreError, false, indexSolrProgressKeyPrefix, queryOptimize,
                        runningProgressInfo);
            }


            // Corrected fetch statistics for this batch.
            StreamFetchGatherInfo correctStreamFetchGatherInfo = fetchDatasDTO.getFetchDatasProgress()
                    .getCorrectStreamFetchGatherInfo();

            runningProgressInfo.setFetchSuccessCount(
                    runningProgressInfo.getFetchSuccessCount() + correctStreamFetchGatherInfo.getSuccessCount());
            runningProgressInfo
                    .setFetchTotalCount(runningProgressInfo.getFetchTotalCount() + correctStreamFetchGatherInfo
                            .getTotalCount());
            runningProgressInfo.setImportSuccessCount(runningProgressInfo.getImportSuccessCount() + successCount
                    + skipedImportSuccessCount);
            runningProgressInfo.setImportTotalCount(runningProgressInfo.getImportTotalCount() + importTotalCount
                    + skipedIndexDatasSize);
            indexBuildProgressService.updateRunningProgress(runningProgressInfo);
            LOG.info("progressId:{} runningProgressInfo:{}", progressId, runningProgressInfo);
        }

        // Iterator finished: accumulate its total rpc time into the progress record.
        runningProgressInfo.getRpcCostTime().addAndGet(iter.getRpcCostTime());
    }

    /**
     * Import one fetched batch into the storage engine and persist the import
     * progress for this batch to redis.
     *
     * @param storageCluster             target storage cluster
     * @param fetchDatasDTO              fetched batch plus its progress bookkeeping
     * @param collectionName             target collection
     * @param ignoreError                whether failed batches are skipped instead of bisected
     * @param hasError                   whether this call is a retry of a failed batch
     * @param indexSolrProgressKeyPrefix redis key prefix for the solr import progress table
     * @param queryOptimize              whether query optimisation is enabled
     * @param runningProgressInfo        aggregate counters to charge the import time to
     * @return number of rows imported successfully
     */
    private int handleImport(StorageCluster storageCluster, FetchDatasDTO<IndexRowData> fetchDatasDTO,
                             String collectionName,
                             boolean ignoreError, boolean hasError, String indexSolrProgressKeyPrefix,
                             QueryOptimizeEnum queryOptimize, IndexBuildRunningProgressDTO runningProgressInfo) {
        final List<IndexRowData> batch = fetchDatasDTO.getIndexDatas();

        // Time the actual push to the storage engine.
        final StopWatch importWatch = new StopWatch("import data cost:");
        importWatch.start();
        final int importedCount = handleImport(storageCluster, batch, collectionName, ignoreError, hasError);
        importWatch.stop();
        LOG.info("{} {} ", importWatch.getId(), importWatch.getLastTaskTimeMillis());

        // Charge the import time to the aggregate counters and record the batch result.
        runningProgressInfo.getDataImportCostTime().addAndGet(importWatch.getLastTaskTimeMillis());
        fetchDatasDTO.getFetchDatasProgress().setImportSuccessCount(importedCount);

        // Persist this batch's progress (as json) under its thread's redis hash.
        final String progressJson = GSON.toJson(fetchDatasDTO.getFetchDatasProgress());
        final String progressKey = indexSolrProgressKeyPrefix + fetchDatasDTO.getThreadSeq();
        redisTemplate.opsForHash().put(progressKey, fetchDatasDTO.getProgress(queryOptimize), progressJson);
        redisTemplate.expire(progressKey, EXPIRE_IN_DAYS, TimeUnit.DAYS);

        return importedCount;
    }

    /**
     * Push a batch of rows to the storage engine, bisecting the batch and retrying
     * each half on failure so a single bad row only sinks its minimal sub-batch.
     *
     * @param storageCluster target storage cluster
     * @param indexDatas     rows to push
     * @param collectionName target collection
     * @param ignoreError    when true, a failed batch is abandoned instead of bisected
     * @param hasError       whether this call is a retry of a previously failed batch
     * @return number of rows pushed successfully
     */
    private int handleImport(StorageCluster storageCluster, List<IndexRowData> indexDatas, String collectionName,
                             boolean ignoreError, boolean hasError) {
        // Stop bisecting once a failing batch is small enough.
        if (hasError && indexDatas.size() <= MIN_BATCH_WRITE_SIZE) {
            return 0;
        }
        int successCount = 0;
        try {
            IndexDataPacket packet = new IndexDataPacket(collectionName);
            packet.setIndexRowData(indexDatas);
            indexDataPushFacade.repository(storageCluster, packet, indexWriteTimeout, false);
            successCount = indexDatas.size();
        } catch (Exception e) {
            LOG.error("import index data failed", e);

            // Bisect and retry each half. The size > 1 guard avoids re-submitting an
            // identical single-row batch (and with it any risk of unbounded recursion).
            if (!ignoreError && indexDatas.size() > 1) {
                int subSize = indexDatas.size() / 2;
                LOG.info("handle index build, collection: [{}], current batch size: {}", collectionName, subSize);
                successCount += handleImport(storageCluster, indexDatas.subList(0, subSize), collectionName,
                        ignoreError, true);
                // Fix: the second half starts at subSize. The old subList(subSize + 1, ...)
                // silently dropped the row at index subSize from every retried batch.
                successCount += handleImport(storageCluster, indexDatas.subList(subSize, indexDatas.size()),
                        collectionName,
                        ignoreError, true);
            }
        }
        return successCount;
    }

    /**
     * Build the lock key for an index: {@code projectCode_indexName}.
     *
     * @param projectCode project the index belongs to
     * @param indexName   index name
     * @return the underscore-joined key
     */
    @Override
    public String getKey(String projectCode, String indexName) {
        final String[] keyParts = {projectCode, indexName};
        return StringUtils.join(keyParts, "_");
    }

    /**
     * Build the stream-fetch iterator the baseline build uses to page data out of the
     * configured full datasource.
     *
     * @param before                     forwarded into every page request; presumably
     *                                   an upper-bound timestamp — confirm against
     *                                   IndexBuildDataFetchRequest
     * @param dto                        datasource configuration (rpc url/method, paging, threads)
     * @param meta                       index metadata forwarded to the data converter
     * @param indexSolrProgressKeyPrefix redis key prefix for the solr import progress table
     * @param progressId                 build progress id, sent as a request header
     * @return a single- or multi-threaded iterator, or {@code null} when the
     *         datasource type is unsupported
     */
    private IStreamFetchIterator<FetchDatasDTO<IndexRowData>> buildStreamFetchIterator(long before, IndexDatasourceDTO
            dto, IndexMeta meta, String indexSolrProgressKeyPrefix, Long progressId) {
        // Number of fetch threads (null when not configured).
        Integer threadNum = Optional.ofNullable(dto).map(IndexDatasourceDTO::getFullDatasourceRpc).map
                (RpcIndexDataSourceDTO::getThreadNum).orElse(null);

        // Paging field: defaults to the id; overridable per datasource.
        // NOTE(review): Optional.of(dto) NPEs on a null dto, unlike the
        // Optional.ofNullable(dto) used just above — confirm dto is never null here.
        String indexField = Optional.of(dto).map(IndexDatasourceDTO::getFullDatasourceRpc).map(RpcIndexDataSourceDTO::getIndexField).orElse(null);

        IndexDataSourceType indexDataSourceType =
                Enums.findValue(dto.getFullDatasourceType(), IndexDataSourceType.class);

        int fetchSize = dto.getFullDatasourceRpc().getPageSize();

        // Query optimisation flag: 1 enabled, 0 disabled (defaults to disabled).
        QueryOptimizeEnum queryOptimize = QueryOptimizeEnum.of(Optional.of(dto)
                .map(IndexDatasourceDTO::getFullDatasourceRpc)
                .map(RpcIndexDataSourceDTO::getQueryOptimize).orElse(QueryOptimizeEnum.DISABLE.code()));

        StreamFetcher<RPCResultData> fetcher = null;

        HttpHeaders httpHeaders = new HttpHeaders();

        // Tag every fetch request with the build progress id.
        httpHeaders.add(HF_SEARCHCLOUD_PROGRESS_ID, progressId.toString());

        switch (indexDataSourceType) {
            case JSON_RPC:
                fetcher = new StreamFetcher<RPCResultData>() {
                    // Plain page-number fetch.
                    @Override
                    public PageDTO<RPCResultData> fetch(int pageNo, int pageSize) {
                        RPCConfigProperties config = new RPCConfigProperties();
                        config.setUrl(dto.getFullDatasourceRpc().getRpcUrl());
                        config.setMethod(dto.getFullDatasourceRpc().getRpcMethod());
                        config.setSuccessCode("0");
                        config.setJsonrpc("2.0");

                        return RPC.executeForPage(restTemplate, config, RPCResultData.class, httpHeaders,
                                new IndexBuildDataFetchRequest(pageNo, pageSize, before));
                    }

                    // Keyset-style fetch carrying the previous page's max id.
                    @Override
                    public PageDTO<RPCResultData> fetch(int pageNo, int pageSize, Long lastMaxId) {
                        RPCConfigProperties config = new RPCConfigProperties();
                        config.setUrl(dto.getFullDatasourceRpc().getRpcUrl());
                        config.setMethod(dto.getFullDatasourceRpc().getRpcMethod());
                        config.setSuccessCode("0");
                        config.setJsonrpc("2.0");

                        return RPC.executeForPage(restTemplate, config, RPCResultData.class, httpHeaders,
                                new IndexBuildDataFetchRequest(pageNo, pageSize, before, lastMaxId));
                    }
                };
                break;
            default:
                break;
        }

        if (fetcher == null) {
            return null;
        }
        // Converter from raw RPC rows to index rows (delegates to dataConvert).
        BiFunction<List<RPCResultData>, IndexMeta, List<IndexRowData>> dataConvertFunction = this::dataConvert;


        FetchDataRetryDTO retryDTO = new FetchDataRetryDTO(singleQueryMaxRetryTime, allQueryMaxRetryTime, retryTimeInterval);

        // Enable multi-threaded fetching when threadNum reaches the support threshold.
        if (threadNum != null && threadNum >= IndexBuildConstants.MULTI_THREAD_SUPPORT_THRESHOLD) {
            return new MultiThreadStreamFetchIterator(fetchSize, fetcher, threadNum, dataConvertFunction, meta,
                    redisTemplate, indexSolrProgressKeyPrefix, queryOptimize, progressId, indexField, retryDTO);
        }

        return StreamFetchIterator.iterator(fetchSize, fetcher, dataConvertFunction, meta, redisTemplate,
                indexSolrProgressKeyPrefix, queryOptimize, progressId, indexField, retryDTO);
    }

    /**
     * Fetch incremental index rows for the given primary-key ids from the
     * incremental datasource.
     *
     * @param idList primary-key ids to fetch
     * @param dto    datasource configuration for the incremental RPC
     * @return the fetched rows, or {@code null} when the datasource type is unsupported
     */
    private List<RPCResultData> incStreamFetch(List<String> idList, IndexDatasourceDTO dto) {
        final IndexDataSourceType sourceType =
                Enums.findValue(dto.getIncDatasourceType(), IndexDataSourceType.class);

        List<RPCResultData> fetched = null;
        switch (sourceType) {
            case JSON_RPC:
                final RPCConfigProperties rpcConfig = new RPCConfigProperties();
                rpcConfig.setJsonrpc("2.0");
                rpcConfig.setSuccessCode("0");
                rpcConfig.setUrl(dto.getIncDatasourceRpc().getRpcUrl());
                rpcConfig.setMethod(dto.getIncDatasourceRpc().getRpcMethod());

                fetched = RPC.executeForList(restTemplate, rpcConfig, RPCResultData.class, new Object[]{idList});
                break;
            default:
                break;
        }

        return fetched;
    }

    /**
     * Fetch one page of incremental index data whose data version falls between
     * {@code maxDataVersion} and now.
     *
     * @param maxDataVersion lower bound of the data-version window
     * @param dto            datasource configuration for the incremental RPC
     * @param page           page number to fetch
     * @return the fetched rows; an empty list when the datasource type is
     *         unsupported or the RPC returned no page
     */
    private List<RPCResultData> incDataFetch(long maxDataVersion, IndexDatasourceDTO dto, long page) {
        IndexDataSourceType indexDataSourceType =
                Enums.findValue(dto.getIncDatasourceType(), IndexDataSourceType.class);

        // Version window and paging parameters for the RPC call.
        JsonObject param = new JsonObject();
        param.addProperty(IndexBuildConstants.INC_QUERY_PARAM_VERSION_FROM, maxDataVersion);
        param.addProperty(IndexBuildConstants.INC_QUERY_PARAM_VERSION_TO, System.currentTimeMillis());
        // Fall back to the default page size when the configured one is not positive.
        param.addProperty("pageSize", dto.getIncDatasourceRpc().getPageSize() <= 0 ? IndexBuildConstants.INC_DEFAULT_PAGE_SIZE
                : dto.getIncDatasourceRpc().getPageSize());
        param.addProperty("pageNo", page);

        // Fix: use the parameterized PageDTO<RPCResultData> instead of the raw type.
        PageDTO<RPCResultData> result = null;
        switch (indexDataSourceType) {
            case JSON_RPC:
                RPCConfigProperties config = new RPCConfigProperties();
                config.setUrl(dto.getIncDatasourceRpc().getRpcUrl());
                config.setMethod(dto.getIncDatasourceRpc().getRpcMethod());
                config.setSuccessCode("0");
                config.setJsonrpc("2.0");
                result = RPC.executeForPage(restTemplate, config, RPCResultData.class, param);
                break;
            default:
                break;
        }
        // orElseGet avoids allocating the fallback list on the non-empty path.
        return Optional.ofNullable(result).map(PageDTO::getDatas).orElseGet(ArrayList::new);
    }

    /**
     * Convert, validate and filter a list of raw RPC rows into index rows.
     * Rows that fail conversion are dropped.
     *
     * @param resultDatas raw rows returned by the RPC datasource
     * @param meta        index metadata describing the fields
     * @return the successfully converted rows
     */
    private List<IndexRowData> dataConvert(List<RPCResultData> resultDatas, IndexMeta meta) {
        final List<IndexRowData> converted = new ArrayList<IndexRowData>(resultDatas.size());
        for (RPCResultData rawRow : resultDatas) {
            IndexRowData rowData = dataConvert(rawRow, meta);
            if (rowData == null) {
                // Conversion rejected this row (missing required field, parse error, ...).
                continue;
            }
            converted.add(rowData);
        }
        return converted;
    }

    /**
     * Build DELETE-operation index rows for a list of primary-key ids.
     *
     * @param idList primary-key ids to delete
     * @param meta   index metadata supplying the pk and version field names
     * @return one DELETE row per id; empty list for an empty input
     */
    private List<IndexRowData> dataConvertForDelete(List<String> idList, IndexMeta meta) {
        if (CollectionUtils.isEmpty(idList)) {
            return new ArrayList<IndexRowData>(0);
        }
        final FieldMeta pkMeta = meta.getPk();
        final FieldMeta versionMeta = meta.getVersionField();
        final List<IndexRowData> deleteRows = new ArrayList<IndexRowData>(idList.size());

        for (String pkValue : idList) {
            IndexRowData row = new IndexRowData(pkMeta.getName(), OperateEnum.DELETE);
            row.put(pkMeta.getName(), pkValue);
            row.setVersionFiled(versionMeta.getName());
            // Deletions by primary key carry the current timestamp as their data version.
            row.setVersion(System.currentTimeMillis());
            deleteRows.add(row);
        }

        return deleteRows;
    }

    /**
     * Convert one RPC result row into an {@link IndexRowData} according to the index
     * field metadata.
     * <p>
     * The row version is taken from the metadata's version field when present,
     * falling back to the current timestamp. Each declared field is parsed per its
     * metadata type; multi-valued fields are deserialized as lists.
     *
     * @param resultData raw row returned by the RPC datasource
     * @param meta       index metadata describing pk, version field and field types
     * @return the converted row, or {@code null} when metadata is missing, a required
     *         field is blank, or any field fails to parse
     */
    private IndexRowData dataConvert(RPCResultData resultData, IndexMeta meta) {
        Map<String, FieldMeta> fieldMetaMap = meta.getFiledMetas();

        IndexRowData result = new IndexRowData(meta.getPk().getName());
        String versionFieldName = meta.getVersionField().getName();


        if (fieldMetaMap == null) {
            // Fixed typo in the log message (was "fieldMetaMal").
            LOG.error("fieldMetaMap is null");
            return null;
        }
        // 数据 版本号 for es: https://elasticsearch.cn/book/elasticsearch_definitive_guide_2.x/optimistic-concurrency-control.html
        // Take the data version from the configured version field when present.
        JsonElement version = resultData.get(versionFieldName);

        if (isBlankJsonValue(version)) {
            result.setVersion(System.currentTimeMillis());
        } else {
            result.setVersion(version.getAsLong());
        }
        // Iterate entries to avoid a second map lookup per field.
        for (Map.Entry<String, FieldMeta> entry : fieldMetaMap.entrySet()) {
            String field = entry.getKey();
            try {
                FieldMeta fieldMeta = entry.getValue();
                JsonElement value = resultData.get(field);
                if (isBlankJsonValue(value)) {
                    // A blank required field invalidates the whole row.
                    if (fieldMeta.isRequired()) {
                        LOG.error("cannot found available field, indexName:{}, field:{}", meta.getIndexName(), field);
                        return null;
                    }
                } else {
                    MetadataFieldType metadataFieldType = fieldMeta.getType();
                    switch (metadataFieldType) {
                        case INT:
                            if (fieldMeta.isMultiValued()) {
                                result.put(field,
                                        GSON.fromJson(value, new RPC.ListTypeDefinition<Integer>(Integer.class)));
                            } else {
                                result.put(field, Integer.parseInt(value.getAsString()));
                            }
                            break;
                        case LONG:
                            if (fieldMeta.isMultiValued()) {
                                result.put(field, GSON.fromJson(value, new RPC.ListTypeDefinition<Long>(Long.class)));
                            } else {
                                result.put(field, Long.parseLong(value.getAsString()));
                            }
                            break;
                        case FLOAT:
                            if (fieldMeta.isMultiValued()) {
                                result.put(field, GSON.fromJson(value, new RPC.ListTypeDefinition<Float>(Float.class)));
                            } else {
                                result.put(field, Float.parseFloat(value.getAsString()));
                            }
                            break;
                        case DOUBLE:
                            if (fieldMeta.isMultiValued()) {
                                result.put(field,
                                        GSON.fromJson(value, new RPC.ListTypeDefinition<Double>(Double.class)));
                            } else {
                                result.put(field, Double.parseDouble(value.getAsString()));
                            }
                            break;
                        case STRING:
                        case MY_IK:
                        case GEO:
                        case TEXT_LIKE:
                        default:
                            // Everything else is stored as (lists of) strings.
                            if (fieldMeta.isMultiValued()) {
                                result.put(field,
                                        GSON.fromJson(value, new RPC.ListTypeDefinition<String>(String.class)));
                            } else {
                                result.put(field, value.getAsString());
                            }
                            break;
                    }
                }
            } catch (Exception e) {
                LOG.error("parse field error, field:{}", field);
                LOG.error("error parse index row data", e);
                return null;
            }
        }
        return result;

    }

    /** True when the JSON element is absent, JSON null, or an empty-string primitive. */
    private static boolean isBlankJsonValue(JsonElement element) {
        return element == null || element instanceof JsonNull
                || (element instanceof JsonPrimitive && "".equals(element.getAsString()));
    }

    /**
     * Whether an interrupt flag has been raised for the given build progress id.
     *
     * @param id build progress id
     * @return true when the interrupt hash contains an entry for this id
     */
    private boolean checkInterrupt(Long id) {
        final String field = String.valueOf(id);
        return redisTemplate.opsForHash().hasKey(IndexBuildConstants.INTERRUPT, field);
    }

    /**
     * Clear the interrupt flag for the given build progress id.
     *
     * @param id build progress id whose interrupt flag is removed
     */
    @ManagedOperation
    public void unInterrupt(Long id) {
        // Fix: widened from private to public — Spring JMX only exposes public methods
        // as managed operations, so a private @ManagedOperation was unreachable via JMX
        // (checkLock is public for the same reason).
        redisTemplate.opsForHash().delete(IndexBuildConstants.INTERRUPT, id + "");
    }

    /**
     * Remove the build-progress bookkeeping from redis once a baseline build finishes.
     * <p>
     * Deletes the entry keyed by progress id, then the per-datasource solr import
     * progress keys: one key per fetch thread plus a total-count key for optimized
     * multi-threaded builds, or the single default-thread key otherwise.
     *
     * @param indexDatasourceList datasources whose import-progress keys are removed
     * @param progressId          build progress id
     */
    @Override
    public void deleteProgressAfterIndexBuild(List<IndexDatasourceDTO> indexDatasourceList, Long progressId) {

        // Remove the progress entry keyed by progressId.
        redisTemplate.delete(IndexBuildConstants.SOLR_PROGRESS_ID_PREFIX + progressId);

        // Remove the per-datasource solr import progress entries.
        if (indexDatasourceList != null && !indexDatasourceList.isEmpty()) {

            for (IndexDatasourceDTO dto : indexDatasourceList) {

                RpcIndexDataSourceDTO fullRpc = dto.getFullDatasourceRpc();
                // Fix: guard against a missing full-RPC config. The old code dereferenced
                // it unconditionally and unboxed the nullable threadNum/queryOptimize,
                // either of which could throw a NullPointerException.
                if (fullRpc == null) {
                    continue;
                }
                Integer threadNum = fullRpc.getThreadNum();
                Integer queryOptimize = fullRpc.getQueryOptimize();
                String toDeleteKeyPrefix = spliceIndexSolrProgressKeyPrefix(fullRpc);

                Set<String> toDeleteKeys = new HashSet<>();
                boolean multiThreadOptimized = threadNum != null
                        && threadNum >= IndexBuildConstants.MULTI_THREAD_SUPPORT_THRESHOLD
                        && queryOptimize != null
                        && QueryOptimizeEnum.ENABLE.code() == queryOptimize;
                if (multiThreadOptimized) {
                    // One total-count key plus one progress key per fetch thread.
                    toDeleteKeys.add(toDeleteKeyPrefix + IndexBuildConstants.TOTAL_KEY_SUFFIX);

                    for (int i = 0; i < threadNum; i++) {
                        toDeleteKeys.add(toDeleteKeyPrefix + i);
                    }

                } else {
                    // Single-threaded builds record progress under the default thread seq.
                    toDeleteKeys.add(toDeleteKeyPrefix + IndexBuildConstants.DEFAULT_THREAD_SEQ);
                }
                redisTemplate.delete(toDeleteKeys);

            }

        }
    }

    /**
     * Whether a pause flag has been raised for the given build progress id.
     *
     * @param id build progress id
     * @return true when the pause hash contains an entry for this id
     */
    private boolean checkPause(Long id) {
        final String field = Long.toString(id);
        return redisTemplate.opsForHash().hasKey(IndexBuildConstants.PAUSE, field);
    }

    /**
     * Clear the pause flag for the given build progress id.
     *
     * @param id build progress id whose pause flag is removed
     * @return number of hash entries removed
     */
    @ManagedOperation
    public Long delPause(Long id) {
        // Fix: widened from private to public — Spring JMX only exposes public methods
        // as managed operations, so a private @ManagedOperation was unreachable via JMX
        // (checkLock is public for the same reason).
        return redisTemplate.opsForHash().delete(IndexBuildConstants.PAUSE, Long.toString(id));
    }

    /**
     * Build the redis key prefix under which solr import progress is recorded for a
     * datasource configuration (id, page size, start page, thread count, optimize flag).
     *
     * @param rpcIndexDataSourceDTO datasource configuration supplying the key parts
     * @return the key prefix, ending with an underscore
     */
    public String spliceIndexSolrProgressKeyPrefix(RpcIndexDataSourceDTO rpcIndexDataSourceDTO) {
        final StringBuilder prefix = new StringBuilder("IMPORT_PROGRESS_");
        prefix.append(rpcIndexDataSourceDTO.getId()).append('_');
        prefix.append(rpcIndexDataSourceDTO.getPageSize()).append('_');
        prefix.append(rpcIndexDataSourceDTO.getStartPage()).append('_');
        prefix.append(rpcIndexDataSourceDTO.getThreadNum()).append('_');
        prefix.append(rpcIndexDataSourceDTO.getQueryOptimize()).append('_');
        return prefix.toString();
    }
}
