package com.lost.octopus.es.processor.scheduler;

import com.google.common.collect.Lists;
import com.lost.octopus.es.helper.service.DocumentEsService;
import com.lost.octopus.es.processor.config.es.alias.EsAliasRemoveInfo;
import com.lost.octopus.es.processor.constant.enums.LimitSyncSchedulerKeyEnum;
import com.lost.octopus.es.processor.dao.mapper.mybatis.BaseAsyncMapper;
import com.lost.octopus.es.processor.entity.IdEntity;
import com.lost.octopus.es.processor.scheduler.entity.BurstResult;
import com.lost.octopus.es.processor.scheduler.entity.FullSyncInfo;
import com.lost.octopus.es.processor.scheduler.entity.SyncProgress;
import com.sunsharing.share.common.base.exception.ShareBusinessException;
import com.sunsharing.share.common.base.exception.ShareResponseCode;
import com.sunsharing.share.common.mapper.JsonMapper;
import lombok.extern.log4j.Log4j2;
import org.apache.commons.collections4.CollectionUtils;
import org.elasticsearch.common.StopWatch;
import org.springframework.data.elasticsearch.core.mapping.IndexCoordinates;
import org.springframework.data.elasticsearch.repository.ElasticsearchRepository;
import org.springframework.stereotype.Component;

import javax.annotation.Resource;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import java.util.stream.Collectors;

/**
 * 增量/全量更新基础的服务工具
 * 条件：
 * <br>
 * 0.指定存储的实体
 * 1.需要实现 {@link BaseAsyncMapper} 方法获取数据ID范围和数据对象
 * 2.需要操作该对象的 {@link ElasticsearchRepository} esDao
 * 3. ID类型，需要可比较，可排序（数据库性能高）
 * @author zhangbh
 **/
@Log4j2
@Component
public class EsSyncManagement {

    @Resource
    private DocumentEsService documentEsService;

    @Resource
    private EsAsyncManagement esAsyncManagement;

    @Resource
    private RedisJobRecorder redisJobRecorder;

    @Resource
    private RedisIndexRebuildRecorder redisIndexRebuildRecorder;

    /** Chunk size used by {@link #burstDeal(List, Function, EsSyncIndexOperator)}. */
    private static final int BURST_SIZE = 1000;

    /** Chunk size for each async lookup in {@link #asyncSearchAndAddToRepository}. */
    private static final int CHILD_BURST_SIZE = 100;

    /** Upper bound on scroll pages; exceeding it aborts the sync as a runaway loop. */
    private static final int MAX_SCROLL_PAGES = 1000000;

    /**
     * Scheduler keys whose entity lookups must go through the async search path.
     * Declared {@code final}: it is never mutated, and a mutable non-final static is error-prone.
     */
    private static final List<String> ASYNC_SEARCH_SCHEDULER_NAME = Lists.newArrayList(
        LimitSyncSchedulerKeyEnum.SYNC_RESIDENT.name()
    );

    /**
     * Full-sync entry point that writes into a caller-supplied, already-existing index.
     * Covers the special case of appending extra data to an index right after a regular
     * full sync: no index creation and no alias switching is performed here.
     *
     * @param operator sync operator for the entity being indexed
     * @param useIndex existing index to write into
     * @param <ID>     id type of the synced entity
     */
    public <ID> void fullSync(EsSyncIndexOperator<?, ID, ?, ?> operator, IndexCoordinates useIndex) {
        StopWatch stopWatch = new StopWatch();
        // Register progress tracking for this job before writing any data.
        redisJobRecorder.createProgress(operator.businessKey(), useIndex.getIndexName());
        stopWatch.start("数据存储");
        doWhileSync(operator, useIndex);
        stopWatch.stop();
        jobDoneLogAndRemove(operator, stopWatch);
    }

    /**
     * Logs the completion summary and removes the job's progress record.
     */
    private <ID> void jobDoneLogAndRemove(EsSyncIndexOperator<?, ID, ?, ?> operator, StopWatch stopWatch) {
        // Bug fix: the previous code called Optional.get() without a presence check.
        // If the job record had already vanished, that threw NoSuchElementException
        // and skipped removeProgress, leaking the job entry. Resolve the count safely.
        Object dataCount = redisJobRecorder.runningJob(operator.businessKey())
            .map(FullSyncInfo::getDataCount)
            .orElse(null);
        log.info("{} 全量同步完成 总数据量 {} 耗时 \n {}",
            operator.businessKey().getLabel(), dataCount,
            stopWatch.prettyPrint()
        );
        redisJobRecorder.removeProgress(operator.businessKey());
    }

    /**
     * Full-sync entry point with index lifecycle management: resumes or creates a job,
     * writes all data into a timestamped index, switches the alias over, then applies
     * remaining ES settings.
     *
     * @param operator sync operator for the entity being indexed
     * @param <ID>     id type of the synced entity
     * @return the timestamped index that received the data
     */
    public <ID> IndexCoordinates fullSync(EsSyncIndexOperator<?, ID, ?, ?> operator) {
        // The previous temporary index name is kept in redis as the job name.
        StopWatch stopWatch = new StopWatch();
        // Resume an unfinished job if one exists, otherwise create a new one.
        IndexCoordinates timestampAlias = checkHisOrCreateJob(operator, stopWatch);
        stopWatch.start("数据存储");
        doWhileSync(operator, timestampAlias);
        stopWatch.stop();

        // For a full sync (or when incremental has never run) rebuilding the index
        // and swapping the alias is the most efficient path.
        stopWatch.start("别名");
        changeAliasIfOldExists(operator, timestampAlias);
        stopWatch.stop();

        stopWatch.start("es otherSetting");
        otherEsSettings(operator);
        stopWatch.stop();

        jobDoneLogAndRemove(operator, stopWatch);
        return timestampAlias;
    }

    /**
     * Looks for an unfinished job; if none exists, creates a fresh timestamped index
     * and registers a new job for it.
     *
     * @return the timestamped index to write into (resumed or newly created)
     */
    <ID> IndexCoordinates checkHisOrCreateJob(EsSyncIndexOperator<?, ID, ?, ?> operator, StopWatch stopWatch) {
        stopWatch.start("别名、历史任务获取");
        Optional<FullSyncInfo> syncInfo = redisJobRecorder.runningJob(operator.businessKey());
        // Reuse the unfinished job's index when present; null job name yields a new one.
        String runningJob = syncInfo.map(FullSyncInfo::getJobName).orElse(null);
        IndexCoordinates timestampAlias = operator.getEsRepository().getTimestampAlias(runningJob);
        boolean noUnFinishJob = !syncInfo.isPresent();
        stopWatch.stop();

        stopWatch.start("初始化新索引");
        if (noUnFinishJob) {
            log.info("{} 没有历史任务，创建新任务,索引名称={} ", operator.businessKey().getLabel(), timestampAlias.getIndexName());
            // Create the timestamped index with its mapping; if the base index does not
            // exist yet, the repository aliases it internally.
            operator.getEsRepository().createWithMapping(timestampAlias);
            redisJobRecorder.createProgress(operator.businessKey(), timestampAlias.getIndexName());
        }
        stopWatch.stop();
        return timestampAlias;
    }

    /**
     * Scrolls the source table by id and stores each page into ES until exhausted.
     */
    private <ID> void doWhileSync(EsSyncIndexOperator<?, ID, ?, ?> operator, IndexCoordinates timestampAlias) {
        // A running job must exist here (created by the caller). Fail loudly with a
        // clear message instead of an unchecked Optional.get() NoSuchElementException.
        FullSyncInfo info = redisJobRecorder.runningJob(operator.businessKey())
            .orElseThrow(() -> new IllegalStateException(
                operator.businessKey().getLabel() + " 全量同步任务不存在，无法执行数据存储"));
        SyncProgress progress = new SyncProgress(info.getMaxId(), operator.businessKey().getLabel(), info.getDataCount());
        List<Integer> mainTabIds = operator.scrollMainTabIds(progress.getMaxId());
        while (CollectionUtils.isNotEmpty(mainTabIds)) {
            log.info("{} 调度 第{}页 {}条数据======================================",
                progress.getScheduleLabel(), progress.getCurrentPage(), mainTabIds.size());
            // Store the page in chunks; full sync never needs delete checking.
            int dataCount = saveOrDelForEs(mainTabIds, operator, timestampAlias, true).size();
            progress.record(mainTabIds, dataCount);
            mainTabIds = operator.scrollMainTabIds(progress.getMaxId());
            // Safety valve against a scroll query that never advances.
            if (progress.getCurrentPage() > MAX_SCROLL_PAGES) {
                throw new RuntimeException(progress.getScheduleLabel() + "循环次数异常，大于 1000000 次，请确认查询内容");
            }
        }
        progress.print();
    }

    /**
     * Switches the original alias over to the new timestamped index, under the
     * index-rebuild lock, and disposes of the old index.
     */
    private <ID> void changeAliasIfOldExists(EsSyncIndexOperator<?, ID, ?, ?> operator, IndexCoordinates timestampAlias) {
        boolean exists = operator.getEsRepository().indexExists();
        if (!exists) {
            return;
        }

        log.info("索引别名({})切换，开始加锁...", operator.businessKey().getLabel());
        redisIndexRebuildRecorder.indexRebuildLock(operator.entityClass());
        log.info("索引别名({})切换，加锁成功，开始切换别名", operator.businessKey().getLabel());
        try {
            // Removing the alias tags the previous index with an xxx_del alias and
            // reports whether it was the original (non-aliased) index.
            EsAliasRemoveInfo origin = operator.getEsRepository().removeAlias();
            if (origin.isOrigin()) {
                // An original index occupies the alias name itself, so the delete must
                // finish before the alias can be attached to the new index.
                log.info("是原始索引，必须等待删除完成才能增加别名={}", origin.getRealIndexName());
                operator.getEsRepository().deleteIndex();
            } else {
                // Otherwise the stale index can be removed in the background.
                log.info("异步删除索引={}", origin.getRealIndexName());
                operator.getEsRepository().deleteAsyncIndex(origin.getRealIndexName());
            }
            operator.getEsRepository().addByOriginalAlias(timestampAlias);
            log.info("索引别名({})切换，别名切换成功，释放锁...", operator.businessKey().getLabel());
        } finally {
            // Bug fix: always release the rebuild lock, even when the alias switch
            // throws — previously a failure left the lock held forever.
            redisIndexRebuildRecorder.unLockIndex(operator.entityClass());
        }
    }

    /**
     * Stores the entities for the given ids into ES in chunks; in incremental mode
     * also deletes ids that no longer resolve to an entity.
     *
     * @param mainTabIds     all ids awaiting sync
     * @param operator       sync operator
     * @param timestampAlias target index (non-null for full sync)
     * @param isFull         full sync flag; full sync skips the delete check
     * @param <ID>           id type
     * @return ids that resolved to stored entities
     */
    public <ID> List<ID> saveOrDelForEs(List<Integer> mainTabIds, EsSyncIndexOperator<?, ID, ?, ?> operator,
                                        IndexCoordinates timestampAlias, boolean isFull) {
        if (CollectionUtils.isEmpty(mainTabIds)) {
            return new ArrayList<>();
        }
        List<ID> effectIds = new ArrayList<>();
        burstDeal(mainTabIds, (burstIds) -> {
            // Some schedulers (see ASYNC_SEARCH_SCHEDULER_NAME) require the async path.
            List<ID> searchIds;
            if (ASYNC_SEARCH_SCHEDULER_NAME.contains(operator.businessKey().name())) {
                searchIds = asyncSearchAndAddToRepository(burstIds, operator, timestampAlias);
            } else {
                searchIds = searchAndAddToRepository(burstIds, operator, timestampAlias);
            }
            effectIds.addAll(searchIds);
            // Advance the redis progress marker to the last id of this chunk.
            redisJobRecorder.runningJob(operator.businessKey()).ifPresent(
                info -> redisJobRecorder.updateProgress(operator.businessKey(), burstIds.get(burstIds.size() - 1),
                    searchIds.size()));
            return searchIds.size();
        }, operator);
        int removeCount = isFull ? 0 : removeDataCheck(mainTabIds, operator, effectIds, timestampAlias);
        log.info("{} 数据存储任务，一共存储 {} 条数据 删除 {} 条数据============", operator.businessKey().getLabel(), effectIds.size(), removeCount);
        return effectIds;
    }

    /**
     * Deletes from ES every id that was requested but did not resolve to an entity.
     *
     * @return number of ids deleted
     */
    private <ID> int removeDataCheck(List<Integer> mainTabIds, EsSyncIndexOperator<?, ID, ?, ?> operator, List<ID> effectIds,
                                     IndexCoordinates timestampAlias) {
        if (mainTabIds.size() == effectIds.size()) {
            return 0;
        }
        // Convert the raw Integer ids to the operator's id type before comparing.
        List<ID> allIds = new ArrayList<>(mainTabIds.size());
        for (Integer id : mainTabIds) {
            ID typeId = JsonMapper.nonNullMapper().getMapper().convertValue(id, operator.idType());
            allIds.add(typeId);
        }
        allIds.removeAll(effectIds);
        operator.deleteAllById(allIds, timestampAlias);
        return allIds.size();
    }

    /**
     * Runs {@code job} over {@code ids} in chunks of {@link #BURST_SIZE}.
     *
     * @return the summed result of all chunk invocations
     */
    public <I> int burstDeal(List<I> ids, Function<List<I>, Integer> job, EsSyncIndexOperator<?, ?, ?, ?> operator) {
        if (CollectionUtils.isEmpty(ids)) {
            return 0;
        }
        List<List<I>> partition = Lists.partition(ids, BURST_SIZE);
        log.debug("{} 需要查询{}条数据,一共需要{}分片", operator.businessKey().getLabel(), ids.size(), partition.size());
        // Plain int suffices: this loop is sequential, the old AtomicInteger was overhead.
        int total = 0;
        for (int i = 0; i < partition.size(); i++) {
            List<I> burstIds = partition.get(i);
            int currentPage = i + 1;
            boolean isAsync = ASYNC_SEARCH_SCHEDULER_NAME.contains(operator.businessKey().name());
            log.debug("{} 正在同步第{}分片,还剩下{}分片 isAsync {}", operator.businessKey().getLabel(), currentPage, partition.size() - currentPage, isAsync);
            total += job.apply(burstIds);
        }
        return total;
    }

    /**
     * Syncs entities addressed directly by ES document id: stores every id that still
     * resolves to an entity and deletes the rest.
     *
     * @return chunk count plus the ids that were actually stored
     */
    public <T extends IdEntity<String>> BurstResult<String> saveOrDelEsByEsId(List<String> ids, EsSyncIndexOperator<T, String, ?, ?> operator,
                                                                              IndexCoordinates timestampAlias) {
        if (CollectionUtils.isEmpty(ids)) {
            return new BurstResult<>(0, ids);
        }
        // allIds shrinks to the set of ids with no backing entity (candidates for deletion).
        List<String> allIds = new ArrayList<>(ids);
        List<String> effectedIds = new ArrayList<>();
        int num = burstDeal(ids, (burstIds) -> {
            List<T> records = operator.getByEsIds(burstIds);
            for (T record : records) {
                allIds.remove(record.getId());
            }
            operator.saveAll(records, timestampAlias);
            effectedIds.addAll(records.stream().map(IdEntity::getId).collect(Collectors.toList()));
            return burstIds.size();
        }, operator);
        // Whatever remains had no backing entity — purge it from the index.
        if (CollectionUtils.isNotEmpty(allIds)) {
            operator.deleteAllById(allIds, timestampAlias);
        }
        log.info("{} 数据存储任务，一共存储 {} 条数据 删除 {} 条无效数据", operator.businessKey().getLabel(), ids.size() - allIds.size(), allIds.size());
        return new BurstResult<>(num, effectedIds);
    }

    /**
     * Synchronous path: loads the entities for one chunk (including any pre-store
     * transformation) and saves them.
     *
     * @return ids of the stored entities
     */
    private <T extends IdEntity<ID>, ID> List<ID> searchAndAddToRepository(
        List<Integer> burstIds, EsSyncIndexOperator<T, ID, ?, ?> operator, IndexCoordinates aliasIndex) {
        log.debug("{} 调度，正在同步的数据id = {}", operator.businessKey().getLabel(), burstIds);
        List<T> records = operator.findWillSyncInfo(burstIds);
        operator.saveAll(records, aliasIndex);
        return records.stream().map(IdEntity::getId).collect(Collectors.toList());
    }

    /**
     * Async path: fans the chunk out into sub-chunks of {@link #CHILD_BURST_SIZE},
     * loads each via {@link EsAsyncManagement}, gathers the results and saves them.
     *
     * @return ids of the stored entities
     */
    private <T extends IdEntity<ID>, ID> List<ID> asyncSearchAndAddToRepository(
        List<Integer> burstIds, EsSyncIndexOperator<T, ID, ?, ?> operator, IndexCoordinates aliasIndex) {
        StopWatch stopWatch = new StopWatch();
        List<List<Integer>> childPartition = Lists.partition(new ArrayList<>(burstIds), CHILD_BURST_SIZE);
        log.debug("{} 调度，当前页数据,按{}条分块异步查询数据，共需分{}块", operator.businessKey().getLabel(), CHILD_BURST_SIZE, childPartition.size());
        List<Future<List<T>>> futureList = new ArrayList<>(childPartition.size());
        stopWatch.start("分块异步查询数据");
        for (List<Integer> childBurstIds : childPartition) {
            futureList.add(esAsyncManagement.asyncFindWillSyncInfo(childBurstIds, operator));
        }
        List<T> records = new ArrayList<>();
        // Bug fix: the previous implementation busy-polled Future.isDone() in a tight
        // loop, spinning a CPU core until every future finished. Future.get() blocks
        // properly; total wall time is unchanged since all futures must complete anyway.
        for (Future<List<T>> listFuture : futureList) {
            try {
                records.addAll(listFuture.get());
            } catch (InterruptedException e) {
                // Restore the interrupt flag before converting to a business error.
                Thread.currentThread().interrupt();
                log.error("asyncAddToRepository 失败", e);
                throw new ShareBusinessException(ShareResponseCode.BIZ_ILLEGAL.getCode(), "同步数据到Es失败，asyncAddToRepository没有返回预期的结果");
            } catch (Exception e) {
                log.error("asyncAddToRepository 失败", e);
                throw new ShareBusinessException(ShareResponseCode.BIZ_ILLEGAL.getCode(), "同步数据到Es失败，asyncAddToRepository没有返回预期的结果");
            }
        }
        stopWatch.stop();
        log.debug("{} 调度，当前页分块异步解密查询数据已结束，查询数据存储总耗时{}", operator.businessKey().getLabel(), stopWatch.prettyPrint());
        operator.saveAll(records, aliasIndex);
        return records.stream().map(IdEntity::getId).collect(Collectors.toList());
    }

    /**
     * Applies remaining ES settings after a full sync.
     */
    protected void otherEsSettings(EsSyncIndexOperator<?, ?, ?, ?> operator) {
        // Raise max_result_window so deep pagination over the rebuilt index works.
        documentEsService.setMaxResultWindow(operator.entityClass(), 10000000);
    }
}
