package com.zmn.oms.business.impl.perf;

import com.zmn.biz.abnormal.common.dio.AbnormalOperatorCountDIO;
import com.zmn.biz.abnormal.common.dro.AbnormalOperatorCountDRO;
import com.zmn.biz.abnormal.dubbo.interfaces.AbnormalListRemoteService;
import com.zmn.biz.complain.common.dro.complain.ChsComplainCountDRO;
import com.zmn.biz.complain.dubbo.interfaces.complain.ChsComplainApiListRemoteService;
import com.zmn.oms.business.interfaces.es.OrderWorkEsBService;
import org.apache.dubbo.config.annotation.Reference;
import com.google.common.collect.Lists;
import com.zmn.common.constant.GlobalConsts;
import com.zmn.common.constant.OrderGlobalConsts;
import com.zmn.common.dto2.ResponseDTO;
import com.zmn.oms.business.interfaces.perf.PerfFactBService;
import com.zmn.oms.common.constant.DimensionTypeEnum;
import com.zmn.oms.common.constant.MeasureTypeEnum;
import com.zmn.oms.common.constant.OrderLogConsts;
import com.zmn.oms.model.entity.log.OrderLog;
import com.zmn.oms.model.entity.perf.PerfDimensionDistribute;
import com.zmn.oms.model.entity.perf.PerfFact;
import com.zmn.oms.services.interfaces.perf.PerfDimensionDistributeService;
import com.zmn.oms.services.interfaces.perf.PerfFactService;
import com.zmn.base.plat.engine.common.constant.DubboConsts;
import lombok.Getter;
import org.apache.commons.collections4.CollectionUtils;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.aggregations.bucket.terms.ParsedLongTerms;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.springframework.data.elasticsearch.core.query.NativeSearchQuery;
import org.springframework.data.elasticsearch.core.query.NativeSearchQueryBuilder;
import org.springframework.data.mongodb.core.MongoTemplate;
import org.springframework.data.mongodb.core.aggregation.Aggregation;
import org.springframework.data.mongodb.core.aggregation.AggregationResults;
import org.springframework.data.mongodb.core.query.Criteria;
import org.springframework.stereotype.Service;

import javax.annotation.Resource;
import java.time.LocalDate;
import java.time.LocalTime;
import java.time.ZoneId;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static com.zmn.oms.common.constant.MeasureTypeEnum.*;
import static com.zmn.oms.common.constant.OrderLogConsts.ORDER_LOG_TYPE_TRACK;

/**
 * PerfDistributeFactBServiceImpl — synchronises performance facts for the
 * order-distribute dimension (one fact row per distributor, measure and day).
 *
 * @author hankangshuai
 * @since 2019/11/20
 **/
@Service("perfDistributeSyncBService")
public class PerfDistributeFactBServiceImpl implements PerfFactBService<PerfDimensionDistribute> {
    private static final String RPC_ERROR_MSG = "远程接口调用失败";
    @Getter
    DimensionTypeEnum dimensionFor = DimensionTypeEnum.DISTRIBUTE;
    @Resource
    MongoTemplate mongoTemplate;
    @Resource
    OrderWorkEsBService orderWorkEsBService;
    @Resource
    PerfDimensionDistributeService perfDimensionDistributeService;
    @Resource
    PerfFactService perfFactService;

    @Reference(version = DubboConsts.INTERFACE_VERSION, check = false)
    AbnormalListRemoteService abnormalListRemoteService;
    @Reference(version = com.zmn.biz.complain.common.constant.DubboConsts.INTERFACE_VERSION, check = false)
    ChsComplainApiListRemoteService chsComplainApiListRemoteService;

    /**
     * 描述: 数据基础分组值大小 数组超出该值则 切割为5组进行入库操作
     **/
    private static final Integer BASE_CROUP_NUM = 2000;

    /**
     * 同步事实
     *
     * @param syncDate 指定同步的日期
     * @return 更新数量
     */
    @Override
    public Integer syncFact(List<PerfDimensionDistribute> distributes, LocalDate syncDate) {
        Map<Integer, Integer> dimensionIdMap = distributes.stream().collect(Collectors.toMap(PerfDimensionDistribute::getDistributorId, PerfDimensionDistribute::getDimensionId));
        return IntStream.of(
                syncConfirmCount(dimensionIdMap, syncDate),
                syncDistributCount(dimensionIdMap, syncDate),
                syncNormalCount(dimensionIdMap, syncDate, DISTRIBUTED_COUNT, OrderLogConsts.ORDER_LOG_TYPE_DISTRIBUTE),
                syncLastCount(dimensionIdMap, syncDate, REMARK_COUNT, OrderLogConsts.ORDER_LOG_TYPE_REMARK),
                syncNormalCount(dimensionIdMap, syncDate, REMARKED_COUNT, OrderLogConsts.ORDER_LOG_TYPE_REMARK),
                syncLastCount(dimensionIdMap, syncDate, TRACK_COUNT, ORDER_LOG_TYPE_TRACK),
                syncNormalCount(dimensionIdMap, syncDate, TRACKED_COUNT, ORDER_LOG_TYPE_TRACK),
                syncDistributeSuceesCount(dimensionIdMap, syncDate),
                syncComplainHandleCount(dimensionIdMap, syncDate),
                syncAbnormalHandleCount(dimensionIdMap, syncDate)
        ).sum();
    }

    /**
     * 同步异常处理量
     */
    private Integer syncAbnormalHandleCount(Map<Integer, Integer> dimensionIdMap, LocalDate syncDate) {
        Date date = Date.from(syncDate.atStartOfDay(ZoneId.systemDefault()).toInstant());
        AbnormalOperatorCountDIO abnormalOperatorCountDIO = AbnormalOperatorCountDIO.builder()
                .startTime(date).endTime(date).operatorIdList(Lists.newArrayList(dimensionIdMap.keySet())).build();
        com.zmn.common.dto2.ResponseDTO<List<AbnormalOperatorCountDRO>> abnorAbnormalLogCountDROResponseDTO = abnormalListRemoteService.countAbnormalProcessByQuery(abnormalOperatorCountDIO);
        List<AbnormalOperatorCountDRO> abnorAbnormalLogCountDROS = Objects.requireNonNull(Objects.requireNonNull(abnorAbnormalLogCountDROResponseDTO, RPC_ERROR_MSG).getData(), RPC_ERROR_MSG);
        int timeDimension = Integer.parseInt(syncDate.format(DateTimeFormatter.ofPattern("yyyyMMdd")));
        List<PerfFact> perfFacts = abnorAbnormalLogCountDROS.stream().map(dro -> {
            PerfFact perfFact = new PerfFact();
            Integer dimensionId = dimensionIdMap.get(dro.getOperatorId());
            perfFact.setDimensionType(DimensionTypeEnum.DISTRIBUTE);
            perfFact.setDimensionId(dimensionId);
            perfFact.setTimeDimension(timeDimension);
            perfFact.setMeasureType(ABNORMAL_HANDLING_COUNT);
            perfFact.setMeasureValue(Long.valueOf(dro.getCount()));
            return perfFact;
        }).collect(Collectors.toList());
        return CollectionUtils.isEmpty(perfFacts) ? 0 : perfFactService.insertOnDuplicateUpdateBatch(perfFacts);
    }

    /**
     * 同步投诉处理量
     */
    private Integer syncComplainHandleCount(Map<Integer, Integer> dimensionIdMap, LocalDate syncDate) {
        String strSyncDate = syncDate.format(DateTimeFormatter.ofPattern("yyyy-MM-dd"));
        ResponseDTO<List<ChsComplainCountDRO>> trackComplainLogCountDROResponseDTO = chsComplainApiListRemoteService.listCountHandleNumByQuery(Lists.newArrayList(dimensionIdMap.keySet()), strSyncDate, strSyncDate);
        List<ChsComplainCountDRO> logCountDROS = Objects.requireNonNull(Objects.requireNonNull(trackComplainLogCountDROResponseDTO, RPC_ERROR_MSG).getData(), RPC_ERROR_MSG);
        int timeDimension = Integer.parseInt(syncDate.format(DateTimeFormatter.ofPattern("yyyyMMdd")));
        List<PerfFact> perfFacts = logCountDROS.stream().map(dro -> {
            PerfFact perfFact = new PerfFact();
            perfFact.setDimensionType(DimensionTypeEnum.DISTRIBUTE);
            Integer dimensionId = dimensionIdMap.get(dro.getStaffId());
            perfFact.setDimensionId(dimensionId);
            perfFact.setTimeDimension(timeDimension);
            perfFact.setMeasureType(COMPLAINT_HANDLING_COUNT);
            perfFact.setMeasureValue(Long.valueOf(dro.getCountNum()));
            return perfFact;
        }).collect(Collectors.toList());
        return CollectionUtils.isEmpty(perfFacts) ? 0 : perfFactService.insertOnDuplicateUpdateBatch(perfFacts);
    }

    /**
     * 同步派单单量
     *
     * @param dimensionIdMap
     * @param syncDate
     * @return
     */
    private Integer syncDistributCount(Map<Integer, Integer> dimensionIdMap, LocalDate syncDate) {
        NativeSearchQueryBuilder nativeSearchQueryBuilder = new NativeSearchQueryBuilder();
        NativeSearchQuery build = nativeSearchQueryBuilder
                .withQuery(QueryBuilders.boolQuery()
                        .filter(QueryBuilders.termsQuery("distributerId", dimensionIdMap.keySet()))
                        .filter(QueryBuilders.rangeQuery("distributeTime").from(syncDate.toString()).to(syncDate.toString()))
                ).addAggregation(
                        AggregationBuilders.terms("distributerId").field("distributerId").size(10000)
                ).build();
        int timeDimension = Integer.parseInt(syncDate.format(DateTimeFormatter.ofPattern("yyyyMMdd")));
        Aggregations aggregations = orderWorkEsBService.getAggregationsByQuery(build);
        ParsedLongTerms distributerIdTerms = aggregations.get("distributerId");
        List<? extends Terms.Bucket> buckets = distributerIdTerms.getBuckets();
        List<PerfFact> perfFacts =  buckets.stream().map(bucket -> {
                int distributerId = bucket.getKeyAsNumber().intValue();
                Integer dimensionId = dimensionIdMap.get(distributerId);
                PerfFact perfFact = new PerfFact();
                perfFact.setDimensionType(DimensionTypeEnum.DISTRIBUTE);
                perfFact.setDimensionId(dimensionId);
                perfFact.setTimeDimension(timeDimension);
                perfFact.setMeasureType(DISTRIBUTE_COUNT);
                perfFact.setMeasureValue(bucket.getDocCount());
                return perfFact;
            }).collect(Collectors.toList());
        return perfFactService.insertOnDuplicateUpdateBatch(perfFacts);
    }

    /**
     * 同步派单成功单量
     *
     * @param dimensionIdMap
     * @param syncDate
     * @return
     */
    private Integer syncDistributeSuceesCount(Map<Integer, Integer> dimensionIdMap, LocalDate syncDate) {
        NativeSearchQueryBuilder nativeSearchQueryBuilder = new NativeSearchQueryBuilder();
        NativeSearchQuery build = nativeSearchQueryBuilder
                .withQuery(QueryBuilders.boolQuery()
                        .filter(QueryBuilders.termsQuery("distributerId", dimensionIdMap.keySet()))
                        .filter(QueryBuilders.termQuery("resultStatus", OrderGlobalConsts.WORK_RESULT_SUCCESS))
                        .filter(QueryBuilders.rangeQuery("distributeTime").from(syncDate.toString()).to(syncDate.toString()))
                ).addAggregation(
                        AggregationBuilders.terms("distributerId").field("distributerId").size(10000)
                ).build();
        int timeDimension = Integer.parseInt(syncDate.format(DateTimeFormatter.ofPattern("yyyyMMdd")));
        Aggregations aggregations = orderWorkEsBService.getAggregationsByQuery(build);
        ParsedLongTerms distributerIdTerms = aggregations.get("distributerId");
        List<? extends Terms.Bucket> buckets = distributerIdTerms.getBuckets();
        List<PerfFact> perfFacts = buckets.stream().map(bucket -> {
                int distributerId = bucket.getKeyAsNumber().intValue();
                Integer dimensionId = dimensionIdMap.get(distributerId);
                PerfFact perfFact = new PerfFact();
                perfFact.setDimensionType(DimensionTypeEnum.DISTRIBUTE);
                perfFact.setDimensionId(dimensionId);
                perfFact.setTimeDimension(timeDimension);
                perfFact.setMeasureType(DISTRIBUTE_SUCCESS_COUNT);
                perfFact.setMeasureValue(bucket.getDocCount());
                return perfFact;
            }).collect(Collectors.toList());
        return perfFactService.insertOnDuplicateUpdateBatch(perfFacts);
    }

    private Integer syncNormalCount(Map<Integer, Integer> dimensionIdMap, LocalDate syncDate, MeasureTypeEnum measureTypeEnum, int type) {
        Aggregation agg = Aggregation.newAggregation(
                Aggregation.project("type", "workId", "orderId", "operator", "createTime", "operatorId",
                        "operatorType"),
                Aggregation.match(
                        Criteria.where("operatorType").is(GlobalConsts.OPERATE_USER_TYPE_STAFF)
                                .and("createTime").gte(new Date(syncDate.atTime(LocalTime.MIN).toInstant(ZoneOffset.ofHours(8)).toEpochMilli()))
                                .lte(new Date(syncDate.atTime(LocalTime.MAX).toInstant(ZoneOffset.ofHours(8)).toEpochMilli()))
                                .and("type").is(type)
                                .and("operatorId").in(dimensionIdMap.keySet())
                ),
                Aggregation.group("operatorId").first("operatorId").as("factId")
                        .count().as("measureValue"),
                Aggregation.project("factId", "measureValue")
        );
        AggregationResults<PerfFact> aggregate = mongoTemplate.aggregate(agg, OrderLog.class, PerfFact.class);
        List<PerfFact> mappedResults = aggregate.getMappedResults();
        int timeDimension = Integer.parseInt(syncDate.format(DateTimeFormatter.ofPattern("yyyyMMdd")));
        for (PerfFact perfFact : mappedResults) {
            Integer factId = perfFact.getFactId();
            Integer dimensionId = dimensionIdMap.get(factId);
            perfFact.setFactId(null);
            perfFact.setDimensionType(DimensionTypeEnum.DISTRIBUTE);
            perfFact.setDimensionId(dimensionId);
            perfFact.setTimeDimension(timeDimension);
            perfFact.setMeasureType(measureTypeEnum);
        }
        return perfFactService.insertOnDuplicateUpdateBatch(mappedResults);
    }

    /**
     * 获取最后一次计数
     *
     * @param syncDate
     * @param measureTypeEnum
     * @param type
     * @return
     */
    private Integer syncLastCount(Map<Integer, Integer> dimensionIdMap, LocalDate syncDate, MeasureTypeEnum measureTypeEnum, int type) {
        Aggregation agg = Aggregation.newAggregation(
                Aggregation.project("type", "workId", "orderId", "operator", "createTime", "operatorId",
                        "operatorType"),
                Aggregation.match(
                        Criteria.where("operatorType").is(GlobalConsts.OPERATE_USER_TYPE_STAFF)
                                .and("createTime").gte(new Date(syncDate.atTime(LocalTime.MIN).toInstant(ZoneOffset.ofHours(8)).toEpochMilli()))
                                .lte(new Date(syncDate.atTime(LocalTime.MAX).toInstant(ZoneOffset.ofHours(8)).toEpochMilli()))
                                .and("type").is(type)
                                .and("operatorId").in(dimensionIdMap.keySet())
                ),
                Aggregation.group("orderId").last("operatorId").as("factId"),
                Aggregation.project("factId"),
                Aggregation.group("factId").count().as("measureValue").first("factId").as("factId"),
                Aggregation.project("measureValue", "factId")
        );
        AggregationResults<PerfFact> aggregate = mongoTemplate.aggregate(agg, OrderLog.class, PerfFact.class);
        List<PerfFact> mappedResults = aggregate.getMappedResults();
        int timeDimension = Integer.parseInt(syncDate.format(DateTimeFormatter.ofPattern("yyyyMMdd")));
        for (PerfFact perfFact : mappedResults) {
            Integer factId = perfFact.getFactId();
            Integer dimensionId = dimensionIdMap.get(factId);
            perfFact.setFactId(null);
            perfFact.setDimensionType(DimensionTypeEnum.DISTRIBUTE);
            perfFact.setDimensionId(dimensionId);
            perfFact.setTimeDimension(timeDimension);
            perfFact.setMeasureType(measureTypeEnum);
        }
        return perfFactService.insertOnDuplicateUpdateBatch(mappedResults);
    }

    /**
     * 同步确认单量
     *
     * @param syncDate
     * @return
     */
    private Integer syncConfirmCount(Map<Integer, Integer> dimensionIdMap, LocalDate syncDate) {
        NativeSearchQueryBuilder nativeSearchQueryBuilder = new NativeSearchQueryBuilder();
        NativeSearchQuery build = nativeSearchQueryBuilder
                .withQuery(QueryBuilders.boolQuery()
                        .filter(QueryBuilders.termsQuery("confirmerId", dimensionIdMap.keySet()))
                        .filter(QueryBuilders.rangeQuery("confirmTime").from(syncDate.toString()).to(syncDate.toString()))
                ).addAggregation(
                        AggregationBuilders.terms("confirmerId").field("confirmerId").size(10000)
                ).build();
        int timeDimension = Integer.parseInt(syncDate.format(DateTimeFormatter.ofPattern("yyyyMMdd")));
        Aggregations aggregations = orderWorkEsBService.getAggregationsByQuery(build);
        ParsedLongTerms confirmerIdTerms = aggregations.get("confirmerId");
        List<? extends Terms.Bucket> buckets = confirmerIdTerms.getBuckets();
        List<PerfFact> perfFacts =  buckets.stream().map(bucket -> {
                int confirmerId = bucket.getKeyAsNumber().intValue();
                Integer dimensionId = dimensionIdMap.get(confirmerId);
                PerfFact perfFact = new PerfFact();
                perfFact.setDimensionType(DimensionTypeEnum.DISTRIBUTE);
                perfFact.setDimensionId(dimensionId);
                perfFact.setTimeDimension(timeDimension);
                perfFact.setMeasureType(CONFIRM_COUNT);
                perfFact.setMeasureValue(bucket.getDocCount());
                return perfFact;
            }).collect(Collectors.toList());
        return perfFactService.insertOnDuplicateUpdateBatch(perfFacts);
    }

}
