package com.dongzili.demo.common.utils;

import cn.hutool.json.JSONUtil;
import com.dongzili.demo.common.config.es.EsUtils;
import com.dongzili.demo.customer.model.Relation;
import com.dongzili.demo.customer.model.ResourceModel;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.sun.istack.internal.NotNull;
import lombok.Data;
import lombok.val;
import org.dromara.easyes.core.conditions.select.LambdaEsQueryWrapper;
import org.dromara.easyes.core.kernel.BaseEsMapper;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.search.aggregations.*;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
import org.elasticsearch.search.aggregations.bucket.histogram.ParsedDateHistogram;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.springframework.util.StringUtils;

import java.io.IOException;
import java.time.LocalDate;
import java.util.*;
import java.util.stream.Collectors;

public class AggregationsParser {


    /**
     * Ad-hoc driver: builds a nested terms / date-histogram aggregation for the
     * configured query dimension, executes it against the Relation index, and
     * flattens the bucket tree into row maps.
     *
     * NOTE(review): this reads like scratch/demo code (hard-coded dimension,
     * unused result) — consider moving it into a service method or a test.
     */
    public static void main(String[] args) {

        LambdaEsQueryWrapper<Relation> wrapper = new LambdaEsQueryWrapper<>();
        wrapper.eq(Relation::getFlag, false);
        // Only aggregation buckets are needed, not the hits themselves.
        wrapper.size(0);

        // Innermost level: top-100 library names ordered by doc count (desc).
        AggregationBuilder libraryAgg = AggregationBuilders
                .terms("libraryName").size(100)
                .field("libraryName.keyword").order(BucketOrder.count(false));

        ManageBrowseLogQuery query = new ManageBrowseLogQuery();
        query.setDimension(DimensionEnum.year);

        DimensionEnum dimension = query.getDimension();
        if (dimension == null) {
            // Guard: switching on a null enum would NPE; keep the original error.
            throw new RuntimeException("不支持的查询维度");
        }

        // Comma-separated ES field paths, outermost grouping level first.
        String fieldName;
        AggregationBuilder aggregation;
        switch (dimension) {
            case year:
                fieldName = "orgID,createTime";
                aggregation = getYearBuilder(fieldName, "createTime", libraryAgg);
                break;
            case month:
                fieldName = "orgID,createTimeYYMM.keyword";
                aggregation = getGeneralBuilder(fieldName, libraryAgg);
                break;
            case department:
                fieldName = "orgID,department.keyword";
                aggregation = getGeneralBuilder(fieldName, libraryAgg);
                break;
            case subject:
                fieldName = "orgID,department.keyword,subjectName.keyword";
                aggregation = getGeneralBuilder(fieldName, libraryAgg);
                break;
            case clazz:
                fieldName = "orgID,department.keyword,clazz.keyword";
                aggregation = getGeneralBuilder(fieldName, libraryAgg);
                break;
            case user:
                fieldName = "orgID,userName.keyword,realName.keyword";
                aggregation = getGeneralBuilder(fieldName, libraryAgg);
                break;
            case resource:
                fieldName = "orgID,resCode.keyword,resName.keyword";
                aggregation = getGeneralBuilder(fieldName, libraryAgg);
                break;
            default:
                throw new RuntimeException("不支持的查询维度");
        }

        // Bucket-name path used for flattening; ".keyword" is stripped because
        // parseAggregations strips it from aggregation names as well.
        List<String> fieldNames = Arrays.stream(fieldName.split(","))
                .map(f -> f.replace(".keyword", ""))
                .collect(Collectors.toCollection(ArrayList::new));
        fieldNames.add("libraryName");

        BaseEsMapper<Relation> mapper = EsUtils.getMapper(Relation.class);
        SearchSourceBuilder sourceBuilder = mapper.getSearchSourceBuilder(wrapper);
        sourceBuilder.aggregation(aggregation);
        wrapper.setSearchSourceBuilder(sourceBuilder);

        SearchResponse searchResponse = mapper.search(wrapper);
        List<Map<String, Object>> flatList =
                AggregationsParser.parseAggregations(searchResponse.getAggregations(), fieldNames);
    }

    /**
     * Builds a (possibly nested) terms aggregation whose innermost level is a
     * yearly date-histogram on {@code yearField}, which in turn wraps {@code child}.
     *
     * @param field     comma-separated field list, outermost grouping first; any
     *                  entry equal to {@code yearField} is dropped from the terms
     *                  levels because the histogram aggregates it instead
     * @param yearField date field to bucket by calendar year (formatted "yyyy")
     * @param child     innermost sub-aggregation to attach under the histogram
     * @return the root aggregation builder
     */
    public static AggregationBuilder getYearBuilder(String field, String yearField, AggregationBuilder child){
        // Shared innermost level (was duplicated verbatim in both branches):
        // yearly histogram wrapping the supplied child aggregation.
        AggregationBuilder yearHistogram = AggregationBuilders
                .dateHistogram(yearField)
                .field(yearField)
                .calendarInterval(DateHistogramInterval.YEAR)
                .format("yyyy")
                .subAggregation(child);
        if (field.contains(",")) {
            // Multi-field: nest one terms level per remaining field around the histogram.
            List<String> fields = Arrays.stream(field.split(","))
                    .filter(f -> !f.equals(yearField))
                    .collect(Collectors.toList());
            return buildAggregation(fields, 0, yearHistogram);
        } else {
            // Single field: one terms level wrapping the histogram directly.
            return AggregationBuilders.terms(field).size(10000).field(field).subAggregation(yearHistogram);
        }
    }
    /**
     * Builds a terms-aggregation chain from a comma-separated field list;
     * {@code child} becomes the innermost sub-aggregation.
     */
    public static AggregationBuilder getGeneralBuilder(String field, AggregationBuilder child) {
        // Single field: one terms level wrapping the child directly.
        if (!field.contains(",")) {
            return AggregationBuilders.terms(field).size(10000).field(field).subAggregation(child);
        }
        // Several fields: build one nested terms level per field, outermost first.
        return buildAggregation(Arrays.asList(field.split(",")), 0, child);
    }

    /**
     * Wraps {@code child} in one terms aggregation per field so that
     * {@code fields.get(index)} becomes the outermost bucket level; returns
     * {@code child} unchanged when {@code index} is past the end of the list.
     */
    private static AggregationBuilder buildAggregation(List<String> fields, int index, AggregationBuilder child) {
        AggregationBuilder wrapped = child;
        // Wrap from the innermost field outwards (equivalent to the recursive form).
        for (int i = fields.size() - 1; i >= index; i--) {
            String name = fields.get(i).trim();
            wrapped = AggregationBuilders.terms(name).size(10000)
                    .field(name)
                    .subAggregation(wrapped);
        }
        return wrapped;
    }


    /**
     * Converts an aggregation response into flat row maps: one map per unique
     * combination of bucket keys along {@code fieldNames}, with the innermost
     * bucket's key/doc_count pairs merged in as columns.
     *
     * @param aggregations ES aggregation tree (null yields an empty list)
     * @param fieldNames   bucket names from the outermost to the innermost level
     * @return flattened rows, one map per bucket-key combination
     */
    private static List<Map<String, Object>> parseAggregations(Aggregations aggregations, List<String> fieldNames) {
        Map<String, Object> aggregationMap = AggregationsParser.parseAggregations(aggregations);
        List<Map<String, Object>> flatList = new ArrayList<>();
        ObjectMapper mapper = new ObjectMapper();
        // Convert the map directly to a Jackson tree instead of serializing with
        // Hutool's JSONUtil and re-parsing the string with Jackson (the old
        // round-trip also leaked a System.out debug print and a dead IOException
        // catch — valueToTree throws no checked exception).
        JsonNode rootNode = mapper.valueToTree(aggregationMap);
        AggregationsParser.flatten(rootNode, fieldNames, flatList, new HashMap<>());
        return flatList;
    }

    /**
     * Recursively converts an aggregation tree into plain maps and lists.
     * Each terms / date-histogram aggregation becomes a list of bucket maps
     * ({"key": ..., "doc_count": ..., <sub-agg name>: [...]}) stored under the
     * aggregation name with any ".keyword" suffix stripped.
     *
     * Uses LinkedHashMap so the server-side bucket order (e.g. the explicit
     * BucketOrder.count(false) on the library terms agg) survives serialization;
     * the original HashMap discarded it.
     *
     * @param aggregations ES aggregation tree; null yields an empty map
     */
    private static Map<String, Object> parseAggregations(Aggregations aggregations) {
        Map<String, Object> result = new LinkedHashMap<>();

        if (aggregations == null) {
            return result;
        }

        for (Aggregation agg : aggregations) {
            if (agg instanceof Terms) {
                result.put(agg.getName().replace(".keyword", ""),
                        parseBuckets(((Terms) agg).getBuckets()));
            } else if (agg instanceof ParsedDateHistogram) {
                result.put(agg.getName().replace(".keyword", ""),
                        parseBuckets(((ParsedDateHistogram) agg).getBuckets()));
            }
            // Other aggregation types are ignored, as in the original — none are
            // built by this class.
        }
        return result;
    }

    /**
     * Converts one level of buckets (terms or histogram — both implement
     * MultiBucketsAggregation.Bucket) into maps, recursing into sub-aggregations.
     * This replaces the two copy-pasted bucket loops of the original.
     */
    private static List<Map<String, Object>> parseBuckets(List<? extends MultiBucketsAggregation.Bucket> buckets) {
        List<Map<String, Object>> bucketsList = new ArrayList<>(buckets.size());
        for (MultiBucketsAggregation.Bucket bucket : buckets) {
            Map<String, Object> bucketMap = new LinkedHashMap<>();
            // Recurse first so "key"/"doc_count" are added alongside sub-agg lists.
            if (!bucket.getAggregations().asList().isEmpty()) {
                bucketMap.putAll(parseAggregations(bucket.getAggregations()));
            }
            // Prefer the formatted key (e.g. "2024" for a yyyy histogram); fall
            // back to the raw key object when no string form is present.
            bucketMap.put("key", StringUtils.hasText(bucket.getKeyAsString())
                    ? bucket.getKeyAsString() : bucket.getKey());
            bucketMap.put("doc_count", bucket.getDocCount());
            bucketsList.add(bucketMap);
        }
        return bucketsList;
    }



    /**
     * Dispatches flattening over a node: a single object is processed directly,
     * an array is walked element-by-element (objects only); other node types
     * are ignored.
     */
    private static void flatten(JsonNode node, List<String> path, List<Map<String, Object>> result, Map<String, Object> currentRecord) {
        if (node.isObject()) {
            processElement(node, path, result, currentRecord);
            return;
        }
        if (!node.isArray()) {
            return;
        }
        for (JsonNode element : node) {
            if (element.isObject()) {
                processElement(element, path, result, currentRecord);
            }
        }
    }

    /**
     * Walks one level of the parsed aggregation tree. {@code path} lists bucket
     * names from the current level inwards; the last entry is the innermost
     * (library-count) aggregation. {@code currentRecord} accumulates the bucket
     * keys seen so far along this branch; completed rows are appended to
     * {@code result} by addLibraryData.
     */
    private static void processElement(JsonNode element, List<String> path, List<Map<String, Object>> result, Map<String, Object> currentRecord) {
        // Nothing left to resolve on this branch — return immediately.
        if (path.isEmpty()) return;

        // Name of the bucket level we are looking for at this depth.
        String currentLevel = path.get(0);
        // Scan every field of the node for one matching the current level.
        Iterator<Map.Entry<String, JsonNode>> fields = element.fields();

        while (fields.hasNext()) {
            Map.Entry<String, JsonNode> field = fields.next();
            // Only descend into the field whose name matches the current level.
            if (field.getKey().equals(currentLevel)) {
                JsonNode nextNode = field.getValue();
                // Only containers (array/object) can hold bucket structures.
                if (nextNode.isArray() || nextNode.isObject()) {
                    // Copy the record per iteration so sibling branches do not
                    // see each other's accumulated keys.
                    Map<String, Object> recordCopy = new HashMap<>(currentRecord);

                    // Object bucket: record its 'key' value under the level name.
                    // NOTE(review): assumes a "key" field is always present here
                    // (true for buckets produced by parseAggregations).
                    if (field.getValue().isObject()) {
                        recordCopy.put(currentLevel, field.getValue().get("key").asText());
                    }

                    // Array of buckets: either the innermost (library) level, or
                    // an intermediate level whose elements each start a branch.
                    if (field.getValue().isArray()) {
                        if (field.getKey().equals(path.get(path.size() - 1))){
                            addLibraryData(field.getValue(), recordCopy, result);
                        }else{
                            for (JsonNode item : field.getValue()) {
                                // Independent record copy per bucket element.
                                Map<String, Object> itemRecordCopy = new HashMap<>(recordCopy);
                                itemRecordCopy.put(currentLevel, item.get("key").asText());
                                // Recurse while levels remain; emit the row at the last level.
                                if (path.size() > 1) {
                                    flatten(item, path.subList(1, path.size()), result, itemRecordCopy);
                                } else {
                                    addLibraryData(item, itemRecordCopy, result);
                                }
                            }
                        }

                    } else {
                        // Single object bucket: recurse deeper, or emit the row.
                        if (path.size() > 1) {
                            flatten(nextNode, path.subList(1, path.size()), result, recordCopy);
                        } else {
                            addLibraryData(nextNode, recordCopy, result);
                        }
                    }
                }
            }
        }
    }

    /**
     * Merges the innermost "library" bucket(s) into {@code record} as
     * name → doc_count columns, then appends the record to {@code result}.
     *
     * @param libraryNodes either an array of bucket objects or a single bucket
     *                     object; null / other node types contribute no columns
     * @param record       accumulated outer bucket keys; mutated and appended
     * @param result       output row list
     */
    private static void addLibraryData(JsonNode libraryNodes, Map<String, Object> record, List<Map<String, Object>> result) {
        Map<String, Integer> libraryCounts = new HashMap<>();

        if (libraryNodes != null && libraryNodes.isArray()) {
            for (JsonNode library : libraryNodes) {
                accumulateBucket(library, libraryCounts);
            }
        } else if (libraryNodes != null && libraryNodes.isObject()) {
            // Fix: guard this branch too — the original only null-checked "key"
            // in the array branch and would NPE on a malformed object bucket
            // (and on a missing "doc_count" in either branch).
            accumulateBucket(libraryNodes, libraryCounts);
        }

        record.putAll(libraryCounts);
        result.add(record);
    }

    /** Adds one bucket's key → doc_count to counts, skipping malformed buckets. */
    private static void accumulateBucket(JsonNode bucket, Map<String, Integer> counts) {
        if (bucket == null || bucket.get("key") == null || bucket.get("doc_count") == null) {
            return;
        }
        counts.put(bucket.get("key").asText(), bucket.get("doc_count").asInt());
    }

    /**
     * Statistics query dimension (grouping axis for the browse-log report).
     */
    private enum DimensionEnum {
        year,
        month,
        department,
        subject,
        clazz,
        user,
        resource;

        /**
         * Looks up a dimension by its exact enum name.
         *
         * @param name candidate name (case-sensitive)
         * @return the matching constant, or null when none matches
         */
        public static DimensionEnum getEnum(String name) {
            return Arrays.stream(values())
                    .filter(dimension -> dimension.name().equals(name))
                    .findFirst()
                    .orElse(null);
        }
    }

    /**
     * Browse-statistics query VO (request parameters for the browse-log report).
     */
    @Data
    public static class ManageBrowseLogQuery {
        // NOTE(review): validation was commented out with broken syntax
        // ("@NotNull( = ...)"). Restore it as e.g.
        // @NotNull(message = "查询维度不能为空") once a bean-validation NotNull
        // (javax.validation / jakarta.validation) is imported — the file's
        // com.sun.istack.internal.NotNull is the wrong annotation and is not
        // available on JDK 9+.
        private DimensionEnum dimension;   // grouping axis; required by main's dispatch
        private LocalDate startDate;       // start of the reporting window — TODO confirm inclusivity
        private LocalDate endDate;         // end of the reporting window — TODO confirm inclusivity
        private String termName;
        private String subjectName;
        private String format;
        private Boolean isPC;              // presumably PC-vs-mobile flag — verify against caller
        private Integer orgId;
        private String clazz;              // class (named to avoid the Java keyword)
        private String department;
    }
}
