package com.chengyanan.elasticsearch;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.metrics.max.ParsedMax;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import java.util.HashMap;


public class ElasticSearchUtils {

    // Use a plain (non-formatter) logger: getFormatterLogger() treats every message as a
    // String.format pattern, which can corrupt log output (or throw) when logging raw JSON
    // that happens to contain '%' characters.
    private static final Logger logger = LogManager.getLogger(ElasticSearchUtils.class);

    /**
     * Called once at program startup.
     * Each document stored in ES records which Kafka topic partition it came from. This method
     * queries ES for the maximum offset already written per partition and returns a map of
     * (partition number -> largest offset found in ES), so that on restart consumption can
     * resume from each partition's max offset + 1.
     * Note: on the very first start, when the index contains no offsets for a partition,
     * the aggregation yields -9223372036854775808 (Long.MIN_VALUE) for that partition.
     *
     * @param index          the target index in ES
     * @param numOfPartition number of partitions of the Kafka topic
     * @return map of partition number to largest offset, or {@code null} if the query failed
     */
    public static HashMap<Integer,Long> QueryLargestOffset(String index, int numOfPartition){
        RestHighLevelClient client = null;
        try {
            HashMap<Integer,Long> partitionAndLargestOffset = new HashMap<>(numOfPartition);
            client = ElasticSearchPoolUtil.getClient();
            // Query ES for the current max offset of each partition 0..numOfPartition-1.
            for(int i=0;i<numOfPartition;i++){
                SearchRequest searchRequest = new SearchRequest(index);
                SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
                // "kafkaPartition" and "consumer_offset" below are field names in ES.
                searchSourceBuilder.query(QueryBuilders.matchPhraseQuery("kafkaPartition",i));
                // "maxOffset" is just an alias for this aggregation; any name works.
                searchSourceBuilder.aggregation(AggregationBuilders.max("maxOffset").field("consumer_offset"));
                searchRequest.source(searchSourceBuilder);
                SearchResponse search = client.search(searchRequest);
                ParsedMax maxOffset = search.getAggregations().get("maxOffset");
                // Stored as long for easy parsing in the caller; the aggregation value is
                // otherwise a double.
                partitionAndLargestOffset.put(i,(long) maxOffset.getValue());
            }
            return partitionAndLargestOffset;
        }catch (Exception e){
            logger.error("Failed to query largest offsets from index " + index, e);
        }finally {
            // May be called with a null client if getClient() itself failed;
            // NOTE(review): assumes returnClient tolerates null — same pattern as before.
            ElasticSearchPoolUtil.returnClient(client);
        }
        return null;
    }

    /**
     * Manual test entry point.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        HashMap<Integer,Long> es_analytics_data = QueryLargestOffset("es_analytics_data", 3);
        System.out.println(es_analytics_data);
    }

    /**
     * Indexes a JSON document into Elasticsearch (doc type "doc").
     * The pooled client is always returned, even when indexing throws; the previous
     * version leaked the client on any exception because returnClient was only
     * reached on the success path.
     *
     * @param jsonString the document body as a JSON string
     * @param index      the target index in ES
     */
    public static void indexForJson(String jsonString,String index){
        RestHighLevelClient client = null;
        try {
            IndexRequest indexRequest = new IndexRequest(index,"doc");
            indexRequest.source(jsonString, XContentType.JSON);
            client = ElasticSearchPoolUtil.getClient();
            client.index(indexRequest);
            logger.info("ES中写入数据: "+ jsonString);
        }catch (Exception e){
            logger.error("Failed to index document into " + index, e);
        }finally {
            ElasticSearchPoolUtil.returnClient(client);
        }
    }

}
