package com.aurora.advanced;

import com.alibaba.fastjson.JSONObject;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.xcontent.XContentType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Objects;

/**
 * 描述：自定义elasticsearch sink 算子函数
 * ElasticsearchSinkFunction 是用于将数据流写入 Elasticsearch 的接口。
 * 它允许您自定义如何将 Flink 流式处理的数据写入 Elasticsearch 索引
 *
 * @author 浅夏的猫
 * @version 1.0.0
 * @date 2024-02-12 23:49:22
 */
public class CustomElasticsearchSinkFunction implements ElasticsearchSinkFunction<JSONObject> {

    private static final Logger logger = LoggerFactory.getLogger(CustomElasticsearchSinkFunction.class);

    /**
     * Index configuration (index prefix and document type). Final: assigned once at
     * construction and shipped with the serialized sink to the task managers.
     */
    private final ElasticsearchEntity elasticsearchEntity;

    /**
     * @param elasticsearchEntity index configuration for this sink; must not be null
     * @throws NullPointerException if {@code elasticsearchEntity} is null — fail fast at
     *         construction rather than with an NPE inside {@link #process} on a task manager
     */
    public CustomElasticsearchSinkFunction(ElasticsearchEntity elasticsearchEntity) {
        this.elasticsearchEntity = Objects.requireNonNull(elasticsearchEntity, "elasticsearchEntity");
    }

    /**
     * Builds an index request for one stream element and hands it to the indexer.
     * <p>
     * The target index is {@code indexPrefix + tradeTime} (one index per trade time bucket)
     * and the document id is the element's {@code transId}, so re-processing the same
     * element overwrites rather than duplicates the document.
     * <p>
     * Note: {@link RequestIndexer#add} only enqueues the request; actual delivery to
     * Elasticsearch happens asynchronously in the sink's bulk processor.
     *
     * @param element        the JSON record from the stream; expected to carry
     *                       {@code transId} and {@code tradeTime} string fields
     * @param runtimeContext Flink runtime context (task name, parallelism, state access)
     * @param indexer        collector that buffers requests for bulk delivery to Elasticsearch
     */
    @Override
    public void process(JSONObject element, RuntimeContext runtimeContext, RequestIndexer indexer) {
        String transId = element.getString("transId");
        String tradeTime = element.getString("tradeTime");
        String index = elasticsearchEntity.getIndexPrefix() + tradeTime;
        // Fix: the first placeholder is the transaction id — the original logged tradeTime here.
        logger.info("交易流水={},数据写入索引{}成功", transId, index);
        IndexRequest indexRequest = Requests.indexRequest()
                .index(index)
                .type(elasticsearchEntity.getType())
                .id(transId)
                .source(element, XContentType.JSON);
        indexer.add(indexRequest);
    }
}
