package com.example.yckjbigdataflink.job;


import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.AsyncDataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.async.ResultFuture;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

import java.util.Collections;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

/**
 * Flink streaming job that enriches a stream of HBase row keys with values
 * fetched via non-blocking Get lookups through the HBase async client.
 *
 * <p>Requires a reachable ZooKeeper quorum. For Kerberos-secured clusters a
 * login (e.g. via UGI) must have been performed at the program entry point
 * before this job starts — NOTE(review): not shown here, confirm in deployment.
 */
public class FlinkAsyncHBaseQueryJob {

    public static void main(String[] args) throws Exception {
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // The input is assumed to be a stream of HBase row keys; a fixed set
        // of sample keys is used here for demonstration.
        DataStreamSource<String> rowKeyStream = env.fromElements("rowKey1", "rowKey2", "rowKey3");

        // Issue the lookups asynchronously so a slow or failed Get does not
        // stall the pipeline; results may be emitted out of input order.
        SingleOutputStreamOperator<String> resultStream = AsyncDataStream.unorderedWait(
                rowKeyStream,
                new AsyncHBaseRequestFunction(),
                10000,                 // per-request timeout, in milliseconds
                TimeUnit.MILLISECONDS,
                100                    // max in-flight async requests; tune to available resources
        );

        resultStream.print();

        env.execute("Flink Async HBase Query Job");
    }

    /**
     * Async I/O function performing one single-column Get per incoming row key.
     *
     * <p>Best-effort semantics: a failed lookup is surfaced as an
     * {@code "ERROR:..."} sentinel record and a missing row as {@code "EMPTY"},
     * rather than failing the job. A found row is emitted as
     * {@code rowKey + ":" + value}.
     */
    public static class AsyncHBaseRequestFunction extends org.apache.flink.streaming.api.functions.async.RichAsyncFunction<String, String> {

        /** Target table; externalize to job configuration for production use. */
        private static final String TABLE_NAME = "your_table_name";
        // Hoisted once: avoids re-encoding the family/qualifier bytes for
        // every record in the asyncInvoke hot path.
        private static final byte[] COLUMN_FAMILY = Bytes.toBytes("cf");
        private static final byte[] QUALIFIER = Bytes.toBytes("qualifier");

        private transient AsyncConnection connection;
        private transient AsyncTable<AdvancedScanResultConsumer> asyncTable;
        private transient ExecutorService executorService;

        @Override
        public void open(Configuration parameters) throws Exception {
            super.open(parameters);

            org.apache.hadoop.conf.Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "zk1,zk2,zk3");
            conf.set("hbase.zookeeper.property.clientPort", "2181");
            conf.set("hbase.security.authentication", "kerberos");
            // Kerberos credentials must already be available (manual login at
            // the program entry point, or a valid ticket cache).

            // Blocking once during open() is acceptable: this establishes the
            // shared async connection used by all subsequent lookups.
            connection = ConnectionFactory.createAsyncConnection(conf).get();

            asyncTable = connection.getTable(TableName.valueOf(TABLE_NAME));

            // Runs only the lightweight completion callbacks; size to resources.
            executorService = Executors.newFixedThreadPool(20);
        }

        @Override
        public void close() throws Exception {
            // try/finally: the executor and super.close() must run even if
            // closing the HBase connection throws.
            try {
                if (connection != null) {
                    connection.close();
                }
            } finally {
                if (executorService != null) {
                    executorService.shutdown();
                    // Give in-flight callbacks a grace period, then force-stop.
                    if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) {
                        executorService.shutdownNow();
                    }
                }
                super.close();
            }
        }

        @Override
        public void asyncInvoke(String rowKey, ResultFuture<String> resultFuture) throws Exception {
            // Non-blocking single-column Get for this row key.
            Get get = new Get(Bytes.toBytes(rowKey));
            get.addColumn(COLUMN_FAMILY, QUALIFIER);

            asyncTable.get(get).whenCompleteAsync((result, throwable) -> {
                if (throwable != null) {
                    // Best-effort: emit a sentinel record instead of failing
                    // the whole job on a single lookup error.
                    resultFuture.complete(Collections.singleton("ERROR:" + throwable.getMessage()));
                } else if (result.isEmpty()) {
                    resultFuture.complete(Collections.singleton("EMPTY"));
                } else {
                    String val = Bytes.toString(result.getValue(COLUMN_FAMILY, QUALIFIER));
                    resultFuture.complete(Collections.singleton(rowKey + ":" + val));
                }
            }, executorService);
        }
    }
}

