package com.flink.hbase.multithreaded;

import com.flink.hbase.User;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.util.Collector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

/**
 * Launcher for the multithreaded HBase read/write job.
 * Demonstrates wiring a multithreaded HBase source and sink into a Flink streaming pipeline.
 */
public class MultithreadedJobLauncher {
    private static final Logger LOG = LoggerFactory.getLogger(MultithreadedJobLauncher.class);

    /**
     * Job entry point: parses and validates parameters, builds the pipeline
     * (HBase source → cleansing step → HBase sink) and executes it.
     *
     * @param args command-line arguments in {@code --key value} form; see {@link #printUsage()}
     * @throws Exception if a required parameter is missing or job execution fails
     */
    public static void main(String[] args) throws Exception {
        // Parse arguments
        ParameterTool params = ParameterTool.fromArgs(args);

        // Validate required parameters; on failure, show usage before propagating.
        // (Fix: printUsage() was previously dead code — it was never called.)
        try {
            validateParameters(params);
        } catch (IllegalArgumentException e) {
            printUsage();
            throw e;
        }

        // Create execution environment
        StreamExecutionEnvironment env = createExecutionEnvironment(params);

        // Build HBase connection configuration
        Map<String, String> hbaseConfig = createHBaseConfig(params);

        // Create the source data stream
        DataStream<User> userStream = createUserStream(env, params, hbaseConfig);

        // Cleanse / transform records
        DataStream<User> processedStream = processUserStream(userStream);

        // Write to the target table
        writeToHBase(processedStream, params, hbaseConfig);

        // Submit the job
        env.execute("Multithreaded HBase Read-Write Job");
    }

    /**
     * Ensures all required parameters are present. When Kerberos security is
     * enabled, the principal and keytab parameters are also required.
     *
     * @param params parsed command-line parameters
     * @throws IllegalArgumentException if any required parameter is missing
     */
    private static void validateParameters(ParameterTool params) {
        String[] requiredParams = {
            "source.table", "source.column.family",
            "sink.table", "sink.column.family",
            "hbase.zookeeper.quorum"
        };

        for (String param : requiredParams) {
            if (!params.has(param)) {
                throw new IllegalArgumentException("Missing required parameter: " + param);
            }
        }

        // Fix: previously, enabling Kerberos without these parameters silently
        // inserted null values into the HBase config map, failing much later
        // with an opaque NPE instead of a clear error here.
        if (params.getBoolean("security.kerberos.enabled", false)) {
            String[] kerberosParams = {"security.kerberos.principal", "security.kerberos.keytab"};
            for (String param : kerberosParams) {
                if (!params.has(param)) {
                    throw new IllegalArgumentException(
                        "Missing required parameter (Kerberos enabled): " + param);
                }
            }
        }

        LOG.info("Parameter validation completed successfully");
    }

    /**
     * Creates and configures the Flink execution environment: parallelism,
     * restart strategy, checkpointing and state backend.
     *
     * @param params parsed command-line parameters
     * @return a configured {@link StreamExecutionEnvironment}
     */
    private static StreamExecutionEnvironment createExecutionEnvironment(ParameterTool params) {
        Configuration config = new Configuration();
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(config);

        // Make parameters visible to all operators via the runtime context
        env.getConfig().setGlobalJobParameters(params);

        // Parallelism
        int parallelism = params.getInt("parallelism", 4);
        env.setParallelism(parallelism);

        // Fixed-delay restart strategy
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(
            params.getInt("restart.attempts", 3),
            org.apache.flink.api.common.time.Time.seconds(params.getInt("restart.delay", 10))
        ));

        // Checkpointing (exactly-once). Min-pause and timeout were previously
        // hard-coded; now configurable with the same defaults.
        if (params.getBoolean("checkpoint.enabled", true)) {
            long checkpointInterval = params.getLong("checkpoint.interval", 60000);
            env.enableCheckpointing(checkpointInterval, CheckpointingMode.EXACTLY_ONCE);
            env.getCheckpointConfig().setMinPauseBetweenCheckpoints(
                params.getLong("checkpoint.min.pause", 5000));
            env.getCheckpointConfig().setCheckpointTimeout(
                params.getLong("checkpoint.timeout", 60000));
        }

        // In-memory state backend (suitable for small state)
        env.setStateBackend(new HashMapStateBackend());

        LOG.info("Execution environment created with parallelism: {}", parallelism);
        return env;
    }

    /**
     * Builds the HBase client configuration map from job parameters:
     * connection settings, client tuning, and optional Kerberos security.
     *
     * @param params parsed command-line parameters (validated beforehand)
     * @return HBase property name → value map
     */
    private static Map<String, String> createHBaseConfig(ParameterTool params) {
        Map<String, String> hbaseConfig = new HashMap<>();

        // Connection settings
        hbaseConfig.put("hbase.zookeeper.quorum", params.get("hbase.zookeeper.quorum"));
        hbaseConfig.put("hbase.zookeeper.property.clientPort", params.get("hbase.zookeeper.port", "2181"));
        hbaseConfig.put("hbase.rootdir", params.get("hbase.rootdir", "/hbase"));
        hbaseConfig.put("hbase.cluster.distributed", params.get("hbase.cluster.distributed", "true"));

        // Client tuning
        hbaseConfig.put("hbase.client.connection.pool.size", params.get("hbase.client.pool.size", "10"));
        hbaseConfig.put("hbase.client.write.buffer", params.get("hbase.client.write.buffer", "2097152"));
        hbaseConfig.put("hbase.client.scanner.caching", params.get("hbase.client.scanner.caching", "100"));
        hbaseConfig.put("hbase.client.scanner.timeout.period", params.get("hbase.client.scanner.timeout", "60000"));

        // Kerberos security. Principal/keytab presence is guaranteed by
        // validateParameters, so no nulls can reach the map here.
        if (params.getBoolean("security.kerberos.enabled", false)) {
            hbaseConfig.put("hbase.security.authentication", "kerberos");
            hbaseConfig.put("hbase.security.authorization", "true");
            hbaseConfig.put("hbase.security.authentication.principal", params.get("security.kerberos.principal"));
            hbaseConfig.put("hbase.security.authentication.keytab", params.get("security.kerberos.keytab"));
        }

        LOG.info("HBase configuration created with {} properties", hbaseConfig.size());
        return hbaseConfig;
    }

    /**
     * Creates the user data stream backed by the multithreaded HBase source.
     *
     * @param env         the Flink execution environment
     * @param params      parsed command-line parameters
     * @param hbaseConfig HBase client configuration
     * @return stream of {@link User} records read from the source table
     */
    private static DataStream<User> createUserStream(StreamExecutionEnvironment env,
                                                   ParameterTool params,
                                                   Map<String, String> hbaseConfig) {

        // Source configuration
        String sourceTable = params.get("source.table");
        String sourceColumnFamily = params.get("source.column.family");
        int pageSize = params.getInt("source.page.size", 1000);
        int asyncThreads = params.getInt("source.async.threads", 4);
        long scanTimeout = params.getLong("source.scan.timeout", 30000);

        // Build the source
        UserMultithreadedSource source = UserMultithreadedSource.create(
            sourceTable, sourceColumnFamily, pageSize, asyncThreads, scanTimeout, hbaseConfig
        );

        // Attach to the pipeline with a stable uid for savepoint compatibility
        DataStream<User> userStream = env.addSource(source)
            .name("HBase Multithreaded Source")
            .uid("hbase-source");

        LOG.info("User stream created from table: {}", sourceTable);
        return userStream;
    }

    /**
     * Cleanses the user stream: drops null records and records failing basic
     * field validation (non-empty id/name, age within [0, 150]).
     *
     * @param userStream raw user stream from the source
     * @return filtered stream containing only valid users
     */
    private static DataStream<User> processUserStream(DataStream<User> userStream) {
        return userStream
            .process(new ProcessFunction<User, User>() {
                @Override
                public void processElement(User user, Context ctx, Collector<User> out) throws Exception {
                    // Data cleansing: forward only valid records
                    if (user != null && isValidUser(user)) {
                        // Additional business logic could be added here
                        out.collect(user);
                    }
                }

                // Basic sanity checks on required fields and value ranges.
                private boolean isValidUser(User user) {
                    return user.getUserId() != null && !user.getUserId().isEmpty()
                        && user.getName() != null && !user.getName().trim().isEmpty()
                        && user.getAge() >= 0 && user.getAge() <= 150;
                }
            })
            .name("User Data Processing")
            .uid("user-processing");
    }

    /**
     * Attaches the multithreaded HBase sink, writing the stream to the target table.
     *
     * @param userStream  processed user stream
     * @param params      parsed command-line parameters
     * @param hbaseConfig HBase client configuration
     */
    private static void writeToHBase(DataStream<User> userStream,
                                   ParameterTool params,
                                   Map<String, String> hbaseConfig) {

        // Sink configuration
        String sinkTable = params.get("sink.table");
        String sinkColumnFamily = params.get("sink.column.family");
        int batchSize = params.getInt("sink.batch.size", 100);
        int asyncThreads = params.getInt("sink.async.threads", 4);
        long flushInterval = params.getLong("sink.flush.interval", 10000);

        // Build the sink
        UserMultithreadedSink sink = UserMultithreadedSink.create(
            sinkTable, sinkColumnFamily, batchSize, asyncThreads, flushInterval, hbaseConfig
        );

        // Attach to the pipeline with a stable uid for savepoint compatibility
        userStream
            .addSink(sink)
            .name("HBase Multithreaded Sink")
            .uid("hbase-sink");

        LOG.info("User stream will be written to table: {}", sinkTable);
    }

    /**
     * Prints command-line usage to stdout. Invoked from {@link #main} when
     * parameter validation fails.
     */
    public static void printUsage() {
        System.out.println("Usage: MultithreadedJobLauncher [OPTIONS]");
        System.out.println("Required parameters:");
        System.out.println("  --source.table <table_name>           Source HBase table name");
        System.out.println("  --source.column.family <cf>           Source column family");
        System.out.println("  --sink.table <table_name>             Sink HBase table name");
        System.out.println("  --sink.column.family <cf>             Sink column family");
        System.out.println("  --hbase.zookeeper.quorum <hosts>      HBase ZooKeeper quorum");
        System.out.println("");
        System.out.println("Optional parameters:");
        System.out.println("  --parallelism <number>                Parallelism (default: 4)");
        System.out.println("  --source.page.size <size>             Source page size (default: 1000)");
        System.out.println("  --source.async.threads <threads>      Source async threads (default: 4)");
        System.out.println("  --sink.batch.size <size>              Sink batch size (default: 100)");
        System.out.println("  --sink.async.threads <threads>        Sink async threads (default: 4)");
        System.out.println("  --security.kerberos.enabled <bool>    Enable Kerberos (default: false)");
        System.out.println("  --checkpoint.enabled <bool>           Enable checkpointing (default: true)");
        System.out.println("");
        System.out.println("Example:");
        System.out.println("  java -cp flink-hbase-1.0-SNAPSHOT.jar com.flink.hbase.multithreaded.MultithreadedJobLauncher \\");
        System.out.println("    --source.table user_table \\");
        System.out.println("    --source.column.family cf \\");
        System.out.println("    --sink.table user_backup \\");
        System.out.println("    --sink.column.family cf \\");
        System.out.println("    --hbase.zookeeper.quorum zk1,zk2,zk3 \\");
        System.out.println("    --parallelism 8 \\");
        System.out.println("    --source.async.threads 6 \\");
        System.out.println("    --sink.async.threads 6");
    }
}