package com.flink.hbase.sql2hdfs;

import com.flink.hbase.sql2hdfs.config.HBaseConnectionConfig;
import com.flink.hbase.sql2hdfs.model.UserData;
import com.flink.hbase.sql2hdfs.udf.AgeGroupUDF;
import com.flink.hbase.sql2hdfs.udf.DataProcessUDF;
import com.flink.hbase.sql2hdfs.udf.SalaryLevelUDF;
import com.flink.hbase.sql2hdfs.udf.UserLevelUDF;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.file.sink.FileSink;
import org.apache.flink.core.fs.Path;
import org.apache.flink.formats.parquet.ParquetWriterFactory;
import org.apache.flink.formats.parquet.avro.ParquetAvroWriters;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig;
import org.apache.flink.streaming.api.functions.sink.filesystem.PartFileInfo;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.DateTimeBucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.DataTypes;
import org.apache.flink.types.Row;
import org.apache.hadoop.conf.Configuration;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.time.Duration;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Properties;

/**
 * Flink SQL job that reads user rows from HBase and writes them to HDFS as Parquet.
 *
 * <p>Pipeline:
 * <ol>
 *   <li>Read HBase data through a Flink SQL external table (Kerberos supported).</li>
 *   <li>Clean and enrich the rows with registered UDFs (levels, age groups, ...).</li>
 *   <li>Run the transformation as a SQL query on the Table API.</li>
 *   <li>Convert the result Table into a {@code DataStream<UserData>}.</li>
 *   <li>Write the stream to HDFS via a Parquet {@code FileSink}.</li>
 * </ol>
 */
public class FlinkSqlHBaseToHdfsJob {
    private static final Logger LOG = LoggerFactory.getLogger(FlinkSqlHBaseToHdfsJob.class);

    private final StreamExecutionEnvironment env;
    private final StreamTableEnvironment tableEnv;
    private final HBaseConnectionConfig hbaseConfig;
    private final String hdfsOutputPath;
    private final String checkpointPath;

    /**
     * Creates the job and initializes the Flink streaming and table environments.
     *
     * @param hbaseConfig    HBase connection settings (Zookeeper quorum, table name, Kerberos, ...)
     * @param hdfsOutputPath base HDFS directory for the Parquet output
     * @param checkpointPath checkpoint storage URI; may be {@code null}/blank to disable checkpointing
     */
    public FlinkSqlHBaseToHdfsJob(HBaseConnectionConfig hbaseConfig, String hdfsOutputPath, String checkpointPath) {
        this.hbaseConfig = hbaseConfig;
        this.hdfsOutputPath = hdfsOutputPath;
        this.checkpointPath = checkpointPath;

        // Initialize the Flink streaming environment.
        this.env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Create the table environment in streaming mode.
        EnvironmentSettings settings = EnvironmentSettings
            .newInstance()
            .inStreamingMode()
            .build();
        this.tableEnv = StreamTableEnvironment.create(env, settings);

        configureEnvironment();
    }

    /**
     * Configures parallelism, checkpointing, state backend, and restart strategy.
     */
    private void configureEnvironment() {
        env.setParallelism(4);

        // Enable checkpointing only when a checkpoint path was supplied.
        if (checkpointPath != null && !checkpointPath.trim().isEmpty()) {
            env.enableCheckpointing(60000); // 60-second interval
            env.getCheckpointConfig().setCheckpointStorage(checkpointPath);
            env.getCheckpointConfig().setMinPauseBetweenCheckpoints(30000);
            env.getCheckpointConfig().setCheckpointTimeout(600000);
        }

        // Configure the state backend. NOTE(review): RocksDBStateBackend is deprecated
        // in newer Flink releases in favor of EmbeddedRocksDBStateBackend + separate
        // checkpoint storage — consider migrating when upgrading Flink.
        try {
            env.setStateBackend(new org.apache.flink.contrib.streaming.state.RocksDBStateBackend(
                checkpointPath != null ? checkpointPath : "file:///tmp/flink-checkpoints"));
        } catch (Exception e) {
            // Best effort: fall back to the default state backend rather than failing the job.
            LOG.warn("Failed to set RocksDB state backend, using default: {}", e.getMessage());
        }

        // Restart up to 3 times with a 10-second delay between attempts.
        env.setRestartStrategy(org.apache.flink.api.common.restartstrategy.RestartStrategies
            .fixedDelayRestart(3, org.apache.flink.api.common.time.Time.seconds(10)));

        LOG.info("Flink environment configured - Parallelism: {}, Checkpoint: {}", 
                env.getParallelism(), checkpointPath);
    }

    /**
     * Registers the custom scalar UDFs used by the processing SQL.
     */
    private void registerUDFs() {
        LOG.info("Registering UDF functions...");

        tableEnv.createTemporaryFunction("user_level", UserLevelUDF.class);
        tableEnv.createTemporaryFunction("age_group", AgeGroupUDF.class);
        tableEnv.createTemporaryFunction("salary_level", SalaryLevelUDF.class);
        tableEnv.createTemporaryFunction("process_time", DataProcessUDF.class);

        LOG.info("UDF functions registered successfully");
    }

    /**
     * Creates the {@code hbase_users} external table backed by the HBase connector,
     * including optional Kerberos and client-tuning properties.
     */
    private void createHBaseTable() {
        LOG.info("Creating HBase external table...");

        // Assemble the WITH (...) connector options for the DDL.
        StringBuilder connectorOptions = new StringBuilder();
        connectorOptions.append("'connector' = 'hbase-2.2'");
        connectorOptions.append(", 'table-name' = '").append(hbaseConfig.getHbaseTableName()).append("'");
        connectorOptions.append(", 'zookeeper.quorum' = '").append(hbaseConfig.getZookeeperQuorum()).append("'");
        connectorOptions.append(", 'zookeeper.znode.parent' = '").append(hbaseConfig.getHbaseZnodeParent()).append("'");

        // Kerberos authentication options.
        if (hbaseConfig.isKerberosEnabled()) {
            connectorOptions.append(", 'properties.hbase.security.authentication' = '")
                           .append(hbaseConfig.getHbaseSecurityAuth()).append("'");
            connectorOptions.append(", 'properties.hadoop.security.authentication' = 'kerberos'");

            if (hbaseConfig.getKrb5ConfPath() != null) {
                connectorOptions.append(", 'properties.java.security.krb5.conf' = '")
                               .append(hbaseConfig.getKrb5ConfPath()).append("'");
            }

            if (hbaseConfig.getKeytabPath() != null && hbaseConfig.getPrincipal() != null) {
                connectorOptions.append(", 'properties.hbase.security.auth.enable' = 'true'");
            }
        }

        // HBase client performance tuning.
        connectorOptions.append(", 'properties.hbase.client.connection.pool.size' = '")
                       .append(hbaseConfig.getConnectionPoolSize()).append("'");
        connectorOptions.append(", 'properties.hbase.client.retries.number' = '")
                       .append(hbaseConfig.getMaxRetries()).append("'");
        connectorOptions.append(", 'properties.hbase.client.scanner.caching' = '1000'");

        // DDL for the HBase-backed table: rowkey plus one column family ("info").
        String createTableDDL = String.format(
            "CREATE TABLE hbase_users (\n" +
            "  rowkey STRING,\n" +
            "  info ROW<\n" +
            "    user_name STRING,\n" +
            "    age INT,\n" +
            "    gender STRING,\n" +
            "    email STRING,\n" +
            "    phone STRING,\n" +
            "    address STRING,\n" +
            "    city STRING,\n" +
            "    country STRING,\n" +
            "    department STRING,\n" +
            "    position STRING,\n" +
            "    salary DOUBLE,\n" +
            "    create_time TIMESTAMP(3),\n" +
            "    update_time TIMESTAMP(3)\n" +
            "  >,\n" +
            "  PRIMARY KEY (rowkey) NOT ENFORCED\n" +
            ") WITH (\n" +
            "  %s\n" +
            ")", connectorOptions.toString());

        LOG.info("Creating HBase table with DDL:\n{}", createTableDDL);
        tableEnv.executeSql(createTableDDL);

        LOG.info("HBase external table created successfully");
    }

    /**
     * Runs the cleaning/enrichment SQL against the HBase table and converts the
     * result to a {@code DataStream<UserData>}.
     *
     * @return the processed user-data stream
     */
    private DataStream<UserData> processData() {
        LOG.info("Processing data with UDF functions...");

        // NOTE(review): the dotted invocations below (e.g. process_time.cleanUserName(...))
        // are not standard Flink SQL syntax for a registered scalar function — a function
        // registered as "process_time" is normally called as process_time(...). Verify this
        // against the DataProcessUDF implementation; fixing it requires knowing the UDF's
        // methods, which are defined elsewhere.
        String processingSQL = 
            "SELECT \n" +
            "  rowkey as user_id,\n" +
            "  process_time.cleanUserName(info.user_name) as user_name,\n" +
            "  process_time.cleanAge(info.age) as age,\n" +
            "  info.gender,\n" +
            "  process_time.cleanEmail(info.email) as email,\n" +
            "  process_time.cleanPhone(info.phone) as phone,\n" +
            "  process_time.cleanAddress(info.address) as address,\n" +
            "  info.city,\n" +
            "  info.country,\n" +
            "  info.department,\n" +
            "  info.position,\n" +
            "  process_time.cleanSalary(info.salary) as salary,\n" +
            "  info.create_time,\n" +
            "  info.update_time,\n" +
            "  -- UDF 计算的字段\n" +
            "  user_level(info.age, info.salary, info.department, info.position) as user_level,\n" +
            "  age_group(info.age) as age_group,\n" +
            "  salary_level(info.salary, info.city) as salary_level,\n" +
            "  process_time.getCurrentProcessTime() as process_time,\n" +
            "  process_time.getCurrentPartitionDate() as partition_date\n" +
            "FROM hbase_users\n" +
            "WHERE info.user_name IS NOT NULL\n" +
            "  AND process_time.isValidAge(info.age) = true";

        LOG.info("Executing processing SQL:\n{}", processingSQL);

        Table resultTable = tableEnv.sqlQuery(processingSQL);

        // Bridge from the Table API back to the DataStream API.
        DataStream<Row> rowStream = tableEnv.toDataStream(resultTable);

        // Map each Row to a UserData POJO.
        DataStream<UserData> userDataStream = rowStream.map(UserData::fromRow)
            .returns(UserData.class)
            .name("Convert-to-UserData");

        LOG.info("Data processing completed, converted to UserData stream");

        return userDataStream;
    }

    /**
     * Builds the Parquet {@code FileSink} that writes {@link UserData} records to HDFS,
     * bucketed by day ({@code yyyy-MM-dd}).
     *
     * @return the configured sink
     */
    private FileSink<UserData> createParquetSink() {
        LOG.info("Creating Parquet FileSink for HDFS output: {}", hdfsOutputPath);

        // Derive the Avro schema from the UserData POJO via reflection. The previous
        // version hand-built a GenericRecord schema and used forGenericRecord(schema),
        // which produces a ParquetWriterFactory<GenericRecord> and cannot be assigned
        // to ParquetWriterFactory<UserData>.
        ParquetWriterFactory<UserData> parquetWriterFactory =
            ParquetAvroWriters.forReflectRecord(UserData.class);

        // Bulk formats such as Parquet can only roll part files on checkpoints, so the
        // sink must use OnCheckpointRollingPolicy — a size/time DefaultRollingPolicy is
        // not accepted by the bulk-format builder. With the 60-second checkpoint
        // interval configured above, files roll roughly once a minute.
        FileSink<UserData> fileSink = FileSink
            .forBulkFormat(new Path(hdfsOutputPath), parquetWriterFactory)
            .withBucketAssigner(new DateTimeBucketAssigner<>("yyyy-MM-dd"))
            .withRollingPolicy(
                org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies
                    .OnCheckpointRollingPolicy.build())
            .withOutputFileConfig(
                OutputFileConfig.builder()
                    .withPartPrefix("user_data")
                    .withPartSuffix(".parquet")
                    .build())
            .build();

        LOG.info("Parquet FileSink created (rolls on every checkpoint)");

        return fileSink;
    }

    /**
     * Wires the pipeline together and submits the job for execution.
     *
     * @throws Exception if job submission or execution fails
     */
    public void run() throws Exception {
        LOG.info("Starting Flink SQL HBase to HDFS job...");

        try {
            // 1. Register UDFs.
            registerUDFs();

            // 2. Create the HBase external table.
            createHBaseTable();

            // 3. Process the data.
            DataStream<UserData> processedStream = processData();

            // 4. Create the Parquet sink.
            FileSink<UserData> parquetSink = createParquetSink();

            // 5. Write to HDFS with a lower write parallelism.
            processedStream
                .sinkTo(parquetSink)
                .name("Write-to-HDFS-Parquet")
                .setParallelism(2);

            // 6. Execute the job.
            LOG.info("Submitting job for execution...");
            env.execute("Flink-SQL-HBase-to-HDFS-Parquet");

        } catch (Exception e) {
            LOG.error("Job execution failed", e);
            throw e;
        }
    }

    /**
     * Entry point.
     *
     * <p>Args: {@code <zk-quorum> <table-name> <hdfs-output-path> [checkpoint-path]
     * [kerberos-principal] [keytab-path]}
     */
    public static void main(String[] args) throws Exception {
        if (args.length < 3) {
            System.err.println("Usage: FlinkSqlHBaseToHdfsJob <zk-quorum> <table-name> <hdfs-output-path> [checkpoint-path] [kerberos-principal] [keytab-path]");
            System.exit(1);
        }

        String zkQuorum = args[0];
        String tableName = args[1];
        String hdfsOutputPath = args[2];
        String checkpointPath = args.length > 3 ? args[3] : null;
        String principal = args.length > 4 ? args[4] : null;
        String keytabPath = args.length > 5 ? args[5] : null;

        // Build the HBase configuration; "info" is the single column family read.
        HBaseConnectionConfig.Builder configBuilder = new HBaseConnectionConfig.Builder(
            zkQuorum, tableName, "info");

        // Enable Kerberos only when both principal and keytab were supplied.
        if (principal != null && keytabPath != null) {
            configBuilder.enableKerberos(principal, keytabPath);
        }

        HBaseConnectionConfig hbaseConfig = configBuilder.build();

        FlinkSqlHBaseToHdfsJob job = new FlinkSqlHBaseToHdfsJob(hbaseConfig, hdfsOutputPath, checkpointPath);
        job.run();
    }
}