package com.bigdata.hudi;

import lombok.extern.log4j.Log4j;
import lombok.extern.log4j.Log4j2;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hudi.avro.model.HoodieCleanMetadata;
import org.apache.hudi.client.SparkRDDWriteClient;
import org.apache.hudi.client.common.HoodieSparkEngineContext;
import org.apache.hudi.common.model.HoodieCleaningPolicy;
import org.apache.hudi.common.util.JsonUtils;
import org.apache.hudi.config.HoodieCleanConfig;
import org.apache.hudi.config.HoodieWriteConfig;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import static jdk.nashorn.internal.runtime.regexp.joni.Config.log;

/**
 * Example operations on an Apache Hudi table with Spark: writing data,
 * cleaning obsolete file versions, and altering the table schema.
 *
 * @author Cyber
 * <p> Created on 2025/2/26
 * @version 1.0
 */
@Log4j2
public class SparkHudiOperations {

    /** Hudi table name as registered in the metastore. */
    private static final String tableName = "hudi_table";

    /** HDFS base path of the Hudi table. */
    private static final String tablePath = "hdfs://master:9000/user/hive/warehouse/myhudi.db/hudi_table";

    public static void main(String[] args) {
        // Uncomment exactly one operation to run it:
//        hudiTable_Writer();
        // Clean obsolete file versions from the Hudi table
//        hudiTableClear_IsolateFiles();
        // ALTER TABLE (schema evolution) operations
//        hudiTable_Alter();
    }

    /**
     * Demonstrates Hudi schema evolution via Spark SQL: drops column {@code ext1},
     * re-adds it, reads the table back, then drops it again.
     * Requires {@code hoodie.schema.on.read.enable=true} on the session.
     */
    private static void hudiTable_Alter() {
        log.info("hudiTable_Alter start...");

        SparkSession spark = SparkSession.builder()
                .appName("Hudi table Java Example")
                .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
                .config("spark.sql.catalog.spark_catalog", "org.apache.spark.sql.hudi.catalog.HoodieCatalog")
                .config("spark.kryo.registrator", "org.apache.spark.HoodieSparkKryoRegistrar")
                .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
                .master("local[*]")
                // Required for Hudi schema-on-read (ADD/DROP/RENAME COLUMNS)
                .config("hoodie.schema.on.read.enable", "true")
                .enableHiveSupport()
                .getOrCreate();

        try {
            spark.sql("use spark_catalog");
            spark.sql("use myhudi");

            Dataset<org.apache.spark.sql.Row> dropResult = spark.sql("alter table hudi_table DROP COLUMNS(ext1)");
            dropResult.show();
            log.info("=============hudiTable_Alter drop columns completed.=================");

            Dataset<org.apache.spark.sql.Row> addResult = spark.sql("alter table hudi_table ADD COLUMNS(ext1 string)");
            addResult.show();
            log.info("-------------hudiTable_Alter add columns completed.------------------");

            Dataset<org.apache.spark.sql.Row> data = spark.sql("select * from hudi_table");
            data.show();

            Dataset<org.apache.spark.sql.Row> secondDropResult = spark.sql("alter table hudi_table DROP COLUMNS(ext1)");
            secondDropResult.show();
            log.info("=============hudiTable_Alter drop columns completed.=================");
        } finally {
            // FIX: the session was previously never stopped, leaking the local Spark context.
            spark.stop();
        }
    }

    /**
     * Manually triggers a Hudi clean to remove obsolete file versions.
     * <p>
     * Cleaning policies:
     * <ul>
     *   <li>KEEP_LATEST_COMMITS (default): keep the latest N commits
     *       (default N=10, {@code hoodie.cleaner.commits.retained}).</li>
     *   <li>KEEP_LATEST_BY_HOURS: keep commits from the last N hours
     *       (default N=24, {@code hoodie.cleaner.hours.retained}).</li>
     *   <li>KEEP_LATEST_FILE_VERSIONS: keep the latest N file versions
     *       (default N=3, {@code hoodie.cleaner.fileversions.retained}).</li>
     * </ul>
     */
    private static void hudiTableClear_IsolateFiles() {
        log.info("hudiTableClear_IsolateFiles start...");

        // Initialize the Spark context
        SparkConf conf = new SparkConf()
                .setAppName("Hudi Clean Orphan Files")
                .setMaster("local[*]")
                .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
                .set("spark.kryo.registrator", "org.apache.spark.HoodieSparkKryoRegistrar")
                .set("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
                .set("spark.sql.catalog.spark_catalog", "org.apache.spark.sql.hudi.catalog.HoodieCatalog");

        JavaSparkContext javaSparkContext = new JavaSparkContext(conf);
        try {
            HoodieSparkEngineContext engineContext = new HoodieSparkEngineContext(javaSparkContext);

            // Build the HoodieWriteConfig (cleaning policy).
            // FIX: the previous withSchema("myhudi") was removed — withSchema() expects an
            // Avro schema string, and "myhudi" is a database name, not a valid schema.
            HoodieWriteConfig config = HoodieWriteConfig.newBuilder()
                    .forTable(tableName)
                    .withPath(tablePath)
                    .withCleanConfig(HoodieCleanConfig.newBuilder()
                            .withAutoClean(false) // disable automatic cleaning; triggered manually below
                            .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS)
                            .retainCommits(3) // keep the latest 3 commits (old comment wrongly said 5)
                            .build())
                    .build();

            try (SparkRDDWriteClient<?> client = new SparkRDDWriteClient<>(engineContext, config)) {
                // Run the clean operation
                HoodieCleanMetadata hoodieCleanMetadata = client.clean();
                log.info("cleaned result: {}", JsonUtils.toString(hoodieCleanMetadata));
            }
            log.info("hudiTableClear_IsolateFiles completed.");
        } finally {
            // FIX: previously skipped when clean() threw — stop the context unconditionally.
            javaSparkContext.stop();
        }
    }


    /**
     * Writes a small batch of rows into the Hudi table through the DataFrame API.
     * The table must be created in spark-sql beforehand so its metadata lives in
     * the Hive metastore; this program only writes Hudi table metadata and data
     * files to HDFS (no metastore sync).
     */
    private static void hudiTable_Writer() {
        log.info("hudiTableWriter start...");

        // loginKerberos();
        // Initialize the SparkSession
        SparkSession spark = SparkSession.builder()
                .appName("Hudi table Java Example")
                .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
                .config("spark.sql.catalog.spark_catalog", "org.apache.spark.sql.hudi.catalog.HoodieCatalog")
                .config("spark.kryo.registrator", "org.apache.spark.HoodieSparkKryoRegistrar")
                .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
                .master("local[1]")
                .getOrCreate();

/*     Run this DDL in spark-sql first so the table metadata is stored in the HMS:
       CREATE TABLE hudi_table1 (
                id BIGINT,
                NAME STRING,
                age INT,
                city STRING,
                birthdate DATE
        ) USING hudi TBLPROPERTIES (
                type = 'MERGE_ON_READ',
                primaryKey = 'id',
                preCombineField = 'id'
        )
        PARTITIONED BY ( city, birthdate )
        // LOCATION 'hdfs://master:9000/user/hive/warehouse/myhudi.db/hudi_table1'
        ;
        */
        // Define the schema.
        // NOTE(review): the DDL above declares id as BIGINT, but IntegerType is used
        // here — confirm which one the actual table uses.
        StructType schema = new StructType(new StructField[]{
                new StructField("id", DataTypes.IntegerType, false, Metadata.empty()),
                new StructField("name", DataTypes.StringType, false, Metadata.empty()),
                new StructField("age", DataTypes.IntegerType, false, Metadata.empty()),
                new StructField("city", DataTypes.StringType, false, Metadata.empty()),
                new StructField("birthdate", DataTypes.DateType, false, Metadata.empty()),
        });

        // Sample data; add more RowFactory.create(id, name, age, city, birthdate) entries as needed.
        java.util.List<org.apache.spark.sql.Row> listDatas = java.util.Arrays.asList(
                org.apache.spark.sql.RowFactory.create(20, "东东", 22, "广东省-东湾市", java.sql.Date.valueOf("2016-09-30"))
        );

        Dataset<org.apache.spark.sql.Row> df = spark.createDataFrame(listDatas, schema);

        // Hudi write options. Core keys:
        //   hoodie.table.name                                Hudi table name
        //   hoodie.datasource.write.table.type               table type (COPY_ON_WRITE or MERGE_ON_READ)
        //   hoodie.datasource.write.operation                write operation (upsert, insert, bulk_insert, delete)
        //   hoodie.datasource.write.recordkey.field          primary key field
        //   hoodie.datasource.write.precombine.field         pre-combine field for dedup (usually a timestamp)
        //   hoodie.datasource.write.keygenerator.class       key generator class (default SimpleKeyGenerator)
        //   hoodie.datasource.write.hive_style_partitioning  Hive-style partition paths (true/false)
        //   hoodie.datasource.write.partitionpath.field      partition field(s)
        //   hoodie.upsert.shuffle.parallelism                upsert parallelism
        //   hoodie.insert.shuffle.parallelism                insert parallelism
        //   hoodie.cleaner.policy                            cleaning policy (KEEP_LATEST_COMMITS, KEEP_LATEST_FILE_VERSIONS)
        //   hoodie.compact.inline                            inline compaction (true/false)
        Map<String, String> hudiOptions = new HashMap<>();
        hudiOptions.put("hoodie.table.name", tableName);
        hudiOptions.put("hoodie.datasource.write.recordkey.field", "id");
        hudiOptions.put("hoodie.datasource.write.partitionpath.field", "city,birthdate");
        hudiOptions.put("hoodie.datasource.write.table.name", tableName);
        hudiOptions.put("hoodie.datasource.write.operation", "insert");
        hudiOptions.put("hoodie.datasource.write.precombine.field", "id");
        // FIX: "hoodie.datasource.write.storage.type" is the legacy key name;
        // the current key for COPY_ON_WRITE / MERGE_ON_READ is ...write.table.type.
        hudiOptions.put("hoodie.datasource.write.table.type", "MERGE_ON_READ");
        hudiOptions.put("hoodie.upsert.shuffle.parallelism", "2");

        // Write the data into the Hudi table
        df.write()
                .format("hudi")
                .options(hudiOptions)
                .mode(SaveMode.Append)  // append mode (use Overwrite to replace)
                .save(tablePath);  // FIX: reuse the tablePath constant instead of re-concatenating the same path
        spark.stop();
    }

    /**
     * Prepares Hadoop UGI for Kerberos login: sets HADOOP_USER_NAME and applies
     * the cluster configuration. The actual keytab login is left commented out.
     *
     * @throws IOException once the commented-out loginUserFromKeytab call is enabled
     */
    private static void loginKerberos() throws IOException {
        System.setProperty("HADOOP_USER_NAME", "lifangyu");
//        System.setProperty("java.security.krb5.realm", "");
//        System.setProperty("java.security.krb5.conf", "");
//        System.setProperty("java.security.krb5.kdc", "");
        Configuration conf = getConf();
        UserGroupInformation.setConfiguration(conf);
//        UserGroupInformation.loginUserFromKeytab("hive/node1@TEST.COM", "/Users/lifangyu/Documents/project/bigdata/bigdata-project/hudi/src/main/resources/confs/hive.keytab");
//        UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
//        System.out.println("loginUser = " + loginUser);
    }

    /**
     * Builds a Hadoop {@link Configuration} from local copies of the cluster's
     * core-site / hdfs-site / hive-site files.
     *
     * @return the populated Hadoop configuration
     */
    private static Configuration getConf() {
        Configuration conf = new Configuration();
        conf.addResource("/Users/lifangyu/Documents/project/bigdata/bigdata-project/hudi/src/main/resources/confs/core-site.xml");
        conf.addResource("/Users/lifangyu/Documents/project/bigdata/bigdata-project/hudi/src/main/resources/confs/hdfs-site.xml");
        conf.addResource("/Users/lifangyu/Documents/project/bigdata/bigdata-project/hudi/src/main/resources/confs/hive-site.xml");
        return conf;
    }
}
