package com.bigdata.hudi;

import cn.hutool.json.JSONUtil;
import lombok.extern.log4j.Log4j2;
import org.apache.hadoop.conf.Configuration;
import org.apache.hudi.avro.model.HoodieCleanFileInfo;
import org.apache.hudi.avro.model.HoodieCleanMetadata;
import org.apache.hudi.avro.model.HoodieCleanPartitionMetadata;
import org.apache.hudi.avro.model.HoodieCleanerPlan;
import org.apache.hudi.client.HoodieJavaWriteClient;
import org.apache.hudi.client.common.HoodieJavaEngineContext;
import org.apache.hudi.common.model.HoodieCleaningPolicy;
import org.apache.hudi.common.table.timeline.HoodieInstant;
import org.apache.hudi.common.table.timeline.HoodieTimeline;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.config.HoodieCleanConfig;
import org.apache.hudi.config.HoodieWriteConfig;
import org.apache.hudi.storage.StorageConfiguration;
import org.apache.hudi.storage.hadoop.HadoopStorageConfiguration;
import org.apache.hudi.table.HoodieJavaCopyOnWriteTable;
import org.apache.hudi.table.HoodieJavaMergeOnReadTable;
import org.apache.hudi.table.HoodieJavaTable;
import org.apache.hudi.table.action.clean.CleaningTriggerStrategy;

import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

/**
 * Demonstrates scheduling a Hudi cleaner plan and executing it immediately,
 * logging the resulting cleaner plan and clean metadata field by field.
 *
 * @author Cyber
 * <p> Created on 2025/3/4
 * @version 1.0
 */
@Log4j2
public class HudiTableService {

    private static final String TABLE_NAME = "hudi_table";
    private static final String TABLE_PATH = "hdfs://master:9000/user/hive/warehouse/myhudi.db/" + TABLE_NAME;

    /** Number of latest commits whose files the cleaner retains (KEEP_LATEST_COMMITS policy). */
    private static final int CLEAN_RETAINER_COMMITS = 3;

    /** Visual divider reused between the logged result sections. */
    private static final String SEPARATOR =
            "\n\n===================================================== 分割线 ===================================================================\n\n";

    /**
     * Schedules a cleaner plan for the table, executes the clean immediately,
     * and logs both the plan and the resulting clean metadata.
     *
     * @param args unused
     * @throws IOException if the table metadata cannot be read from storage
     */
    public static void main(String[] args) throws IOException {
        HoodieWriteConfig writeConfig = HoodieWriteConfig.newBuilder()
                .forTable(TABLE_NAME)
                .withPath(TABLE_PATH)
                .withCleanConfig(getHoodieCleanConfig())
                .withEmbeddedTimelineServerEnabled(false)
                .build();

        // Pick up the HDFS client settings bundled on the classpath.
        Configuration conf = new Configuration();
        conf.addResource("conf/hdfs-site.xml");
        conf.addResource("conf/core-site.xml");
        StorageConfiguration<Configuration> storageConfiguration = new HadoopStorageConfiguration(conf);

        HoodieJavaEngineContext engineContext = new HoodieJavaEngineContext(storageConfiguration);
        HoodieJavaTable<?> table = HoodieJavaMergeOnReadTable.create(writeConfig, engineContext);

        // The write client is AutoCloseable; it is only needed to mint an instant time,
        // so scope it tightly instead of leaking it (the original never closed it).
        String planCleanInstant;
        try (HoodieJavaWriteClient<?> writeClient = new HoodieJavaWriteClient<>(engineContext, writeConfig)) {
            planCleanInstant = writeClient.createNewInstantTime();
        }
        log.info("clean plan instant = {}", planCleanInstant);

        // 调度清理 — compute which files are eligible for deletion under the clean config.
        Option<HoodieCleanerPlan> cleanerPlanOption =
                table.scheduleCleaning(engineContext, planCleanInstant, Option.empty());
        cleanerPlanOption.ifPresent(HudiTableService::logCleanerPlan);

        // 手动立即执行清理 — re-create the table so its timeline reflects the
        // just-requested clean instant. NOTE: this must be the same table type
        // (merge-on-read) as the one that scheduled the plan; the original
        // mixed a copy-on-write table in here.
        HoodieJavaTable<?> reloadedTable = HoodieJavaMergeOnReadTable.create(writeConfig, engineContext);
        HoodieCleanMetadata cleanMetadata = reloadedTable.clean(engineContext, planCleanInstant);
        if (cleanMetadata != null) {
            logCleanMetadata(cleanMetadata);
        }
    }

    /**
     * Logs every field of a scheduled cleaner plan, including the per-partition
     * list of files slated for deletion.
     *
     * @param plan the plan produced by {@code scheduleCleaning}
     */
    private static void logCleanerPlan(HoodieCleanerPlan plan) {
        log.info("clean.getSchema() = {}", plan.getSchema());
        log.info("clean.getPolicy() = {}", plan.getPolicy());
        log.info("clean.getVersion() = {}", plan.getVersion());
        log.info("clean.getEarliestInstantToRetain() = {}", plan.getEarliestInstantToRetain());
        log.info("clean.getLastCompletedCommitTimestamp() = {}", plan.getLastCompletedCommitTimestamp());
        Map<String, List<HoodieCleanFileInfo>> filePathsToBeDeletedPerPartition =
                plan.getFilePathsToBeDeletedPerPartition();
        filePathsToBeDeletedPerPartition.forEach((partition, files) -> {
            log.info("partition = {}", partition);
            for (HoodieCleanFileInfo fileInfo : files) {
                log.info("hoodieCleanFileInfo = {}", fileInfo);
                log.info("hoodieCleanFileInfo.getSpecificData() = {}", fileInfo.getSpecificData());
                log.info("hoodieCleanFileInfo.getFilePath() = {}", fileInfo.getFilePath());
                log.info("hoodieCleanFileInfo.getSchema() = {}", fileInfo.getSchema());
                log.info("hoodieCleanFileInfo.getIsBootstrapBaseFile() = {}", fileInfo.getIsBootstrapBaseFile());
            }
        });
        // Serialization of the (deprecated) path-only view may fail; best-effort only.
        try {
            log.info("clean.getFilesToBeDeletedPerPartition() = {}",
                    org.apache.hudi.common.util.JsonUtils.toString(plan.getFilesToBeDeletedPerPartition()));
        } catch (Exception exception) {
            log.error("Failed to serialize filesToBeDeletedPerPartition", exception);
        }
        try {
            log.info("clean.getPartitionsToBeDeleted() = {}", JSONUtil.toJsonStr(plan.getPartitionsToBeDeleted()));
        } catch (Exception exception) {
            log.error("Failed to serialize partitionsToBeDeleted", exception);
        }
        log.info("clean.getSpecificData() = {}", plan.getSpecificData());
    }

    /**
     * Logs every field of a completed clean, including per-partition results
     * and bootstrap partition metadata.
     *
     * @param metadata the metadata returned by {@code HoodieTable#clean}
     */
    private static void logCleanMetadata(HoodieCleanMetadata metadata) {
        log.info("clean.getSchema() = {}", metadata.getSchema());
        log.info("clean.getStartCleanTime() = {}", metadata.getStartCleanTime());
        log.info("clean.getVersion() = {}", metadata.getVersion());
        log.info("clean.getEarliestCommitToRetain() = {}", metadata.getEarliestCommitToRetain());
        log.info("clean.getLastCompletedCommitTimestamp() = {}", metadata.getLastCompletedCommitTimestamp());
        log.info("clean.getTotalFilesDeleted() = {}", metadata.getTotalFilesDeleted());
        log.info("clean.getTimeTakenInMillis() = {}", metadata.getTimeTakenInMillis());

        log.info(SEPARATOR);
        // Header row followed by one row per partition. The original passed these
        // values as varargs to a message with no {} placeholders, so Log4j2
        // silently dropped everything after the first string.
        log.info("{} | {} | {} | {} | {} | {} | {} | {} | {}",
                "k", "PartitionPath", "SuccessDeleteFiles", "DeletePathPatterns", "FailedDeleteFiles",
                "Schema", "PartitionDeleted", "Policy", "SpecificData");
        metadata.getPartitionMetadata().forEach((partition, partitionMetadata) ->
                log.info("{} | {} | {} | {} | {} | {} | {} | {} | {}",
                        partition,
                        partitionMetadata.getPartitionPath(),
                        JSONUtil.toJsonStr(partitionMetadata.getSuccessDeleteFiles()),
                        JSONUtil.toJsonStr(partitionMetadata.getDeletePathPatterns()),
                        JSONUtil.toJsonStr(partitionMetadata.getFailedDeleteFiles()),
                        partitionMetadata.getSchema(),
                        partitionMetadata.getIsPartitionDeleted(),
                        partitionMetadata.getPolicy(),
                        partitionMetadata.getSpecificData()));
        log.info(SEPARATOR);

        log.info(SEPARATOR);
        Map<String, HoodieCleanPartitionMetadata> bootstrapPartitionMetadata = metadata.getBootstrapPartitionMetadata();
        bootstrapPartitionMetadata.forEach((partition, partitionMetadata) -> {
            log.info("partition = {}", partition);
            // JSON serialization of each field is best-effort; failures are logged
            // with their cause instead of printStackTrace()/message-less error().
            try {
                log.info("v.getSuccessDeleteFiles() = {}", JSONUtil.toJsonStr(partitionMetadata.getSuccessDeleteFiles()));
            } catch (Exception exception) {
                log.error("Failed to serialize successDeleteFiles", exception);
            }
            try {
                log.info("v.getDeletePathPatterns() = {}", JSONUtil.toJsonStr(partitionMetadata.getDeletePathPatterns()));
            } catch (Exception exception) {
                log.error("Failed to serialize deletePathPatterns", exception);
            }
            try {
                log.info("v.getFailedDeleteFiles() = {}", JSONUtil.toJsonStr(partitionMetadata.getFailedDeleteFiles()));
            } catch (Exception exception) {
                log.error("Failed to serialize failedDeleteFiles", exception);
            }
            log.info("v.getSchema() = {}", partitionMetadata.getSchema());
            log.info("v.getIsPartitionDeleted() = {}", partitionMetadata.getIsPartitionDeleted());
            log.info("v.getPolicy() = {}", partitionMetadata.getPolicy());
            log.info("v.getSpecificData() = {}", partitionMetadata.getSpecificData());
        });
        log.info(SEPARATOR);
    }

    /**
     * Builds the cleaner configuration: manual (neither auto nor async) cleaning
     * that keeps the files referenced by the latest {@value #CLEAN_RETAINER_COMMITS}
     * commits, with a commit-count trigger strategy.
     *
     * @return the clean config used by the write config
     */
    private static HoodieCleanConfig getHoodieCleanConfig() {
        return HoodieCleanConfig.newBuilder()
                .withAutoClean(false)
                .withAsyncClean(false)
                // 清除策略：最后提交的三个commit。
                // KEEP_LATEST_COMMITS retains by commit count, so the matching
                // builder setting is retainCommits(...); the original called
                // cleanerNumHoursRetained(...), which only applies to the
                // KEEP_LATEST_BY_HOURS policy and left this value unused.
                .retainCommits(CLEAN_RETAINER_COMMITS)
                .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS)
                .withCleaningTriggerStrategy(CleaningTriggerStrategy.NUM_COMMITS.name())
                .build();
    }
}
