package com.bigdata.paimon;

import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.paimon.catalog.Catalog;
import org.apache.paimon.catalog.Catalog.DatabaseAlreadyExistException;
import org.apache.paimon.catalog.Catalog.DatabaseNotExistException;
import org.apache.paimon.catalog.CatalogContext;
import org.apache.paimon.catalog.CatalogFactory;
import org.apache.paimon.catalog.Database;
import org.apache.paimon.catalog.Identifier;
import org.apache.paimon.catalog.PropertyChange;
import org.apache.paimon.data.GenericRow;
import org.apache.paimon.data.InternalRow;
import org.apache.paimon.fs.Path;
import org.apache.paimon.options.Options;
import org.apache.paimon.predicate.PredicateBuilder;
import org.apache.paimon.reader.RecordReader;
import org.apache.paimon.schema.Schema;
import org.apache.paimon.schema.SchemaChange;
import org.apache.paimon.table.Table;
import org.apache.paimon.table.sink.BatchTableCommit;
import org.apache.paimon.table.sink.BatchTableWrite;
import org.apache.paimon.table.sink.BatchWriteBuilder;
import org.apache.paimon.table.sink.CommitMessage;
import org.apache.paimon.table.source.ReadBuilder;
import org.apache.paimon.table.source.Split;
import org.apache.paimon.table.source.TableRead;
import org.apache.paimon.types.DataTypes;
import org.apache.paimon.types.RowType;
import org.apache.paimon.utils.JsonSerdeUtil;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/**
 * description
 * <p>
 * https://paimon.apache.org/docs/1.0/program-api/java-api
 * </p>
 *
 * @author Cyber
 * <p> Created on 2025/4/1
 * @version 1.0
 */
@Slf4j
public class PaimonCatalog {

    // NOTE(review): HIVE_CATALOG_NAME and FILESYSTEM_CATALOG_NAME share the same value;
    // confirm whether the filesystem catalog was meant to have its own name.
    public final static String HIVE_CATALOG_NAME = "paimon_mycatalog";

    public final static String FILESYSTEM_CATALOG_NAME = "paimon_mycatalog";

    public final static String METASTORE_TYPE_HIVE = "hive";

    public final static String METASTORE_TYPE_FILESYSTEM = "filesystem";

    public final static String HMS_METASTORE_URL = "thrift://master:9083";

    public final static String HIVE_WAREHOUSE_PATH = "hdfs://master:9000/user/" + HIVE_CATALOG_NAME + "/warehouse";

    public final static String FILESYSTEM_WAREHOUSE_PATH = "hdfs://master:9000/user/" + FILESYSTEM_CATALOG_NAME + "/warehouse2";

    /**
     * Demo entry point: creates a Hive-backed Paimon catalog, ensures the
     * {@code default} database exists, and logs every database in the catalog.
     */
    public static void main(String[] args) {
        log.info("CreatePaimonCatalog sync metastore catalog start ...");
        Catalog catalog = getPaimonCatalog(HIVE_CATALOG_NAME, METASTORE_TYPE_HIVE);
        log.info("catalog: {}", catalog);
        try {
            // ignoreIfExists=true, so the catch below is only for the checked signature
            catalog.createDatabase("default", true);
        } catch (DatabaseAlreadyExistException e) {
            log.info("database default already exists in catalog {}", HIVE_CATALOG_NAME);
        }
        catalog.listDatabases().forEach(database -> log.info("database: {}", database));
    }

    /**
     * Returns a Paimon {@link Catalog} backed by the requested metastore type.
     *
     * @param catalogName   catalog name (used as the HMS catalog name for the hive type)
     * @param metastoreType {@link #METASTORE_TYPE_HIVE} or {@link #METASTORE_TYPE_FILESYSTEM}
     * @throws IllegalArgumentException if {@code metastoreType} is not supported
     */
    public static Catalog getPaimonCatalog(String catalogName, String metastoreType) {
        if (METASTORE_TYPE_HIVE.equals(metastoreType)) {
            return createPaimonCatalogByHiveCatalog(catalogName);
        } else if (METASTORE_TYPE_FILESYSTEM.equals(metastoreType)) {
            return createFilesystemCatalog(catalogName);
        }
        throw new IllegalArgumentException("metastoreType " + metastoreType + " is not support");
    }

    /**
     * Creates the database if it does not already exist.
     *
     * @return {@code true} if the database exists after this call (pre-existing or newly created)
     */
    public static boolean createPaimonDatabase(Catalog catalog, String databaseName) {
        try {
            Database database = catalog.getDatabase(databaseName);
            if (java.util.Objects.nonNull(database)) {
                return true;
            }
        } catch (DatabaseNotExistException e) {
            log.info("database {} not exist", databaseName);
        }
        try {
            catalog.createDatabase(databaseName, true);
            log.info("create database {} success", databaseName);
            return true;
        } catch (DatabaseAlreadyExistException e) {
            // Cannot happen with ignoreIfExists=true, but the signature requires the catch.
            log.info("database {} already exist", databaseName);
            return true;
        }
    }

    /**
     * Lists all databases in the catalog; returns an empty list on failure
     * instead of propagating the error.
     */
    public static List<String> listPaimonDatabases(Catalog catalog) {
        try {
            return catalog.listDatabases();
        } catch (Exception e) {
            log.error("list paimon databases error", e);
            return Collections.emptyList();
        }
    }

    /**
     * Demonstrates altering database properties: sets {@code version=1.0.1}
     * and removes property {@code v1}.
     *
     * @return {@code true} on success, {@code false} if the database does not exist
     */
    public static boolean alterPaimonDatabase(Catalog catalog, String databaseName) {
        List<PropertyChange> changes = new ArrayList<>();
        changes.add(PropertyChange.setProperty("version", "1.0.1"));
        changes.add(PropertyChange.removeProperty("v1"));
        try {
            catalog.alterDatabase(databaseName, changes, true);
            log.info("alter database {} success", databaseName);
            return true;
        } catch (DatabaseNotExistException e) {
            log.error("alter database {} not exist", databaseName, e);
        }
        return false;
    }

    /**
     * Drops a database without cascade: fails if the database still contains tables.
     *
     * @return {@code true} if the database was dropped
     */
    public static boolean dropPaimonDatabase(Catalog catalog, String databaseName) {
        try {
            // ignoreIfNotExists=false, cascade=false
            catalog.dropDatabase(databaseName, false, false);
            log.info("drop database {} success", databaseName);
            return true;
        } catch (DatabaseNotExistException e) {
            log.error("database {} not exist", databaseName, e);
        } catch (Catalog.DatabaseNotEmptyException e) {
            log.error("database {} is not empty(Existing table)", databaseName, e);
        }
        return false;
    }

    /**
     * Creates a table from the given schema.
     * <p>
     * Example schema:
     * <pre>{@code
     * Schema schema = Schema.newBuilder()
     *         .primaryKey("id")
     *         .partitionKeys("city")
     *         .column("name", DataTypes.STRING())
     *         .column("age", DataTypes.INT())
     *         .column("city", DataTypes.STRING())
     *         .build();
     * }</pre>
     *
     * @return {@code true} if the table was created
     */
    public static boolean createPaimonTable(Catalog catalog, String databaseName, String tableName, Schema schema) {
        Identifier identifier = Identifier.create(databaseName, tableName);
        try {
            catalog.createTable(identifier, schema, false);
            log.info("create table {}.{} success", databaseName, tableName);
            return true;
        } catch (Catalog.TableAlreadyExistException e) {
            log.error("table {}.{} already exist", databaseName, tableName, e);
        } catch (DatabaseNotExistException e) {
            log.error("database {} not exist", databaseName, e);
        }
        return false;
    }

    /**
     * Loads a table, converting the checked not-found exception into an
     * unchecked one (with the original exception preserved as cause).
     *
     * @throws RuntimeException if the table does not exist
     */
    public static Table getPaimonTable(Catalog catalog, String databaseName, String tableName) {
        Identifier identifier = Identifier.create(databaseName, tableName);
        try {
            return catalog.getTable(identifier);
        } catch (Catalog.TableNotExistException e) {
            log.error("table {}.{} not exist", databaseName, tableName, e);
            throw new RuntimeException("table " + databaseName + "." + tableName + " not exist", e);
        }
    }

    /**
     * Lists all tables in the database.
     *
     * @throws RuntimeException if the database does not exist
     */
    public static List<String> listPaimonTables(Catalog catalog, String databaseName) {
        try {
            return catalog.listTables(databaseName);
        } catch (DatabaseNotExistException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Drops a table if it exists (ignoreIfNotExists=true).
     *
     * @return {@code true} if the drop call succeeded
     */
    public static boolean dropPaimonTable(Catalog catalog, String databaseName, String tableName) {
        Identifier identifier = Identifier.create(databaseName, tableName);
        try {
            catalog.dropTable(identifier, true);
            log.info("drop table {}.{} success", databaseName, tableName);
            return true;
        } catch (Catalog.TableNotExistException e) {
            log.error("table {}.{} not exist", databaseName, tableName, e);
        }
        return false;
    }

    /**
     * Renames a table within the same database.
     *
     * @return {@code true} if the rename succeeded
     */
    public static boolean renamePaimonTable(Catalog catalog, String databaseName, String oldTableName, String newTableName) {
        Identifier fromIdentifier = Identifier.create(databaseName, oldTableName);
        Identifier newIdentifier = Identifier.create(databaseName, newTableName);
        try {
            catalog.renameTable(fromIdentifier, newIdentifier, false);
            log.info("rename table {}.{} to {}.{} success", databaseName, oldTableName, databaseName, newTableName);
            return true;
        } catch (Catalog.TableNotExistException e) {
            log.error("table {}.{} not exist", databaseName, oldTableName, e);
        } catch (Catalog.TableAlreadyExistException e) {
            log.error("table {}.{} already exist", databaseName, newTableName, e);
        }
        return false;
    }

    /**
     * Applies the caller-supplied schema changes to the table.
     * <p>
     * Build changes with the {@link SchemaChange} factory methods, e.g.
     * {@code SchemaChange.setOption(..)}, {@code SchemaChange.addColumn(..)},
     * {@code SchemaChange.renameColumn(..)}, {@code SchemaChange.dropColumn(..)},
     * {@code SchemaChange.updateColumnType(..)},
     * {@code SchemaChange.updateColumnPosition(SchemaChange.Move.after(..))}.
     *
     * @param schemaChanges the changes to apply (previously this method also built
     *                      unused hard-coded changes; that dead code was removed)
     * @return {@code true} if the alteration succeeded
     */
    public static boolean alterPaimonTable(Catalog catalog, String databaseName, String tableName, List<SchemaChange> schemaChanges) {
        Identifier identifier = Identifier.create(databaseName, tableName);
        try {
            catalog.alterTable(identifier, schemaChanges, false);
            log.info("alter table {}.{} success", databaseName, tableName);
            return true;
        } catch (Catalog.TableNotExistException e) {
            log.error("table {}.{} not exist", databaseName, tableName, e);
        } catch (Catalog.ColumnAlreadyExistException e) {
            log.error("table {}.{} column {} already exist", databaseName, tableName, JsonSerdeUtil.toJson(schemaChanges), e);
        } catch (Catalog.ColumnNotExistException e) {
            log.error("table {}.{} column {} not exist", databaseName, tableName, JsonSerdeUtil.toJson(schemaChanges), e);
        }
        return false;
    }

    /**
     * Batch-writes the given rows into the table in overwrite mode and commits.
     * <p>
     * Write and commit handles are {@link AutoCloseable} and are released via
     * try-with-resources (the original code leaked them).
     * For distributed writes use {@code writeBuilder.newWriteSelector()} to route
     * records to downstream writers.
     *
     * @param rowsDataList rows to write
     * @return {@code true} if all rows were written and committed
     */
    public static boolean batchWriteTableData(Catalog catalog, String databaseName, String tableName, List<GenericRow> rowsDataList) {
        Table table = getPaimonTable(catalog, databaseName, tableName);

        BatchWriteBuilder writeBuilder = table.newBatchWriteBuilder().withOverwrite();

        try (BatchTableWrite write = writeBuilder.newWrite()) {
            for (GenericRow record : rowsDataList) {
                // With dynamic bucketing enabled on the table, no explicit bucket is
                // required; otherwise use write.write(record, bucket).
                write.write(record);
            }
            List<CommitMessage> messages = write.prepareCommit();
            // Collect all CommitMessages to a global node and commit once.
            try (BatchTableCommit commit = writeBuilder.newCommit()) {
                commit.commit(messages);
            }
            return true;
        } catch (Exception e) {
            log.error("batch write to table {}.{} error", databaseName, tableName, e);
        }
        return false;
    }

    /**
     * Creates a batch reader over all splits of the table.
     * <p>
     * Push filters/projections on the {@link ReadBuilder} if needed, e.g.
     * <pre>{@code
     * PredicateBuilder builder = new PredicateBuilder(rowType);
     * readBuilder.withProjection(new int[]{0, 1})
     *            .withFilter(builder.isNotNull(0));
     * }</pre>
     *
     * @return a reader over every split, or {@code null} if creation failed
     */
    public static RecordReader<InternalRow> batchReadTableData(Catalog catalog, String databaseName, String tableName) {
        Table table = getPaimonTable(catalog, databaseName, tableName);

        // 1. Create a ReadBuilder (add withFilter/withProjection here if necessary).
        ReadBuilder readBuilder = table.newReadBuilder();

        // 2. Plan splits in the coordinator/driver.
        List<Split> splits = readBuilder.newScan().plan().splits();

        // 3. In a distributed job these splits would be handed to separate tasks;
        //    here we read them all in-process.
        TableRead read = readBuilder.newRead();
        try {
            return read.createReader(splits);
        } catch (java.io.IOException e) {
            log.error("batch read error", e);
        }
        return null;
    }

    /**
     * Ensures a catalog with the given name exists in the Hive Metastore,
     * creating it when absent. The HMS client is always closed.
     */
    private static void createMetastoreHmsCatalog(String catalogName) {
        log.info("create metastore catalog:{}", catalogName);
        org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
        // Provide the HMS connection via "hive.metastore.uris"
        // (alternatively: conf.addResource("hive-site.xml")).
        conf.set("hive.metastore.uris", HMS_METASTORE_URL);
        IMetaStoreClient client;
        try {
            client = RetryingMetaStoreClient.getProxy(conf, false);
        } catch (org.apache.hadoop.hive.metastore.api.MetaException e) {
            throw new RuntimeException(e);
        }
        try {
            try {
                org.apache.hadoop.hive.metastore.api.Catalog metastoreCatalog = client.getCatalog(catalogName);
                if (metastoreCatalog != null) {
                    log.warn("Catalog metastore {} already exists", catalogName);
                    return;
                }
            } catch (NoSuchObjectException e) {
                log.info("Catalog {} does not exist", catalogName);
            }

            org.apache.hadoop.hive.metastore.api.Catalog catalog =
                    new org.apache.hadoop.hive.metastore.api.Catalog(catalogName, "hdfs://master:9000/user/" + catalogName + "/warehouse");
            catalog.setDescription("create metastore catalog " + catalogName);
            client.createCatalog(catalog);
            log.info("Catalog {} created successfully", catalogName);
        } catch (org.apache.thrift.TException e) {
            log.error("创建Catalog失败", e);
        } finally {
            client.close();
        }
    }

    /**
     * Creates a filesystem-backed Paimon catalog rooted at
     * {@link #FILESYSTEM_WAREHOUSE_PATH}.
     *
     * @param catalogName currently unused — the warehouse path is fixed by the
     *                    constant; TODO confirm whether it should derive from this name
     */
    public static Catalog createFilesystemCatalog(String catalogName) {
        CatalogContext context = CatalogContext.create(new Path(FILESYSTEM_WAREHOUSE_PATH));
        return CatalogFactory.createCatalog(context);
    }

    /**
     * Creates a Hive-metastore-backed Paimon catalog and makes sure the
     * corresponding HMS catalog exists.
     * <p>
     * The Paimon Hive catalog relies on Hive jars — add the hive classpath or
     * the hive bundled jar. Connection details can also come from config dirs
     * via the "hive-conf-dir"/"hadoop-conf-dir" options.
     */
    public static Catalog createPaimonCatalogByHiveCatalog(String catalogName) {
        Options options = new Options();
        options.set("warehouse", HIVE_WAREHOUSE_PATH);
        options.set("metastore", "hive");
        options.set("metastore.catalog.default", catalogName);
        options.set("uri", HMS_METASTORE_URL);
        CatalogContext context = CatalogContext.create(options);
        Catalog catalog = CatalogFactory.createCatalog(context);
        createMetastoreHmsCatalog(catalogName);
        return catalog;
    }
}
